/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/* ====== Dependencies ======= */
#include "../common/allocations.h"  /* ZSTD_customCalloc, ZSTD_customFree */
#include "zstd_deps.h"  /* size_t */
#include "debug.h"      /* assert */
#include "pool.h"

/* ====== Compiler specifics ====== */
#if defined(_MSC_VER)
#  pragma warning(disable : 4204)   /* disable: C4204: non-constant aggregate initializer */
#endif


#ifdef ZSTD_MULTITHREAD

#include "threading.h"   /* pthread adaptation */

/* A job is a function and an opaque argument */
typedef struct POOL_job_s {
    POOL_function function;
    void *opaque;
} POOL_job;

struct POOL_ctx_s {
    ZSTD_customMem customMem;
    /* Keep track of the threads */
    ZSTD_pthread_t* threads;
    size_t threadCapacity;
    size_t threadLimit;

    /* The queue is a circular buffer */
    POOL_job *queue;
    size_t queueHead;
    size_t queueTail;
    size_t queueSize;

    /* The number of threads working on jobs */
    size_t numThreadsBusy;
    /* Indicates if the queue is empty */
    int queueEmpty;

    /* The mutex protects the queue */
    ZSTD_pthread_mutex_t queueMutex;
    /* Condition variable for pushers to wait on when the queue is full */
    ZSTD_pthread_cond_t queuePushCond;
    /* Condition variable for poppers to wait on when the queue is empty */
    ZSTD_pthread_cond_t queuePopCond;
    /* Indicates if the queue is shutting down */
    int shutdown;
};
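
/* Illustrative note (not part of the upstream file): the queue is a ring
 * buffer with one sentinel slot, so that "empty" and "full" stay
 * distinguishable.  A pool created with an intended queueSize of 3 therefore
 * stores queueSize == 4 and uses :
 *
 *     empty : queueHead == queueTail
 *     full  : (queueTail + 1) % queueSize == queueHead
 *
 * e.g. queueHead == 0 and queueTail == 3 means 3 jobs are stored and the
 * queue is full, with the sentinel slot left unused. */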

/* POOL_thread() :
 * Work thread for the thread pool.
 * Waits for jobs and executes them.
 * @returns : NULL on failure else non-null.
 */
static void* POOL_thread(void* opaque) {
    POOL_ctx* const ctx = (POOL_ctx*)opaque;
    if (!ctx) { return NULL; }
    for (;;) {
        /* Lock the mutex and wait for a non-empty queue or until shutdown */
        ZSTD_pthread_mutex_lock(&ctx->queueMutex);

        while ( ctx->queueEmpty
            || (ctx->numThreadsBusy >= ctx->threadLimit) ) {
            if (ctx->shutdown) {
                /* even if !queueEmpty (possible when numThreadsBusy >= threadLimit),
                 * a few threads may be shut down while !queueEmpty,
                 * but enough threads will remain active to finish the queue */
                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
                return opaque;
            }
            ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
        }
        /* Pop a job off the queue */
        {   POOL_job const job = ctx->queue[ctx->queueHead];
            ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
            ctx->numThreadsBusy++;
            ctx->queueEmpty = (ctx->queueHead == ctx->queueTail);
            /* Unlock the mutex, signal a pusher, and run the job */
            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);

            job.function(job.opaque);

            /* If the intended queue size was 0, signal after finishing job */
            ZSTD_pthread_mutex_lock(&ctx->queueMutex);
            ctx->numThreadsBusy--;
            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
        }
    }  /* for (;;) */
    assert(0);  /* Unreachable */
}

/* ZSTD_createThreadPool() : public access point */
POOL_ctx* ZSTD_createThreadPool(size_t numThreads) {
    return POOL_create (numThreads, 0);
}

POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}

POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
                               ZSTD_customMem customMem)
{
    POOL_ctx* ctx;
    /* Check parameters */
    if (!numThreads) { return NULL; }
    /* Allocate the context and zero initialize */
    ctx = (POOL_ctx*)ZSTD_customCalloc(sizeof(POOL_ctx), customMem);
    if (!ctx) { return NULL; }
    /* Initialize the job queue.
     * It needs one extra space since one space is wasted to differentiate
     * empty and full queues.
     */
    ctx->queueSize = queueSize + 1;
    ctx->queue = (POOL_job*)ZSTD_customCalloc(ctx->queueSize * sizeof(POOL_job), customMem);
    ctx->queueHead = 0;
    ctx->queueTail = 0;
    ctx->numThreadsBusy = 0;
    ctx->queueEmpty = 1;
    {
        int error = 0;
        error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
        error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
        error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
        if (error) { POOL_free(ctx); return NULL; }
    }
    ctx->shutdown = 0;
    /* Allocate space for the thread handles */
    ctx->threads = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
    ctx->threadCapacity = 0;
    ctx->customMem = customMem;
    /* Check for errors */
    if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
    /* Initialize the threads */
    {   size_t i;
        for (i = 0; i < numThreads; ++i) {
            if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
                ctx->threadCapacity = i;
                POOL_free(ctx);
                return NULL;
        }   }
        ctx->threadCapacity = numThreads;
        ctx->threadLimit = numThreads;
    }
    return ctx;
}
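
/* Minimal usage sketch (illustration only, compiled out; exampleJob and
 * exampleUsage are hypothetical names, not part of this file).  Jobs must
 * match POOL_function, i.e. void (*)(void* opaque). */
#if 0
static void exampleJob(void* opaque)
{
    int* const counter = (int*)opaque;
    *counter += 1;                          /* runs on one of the worker threads */
}

static void exampleUsage(void)
{
    int counter = 0;
    POOL_ctx* const pool = POOL_create(2 /* numThreads */, 8 /* queueSize */);
    if (pool == NULL) return;               /* allocation or thread creation failed */
    POOL_add(pool, exampleJob, &counter);   /* blocks only while the queue is full */
    POOL_joinJobs(pool);                    /* wait for queued jobs to finish */
    POOL_free(pool);                        /* shuts the queue down and joins the workers */
}
#endif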

/*! POOL_join() :
    Shut down the queue, wake any sleeping threads, and join all of the threads.
*/
static void POOL_join(POOL_ctx* ctx) {
    /* Shut down the queue */
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    ctx->shutdown = 1;
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    /* Wake up sleeping threads */
    ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);
    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
    /* Join all of the threads */
    {   size_t i;
        for (i = 0; i < ctx->threadCapacity; ++i) {
            ZSTD_pthread_join(ctx->threads[i]);  /* note : could fail */
    }   }
}

void POOL_free(POOL_ctx *ctx) {
    if (!ctx) { return; }
    POOL_join(ctx);
    ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
    ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
    ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
    ZSTD_customFree(ctx->queue, ctx->customMem);
    ZSTD_customFree(ctx->threads, ctx->customMem);
    ZSTD_customFree(ctx, ctx->customMem);
}

/*! POOL_joinJobs() :
 *  Waits for all queued jobs to finish executing.
 */
void POOL_joinJobs(POOL_ctx* ctx) {
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    while (!ctx->queueEmpty || ctx->numThreadsBusy > 0) {
        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
    }
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
}

void ZSTD_freeThreadPool (ZSTD_threadPool* pool) {
    POOL_free (pool);
}

size_t POOL_sizeof(const POOL_ctx* ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
    return sizeof(*ctx)
         + ctx->queueSize * sizeof(POOL_job)
         + ctx->threadCapacity * sizeof(ZSTD_pthread_t);
}


/* @return : 0 on success, 1 on error */
static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
{
    if (numThreads <= ctx->threadCapacity) {
        if (!numThreads) return 1;
        ctx->threadLimit = numThreads;
        return 0;
    }
    /* numThreads > threadCapacity */
    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
        if (!threadPool) return 1;
        /* replace existing thread pool */
        ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
        ZSTD_customFree(ctx->threads, ctx->customMem);
        ctx->threads = threadPool;
        /* Initialize additional threads */
        {   size_t threadId;
            for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) {
                if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) {
                    ctx->threadCapacity = threadId;
                    return 1;
            }   }
    }   }
    /* successfully expanded */
    ctx->threadCapacity = numThreads;
    ctx->threadLimit = numThreads;
    return 0;
}

/* @return : 0 on success, 1 on error */
int POOL_resize(POOL_ctx* ctx, size_t numThreads)
{
    int result;
    if (ctx==NULL) return 1;
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    result = POOL_resize_internal(ctx, numThreads);
    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    return result;
}
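
/* Illustrative sketch (compiled out; exampleResize is a hypothetical helper,
 * not part of this file): shrinking only lowers threadLimit, so existing
 * workers stay alive but idle, while growing past the current threadCapacity
 * spawns the missing worker threads. */
#if 0
static void exampleResize(void)
{
    POOL_ctx* const pool = POOL_create(4 /* numThreads */, 16 /* queueSize */);
    if (pool == NULL) return;
    (void)POOL_resize(pool, 2);   /* threadCapacity stays 4, threadLimit drops to 2 */
    (void)POOL_resize(pool, 8);   /* spawns 4 more workers; capacity and limit become 8 */
    POOL_free(pool);
}
#endif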

/**
 * Returns 1 if the queue is full and 0 otherwise.
 *
 * When queueSize is 1 (pool was created with an intended queueSize of 0),
 * the queue is considered non-full only if a thread is free _and_ no job is waiting.
 */
static int isQueueFull(POOL_ctx const* ctx) {
    if (ctx->queueSize > 1) {
        return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
    } else {
        return (ctx->numThreadsBusy == ctx->threadLimit) ||
               !ctx->queueEmpty;
    }
}
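
/* Worked example (illustration only, compiled out): a pool created with an
 * intended queueSize of 0 stores ctx->queueSize == 1, so the ring buffer can
 * never hold a waiting job; "full" then means every permitted worker is busy
 * or a submitted job has not yet been picked up. */
#if 0
    /* hypothetical state, assuming a pool created with POOL_create(2, 0) : */
    assert(ctx->queueSize == 1);
    assert(ctx->threadLimit == 2);
    assert(ctx->numThreadsBusy == 2);
    assert(isQueueFull(ctx));   /* POOL_add() would block on queuePushCond */
#endif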


static void
POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
{
    POOL_job job;
    job.function = function;
    job.opaque = opaque;
    assert(ctx != NULL);
    if (ctx->shutdown) return;

    ctx->queueEmpty = 0;
    ctx->queue[ctx->queueTail] = job;
    ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
    ZSTD_pthread_cond_signal(&ctx->queuePopCond);
}

void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)
{
    assert(ctx != NULL);
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    /* Wait until there is space in the queue for the new job */
    while (isQueueFull(ctx) && (!ctx->shutdown)) {
        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
    }
    POOL_add_internal(ctx, function, opaque);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
}


int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
{
    assert(ctx != NULL);
    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
    if (isQueueFull(ctx)) {
        ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
        return 0;
    }
    POOL_add_internal(ctx, function, opaque);
    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
    return 1;
}
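
/* Usage sketch (illustration only, compiled out): POOL_tryAdd() lets a caller
 * avoid blocking when the queue is full, e.g. by running the job on the
 * calling thread instead.  exampleJob and counter refer to the hypothetical
 * sketch shown after POOL_create_advanced() above. */
#if 0
    if (!POOL_tryAdd(pool, exampleJob, &counter)) {
        exampleJob(&counter);   /* queue full : execute synchronously instead */
    }
#endif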


#else  /* ZSTD_MULTITHREAD not defined */

/* ========================== */
/* No multi-threading support */
/* ========================== */


/* We don't need any data, but if it is empty, malloc() might return NULL. */
struct POOL_ctx_s {
    int dummy;
};
static POOL_ctx g_poolCtx;

POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
}

POOL_ctx*
POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem)
{
    (void)numThreads;
    (void)queueSize;
    (void)customMem;
    return &g_poolCtx;
}

void POOL_free(POOL_ctx* ctx) {
    assert(!ctx || ctx == &g_poolCtx);
    (void)ctx;
}

void POOL_joinJobs(POOL_ctx* ctx) {
    assert(!ctx || ctx == &g_poolCtx);
    (void)ctx;
}

int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
    (void)ctx; (void)numThreads;
    return 0;
}

void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
    (void)ctx;
    function(opaque);
}

int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
    (void)ctx;
    function(opaque);
    return 1;
}

size_t POOL_sizeof(const POOL_ctx* ctx) {
    if (ctx==NULL) return 0;  /* supports sizeof NULL */
    assert(ctx == &g_poolCtx);
    return sizeof(*ctx);
}

#endif  /* ZSTD_MULTITHREAD */