git subrepo pull (merge) --force deps/libchdr
pcsx_rearmed.git / deps/libchdr/deps/zstd-1.5.6/lib/compress/zstdmt_compress.c
648db22b 1/*
2 * Copyright (c) Meta Platforms, Inc. and affiliates.
3 * All rights reserved.
4 *
5 * This source code is licensed under both the BSD-style license (found in the
6 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7 * in the COPYING file in the root directory of this source tree).
8 * You may select, at your option, one of the above-listed licenses.
9 */
10
11
12/* ====== Compiler specifics ====== */
13#if defined(_MSC_VER)
14# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
15#endif
16
17
648db22b 18/* ====== Dependencies ====== */
f535537f 19#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */
648db22b 20#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset, INT_MAX, UINT_MAX */
21#include "../common/mem.h" /* MEM_STATIC */
22#include "../common/pool.h" /* threadpool */
23#include "../common/threading.h" /* mutex */
f535537f 24#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
648db22b 25#include "zstd_ldm.h"
26#include "zstdmt_compress.h"
27
28/* Guards code to support resizing the SeqPool.
29 * We will want to resize the SeqPool to save memory in the future.
30 * Until then, the code is compiled out, since it is unused.
31 */
32#define ZSTD_RESIZE_SEQPOOL 0
33
34/* ====== Debug ====== */
35#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \
36 && !defined(_MSC_VER) \
37 && !defined(__MINGW32__)
38
39# include <stdio.h>
40# include <unistd.h>
41# include <sys/times.h>
42
f535537f 43# define DEBUG_PRINTHEX(l,p,n) \
44 do { \
45 unsigned debug_u; \
46 for (debug_u=0; debug_u<(n); debug_u++) \
47 RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
48 RAWLOG(l, " \n"); \
49 } while (0)
648db22b 50
51static unsigned long long GetCurrentClockTimeMicroseconds(void)
52{
53 static clock_t _ticksPerSecond = 0;
54 if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);
55
56 { struct tms junk; clock_t newTicks = (clock_t) times(&junk);
57 return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);
58} }
59
60#define MUTEX_WAIT_TIME_DLEVEL 6
f535537f 61#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) \
62 do { \
63 if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) { \
64 unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
65 ZSTD_pthread_mutex_lock(mutex); \
66 { unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
67 unsigned long long const elapsedTime = (afterTime-beforeTime); \
68 if (elapsedTime > 1000) { \
69 /* threshold of 1 millisecond, chosen arbitrarily */ \
70 DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, \
71 "Thread took %llu microseconds to acquire mutex %s \n", \
72 elapsedTime, #mutex); \
73 } } \
74 } else { \
75 ZSTD_pthread_mutex_lock(mutex); \
76 } \
77 } while (0)
648db22b 78
79#else
80
81# define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)
f535537f 82# define DEBUG_PRINTHEX(l,p,n) do { } while (0)
648db22b 83
84#endif
85
86
87/* ===== Buffer Pool ===== */
88/* a single Buffer Pool can be invoked from multiple threads in parallel */
89
90typedef struct buffer_s {
91 void* start;
92 size_t capacity;
93} buffer_t;
94
95static const buffer_t g_nullBuffer = { NULL, 0 };
96
97typedef struct ZSTDMT_bufferPool_s {
98 ZSTD_pthread_mutex_t poolMutex;
99 size_t bufferSize;
100 unsigned totalBuffers;
101 unsigned nbBuffers;
102 ZSTD_customMem cMem;
f535537f 103 buffer_t* buffers;
648db22b 104} ZSTDMT_bufferPool;
105
f535537f 106static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
107{
108 DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
109 if (!bufPool) return; /* compatibility with free on NULL */
110 if (bufPool->buffers) {
111 unsigned u;
112 for (u=0; u<bufPool->totalBuffers; u++) {
113 DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->buffers[u].start);
114 ZSTD_customFree(bufPool->buffers[u].start, bufPool->cMem);
115 }
116 ZSTD_customFree(bufPool->buffers, bufPool->cMem);
117 }
118 ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
119 ZSTD_customFree(bufPool, bufPool->cMem);
120}
121
648db22b 122static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_customMem cMem)
123{
f535537f 124 ZSTDMT_bufferPool* const bufPool =
125 (ZSTDMT_bufferPool*)ZSTD_customCalloc(sizeof(ZSTDMT_bufferPool), cMem);
648db22b 126 if (bufPool==NULL) return NULL;
127 if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
128 ZSTD_customFree(bufPool, cMem);
129 return NULL;
130 }
f535537f 131 bufPool->buffers = (buffer_t*)ZSTD_customCalloc(maxNbBuffers * sizeof(buffer_t), cMem);
132 if (bufPool->buffers==NULL) {
133 ZSTDMT_freeBufferPool(bufPool);
134 return NULL;
135 }
648db22b 136 bufPool->bufferSize = 64 KB;
137 bufPool->totalBuffers = maxNbBuffers;
138 bufPool->nbBuffers = 0;
139 bufPool->cMem = cMem;
140 return bufPool;
141}
142
648db22b 143/* only works at initialization, not during compression */
144static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
145{
f535537f 146 size_t const poolSize = sizeof(*bufPool);
147 size_t const arraySize = bufPool->totalBuffers * sizeof(buffer_t);
648db22b 148 unsigned u;
149 size_t totalBufferSize = 0;
150 ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
151 for (u=0; u<bufPool->totalBuffers; u++)
f535537f 152 totalBufferSize += bufPool->buffers[u].capacity;
648db22b 153 ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
154
f535537f 155 return poolSize + arraySize + totalBufferSize;
648db22b 156}
157
158/* ZSTDMT_setBufferSize() :
159 * all future buffers provided by this buffer pool will have _at least_ this size
160 * note : it's better for all buffers to have same size,
161 * as they become freely interchangeable, reducing malloc/free usage and memory fragmentation */
162static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize)
163{
164 ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
165 DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize);
166 bufPool->bufferSize = bSize;
167 ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
168}
169
170
171static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, unsigned maxNbBuffers)
172{
173 if (srcBufPool==NULL) return NULL;
174 if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */
175 return srcBufPool;
176 /* need a larger buffer pool */
177 { ZSTD_customMem const cMem = srcBufPool->cMem;
178 size_t const bSize = srcBufPool->bufferSize; /* forward parameters */
179 ZSTDMT_bufferPool* newBufPool;
180 ZSTDMT_freeBufferPool(srcBufPool);
181 newBufPool = ZSTDMT_createBufferPool(maxNbBuffers, cMem);
182 if (newBufPool==NULL) return newBufPool;
183 ZSTDMT_setBufferSize(newBufPool, bSize);
184 return newBufPool;
185 }
186}
187
188/** ZSTDMT_getBuffer() :
189 * assumption : bufPool must be valid
190 * @return : a buffer, with start pointer and size
191 * note: allocation may fail; in that case, start==NULL and capacity==0 */
192static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
193{
194 size_t const bSize = bufPool->bufferSize;
195 DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize);
196 ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
197 if (bufPool->nbBuffers) { /* try to use an existing buffer */
f535537f 198 buffer_t const buf = bufPool->buffers[--(bufPool->nbBuffers)];
648db22b 199 size_t const availBufferSize = buf.capacity;
f535537f 200 bufPool->buffers[bufPool->nbBuffers] = g_nullBuffer;
648db22b 201 if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {
202 /* large enough, but not too much */
203 DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u",
204 bufPool->nbBuffers, (U32)buf.capacity);
205 ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
206 return buf;
207 }
208 /* size conditions not respected : scratch this buffer, create new one */
209 DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing");
210 ZSTD_customFree(buf.start, bufPool->cMem);
211 }
212 ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
213 /* create new buffer */
214 DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
215 { buffer_t buffer;
216 void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
217 buffer.start = start; /* note : start can be NULL if malloc fails ! */
218 buffer.capacity = (start==NULL) ? 0 : bSize;
219 if (start==NULL) {
220 DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!");
221 } else {
222 DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize);
223 }
224 return buffer;
225 }
226}
227
228#if ZSTD_RESIZE_SEQPOOL
229/** ZSTDMT_resizeBuffer() :
230 * assumption : bufPool must be valid
231 * @return : a buffer that is at least the buffer pool buffer size.
232 * If a reallocation happens, the data in the input buffer is copied.
233 */
234static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
235{
236 size_t const bSize = bufPool->bufferSize;
237 if (buffer.capacity < bSize) {
238 void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
239 buffer_t newBuffer;
240 newBuffer.start = start;
241 newBuffer.capacity = start == NULL ? 0 : bSize;
242 if (start != NULL) {
243 assert(newBuffer.capacity >= buffer.capacity);
244 ZSTD_memcpy(newBuffer.start, buffer.start, buffer.capacity);
245 DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize);
246 return newBuffer;
247 }
248 DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!");
249 }
250 return buffer;
251}
252#endif
253
254/* store buffer for later re-use, up to pool capacity */
255static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
256{
257 DEBUGLOG(5, "ZSTDMT_releaseBuffer");
258 if (buf.start == NULL) return; /* compatible with release on NULL */
259 ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
260 if (bufPool->nbBuffers < bufPool->totalBuffers) {
f535537f 261 bufPool->buffers[bufPool->nbBuffers++] = buf; /* stored for later use */
648db22b 262 DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u",
263 (U32)buf.capacity, (U32)(bufPool->nbBuffers-1));
264 ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
265 return;
266 }
267 ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
f535537f 268 /* Reached bufferPool capacity (note: should not happen) */
648db22b 269 DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing ");
270 ZSTD_customFree(buf.start, bufPool->cMem);
271}
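
/* Illustration (editorial sketch, not part of zstd) : the expected
 * borrow/return cycle for the buffer pool above. The names are the real ones
 * from this file; the flow itself is a hedged example, compiled out. */
#if 0
static void bufferPool_usage_sketch(void)
{
    ZSTDMT_bufferPool* const pool =
        ZSTDMT_createBufferPool(11 /* 2*4+3, see BUF_POOL_MAX_NB_BUFFERS below */, ZSTD_defaultCMem);
    if (pool == NULL) return;
    ZSTDMT_setBufferSize(pool, 1 MB);       /* future buffers will have at least this size */
    {   buffer_t const buf = ZSTDMT_getBuffer(pool);   /* {NULL,0} on allocation failure */
        if (buf.start != NULL) {
            /* ... use up to buf.capacity bytes at buf.start ... */
            ZSTDMT_releaseBuffer(pool, buf);  /* cached for reuse, up to pool capacity */
    }   }
    ZSTDMT_freeBufferPool(pool);   /* all borrowed buffers must be back in the pool first */
}
#endif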
272
273/* We need 2 output buffers per worker since each dstBuff must be flushed after it is released.
274 * The 3 additional buffers are as follows:
275 * 1 buffer for input loading
276 * 1 buffer for "next input" when submitting current one
277 * 1 buffer stuck in queue */
278#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) (2*(nbWorkers) + 3)
279
280/* After a worker releases its rawSeqStore, it is immediately ready for reuse.
281 * So we only need one seq buffer per worker. */
282#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) (nbWorkers)
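
/* Worked example (editorial note, not in upstream zstd) :
 * with nbWorkers==4, BUF_POOL_MAX_NB_BUFFERS(4) == 2*4+3 == 11 :
 * 8 output buffers (2 per worker) + 1 for input loading
 * + 1 for "next input" + 1 stuck in queue.
 * SEQ_POOL_MAX_NB_BUFFERS(4) == 4 : one rawSeqStore per worker suffices,
 * since a released rawSeqStore is immediately reusable. */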
283
284/* ===== Seq Pool Wrapper ====== */
285
286typedef ZSTDMT_bufferPool ZSTDMT_seqPool;
287
288static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
289{
290 return ZSTDMT_sizeof_bufferPool(seqPool);
291}
292
293static rawSeqStore_t bufferToSeq(buffer_t buffer)
294{
295 rawSeqStore_t seq = kNullRawSeqStore;
296 seq.seq = (rawSeq*)buffer.start;
297 seq.capacity = buffer.capacity / sizeof(rawSeq);
298 return seq;
299}
300
301static buffer_t seqToBuffer(rawSeqStore_t seq)
302{
303 buffer_t buffer;
304 buffer.start = seq.seq;
305 buffer.capacity = seq.capacity * sizeof(rawSeq);
306 return buffer;
307}
308
309static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)
310{
311 if (seqPool->bufferSize == 0) {
312 return kNullRawSeqStore;
313 }
314 return bufferToSeq(ZSTDMT_getBuffer(seqPool));
315}
316
317#if ZSTD_RESIZE_SEQPOOL
318static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
319{
320 return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq)));
321}
322#endif
323
324static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
325{
326 ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq));
327}
328
329static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)
330{
331 ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq));
332}
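
/* Example (editorial note) : a seq pool is just a buffer pool measured in
 * rawSeq units. ZSTDMT_setNbSeq(seqPool, 1024) makes every future
 * ZSTDMT_getSeq() return a store with capacity == 1024 sequences, i.e. an
 * underlying buffer of 1024 * sizeof(rawSeq) bytes; bufferToSeq() and
 * seqToBuffer() convert between the two views without copying. */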
333
334static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
335{
336 ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(SEQ_POOL_MAX_NB_BUFFERS(nbWorkers), cMem);
337 if (seqPool == NULL) return NULL;
338 ZSTDMT_setNbSeq(seqPool, 0);
339 return seqPool;
340}
341
342static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
343{
344 ZSTDMT_freeBufferPool(seqPool);
345}
346
347static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)
348{
349 return ZSTDMT_expandBufferPool(pool, SEQ_POOL_MAX_NB_BUFFERS(nbWorkers));
350}
351
352
353/* ===== CCtx Pool ===== */
354/* a single CCtx Pool can be invoked from multiple threads in parallel */
355
356typedef struct {
357 ZSTD_pthread_mutex_t poolMutex;
358 int totalCCtx;
359 int availCCtx;
360 ZSTD_customMem cMem;
f535537f 361 ZSTD_CCtx** cctxs;
648db22b 362} ZSTDMT_CCtxPool;
363
f535537f 364/* note : all CCtxs borrowed from the pool must be returned to the pool _before_ freeing the pool */
648db22b 365static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
366{
f535537f 367 if (!pool) return;
648db22b 368 ZSTD_pthread_mutex_destroy(&pool->poolMutex);
f535537f 369 if (pool->cctxs) {
370 int cid;
371 for (cid=0; cid<pool->totalCCtx; cid++)
372 ZSTD_freeCCtx(pool->cctxs[cid]); /* free compatible with NULL */
373 ZSTD_customFree(pool->cctxs, pool->cMem);
374 }
648db22b 375 ZSTD_customFree(pool, pool->cMem);
376}
377
378/* ZSTDMT_createCCtxPool() :
379 * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */
380static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,
381 ZSTD_customMem cMem)
382{
f535537f 383 ZSTDMT_CCtxPool* const cctxPool =
384 (ZSTDMT_CCtxPool*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtxPool), cMem);
648db22b 385 assert(nbWorkers > 0);
386 if (!cctxPool) return NULL;
387 if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
388 ZSTD_customFree(cctxPool, cMem);
389 return NULL;
390 }
648db22b 391 cctxPool->totalCCtx = nbWorkers;
f535537f 392 cctxPool->cctxs = (ZSTD_CCtx**)ZSTD_customCalloc(nbWorkers * sizeof(ZSTD_CCtx*), cMem);
393 if (!cctxPool->cctxs) {
394 ZSTDMT_freeCCtxPool(cctxPool);
395 return NULL;
396 }
397 cctxPool->cMem = cMem;
398 cctxPool->cctxs[0] = ZSTD_createCCtx_advanced(cMem);
399 if (!cctxPool->cctxs[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
648db22b 400 cctxPool->availCCtx = 1; /* at least one cctx for single-thread mode */
648db22b 401 DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers);
402 return cctxPool;
403}
404
405static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool,
406 int nbWorkers)
407{
408 if (srcPool==NULL) return NULL;
409 if (nbWorkers <= srcPool->totalCCtx) return srcPool; /* good enough */
410 /* need a larger cctx pool */
411 { ZSTD_customMem const cMem = srcPool->cMem;
412 ZSTDMT_freeCCtxPool(srcPool);
413 return ZSTDMT_createCCtxPool(nbWorkers, cMem);
414 }
415}
416
417/* only works during initialization phase, not during compression */
418static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
419{
420 ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
421 { unsigned const nbWorkers = cctxPool->totalCCtx;
f535537f 422 size_t const poolSize = sizeof(*cctxPool);
423 size_t const arraySize = cctxPool->totalCCtx * sizeof(ZSTD_CCtx*);
648db22b 424 size_t totalCCtxSize = 0;
f535537f 425 unsigned u;
648db22b 426 for (u=0; u<nbWorkers; u++) {
f535537f 427 totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctxs[u]);
648db22b 428 }
429 ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
430 assert(nbWorkers > 0);
f535537f 431 return poolSize + arraySize + totalCCtxSize;
648db22b 432 }
433}
434
435static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
436{
437 DEBUGLOG(5, "ZSTDMT_getCCtx");
438 ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
439 if (cctxPool->availCCtx) {
440 cctxPool->availCCtx--;
f535537f 441 { ZSTD_CCtx* const cctx = cctxPool->cctxs[cctxPool->availCCtx];
648db22b 442 ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
443 return cctx;
444 } }
445 ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
446 DEBUGLOG(5, "create one more CCtx");
447 return ZSTD_createCCtx_advanced(cctxPool->cMem); /* note : can be NULL, when creation fails ! */
448}
449
450static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
451{
452 if (cctx==NULL) return; /* compatibility with release on NULL */
453 ZSTD_pthread_mutex_lock(&pool->poolMutex);
454 if (pool->availCCtx < pool->totalCCtx)
f535537f 455 pool->cctxs[pool->availCCtx++] = cctx;
648db22b 456 else {
457 /* pool overflow : should not happen, since totalCCtx==nbWorkers */
458 DEBUGLOG(4, "CCtx pool overflow : free cctx");
459 ZSTD_freeCCtx(cctx);
460 }
461 ZSTD_pthread_mutex_unlock(&pool->poolMutex);
462}
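
/* Illustration (editorial sketch, not part of zstd) : CCtx pool discipline.
 * Only cctxs[0] is allocated eagerly by ZSTDMT_createCCtxPool(); further CCtx
 * are created lazily by ZSTDMT_getCCtx() when the pool runs dry, and cached
 * again on release. Compiled out; `cctxPool` stands for any live pool. */
#if 0
{   ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(cctxPool);   /* may be NULL on allocation failure */
    if (cctx != NULL) {
        /* ... compress one job with cctx ... */
        ZSTDMT_releaseCCtx(cctxPool, cctx);  /* returned to the pool, or freed if the pool is full */
}   }
#endif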
463
464/* ==== Serial State ==== */
465
466typedef struct {
467 void const* start;
468 size_t size;
469} range_t;
470
471typedef struct {
472 /* All variables in the struct are protected by mutex. */
473 ZSTD_pthread_mutex_t mutex;
474 ZSTD_pthread_cond_t cond;
475 ZSTD_CCtx_params params;
476 ldmState_t ldmState;
477 XXH64_state_t xxhState;
478 unsigned nextJobID;
479 /* Protects ldmWindow.
480 * Must be acquired after the main mutex when acquiring both.
481 */
482 ZSTD_pthread_mutex_t ldmWindowMutex;
483 ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is updated */
484 ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */
485} serialState_t;
486
487static int
488ZSTDMT_serialState_reset(serialState_t* serialState,
489 ZSTDMT_seqPool* seqPool,
490 ZSTD_CCtx_params params,
491 size_t jobSize,
492 const void* dict, size_t const dictSize,
493 ZSTD_dictContentType_e dictContentType)
494{
495 /* Adjust parameters */
496 if (params.ldmParams.enableLdm == ZSTD_ps_enable) {
497 DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
498 ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
499 assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
500 assert(params.ldmParams.hashRateLog < 32);
501 } else {
502 ZSTD_memset(&params.ldmParams, 0, sizeof(params.ldmParams));
503 }
504 serialState->nextJobID = 0;
505 if (params.fParams.checksumFlag)
506 XXH64_reset(&serialState->xxhState, 0);
507 if (params.ldmParams.enableLdm == ZSTD_ps_enable) {
508 ZSTD_customMem cMem = params.customMem;
509 unsigned const hashLog = params.ldmParams.hashLog;
510 size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
511 unsigned const bucketLog =
512 params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;
513 unsigned const prevBucketLog =
514 serialState->params.ldmParams.hashLog -
515 serialState->params.ldmParams.bucketSizeLog;
516 size_t const numBuckets = (size_t)1 << bucketLog;
517 /* Size the seq pool tables */
518 ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
519 /* Reset the window */
520 ZSTD_window_init(&serialState->ldmState.window);
521 /* Resize tables and output space if necessary. */
522 if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
523 ZSTD_customFree(serialState->ldmState.hashTable, cMem);
524 serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem);
525 }
526 if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
527 ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
528 serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_customMalloc(numBuckets, cMem);
529 }
530 if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
531 return 1;
532 /* Zero the tables */
533 ZSTD_memset(serialState->ldmState.hashTable, 0, hashSize);
534 ZSTD_memset(serialState->ldmState.bucketOffsets, 0, numBuckets);
535
536 /* Update window state and fill hash table with dict */
537 serialState->ldmState.loadedDictEnd = 0;
538 if (dictSize > 0) {
539 if (dictContentType == ZSTD_dct_rawContent) {
540 BYTE const* const dictEnd = (const BYTE*)dict + dictSize;
541 ZSTD_window_update(&serialState->ldmState.window, dict, dictSize, /* forceNonContiguous */ 0);
542 ZSTD_ldm_fillHashTable(&serialState->ldmState, (const BYTE*)dict, dictEnd, &params.ldmParams);
543 serialState->ldmState.loadedDictEnd = params.forceWindow ? 0 : (U32)(dictEnd - serialState->ldmState.window.base);
544 } else {
545 /* don't even load anything */
546 }
547 }
548
549 /* Initialize serialState's copy of ldmWindow. */
550 serialState->ldmWindow = serialState->ldmState.window;
551 }
552
553 serialState->params = params;
554 serialState->params.jobSize = (U32)jobSize;
555 return 0;
556}
557
558static int ZSTDMT_serialState_init(serialState_t* serialState)
559{
560 int initError = 0;
561 ZSTD_memset(serialState, 0, sizeof(*serialState));
562 initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);
563 initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);
564 initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);
565 initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL);
566 return initError;
567}
568
569static void ZSTDMT_serialState_free(serialState_t* serialState)
570{
571 ZSTD_customMem cMem = serialState->params.customMem;
572 ZSTD_pthread_mutex_destroy(&serialState->mutex);
573 ZSTD_pthread_cond_destroy(&serialState->cond);
574 ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);
575 ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);
576 ZSTD_customFree(serialState->ldmState.hashTable, cMem);
577 ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
578}
579
580static void ZSTDMT_serialState_update(serialState_t* serialState,
581 ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore,
582 range_t src, unsigned jobID)
583{
584 /* Wait for our turn */
585 ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
586 while (serialState->nextJobID < jobID) {
587 DEBUGLOG(5, "wait for serialState->cond");
588 ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);
589 }
590 /* A future job may error and skip our job */
591 if (serialState->nextJobID == jobID) {
592 /* It is now our turn, do any processing necessary */
593 if (serialState->params.ldmParams.enableLdm == ZSTD_ps_enable) {
594 size_t error;
595 assert(seqStore.seq != NULL && seqStore.pos == 0 &&
596 seqStore.size == 0 && seqStore.capacity > 0);
597 assert(src.size <= serialState->params.jobSize);
598 ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, /* forceNonContiguous */ 0);
599 error = ZSTD_ldm_generateSequences(
600 &serialState->ldmState, &seqStore,
601 &serialState->params.ldmParams, src.start, src.size);
602 /* We provide a large enough buffer to never fail. */
603 assert(!ZSTD_isError(error)); (void)error;
604 /* Update ldmWindow to match the ldmState.window and signal the main
605 * thread if it is waiting for a buffer.
606 */
607 ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
608 serialState->ldmWindow = serialState->ldmState.window;
609 ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
610 ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
611 }
612 if (serialState->params.fParams.checksumFlag && src.size > 0)
613 XXH64_update(&serialState->xxhState, src.start, src.size);
614 }
615 /* Now it is the next job's turn */
616 serialState->nextJobID++;
617 ZSTD_pthread_cond_broadcast(&serialState->cond);
618 ZSTD_pthread_mutex_unlock(&serialState->mutex);
619
620 if (seqStore.size > 0) {
f535537f 621 ZSTD_referenceExternalSequences(jobCCtx, seqStore.seq, seqStore.size);
648db22b 622 assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable);
648db22b 623 }
624}
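
/* Editorial note : ZSTDMT_serialState_update() implements a "ticket" pattern,
 * serializing the XXH64/LDM work in job order even though jobs run
 * concurrently. A minimal sketch of the same pattern (assumed simplification,
 * not upstream code; `serial` and `myJobID` are hypothetical names) : */
#if 0
ZSTD_pthread_mutex_lock(&serial->mutex);
while (serial->nextJobID < myJobID)                    /* not my turn yet */
    ZSTD_pthread_cond_wait(&serial->cond, &serial->mutex);
if (serial->nextJobID == myJobID) { /* do the order-sensitive work */ }
serial->nextJobID++;                                   /* hand over to the next job */
ZSTD_pthread_cond_broadcast(&serial->cond);
ZSTD_pthread_mutex_unlock(&serial->mutex);
#endif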
625
626static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState,
627 unsigned jobID, size_t cSize)
628{
629 ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
630 if (serialState->nextJobID <= jobID) {
631 assert(ZSTD_isError(cSize)); (void)cSize;
632 DEBUGLOG(5, "Skipping past job %u because of error", jobID);
633 serialState->nextJobID = jobID + 1;
634 ZSTD_pthread_cond_broadcast(&serialState->cond);
635
636 ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
637 ZSTD_window_clear(&serialState->ldmWindow);
638 ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
639 ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
640 }
641 ZSTD_pthread_mutex_unlock(&serialState->mutex);
642
643}
644
645
646/* ------------------------------------------ */
647/* ===== Worker thread ===== */
648/* ------------------------------------------ */
649
650static const range_t kNullRange = { NULL, 0 };
651
652typedef struct {
653 size_t consumed; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */
654 size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */
655 ZSTD_pthread_mutex_t job_mutex; /* Thread-safe - used by mtctx and worker */
656 ZSTD_pthread_cond_t job_cond; /* Thread-safe - used by mtctx and worker */
657 ZSTDMT_CCtxPool* cctxPool; /* Thread-safe - used by mtctx and (all) workers */
658 ZSTDMT_bufferPool* bufPool; /* Thread-safe - used by mtctx and (all) workers */
659 ZSTDMT_seqPool* seqPool; /* Thread-safe - used by mtctx and (all) workers */
660 serialState_t* serial; /* Thread-safe - used by mtctx and (all) workers */
661 buffer_t dstBuff; /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */
662 range_t prefix; /* set by mtctx, then read by worker & mtctx => no barrier */
663 range_t src; /* set by mtctx, then read by worker & mtctx => no barrier */
664 unsigned jobID; /* set by mtctx, then read by worker => no barrier */
665 unsigned firstJob; /* set by mtctx, then read by worker => no barrier */
666 unsigned lastJob; /* set by mtctx, then read by worker => no barrier */
667 ZSTD_CCtx_params params; /* set by mtctx, then read by worker => no barrier */
668 const ZSTD_CDict* cdict; /* set by mtctx, then read by worker => no barrier */
669 unsigned long long fullFrameSize; /* set by mtctx, then read by worker => no barrier */
670 size_t dstFlushed; /* used only by mtctx */
671 unsigned frameChecksumNeeded; /* used only by mtctx */
672} ZSTDMT_jobDescription;
673
f535537f 674#define JOB_ERROR(e) \
675 do { \
676 ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex); \
677 job->cSize = e; \
678 ZSTD_pthread_mutex_unlock(&job->job_mutex); \
679 goto _endJob; \
680 } while (0)
648db22b 681
682/* ZSTDMT_compressionJob() is a POOL_function type */
683static void ZSTDMT_compressionJob(void* jobDescription)
684{
685 ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
686 ZSTD_CCtx_params jobParams = job->params; /* do not modify job->params ! copy it, modify the copy */
687 ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
688 rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
689 buffer_t dstBuff = job->dstBuff;
690 size_t lastCBlockSize = 0;
691
692 /* resources */
693 if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
694 if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */
695 dstBuff = ZSTDMT_getBuffer(job->bufPool);
696 if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation));
697 job->dstBuff = dstBuff; /* this value can be read in ZSTDMT_flush, when it copies the whole job */
698 }
699 if (jobParams.ldmParams.enableLdm == ZSTD_ps_enable && rawSeqStore.seq == NULL)
700 JOB_ERROR(ERROR(memory_allocation));
701
702 /* Don't compute the checksum for chunks, since it is computed externally ;
703 * it only needs to be signaled in the frame header.
704 */
705 if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
706 /* Don't run LDM for the chunks, since we handle it externally */
707 jobParams.ldmParams.enableLdm = ZSTD_ps_disable;
708 /* Correct nbWorkers to 0. */
709 jobParams.nbWorkers = 0;
710
711
712 /* init */
713 if (job->cdict) {
714 size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize);
715 assert(job->firstJob); /* only allowed for first job */
716 if (ZSTD_isError(initError)) JOB_ERROR(initError);
717 } else { /* srcStart points at reloaded section */
718 U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
719 { size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
720 if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
721 }
722 if (!job->firstJob) {
723 size_t const err = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_deterministicRefPrefix, 0);
724 if (ZSTD_isError(err)) JOB_ERROR(err);
725 }
726 { size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
727 job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
728 ZSTD_dtlm_fast,
729 NULL, /*cdict*/
730 &jobParams, pledgedSrcSize);
731 if (ZSTD_isError(initError)) JOB_ERROR(initError);
732 } }
733
734 /* Perform serial step as early as possible, but after CCtx initialization */
735 ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);
736
737 if (!job->firstJob) { /* flush and overwrite frame header when it's not first job */
738 size_t const hSize = ZSTD_compressContinue_public(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
739 if (ZSTD_isError(hSize)) JOB_ERROR(hSize);
740 DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize);
741 ZSTD_invalidateRepCodes(cctx);
742 }
743
744 /* compress */
745 { size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX;
746 int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize);
747 const BYTE* ip = (const BYTE*) job->src.start;
748 BYTE* const ostart = (BYTE*)dstBuff.start;
749 BYTE* op = ostart;
750 BYTE* oend = op + dstBuff.capacity;
751 int chunkNb;
752 if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize); /* check overflow */
753 DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks);
754 assert(job->cSize == 0);
755 for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {
756 size_t const cSize = ZSTD_compressContinue_public(cctx, op, oend-op, ip, chunkSize);
757 if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
758 ip += chunkSize;
759 op += cSize; assert(op < oend);
760 /* stats */
761 ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
762 job->cSize += cSize;
763 job->consumed = chunkSize * chunkNb;
764 DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)",
765 (U32)cSize, (U32)job->cSize);
766 ZSTD_pthread_cond_signal(&job->job_cond); /* warns some more data is ready to be flushed */
767 ZSTD_pthread_mutex_unlock(&job->job_mutex);
768 }
769 /* last block */
770 assert(chunkSize > 0);
771 assert((chunkSize & (chunkSize - 1)) == 0); /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
772 if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) {
773 size_t const lastBlockSize1 = job->src.size & (chunkSize-1);
774 size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1;
775 size_t const cSize = (job->lastJob) ?
776 ZSTD_compressEnd_public(cctx, op, oend-op, ip, lastBlockSize) :
777 ZSTD_compressContinue_public(cctx, op, oend-op, ip, lastBlockSize);
778 if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
779 lastCBlockSize = cSize;
780 } }
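    /* Worked example (editorial note) : with ZSTD_BLOCKSIZE_MAX == 128 KB,
     * chunkSize == 512 KB. For job->src.size == 1,300,000 bytes :
     * nbChunks == 3 ; the loop above emits chunks 1 and 2 (512 KB each), and
     * the "last block" path emits lastBlockSize == 1,300,000 - 2*524,288
     * == 251,424 bytes. The (chunkSize-1) mask works only because chunkSize
     * is a power of 2. */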
781 if (!job->firstJob) {
782 /* Double check that we don't have an ext-dict, because then our
783 * repcode invalidation doesn't work.
784 */
785 assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
786 }
787 ZSTD_CCtx_trace(cctx, 0);
788
789_endJob:
790 ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);
791 if (job->prefix.size > 0)
792 DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start);
793 DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start);
794 /* release resources */
795 ZSTDMT_releaseSeq(job->seqPool, rawSeqStore);
796 ZSTDMT_releaseCCtx(job->cctxPool, cctx);
797 /* report */
798 ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
799 if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0);
800 job->cSize += lastCBlockSize;
801 job->consumed = job->src.size; /* when job->consumed == job->src.size , compression job is presumed completed */
802 ZSTD_pthread_cond_signal(&job->job_cond);
803 ZSTD_pthread_mutex_unlock(&job->job_mutex);
804}
805
806
807/* ------------------------------------------ */
808/* ===== Multi-threaded compression ===== */
809/* ------------------------------------------ */
810
811typedef struct {
812 range_t prefix; /* read-only non-owned prefix buffer */
813 buffer_t buffer;
814 size_t filled;
815} inBuff_t;
816
817typedef struct {
818 BYTE* buffer; /* The round input buffer. All jobs get references
819 * to pieces of the buffer. ZSTDMT_tryGetInputRange()
820 * handles handing out job input buffers, and makes
821 * sure it doesn't overlap with any pieces still in use.
822 */
823 size_t capacity; /* The capacity of buffer. */
824 size_t pos; /* The position of the current inBuff in the round
825 * buffer. Updated past the end of the inBuff once
826 * the inBuff is sent to the worker thread.
827 * pos <= capacity.
828 */
829} roundBuff_t;
830
831static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};
832
833#define RSYNC_LENGTH 32
834/* Don't create chunks smaller than the zstd block size.
835 * This stops us from regressing compression ratio too much,
836 * and ensures our output fits in ZSTD_compressBound().
837 *
838 * If this is shrunk < ZSTD_BLOCKSIZELOG_MIN then
839 * ZSTD_COMPRESSBOUND() will need to be updated.
840 */
841#define RSYNC_MIN_BLOCK_LOG ZSTD_BLOCKSIZELOG_MAX
842#define RSYNC_MIN_BLOCK_SIZE (1<<RSYNC_MIN_BLOCK_LOG)
843
844typedef struct {
845 U64 hash;
846 U64 hitMask;
847 U64 primePower;
848} rsyncState_t;
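
/* Editorial sketch (assumed simplification, not upstream code) : rsyncable
 * mode keeps a RSYNC_LENGTH-byte rolling hash of the input and ends the
 * current job whenever the low bits of the hash are all 1s, so cut points
 * depend on content rather than position; an insertion shifts chunk
 * boundaries only locally. */
#if 0
hash = ZSTD_rollingHash_rotate(hash, oldByte, newByte, rsync.primePower);
if ((hash & rsync.hitMask) == rsync.hitMask)
    { /* content-defined cut point : end the current job here */ }
#endif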
849
850struct ZSTDMT_CCtx_s {
851 POOL_ctx* factory;
852 ZSTDMT_jobDescription* jobs;
853 ZSTDMT_bufferPool* bufPool;
854 ZSTDMT_CCtxPool* cctxPool;
855 ZSTDMT_seqPool* seqPool;
856 ZSTD_CCtx_params params;
857 size_t targetSectionSize;
858 size_t targetPrefixSize;
859 int jobReady; /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */
860 inBuff_t inBuff;
861 roundBuff_t roundBuff;
862 serialState_t serial;
863 rsyncState_t rsync;
864 unsigned jobIDMask;
865 unsigned doneJobID;
866 unsigned nextJobID;
867 unsigned frameEnded;
868 unsigned allJobsCompleted;
869 unsigned long long frameContentSize;
870 unsigned long long consumed;
871 unsigned long long produced;
872 ZSTD_customMem cMem;
873 ZSTD_CDict* cdictLocal;
874 const ZSTD_CDict* cdict;
875 unsigned providedFactory: 1;
876};
877
878static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
879{
880 U32 jobNb;
881 if (jobTable == NULL) return;
882 for (jobNb=0; jobNb<nbJobs; jobNb++) {
883 ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
884 ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
885 }
886 ZSTD_customFree(jobTable, cMem);
887}
888
889/* ZSTDMT_createJobsTable()
890 * allocates and inits a job table.
891 * updates *nbJobsPtr to the next power of 2, which becomes the size of the table */
892static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
893{
894 U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;
895 U32 const nbJobs = 1 << nbJobsLog2;
896 U32 jobNb;
897 ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*)
898 ZSTD_customCalloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
899 int initError = 0;
900 if (jobTable==NULL) return NULL;
901 *nbJobsPtr = nbJobs;
902 for (jobNb=0; jobNb<nbJobs; jobNb++) {
903 initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL);
904 initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL);
905 }
906 if (initError != 0) {
907 ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem);
908 return NULL;
909 }
910 return jobTable;
911}
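
/* Example (editorial note) : the table size is rounded up to a power of 2 so
 * that job slots can be addressed with a mask instead of a modulo. With
 * nbWorkers==6, the requested nbJobs==8 is rounded up to 16 and
 * jobIDMask==15, so job 21 lands in slot 21 & 15 == 5. */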
912
913static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
914 U32 nbJobs = nbWorkers + 2;
915 if (nbJobs > mtctx->jobIDMask+1) { /* need more job capacity */
916 ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
917 mtctx->jobIDMask = 0;
918 mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem);
919 if (mtctx->jobs==NULL) return ERROR(memory_allocation);
920 assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0)); /* ensure nbJobs is a power of 2 */
921 mtctx->jobIDMask = nbJobs - 1;
922 }
923 return 0;
924}
925
926
927/* ZSTDMT_CCtxParam_setNbWorkers():
928 * Internal use only */
929static size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
930{
931 return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
932}
933
934MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
935{
936 ZSTDMT_CCtx* mtctx;
937 U32 nbJobs = nbWorkers + 2;
938 int initError;
939 DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers);
940
941 if (nbWorkers < 1) return NULL;
942 nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX);
943 if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
944 /* invalid custom allocator */
945 return NULL;
946
947 mtctx = (ZSTDMT_CCtx*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtx), cMem);
948 if (!mtctx) return NULL;
949 ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
950 mtctx->cMem = cMem;
951 mtctx->allJobsCompleted = 1;
952 if (pool != NULL) {
953 mtctx->factory = pool;
954 mtctx->providedFactory = 1;
955 }
956 else {
957 mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
958 mtctx->providedFactory = 0;
959 }
960 mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
961 assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0); /* ensure nbJobs is a power of 2 */
962 mtctx->jobIDMask = nbJobs - 1;
963 mtctx->bufPool = ZSTDMT_createBufferPool(BUF_POOL_MAX_NB_BUFFERS(nbWorkers), cMem);
964 mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem);
965 mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem);
966 initError = ZSTDMT_serialState_init(&mtctx->serial);
967 mtctx->roundBuff = kNullRoundBuff;
968 if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) {
969 ZSTDMT_freeCCtx(mtctx);
970 return NULL;
971 }
972 DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers);
973 return mtctx;
974}
975
976ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
977{
978#ifdef ZSTD_MULTITHREAD
979 return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool);
980#else
981 (void)nbWorkers;
982 (void)cMem;
983 (void)pool;
984 return NULL;
985#endif
986}
987
988
989/* ZSTDMT_releaseAllJobResources() :
990 * note : ensure all workers are killed first ! */
991static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
992{
993 unsigned jobID;
994 DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
995 for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
996 /* Copy the mutex/cond out */
997 ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex;
998 ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond;
999
1000 DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
1001 ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
1002
1003 /* Clear the job description, but keep the mutex/cond */
1004 ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));
1005 mtctx->jobs[jobID].job_mutex = mutex;
1006 mtctx->jobs[jobID].job_cond = cond;
1007 }
1008 mtctx->inBuff.buffer = g_nullBuffer;
1009 mtctx->inBuff.filled = 0;
1010 mtctx->allJobsCompleted = 1;
1011}
1012
1013static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
1014{
1015 DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
1016 while (mtctx->doneJobID < mtctx->nextJobID) {
1017 unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
1018 ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
1019 while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
1020 DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID); /* we want to block when waiting for data to flush */
1021 ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
1022 }
1023 ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
1024 mtctx->doneJobID++;
1025 }
1026}
1027
1028size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
1029{
1030 if (mtctx==NULL) return 0; /* compatible with free on NULL */
1031 if (!mtctx->providedFactory)
1032 POOL_free(mtctx->factory); /* stop and free worker threads */
1033 ZSTDMT_releaseAllJobResources(mtctx); /* release job resources into pools first */
1034 ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
1035 ZSTDMT_freeBufferPool(mtctx->bufPool);
1036 ZSTDMT_freeCCtxPool(mtctx->cctxPool);
1037 ZSTDMT_freeSeqPool(mtctx->seqPool);
1038 ZSTDMT_serialState_free(&mtctx->serial);
1039 ZSTD_freeCDict(mtctx->cdictLocal);
1040 if (mtctx->roundBuff.buffer)
1041 ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
1042 ZSTD_customFree(mtctx, mtctx->cMem);
1043 return 0;
1044}
1045
1046size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
1047{
1048 if (mtctx == NULL) return 0; /* supports sizeof NULL */
1049 return sizeof(*mtctx)
1050 + POOL_sizeof(mtctx->factory)
1051 + ZSTDMT_sizeof_bufferPool(mtctx->bufPool)
1052 + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
1053 + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
1054 + ZSTDMT_sizeof_seqPool(mtctx->seqPool)
1055 + ZSTD_sizeof_CDict(mtctx->cdictLocal)
1056 + mtctx->roundBuff.capacity;
1057}
1058
1059
1060/* ZSTDMT_resize() :
1061 * @return : error code if fails, 0 on success */
1062static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
1063{
1064 if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
1065 FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , "");
1066 mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, BUF_POOL_MAX_NB_BUFFERS(nbWorkers));
1067 if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
1068 mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
1069 if (mtctx->cctxPool == NULL) return ERROR(memory_allocation);
1070 mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers);
1071 if (mtctx->seqPool == NULL) return ERROR(memory_allocation);
1072 ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
1073 return 0;
1074}
1075
1076
1077/*! ZSTDMT_updateCParams_whileCompressing() :
1078 * Updates a selected set of compression parameters, remaining compatible with currently active frame.
1079 * New parameters will be applied to next compression job. */
1080void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
1081{
1082 U32 const saved_wlog = mtctx->params.cParams.windowLog; /* Do not modify windowLog while compressing */
1083 int const compressionLevel = cctxParams->compressionLevel;
1084 DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
1085 compressionLevel);
1086 mtctx->params.compressionLevel = compressionLevel;
1087 { ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
1088 cParams.windowLog = saved_wlog;
1089 mtctx->params.cParams = cParams;
1090 }
1091}
1092
1093/* ZSTDMT_getFrameProgression():
1094 * tells how much data has been consumed (input) and produced (output) for current frame.
1095 * able to count progression inside worker threads.
1096 * Note : mutex will be acquired during statistics collection inside workers. */
1097ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
1098{
1099 ZSTD_frameProgression fps;
1100 DEBUGLOG(5, "ZSTDMT_getFrameProgression");
1101 fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
1102 fps.consumed = mtctx->consumed;
1103 fps.produced = fps.flushed = mtctx->produced;
1104 fps.currentJobID = mtctx->nextJobID;
1105 fps.nbActiveWorkers = 0;
1106 { unsigned jobNb;
1107 unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
1108 DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
f535537f 1109 mtctx->doneJobID, lastJobNb, mtctx->jobReady);
648db22b 1110 for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
1111 unsigned const wJobID = jobNb & mtctx->jobIDMask;
1112 ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID];
1113 ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
1114 { size_t const cResult = jobPtr->cSize;
1115 size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
1116 size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
1117 assert(flushed <= produced);
1118 fps.ingested += jobPtr->src.size;
1119 fps.consumed += jobPtr->consumed;
1120 fps.produced += produced;
1121 fps.flushed += flushed;
1122 fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size);
1123 }
1124 ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
1125 }
1126 }
1127 return fps;
1128}
1129
1130
1131size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
1132{
1133 size_t toFlush;
1134 unsigned const jobID = mtctx->doneJobID;
1135 assert(jobID <= mtctx->nextJobID);
1136 if (jobID == mtctx->nextJobID) return 0; /* no active job => nothing to flush */
1137
1138 /* look into oldest non-fully-flushed job */
1139 { unsigned const wJobID = jobID & mtctx->jobIDMask;
1140 ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID];
1141 ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
1142 { size_t const cResult = jobPtr->cSize;
1143 size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
1144 size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
1145 assert(flushed <= produced);
1146 assert(jobPtr->consumed <= jobPtr->src.size);
1147 toFlush = produced - flushed;
1148 /* if toFlush==0, nothing is available to flush.
1149 * However, jobID is expected to still be active:
1150 * if jobID was already completed and fully flushed,
1151 * ZSTDMT_flushProduced() should have already moved onto next job.
1152 * Therefore, some input has not yet been consumed. */
1153 if (toFlush==0) {
1154 assert(jobPtr->consumed < jobPtr->src.size);
1155 }
1156 }
1157 ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
1158 }
1159
1160 return toFlush;
1161}
1162
1163
1164/* ------------------------------------------ */
1165/* ===== Multi-threaded compression ===== */
1166/* ------------------------------------------ */
1167
1168static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
1169{
1170 unsigned jobLog;
1171 if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
1172 /* In Long Range Mode, the windowLog is typically oversized.
1173 * In which case, it's preferable to determine the jobSize
1174 * based on cycleLog instead. */
1175 jobLog = MAX(21, ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy) + 3);
1176 } else {
1177 jobLog = MAX(20, params->cParams.windowLog + 2);
1178 }
1179 return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX);
1180}
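
/* Worked example (editorial note) : without LDM and with windowLog==21,
 * jobLog == MAX(20, 21+2) == 23, capped at ZSTDMT_JOBLOG_MAX ;
 * i.e. a target job size of 8 MB, four times the window size. */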
1181
1182static int ZSTDMT_overlapLog_default(ZSTD_strategy strat)
1183{
1184 switch(strat)
1185 {
1186 case ZSTD_btultra2:
1187 return 9;
1188 case ZSTD_btultra:
1189 case ZSTD_btopt:
1190 return 8;
1191 case ZSTD_btlazy2:
1192 case ZSTD_lazy2:
1193 return 7;
1194 case ZSTD_lazy:
1195 case ZSTD_greedy:
1196 case ZSTD_dfast:
1197 case ZSTD_fast:
1198 default:;
1199 }
1200 return 6;
1201}
1202
1203static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)
1204{
1205 assert(0 <= ovlog && ovlog <= 9);
1206 if (ovlog == 0) return ZSTDMT_overlapLog_default(strat);
1207 return ovlog;
1208}
1209
1210static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
1211{
1212 int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy);
1213 int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog);
1214 assert(0 <= overlapRLog && overlapRLog <= 8);
1215 if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
1216 /* In Long Range Mode, the windowLog is typically oversized.
1217 * In which case, it's preferable to determine the jobSize
1218 * based on chainLog instead.
1219 * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */
1220 ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
1221 - overlapRLog;
1222 }
1223 assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX);
1224 DEBUGLOG(4, "overlapLog : %i", params->overlapLog);
1225 DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
1226 return (ovLog==0) ? 0 : (size_t)1 << ovLog;
1227}
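
/* Worked example (editorial note) : with strategy ZSTD_btopt (default
 * overlapLog 8) and windowLog 21 : overlapRLog == 9-8 == 1, so ovLog == 20
 * and the overlap is 1 MB, half the window. overlapLog==9 would reload a
 * full window, while overlapLog==1 yields no overlap at all (overlapRLog
 * hits the >=8 cutoff). */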
1228
1229/* ====================================== */
1230/* ======= Streaming API ======= */
1231/* ====================================== */
1232
1233size_t ZSTDMT_initCStream_internal(
1234 ZSTDMT_CCtx* mtctx,
1235 const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
1236 const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
1237 unsigned long long pledgedSrcSize)
1238{
1239 DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)",
1240 (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx);
1241
1242 /* params are supposed to be at least partially validated at this point */
1243 assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
1244 assert(!((dict) && (cdict))); /* either dict or cdict, not both */
1245
1246 /* init */
1247 if (params.nbWorkers != mtctx->params.nbWorkers)
1248 FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) , "");
1249
1250 if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
1251 if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;
1252
1253 DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);
1254
1255 if (mtctx->allJobsCompleted == 0) { /* previous compression not correctly finished */
1256 ZSTDMT_waitForAllJobsCompleted(mtctx);
1257 ZSTDMT_releaseAllJobResources(mtctx);
1258 mtctx->allJobsCompleted = 1;
1259 }
1260
1261 mtctx->params = params;
1262 mtctx->frameContentSize = pledgedSrcSize;
1263 if (dict) {
1264 ZSTD_freeCDict(mtctx->cdictLocal);
1265 mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
1266 ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */
1267 params.cParams, mtctx->cMem);
1268 mtctx->cdict = mtctx->cdictLocal;
1269 if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
1270 } else {
1271 ZSTD_freeCDict(mtctx->cdictLocal);
1272 mtctx->cdictLocal = NULL;
1273 mtctx->cdict = cdict;
1274 }
1275
1276 mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params);
1277 DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
1278 mtctx->targetSectionSize = params.jobSize;
1279 if (mtctx->targetSectionSize == 0) {
1280 mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params);
1281 }
1282 assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX);
1283
1284 if (params.rsyncable) {
1285 /* Aim for the targetSectionSize as the average job size. */
1286 U32 const jobSizeKB = (U32)(mtctx->targetSectionSize >> 10);
1287 U32 const rsyncBits = (assert(jobSizeKB >= 1), ZSTD_highbit32(jobSizeKB) + 10);
1288 /* We refuse to create jobs < RSYNC_MIN_BLOCK_SIZE bytes, so make sure our
1289 * expected job size is at least 4x larger. */
1290 assert(rsyncBits >= RSYNC_MIN_BLOCK_LOG + 2);
1291 DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
1292 mtctx->rsync.hash = 0;
1293 mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
1294 mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH);
1295 }
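    /* Worked example (editorial note) : targetSectionSize == 4 MB gives
     * jobSizeKB == 4096, ZSTD_highbit32(4096) == 12, so rsyncBits == 22 :
     * the hitMask selects one cut point per 2^22 bytes on average, so jobs
     * still average ~4 MB while becoming content-defined. */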
1296 if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize; /* job size must be >= overlap size */
1297 DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize);
1298 DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
1299 ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
1300 {
1301 /* If ldm is enabled we need windowSize space. */
1302 size_t const windowSize = mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable ? (1U << mtctx->params.cParams.windowLog) : 0;
1303 /* Two buffers of slack, plus extra space for the overlap.
1304 * This is the minimum slack that LDM works with. One extra because
1305 * flush might waste up to targetSectionSize-1 bytes. Another extra
1306 * for the overlap (if > 0), then one to fill which doesn't overlap
1307 * with the LDM window.
1308 */
1309 size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0);
1310 size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers;
1311 /* Compute the total size, and always have enough slack */
1312 size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1);
1313 size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers;
1314 size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;
1315 if (mtctx->roundBuff.capacity < capacity) {
1316 if (mtctx->roundBuff.buffer)
1317 ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
1318 mtctx->roundBuff.buffer = (BYTE*)ZSTD_customMalloc(capacity, mtctx->cMem);
1319 if (mtctx->roundBuff.buffer == NULL) {
1320 mtctx->roundBuff.capacity = 0;
1321 return ERROR(memory_allocation);
1322 }
1323 mtctx->roundBuff.capacity = capacity;
1324 }
1325 }
1326 DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10));
1327 mtctx->roundBuff.pos = 0;
1328 mtctx->inBuff.buffer = g_nullBuffer;
1329 mtctx->inBuff.filled = 0;
1330 mtctx->inBuff.prefix = kNullRange;
1331 mtctx->doneJobID = 0;
1332 mtctx->nextJobID = 0;
1333 mtctx->frameEnded = 0;
1334 mtctx->allJobsCompleted = 0;
1335 mtctx->consumed = 0;
1336 mtctx->produced = 0;
1337 if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize,
1338 dict, dictSize, dictContentType))
1339 return ERROR(memory_allocation);
1340 return 0;
1341}
1342
1343
1344/* ZSTDMT_writeLastEmptyBlock()
1345 * Write a single empty block with an end-of-frame to finish a frame.
1346 * Job must be created from streaming variant.
1347 * This function is always successful if expected conditions are fulfilled.
1348 */
1349static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
1350{
1351 assert(job->lastJob == 1);
1352 assert(job->src.size == 0); /* last job is empty -> will be simplified into a last empty block */
1353 assert(job->firstJob == 0); /* cannot be first job, as it also needs to create frame header */
1354 assert(job->dstBuff.start == NULL); /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */
1355 job->dstBuff = ZSTDMT_getBuffer(job->bufPool);
1356 if (job->dstBuff.start == NULL) {
1357 job->cSize = ERROR(memory_allocation);
1358 return;
1359 }
1360 assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); /* no buffer should ever be that small */
1361 job->src = kNullRange;
1362 job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity);
1363 assert(!ZSTD_isError(job->cSize));
1364 assert(job->consumed == 0);
1365}

static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp)
{
    unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask;
    int const endFrame = (endOp == ZSTD_e_end);

    if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) {
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full");
        assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask));
        return 0;
    }

    if (!mtctx->jobReady) {
        BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start;
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ",
                    mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size);
        mtctx->jobs[jobID].src.start = src;
        mtctx->jobs[jobID].src.size = srcSize;
        assert(mtctx->inBuff.filled >= srcSize);
        mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix;
        mtctx->jobs[jobID].consumed = 0;
        mtctx->jobs[jobID].cSize = 0;
        mtctx->jobs[jobID].params = mtctx->params;
        mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL;
        mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize;
        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
        mtctx->jobs[jobID].cctxPool = mtctx->cctxPool;
        mtctx->jobs[jobID].bufPool = mtctx->bufPool;
        mtctx->jobs[jobID].seqPool = mtctx->seqPool;
        mtctx->jobs[jobID].serial = &mtctx->serial;
        mtctx->jobs[jobID].jobID = mtctx->nextJobID;
        mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);
        mtctx->jobs[jobID].lastJob = endFrame;
        mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0);
        mtctx->jobs[jobID].dstFlushed = 0;

        /* Update the round buffer pos, and release the input buffer so it can be reset */
        mtctx->roundBuff.pos += srcSize;
        mtctx->inBuff.buffer = g_nullBuffer;
        mtctx->inBuff.filled = 0;
        /* Set the prefix for the next job */
        if (!endFrame) {
            size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize);
            mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize;
            mtctx->inBuff.prefix.size = newPrefixSize;
        } else {   /* endFrame==1 => no need for another input buffer */
            mtctx->inBuff.prefix = kNullRange;
            mtctx->frameEnded = endFrame;
            if (mtctx->nextJobID == 0) {
                /* single job exception : checksum is already calculated directly within worker thread */
                mtctx->params.fParams.checksumFlag = 0;
        }   }

        if ( (srcSize == 0)
          && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) {
            DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame");
            assert(endOp == ZSTD_e_end);   /* only possible case : need to end the frame with an empty last block */
            ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID);
            mtctx->nextJobID++;
            return 0;
        }
    }

    DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes (end:%u, jobNb == %u (mod:%u))",
                mtctx->nextJobID,
                (U32)mtctx->jobs[jobID].src.size,
                mtctx->jobs[jobID].lastJob,
                mtctx->nextJobID,
                jobID);
    if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) {
        mtctx->nextJobID++;
        mtctx->jobReady = 0;
    } else {
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID);
        mtctx->jobReady = 1;
    }
    return 0;
}
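
/* Illustrative sketch (not upstream code) : the job table used above is a
 * power-of-2 ring indexed through jobIDMask. nextJobID and doneJobID only
 * ever increase; a slot is reclaimed once its job is fully flushed. */
MEM_STATIC int ZSTDMT_exampleJobTableFull(unsigned nextJobID,
                                          unsigned doneJobID,
                                          unsigned jobIDMask /* nbSlots - 1 */)
{
    /* full when all (jobIDMask+1) slots hold live jobs : nextJobID and
     * doneJobID then map onto the same physical slot */
    return nextJobID > doneJobID + jobIDMask;
}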


/*! ZSTDMT_flushProduced() :
 *  Flush whatever data has been produced but not yet flushed in the current job,
 *  and move to the next job once the current one is fully flushed.
 * `output` : `pos` will be updated with the amount of data flushed.
 * `blockToFlush` : if >0, the function will block and wait when there is no data available to flush.
 * @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end)
{
    unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask;
    DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)",
                blockToFlush, mtctx->doneJobID, mtctx->nextJobID);
    assert(output->size >= output->pos);

    ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
    if (  blockToFlush
      && (mtctx->doneJobID < mtctx->nextJobID) ) {
        assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize);
        while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) {  /* nothing to flush */
            if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) {
                DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none",
                            mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size);
                break;
            }
            DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)",
                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
            ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex);  /* block when nothing to flush but some to come */
    }   }

    /* try to flush something */
    {   size_t cSize = mtctx->jobs[wJobID].cSize;                  /* shared */
        size_t const srcConsumed = mtctx->jobs[wJobID].consumed;   /* shared */
        size_t const srcSize = mtctx->jobs[wJobID].src.size;       /* read-only : could be read outside the mutex, but declared here to respect no-declaration-after-statement */
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
        if (ZSTD_isError(cSize)) {
            DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
                        mtctx->doneJobID, ZSTD_getErrorName(cSize));
            ZSTDMT_waitForAllJobsCompleted(mtctx);
            ZSTDMT_releaseAllJobResources(mtctx);
            return cSize;
        }
        /* add frame checksum if necessary (can only happen once) */
        assert(srcConsumed <= srcSize);
        if ( (srcConsumed == srcSize)   /* job completed -> worker no longer active */
          && mtctx->jobs[wJobID].frameChecksumNeeded ) {
            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
            DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum);
            MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum);
            cSize += 4;
            mtctx->jobs[wJobID].cSize += 4;  /* can write this shared value, as worker is no longer active */
            mtctx->jobs[wJobID].frameChecksumNeeded = 0;
        }

        if (cSize > 0) {   /* compression is ongoing or completed */
            size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
            DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
                        (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize);
            assert(mtctx->doneJobID < mtctx->nextJobID);
            assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
            assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
            if (toFlush > 0) {
                ZSTD_memcpy((char*)output->dst + output->pos,
                    (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
                    toFlush);
            }
            output->pos += toFlush;
            mtctx->jobs[wJobID].dstFlushed += toFlush;   /* can write : this value is only used by mtctx */

            if ( (srcConsumed == srcSize)    /* job is completed */
              && (mtctx->jobs[wJobID].dstFlushed == cSize) ) {   /* output buffer fully flushed => free this job position */
                DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
                ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
                DEBUGLOG(5, "dstBuffer released");
                mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
                mtctx->jobs[wJobID].cSize = 0;   /* ensure this job slot is considered "not started" in future check */
                mtctx->consumed += srcSize;
                mtctx->produced += cSize;
                mtctx->doneJobID++;
        }   }

        /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */
        if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed);
        if (srcSize > srcConsumed) return 1;   /* current job not completely compressed */
    }
    if (mtctx->doneJobID < mtctx->nextJobID) return 1;   /* some more jobs ongoing */
    if (mtctx->jobReady) return 1;   /* one job is ready to push, just not yet in the list */
    if (mtctx->inBuff.filled > 0) return 1;   /* input is not empty, and still needs to be converted into a job */
    mtctx->allJobsCompleted = mtctx->frameEnded;   /* all jobs are entirely flushed => if this one is last one, frame is completed */
    if (end == ZSTD_e_end) return !mtctx->frameEnded;  /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */
    return 0;   /* internal buffers fully flushed */
}
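
/* Illustrative sketch (not upstream code) : how the frame checksum appended
 * above is formed. The serial state hashes all input with XXH64; the frame
 * checksum is the low 4 bytes of the digest, stored little-endian right after
 * the last block (RFC 8878). XXH64_digest() is the public xxhash API already
 * used above. */
MEM_STATIC void ZSTDMT_exampleAppendChecksum(void* dstEnd, const XXH64_state_t* xxhState)
{
    U32 const checksum = (U32)XXH64_digest(xxhState);  /* keep low 32 bits of the 64-bit digest */
    MEM_writeLE32(dstEnd, checksum);                   /* 4 bytes, little-endian */
}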

/**
 * Returns the range of data used by the earliest job that is not yet complete.
 * If the data of the first job is broken up into two segments, we cover both
 * sections.
 */
static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
{
    unsigned const firstJobID = mtctx->doneJobID;
    unsigned const lastJobID = mtctx->nextJobID;
    unsigned jobID;

    for (jobID = firstJobID; jobID < lastJobID; ++jobID) {
        unsigned const wJobID = jobID & mtctx->jobIDMask;
        size_t consumed;

        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
        consumed = mtctx->jobs[wJobID].consumed;
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);

        if (consumed < mtctx->jobs[wJobID].src.size) {
            range_t range = mtctx->jobs[wJobID].prefix;
            if (range.size == 0) {
                /* Empty prefix */
                range = mtctx->jobs[wJobID].src;
            }
            /* Job source in multiple segments not supported yet */
            assert(range.start <= mtctx->jobs[wJobID].src.start);
            return range;
        }
    }
    return kNullRange;
}

/**
 * Returns non-zero iff buffer and range overlap.
 */
static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
{
    BYTE const* const bufferStart = (BYTE const*)buffer.start;
    BYTE const* const rangeStart = (BYTE const*)range.start;

    if (rangeStart == NULL || bufferStart == NULL)
        return 0;

    {
        BYTE const* const bufferEnd = bufferStart + buffer.capacity;
        BYTE const* const rangeEnd = rangeStart + range.size;

        /* Empty ranges cannot overlap */
        if (bufferStart == bufferEnd || rangeStart == rangeEnd)
            return 0;

        return bufferStart < rangeEnd && rangeStart < bufferEnd;
    }
}
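
/* Illustrative sketch (not upstream code) : ZSTDMT_isOverlapped() above is the
 * classic half-open interval test : non-empty [bStart,bEnd) and [rStart,rEnd)
 * intersect iff each starts before the other ends. The example below overlaps
 * on [base+150, base+200). */
MEM_STATIC int ZSTDMT_exampleOverlap(void)
{
    BYTE base[300];
    buffer_t buf;
    range_t range;
    buf.start = base + 100;   buf.capacity = 100;   /* [base+100, base+200) */
    range.start = base + 150; range.size = 100;     /* [base+150, base+250) */
    return ZSTDMT_isOverlapped(buf, range);         /* != 0 : they overlap */
}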

static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
{
    range_t extDict;
    range_t prefix;

    DEBUGLOG(5, "ZSTDMT_doesOverlapWindow");
    extDict.start = window.dictBase + window.lowLimit;
    extDict.size = window.dictLimit - window.lowLimit;

    prefix.start = window.base + window.dictLimit;
    prefix.size = window.nextSrc - (window.base + window.dictLimit);
    DEBUGLOG(5, "extDict [0x%zx, 0x%zx)",
                (size_t)extDict.start,
                (size_t)extDict.start + extDict.size);
    DEBUGLOG(5, "prefix [0x%zx, 0x%zx)",
                (size_t)prefix.start,
                (size_t)prefix.start + prefix.size);

    return ZSTDMT_isOverlapped(buffer, extDict)
        || ZSTDMT_isOverlapped(buffer, prefix);
}

static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
{
    if (mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable) {
        ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
        DEBUGLOG(5, "ZSTDMT_waitForLdmComplete");
        DEBUGLOG(5, "source [0x%zx, 0x%zx)",
                    (size_t)buffer.start,
                    (size_t)buffer.start + buffer.capacity);
        ZSTD_PTHREAD_MUTEX_LOCK(mutex);
        while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {
            DEBUGLOG(5, "Waiting for LDM to finish...");
            ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);
        }
        DEBUGLOG(6, "Done waiting for LDM to finish");
        ZSTD_pthread_mutex_unlock(mutex);
    }
}

/**
 * Attempts to set the inBuff to the next section to fill.
 * If any part of the new section is still in use we give up.
 * Returns non-zero when a new input section was successfully acquired.
 */
static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
{
    range_t const inUse = ZSTDMT_getInputDataInUse(mtctx);
    size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos;
    size_t const target = mtctx->targetSectionSize;
    buffer_t buffer;

    DEBUGLOG(5, "ZSTDMT_tryGetInputRange");
    assert(mtctx->inBuff.buffer.start == NULL);
    assert(mtctx->roundBuff.capacity >= target);

    if (spaceLeft < target) {
        /* ZSTD_invalidateRepCodes() doesn't work for extDict variants.
         * Simply copy the prefix to the beginning in that case.
         */
        BYTE* const start = (BYTE*)mtctx->roundBuff.buffer;
        size_t const prefixSize = mtctx->inBuff.prefix.size;

        buffer.start = start;
        buffer.capacity = prefixSize;
        if (ZSTDMT_isOverlapped(buffer, inUse)) {
            DEBUGLOG(5, "Waiting for buffer...");
            return 0;
        }
        ZSTDMT_waitForLdmComplete(mtctx, buffer);
        ZSTD_memmove(start, mtctx->inBuff.prefix.start, prefixSize);
        mtctx->inBuff.prefix.start = start;
        mtctx->roundBuff.pos = prefixSize;
    }
    buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos;
    buffer.capacity = target;

    if (ZSTDMT_isOverlapped(buffer, inUse)) {
        DEBUGLOG(5, "Waiting for buffer...");
        return 0;
    }
    assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));

    ZSTDMT_waitForLdmComplete(mtctx, buffer);

    DEBUGLOG(5, "Using prefix range [%zx, %zx)",
                (size_t)mtctx->inBuff.prefix.start,
                (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size);
    DEBUGLOG(5, "Using source range [%zx, %zx)",
                (size_t)buffer.start,
                (size_t)buffer.start + buffer.capacity);

    mtctx->inBuff.buffer = buffer;
    mtctx->inBuff.filled = 0;
    assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity);
    return 1;
}
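
/* Illustrative sketch (not upstream code) : the wrap-around policy above, with
 * hypothetical numbers. With capacity = 10 MB, pos = 9 MB, target = 2 MB and a
 * live 1 MB prefix : spaceLeft (1 MB) < target, so the prefix is memmove'd to
 * offset 0 and the next section starts right after it. */
MEM_STATIC size_t ZSTDMT_exampleRoundBuffNextPos(size_t capacity, size_t pos,
                                                 size_t target, size_t prefixSize)
{
    size_t const spaceLeft = capacity - pos;
    if (spaceLeft < target)
        return prefixSize;   /* wrapped : prefix now at the front, write after it */
    return pos;              /* enough room : keep filling in place */
}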

typedef struct {
    size_t toLoad;  /* The number of bytes to load from the input. */
    int flush;      /* Boolean indicating whether we must flush because we found a synchronization point. */
} syncPoint_t;

/**
 * Searches through the input for a synchronization point. If one is found, we
 * will instruct the caller to flush, and return the number of bytes to load.
 * Otherwise, we will load as many bytes as possible and instruct the caller
 * to continue as normal.
 */
static syncPoint_t
findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
{
    BYTE const* const istart = (BYTE const*)input.src + input.pos;
    U64 const primePower = mtctx->rsync.primePower;
    U64 const hitMask = mtctx->rsync.hitMask;

    syncPoint_t syncPoint;
    U64 hash;
    BYTE const* prev;
    size_t pos;

    syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled);
    syncPoint.flush = 0;
    if (!mtctx->params.rsyncable)
        /* Rsync is disabled. */
        return syncPoint;
    if (mtctx->inBuff.filled + input.size - input.pos < RSYNC_MIN_BLOCK_SIZE)
        /* We don't emit synchronization points if they would produce too-small blocks.
         * We don't have enough input to find a synchronization point, so don't look.
         */
        return syncPoint;
    if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH)
        /* Not enough to compute the hash.
         * We will miss any synchronization points in this RSYNC_LENGTH byte
         * window. However, since it depends only on the internal buffers, if the
         * state is already synchronized, we will remain synchronized.
         * Additionally, the probability that we miss a synchronization point is
         * low: RSYNC_LENGTH / targetSectionSize.
         */
        return syncPoint;
    /* Initialize the loop variables. */
    if (mtctx->inBuff.filled < RSYNC_MIN_BLOCK_SIZE) {
        /* We don't need to scan the first RSYNC_MIN_BLOCK_SIZE positions
         * because they can't possibly be a sync point. So we can start
         * part way through the input buffer.
         */
        pos = RSYNC_MIN_BLOCK_SIZE - mtctx->inBuff.filled;
        if (pos >= RSYNC_LENGTH) {
            prev = istart + pos - RSYNC_LENGTH;
            hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
        } else {
            assert(mtctx->inBuff.filled >= RSYNC_LENGTH);
            prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
            hash = ZSTD_rollingHash_compute(prev + pos, (RSYNC_LENGTH - pos));
            hash = ZSTD_rollingHash_append(hash, istart, pos);
        }
    } else {
        /* We have enough bytes buffered to initialize the hash,
         * and have processed enough bytes to find a sync point.
         * Start scanning at the beginning of the input.
         */
        assert(mtctx->inBuff.filled >= RSYNC_MIN_BLOCK_SIZE);
        assert(RSYNC_MIN_BLOCK_SIZE >= RSYNC_LENGTH);
        pos = 0;
        prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
        hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
        if ((hash & hitMask) == hitMask) {
            /* We're already at a sync point so don't load any more until
             * we're able to flush this sync point.
             * This likely happened because the job table was full so we
             * couldn't add our job.
             */
            syncPoint.toLoad = 0;
            syncPoint.flush = 1;
            return syncPoint;
        }
    }
    /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll
     * through the input. If we hit a synchronization point, then cut the
     * job off, and tell the compressor to flush the job. Otherwise, load
     * all the bytes and continue as normal.
     * If we go too long without a synchronization point (targetSectionSize)
     * then a block will be emitted anyway, but this is okay, since if we
     * are already synchronized we will remain synchronized.
     */
    assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash);
    for (; pos < syncPoint.toLoad; ++pos) {
        BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH];
        /* This assert is very expensive, and Debian compiles with asserts enabled.
         * So disable it for now. We can get similar coverage by checking it at the
         * beginning & end of the loop.
         * assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash);
         */
        hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower);
        assert(mtctx->inBuff.filled + pos >= RSYNC_MIN_BLOCK_SIZE);
        if ((hash & hitMask) == hitMask) {
            syncPoint.toLoad = pos + 1;
            syncPoint.flush = 1;
            ++pos;   /* for assert */
            break;
        }
    }
    assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash);
    return syncPoint;
}
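
/* Illustrative sketch (not upstream code) : the shape of the rolling hash used
 * above. It is a Rabin-Karp style polynomial hash over a window of length L :
 * given `primePower` == prime^(L-1), the hash of s[i+1..i+L] derives from the
 * hash of s[i..i+L-1] in O(1). The real ZSTD_rollingHash_rotate() additionally
 * offsets each byte by a small constant; the actual prime and offset live in
 * zstd_internal.h, so the parameters below are placeholders. */
MEM_STATIC U64 ZSTDMT_exampleRollHash(U64 hash, BYTE toRemove, BYTE toAdd,
                                      U64 prime, U64 primePower /* prime^(L-1) */)
{
    hash -= (U64)toRemove * primePower;  /* drop the contribution of the oldest byte */
    hash *= prime;                       /* shift every remaining byte one position up */
    hash += toAdd;                       /* mix in the newest byte */
    return hash;
}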

size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx)
{
    size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled;
    if (hintInSize==0) hintInSize = mtctx->targetSectionSize;
    return hintInSize;
}

/** ZSTDMT_compressStream_generic() :
 *  internal use only - exposed to be invoked from zstd_compress.c
 *  assumption : output and input are valid (pos <= size)
 * @return : minimum amount of data remaining to flush, 0 if none */
size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
                                     ZSTD_outBuffer* output,
                                     ZSTD_inBuffer* input,
                                     ZSTD_EndDirective endOp)
{
    unsigned forwardInputProgress = 0;
    DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)",
                (U32)endOp, (U32)(input->size - input->pos));
    assert(output->pos <= output->size);
    assert(input->pos <= input->size);

    if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
        /* current frame is being ended : only flush/end are allowed */
        return ERROR(stage_wrong);
    }

    /* fill input buffer */
    if ( (!mtctx->jobReady)
      && (input->size > input->pos) ) {   /* support NULL input */
        if (mtctx->inBuff.buffer.start == NULL) {
            assert(mtctx->inBuff.filled == 0);   /* no buffer => nothing can have been filled */
            if (!ZSTDMT_tryGetInputRange(mtctx)) {
                /* It is only possible for this operation to fail if there are
                 * still compression jobs ongoing.
                 */
                DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed");
                assert(mtctx->doneJobID != mtctx->nextJobID);
            } else
                DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start);
        }
        if (mtctx->inBuff.buffer.start != NULL) {
            syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input);
            if (syncPoint.flush && endOp == ZSTD_e_continue) {
                endOp = ZSTD_e_flush;
            }
            assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
            DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
                        (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
            ZSTD_memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
            input->pos += syncPoint.toLoad;
            mtctx->inBuff.filled += syncPoint.toLoad;
            forwardInputProgress = syncPoint.toLoad>0;
        }
    }
    if ((input->pos < input->size) && (endOp == ZSTD_e_end)) {
        /* Can't end yet because the input is not fully consumed.
         * We are in one of these cases:
         * - mtctx->inBuff is NULL & empty: we couldn't get an input buffer so don't create a new job.
         * - We filled the input buffer: flush this job but don't end the frame.
         * - We hit a synchronization point: flush this job but don't end the frame.
         */
        assert(mtctx->inBuff.filled == 0 || mtctx->inBuff.filled == mtctx->targetSectionSize || mtctx->params.rsyncable);
        endOp = ZSTD_e_flush;
    }

    if ( (mtctx->jobReady)
      || (mtctx->inBuff.filled >= mtctx->targetSectionSize)  /* filled enough : let's compress */
      || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0))  /* something to flush : let's go */
      || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) {   /* must finish the frame with a zero-size block */
        size_t const jobSize = mtctx->inBuff.filled;
        assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) , "");
    }

    /* check for potential compressed data ready to be flushed */
    {   size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp);   /* block if there was no forward input progress */
        if (input->pos < input->size) return MAX(remainingToFlush, 1);   /* input not consumed : do not end flush yet */
        DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush);
        return remainingToFlush;
    }
}
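
/* Illustrative usage sketch (not part of this file) : how an application
 * reaches ZSTDMT_compressStream_generic() above. ZSTDMT is internal; users
 * enable worker threads on a regular ZSTD_CCtx and ZSTD_compressStream2()
 * routes into this code path. Kept under #if 0 since it is an example only
 * and requires a library built with ZSTD_MULTITHREAD. */
#if 0
#include <zstd.h>

static size_t streamChunkMT(ZSTD_CCtx* cctx,
                            ZSTD_outBuffer* out, ZSTD_inBuffer* in, int lastChunk)
{
    /* typically set once after ZSTD_createCCtx() : with >0 workers,
     * compression becomes non-blocking and multithreaded */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);
    return ZSTD_compressStream2(cctx, out, in,
                                lastChunk ? ZSTD_e_end : ZSTD_e_continue);
    /* @return : nb of bytes still to flush; call again while > 0 */
}
#endif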