git subrepo pull (merge) --force deps/libchdr
[pcsx_rearmed.git] / deps / libchdr / deps / zstd-1.5.5 / lib / compress / zstd_compress.c
CommitLineData
648db22b 1/*
2 * Copyright (c) Meta Platforms, Inc. and affiliates.
3 * All rights reserved.
4 *
5 * This source code is licensed under both the BSD-style license (found in the
6 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7 * in the COPYING file in the root directory of this source tree).
8 * You may select, at your option, one of the above-listed licenses.
9 */
10
11/*-*************************************
12* Dependencies
13***************************************/
14#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */
15#include "../common/zstd_deps.h" /* INT_MAX, ZSTD_memset, ZSTD_memcpy */
16#include "../common/mem.h"
17#include "hist.h" /* HIST_countFast_wksp */
18#define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */
19#include "../common/fse.h"
20#include "../common/huf.h"
21#include "zstd_compress_internal.h"
22#include "zstd_compress_sequences.h"
23#include "zstd_compress_literals.h"
24#include "zstd_fast.h"
25#include "zstd_double_fast.h"
26#include "zstd_lazy.h"
27#include "zstd_opt.h"
28#include "zstd_ldm.h"
29#include "zstd_compress_superblock.h"
30#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_rotateRight_U64 */
31
32/* ***************************************************************
33* Tuning parameters
34*****************************************************************/
35/*!
36 * COMPRESS_HEAPMODE :
37 * Select how default decompression function ZSTD_compress() allocates its context,
38 * on stack (0, default), or into heap (1).
39 * Note that functions with explicit context such as ZSTD_compressCCtx() are unaffected.
40 */
41#ifndef ZSTD_COMPRESS_HEAPMODE
42# define ZSTD_COMPRESS_HEAPMODE 0
43#endif
44
45/*!
46 * ZSTD_HASHLOG3_MAX :
47 * Maximum size of the hash table dedicated to find 3-bytes matches,
48 * in log format, aka 17 => 1 << 17 == 128Ki positions.
49 * This structure is only used in zstd_opt.
50 * Since allocation is centralized for all strategies, it has to be known here.
51 * The actual (selected) size of the hash table is then stored in ZSTD_matchState_t.hashLog3,
52 * so that zstd_opt.c doesn't need to know about this constant.
53 */
54#ifndef ZSTD_HASHLOG3_MAX
55# define ZSTD_HASHLOG3_MAX 17
56#endif
57
58/*-*************************************
59* Helper functions
60***************************************/
61/* ZSTD_compressBound()
62 * Note that the result from this function is only valid for
63 * the one-pass compression functions.
64 * When employing the streaming mode,
65 * if flushes are frequently altering the size of blocks,
66 * the overhead from block headers can make the compressed data larger
67 * than the return value of ZSTD_compressBound().
68 */
69size_t ZSTD_compressBound(size_t srcSize) {
70 size_t const r = ZSTD_COMPRESSBOUND(srcSize);
71 if (r==0) return ERROR(srcSize_wrong);
72 return r;
73}
74
75
76/*-*************************************
77* Context memory management
78***************************************/
79struct ZSTD_CDict_s {
80 const void* dictContent;
81 size_t dictContentSize;
82 ZSTD_dictContentType_e dictContentType; /* The dictContentType the CDict was created with */
83 U32* entropyWorkspace; /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
84 ZSTD_cwksp workspace;
85 ZSTD_matchState_t matchState;
86 ZSTD_compressedBlockState_t cBlockState;
87 ZSTD_customMem customMem;
88 U32 dictID;
89 int compressionLevel; /* 0 indicates that advanced API was used to select CDict params */
90 ZSTD_paramSwitch_e useRowMatchFinder; /* Indicates whether the CDict was created with params that would use
91 * row-based matchfinder. Unless the cdict is reloaded, we will use
92 * the same greedy/lazy matchfinder at compression time.
93 */
94}; /* typedef'd to ZSTD_CDict within "zstd.h" */
95
96ZSTD_CCtx* ZSTD_createCCtx(void)
97{
98 return ZSTD_createCCtx_advanced(ZSTD_defaultCMem);
99}
100
101static void ZSTD_initCCtx(ZSTD_CCtx* cctx, ZSTD_customMem memManager)
102{
103 assert(cctx != NULL);
104 ZSTD_memset(cctx, 0, sizeof(*cctx));
105 cctx->customMem = memManager;
106 cctx->bmi2 = ZSTD_cpuSupportsBmi2();
107 { size_t const err = ZSTD_CCtx_reset(cctx, ZSTD_reset_parameters);
108 assert(!ZSTD_isError(err));
109 (void)err;
110 }
111}
112
113ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
114{
115 ZSTD_STATIC_ASSERT(zcss_init==0);
116 ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN==(0ULL - 1));
117 if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
118 { ZSTD_CCtx* const cctx = (ZSTD_CCtx*)ZSTD_customMalloc(sizeof(ZSTD_CCtx), customMem);
119 if (!cctx) return NULL;
120 ZSTD_initCCtx(cctx, customMem);
121 return cctx;
122 }
123}
124
125ZSTD_CCtx* ZSTD_initStaticCCtx(void* workspace, size_t workspaceSize)
126{
127 ZSTD_cwksp ws;
128 ZSTD_CCtx* cctx;
129 if (workspaceSize <= sizeof(ZSTD_CCtx)) return NULL; /* minimum size */
130 if ((size_t)workspace & 7) return NULL; /* must be 8-aligned */
131 ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
132
133 cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
134 if (cctx == NULL) return NULL;
135
136 ZSTD_memset(cctx, 0, sizeof(ZSTD_CCtx));
137 ZSTD_cwksp_move(&cctx->workspace, &ws);
138 cctx->staticSize = workspaceSize;
139
140 /* statically sized space. entropyWorkspace never moves (but prev/next block swap places) */
141 if (!ZSTD_cwksp_check_available(&cctx->workspace, ENTROPY_WORKSPACE_SIZE + 2 * sizeof(ZSTD_compressedBlockState_t))) return NULL;
142 cctx->blockState.prevCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
143 cctx->blockState.nextCBlock = (ZSTD_compressedBlockState_t*)ZSTD_cwksp_reserve_object(&cctx->workspace, sizeof(ZSTD_compressedBlockState_t));
144 cctx->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cctx->workspace, ENTROPY_WORKSPACE_SIZE);
145 cctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
146 return cctx;
147}
148
149/**
150 * Clears and frees all of the dictionaries in the CCtx.
151 */
152static void ZSTD_clearAllDicts(ZSTD_CCtx* cctx)
153{
154 ZSTD_customFree(cctx->localDict.dictBuffer, cctx->customMem);
155 ZSTD_freeCDict(cctx->localDict.cdict);
156 ZSTD_memset(&cctx->localDict, 0, sizeof(cctx->localDict));
157 ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict));
158 cctx->cdict = NULL;
159}
160
161static size_t ZSTD_sizeof_localDict(ZSTD_localDict dict)
162{
163 size_t const bufferSize = dict.dictBuffer != NULL ? dict.dictSize : 0;
164 size_t const cdictSize = ZSTD_sizeof_CDict(dict.cdict);
165 return bufferSize + cdictSize;
166}
167
168static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
169{
170 assert(cctx != NULL);
171 assert(cctx->staticSize == 0);
172 ZSTD_clearAllDicts(cctx);
173#ifdef ZSTD_MULTITHREAD
174 ZSTDMT_freeCCtx(cctx->mtctx); cctx->mtctx = NULL;
175#endif
176 ZSTD_cwksp_free(&cctx->workspace, cctx->customMem);
177}
178
179size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
180{
181 if (cctx==NULL) return 0; /* support free on NULL */
182 RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
183 "not compatible with static CCtx");
184 { int cctxInWorkspace = ZSTD_cwksp_owns_buffer(&cctx->workspace, cctx);
185 ZSTD_freeCCtxContent(cctx);
186 if (!cctxInWorkspace) ZSTD_customFree(cctx, cctx->customMem);
187 }
188 return 0;
189}
190
191
192static size_t ZSTD_sizeof_mtctx(const ZSTD_CCtx* cctx)
193{
194#ifdef ZSTD_MULTITHREAD
195 return ZSTDMT_sizeof_CCtx(cctx->mtctx);
196#else
197 (void)cctx;
198 return 0;
199#endif
200}
201
202
203size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
204{
205 if (cctx==NULL) return 0; /* support sizeof on NULL */
206 /* cctx may be in the workspace */
207 return (cctx->workspace.workspace == cctx ? 0 : sizeof(*cctx))
208 + ZSTD_cwksp_sizeof(&cctx->workspace)
209 + ZSTD_sizeof_localDict(cctx->localDict)
210 + ZSTD_sizeof_mtctx(cctx);
211}
212
213size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
214{
215 return ZSTD_sizeof_CCtx(zcs); /* same object */
216}
217
218/* private API call, for dictBuilder only */
219const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) { return &(ctx->seqStore); }
220
221/* Returns true if the strategy supports using a row based matchfinder */
222static int ZSTD_rowMatchFinderSupported(const ZSTD_strategy strategy) {
223 return (strategy >= ZSTD_greedy && strategy <= ZSTD_lazy2);
224}
225
226/* Returns true if the strategy and useRowMatchFinder mode indicate that we will use the row based matchfinder
227 * for this compression.
228 */
229static int ZSTD_rowMatchFinderUsed(const ZSTD_strategy strategy, const ZSTD_paramSwitch_e mode) {
230 assert(mode != ZSTD_ps_auto);
231 return ZSTD_rowMatchFinderSupported(strategy) && (mode == ZSTD_ps_enable);
232}
233
234/* Returns row matchfinder usage given an initial mode and cParams */
235static ZSTD_paramSwitch_e ZSTD_resolveRowMatchFinderMode(ZSTD_paramSwitch_e mode,
236 const ZSTD_compressionParameters* const cParams) {
237#if defined(ZSTD_ARCH_X86_SSE2) || defined(ZSTD_ARCH_ARM_NEON)
238 int const kHasSIMD128 = 1;
239#else
240 int const kHasSIMD128 = 0;
241#endif
242 if (mode != ZSTD_ps_auto) return mode; /* if requested enabled, but no SIMD, we still will use row matchfinder */
243 mode = ZSTD_ps_disable;
244 if (!ZSTD_rowMatchFinderSupported(cParams->strategy)) return mode;
245 if (kHasSIMD128) {
246 if (cParams->windowLog > 14) mode = ZSTD_ps_enable;
247 } else {
248 if (cParams->windowLog > 17) mode = ZSTD_ps_enable;
249 }
250 return mode;
251}
252
253/* Returns block splitter usage (generally speaking, when using slower/stronger compression modes) */
254static ZSTD_paramSwitch_e ZSTD_resolveBlockSplitterMode(ZSTD_paramSwitch_e mode,
255 const ZSTD_compressionParameters* const cParams) {
256 if (mode != ZSTD_ps_auto) return mode;
257 return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 17) ? ZSTD_ps_enable : ZSTD_ps_disable;
258}
259
260/* Returns 1 if the arguments indicate that we should allocate a chainTable, 0 otherwise */
261static int ZSTD_allocateChainTable(const ZSTD_strategy strategy,
262 const ZSTD_paramSwitch_e useRowMatchFinder,
263 const U32 forDDSDict) {
264 assert(useRowMatchFinder != ZSTD_ps_auto);
265 /* We always should allocate a chaintable if we are allocating a matchstate for a DDS dictionary matchstate.
266 * We do not allocate a chaintable if we are using ZSTD_fast, or are using the row-based matchfinder.
267 */
268 return forDDSDict || ((strategy != ZSTD_fast) && !ZSTD_rowMatchFinderUsed(strategy, useRowMatchFinder));
269}
270
271/* Returns ZSTD_ps_enable if compression parameters are such that we should
272 * enable long distance matching (wlog >= 27, strategy >= btopt).
273 * Returns ZSTD_ps_disable otherwise.
274 */
275static ZSTD_paramSwitch_e ZSTD_resolveEnableLdm(ZSTD_paramSwitch_e mode,
276 const ZSTD_compressionParameters* const cParams) {
277 if (mode != ZSTD_ps_auto) return mode;
278 return (cParams->strategy >= ZSTD_btopt && cParams->windowLog >= 27) ? ZSTD_ps_enable : ZSTD_ps_disable;
279}
280
281static int ZSTD_resolveExternalSequenceValidation(int mode) {
282 return mode;
283}
284
285/* Resolves maxBlockSize to the default if no value is present. */
286static size_t ZSTD_resolveMaxBlockSize(size_t maxBlockSize) {
287 if (maxBlockSize == 0) {
288 return ZSTD_BLOCKSIZE_MAX;
289 } else {
290 return maxBlockSize;
291 }
292}
293
294static ZSTD_paramSwitch_e ZSTD_resolveExternalRepcodeSearch(ZSTD_paramSwitch_e value, int cLevel) {
295 if (value != ZSTD_ps_auto) return value;
296 if (cLevel < 10) {
297 return ZSTD_ps_disable;
298 } else {
299 return ZSTD_ps_enable;
300 }
301}
302
303/* Returns 1 if compression parameters are such that CDict hashtable and chaintable indices are tagged.
304 * If so, the tags need to be removed in ZSTD_resetCCtx_byCopyingCDict. */
305static int ZSTD_CDictIndicesAreTagged(const ZSTD_compressionParameters* const cParams) {
306 return cParams->strategy == ZSTD_fast || cParams->strategy == ZSTD_dfast;
307}
308
309static ZSTD_CCtx_params ZSTD_makeCCtxParamsFromCParams(
310 ZSTD_compressionParameters cParams)
311{
312 ZSTD_CCtx_params cctxParams;
313 /* should not matter, as all cParams are presumed properly defined */
314 ZSTD_CCtxParams_init(&cctxParams, ZSTD_CLEVEL_DEFAULT);
315 cctxParams.cParams = cParams;
316
317 /* Adjust advanced params according to cParams */
318 cctxParams.ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams.ldmParams.enableLdm, &cParams);
319 if (cctxParams.ldmParams.enableLdm == ZSTD_ps_enable) {
320 ZSTD_ldm_adjustParameters(&cctxParams.ldmParams, &cParams);
321 assert(cctxParams.ldmParams.hashLog >= cctxParams.ldmParams.bucketSizeLog);
322 assert(cctxParams.ldmParams.hashRateLog < 32);
323 }
324 cctxParams.useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams.useBlockSplitter, &cParams);
325 cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
326 cctxParams.validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams.validateSequences);
327 cctxParams.maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams.maxBlockSize);
328 cctxParams.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams.searchForExternalRepcodes,
329 cctxParams.compressionLevel);
330 assert(!ZSTD_checkCParams(cParams));
331 return cctxParams;
332}
333
334static ZSTD_CCtx_params* ZSTD_createCCtxParams_advanced(
335 ZSTD_customMem customMem)
336{
337 ZSTD_CCtx_params* params;
338 if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
339 params = (ZSTD_CCtx_params*)ZSTD_customCalloc(
340 sizeof(ZSTD_CCtx_params), customMem);
341 if (!params) { return NULL; }
342 ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
343 params->customMem = customMem;
344 return params;
345}
346
347ZSTD_CCtx_params* ZSTD_createCCtxParams(void)
348{
349 return ZSTD_createCCtxParams_advanced(ZSTD_defaultCMem);
350}
351
352size_t ZSTD_freeCCtxParams(ZSTD_CCtx_params* params)
353{
354 if (params == NULL) { return 0; }
355 ZSTD_customFree(params, params->customMem);
356 return 0;
357}
358
359size_t ZSTD_CCtxParams_reset(ZSTD_CCtx_params* params)
360{
361 return ZSTD_CCtxParams_init(params, ZSTD_CLEVEL_DEFAULT);
362}
363
364size_t ZSTD_CCtxParams_init(ZSTD_CCtx_params* cctxParams, int compressionLevel) {
365 RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
366 ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
367 cctxParams->compressionLevel = compressionLevel;
368 cctxParams->fParams.contentSizeFlag = 1;
369 return 0;
370}
371
372#define ZSTD_NO_CLEVEL 0
373
374/**
375 * Initializes `cctxParams` from `params` and `compressionLevel`.
376 * @param compressionLevel If params are derived from a compression level then that compression level, otherwise ZSTD_NO_CLEVEL.
377 */
378static void
379ZSTD_CCtxParams_init_internal(ZSTD_CCtx_params* cctxParams,
380 const ZSTD_parameters* params,
381 int compressionLevel)
382{
383 assert(!ZSTD_checkCParams(params->cParams));
384 ZSTD_memset(cctxParams, 0, sizeof(*cctxParams));
385 cctxParams->cParams = params->cParams;
386 cctxParams->fParams = params->fParams;
387 /* Should not matter, as all cParams are presumed properly defined.
388 * But, set it for tracing anyway.
389 */
390 cctxParams->compressionLevel = compressionLevel;
391 cctxParams->useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams->useRowMatchFinder, &params->cParams);
392 cctxParams->useBlockSplitter = ZSTD_resolveBlockSplitterMode(cctxParams->useBlockSplitter, &params->cParams);
393 cctxParams->ldmParams.enableLdm = ZSTD_resolveEnableLdm(cctxParams->ldmParams.enableLdm, &params->cParams);
394 cctxParams->validateSequences = ZSTD_resolveExternalSequenceValidation(cctxParams->validateSequences);
395 cctxParams->maxBlockSize = ZSTD_resolveMaxBlockSize(cctxParams->maxBlockSize);
396 cctxParams->searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(cctxParams->searchForExternalRepcodes, compressionLevel);
397 DEBUGLOG(4, "ZSTD_CCtxParams_init_internal: useRowMatchFinder=%d, useBlockSplitter=%d ldm=%d",
398 cctxParams->useRowMatchFinder, cctxParams->useBlockSplitter, cctxParams->ldmParams.enableLdm);
399}
400
401size_t ZSTD_CCtxParams_init_advanced(ZSTD_CCtx_params* cctxParams, ZSTD_parameters params)
402{
403 RETURN_ERROR_IF(!cctxParams, GENERIC, "NULL pointer!");
404 FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
405 ZSTD_CCtxParams_init_internal(cctxParams, &params, ZSTD_NO_CLEVEL);
406 return 0;
407}
408
409/**
410 * Sets cctxParams' cParams and fParams from params, but otherwise leaves them alone.
411 * @param params Validated zstd parameters.
412 */
413static void ZSTD_CCtxParams_setZstdParams(
414 ZSTD_CCtx_params* cctxParams, const ZSTD_parameters* params)
415{
416 assert(!ZSTD_checkCParams(params->cParams));
417 cctxParams->cParams = params->cParams;
418 cctxParams->fParams = params->fParams;
419 /* Should not matter, as all cParams are presumed properly defined.
420 * But, set it for tracing anyway.
421 */
422 cctxParams->compressionLevel = ZSTD_NO_CLEVEL;
423}
424
425ZSTD_bounds ZSTD_cParam_getBounds(ZSTD_cParameter param)
426{
427 ZSTD_bounds bounds = { 0, 0, 0 };
428
429 switch(param)
430 {
431 case ZSTD_c_compressionLevel:
432 bounds.lowerBound = ZSTD_minCLevel();
433 bounds.upperBound = ZSTD_maxCLevel();
434 return bounds;
435
436 case ZSTD_c_windowLog:
437 bounds.lowerBound = ZSTD_WINDOWLOG_MIN;
438 bounds.upperBound = ZSTD_WINDOWLOG_MAX;
439 return bounds;
440
441 case ZSTD_c_hashLog:
442 bounds.lowerBound = ZSTD_HASHLOG_MIN;
443 bounds.upperBound = ZSTD_HASHLOG_MAX;
444 return bounds;
445
446 case ZSTD_c_chainLog:
447 bounds.lowerBound = ZSTD_CHAINLOG_MIN;
448 bounds.upperBound = ZSTD_CHAINLOG_MAX;
449 return bounds;
450
451 case ZSTD_c_searchLog:
452 bounds.lowerBound = ZSTD_SEARCHLOG_MIN;
453 bounds.upperBound = ZSTD_SEARCHLOG_MAX;
454 return bounds;
455
456 case ZSTD_c_minMatch:
457 bounds.lowerBound = ZSTD_MINMATCH_MIN;
458 bounds.upperBound = ZSTD_MINMATCH_MAX;
459 return bounds;
460
461 case ZSTD_c_targetLength:
462 bounds.lowerBound = ZSTD_TARGETLENGTH_MIN;
463 bounds.upperBound = ZSTD_TARGETLENGTH_MAX;
464 return bounds;
465
466 case ZSTD_c_strategy:
467 bounds.lowerBound = ZSTD_STRATEGY_MIN;
468 bounds.upperBound = ZSTD_STRATEGY_MAX;
469 return bounds;
470
471 case ZSTD_c_contentSizeFlag:
472 bounds.lowerBound = 0;
473 bounds.upperBound = 1;
474 return bounds;
475
476 case ZSTD_c_checksumFlag:
477 bounds.lowerBound = 0;
478 bounds.upperBound = 1;
479 return bounds;
480
481 case ZSTD_c_dictIDFlag:
482 bounds.lowerBound = 0;
483 bounds.upperBound = 1;
484 return bounds;
485
486 case ZSTD_c_nbWorkers:
487 bounds.lowerBound = 0;
488#ifdef ZSTD_MULTITHREAD
489 bounds.upperBound = ZSTDMT_NBWORKERS_MAX;
490#else
491 bounds.upperBound = 0;
492#endif
493 return bounds;
494
495 case ZSTD_c_jobSize:
496 bounds.lowerBound = 0;
497#ifdef ZSTD_MULTITHREAD
498 bounds.upperBound = ZSTDMT_JOBSIZE_MAX;
499#else
500 bounds.upperBound = 0;
501#endif
502 return bounds;
503
504 case ZSTD_c_overlapLog:
505#ifdef ZSTD_MULTITHREAD
506 bounds.lowerBound = ZSTD_OVERLAPLOG_MIN;
507 bounds.upperBound = ZSTD_OVERLAPLOG_MAX;
508#else
509 bounds.lowerBound = 0;
510 bounds.upperBound = 0;
511#endif
512 return bounds;
513
514 case ZSTD_c_enableDedicatedDictSearch:
515 bounds.lowerBound = 0;
516 bounds.upperBound = 1;
517 return bounds;
518
519 case ZSTD_c_enableLongDistanceMatching:
520 bounds.lowerBound = (int)ZSTD_ps_auto;
521 bounds.upperBound = (int)ZSTD_ps_disable;
522 return bounds;
523
524 case ZSTD_c_ldmHashLog:
525 bounds.lowerBound = ZSTD_LDM_HASHLOG_MIN;
526 bounds.upperBound = ZSTD_LDM_HASHLOG_MAX;
527 return bounds;
528
529 case ZSTD_c_ldmMinMatch:
530 bounds.lowerBound = ZSTD_LDM_MINMATCH_MIN;
531 bounds.upperBound = ZSTD_LDM_MINMATCH_MAX;
532 return bounds;
533
534 case ZSTD_c_ldmBucketSizeLog:
535 bounds.lowerBound = ZSTD_LDM_BUCKETSIZELOG_MIN;
536 bounds.upperBound = ZSTD_LDM_BUCKETSIZELOG_MAX;
537 return bounds;
538
539 case ZSTD_c_ldmHashRateLog:
540 bounds.lowerBound = ZSTD_LDM_HASHRATELOG_MIN;
541 bounds.upperBound = ZSTD_LDM_HASHRATELOG_MAX;
542 return bounds;
543
544 /* experimental parameters */
545 case ZSTD_c_rsyncable:
546 bounds.lowerBound = 0;
547 bounds.upperBound = 1;
548 return bounds;
549
550 case ZSTD_c_forceMaxWindow :
551 bounds.lowerBound = 0;
552 bounds.upperBound = 1;
553 return bounds;
554
555 case ZSTD_c_format:
556 ZSTD_STATIC_ASSERT(ZSTD_f_zstd1 < ZSTD_f_zstd1_magicless);
557 bounds.lowerBound = ZSTD_f_zstd1;
558 bounds.upperBound = ZSTD_f_zstd1_magicless; /* note : how to ensure at compile time that this is the highest value enum ? */
559 return bounds;
560
561 case ZSTD_c_forceAttachDict:
562 ZSTD_STATIC_ASSERT(ZSTD_dictDefaultAttach < ZSTD_dictForceLoad);
563 bounds.lowerBound = ZSTD_dictDefaultAttach;
564 bounds.upperBound = ZSTD_dictForceLoad; /* note : how to ensure at compile time that this is the highest value enum ? */
565 return bounds;
566
567 case ZSTD_c_literalCompressionMode:
568 ZSTD_STATIC_ASSERT(ZSTD_ps_auto < ZSTD_ps_enable && ZSTD_ps_enable < ZSTD_ps_disable);
569 bounds.lowerBound = (int)ZSTD_ps_auto;
570 bounds.upperBound = (int)ZSTD_ps_disable;
571 return bounds;
572
573 case ZSTD_c_targetCBlockSize:
574 bounds.lowerBound = ZSTD_TARGETCBLOCKSIZE_MIN;
575 bounds.upperBound = ZSTD_TARGETCBLOCKSIZE_MAX;
576 return bounds;
577
578 case ZSTD_c_srcSizeHint:
579 bounds.lowerBound = ZSTD_SRCSIZEHINT_MIN;
580 bounds.upperBound = ZSTD_SRCSIZEHINT_MAX;
581 return bounds;
582
583 case ZSTD_c_stableInBuffer:
584 case ZSTD_c_stableOutBuffer:
585 bounds.lowerBound = (int)ZSTD_bm_buffered;
586 bounds.upperBound = (int)ZSTD_bm_stable;
587 return bounds;
588
589 case ZSTD_c_blockDelimiters:
590 bounds.lowerBound = (int)ZSTD_sf_noBlockDelimiters;
591 bounds.upperBound = (int)ZSTD_sf_explicitBlockDelimiters;
592 return bounds;
593
594 case ZSTD_c_validateSequences:
595 bounds.lowerBound = 0;
596 bounds.upperBound = 1;
597 return bounds;
598
599 case ZSTD_c_useBlockSplitter:
600 bounds.lowerBound = (int)ZSTD_ps_auto;
601 bounds.upperBound = (int)ZSTD_ps_disable;
602 return bounds;
603
604 case ZSTD_c_useRowMatchFinder:
605 bounds.lowerBound = (int)ZSTD_ps_auto;
606 bounds.upperBound = (int)ZSTD_ps_disable;
607 return bounds;
608
609 case ZSTD_c_deterministicRefPrefix:
610 bounds.lowerBound = 0;
611 bounds.upperBound = 1;
612 return bounds;
613
614 case ZSTD_c_prefetchCDictTables:
615 bounds.lowerBound = (int)ZSTD_ps_auto;
616 bounds.upperBound = (int)ZSTD_ps_disable;
617 return bounds;
618
619 case ZSTD_c_enableSeqProducerFallback:
620 bounds.lowerBound = 0;
621 bounds.upperBound = 1;
622 return bounds;
623
624 case ZSTD_c_maxBlockSize:
625 bounds.lowerBound = ZSTD_BLOCKSIZE_MAX_MIN;
626 bounds.upperBound = ZSTD_BLOCKSIZE_MAX;
627 return bounds;
628
629 case ZSTD_c_searchForExternalRepcodes:
630 bounds.lowerBound = (int)ZSTD_ps_auto;
631 bounds.upperBound = (int)ZSTD_ps_disable;
632 return bounds;
633
634 default:
635 bounds.error = ERROR(parameter_unsupported);
636 return bounds;
637 }
638}
639
640/* ZSTD_cParam_clampBounds:
641 * Clamps the value into the bounded range.
642 */
643static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
644{
645 ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);
646 if (ZSTD_isError(bounds.error)) return bounds.error;
647 if (*value < bounds.lowerBound) *value = bounds.lowerBound;
648 if (*value > bounds.upperBound) *value = bounds.upperBound;
649 return 0;
650}
651
652#define BOUNDCHECK(cParam, val) { \
653 RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val), \
654 parameter_outOfBound, "Param out of bounds"); \
655}
656
657
658static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
659{
660 switch(param)
661 {
662 case ZSTD_c_compressionLevel:
663 case ZSTD_c_hashLog:
664 case ZSTD_c_chainLog:
665 case ZSTD_c_searchLog:
666 case ZSTD_c_minMatch:
667 case ZSTD_c_targetLength:
668 case ZSTD_c_strategy:
669 return 1;
670
671 case ZSTD_c_format:
672 case ZSTD_c_windowLog:
673 case ZSTD_c_contentSizeFlag:
674 case ZSTD_c_checksumFlag:
675 case ZSTD_c_dictIDFlag:
676 case ZSTD_c_forceMaxWindow :
677 case ZSTD_c_nbWorkers:
678 case ZSTD_c_jobSize:
679 case ZSTD_c_overlapLog:
680 case ZSTD_c_rsyncable:
681 case ZSTD_c_enableDedicatedDictSearch:
682 case ZSTD_c_enableLongDistanceMatching:
683 case ZSTD_c_ldmHashLog:
684 case ZSTD_c_ldmMinMatch:
685 case ZSTD_c_ldmBucketSizeLog:
686 case ZSTD_c_ldmHashRateLog:
687 case ZSTD_c_forceAttachDict:
688 case ZSTD_c_literalCompressionMode:
689 case ZSTD_c_targetCBlockSize:
690 case ZSTD_c_srcSizeHint:
691 case ZSTD_c_stableInBuffer:
692 case ZSTD_c_stableOutBuffer:
693 case ZSTD_c_blockDelimiters:
694 case ZSTD_c_validateSequences:
695 case ZSTD_c_useBlockSplitter:
696 case ZSTD_c_useRowMatchFinder:
697 case ZSTD_c_deterministicRefPrefix:
698 case ZSTD_c_prefetchCDictTables:
699 case ZSTD_c_enableSeqProducerFallback:
700 case ZSTD_c_maxBlockSize:
701 case ZSTD_c_searchForExternalRepcodes:
702 default:
703 return 0;
704 }
705}
706
707size_t ZSTD_CCtx_setParameter(ZSTD_CCtx* cctx, ZSTD_cParameter param, int value)
708{
709 DEBUGLOG(4, "ZSTD_CCtx_setParameter (%i, %i)", (int)param, value);
710 if (cctx->streamStage != zcss_init) {
711 if (ZSTD_isUpdateAuthorized(param)) {
712 cctx->cParamsChanged = 1;
713 } else {
714 RETURN_ERROR(stage_wrong, "can only set params in cctx init stage");
715 } }
716
717 switch(param)
718 {
719 case ZSTD_c_nbWorkers:
720 RETURN_ERROR_IF((value!=0) && cctx->staticSize, parameter_unsupported,
721 "MT not compatible with static alloc");
722 break;
723
724 case ZSTD_c_compressionLevel:
725 case ZSTD_c_windowLog:
726 case ZSTD_c_hashLog:
727 case ZSTD_c_chainLog:
728 case ZSTD_c_searchLog:
729 case ZSTD_c_minMatch:
730 case ZSTD_c_targetLength:
731 case ZSTD_c_strategy:
732 case ZSTD_c_ldmHashRateLog:
733 case ZSTD_c_format:
734 case ZSTD_c_contentSizeFlag:
735 case ZSTD_c_checksumFlag:
736 case ZSTD_c_dictIDFlag:
737 case ZSTD_c_forceMaxWindow:
738 case ZSTD_c_forceAttachDict:
739 case ZSTD_c_literalCompressionMode:
740 case ZSTD_c_jobSize:
741 case ZSTD_c_overlapLog:
742 case ZSTD_c_rsyncable:
743 case ZSTD_c_enableDedicatedDictSearch:
744 case ZSTD_c_enableLongDistanceMatching:
745 case ZSTD_c_ldmHashLog:
746 case ZSTD_c_ldmMinMatch:
747 case ZSTD_c_ldmBucketSizeLog:
748 case ZSTD_c_targetCBlockSize:
749 case ZSTD_c_srcSizeHint:
750 case ZSTD_c_stableInBuffer:
751 case ZSTD_c_stableOutBuffer:
752 case ZSTD_c_blockDelimiters:
753 case ZSTD_c_validateSequences:
754 case ZSTD_c_useBlockSplitter:
755 case ZSTD_c_useRowMatchFinder:
756 case ZSTD_c_deterministicRefPrefix:
757 case ZSTD_c_prefetchCDictTables:
758 case ZSTD_c_enableSeqProducerFallback:
759 case ZSTD_c_maxBlockSize:
760 case ZSTD_c_searchForExternalRepcodes:
761 break;
762
763 default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
764 }
765 return ZSTD_CCtxParams_setParameter(&cctx->requestedParams, param, value);
766}
767
768size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
769 ZSTD_cParameter param, int value)
770{
771 DEBUGLOG(4, "ZSTD_CCtxParams_setParameter (%i, %i)", (int)param, value);
772 switch(param)
773 {
774 case ZSTD_c_format :
775 BOUNDCHECK(ZSTD_c_format, value);
776 CCtxParams->format = (ZSTD_format_e)value;
777 return (size_t)CCtxParams->format;
778
779 case ZSTD_c_compressionLevel : {
780 FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
781 if (value == 0)
782 CCtxParams->compressionLevel = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
783 else
784 CCtxParams->compressionLevel = value;
785 if (CCtxParams->compressionLevel >= 0) return (size_t)CCtxParams->compressionLevel;
786 return 0; /* return type (size_t) cannot represent negative values */
787 }
788
789 case ZSTD_c_windowLog :
790 if (value!=0) /* 0 => use default */
791 BOUNDCHECK(ZSTD_c_windowLog, value);
792 CCtxParams->cParams.windowLog = (U32)value;
793 return CCtxParams->cParams.windowLog;
794
795 case ZSTD_c_hashLog :
796 if (value!=0) /* 0 => use default */
797 BOUNDCHECK(ZSTD_c_hashLog, value);
798 CCtxParams->cParams.hashLog = (U32)value;
799 return CCtxParams->cParams.hashLog;
800
801 case ZSTD_c_chainLog :
802 if (value!=0) /* 0 => use default */
803 BOUNDCHECK(ZSTD_c_chainLog, value);
804 CCtxParams->cParams.chainLog = (U32)value;
805 return CCtxParams->cParams.chainLog;
806
807 case ZSTD_c_searchLog :
808 if (value!=0) /* 0 => use default */
809 BOUNDCHECK(ZSTD_c_searchLog, value);
810 CCtxParams->cParams.searchLog = (U32)value;
811 return (size_t)value;
812
813 case ZSTD_c_minMatch :
814 if (value!=0) /* 0 => use default */
815 BOUNDCHECK(ZSTD_c_minMatch, value);
816 CCtxParams->cParams.minMatch = (U32)value;
817 return CCtxParams->cParams.minMatch;
818
819 case ZSTD_c_targetLength :
820 BOUNDCHECK(ZSTD_c_targetLength, value);
821 CCtxParams->cParams.targetLength = (U32)value;
822 return CCtxParams->cParams.targetLength;
823
824 case ZSTD_c_strategy :
825 if (value!=0) /* 0 => use default */
826 BOUNDCHECK(ZSTD_c_strategy, value);
827 CCtxParams->cParams.strategy = (ZSTD_strategy)value;
828 return (size_t)CCtxParams->cParams.strategy;
829
830 case ZSTD_c_contentSizeFlag :
831 /* Content size written in frame header _when known_ (default:1) */
832 DEBUGLOG(4, "set content size flag = %u", (value!=0));
833 CCtxParams->fParams.contentSizeFlag = value != 0;
834 return (size_t)CCtxParams->fParams.contentSizeFlag;
835
836 case ZSTD_c_checksumFlag :
837 /* A 32-bits content checksum will be calculated and written at end of frame (default:0) */
838 CCtxParams->fParams.checksumFlag = value != 0;
839 return (size_t)CCtxParams->fParams.checksumFlag;
840
841 case ZSTD_c_dictIDFlag : /* When applicable, dictionary's dictID is provided in frame header (default:1) */
842 DEBUGLOG(4, "set dictIDFlag = %u", (value!=0));
843 CCtxParams->fParams.noDictIDFlag = !value;
844 return !CCtxParams->fParams.noDictIDFlag;
845
846 case ZSTD_c_forceMaxWindow :
847 CCtxParams->forceWindow = (value != 0);
848 return (size_t)CCtxParams->forceWindow;
849
850 case ZSTD_c_forceAttachDict : {
851 const ZSTD_dictAttachPref_e pref = (ZSTD_dictAttachPref_e)value;
852 BOUNDCHECK(ZSTD_c_forceAttachDict, (int)pref);
853 CCtxParams->attachDictPref = pref;
854 return CCtxParams->attachDictPref;
855 }
856
857 case ZSTD_c_literalCompressionMode : {
858 const ZSTD_paramSwitch_e lcm = (ZSTD_paramSwitch_e)value;
859 BOUNDCHECK(ZSTD_c_literalCompressionMode, (int)lcm);
860 CCtxParams->literalCompressionMode = lcm;
861 return CCtxParams->literalCompressionMode;
862 }
863
864 case ZSTD_c_nbWorkers :
865#ifndef ZSTD_MULTITHREAD
866 RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
867 return 0;
868#else
869 FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
870 CCtxParams->nbWorkers = value;
871 return CCtxParams->nbWorkers;
872#endif
873
874 case ZSTD_c_jobSize :
875#ifndef ZSTD_MULTITHREAD
876 RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
877 return 0;
878#else
879 /* Adjust to the minimum non-default value. */
880 if (value != 0 && value < ZSTDMT_JOBSIZE_MIN)
881 value = ZSTDMT_JOBSIZE_MIN;
882 FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
883 assert(value >= 0);
884 CCtxParams->jobSize = value;
885 return CCtxParams->jobSize;
886#endif
887
888 case ZSTD_c_overlapLog :
889#ifndef ZSTD_MULTITHREAD
890 RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
891 return 0;
892#else
893 FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), "");
894 CCtxParams->overlapLog = value;
895 return CCtxParams->overlapLog;
896#endif
897
898 case ZSTD_c_rsyncable :
899#ifndef ZSTD_MULTITHREAD
900 RETURN_ERROR_IF(value!=0, parameter_unsupported, "not compiled with multithreading");
901 return 0;
902#else
903 FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), "");
904 CCtxParams->rsyncable = value;
905 return CCtxParams->rsyncable;
906#endif
907
908 case ZSTD_c_enableDedicatedDictSearch :
909 CCtxParams->enableDedicatedDictSearch = (value!=0);
910 return (size_t)CCtxParams->enableDedicatedDictSearch;
911
912 case ZSTD_c_enableLongDistanceMatching :
913 BOUNDCHECK(ZSTD_c_enableLongDistanceMatching, value);
914 CCtxParams->ldmParams.enableLdm = (ZSTD_paramSwitch_e)value;
915 return CCtxParams->ldmParams.enableLdm;
916
917 case ZSTD_c_ldmHashLog :
918 if (value!=0) /* 0 ==> auto */
919 BOUNDCHECK(ZSTD_c_ldmHashLog, value);
920 CCtxParams->ldmParams.hashLog = (U32)value;
921 return CCtxParams->ldmParams.hashLog;
922
923 case ZSTD_c_ldmMinMatch :
924 if (value!=0) /* 0 ==> default */
925 BOUNDCHECK(ZSTD_c_ldmMinMatch, value);
926 CCtxParams->ldmParams.minMatchLength = (U32)value;
927 return CCtxParams->ldmParams.minMatchLength;
928
929 case ZSTD_c_ldmBucketSizeLog :
930 if (value!=0) /* 0 ==> default */
931 BOUNDCHECK(ZSTD_c_ldmBucketSizeLog, value);
932 CCtxParams->ldmParams.bucketSizeLog = (U32)value;
933 return CCtxParams->ldmParams.bucketSizeLog;
934
935 case ZSTD_c_ldmHashRateLog :
936 if (value!=0) /* 0 ==> default */
937 BOUNDCHECK(ZSTD_c_ldmHashRateLog, value);
938 CCtxParams->ldmParams.hashRateLog = (U32)value;
939 return CCtxParams->ldmParams.hashRateLog;
940
941 case ZSTD_c_targetCBlockSize :
942 if (value!=0) /* 0 ==> default */
943 BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
944 CCtxParams->targetCBlockSize = (U32)value;
945 return CCtxParams->targetCBlockSize;
946
947 case ZSTD_c_srcSizeHint :
948 if (value!=0) /* 0 ==> default */
949 BOUNDCHECK(ZSTD_c_srcSizeHint, value);
950 CCtxParams->srcSizeHint = value;
951 return (size_t)CCtxParams->srcSizeHint;
952
953 case ZSTD_c_stableInBuffer:
954 BOUNDCHECK(ZSTD_c_stableInBuffer, value);
955 CCtxParams->inBufferMode = (ZSTD_bufferMode_e)value;
956 return CCtxParams->inBufferMode;
957
958 case ZSTD_c_stableOutBuffer:
959 BOUNDCHECK(ZSTD_c_stableOutBuffer, value);
960 CCtxParams->outBufferMode = (ZSTD_bufferMode_e)value;
961 return CCtxParams->outBufferMode;
962
963 case ZSTD_c_blockDelimiters:
964 BOUNDCHECK(ZSTD_c_blockDelimiters, value);
965 CCtxParams->blockDelimiters = (ZSTD_sequenceFormat_e)value;
966 return CCtxParams->blockDelimiters;
967
968 case ZSTD_c_validateSequences:
969 BOUNDCHECK(ZSTD_c_validateSequences, value);
970 CCtxParams->validateSequences = value;
971 return CCtxParams->validateSequences;
972
973 case ZSTD_c_useBlockSplitter:
974 BOUNDCHECK(ZSTD_c_useBlockSplitter, value);
975 CCtxParams->useBlockSplitter = (ZSTD_paramSwitch_e)value;
976 return CCtxParams->useBlockSplitter;
977
978 case ZSTD_c_useRowMatchFinder:
979 BOUNDCHECK(ZSTD_c_useRowMatchFinder, value);
980 CCtxParams->useRowMatchFinder = (ZSTD_paramSwitch_e)value;
981 return CCtxParams->useRowMatchFinder;
982
983 case ZSTD_c_deterministicRefPrefix:
984 BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value);
985 CCtxParams->deterministicRefPrefix = !!value;
986 return CCtxParams->deterministicRefPrefix;
987
988 case ZSTD_c_prefetchCDictTables:
989 BOUNDCHECK(ZSTD_c_prefetchCDictTables, value);
990 CCtxParams->prefetchCDictTables = (ZSTD_paramSwitch_e)value;
991 return CCtxParams->prefetchCDictTables;
992
993 case ZSTD_c_enableSeqProducerFallback:
994 BOUNDCHECK(ZSTD_c_enableSeqProducerFallback, value);
995 CCtxParams->enableMatchFinderFallback = value;
996 return CCtxParams->enableMatchFinderFallback;
997
998 case ZSTD_c_maxBlockSize:
999 if (value!=0) /* 0 ==> default */
1000 BOUNDCHECK(ZSTD_c_maxBlockSize, value);
1001 CCtxParams->maxBlockSize = value;
1002 return CCtxParams->maxBlockSize;
1003
1004 case ZSTD_c_searchForExternalRepcodes:
1005 BOUNDCHECK(ZSTD_c_searchForExternalRepcodes, value);
1006 CCtxParams->searchForExternalRepcodes = (ZSTD_paramSwitch_e)value;
1007 return CCtxParams->searchForExternalRepcodes;
1008
1009 default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
1010 }
1011}
1012
1013size_t ZSTD_CCtx_getParameter(ZSTD_CCtx const* cctx, ZSTD_cParameter param, int* value)
1014{
1015 return ZSTD_CCtxParams_getParameter(&cctx->requestedParams, param, value);
1016}
1017
1018size_t ZSTD_CCtxParams_getParameter(
1019 ZSTD_CCtx_params const* CCtxParams, ZSTD_cParameter param, int* value)
1020{
1021 switch(param)
1022 {
1023 case ZSTD_c_format :
1024 *value = CCtxParams->format;
1025 break;
1026 case ZSTD_c_compressionLevel :
1027 *value = CCtxParams->compressionLevel;
1028 break;
1029 case ZSTD_c_windowLog :
1030 *value = (int)CCtxParams->cParams.windowLog;
1031 break;
1032 case ZSTD_c_hashLog :
1033 *value = (int)CCtxParams->cParams.hashLog;
1034 break;
1035 case ZSTD_c_chainLog :
1036 *value = (int)CCtxParams->cParams.chainLog;
1037 break;
1038 case ZSTD_c_searchLog :
1039 *value = CCtxParams->cParams.searchLog;
1040 break;
1041 case ZSTD_c_minMatch :
1042 *value = CCtxParams->cParams.minMatch;
1043 break;
1044 case ZSTD_c_targetLength :
1045 *value = CCtxParams->cParams.targetLength;
1046 break;
1047 case ZSTD_c_strategy :
1048 *value = (unsigned)CCtxParams->cParams.strategy;
1049 break;
1050 case ZSTD_c_contentSizeFlag :
1051 *value = CCtxParams->fParams.contentSizeFlag;
1052 break;
1053 case ZSTD_c_checksumFlag :
1054 *value = CCtxParams->fParams.checksumFlag;
1055 break;
1056 case ZSTD_c_dictIDFlag :
1057 *value = !CCtxParams->fParams.noDictIDFlag;
1058 break;
1059 case ZSTD_c_forceMaxWindow :
1060 *value = CCtxParams->forceWindow;
1061 break;
1062 case ZSTD_c_forceAttachDict :
1063 *value = CCtxParams->attachDictPref;
1064 break;
1065 case ZSTD_c_literalCompressionMode :
1066 *value = CCtxParams->literalCompressionMode;
1067 break;
1068 case ZSTD_c_nbWorkers :
1069#ifndef ZSTD_MULTITHREAD
1070 assert(CCtxParams->nbWorkers == 0);
1071#endif
1072 *value = CCtxParams->nbWorkers;
1073 break;
1074 case ZSTD_c_jobSize :
1075#ifndef ZSTD_MULTITHREAD
1076 RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
1077#else
1078 assert(CCtxParams->jobSize <= INT_MAX);
1079 *value = (int)CCtxParams->jobSize;
1080 break;
1081#endif
1082 case ZSTD_c_overlapLog :
1083#ifndef ZSTD_MULTITHREAD
1084 RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
1085#else
1086 *value = CCtxParams->overlapLog;
1087 break;
1088#endif
1089 case ZSTD_c_rsyncable :
1090#ifndef ZSTD_MULTITHREAD
1091 RETURN_ERROR(parameter_unsupported, "not compiled with multithreading");
1092#else
1093 *value = CCtxParams->rsyncable;
1094 break;
1095#endif
1096 case ZSTD_c_enableDedicatedDictSearch :
1097 *value = CCtxParams->enableDedicatedDictSearch;
1098 break;
1099 case ZSTD_c_enableLongDistanceMatching :
1100 *value = CCtxParams->ldmParams.enableLdm;
1101 break;
1102 case ZSTD_c_ldmHashLog :
1103 *value = CCtxParams->ldmParams.hashLog;
1104 break;
1105 case ZSTD_c_ldmMinMatch :
1106 *value = CCtxParams->ldmParams.minMatchLength;
1107 break;
1108 case ZSTD_c_ldmBucketSizeLog :
1109 *value = CCtxParams->ldmParams.bucketSizeLog;
1110 break;
1111 case ZSTD_c_ldmHashRateLog :
1112 *value = CCtxParams->ldmParams.hashRateLog;
1113 break;
1114 case ZSTD_c_targetCBlockSize :
1115 *value = (int)CCtxParams->targetCBlockSize;
1116 break;
1117 case ZSTD_c_srcSizeHint :
1118 *value = (int)CCtxParams->srcSizeHint;
1119 break;
1120 case ZSTD_c_stableInBuffer :
1121 *value = (int)CCtxParams->inBufferMode;
1122 break;
1123 case ZSTD_c_stableOutBuffer :
1124 *value = (int)CCtxParams->outBufferMode;
1125 break;
1126 case ZSTD_c_blockDelimiters :
1127 *value = (int)CCtxParams->blockDelimiters;
1128 break;
1129 case ZSTD_c_validateSequences :
1130 *value = (int)CCtxParams->validateSequences;
1131 break;
1132 case ZSTD_c_useBlockSplitter :
1133 *value = (int)CCtxParams->useBlockSplitter;
1134 break;
1135 case ZSTD_c_useRowMatchFinder :
1136 *value = (int)CCtxParams->useRowMatchFinder;
1137 break;
1138 case ZSTD_c_deterministicRefPrefix:
1139 *value = (int)CCtxParams->deterministicRefPrefix;
1140 break;
1141 case ZSTD_c_prefetchCDictTables:
1142 *value = (int)CCtxParams->prefetchCDictTables;
1143 break;
1144 case ZSTD_c_enableSeqProducerFallback:
1145 *value = CCtxParams->enableMatchFinderFallback;
1146 break;
1147 case ZSTD_c_maxBlockSize:
1148 *value = (int)CCtxParams->maxBlockSize;
1149 break;
1150 case ZSTD_c_searchForExternalRepcodes:
1151 *value = (int)CCtxParams->searchForExternalRepcodes;
1152 break;
1153 default: RETURN_ERROR(parameter_unsupported, "unknown parameter");
1154 }
1155 return 0;
1156}
1157
1158/** ZSTD_CCtx_setParametersUsingCCtxParams() :
1159 * just applies `params` into `cctx`
1160 * no action is performed, parameters are merely stored.
1161 * If ZSTDMT is enabled, parameters are pushed to cctx->mtctx.
1162 * This is possible even if a compression is ongoing.
1163 * In which case, new parameters will be applied on the fly, starting with next compression job.
1164 */
1165size_t ZSTD_CCtx_setParametersUsingCCtxParams(
1166 ZSTD_CCtx* cctx, const ZSTD_CCtx_params* params)
1167{
1168 DEBUGLOG(4, "ZSTD_CCtx_setParametersUsingCCtxParams");
1169 RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
1170 "The context is in the wrong stage!");
1171 RETURN_ERROR_IF(cctx->cdict, stage_wrong,
1172 "Can't override parameters with cdict attached (some must "
1173 "be inherited from the cdict).");
1174
1175 cctx->requestedParams = *params;
1176 return 0;
1177}
1178
1179size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams)
1180{
1181 ZSTD_STATIC_ASSERT(sizeof(cparams) == 7 * 4 /* all params are listed below */);
1182 DEBUGLOG(4, "ZSTD_CCtx_setCParams");
1183 /* only update if all parameters are valid */
1184 FORWARD_IF_ERROR(ZSTD_checkCParams(cparams), "");
1185 FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_windowLog, cparams.windowLog), "");
1186 FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_chainLog, cparams.chainLog), "");
1187 FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_hashLog, cparams.hashLog), "");
1188 FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchLog, cparams.searchLog), "");
1189 FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_minMatch, cparams.minMatch), "");
1190 FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetLength, cparams.targetLength), "");
1191 FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_strategy, cparams.strategy), "");
1192 return 0;
1193}
1194
1195size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams)
1196{
1197 ZSTD_STATIC_ASSERT(sizeof(fparams) == 3 * 4 /* all params are listed below */);
1198 DEBUGLOG(4, "ZSTD_CCtx_setFParams");
1199 FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, fparams.contentSizeFlag != 0), "");
1200 FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, fparams.checksumFlag != 0), "");
1201 FORWARD_IF_ERROR(ZSTD_CCtx_setParameter(cctx, ZSTD_c_dictIDFlag, fparams.noDictIDFlag == 0), "");
1202 return 0;
1203}
1204
1205size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params)
1206{
1207 DEBUGLOG(4, "ZSTD_CCtx_setParams");
1208 /* First check cParams, because we want to update all or none. */
1209 FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
1210 /* Next set fParams, because this could fail if the cctx isn't in init stage. */
1211 FORWARD_IF_ERROR(ZSTD_CCtx_setFParams(cctx, params.fParams), "");
1212 /* Finally set cParams, which should succeed. */
1213 FORWARD_IF_ERROR(ZSTD_CCtx_setCParams(cctx, params.cParams), "");
1214 return 0;
1215}
1216
1217size_t ZSTD_CCtx_setPledgedSrcSize(ZSTD_CCtx* cctx, unsigned long long pledgedSrcSize)
1218{
1219 DEBUGLOG(4, "ZSTD_CCtx_setPledgedSrcSize to %llu bytes", pledgedSrcSize);
1220 RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
1221 "Can't set pledgedSrcSize when not in init stage.");
1222 cctx->pledgedSrcSizePlusOne = pledgedSrcSize+1;
1223 return 0;
1224}
1225
1226static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(
1227 int const compressionLevel,
1228 size_t const dictSize);
1229static int ZSTD_dedicatedDictSearch_isSupported(
1230 const ZSTD_compressionParameters* cParams);
1231static void ZSTD_dedicatedDictSearch_revertCParams(
1232 ZSTD_compressionParameters* cParams);
1233
1234/**
1235 * Initializes the local dictionary using requested parameters.
1236 * NOTE: Initialization does not employ the pledged src size,
1237 * because the dictionary may be used for multiple compressions.
1238 */
1239static size_t ZSTD_initLocalDict(ZSTD_CCtx* cctx)
1240{
1241 ZSTD_localDict* const dl = &cctx->localDict;
1242 if (dl->dict == NULL) {
1243 /* No local dictionary. */
1244 assert(dl->dictBuffer == NULL);
1245 assert(dl->cdict == NULL);
1246 assert(dl->dictSize == 0);
1247 return 0;
1248 }
1249 if (dl->cdict != NULL) {
1250 /* Local dictionary already initialized. */
1251 assert(cctx->cdict == dl->cdict);
1252 return 0;
1253 }
1254 assert(dl->dictSize > 0);
1255 assert(cctx->cdict == NULL);
1256 assert(cctx->prefixDict.dict == NULL);
1257
1258 dl->cdict = ZSTD_createCDict_advanced2(
1259 dl->dict,
1260 dl->dictSize,
1261 ZSTD_dlm_byRef,
1262 dl->dictContentType,
1263 &cctx->requestedParams,
1264 cctx->customMem);
1265 RETURN_ERROR_IF(!dl->cdict, memory_allocation, "ZSTD_createCDict_advanced failed");
1266 cctx->cdict = dl->cdict;
1267 return 0;
1268}
1269
1270size_t ZSTD_CCtx_loadDictionary_advanced(
1271 ZSTD_CCtx* cctx,
1272 const void* dict, size_t dictSize,
1273 ZSTD_dictLoadMethod_e dictLoadMethod,
1274 ZSTD_dictContentType_e dictContentType)
1275{
1276 DEBUGLOG(4, "ZSTD_CCtx_loadDictionary_advanced (size: %u)", (U32)dictSize);
1277 RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
1278 "Can't load a dictionary when cctx is not in init stage.");
1279 ZSTD_clearAllDicts(cctx); /* erase any previously set dictionary */
1280 if (dict == NULL || dictSize == 0) /* no dictionary */
1281 return 0;
1282 if (dictLoadMethod == ZSTD_dlm_byRef) {
1283 cctx->localDict.dict = dict;
1284 } else {
1285 /* copy dictionary content inside CCtx to own its lifetime */
1286 void* dictBuffer;
1287 RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
1288 "static CCtx can't allocate for an internal copy of dictionary");
1289 dictBuffer = ZSTD_customMalloc(dictSize, cctx->customMem);
1290 RETURN_ERROR_IF(dictBuffer==NULL, memory_allocation,
1291 "allocation failed for dictionary content");
1292 ZSTD_memcpy(dictBuffer, dict, dictSize);
1293 cctx->localDict.dictBuffer = dictBuffer; /* owned ptr to free */
1294 cctx->localDict.dict = dictBuffer; /* read-only reference */
1295 }
1296 cctx->localDict.dictSize = dictSize;
1297 cctx->localDict.dictContentType = dictContentType;
1298 return 0;
1299}
1300
1301size_t ZSTD_CCtx_loadDictionary_byReference(
1302 ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
1303{
1304 return ZSTD_CCtx_loadDictionary_advanced(
1305 cctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
1306}
1307
1308size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
1309{
1310 return ZSTD_CCtx_loadDictionary_advanced(
1311 cctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
1312}
1313
1314
1315size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
1316{
1317 RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
1318 "Can't ref a dict when ctx not in init stage.");
1319 /* Free the existing local cdict (if any) to save memory. */
1320 ZSTD_clearAllDicts(cctx);
1321 cctx->cdict = cdict;
1322 return 0;
1323}
1324
1325size_t ZSTD_CCtx_refThreadPool(ZSTD_CCtx* cctx, ZSTD_threadPool* pool)
1326{
1327 RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
1328 "Can't ref a pool when ctx not in init stage.");
1329 cctx->pool = pool;
1330 return 0;
1331}
1332
1333size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize)
1334{
1335 return ZSTD_CCtx_refPrefix_advanced(cctx, prefix, prefixSize, ZSTD_dct_rawContent);
1336}
1337
1338size_t ZSTD_CCtx_refPrefix_advanced(
1339 ZSTD_CCtx* cctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
1340{
1341 RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
1342 "Can't ref a prefix when ctx not in init stage.");
1343 ZSTD_clearAllDicts(cctx);
1344 if (prefix != NULL && prefixSize > 0) {
1345 cctx->prefixDict.dict = prefix;
1346 cctx->prefixDict.dictSize = prefixSize;
1347 cctx->prefixDict.dictContentType = dictContentType;
1348 }
1349 return 0;
1350}
1351
1352/*! ZSTD_CCtx_reset() :
1353 * Also dumps dictionary */
1354size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
1355{
1356 if ( (reset == ZSTD_reset_session_only)
1357 || (reset == ZSTD_reset_session_and_parameters) ) {
1358 cctx->streamStage = zcss_init;
1359 cctx->pledgedSrcSizePlusOne = 0;
1360 }
1361 if ( (reset == ZSTD_reset_parameters)
1362 || (reset == ZSTD_reset_session_and_parameters) ) {
1363 RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
1364 "Reset parameters is only possible during init stage.");
1365 ZSTD_clearAllDicts(cctx);
1366 ZSTD_memset(&cctx->externalMatchCtx, 0, sizeof(cctx->externalMatchCtx));
1367 return ZSTD_CCtxParams_reset(&cctx->requestedParams);
1368 }
1369 return 0;
1370}
1371
1372
1373/** ZSTD_checkCParams() :
1374 control CParam values remain within authorized range.
1375 @return : 0, or an error code if one value is beyond authorized range */
1376size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
1377{
1378 BOUNDCHECK(ZSTD_c_windowLog, (int)cParams.windowLog);
1379 BOUNDCHECK(ZSTD_c_chainLog, (int)cParams.chainLog);
1380 BOUNDCHECK(ZSTD_c_hashLog, (int)cParams.hashLog);
1381 BOUNDCHECK(ZSTD_c_searchLog, (int)cParams.searchLog);
1382 BOUNDCHECK(ZSTD_c_minMatch, (int)cParams.minMatch);
1383 BOUNDCHECK(ZSTD_c_targetLength,(int)cParams.targetLength);
1384 BOUNDCHECK(ZSTD_c_strategy, cParams.strategy);
1385 return 0;
1386}
1387
1388/** ZSTD_clampCParams() :
1389 * make CParam values within valid range.
1390 * @return : valid CParams */
1391static ZSTD_compressionParameters
1392ZSTD_clampCParams(ZSTD_compressionParameters cParams)
1393{
1394# define CLAMP_TYPE(cParam, val, type) { \
1395 ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam); \
1396 if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound; \
1397 else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
1398 }
1399# define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
1400 CLAMP(ZSTD_c_windowLog, cParams.windowLog);
1401 CLAMP(ZSTD_c_chainLog, cParams.chainLog);
1402 CLAMP(ZSTD_c_hashLog, cParams.hashLog);
1403 CLAMP(ZSTD_c_searchLog, cParams.searchLog);
1404 CLAMP(ZSTD_c_minMatch, cParams.minMatch);
1405 CLAMP(ZSTD_c_targetLength,cParams.targetLength);
1406 CLAMP_TYPE(ZSTD_c_strategy,cParams.strategy, ZSTD_strategy);
1407 return cParams;
1408}
1409
1410/** ZSTD_cycleLog() :
1411 * condition for correct operation : hashLog > 1 */
1412U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
1413{
1414 U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
1415 return hashLog - btScale;
1416}
1417
1418/** ZSTD_dictAndWindowLog() :
1419 * Returns an adjusted window log that is large enough to fit the source and the dictionary.
1420 * The zstd format says that the entire dictionary is valid if one byte of the dictionary
1421 * is within the window. So the hashLog and chainLog should be large enough to reference both
1422 * the dictionary and the window. So we must use this adjusted dictAndWindowLog when downsizing
1423 * the hashLog and windowLog.
1424 * NOTE: srcSize must not be ZSTD_CONTENTSIZE_UNKNOWN.
1425 */
1426static U32 ZSTD_dictAndWindowLog(U32 windowLog, U64 srcSize, U64 dictSize)
1427{
1428 const U64 maxWindowSize = 1ULL << ZSTD_WINDOWLOG_MAX;
1429 /* No dictionary ==> No change */
1430 if (dictSize == 0) {
1431 return windowLog;
1432 }
1433 assert(windowLog <= ZSTD_WINDOWLOG_MAX);
1434 assert(srcSize != ZSTD_CONTENTSIZE_UNKNOWN); /* Handled in ZSTD_adjustCParams_internal() */
1435 {
1436 U64 const windowSize = 1ULL << windowLog;
1437 U64 const dictAndWindowSize = dictSize + windowSize;
1438 /* If the window size is already large enough to fit both the source and the dictionary
1439 * then just use the window size. Otherwise adjust so that it fits the dictionary and
1440 * the window.
1441 */
1442 if (windowSize >= dictSize + srcSize) {
1443 return windowLog; /* Window size large enough already */
1444 } else if (dictAndWindowSize >= maxWindowSize) {
1445 return ZSTD_WINDOWLOG_MAX; /* Larger than max window log */
1446 } else {
1447 return ZSTD_highbit32((U32)dictAndWindowSize - 1) + 1;
1448 }
1449 }
1450}
1451
1452/** ZSTD_adjustCParams_internal() :
1453 * optimize `cPar` for a specified input (`srcSize` and `dictSize`).
1454 * mostly downsize to reduce memory consumption and initialization latency.
1455 * `srcSize` can be ZSTD_CONTENTSIZE_UNKNOWN when not known.
1456 * `mode` is the mode for parameter adjustment. See docs for `ZSTD_cParamMode_e`.
1457 * note : `srcSize==0` means 0!
1458 * condition : cPar is presumed validated (can be checked using ZSTD_checkCParams()). */
1459static ZSTD_compressionParameters
1460ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
1461 unsigned long long srcSize,
1462 size_t dictSize,
1463 ZSTD_cParamMode_e mode,
1464 ZSTD_paramSwitch_e useRowMatchFinder)
1465{
1466 const U64 minSrcSize = 513; /* (1<<9) + 1 */
1467 const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
1468 assert(ZSTD_checkCParams(cPar)==0);
1469
1470 switch (mode) {
1471 case ZSTD_cpm_unknown:
1472 case ZSTD_cpm_noAttachDict:
1473 /* If we don't know the source size, don't make any
1474 * assumptions about it. We will already have selected
1475 * smaller parameters if a dictionary is in use.
1476 */
1477 break;
1478 case ZSTD_cpm_createCDict:
1479 /* Assume a small source size when creating a dictionary
1480 * with an unknown source size.
1481 */
1482 if (dictSize && srcSize == ZSTD_CONTENTSIZE_UNKNOWN)
1483 srcSize = minSrcSize;
1484 break;
1485 case ZSTD_cpm_attachDict:
1486 /* Dictionary has its own dedicated parameters which have
1487 * already been selected. We are selecting parameters
1488 * for only the source.
1489 */
1490 dictSize = 0;
1491 break;
1492 default:
1493 assert(0);
1494 break;
1495 }
1496
1497 /* resize windowLog if input is small enough, to use less memory */
1498 if ( (srcSize <= maxWindowResize)
1499 && (dictSize <= maxWindowResize) ) {
1500 U32 const tSize = (U32)(srcSize + dictSize);
1501 static U32 const hashSizeMin = 1 << ZSTD_HASHLOG_MIN;
1502 U32 const srcLog = (tSize < hashSizeMin) ? ZSTD_HASHLOG_MIN :
1503 ZSTD_highbit32(tSize-1) + 1;
1504 if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
1505 }
1506 if (srcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
1507 U32 const dictAndWindowLog = ZSTD_dictAndWindowLog(cPar.windowLog, (U64)srcSize, (U64)dictSize);
1508 U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
1509 if (cPar.hashLog > dictAndWindowLog+1) cPar.hashLog = dictAndWindowLog+1;
1510 if (cycleLog > dictAndWindowLog)
1511 cPar.chainLog -= (cycleLog - dictAndWindowLog);
1512 }
1513
1514 if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN)
1515 cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* minimum wlog required for valid frame header */
1516
1517 /* We can't use more than 32 bits of hash in total, so that means that we require:
1518 * (hashLog + 8) <= 32 && (chainLog + 8) <= 32
1519 */
1520 if (mode == ZSTD_cpm_createCDict && ZSTD_CDictIndicesAreTagged(&cPar)) {
1521 U32 const maxShortCacheHashLog = 32 - ZSTD_SHORT_CACHE_TAG_BITS;
1522 if (cPar.hashLog > maxShortCacheHashLog) {
1523 cPar.hashLog = maxShortCacheHashLog;
1524 }
1525 if (cPar.chainLog > maxShortCacheHashLog) {
1526 cPar.chainLog = maxShortCacheHashLog;
1527 }
1528 }
1529
1530
1531 /* At this point, we aren't 100% sure if we are using the row match finder.
1532 * Unless it is explicitly disabled, conservatively assume that it is enabled.
1533 * In this case it will only be disabled for small sources, so shrinking the
1534 * hash log a little bit shouldn't result in any ratio loss.
1535 */
1536 if (useRowMatchFinder == ZSTD_ps_auto)
1537 useRowMatchFinder = ZSTD_ps_enable;
1538
1539 /* We can't hash more than 32-bits in total. So that means that we require:
1540 * (hashLog - rowLog + 8) <= 32
1541 */
1542 if (ZSTD_rowMatchFinderUsed(cPar.strategy, useRowMatchFinder)) {
1543 /* Switch to 32-entry rows if searchLog is 5 (or more) */
1544 U32 const rowLog = BOUNDED(4, cPar.searchLog, 6);
1545 U32 const maxRowHashLog = 32 - ZSTD_ROW_HASH_TAG_BITS;
1546 U32 const maxHashLog = maxRowHashLog + rowLog;
1547 assert(cPar.hashLog >= rowLog);
1548 if (cPar.hashLog > maxHashLog) {
1549 cPar.hashLog = maxHashLog;
1550 }
1551 }
1552
1553 return cPar;
1554}
1555
1556ZSTD_compressionParameters
1557ZSTD_adjustCParams(ZSTD_compressionParameters cPar,
1558 unsigned long long srcSize,
1559 size_t dictSize)
1560{
1561 cPar = ZSTD_clampCParams(cPar); /* resulting cPar is necessarily valid (all parameters within range) */
1562 if (srcSize == 0) srcSize = ZSTD_CONTENTSIZE_UNKNOWN;
1563 return ZSTD_adjustCParams_internal(cPar, srcSize, dictSize, ZSTD_cpm_unknown, ZSTD_ps_auto);
1564}
1565
1566static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
1567static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
1568
1569static void ZSTD_overrideCParams(
1570 ZSTD_compressionParameters* cParams,
1571 const ZSTD_compressionParameters* overrides)
1572{
1573 if (overrides->windowLog) cParams->windowLog = overrides->windowLog;
1574 if (overrides->hashLog) cParams->hashLog = overrides->hashLog;
1575 if (overrides->chainLog) cParams->chainLog = overrides->chainLog;
1576 if (overrides->searchLog) cParams->searchLog = overrides->searchLog;
1577 if (overrides->minMatch) cParams->minMatch = overrides->minMatch;
1578 if (overrides->targetLength) cParams->targetLength = overrides->targetLength;
1579 if (overrides->strategy) cParams->strategy = overrides->strategy;
1580}
1581
1582ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
1583 const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
1584{
1585 ZSTD_compressionParameters cParams;
1586 if (srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN && CCtxParams->srcSizeHint > 0) {
1587 srcSizeHint = CCtxParams->srcSizeHint;
1588 }
1589 cParams = ZSTD_getCParams_internal(CCtxParams->compressionLevel, srcSizeHint, dictSize, mode);
1590 if (CCtxParams->ldmParams.enableLdm == ZSTD_ps_enable) cParams.windowLog = ZSTD_LDM_DEFAULT_WINDOW_LOG;
1591 ZSTD_overrideCParams(&cParams, &CCtxParams->cParams);
1592 assert(!ZSTD_checkCParams(cParams));
1593 /* srcSizeHint == 0 means 0 */
1594 return ZSTD_adjustCParams_internal(cParams, srcSizeHint, dictSize, mode, CCtxParams->useRowMatchFinder);
1595}
1596
1597static size_t
1598ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
1599 const ZSTD_paramSwitch_e useRowMatchFinder,
1600 const U32 enableDedicatedDictSearch,
1601 const U32 forCCtx)
1602{
1603 /* chain table size should be 0 for fast or row-hash strategies */
1604 size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder, enableDedicatedDictSearch && !forCCtx)
1605 ? ((size_t)1 << cParams->chainLog)
1606 : 0;
1607 size_t const hSize = ((size_t)1) << cParams->hashLog;
1608 U32 const hashLog3 = (forCCtx && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
1609 size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
1610 /* We don't use ZSTD_cwksp_alloc_size() here because the tables aren't
1611 * surrounded by redzones in ASAN. */
1612 size_t const tableSpace = chainSize * sizeof(U32)
1613 + hSize * sizeof(U32)
1614 + h3Size * sizeof(U32);
1615 size_t const optPotentialSpace =
1616 ZSTD_cwksp_aligned_alloc_size((MaxML+1) * sizeof(U32))
1617 + ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32))
1618 + ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32))
1619 + ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32))
1620 + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t))
1621 + ZSTD_cwksp_aligned_alloc_size((ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
1622 size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)
1623 ? ZSTD_cwksp_aligned_alloc_size(hSize)
1624 : 0;
1625 size_t const optSpace = (forCCtx && (cParams->strategy >= ZSTD_btopt))
1626 ? optPotentialSpace
1627 : 0;
1628 size_t const slackSpace = ZSTD_cwksp_slack_space_required();
1629
1630 /* tables are guaranteed to be sized in multiples of 64 bytes (or 16 uint32_t) */
1631 ZSTD_STATIC_ASSERT(ZSTD_HASHLOG_MIN >= 4 && ZSTD_WINDOWLOG_MIN >= 4 && ZSTD_CHAINLOG_MIN >= 4);
1632 assert(useRowMatchFinder != ZSTD_ps_auto);
1633
1634 DEBUGLOG(4, "chainSize: %u - hSize: %u - h3Size: %u",
1635 (U32)chainSize, (U32)hSize, (U32)h3Size);
1636 return tableSpace + optSpace + slackSpace + lazyAdditionalSpace;
1637}
1638
1639/* Helper function for calculating memory requirements.
1640 * Gives a tighter bound than ZSTD_sequenceBound() by taking minMatch into account. */
1641static size_t ZSTD_maxNbSeq(size_t blockSize, unsigned minMatch, int useSequenceProducer) {
1642 U32 const divider = (minMatch==3 || useSequenceProducer) ? 3 : 4;
1643 return blockSize / divider;
1644}
1645
1646static size_t ZSTD_estimateCCtxSize_usingCCtxParams_internal(
1647 const ZSTD_compressionParameters* cParams,
1648 const ldmParams_t* ldmParams,
1649 const int isStatic,
1650 const ZSTD_paramSwitch_e useRowMatchFinder,
1651 const size_t buffInSize,
1652 const size_t buffOutSize,
1653 const U64 pledgedSrcSize,
1654 int useSequenceProducer,
1655 size_t maxBlockSize)
1656{
1657 size_t const windowSize = (size_t) BOUNDED(1ULL, 1ULL << cParams->windowLog, pledgedSrcSize);
1658 size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(maxBlockSize), windowSize);
1659 size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, cParams->minMatch, useSequenceProducer);
1660 size_t const tokenSpace = ZSTD_cwksp_alloc_size(WILDCOPY_OVERLENGTH + blockSize)
1661 + ZSTD_cwksp_aligned_alloc_size(maxNbSeq * sizeof(seqDef))
1662 + 3 * ZSTD_cwksp_alloc_size(maxNbSeq * sizeof(BYTE));
1663 size_t const entropySpace = ZSTD_cwksp_alloc_size(ENTROPY_WORKSPACE_SIZE);
1664 size_t const blockStateSpace = 2 * ZSTD_cwksp_alloc_size(sizeof(ZSTD_compressedBlockState_t));
1665 size_t const matchStateSize = ZSTD_sizeof_matchState(cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 0, /* forCCtx */ 1);
1666
1667 size_t const ldmSpace = ZSTD_ldm_getTableSize(*ldmParams);
1668 size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(*ldmParams, blockSize);
1669 size_t const ldmSeqSpace = ldmParams->enableLdm == ZSTD_ps_enable ?
1670 ZSTD_cwksp_aligned_alloc_size(maxNbLdmSeq * sizeof(rawSeq)) : 0;
1671
1672
1673 size_t const bufferSpace = ZSTD_cwksp_alloc_size(buffInSize)
1674 + ZSTD_cwksp_alloc_size(buffOutSize);
1675
1676 size_t const cctxSpace = isStatic ? ZSTD_cwksp_alloc_size(sizeof(ZSTD_CCtx)) : 0;
1677
1678 size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
1679 size_t const externalSeqSpace = useSequenceProducer
1680 ? ZSTD_cwksp_aligned_alloc_size(maxNbExternalSeq * sizeof(ZSTD_Sequence))
1681 : 0;
1682
1683 size_t const neededSpace =
1684 cctxSpace +
1685 entropySpace +
1686 blockStateSpace +
1687 ldmSpace +
1688 ldmSeqSpace +
1689 matchStateSize +
1690 tokenSpace +
1691 bufferSpace +
1692 externalSeqSpace;
1693
1694 DEBUGLOG(5, "estimate workspace : %u", (U32)neededSpace);
1695 return neededSpace;
1696}
1697
1698size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
1699{
1700 ZSTD_compressionParameters const cParams =
1701 ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
1702 ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder,
1703 &cParams);
1704
1705 RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
1706 /* estimateCCtxSize is for one-shot compression. So no buffers should
1707 * be needed. However, we still allocate two 0-sized buffers, which can
1708 * take space under ASAN. */
1709 return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
1710 &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN, params->useSequenceProducer, params->maxBlockSize);
1711}
1712
1713size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
1714{
1715 ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
1716 if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
1717 /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
1718 size_t noRowCCtxSize;
1719 size_t rowCCtxSize;
1720 initialParams.useRowMatchFinder = ZSTD_ps_disable;
1721 noRowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
1722 initialParams.useRowMatchFinder = ZSTD_ps_enable;
1723 rowCCtxSize = ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
1724 return MAX(noRowCCtxSize, rowCCtxSize);
1725 } else {
1726 return ZSTD_estimateCCtxSize_usingCCtxParams(&initialParams);
1727 }
1728}
1729
1730static size_t ZSTD_estimateCCtxSize_internal(int compressionLevel)
1731{
1732 int tier = 0;
1733 size_t largestSize = 0;
1734 static const unsigned long long srcSizeTiers[4] = {16 KB, 128 KB, 256 KB, ZSTD_CONTENTSIZE_UNKNOWN};
1735 for (; tier < 4; ++tier) {
1736 /* Choose the set of cParams for a given level across all srcSizes that give the largest cctxSize */
1737 ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeTiers[tier], 0, ZSTD_cpm_noAttachDict);
1738 largestSize = MAX(ZSTD_estimateCCtxSize_usingCParams(cParams), largestSize);
1739 }
1740 return largestSize;
1741}
1742
1743size_t ZSTD_estimateCCtxSize(int compressionLevel)
1744{
1745 int level;
1746 size_t memBudget = 0;
1747 for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
1748 /* Ensure monotonically increasing memory usage as compression level increases */
1749 size_t const newMB = ZSTD_estimateCCtxSize_internal(level);
1750 if (newMB > memBudget) memBudget = newMB;
1751 }
1752 return memBudget;
1753}
1754
1755size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
1756{
1757 RETURN_ERROR_IF(params->nbWorkers > 0, GENERIC, "Estimate CCtx size is supported for single-threaded compression only.");
1758 { ZSTD_compressionParameters const cParams =
1759 ZSTD_getCParamsFromCCtxParams(params, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
1760 size_t const blockSize = MIN(ZSTD_resolveMaxBlockSize(params->maxBlockSize), (size_t)1 << cParams.windowLog);
1761 size_t const inBuffSize = (params->inBufferMode == ZSTD_bm_buffered)
1762 ? ((size_t)1 << cParams.windowLog) + blockSize
1763 : 0;
1764 size_t const outBuffSize = (params->outBufferMode == ZSTD_bm_buffered)
1765 ? ZSTD_compressBound(blockSize) + 1
1766 : 0;
1767 ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params->useRowMatchFinder, &params->cParams);
1768
1769 return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
1770 &cParams, &params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize,
1771 ZSTD_CONTENTSIZE_UNKNOWN, params->useSequenceProducer, params->maxBlockSize);
1772 }
1773}
1774
1775size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams)
1776{
1777 ZSTD_CCtx_params initialParams = ZSTD_makeCCtxParamsFromCParams(cParams);
1778 if (ZSTD_rowMatchFinderSupported(cParams.strategy)) {
1779 /* Pick bigger of not using and using row-based matchfinder for greedy and lazy strategies */
1780 size_t noRowCCtxSize;
1781 size_t rowCCtxSize;
1782 initialParams.useRowMatchFinder = ZSTD_ps_disable;
1783 noRowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
1784 initialParams.useRowMatchFinder = ZSTD_ps_enable;
1785 rowCCtxSize = ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
1786 return MAX(noRowCCtxSize, rowCCtxSize);
1787 } else {
1788 return ZSTD_estimateCStreamSize_usingCCtxParams(&initialParams);
1789 }
1790}
1791
1792static size_t ZSTD_estimateCStreamSize_internal(int compressionLevel)
1793{
1794 ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
1795 return ZSTD_estimateCStreamSize_usingCParams(cParams);
1796}
1797
1798size_t ZSTD_estimateCStreamSize(int compressionLevel)
1799{
1800 int level;
1801 size_t memBudget = 0;
1802 for (level=MIN(compressionLevel, 1); level<=compressionLevel; level++) {
1803 size_t const newMB = ZSTD_estimateCStreamSize_internal(level);
1804 if (newMB > memBudget) memBudget = newMB;
1805 }
1806 return memBudget;
1807}
1808
1809/* ZSTD_getFrameProgression():
1810 * tells how much data has been consumed (input) and produced (output) for current frame.
1811 * able to count progression inside worker threads (non-blocking mode).
1812 */
1813ZSTD_frameProgression ZSTD_getFrameProgression(const ZSTD_CCtx* cctx)
1814{
1815#ifdef ZSTD_MULTITHREAD
1816 if (cctx->appliedParams.nbWorkers > 0) {
1817 return ZSTDMT_getFrameProgression(cctx->mtctx);
1818 }
1819#endif
1820 { ZSTD_frameProgression fp;
1821 size_t const buffered = (cctx->inBuff == NULL) ? 0 :
1822 cctx->inBuffPos - cctx->inToCompress;
1823 if (buffered) assert(cctx->inBuffPos >= cctx->inToCompress);
1824 assert(buffered <= ZSTD_BLOCKSIZE_MAX);
1825 fp.ingested = cctx->consumedSrcSize + buffered;
1826 fp.consumed = cctx->consumedSrcSize;
1827 fp.produced = cctx->producedCSize;
1828 fp.flushed = cctx->producedCSize; /* simplified; some data might still be left within streaming output buffer */
1829 fp.currentJobID = 0;
1830 fp.nbActiveWorkers = 0;
1831 return fp;
1832} }
1833
1834/*! ZSTD_toFlushNow()
1835 * Only useful for multithreading scenarios currently (nbWorkers >= 1).
1836 */
1837size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx)
1838{
1839#ifdef ZSTD_MULTITHREAD
1840 if (cctx->appliedParams.nbWorkers > 0) {
1841 return ZSTDMT_toFlushNow(cctx->mtctx);
1842 }
1843#endif
1844 (void)cctx;
1845 return 0; /* over-simplification; could also check if context is currently running in streaming mode, and in which case, report how many bytes are left to be flushed within output buffer */
1846}
1847
1848static void ZSTD_assertEqualCParams(ZSTD_compressionParameters cParams1,
1849 ZSTD_compressionParameters cParams2)
1850{
1851 (void)cParams1;
1852 (void)cParams2;
1853 assert(cParams1.windowLog == cParams2.windowLog);
1854 assert(cParams1.chainLog == cParams2.chainLog);
1855 assert(cParams1.hashLog == cParams2.hashLog);
1856 assert(cParams1.searchLog == cParams2.searchLog);
1857 assert(cParams1.minMatch == cParams2.minMatch);
1858 assert(cParams1.targetLength == cParams2.targetLength);
1859 assert(cParams1.strategy == cParams2.strategy);
1860}
1861
1862void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs)
1863{
1864 int i;
1865 for (i = 0; i < ZSTD_REP_NUM; ++i)
1866 bs->rep[i] = repStartValue[i];
1867 bs->entropy.huf.repeatMode = HUF_repeat_none;
1868 bs->entropy.fse.offcode_repeatMode = FSE_repeat_none;
1869 bs->entropy.fse.matchlength_repeatMode = FSE_repeat_none;
1870 bs->entropy.fse.litlength_repeatMode = FSE_repeat_none;
1871}
1872
1873/*! ZSTD_invalidateMatchState()
1874 * Invalidate all the matches in the match finder tables.
1875 * Requires nextSrc and base to be set (can be NULL).
1876 */
1877static void ZSTD_invalidateMatchState(ZSTD_matchState_t* ms)
1878{
1879 ZSTD_window_clear(&ms->window);
1880
1881 ms->nextToUpdate = ms->window.dictLimit;
1882 ms->loadedDictEnd = 0;
1883 ms->opt.litLengthSum = 0; /* force reset of btopt stats */
1884 ms->dictMatchState = NULL;
1885}
1886
1887/**
1888 * Controls, for this matchState reset, whether the tables need to be cleared /
1889 * prepared for the coming compression (ZSTDcrp_makeClean), or whether the
1890 * tables can be left unclean (ZSTDcrp_leaveDirty), because we know that a
1891 * subsequent operation will overwrite the table space anyways (e.g., copying
1892 * the matchState contents in from a CDict).
1893 */
1894typedef enum {
1895 ZSTDcrp_makeClean,
1896 ZSTDcrp_leaveDirty
1897} ZSTD_compResetPolicy_e;
1898
1899/**
1900 * Controls, for this matchState reset, whether indexing can continue where it
1901 * left off (ZSTDirp_continue), or whether it needs to be restarted from zero
1902 * (ZSTDirp_reset).
1903 */
1904typedef enum {
1905 ZSTDirp_continue,
1906 ZSTDirp_reset
1907} ZSTD_indexResetPolicy_e;
1908
1909typedef enum {
1910 ZSTD_resetTarget_CDict,
1911 ZSTD_resetTarget_CCtx
1912} ZSTD_resetTarget_e;
1913
1914/* Mixes bits in a 64 bits in a value, based on XXH3_rrmxmx */
1915static U64 ZSTD_bitmix(U64 val, U64 len) {
1916 val ^= ZSTD_rotateRight_U64(val, 49) ^ ZSTD_rotateRight_U64(val, 24);
1917 val *= 0x9FB21C651E98DF25ULL;
1918 val ^= (val >> 35) + len ;
1919 val *= 0x9FB21C651E98DF25ULL;
1920 return val ^ (val >> 28);
1921}
1922
1923/* Mixes in the hashSalt and hashSaltEntropy to create a new hashSalt */
1924static void ZSTD_advanceHashSalt(ZSTD_matchState_t* ms) {
1925 ms->hashSalt = ZSTD_bitmix(ms->hashSalt, 8) ^ ZSTD_bitmix((U64) ms->hashSaltEntropy, 4);
1926}
1927
1928static size_t
1929ZSTD_reset_matchState(ZSTD_matchState_t* ms,
1930 ZSTD_cwksp* ws,
1931 const ZSTD_compressionParameters* cParams,
1932 const ZSTD_paramSwitch_e useRowMatchFinder,
1933 const ZSTD_compResetPolicy_e crp,
1934 const ZSTD_indexResetPolicy_e forceResetIndex,
1935 const ZSTD_resetTarget_e forWho)
1936{
1937 /* disable chain table allocation for fast or row-based strategies */
1938 size_t const chainSize = ZSTD_allocateChainTable(cParams->strategy, useRowMatchFinder,
1939 ms->dedicatedDictSearch && (forWho == ZSTD_resetTarget_CDict))
1940 ? ((size_t)1 << cParams->chainLog)
1941 : 0;
1942 size_t const hSize = ((size_t)1) << cParams->hashLog;
1943 U32 const hashLog3 = ((forWho == ZSTD_resetTarget_CCtx) && cParams->minMatch==3) ? MIN(ZSTD_HASHLOG3_MAX, cParams->windowLog) : 0;
1944 size_t const h3Size = hashLog3 ? ((size_t)1) << hashLog3 : 0;
1945
1946 DEBUGLOG(4, "reset indices : %u", forceResetIndex == ZSTDirp_reset);
1947 assert(useRowMatchFinder != ZSTD_ps_auto);
1948 if (forceResetIndex == ZSTDirp_reset) {
1949 ZSTD_window_init(&ms->window);
1950 ZSTD_cwksp_mark_tables_dirty(ws);
1951 }
1952
1953 ms->hashLog3 = hashLog3;
1954 ms->lazySkipping = 0;
1955
1956 ZSTD_invalidateMatchState(ms);
1957
1958 assert(!ZSTD_cwksp_reserve_failed(ws)); /* check that allocation hasn't already failed */
1959
1960 ZSTD_cwksp_clear_tables(ws);
1961
1962 DEBUGLOG(5, "reserving table space");
1963 /* table Space */
1964 ms->hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, hSize * sizeof(U32));
1965 ms->chainTable = (U32*)ZSTD_cwksp_reserve_table(ws, chainSize * sizeof(U32));
1966 ms->hashTable3 = (U32*)ZSTD_cwksp_reserve_table(ws, h3Size * sizeof(U32));
1967 RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
1968 "failed a workspace allocation in ZSTD_reset_matchState");
1969
1970 DEBUGLOG(4, "reset table : %u", crp!=ZSTDcrp_leaveDirty);
1971 if (crp!=ZSTDcrp_leaveDirty) {
1972 /* reset tables only */
1973 ZSTD_cwksp_clean_tables(ws);
1974 }
1975
1976 if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
1977 /* Row match finder needs an additional table of hashes ("tags") */
1978 size_t const tagTableSize = hSize;
1979 /* We want to generate a new salt in case we reset a Cctx, but we always want to use
1980 * 0 when we reset a Cdict */
1981 if(forWho == ZSTD_resetTarget_CCtx) {
1982 ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned_init_once(ws, tagTableSize);
1983 ZSTD_advanceHashSalt(ms);
1984 } else {
1985 /* When we are not salting we want to always memset the memory */
1986 ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned(ws, tagTableSize);
1987 ZSTD_memset(ms->tagTable, 0, tagTableSize);
1988 ms->hashSalt = 0;
1989 }
1990 { /* Switch to 32-entry rows if searchLog is 5 (or more) */
1991 U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
1992 assert(cParams->hashLog >= rowLog);
1993 ms->rowHashLog = cParams->hashLog - rowLog;
1994 }
1995 }
1996
1997 /* opt parser space */
1998 if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
1999 DEBUGLOG(4, "reserving optimal parser space");
2000 ms->opt.litFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (1<<Litbits) * sizeof(unsigned));
2001 ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
2002 ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
2003 ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
2004 ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_match_t));
2005 ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
2006 }
2007
2008 ms->cParams = *cParams;
2009
2010 RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
2011 "failed a workspace allocation in ZSTD_reset_matchState");
2012 return 0;
2013}
2014
2015/* ZSTD_indexTooCloseToMax() :
2016 * minor optimization : prefer memset() rather than reduceIndex()
2017 * which is measurably slow in some circumstances (reported for Visual Studio).
2018 * Works when re-using a context for a lot of smallish inputs :
2019 * if all inputs are smaller than ZSTD_INDEXOVERFLOW_MARGIN,
2020 * memset() will be triggered before reduceIndex().
2021 */
2022#define ZSTD_INDEXOVERFLOW_MARGIN (16 MB)
2023static int ZSTD_indexTooCloseToMax(ZSTD_window_t w)
2024{
2025 return (size_t)(w.nextSrc - w.base) > (ZSTD_CURRENT_MAX - ZSTD_INDEXOVERFLOW_MARGIN);
2026}
2027
2028/** ZSTD_dictTooBig():
2029 * When dictionaries are larger than ZSTD_CHUNKSIZE_MAX they can't be loaded in
2030 * one go generically. So we ensure that in that case we reset the tables to zero,
2031 * so that we can load as much of the dictionary as possible.
2032 */
2033static int ZSTD_dictTooBig(size_t const loadedDictSize)
2034{
2035 return loadedDictSize > ZSTD_CHUNKSIZE_MAX;
2036}
2037
2038/*! ZSTD_resetCCtx_internal() :
2039 * @param loadedDictSize The size of the dictionary to be loaded
2040 * into the context, if any. If no dictionary is used, or the
2041 * dictionary is being attached / copied, then pass 0.
2042 * note : `params` are assumed fully validated at this stage.
2043 */
2044static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
2045 ZSTD_CCtx_params const* params,
2046 U64 const pledgedSrcSize,
2047 size_t const loadedDictSize,
2048 ZSTD_compResetPolicy_e const crp,
2049 ZSTD_buffered_policy_e const zbuff)
2050{
2051 ZSTD_cwksp* const ws = &zc->workspace;
2052 DEBUGLOG(4, "ZSTD_resetCCtx_internal: pledgedSrcSize=%u, wlog=%u, useRowMatchFinder=%d useBlockSplitter=%d",
2053 (U32)pledgedSrcSize, params->cParams.windowLog, (int)params->useRowMatchFinder, (int)params->useBlockSplitter);
2054 assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
2055
2056 zc->isFirstBlock = 1;
2057
2058 /* Set applied params early so we can modify them for LDM,
2059 * and point params at the applied params.
2060 */
2061 zc->appliedParams = *params;
2062 params = &zc->appliedParams;
2063
2064 assert(params->useRowMatchFinder != ZSTD_ps_auto);
2065 assert(params->useBlockSplitter != ZSTD_ps_auto);
2066 assert(params->ldmParams.enableLdm != ZSTD_ps_auto);
2067 assert(params->maxBlockSize != 0);
2068 if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
2069 /* Adjust long distance matching parameters */
2070 ZSTD_ldm_adjustParameters(&zc->appliedParams.ldmParams, &params->cParams);
2071 assert(params->ldmParams.hashLog >= params->ldmParams.bucketSizeLog);
2072 assert(params->ldmParams.hashRateLog < 32);
2073 }
2074
2075 { size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize));
2076 size_t const blockSize = MIN(params->maxBlockSize, windowSize);
2077 size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, params->cParams.minMatch, params->useSequenceProducer);
2078 size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered)
2079 ? ZSTD_compressBound(blockSize) + 1
2080 : 0;
2081 size_t const buffInSize = (zbuff == ZSTDb_buffered && params->inBufferMode == ZSTD_bm_buffered)
2082 ? windowSize + blockSize
2083 : 0;
2084 size_t const maxNbLdmSeq = ZSTD_ldm_getMaxNbSeq(params->ldmParams, blockSize);
2085
2086 int const indexTooClose = ZSTD_indexTooCloseToMax(zc->blockState.matchState.window);
2087 int const dictTooBig = ZSTD_dictTooBig(loadedDictSize);
2088 ZSTD_indexResetPolicy_e needsIndexReset =
2089 (indexTooClose || dictTooBig || !zc->initialized) ? ZSTDirp_reset : ZSTDirp_continue;
2090
2091 size_t const neededSpace =
2092 ZSTD_estimateCCtxSize_usingCCtxParams_internal(
2093 &params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder,
2094 buffInSize, buffOutSize, pledgedSrcSize, params->useSequenceProducer, params->maxBlockSize);
2095 int resizeWorkspace;
2096
2097 FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
2098
2099 if (!zc->staticSize) ZSTD_cwksp_bump_oversized_duration(ws, 0);
2100
2101 { /* Check if workspace is large enough, alloc a new one if needed */
2102 int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
2103 int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
2104 resizeWorkspace = workspaceTooSmall || workspaceWasteful;
2105 DEBUGLOG(4, "Need %zu B workspace", neededSpace);
2106 DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
2107
2108 if (resizeWorkspace) {
2109 DEBUGLOG(4, "Resize workspaceSize from %zuKB to %zuKB",
2110 ZSTD_cwksp_sizeof(ws) >> 10,
2111 neededSpace >> 10);
2112
2113 RETURN_ERROR_IF(zc->staticSize, memory_allocation, "static cctx : no resize");
2114
2115 needsIndexReset = ZSTDirp_reset;
2116
2117 ZSTD_cwksp_free(ws, zc->customMem);
2118 FORWARD_IF_ERROR(ZSTD_cwksp_create(ws, neededSpace, zc->customMem), "");
2119
2120 DEBUGLOG(5, "reserving object space");
2121 /* Statically sized space.
2122 * entropyWorkspace never moves,
2123 * though prev/next block swap places */
2124 assert(ZSTD_cwksp_check_available(ws, 2 * sizeof(ZSTD_compressedBlockState_t)));
2125 zc->blockState.prevCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
2126 RETURN_ERROR_IF(zc->blockState.prevCBlock == NULL, memory_allocation, "couldn't allocate prevCBlock");
2127 zc->blockState.nextCBlock = (ZSTD_compressedBlockState_t*) ZSTD_cwksp_reserve_object(ws, sizeof(ZSTD_compressedBlockState_t));
2128 RETURN_ERROR_IF(zc->blockState.nextCBlock == NULL, memory_allocation, "couldn't allocate nextCBlock");
2129 zc->entropyWorkspace = (U32*) ZSTD_cwksp_reserve_object(ws, ENTROPY_WORKSPACE_SIZE);
2130 RETURN_ERROR_IF(zc->entropyWorkspace == NULL, memory_allocation, "couldn't allocate entropyWorkspace");
2131 } }
2132
2133 ZSTD_cwksp_clear(ws);
2134
2135 /* init params */
2136 zc->blockState.matchState.cParams = params->cParams;
2137 zc->blockState.matchState.prefetchCDictTables = params->prefetchCDictTables == ZSTD_ps_enable;
2138 zc->pledgedSrcSizePlusOne = pledgedSrcSize+1;
2139 zc->consumedSrcSize = 0;
2140 zc->producedCSize = 0;
2141 if (pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN)
2142 zc->appliedParams.fParams.contentSizeFlag = 0;
2143 DEBUGLOG(4, "pledged content size : %u ; flag : %u",
2144 (unsigned)pledgedSrcSize, zc->appliedParams.fParams.contentSizeFlag);
2145 zc->blockSize = blockSize;
2146
2147 XXH64_reset(&zc->xxhState, 0);
2148 zc->stage = ZSTDcs_init;
2149 zc->dictID = 0;
2150 zc->dictContentSize = 0;
2151
2152 ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
2153
2154 FORWARD_IF_ERROR(ZSTD_reset_matchState(
2155 &zc->blockState.matchState,
2156 ws,
2157 &params->cParams,
2158 params->useRowMatchFinder,
2159 crp,
2160 needsIndexReset,
2161 ZSTD_resetTarget_CCtx), "");
2162
2163 zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
2164
2165 /* ldm hash table */
2166 if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
2167 /* TODO: avoid memset? */
2168 size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog;
2169 zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
2170 ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
2171 zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
2172 zc->maxNbLdmSequences = maxNbLdmSeq;
2173
2174 ZSTD_window_init(&zc->ldmState.window);
2175 zc->ldmState.loadedDictEnd = 0;
2176 }
2177
2178 /* reserve space for block-level external sequences */
2179 if (params->useSequenceProducer) {
2180 size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
2181 zc->externalMatchCtx.seqBufferCapacity = maxNbExternalSeq;
2182 zc->externalMatchCtx.seqBuffer =
2183 (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence));
2184 }
2185
2186 /* buffers */
2187
2188 /* ZSTD_wildcopy() is used to copy into the literals buffer,
2189 * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
2190 */
2191 zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
2192 zc->seqStore.maxNbLit = blockSize;
2193
2194 zc->bufferedPolicy = zbuff;
2195 zc->inBuffSize = buffInSize;
2196 zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
2197 zc->outBuffSize = buffOutSize;
2198 zc->outBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffOutSize);
2199
2200 /* ldm bucketOffsets table */
2201 if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
2202 /* TODO: avoid memset? */
2203 size_t const numBuckets =
2204 ((size_t)1) << (params->ldmParams.hashLog -
2205 params->ldmParams.bucketSizeLog);
2206 zc->ldmState.bucketOffsets = ZSTD_cwksp_reserve_buffer(ws, numBuckets);
2207 ZSTD_memset(zc->ldmState.bucketOffsets, 0, numBuckets);
2208 }
2209
2210 /* sequences storage */
2211 ZSTD_referenceExternalSequences(zc, NULL, 0);
2212 zc->seqStore.maxNbSeq = maxNbSeq;
2213 zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
2214 zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
2215 zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
2216
2217 DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
2218 assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace));
2219
2220 zc->initialized = 1;
2221
2222 return 0;
2223 }
2224}
2225
2226/* ZSTD_invalidateRepCodes() :
2227 * ensures next compression will not use repcodes from previous block.
2228 * Note : only works with regular variant;
2229 * do not use with extDict variant ! */
2230void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
2231 int i;
2232 for (i=0; i<ZSTD_REP_NUM; i++) cctx->blockState.prevCBlock->rep[i] = 0;
2233 assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
2234}
2235
2236/* These are the approximate sizes for each strategy past which copying the
2237 * dictionary tables into the working context is faster than using them
2238 * in-place.
2239 */
2240static const size_t attachDictSizeCutoffs[ZSTD_STRATEGY_MAX+1] = {
2241 8 KB, /* unused */
2242 8 KB, /* ZSTD_fast */
2243 16 KB, /* ZSTD_dfast */
2244 32 KB, /* ZSTD_greedy */
2245 32 KB, /* ZSTD_lazy */
2246 32 KB, /* ZSTD_lazy2 */
2247 32 KB, /* ZSTD_btlazy2 */
2248 32 KB, /* ZSTD_btopt */
2249 8 KB, /* ZSTD_btultra */
2250 8 KB /* ZSTD_btultra2 */
2251};
2252
2253static int ZSTD_shouldAttachDict(const ZSTD_CDict* cdict,
2254 const ZSTD_CCtx_params* params,
2255 U64 pledgedSrcSize)
2256{
2257 size_t cutoff = attachDictSizeCutoffs[cdict->matchState.cParams.strategy];
2258 int const dedicatedDictSearch = cdict->matchState.dedicatedDictSearch;
2259 return dedicatedDictSearch
2260 || ( ( pledgedSrcSize <= cutoff
2261 || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
2262 || params->attachDictPref == ZSTD_dictForceAttach )
2263 && params->attachDictPref != ZSTD_dictForceCopy
2264 && !params->forceWindow ); /* dictMatchState isn't correctly
2265 * handled in _enforceMaxDist */
2266}
2267
2268static size_t
2269ZSTD_resetCCtx_byAttachingCDict(ZSTD_CCtx* cctx,
2270 const ZSTD_CDict* cdict,
2271 ZSTD_CCtx_params params,
2272 U64 pledgedSrcSize,
2273 ZSTD_buffered_policy_e zbuff)
2274{
2275 DEBUGLOG(4, "ZSTD_resetCCtx_byAttachingCDict() pledgedSrcSize=%llu",
2276 (unsigned long long)pledgedSrcSize);
2277 {
2278 ZSTD_compressionParameters adjusted_cdict_cParams = cdict->matchState.cParams;
2279 unsigned const windowLog = params.cParams.windowLog;
2280 assert(windowLog != 0);
2281 /* Resize working context table params for input only, since the dict
2282 * has its own tables. */
2283 /* pledgedSrcSize == 0 means 0! */
2284
2285 if (cdict->matchState.dedicatedDictSearch) {
2286 ZSTD_dedicatedDictSearch_revertCParams(&adjusted_cdict_cParams);
2287 }
2288
2289 params.cParams = ZSTD_adjustCParams_internal(adjusted_cdict_cParams, pledgedSrcSize,
2290 cdict->dictContentSize, ZSTD_cpm_attachDict,
2291 params.useRowMatchFinder);
2292 params.cParams.windowLog = windowLog;
2293 params.useRowMatchFinder = cdict->useRowMatchFinder; /* cdict overrides */
2294 FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
2295 /* loadedDictSize */ 0,
2296 ZSTDcrp_makeClean, zbuff), "");
2297 assert(cctx->appliedParams.cParams.strategy == adjusted_cdict_cParams.strategy);
2298 }
2299
2300 { const U32 cdictEnd = (U32)( cdict->matchState.window.nextSrc
2301 - cdict->matchState.window.base);
2302 const U32 cdictLen = cdictEnd - cdict->matchState.window.dictLimit;
2303 if (cdictLen == 0) {
2304 /* don't even attach dictionaries with no contents */
2305 DEBUGLOG(4, "skipping attaching empty dictionary");
2306 } else {
2307 DEBUGLOG(4, "attaching dictionary into context");
2308 cctx->blockState.matchState.dictMatchState = &cdict->matchState;
2309
2310 /* prep working match state so dict matches never have negative indices
2311 * when they are translated to the working context's index space. */
2312 if (cctx->blockState.matchState.window.dictLimit < cdictEnd) {
2313 cctx->blockState.matchState.window.nextSrc =
2314 cctx->blockState.matchState.window.base + cdictEnd;
2315 ZSTD_window_clear(&cctx->blockState.matchState.window);
2316 }
2317 /* loadedDictEnd is expressed within the referential of the active context */
2318 cctx->blockState.matchState.loadedDictEnd = cctx->blockState.matchState.window.dictLimit;
2319 } }
2320
2321 cctx->dictID = cdict->dictID;
2322 cctx->dictContentSize = cdict->dictContentSize;
2323
2324 /* copy block state */
2325 ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
2326
2327 return 0;
2328}
2329
2330static void ZSTD_copyCDictTableIntoCCtx(U32* dst, U32 const* src, size_t tableSize,
2331 ZSTD_compressionParameters const* cParams) {
2332 if (ZSTD_CDictIndicesAreTagged(cParams)){
2333 /* Remove tags from the CDict table if they are present.
2334 * See docs on "short cache" in zstd_compress_internal.h for context. */
2335 size_t i;
2336 for (i = 0; i < tableSize; i++) {
2337 U32 const taggedIndex = src[i];
2338 U32 const index = taggedIndex >> ZSTD_SHORT_CACHE_TAG_BITS;
2339 dst[i] = index;
2340 }
2341 } else {
2342 ZSTD_memcpy(dst, src, tableSize * sizeof(U32));
2343 }
2344}
2345
2346static size_t ZSTD_resetCCtx_byCopyingCDict(ZSTD_CCtx* cctx,
2347 const ZSTD_CDict* cdict,
2348 ZSTD_CCtx_params params,
2349 U64 pledgedSrcSize,
2350 ZSTD_buffered_policy_e zbuff)
2351{
2352 const ZSTD_compressionParameters *cdict_cParams = &cdict->matchState.cParams;
2353
2354 assert(!cdict->matchState.dedicatedDictSearch);
2355 DEBUGLOG(4, "ZSTD_resetCCtx_byCopyingCDict() pledgedSrcSize=%llu",
2356 (unsigned long long)pledgedSrcSize);
2357
2358 { unsigned const windowLog = params.cParams.windowLog;
2359 assert(windowLog != 0);
2360 /* Copy only compression parameters related to tables. */
2361 params.cParams = *cdict_cParams;
2362 params.cParams.windowLog = windowLog;
2363 params.useRowMatchFinder = cdict->useRowMatchFinder;
2364 FORWARD_IF_ERROR(ZSTD_resetCCtx_internal(cctx, &params, pledgedSrcSize,
2365 /* loadedDictSize */ 0,
2366 ZSTDcrp_leaveDirty, zbuff), "");
2367 assert(cctx->appliedParams.cParams.strategy == cdict_cParams->strategy);
2368 assert(cctx->appliedParams.cParams.hashLog == cdict_cParams->hashLog);
2369 assert(cctx->appliedParams.cParams.chainLog == cdict_cParams->chainLog);
2370 }
2371
2372 ZSTD_cwksp_mark_tables_dirty(&cctx->workspace);
2373 assert(params.useRowMatchFinder != ZSTD_ps_auto);
2374
2375 /* copy tables */
2376 { size_t const chainSize = ZSTD_allocateChainTable(cdict_cParams->strategy, cdict->useRowMatchFinder, 0 /* DDS guaranteed disabled */)
2377 ? ((size_t)1 << cdict_cParams->chainLog)
2378 : 0;
2379 size_t const hSize = (size_t)1 << cdict_cParams->hashLog;
2380
2381 ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.hashTable,
2382 cdict->matchState.hashTable,
2383 hSize, cdict_cParams);
2384
2385 /* Do not copy cdict's chainTable if cctx has parameters such that it would not use chainTable */
2386 if (ZSTD_allocateChainTable(cctx->appliedParams.cParams.strategy, cctx->appliedParams.useRowMatchFinder, 0 /* forDDSDict */)) {
2387 ZSTD_copyCDictTableIntoCCtx(cctx->blockState.matchState.chainTable,
2388 cdict->matchState.chainTable,
2389 chainSize, cdict_cParams);
2390 }
2391 /* copy tag table */
2392 if (ZSTD_rowMatchFinderUsed(cdict_cParams->strategy, cdict->useRowMatchFinder)) {
2393 size_t const tagTableSize = hSize;
2394 ZSTD_memcpy(cctx->blockState.matchState.tagTable,
2395 cdict->matchState.tagTable,
2396 tagTableSize);
2397 cctx->blockState.matchState.hashSalt = cdict->matchState.hashSalt;
2398 }
2399 }
2400
2401 /* Zero the hashTable3, since the cdict never fills it */
2402 { int const h3log = cctx->blockState.matchState.hashLog3;
2403 size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
2404 assert(cdict->matchState.hashLog3 == 0);
2405 ZSTD_memset(cctx->blockState.matchState.hashTable3, 0, h3Size * sizeof(U32));
2406 }
2407
2408 ZSTD_cwksp_mark_tables_clean(&cctx->workspace);
2409
2410 /* copy dictionary offsets */
2411 { ZSTD_matchState_t const* srcMatchState = &cdict->matchState;
2412 ZSTD_matchState_t* dstMatchState = &cctx->blockState.matchState;
2413 dstMatchState->window = srcMatchState->window;
2414 dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
2415 dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
2416 }
2417
2418 cctx->dictID = cdict->dictID;
2419 cctx->dictContentSize = cdict->dictContentSize;
2420
2421 /* copy block state */
2422 ZSTD_memcpy(cctx->blockState.prevCBlock, &cdict->cBlockState, sizeof(cdict->cBlockState));
2423
2424 return 0;
2425}
2426
2427/* We have a choice between copying the dictionary context into the working
2428 * context, or referencing the dictionary context from the working context
2429 * in-place. We decide here which strategy to use. */
2430static size_t ZSTD_resetCCtx_usingCDict(ZSTD_CCtx* cctx,
2431 const ZSTD_CDict* cdict,
2432 const ZSTD_CCtx_params* params,
2433 U64 pledgedSrcSize,
2434 ZSTD_buffered_policy_e zbuff)
2435{
2436
2437 DEBUGLOG(4, "ZSTD_resetCCtx_usingCDict (pledgedSrcSize=%u)",
2438 (unsigned)pledgedSrcSize);
2439
2440 if (ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize)) {
2441 return ZSTD_resetCCtx_byAttachingCDict(
2442 cctx, cdict, *params, pledgedSrcSize, zbuff);
2443 } else {
2444 return ZSTD_resetCCtx_byCopyingCDict(
2445 cctx, cdict, *params, pledgedSrcSize, zbuff);
2446 }
2447}
2448
2449/*! ZSTD_copyCCtx_internal() :
2450 * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
2451 * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
2452 * The "context", in this case, refers to the hash and chain tables,
2453 * entropy tables, and dictionary references.
2454 * `windowLog` value is enforced if != 0, otherwise value is copied from srcCCtx.
2455 * @return : 0, or an error code */
2456static size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx,
2457 const ZSTD_CCtx* srcCCtx,
2458 ZSTD_frameParameters fParams,
2459 U64 pledgedSrcSize,
2460 ZSTD_buffered_policy_e zbuff)
2461{
2462 RETURN_ERROR_IF(srcCCtx->stage!=ZSTDcs_init, stage_wrong,
2463 "Can't copy a ctx that's not in init stage.");
2464 DEBUGLOG(5, "ZSTD_copyCCtx_internal");
2465 ZSTD_memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
2466 { ZSTD_CCtx_params params = dstCCtx->requestedParams;
2467 /* Copy only compression parameters related to tables. */
2468 params.cParams = srcCCtx->appliedParams.cParams;
2469 assert(srcCCtx->appliedParams.useRowMatchFinder != ZSTD_ps_auto);
2470 assert(srcCCtx->appliedParams.useBlockSplitter != ZSTD_ps_auto);
2471 assert(srcCCtx->appliedParams.ldmParams.enableLdm != ZSTD_ps_auto);
2472 params.useRowMatchFinder = srcCCtx->appliedParams.useRowMatchFinder;
2473 params.useBlockSplitter = srcCCtx->appliedParams.useBlockSplitter;
2474 params.ldmParams = srcCCtx->appliedParams.ldmParams;
2475 params.fParams = fParams;
2476 params.maxBlockSize = srcCCtx->appliedParams.maxBlockSize;
2477 ZSTD_resetCCtx_internal(dstCCtx, &params, pledgedSrcSize,
2478 /* loadedDictSize */ 0,
2479 ZSTDcrp_leaveDirty, zbuff);
2480 assert(dstCCtx->appliedParams.cParams.windowLog == srcCCtx->appliedParams.cParams.windowLog);
2481 assert(dstCCtx->appliedParams.cParams.strategy == srcCCtx->appliedParams.cParams.strategy);
2482 assert(dstCCtx->appliedParams.cParams.hashLog == srcCCtx->appliedParams.cParams.hashLog);
2483 assert(dstCCtx->appliedParams.cParams.chainLog == srcCCtx->appliedParams.cParams.chainLog);
2484 assert(dstCCtx->blockState.matchState.hashLog3 == srcCCtx->blockState.matchState.hashLog3);
2485 }
2486
2487 ZSTD_cwksp_mark_tables_dirty(&dstCCtx->workspace);
2488
2489 /* copy tables */
2490 { size_t const chainSize = ZSTD_allocateChainTable(srcCCtx->appliedParams.cParams.strategy,
2491 srcCCtx->appliedParams.useRowMatchFinder,
2492 0 /* forDDSDict */)
2493 ? ((size_t)1 << srcCCtx->appliedParams.cParams.chainLog)
2494 : 0;
2495 size_t const hSize = (size_t)1 << srcCCtx->appliedParams.cParams.hashLog;
2496 int const h3log = srcCCtx->blockState.matchState.hashLog3;
2497 size_t const h3Size = h3log ? ((size_t)1 << h3log) : 0;
2498
2499 ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable,
2500 srcCCtx->blockState.matchState.hashTable,
2501 hSize * sizeof(U32));
2502 ZSTD_memcpy(dstCCtx->blockState.matchState.chainTable,
2503 srcCCtx->blockState.matchState.chainTable,
2504 chainSize * sizeof(U32));
2505 ZSTD_memcpy(dstCCtx->blockState.matchState.hashTable3,
2506 srcCCtx->blockState.matchState.hashTable3,
2507 h3Size * sizeof(U32));
2508 }
2509
2510 ZSTD_cwksp_mark_tables_clean(&dstCCtx->workspace);
2511
2512 /* copy dictionary offsets */
2513 {
2514 const ZSTD_matchState_t* srcMatchState = &srcCCtx->blockState.matchState;
2515 ZSTD_matchState_t* dstMatchState = &dstCCtx->blockState.matchState;
2516 dstMatchState->window = srcMatchState->window;
2517 dstMatchState->nextToUpdate = srcMatchState->nextToUpdate;
2518 dstMatchState->loadedDictEnd= srcMatchState->loadedDictEnd;
2519 }
2520 dstCCtx->dictID = srcCCtx->dictID;
2521 dstCCtx->dictContentSize = srcCCtx->dictContentSize;
2522
2523 /* copy block state */
2524 ZSTD_memcpy(dstCCtx->blockState.prevCBlock, srcCCtx->blockState.prevCBlock, sizeof(*srcCCtx->blockState.prevCBlock));
2525
2526 return 0;
2527}
2528
2529/*! ZSTD_copyCCtx() :
2530 * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
2531 * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
2532 * pledgedSrcSize==0 means "unknown".
2533* @return : 0, or an error code */
2534size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
2535{
2536 ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
2537 ZSTD_buffered_policy_e const zbuff = srcCCtx->bufferedPolicy;
2538 ZSTD_STATIC_ASSERT((U32)ZSTDb_buffered==1);
2539 if (pledgedSrcSize==0) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
2540 fParams.contentSizeFlag = (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN);
2541
2542 return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx,
2543 fParams, pledgedSrcSize,
2544 zbuff);
2545}
2546
2547
2548#define ZSTD_ROWSIZE 16
2549/*! ZSTD_reduceTable() :
2550 * reduce table indexes by `reducerValue`, or squash to zero.
2551 * PreserveMark preserves "unsorted mark" for btlazy2 strategy.
2552 * It must be set to a clear 0/1 value, to remove branch during inlining.
2553 * Presume table size is a multiple of ZSTD_ROWSIZE
2554 * to help auto-vectorization */
2555FORCE_INLINE_TEMPLATE void
2556ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerValue, int const preserveMark)
2557{
2558 int const nbRows = (int)size / ZSTD_ROWSIZE;
2559 int cellNb = 0;
2560 int rowNb;
2561 /* Protect special index values < ZSTD_WINDOW_START_INDEX. */
2562 U32 const reducerThreshold = reducerValue + ZSTD_WINDOW_START_INDEX;
2563 assert((size & (ZSTD_ROWSIZE-1)) == 0); /* multiple of ZSTD_ROWSIZE */
2564 assert(size < (1U<<31)); /* can be casted to int */
2565
2566#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
2567 /* To validate that the table re-use logic is sound, and that we don't
2568 * access table space that we haven't cleaned, we re-"poison" the table
2569 * space every time we mark it dirty.
2570 *
2571 * This function however is intended to operate on those dirty tables and
2572 * re-clean them. So when this function is used correctly, we can unpoison
2573 * the memory it operated on. This introduces a blind spot though, since
2574 * if we now try to operate on __actually__ poisoned memory, we will not
2575 * detect that. */
2576 __msan_unpoison(table, size * sizeof(U32));
2577#endif
2578
2579 for (rowNb=0 ; rowNb < nbRows ; rowNb++) {
2580 int column;
2581 for (column=0; column<ZSTD_ROWSIZE; column++) {
2582 U32 newVal;
2583 if (preserveMark && table[cellNb] == ZSTD_DUBT_UNSORTED_MARK) {
2584 /* This write is pointless, but is required(?) for the compiler
2585 * to auto-vectorize the loop. */
2586 newVal = ZSTD_DUBT_UNSORTED_MARK;
2587 } else if (table[cellNb] < reducerThreshold) {
2588 newVal = 0;
2589 } else {
2590 newVal = table[cellNb] - reducerValue;
2591 }
2592 table[cellNb] = newVal;
2593 cellNb++;
2594 } }
2595}
2596
2597static void ZSTD_reduceTable(U32* const table, U32 const size, U32 const reducerValue)
2598{
2599 ZSTD_reduceTable_internal(table, size, reducerValue, 0);
2600}
2601
2602static void ZSTD_reduceTable_btlazy2(U32* const table, U32 const size, U32 const reducerValue)
2603{
2604 ZSTD_reduceTable_internal(table, size, reducerValue, 1);
2605}
2606
2607/*! ZSTD_reduceIndex() :
2608* rescale all indexes to avoid future overflow (indexes are U32) */
2609static void ZSTD_reduceIndex (ZSTD_matchState_t* ms, ZSTD_CCtx_params const* params, const U32 reducerValue)
2610{
2611 { U32 const hSize = (U32)1 << params->cParams.hashLog;
2612 ZSTD_reduceTable(ms->hashTable, hSize, reducerValue);
2613 }
2614
2615 if (ZSTD_allocateChainTable(params->cParams.strategy, params->useRowMatchFinder, (U32)ms->dedicatedDictSearch)) {
2616 U32 const chainSize = (U32)1 << params->cParams.chainLog;
2617 if (params->cParams.strategy == ZSTD_btlazy2)
2618 ZSTD_reduceTable_btlazy2(ms->chainTable, chainSize, reducerValue);
2619 else
2620 ZSTD_reduceTable(ms->chainTable, chainSize, reducerValue);
2621 }
2622
2623 if (ms->hashLog3) {
2624 U32 const h3Size = (U32)1 << ms->hashLog3;
2625 ZSTD_reduceTable(ms->hashTable3, h3Size, reducerValue);
2626 }
2627}
2628
2629
2630/*-*******************************************************
2631* Block entropic compression
2632*********************************************************/
2633
2634/* See doc/zstd_compression_format.md for detailed format description */
2635
2636int ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
2637{
2638 const seqDef* const sequences = seqStorePtr->sequencesStart;
2639 BYTE* const llCodeTable = seqStorePtr->llCode;
2640 BYTE* const ofCodeTable = seqStorePtr->ofCode;
2641 BYTE* const mlCodeTable = seqStorePtr->mlCode;
2642 U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
2643 U32 u;
2644 int longOffsets = 0;
2645 assert(nbSeq <= seqStorePtr->maxNbSeq);
2646 for (u=0; u<nbSeq; u++) {
2647 U32 const llv = sequences[u].litLength;
2648 U32 const ofCode = ZSTD_highbit32(sequences[u].offBase);
2649 U32 const mlv = sequences[u].mlBase;
2650 llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
2651 ofCodeTable[u] = (BYTE)ofCode;
2652 mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
2653 assert(!(MEM_64bits() && ofCode >= STREAM_ACCUMULATOR_MIN));
2654 if (MEM_32bits() && ofCode >= STREAM_ACCUMULATOR_MIN)
2655 longOffsets = 1;
2656 }
2657 if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)
2658 llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
2659 if (seqStorePtr->longLengthType==ZSTD_llt_matchLength)
2660 mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
2661 return longOffsets;
2662}
2663
2664/* ZSTD_useTargetCBlockSize():
2665 * Returns if target compressed block size param is being used.
2666 * If used, compression will do best effort to make a compressed block size to be around targetCBlockSize.
2667 * Returns 1 if true, 0 otherwise. */
2668static int ZSTD_useTargetCBlockSize(const ZSTD_CCtx_params* cctxParams)
2669{
2670 DEBUGLOG(5, "ZSTD_useTargetCBlockSize (targetCBlockSize=%zu)", cctxParams->targetCBlockSize);
2671 return (cctxParams->targetCBlockSize != 0);
2672}
2673
2674/* ZSTD_blockSplitterEnabled():
2675 * Returns if block splitting param is being used
2676 * If used, compression will do best effort to split a block in order to improve compression ratio.
2677 * At the time this function is called, the parameter must be finalized.
2678 * Returns 1 if true, 0 otherwise. */
2679static int ZSTD_blockSplitterEnabled(ZSTD_CCtx_params* cctxParams)
2680{
2681 DEBUGLOG(5, "ZSTD_blockSplitterEnabled (useBlockSplitter=%d)", cctxParams->useBlockSplitter);
2682 assert(cctxParams->useBlockSplitter != ZSTD_ps_auto);
2683 return (cctxParams->useBlockSplitter == ZSTD_ps_enable);
2684}
2685
2686/* Type returned by ZSTD_buildSequencesStatistics containing finalized symbol encoding types
2687 * and size of the sequences statistics
2688 */
2689typedef struct {
2690 U32 LLtype;
2691 U32 Offtype;
2692 U32 MLtype;
2693 size_t size;
2694 size_t lastCountSize; /* Accounts for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
2695 int longOffsets;
2696} ZSTD_symbolEncodingTypeStats_t;
2697
2698/* ZSTD_buildSequencesStatistics():
2699 * Returns a ZSTD_symbolEncodingTypeStats_t, or a zstd error code in the `size` field.
2700 * Modifies `nextEntropy` to have the appropriate values as a side effect.
2701 * nbSeq must be greater than 0.
2702 *
2703 * entropyWkspSize must be of size at least ENTROPY_WORKSPACE_SIZE - (MaxSeq + 1)*sizeof(U32)
2704 */
2705static ZSTD_symbolEncodingTypeStats_t
2706ZSTD_buildSequencesStatistics(
2707 const seqStore_t* seqStorePtr, size_t nbSeq,
2708 const ZSTD_fseCTables_t* prevEntropy, ZSTD_fseCTables_t* nextEntropy,
2709 BYTE* dst, const BYTE* const dstEnd,
2710 ZSTD_strategy strategy, unsigned* countWorkspace,
2711 void* entropyWorkspace, size_t entropyWkspSize)
2712{
2713 BYTE* const ostart = dst;
2714 const BYTE* const oend = dstEnd;
2715 BYTE* op = ostart;
2716 FSE_CTable* CTable_LitLength = nextEntropy->litlengthCTable;
2717 FSE_CTable* CTable_OffsetBits = nextEntropy->offcodeCTable;
2718 FSE_CTable* CTable_MatchLength = nextEntropy->matchlengthCTable;
2719 const BYTE* const ofCodeTable = seqStorePtr->ofCode;
2720 const BYTE* const llCodeTable = seqStorePtr->llCode;
2721 const BYTE* const mlCodeTable = seqStorePtr->mlCode;
2722 ZSTD_symbolEncodingTypeStats_t stats;
2723
2724 stats.lastCountSize = 0;
2725 /* convert length/distances into codes */
2726 stats.longOffsets = ZSTD_seqToCodes(seqStorePtr);
2727 assert(op <= oend);
2728 assert(nbSeq != 0); /* ZSTD_selectEncodingType() divides by nbSeq */
2729 /* build CTable for Literal Lengths */
2730 { unsigned max = MaxLL;
2731 size_t const mostFrequent = HIST_countFast_wksp(countWorkspace, &max, llCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
2732 DEBUGLOG(5, "Building LL table");
2733 nextEntropy->litlength_repeatMode = prevEntropy->litlength_repeatMode;
2734 stats.LLtype = ZSTD_selectEncodingType(&nextEntropy->litlength_repeatMode,
2735 countWorkspace, max, mostFrequent, nbSeq,
2736 LLFSELog, prevEntropy->litlengthCTable,
2737 LL_defaultNorm, LL_defaultNormLog,
2738 ZSTD_defaultAllowed, strategy);
2739 assert(set_basic < set_compressed && set_rle < set_compressed);
2740 assert(!(stats.LLtype < set_compressed && nextEntropy->litlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
2741 { size_t const countSize = ZSTD_buildCTable(
2742 op, (size_t)(oend - op),
2743 CTable_LitLength, LLFSELog, (symbolEncodingType_e)stats.LLtype,
2744 countWorkspace, max, llCodeTable, nbSeq,
2745 LL_defaultNorm, LL_defaultNormLog, MaxLL,
2746 prevEntropy->litlengthCTable,
2747 sizeof(prevEntropy->litlengthCTable),
2748 entropyWorkspace, entropyWkspSize);
2749 if (ZSTD_isError(countSize)) {
2750 DEBUGLOG(3, "ZSTD_buildCTable for LitLens failed");
2751 stats.size = countSize;
2752 return stats;
2753 }
2754 if (stats.LLtype == set_compressed)
2755 stats.lastCountSize = countSize;
2756 op += countSize;
2757 assert(op <= oend);
2758 } }
2759 /* build CTable for Offsets */
2760 { unsigned max = MaxOff;
2761 size_t const mostFrequent = HIST_countFast_wksp(
2762 countWorkspace, &max, ofCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
2763 /* We can only use the basic table if max <= DefaultMaxOff, otherwise the offsets are too large */
2764 ZSTD_defaultPolicy_e const defaultPolicy = (max <= DefaultMaxOff) ? ZSTD_defaultAllowed : ZSTD_defaultDisallowed;
2765 DEBUGLOG(5, "Building OF table");
2766 nextEntropy->offcode_repeatMode = prevEntropy->offcode_repeatMode;
2767 stats.Offtype = ZSTD_selectEncodingType(&nextEntropy->offcode_repeatMode,
2768 countWorkspace, max, mostFrequent, nbSeq,
2769 OffFSELog, prevEntropy->offcodeCTable,
2770 OF_defaultNorm, OF_defaultNormLog,
2771 defaultPolicy, strategy);
2772 assert(!(stats.Offtype < set_compressed && nextEntropy->offcode_repeatMode != FSE_repeat_none)); /* We don't copy tables */
2773 { size_t const countSize = ZSTD_buildCTable(
2774 op, (size_t)(oend - op),
2775 CTable_OffsetBits, OffFSELog, (symbolEncodingType_e)stats.Offtype,
2776 countWorkspace, max, ofCodeTable, nbSeq,
2777 OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
2778 prevEntropy->offcodeCTable,
2779 sizeof(prevEntropy->offcodeCTable),
2780 entropyWorkspace, entropyWkspSize);
2781 if (ZSTD_isError(countSize)) {
2782 DEBUGLOG(3, "ZSTD_buildCTable for Offsets failed");
2783 stats.size = countSize;
2784 return stats;
2785 }
2786 if (stats.Offtype == set_compressed)
2787 stats.lastCountSize = countSize;
2788 op += countSize;
2789 assert(op <= oend);
2790 } }
2791 /* build CTable for MatchLengths */
2792 { unsigned max = MaxML;
2793 size_t const mostFrequent = HIST_countFast_wksp(
2794 countWorkspace, &max, mlCodeTable, nbSeq, entropyWorkspace, entropyWkspSize); /* can't fail */
2795 DEBUGLOG(5, "Building ML table (remaining space : %i)", (int)(oend-op));
2796 nextEntropy->matchlength_repeatMode = prevEntropy->matchlength_repeatMode;
2797 stats.MLtype = ZSTD_selectEncodingType(&nextEntropy->matchlength_repeatMode,
2798 countWorkspace, max, mostFrequent, nbSeq,
2799 MLFSELog, prevEntropy->matchlengthCTable,
2800 ML_defaultNorm, ML_defaultNormLog,
2801 ZSTD_defaultAllowed, strategy);
2802 assert(!(stats.MLtype < set_compressed && nextEntropy->matchlength_repeatMode != FSE_repeat_none)); /* We don't copy tables */
2803 { size_t const countSize = ZSTD_buildCTable(
2804 op, (size_t)(oend - op),
2805 CTable_MatchLength, MLFSELog, (symbolEncodingType_e)stats.MLtype,
2806 countWorkspace, max, mlCodeTable, nbSeq,
2807 ML_defaultNorm, ML_defaultNormLog, MaxML,
2808 prevEntropy->matchlengthCTable,
2809 sizeof(prevEntropy->matchlengthCTable),
2810 entropyWorkspace, entropyWkspSize);
2811 if (ZSTD_isError(countSize)) {
2812 DEBUGLOG(3, "ZSTD_buildCTable for MatchLengths failed");
2813 stats.size = countSize;
2814 return stats;
2815 }
2816 if (stats.MLtype == set_compressed)
2817 stats.lastCountSize = countSize;
2818 op += countSize;
2819 assert(op <= oend);
2820 } }
2821 stats.size = (size_t)(op-ostart);
2822 return stats;
2823}
2824
2825/* ZSTD_entropyCompressSeqStore_internal():
2826 * compresses both literals and sequences
2827 * Returns compressed size of block, or a zstd error.
2828 */
2829#define SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO 20
2830MEM_STATIC size_t
2831ZSTD_entropyCompressSeqStore_internal(
2832 const seqStore_t* seqStorePtr,
2833 const ZSTD_entropyCTables_t* prevEntropy,
2834 ZSTD_entropyCTables_t* nextEntropy,
2835 const ZSTD_CCtx_params* cctxParams,
2836 void* dst, size_t dstCapacity,
2837 void* entropyWorkspace, size_t entropyWkspSize,
2838 const int bmi2)
2839{
2840 ZSTD_strategy const strategy = cctxParams->cParams.strategy;
2841 unsigned* count = (unsigned*)entropyWorkspace;
2842 FSE_CTable* CTable_LitLength = nextEntropy->fse.litlengthCTable;
2843 FSE_CTable* CTable_OffsetBits = nextEntropy->fse.offcodeCTable;
2844 FSE_CTable* CTable_MatchLength = nextEntropy->fse.matchlengthCTable;
2845 const seqDef* const sequences = seqStorePtr->sequencesStart;
2846 const size_t nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
2847 const BYTE* const ofCodeTable = seqStorePtr->ofCode;
2848 const BYTE* const llCodeTable = seqStorePtr->llCode;
2849 const BYTE* const mlCodeTable = seqStorePtr->mlCode;
2850 BYTE* const ostart = (BYTE*)dst;
2851 BYTE* const oend = ostart + dstCapacity;
2852 BYTE* op = ostart;
2853 size_t lastCountSize;
2854 int longOffsets = 0;
2855
2856 entropyWorkspace = count + (MaxSeq + 1);
2857 entropyWkspSize -= (MaxSeq + 1) * sizeof(*count);
2858
2859 DEBUGLOG(5, "ZSTD_entropyCompressSeqStore_internal (nbSeq=%zu, dstCapacity=%zu)", nbSeq, dstCapacity);
2860 ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
2861 assert(entropyWkspSize >= HUF_WORKSPACE_SIZE);
2862
2863 /* Compress literals */
2864 { const BYTE* const literals = seqStorePtr->litStart;
2865 size_t const numSequences = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
2866 size_t const numLiterals = (size_t)(seqStorePtr->lit - seqStorePtr->litStart);
2867 /* Base suspicion of uncompressibility on ratio of literals to sequences */
2868 unsigned const suspectUncompressible = (numSequences == 0) || (numLiterals / numSequences >= SUSPECT_UNCOMPRESSIBLE_LITERAL_RATIO);
2869 size_t const litSize = (size_t)(seqStorePtr->lit - literals);
2870
2871 size_t const cSize = ZSTD_compressLiterals(
2872 op, dstCapacity,
2873 literals, litSize,
2874 entropyWorkspace, entropyWkspSize,
2875 &prevEntropy->huf, &nextEntropy->huf,
2876 cctxParams->cParams.strategy,
2877 ZSTD_literalsCompressionIsDisabled(cctxParams),
2878 suspectUncompressible, bmi2);
2879 FORWARD_IF_ERROR(cSize, "ZSTD_compressLiterals failed");
2880 assert(cSize <= dstCapacity);
2881 op += cSize;
2882 }
2883
2884 /* Sequences Header */
2885 RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
2886 dstSize_tooSmall, "Can't fit seq hdr in output buf!");
2887 if (nbSeq < 128) {
2888 *op++ = (BYTE)nbSeq;
2889 } else if (nbSeq < LONGNBSEQ) {
2890 op[0] = (BYTE)((nbSeq>>8) + 0x80);
2891 op[1] = (BYTE)nbSeq;
2892 op+=2;
2893 } else {
2894 op[0]=0xFF;
2895 MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ));
2896 op+=3;
2897 }
2898 assert(op <= oend);
2899 if (nbSeq==0) {
2900 /* Copy the old tables over as if we repeated them */
2901 ZSTD_memcpy(&nextEntropy->fse, &prevEntropy->fse, sizeof(prevEntropy->fse));
2902 return (size_t)(op - ostart);
2903 }
2904 { BYTE* const seqHead = op++;
2905 /* build stats for sequences */
2906 const ZSTD_symbolEncodingTypeStats_t stats =
2907 ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
2908 &prevEntropy->fse, &nextEntropy->fse,
2909 op, oend,
2910 strategy, count,
2911 entropyWorkspace, entropyWkspSize);
2912 FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
2913 *seqHead = (BYTE)((stats.LLtype<<6) + (stats.Offtype<<4) + (stats.MLtype<<2));
2914 lastCountSize = stats.lastCountSize;
2915 op += stats.size;
2916 longOffsets = stats.longOffsets;
2917 }
2918
2919 { size_t const bitstreamSize = ZSTD_encodeSequences(
2920 op, (size_t)(oend - op),
2921 CTable_MatchLength, mlCodeTable,
2922 CTable_OffsetBits, ofCodeTable,
2923 CTable_LitLength, llCodeTable,
2924 sequences, nbSeq,
2925 longOffsets, bmi2);
2926 FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
2927 op += bitstreamSize;
2928 assert(op <= oend);
2929 /* zstd versions <= 1.3.4 mistakenly report corruption when
2930 * FSE_readNCount() receives a buffer < 4 bytes.
2931 * Fixed by https://github.com/facebook/zstd/pull/1146.
2932 * This can happen when the last set_compressed table present is 2
2933 * bytes and the bitstream is only one byte.
2934 * In this exceedingly rare case, we will simply emit an uncompressed
2935 * block, since it isn't worth optimizing.
2936 */
2937 if (lastCountSize && (lastCountSize + bitstreamSize) < 4) {
2938 /* lastCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
2939 assert(lastCountSize + bitstreamSize == 3);
2940 DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
2941 "emitting an uncompressed block.");
2942 return 0;
2943 }
2944 }
2945
2946 DEBUGLOG(5, "compressed block size : %u", (unsigned)(op - ostart));
2947 return (size_t)(op - ostart);
2948}
2949
2950MEM_STATIC size_t
2951ZSTD_entropyCompressSeqStore(
2952 const seqStore_t* seqStorePtr,
2953 const ZSTD_entropyCTables_t* prevEntropy,
2954 ZSTD_entropyCTables_t* nextEntropy,
2955 const ZSTD_CCtx_params* cctxParams,
2956 void* dst, size_t dstCapacity,
2957 size_t srcSize,
2958 void* entropyWorkspace, size_t entropyWkspSize,
2959 int bmi2)
2960{
2961 size_t const cSize = ZSTD_entropyCompressSeqStore_internal(
2962 seqStorePtr, prevEntropy, nextEntropy, cctxParams,
2963 dst, dstCapacity,
2964 entropyWorkspace, entropyWkspSize, bmi2);
2965 if (cSize == 0) return 0;
2966 /* When srcSize <= dstCapacity, there is enough space to write a raw uncompressed block.
2967 * Since we ran out of space, block must be not compressible, so fall back to raw uncompressed block.
2968 */
2969 if ((cSize == ERROR(dstSize_tooSmall)) & (srcSize <= dstCapacity)) {
2970 DEBUGLOG(4, "not enough dstCapacity (%zu) for ZSTD_entropyCompressSeqStore_internal()=> do not compress block", dstCapacity);
2971 return 0; /* block not compressed */
2972 }
2973 FORWARD_IF_ERROR(cSize, "ZSTD_entropyCompressSeqStore_internal failed");
2974
2975 /* Check compressibility */
2976 { size_t const maxCSize = srcSize - ZSTD_minGain(srcSize, cctxParams->cParams.strategy);
2977 if (cSize >= maxCSize) return 0; /* block not compressed */
2978 }
2979 DEBUGLOG(5, "ZSTD_entropyCompressSeqStore() cSize: %zu", cSize);
2980 /* libzstd decoder before > v1.5.4 is not compatible with compressed blocks of size ZSTD_BLOCKSIZE_MAX exactly.
2981 * This restriction is indirectly already fulfilled by respecting ZSTD_minGain() condition above.
2982 */
2983 assert(cSize < ZSTD_BLOCKSIZE_MAX);
2984 return cSize;
2985}
2986
2987/* ZSTD_selectBlockCompressor() :
2988 * Not static, but internal use only (used by long distance matcher)
2989 * assumption : strat is a valid strategy */
2990ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e useRowMatchFinder, ZSTD_dictMode_e dictMode)
2991{
2992 static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
2993 { ZSTD_compressBlock_fast /* default for 0 */,
2994 ZSTD_compressBlock_fast,
2995 ZSTD_compressBlock_doubleFast,
2996 ZSTD_compressBlock_greedy,
2997 ZSTD_compressBlock_lazy,
2998 ZSTD_compressBlock_lazy2,
2999 ZSTD_compressBlock_btlazy2,
3000 ZSTD_compressBlock_btopt,
3001 ZSTD_compressBlock_btultra,
3002 ZSTD_compressBlock_btultra2 },
3003 { ZSTD_compressBlock_fast_extDict /* default for 0 */,
3004 ZSTD_compressBlock_fast_extDict,
3005 ZSTD_compressBlock_doubleFast_extDict,
3006 ZSTD_compressBlock_greedy_extDict,
3007 ZSTD_compressBlock_lazy_extDict,
3008 ZSTD_compressBlock_lazy2_extDict,
3009 ZSTD_compressBlock_btlazy2_extDict,
3010 ZSTD_compressBlock_btopt_extDict,
3011 ZSTD_compressBlock_btultra_extDict,
3012 ZSTD_compressBlock_btultra_extDict },
3013 { ZSTD_compressBlock_fast_dictMatchState /* default for 0 */,
3014 ZSTD_compressBlock_fast_dictMatchState,
3015 ZSTD_compressBlock_doubleFast_dictMatchState,
3016 ZSTD_compressBlock_greedy_dictMatchState,
3017 ZSTD_compressBlock_lazy_dictMatchState,
3018 ZSTD_compressBlock_lazy2_dictMatchState,
3019 ZSTD_compressBlock_btlazy2_dictMatchState,
3020 ZSTD_compressBlock_btopt_dictMatchState,
3021 ZSTD_compressBlock_btultra_dictMatchState,
3022 ZSTD_compressBlock_btultra_dictMatchState },
3023 { NULL /* default for 0 */,
3024 NULL,
3025 NULL,
3026 ZSTD_compressBlock_greedy_dedicatedDictSearch,
3027 ZSTD_compressBlock_lazy_dedicatedDictSearch,
3028 ZSTD_compressBlock_lazy2_dedicatedDictSearch,
3029 NULL,
3030 NULL,
3031 NULL,
3032 NULL }
3033 };
3034 ZSTD_blockCompressor selectedCompressor;
3035 ZSTD_STATIC_ASSERT((unsigned)ZSTD_fast == 1);
3036
3037 assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
3038 DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
3039 if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) {
3040 static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = {
3041 { ZSTD_compressBlock_greedy_row,
3042 ZSTD_compressBlock_lazy_row,
3043 ZSTD_compressBlock_lazy2_row },
3044 { ZSTD_compressBlock_greedy_extDict_row,
3045 ZSTD_compressBlock_lazy_extDict_row,
3046 ZSTD_compressBlock_lazy2_extDict_row },
3047 { ZSTD_compressBlock_greedy_dictMatchState_row,
3048 ZSTD_compressBlock_lazy_dictMatchState_row,
3049 ZSTD_compressBlock_lazy2_dictMatchState_row },
3050 { ZSTD_compressBlock_greedy_dedicatedDictSearch_row,
3051 ZSTD_compressBlock_lazy_dedicatedDictSearch_row,
3052 ZSTD_compressBlock_lazy2_dedicatedDictSearch_row }
3053 };
3054 DEBUGLOG(4, "Selecting a row-based matchfinder");
3055 assert(useRowMatchFinder != ZSTD_ps_auto);
3056 selectedCompressor = rowBasedBlockCompressors[(int)dictMode][(int)strat - (int)ZSTD_greedy];
3057 } else {
3058 selectedCompressor = blockCompressor[(int)dictMode][(int)strat];
3059 }
3060 assert(selectedCompressor != NULL);
3061 return selectedCompressor;
3062}
3063
3064static void ZSTD_storeLastLiterals(seqStore_t* seqStorePtr,
3065 const BYTE* anchor, size_t lastLLSize)
3066{
3067 ZSTD_memcpy(seqStorePtr->lit, anchor, lastLLSize);
3068 seqStorePtr->lit += lastLLSize;
3069}
3070
3071void ZSTD_resetSeqStore(seqStore_t* ssPtr)
3072{
3073 ssPtr->lit = ssPtr->litStart;
3074 ssPtr->sequences = ssPtr->sequencesStart;
3075 ssPtr->longLengthType = ZSTD_llt_none;
3076}
3077
3078/* ZSTD_postProcessSequenceProducerResult() :
3079 * Validates and post-processes sequences obtained through the external matchfinder API:
3080 * - Checks whether nbExternalSeqs represents an error condition.
3081 * - Appends a block delimiter to outSeqs if one is not already present.
3082 * See zstd.h for context regarding block delimiters.
3083 * Returns the number of sequences after post-processing, or an error code. */
3084static size_t ZSTD_postProcessSequenceProducerResult(
3085 ZSTD_Sequence* outSeqs, size_t nbExternalSeqs, size_t outSeqsCapacity, size_t srcSize
3086) {
3087 RETURN_ERROR_IF(
3088 nbExternalSeqs > outSeqsCapacity,
3089 sequenceProducer_failed,
3090 "External sequence producer returned error code %lu",
3091 (unsigned long)nbExternalSeqs
3092 );
3093
3094 RETURN_ERROR_IF(
3095 nbExternalSeqs == 0 && srcSize > 0,
3096 sequenceProducer_failed,
3097 "Got zero sequences from external sequence producer for a non-empty src buffer!"
3098 );
3099
3100 if (srcSize == 0) {
3101 ZSTD_memset(&outSeqs[0], 0, sizeof(ZSTD_Sequence));
3102 return 1;
3103 }
3104
3105 {
3106 ZSTD_Sequence const lastSeq = outSeqs[nbExternalSeqs - 1];
3107
3108 /* We can return early if lastSeq is already a block delimiter. */
3109 if (lastSeq.offset == 0 && lastSeq.matchLength == 0) {
3110 return nbExternalSeqs;
3111 }
3112
3113 /* This error condition is only possible if the external matchfinder
3114 * produced an invalid parse, by definition of ZSTD_sequenceBound(). */
3115 RETURN_ERROR_IF(
3116 nbExternalSeqs == outSeqsCapacity,
3117 sequenceProducer_failed,
3118 "nbExternalSeqs == outSeqsCapacity but lastSeq is not a block delimiter!"
3119 );
3120
3121 /* lastSeq is not a block delimiter, so we need to append one. */
3122 ZSTD_memset(&outSeqs[nbExternalSeqs], 0, sizeof(ZSTD_Sequence));
3123 return nbExternalSeqs + 1;
3124 }
3125}
3126
3127/* ZSTD_fastSequenceLengthSum() :
3128 * Returns sum(litLen) + sum(matchLen) + lastLits for *seqBuf*.
3129 * Similar to another function in zstd_compress.c (determine_blockSize),
3130 * except it doesn't check for a block delimiter to end summation.
3131 * Removing the early exit allows the compiler to auto-vectorize (https://godbolt.org/z/cY1cajz9P).
3132 * This function can be deleted and replaced by determine_blockSize after we resolve issue #3456. */
3133static size_t ZSTD_fastSequenceLengthSum(ZSTD_Sequence const* seqBuf, size_t seqBufSize) {
3134 size_t matchLenSum, litLenSum, i;
3135 matchLenSum = 0;
3136 litLenSum = 0;
3137 for (i = 0; i < seqBufSize; i++) {
3138 litLenSum += seqBuf[i].litLength;
3139 matchLenSum += seqBuf[i].matchLength;
3140 }
3141 return litLenSum + matchLenSum;
3142}
3143
3144typedef enum { ZSTDbss_compress, ZSTDbss_noCompress } ZSTD_buildSeqStore_e;
3145
3146static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
3147{
3148 ZSTD_matchState_t* const ms = &zc->blockState.matchState;
3149 DEBUGLOG(5, "ZSTD_buildSeqStore (srcSize=%zu)", srcSize);
3150 assert(srcSize <= ZSTD_BLOCKSIZE_MAX);
3151 /* Assert that we have correctly flushed the ctx params into the ms's copy */
3152 ZSTD_assertEqualCParams(zc->appliedParams.cParams, ms->cParams);
3153 /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding
3154 * additional 1. We need to revisit and change this logic to be more consistent */
3155 if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) {
3156 if (zc->appliedParams.cParams.strategy >= ZSTD_btopt) {
3157 ZSTD_ldm_skipRawSeqStoreBytes(&zc->externSeqStore, srcSize);
3158 } else {
3159 ZSTD_ldm_skipSequences(&zc->externSeqStore, srcSize, zc->appliedParams.cParams.minMatch);
3160 }
3161 return ZSTDbss_noCompress; /* don't even attempt compression below a certain srcSize */
3162 }
3163 ZSTD_resetSeqStore(&(zc->seqStore));
3164 /* required for optimal parser to read stats from dictionary */
3165 ms->opt.symbolCosts = &zc->blockState.prevCBlock->entropy;
3166 /* tell the optimal parser how we expect to compress literals */
3167 ms->opt.literalCompressionMode = zc->appliedParams.literalCompressionMode;
3168 /* a gap between an attached dict and the current window is not safe,
3169 * they must remain adjacent,
3170 * and when that stops being the case, the dict must be unset */
3171 assert(ms->dictMatchState == NULL || ms->loadedDictEnd == ms->window.dictLimit);
3172
3173 /* limited update after a very long match */
3174 { const BYTE* const base = ms->window.base;
3175 const BYTE* const istart = (const BYTE*)src;
3176 const U32 curr = (U32)(istart-base);
3177 if (sizeof(ptrdiff_t)==8) assert(istart - base < (ptrdiff_t)(U32)(-1)); /* ensure no overflow */
3178 if (curr > ms->nextToUpdate + 384)
3179 ms->nextToUpdate = curr - MIN(192, (U32)(curr - ms->nextToUpdate - 384));
3180 }
3181
3182 /* select and store sequences */
3183 { ZSTD_dictMode_e const dictMode = ZSTD_matchState_dictMode(ms);
3184 size_t lastLLSize;
3185 { int i;
3186 for (i = 0; i < ZSTD_REP_NUM; ++i)
3187 zc->blockState.nextCBlock->rep[i] = zc->blockState.prevCBlock->rep[i];
3188 }
3189 if (zc->externSeqStore.pos < zc->externSeqStore.size) {
3190 assert(zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_disable);
3191
3192 /* External matchfinder + LDM is technically possible, just not implemented yet.
3193 * We need to revisit soon and implement it. */
3194 RETURN_ERROR_IF(
3195 zc->appliedParams.useSequenceProducer,
3196 parameter_combination_unsupported,
3197 "Long-distance matching with external sequence producer enabled is not currently supported."
3198 );
3199
3200 /* Updates ldmSeqStore.pos */
3201 lastLLSize =
3202 ZSTD_ldm_blockCompress(&zc->externSeqStore,
3203 ms, &zc->seqStore,
3204 zc->blockState.nextCBlock->rep,
3205 zc->appliedParams.useRowMatchFinder,
3206 src, srcSize);
3207 assert(zc->externSeqStore.pos <= zc->externSeqStore.size);
3208 } else if (zc->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
3209 rawSeqStore_t ldmSeqStore = kNullRawSeqStore;
3210
3211 /* External matchfinder + LDM is technically possible, just not implemented yet.
3212 * We need to revisit soon and implement it. */
3213 RETURN_ERROR_IF(
3214 zc->appliedParams.useSequenceProducer,
3215 parameter_combination_unsupported,
3216 "Long-distance matching with external sequence producer enabled is not currently supported."
3217 );
3218
3219 ldmSeqStore.seq = zc->ldmSequences;
3220 ldmSeqStore.capacity = zc->maxNbLdmSequences;
3221 /* Updates ldmSeqStore.size */
3222 FORWARD_IF_ERROR(ZSTD_ldm_generateSequences(&zc->ldmState, &ldmSeqStore,
3223 &zc->appliedParams.ldmParams,
3224 src, srcSize), "");
3225 /* Updates ldmSeqStore.pos */
3226 lastLLSize =
3227 ZSTD_ldm_blockCompress(&ldmSeqStore,
3228 ms, &zc->seqStore,
3229 zc->blockState.nextCBlock->rep,
3230 zc->appliedParams.useRowMatchFinder,
3231 src, srcSize);
3232 assert(ldmSeqStore.pos == ldmSeqStore.size);
3233 } else if (zc->appliedParams.useSequenceProducer) {
3234 assert(
3235 zc->externalMatchCtx.seqBufferCapacity >= ZSTD_sequenceBound(srcSize)
3236 );
3237 assert(zc->externalMatchCtx.mFinder != NULL);
3238
3239 { U32 const windowSize = (U32)1 << zc->appliedParams.cParams.windowLog;
3240
3241 size_t const nbExternalSeqs = (zc->externalMatchCtx.mFinder)(
3242 zc->externalMatchCtx.mState,
3243 zc->externalMatchCtx.seqBuffer,
3244 zc->externalMatchCtx.seqBufferCapacity,
3245 src, srcSize,
3246 NULL, 0, /* dict and dictSize, currently not supported */
3247 zc->appliedParams.compressionLevel,
3248 windowSize
3249 );
3250
3251 size_t const nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult(
3252 zc->externalMatchCtx.seqBuffer,
3253 nbExternalSeqs,
3254 zc->externalMatchCtx.seqBufferCapacity,
3255 srcSize
3256 );
3257
3258 /* Return early if there is no error, since we don't need to worry about last literals */
3259 if (!ZSTD_isError(nbPostProcessedSeqs)) {
3260 ZSTD_sequencePosition seqPos = {0,0,0};
3261 size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->externalMatchCtx.seqBuffer, nbPostProcessedSeqs);
3262 RETURN_ERROR_IF(seqLenSum > srcSize, externalSequences_invalid, "External sequences imply too large a block!");
3263 FORWARD_IF_ERROR(
3264 ZSTD_copySequencesToSeqStoreExplicitBlockDelim(
3265 zc, &seqPos,
3266 zc->externalMatchCtx.seqBuffer, nbPostProcessedSeqs,
3267 src, srcSize,
3268 zc->appliedParams.searchForExternalRepcodes
3269 ),
3270 "Failed to copy external sequences to seqStore!"
3271 );
3272 ms->ldmSeqStore = NULL;
3273 DEBUGLOG(5, "Copied %lu sequences from external sequence producer to internal seqStore.", (unsigned long)nbExternalSeqs);
3274 return ZSTDbss_compress;
3275 }
3276
3277 /* Propagate the error if fallback is disabled */
3278 if (!zc->appliedParams.enableMatchFinderFallback) {
3279 return nbPostProcessedSeqs;
3280 }
3281
3282 /* Fallback to software matchfinder */
3283 { ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
3284 zc->appliedParams.useRowMatchFinder,
3285 dictMode);
3286 ms->ldmSeqStore = NULL;
3287 DEBUGLOG(
3288 5,
3289 "External sequence producer returned error code %lu. Falling back to internal parser.",
3290 (unsigned long)nbExternalSeqs
3291 );
3292 lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
3293 } }
3294 } else { /* not long range mode and no external matchfinder */
3295 ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->appliedParams.cParams.strategy,
3296 zc->appliedParams.useRowMatchFinder,
3297 dictMode);
3298 ms->ldmSeqStore = NULL;
3299 lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
3300 }
3301 { const BYTE* const lastLiterals = (const BYTE*)src + srcSize - lastLLSize;
3302 ZSTD_storeLastLiterals(&zc->seqStore, lastLiterals, lastLLSize);
3303 } }
3304 return ZSTDbss_compress;
3305}
3306
3307static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
3308{
3309 const seqStore_t* seqStore = ZSTD_getSeqStore(zc);
3310 const seqDef* seqStoreSeqs = seqStore->sequencesStart;
3311 size_t seqStoreSeqSize = seqStore->sequences - seqStoreSeqs;
3312 size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
3313 size_t literalsRead = 0;
3314 size_t lastLLSize;
3315
3316 ZSTD_Sequence* outSeqs = &zc->seqCollector.seqStart[zc->seqCollector.seqIndex];
3317 size_t i;
3318 repcodes_t updatedRepcodes;
3319
3320 assert(zc->seqCollector.seqIndex + 1 < zc->seqCollector.maxSequences);
3321 /* Ensure we have enough space for last literals "sequence" */
3322 assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
3323 ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
3324 for (i = 0; i < seqStoreSeqSize; ++i) {
3325 U32 rawOffset = seqStoreSeqs[i].offBase - ZSTD_REP_NUM;
3326 outSeqs[i].litLength = seqStoreSeqs[i].litLength;
3327 outSeqs[i].matchLength = seqStoreSeqs[i].mlBase + MINMATCH;
3328 outSeqs[i].rep = 0;
3329
3330 if (i == seqStore->longLengthPos) {
3331 if (seqStore->longLengthType == ZSTD_llt_literalLength) {
3332 outSeqs[i].litLength += 0x10000;
3333 } else if (seqStore->longLengthType == ZSTD_llt_matchLength) {
3334 outSeqs[i].matchLength += 0x10000;
3335 }
3336 }
3337
3338 if (seqStoreSeqs[i].offBase <= ZSTD_REP_NUM) {
3339 /* Derive the correct offset corresponding to a repcode */
3340 outSeqs[i].rep = seqStoreSeqs[i].offBase;
3341 if (outSeqs[i].litLength != 0) {
3342 rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
3343 } else {
3344 if (outSeqs[i].rep == 3) {
3345 rawOffset = updatedRepcodes.rep[0] - 1;
3346 } else {
3347 rawOffset = updatedRepcodes.rep[outSeqs[i].rep];
3348 }
3349 }
3350 }
3351 outSeqs[i].offset = rawOffset;
3352 /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
3353 so we provide seqStoreSeqs[i].offset - 1 */
3354 ZSTD_updateRep(updatedRepcodes.rep,
3355 seqStoreSeqs[i].offBase,
3356 seqStoreSeqs[i].litLength == 0);
3357 literalsRead += outSeqs[i].litLength;
3358 }
3359 /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
3360 * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
3361 * for the block boundary, according to the API.
3362 */
3363 assert(seqStoreLiteralsSize >= literalsRead);
3364 lastLLSize = seqStoreLiteralsSize - literalsRead;
3365 outSeqs[i].litLength = (U32)lastLLSize;
3366 outSeqs[i].matchLength = outSeqs[i].offset = outSeqs[i].rep = 0;
3367 seqStoreSeqSize++;
3368 zc->seqCollector.seqIndex += seqStoreSeqSize;
3369}
3370
3371size_t ZSTD_sequenceBound(size_t srcSize) {
3372 return (srcSize / ZSTD_MINMATCH_MIN) + 1;
3373}
3374
3375size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
3376 size_t outSeqsSize, const void* src, size_t srcSize)
3377{
3378 const size_t dstCapacity = ZSTD_compressBound(srcSize);
3379 void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
3380 SeqCollector seqCollector;
3381
3382 RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
3383
3384 seqCollector.collectSequences = 1;
3385 seqCollector.seqStart = outSeqs;
3386 seqCollector.seqIndex = 0;
3387 seqCollector.maxSequences = outSeqsSize;
3388 zc->seqCollector = seqCollector;
3389
3390 ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
3391 ZSTD_customFree(dst, ZSTD_defaultCMem);
3392 return zc->seqCollector.seqIndex;
3393}
3394
3395size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize) {
3396 size_t in = 0;
3397 size_t out = 0;
3398 for (; in < seqsSize; ++in) {
3399 if (sequences[in].offset == 0 && sequences[in].matchLength == 0) {
3400 if (in != seqsSize - 1) {
3401 sequences[in+1].litLength += sequences[in].litLength;
3402 }
3403 } else {
3404 sequences[out] = sequences[in];
3405 ++out;
3406 }
3407 }
3408 return out;
3409}
3410
3411/* Unrolled loop to read four size_ts of input at a time. Returns 1 if is RLE, 0 if not. */
3412static int ZSTD_isRLE(const BYTE* src, size_t length) {
3413 const BYTE* ip = src;
3414 const BYTE value = ip[0];
3415 const size_t valueST = (size_t)((U64)value * 0x0101010101010101ULL);
3416 const size_t unrollSize = sizeof(size_t) * 4;
3417 const size_t unrollMask = unrollSize - 1;
3418 const size_t prefixLength = length & unrollMask;
3419 size_t i;
3420 if (length == 1) return 1;
3421 /* Check if prefix is RLE first before using unrolled loop */
3422 if (prefixLength && ZSTD_count(ip+1, ip, ip+prefixLength) != prefixLength-1) {
3423 return 0;
3424 }
3425 for (i = prefixLength; i != length; i += unrollSize) {
3426 size_t u;
3427 for (u = 0; u < unrollSize; u += sizeof(size_t)) {
3428 if (MEM_readST(ip + i + u) != valueST) {
3429 return 0;
3430 } } }
3431 return 1;
3432}
3433
3434/* Returns true if the given block may be RLE.
3435 * This is just a heuristic based on the compressibility.
3436 * It may return both false positives and false negatives.
3437 */
3438static int ZSTD_maybeRLE(seqStore_t const* seqStore)
3439{
3440 size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
3441 size_t const nbLits = (size_t)(seqStore->lit - seqStore->litStart);
3442
3443 return nbSeqs < 4 && nbLits < 10;
3444}
3445
3446static void
3447ZSTD_blockState_confirmRepcodesAndEntropyTables(ZSTD_blockState_t* const bs)
3448{
3449 ZSTD_compressedBlockState_t* const tmp = bs->prevCBlock;
3450 bs->prevCBlock = bs->nextCBlock;
3451 bs->nextCBlock = tmp;
3452}
3453
3454/* Writes the block header */
3455static void
3456writeBlockHeader(void* op, size_t cSize, size_t blockSize, U32 lastBlock)
3457{
3458 U32 const cBlockHeader = cSize == 1 ?
3459 lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
3460 lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
3461 MEM_writeLE24(op, cBlockHeader);
3462 DEBUGLOG(3, "writeBlockHeader: cSize: %zu blockSize: %zu lastBlock: %u", cSize, blockSize, lastBlock);
3463}
3464
3465/** ZSTD_buildBlockEntropyStats_literals() :
3466 * Builds entropy for the literals.
3467 * Stores literals block type (raw, rle, compressed, repeat) and
3468 * huffman description table to hufMetadata.
3469 * Requires ENTROPY_WORKSPACE_SIZE workspace
3470 * @return : size of huffman description table, or an error code
3471 */
3472static size_t
3473ZSTD_buildBlockEntropyStats_literals(void* const src, size_t srcSize,
3474 const ZSTD_hufCTables_t* prevHuf,
3475 ZSTD_hufCTables_t* nextHuf,
3476 ZSTD_hufCTablesMetadata_t* hufMetadata,
3477 const int literalsCompressionIsDisabled,
3478 void* workspace, size_t wkspSize,
3479 int hufFlags)
3480{
3481 BYTE* const wkspStart = (BYTE*)workspace;
3482 BYTE* const wkspEnd = wkspStart + wkspSize;
3483 BYTE* const countWkspStart = wkspStart;
3484 unsigned* const countWksp = (unsigned*)workspace;
3485 const size_t countWkspSize = (HUF_SYMBOLVALUE_MAX + 1) * sizeof(unsigned);
3486 BYTE* const nodeWksp = countWkspStart + countWkspSize;
3487 const size_t nodeWkspSize = (size_t)(wkspEnd - nodeWksp);
3488 unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
3489 unsigned huffLog = LitHufLog;
3490 HUF_repeat repeat = prevHuf->repeatMode;
3491 DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_literals (srcSize=%zu)", srcSize);
3492
3493 /* Prepare nextEntropy assuming reusing the existing table */
3494 ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
3495
3496 if (literalsCompressionIsDisabled) {
3497 DEBUGLOG(5, "set_basic - disabled");
3498 hufMetadata->hType = set_basic;
3499 return 0;
3500 }
3501
3502 /* small ? don't even attempt compression (speed opt) */
3503#ifndef COMPRESS_LITERALS_SIZE_MIN
3504# define COMPRESS_LITERALS_SIZE_MIN 63 /* heuristic */
3505#endif
3506 { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
3507 if (srcSize <= minLitSize) {
3508 DEBUGLOG(5, "set_basic - too small");
3509 hufMetadata->hType = set_basic;
3510 return 0;
3511 } }
3512
3513 /* Scan input and build symbol stats */
3514 { size_t const largest =
3515 HIST_count_wksp (countWksp, &maxSymbolValue,
3516 (const BYTE*)src, srcSize,
3517 workspace, wkspSize);
3518 FORWARD_IF_ERROR(largest, "HIST_count_wksp failed");
3519 if (largest == srcSize) {
3520 /* only one literal symbol */
3521 DEBUGLOG(5, "set_rle");
3522 hufMetadata->hType = set_rle;
3523 return 0;
3524 }
3525 if (largest <= (srcSize >> 7)+4) {
3526 /* heuristic: likely not compressible */
3527 DEBUGLOG(5, "set_basic - no gain");
3528 hufMetadata->hType = set_basic;
3529 return 0;
3530 } }
3531
3532 /* Validate the previous Huffman table */
3533 if (repeat == HUF_repeat_check
3534 && !HUF_validateCTable((HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue)) {
3535 repeat = HUF_repeat_none;
3536 }
3537
3538 /* Build Huffman Tree */
3539 ZSTD_memset(nextHuf->CTable, 0, sizeof(nextHuf->CTable));
3540 huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, nodeWksp, nodeWkspSize, nextHuf->CTable, countWksp, hufFlags);
3541 assert(huffLog <= LitHufLog);
3542 { size_t const maxBits = HUF_buildCTable_wksp((HUF_CElt*)nextHuf->CTable, countWksp,
3543 maxSymbolValue, huffLog,
3544 nodeWksp, nodeWkspSize);
3545 FORWARD_IF_ERROR(maxBits, "HUF_buildCTable_wksp");
3546 huffLog = (U32)maxBits;
3547 }
3548 { /* Build and write the CTable */
3549 size_t const newCSize = HUF_estimateCompressedSize(
3550 (HUF_CElt*)nextHuf->CTable, countWksp, maxSymbolValue);
3551 size_t const hSize = HUF_writeCTable_wksp(
3552 hufMetadata->hufDesBuffer, sizeof(hufMetadata->hufDesBuffer),
3553 (HUF_CElt*)nextHuf->CTable, maxSymbolValue, huffLog,
3554 nodeWksp, nodeWkspSize);
3555 /* Check against repeating the previous CTable */
3556 if (repeat != HUF_repeat_none) {
3557 size_t const oldCSize = HUF_estimateCompressedSize(
3558 (HUF_CElt const*)prevHuf->CTable, countWksp, maxSymbolValue);
3559 if (oldCSize < srcSize && (oldCSize <= hSize + newCSize || hSize + 12 >= srcSize)) {
3560 DEBUGLOG(5, "set_repeat - smaller");
3561 ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
3562 hufMetadata->hType = set_repeat;
3563 return 0;
3564 } }
3565 if (newCSize + hSize >= srcSize) {
3566 DEBUGLOG(5, "set_basic - no gains");
3567 ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
3568 hufMetadata->hType = set_basic;
3569 return 0;
3570 }
3571 DEBUGLOG(5, "set_compressed (hSize=%u)", (U32)hSize);
3572 hufMetadata->hType = set_compressed;
3573 nextHuf->repeatMode = HUF_repeat_check;
3574 return hSize;
3575 }
3576}
3577
3578
3579/* ZSTD_buildDummySequencesStatistics():
3580 * Returns a ZSTD_symbolEncodingTypeStats_t with all encoding types as set_basic,
3581 * and updates nextEntropy to the appropriate repeatMode.
3582 */
3583static ZSTD_symbolEncodingTypeStats_t
3584ZSTD_buildDummySequencesStatistics(ZSTD_fseCTables_t* nextEntropy)
3585{
3586 ZSTD_symbolEncodingTypeStats_t stats = {set_basic, set_basic, set_basic, 0, 0, 0};
3587 nextEntropy->litlength_repeatMode = FSE_repeat_none;
3588 nextEntropy->offcode_repeatMode = FSE_repeat_none;
3589 nextEntropy->matchlength_repeatMode = FSE_repeat_none;
3590 return stats;
3591}
3592
3593/** ZSTD_buildBlockEntropyStats_sequences() :
3594 * Builds entropy for the sequences.
3595 * Stores symbol compression modes and fse table to fseMetadata.
3596 * Requires ENTROPY_WORKSPACE_SIZE wksp.
3597 * @return : size of fse tables or error code */
3598static size_t
3599ZSTD_buildBlockEntropyStats_sequences(
3600 const seqStore_t* seqStorePtr,
3601 const ZSTD_fseCTables_t* prevEntropy,
3602 ZSTD_fseCTables_t* nextEntropy,
3603 const ZSTD_CCtx_params* cctxParams,
3604 ZSTD_fseCTablesMetadata_t* fseMetadata,
3605 void* workspace, size_t wkspSize)
3606{
3607 ZSTD_strategy const strategy = cctxParams->cParams.strategy;
3608 size_t const nbSeq = (size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
3609 BYTE* const ostart = fseMetadata->fseTablesBuffer;
3610 BYTE* const oend = ostart + sizeof(fseMetadata->fseTablesBuffer);
3611 BYTE* op = ostart;
3612 unsigned* countWorkspace = (unsigned*)workspace;
3613 unsigned* entropyWorkspace = countWorkspace + (MaxSeq + 1);
3614 size_t entropyWorkspaceSize = wkspSize - (MaxSeq + 1) * sizeof(*countWorkspace);
3615 ZSTD_symbolEncodingTypeStats_t stats;
3616
3617 DEBUGLOG(5, "ZSTD_buildBlockEntropyStats_sequences (nbSeq=%zu)", nbSeq);
3618 stats = nbSeq != 0 ? ZSTD_buildSequencesStatistics(seqStorePtr, nbSeq,
3619 prevEntropy, nextEntropy, op, oend,
3620 strategy, countWorkspace,
3621 entropyWorkspace, entropyWorkspaceSize)
3622 : ZSTD_buildDummySequencesStatistics(nextEntropy);
3623 FORWARD_IF_ERROR(stats.size, "ZSTD_buildSequencesStatistics failed!");
3624 fseMetadata->llType = (symbolEncodingType_e) stats.LLtype;
3625 fseMetadata->ofType = (symbolEncodingType_e) stats.Offtype;
3626 fseMetadata->mlType = (symbolEncodingType_e) stats.MLtype;
3627 fseMetadata->lastCountSize = stats.lastCountSize;
3628 return stats.size;
3629}
3630
3631
3632/** ZSTD_buildBlockEntropyStats() :
3633 * Builds entropy for the block.
3634 * Requires workspace size ENTROPY_WORKSPACE_SIZE
3635 * @return : 0 on success, or an error code
3636 * Note : also employed in superblock
3637 */
3638size_t ZSTD_buildBlockEntropyStats(
3639 const seqStore_t* seqStorePtr,
3640 const ZSTD_entropyCTables_t* prevEntropy,
3641 ZSTD_entropyCTables_t* nextEntropy,
3642 const ZSTD_CCtx_params* cctxParams,
3643 ZSTD_entropyCTablesMetadata_t* entropyMetadata,
3644 void* workspace, size_t wkspSize)
3645{
3646 size_t const litSize = (size_t)(seqStorePtr->lit - seqStorePtr->litStart);
3647 int const huf_useOptDepth = (cctxParams->cParams.strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD);
3648 int const hufFlags = huf_useOptDepth ? HUF_flags_optimalDepth : 0;
3649
3650 entropyMetadata->hufMetadata.hufDesSize =
3651 ZSTD_buildBlockEntropyStats_literals(seqStorePtr->litStart, litSize,
3652 &prevEntropy->huf, &nextEntropy->huf,
3653 &entropyMetadata->hufMetadata,
3654 ZSTD_literalsCompressionIsDisabled(cctxParams),
3655 workspace, wkspSize, hufFlags);
3656
3657 FORWARD_IF_ERROR(entropyMetadata->hufMetadata.hufDesSize, "ZSTD_buildBlockEntropyStats_literals failed");
3658 entropyMetadata->fseMetadata.fseTablesSize =
3659 ZSTD_buildBlockEntropyStats_sequences(seqStorePtr,
3660 &prevEntropy->fse, &nextEntropy->fse,
3661 cctxParams,
3662 &entropyMetadata->fseMetadata,
3663 workspace, wkspSize);
3664 FORWARD_IF_ERROR(entropyMetadata->fseMetadata.fseTablesSize, "ZSTD_buildBlockEntropyStats_sequences failed");
3665 return 0;
3666}
3667
3668/* Returns the size estimate for the literals section (header + content) of a block */
3669static size_t
3670ZSTD_estimateBlockSize_literal(const BYTE* literals, size_t litSize,
3671 const ZSTD_hufCTables_t* huf,
3672 const ZSTD_hufCTablesMetadata_t* hufMetadata,
3673 void* workspace, size_t wkspSize,
3674 int writeEntropy)
3675{
3676 unsigned* const countWksp = (unsigned*)workspace;
3677 unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
3678 size_t literalSectionHeaderSize = 3 + (litSize >= 1 KB) + (litSize >= 16 KB);
3679 U32 singleStream = litSize < 256;
3680
3681 if (hufMetadata->hType == set_basic) return litSize;
3682 else if (hufMetadata->hType == set_rle) return 1;
3683 else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
3684 size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
3685 if (ZSTD_isError(largest)) return litSize;
3686 { size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
3687 if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
3688 if (!singleStream) cLitSizeEstimate += 6; /* multi-stream huffman uses 6-byte jump table */
3689 return cLitSizeEstimate + literalSectionHeaderSize;
3690 } }
3691 assert(0); /* impossible */
3692 return 0;
3693}
3694
3695/* Returns the size estimate for the FSE-compressed symbols (of, ml, ll) of a block */
3696static size_t
3697ZSTD_estimateBlockSize_symbolType(symbolEncodingType_e type,
3698 const BYTE* codeTable, size_t nbSeq, unsigned maxCode,
3699 const FSE_CTable* fseCTable,
3700 const U8* additionalBits,
3701 short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
3702 void* workspace, size_t wkspSize)
3703{
3704 unsigned* const countWksp = (unsigned*)workspace;
3705 const BYTE* ctp = codeTable;
3706 const BYTE* const ctStart = ctp;
3707 const BYTE* const ctEnd = ctStart + nbSeq;
3708 size_t cSymbolTypeSizeEstimateInBits = 0;
3709 unsigned max = maxCode;
3710
3711 HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize); /* can't fail */
3712 if (type == set_basic) {
3713 /* We selected this encoding type, so it must be valid. */
3714 assert(max <= defaultMax);
3715 (void)defaultMax;
3716 cSymbolTypeSizeEstimateInBits = ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max);
3717 } else if (type == set_rle) {
3718 cSymbolTypeSizeEstimateInBits = 0;
3719 } else if (type == set_compressed || type == set_repeat) {
3720 cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
3721 }
3722 if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) {
3723 return nbSeq * 10;
3724 }
3725 while (ctp < ctEnd) {
3726 if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
3727 else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
3728 ctp++;
3729 }
3730 return cSymbolTypeSizeEstimateInBits >> 3;
3731}
3732
3733/* Returns the size estimate for the sequences section (header + content) of a block */
3734static size_t
3735ZSTD_estimateBlockSize_sequences(const BYTE* ofCodeTable,
3736 const BYTE* llCodeTable,
3737 const BYTE* mlCodeTable,
3738 size_t nbSeq,
3739 const ZSTD_fseCTables_t* fseTables,
3740 const ZSTD_fseCTablesMetadata_t* fseMetadata,
3741 void* workspace, size_t wkspSize,
3742 int writeEntropy)
3743{
3744 size_t sequencesSectionHeaderSize = 1 /* seqHead */ + 1 /* min seqSize size */ + (nbSeq >= 128) + (nbSeq >= LONGNBSEQ);
3745 size_t cSeqSizeEstimate = 0;
3746 cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, nbSeq, MaxOff,
3747 fseTables->offcodeCTable, NULL,
3748 OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
3749 workspace, wkspSize);
3750 cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->llType, llCodeTable, nbSeq, MaxLL,
3751 fseTables->litlengthCTable, LL_bits,
3752 LL_defaultNorm, LL_defaultNormLog, MaxLL,
3753 workspace, wkspSize);
3754 cSeqSizeEstimate += ZSTD_estimateBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, nbSeq, MaxML,
3755 fseTables->matchlengthCTable, ML_bits,
3756 ML_defaultNorm, ML_defaultNormLog, MaxML,
3757 workspace, wkspSize);
3758 if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
3759 return cSeqSizeEstimate + sequencesSectionHeaderSize;
3760}
3761
3762/* Returns the size estimate for a given stream of literals, of, ll, ml */
3763static size_t
3764ZSTD_estimateBlockSize(const BYTE* literals, size_t litSize,
3765 const BYTE* ofCodeTable,
3766 const BYTE* llCodeTable,
3767 const BYTE* mlCodeTable,
3768 size_t nbSeq,
3769 const ZSTD_entropyCTables_t* entropy,
3770 const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
3771 void* workspace, size_t wkspSize,
3772 int writeLitEntropy, int writeSeqEntropy)
3773{
3774 size_t const literalsSize = ZSTD_estimateBlockSize_literal(literals, litSize,
3775 &entropy->huf, &entropyMetadata->hufMetadata,
3776 workspace, wkspSize, writeLitEntropy);
3777 size_t const seqSize = ZSTD_estimateBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
3778 nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
3779 workspace, wkspSize, writeSeqEntropy);
3780 return seqSize + literalsSize + ZSTD_blockHeaderSize;
3781}
3782
3783/* Builds entropy statistics and uses them for blocksize estimation.
3784 *
3785 * @return: estimated compressed size of the seqStore, or a zstd error.
3786 */
3787static size_t
3788ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(seqStore_t* seqStore, ZSTD_CCtx* zc)
3789{
3790 ZSTD_entropyCTablesMetadata_t* const entropyMetadata = &zc->blockSplitCtx.entropyMetadata;
3791 DEBUGLOG(6, "ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize()");
3792 FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(seqStore,
3793 &zc->blockState.prevCBlock->entropy,
3794 &zc->blockState.nextCBlock->entropy,
3795 &zc->appliedParams,
3796 entropyMetadata,
3797 zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE), "");
3798 return ZSTD_estimateBlockSize(
3799 seqStore->litStart, (size_t)(seqStore->lit - seqStore->litStart),
3800 seqStore->ofCode, seqStore->llCode, seqStore->mlCode,
3801 (size_t)(seqStore->sequences - seqStore->sequencesStart),
3802 &zc->blockState.nextCBlock->entropy,
3803 entropyMetadata,
3804 zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE,
3805 (int)(entropyMetadata->hufMetadata.hType == set_compressed), 1);
3806}
3807
3808/* Returns literals bytes represented in a seqStore */
3809static size_t ZSTD_countSeqStoreLiteralsBytes(const seqStore_t* const seqStore)
3810{
3811 size_t literalsBytes = 0;
3812 size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
3813 size_t i;
3814 for (i = 0; i < nbSeqs; ++i) {
3815 seqDef const seq = seqStore->sequencesStart[i];
3816 literalsBytes += seq.litLength;
3817 if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_literalLength) {
3818 literalsBytes += 0x10000;
3819 } }
3820 return literalsBytes;
3821}
3822
3823/* Returns match bytes represented in a seqStore */
3824static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore)
3825{
3826 size_t matchBytes = 0;
3827 size_t const nbSeqs = (size_t)(seqStore->sequences - seqStore->sequencesStart);
3828 size_t i;
3829 for (i = 0; i < nbSeqs; ++i) {
3830 seqDef seq = seqStore->sequencesStart[i];
3831 matchBytes += seq.mlBase + MINMATCH;
3832 if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) {
3833 matchBytes += 0x10000;
3834 } }
3835 return matchBytes;
3836}
3837
3838/* Derives the seqStore that is a chunk of the originalSeqStore from [startIdx, endIdx).
3839 * Stores the result in resultSeqStore.
3840 */
3841static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
3842 const seqStore_t* originalSeqStore,
3843 size_t startIdx, size_t endIdx)
3844{
3845 *resultSeqStore = *originalSeqStore;
3846 if (startIdx > 0) {
3847 resultSeqStore->sequences = originalSeqStore->sequencesStart + startIdx;
3848 resultSeqStore->litStart += ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
3849 }
3850
3851 /* Move longLengthPos into the correct position if necessary */
3852 if (originalSeqStore->longLengthType != ZSTD_llt_none) {
3853 if (originalSeqStore->longLengthPos < startIdx || originalSeqStore->longLengthPos > endIdx) {
3854 resultSeqStore->longLengthType = ZSTD_llt_none;
3855 } else {
3856 resultSeqStore->longLengthPos -= (U32)startIdx;
3857 }
3858 }
3859 resultSeqStore->sequencesStart = originalSeqStore->sequencesStart + startIdx;
3860 resultSeqStore->sequences = originalSeqStore->sequencesStart + endIdx;
3861 if (endIdx == (size_t)(originalSeqStore->sequences - originalSeqStore->sequencesStart)) {
3862 /* This accounts for possible last literals if the derived chunk reaches the end of the block */
3863 assert(resultSeqStore->lit == originalSeqStore->lit);
3864 } else {
3865 size_t const literalsBytes = ZSTD_countSeqStoreLiteralsBytes(resultSeqStore);
3866 resultSeqStore->lit = resultSeqStore->litStart + literalsBytes;
3867 }
3868 resultSeqStore->llCode += startIdx;
3869 resultSeqStore->mlCode += startIdx;
3870 resultSeqStore->ofCode += startIdx;
3871}
3872
3873/**
3874 * Returns the raw offset represented by the combination of offBase, ll0, and repcode history.
3875 * offBase must represent a repcode in the numeric representation of ZSTD_storeSeq().
3876 */
3877static U32
3878ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offBase, const U32 ll0)
3879{
3880 U32 const adjustedRepCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0; /* [ 0 - 3 ] */
3881 assert(OFFBASE_IS_REPCODE(offBase));
3882 if (adjustedRepCode == ZSTD_REP_NUM) {
3883 assert(ll0);
3884 /* litlength == 0 and offCode == 2 implies selection of first repcode - 1
3885 * This is only valid if it results in a valid offset value, aka > 0.
3886 * Note : it may happen that `rep[0]==1` in exceptional circumstances.
3887 * In which case this function will return 0, which is an invalid offset.
3888 * It's not an issue though, since this value will be
3889 * compared and discarded within ZSTD_seqStore_resolveOffCodes().
3890 */
3891 return rep[0] - 1;
3892 }
3893 return rep[adjustedRepCode];
3894}
3895
3896/**
3897 * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise
3898 * due to emission of RLE/raw blocks that disturb the offset history,
3899 * and replaces any repcodes within the seqStore that may be invalid.
3900 *
3901 * dRepcodes are updated as would be on the decompression side.
3902 * cRepcodes are updated exactly in accordance with the seqStore.
3903 *
3904 * Note : this function assumes seq->offBase respects the following numbering scheme :
3905 * 0 : invalid
3906 * 1-3 : repcode 1-3
3907 * 4+ : real_offset+3
3908 */
3909static void
3910ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes,
3911 const seqStore_t* const seqStore, U32 const nbSeq)
3912{
3913 U32 idx = 0;
3914 U32 const longLitLenIdx = seqStore->longLengthType == ZSTD_llt_literalLength ? seqStore->longLengthPos : nbSeq;
3915 for (; idx < nbSeq; ++idx) {
3916 seqDef* const seq = seqStore->sequencesStart + idx;
3917 U32 const ll0 = (seq->litLength == 0) && (idx != longLitLenIdx);
3918 U32 const offBase = seq->offBase;
3919 assert(offBase > 0);
3920 if (OFFBASE_IS_REPCODE(offBase)) {
3921 U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offBase, ll0);
3922 U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offBase, ll0);
3923 /* Adjust simulated decompression repcode history if we come across a mismatch. Replace
3924 * the repcode with the offset it actually references, determined by the compression
3925 * repcode history.
3926 */
3927 if (dRawOffset != cRawOffset) {
3928 seq->offBase = OFFSET_TO_OFFBASE(cRawOffset);
3929 }
3930 }
3931 /* Compression repcode history is always updated with values directly from the unmodified seqStore.
3932 * Decompression repcode history may use modified seq->offset value taken from compression repcode history.
3933 */
3934 ZSTD_updateRep(dRepcodes->rep, seq->offBase, ll0);
3935 ZSTD_updateRep(cRepcodes->rep, offBase, ll0);
3936 }
3937}
3938
3939/* ZSTD_compressSeqStore_singleBlock():
3940 * Compresses a seqStore into a block with a block header, into the buffer dst.
3941 *
3942 * Returns the total size of that block (including header) or a ZSTD error code.
3943 */
3944static size_t
3945ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc,
3946 const seqStore_t* const seqStore,
3947 repcodes_t* const dRep, repcodes_t* const cRep,
3948 void* dst, size_t dstCapacity,
3949 const void* src, size_t srcSize,
3950 U32 lastBlock, U32 isPartition)
3951{
3952 const U32 rleMaxLength = 25;
3953 BYTE* op = (BYTE*)dst;
3954 const BYTE* ip = (const BYTE*)src;
3955 size_t cSize;
3956 size_t cSeqsSize;
3957
3958 /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */
3959 repcodes_t const dRepOriginal = *dRep;
3960 DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock");
3961 if (isPartition)
3962 ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart));
3963
3964 RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "Block header doesn't fit");
3965 cSeqsSize = ZSTD_entropyCompressSeqStore(seqStore,
3966 &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
3967 &zc->appliedParams,
3968 op + ZSTD_blockHeaderSize, dstCapacity - ZSTD_blockHeaderSize,
3969 srcSize,
3970 zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
3971 zc->bmi2);
3972 FORWARD_IF_ERROR(cSeqsSize, "ZSTD_entropyCompressSeqStore failed!");
3973
3974 if (!zc->isFirstBlock &&
3975 cSeqsSize < rleMaxLength &&
3976 ZSTD_isRLE((BYTE const*)src, srcSize)) {
3977 /* We don't want to emit our first block as a RLE even if it qualifies because
3978 * doing so will cause the decoder (cli only) to throw a "should consume all input error."
3979 * This is only an issue for zstd <= v1.4.3
3980 */
3981 cSeqsSize = 1;
3982 }
3983
3984 if (zc->seqCollector.collectSequences) {
3985 ZSTD_copyBlockSequences(zc);
3986 ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
3987 return 0;
3988 }
3989
3990 if (cSeqsSize == 0) {
3991 cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, srcSize, lastBlock);
3992 FORWARD_IF_ERROR(cSize, "Nocompress block failed");
3993 DEBUGLOG(4, "Writing out nocompress block, size: %zu", cSize);
3994 *dRep = dRepOriginal; /* reset simulated decompression repcode history */
3995 } else if (cSeqsSize == 1) {
3996 cSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, srcSize, lastBlock);
3997 FORWARD_IF_ERROR(cSize, "RLE compress block failed");
3998 DEBUGLOG(4, "Writing out RLE block, size: %zu", cSize);
3999 *dRep = dRepOriginal; /* reset simulated decompression repcode history */
4000 } else {
4001 ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
4002 writeBlockHeader(op, cSeqsSize, srcSize, lastBlock);
4003 cSize = ZSTD_blockHeaderSize + cSeqsSize;
4004 DEBUGLOG(4, "Writing out compressed block, size: %zu", cSize);
4005 }
4006
4007 if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
4008 zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
4009
4010 return cSize;
4011}
4012
4013/* Struct to keep track of where we are in our recursive calls. */
4014typedef struct {
4015 U32* splitLocations; /* Array of split indices */
4016 size_t idx; /* The current index within splitLocations being worked on */
4017} seqStoreSplits;
4018
4019#define MIN_SEQUENCES_BLOCK_SPLITTING 300
4020
4021/* Helper function to perform the recursive search for block splits.
4022 * Estimates the cost of seqStore prior to split, and estimates the cost of splitting the sequences in half.
4023 * If advantageous to split, then we recurse down the two sub-blocks.
4024 * If not, or if an error occurred in estimation, then we do not recurse.
4025 *
4026 * Note: The recursion depth is capped by a heuristic minimum number of sequences,
4027 * defined by MIN_SEQUENCES_BLOCK_SPLITTING.
4028 * In theory, this means the absolute largest recursion depth is 10 == log2(maxNbSeqInBlock/MIN_SEQUENCES_BLOCK_SPLITTING).
4029 * In practice, recursion depth usually doesn't go beyond 4.
4030 *
4031 * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS.
4032 * At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize
4033 * maximum of 128 KB, this value is actually impossible to reach.
4034 */
4035static void
4036ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx,
4037 ZSTD_CCtx* zc, const seqStore_t* origSeqStore)
4038{
4039 seqStore_t* const fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk;
4040 seqStore_t* const firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore;
4041 seqStore_t* const secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;
4042 size_t estimatedOriginalSize;
4043 size_t estimatedFirstHalfSize;
4044 size_t estimatedSecondHalfSize;
4045 size_t midIdx = (startIdx + endIdx)/2;
4046
4047 DEBUGLOG(5, "ZSTD_deriveBlockSplitsHelper: startIdx=%zu endIdx=%zu", startIdx, endIdx);
4048 assert(endIdx >= startIdx);
4049 if (endIdx - startIdx < MIN_SEQUENCES_BLOCK_SPLITTING || splits->idx >= ZSTD_MAX_NB_BLOCK_SPLITS) {
4050 DEBUGLOG(6, "ZSTD_deriveBlockSplitsHelper: Too few sequences (%zu)", endIdx - startIdx);
4051 return;
4052 }
4053 ZSTD_deriveSeqStoreChunk(fullSeqStoreChunk, origSeqStore, startIdx, endIdx);
4054 ZSTD_deriveSeqStoreChunk(firstHalfSeqStore, origSeqStore, startIdx, midIdx);
4055 ZSTD_deriveSeqStoreChunk(secondHalfSeqStore, origSeqStore, midIdx, endIdx);
4056 estimatedOriginalSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(fullSeqStoreChunk, zc);
4057 estimatedFirstHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(firstHalfSeqStore, zc);
4058 estimatedSecondHalfSize = ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(secondHalfSeqStore, zc);
4059 DEBUGLOG(5, "Estimated original block size: %zu -- First half split: %zu -- Second half split: %zu",
4060 estimatedOriginalSize, estimatedFirstHalfSize, estimatedSecondHalfSize);
4061 if (ZSTD_isError(estimatedOriginalSize) || ZSTD_isError(estimatedFirstHalfSize) || ZSTD_isError(estimatedSecondHalfSize)) {
4062 return;
4063 }
4064 if (estimatedFirstHalfSize + estimatedSecondHalfSize < estimatedOriginalSize) {
4065 DEBUGLOG(5, "split decided at seqNb:%zu", midIdx);
4066 ZSTD_deriveBlockSplitsHelper(splits, startIdx, midIdx, zc, origSeqStore);
4067 splits->splitLocations[splits->idx] = (U32)midIdx;
4068 splits->idx++;
4069 ZSTD_deriveBlockSplitsHelper(splits, midIdx, endIdx, zc, origSeqStore);
4070 }
4071}
4072
4073/* Base recursive function.
4074 * Populates a table with intra-block partition indices that can improve compression ratio.
4075 *
4076 * @return: number of splits made (which equals the size of the partition table - 1).
4077 */
4078static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq)
4079{
4080 seqStoreSplits splits;
4081 splits.splitLocations = partitions;
4082 splits.idx = 0;
4083 if (nbSeq <= 4) {
4084 DEBUGLOG(5, "ZSTD_deriveBlockSplits: Too few sequences to split (%u <= 4)", nbSeq);
4085 /* Refuse to try and split anything with less than 4 sequences */
4086 return 0;
4087 }
4088 ZSTD_deriveBlockSplitsHelper(&splits, 0, nbSeq, zc, &zc->seqStore);
4089 splits.splitLocations[splits.idx] = nbSeq;
4090 DEBUGLOG(5, "ZSTD_deriveBlockSplits: final nb partitions: %zu", splits.idx+1);
4091 return splits.idx;
4092}
4093
4094/* ZSTD_compressBlock_splitBlock():
4095 * Attempts to split a given block into multiple blocks to improve compression ratio.
4096 *
4097 * Returns combined size of all blocks (which includes headers), or a ZSTD error code.
4098 */
4099static size_t
4100ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc,
4101 void* dst, size_t dstCapacity,
4102 const void* src, size_t blockSize,
4103 U32 lastBlock, U32 nbSeq)
4104{
4105 size_t cSize = 0;
4106 const BYTE* ip = (const BYTE*)src;
4107 BYTE* op = (BYTE*)dst;
4108 size_t i = 0;
4109 size_t srcBytesTotal = 0;
4110 U32* const partitions = zc->blockSplitCtx.partitions; /* size == ZSTD_MAX_NB_BLOCK_SPLITS */
4111 seqStore_t* const nextSeqStore = &zc->blockSplitCtx.nextSeqStore;
4112 seqStore_t* const currSeqStore = &zc->blockSplitCtx.currSeqStore;
4113 size_t const numSplits = ZSTD_deriveBlockSplits(zc, partitions, nbSeq);
4114
4115 /* If a block is split and some partitions are emitted as RLE/uncompressed, then repcode history
4116 * may become invalid. In order to reconcile potentially invalid repcodes, we keep track of two
4117 * separate repcode histories that simulate repcode history on compression and decompression side,
4118 * and use the histories to determine whether we must replace a particular repcode with its raw offset.
4119 *
4120 * 1) cRep gets updated for each partition, regardless of whether the block was emitted as uncompressed
4121 * or RLE. This allows us to retrieve the offset value that an invalid repcode references within
4122 * a nocompress/RLE block.
4123 * 2) dRep gets updated only for compressed partitions, and when a repcode gets replaced, will use
4124 * the replacement offset value rather than the original repcode to update the repcode history.
4125 * dRep also will be the final repcode history sent to the next block.
4126 *
4127 * See ZSTD_seqStore_resolveOffCodes() for more details.
4128 */
4129 repcodes_t dRep;
4130 repcodes_t cRep;
4131 ZSTD_memcpy(dRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
4132 ZSTD_memcpy(cRep.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
4133 ZSTD_memset(nextSeqStore, 0, sizeof(seqStore_t));
4134
4135 DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
4136 (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
4137 (unsigned)zc->blockState.matchState.nextToUpdate);
4138
4139 if (numSplits == 0) {
4140 size_t cSizeSingleBlock =
4141 ZSTD_compressSeqStore_singleBlock(zc, &zc->seqStore,
4142 &dRep, &cRep,
4143 op, dstCapacity,
4144 ip, blockSize,
4145 lastBlock, 0 /* isPartition */);
4146 FORWARD_IF_ERROR(cSizeSingleBlock, "Compressing single block from splitBlock_internal() failed!");
4147 DEBUGLOG(5, "ZSTD_compressBlock_splitBlock_internal: No splits");
4148 assert(zc->blockSize <= ZSTD_BLOCKSIZE_MAX);
4149 assert(cSizeSingleBlock <= zc->blockSize + ZSTD_blockHeaderSize);
4150 return cSizeSingleBlock;
4151 }
4152
4153 ZSTD_deriveSeqStoreChunk(currSeqStore, &zc->seqStore, 0, partitions[0]);
4154 for (i = 0; i <= numSplits; ++i) {
4155 size_t cSizeChunk;
4156 U32 const lastPartition = (i == numSplits);
4157 U32 lastBlockEntireSrc = 0;
4158
4159 size_t srcBytes = ZSTD_countSeqStoreLiteralsBytes(currSeqStore) + ZSTD_countSeqStoreMatchBytes(currSeqStore);
4160 srcBytesTotal += srcBytes;
4161 if (lastPartition) {
4162 /* This is the final partition, need to account for possible last literals */
4163 srcBytes += blockSize - srcBytesTotal;
4164 lastBlockEntireSrc = lastBlock;
4165 } else {
4166 ZSTD_deriveSeqStoreChunk(nextSeqStore, &zc->seqStore, partitions[i], partitions[i+1]);
4167 }
4168
4169 cSizeChunk = ZSTD_compressSeqStore_singleBlock(zc, currSeqStore,
4170 &dRep, &cRep,
4171 op, dstCapacity,
4172 ip, srcBytes,
4173 lastBlockEntireSrc, 1 /* isPartition */);
4174 DEBUGLOG(5, "Estimated size: %zu vs %zu : actual size",
4175 ZSTD_buildEntropyStatisticsAndEstimateSubBlockSize(currSeqStore, zc), cSizeChunk);
4176 FORWARD_IF_ERROR(cSizeChunk, "Compressing chunk failed!");
4177
4178 ip += srcBytes;
4179 op += cSizeChunk;
4180 dstCapacity -= cSizeChunk;
4181 cSize += cSizeChunk;
4182 *currSeqStore = *nextSeqStore;
4183 assert(cSizeChunk <= zc->blockSize + ZSTD_blockHeaderSize);
4184 }
4185 /* cRep and dRep may have diverged during the compression.
4186 * If so, we use the dRep repcodes for the next block.
4187 */
4188 ZSTD_memcpy(zc->blockState.prevCBlock->rep, dRep.rep, sizeof(repcodes_t));
4189 return cSize;
4190}
4191
4192static size_t
4193ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
4194 void* dst, size_t dstCapacity,
4195 const void* src, size_t srcSize, U32 lastBlock)
4196{
4197 U32 nbSeq;
4198 size_t cSize;
4199 DEBUGLOG(4, "ZSTD_compressBlock_splitBlock");
4200 assert(zc->appliedParams.useBlockSplitter == ZSTD_ps_enable);
4201
4202 { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
4203 FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
4204 if (bss == ZSTDbss_noCompress) {
4205 if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
4206 zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
4207 cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
4208 FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
4209 DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block");
4210 return cSize;
4211 }
4212 nbSeq = (U32)(zc->seqStore.sequences - zc->seqStore.sequencesStart);
4213 }
4214
4215 cSize = ZSTD_compressBlock_splitBlock_internal(zc, dst, dstCapacity, src, srcSize, lastBlock, nbSeq);
4216 FORWARD_IF_ERROR(cSize, "Splitting blocks failed!");
4217 return cSize;
4218}
4219
4220static size_t
4221ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
4222 void* dst, size_t dstCapacity,
4223 const void* src, size_t srcSize, U32 frame)
4224{
4225 /* This is an estimated upper bound for the length of an rle block.
4226 * This isn't the actual upper bound.
4227 * Finding the real threshold needs further investigation.
4228 */
4229 const U32 rleMaxLength = 25;
4230 size_t cSize;
4231 const BYTE* ip = (const BYTE*)src;
4232 BYTE* op = (BYTE*)dst;
4233 DEBUGLOG(5, "ZSTD_compressBlock_internal (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u)",
4234 (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit,
4235 (unsigned)zc->blockState.matchState.nextToUpdate);
4236
4237 { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
4238 FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
4239 if (bss == ZSTDbss_noCompress) { cSize = 0; goto out; }
4240 }
4241
4242 if (zc->seqCollector.collectSequences) {
4243 ZSTD_copyBlockSequences(zc);
4244 ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
4245 return 0;
4246 }
4247
4248 /* encode sequences and literals */
4249 cSize = ZSTD_entropyCompressSeqStore(&zc->seqStore,
4250 &zc->blockState.prevCBlock->entropy, &zc->blockState.nextCBlock->entropy,
4251 &zc->appliedParams,
4252 dst, dstCapacity,
4253 srcSize,
4254 zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
4255 zc->bmi2);
4256
4257 if (frame &&
4258 /* We don't want to emit our first block as a RLE even if it qualifies because
4259 * doing so will cause the decoder (cli only) to throw a "should consume all input error."
4260 * This is only an issue for zstd <= v1.4.3
4261 */
4262 !zc->isFirstBlock &&
4263 cSize < rleMaxLength &&
4264 ZSTD_isRLE(ip, srcSize))
4265 {
4266 cSize = 1;
4267 op[0] = ip[0];
4268 }
4269
4270out:
4271 if (!ZSTD_isError(cSize) && cSize > 1) {
4272 ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
4273 }
4274 /* We check that dictionaries have offset codes available for the first
4275 * block. After the first block, the offcode table might not have large
4276 * enough codes to represent the offsets in the data.
4277 */
4278 if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
4279 zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
4280
4281 return cSize;
4282}
4283
4284static size_t ZSTD_compressBlock_targetCBlockSize_body(ZSTD_CCtx* zc,
4285 void* dst, size_t dstCapacity,
4286 const void* src, size_t srcSize,
4287 const size_t bss, U32 lastBlock)
4288{
4289 DEBUGLOG(6, "Attempting ZSTD_compressSuperBlock()");
4290 if (bss == ZSTDbss_compress) {
4291 if (/* We don't want to emit our first block as a RLE even if it qualifies because
4292 * doing so will cause the decoder (cli only) to throw a "should consume all input error."
4293 * This is only an issue for zstd <= v1.4.3
4294 */
4295 !zc->isFirstBlock &&
4296 ZSTD_maybeRLE(&zc->seqStore) &&
4297 ZSTD_isRLE((BYTE const*)src, srcSize))
4298 {
4299 return ZSTD_rleCompressBlock(dst, dstCapacity, *(BYTE const*)src, srcSize, lastBlock);
4300 }
4301 /* Attempt superblock compression.
4302 *
4303 * Note that compressed size of ZSTD_compressSuperBlock() is not bound by the
4304 * standard ZSTD_compressBound(). This is a problem, because even if we have
4305 * space now, taking an extra byte now could cause us to run out of space later
4306 * and violate ZSTD_compressBound().
4307 *
4308 * Define blockBound(blockSize) = blockSize + ZSTD_blockHeaderSize.
4309 *
4310 * In order to respect ZSTD_compressBound() we must attempt to emit a raw
4311 * uncompressed block in these cases:
4312 * * cSize == 0: Return code for an uncompressed block.
4313 * * cSize == dstSize_tooSmall: We may have expanded beyond blockBound(srcSize).
4314 * ZSTD_noCompressBlock() will return dstSize_tooSmall if we are really out of
4315 * output space.
4316 * * cSize >= blockBound(srcSize): We have expanded the block too much so
4317 * emit an uncompressed block.
4318 */
4319 { size_t const cSize =
4320 ZSTD_compressSuperBlock(zc, dst, dstCapacity, src, srcSize, lastBlock);
4321 if (cSize != ERROR(dstSize_tooSmall)) {
4322 size_t const maxCSize =
4323 srcSize - ZSTD_minGain(srcSize, zc->appliedParams.cParams.strategy);
4324 FORWARD_IF_ERROR(cSize, "ZSTD_compressSuperBlock failed");
4325 if (cSize != 0 && cSize < maxCSize + ZSTD_blockHeaderSize) {
4326 ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
4327 return cSize;
4328 }
4329 }
4330 }
4331 } /* if (bss == ZSTDbss_compress)*/
4332
4333 DEBUGLOG(6, "Resorting to ZSTD_noCompressBlock()");
4334 /* Superblock compression failed, attempt to emit a single no compress block.
4335 * The decoder will be able to stream this block since it is uncompressed.
4336 */
4337 return ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
4338}
4339
4340static size_t ZSTD_compressBlock_targetCBlockSize(ZSTD_CCtx* zc,
4341 void* dst, size_t dstCapacity,
4342 const void* src, size_t srcSize,
4343 U32 lastBlock)
4344{
4345 size_t cSize = 0;
4346 const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
4347 DEBUGLOG(5, "ZSTD_compressBlock_targetCBlockSize (dstCapacity=%u, dictLimit=%u, nextToUpdate=%u, srcSize=%zu)",
4348 (unsigned)dstCapacity, (unsigned)zc->blockState.matchState.window.dictLimit, (unsigned)zc->blockState.matchState.nextToUpdate, srcSize);
4349 FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
4350
4351 cSize = ZSTD_compressBlock_targetCBlockSize_body(zc, dst, dstCapacity, src, srcSize, bss, lastBlock);
4352 FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize_body failed");
4353
4354 if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
4355 zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
4356
4357 return cSize;
4358}
4359
4360static void ZSTD_overflowCorrectIfNeeded(ZSTD_matchState_t* ms,
4361 ZSTD_cwksp* ws,
4362 ZSTD_CCtx_params const* params,
4363 void const* ip,
4364 void const* iend)
4365{
4366 U32 const cycleLog = ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy);
4367 U32 const maxDist = (U32)1 << params->cParams.windowLog;
4368 if (ZSTD_window_needOverflowCorrection(ms->window, cycleLog, maxDist, ms->loadedDictEnd, ip, iend)) {
4369 U32 const correction = ZSTD_window_correctOverflow(&ms->window, cycleLog, maxDist, ip);
4370 ZSTD_STATIC_ASSERT(ZSTD_CHAINLOG_MAX <= 30);
4371 ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_32 <= 30);
4372 ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX <= 31);
4373 ZSTD_cwksp_mark_tables_dirty(ws);
4374 ZSTD_reduceIndex(ms, params, correction);
4375 ZSTD_cwksp_mark_tables_clean(ws);
4376 if (ms->nextToUpdate < correction) ms->nextToUpdate = 0;
4377 else ms->nextToUpdate -= correction;
4378 /* invalidate dictionaries on overflow correction */
4379 ms->loadedDictEnd = 0;
4380 ms->dictMatchState = NULL;
4381 }
4382}
4383
4384/*! ZSTD_compress_frameChunk() :
4385* Compress a chunk of data into one or multiple blocks.
4386* All blocks will be terminated, all input will be consumed.
4387* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
4388* Frame is supposed already started (header already produced)
4389* @return : compressed size, or an error code
4390*/
4391static size_t ZSTD_compress_frameChunk(ZSTD_CCtx* cctx,
4392 void* dst, size_t dstCapacity,
4393 const void* src, size_t srcSize,
4394 U32 lastFrameChunk)
4395{
4396 size_t blockSize = cctx->blockSize;
4397 size_t remaining = srcSize;
4398 const BYTE* ip = (const BYTE*)src;
4399 BYTE* const ostart = (BYTE*)dst;
4400 BYTE* op = ostart;
4401 U32 const maxDist = (U32)1 << cctx->appliedParams.cParams.windowLog;
4402
4403 assert(cctx->appliedParams.cParams.windowLog <= ZSTD_WINDOWLOG_MAX);
4404
4405 DEBUGLOG(4, "ZSTD_compress_frameChunk (blockSize=%u)", (unsigned)blockSize);
4406 if (cctx->appliedParams.fParams.checksumFlag && srcSize)
4407 XXH64_update(&cctx->xxhState, src, srcSize);
4408
4409 while (remaining) {
4410 ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
4411 U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
4412
4413 /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding
4414 * additional 1. We need to revisit and change this logic to be more consistent */
4415 RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE + 1,
4416 dstSize_tooSmall,
4417 "not enough space to store compressed block");
4418 if (remaining < blockSize) blockSize = remaining;
4419
4420 ZSTD_overflowCorrectIfNeeded(
4421 ms, &cctx->workspace, &cctx->appliedParams, ip, ip + blockSize);
4422 ZSTD_checkDictValidity(&ms->window, ip + blockSize, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
4423 ZSTD_window_enforceMaxDist(&ms->window, ip, maxDist, &ms->loadedDictEnd, &ms->dictMatchState);
4424
4425 /* Ensure hash/chain table insertion resumes no sooner than lowlimit */
4426 if (ms->nextToUpdate < ms->window.lowLimit) ms->nextToUpdate = ms->window.lowLimit;
4427
4428 { size_t cSize;
4429 if (ZSTD_useTargetCBlockSize(&cctx->appliedParams)) {
4430 cSize = ZSTD_compressBlock_targetCBlockSize(cctx, op, dstCapacity, ip, blockSize, lastBlock);
4431 FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_targetCBlockSize failed");
4432 assert(cSize > 0);
4433 assert(cSize <= blockSize + ZSTD_blockHeaderSize);
4434 } else if (ZSTD_blockSplitterEnabled(&cctx->appliedParams)) {
4435 cSize = ZSTD_compressBlock_splitBlock(cctx, op, dstCapacity, ip, blockSize, lastBlock);
4436 FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_splitBlock failed");
4437 assert(cSize > 0 || cctx->seqCollector.collectSequences == 1);
4438 } else {
4439 cSize = ZSTD_compressBlock_internal(cctx,
4440 op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize,
4441 ip, blockSize, 1 /* frame */);
4442 FORWARD_IF_ERROR(cSize, "ZSTD_compressBlock_internal failed");
4443
4444 if (cSize == 0) { /* block is not compressible */
4445 cSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
4446 FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
4447 } else {
4448 U32 const cBlockHeader = cSize == 1 ?
4449 lastBlock + (((U32)bt_rle)<<1) + (U32)(blockSize << 3) :
4450 lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
4451 MEM_writeLE24(op, cBlockHeader);
4452 cSize += ZSTD_blockHeaderSize;
4453 }
4454 } /* if (ZSTD_useTargetCBlockSize(&cctx->appliedParams))*/
4455
4456
4457 ip += blockSize;
4458 assert(remaining >= blockSize);
4459 remaining -= blockSize;
4460 op += cSize;
4461 assert(dstCapacity >= cSize);
4462 dstCapacity -= cSize;
4463 cctx->isFirstBlock = 0;
4464 DEBUGLOG(5, "ZSTD_compress_frameChunk: adding a block of size %u",
4465 (unsigned)cSize);
4466 } }
4467
4468 if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
4469 return (size_t)(op-ostart);
4470}
4471
4472
4473static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
4474 const ZSTD_CCtx_params* params, U64 pledgedSrcSize, U32 dictID)
4475{ BYTE* const op = (BYTE*)dst;
4476 U32 const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
4477 U32 const dictIDSizeCode = params->fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength; /* 0-3 */
4478 U32 const checksumFlag = params->fParams.checksumFlag>0;
4479 U32 const windowSize = (U32)1 << params->cParams.windowLog;
4480 U32 const singleSegment = params->fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
4481 BYTE const windowLogByte = (BYTE)((params->cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
4482 U32 const fcsCode = params->fParams.contentSizeFlag ?
4483 (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : 0; /* 0-3 */
4484 BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
4485 size_t pos=0;
4486
4487 assert(!(params->fParams.contentSizeFlag && pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN));
4488 RETURN_ERROR_IF(dstCapacity < ZSTD_FRAMEHEADERSIZE_MAX, dstSize_tooSmall,
4489 "dst buf is too small to fit worst-case frame header size.");
4490 DEBUGLOG(4, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u",
4491 !params->fParams.noDictIDFlag, (unsigned)dictID, (unsigned)dictIDSizeCode);
4492 if (params->format == ZSTD_f_zstd1) {
4493 MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
4494 pos = 4;
4495 }
4496 op[pos++] = frameHeaderDescriptionByte;
4497 if (!singleSegment) op[pos++] = windowLogByte;
4498 switch(dictIDSizeCode)
4499 {
4500 default:
4501 assert(0); /* impossible */
4502 ZSTD_FALLTHROUGH;
4503 case 0 : break;
4504 case 1 : op[pos] = (BYTE)(dictID); pos++; break;
4505 case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
4506 case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
4507 }
4508 switch(fcsCode)
4509 {
4510 default:
4511 assert(0); /* impossible */
4512 ZSTD_FALLTHROUGH;
4513 case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
4514 case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
4515 case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
4516 case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
4517 }
4518 return pos;
4519}
4520
4521/* ZSTD_writeSkippableFrame_advanced() :
4522 * Writes out a skippable frame with the specified magic number variant (16 are supported),
4523 * from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15, and the desired source data.
4524 *
4525 * Returns the total number of bytes written, or a ZSTD error code.
4526 */
4527size_t ZSTD_writeSkippableFrame(void* dst, size_t dstCapacity,
4528 const void* src, size_t srcSize, unsigned magicVariant) {
4529 BYTE* op = (BYTE*)dst;
4530 RETURN_ERROR_IF(dstCapacity < srcSize + ZSTD_SKIPPABLEHEADERSIZE /* Skippable frame overhead */,
4531 dstSize_tooSmall, "Not enough room for skippable frame");
4532 RETURN_ERROR_IF(srcSize > (unsigned)0xFFFFFFFF, srcSize_wrong, "Src size too large for skippable frame");
4533 RETURN_ERROR_IF(magicVariant > 15, parameter_outOfBound, "Skippable frame magic number variant not supported");
4534
4535 MEM_writeLE32(op, (U32)(ZSTD_MAGIC_SKIPPABLE_START + magicVariant));
4536 MEM_writeLE32(op+4, (U32)srcSize);
4537 ZSTD_memcpy(op+8, src, srcSize);
4538 return srcSize + ZSTD_SKIPPABLEHEADERSIZE;
4539}
4540
4541/* ZSTD_writeLastEmptyBlock() :
4542 * output an empty Block with end-of-frame mark to complete a frame
4543 * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
4544 * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
4545 */
4546size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
4547{
4548 RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall,
4549 "dst buf is too small to write frame trailer empty block.");
4550 { U32 const cBlockHeader24 = 1 /*lastBlock*/ + (((U32)bt_raw)<<1); /* 0 size */
4551 MEM_writeLE24(dst, cBlockHeader24);
4552 return ZSTD_blockHeaderSize;
4553 }
4554}
4555
4556size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
4557{
4558 RETURN_ERROR_IF(cctx->stage != ZSTDcs_init, stage_wrong,
4559 "wrong cctx stage");
4560 RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable,
4561 parameter_unsupported,
4562 "incompatible with ldm");
4563 cctx->externSeqStore.seq = seq;
4564 cctx->externSeqStore.size = nbSeq;
4565 cctx->externSeqStore.capacity = nbSeq;
4566 cctx->externSeqStore.pos = 0;
4567 cctx->externSeqStore.posInSequence = 0;
4568 return 0;
4569}
4570
4571
4572static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
4573 void* dst, size_t dstCapacity,
4574 const void* src, size_t srcSize,
4575 U32 frame, U32 lastFrameChunk)
4576{
4577 ZSTD_matchState_t* const ms = &cctx->blockState.matchState;
4578 size_t fhSize = 0;
4579
4580 DEBUGLOG(5, "ZSTD_compressContinue_internal, stage: %u, srcSize: %u",
4581 cctx->stage, (unsigned)srcSize);
4582 RETURN_ERROR_IF(cctx->stage==ZSTDcs_created, stage_wrong,
4583 "missing init (ZSTD_compressBegin)");
4584
4585 if (frame && (cctx->stage==ZSTDcs_init)) {
4586 fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams,
4587 cctx->pledgedSrcSizePlusOne-1, cctx->dictID);
4588 FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
4589 assert(fhSize <= dstCapacity);
4590 dstCapacity -= fhSize;
4591 dst = (char*)dst + fhSize;
4592 cctx->stage = ZSTDcs_ongoing;
4593 }
4594
4595 if (!srcSize) return fhSize; /* do not generate an empty block if no input */
4596
4597 if (!ZSTD_window_update(&ms->window, src, srcSize, ms->forceNonContiguous)) {
4598 ms->forceNonContiguous = 0;
4599 ms->nextToUpdate = ms->window.dictLimit;
4600 }
4601 if (cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable) {
4602 ZSTD_window_update(&cctx->ldmState.window, src, srcSize, /* forceNonContiguous */ 0);
4603 }
4604
4605 if (!frame) {
4606 /* overflow check and correction for block mode */
4607 ZSTD_overflowCorrectIfNeeded(
4608 ms, &cctx->workspace, &cctx->appliedParams,
4609 src, (BYTE const*)src + srcSize);
4610 }
4611
4612 DEBUGLOG(5, "ZSTD_compressContinue_internal (blockSize=%u)", (unsigned)cctx->blockSize);
4613 { size_t const cSize = frame ?
4614 ZSTD_compress_frameChunk (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
4615 ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize, 0 /* frame */);
4616 FORWARD_IF_ERROR(cSize, "%s", frame ? "ZSTD_compress_frameChunk failed" : "ZSTD_compressBlock_internal failed");
4617 cctx->consumedSrcSize += srcSize;
4618 cctx->producedCSize += (cSize + fhSize);
4619 assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
4620 if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
4621 ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
4622 RETURN_ERROR_IF(
4623 cctx->consumedSrcSize+1 > cctx->pledgedSrcSizePlusOne,
4624 srcSize_wrong,
4625 "error : pledgedSrcSize = %u, while realSrcSize >= %u",
4626 (unsigned)cctx->pledgedSrcSizePlusOne-1,
4627 (unsigned)cctx->consumedSrcSize);
4628 }
4629 return cSize + fhSize;
4630 }
4631}
4632
4633size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx,
4634 void* dst, size_t dstCapacity,
4635 const void* src, size_t srcSize)
4636{
4637 DEBUGLOG(5, "ZSTD_compressContinue (srcSize=%u)", (unsigned)srcSize);
4638 return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
4639}
4640
4641/* NOTE: Must just wrap ZSTD_compressContinue_public() */
4642size_t ZSTD_compressContinue(ZSTD_CCtx* cctx,
4643 void* dst, size_t dstCapacity,
4644 const void* src, size_t srcSize)
4645{
4646 return ZSTD_compressContinue_public(cctx, dst, dstCapacity, src, srcSize);
4647}
4648
4649static size_t ZSTD_getBlockSize_deprecated(const ZSTD_CCtx* cctx)
4650{
4651 ZSTD_compressionParameters const cParams = cctx->appliedParams.cParams;
4652 assert(!ZSTD_checkCParams(cParams));
4653 return MIN(cctx->appliedParams.maxBlockSize, (size_t)1 << cParams.windowLog);
4654}
4655
4656/* NOTE: Must just wrap ZSTD_getBlockSize_deprecated() */
4657size_t ZSTD_getBlockSize(const ZSTD_CCtx* cctx)
4658{
4659 return ZSTD_getBlockSize_deprecated(cctx);
4660}
4661
4662/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */
4663size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
4664{
4665 DEBUGLOG(5, "ZSTD_compressBlock: srcSize = %u", (unsigned)srcSize);
4666 { size_t const blockSizeMax = ZSTD_getBlockSize_deprecated(cctx);
4667 RETURN_ERROR_IF(srcSize > blockSizeMax, srcSize_wrong, "input is larger than a block"); }
4668
4669 return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
4670}
4671
4672/* NOTE: Must just wrap ZSTD_compressBlock_deprecated() */
4673size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
4674{
4675 return ZSTD_compressBlock_deprecated(cctx, dst, dstCapacity, src, srcSize);
4676}
4677
4678/*! ZSTD_loadDictionaryContent() :
4679 * @return : 0, or an error code
4680 */
4681static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
4682 ldmState_t* ls,
4683 ZSTD_cwksp* ws,
4684 ZSTD_CCtx_params const* params,
4685 const void* src, size_t srcSize,
4686 ZSTD_dictTableLoadMethod_e dtlm,
4687 ZSTD_tableFillPurpose_e tfp)
4688{
4689 const BYTE* ip = (const BYTE*) src;
4690 const BYTE* const iend = ip + srcSize;
4691 int const loadLdmDict = params->ldmParams.enableLdm == ZSTD_ps_enable && ls != NULL;
4692
4693 /* Assert that the ms params match the params we're being given */
4694 ZSTD_assertEqualCParams(params->cParams, ms->cParams);
4695
4696 { /* Ensure large dictionaries can't cause index overflow */
4697
4698 /* Allow the dictionary to set indices up to exactly ZSTD_CURRENT_MAX.
4699 * Dictionaries right at the edge will immediately trigger overflow
4700 * correction, but I don't want to insert extra constraints here.
4701 */
4702 U32 maxDictSize = ZSTD_CURRENT_MAX - ZSTD_WINDOW_START_INDEX;
4703
4704 int const CDictTaggedIndices = ZSTD_CDictIndicesAreTagged(&params->cParams);
4705 if (CDictTaggedIndices && tfp == ZSTD_tfp_forCDict) {
4706 /* Some dictionary matchfinders in zstd use "short cache",
4707 * which treats the lower ZSTD_SHORT_CACHE_TAG_BITS of each
4708 * CDict hashtable entry as a tag rather than as part of an index.
4709 * When short cache is used, we need to truncate the dictionary
4710 * so that its indices don't overlap with the tag. */
4711 U32 const shortCacheMaxDictSize = (1u << (32 - ZSTD_SHORT_CACHE_TAG_BITS)) - ZSTD_WINDOW_START_INDEX;
4712 maxDictSize = MIN(maxDictSize, shortCacheMaxDictSize);
4713 assert(!loadLdmDict);
4714 }
4715
4716 /* If the dictionary is too large, only load the suffix of the dictionary. */
4717 if (srcSize > maxDictSize) {
4718 ip = iend - maxDictSize;
4719 src = ip;
4720 srcSize = maxDictSize;
4721 }
4722 }
4723
4724 if (srcSize > ZSTD_CHUNKSIZE_MAX) {
4725 /* We must have cleared our windows when our source is this large. */
4726 assert(ZSTD_window_isEmpty(ms->window));
4727 if (loadLdmDict) assert(ZSTD_window_isEmpty(ls->window));
4728 }
4729 ZSTD_window_update(&ms->window, src, srcSize, /* forceNonContiguous */ 0);
4730
4731 DEBUGLOG(4, "ZSTD_loadDictionaryContent(): useRowMatchFinder=%d", (int)params->useRowMatchFinder);
4732
4733 if (loadLdmDict) { /* Load the entire dict into LDM matchfinders. */
4734 ZSTD_window_update(&ls->window, src, srcSize, /* forceNonContiguous */ 0);
4735 ls->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ls->window.base);
4736 ZSTD_ldm_fillHashTable(ls, ip, iend, &params->ldmParams);
4737 }
4738
4739 /* If the dict is larger than we can reasonably index in our tables, only load the suffix. */
4740 if (params->cParams.strategy < ZSTD_btultra) {
4741 U32 maxDictSize = 8U << MIN(MAX(params->cParams.hashLog, params->cParams.chainLog), 28);
4742 if (srcSize > maxDictSize) {
4743 ip = iend - maxDictSize;
4744 src = ip;
4745 srcSize = maxDictSize;
4746 }
4747 }
4748
4749 ms->nextToUpdate = (U32)(ip - ms->window.base);
4750 ms->loadedDictEnd = params->forceWindow ? 0 : (U32)(iend - ms->window.base);
4751 ms->forceNonContiguous = params->deterministicRefPrefix;
4752
4753 if (srcSize <= HASH_READ_SIZE) return 0;
4754
4755 ZSTD_overflowCorrectIfNeeded(ms, ws, params, ip, iend);
4756
4757 switch(params->cParams.strategy)
4758 {
4759 case ZSTD_fast:
4760 ZSTD_fillHashTable(ms, iend, dtlm, tfp);
4761 break;
4762 case ZSTD_dfast:
4763 ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp);
4764 break;
4765
4766 case ZSTD_greedy:
4767 case ZSTD_lazy:
4768 case ZSTD_lazy2:
4769 assert(srcSize >= HASH_READ_SIZE);
4770 if (ms->dedicatedDictSearch) {
4771 assert(ms->chainTable != NULL);
4772 ZSTD_dedicatedDictSearch_lazy_loadDictionary(ms, iend-HASH_READ_SIZE);
4773 } else {
4774 assert(params->useRowMatchFinder != ZSTD_ps_auto);
4775 if (params->useRowMatchFinder == ZSTD_ps_enable) {
4776 size_t const tagTableSize = ((size_t)1 << params->cParams.hashLog);
4777 ZSTD_memset(ms->tagTable, 0, tagTableSize);
4778 ZSTD_row_update(ms, iend-HASH_READ_SIZE);
4779 DEBUGLOG(4, "Using row-based hash table for lazy dict");
4780 } else {
4781 ZSTD_insertAndFindFirstIndex(ms, iend-HASH_READ_SIZE);
4782 DEBUGLOG(4, "Using chain-based hash table for lazy dict");
4783 }
4784 }
4785 break;
4786
4787 case ZSTD_btlazy2: /* we want the dictionary table fully sorted */
4788 case ZSTD_btopt:
4789 case ZSTD_btultra:
4790 case ZSTD_btultra2:
4791 assert(srcSize >= HASH_READ_SIZE);
4792 ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
4793 break;
4794
4795 default:
4796 assert(0); /* not possible : not a valid strategy id */
4797 }
4798
4799 ms->nextToUpdate = (U32)(iend - ms->window.base);
4800 return 0;
4801}
4802
4803
4804/* Dictionaries that assign zero probability to symbols that show up causes problems
4805 * when FSE encoding. Mark dictionaries with zero probability symbols as FSE_repeat_check
4806 * and only dictionaries with 100% valid symbols can be assumed valid.
4807 */
4808static FSE_repeat ZSTD_dictNCountRepeat(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue)
4809{
4810 U32 s;
4811 if (dictMaxSymbolValue < maxSymbolValue) {
4812 return FSE_repeat_check;
4813 }
4814 for (s = 0; s <= maxSymbolValue; ++s) {
4815 if (normalizedCounter[s] == 0) {
4816 return FSE_repeat_check;
4817 }
4818 }
4819 return FSE_repeat_valid;
4820}
4821
4822size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
4823 const void* const dict, size_t dictSize)
4824{
4825 short offcodeNCount[MaxOff+1];
4826 unsigned offcodeMaxValue = MaxOff;
4827 const BYTE* dictPtr = (const BYTE*)dict; /* skip magic num and dict ID */
4828 const BYTE* const dictEnd = dictPtr + dictSize;
4829 dictPtr += 8;
4830 bs->entropy.huf.repeatMode = HUF_repeat_check;
4831
4832 { unsigned maxSymbolValue = 255;
4833 unsigned hasZeroWeights = 1;
4834 size_t const hufHeaderSize = HUF_readCTable((HUF_CElt*)bs->entropy.huf.CTable, &maxSymbolValue, dictPtr,
4835 dictEnd-dictPtr, &hasZeroWeights);
4836
4837 /* We only set the loaded table as valid if it contains all non-zero
4838 * weights. Otherwise, we set it to check */
4839 if (!hasZeroWeights)
4840 bs->entropy.huf.repeatMode = HUF_repeat_valid;
4841
4842 RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
4843 RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
4844 dictPtr += hufHeaderSize;
4845 }
4846
4847 { unsigned offcodeLog;
4848 size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
4849 RETURN_ERROR_IF(FSE_isError(offcodeHeaderSize), dictionary_corrupted, "");
4850 RETURN_ERROR_IF(offcodeLog > OffFSELog, dictionary_corrupted, "");
4851 /* fill all offset symbols to avoid garbage at end of table */
4852 RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
4853 bs->entropy.fse.offcodeCTable,
4854 offcodeNCount, MaxOff, offcodeLog,
4855 workspace, HUF_WORKSPACE_SIZE)),
4856 dictionary_corrupted, "");
4857 /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
4858 dictPtr += offcodeHeaderSize;
4859 }
4860
4861 { short matchlengthNCount[MaxML+1];
4862 unsigned matchlengthMaxValue = MaxML, matchlengthLog;
4863 size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
4864 RETURN_ERROR_IF(FSE_isError(matchlengthHeaderSize), dictionary_corrupted, "");
4865 RETURN_ERROR_IF(matchlengthLog > MLFSELog, dictionary_corrupted, "");
4866 RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
4867 bs->entropy.fse.matchlengthCTable,
4868 matchlengthNCount, matchlengthMaxValue, matchlengthLog,
4869 workspace, HUF_WORKSPACE_SIZE)),
4870 dictionary_corrupted, "");
4871 bs->entropy.fse.matchlength_repeatMode = ZSTD_dictNCountRepeat(matchlengthNCount, matchlengthMaxValue, MaxML);
4872 dictPtr += matchlengthHeaderSize;
4873 }
4874
4875 { short litlengthNCount[MaxLL+1];
4876 unsigned litlengthMaxValue = MaxLL, litlengthLog;
4877 size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
4878 RETURN_ERROR_IF(FSE_isError(litlengthHeaderSize), dictionary_corrupted, "");
4879 RETURN_ERROR_IF(litlengthLog > LLFSELog, dictionary_corrupted, "");
4880 RETURN_ERROR_IF(FSE_isError(FSE_buildCTable_wksp(
4881 bs->entropy.fse.litlengthCTable,
4882 litlengthNCount, litlengthMaxValue, litlengthLog,
4883 workspace, HUF_WORKSPACE_SIZE)),
4884 dictionary_corrupted, "");
4885 bs->entropy.fse.litlength_repeatMode = ZSTD_dictNCountRepeat(litlengthNCount, litlengthMaxValue, MaxLL);
4886 dictPtr += litlengthHeaderSize;
4887 }
4888
4889 RETURN_ERROR_IF(dictPtr+12 > dictEnd, dictionary_corrupted, "");
4890 bs->rep[0] = MEM_readLE32(dictPtr+0);
4891 bs->rep[1] = MEM_readLE32(dictPtr+4);
4892 bs->rep[2] = MEM_readLE32(dictPtr+8);
4893 dictPtr += 12;
4894
4895 { size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
4896 U32 offcodeMax = MaxOff;
4897 if (dictContentSize <= ((U32)-1) - 128 KB) {
4898 U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
4899 offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
4900 }
4901 /* All offset values <= dictContentSize + 128 KB must be representable for a valid table */
4902 bs->entropy.fse.offcode_repeatMode = ZSTD_dictNCountRepeat(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff));
4903
4904 /* All repCodes must be <= dictContentSize and != 0 */
4905 { U32 u;
4906 for (u=0; u<3; u++) {
4907 RETURN_ERROR_IF(bs->rep[u] == 0, dictionary_corrupted, "");
4908 RETURN_ERROR_IF(bs->rep[u] > dictContentSize, dictionary_corrupted, "");
4909 } } }
4910
4911 return dictPtr - (const BYTE*)dict;
4912}
4913
4914/* Dictionary format :
4915 * See :
4916 * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
4917 */
4918/*! ZSTD_loadZstdDictionary() :
4919 * @return : dictID, or an error code
4920 * assumptions : magic number supposed already checked
4921 * dictSize supposed >= 8
4922 */
4923static size_t ZSTD_loadZstdDictionary(ZSTD_compressedBlockState_t* bs,
4924 ZSTD_matchState_t* ms,
4925 ZSTD_cwksp* ws,
4926 ZSTD_CCtx_params const* params,
4927 const void* dict, size_t dictSize,
4928 ZSTD_dictTableLoadMethod_e dtlm,
4929 ZSTD_tableFillPurpose_e tfp,
4930 void* workspace)
4931{
4932 const BYTE* dictPtr = (const BYTE*)dict;
4933 const BYTE* const dictEnd = dictPtr + dictSize;
4934 size_t dictID;
4935 size_t eSize;
4936 ZSTD_STATIC_ASSERT(HUF_WORKSPACE_SIZE >= (1<<MAX(MLFSELog,LLFSELog)));
4937 assert(dictSize >= 8);
4938 assert(MEM_readLE32(dictPtr) == ZSTD_MAGIC_DICTIONARY);
4939
4940 dictID = params->fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr + 4 /* skip magic number */ );
4941 eSize = ZSTD_loadCEntropy(bs, workspace, dict, dictSize);
4942 FORWARD_IF_ERROR(eSize, "ZSTD_loadCEntropy failed");
4943 dictPtr += eSize;
4944
4945 {
4946 size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
4947 FORWARD_IF_ERROR(ZSTD_loadDictionaryContent(
4948 ms, NULL, ws, params, dictPtr, dictContentSize, dtlm, tfp), "");
4949 }
4950 return dictID;
4951}
4952
4953/** ZSTD_compress_insertDictionary() :
4954* @return : dictID, or an error code */
4955static size_t
4956ZSTD_compress_insertDictionary(ZSTD_compressedBlockState_t* bs,
4957 ZSTD_matchState_t* ms,
4958 ldmState_t* ls,
4959 ZSTD_cwksp* ws,
4960 const ZSTD_CCtx_params* params,
4961 const void* dict, size_t dictSize,
4962 ZSTD_dictContentType_e dictContentType,
4963 ZSTD_dictTableLoadMethod_e dtlm,
4964 ZSTD_tableFillPurpose_e tfp,
4965 void* workspace)
4966{
4967 DEBUGLOG(4, "ZSTD_compress_insertDictionary (dictSize=%u)", (U32)dictSize);
4968 if ((dict==NULL) || (dictSize<8)) {
4969 RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
4970 return 0;
4971 }
4972
4973 ZSTD_reset_compressedBlockState(bs);
4974
4975 /* dict restricted modes */
4976 if (dictContentType == ZSTD_dct_rawContent)
4977 return ZSTD_loadDictionaryContent(ms, ls, ws, params, dict, dictSize, dtlm, tfp);
4978
4979 if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) {
4980 if (dictContentType == ZSTD_dct_auto) {
4981 DEBUGLOG(4, "raw content dictionary detected");
4982 return ZSTD_loadDictionaryContent(
4983 ms, ls, ws, params, dict, dictSize, dtlm, tfp);
4984 }
4985 RETURN_ERROR_IF(dictContentType == ZSTD_dct_fullDict, dictionary_wrong, "");
4986 assert(0); /* impossible */
4987 }
4988
4989 /* dict as full zstd dictionary */
4990 return ZSTD_loadZstdDictionary(
4991 bs, ms, ws, params, dict, dictSize, dtlm, tfp, workspace);
4992}
4993
4994#define ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF (128 KB)
4995#define ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER (6ULL)
4996
4997/*! ZSTD_compressBegin_internal() :
4998 * Assumption : either @dict OR @cdict (or none) is non-NULL, never both
4999 * @return : 0, or an error code */
5000static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
5001 const void* dict, size_t dictSize,
5002 ZSTD_dictContentType_e dictContentType,
5003 ZSTD_dictTableLoadMethod_e dtlm,
5004 const ZSTD_CDict* cdict,
5005 const ZSTD_CCtx_params* params, U64 pledgedSrcSize,
5006 ZSTD_buffered_policy_e zbuff)
5007{
5008 size_t const dictContentSize = cdict ? cdict->dictContentSize : dictSize;
5009#if ZSTD_TRACE
5010 cctx->traceCtx = (ZSTD_trace_compress_begin != NULL) ? ZSTD_trace_compress_begin(cctx) : 0;
5011#endif
5012 DEBUGLOG(4, "ZSTD_compressBegin_internal: wlog=%u", params->cParams.windowLog);
5013 /* params are supposed to be fully validated at this point */
5014 assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
5015 assert(!((dict) && (cdict))); /* either dict or cdict, not both */
5016 if ( (cdict)
5017 && (cdict->dictContentSize > 0)
5018 && ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
5019 || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
5020 || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
5021 || cdict->compressionLevel == 0)
5022 && (params->attachDictPref != ZSTD_dictForceLoad) ) {
5023 return ZSTD_resetCCtx_usingCDict(cctx, cdict, params, pledgedSrcSize, zbuff);
5024 }
5025
5026 FORWARD_IF_ERROR( ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize,
5027 dictContentSize,
5028 ZSTDcrp_makeClean, zbuff) , "");
5029 { size_t const dictID = cdict ?
5030 ZSTD_compress_insertDictionary(
5031 cctx->blockState.prevCBlock, &cctx->blockState.matchState,
5032 &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, cdict->dictContent,
5033 cdict->dictContentSize, cdict->dictContentType, dtlm,
5034 ZSTD_tfp_forCCtx, cctx->entropyWorkspace)
5035 : ZSTD_compress_insertDictionary(
5036 cctx->blockState.prevCBlock, &cctx->blockState.matchState,
5037 &cctx->ldmState, &cctx->workspace, &cctx->appliedParams, dict, dictSize,
5038 dictContentType, dtlm, ZSTD_tfp_forCCtx, cctx->entropyWorkspace);
5039 FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
5040 assert(dictID <= UINT_MAX);
5041 cctx->dictID = (U32)dictID;
5042 cctx->dictContentSize = dictContentSize;
5043 }
5044 return 0;
5045}
5046
5047size_t ZSTD_compressBegin_advanced_internal(ZSTD_CCtx* cctx,
5048 const void* dict, size_t dictSize,
5049 ZSTD_dictContentType_e dictContentType,
5050 ZSTD_dictTableLoadMethod_e dtlm,
5051 const ZSTD_CDict* cdict,
5052 const ZSTD_CCtx_params* params,
5053 unsigned long long pledgedSrcSize)
5054{
5055 DEBUGLOG(4, "ZSTD_compressBegin_advanced_internal: wlog=%u", params->cParams.windowLog);
5056 /* compression parameters verification and optimization */
5057 FORWARD_IF_ERROR( ZSTD_checkCParams(params->cParams) , "");
5058 return ZSTD_compressBegin_internal(cctx,
5059 dict, dictSize, dictContentType, dtlm,
5060 cdict,
5061 params, pledgedSrcSize,
5062 ZSTDb_not_buffered);
5063}
5064
5065/*! ZSTD_compressBegin_advanced() :
5066* @return : 0, or an error code */
5067size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
5068 const void* dict, size_t dictSize,
5069 ZSTD_parameters params, unsigned long long pledgedSrcSize)
5070{
5071 ZSTD_CCtx_params cctxParams;
5072 ZSTD_CCtxParams_init_internal(&cctxParams, &params, ZSTD_NO_CLEVEL);
5073 return ZSTD_compressBegin_advanced_internal(cctx,
5074 dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast,
5075 NULL /*cdict*/,
5076 &cctxParams, pledgedSrcSize);
5077}
5078
5079static size_t
5080ZSTD_compressBegin_usingDict_deprecated(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
5081{
5082 ZSTD_CCtx_params cctxParams;
5083 { ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_noAttachDict);
5084 ZSTD_CCtxParams_init_internal(&cctxParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel);
5085 }
5086 DEBUGLOG(4, "ZSTD_compressBegin_usingDict (dictSize=%u)", (unsigned)dictSize);
5087 return ZSTD_compressBegin_internal(cctx, dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
5088 &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, ZSTDb_not_buffered);
5089}
5090
5091size_t
5092ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
5093{
5094 return ZSTD_compressBegin_usingDict_deprecated(cctx, dict, dictSize, compressionLevel);
5095}
5096
5097size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel)
5098{
5099 return ZSTD_compressBegin_usingDict_deprecated(cctx, NULL, 0, compressionLevel);
5100}
5101
5102
5103/*! ZSTD_writeEpilogue() :
5104* Ends a frame.
5105* @return : nb of bytes written into dst (or an error code) */
5106static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
5107{
5108 BYTE* const ostart = (BYTE*)dst;
5109 BYTE* op = ostart;
5110 size_t fhSize = 0;
5111
5112 DEBUGLOG(4, "ZSTD_writeEpilogue");
5113 RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
5114
5115 /* special case : empty frame */
5116 if (cctx->stage == ZSTDcs_init) {
5117 fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
5118 FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
5119 dstCapacity -= fhSize;
5120 op += fhSize;
5121 cctx->stage = ZSTDcs_ongoing;
5122 }
5123
5124 if (cctx->stage != ZSTDcs_ending) {
5125 /* write one last empty block, make it the "last" block */
5126 U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
5127 RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for epilogue");
5128 MEM_writeLE32(op, cBlockHeader24);
5129 op += ZSTD_blockHeaderSize;
5130 dstCapacity -= ZSTD_blockHeaderSize;
5131 }
5132
5133 if (cctx->appliedParams.fParams.checksumFlag) {
5134 U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
5135 RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
5136 DEBUGLOG(4, "ZSTD_writeEpilogue: write checksum : %08X", (unsigned)checksum);
5137 MEM_writeLE32(op, checksum);
5138 op += 4;
5139 }
5140
5141 cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
5142 return op-ostart;
5143}
5144
5145void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize)
5146{
5147#if ZSTD_TRACE
5148 if (cctx->traceCtx && ZSTD_trace_compress_end != NULL) {
5149 int const streaming = cctx->inBuffSize > 0 || cctx->outBuffSize > 0 || cctx->appliedParams.nbWorkers > 0;
5150 ZSTD_Trace trace;
5151 ZSTD_memset(&trace, 0, sizeof(trace));
5152 trace.version = ZSTD_VERSION_NUMBER;
5153 trace.streaming = streaming;
5154 trace.dictionaryID = cctx->dictID;
5155 trace.dictionarySize = cctx->dictContentSize;
5156 trace.uncompressedSize = cctx->consumedSrcSize;
5157 trace.compressedSize = cctx->producedCSize + extraCSize;
5158 trace.params = &cctx->appliedParams;
5159 trace.cctx = cctx;
5160 ZSTD_trace_compress_end(cctx->traceCtx, &trace);
5161 }
5162 cctx->traceCtx = 0;
5163#else
5164 (void)cctx;
5165 (void)extraCSize;
5166#endif
5167}
5168
5169size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx,
5170 void* dst, size_t dstCapacity,
5171 const void* src, size_t srcSize)
5172{
5173 size_t endResult;
5174 size_t const cSize = ZSTD_compressContinue_internal(cctx,
5175 dst, dstCapacity, src, srcSize,
5176 1 /* frame mode */, 1 /* last chunk */);
5177 FORWARD_IF_ERROR(cSize, "ZSTD_compressContinue_internal failed");
5178 endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
5179 FORWARD_IF_ERROR(endResult, "ZSTD_writeEpilogue failed");
5180 assert(!(cctx->appliedParams.fParams.contentSizeFlag && cctx->pledgedSrcSizePlusOne == 0));
5181 if (cctx->pledgedSrcSizePlusOne != 0) { /* control src size */
5182 ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_UNKNOWN == (unsigned long long)-1);
5183 DEBUGLOG(4, "end of frame : controlling src size");
5184 RETURN_ERROR_IF(
5185 cctx->pledgedSrcSizePlusOne != cctx->consumedSrcSize+1,
5186 srcSize_wrong,
5187 "error : pledgedSrcSize = %u, while realSrcSize = %u",
5188 (unsigned)cctx->pledgedSrcSizePlusOne-1,
5189 (unsigned)cctx->consumedSrcSize);
5190 }
5191 ZSTD_CCtx_trace(cctx, endResult);
5192 return cSize + endResult;
5193}
5194
5195/* NOTE: Must just wrap ZSTD_compressEnd_public() */
5196size_t ZSTD_compressEnd(ZSTD_CCtx* cctx,
5197 void* dst, size_t dstCapacity,
5198 const void* src, size_t srcSize)
5199{
5200 return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize);
5201}
5202
5203size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
5204 void* dst, size_t dstCapacity,
5205 const void* src, size_t srcSize,
5206 const void* dict,size_t dictSize,
5207 ZSTD_parameters params)
5208{
5209 DEBUGLOG(4, "ZSTD_compress_advanced");
5210 FORWARD_IF_ERROR(ZSTD_checkCParams(params.cParams), "");
5211 ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, ZSTD_NO_CLEVEL);
5212 return ZSTD_compress_advanced_internal(cctx,
5213 dst, dstCapacity,
5214 src, srcSize,
5215 dict, dictSize,
5216 &cctx->simpleApiParams);
5217}
5218
5219/* Internal */
5220size_t ZSTD_compress_advanced_internal(
5221 ZSTD_CCtx* cctx,
5222 void* dst, size_t dstCapacity,
5223 const void* src, size_t srcSize,
5224 const void* dict,size_t dictSize,
5225 const ZSTD_CCtx_params* params)
5226{
5227 DEBUGLOG(4, "ZSTD_compress_advanced_internal (srcSize:%u)", (unsigned)srcSize);
5228 FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
5229 dict, dictSize, ZSTD_dct_auto, ZSTD_dtlm_fast, NULL,
5230 params, srcSize, ZSTDb_not_buffered) , "");
5231 return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize);
5232}
5233
5234size_t ZSTD_compress_usingDict(ZSTD_CCtx* cctx,
5235 void* dst, size_t dstCapacity,
5236 const void* src, size_t srcSize,
5237 const void* dict, size_t dictSize,
5238 int compressionLevel)
5239{
5240 {
5241 ZSTD_parameters const params = ZSTD_getParams_internal(compressionLevel, srcSize, dict ? dictSize : 0, ZSTD_cpm_noAttachDict);
5242 assert(params.fParams.contentSizeFlag == 1);
5243 ZSTD_CCtxParams_init_internal(&cctx->simpleApiParams, &params, (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT: compressionLevel);
5244 }
5245 DEBUGLOG(4, "ZSTD_compress_usingDict (srcSize=%u)", (unsigned)srcSize);
5246 return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, dict, dictSize, &cctx->simpleApiParams);
5247}
5248
5249size_t ZSTD_compressCCtx(ZSTD_CCtx* cctx,
5250 void* dst, size_t dstCapacity,
5251 const void* src, size_t srcSize,
5252 int compressionLevel)
5253{
5254 DEBUGLOG(4, "ZSTD_compressCCtx (srcSize=%u)", (unsigned)srcSize);
5255 assert(cctx != NULL);
5256 return ZSTD_compress_usingDict(cctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
5257}
5258
5259size_t ZSTD_compress(void* dst, size_t dstCapacity,
5260 const void* src, size_t srcSize,
5261 int compressionLevel)
5262{
5263 size_t result;
5264#if ZSTD_COMPRESS_HEAPMODE
5265 ZSTD_CCtx* cctx = ZSTD_createCCtx();
5266 RETURN_ERROR_IF(!cctx, memory_allocation, "ZSTD_createCCtx failed");
5267 result = ZSTD_compressCCtx(cctx, dst, dstCapacity, src, srcSize, compressionLevel);
5268 ZSTD_freeCCtx(cctx);
5269#else
5270 ZSTD_CCtx ctxBody;
5271 ZSTD_initCCtx(&ctxBody, ZSTD_defaultCMem);
5272 result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
5273 ZSTD_freeCCtxContent(&ctxBody); /* can't free ctxBody itself, as it's on stack; free only heap content */
5274#endif
5275 return result;
5276}
5277
5278
5279/* ===== Dictionary API ===== */
5280
5281/*! ZSTD_estimateCDictSize_advanced() :
5282 * Estimate amount of memory that will be needed to create a dictionary with following arguments */
5283size_t ZSTD_estimateCDictSize_advanced(
5284 size_t dictSize, ZSTD_compressionParameters cParams,
5285 ZSTD_dictLoadMethod_e dictLoadMethod)
5286{
5287 DEBUGLOG(5, "sizeof(ZSTD_CDict) : %u", (unsigned)sizeof(ZSTD_CDict));
5288 return ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
5289 + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
5290 /* enableDedicatedDictSearch == 1 ensures that CDict estimation will not be too small
5291 * in case we are using DDS with row-hash. */
5292 + ZSTD_sizeof_matchState(&cParams, ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams),
5293 /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0)
5294 + (dictLoadMethod == ZSTD_dlm_byRef ? 0
5295 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void *))));
5296}
5297
5298size_t ZSTD_estimateCDictSize(size_t dictSize, int compressionLevel)
5299{
5300 ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
5301 return ZSTD_estimateCDictSize_advanced(dictSize, cParams, ZSTD_dlm_byCopy);
5302}
5303
5304size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
5305{
5306 if (cdict==NULL) return 0; /* support sizeof on NULL */
5307 DEBUGLOG(5, "sizeof(*cdict) : %u", (unsigned)sizeof(*cdict));
5308 /* cdict may be in the workspace */
5309 return (cdict->workspace.workspace == cdict ? 0 : sizeof(*cdict))
5310 + ZSTD_cwksp_sizeof(&cdict->workspace);
5311}
5312
5313static size_t ZSTD_initCDict_internal(
5314 ZSTD_CDict* cdict,
5315 const void* dictBuffer, size_t dictSize,
5316 ZSTD_dictLoadMethod_e dictLoadMethod,
5317 ZSTD_dictContentType_e dictContentType,
5318 ZSTD_CCtx_params params)
5319{
5320 DEBUGLOG(3, "ZSTD_initCDict_internal (dictContentType:%u)", (unsigned)dictContentType);
5321 assert(!ZSTD_checkCParams(params.cParams));
5322 cdict->matchState.cParams = params.cParams;
5323 cdict->matchState.dedicatedDictSearch = params.enableDedicatedDictSearch;
5324 if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dictBuffer) || (!dictSize)) {
5325 cdict->dictContent = dictBuffer;
5326 } else {
5327 void *internalBuffer = ZSTD_cwksp_reserve_object(&cdict->workspace, ZSTD_cwksp_align(dictSize, sizeof(void*)));
5328 RETURN_ERROR_IF(!internalBuffer, memory_allocation, "NULL pointer!");
5329 cdict->dictContent = internalBuffer;
5330 ZSTD_memcpy(internalBuffer, dictBuffer, dictSize);
5331 }
5332 cdict->dictContentSize = dictSize;
5333 cdict->dictContentType = dictContentType;
5334
5335 cdict->entropyWorkspace = (U32*)ZSTD_cwksp_reserve_object(&cdict->workspace, HUF_WORKSPACE_SIZE);
5336
5337
5338 /* Reset the state to no dictionary */
5339 ZSTD_reset_compressedBlockState(&cdict->cBlockState);
5340 FORWARD_IF_ERROR(ZSTD_reset_matchState(
5341 &cdict->matchState,
5342 &cdict->workspace,
5343 &params.cParams,
5344 params.useRowMatchFinder,
5345 ZSTDcrp_makeClean,
5346 ZSTDirp_reset,
5347 ZSTD_resetTarget_CDict), "");
5348 /* (Maybe) load the dictionary
5349 * Skips loading the dictionary if it is < 8 bytes.
5350 */
5351 { params.compressionLevel = ZSTD_CLEVEL_DEFAULT;
5352 params.fParams.contentSizeFlag = 1;
5353 { size_t const dictID = ZSTD_compress_insertDictionary(
5354 &cdict->cBlockState, &cdict->matchState, NULL, &cdict->workspace,
5355 &params, cdict->dictContent, cdict->dictContentSize,
5356 dictContentType, ZSTD_dtlm_full, ZSTD_tfp_forCDict, cdict->entropyWorkspace);
5357 FORWARD_IF_ERROR(dictID, "ZSTD_compress_insertDictionary failed");
5358 assert(dictID <= (size_t)(U32)-1);
5359 cdict->dictID = (U32)dictID;
5360 }
5361 }
5362
5363 return 0;
5364}
5365
5366static ZSTD_CDict* ZSTD_createCDict_advanced_internal(size_t dictSize,
5367 ZSTD_dictLoadMethod_e dictLoadMethod,
5368 ZSTD_compressionParameters cParams,
5369 ZSTD_paramSwitch_e useRowMatchFinder,
5370 U32 enableDedicatedDictSearch,
5371 ZSTD_customMem customMem)
5372{
5373 if ((!customMem.customAlloc) ^ (!customMem.customFree)) return NULL;
5374
5375 { size_t const workspaceSize =
5376 ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict)) +
5377 ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE) +
5378 ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, enableDedicatedDictSearch, /* forCCtx */ 0) +
5379 (dictLoadMethod == ZSTD_dlm_byRef ? 0
5380 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))));
5381 void* const workspace = ZSTD_customMalloc(workspaceSize, customMem);
5382 ZSTD_cwksp ws;
5383 ZSTD_CDict* cdict;
5384
5385 if (!workspace) {
5386 ZSTD_customFree(workspace, customMem);
5387 return NULL;
5388 }
5389
5390 ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_dynamic_alloc);
5391
5392 cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
5393 assert(cdict != NULL);
5394 ZSTD_cwksp_move(&cdict->workspace, &ws);
5395 cdict->customMem = customMem;
5396 cdict->compressionLevel = ZSTD_NO_CLEVEL; /* signals advanced API usage */
5397 cdict->useRowMatchFinder = useRowMatchFinder;
5398 return cdict;
5399 }
5400}
5401
5402ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize,
5403 ZSTD_dictLoadMethod_e dictLoadMethod,
5404 ZSTD_dictContentType_e dictContentType,
5405 ZSTD_compressionParameters cParams,
5406 ZSTD_customMem customMem)
5407{
5408 ZSTD_CCtx_params cctxParams;
5409 ZSTD_memset(&cctxParams, 0, sizeof(cctxParams));
5410 ZSTD_CCtxParams_init(&cctxParams, 0);
5411 cctxParams.cParams = cParams;
5412 cctxParams.customMem = customMem;
5413 return ZSTD_createCDict_advanced2(
5414 dictBuffer, dictSize,
5415 dictLoadMethod, dictContentType,
5416 &cctxParams, customMem);
5417}
5418
5419ZSTD_CDict* ZSTD_createCDict_advanced2(
5420 const void* dict, size_t dictSize,
5421 ZSTD_dictLoadMethod_e dictLoadMethod,
5422 ZSTD_dictContentType_e dictContentType,
5423 const ZSTD_CCtx_params* originalCctxParams,
5424 ZSTD_customMem customMem)
5425{
5426 ZSTD_CCtx_params cctxParams = *originalCctxParams;
5427 ZSTD_compressionParameters cParams;
5428 ZSTD_CDict* cdict;
5429
5430 DEBUGLOG(3, "ZSTD_createCDict_advanced2, mode %u", (unsigned)dictContentType);
5431 if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
5432
5433 if (cctxParams.enableDedicatedDictSearch) {
5434 cParams = ZSTD_dedicatedDictSearch_getCParams(
5435 cctxParams.compressionLevel, dictSize);
5436 ZSTD_overrideCParams(&cParams, &cctxParams.cParams);
5437 } else {
5438 cParams = ZSTD_getCParamsFromCCtxParams(
5439 &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
5440 }
5441
5442 if (!ZSTD_dedicatedDictSearch_isSupported(&cParams)) {
5443 /* Fall back to non-DDSS params */
5444 cctxParams.enableDedicatedDictSearch = 0;
5445 cParams = ZSTD_getCParamsFromCCtxParams(
5446 &cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
5447 }
5448
5449 DEBUGLOG(3, "ZSTD_createCDict_advanced2: DDS: %u", cctxParams.enableDedicatedDictSearch);
5450 cctxParams.cParams = cParams;
5451 cctxParams.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(cctxParams.useRowMatchFinder, &cParams);
5452
5453 cdict = ZSTD_createCDict_advanced_internal(dictSize,
5454 dictLoadMethod, cctxParams.cParams,
5455 cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,
5456 customMem);
5457
5458 if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
5459 dict, dictSize,
5460 dictLoadMethod, dictContentType,
5461 cctxParams) )) {
5462 ZSTD_freeCDict(cdict);
5463 return NULL;
5464 }
5465
5466 return cdict;
5467}
5468
5469ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
5470{
5471 ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
5472 ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
5473 ZSTD_dlm_byCopy, ZSTD_dct_auto,
5474 cParams, ZSTD_defaultCMem);
5475 if (cdict)
5476 cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
5477 return cdict;
5478}
5479
5480ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
5481{
5482 ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, dictSize, ZSTD_cpm_createCDict);
5483 ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dict, dictSize,
5484 ZSTD_dlm_byRef, ZSTD_dct_auto,
5485 cParams, ZSTD_defaultCMem);
5486 if (cdict)
5487 cdict->compressionLevel = (compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : compressionLevel;
5488 return cdict;
5489}
5490
5491size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
5492{
5493 if (cdict==NULL) return 0; /* support free on NULL */
5494 { ZSTD_customMem const cMem = cdict->customMem;
5495 int cdictInWorkspace = ZSTD_cwksp_owns_buffer(&cdict->workspace, cdict);
5496 ZSTD_cwksp_free(&cdict->workspace, cMem);
5497 if (!cdictInWorkspace) {
5498 ZSTD_customFree(cdict, cMem);
5499 }
5500 return 0;
5501 }
5502}
5503
5504/*! ZSTD_initStaticCDict_advanced() :
5505 * Generate a digested dictionary in provided memory area.
5506 * workspace: The memory area to emplace the dictionary into.
5507 * Provided pointer must 8-bytes aligned.
5508 * It must outlive dictionary usage.
5509 * workspaceSize: Use ZSTD_estimateCDictSize()
5510 * to determine how large workspace must be.
5511 * cParams : use ZSTD_getCParams() to transform a compression level
5512 * into its relevants cParams.
5513 * @return : pointer to ZSTD_CDict*, or NULL if error (size too small)
5514 * Note : there is no corresponding "free" function.
5515 * Since workspace was allocated externally, it must be freed externally.
5516 */
5517const ZSTD_CDict* ZSTD_initStaticCDict(
5518 void* workspace, size_t workspaceSize,
5519 const void* dict, size_t dictSize,
5520 ZSTD_dictLoadMethod_e dictLoadMethod,
5521 ZSTD_dictContentType_e dictContentType,
5522 ZSTD_compressionParameters cParams)
5523{
5524 ZSTD_paramSwitch_e const useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(ZSTD_ps_auto, &cParams);
5525 /* enableDedicatedDictSearch == 1 ensures matchstate is not too small in case this CDict will be used for DDS + row hash */
5526 size_t const matchStateSize = ZSTD_sizeof_matchState(&cParams, useRowMatchFinder, /* enableDedicatedDictSearch */ 1, /* forCCtx */ 0);
5527 size_t const neededSize = ZSTD_cwksp_alloc_size(sizeof(ZSTD_CDict))
5528 + (dictLoadMethod == ZSTD_dlm_byRef ? 0
5529 : ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(dictSize, sizeof(void*))))
5530 + ZSTD_cwksp_alloc_size(HUF_WORKSPACE_SIZE)
5531 + matchStateSize;
5532 ZSTD_CDict* cdict;
5533 ZSTD_CCtx_params params;
5534
5535 if ((size_t)workspace & 7) return NULL; /* 8-aligned */
5536
5537 {
5538 ZSTD_cwksp ws;
5539 ZSTD_cwksp_init(&ws, workspace, workspaceSize, ZSTD_cwksp_static_alloc);
5540 cdict = (ZSTD_CDict*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CDict));
5541 if (cdict == NULL) return NULL;
5542 ZSTD_cwksp_move(&cdict->workspace, &ws);
5543 }
5544
5545 DEBUGLOG(4, "(workspaceSize < neededSize) : (%u < %u) => %u",
5546 (unsigned)workspaceSize, (unsigned)neededSize, (unsigned)(workspaceSize < neededSize));
5547 if (workspaceSize < neededSize) return NULL;
5548
5549 ZSTD_CCtxParams_init(&params, 0);
5550 params.cParams = cParams;
5551 params.useRowMatchFinder = useRowMatchFinder;
5552 cdict->useRowMatchFinder = useRowMatchFinder;
5553 cdict->compressionLevel = ZSTD_NO_CLEVEL;
5554
5555 if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
5556 dict, dictSize,
5557 dictLoadMethod, dictContentType,
5558 params) ))
5559 return NULL;
5560
5561 return cdict;
5562}
5563
5564ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict)
5565{
5566 assert(cdict != NULL);
5567 return cdict->matchState.cParams;
5568}
5569
5570/*! ZSTD_getDictID_fromCDict() :
5571 * Provides the dictID of the dictionary loaded into `cdict`.
5572 * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty.
5573 * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
5574unsigned ZSTD_getDictID_fromCDict(const ZSTD_CDict* cdict)
5575{
5576 if (cdict==NULL) return 0;
5577 return cdict->dictID;
5578}
5579
5580/* ZSTD_compressBegin_usingCDict_internal() :
5581 * Implementation of various ZSTD_compressBegin_usingCDict* functions.
5582 */
5583static size_t ZSTD_compressBegin_usingCDict_internal(
5584 ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
5585 ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
5586{
5587 ZSTD_CCtx_params cctxParams;
5588 DEBUGLOG(4, "ZSTD_compressBegin_usingCDict_internal");
5589 RETURN_ERROR_IF(cdict==NULL, dictionary_wrong, "NULL pointer!");
5590 /* Initialize the cctxParams from the cdict */
5591 {
5592 ZSTD_parameters params;
5593 params.fParams = fParams;
5594 params.cParams = ( pledgedSrcSize < ZSTD_USE_CDICT_PARAMS_SRCSIZE_CUTOFF
5595 || pledgedSrcSize < cdict->dictContentSize * ZSTD_USE_CDICT_PARAMS_DICTSIZE_MULTIPLIER
5596 || pledgedSrcSize == ZSTD_CONTENTSIZE_UNKNOWN
5597 || cdict->compressionLevel == 0 ) ?
5598 ZSTD_getCParamsFromCDict(cdict)
5599 : ZSTD_getCParams(cdict->compressionLevel,
5600 pledgedSrcSize,
5601 cdict->dictContentSize);
5602 ZSTD_CCtxParams_init_internal(&cctxParams, &params, cdict->compressionLevel);
5603 }
5604 /* Increase window log to fit the entire dictionary and source if the
5605 * source size is known. Limit the increase to 19, which is the
5606 * window log for compression level 1 with the largest source size.
5607 */
5608 if (pledgedSrcSize != ZSTD_CONTENTSIZE_UNKNOWN) {
5609 U32 const limitedSrcSize = (U32)MIN(pledgedSrcSize, 1U << 19);
5610 U32 const limitedSrcLog = limitedSrcSize > 1 ? ZSTD_highbit32(limitedSrcSize - 1) + 1 : 1;
5611 cctxParams.cParams.windowLog = MAX(cctxParams.cParams.windowLog, limitedSrcLog);
5612 }
5613 return ZSTD_compressBegin_internal(cctx,
5614 NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast,
5615 cdict,
5616 &cctxParams, pledgedSrcSize,
5617 ZSTDb_not_buffered);
5618}
5619
5620
5621/* ZSTD_compressBegin_usingCDict_advanced() :
5622 * This function is DEPRECATED.
5623 * cdict must be != NULL */
5624size_t ZSTD_compressBegin_usingCDict_advanced(
5625 ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
5626 ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
5627{
5628 return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, pledgedSrcSize);
5629}
5630
5631/* ZSTD_compressBegin_usingCDict() :
5632 * cdict must be != NULL */
5633size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
5634{
5635 ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
5636 return ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, ZSTD_CONTENTSIZE_UNKNOWN);
5637}
5638
5639size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
5640{
5641 return ZSTD_compressBegin_usingCDict_deprecated(cctx, cdict);
5642}
5643
5644/*! ZSTD_compress_usingCDict_internal():
5645 * Implementation of various ZSTD_compress_usingCDict* functions.
5646 */
5647static size_t ZSTD_compress_usingCDict_internal(ZSTD_CCtx* cctx,
5648 void* dst, size_t dstCapacity,
5649 const void* src, size_t srcSize,
5650 const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
5651{
5652 FORWARD_IF_ERROR(ZSTD_compressBegin_usingCDict_internal(cctx, cdict, fParams, srcSize), ""); /* will check if cdict != NULL */
5653 return ZSTD_compressEnd_public(cctx, dst, dstCapacity, src, srcSize);
5654}
5655
5656/*! ZSTD_compress_usingCDict_advanced():
5657 * This function is DEPRECATED.
5658 */
5659size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
5660 void* dst, size_t dstCapacity,
5661 const void* src, size_t srcSize,
5662 const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
5663{
5664 return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
5665}
5666
5667/*! ZSTD_compress_usingCDict() :
5668 * Compression using a digested Dictionary.
5669 * Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
5670 * Note that compression parameters are decided at CDict creation time
5671 * while frame parameters are hardcoded */
5672size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
5673 void* dst, size_t dstCapacity,
5674 const void* src, size_t srcSize,
5675 const ZSTD_CDict* cdict)
5676{
5677 ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
5678 return ZSTD_compress_usingCDict_internal(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
5679}
5680
5681
5682
5683/* ******************************************************************
5684* Streaming
5685********************************************************************/
5686
5687ZSTD_CStream* ZSTD_createCStream(void)
5688{
5689 DEBUGLOG(3, "ZSTD_createCStream");
5690 return ZSTD_createCStream_advanced(ZSTD_defaultCMem);
5691}
5692
5693ZSTD_CStream* ZSTD_initStaticCStream(void *workspace, size_t workspaceSize)
5694{
5695 return ZSTD_initStaticCCtx(workspace, workspaceSize);
5696}
5697
5698ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
5699{ /* CStream and CCtx are now same object */
5700 return ZSTD_createCCtx_advanced(customMem);
5701}
5702
5703size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
5704{
5705 return ZSTD_freeCCtx(zcs); /* same object */
5706}
5707
5708
5709
5710/*====== Initialization ======*/
5711
5712size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX; }
5713
5714size_t ZSTD_CStreamOutSize(void)
5715{
5716 return ZSTD_compressBound(ZSTD_BLOCKSIZE_MAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
5717}
5718
5719static ZSTD_cParamMode_e ZSTD_getCParamMode(ZSTD_CDict const* cdict, ZSTD_CCtx_params const* params, U64 pledgedSrcSize)
5720{
5721 if (cdict != NULL && ZSTD_shouldAttachDict(cdict, params, pledgedSrcSize))
5722 return ZSTD_cpm_attachDict;
5723 else
5724 return ZSTD_cpm_noAttachDict;
5725}
5726
5727/* ZSTD_resetCStream():
5728 * pledgedSrcSize == 0 means "unknown" */
5729size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pss)
5730{
5731 /* temporary : 0 interpreted as "unknown" during transition period.
5732 * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
5733 * 0 will be interpreted as "empty" in the future.
5734 */
5735 U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
5736 DEBUGLOG(4, "ZSTD_resetCStream: pledgedSrcSize = %u", (unsigned)pledgedSrcSize);
5737 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
5738 FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
5739 return 0;
5740}
5741
5742/*! ZSTD_initCStream_internal() :
5743 * Note : for lib/compress only. Used by zstdmt_compress.c.
5744 * Assumption 1 : params are valid
5745 * Assumption 2 : either dict, or cdict, is defined, not both */
5746size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
5747 const void* dict, size_t dictSize, const ZSTD_CDict* cdict,
5748 const ZSTD_CCtx_params* params,
5749 unsigned long long pledgedSrcSize)
5750{
5751 DEBUGLOG(4, "ZSTD_initCStream_internal");
5752 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
5753 FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
5754 assert(!ZSTD_isError(ZSTD_checkCParams(params->cParams)));
5755 zcs->requestedParams = *params;
5756 assert(!((dict) && (cdict))); /* either dict or cdict, not both */
5757 if (dict) {
5758 FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
5759 } else {
5760 /* Dictionary is cleared if !cdict */
5761 FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
5762 }
5763 return 0;
5764}
5765
5766/* ZSTD_initCStream_usingCDict_advanced() :
5767 * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
5768size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
5769 const ZSTD_CDict* cdict,
5770 ZSTD_frameParameters fParams,
5771 unsigned long long pledgedSrcSize)
5772{
5773 DEBUGLOG(4, "ZSTD_initCStream_usingCDict_advanced");
5774 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
5775 FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
5776 zcs->requestedParams.fParams = fParams;
5777 FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
5778 return 0;
5779}
5780
5781/* note : cdict must outlive compression session */
5782size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
5783{
5784 DEBUGLOG(4, "ZSTD_initCStream_usingCDict");
5785 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
5786 FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, cdict) , "");
5787 return 0;
5788}
5789
5790
5791/* ZSTD_initCStream_advanced() :
5792 * pledgedSrcSize must be exact.
5793 * if srcSize is not known at init time, use value ZSTD_CONTENTSIZE_UNKNOWN.
5794 * dict is loaded with default parameters ZSTD_dct_auto and ZSTD_dlm_byCopy. */
5795size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
5796 const void* dict, size_t dictSize,
5797 ZSTD_parameters params, unsigned long long pss)
5798{
5799 /* for compatibility with older programs relying on this behavior.
5800 * Users should now specify ZSTD_CONTENTSIZE_UNKNOWN.
5801 * This line will be removed in the future.
5802 */
5803 U64 const pledgedSrcSize = (pss==0 && params.fParams.contentSizeFlag==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
5804 DEBUGLOG(4, "ZSTD_initCStream_advanced");
5805 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
5806 FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
5807 FORWARD_IF_ERROR( ZSTD_checkCParams(params.cParams) , "");
5808 ZSTD_CCtxParams_setZstdParams(&zcs->requestedParams, &params);
5809 FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
5810 return 0;
5811}
5812
5813size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
5814{
5815 DEBUGLOG(4, "ZSTD_initCStream_usingDict");
5816 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
5817 FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
5818 FORWARD_IF_ERROR( ZSTD_CCtx_loadDictionary(zcs, dict, dictSize) , "");
5819 return 0;
5820}
5821
5822size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pss)
5823{
5824 /* temporary : 0 interpreted as "unknown" during transition period.
5825 * Users willing to specify "unknown" **must** use ZSTD_CONTENTSIZE_UNKNOWN.
5826 * 0 will be interpreted as "empty" in the future.
5827 */
5828 U64 const pledgedSrcSize = (pss==0) ? ZSTD_CONTENTSIZE_UNKNOWN : pss;
5829 DEBUGLOG(4, "ZSTD_initCStream_srcSize");
5830 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
5831 FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
5832 FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
5833 FORWARD_IF_ERROR( ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize) , "");
5834 return 0;
5835}
5836
5837size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
5838{
5839 DEBUGLOG(4, "ZSTD_initCStream");
5840 FORWARD_IF_ERROR( ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only) , "");
5841 FORWARD_IF_ERROR( ZSTD_CCtx_refCDict(zcs, NULL) , "");
5842 FORWARD_IF_ERROR( ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel) , "");
5843 return 0;
5844}
5845
5846/*====== Compression ======*/
5847
5848static size_t ZSTD_nextInputSizeHint(const ZSTD_CCtx* cctx)
5849{
5850 if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
5851 return cctx->blockSize - cctx->stableIn_notConsumed;
5852 }
5853 assert(cctx->appliedParams.inBufferMode == ZSTD_bm_buffered);
5854 { size_t hintInSize = cctx->inBuffTarget - cctx->inBuffPos;
5855 if (hintInSize==0) hintInSize = cctx->blockSize;
5856 return hintInSize;
5857 }
5858}
5859
5860/** ZSTD_compressStream_generic():
5861 * internal function for all *compressStream*() variants
5862 * @return : hint size for next input to complete ongoing block */
5863static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
5864 ZSTD_outBuffer* output,
5865 ZSTD_inBuffer* input,
5866 ZSTD_EndDirective const flushMode)
5867{
5868 const char* const istart = (assert(input != NULL), (const char*)input->src);
5869 const char* const iend = (istart != NULL) ? istart + input->size : istart;
5870 const char* ip = (istart != NULL) ? istart + input->pos : istart;
5871 char* const ostart = (assert(output != NULL), (char*)output->dst);
5872 char* const oend = (ostart != NULL) ? ostart + output->size : ostart;
5873 char* op = (ostart != NULL) ? ostart + output->pos : ostart;
5874 U32 someMoreWork = 1;
5875
5876 /* check expectations */
5877 DEBUGLOG(5, "ZSTD_compressStream_generic, flush=%i, srcSize = %zu", (int)flushMode, input->size - input->pos);
5878 assert(zcs != NULL);
5879 if (zcs->appliedParams.inBufferMode == ZSTD_bm_stable) {
5880 assert(input->pos >= zcs->stableIn_notConsumed);
5881 input->pos -= zcs->stableIn_notConsumed;
5882 ip -= zcs->stableIn_notConsumed;
5883 zcs->stableIn_notConsumed = 0;
5884 }
5885 if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
5886 assert(zcs->inBuff != NULL);
5887 assert(zcs->inBuffSize > 0);
5888 }
5889 if (zcs->appliedParams.outBufferMode == ZSTD_bm_buffered) {
5890 assert(zcs->outBuff != NULL);
5891 assert(zcs->outBuffSize > 0);
5892 }
5893 if (input->src == NULL) assert(input->size == 0);
5894 assert(input->pos <= input->size);
5895 if (output->dst == NULL) assert(output->size == 0);
5896 assert(output->pos <= output->size);
5897 assert((U32)flushMode <= (U32)ZSTD_e_end);
5898
5899 while (someMoreWork) {
5900 switch(zcs->streamStage)
5901 {
5902 case zcss_init:
5903 RETURN_ERROR(init_missing, "call ZSTD_initCStream() first!");
5904
5905 case zcss_load:
5906 if ( (flushMode == ZSTD_e_end)
5907 && ( (size_t)(oend-op) >= ZSTD_compressBound(iend-ip) /* Enough output space */
5908 || zcs->appliedParams.outBufferMode == ZSTD_bm_stable) /* OR we are allowed to return dstSizeTooSmall */
5909 && (zcs->inBuffPos == 0) ) {
5910 /* shortcut to compression pass directly into output buffer */
5911 size_t const cSize = ZSTD_compressEnd_public(zcs,
5912 op, oend-op, ip, iend-ip);
5913 DEBUGLOG(4, "ZSTD_compressEnd : cSize=%u", (unsigned)cSize);
5914 FORWARD_IF_ERROR(cSize, "ZSTD_compressEnd failed");
5915 ip = iend;
5916 op += cSize;
5917 zcs->frameEnded = 1;
5918 ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
5919 someMoreWork = 0; break;
5920 }
5921 /* complete loading into inBuffer in buffered mode */
5922 if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
5923 size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
5924 size_t const loaded = ZSTD_limitCopy(
5925 zcs->inBuff + zcs->inBuffPos, toLoad,
5926 ip, iend-ip);
5927 zcs->inBuffPos += loaded;
5928 if (ip) ip += loaded;
5929 if ( (flushMode == ZSTD_e_continue)
5930 && (zcs->inBuffPos < zcs->inBuffTarget) ) {
5931 /* not enough input to fill full block : stop here */
5932 someMoreWork = 0; break;
5933 }
5934 if ( (flushMode == ZSTD_e_flush)
5935 && (zcs->inBuffPos == zcs->inToCompress) ) {
5936 /* empty */
5937 someMoreWork = 0; break;
5938 }
5939 } else {
5940 assert(zcs->appliedParams.inBufferMode == ZSTD_bm_stable);
5941 if ( (flushMode == ZSTD_e_continue)
5942 && ( (size_t)(iend - ip) < zcs->blockSize) ) {
5943 /* can't compress a full block : stop here */
5944 zcs->stableIn_notConsumed = (size_t)(iend - ip);
5945 ip = iend; /* pretend to have consumed input */
5946 someMoreWork = 0; break;
5947 }
5948 if ( (flushMode == ZSTD_e_flush)
5949 && (ip == iend) ) {
5950 /* empty */
5951 someMoreWork = 0; break;
5952 }
5953 }
5954 /* compress current block (note : this stage cannot be stopped in the middle) */
5955 DEBUGLOG(5, "stream compression stage (flushMode==%u)", flushMode);
5956 { int const inputBuffered = (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered);
5957 void* cDst;
5958 size_t cSize;
5959 size_t oSize = oend-op;
5960 size_t const iSize = inputBuffered ? zcs->inBuffPos - zcs->inToCompress
5961 : MIN((size_t)(iend - ip), zcs->blockSize);
5962 if (oSize >= ZSTD_compressBound(iSize) || zcs->appliedParams.outBufferMode == ZSTD_bm_stable)
5963 cDst = op; /* compress into output buffer, to skip flush stage */
5964 else
5965 cDst = zcs->outBuff, oSize = zcs->outBuffSize;
5966 if (inputBuffered) {
5967 unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip==iend);
5968 cSize = lastBlock ?
5969 ZSTD_compressEnd_public(zcs, cDst, oSize,
5970 zcs->inBuff + zcs->inToCompress, iSize) :
5971 ZSTD_compressContinue_public(zcs, cDst, oSize,
5972 zcs->inBuff + zcs->inToCompress, iSize);
5973 FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
5974 zcs->frameEnded = lastBlock;
5975 /* prepare next block */
5976 zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
5977 if (zcs->inBuffTarget > zcs->inBuffSize)
5978 zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;
5979 DEBUGLOG(5, "inBuffTarget:%u / inBuffSize:%u",
5980 (unsigned)zcs->inBuffTarget, (unsigned)zcs->inBuffSize);
5981 if (!lastBlock)
5982 assert(zcs->inBuffTarget <= zcs->inBuffSize);
5983 zcs->inToCompress = zcs->inBuffPos;
5984 } else { /* !inputBuffered, hence ZSTD_bm_stable */
5985 unsigned const lastBlock = (flushMode == ZSTD_e_end) && (ip + iSize == iend);
5986 cSize = lastBlock ?
5987 ZSTD_compressEnd_public(zcs, cDst, oSize, ip, iSize) :
5988 ZSTD_compressContinue_public(zcs, cDst, oSize, ip, iSize);
5989 /* Consume the input prior to error checking to mirror buffered mode. */
5990 if (ip) ip += iSize;
5991 FORWARD_IF_ERROR(cSize, "%s", lastBlock ? "ZSTD_compressEnd failed" : "ZSTD_compressContinue failed");
5992 zcs->frameEnded = lastBlock;
5993 if (lastBlock) assert(ip == iend);
5994 }
5995 if (cDst == op) { /* no need to flush */
5996 op += cSize;
5997 if (zcs->frameEnded) {
5998 DEBUGLOG(5, "Frame completed directly in outBuffer");
5999 someMoreWork = 0;
6000 ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
6001 }
6002 break;
6003 }
6004 zcs->outBuffContentSize = cSize;
6005 zcs->outBuffFlushedSize = 0;
6006 zcs->streamStage = zcss_flush; /* pass-through to flush stage */
6007 }
6008 ZSTD_FALLTHROUGH;
6009 case zcss_flush:
6010 DEBUGLOG(5, "flush stage");
6011 assert(zcs->appliedParams.outBufferMode == ZSTD_bm_buffered);
6012 { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
6013 size_t const flushed = ZSTD_limitCopy(op, (size_t)(oend-op),
6014 zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
6015 DEBUGLOG(5, "toFlush: %u into %u ==> flushed: %u",
6016 (unsigned)toFlush, (unsigned)(oend-op), (unsigned)flushed);
6017 if (flushed)
6018 op += flushed;
6019 zcs->outBuffFlushedSize += flushed;
6020 if (toFlush!=flushed) {
6021 /* flush not fully completed, presumably because dst is too small */
6022 assert(op==oend);
6023 someMoreWork = 0;
6024 break;
6025 }
6026 zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
6027 if (zcs->frameEnded) {
6028 DEBUGLOG(5, "Frame completed on flush");
6029 someMoreWork = 0;
6030 ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
6031 break;
6032 }
6033 zcs->streamStage = zcss_load;
6034 break;
6035 }
6036
6037 default: /* impossible */
6038 assert(0);
6039 }
6040 }
6041
6042 input->pos = ip - istart;
6043 output->pos = op - ostart;
6044 if (zcs->frameEnded) return 0;
6045 return ZSTD_nextInputSizeHint(zcs);
6046}
6047
6048static size_t ZSTD_nextInputSizeHint_MTorST(const ZSTD_CCtx* cctx)
6049{
6050#ifdef ZSTD_MULTITHREAD
6051 if (cctx->appliedParams.nbWorkers >= 1) {
6052 assert(cctx->mtctx != NULL);
6053 return ZSTDMT_nextInputSizeHint(cctx->mtctx);
6054 }
6055#endif
6056 return ZSTD_nextInputSizeHint(cctx);
6057
6058}
6059
6060size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
6061{
6062 FORWARD_IF_ERROR( ZSTD_compressStream2(zcs, output, input, ZSTD_e_continue) , "");
6063 return ZSTD_nextInputSizeHint_MTorST(zcs);
6064}
6065
6066/* After a compression call set the expected input/output buffer.
6067 * This is validated at the start of the next compression call.
6068 */
6069static void
6070ZSTD_setBufferExpectations(ZSTD_CCtx* cctx, const ZSTD_outBuffer* output, const ZSTD_inBuffer* input)
6071{
6072 DEBUGLOG(5, "ZSTD_setBufferExpectations (for advanced stable in/out modes)");
6073 if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
6074 cctx->expectedInBuffer = *input;
6075 }
6076 if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
6077 cctx->expectedOutBufferSize = output->size - output->pos;
6078 }
6079}
6080
6081/* Validate that the input/output buffers match the expectations set by
6082 * ZSTD_setBufferExpectations.
6083 */
6084static size_t ZSTD_checkBufferStability(ZSTD_CCtx const* cctx,
6085 ZSTD_outBuffer const* output,
6086 ZSTD_inBuffer const* input,
6087 ZSTD_EndDirective endOp)
6088{
6089 if (cctx->appliedParams.inBufferMode == ZSTD_bm_stable) {
6090 ZSTD_inBuffer const expect = cctx->expectedInBuffer;
6091 if (expect.src != input->src || expect.pos != input->pos)
6092 RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableInBuffer enabled but input differs!");
6093 }
6094 (void)endOp;
6095 if (cctx->appliedParams.outBufferMode == ZSTD_bm_stable) {
6096 size_t const outBufferSize = output->size - output->pos;
6097 if (cctx->expectedOutBufferSize != outBufferSize)
6098 RETURN_ERROR(stabilityCondition_notRespected, "ZSTD_c_stableOutBuffer enabled but output size differs!");
6099 }
6100 return 0;
6101}
6102
6103static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
6104 ZSTD_EndDirective endOp,
6105 size_t inSize)
6106{
6107 ZSTD_CCtx_params params = cctx->requestedParams;
6108 ZSTD_prefixDict const prefixDict = cctx->prefixDict;
6109 FORWARD_IF_ERROR( ZSTD_initLocalDict(cctx) , ""); /* Init the local dict if present. */
6110 ZSTD_memset(&cctx->prefixDict, 0, sizeof(cctx->prefixDict)); /* single usage */
6111 assert(prefixDict.dict==NULL || cctx->cdict==NULL); /* only one can be set */
6112 if (cctx->cdict && !cctx->localDict.cdict) {
6113 /* Let the cdict's compression level take priority over the requested params.
6114 * But do not take the cdict's compression level if the "cdict" is actually a localDict
6115 * generated from ZSTD_initLocalDict().
6116 */
6117 params.compressionLevel = cctx->cdict->compressionLevel;
6118 }
6119 DEBUGLOG(4, "ZSTD_compressStream2 : transparent init stage");
6120 if (endOp == ZSTD_e_end) cctx->pledgedSrcSizePlusOne = inSize + 1; /* auto-determine pledgedSrcSize */
6121
6122 { size_t const dictSize = prefixDict.dict
6123 ? prefixDict.dictSize
6124 : (cctx->cdict ? cctx->cdict->dictContentSize : 0);
6125 ZSTD_cParamMode_e const mode = ZSTD_getCParamMode(cctx->cdict, &params, cctx->pledgedSrcSizePlusOne - 1);
6126 params.cParams = ZSTD_getCParamsFromCCtxParams(
6127 &params, cctx->pledgedSrcSizePlusOne-1,
6128 dictSize, mode);
6129 }
6130
6131 params.useBlockSplitter = ZSTD_resolveBlockSplitterMode(params.useBlockSplitter, &params.cParams);
6132 params.ldmParams.enableLdm = ZSTD_resolveEnableLdm(params.ldmParams.enableLdm, &params.cParams);
6133 params.useRowMatchFinder = ZSTD_resolveRowMatchFinderMode(params.useRowMatchFinder, &params.cParams);
6134 params.validateSequences = ZSTD_resolveExternalSequenceValidation(params.validateSequences);
6135 params.maxBlockSize = ZSTD_resolveMaxBlockSize(params.maxBlockSize);
6136 params.searchForExternalRepcodes = ZSTD_resolveExternalRepcodeSearch(params.searchForExternalRepcodes, params.compressionLevel);
6137
6138#ifdef ZSTD_MULTITHREAD
6139 /* If external matchfinder is enabled, make sure to fail before checking job size (for consistency) */
6140 RETURN_ERROR_IF(
6141 params.useSequenceProducer == 1 && params.nbWorkers >= 1,
6142 parameter_combination_unsupported,
6143 "External sequence producer isn't supported with nbWorkers >= 1"
6144 );
6145
6146 if ((cctx->pledgedSrcSizePlusOne-1) <= ZSTDMT_JOBSIZE_MIN) {
6147 params.nbWorkers = 0; /* do not invoke multi-threading when src size is too small */
6148 }
6149 if (params.nbWorkers > 0) {
6150#if ZSTD_TRACE
6151 cctx->traceCtx = (ZSTD_trace_compress_begin != NULL) ? ZSTD_trace_compress_begin(cctx) : 0;
6152#endif
6153 /* mt context creation */
6154 if (cctx->mtctx == NULL) {
6155 DEBUGLOG(4, "ZSTD_compressStream2: creating new mtctx for nbWorkers=%u",
6156 params.nbWorkers);
6157 cctx->mtctx = ZSTDMT_createCCtx_advanced((U32)params.nbWorkers, cctx->customMem, cctx->pool);
6158 RETURN_ERROR_IF(cctx->mtctx == NULL, memory_allocation, "NULL pointer!");
6159 }
6160 /* mt compression */
6161 DEBUGLOG(4, "call ZSTDMT_initCStream_internal as nbWorkers=%u", params.nbWorkers);
6162 FORWARD_IF_ERROR( ZSTDMT_initCStream_internal(
6163 cctx->mtctx,
6164 prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType,
6165 cctx->cdict, params, cctx->pledgedSrcSizePlusOne-1) , "");
6166 cctx->dictID = cctx->cdict ? cctx->cdict->dictID : 0;
6167 cctx->dictContentSize = cctx->cdict ? cctx->cdict->dictContentSize : prefixDict.dictSize;
6168 cctx->consumedSrcSize = 0;
6169 cctx->producedCSize = 0;
6170 cctx->streamStage = zcss_load;
6171 cctx->appliedParams = params;
6172 } else
6173#endif /* ZSTD_MULTITHREAD */
6174 { U64 const pledgedSrcSize = cctx->pledgedSrcSizePlusOne - 1;
6175 assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
6176 FORWARD_IF_ERROR( ZSTD_compressBegin_internal(cctx,
6177 prefixDict.dict, prefixDict.dictSize, prefixDict.dictContentType, ZSTD_dtlm_fast,
6178 cctx->cdict,
6179 &params, pledgedSrcSize,
6180 ZSTDb_buffered) , "");
6181 assert(cctx->appliedParams.nbWorkers == 0);
6182 cctx->inToCompress = 0;
6183 cctx->inBuffPos = 0;
6184 if (cctx->appliedParams.inBufferMode == ZSTD_bm_buffered) {
6185 /* for small input: avoid automatic flush on reaching end of block, since
6186 * it would require to add a 3-bytes null block to end frame
6187 */
6188 cctx->inBuffTarget = cctx->blockSize + (cctx->blockSize == pledgedSrcSize);
6189 } else {
6190 cctx->inBuffTarget = 0;
6191 }
6192 cctx->outBuffContentSize = cctx->outBuffFlushedSize = 0;
6193 cctx->streamStage = zcss_load;
6194 cctx->frameEnded = 0;
6195 }
6196 return 0;
6197}
6198
6199/* @return provides a minimum amount of data remaining to be flushed from internal buffers
6200 */
6201size_t ZSTD_compressStream2( ZSTD_CCtx* cctx,
6202 ZSTD_outBuffer* output,
6203 ZSTD_inBuffer* input,
6204 ZSTD_EndDirective endOp)
6205{
6206 DEBUGLOG(5, "ZSTD_compressStream2, endOp=%u ", (unsigned)endOp);
6207 /* check conditions */
6208 RETURN_ERROR_IF(output->pos > output->size, dstSize_tooSmall, "invalid output buffer");
6209 RETURN_ERROR_IF(input->pos > input->size, srcSize_wrong, "invalid input buffer");
6210 RETURN_ERROR_IF((U32)endOp > (U32)ZSTD_e_end, parameter_outOfBound, "invalid endDirective");
6211 assert(cctx != NULL);
6212
6213 /* transparent initialization stage */
6214 if (cctx->streamStage == zcss_init) {
6215 size_t const inputSize = input->size - input->pos; /* no obligation to start from pos==0 */
6216 size_t const totalInputSize = inputSize + cctx->stableIn_notConsumed;
6217 if ( (cctx->requestedParams.inBufferMode == ZSTD_bm_stable) /* input is presumed stable, across invocations */
6218 && (endOp == ZSTD_e_continue) /* no flush requested, more input to come */
6219 && (totalInputSize < ZSTD_BLOCKSIZE_MAX) ) { /* not even reached one block yet */
6220 if (cctx->stableIn_notConsumed) { /* not the first time */
6221 /* check stable source guarantees */
6222 RETURN_ERROR_IF(input->src != cctx->expectedInBuffer.src, stabilityCondition_notRespected, "stableInBuffer condition not respected: wrong src pointer");
6223 RETURN_ERROR_IF(input->pos != cctx->expectedInBuffer.size, stabilityCondition_notRespected, "stableInBuffer condition not respected: externally modified pos");
6224 }
6225 /* pretend input was consumed, to give a sense forward progress */
6226 input->pos = input->size;
6227 /* save stable inBuffer, for later control, and flush/end */
6228 cctx->expectedInBuffer = *input;
6229 /* but actually input wasn't consumed, so keep track of position from where compression shall resume */
6230 cctx->stableIn_notConsumed += inputSize;
6231 /* don't initialize yet, wait for the first block of flush() order, for better parameters adaptation */
6232 return ZSTD_FRAMEHEADERSIZE_MIN(cctx->requestedParams.format); /* at least some header to produce */
6233 }
6234 FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, endOp, totalInputSize), "compressStream2 initialization failed");
6235 ZSTD_setBufferExpectations(cctx, output, input); /* Set initial buffer expectations now that we've initialized */
6236 }
6237 /* end of transparent initialization stage */
6238
6239 FORWARD_IF_ERROR(ZSTD_checkBufferStability(cctx, output, input, endOp), "invalid buffers");
6240 /* compression stage */
6241#ifdef ZSTD_MULTITHREAD
6242 if (cctx->appliedParams.nbWorkers > 0) {
6243 size_t flushMin;
6244 if (cctx->cParamsChanged) {
6245 ZSTDMT_updateCParams_whileCompressing(cctx->mtctx, &cctx->requestedParams);
6246 cctx->cParamsChanged = 0;
6247 }
6248 if (cctx->stableIn_notConsumed) {
6249 assert(cctx->appliedParams.inBufferMode == ZSTD_bm_stable);
6250 /* some early data was skipped - make it available for consumption */
6251 assert(input->pos >= cctx->stableIn_notConsumed);
6252 input->pos -= cctx->stableIn_notConsumed;
6253 cctx->stableIn_notConsumed = 0;
6254 }
6255 for (;;) {
6256 size_t const ipos = input->pos;
6257 size_t const opos = output->pos;
6258 flushMin = ZSTDMT_compressStream_generic(cctx->mtctx, output, input, endOp);
6259 cctx->consumedSrcSize += (U64)(input->pos - ipos);
6260 cctx->producedCSize += (U64)(output->pos - opos);
6261 if ( ZSTD_isError(flushMin)
6262 || (endOp == ZSTD_e_end && flushMin == 0) ) { /* compression completed */
6263 if (flushMin == 0)
6264 ZSTD_CCtx_trace(cctx, 0);
6265 ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
6266 }
6267 FORWARD_IF_ERROR(flushMin, "ZSTDMT_compressStream_generic failed");
6268
6269 if (endOp == ZSTD_e_continue) {
6270 /* We only require some progress with ZSTD_e_continue, not maximal progress.
6271 * We're done if we've consumed or produced any bytes, or either buffer is
6272 * full.
6273 */
6274 if (input->pos != ipos || output->pos != opos || input->pos == input->size || output->pos == output->size)
6275 break;
6276 } else {
6277 assert(endOp == ZSTD_e_flush || endOp == ZSTD_e_end);
6278 /* We require maximal progress. We're done when the flush is complete or the
6279 * output buffer is full.
6280 */
6281 if (flushMin == 0 || output->pos == output->size)
6282 break;
6283 }
6284 }
6285 DEBUGLOG(5, "completed ZSTD_compressStream2 delegating to ZSTDMT_compressStream_generic");
6286 /* Either we don't require maximum forward progress, we've finished the
6287 * flush, or we are out of output space.
6288 */
6289 assert(endOp == ZSTD_e_continue || flushMin == 0 || output->pos == output->size);
6290 ZSTD_setBufferExpectations(cctx, output, input);
6291 return flushMin;
6292 }
6293#endif /* ZSTD_MULTITHREAD */
6294 FORWARD_IF_ERROR( ZSTD_compressStream_generic(cctx, output, input, endOp) , "");
6295 DEBUGLOG(5, "completed ZSTD_compressStream2");
6296 ZSTD_setBufferExpectations(cctx, output, input);
6297 return cctx->outBuffContentSize - cctx->outBuffFlushedSize; /* remaining to flush */
6298}
6299
6300size_t ZSTD_compressStream2_simpleArgs (
6301 ZSTD_CCtx* cctx,
6302 void* dst, size_t dstCapacity, size_t* dstPos,
6303 const void* src, size_t srcSize, size_t* srcPos,
6304 ZSTD_EndDirective endOp)
6305{
6306 ZSTD_outBuffer output;
6307 ZSTD_inBuffer input;
6308 output.dst = dst;
6309 output.size = dstCapacity;
6310 output.pos = *dstPos;
6311 input.src = src;
6312 input.size = srcSize;
6313 input.pos = *srcPos;
6314 /* ZSTD_compressStream2() will check validity of dstPos and srcPos */
6315 { size_t const cErr = ZSTD_compressStream2(cctx, &output, &input, endOp);
6316 *dstPos = output.pos;
6317 *srcPos = input.pos;
6318 return cErr;
6319 }
6320}
6321
6322size_t ZSTD_compress2(ZSTD_CCtx* cctx,
6323 void* dst, size_t dstCapacity,
6324 const void* src, size_t srcSize)
6325{
6326 ZSTD_bufferMode_e const originalInBufferMode = cctx->requestedParams.inBufferMode;
6327 ZSTD_bufferMode_e const originalOutBufferMode = cctx->requestedParams.outBufferMode;
6328 DEBUGLOG(4, "ZSTD_compress2 (srcSize=%u)", (unsigned)srcSize);
6329 ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
6330 /* Enable stable input/output buffers. */
6331 cctx->requestedParams.inBufferMode = ZSTD_bm_stable;
6332 cctx->requestedParams.outBufferMode = ZSTD_bm_stable;
6333 { size_t oPos = 0;
6334 size_t iPos = 0;
6335 size_t const result = ZSTD_compressStream2_simpleArgs(cctx,
6336 dst, dstCapacity, &oPos,
6337 src, srcSize, &iPos,
6338 ZSTD_e_end);
6339 /* Reset to the original values. */
6340 cctx->requestedParams.inBufferMode = originalInBufferMode;
6341 cctx->requestedParams.outBufferMode = originalOutBufferMode;
6342
6343 FORWARD_IF_ERROR(result, "ZSTD_compressStream2_simpleArgs failed");
6344 if (result != 0) { /* compression not completed, due to lack of output space */
6345 assert(oPos == dstCapacity);
6346 RETURN_ERROR(dstSize_tooSmall, "");
6347 }
6348 assert(iPos == srcSize); /* all input is expected consumed */
6349 return oPos;
6350 }
6351}
6352
6353/* ZSTD_validateSequence() :
6354 * @offCode : is presumed to follow format required by ZSTD_storeSeq()
6355 * @returns a ZSTD error code if sequence is not valid
6356 */
6357static size_t
6358ZSTD_validateSequence(U32 offCode, U32 matchLength, U32 minMatch,
6359 size_t posInSrc, U32 windowLog, size_t dictSize, int useSequenceProducer)
6360{
6361 U32 const windowSize = 1u << windowLog;
6362 /* posInSrc represents the amount of data the decoder would decode up to this point.
6363 * As long as the amount of data decoded is less than or equal to window size, offsets may be
6364 * larger than the total length of output decoded in order to reference the dict, even larger than
6365 * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
6366 */
6367 size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
6368 size_t const matchLenLowerBound = (minMatch == 3 || useSequenceProducer) ? 3 : 4;
6369 RETURN_ERROR_IF(offCode > OFFSET_TO_OFFBASE(offsetBound), externalSequences_invalid, "Offset too large!");
6370 /* Validate maxNbSeq is large enough for the given matchLength and minMatch */
6371 RETURN_ERROR_IF(matchLength < matchLenLowerBound, externalSequences_invalid, "Matchlength too small for the minMatch");
6372 return 0;
6373}
6374
6375/* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
6376static U32 ZSTD_finalizeOffBase(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0)
6377{
6378 U32 offBase = OFFSET_TO_OFFBASE(rawOffset);
6379
6380 if (!ll0 && rawOffset == rep[0]) {
6381 offBase = REPCODE1_TO_OFFBASE;
6382 } else if (rawOffset == rep[1]) {
6383 offBase = REPCODE_TO_OFFBASE(2 - ll0);
6384 } else if (rawOffset == rep[2]) {
6385 offBase = REPCODE_TO_OFFBASE(3 - ll0);
6386 } else if (ll0 && rawOffset == rep[0] - 1) {
6387 offBase = REPCODE3_TO_OFFBASE;
6388 }
6389 return offBase;
6390}
6391
6392size_t
6393ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
6394 ZSTD_sequencePosition* seqPos,
6395 const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
6396 const void* src, size_t blockSize,
6397 ZSTD_paramSwitch_e externalRepSearch)
6398{
6399 U32 idx = seqPos->idx;
6400 U32 const startIdx = idx;
6401 BYTE const* ip = (BYTE const*)(src);
6402 const BYTE* const iend = ip + blockSize;
6403 repcodes_t updatedRepcodes;
6404 U32 dictSize;
6405
6406 DEBUGLOG(5, "ZSTD_copySequencesToSeqStoreExplicitBlockDelim (blockSize = %zu)", blockSize);
6407
6408 if (cctx->cdict) {
6409 dictSize = (U32)cctx->cdict->dictContentSize;
6410 } else if (cctx->prefixDict.dict) {
6411 dictSize = (U32)cctx->prefixDict.dictSize;
6412 } else {
6413 dictSize = 0;
6414 }
6415 ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
6416 for (; idx < inSeqsSize && (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0); ++idx) {
6417 U32 const litLength = inSeqs[idx].litLength;
6418 U32 const matchLength = inSeqs[idx].matchLength;
6419 U32 offBase;
6420
6421 if (externalRepSearch == ZSTD_ps_disable) {
6422 offBase = OFFSET_TO_OFFBASE(inSeqs[idx].offset);
6423 } else {
6424 U32 const ll0 = (litLength == 0);
6425 offBase = ZSTD_finalizeOffBase(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
6426 ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
6427 }
6428
6429 DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength);
6430 if (cctx->appliedParams.validateSequences) {
6431 seqPos->posInSrc += litLength + matchLength;
6432 FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc,
6433 cctx->appliedParams.cParams.windowLog, dictSize, cctx->appliedParams.useSequenceProducer),
6434 "Sequence validation failed");
6435 }
6436 RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid,
6437 "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
6438 ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength);
6439 ip += matchLength + litLength;
6440 }
6441
6442 /* If we skipped repcode search while parsing, we need to update repcodes now */
6443 assert(externalRepSearch != ZSTD_ps_auto);
6444 assert(idx >= startIdx);
6445 if (externalRepSearch == ZSTD_ps_disable && idx != startIdx) {
6446 U32* const rep = updatedRepcodes.rep;
6447 U32 lastSeqIdx = idx - 1; /* index of last non-block-delimiter sequence */
6448
6449 if (lastSeqIdx >= startIdx + 2) {
6450 rep[2] = inSeqs[lastSeqIdx - 2].offset;
6451 rep[1] = inSeqs[lastSeqIdx - 1].offset;
6452 rep[0] = inSeqs[lastSeqIdx].offset;
6453 } else if (lastSeqIdx == startIdx + 1) {
6454 rep[2] = rep[0];
6455 rep[1] = inSeqs[lastSeqIdx - 1].offset;
6456 rep[0] = inSeqs[lastSeqIdx].offset;
6457 } else {
6458 assert(lastSeqIdx == startIdx);
6459 rep[2] = rep[1];
6460 rep[1] = rep[0];
6461 rep[0] = inSeqs[lastSeqIdx].offset;
6462 }
6463 }
6464
6465 ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
6466
6467 if (inSeqs[idx].litLength) {
6468 DEBUGLOG(6, "Storing last literals of size: %u", inSeqs[idx].litLength);
6469 ZSTD_storeLastLiterals(&cctx->seqStore, ip, inSeqs[idx].litLength);
6470 ip += inSeqs[idx].litLength;
6471 seqPos->posInSrc += inSeqs[idx].litLength;
6472 }
6473 RETURN_ERROR_IF(ip != iend, externalSequences_invalid, "Blocksize doesn't agree with block delimiter!");
6474 seqPos->idx = idx+1;
6475 return 0;
6476}
6477
6478size_t
6479ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
6480 const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
6481 const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch)
6482{
6483 U32 idx = seqPos->idx;
6484 U32 startPosInSequence = seqPos->posInSequence;
6485 U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
6486 size_t dictSize;
6487 BYTE const* ip = (BYTE const*)(src);
6488 BYTE const* iend = ip + blockSize; /* May be adjusted if we decide to process fewer than blockSize bytes */
6489 repcodes_t updatedRepcodes;
6490 U32 bytesAdjustment = 0;
6491 U32 finalMatchSplit = 0;
6492
6493 /* TODO(embg) support fast parsing mode in noBlockDelim mode */
6494 (void)externalRepSearch;
6495
6496 if (cctx->cdict) {
6497 dictSize = cctx->cdict->dictContentSize;
6498 } else if (cctx->prefixDict.dict) {
6499 dictSize = cctx->prefixDict.dictSize;
6500 } else {
6501 dictSize = 0;
6502 }
6503 DEBUGLOG(5, "ZSTD_copySequencesToSeqStoreNoBlockDelim: idx: %u PIS: %u blockSize: %zu", idx, startPosInSequence, blockSize);
6504 DEBUGLOG(5, "Start seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
6505 ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
6506 while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
6507 const ZSTD_Sequence currSeq = inSeqs[idx];
6508 U32 litLength = currSeq.litLength;
6509 U32 matchLength = currSeq.matchLength;
6510 U32 const rawOffset = currSeq.offset;
6511 U32 offBase;
6512
6513 /* Modify the sequence depending on where endPosInSequence lies */
6514 if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
6515 if (startPosInSequence >= litLength) {
6516 startPosInSequence -= litLength;
6517 litLength = 0;
6518 matchLength -= startPosInSequence;
6519 } else {
6520 litLength -= startPosInSequence;
6521 }
6522 /* Move to the next sequence */
6523 endPosInSequence -= currSeq.litLength + currSeq.matchLength;
6524 startPosInSequence = 0;
6525 } else {
6526 /* This is the final (partial) sequence we're adding from inSeqs, and endPosInSequence
6527 does not reach the end of the match. So, we have to split the sequence */
6528 DEBUGLOG(6, "Require a split: diff: %u, idx: %u PIS: %u",
6529 currSeq.litLength + currSeq.matchLength - endPosInSequence, idx, endPosInSequence);
6530 if (endPosInSequence > litLength) {
6531 U32 firstHalfMatchLength;
6532 litLength = startPosInSequence >= litLength ? 0 : litLength - startPosInSequence;
6533 firstHalfMatchLength = endPosInSequence - startPosInSequence - litLength;
6534 if (matchLength > blockSize && firstHalfMatchLength >= cctx->appliedParams.cParams.minMatch) {
6535 /* Only ever split the match if it is larger than the block size */
6536 U32 secondHalfMatchLength = currSeq.matchLength + currSeq.litLength - endPosInSequence;
6537 if (secondHalfMatchLength < cctx->appliedParams.cParams.minMatch) {
6538 /* Move the endPosInSequence backward so that it creates match of minMatch length */
6539 endPosInSequence -= cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
6540 bytesAdjustment = cctx->appliedParams.cParams.minMatch - secondHalfMatchLength;
6541 firstHalfMatchLength -= bytesAdjustment;
6542 }
6543 matchLength = firstHalfMatchLength;
6544 /* Flag that we split the last match - after storing the sequence, exit the loop,
6545 but keep the value of endPosInSequence */
6546 finalMatchSplit = 1;
6547 } else {
6548 /* Move the position in sequence backwards so that we don't split match, and break to store
6549 * the last literals. We use the original currSeq.litLength as a marker for where endPosInSequence
6550 * should go. We prefer to do this whenever it is not necessary to split the match, or if doing so
6551 * would cause the first half of the match to be too small
6552 */
6553 bytesAdjustment = endPosInSequence - currSeq.litLength;
6554 endPosInSequence = currSeq.litLength;
6555 break;
6556 }
6557 } else {
6558 /* This sequence ends inside the literals, break to store the last literals */
6559 break;
6560 }
6561 }
6562 /* Check if this offset can be represented with a repcode */
6563 { U32 const ll0 = (litLength == 0);
6564 offBase = ZSTD_finalizeOffBase(rawOffset, updatedRepcodes.rep, ll0);
6565 ZSTD_updateRep(updatedRepcodes.rep, offBase, ll0);
6566 }
6567
6568 if (cctx->appliedParams.validateSequences) {
6569 seqPos->posInSrc += litLength + matchLength;
6570 FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc,
6571 cctx->appliedParams.cParams.windowLog, dictSize, cctx->appliedParams.useSequenceProducer),
6572 "Sequence validation failed");
6573 }
6574 DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength);
6575 RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid,
6576 "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
6577 ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offBase, matchLength);
6578 ip += matchLength + litLength;
6579 if (!finalMatchSplit)
6580 idx++; /* Next Sequence */
6581 }
6582 DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
6583 assert(idx == inSeqsSize || endPosInSequence <= inSeqs[idx].litLength + inSeqs[idx].matchLength);
6584 seqPos->idx = idx;
6585 seqPos->posInSequence = endPosInSequence;
6586 ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
6587
6588 iend -= bytesAdjustment;
6589 if (ip != iend) {
6590 /* Store any last literals */
6591 U32 lastLLSize = (U32)(iend - ip);
6592 assert(ip <= iend);
6593 DEBUGLOG(6, "Storing last literals of size: %u", lastLLSize);
6594 ZSTD_storeLastLiterals(&cctx->seqStore, ip, lastLLSize);
6595 seqPos->posInSrc += lastLLSize;
6596 }
6597
6598 return bytesAdjustment;
6599}
6600
6601typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
6602 const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
6603 const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch);
6604static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
6605{
6606 ZSTD_sequenceCopier sequenceCopier = NULL;
6607 assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
6608 if (mode == ZSTD_sf_explicitBlockDelimiters) {
6609 return ZSTD_copySequencesToSeqStoreExplicitBlockDelim;
6610 } else if (mode == ZSTD_sf_noBlockDelimiters) {
6611 return ZSTD_copySequencesToSeqStoreNoBlockDelim;
6612 }
6613 assert(sequenceCopier != NULL);
6614 return sequenceCopier;
6615}
6616
6617/* Discover the size of next block by searching for the delimiter.
6618 * Note that a block delimiter **must** exist in this mode,
6619 * otherwise it's an input error.
6620 * The block size retrieved will be later compared to ensure it remains within bounds */
6621static size_t
6622blockSize_explicitDelimiter(const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos)
6623{
6624 int end = 0;
6625 size_t blockSize = 0;
6626 size_t spos = seqPos.idx;
6627 DEBUGLOG(6, "blockSize_explicitDelimiter : seq %zu / %zu", spos, inSeqsSize);
6628 assert(spos <= inSeqsSize);
6629 while (spos < inSeqsSize) {
6630 end = (inSeqs[spos].offset == 0);
6631 blockSize += inSeqs[spos].litLength + inSeqs[spos].matchLength;
6632 if (end) {
6633 if (inSeqs[spos].matchLength != 0)
6634 RETURN_ERROR(externalSequences_invalid, "delimiter format error : both matchlength and offset must be == 0");
6635 break;
6636 }
6637 spos++;
6638 }
6639 if (!end)
6640 RETURN_ERROR(externalSequences_invalid, "Reached end of sequences without finding a block delimiter");
6641 return blockSize;
6642}
6643
6644/* More a "target" block size */
6645static size_t blockSize_noDelimiter(size_t blockSize, size_t remaining)
6646{
6647 int const lastBlock = (remaining <= blockSize);
6648 return lastBlock ? remaining : blockSize;
6649}
6650
6651static size_t determine_blockSize(ZSTD_sequenceFormat_e mode,
6652 size_t blockSize, size_t remaining,
6653 const ZSTD_Sequence* inSeqs, size_t inSeqsSize, ZSTD_sequencePosition seqPos)
6654{
6655 DEBUGLOG(6, "determine_blockSize : remainingSize = %zu", remaining);
6656 if (mode == ZSTD_sf_noBlockDelimiters)
6657 return blockSize_noDelimiter(blockSize, remaining);
6658 { size_t const explicitBlockSize = blockSize_explicitDelimiter(inSeqs, inSeqsSize, seqPos);
6659 FORWARD_IF_ERROR(explicitBlockSize, "Error while determining block size with explicit delimiters");
6660 if (explicitBlockSize > blockSize)
6661 RETURN_ERROR(externalSequences_invalid, "sequences incorrectly define a too large block");
6662 if (explicitBlockSize > remaining)
6663 RETURN_ERROR(externalSequences_invalid, "sequences define a frame longer than source");
6664 return explicitBlockSize;
6665 }
6666}
6667
6668/* Compress, block-by-block, all of the sequences given.
6669 *
6670 * Returns the cumulative size of all compressed blocks (including their headers),
6671 * otherwise a ZSTD error.
6672 */
6673static size_t
6674ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
6675 void* dst, size_t dstCapacity,
6676 const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
6677 const void* src, size_t srcSize)
6678{
6679 size_t cSize = 0;
6680 size_t remaining = srcSize;
6681 ZSTD_sequencePosition seqPos = {0, 0, 0};
6682
6683 BYTE const* ip = (BYTE const*)src;
6684 BYTE* op = (BYTE*)dst;
6685 ZSTD_sequenceCopier const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
6686
6687 DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
6688 /* Special case: empty frame */
6689 if (remaining == 0) {
6690 U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1);
6691 RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "No room for empty frame block header");
6692 MEM_writeLE32(op, cBlockHeader24);
6693 op += ZSTD_blockHeaderSize;
6694 dstCapacity -= ZSTD_blockHeaderSize;
6695 cSize += ZSTD_blockHeaderSize;
6696 }
6697
6698 while (remaining) {
6699 size_t compressedSeqsSize;
6700 size_t cBlockSize;
6701 size_t additionalByteAdjustment;
6702 size_t blockSize = determine_blockSize(cctx->appliedParams.blockDelimiters,
6703 cctx->blockSize, remaining,
6704 inSeqs, inSeqsSize, seqPos);
6705 U32 const lastBlock = (blockSize == remaining);
6706 FORWARD_IF_ERROR(blockSize, "Error while trying to determine block size");
6707 assert(blockSize <= remaining);
6708 ZSTD_resetSeqStore(&cctx->seqStore);
6709 DEBUGLOG(5, "Working on new block. Blocksize: %zu (total:%zu)", blockSize, (ip - (const BYTE*)src) + blockSize);
6710
6711 additionalByteAdjustment = sequenceCopier(cctx, &seqPos, inSeqs, inSeqsSize, ip, blockSize, cctx->appliedParams.searchForExternalRepcodes);
6712 FORWARD_IF_ERROR(additionalByteAdjustment, "Bad sequence copy");
6713 blockSize -= additionalByteAdjustment;
6714
6715 /* If blocks are too small, emit as a nocompress block */
6716 /* TODO: See 3090. We reduced MIN_CBLOCK_SIZE from 3 to 2 so to compensate we are adding
6717 * additional 1. We need to revisit and change this logic to be more consistent */
6718 if (blockSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1+1) {
6719 cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
6720 FORWARD_IF_ERROR(cBlockSize, "Nocompress block failed");
6721 DEBUGLOG(5, "Block too small, writing out nocompress block: cSize: %zu", cBlockSize);
6722 cSize += cBlockSize;
6723 ip += blockSize;
6724 op += cBlockSize;
6725 remaining -= blockSize;
6726 dstCapacity -= cBlockSize;
6727 continue;
6728 }
6729
6730 RETURN_ERROR_IF(dstCapacity < ZSTD_blockHeaderSize, dstSize_tooSmall, "not enough dstCapacity to write a new compressed block");
6731 compressedSeqsSize = ZSTD_entropyCompressSeqStore(&cctx->seqStore,
6732 &cctx->blockState.prevCBlock->entropy, &cctx->blockState.nextCBlock->entropy,
6733 &cctx->appliedParams,
6734 op + ZSTD_blockHeaderSize /* Leave space for block header */, dstCapacity - ZSTD_blockHeaderSize,
6735 blockSize,
6736 cctx->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */,
6737 cctx->bmi2);
6738 FORWARD_IF_ERROR(compressedSeqsSize, "Compressing sequences of block failed");
6739 DEBUGLOG(5, "Compressed sequences size: %zu", compressedSeqsSize);
6740
6741 if (!cctx->isFirstBlock &&
6742 ZSTD_maybeRLE(&cctx->seqStore) &&
6743 ZSTD_isRLE(ip, blockSize)) {
6744 /* We don't want to emit our first block as a RLE even if it qualifies because
6745 * doing so will cause the decoder (cli only) to throw a "should consume all input error."
6746 * This is only an issue for zstd <= v1.4.3
6747 */
6748 compressedSeqsSize = 1;
6749 }
6750
6751 if (compressedSeqsSize == 0) {
6752 /* ZSTD_noCompressBlock writes the block header as well */
6753 cBlockSize = ZSTD_noCompressBlock(op, dstCapacity, ip, blockSize, lastBlock);
6754 FORWARD_IF_ERROR(cBlockSize, "ZSTD_noCompressBlock failed");
6755 DEBUGLOG(5, "Writing out nocompress block, size: %zu", cBlockSize);
6756 } else if (compressedSeqsSize == 1) {
6757 cBlockSize = ZSTD_rleCompressBlock(op, dstCapacity, *ip, blockSize, lastBlock);
6758 FORWARD_IF_ERROR(cBlockSize, "ZSTD_rleCompressBlock failed");
6759 DEBUGLOG(5, "Writing out RLE block, size: %zu", cBlockSize);
6760 } else {
6761 U32 cBlockHeader;
6762 /* Error checking and repcodes update */
6763 ZSTD_blockState_confirmRepcodesAndEntropyTables(&cctx->blockState);
6764 if (cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
6765 cctx->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
6766
6767 /* Write block header into beginning of block*/
6768 cBlockHeader = lastBlock + (((U32)bt_compressed)<<1) + (U32)(compressedSeqsSize << 3);
6769 MEM_writeLE24(op, cBlockHeader);
6770 cBlockSize = ZSTD_blockHeaderSize + compressedSeqsSize;
6771 DEBUGLOG(5, "Writing out compressed block, size: %zu", cBlockSize);
6772 }
6773
6774 cSize += cBlockSize;
6775
6776 if (lastBlock) {
6777 break;
6778 } else {
6779 ip += blockSize;
6780 op += cBlockSize;
6781 remaining -= blockSize;
6782 dstCapacity -= cBlockSize;
6783 cctx->isFirstBlock = 0;
6784 }
6785 DEBUGLOG(5, "cSize running total: %zu (remaining dstCapacity=%zu)", cSize, dstCapacity);
6786 }
6787
6788 DEBUGLOG(4, "cSize final total: %zu", cSize);
6789 return cSize;
6790}
6791
6792size_t ZSTD_compressSequences(ZSTD_CCtx* cctx,
6793 void* dst, size_t dstCapacity,
6794 const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
6795 const void* src, size_t srcSize)
6796{
6797 BYTE* op = (BYTE*)dst;
6798 size_t cSize = 0;
6799 size_t compressedBlocksSize = 0;
6800 size_t frameHeaderSize = 0;
6801
6802 /* Transparent initialization stage, same as compressStream2() */
6803 DEBUGLOG(4, "ZSTD_compressSequences (dstCapacity=%zu)", dstCapacity);
6804 assert(cctx != NULL);
6805 FORWARD_IF_ERROR(ZSTD_CCtx_init_compressStream2(cctx, ZSTD_e_end, srcSize), "CCtx initialization failed");
6806 /* Begin writing output, starting with frame header */
6807 frameHeaderSize = ZSTD_writeFrameHeader(op, dstCapacity, &cctx->appliedParams, srcSize, cctx->dictID);
6808 op += frameHeaderSize;
6809 dstCapacity -= frameHeaderSize;
6810 cSize += frameHeaderSize;
6811 if (cctx->appliedParams.fParams.checksumFlag && srcSize) {
6812 XXH64_update(&cctx->xxhState, src, srcSize);
6813 }
6814 /* cSize includes block header size and compressed sequences size */
6815 compressedBlocksSize = ZSTD_compressSequences_internal(cctx,
6816 op, dstCapacity,
6817 inSeqs, inSeqsSize,
6818 src, srcSize);
6819 FORWARD_IF_ERROR(compressedBlocksSize, "Compressing blocks failed!");
6820 cSize += compressedBlocksSize;
6821 dstCapacity -= compressedBlocksSize;
6822
6823 if (cctx->appliedParams.fParams.checksumFlag) {
6824 U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
6825 RETURN_ERROR_IF(dstCapacity<4, dstSize_tooSmall, "no room for checksum");
6826 DEBUGLOG(4, "Write checksum : %08X", (unsigned)checksum);
6827 MEM_writeLE32((char*)dst + cSize, checksum);
6828 cSize += 4;
6829 }
6830
6831 DEBUGLOG(4, "Final compressed size: %zu", cSize);
6832 return cSize;
6833}
6834
6835/*====== Finalize ======*/
6836
6837static ZSTD_inBuffer inBuffer_forEndFlush(const ZSTD_CStream* zcs)
6838{
6839 const ZSTD_inBuffer nullInput = { NULL, 0, 0 };
6840 const int stableInput = (zcs->appliedParams.inBufferMode == ZSTD_bm_stable);
6841 return stableInput ? zcs->expectedInBuffer : nullInput;
6842}
6843
6844/*! ZSTD_flushStream() :
6845 * @return : amount of data remaining to flush */
6846size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
6847{
6848 ZSTD_inBuffer input = inBuffer_forEndFlush(zcs);
6849 input.size = input.pos; /* do not ingest more input during flush */
6850 return ZSTD_compressStream2(zcs, output, &input, ZSTD_e_flush);
6851}
6852
6853
6854size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
6855{
6856 ZSTD_inBuffer input = inBuffer_forEndFlush(zcs);
6857 size_t const remainingToFlush = ZSTD_compressStream2(zcs, output, &input, ZSTD_e_end);
6858 FORWARD_IF_ERROR(remainingToFlush , "ZSTD_compressStream2(,,ZSTD_e_end) failed");
6859 if (zcs->appliedParams.nbWorkers > 0) return remainingToFlush; /* minimal estimation */
6860 /* single thread mode : attempt to calculate remaining to flush more precisely */
6861 { size_t const lastBlockSize = zcs->frameEnded ? 0 : ZSTD_BLOCKHEADERSIZE;
6862 size_t const checksumSize = (size_t)(zcs->frameEnded ? 0 : zcs->appliedParams.fParams.checksumFlag * 4);
6863 size_t const toFlush = remainingToFlush + lastBlockSize + checksumSize;
6864 DEBUGLOG(4, "ZSTD_endStream : remaining to flush : %u", (unsigned)toFlush);
6865 return toFlush;
6866 }
6867}
6868
6869
6870/*-===== Pre-defined compression levels =====-*/
6871#include "clevels.h"
6872
6873int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
6874int ZSTD_minCLevel(void) { return (int)-ZSTD_TARGETLENGTH_MAX; }
6875int ZSTD_defaultCLevel(void) { return ZSTD_CLEVEL_DEFAULT; }
6876
6877static ZSTD_compressionParameters ZSTD_dedicatedDictSearch_getCParams(int const compressionLevel, size_t const dictSize)
6878{
6879 ZSTD_compressionParameters cParams = ZSTD_getCParams_internal(compressionLevel, 0, dictSize, ZSTD_cpm_createCDict);
6880 switch (cParams.strategy) {
6881 case ZSTD_fast:
6882 case ZSTD_dfast:
6883 break;
6884 case ZSTD_greedy:
6885 case ZSTD_lazy:
6886 case ZSTD_lazy2:
6887 cParams.hashLog += ZSTD_LAZY_DDSS_BUCKET_LOG;
6888 break;
6889 case ZSTD_btlazy2:
6890 case ZSTD_btopt:
6891 case ZSTD_btultra:
6892 case ZSTD_btultra2:
6893 break;
6894 }
6895 return cParams;
6896}
6897
6898static int ZSTD_dedicatedDictSearch_isSupported(
6899 ZSTD_compressionParameters const* cParams)
6900{
6901 return (cParams->strategy >= ZSTD_greedy)
6902 && (cParams->strategy <= ZSTD_lazy2)
6903 && (cParams->hashLog > cParams->chainLog)
6904 && (cParams->chainLog <= 24);
6905}
6906
6907/**
6908 * Reverses the adjustment applied to cparams when enabling dedicated dict
6909 * search. This is used to recover the params set to be used in the working
6910 * context. (Otherwise, those tables would also grow.)
6911 */
6912static void ZSTD_dedicatedDictSearch_revertCParams(
6913 ZSTD_compressionParameters* cParams) {
6914 switch (cParams->strategy) {
6915 case ZSTD_fast:
6916 case ZSTD_dfast:
6917 break;
6918 case ZSTD_greedy:
6919 case ZSTD_lazy:
6920 case ZSTD_lazy2:
6921 cParams->hashLog -= ZSTD_LAZY_DDSS_BUCKET_LOG;
6922 if (cParams->hashLog < ZSTD_HASHLOG_MIN) {
6923 cParams->hashLog = ZSTD_HASHLOG_MIN;
6924 }
6925 break;
6926 case ZSTD_btlazy2:
6927 case ZSTD_btopt:
6928 case ZSTD_btultra:
6929 case ZSTD_btultra2:
6930 break;
6931 }
6932}
6933
6934static U64 ZSTD_getCParamRowSize(U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
6935{
6936 switch (mode) {
6937 case ZSTD_cpm_unknown:
6938 case ZSTD_cpm_noAttachDict:
6939 case ZSTD_cpm_createCDict:
6940 break;
6941 case ZSTD_cpm_attachDict:
6942 dictSize = 0;
6943 break;
6944 default:
6945 assert(0);
6946 break;
6947 }
6948 { int const unknown = srcSizeHint == ZSTD_CONTENTSIZE_UNKNOWN;
6949 size_t const addedSize = unknown && dictSize > 0 ? 500 : 0;
6950 return unknown && dictSize == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : srcSizeHint+dictSize+addedSize;
6951 }
6952}
6953
6954/*! ZSTD_getCParams_internal() :
6955 * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
6956 * Note: srcSizeHint 0 means 0, use ZSTD_CONTENTSIZE_UNKNOWN for unknown.
6957 * Use dictSize == 0 for unknown or unused.
6958 * Note: `mode` controls how we treat the `dictSize`. See docs for `ZSTD_cParamMode_e`. */
6959static ZSTD_compressionParameters ZSTD_getCParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode)
6960{
6961 U64 const rSize = ZSTD_getCParamRowSize(srcSizeHint, dictSize, mode);
6962 U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB);
6963 int row;
6964 DEBUGLOG(5, "ZSTD_getCParams_internal (cLevel=%i)", compressionLevel);
6965
6966 /* row */
6967 if (compressionLevel == 0) row = ZSTD_CLEVEL_DEFAULT; /* 0 == default */
6968 else if (compressionLevel < 0) row = 0; /* entry 0 is baseline for fast mode */
6969 else if (compressionLevel > ZSTD_MAX_CLEVEL) row = ZSTD_MAX_CLEVEL;
6970 else row = compressionLevel;
6971
6972 { ZSTD_compressionParameters cp = ZSTD_defaultCParameters[tableID][row];
6973 DEBUGLOG(5, "ZSTD_getCParams_internal selected tableID: %u row: %u strat: %u", tableID, row, (U32)cp.strategy);
6974 /* acceleration factor */
6975 if (compressionLevel < 0) {
6976 int const clampedCompressionLevel = MAX(ZSTD_minCLevel(), compressionLevel);
6977 cp.targetLength = (unsigned)(-clampedCompressionLevel);
6978 }
6979 /* refine parameters based on srcSize & dictSize */
6980 return ZSTD_adjustCParams_internal(cp, srcSizeHint, dictSize, mode, ZSTD_ps_auto);
6981 }
6982}
6983
6984/*! ZSTD_getCParams() :
6985 * @return ZSTD_compressionParameters structure for a selected compression level, srcSize and dictSize.
6986 * Size values are optional, provide 0 if not known or unused */
6987ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize)
6988{
6989 if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
6990 return ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
6991}
6992
6993/*! ZSTD_getParams() :
6994 * same idea as ZSTD_getCParams()
6995 * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
6996 * Fields of `ZSTD_frameParameters` are set to default values */
6997static ZSTD_parameters ZSTD_getParams_internal(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode) {
6998 ZSTD_parameters params;
6999 ZSTD_compressionParameters const cParams = ZSTD_getCParams_internal(compressionLevel, srcSizeHint, dictSize, mode);
7000 DEBUGLOG(5, "ZSTD_getParams (cLevel=%i)", compressionLevel);
7001 ZSTD_memset(&params, 0, sizeof(params));
7002 params.cParams = cParams;
7003 params.fParams.contentSizeFlag = 1;
7004 return params;
7005}
7006
7007/*! ZSTD_getParams() :
7008 * same idea as ZSTD_getCParams()
7009 * @return a `ZSTD_parameters` structure (instead of `ZSTD_compressionParameters`).
7010 * Fields of `ZSTD_frameParameters` are set to default values */
7011ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeHint, size_t dictSize) {
7012 if (srcSizeHint == 0) srcSizeHint = ZSTD_CONTENTSIZE_UNKNOWN;
7013 return ZSTD_getParams_internal(compressionLevel, srcSizeHint, dictSize, ZSTD_cpm_unknown);
7014}
7015
7016void ZSTD_registerSequenceProducer(
7017 ZSTD_CCtx* zc, void* mState,
7018 ZSTD_sequenceProducer_F* mFinder
7019) {
7020 if (mFinder != NULL) {
7021 ZSTD_externalMatchCtx emctx;
7022 emctx.mState = mState;
7023 emctx.mFinder = mFinder;
7024 emctx.seqBuffer = NULL;
7025 emctx.seqBufferCapacity = 0;
7026 zc->externalMatchCtx = emctx;
7027 zc->requestedParams.useSequenceProducer = 1;
7028 } else {
7029 ZSTD_memset(&zc->externalMatchCtx, 0, sizeof(zc->externalMatchCtx));
7030 zc->requestedParams.useSequenceProducer = 0;
7031 }
7032}