/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_ldm.h"

#include "../common/debug.h"
#include "../common/xxhash.h"
#include "zstd_fast.h"          /* ZSTD_fillHashTable() */
#include "zstd_double_fast.h"   /* ZSTD_fillDoubleHashTable() */
#include "zstd_ldm_geartab.h"

#define LDM_BUCKET_SIZE_LOG 3
#define LDM_MIN_MATCH_LENGTH 64
#define LDM_HASH_RLOG 7

typedef struct {
    U64 rolling;
    U64 stopMask;
} ldmRollingHashState_t;

/** ZSTD_ldm_gear_init():
 *
 * Initializes the rolling hash state such that it will honor the
 * settings in params. */
static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
{
    unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
    unsigned hashRateLog = params->hashRateLog;

    state->rolling = ~(U32)0;

    /* The choice of the splitting criterion is subject to two conditions:
     *   1. it has to trigger on average every 2^(hashRateLog) bytes;
     *   2. ideally, it has to depend on a window of minMatchLength bytes.
     *
     * In the gear hash algorithm, bit n depends on the last n bytes;
     * so in order to obtain a good quality splitting criterion it is
     * preferable to use bits with high weight.
     *
     * To match condition 1 we use a mask with hashRateLog bits set
     * and, because of the previous remark, we make sure these bits
     * have the highest possible weight while still respecting
     * condition 2.
     */
    if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
        state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
    } else {
        /* In this degenerate case we simply honor the hash rate. */
        state->stopMask = ((U64)1 << hashRateLog) - 1;
    }
}
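/* Worked example for ZSTD_ldm_gear_init() above, using the default
 * parameters: with minMatchLength = 64 and hashRateLog = 7,
 * maxBitsInMask = 64 and
 *     stopMask = ((U64)0x7F) << (64 - 7) = 0xFE00000000000000ULL,
 * so a split triggers whenever the top 7 bits of the rolling hash are all
 * zero, i.e. on average once every 2^7 = 128 bytes. */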
/** ZSTD_ldm_gear_reset()
 * Feeds [data, data + minMatchLength) into the hash without registering any
 * splits. This effectively resets the hash state. This is used when skipping
 * over data, either at the beginning of a block, or skipping sections.
 */
static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state,
                                BYTE const* data, size_t minMatchLength)
{
    U64 hash = state->rolling;
    size_t n = 0;

#define GEAR_ITER_ONCE() do {                                  \
        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
        n += 1;                                                \
    } while (0)
    while (n + 3 < minMatchLength) {
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
    }
    while (n < minMatchLength) {
        GEAR_ITER_ONCE();
    }
#undef GEAR_ITER_ONCE
}
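/* Note: feeding minMatchLength bytes amounts to a full reset because each
 * step shifts the hash left by one bit, so bit n of the state depends only
 * on the last n bytes fed in. The stop mask is confined to the low
 * MIN(minMatchLength, 64) bits, so after minMatchLength bytes every bit the
 * splitting criterion can test has been refreshed. */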
/** ZSTD_ldm_gear_feed():
 *
 * Registers in the splits array all the split points found in the first
 * size bytes following the data pointer. This function terminates when
 * either all the data has been processed or LDM_BATCH_SIZE splits are
 * present in the splits array.
 *
 * Precondition: The splits array must not be full.
 * Returns: The number of bytes processed. */
static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
                                 BYTE const* data, size_t size,
                                 size_t* splits, unsigned* numSplits)
{
    size_t n;
    U64 hash, mask;

    hash = state->rolling;
    mask = state->stopMask;
    n = 0;

#define GEAR_ITER_ONCE() do {                                  \
        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
        n += 1;                                                \
        if (UNLIKELY((hash & mask) == 0)) {                    \
            splits[*numSplits] = n;                            \
            *numSplits += 1;                                   \
            if (*numSplits == LDM_BATCH_SIZE)                  \
                goto done;                                     \
        }                                                      \
    } while (0)

    while (n + 3 < size) {
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
        GEAR_ITER_ONCE();
    }
    while (n < size) {
        GEAR_ITER_ONCE();
    }

#undef GEAR_ITER_ONCE

done:
    state->rolling = hash;
    return n;
}
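/* Sketch of the typical driver loop for ZSTD_ldm_gear_feed(), mirroring the
 * call sites below (consumeSplit() is a placeholder, not a real function):
 *
 *     while (ip < iend) {
 *         unsigned numSplits = 0;
 *         size_t const hashed = ZSTD_ldm_gear_feed(&hashState, ip,
 *                                                  (size_t)(iend - ip),
 *                                                  splits, &numSplits);
 *         unsigned n;
 *         for (n = 0; n < numSplits; n++) consumeSplit(ip + splits[n]);
 *         ip += hashed;
 *     }
 *
 * The return value may be less than the size passed in when the batch fills
 * up, so callers must always advance by the amount actually processed. */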
void ZSTD_ldm_adjustParameters(ldmParams_t* params,
                               ZSTD_compressionParameters const* cParams)
{
    params->windowLog = cParams->windowLog;
    ZSTD_STATIC_ASSERT(LDM_BUCKET_SIZE_LOG <= ZSTD_LDM_BUCKETSIZELOG_MAX);
    DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
    if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
    if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
    if (params->hashLog == 0) {
        params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
        assert(params->hashLog <= ZSTD_HASHLOG_MAX);
    }
    if (params->hashRateLog == 0) {
        params->hashRateLog = params->windowLog < params->hashLog
                            ? 0
                            : params->windowLog - params->hashLog;
    }
    params->bucketSizeLog = MIN(params->bucketSizeLog, params->hashLog);
}
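/* Worked example: with windowLog = 27 and every LDM field left at 0, the
 * defaults above give bucketSizeLog = 3, minMatchLength = 64,
 * hashLog = 27 - LDM_HASH_RLOG = 20 (assuming ZSTD_HASHLOG_MIN <= 20),
 * and hashRateLog = windowLog - hashLog = 7. */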
size_t ZSTD_ldm_getTableSize(ldmParams_t params)
{
    size_t const ldmHSize = ((size_t)1) << params.hashLog;
    size_t const ldmBucketSizeLog = MIN(params.bucketSizeLog, params.hashLog);
    size_t const ldmBucketSize = ((size_t)1) << (params.hashLog - ldmBucketSizeLog);
    size_t const totalSize = ZSTD_cwksp_alloc_size(ldmBucketSize)
                           + ZSTD_cwksp_alloc_size(ldmHSize * sizeof(ldmEntry_t));
    return params.enableLdm == ZSTD_ps_enable ? totalSize : 0;
}
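/* Sizing example (assuming ldmEntry_t is two U32 fields, i.e. 8 bytes):
 * hashLog = 20 and bucketSizeLog = 3 give 2^20 hash entries (8 MiB) plus
 * 2^(20-3) one-byte bucket offsets (128 KiB), before ZSTD_cwksp_alloc_size()
 * rounds each allocation up for alignment. */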
size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
{
    return params.enableLdm == ZSTD_ps_enable ? (maxChunkSize / params.minMatchLength) : 0;
}

/** ZSTD_ldm_getBucket() :
 *  Returns a pointer to the start of the bucket associated with hash. */
static ldmEntry_t* ZSTD_ldm_getBucket(
        ldmState_t* ldmState, size_t hash, ldmParams_t const ldmParams)
{
    return ldmState->hashTable + (hash << ldmParams.bucketSizeLog);
}

/** ZSTD_ldm_insertEntry() :
 *  Insert the entry with corresponding hash into the hash table */
static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
                                 size_t const hash, const ldmEntry_t entry,
                                 ldmParams_t const ldmParams)
{
    BYTE* const pOffset = ldmState->bucketOffsets + hash;
    unsigned const offset = *pOffset;

    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
    *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));
}
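/* Layout note: the hash table holds 2^hashLog entries organized as
 * 2^(hashLog - bucketSizeLog) buckets of 2^bucketSizeLog entries each.
 * bucketOffsets[hash] cycles through the slots of its bucket, so each insert
 * overwrites the oldest entry in that bucket, ring-buffer style. */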
/** ZSTD_ldm_countBackwardsMatch() :
 *  Returns the number of bytes that match backwards before pIn and pMatch.
 *
 *  We count only bytes where pMatch >= pMatchBase and pIn >= pAnchor. */
static size_t ZSTD_ldm_countBackwardsMatch(
        const BYTE* pIn, const BYTE* pAnchor,
        const BYTE* pMatch, const BYTE* pMatchBase)
{
    size_t matchLength = 0;
    while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
        pIn--;
        pMatch--;
        matchLength++;
    }
    return matchLength;
}
/** ZSTD_ldm_countBackwardsMatch_2segments() :
 *  Returns the number of bytes that match backwards from pMatch,
 *  even when the backwards match spans 2 different segments.
 *
 *  On reaching `pMatchBase`, it continues counting backwards from
 *  `pExtDictEnd`. */
static size_t ZSTD_ldm_countBackwardsMatch_2segments(
        const BYTE* pIn, const BYTE* pAnchor,
        const BYTE* pMatch, const BYTE* pMatchBase,
        const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
{
    size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
    if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
        /* If the backwards match is entirely in the extDict or prefix, return immediately. */
        return matchLength;
    }
    DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
    matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
    DEBUGLOG(7, "final backwards match length = %zu", matchLength);
    return matchLength;
}
/** ZSTD_ldm_fillFastTables() :
 *
 *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
 *  This is similar to ZSTD_loadDictionaryContent.
 *
 *  The tables for the other strategies are filled within their
 *  block compressors. */
static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
                                      void const* end)
{
    const BYTE* const iend = (const BYTE*)end;

    switch(ms->cParams.strategy)
    {
    case ZSTD_fast:
        ZSTD_fillHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);
        break;

    case ZSTD_dfast:
#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
        ZSTD_fillDoubleHashTable(ms, iend, ZSTD_dtlm_fast, ZSTD_tfp_forCCtx);
#else
        assert(0); /* shouldn't be called: cparams should've been adjusted. */
#endif
        break;

    case ZSTD_greedy:
    case ZSTD_lazy:
    case ZSTD_lazy2:
    case ZSTD_btlazy2:
    case ZSTD_btopt:
    case ZSTD_btultra:
    case ZSTD_btultra2:
        break;
    default:
        assert(0);  /* not possible : not a valid strategy id */
    }

    return 0;
}
void ZSTD_ldm_fillHashTable(
        ldmState_t* ldmState, const BYTE* ip,
        const BYTE* iend, ldmParams_t const* params)
{
    U32 const minMatchLength = params->minMatchLength;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const istart = ip;
    ldmRollingHashState_t hashState;
    size_t* const splits = ldmState->splitIndices;
    unsigned numSplits;

    DEBUGLOG(5, "ZSTD_ldm_fillHashTable");

    ZSTD_ldm_gear_init(&hashState, params);
    while (ip < iend) {
        size_t hashed;
        unsigned n;

        numSplits = 0;
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);

        for (n = 0; n < numSplits; n++) {
            if (ip + splits[n] >= istart + minMatchLength) {
                BYTE const* const split = ip + splits[n] - minMatchLength;
                U64 const xxhash = XXH64(split, minMatchLength, 0);
                U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
                ldmEntry_t entry;

                entry.offset = (U32)(split - base);
                entry.checksum = (U32)(xxhash >> 32);
                ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
            }
        }

        ip += hashed;
    }
}
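/* Note on the hash split used above and in sequence generation below: each
 * candidate position hashes its preceding minMatchLength bytes with XXH64;
 * the low hBits of the 64-bit value select the bucket, while the high 32
 * bits are stored as a checksum that lets lookups cheaply reject collisions
 * before any byte comparison. */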
/** ZSTD_ldm_limitTableUpdate() :
 *
 * Sets ms->nextToUpdate to a position corresponding closer to anchor
 * if it is far away
 * (after a long match, only update tables a limited amount). */
static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
{
    U32 const curr = (U32)(anchor - ms->window.base);
    if (curr > ms->nextToUpdate + 1024) {
        ms->nextToUpdate =
            curr - MIN(512, curr - ms->nextToUpdate - 1024);
    }
}
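/* Example: with nextToUpdate = 0 and curr = 10000, the function above moves
 * nextToUpdate to 10000 - 512 = 9488, so at most 512 bytes just before the
 * anchor are (re)indexed after a long match. */

/** ZSTD_ldm_generateSequences_internal() :
 *  Generates LDM sequences for one chunk of input. Returns the number of
 *  trailing bytes not covered by a sequence (leftover literals), or an error
 *  code (dstSize_tooSmall) if the sequence store runs out of capacity. */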
static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_ldm_generateSequences_internal(
        ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
        ldmParams_t const* params, void const* src, size_t srcSize)
{
    /* LDM parameters */
    int const extDict = ZSTD_window_hasExtDict(ldmState->window);
    U32 const minMatchLength = params->minMatchLength;
    U32 const entsPerBucket = 1U << params->bucketSizeLog;
    U32 const hBits = params->hashLog - params->bucketSizeLog;
    /* Prefix and extDict parameters */
    U32 const dictLimit = ldmState->window.dictLimit;
    U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
    BYTE const* const base = ldmState->window.base;
    BYTE const* const dictBase = extDict ? ldmState->window.dictBase : NULL;
    BYTE const* const dictStart = extDict ? dictBase + lowestIndex : NULL;
    BYTE const* const dictEnd = extDict ? dictBase + dictLimit : NULL;
    BYTE const* const lowPrefixPtr = base + dictLimit;
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    BYTE const* const ilimit = iend - HASH_READ_SIZE;
    /* Input positions */
    BYTE const* anchor = istart;
    BYTE const* ip = istart;
    /* Rolling hash state */
    ldmRollingHashState_t hashState;
    /* Arrays for staged-processing */
    size_t* const splits = ldmState->splitIndices;
    ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
    unsigned numSplits;

    if (srcSize < minMatchLength)
        return iend - anchor;

    /* Initialize the rolling hash state with the first minMatchLength bytes */
    ZSTD_ldm_gear_init(&hashState, params);
    ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength);
    ip += minMatchLength;

    while (ip < ilimit) {
        size_t hashed;
        unsigned n;

        numSplits = 0;
        hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
                                    splits, &numSplits);

        for (n = 0; n < numSplits; n++) {
            BYTE const* const split = ip + splits[n] - minMatchLength;
            U64 const xxhash = XXH64(split, minMatchLength, 0);
            U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));

            candidates[n].split = split;
            candidates[n].hash = hash;
            candidates[n].checksum = (U32)(xxhash >> 32);
            candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
            PREFETCH_L1(candidates[n].bucket);
        }

        for (n = 0; n < numSplits; n++) {
            size_t forwardMatchLength = 0, backwardMatchLength = 0,
                   bestMatchLength = 0, mLength;
            U32 offset;
            BYTE const* const split = candidates[n].split;
            U32 const checksum = candidates[n].checksum;
            U32 const hash = candidates[n].hash;
            ldmEntry_t* const bucket = candidates[n].bucket;
            ldmEntry_t const* cur;
            ldmEntry_t const* bestEntry = NULL;
            ldmEntry_t newEntry;

            newEntry.offset = (U32)(split - base);
            newEntry.checksum = checksum;

            /* If a split point would generate a sequence overlapping with
             * the previous one, we merely register it in the hash table and
             * move on */
            if (split < anchor) {
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
                continue;
            }

            for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
                size_t curForwardMatchLength, curBackwardMatchLength,
                       curTotalMatchLength;
                if (cur->checksum != checksum || cur->offset <= lowestIndex) {
                    continue;
                }
                if (extDict) {
                    BYTE const* const curMatchBase =
                        cur->offset < dictLimit ? dictBase : base;
                    BYTE const* const pMatch = curMatchBase + cur->offset;
                    BYTE const* const matchEnd =
                        cur->offset < dictLimit ? dictEnd : iend;
                    BYTE const* const lowMatchPtr =
                        cur->offset < dictLimit ? dictStart : lowPrefixPtr;
                    curForwardMatchLength =
                        ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
                            split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
                } else { /* !extDict */
                    BYTE const* const pMatch = base + cur->offset;
                    curForwardMatchLength = ZSTD_count(split, pMatch, iend);
                    if (curForwardMatchLength < minMatchLength) {
                        continue;
                    }
                    curBackwardMatchLength =
                        ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
                }
                curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;

                if (curTotalMatchLength > bestMatchLength) {
                    bestMatchLength = curTotalMatchLength;
                    forwardMatchLength = curForwardMatchLength;
                    backwardMatchLength = curBackwardMatchLength;
                    bestEntry = cur;
                }
            }

            /* No match found -- insert an entry into the hash table
             * and process the next candidate match */
            if (bestEntry == NULL) {
                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
                continue;
            }

            /* Match found */
            offset = (U32)(split - base) - bestEntry->offset;
            mLength = forwardMatchLength + backwardMatchLength;
            {
                rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;

                /* Out of sequence storage */
                if (rawSeqStore->size == rawSeqStore->capacity)
                    return ERROR(dstSize_tooSmall);
                seq->litLength = (U32)(split - backwardMatchLength - anchor);
                seq->matchLength = (U32)mLength;
                seq->offset = offset;
                rawSeqStore->size++;
            }

            /* Insert the current entry into the hash table --- it must be
             * done after the previous block to avoid clobbering bestEntry */
            ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);

            anchor = split + forwardMatchLength;

            /* If we find a match that ends after the data that we've hashed,
             * then we have a repeating, overlapping pattern. E.g. all zeros.
             * If one repetition of the pattern matches our `stopMask` then all
             * repetitions will. We don't need to insert them all into our
             * table, only the first one. So skip over overlapping matches.
             * This is a major speed boost (20x) for compressing a single byte
             * repeated, when that byte ends up in the table.
             */
            if (anchor > ip + hashed) {
                ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength);
                /* Continue the outer loop at anchor (ip + hashed == anchor). */
                ip = anchor - hashed;
                break;
            }
        }

        ip += hashed;
    }

    return iend - anchor;
}
/*! ZSTD_ldm_reduceTable() :
 *  reduce table indexes by `reducerValue` */
static void ZSTD_ldm_reduceTable(ldmEntry_t* const table, U32 const size,
                                 U32 const reducerValue)
{
    U32 u;
    for (u = 0; u < size; u++) {
        if (table[u].offset < reducerValue) table[u].offset = 0;
        else table[u].offset -= reducerValue;
    }
}
size_t ZSTD_ldm_generateSequences(
        ldmState_t* ldmState, rawSeqStore_t* sequences,
        ldmParams_t const* params, void const* src, size_t srcSize)
{
    U32 const maxDist = 1U << params->windowLog;
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    size_t const kMaxChunkSize = 1 << 20;
    size_t const nbChunks = (srcSize / kMaxChunkSize) + ((srcSize % kMaxChunkSize) != 0);
    size_t chunk;
    size_t leftoverSize = 0;

    assert(ZSTD_CHUNKSIZE_MAX >= kMaxChunkSize);
    /* Check that ZSTD_window_update() has been called for this chunk prior
     * to passing it to this function.
     */
    assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
    /* The input could be very large (in zstdmt), so it must be broken up into
     * chunks to enforce the maximum distance and handle overflow correction.
     */
    assert(sequences->pos <= sequences->size);
    assert(sequences->size <= sequences->capacity);
    for (chunk = 0; chunk < nbChunks && sequences->size < sequences->capacity; ++chunk) {
        BYTE const* const chunkStart = istart + chunk * kMaxChunkSize;
        size_t const remaining = (size_t)(iend - chunkStart);
        BYTE const *const chunkEnd =
            (remaining < kMaxChunkSize) ? iend : chunkStart + kMaxChunkSize;
        size_t const chunkSize = chunkEnd - chunkStart;
        size_t newLeftoverSize;
        size_t const prevSize = sequences->size;

        assert(chunkStart < iend);
        /* 1. Perform overflow correction if necessary. */
        if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd)) {
            U32 const ldmHSize = 1U << params->hashLog;
            U32 const correction = ZSTD_window_correctOverflow(
                &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
            ZSTD_ldm_reduceTable(ldmState->hashTable, ldmHSize, correction);
            /* invalidate dictionaries on overflow correction */
            ldmState->loadedDictEnd = 0;
        }
        /* 2. We enforce the maximum offset allowed.
         *
         * kMaxChunkSize should be small enough that we don't lose too much of
         * the window through early invalidation.
         * TODO: * Test the chunk size.
         *       * Try invalidation after the sequence generation and test the
         *         offset against maxDist directly.
         *
         * NOTE: Because of dictionaries + sequence splitting we MUST make sure
         * that any offset used is valid at the END of the sequence, since it may
         * be split into two sequences. This condition holds when using
         * ZSTD_window_enforceMaxDist(), but if we move to checking offsets
         * against maxDist directly, we'll have to carefully handle that case.
         */
        ZSTD_window_enforceMaxDist(&ldmState->window, chunkEnd, maxDist, &ldmState->loadedDictEnd, NULL);
        /* 3. Generate the sequences for the chunk, and get newLeftoverSize. */
        newLeftoverSize = ZSTD_ldm_generateSequences_internal(
            ldmState, sequences, params, chunkStart, chunkSize);
        if (ZSTD_isError(newLeftoverSize))
            return newLeftoverSize;
        /* 4. We add the leftover literals from previous iterations to the first
         * newly generated sequence, or add the `newLeftoverSize` if none are
         * generated.
         */
        /* Prepend the leftover literals from the last call */
        if (prevSize < sequences->size) {
            sequences->seq[prevSize].litLength += (U32)leftoverSize;
            leftoverSize = newLeftoverSize;
        } else {
            assert(newLeftoverSize == chunkSize);
            leftoverSize += chunkSize;
        }
    }
    return 0;
}
void
ZSTD_ldm_skipSequences(rawSeqStore_t* rawSeqStore, size_t srcSize, U32 const minMatch)
{
    while (srcSize > 0 && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq* seq = rawSeqStore->seq + rawSeqStore->pos;
        if (srcSize <= seq->litLength) {
            /* Skip past srcSize literals */
            seq->litLength -= (U32)srcSize;
            return;
        }
        srcSize -= seq->litLength;
        seq->litLength = 0;
        if (srcSize < seq->matchLength) {
            /* Skip past the first srcSize of the match */
            seq->matchLength -= (U32)srcSize;
            if (seq->matchLength < minMatch) {
                /* The match is too short, omit it */
                if (rawSeqStore->pos + 1 < rawSeqStore->size) {
                    seq[1].litLength += seq[0].matchLength;
                }
                rawSeqStore->pos++;
            }
            return;
        }
        srcSize -= seq->matchLength;
        seq->matchLength = 0;
        rawSeqStore->pos++;
    }
}
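/* Worked example for ZSTD_ldm_skipSequences() above: skipping srcSize = 100
 * bytes over a sequence with litLength = 60 and matchLength = 50 first
 * consumes all 60 literals, then trims the match to 50 - 40 = 10 bytes;
 * if 10 < minMatch, the shortened match is dropped and its 10 bytes are
 * folded into the next sequence's literals. */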
/**
 * If the sequence length is longer than remaining then the sequence is split
 * between this block and the next.
 *
 * Returns the current sequence to handle, or if the rest of the block should
 * be literals, it returns a sequence with offset == 0.
 */
static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
                                 U32 const remaining, U32 const minMatch)
{
    rawSeq sequence = rawSeqStore->seq[rawSeqStore->pos];
    assert(sequence.offset > 0);
    /* Likely: No partial sequence */
    if (remaining >= sequence.litLength + sequence.matchLength) {
        rawSeqStore->pos++;
        return sequence;
    }
    /* Cut the sequence short (offset == 0 ==> rest is literals). */
    if (remaining <= sequence.litLength) {
        sequence.offset = 0;
    } else if (remaining < sequence.litLength + sequence.matchLength) {
        sequence.matchLength = remaining - sequence.litLength;
        if (sequence.matchLength < minMatch) {
            sequence.offset = 0;
        }
    }
    /* Skip past `remaining` bytes for the future sequences. */
    ZSTD_ldm_skipSequences(rawSeqStore, remaining, minMatch);
    return sequence;
}
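/* Example: with remaining = 100 and a stored sequence of litLength = 40,
 * matchLength = 120, the returned sequence's match is cut to 100 - 40 = 60
 * bytes for this block; the other 60 match bytes stay in the store (via
 * ZSTD_ldm_skipSequences) and are re-emitted with the next block. A returned
 * offset of 0 instead tells the caller to emit the rest as literals. */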
void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
        if (currPos >= currSeq.litLength + currSeq.matchLength) {
            currPos -= currSeq.litLength + currSeq.matchLength;
            rawSeqStore->pos++;
        } else {
            rawSeqStore->posInSequence = currPos;
            break;
        }
    }
    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
        rawSeqStore->posInSequence = 0;
    }
}
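/* Example: with posInSequence = 0 and a first sequence of litLength = 10,
 * matchLength = 20, skipping nbBytes = 25 lands inside that sequence
 * (25 < 30), so pos is unchanged and posInSequence becomes 25; skipping 35
 * instead consumes the whole sequence and records posInSequence = 5 into the
 * next one (assuming the next sequence is at least that long). */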
size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
    ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
    ZSTD_paramSwitch_e useRowMatchFinder,
    void const* src, size_t srcSize)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    unsigned const minMatch = cParams->minMatch;
    ZSTD_blockCompressor const blockCompressor =
        ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
    /* Input bounds */
    BYTE const* const istart = (BYTE const*)src;
    BYTE const* const iend = istart + srcSize;
    /* Input positions */
    BYTE const* ip = istart;

    DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
    /* If using opt parser, use LDMs only as candidates rather than always accepting them */
    if (cParams->strategy >= ZSTD_btopt) {
        size_t lastLLSize;
        ms->ldmSeqStore = rawSeqStore;
        lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
        ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
        return lastLLSize;
    }

    assert(rawSeqStore->pos <= rawSeqStore->size);
    assert(rawSeqStore->size <= rawSeqStore->capacity);
    /* Loop through each sequence and apply the block compressor to the literals */
    while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
        /* maybeSplitSequence updates rawSeqStore->pos */
        rawSeq const sequence = maybeSplitSequence(rawSeqStore,
                                                   (U32)(iend - ip), minMatch);
        /* End signal */
        if (sequence.offset == 0)
            break;

        assert(ip + sequence.litLength + sequence.matchLength <= iend);

        /* Fill tables for block compressor */
        ZSTD_ldm_limitTableUpdate(ms, ip);
        ZSTD_ldm_fillFastTables(ms, ip);
        /* Run the block compressor */
        DEBUGLOG(5, "pos %u : calling block compressor on segment of size %u", (unsigned)(ip-istart), sequence.litLength);
        {
            int i;
            size_t const newLitLength =
                blockCompressor(ms, seqStore, rep, ip, sequence.litLength);
            ip += sequence.litLength;
            /* Update the repcodes */
            for (i = ZSTD_REP_NUM - 1; i > 0; i--)
                rep[i] = rep[i-1];
            rep[0] = sequence.offset;
            /* Store the sequence */
            ZSTD_storeSeq(seqStore, newLitLength, ip - newLitLength, iend,
                          OFFSET_TO_OFFBASE(sequence.offset),
                          sequence.matchLength);
            ip += sequence.matchLength;
        }
    }
    /* Fill the tables for the block compressor */
    ZSTD_ldm_limitTableUpdate(ms, ip);
    ZSTD_ldm_fillFastTables(ms, ip);
    /* Compress the last literals */
    return blockCompressor(ms, seqStore, rep, ip, iend - ip);
}