/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/*-*************************************
*  Dependencies
***************************************/
#include "zstd_compress_superblock.h"

#include "../common/zstd_internal.h"  /* ZSTD_getSequenceLength */
#include "hist.h"                     /* HIST_countFast_wksp */
#include "zstd_compress_internal.h"   /* ZSTD_[huf|fse|entropy]CTablesMetadata_t */
#include "zstd_compress_sequences.h"
#include "zstd_compress_literals.h"

/** ZSTD_compressSubBlock_literal() :
 *  Compresses the literals section for a sub-block.
 *  When we have to write the Huffman table we will sometimes choose a header
 *  size larger than necessary. This is because we have to pick the header size
 *  before we know the table size + compressed size, so we have a bound on the
 *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
 *
 *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
 *  in writing the header, otherwise it is set to 0.
 *
 *  hufMetadata->hType has literals block type info.
 *  If it is set_basic, all sub-blocks' literals sections will be Raw_Literals_Block.
 *  If it is set_rle, all sub-blocks' literals sections will be RLE_Literals_Block.
 *  If it is set_compressed, the first sub-block's literals section will be Compressed_Literals_Block
 *  and the following sub-blocks' literals sections will be Treeless_Literals_Block.
 *  If it is set_repeat, all sub-blocks' literals sections will be Treeless_Literals_Block.
 *  @return : compressed size of literals section of a sub-block
 *            Or 0 if unable to compress.
 *            Or error code */
static size_t
ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
                              const ZSTD_hufCTablesMetadata_t* hufMetadata,
                              const BYTE* literals, size_t litSize,
                              void* dst, size_t dstSize,
                              const int bmi2, int writeEntropy, int* entropyWritten)
{
    size_t const header = writeEntropy ? 200 : 0;
    size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart + lhSize;
    U32 const singleStream = lhSize == 3;
    symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
    size_t cLitSize = 0;

    DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);

    *entropyWritten = 0;
    if (litSize == 0 || hufMetadata->hType == set_basic) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal");
        return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
    } else if (hufMetadata->hType == set_rle) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal using rle literal");
        return ZSTD_compressRleLiteralsBlock(dst, dstSize, literals, litSize);
    }

    assert(litSize > 0);
    assert(hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat);

    if (writeEntropy && hufMetadata->hType == set_compressed) {
        ZSTD_memcpy(op, hufMetadata->hufDesBuffer, hufMetadata->hufDesSize);
        op += hufMetadata->hufDesSize;
        cLitSize += hufMetadata->hufDesSize;
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
    }

    {   int const flags = bmi2 ? HUF_flags_bmi2 : 0;
        const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable, flags)
                                          : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable, flags);
        op += cSize;
        cLitSize += cSize;
        if (cSize == 0 || ERR_isError(cSize)) {
            DEBUGLOG(5, "Failed to write entropy tables %s", ZSTD_getErrorName(cSize));
            return 0;
        }
        /* If we expand and we aren't writing a header then emit uncompressed */
        if (!writeEntropy && cLitSize >= litSize) {
            DEBUGLOG(5, "ZSTD_compressSubBlock_literal using raw literal because uncompressible");
            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
        }
        /* If we are writing headers then allow expansion that doesn't change our header size. */
        if (lhSize < (size_t)(3 + (cLitSize >= 1 KB) + (cLitSize >= 16 KB))) {
            assert(cLitSize > litSize);
            DEBUGLOG(5, "Literals expanded beyond allowed header size");
            return ZSTD_noCompressLiterals(dst, dstSize, literals, litSize);
        }
        DEBUGLOG(5, "ZSTD_compressSubBlock_literal (cSize=%zu)", cSize);
    }

    /* Build header */
    switch(lhSize)
    {
    case 3: /* 2 - 2 - 10 - 10 */
        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<14);
            MEM_writeLE24(ostart, lhc);
            break;
        }
    case 4: /* 2 - 2 - 14 - 14 */
        {   U32 const lhc = hType + (2 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<18);
            MEM_writeLE32(ostart, lhc);
            break;
        }
    case 5: /* 2 - 2 - 18 - 18 */
        {   U32 const lhc = hType + (3 << 2) + ((U32)litSize<<4) + ((U32)cLitSize<<22);
            MEM_writeLE32(ostart, lhc);
            ostart[4] = (BYTE)(cLitSize >> 10);
            break;
        }
    default: /* not possible : lhSize is {3,4,5} */
        assert(0);
    }
    *entropyWritten = 1;
    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)litSize, (U32)(op-ostart));
    return op-ostart;
}
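
/* Editorial sketch (not part of upstream zstd, kept out of the build with
 * "#if 0") : illustrates how the 3-byte literals section header assembled in
 * the "case 3" branch above packs its four fields, from least significant bit
 * upward : 2 bits block type, 2 bits size format, 10 bits regenerated size,
 * 10 bits compressed size. The helper name and values below are hypothetical. */
#if 0
static U32 example_packLiteralsHeader3(void)
{
    U32 const hType    = set_compressed;  /* 2 == Compressed_Literals_Block */
    U32 const sizeFmt  = 0;               /* 0 == single stream in the 3-byte format */
    U32 const litSize  = 500;             /* regenerated size, must fit in 10 bits */
    U32 const cLitSize = 300;             /* compressed size, must fit in 10 bits */
    return hType + (sizeFmt << 2) + (litSize << 4) + (cLitSize << 14);
}
#endif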

static size_t
ZSTD_seqDecompressedSize(seqStore_t const* seqStore,
                         const seqDef* sequences, size_t nbSeq,
                         size_t litSize, int lastSequence)
{
    const seqDef* const sstart = sequences;
    const seqDef* const send = sequences + nbSeq;
    const seqDef* sp = sstart;
    size_t matchLengthSum = 0;
    size_t litLengthSum = 0;
    (void)(litLengthSum); /* suppress unused variable warning on some environments */
    while (send-sp > 0) {
        ZSTD_sequenceLength const seqLen = ZSTD_getSequenceLength(seqStore, sp);
        litLengthSum += seqLen.litLength;
        matchLengthSum += seqLen.matchLength;
        sp++;
    }
    assert(litLengthSum <= litSize);
    if (!lastSequence) {
        assert(litLengthSum == litSize);
    }
    return matchLengthSum + litSize;
}
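
/* Editorial note (not part of upstream zstd) : a sub-block regenerates
 * sum(matchLength) + litSize bytes. litSize covers the literals that precede
 * each of the sub-block's sequences and, when lastSequence is set, also the
 * trailing literals that follow the final sequence, which is why litLengthSum
 * may then be smaller than litSize. */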

/** ZSTD_compressSubBlock_sequences() :
 *  Compresses sequences section for a sub-block.
 *  fseMetadata->llType, fseMetadata->ofType, and fseMetadata->mlType have
 *  symbol compression modes for the super-block.
 *  The first successfully compressed block will have these in its header.
 *  We set entropyWritten=1 when we succeed in compressing the sequences.
 *  The following sub-blocks will always have repeat mode.
 *  @return : compressed size of sequences section of a sub-block
 *            Or 0 if it is unable to compress
 *            Or error code. */
static size_t
ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
                                const ZSTD_fseCTablesMetadata_t* fseMetadata,
                                const seqDef* sequences, size_t nbSeq,
                                const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
                                const ZSTD_CCtx_params* cctxParams,
                                void* dst, size_t dstCapacity,
                                const int bmi2, int writeEntropy, int* entropyWritten)
{
    const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    BYTE* seqHead;

    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (nbSeq=%zu, writeEntropy=%d, longOffsets=%d)", nbSeq, writeEntropy, longOffsets);

    *entropyWritten = 0;
    /* Sequences Header */
    RETURN_ERROR_IF((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead*/,
                    dstSize_tooSmall, "");
    if (nbSeq < 0x7F)
        *op++ = (BYTE)nbSeq;
    else if (nbSeq < LONGNBSEQ)
        op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
    else
        op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
    if (nbSeq==0) {
        return op - ostart;
    }

    /* seqHead : flags for FSE encoding type */
    seqHead = op++;

    DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (seqHeadSize=%u)", (unsigned)(op-ostart));

    if (writeEntropy) {
        const U32 LLtype = fseMetadata->llType;
        const U32 Offtype = fseMetadata->ofType;
        const U32 MLtype = fseMetadata->mlType;
        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (fseTablesSize=%zu)", fseMetadata->fseTablesSize);
        *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
        ZSTD_memcpy(op, fseMetadata->fseTablesBuffer, fseMetadata->fseTablesSize);
        op += fseMetadata->fseTablesSize;
    } else {
        const U32 repeat = set_repeat;
        *seqHead = (BYTE)((repeat<<6) + (repeat<<4) + (repeat<<2));
    }

    {   size_t const bitstreamSize = ZSTD_encodeSequences(
                                        op, oend - op,
                                        fseTables->matchlengthCTable, mlCode,
                                        fseTables->offcodeCTable, ofCode,
                                        fseTables->litlengthCTable, llCode,
                                        sequences, nbSeq,
                                        longOffsets, bmi2);
        FORWARD_IF_ERROR(bitstreamSize, "ZSTD_encodeSequences failed");
        op += bitstreamSize;
        /* zstd versions <= 1.3.4 mistakenly report corruption when
         * FSE_readNCount() receives a buffer < 4 bytes.
         * Fixed by https://github.com/facebook/zstd/pull/1146.
         * This can happen when the last set_compressed table present is 2
         * bytes and the bitstream is only one byte.
         * In this exceedingly rare case, we will simply emit an uncompressed
         * block, since it isn't worth optimizing.
         */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
        if (writeEntropy && fseMetadata->lastCountSize && fseMetadata->lastCountSize + bitstreamSize < 4) {
            /* NCountSize >= 2 && bitstreamSize > 0 ==> lastCountSize == 3 */
            assert(fseMetadata->lastCountSize + bitstreamSize == 3);
            DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.3.4 by "
                        "emitting an uncompressed block.");
            return 0;
        }
#endif
        DEBUGLOG(5, "ZSTD_compressSubBlock_sequences (bitstreamSize=%zu)", bitstreamSize);
    }

    /* zstd versions <= 1.4.0 mistakenly report error when
     * sequences section body size is less than 3 bytes.
     * Fixed by https://github.com/facebook/zstd/pull/1664.
     * This can happen when the previous sequences section block is compressed
     * with rle mode and the current block's sequences section is compressed
     * with repeat mode where sequences section body size can be 1 byte.
     */
#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
    if (op-seqHead < 4) {
        DEBUGLOG(5, "Avoiding bug in zstd decoder in versions <= 1.4.0 by emitting "
                    "an uncompressed block when sequences are < 4 bytes");
        return 0;
    }
#endif

    *entropyWritten = 1;
    return op - ostart;
}
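
/* Editorial sketch (not part of upstream zstd, kept out of the build with
 * "#if 0") : restates the two headers written above. The sequence count uses
 * a 1-, 2- or 3-byte encoding depending on nbSeq, and the seqHead byte packs
 * the three symbol compression modes as 2-bit fields (Literals_Lengths in
 * bits 7-6, Offsets in bits 5-4, Match_Lengths in bits 3-2, bits 1-0 unused).
 * The helper name is hypothetical. */
#if 0
static size_t example_writeNbSeq(BYTE* op, size_t nbSeq)
{
    if (nbSeq < 0x7F) {            /* 1-byte form */
        op[0] = (BYTE)nbSeq;
        return 1;
    }
    if (nbSeq < LONGNBSEQ) {       /* 2-byte form : high bit of first byte set */
        op[0] = (BYTE)((nbSeq >> 8) + 0x80);
        op[1] = (BYTE)nbSeq;
        return 2;
    }
    op[0] = 0xFF;                  /* 3-byte form : 0xFF then LE16 of the remainder */
    MEM_writeLE16(op + 1, (U16)(nbSeq - LONGNBSEQ));
    return 3;
}
#endif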

/** ZSTD_compressSubBlock() :
 *  Compresses a single sub-block.
 *  @return : compressed size of the sub-block
 *            Or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
                                    const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                    const seqDef* sequences, size_t nbSeq,
                                    const BYTE* literals, size_t litSize,
                                    const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
                                    const ZSTD_CCtx_params* cctxParams,
                                    void* dst, size_t dstCapacity,
                                    const int bmi2,
                                    int writeLitEntropy, int writeSeqEntropy,
                                    int* litEntropyWritten, int* seqEntropyWritten,
                                    U32 lastBlock)
{
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart + ZSTD_blockHeaderSize;
    DEBUGLOG(5, "ZSTD_compressSubBlock (litSize=%zu, nbSeq=%zu, writeLitEntropy=%d, writeSeqEntropy=%d, lastBlock=%d)",
                litSize, nbSeq, writeLitEntropy, writeSeqEntropy, lastBlock);
    {   size_t cLitSize = ZSTD_compressSubBlock_literal((const HUF_CElt*)entropy->huf.CTable,
                                                        &entropyMetadata->hufMetadata, literals, litSize,
                                                        op, oend-op, bmi2, writeLitEntropy, litEntropyWritten);
        FORWARD_IF_ERROR(cLitSize, "ZSTD_compressSubBlock_literal failed");
        if (cLitSize == 0) return 0;
        op += cLitSize;
    }
    {   size_t cSeqSize = ZSTD_compressSubBlock_sequences(&entropy->fse,
                                                          &entropyMetadata->fseMetadata,
                                                          sequences, nbSeq,
                                                          llCode, mlCode, ofCode,
                                                          cctxParams,
                                                          op, oend-op,
                                                          bmi2, writeSeqEntropy, seqEntropyWritten);
        FORWARD_IF_ERROR(cSeqSize, "ZSTD_compressSubBlock_sequences failed");
        if (cSeqSize == 0) return 0;
        op += cSeqSize;
    }
    /* Write block header */
    {   size_t cSize = (op-ostart)-ZSTD_blockHeaderSize;
        U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
        MEM_writeLE24(ostart, cBlockHeader24);
    }
    return op-ostart;
}
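
/* Editorial sketch (not part of upstream zstd, kept out of the build with
 * "#if 0") : layout of the 3-byte block header written above, from least
 * significant bit upward : 1 bit Last_Block flag, 2 bits Block_Type
 * (bt_compressed == 2), 21 bits Block_Size. The helper name is hypothetical. */
#if 0
static U32 example_blockHeader24(U32 lastBlock, size_t cSize)
{
    return lastBlock + (((U32)bt_compressed) << 1) + (U32)(cSize << 3);
}
#endif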

static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t litSize,
                                                const ZSTD_hufCTables_t* huf,
                                                const ZSTD_hufCTablesMetadata_t* hufMetadata,
                                                void* workspace, size_t wkspSize,
                                                int writeEntropy)
{
    unsigned* const countWksp = (unsigned*)workspace;
    unsigned maxSymbolValue = 255;
    size_t literalSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */

    if (hufMetadata->hType == set_basic) return litSize;
    else if (hufMetadata->hType == set_rle) return 1;
    else if (hufMetadata->hType == set_compressed || hufMetadata->hType == set_repeat) {
        size_t const largest = HIST_count_wksp (countWksp, &maxSymbolValue, (const BYTE*)literals, litSize, workspace, wkspSize);
        if (ZSTD_isError(largest)) return litSize;
        {   size_t cLitSizeEstimate = HUF_estimateCompressedSize((const HUF_CElt*)huf->CTable, countWksp, maxSymbolValue);
            if (writeEntropy) cLitSizeEstimate += hufMetadata->hufDesSize;
            return cLitSizeEstimate + literalSectionHeaderSize;
    }   }
    assert(0); /* impossible */
    return 0;
}
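
/* Editorial note (not part of upstream zstd) : for set_compressed/set_repeat
 * the literal-size estimate above is the Huffman entropy cost of the
 * sub-block's own symbol histogram, plus the table description when it still
 * has to be written, plus a flat 3-byte section header. */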

static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
                                                   const BYTE* codeTable, unsigned maxCode,
                                                   size_t nbSeq, const FSE_CTable* fseCTable,
                                                   const U8* additionalBits,
                                                   short const* defaultNorm, U32 defaultNormLog, U32 defaultMax,
                                                   void* workspace, size_t wkspSize)
{
    unsigned* const countWksp = (unsigned*)workspace;
    const BYTE* ctp = codeTable;
    const BYTE* const ctStart = ctp;
    const BYTE* const ctEnd = ctStart + nbSeq;
    size_t cSymbolTypeSizeEstimateInBits = 0;
    unsigned max = maxCode;

    HIST_countFast_wksp(countWksp, &max, codeTable, nbSeq, workspace, wkspSize);  /* can't fail */
    if (type == set_basic) {
        /* We selected this encoding type, so it must be valid. */
        assert(max <= defaultMax);
        cSymbolTypeSizeEstimateInBits = max <= defaultMax
                ? ZSTD_crossEntropyCost(defaultNorm, defaultNormLog, countWksp, max)
                : ERROR(GENERIC);
    } else if (type == set_rle) {
        cSymbolTypeSizeEstimateInBits = 0;
    } else if (type == set_compressed || type == set_repeat) {
        cSymbolTypeSizeEstimateInBits = ZSTD_fseBitCost(fseCTable, countWksp, max);
    }
    if (ZSTD_isError(cSymbolTypeSizeEstimateInBits)) return nbSeq * 10;
    while (ctp < ctEnd) {
        if (additionalBits) cSymbolTypeSizeEstimateInBits += additionalBits[*ctp];
        else cSymbolTypeSizeEstimateInBits += *ctp; /* for offset, offset code is also the number of additional bits */
        ctp++;
    }
    return cSymbolTypeSizeEstimateInBits / 8;
}
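
/* Editorial note (not part of upstream zstd) : the per-symbol-type estimate
 * above is computed in bits (cross-entropy against the default distribution,
 * or FSE bit cost against the actual table), the extra raw bits carried by
 * each code are added (for offsets the code value itself is the number of
 * extra bits), and the total is converted to bytes. On error it falls back to
 * a pessimistic 10 bytes per sequence. */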

static size_t ZSTD_estimateSubBlockSize_sequences(const BYTE* ofCodeTable,
                                                  const BYTE* llCodeTable,
                                                  const BYTE* mlCodeTable,
                                                  size_t nbSeq,
                                                  const ZSTD_fseCTables_t* fseTables,
                                                  const ZSTD_fseCTablesMetadata_t* fseMetadata,
                                                  void* workspace, size_t wkspSize,
                                                  int writeEntropy)
{
    size_t const sequencesSectionHeaderSize = 3; /* Use hard coded size of 3 bytes */
    size_t cSeqSizeEstimate = 0;
    if (nbSeq == 0) return sequencesSectionHeaderSize;
    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->ofType, ofCodeTable, MaxOff,
                                                             nbSeq, fseTables->offcodeCTable, NULL,
                                                             OF_defaultNorm, OF_defaultNormLog, DefaultMaxOff,
                                                             workspace, wkspSize);
    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->llType, llCodeTable, MaxLL,
                                                             nbSeq, fseTables->litlengthCTable, LL_bits,
                                                             LL_defaultNorm, LL_defaultNormLog, MaxLL,
                                                             workspace, wkspSize);
    cSeqSizeEstimate += ZSTD_estimateSubBlockSize_symbolType(fseMetadata->mlType, mlCodeTable, MaxML,
                                                             nbSeq, fseTables->matchlengthCTable, ML_bits,
                                                             ML_defaultNorm, ML_defaultNormLog, MaxML,
                                                             workspace, wkspSize);
    if (writeEntropy) cSeqSizeEstimate += fseMetadata->fseTablesSize;
    return cSeqSizeEstimate + sequencesSectionHeaderSize;
}

static size_t ZSTD_estimateSubBlockSize(const BYTE* literals, size_t litSize,
                                        const BYTE* ofCodeTable,
                                        const BYTE* llCodeTable,
                                        const BYTE* mlCodeTable,
                                        size_t nbSeq,
                                        const ZSTD_entropyCTables_t* entropy,
                                        const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                        void* workspace, size_t wkspSize,
                                        int writeLitEntropy, int writeSeqEntropy) {
    size_t cSizeEstimate = 0;
    cSizeEstimate += ZSTD_estimateSubBlockSize_literal(literals, litSize,
                                                       &entropy->huf, &entropyMetadata->hufMetadata,
                                                       workspace, wkspSize, writeLitEntropy);
    cSizeEstimate += ZSTD_estimateSubBlockSize_sequences(ofCodeTable, llCodeTable, mlCodeTable,
                                                         nbSeq, &entropy->fse, &entropyMetadata->fseMetadata,
                                                         workspace, wkspSize, writeSeqEntropy);
    return cSizeEstimate + ZSTD_blockHeaderSize;
}
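
/* Editorial note (not part of upstream zstd) : the full sub-block estimate is
 * therefore literals estimate + sequences estimate + ZSTD_blockHeaderSize
 * (3 bytes). ZSTD_compressSubBlock_multi() below compares it against
 * cctxParams->targetCBlockSize to decide where to cut sub-blocks. */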

static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMetadata)
{
    if (fseMetadata->llType == set_compressed || fseMetadata->llType == set_rle)
        return 1;
    if (fseMetadata->mlType == set_compressed || fseMetadata->mlType == set_rle)
        return 1;
    if (fseMetadata->ofType == set_compressed || fseMetadata->ofType == set_rle)
        return 1;
    return 0;
}

/** ZSTD_compressSubBlock_multi() :
 *  Breaks super-block into multiple sub-blocks and compresses them.
 *  Entropy will be written to the first block.
 *  The following blocks will use repeat mode to compress.
 *  All sub-blocks are compressed blocks (no raw or rle blocks).
 *  @return : compressed size of the super block (which is multiple ZSTD blocks)
 *            Or 0 if it failed to compress. */
static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
                                          const ZSTD_compressedBlockState_t* prevCBlock,
                                          ZSTD_compressedBlockState_t* nextCBlock,
                                          const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
                                          const ZSTD_CCtx_params* cctxParams,
                                          void* dst, size_t dstCapacity,
                                          const void* src, size_t srcSize,
                                          const int bmi2, U32 lastBlock,
                                          void* workspace, size_t wkspSize)
{
    const seqDef* const sstart = seqStorePtr->sequencesStart;
    const seqDef* const send = seqStorePtr->sequences;
    const seqDef* sp = sstart;
    const BYTE* const lstart = seqStorePtr->litStart;
    const BYTE* const lend = seqStorePtr->lit;
    const BYTE* lp = lstart;
    BYTE const* ip = (BYTE const*)src;
    BYTE const* const iend = ip + srcSize;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstCapacity;
    BYTE* op = ostart;
    const BYTE* llCodePtr = seqStorePtr->llCode;
    const BYTE* mlCodePtr = seqStorePtr->mlCode;
    const BYTE* ofCodePtr = seqStorePtr->ofCode;
    size_t targetCBlockSize = cctxParams->targetCBlockSize;
    size_t litSize, seqCount;
    int writeLitEntropy = entropyMetadata->hufMetadata.hType == set_compressed;
    int writeSeqEntropy = 1;
    int lastSequence = 0;

    DEBUGLOG(5, "ZSTD_compressSubBlock_multi (litSize=%u, nbSeq=%u)",
                (unsigned)(lend-lp), (unsigned)(send-sstart));

    litSize = 0;
    seqCount = 0;
    do {
        size_t cBlockSizeEstimate = 0;
        if (sstart == send) {
            lastSequence = 1;
        } else {
            const seqDef* const sequence = sp + seqCount;
            lastSequence = sequence == send - 1;
            litSize += ZSTD_getSequenceLength(seqStorePtr, sequence).litLength;
            seqCount++;
        }
        if (lastSequence) {
            assert(lp <= lend);
            assert(litSize <= (size_t)(lend - lp));
            litSize = (size_t)(lend - lp);
        }
        /* I think there is an optimization opportunity here.
         * Calling ZSTD_estimateSubBlockSize for every sequence can be wasteful
         * since it recalculates estimate from scratch.
         * For example, it would recount literal distribution and symbol codes every time.
         */
        cBlockSizeEstimate = ZSTD_estimateSubBlockSize(lp, litSize, ofCodePtr, llCodePtr, mlCodePtr, seqCount,
                                                       &nextCBlock->entropy, entropyMetadata,
                                                       workspace, wkspSize, writeLitEntropy, writeSeqEntropy);
        if (cBlockSizeEstimate > targetCBlockSize || lastSequence) {
            int litEntropyWritten = 0;
            int seqEntropyWritten = 0;
            const size_t decompressedSize = ZSTD_seqDecompressedSize(seqStorePtr, sp, seqCount, litSize, lastSequence);
            const size_t cSize = ZSTD_compressSubBlock(&nextCBlock->entropy, entropyMetadata,
                                                       sp, seqCount,
                                                       lp, litSize,
                                                       llCodePtr, mlCodePtr, ofCodePtr,
                                                       cctxParams,
                                                       op, oend-op,
                                                       bmi2, writeLitEntropy, writeSeqEntropy,
                                                       &litEntropyWritten, &seqEntropyWritten,
                                                       lastBlock && lastSequence);
            FORWARD_IF_ERROR(cSize, "ZSTD_compressSubBlock failed");
            if (cSize > 0 && cSize < decompressedSize) {
                DEBUGLOG(5, "Committed the sub-block");
                assert(ip + decompressedSize <= iend);
                ip += decompressedSize;
                sp += seqCount;
                lp += litSize;
                op += cSize;
                llCodePtr += seqCount;
                mlCodePtr += seqCount;
                ofCodePtr += seqCount;
                litSize = 0;
                seqCount = 0;
                /* Entropy only needs to be written once */
                if (litEntropyWritten) {
                    writeLitEntropy = 0;
                }
                if (seqEntropyWritten) {
                    writeSeqEntropy = 0;
                }
            }
        }
    } while (!lastSequence);
    if (writeLitEntropy) {
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has literal entropy tables unwritten");
        ZSTD_memcpy(&nextCBlock->entropy.huf, &prevCBlock->entropy.huf, sizeof(prevCBlock->entropy.huf));
    }
    if (writeSeqEntropy && ZSTD_needSequenceEntropyTables(&entropyMetadata->fseMetadata)) {
        /* If we haven't written our entropy tables, then we've violated our contract and
         * must emit an uncompressed block.
         */
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi has sequence entropy tables unwritten");
        return 0;
    }
    if (ip < iend) {
        size_t const cSize = ZSTD_noCompressBlock(op, oend - op, ip, iend - ip, lastBlock);
        DEBUGLOG(5, "ZSTD_compressSubBlock_multi last sub-block uncompressed, %zu bytes", (size_t)(iend - ip));
        FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
        assert(cSize != 0);
        op += cSize;
        /* We have to regenerate the repcodes because we've skipped some sequences */
        if (sp < send) {
            seqDef const* seq;
            repcodes_t rep;
            ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
            for (seq = sstart; seq < sp; ++seq) {
                ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
            }
            ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
        }
    }
    DEBUGLOG(5, "ZSTD_compressSubBlock_multi compressed");
    return op-ostart;
}
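
/* Editorial note (not part of upstream zstd) : the loop above accumulates
 * sequences until the estimated sub-block size exceeds targetCBlockSize, then
 * emits a sub-block and resets its counters. Sub-blocks that fail to compress
 * (or would expand) are simply not committed; any input left over at the end
 * is flushed as a single uncompressed block and the repcodes are recomputed
 * to account for the skipped sequences. */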

size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
                               void* dst, size_t dstCapacity,
                               void const* src, size_t srcSize,
                               unsigned lastBlock) {
    ZSTD_entropyCTablesMetadata_t entropyMetadata;

    FORWARD_IF_ERROR(ZSTD_buildBlockEntropyStats(&zc->seqStore,
          &zc->blockState.prevCBlock->entropy,
          &zc->blockState.nextCBlock->entropy,
          &zc->appliedParams,
          &entropyMetadata,
          zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");

    return ZSTD_compressSubBlock_multi(&zc->seqStore,
            zc->blockState.prevCBlock,
            zc->blockState.nextCBlock,
            &entropyMetadata,
            &zc->appliedParams,
            dst, dstCapacity,
            src, srcSize,
            zc->bmi2, lastBlock,
            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
}
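
/* Editorial usage sketch (not part of upstream zstd, kept out of the build
 * with "#if 0") : the super-block path above is only exercised when a caller
 * requests a target compressed block size through the public API. The helper
 * name and the 1300-byte target are hypothetical. */
#if 0
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_c_targetCBlockSize is exposed via the experimental section in this zstd version */
#include <zstd.h>

static size_t example_compressWithTargetCBlockSize(void* dst, size_t dstCapacity,
                                                   const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, 1300); /* aim for ~1300-byte compressed blocks */
    cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return cSize;
}
#endif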