/* ******************************************************************
 * Huffman encoder, part of New Generation Entropy library
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * You can contact the author at :
 * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
 * - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 ****************************************************************** */

/* **************************************************************
 * Compiler specifics
 ****************************************************************/
#ifdef _MSC_VER /* Visual Studio */
#  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif


/* **************************************************************
 * Includes
 ****************************************************************/
#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */
#include "../common/compiler.h"
#include "../common/bitstream.h"
#include "hist.h"
#define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */
#include "../common/fse.h" /* header compression */
#include "../common/huf.h"
#include "../common/error_private.h"
#include "../common/bits.h" /* ZSTD_highbit32 */


/* **************************************************************
 * Error Management
 ****************************************************************/
#define HUF_isError ERR_isError
#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */


/* **************************************************************
 * Required declarations
 ****************************************************************/
typedef struct nodeElt_s {
    U32 count;
    U16 parent;
    BYTE byte;
    BYTE nbBits;
} nodeElt;


/* **************************************************************
 * Debug Traces
 ****************************************************************/

#if DEBUGLEVEL >= 2

static size_t showU32(const U32* arr, size_t size)
{
    size_t u;
    for (u=0; u<size; u++) {
        RAWLOG(6, " %u", arr[u]); (void)arr;
    }
    RAWLOG(6, " \n");
    return size;
}

static size_t HUF_getNbBits(HUF_CElt elt);

static size_t showCTableBits(const HUF_CElt* ctable, size_t size)
{
    size_t u;
    for (u=0; u<size; u++) {
        RAWLOG(6, " %zu", HUF_getNbBits(ctable[u])); (void)ctable;
    }
    RAWLOG(6, " \n");
    return size;
}

static size_t showHNodeSymbols(const nodeElt* hnode, size_t size)
{
    size_t u;
    for (u=0; u<size; u++) {
        RAWLOG(6, " %u", hnode[u].byte); (void)hnode;
    }
    RAWLOG(6, " \n");
    return size;
}

static size_t showHNodeBits(const nodeElt* hnode, size_t size)
{
    size_t u;
    for (u=0; u<size; u++) {
        RAWLOG(6, " %u", hnode[u].nbBits); (void)hnode;
    }
    RAWLOG(6, " \n");
    return size;
}

#endif


/* *******************************************************
 * HUF : Huffman block compression
 *********************************************************/
#define HUF_WORKSPACE_MAX_ALIGNMENT 8

static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align)
{
    size_t const mask = align - 1;
    size_t const rem = (size_t)workspace & mask;
    size_t const add = (align - rem) & mask;
    BYTE* const aligned = (BYTE*)workspace + add;
    assert((align & (align - 1)) == 0); /* pow 2 */
    assert(align <= HUF_WORKSPACE_MAX_ALIGNMENT);
    if (*workspaceSizePtr >= add) {
        assert(add < align);
        assert(((size_t)aligned & mask) == 0);
        *workspaceSizePtr -= add;
        return aligned;
    } else {
        *workspaceSizePtr = 0;
        return NULL;
    }
}
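
/* Illustration of the alignment arithmetic above (hypothetical address):
 * with align = 8 and workspace = 0x1004, mask = 7, rem = 4,
 * add = (8 - 4) & 7 = 4, so the returned pointer is 0x1008 and
 * *workspaceSizePtr shrinks by 4. If workspace is already aligned,
 * rem == 0 and add == 0, leaving the size untouched; if fewer than
 * `add` bytes remain, the function signals failure by returning NULL
 * and zeroing the remaining size.
 */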


/* HUF_compressWeights() :
 * Same as FSE_compress(), but dedicated to huff0's weights compression.
 * The use case needs much less stack memory.
 * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
 */
#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6

typedef struct {
    FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
    U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
    unsigned count[HUF_TABLELOG_MAX+1];
    S16 norm[HUF_TABLELOG_MAX+1];
} HUF_CompressWeightsWksp;

static size_t
HUF_compressWeights(void* dst, size_t dstSize,
              const void* weightTable, size_t wtSize,
                    void* workspace, size_t workspaceSize)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + dstSize;

    unsigned maxSymbolValue = HUF_TABLELOG_MAX;
    U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
    HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));

    if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);

    /* init conditions */
    if (wtSize <= 1) return 0; /* Not compressible */

    /* Scan input and build symbol stats */
    {   unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize); /* never fails */
        if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */
        if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
    }

    tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
    CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) );

    /* Write table description header */
    {   CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) );
        op += hSize;
    }

    /* Compress */
    CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) );
    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) );
        if (cSize == 0) return 0; /* not enough space for compressed data */
        op += cSize;
    }

    return (size_t)(op-ostart);
}

static size_t HUF_getNbBits(HUF_CElt elt)
{
    return elt & 0xFF;
}

static size_t HUF_getNbBitsFast(HUF_CElt elt)
{
    return elt;
}

static size_t HUF_getValue(HUF_CElt elt)
{
    return elt & ~(size_t)0xFF;
}

static size_t HUF_getValueFast(HUF_CElt elt)
{
    return elt;
}

static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits)
{
    assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX);
    *elt = nbBits;
}

static void HUF_setValue(HUF_CElt* elt, size_t value)
{
    size_t const nbBits = HUF_getNbBits(*elt);
    if (nbBits > 0) {
        assert((value >> nbBits) == 0);
        *elt |= value << (sizeof(HUF_CElt) * 8 - nbBits);
    }
}
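
/* Illustrative sketch of the HUF_CElt layout implied by the accessors
 * above, assuming a 64-bit size_t (the values below are made up):
 *
 *   HUF_CElt elt = 0;
 *   HUF_setNbBits(&elt, 3);   // low 8 bits hold nbBits = 3
 *   HUF_setValue(&elt, 0x5);  // 0b101 is left-justified into bits [61, 64)
 *   assert(HUF_getNbBits(elt) == 3);
 *   assert(HUF_getValue(elt) == ((size_t)0x5 << 61));
 *
 * Keeping the value pre-shifted to the top of the word is what lets
 * HUF_addBits() emit a symbol with just one shift and one or.
 */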

HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable)
{
    HUF_CTableHeader header;
    ZSTD_memcpy(&header, ctable, sizeof(header));
    return header;
}

static void HUF_writeCTableHeader(HUF_CElt* ctable, U32 tableLog, U32 maxSymbolValue)
{
    HUF_CTableHeader header;
    HUF_STATIC_ASSERT(sizeof(ctable[0]) == sizeof(header));
    ZSTD_memset(&header, 0, sizeof(header));
    assert(tableLog < 256);
    header.tableLog = (BYTE)tableLog;
    assert(maxSymbolValue < 256);
    header.maxSymbolValue = (BYTE)maxSymbolValue;
    ZSTD_memcpy(ctable, &header, sizeof(header));
}
typedef struct {
    HUF_CompressWeightsWksp wksp;
    BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
    BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
} HUF_WriteCTableWksp;

size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
                            const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
                            void* workspace, size_t workspaceSize)
{
    HUF_CElt const* const ct = CTable + 1;
    BYTE* op = (BYTE*)dst;
    U32 n;
    HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));

    HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE >= sizeof(HUF_WriteCTableWksp));

    assert(HUF_readCTableHeader(CTable).maxSymbolValue == maxSymbolValue);
    assert(HUF_readCTableHeader(CTable).tableLog == huffLog);

    /* check conditions */
    if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);

    /* convert to weight */
    wksp->bitsToWeight[0] = 0;
    for (n=1; n<huffLog+1; n++)
        wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
    for (n=0; n<maxSymbolValue; n++)
        wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])];

    /* attempt weights compression by FSE */
    if (maxDstSize < 1) return ERROR(dstSize_tooSmall);
    {   CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
        if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */
            op[0] = (BYTE)hSize;
            return hSize+1;
    }   }

    /* write raw values as 4-bits (max : 15) */
    if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
    if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
    op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
    wksp->huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
    for (n=0; n<maxSymbolValue; n+=2)
        op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]);
    return ((maxSymbolValue+1)/2) + 1;
}
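
/* Summary of the two header encodings produced above, reconstructed from
 * the code (byte counts follow directly from the return statements):
 *
 *   FSE path : op[0] = hSize (1..127), then hSize bytes of FSE-compressed
 *              weights                  => hSize + 1 bytes total.
 *   Raw path : op[0] = 128 + (maxSymbolValue-1), then two 4-bit weights
 *              per byte                 => ((maxSymbolValue+1)/2) + 1 bytes.
 *
 * The FSE path is only kept when it wins, i.e. when hSize > 1 and
 * hSize < maxSymbolValue/2; otherwise the raw nibble encoding is used.
 */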


size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
{
    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
    U32 tableLog = 0;
    U32 nbSymbols = 0;
    HUF_CElt* const ct = CTable + 1;

    /* get symbol weights */
    CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
    *hasZeroWeights = (rankVal[0] > 0);

    /* check result */
    if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
    if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);

    *maxSymbolValuePtr = nbSymbols - 1;

    HUF_writeCTableHeader(CTable, tableLog, *maxSymbolValuePtr);

    /* Prepare base value per rank */
    {   U32 n, nextRankStart = 0;
        for (n=1; n<=tableLog; n++) {
            U32 curr = nextRankStart;
            nextRankStart += (rankVal[n] << (n-1));
            rankVal[n] = curr;
    }   }

    /* fill nbBits */
    {   U32 n; for (n=0; n<nbSymbols; n++) {
            const U32 w = huffWeight[n];
            HUF_setNbBits(ct + n, (BYTE)(tableLog + 1 - w) & -(w != 0));
    }   }

    /* fill val */
    {   U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
        U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
        { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[HUF_getNbBits(ct[n])]++; }
        /* determine starting value per rank */
        valPerRank[tableLog+1] = 0; /* for w==0 */
        {   U16 min = 0;
            U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */
                valPerRank[n] = min; /* get starting value within each rank */
                min += nbPerRank[n];
                min >>= 1;
        }   }
        /* assign value within rank, symbol order */
        { U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); }
    }

    return readSize;
}
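
/* A tiny worked example of the canonical assignment above, assuming
 * tableLog = 3 and decoded weights {3, 2, 2} (a made-up table):
 *   nbBits = tableLog + 1 - w  =>  {1, 2, 2}
 *   nbPerRank[1] = 1, nbPerRank[2] = 2
 *   scan n = 3..1 : valPerRank[3] = 0, valPerRank[2] = 0, valPerRank[1] = 1
 *   symbol 0 -> code 1 (1 bit), symbol 1 -> 00, symbol 2 -> 01
 * Shorter codes land on the numerically higher prefixes, which is exactly
 * the canonical ordering the decoder rebuilds from the weights alone.
 */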

U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue)
{
    const HUF_CElt* const ct = CTable + 1;
    assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
    if (symbolValue > HUF_readCTableHeader(CTable).maxSymbolValue)
        return 0;
    return (U32)HUF_getNbBits(ct[symbolValue]);
}


/**
 * HUF_setMaxHeight():
 * Try to enforce @targetNbBits on the Huffman tree described in @huffNode.
 *
 * It attempts to convert all nodes with nbBits > @targetNbBits
 * to employ @targetNbBits instead. Then it adjusts the tree
 * so that it remains a valid canonical Huffman tree.
 *
 * @pre               The sum of the ranks of each symbol == 2^largestBits,
 *                    where largestBits == huffNode[lastNonNull].nbBits.
 * @post              The sum of the ranks of each symbol == 2^largestBits,
 *                    where largestBits is the return value (expected <= targetNbBits).
 *
 * @param huffNode    The Huffman tree modified in place to enforce targetNbBits.
 *                    It's presumed sorted, from most frequent to rarest symbol.
 * @param lastNonNull The symbol with the lowest count in the Huffman tree.
 * @param targetNbBits The allowed number of bits, which the Huffman tree
 *                    may not respect. After this function the Huffman tree will
 *                    respect targetNbBits.
 * @return            The maximum number of bits of the Huffman tree after adjustment.
 */
static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 targetNbBits)
{
    const U32 largestBits = huffNode[lastNonNull].nbBits;
    /* early exit : no elt > targetNbBits, so the tree is already valid. */
    if (largestBits <= targetNbBits) return largestBits;

    DEBUGLOG(5, "HUF_setMaxHeight (targetNbBits = %u)", targetNbBits);

    /* there are several too-large elements (at least 2) */
    {   int totalCost = 0;
        const U32 baseCost = 1 << (largestBits - targetNbBits);
        int n = (int)lastNonNull;

        /* Adjust any ranks > targetNbBits to targetNbBits.
         * Compute totalCost, which is how far the sum of the ranks
         * is over 2^largestBits after adjusting the offending ranks.
         */
        while (huffNode[n].nbBits > targetNbBits) {
            totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
            huffNode[n].nbBits = (BYTE)targetNbBits;
            n--;
        }
        /* n stops at huffNode[n].nbBits <= targetNbBits */
        assert(huffNode[n].nbBits <= targetNbBits);
        /* n ends at the index of the smallest symbol using < targetNbBits */
        while (huffNode[n].nbBits == targetNbBits) --n;

        /* renorm totalCost from 2^largestBits to 2^targetNbBits
         * note : totalCost is necessarily a multiple of baseCost */
        assert(((U32)totalCost & (baseCost - 1)) == 0);
        totalCost >>= (largestBits - targetNbBits);
        assert(totalCost > 0);

        /* repay normalized cost */
        {   U32 const noSymbol = 0xF0F0F0F0;
            U32 rankLast[HUF_TABLELOG_MAX+2];

            /* Get pos of last (smallest = lowest cum. count) symbol per rank */
            ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
            {   U32 currentNbBits = targetNbBits;
                int pos;
                for (pos=n ; pos >= 0; pos--) {
                    if (huffNode[pos].nbBits >= currentNbBits) continue;
                    currentNbBits = huffNode[pos].nbBits; /* < targetNbBits */
                    rankLast[targetNbBits-currentNbBits] = (U32)pos;
            }   }

            while (totalCost > 0) {
                /* Try to reduce the next power of 2 above totalCost because we
                 * gain back half the rank.
                 */
                U32 nBitsToDecrease = ZSTD_highbit32((U32)totalCost) + 1;
                for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
                    U32 const highPos = rankLast[nBitsToDecrease];
                    U32 const lowPos = rankLast[nBitsToDecrease-1];
                    if (highPos == noSymbol) continue;
                    /* Decrease highPos if no symbols of lowPos or if it is
                     * not cheaper to remove 2 lowPos than highPos.
                     */
                    if (lowPos == noSymbol) break;
                    {   U32 const highTotal = huffNode[highPos].count;
                        U32 const lowTotal = 2 * huffNode[lowPos].count;
                        if (highTotal <= lowTotal) break;
                }   }
                /* only triggered when no more rank 1 symbols are left => find the closest one (note : there is necessarily at least one !) */
                assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
                /* HUF_TABLELOG_MAX test just to please gcc 5+; but it should not be necessary */
                while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
                    nBitsToDecrease++;
                assert(rankLast[nBitsToDecrease] != noSymbol);
                /* Increase the number of bits to gain back half the rank cost. */
                totalCost -= 1 << (nBitsToDecrease-1);
                huffNode[rankLast[nBitsToDecrease]].nbBits++;

                /* Fix up the new rank.
                 * If the new rank was empty, this symbol is now its smallest.
                 * Otherwise, this symbol will be the largest in the new rank so no adjustment.
                 */
                if (rankLast[nBitsToDecrease-1] == noSymbol)
                    rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];
                /* Fix up the old rank.
                 * If the symbol was at position 0, meaning it was the highest weight symbol in the tree,
                 * it must be the only symbol in its rank, so the old rank now has no symbols.
                 * Otherwise, since the Huffman nodes are sorted by count, the previous position is now
                 * the smallest node in the rank. If the previous position belongs to a different rank,
                 * then the rank is now empty.
                 */
                if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
                    rankLast[nBitsToDecrease] = noSymbol;
                else {
                    rankLast[nBitsToDecrease]--;
                    if (huffNode[rankLast[nBitsToDecrease]].nbBits != targetNbBits-nBitsToDecrease)
                        rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
                }
            } /* while (totalCost > 0) */

            /* If we've removed too much weight, then we have to add it back.
             * To avoid overshooting again, we only adjust the smallest rank.
             * We take the largest nodes from the lowest rank 0 and move them
             * to rank 1. There's guaranteed to be enough rank 0 symbols because
             * TODO.
             */
            while (totalCost < 0) { /* Sometimes, cost correction overshoots */
                /* special case : no rank 1 symbol (using targetNbBits-1);
                 * let's create one from largest rank 0 (using targetNbBits).
                 */
                if (rankLast[1] == noSymbol) {
                    while (huffNode[n].nbBits == targetNbBits) n--;
                    huffNode[n+1].nbBits--;
                    assert(n >= 0);
                    rankLast[1] = (U32)(n+1);
                    totalCost++;
                    continue;
                }
                huffNode[ rankLast[1] + 1 ].nbBits--;
                rankLast[1]++;
                totalCost ++;
            }
        } /* repay normalized cost */
    } /* there are several too-large elements (at least 2) */

    return targetNbBits;
}
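
/* A worked instance of the cost accounting above, assuming
 * largestBits = 13 and targetNbBits = 11, so baseCost = 1 << 2 = 4:
 *   - demoting a 13-bit symbol to 11 bits adds 4 - 1 = 3 to totalCost;
 *   - demoting a 12-bit symbol to 11 bits adds 4 - 2 = 2;
 *   - after `totalCost >>= 2`, each iteration of the repayment loop
 *     lengthens the symbol at rankLast[nBitsToDecrease] by one bit,
 *     repaying 1 << (nBitsToDecrease - 1) units;
 *   - the final loop fixes any overshoot by moving rank-0 symbols
 *     (targetNbBits) up to rank 1 (targetNbBits - 1), one unit at a time.
 */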

typedef struct {
    U16 base;
    U16 curr;
} rankPos;

typedef nodeElt huffNodeTable[2 * (HUF_SYMBOLVALUE_MAX + 1)];

/* Number of buckets available for HUF_sort() */
#define RANK_POSITION_TABLE_SIZE 192

typedef struct {
    huffNodeTable huffNodeTbl;
    rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
} HUF_buildCTable_wksp_tables;

/* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing.
 * Strategy is to use as many buckets as possible for representing distinct
 * counts while using the remainder to represent all "large" counts.
 *
 * To satisfy this requirement for 192 buckets, we can do the following:
 * Let buckets 0-166 represent distinct counts of [0, 166]
 * Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing.
 */
#define RANK_POSITION_MAX_COUNT_LOG 32
#define RANK_POSITION_LOG_BUCKETS_BEGIN ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */)
#define RANK_POSITION_DISTINCT_COUNT_CUTOFF (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */)

/* Return the appropriate bucket index for a given count. See definition of
 * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy.
 */
static U32 HUF_getIndex(U32 const count) {
    return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF)
        ? count
        : ZSTD_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN;
}
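
/* Illustrative bucket values, computed from the defines above
 * (RANK_POSITION_LOG_BUCKETS_BEGIN == 158):
 *   count = 0     -> bucket 0                                (distinct)
 *   count = 100   -> bucket 100                              (distinct)
 *   count = 1000  -> 158 + ZSTD_highbit32(1000) = 158 + 9 = 167 (log2)
 *   count = 1<<20 -> 158 + 20 = 178                          (log2)
 * Small counts keep exact buckets, so only the few log2 buckets ever need
 * the quicksort pass in HUF_sort() below.
 */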

/* Helper swap function for HUF_quickSortPartition() */
static void HUF_swapNodes(nodeElt* a, nodeElt* b) {
    nodeElt tmp = *a;
    *a = *b;
    *b = tmp;
}

/* Returns 0 if the huffNode array is not sorted by descending count */
MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) {
    U32 i;
    for (i = 1; i < maxSymbolValue1; ++i) {
        if (huffNode[i].count > huffNode[i-1].count) {
            return 0;
        }
    }
    return 1;
}

/* Insertion sort by descending order */
HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) {
    int i;
    int const size = high-low+1;
    huffNode += low;
    for (i = 1; i < size; ++i) {
        nodeElt const key = huffNode[i];
        int j = i - 1;
        while (j >= 0 && huffNode[j].count < key.count) {
            huffNode[j + 1] = huffNode[j];
            j--;
        }
        huffNode[j + 1] = key;
    }
}

/* Pivot helper function for quicksort. */
static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) {
    /* Simply select rightmost element as pivot. "Better" selectors like
     * median-of-three don't experimentally appear to have any benefit.
     */
    U32 const pivot = arr[high].count;
    int i = low - 1;
    int j = low;
    for ( ; j < high; j++) {
        if (arr[j].count > pivot) {
            i++;
            HUF_swapNodes(&arr[i], &arr[j]);
        }
    }
    HUF_swapNodes(&arr[i + 1], &arr[high]);
    return i + 1;
}

/* Classic quicksort by descending with partially iterative calls
 * to reduce worst case callstack size.
 */
static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) {
    int const kInsertionSortThreshold = 8;
    if (high - low < kInsertionSortThreshold) {
        HUF_insertionSort(arr, low, high);
        return;
    }
    while (low < high) {
        int const idx = HUF_quickSortPartition(arr, low, high);
        if (idx - low < high - idx) {
            HUF_simpleQuickSort(arr, low, idx - 1);
            low = idx + 1;
        } else {
            HUF_simpleQuickSort(arr, idx + 1, high);
            high = idx - 1;
        }
    }
}

/**
 * HUF_sort():
 * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
 * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket.
 *
 * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
 *                      Must have (maxSymbolValue + 1) entries.
 * @param[in]  count    Histogram of the symbols.
 * @param[in]  maxSymbolValue Maximum symbol value.
 * @param      rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
 */
static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) {
    U32 n;
    U32 const maxSymbolValue1 = maxSymbolValue+1;

    /* Compute base and set curr to base.
     * For symbol s let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1.
     * See HUF_getIndex to see bucketing strategy.
     * We attribute each symbol to lowerRank's base value, because we want to know where
     * each rank begins in the output, so for rank R we want to count ranks R+1 and above.
     */
    ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
    for (n = 0; n < maxSymbolValue1; ++n) {
        U32 lowerRank = HUF_getIndex(count[n]);
        assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1);
        rankPosition[lowerRank].base++;
    }

    assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
    /* Set up the rankPosition table */
    for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
        rankPosition[n-1].base += rankPosition[n].base;
        rankPosition[n-1].curr = rankPosition[n-1].base;
    }

    /* Insert each symbol into its appropriate bucket, setting up the rankPosition table. */
    for (n = 0; n < maxSymbolValue1; ++n) {
        U32 const c = count[n];
        U32 const r = HUF_getIndex(c) + 1;
        U32 const pos = rankPosition[r].curr++;
        assert(pos < maxSymbolValue1);
        huffNode[pos].count = c;
        huffNode[pos].byte = (BYTE)n;
    }

    /* Sort each bucket. */
    for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) {
        int const bucketSize = rankPosition[n].curr - rankPosition[n].base;
        U32 const bucketStartIdx = rankPosition[n].base;
        if (bucketSize > 1) {
            assert(bucketStartIdx < maxSymbolValue1);
            HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1);
        }
    }

    assert(HUF_isSorted(huffNode, maxSymbolValue1));
}


/** HUF_buildCTable_wksp() :
 *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
 *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
 */
#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)

/* HUF_buildTree():
 * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
 *
 * @param huffNode       The array sorted by HUF_sort(). Builds the Huffman tree in this array.
 * @param maxSymbolValue The maximum symbol value.
 * @return               The smallest node in the Huffman tree (by count).
 */
static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
{
    nodeElt* const huffNode0 = huffNode - 1;
    int nonNullRank;
    int lowS, lowN;
    int nodeNb = STARTNODE;
    int n, nodeRoot;
    DEBUGLOG(5, "HUF_buildTree (alphabet size = %u)", maxSymbolValue + 1);
    /* init for parents */
    nonNullRank = (int)maxSymbolValue;
    while(huffNode[nonNullRank].count == 0) nonNullRank--;
    lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
    huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
    huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
    nodeNb++; lowS-=2;
    for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
    huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */

    /* create parents */
    while (nodeNb <= nodeRoot) {
        int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
        int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
        huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
        huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
        nodeNb++;
    }

    /* distribute weights (unlimited tree height) */
    huffNode[nodeRoot].nbBits = 0;
    for (n=nodeRoot-1; n>=STARTNODE; n--)
        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
    for (n=0; n<=nonNullRank; n++)
        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;

    DEBUGLOG(6, "Initial distribution of bits completed (%zu sorted symbols)", showHNodeBits(huffNode, maxSymbolValue+1));

    return nonNullRank;
}
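
/* A tiny trace of HUF_buildTree(), assuming three sorted symbols with
 * counts {5, 3, 2} (most frequent first), so nonNullRank = 2:
 *   node 256 (STARTNODE): count 3 + 2 = 5, parent of leaves 1 and 2
 *   node 257            : count 5 + 5 = 10, parent of leaf 0 and node 256 (root)
 *   depths: root = 0, node 256 = 1  =>  nbBits = {1, 2, 2}
 * The 1U<<30 fillers and the 1U<<31 sentinel in huffNode0[0] act as
 * barriers so the two-smallest selection never reads past valid entries.
 */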

/**
 * HUF_buildCTableFromTree():
 * Build the CTable given the Huffman tree in huffNode.
 *
 * @param[out] CTable         The output Huffman CTable.
 * @param      huffNode       The Huffman tree.
 * @param      nonNullRank    The last and smallest node in the Huffman tree.
 * @param      maxSymbolValue The maximum symbol value.
 * @param      maxNbBits      The exact maximum number of bits used in the Huffman tree.
 */
static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
{
    HUF_CElt* const ct = CTable + 1;
    /* fill result into ctable (val, nbBits) */
    int n;
    U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
    U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
    int const alphabetSize = (int)(maxSymbolValue + 1);
    for (n=0; n<=nonNullRank; n++)
        nbPerRank[huffNode[n].nbBits]++;
    /* determine starting value per rank */
    {   U16 min = 0;
        for (n=(int)maxNbBits; n>0; n--) {
            valPerRank[n] = min; /* get starting value within each rank */
            min += nbPerRank[n];
            min >>= 1;
    }   }
    for (n=0; n<alphabetSize; n++)
        HUF_setNbBits(ct + huffNode[n].byte, huffNode[n].nbBits); /* push nbBits per symbol, symbol order */
    for (n=0; n<alphabetSize; n++)
        HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); /* assign value within rank, symbol order */

    HUF_writeCTableHeader(CTable, maxNbBits, maxSymbolValue);
}

size_t
HUF_buildCTable_wksp(HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
                     void* workSpace, size_t wkspSize)
{
    HUF_buildCTable_wksp_tables* const wksp_tables =
        (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32));
    nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
    nodeElt* const huffNode = huffNode0+1;
    int nonNullRank;

    HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE == sizeof(HUF_buildCTable_wksp_tables));

    DEBUGLOG(5, "HUF_buildCTable_wksp (alphabet size = %u)", maxSymbolValue+1);

    /* safety checks */
    if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
        return ERROR(workSpace_tooSmall);
    if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
        return ERROR(maxSymbolValue_tooLarge);
    ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));

    /* sort, decreasing order */
    HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
    DEBUGLOG(6, "sorted symbols completed (%zu symbols)", showHNodeSymbols(huffNode, maxSymbolValue+1));

    /* build tree */
    nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);

    /* determine and enforce maxTableLog */
    maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
    if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */

    HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits);

    return maxNbBits;
}

size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
{
    HUF_CElt const* ct = CTable + 1;
    size_t nbBits = 0;
    int s;
    for (s = 0; s <= (int)maxSymbolValue; ++s) {
        nbBits += HUF_getNbBits(ct[s]) * count[s];
    }
    return nbBits >> 3;
}

int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
    HUF_CTableHeader header = HUF_readCTableHeader(CTable);
    HUF_CElt const* ct = CTable + 1;
    int bad = 0;
    int s;

    assert(header.tableLog <= HUF_TABLELOG_ABSOLUTEMAX);

    if (header.maxSymbolValue < maxSymbolValue)
        return 0;

    for (s = 0; s <= (int)maxSymbolValue; ++s) {
        bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0);
    }
    return !bad;
}

size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }

/** HUF_CStream_t:
 * Huffman uses its own BIT_CStream_t implementation.
 * There are three major differences from BIT_CStream_t:
 *   1. HUF_addBits() takes a HUF_CElt (size_t) which is
 *      the pair (nbBits, value) in the format:
 *      - Bits [0, 4)            = nbBits
 *      - Bits [4, 64 - nbBits)  = 0
 *      - Bits [64 - nbBits, 64) = value
 *   2. The bitContainer is built from the upper bits and
 *      right shifted. E.g. to add a new value of N bits
 *      you right shift the bitContainer by N, then or in
 *      the new value into the N upper bits.
 *   3. The bitstream has two bit containers. You can add
 *      bits to the second container and merge them into
 *      the first container.
 */
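
/* A short illustration of point 2 above, assuming a 64-bit container
 * whose top two bits already hold "ab":
 *   container  = ab......                     (top 2 bits used)
 *   add 3-bit value v:
 *   container >>= 3        ->  ...ab...
 *   container |= v << 61   ->  vvvab...
 * Earlier symbols migrate toward the low end of the container while the
 * newest value always occupies the top bits.
 */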

#define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8)

typedef struct {
    size_t bitContainer[2];
    size_t bitPos[2];

    BYTE* startPtr;
    BYTE* ptr;
    BYTE* endPtr;
} HUF_CStream_t;

/**! HUF_initCStream():
 * Initializes the bitstream.
 * @returns 0 or an error code.
 */
static size_t HUF_initCStream(HUF_CStream_t* bitC,
                              void* startPtr, size_t dstCapacity)
{
    ZSTD_memset(bitC, 0, sizeof(*bitC));
    bitC->startPtr = (BYTE*)startPtr;
    bitC->ptr = bitC->startPtr;
    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]);
    if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall);
    return 0;
}

/*! HUF_addBits():
 * Adds the symbol stored in HUF_CElt elt to the bitstream.
 *
 * @param elt   The element we're adding. This is a (nbBits, value) pair.
 *              See the HUF_CStream_t docs for the format.
 * @param idx   Insert into the bitstream at this idx.
 * @param kFast This is a template parameter. If the bitstream is guaranteed
 *              to have at least 4 unused bits after this call it may be 1,
 *              otherwise it must be 0. HUF_addBits() is faster when fast is set.
 */
FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast)
{
    assert(idx <= 1);
    assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX);
    /* This is efficient on x86-64 with BMI2 because shrx
     * only reads the low 6 bits of the register. The compiler
     * knows this and elides the mask. When fast is set,
     * every operation can use the same value loaded from elt.
     */
    bitC->bitContainer[idx] >>= HUF_getNbBits(elt);
    bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt);
    /* We only read the low 8 bits of bitC->bitPos[idx] so it
     * doesn't matter that the high bits have noise from the value.
     */
    bitC->bitPos[idx] += HUF_getNbBitsFast(elt);
    assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
    /* The last 4-bits of elt are dirty if fast is set,
     * so we must not be overwriting bits that have already been
     * inserted into the bit container.
     */
#if DEBUGLEVEL >= 1
    {
        size_t const nbBits = HUF_getNbBits(elt);
        size_t const dirtyBits = nbBits == 0 ? 0 : ZSTD_highbit32((U32)nbBits) + 1;
        (void)dirtyBits;
        /* Middle bits are 0. */
        assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0);
        /* We didn't overwrite any bits in the bit container. */
        assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
    }
#endif
}

FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC)
{
    bitC->bitContainer[1] = 0;
    bitC->bitPos[1] = 0;
}

/*! HUF_mergeIndex1() :
 * Merges the bit container @ index 1 into the bit container @ index 0
 * and zeros the bit container @ index 1.
 */
FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC)
{
    assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER);
    bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF);
    bitC->bitContainer[0] |= bitC->bitContainer[1];
    bitC->bitPos[0] += bitC->bitPos[1];
    assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER);
}

/*! HUF_flushBits() :
 * Flushes the bits in the bit container @ index 0.
 *
 * @post bitPos will be < 8.
 * @param kFast If kFast is set then we must know a-priori that
 *              the bit container will not overflow.
 */
FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast)
{
    /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */
    size_t const nbBits = bitC->bitPos[0] & 0xFF;
    size_t const nbBytes = nbBits >> 3;
    /* The top nbBits bits of bitContainer are the ones we need. */
    size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits);
    /* Mask bitPos to account for the bytes we consumed. */
    bitC->bitPos[0] &= 7;
    assert(nbBits > 0);
    assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8);
    assert(bitC->ptr <= bitC->endPtr);
    MEM_writeLEST(bitC->ptr, bitContainer);
    bitC->ptr += nbBytes;
    assert(!kFast || bitC->ptr <= bitC->endPtr);
    if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
    /* bitContainer doesn't need to be modified because the leftover
     * bits are already the top bitPos bits. And we don't care about
     * noise in the lower values.
     */
}
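
/* A worked flush, assuming a 64-bit container holding bitPos = 30 bits:
 *   nbBits = 30, nbBytes = 30 >> 3 = 3
 *   bitContainer >> (64 - 30) right-justifies all 30 accumulated bits;
 *   MEM_writeLEST stores the whole word little-endian, but ptr only
 *   advances by 3, committing the 24 oldest bits;
 *   bitPos &= 7 leaves 6 bits, still sitting at the container top.
 * Only whole bytes ever leave the container; up to 7 bits carry over to
 * the next flush.
 */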

/*! HUF_endMark()
 * @returns The Huffman stream end mark: A 1-bit value = 1.
 */
static HUF_CElt HUF_endMark(void)
{
    HUF_CElt endMark;
    HUF_setNbBits(&endMark, 1);
    HUF_setValue(&endMark, 1);
    return endMark;
}

/*! HUF_closeCStream() :
 * @return Size of CStream, in bytes,
 *         or 0 if it could not fit into dstBuffer */
static size_t HUF_closeCStream(HUF_CStream_t* bitC)
{
    HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0);
    HUF_flushBits(bitC, /* kFast */ 0);
    {   size_t const nbBits = bitC->bitPos[0] & 0xFF;
        if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
        return (size_t)(bitC->ptr - bitC->startPtr) + (nbBits > 0);
    }
}

FORCE_INLINE_TEMPLATE void
HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast)
{
    HUF_addBits(bitCPtr, CTable[symbol], idx, fast);
}

FORCE_INLINE_TEMPLATE void
HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC,
                                   const BYTE* ip, size_t srcSize,
                                   const HUF_CElt* ct,
                                   int kUnroll, int kFastFlush, int kLastFast)
{
    /* Join to kUnroll */
    int n = (int)srcSize;
    int rem = n % kUnroll;
    if (rem > 0) {
        for (; rem > 0; --rem) {
            HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0);
        }
        HUF_flushBits(bitC, kFastFlush);
    }
    assert(n % kUnroll == 0);

    /* Join to 2 * kUnroll */
    if (n % (2 * kUnroll)) {
        int u;
        for (u = 1; u < kUnroll; ++u) {
            HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1);
        }
        HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast);
        HUF_flushBits(bitC, kFastFlush);
        n -= kUnroll;
    }
    assert(n % (2 * kUnroll) == 0);

    for (; n>0; n-= 2 * kUnroll) {
        /* Encode kUnroll symbols into the bitstream @ index 0. */
        int u;
        for (u = 1; u < kUnroll; ++u) {
            HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1);
        }
        HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast);
        HUF_flushBits(bitC, kFastFlush);
        /* Encode kUnroll symbols into the bitstream @ index 1.
         * This allows us to start filling the bit container
         * without any data dependencies.
         */
        HUF_zeroIndex1(bitC);
        for (u = 1; u < kUnroll; ++u) {
            HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1);
        }
        HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast);
        /* Merge bitstream @ index 1 into the bitstream @ index 0 */
        HUF_mergeIndex1(bitC);
        HUF_flushBits(bitC, kFastFlush);
    }
    assert(n == 0);
}

/**
 * Returns a tight upper bound on the output space needed by Huffman
 * with an 8-byte buffer to handle over-writes. If the output is at least
 * this large we don't need to do bounds checks during Huffman encoding.
 */
static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog)
{
    return ((srcSize * tableLog) >> 3) + 8;
}
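
/* Numeric instance of the bound above: with srcSize = 1000 and
 * tableLog = 11, no symbol can emit more than 11 bits, so the stream
 * needs at most (1000 * 11) >> 3 = 1375 bytes, plus 8 bytes of slack for
 * the over-writing flushes => 1383 bytes. When dstSize meets this bound
 * (and tableLog <= 11), the caller below selects the unchecked
 * kFastFlush loops.
 */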


FORCE_INLINE_TEMPLATE size_t
HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
                                   const void* src, size_t srcSize,
                                   const HUF_CElt* CTable)
{
    U32 const tableLog = HUF_readCTableHeader(CTable).tableLog;
    HUF_CElt const* ct = CTable + 1;
    const BYTE* ip = (const BYTE*) src;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    HUF_CStream_t bitC;

    /* init */
    if (dstSize < 8) return 0; /* not enough space to compress */
    {   BYTE* op = ostart;
        size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
        if (HUF_isError(initErr)) return 0; }

    if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11)
        HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFast */ 0, /* kLastFast */ 0);
    else {
        if (MEM_32bits()) {
            switch (tableLog) {
            case 11:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0);
                break;
            case 10: ZSTD_FALLTHROUGH;
            case 9: ZSTD_FALLTHROUGH;
            case 8:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1);
                break;
            case 7: ZSTD_FALLTHROUGH;
            default:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1);
                break;
            }
        } else {
            switch (tableLog) {
            case 11:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0);
                break;
            case 10:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1);
                break;
            case 9:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0);
                break;
            case 8:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0);
                break;
            case 7:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0);
                break;
            case 6: ZSTD_FALLTHROUGH;
            default:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1);
                break;
            }
        }
    }
    assert(bitC.ptr <= bitC.endPtr);

    return HUF_closeCStream(&bitC);
}

#if DYNAMIC_BMI2

static BMI2_TARGET_ATTRIBUTE size_t
HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
                                   const void* src, size_t srcSize,
                                   const HUF_CElt* CTable)
{
    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
}

static size_t
HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
                                      const void* src, size_t srcSize,
                                      const HUF_CElt* CTable)
{
    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
}

static size_t
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
                              const void* src, size_t srcSize,
                              const HUF_CElt* CTable, const int flags)
{
    if (flags & HUF_flags_bmi2) {
        return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
    }
    return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
}

#else

static size_t
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
                              const void* src, size_t srcSize,
                              const HUF_CElt* CTable, const int flags)
{
    (void)flags;
    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
}

#endif

size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
{
    return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
}

static size_t
HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
                              const void* src, size_t srcSize,
                              const HUF_CElt* CTable, int flags)
{
    size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
    const BYTE* ip = (const BYTE*) src;
    const BYTE* const iend = ip + srcSize;
    BYTE* const ostart = (BYTE*) dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart;

    if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */
    if (srcSize < 12) return 0; /* no saving possible : too small input */
    op += 6; /* jumpTable */

    assert(op <= oend);
    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
        if (cSize == 0 || cSize > 65535) return 0;
        MEM_writeLE16(ostart, (U16)cSize);
        op += cSize;
    }

    ip += segmentSize;
    assert(op <= oend);
    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
        if (cSize == 0 || cSize > 65535) return 0;
        MEM_writeLE16(ostart+2, (U16)cSize);
        op += cSize;
    }

    ip += segmentSize;
    assert(op <= oend);
    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
        if (cSize == 0 || cSize > 65535) return 0;
        MEM_writeLE16(ostart+4, (U16)cSize);
        op += cSize;
    }

    ip += segmentSize;
    assert(op <= oend);
    assert(ip <= iend);
    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, flags) );
        if (cSize == 0 || cSize > 65535) return 0;
        op += cSize;
    }

    return (size_t)(op-ostart);
}
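
/* Resulting 4-stream layout, reconstructed from the code above
 * (sizeN are the three little-endian 16-bit jump-table entries):
 *
 *   | size1 | size2 | size3 | stream1 | stream2 | stream3 | stream4 |
 *   |  2 B  |  2 B  |  2 B  | size1 B | size2 B | size3 B |  rest   |
 *
 * The first three streams each encode segmentSize = (srcSize+3)/4 input
 * bytes; the fourth stream's length is implied by the total compressed
 * size, so it needs no jump-table entry.
 */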

size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
{
    return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
}

typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;

static size_t HUF_compressCTable_internal(
                BYTE* const ostart, BYTE* op, BYTE* const oend,
                const void* src, size_t srcSize,
                HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int flags)
{
    size_t const cSize = (nbStreams==HUF_singleStream) ?
                         HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags) :
                         HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags);
    if (HUF_isError(cSize)) { return cSize; }
    if (cSize==0) { return 0; } /* uncompressible */
    op += cSize;
    /* check compressibility */
    assert(op >= ostart);
    if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
    return (size_t)(op-ostart);
}

typedef struct {
    unsigned count[HUF_SYMBOLVALUE_MAX + 1];
    HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
    union {
        HUF_buildCTable_wksp_tables buildCTable_wksp;
        HUF_WriteCTableWksp writeCTable_wksp;
        U32 hist_wksp[HIST_WKSP_SIZE_U32];
    } wksps;
} HUF_compress_tables_t;

#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */

unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue)
{
    unsigned cardinality = 0;
    unsigned i;

    for (i = 0; i < maxSymbolValue + 1; i++) {
        if (count[i] != 0) cardinality += 1;
    }

    return cardinality;
}

unsigned HUF_minTableLog(unsigned symbolCardinality)
{
    U32 minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1;
    return minBitsSymbols;
}
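
/* For example, a histogram with 20 distinct symbols gives
 * ZSTD_highbit32(20) + 1 = 5: a depth-4 tree offers at most 16 leaves,
 * so 5 is the smallest depth that can give all 20 symbols a code.
 */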

unsigned HUF_optimalTableLog(
            unsigned maxTableLog,
            size_t srcSize,
            unsigned maxSymbolValue,
            void* workSpace, size_t wkspSize,
            HUF_CElt* table,
      const unsigned* count,
            int flags)
{
    assert(srcSize > 1); /* Not supported, RLE should be used instead */
    assert(wkspSize >= sizeof(HUF_buildCTable_wksp_tables));

    if (!(flags & HUF_flags_optimalDepth)) {
        /* cheap evaluation, based on FSE */
        return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
    }

    {   BYTE* dst = (BYTE*)workSpace + sizeof(HUF_WriteCTableWksp);
        size_t dstSize = wkspSize - sizeof(HUF_WriteCTableWksp);
        size_t hSize, newSize;
        const unsigned symbolCardinality = HUF_cardinality(count, maxSymbolValue);
        const unsigned minTableLog = HUF_minTableLog(symbolCardinality);
        size_t optSize = ((size_t) ~0) - 1;
        unsigned optLog = maxTableLog, optLogGuess;

        DEBUGLOG(6, "HUF_optimalTableLog: probing huf depth (srcSize=%zu)", srcSize);

        /* Search until size increases */
        for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) {
            DEBUGLOG(7, "checking for huffLog=%u", optLogGuess);

            {   size_t maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize);
                if (ERR_isError(maxBits)) continue;

                if (maxBits < optLogGuess && optLogGuess > minTableLog) break;

                hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize);
            }

            if (ERR_isError(hSize)) continue;

            newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize;

            if (newSize > optSize + 1) {
                break;
            }

            if (newSize < optSize) {
                optSize = newSize;
                optLog = optLogGuess;
            }
        }
        assert(optLog <= HUF_TABLELOG_MAX);
        return optLog;
    }
}

/* HUF_compress_internal() :
 * `workSpace_align4` must be aligned on 4-bytes boundaries,
 * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */
static size_t
HUF_compress_internal (void* dst, size_t dstSize,
                 const void* src, size_t srcSize,
                       unsigned maxSymbolValue, unsigned huffLog,
                       HUF_nbStreams_e nbStreams,
                       void* workSpace, size_t wkspSize,
                       HUF_CElt* oldHufTable, HUF_repeat* repeat, int flags)
{
    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t));
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart;

    DEBUGLOG(5, "HUF_compress_internal (srcSize=%zu)", srcSize);
    HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE);

    /* checks & inits */
    if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
    if (!srcSize) return 0; /* Uncompressed */
    if (!dstSize) return 0; /* cannot fit anything within dst budget */
    if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
    if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
    if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;

    /* Heuristic : If old table is valid, use it for small inputs */
    if ((flags & HUF_flags_preferRepeat) && repeat && *repeat == HUF_repeat_valid) {
        return HUF_compressCTable_internal(ostart, op, oend,
                                           src, srcSize,
                                           nbStreams, oldHufTable, flags);
    }

    /* If uncompressible data is suspected, do a smaller sampling first */
    DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2);
    if ((flags & HUF_flags_suspectUncompressible) && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) {
        size_t largestTotal = 0;
        DEBUGLOG(5, "input suspected incompressible : sampling to check");
        {   unsigned maxSymbolValueBegin = maxSymbolValue;
            CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
            largestTotal += largestBegin;
        }
        {   unsigned maxSymbolValueEnd = maxSymbolValue;
            CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
            largestTotal += largestEnd;
        }
        if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */
    }

    /* Scan input and build symbol stats */
    {   CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) );
        if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
        if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
    }
    DEBUGLOG(6, "histogram detail completed (%zu symbols)", showU32(table->count, maxSymbolValue+1));

    /* Check validity of previous table */
    if ( repeat
      && *repeat == HUF_repeat_check
      && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
        *repeat = HUF_repeat_none;
    }
    /* Heuristic : use existing table for small inputs */
    if ((flags & HUF_flags_preferRepeat) && repeat && *repeat != HUF_repeat_none) {
        return HUF_compressCTable_internal(ostart, op, oend,
                                           src, srcSize,
                                           nbStreams, oldHufTable, flags);
    }

    /* Build Huffman Tree */
    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, sizeof(table->wksps), table->CTable, table->count, flags);
    {   size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
                                                    maxSymbolValue, huffLog,
                                                    &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
        CHECK_F(maxBits);
        huffLog = (U32)maxBits;
        DEBUGLOG(6, "bit distribution completed (%zu symbols)", showCTableBits(table->CTable + 1, maxSymbolValue+1));
    }

    /* Write table description header */
    {   CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
                                              &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
        /* Check if using previous huffman table is beneficial */
        if (repeat && *repeat != HUF_repeat_none) {
            size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
            size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
            if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
                return HUF_compressCTable_internal(ostart, op, oend,
                                                   src, srcSize,
                                                   nbStreams, oldHufTable, flags);
        }   }

        /* Use the new huffman table */
        if (hSize + 12ul >= srcSize) { return 0; }
        op += hSize;
        if (repeat) { *repeat = HUF_repeat_none; }
        if (oldHufTable)
            ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
    }
    return HUF_compressCTable_internal(ostart, op, oend,
                                       src, srcSize,
                                       nbStreams, table->CTable, flags);
}

size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize,
                      HUF_CElt* hufTable, HUF_repeat* repeat, int flags)
{
    DEBUGLOG(5, "HUF_compress1X_repeat (srcSize = %zu)", srcSize);
    return HUF_compress_internal(dst, dstSize, src, srcSize,
                                 maxSymbolValue, huffLog, HUF_singleStream,
                                 workSpace, wkspSize, hufTable,
                                 repeat, flags);
}

/* HUF_compress4X_repeat():
 * Compresses the input using 4 streams.
 * May skip compression quickly when the input looks incompressible,
 * and can reuse an existing Huffman compression table. */
size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize,
                      HUF_CElt* hufTable, HUF_repeat* repeat, int flags)
{
    DEBUGLOG(5, "HUF_compress4X_repeat (srcSize = %zu)", srcSize);
    return HUF_compress_internal(dst, dstSize, src, srcSize,
                                 maxSymbolValue, huffLog, HUF_fourStreams,
                                 workSpace, wkspSize,
                                 hufTable, repeat, flags);
}
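
/* A minimal usage sketch for the *_repeat entry points (illustrative
 * only; buffer names and sizes here are hypothetical, flags = 0):
 *
 *   HUF_CElt table[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];  // persists across blocks
 *   HUF_repeat repeat = HUF_repeat_none;
 *   U64 wksp[HUF_WORKSPACE_SIZE_U64];
 *   size_t const cSize = HUF_compress4X_repeat(dst, dstCapacity, src, srcSize,
 *                                              HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
 *                                              wksp, sizeof(wksp),
 *                                              table, &repeat, 0);
 *
 * cSize == 0 means the block was judged not compressible; on success,
 * `table` and `repeat` are updated so the next block can reuse the table.
 */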