/* ******************************************************************
 * Huffman encoder, part of New Generation Entropy library
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 *  You can contact the author at :
 *  - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
 *  - Public forum : https://groups.google.com/forum/#!forum/lz4c
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
****************************************************************** */
/* **************************************************************
*  Compiler specifics
****************************************************************/
#ifdef _MSC_VER    /* Visual Studio */
#  pragma warning(disable : 4127)  /* disable: C4127: conditional expression is constant */
#endif
/* **************************************************************
*  Includes
****************************************************************/
#include "../common/zstd_deps.h"     /* ZSTD_memcpy, ZSTD_memset */
#include "../common/compiler.h"
#include "../common/bitstream.h"
#include "hist.h"
#define FSE_STATIC_LINKING_ONLY   /* FSE_optimalTableLog_internal */
#include "../common/fse.h"        /* header compression */
#include "../common/huf.h"
#include "../common/error_private.h"
#include "../common/bits.h"       /* ZSTD_highbit32 */
/* **************************************************************
*  Error Management
****************************************************************/
#define HUF_isError ERR_isError
#define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)   /* use only *after* variable declarations */
/* **************************************************************
*  Required declarations
****************************************************************/
typedef struct nodeElt_s {
    U32 count;
    U16 parent;
    BYTE byte;
    BYTE nbBits;
} nodeElt;
/* **************************************************************
*  Debug Traces
****************************************************************/

#if DEBUGLEVEL >= 2

static size_t showU32(const U32* arr, size_t size)
{
    size_t u;
    for (u=0; u<size; u++) { RAWLOG(6, " %u", arr[u]); (void)arr; }
    RAWLOG(6, " \n");
    return size;
}

static size_t HUF_getNbBits(HUF_CElt elt);

static size_t showCTableBits(const HUF_CElt* ctable, size_t size)
{
    size_t u;
    for (u=0; u<size; u++) { RAWLOG(6, " %zu", HUF_getNbBits(ctable[u])); (void)ctable; }
    RAWLOG(6, " \n");
    return size;
}

static size_t showHNodeSymbols(const nodeElt* hnode, size_t size)
{
    size_t u;
    for (u=0; u<size; u++) { RAWLOG(6, " %u", hnode[u].byte); (void)hnode; }
    RAWLOG(6, " \n");
    return size;
}

static size_t showHNodeBits(const nodeElt* hnode, size_t size)
{
    size_t u;
    for (u=0; u<size; u++) { RAWLOG(6, " %u", hnode[u].nbBits); (void)hnode; }
    RAWLOG(6, " \n");
    return size;
}

#endif
/* *******************************************************
*  HUF : Huffman block compression
*********************************************************/
#define HUF_WORKSPACE_MAX_ALIGNMENT 8

static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align)
{
    size_t const mask = align - 1;
    size_t const rem = (size_t)workspace & mask;
    size_t const add = (align - rem) & mask;
    BYTE* const aligned = (BYTE*)workspace + add;
    assert((align & (align - 1)) == 0); /* pow 2 */
    assert(align <= HUF_WORKSPACE_MAX_ALIGNMENT);
    if (*workspaceSizePtr >= add) {
        assert(add < align);
        assert(((size_t)aligned & mask) == 0);
        *workspaceSizePtr -= add;
        return aligned;
    } else {
        *workspaceSizePtr = 0;
        return NULL;
    }
}
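
/* Illustrative sketch (not part of the library): how the alignment
 * arithmetic above behaves. Assuming a workspace pointer whose address
 * ends in 0x...6 and align == 8:
 *   mask = 7, rem = 6, add = (8 - 6) & 7 = 2
 * so 2 bytes are skipped and 2 bytes are deducted from *workspaceSizePtr.
 * If the pointer is already 8-byte aligned, rem == 0 and add == 0.
 */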
/* HUF_compressWeights() :
 * Same as FSE_compress(), but dedicated to huff0's weights compression.
 * The use case needs much less stack memory.
 * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
 */
#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6

typedef struct {
    FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
    U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
    unsigned count[HUF_TABLELOG_MAX+1];
    S16 norm[HUF_TABLELOG_MAX+1];
} HUF_CompressWeightsWksp;

static size_t
HUF_compressWeights(void* dst, size_t dstSize,
              const void* weightTable, size_t wtSize,
                    void* workspace, size_t workspaceSize)
{
    BYTE* const ostart = (BYTE*) dst;
    BYTE* op = ostart;
    BYTE* const oend = ostart + dstSize;

    unsigned maxSymbolValue = HUF_TABLELOG_MAX;
    U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
    HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));

    if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC);

    /* init conditions */
    if (wtSize <= 1) return 0;  /* Not compressible */

    /* Scan input and build symbol stats */
    {   unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize);   /* never fails */
        if (maxCount == wtSize) return 1;   /* only a single symbol in src : rle */
        if (maxCount == 1) return 0;        /* each symbol present maximum once => not compressible */
    }

    tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
    CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) );

    /* Write table description header */
    {   CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) );
        op += hSize;
    }

    /* Compress */
    CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) );
    {   CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) );
        if (cSize == 0) return 0;   /* not enough space for compressed data */
        op += cSize;
    }

    return (size_t)(op-ostart);
}
static size_t HUF_getNbBits(HUF_CElt elt) { return elt & 0xFF; }

static size_t HUF_getNbBitsFast(HUF_CElt elt) { return elt; }

static size_t HUF_getValue(HUF_CElt elt) { return elt & ~(size_t)0xFF; }

static size_t HUF_getValueFast(HUF_CElt elt) { return elt; }

static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits)
{
    assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX);
    *elt = nbBits;
}

static void HUF_setValue(HUF_CElt* elt, size_t value)
{
    size_t const nbBits = HUF_getNbBits(*elt);
    if (nbBits > 0) {
        assert((value >> nbBits) == 0);
        *elt |= value << (sizeof(HUF_CElt) * 8 - nbBits);
    }
}
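
/* Worked example (illustrative only): for a symbol coded with nbBits = 3
 * and value = 0b101 on a 64-bit size_t, the packed HUF_CElt is
 *   elt = 3 | ((size_t)0b101 << (64 - 3))
 * i.e. nbBits lives in the low byte and the code value sits flush against
 * the top of the word, ready to be OR'ed into the bit container.
 */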
HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable)
{
    HUF_CTableHeader header;
    ZSTD_memcpy(&header, ctable, sizeof(header));
    return header;
}

static void HUF_writeCTableHeader(HUF_CElt* ctable, U32 tableLog, U32 maxSymbolValue)
{
    HUF_CTableHeader header;
    HUF_STATIC_ASSERT(sizeof(ctable[0]) == sizeof(header));
    ZSTD_memset(&header, 0, sizeof(header));
    assert(tableLog < 256);
    header.tableLog = (BYTE)tableLog;
    assert(maxSymbolValue < 256);
    header.maxSymbolValue = (BYTE)maxSymbolValue;
    ZSTD_memcpy(ctable, &header, sizeof(header));
}
typedef struct {
    HUF_CompressWeightsWksp wksp;
    BYTE bitsToWeight[HUF_TABLELOG_MAX + 1];   /* precomputed conversion table */
    BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
} HUF_WriteCTableWksp;

size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize,
                            const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog,
                            void* workspace, size_t workspaceSize)
{
    HUF_CElt const* const ct = CTable + 1;
    BYTE* op = (BYTE*)dst;
    U32 n;
    HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32));

    HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE >= sizeof(HUF_WriteCTableWksp));

    assert(HUF_readCTableHeader(CTable).maxSymbolValue == maxSymbolValue);
    assert(HUF_readCTableHeader(CTable).tableLog == huffLog);

    /* check conditions */
    if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC);
    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);

    /* convert to weight */
    wksp->bitsToWeight[0] = 0;
    for (n=1; n<huffLog+1; n++)
        wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
    for (n=0; n<maxSymbolValue; n++)
        wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])];
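    /* Worked example (illustrative only): with huffLog == 6, a symbol using
     * 3 bits gets weight = 6 + 1 - 3 = 4; symbols with nbBits == 0 keep
     * weight 0, courtesy of bitsToWeight[0] = 0. Larger weights therefore
     * mean shorter codes, which is the convention HUF_readStats() expects.
     */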
    /* attempt weights compression by FSE */
    if (maxDstSize < 1) return ERROR(dstSize_tooSmall);
    {   CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) );
        if ((hSize>1) & (hSize < maxSymbolValue/2)) {   /* FSE compressed */
            op[0] = (BYTE)hSize;
            return hSize+1;
    }   }

    /* write raw values as 4-bits (max : 15) */
    if (maxSymbolValue > (256-128)) return ERROR(GENERIC);   /* should not happen : likely means source cannot be compressed */
    if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall);   /* not enough space within dst buffer */
    op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
    wksp->huffWeight[maxSymbolValue] = 0;   /* to be sure it doesn't cause msan issue in final combination */
    for (n=0; n<maxSymbolValue; n+=2)
        op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]);
    return ((maxSymbolValue+1)/2) + 1;
}
size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights)
{
    BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];   /* init not required, even though some static analyzer may complain */
    U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1];   /* large enough for values from 0 to 16 */
    U32 tableLog = 0;
    U32 nbSymbols = 0;
    HUF_CElt* const ct = CTable + 1;

    /* get symbol weights */
    CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
    *hasZeroWeights = (rankVal[0] > 0);

    /* check result */
    if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
    if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall);

    *maxSymbolValuePtr = nbSymbols - 1;

    HUF_writeCTableHeader(CTable, tableLog, *maxSymbolValuePtr);

    /* Prepare base value per rank */
    {   U32 n, nextRankStart = 0;
        for (n=1; n<=tableLog; n++) {
            U32 curr = nextRankStart;
            nextRankStart += (rankVal[n] << (n-1));
            rankVal[n] = curr;
    }   }

    /* fill nbBits */
    {   U32 n; for (n=0; n<nbSymbols; n++) {
            const U32 w = huffWeight[n];
            HUF_setNbBits(ct + n, (BYTE)(tableLog + 1 - w) & -(w != 0));
    }   }

    /* fill val */
    {   U16 nbPerRank[HUF_TABLELOG_MAX+2]  = {0};  /* support w=0=>n=tableLog+1 */
        U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
        { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[HUF_getNbBits(ct[n])]++; }
        /* determine starting value per rank */
        valPerRank[tableLog+1] = 0;   /* for w==0 */
        {   U16 min = 0;
            U32 n; for (n=tableLog; n>0; n--) {   /* start at n=tablelog <-> w=1 */
                valPerRank[n] = min;     /* get starting value within each rank */
                min += nbPerRank[n];
                min >>= 1;
        }   }
        /* assign value within rank, symbol order */
        { U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); }
    }

    return readSize;
}
U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue)
{
    const HUF_CElt* const ct = CTable + 1;
    assert(symbolValue <= HUF_SYMBOLVALUE_MAX);
    if (symbolValue > HUF_readCTableHeader(CTable).maxSymbolValue)
        return 0;
    return (U32)HUF_getNbBits(ct[symbolValue]);
}
/**
 * HUF_setMaxHeight():
 * Try to enforce @targetNbBits on the Huffman tree described in @huffNode.
 *
 * It attempts to convert all nodes with nbBits > @targetNbBits
 * to employ @targetNbBits instead. Then it adjusts the tree
 * so that it remains a valid canonical Huffman tree.
 *
 * @pre               The sum of the ranks of each symbol == 2^largestBits,
 *                    where largestBits == huffNode[lastNonNull].nbBits.
 * @post              The sum of the ranks of each symbol == 2^largestBits,
 *                    where largestBits is the return value (expected <= targetNbBits).
 *
 * @param huffNode    The Huffman tree modified in place to enforce targetNbBits.
 *                    It's presumed sorted, from most frequent to rarest symbol.
 * @param lastNonNull The symbol with the lowest count in the Huffman tree.
 * @param targetNbBits  The allowed number of bits, which the Huffman tree
 *                    may not respect. After this function the Huffman tree will
 *                    respect targetNbBits.
 * @return            The maximum number of bits of the Huffman tree after adjustment.
 */
static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 targetNbBits)
{
    const U32 largestBits = huffNode[lastNonNull].nbBits;
    /* early exit : no elt > targetNbBits, so the tree is already valid. */
    if (largestBits <= targetNbBits) return largestBits;

    DEBUGLOG(5, "HUF_setMaxHeight (targetNbBits = %u)", targetNbBits);

    /* there are several too large elements (at least >= 2) */
    {   int totalCost = 0;
        const U32 baseCost = 1 << (largestBits - targetNbBits);
        int n = (int)lastNonNull;

        /* Adjust any ranks > targetNbBits to targetNbBits.
         * Compute totalCost, which is how far the sum of the ranks is
         * over 2^largestBits after adjusting the offending ranks.
         */
        while (huffNode[n].nbBits > targetNbBits) {
            totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
            huffNode[n].nbBits = (BYTE)targetNbBits;
            n--;
        }
        /* n stops at huffNode[n].nbBits <= targetNbBits */
        assert(huffNode[n].nbBits <= targetNbBits);
        /* n ends at index of smallest symbol using < targetNbBits */
        while (huffNode[n].nbBits == targetNbBits) --n;

        /* renorm totalCost from 2^largestBits to 2^targetNbBits
         * note : totalCost is necessarily a multiple of baseCost */
        assert(((U32)totalCost & (baseCost - 1)) == 0);
        totalCost >>= (largestBits - targetNbBits);
        assert(totalCost > 0);

        /* repay normalized cost */
        {   U32 const noSymbol = 0xF0F0F0F0;
            U32 rankLast[HUF_TABLELOG_MAX+2];

            /* Get pos of last (smallest = lowest cum. count) symbol per rank */
            ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
            {   U32 currentNbBits = targetNbBits;
                int pos;
                for (pos=n ; pos >= 0; pos--) {
                    if (huffNode[pos].nbBits >= currentNbBits) continue;
                    currentNbBits = huffNode[pos].nbBits;   /* < targetNbBits */
                    rankLast[targetNbBits-currentNbBits] = (U32)pos;
            }   }

            while (totalCost > 0) {
                /* Try to reduce the next power of 2 above totalCost because we
                 * gain back half the rank.
                 */
                U32 nBitsToDecrease = ZSTD_highbit32((U32)totalCost) + 1;
                for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
                    U32 const highPos = rankLast[nBitsToDecrease];
                    U32 const lowPos = rankLast[nBitsToDecrease-1];
                    if (highPos == noSymbol) continue;
                    /* Decrease highPos if no symbols of lowPos or if it is
                     * not cheaper to remove 2 lowPos than highPos.
                     */
                    if (lowPos == noSymbol) break;
                    {   U32 const highTotal = huffNode[highPos].count;
                        U32 const lowTotal = 2 * huffNode[lowPos].count;
                        if (highTotal <= lowTotal) break;
                }   }
                /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
                assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
                /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
                while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
                    nBitsToDecrease++;
                assert(rankLast[nBitsToDecrease] != noSymbol);
                /* Increase the number of bits to gain back half the rank cost. */
                totalCost -= 1 << (nBitsToDecrease-1);
                huffNode[rankLast[nBitsToDecrease]].nbBits++;

                /* Fix up the new rank.
                 * If the new rank was empty, this symbol is now its smallest.
                 * Otherwise, this symbol will be the largest in the new rank so no adjustment.
                 */
                if (rankLast[nBitsToDecrease-1] == noSymbol)
                    rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];
                /* Fix up the old rank.
                 * If the symbol was at position 0, meaning it was the highest weight symbol in the tree,
                 * it must be the only symbol in its rank, so the old rank now has no symbols.
                 * Otherwise, since the Huffman nodes are sorted by count, the previous position is now
                 * the smallest node in the rank. If the previous position belongs to a different rank,
                 * then the rank is now empty.
                 */
                if (rankLast[nBitsToDecrease] == 0)    /* special case, reached largest symbol */
                    rankLast[nBitsToDecrease] = noSymbol;
                else {
                    rankLast[nBitsToDecrease]--;
                    if (huffNode[rankLast[nBitsToDecrease]].nbBits != targetNbBits-nBitsToDecrease)
                        rankLast[nBitsToDecrease] = noSymbol;   /* this rank is now empty */
                }
            }   /* while (totalCost > 0) */

            /* If we've removed too much weight, then we have to add it back.
             * To avoid overshooting again, we only adjust the smallest rank.
             * We take the largest nodes from the lowest rank 0 and move them
             * to rank 1. There's guaranteed to be enough rank 0 symbols because
             * TODO.
             */
            while (totalCost < 0) {   /* Sometimes, cost correction overshoot */
                /* special case : no rank 1 symbol (using targetNbBits-1);
                 * let's create one from largest rank 0 (using targetNbBits).
                 */
                if (rankLast[1] == noSymbol) {
                    while (huffNode[n].nbBits == targetNbBits) n--;
                    huffNode[n+1].nbBits--;
                    assert(n >= 0);
                    rankLast[1] = (U32)(n+1);
                    totalCost++;
                    continue;
                }
                huffNode[ rankLast[1] + 1 ].nbBits--;
                rankLast[1]++;
                totalCost ++;
            }
        }   /* repay normalized cost */
    }   /* there are several too large elements (at least >= 2) */

    return targetNbBits;
}
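
/* Worked example (illustrative only): suppose largestBits == 13 and
 * targetNbBits == 11, so baseCost == 4. Each node demoted from 13 to 11
 * bits adds 4 - (1 << (13 - 13)) == 3 to totalCost; after renormalization
 * (>> 2), each remaining unit of totalCost is repaid by promoting one node
 * from (11 - k) to (11 - k + 1) bits, which repays 1 << (k - 1) units.
 * The loop above prefers the largest affordable k, since that repays the
 * most cost per promoted symbol.
 */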
typedef nodeElt huffNodeTable[2 * (HUF_SYMBOLVALUE_MAX + 1)];

/* Number of buckets available for HUF_sort() */
#define RANK_POSITION_TABLE_SIZE 192

typedef struct {
  U16 base;
  U16 curr;
} rankPos;

typedef struct {
  huffNodeTable huffNodeTbl;
  rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
} HUF_buildCTable_wksp_tables;

/* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing.
 * Strategy is to use as many buckets as possible for representing distinct
 * counts while using the remainder to represent all "large" counts.
 *
 * To satisfy this requirement for 192 buckets, we can do the following:
 * Let buckets 0-166 represent distinct counts of [0, 166]
 * Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing.
 */
#define RANK_POSITION_MAX_COUNT_LOG 32
#define RANK_POSITION_LOG_BUCKETS_BEGIN ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */)
#define RANK_POSITION_DISTINCT_COUNT_CUTOFF (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */)

/* Return the appropriate bucket index for a given count. See definition of
 * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy.
 */
static U32 HUF_getIndex(U32 const count) {
    return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF)
        ? count
        : ZSTD_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN;
}
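
/* Worked example (illustrative only): with the constants above, a symbol
 * count of 100 falls below RANK_POSITION_DISTINCT_COUNT_CUTOFF and maps
 * to bucket 100, while a count of 1000 maps to bucket
 * ZSTD_highbit32(1000) + RANK_POSITION_LOG_BUCKETS_BEGIN == 9 + 158 == 167.
 * Distinct small counts keep their own bucket; large counts share
 * logarithmic buckets and are re-sorted afterwards by HUF_sort().
 */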
/* Helper swap function for HUF_quickSortPartition() */
static void HUF_swapNodes(nodeElt* a, nodeElt* b) {
    nodeElt tmp = *a;
    *a = *b;
    *b = tmp;
}

/* Returns 0 if the huffNode array is not sorted by descending count */
MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) {
    U32 i;
    for (i = 1; i < maxSymbolValue1; ++i) {
        if (huffNode[i].count > huffNode[i-1].count) {
            return 0;
        }
    }
    return 1;
}
/* Insertion sort by descending order */
HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) {
    int i;
    int const size = high-low+1;
    huffNode += low;
    for (i = 1; i < size; ++i) {
        nodeElt const key = huffNode[i];
        int j = i - 1;
        while (j >= 0 && huffNode[j].count < key.count) {
            huffNode[j + 1] = huffNode[j];
            j--;
        }
        huffNode[j + 1] = key;
    }
}
/* Pivot helper function for quicksort. */
static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) {
    /* Simply select rightmost element as pivot. "Better" selectors like
     * median-of-three don't experimentally appear to have any benefit.
     */
    U32 const pivot = arr[high].count;
    int i = low - 1;
    int j = low;
    for ( ; j < high; j++) {
        if (arr[j].count > pivot) {
            i++;
            HUF_swapNodes(&arr[i], &arr[j]);
        }
    }
    HUF_swapNodes(&arr[i + 1], &arr[high]);
    return i + 1;
}
/* Classic quicksort by descending with partially iterative calls
 * to reduce worst case callstack size.
 */
static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) {
    int const kInsertionSortThreshold = 8;
    if (high - low < kInsertionSortThreshold) {
        HUF_insertionSort(arr, low, high);
        return;
    }
    while (low < high) {
        int const idx = HUF_quickSortPartition(arr, low, high);
        if (idx - low < high - idx) {
            HUF_simpleQuickSort(arr, low, idx - 1);
            low = idx + 1;
        } else {
            HUF_simpleQuickSort(arr, idx + 1, high);
            high = idx - 1;
        }
    }
}
/**
 * HUF_sort():
 * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
 * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket.
 *
 * @param[out] huffNode       Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
 *                            Must have (maxSymbolValue + 1) entries.
 * @param[in]  count          Histogram of the symbols.
 * @param[in]  maxSymbolValue Maximum symbol value.
 * @param      rankPosition   This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
 */
static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) {
    U32 n;
    U32 const maxSymbolValue1 = maxSymbolValue+1;

    /* Compute base and set curr to base.
     * For symbol s let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1.
     * See HUF_getIndex to see bucketing strategy.
     * We attribute each symbol to lowerRank's base value, because we want to know where
     * each rank begins in the output, so for rank R we want to count ranks R+1 and above.
     */
    ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
    for (n = 0; n < maxSymbolValue1; ++n) {
        U32 lowerRank = HUF_getIndex(count[n]);
        assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1);
        rankPosition[lowerRank].base++;
    }

    assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
    /* Set up the rankPosition table */
    for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
        rankPosition[n-1].base += rankPosition[n].base;
        rankPosition[n-1].curr = rankPosition[n-1].base;
    }

    /* Insert each symbol into their appropriate bucket, setting up rankPosition table. */
    for (n = 0; n < maxSymbolValue1; ++n) {
        U32 const c = count[n];
        U32 const r = HUF_getIndex(c) + 1;
        U32 const pos = rankPosition[r].curr++;
        assert(pos < maxSymbolValue1);
        huffNode[pos].count = c;
        huffNode[pos].byte  = (BYTE)n;
    }

    /* Sort each bucket. */
    for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) {
        int const bucketSize = rankPosition[n].curr - rankPosition[n].base;
        U32 const bucketStartIdx = rankPosition[n].base;
        if (bucketSize > 1) {
            assert(bucketStartIdx < maxSymbolValue1);
            HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1);
        }
    }

    assert(HUF_isSorted(huffNode, maxSymbolValue1));
}
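
/* Worked example (illustrative only): for counts {A:5, B:1, C:5}, the
 * counting pass gives base[5] = 2 (A and C) and base[1] = 1 (B). After
 * the suffix-sum pass, curr[6] == 0 and curr[2] == 2, so A and C
 * (rank 5 + 1) land at positions 0 and 1 and B (rank 1 + 1) at position 2:
 * the output [A, C, B] is sorted by decreasing count without comparisons.
 */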
/** HUF_buildCTable_wksp() :
 *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
 *  `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables).
 */
#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)

/* HUF_buildTree():
 * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
 *
 * @param huffNode        The array sorted by HUF_sort(). Builds the Huffman tree in this array.
 * @param maxSymbolValue  The maximum symbol value.
 * @return                The smallest node in the Huffman tree (by count).
 */
static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
{
    nodeElt* const huffNode0 = huffNode - 1;
    int nonNullRank;
    int lowS, lowN;
    int nodeNb = STARTNODE;
    int n, nodeRoot;
    DEBUGLOG(5, "HUF_buildTree (alphabet size = %u)", maxSymbolValue + 1);
    /* init for parents */
    nonNullRank = (int)maxSymbolValue;
    while(huffNode[nonNullRank].count == 0) nonNullRank--;
    lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
    huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
    huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb;
    nodeNb++; lowS-=2;
    for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
    huffNode0[0].count = (U32)(1U<<31);  /* fake entry, strong barrier */

    /* create parents */
    while (nodeNb <= nodeRoot) {
        int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
        int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
        huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
        huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb;
        nodeNb++;
    }

    /* distribute weights (unlimited tree height) */
    huffNode[nodeRoot].nbBits = 0;
    for (n=nodeRoot-1; n>=STARTNODE; n--)
        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
    for (n=0; n<=nonNullRank; n++)
        huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;

    DEBUGLOG(6, "Initial distribution of bits completed (%zu sorted symbols)", showHNodeBits(huffNode, maxSymbolValue+1));

    return nonNullRank;
}
/**
 * HUF_buildCTableFromTree():
 * Build the CTable given the Huffman tree in huffNode.
 *
 * @param[out] CTable         The output Huffman CTable.
 * @param      huffNode       The Huffman tree.
 * @param      nonNullRank    The last and smallest node in the Huffman tree.
 * @param      maxSymbolValue The maximum symbol value.
 * @param      maxNbBits      The exact maximum number of bits used in the Huffman tree.
 */
static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
{
    HUF_CElt* const ct = CTable + 1;
    /* fill result into ctable (val, nbBits) */
    int n;
    U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
    U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
    int const alphabetSize = (int)(maxSymbolValue + 1);
    for (n=0; n<=nonNullRank; n++)
        nbPerRank[huffNode[n].nbBits]++;
    /* determine starting value per rank */
    {   U16 min = 0;
        for (n=(int)maxNbBits; n>0; n--) {
            valPerRank[n] = min;     /* get starting value within each rank */
            min += nbPerRank[n];
            min >>= 1;
    }   }
    for (n=0; n<alphabetSize; n++)
        HUF_setNbBits(ct + huffNode[n].byte, huffNode[n].nbBits);   /* push nbBits per symbol, symbol order */
    for (n=0; n<alphabetSize; n++)
        HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++);   /* assign value within rank, symbol order */

    HUF_writeCTableHeader(CTable, maxNbBits, maxSymbolValue);
}
size_t
HUF_buildCTable_wksp(HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
                     void* workSpace, size_t wkspSize)
{
    HUF_buildCTable_wksp_tables* const wksp_tables =
        (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32));
    nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
    nodeElt* const huffNode = huffNode0+1;
    int nonNullRank;

    HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE == sizeof(HUF_buildCTable_wksp_tables));

    DEBUGLOG(5, "HUF_buildCTable_wksp (alphabet size = %u)", maxSymbolValue+1);

    /* safety checks */
    if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
        return ERROR(workSpace_tooSmall);
    if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
        return ERROR(maxSymbolValue_tooLarge);
    ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));

    /* sort, decreasing order */
    HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
    DEBUGLOG(6, "sorted symbols completed (%zu symbols)", showHNodeSymbols(huffNode, maxSymbolValue+1));

    /* build tree */
    nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);

    /* determine and enforce maxTableLog */
    maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
    if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC);   /* check fit into table */

    HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits);

    return maxNbBits;
}
size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
{
    HUF_CElt const* ct = CTable + 1;
    size_t nbBits = 0;
    int s;
    for (s = 0; s <= (int)maxSymbolValue; ++s)
        nbBits += HUF_getNbBits(ct[s]) * count[s];
    return nbBits >> 3;
}

int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
    HUF_CTableHeader header = HUF_readCTableHeader(CTable);
    HUF_CElt const* ct = CTable + 1;
    int bad = 0;
    int s;

    assert(header.tableLog <= HUF_TABLELOG_ABSOLUTEMAX);

    if (header.maxSymbolValue < maxSymbolValue)
        return 0;

    for (s = 0; s <= (int)maxSymbolValue; ++s)
        bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0);
    return !bad;
}

size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
/** HUF_CStream_t:
 *  Huffman uses its own BIT_CStream_t implementation.
 *  There are three major differences from BIT_CStream_t:
 *  1. HUF_addBits() takes a HUF_CElt (size_t) which is
 *     the pair (nbBits, value) in the format:
 *     - Bits [0, 4)            = nbBits
 *     - Bits [4, 64 - nbBits)  = 0
 *     - Bits [64 - nbBits, 64) = value
 *  2. The bitContainer is built from the upper bits and
 *     right shifted. E.g. to add a new value of N bits
 *     you right shift the bitContainer by N, then or in
 *     the new value into the N upper bits.
 *  3. The bitstream has two bit containers. You can add
 *     bits to the second container and merge them into
 *     the first container.
 */

#define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8)

typedef struct {
    size_t bitContainer[2];
    size_t bitPos[2];

    BYTE* startPtr;
    BYTE* ptr;
    BYTE* endPtr;
} HUF_CStream_t;
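
/* Illustrative sketch (not part of the library): adding nbBits = 5,
 * value = 0b10110 on a 64-bit container. HUF_addBits() computes
 *   bitContainer >>= 5;
 *   bitContainer |= (size_t)0b10110 << (64 - 5);
 * so previously written codes slide toward the low bits while the newest
 * code always occupies the top of the word; HUF_flushBits() can then
 * emit the pending top bits with a single shift.
 */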
/**! HUF_initCStream():
 * Initializes the bitstream.
 * @returns 0 or an error code.
 */
static size_t HUF_initCStream(HUF_CStream_t* bitC,
                              void* startPtr, size_t dstCapacity)
{
    ZSTD_memset(bitC, 0, sizeof(*bitC));
    bitC->startPtr = (BYTE*)startPtr;
    bitC->ptr = bitC->startPtr;
    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]);
    if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall);
    return 0;
}
/*! HUF_addBits():
 * Adds the symbol stored in HUF_CElt elt to the bitstream.
 *
 * @param elt   The element we're adding. This is a (nbBits, value) pair.
 *              See the HUF_CStream_t docs for the format.
 * @param idx   Insert into the bitstream at this idx.
 * @param kFast This is a template parameter. If the bitstream is guaranteed
 *              to have at least 4 unused bits after this call it may be 1,
 *              otherwise it must be 0. HUF_addBits() is faster when fast is set.
 */
FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast)
{
    assert(idx <= 1);
    assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX);
    /* This is efficient on x86-64 with BMI2 because shrx
     * only reads the low 6 bits of the register. The compiler
     * knows this and elides the mask. When fast is set,
     * every operation can use the same value loaded from elt.
     */
    bitC->bitContainer[idx] >>= HUF_getNbBits(elt);
    bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt);
    /* We only read the low 8 bits of bitC->bitPos[idx] so it
     * doesn't matter that the high bits have noise from the value.
     */
    bitC->bitPos[idx] += HUF_getNbBitsFast(elt);
    assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
    /* The last 4-bits of elt are dirty if fast is set,
     * so we must not be overwriting bits that have already been
     * inserted into the bit container.
     */
#if DEBUGLEVEL >= 1
    {
        size_t const nbBits = HUF_getNbBits(elt);
        size_t const dirtyBits = nbBits == 0 ? 0 : ZSTD_highbit32((U32)nbBits) + 1;
        (void)dirtyBits;
        /* Middle bits are 0. */
        assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0);
        /* We didn't overwrite any bits in the bit container. */
        assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER);
    }
#endif
}
FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC)
{
    bitC->bitContainer[1] = 0;
    bitC->bitPos[1] = 0;
}

/*! HUF_mergeIndex1() :
 * Merges the bit container @ index 1 into the bit container @ index 0
 * and zeros the bit container @ index 1.
 */
FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC)
{
    assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER);
    bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF);
    bitC->bitContainer[0] |= bitC->bitContainer[1];
    bitC->bitPos[0] += bitC->bitPos[1];
    assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER);
}
/*! HUF_flushBits() :
 * Flushes the bits in the bit container @ index 0.
 *
 * @post bitPos will be < 8.
 * @param kFast If kFast is set then we must know a-priori that
 *              the bit container will not overflow.
 */
FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast)
{
    /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */
    size_t const nbBits = bitC->bitPos[0] & 0xFF;
    size_t const nbBytes = nbBits >> 3;
    /* The top nbBits bits of bitContainer are the ones we need. */
    size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits);
    /* Mask bitPos to account for the bytes we consumed. */
    bitC->bitPos[0] &= 7;
    assert(nbBits > 0);
    assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8);
    assert(bitC->ptr <= bitC->endPtr);
    MEM_writeLEST(bitC->ptr, bitContainer);
    bitC->ptr += nbBytes;
    assert(!kFast || bitC->ptr <= bitC->endPtr);
    if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
    /* bitContainer doesn't need to be modified because the leftover
     * bits are already the top bitPos bits. And we don't care about
     * noise in the lower values.
     */
}
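
/* Worked example (illustrative only): if 21 bits are pending,
 * nbBytes == 21 >> 3 == 2, so 2 bytes (16 bits) are written and
 * bitPos[0] becomes 21 & 7 == 5. The 5 leftover bits are still the top
 * bits of bitContainer[0], so no fix-up of the container is required.
 */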
/*! HUF_endMark()
 * @returns The Huffman stream end mark: A 1-bit value = 1.
 */
static HUF_CElt HUF_endMark(void)
{
    HUF_CElt endMark;
    HUF_setNbBits(&endMark, 1);
    HUF_setValue(&endMark, 1);
    return endMark;
}

/*! HUF_closeCStream() :
 *  @return Size of CStream, in bytes,
 *          or 0 if it could not fit into dstBuffer */
static size_t HUF_closeCStream(HUF_CStream_t* bitC)
{
    HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0);
    HUF_flushBits(bitC, /* kFast */ 0);
    {   size_t const nbBits = bitC->bitPos[0] & 0xFF;
        if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
        return (size_t)(bitC->ptr - bitC->startPtr) + (nbBits > 0);
    }
}
FORCE_INLINE_TEMPLATE void
HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast)
{
    HUF_addBits(bitCPtr, CTable[symbol], idx, fast);
}
FORCE_INLINE_TEMPLATE void
HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC,
                                   const BYTE* ip, size_t srcSize,
                                   const HUF_CElt* ct,
                                   int kUnroll, int kFastFlush, int kLastFast)
{
    /* Join to kUnroll */
    int n = (int)srcSize;
    int rem = n % kUnroll;
    if (rem > 0) {
        for (; rem > 0; --rem) {
            HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0);
        }
        HUF_flushBits(bitC, kFastFlush);
    }
    assert(n % kUnroll == 0);

    /* Join to 2 * kUnroll */
    if (n % (2 * kUnroll)) {
        int u;
        for (u = 1; u < kUnroll; ++u) {
            HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1);
        }
        HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast);
        HUF_flushBits(bitC, kFastFlush);
        n -= kUnroll;
    }
    assert(n % (2 * kUnroll) == 0);

    for (; n>0; n-= 2 * kUnroll) {
        /* Encode kUnroll symbols into the bitstream @ index 0. */
        int u;
        for (u = 1; u < kUnroll; ++u) {
            HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1);
        }
        HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast);
        HUF_flushBits(bitC, kFastFlush);
        /* Encode kUnroll symbols into the bitstream @ index 1.
         * This allows us to start filling the bit container
         * without any data dependencies.
         */
        HUF_zeroIndex1(bitC);
        for (u = 1; u < kUnroll; ++u) {
            HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1);
        }
        HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast);
        /* Merge bitstream @ index 1 into the bitstream @ index 0 */
        HUF_mergeIndex1(bitC);
        HUF_flushBits(bitC, kFastFlush);
    }
    assert(n == 0);
}
/**
 * Returns a tight upper bound on the output space needed by Huffman
 * with 8 bytes buffer to handle over-writes. If the output is at least
 * this large we don't need to do bounds checks during Huffman encoding.
 */
static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog)
{
    return ((srcSize * tableLog) >> 3) + 8;
}
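
/* Worked example (illustrative only): srcSize = 1000 and tableLog = 11
 * give ((1000 * 11) >> 3) + 8 == 1375 + 8 == 1383 bytes. When dstSize is
 * at least this large, the encoder below can use the kFast paths and skip
 * per-flush bounds checks.
 */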
FORCE_INLINE_TEMPLATE size_t
HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize,
                                   const void* src, size_t srcSize,
                                   const HUF_CElt* CTable)
{
    U32 const tableLog = HUF_readCTableHeader(CTable).tableLog;
    HUF_CElt const* ct = CTable + 1;
    const BYTE* ip = (const BYTE*) src;
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    HUF_CStream_t bitC;

    /* init */
    if (dstSize < 8) return 0;   /* not enough space to compress */
    { BYTE* op = ostart;
      size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op));
      if (HUF_isError(initErr)) return 0; }

    if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11)
        HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFast */ 0, /* kLastFast */ 0);
    else {
        if (MEM_32bits()) {
            switch (tableLog) {
            case 11:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0);
                break;
            case 10: ZSTD_FALLTHROUGH;
            case 9: ZSTD_FALLTHROUGH;
            case 8:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1);
                break;
            case 7: ZSTD_FALLTHROUGH;
            default:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1);
                break;
            }
        } else {
            switch (tableLog) {
            case 11:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0);
                break;
            case 10:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1);
                break;
            case 9:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0);
                break;
            case 8:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0);
                break;
            case 7:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0);
                break;
            case 6: ZSTD_FALLTHROUGH;
            default:
                HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1);
                break;
            }
        }
    }
    assert(bitC.ptr <= bitC.endPtr);

    return HUF_closeCStream(&bitC);
}
#if DYNAMIC_BMI2

static BMI2_TARGET_ATTRIBUTE size_t
HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize,
                                   const void* src, size_t srcSize,
                                   const HUF_CElt* CTable)
{
    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
}

static size_t
HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize,
                                      const void* src, size_t srcSize,
                                      const HUF_CElt* CTable)
{
    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
}

static size_t
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
                              const void* src, size_t srcSize,
                              const HUF_CElt* CTable, const int flags)
{
    if (flags & HUF_flags_bmi2) {
        return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable);
    }
    return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
}

#else

static size_t
HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize,
                              const void* src, size_t srcSize,
                              const HUF_CElt* CTable, const int flags)
{
    (void)flags;
    return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
}

#endif

size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
{
    return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
}
static size_t
HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize,
                              const void* src, size_t srcSize,
                              const HUF_CElt* CTable, int flags)
{
    size_t const segmentSize = (srcSize+3)/4;   /* first 3 segments */
    const BYTE* ip = (const BYTE*) src;
    const BYTE* const iend = ip + srcSize;
    BYTE* const ostart = (BYTE*) dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart;

    if (dstSize < 6 + 1 + 1 + 1 + 8) return 0;   /* minimum space to compress successfully */
    if (srcSize < 12) return 0;   /* no saving possible : too small input */
    op += 6;   /* jumpTable */
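    /* Layout sketch (informative): the 4-stream output is
     *   [LE16 cSize1][LE16 cSize2][LE16 cSize3][stream1][stream2][stream3][stream4]
     * The 4th stream size is implied: it is whatever remains of the block.
     */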
    assert(op <= oend);
    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
        if (cSize == 0 || cSize > 65535) return 0;
        MEM_writeLE16(ostart, (U16)cSize);
        op += cSize;
    }

    ip += segmentSize;
    assert(op <= oend);
    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
        if (cSize == 0 || cSize > 65535) return 0;
        MEM_writeLE16(ostart+2, (U16)cSize);
        op += cSize;
    }

    ip += segmentSize;
    assert(op <= oend);
    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) );
        if (cSize == 0 || cSize > 65535) return 0;
        MEM_writeLE16(ostart+4, (U16)cSize);
        op += cSize;
    }

    ip += segmentSize;
    assert(op <= oend);
    assert(ip <= iend);
    {   CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, flags) );
        if (cSize == 0 || cSize > 65535) return 0;
        op += cSize;
    }

    return (size_t)(op-ostart);
}

size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags)
{
    return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags);
}
typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;

static size_t HUF_compressCTable_internal(
                BYTE* const ostart, BYTE* op, BYTE* const oend,
                const void* src, size_t srcSize,
                HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int flags)
{
    size_t const cSize = (nbStreams==HUF_singleStream) ?
                         HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags) :
                         HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags);
    if (HUF_isError(cSize)) { return cSize; }
    if (cSize==0) { return 0; }   /* uncompressible */
    op += cSize;
    /* check compressibility */
    assert(op >= ostart);
    if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
    return (size_t)(op-ostart);
}

typedef struct {
    unsigned count[HUF_SYMBOLVALUE_MAX + 1];
    HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
    union {
        HUF_buildCTable_wksp_tables buildCTable_wksp;
        HUF_WriteCTableWksp writeCTable_wksp;
        U32 hist_wksp[HIST_WKSP_SIZE_U32];
    } wksps;
} HUF_compress_tables_t;
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096
#define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10  /* Must be >= 2 */

unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue)
{
    unsigned cardinality = 0;
    unsigned i;

    for (i = 0; i < maxSymbolValue + 1; i++) {
        if (count[i] != 0) cardinality += 1;
    }

    return cardinality;
}

unsigned HUF_minTableLog(unsigned symbolCardinality)
{
    U32 minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1;
    return minBitsSymbols;
}
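
/* Worked example (illustrative only): a histogram with 100 distinct
 * symbols gives ZSTD_highbit32(100) + 1 == 6 + 1 == 7, the fewest bits
 * that can still give every present symbol a distinct code. This becomes
 * the lower bound of the search in HUF_optimalTableLog() below.
 */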
unsigned HUF_optimalTableLog(
            unsigned maxTableLog,
            size_t srcSize,
            unsigned maxSymbolValue,
            void* workSpace, size_t wkspSize,
            HUF_CElt* table,
      const unsigned* count,
            int flags)
{
    assert(srcSize > 1); /* Not supported, RLE should be used instead */
    assert(wkspSize >= sizeof(HUF_buildCTable_wksp_tables));

    if (!(flags & HUF_flags_optimalDepth)) {
        /* cheap evaluation, based on FSE */
        return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
    }

    {   BYTE* dst = (BYTE*)workSpace + sizeof(HUF_WriteCTableWksp);
        size_t dstSize = wkspSize - sizeof(HUF_WriteCTableWksp);
        size_t hSize, newSize;
        const unsigned symbolCardinality = HUF_cardinality(count, maxSymbolValue);
        const unsigned minTableLog = HUF_minTableLog(symbolCardinality);
        size_t optSize = ((size_t) ~0) - 1;
        unsigned optLog = maxTableLog, optLogGuess;

        DEBUGLOG(6, "HUF_optimalTableLog: probing huf depth (srcSize=%zu)", srcSize);

        /* Search until size increases */
        for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) {
            DEBUGLOG(7, "checking for huffLog=%u", optLogGuess);

            {   size_t maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize);
                if (ERR_isError(maxBits)) continue;

                if (maxBits < optLogGuess && optLogGuess > minTableLog) break;

                hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize);
            }

            if (ERR_isError(hSize)) continue;

            newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize;

            if (newSize > optSize + 1) {
                break;
            }

            if (newSize < optSize) {
                optSize = newSize;
                optLog = optLogGuess;
            }
        }
        assert(optLog <= HUF_TABLELOG_MAX);
        return optLog;
    }
}
/* HUF_compress_internal() :
 * `workSpace_align4` must be aligned on 4-bytes boundaries,
 * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */
static size_t
HUF_compress_internal (void* dst, size_t dstSize,
                 const void* src, size_t srcSize,
                       unsigned maxSymbolValue, unsigned huffLog,
                       HUF_nbStreams_e nbStreams,
                       void* workSpace, size_t wkspSize,
                       HUF_CElt* oldHufTable, HUF_repeat* repeat, int flags)
{
    HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t));
    BYTE* const ostart = (BYTE*)dst;
    BYTE* const oend = ostart + dstSize;
    BYTE* op = ostart;

    DEBUGLOG(5, "HUF_compress_internal (srcSize=%zu)", srcSize);
    HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE);

    /* checks & inits */
    if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
    if (!srcSize) return 0;  /* Uncompressed */
    if (!dstSize) return 0;  /* cannot fit anything within dst budget */
    if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);   /* current block size limit */
    if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
    if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
    if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
    if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;

    /* Heuristic : If old table is valid, use it for small inputs */
    if ((flags & HUF_flags_preferRepeat) && repeat && *repeat == HUF_repeat_valid) {
        return HUF_compressCTable_internal(ostart, op, oend,
                                           src, srcSize,
                                           nbStreams, oldHufTable, flags);
    }

    /* If uncompressible data is suspected, do a smaller sampling first */
    DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2);
    if ((flags & HUF_flags_suspectUncompressible) && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) {
        size_t largestTotal = 0;
        DEBUGLOG(5, "input suspected incompressible : sampling to check");
        {   unsigned maxSymbolValueBegin = maxSymbolValue;
            CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
            largestTotal += largestBegin;
        }
        {   unsigned maxSymbolValueEnd = maxSymbolValue;
            CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) );
            largestTotal += largestEnd;
        }
        if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0;   /* heuristic : probably not compressible enough */
    }

    /* Scan input and build symbol stats */
    {   CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) );
        if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; }   /* single symbol, rle */
        if (largest <= (srcSize >> 7)+4) return 0;   /* heuristic : probably not compressible enough */
    }
    DEBUGLOG(6, "histogram detail completed (%zu symbols)", showU32(table->count, maxSymbolValue+1));

    /* Check validity of previous table */
    if ( repeat
      && *repeat == HUF_repeat_check
      && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) {
        *repeat = HUF_repeat_none;
    }
    /* Heuristic : use existing table for small inputs */
    if ((flags & HUF_flags_preferRepeat) && repeat && *repeat != HUF_repeat_none) {
        return HUF_compressCTable_internal(ostart, op, oend,
                                           src, srcSize,
                                           nbStreams, oldHufTable, flags);
    }

    /* Build Huffman Tree */
    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, sizeof(table->wksps), table->CTable, table->count, flags);
    {   size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
                                            maxSymbolValue, huffLog,
                                            &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp));
        CHECK_F(maxBits);
        huffLog = (U32)maxBits;
        DEBUGLOG(6, "bit distribution completed (%zu symbols)", showCTableBits(table->CTable + 1, maxSymbolValue+1));
    }

    /* Write table description header */
    {   CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog,
                                              &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) );
        /* Check if using previous huffman table is beneficial */
        if (repeat && *repeat != HUF_repeat_none) {
            size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue);
            size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue);
            if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
                return HUF_compressCTable_internal(ostart, op, oend,
                                                   src, srcSize,
                                                   nbStreams, oldHufTable, flags);
        }   }

        /* Use the new huffman table */
        if (hSize + 12ul >= srcSize) { return 0; }
        op += hSize;
        if (repeat) { *repeat = HUF_repeat_none; }
        if (oldHufTable)
            ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable));  /* Save new table */
    }
    return HUF_compressCTable_internal(ostart, op, oend,
                                       src, srcSize,
                                       nbStreams, table->CTable, flags);
}
size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize,
                      HUF_CElt* hufTable, HUF_repeat* repeat, int flags)
{
    DEBUGLOG(5, "HUF_compress1X_repeat (srcSize = %zu)", srcSize);
    return HUF_compress_internal(dst, dstSize, src, srcSize,
                                 maxSymbolValue, huffLog, HUF_singleStream,
                                 workSpace, wkspSize, hufTable,
                                 repeat, flags);
}

/* HUF_compress4X_repeat():
 * compress input using 4 streams.
 * consider skipping quickly
 * reuse an existing huffman compression table */
size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
                      const void* src, size_t srcSize,
                      unsigned maxSymbolValue, unsigned huffLog,
                      void* workSpace, size_t wkspSize,
                      HUF_CElt* hufTable, HUF_repeat* repeat, int flags)
{
    DEBUGLOG(5, "HUF_compress4X_repeat (srcSize = %zu)", srcSize);
    return HUF_compress_internal(dst, dstSize, src, srcSize,
                                 maxSymbolValue, huffLog, HUF_fourStreams,
                                 workSpace, wkspSize,
                                 hufTable, repeat, flags);
}
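
#if 0
/* Usage sketch (not compiled; illustrative only). The function name
 * example_compressBlock is hypothetical. It shows a typical call sequence
 * for one block, assuming a caller-provided workspace of HUF_WORKSPACE_SIZE
 * bytes. Error handling is abbreviated.
 */
static size_t example_compressBlock(void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize,
                                    void* workspace /* HUF_WORKSPACE_SIZE bytes */)
{
    HUF_CElt ctable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)];
    HUF_repeat repeat = HUF_repeat_none;
    size_t const cSize = HUF_compress4X_repeat(dst, dstCapacity, src, srcSize,
                                    255, HUF_TABLELOG_DEFAULT,
                                    workspace, HUF_WORKSPACE_SIZE,
                                    ctable, &repeat, /* flags */ 0);
    /* cSize == 0 means "not compressible"; callers store the block raw. */
    return cSize;
}
#endif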