/* LzFind.c -- Match finder for LZ algorithms
2021-11-29 : Igor Pavlov : Public domain */

// #include <stdio.h>

#define kBlockMoveAlign (1 << 7) // alignment for memmove()
#define kBlockSizeAlign (1 << 16) // alignment for block allocation
#define kBlockSizeReserveMin (1 << 24) // it's 1/256 of a 4 GB dictionary

#define kEmptyHashValue 0

#define kMaxValForNormalize ((UInt32)0)
// #define kMaxValForNormalize ((UInt32)(1 << 20) + 0xFFF) // for debug

// #define kNormalizeAlign (1 << 7) // alignment for speculated accesses

#define GET_AVAIL_BYTES(p) \
  Inline_MatchFinder_GetNumAvailableBytes(p)

// #define kFix5HashSize (kHash2Size + kHash3Size + kHash4Size)
#define kFix5HashSize kFix4HashSize

/*
  if (hv) matches, then cur[0] and cur[1] also match
*/
#define HASH2_CALC hv = GetUi16(cur);

// (crc[0 ... 255] & 0xFF) provides one-to-one correspondence to [0 ... 255]

/*
  if (cur[0]) and (h2) match, then cur[1] also matches
  if (cur[0]) and (hv) match, then cur[1] and cur[2] also match
*/
#define HASH3_CALC { \
  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
  h2 = temp & (kHash2Size - 1); \
  hv = (temp ^ ((UInt32)cur[2] << 8)) & p->hashMask; }

#define HASH4_CALC { \
  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
  h2 = temp & (kHash2Size - 1); \
  temp ^= ((UInt32)cur[2] << 8); \
  h3 = temp & (kHash3Size - 1); \
  hv = (temp ^ (p->crc[cur[3]] << kLzHash_CrcShift_1)) & p->hashMask; }

#define HASH5_CALC { \
  UInt32 temp = p->crc[cur[0]] ^ cur[1]; \
  h2 = temp & (kHash2Size - 1); \
  temp ^= ((UInt32)cur[2] << 8); \
  h3 = temp & (kHash3Size - 1); \
  temp ^= (p->crc[cur[3]] << kLzHash_CrcShift_1); \
  /* h4 = temp & p->hash4Mask; */ /* (kHash4Size - 1); */ \
  hv = (temp ^ (p->crc[cur[4]] << kLzHash_CrcShift_2)) & p->hashMask; }

#define HASH_ZIP_CALC hv = ((cur[2] | ((UInt32)cur[0] << 8)) ^ p->crc[cur[1]]) & 0xFFFF;
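/* Added illustrative sketch (not part of the original source): it shows, on a
   toy buffer, how the HASH5_CALC-style chain derives h2 / h3 / hv by folding
   one more byte into the running value at each step. The SKETCH_* constants
   and the hashMask value are assumptions made for the demo (the real values
   come from LzHash.h and from MatchFinder_Create()); only the mixing order
   mirrors the macros above. Compile it as a separate program. */
#if 0
#include <stdio.h>
#include <stdint.h>

#define SKETCH_kHash2Size (1 << 10)   /* assumed */
#define SKETCH_kHash3Size (1 << 16)   /* assumed */
#define SKETCH_CrcShift_1 5           /* assumed */
#define SKETCH_CrcShift_2 10          /* assumed */

static uint32_t sketch_crc[256];

static void Sketch_InitCrc(void)
{
  unsigned i, j;
  for (i = 0; i < 256; i++)
  {
    uint32_t r = (uint32_t)i;
    for (j = 0; j < 8; j++)
      r = (r >> 1) ^ (0xEDB88320 & ((uint32_t)0 - (r & 1)));
    sketch_crc[i] = r;
  }
}

int main(void)
{
  const uint8_t cur[5] = { 'a', 'b', 'c', 'd', 'e' };
  const uint32_t hashMask = (1 << 20) - 1;   /* assumed main table of 2^20 heads */
  uint32_t temp, h2, h3, hv;

  Sketch_InitCrc();
  temp = sketch_crc[cur[0]] ^ cur[1];                 /* 2 bytes mixed */
  h2 = temp & (SKETCH_kHash2Size - 1);
  temp ^= ((uint32_t)cur[2] << 8);                    /* 3 bytes mixed */
  h3 = temp & (SKETCH_kHash3Size - 1);
  temp ^= (sketch_crc[cur[3]] << SKETCH_CrcShift_1);  /* 4 bytes mixed */
  hv = (temp ^ (sketch_crc[cur[4]] << SKETCH_CrcShift_2)) & hashMask;

  printf("h2=%u h3=%u hv=%u\n", (unsigned)h2, (unsigned)h3, (unsigned)hv);
  return 0;
}
#endif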
\r
static void LzInWindow_Free(CMatchFinder *p, ISzAllocPtr alloc)
  if (!p->directInput)
    ISzAlloc_Free(alloc, p->bufferBase);
    p->bufferBase = NULL;

static int LzInWindow_Create2(CMatchFinder *p, UInt32 blockSize, ISzAllocPtr alloc)
  if (!p->bufferBase || p->blockSize != blockSize)
    // size_t blockSizeT;
    LzInWindow_Free(p, alloc);
    p->blockSize = blockSize;
    // blockSizeT = blockSize;

    // printf("\nblockSize = 0x%x\n", blockSize);
    // we can allocate 4GiB, but still use UInt32 for (p->blockSize)
    // we use UInt32 type for (p->blockSize), because
    // we don't want to wrap over 4 GiB,
    // when we use (p->streamPos - p->pos) that is UInt32.
    /*
    if (blockSize >= (UInt32)0 - (UInt32)kBlockSizeAlign)
      blockSizeT = ((size_t)1 << 32);
      printf("\nchanged to blockSizeT = 4GiB\n");
    */

    p->bufferBase = (Byte *)ISzAlloc_Alloc(alloc, blockSize);
    // printf("\nbufferBase = %p\n", p->bufferBase);
    // return 0; // for debug
  return (p->bufferBase != NULL);

static const Byte *MatchFinder_GetPointerToCurrentPos(CMatchFinder *p) { return p->buffer; }

static UInt32 MatchFinder_GetNumAvailableBytes(CMatchFinder *p) { return GET_AVAIL_BYTES(p); }

static void MatchFinder_ReadBlock(CMatchFinder *p)
  if (p->streamEndWasReached || p->result != SZ_OK)

  /* We use the (p->streamPos - p->pos) value.
     (p->streamPos < p->pos) is allowed. */

  if (p->directInput)
    UInt32 curSize = 0xFFFFFFFF - GET_AVAIL_BYTES(p);
    if (curSize > p->directInputRem)
      curSize = (UInt32)p->directInputRem;
    p->directInputRem -= curSize;
    p->streamPos += curSize;
    if (p->directInputRem == 0)
      p->streamEndWasReached = 1;

    Byte *dest = p->buffer + GET_AVAIL_BYTES(p);
    size_t size = (size_t)(p->bufferBase + p->blockSize - dest);

    /* we call ReadBlock() after NeedMove() and MoveBlock().
       NeedMove() and MoveBlock() provide more than (keepSizeAfter)
       to the end of (blockSize).
       So we don't execute this branch in normal code flow.
       We can get here only if ReadBlock() is called before NeedMove() / MoveBlock(). */
      // p->result = SZ_ERROR_FAIL; // we can show error here

    // if (size > kRead) size = kRead; // for debug
    p->result = ISeqInStream_Read(p->stream, dest, &size);
    if (p->result != SZ_OK)

    p->streamEndWasReached = 1;

    p->streamPos += (UInt32)size;
    if (GET_AVAIL_BYTES(p) > p->keepSizeAfter)
      /* here and in other (p->keepSizeAfter) checks we keep one byte more than was requested by the Create() function
         (GET_AVAIL_BYTES(p) >= p->keepSizeAfter) - minimal required size */

  // on exit: (p->result != SZ_OK || p->streamEndWasReached || GET_AVAIL_BYTES(p) > p->keepSizeAfter)

void MatchFinder_MoveBlock(CMatchFinder *p)
  const size_t offset = (size_t)(p->buffer - p->bufferBase) - p->keepSizeBefore;
  const size_t keepBefore = (offset & (kBlockMoveAlign - 1)) + p->keepSizeBefore;
  p->buffer = p->bufferBase + keepBefore;
  memmove(p->bufferBase,
      p->bufferBase + (offset & ~((size_t)kBlockMoveAlign - 1)),
      keepBefore + (size_t)GET_AVAIL_BYTES(p));

/* We call MoveBlock() before ReadBlock().
   So MoveBlock() can be a wasteful operation, if the whole input data
   can fit into the current block even without calling MoveBlock().
   In the important case where (dataSize <= historySize),
   the condition (p->blockSize > dataSize + p->keepSizeAfter) is met,
   so there is no MoveBlock() in that case. */
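/* Added illustrative sketch (not part of the original source): the window-move
   arithmetic of MatchFinder_MoveBlock() on concrete numbers, using the
   kBlockMoveAlign value defined at the top of this file. The move source is
   rounded down to a 128-byte boundary, so the kept prefix grows by
   (offset % 128) bytes and the history that p->buffer relies on is preserved.
   The state values are invented; compile it separately. */
#if 0
#include <stdio.h>
#include <stddef.h>

#define kBlockMoveAlign (1 << 7)

int main(void)
{
  /* pretend state: p->buffer sits 1000000 bytes into p->bufferBase and we
     must keep keepSizeBefore = 65536 bytes of already-processed history */
  const size_t bufferPos = 1000000;
  const size_t keepSizeBefore = 65536;

  const size_t offset = bufferPos - keepSizeBefore;
  const size_t keepBefore = (offset & (kBlockMoveAlign - 1)) + keepSizeBefore;
  const size_t moveSrc = offset & ~((size_t)kBlockMoveAlign - 1);

  printf("memmove from offset %zu to 0; new buffer offset = %zu\n",
      moveSrc, keepBefore);
  return 0;
}
#endif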
\r
int MatchFinder_NeedMove(CMatchFinder *p)
  if (p->directInput)
  if (p->streamEndWasReached || p->result != SZ_OK)
  return ((size_t)(p->bufferBase + p->blockSize - p->buffer) <= p->keepSizeAfter);

void MatchFinder_ReadIfRequired(CMatchFinder *p)
  if (p->keepSizeAfter >= GET_AVAIL_BYTES(p))
    MatchFinder_ReadBlock(p);

static void MatchFinder_SetDefaultSettings(CMatchFinder *p)
  p->numHashBytes = 4;

#define kCrcPoly 0xEDB88320

void MatchFinder_Construct(CMatchFinder *p)
  p->bufferBase = NULL;
  p->directInput = 0;
  p->expectedDataSize = (UInt64)(Int64)-1;
  MatchFinder_SetDefaultSettings(p);

  for (i = 0; i < 256; i++)
    UInt32 r = (UInt32)i;
    for (j = 0; j < 8; j++)
      r = (r >> 1) ^ (kCrcPoly & ((UInt32)0 - (r & 1)));
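/* Added illustrative sketch (not part of the original source): it rebuilds the
   CRC table exactly as the loop above does and tests the property stated in
   the comment earlier in this file, that (crc[0 ... 255] & 0xFF) maps
   one-to-one onto [0 ... 255]. Compile it separately. */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
  uint32_t crc[256];
  int seen[256] = { 0 };
  unsigned i, j;

  for (i = 0; i < 256; i++)
  {
    uint32_t r = (uint32_t)i;
    for (j = 0; j < 8; j++)
      r = (r >> 1) ^ (0xEDB88320 & ((uint32_t)0 - (r & 1)));
    crc[i] = r;
  }

  for (i = 0; i < 256; i++)
    seen[crc[i] & 0xFF]++;

  for (i = 0; i < 256; i++)
    if (seen[i] != 1)
    {
      printf("low byte %u occurs %d times\n", i, seen[i]);
      return 1;
    }

  printf("(crc[i] & 0xFF) is one-to-one over [0 ... 255]\n");
  return 0;
}
#endif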
\r
static void MatchFinder_FreeThisClassMemory(CMatchFinder *p, ISzAllocPtr alloc)
  ISzAlloc_Free(alloc, p->hash);

void MatchFinder_Free(CMatchFinder *p, ISzAllocPtr alloc)
  MatchFinder_FreeThisClassMemory(p, alloc);
  LzInWindow_Free(p, alloc);

static CLzRef* AllocRefs(size_t num, ISzAllocPtr alloc)
  size_t sizeInBytes = (size_t)num * sizeof(CLzRef);
  if (sizeInBytes / sizeof(CLzRef) != num)
  return (CLzRef *)ISzAlloc_Alloc(alloc, sizeInBytes);

#if (kBlockSizeReserveMin < kBlockSizeAlign * 2)
  #error Stop_Compiling_Bad_Reserve

static UInt32 GetBlockSize(CMatchFinder *p, UInt32 historySize)
  UInt32 blockSize = (p->keepSizeBefore + p->keepSizeAfter);
  if (historySize > kMaxHistorySize)
  // printf("\nhistorySize == 0x%x\n", historySize);
  if (p->keepSizeBefore < historySize || blockSize < p->keepSizeBefore) // if 32-bit overflow
    const UInt32 kBlockSizeMax = (UInt32)0 - (UInt32)kBlockSizeAlign;
    const UInt32 rem = kBlockSizeMax - blockSize;
    const UInt32 reserve = (blockSize >> (blockSize < ((UInt32)1 << 30) ? 1 : 2))
        + (1 << 12) + kBlockMoveAlign + kBlockSizeAlign; // do not overflow 32-bit here
    if (blockSize >= kBlockSizeMax
        || rem < kBlockSizeReserveMin) // we reject settings that will be slow
    if (reserve >= rem)
      blockSize = kBlockSizeMax;
      blockSize += reserve;
      blockSize &= ~(UInt32)(kBlockSizeAlign - 1);
  // printf("\n LzFind_blockSize = %x\n", blockSize);
  // printf("\n LzFind_blockSize = %d\n", blockSize >> 20);
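/* Added illustrative sketch (not part of the original source): the sizing
   policy of GetBlockSize() applied to concrete numbers - take
   keepSizeBefore + keepSizeAfter, add a reserve of roughly 1/2 (or 1/4 for
   sizes of 1 GiB and more) plus some slack, then round to the 64 KiB
   kBlockSizeAlign boundary. The input values are invented for the demo.
   Compile it separately. */
#if 0
#include <stdio.h>
#include <stdint.h>

#define kBlockMoveAlign (1 << 7)
#define kBlockSizeAlign (1 << 16)

int main(void)
{
  const uint32_t keepSizeBefore = (1u << 26) + 1;   /* ~64 MiB history + 1 byte */
  const uint32_t keepSizeAfter  = 273 + 4;          /* e.g. matchMaxLen + hash bytes */
  uint32_t blockSize = keepSizeBefore + keepSizeAfter;

  const uint32_t reserve = (blockSize >> (blockSize < ((uint32_t)1 << 30) ? 1 : 2))
      + (1 << 12) + kBlockMoveAlign + kBlockSizeAlign;

  blockSize += reserve;
  blockSize &= ~(uint32_t)(kBlockSizeAlign - 1);

  printf("blockSize = %u bytes (~%u MiB)\n", blockSize, blockSize >> 20);
  return 0;
}
#endif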
\r
int MatchFinder_Create(CMatchFinder *p, UInt32 historySize,
    UInt32 keepAddBufferBefore, UInt32 matchMaxLen, UInt32 keepAddBufferAfter,
  /* we need one additional byte in (p->keepSizeBefore),
     since we use MoveBlock() after (p->pos++) and before using the dictionary */
  // keepAddBufferBefore = (UInt32)0xFFFFFFFF - (1 << 22); // for debug
  p->keepSizeBefore = historySize + keepAddBufferBefore + 1;

  keepAddBufferAfter += matchMaxLen;
  /* we need (p->keepSizeAfter >= p->numHashBytes) */
  if (keepAddBufferAfter < p->numHashBytes)
    keepAddBufferAfter = p->numHashBytes;
  // keepAddBufferAfter -= 2; // for debug
  p->keepSizeAfter = keepAddBufferAfter;

  if (p->directInput)
  if (p->directInput || LzInWindow_Create2(p, GetBlockSize(p, historySize), alloc))
    const UInt32 newCyclicBufferSize = historySize + 1; // do not change it
    p->matchMaxLen = matchMaxLen;
      p->fixedHashSize = 0;
      hs = (1 << 16) - 1;
      if (p->numHashBytes != 2)
        if (hs > p->expectedDataSize)
          hs = (UInt32)p->expectedDataSize;
        // we propagated 16 bits in (hs). Low 16 bits must be set later
        if (hs >= (1 << 24))
          if (p->numHashBytes == 3)
            hs = (1 << 24) - 1;
          /* if (bigHash) mode, GetHeads4b() in LzFindMt.c needs (hs >= ((1 << 24) - 1)) */
        // hs = ((UInt32)1 << 25) - 1; // for test
        // (hash_size >= (1 << 16)) : Required for (numHashBytes > 2)
        hs |= (1 << 16) - 1; /* don't change it! */
        // bt5: we adjust the size with recommended minimum size
        if (p->numHashBytes >= 5)
          hs |= (256 << kLzHash_CrcShift_2) - 1;
      /*
      // hs4 = (1 << 16); // for test
      p->hash4Mask = hs4 - 1;
      */
      if (p->numHashBytes > 2) p->fixedHashSize += kHash2Size;
      if (p->numHashBytes > 3) p->fixedHashSize += kHash3Size;
      // if (p->numHashBytes > 4) p->fixedHashSize += hs4; // kHash4Size;
      hs += p->fixedHashSize;

      p->historySize = historySize;
      p->hashSizeSum = hs;
      p->cyclicBufferSize = newCyclicBufferSize; // it must be = (historySize + 1)

      numSons = newCyclicBufferSize;
      newSize = hs + numSons;
      // aligned size is not required here, but it can be better for some loops
      #define NUM_REFS_ALIGN_MASK 0xF
      newSize = (newSize + NUM_REFS_ALIGN_MASK) & ~(size_t)NUM_REFS_ALIGN_MASK;
      if (p->hash && p->numRefs == newSize)
      MatchFinder_FreeThisClassMemory(p, alloc);
      p->numRefs = newSize;
      p->hash = AllocRefs(newSize, alloc);
        p->son = p->hash + p->hashSizeSum;

  MatchFinder_Free(p, alloc);
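/* Added illustrative sketch (not part of the original source): how the single
   AllocRefs() allocation made above is partitioned. The hash2/hash3 sizes are
   assumed LzHash.h-style values and the other numbers are invented; only the
   partitioning scheme (fixed hash tables, then main hash heads, then the son[]
   links, with p->son = p->hash + hashSizeSum) mirrors MatchFinder_Create().
   Compile it separately. */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef uint32_t CLzRef;

int main(void)
{
  const size_t kHash2Size = 1 << 10;       /* assumed */
  const size_t kHash3Size = 1 << 16;       /* assumed */
  const size_t mainHeads  = 1 << 20;       /* hashMask + 1 for a bt4-style setup */
  const size_t historySize = 1 << 20;
  const size_t cyclicBufferSize = historySize + 1;
  const int    btMode = 1;                 /* binary tree: two links per position */

  const size_t fixedHashSize = kHash2Size + kHash3Size;
  const size_t hashSizeSum = fixedHashSize + mainHeads;
  const size_t numSons = cyclicBufferSize << (btMode ? 1 : 0);

  printf("refs total: %zu (hash %zu + son %zu), %zu bytes\n",
      hashSizeSum + numSons, hashSizeSum, numSons,
      (hashSizeSum + numSons) * sizeof(CLzRef));
  return 0;
}
#endif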
\r
static void MatchFinder_SetLimits(CMatchFinder *p)
  UInt32 n = kMaxValForNormalize - p->pos;
    n = (UInt32)(Int32)-1; // we allow (pos == 0) at start even with (kMaxValForNormalize == 0)
  k = p->cyclicBufferSize - p->cyclicBufferPos;
  k = GET_AVAIL_BYTES(p);
    const UInt32 ksa = p->keepSizeAfter;
    UInt32 mm = p->matchMaxLen;
      k -= ksa; // we must limit exactly to keepSizeAfter for ReadBlock
      // the limitation for (p->lenLimit) update
      k -= mm; // optimization : to reduce the number of checks
      // k = 1; // non-optimized version : for debug
  p->posLimit = p->pos + n;

void MatchFinder_Init_LowHash(CMatchFinder *p)
  CLzRef *items = p->hash;
  const size_t numItems = p->fixedHashSize;
  for (i = 0; i < numItems; i++)
    items[i] = kEmptyHashValue;

void MatchFinder_Init_HighHash(CMatchFinder *p)
  CLzRef *items = p->hash + p->fixedHashSize;
  const size_t numItems = (size_t)p->hashMask + 1;
  for (i = 0; i < numItems; i++)
    items[i] = kEmptyHashValue;

void MatchFinder_Init_4(CMatchFinder *p)
  p->buffer = p->bufferBase;
  /* kEmptyHashValue = 0 (Zero) is used in hash tables as NO-VALUE marker.
     the code in CMatchFinderMt expects (pos = 1) */
      1; // it's the smallest optimal value; do not change it
  p->streamEndWasReached = 0;

// (CYC_TO_POS_OFFSET == 0) is expected by some optimized code
#define CYC_TO_POS_OFFSET 0
// #define CYC_TO_POS_OFFSET 1 // for debug

void MatchFinder_Init(CMatchFinder *p)
  MatchFinder_Init_HighHash(p);
  MatchFinder_Init_LowHash(p);
  MatchFinder_Init_4(p);
  MatchFinder_ReadBlock(p);

  /* if we init (cyclicBufferPos = pos), then we can use one variable
     instead of both (cyclicBufferPos) and (pos) : only before (cyclicBufferPos) wrapping */
  p->cyclicBufferPos = (p->pos - CYC_TO_POS_OFFSET); // init with relation to (pos)
  // p->cyclicBufferPos = 0; // smallest value
  // p->son[0] = p->son[1] = 0; // unused: we can init skipped record for speculated accesses.
  MatchFinder_SetLimits(p);
#ifdef MY_CPU_X86_OR_AMD64

  #if defined(__clang__) && (__clang_major__ >= 8) \
    || defined(__GNUC__) && (__GNUC__ >= 8) \
    || defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1900)
    #define USE_SATUR_SUB_128
    #define ATTRIB_SSE41 __attribute__((__target__("sse4.1")))
    #define ATTRIB_AVX2 __attribute__((__target__("avx2")))
  #elif defined(_MSC_VER)
    #if (_MSC_VER >= 1600)
      #define USE_SATUR_SUB_128
    #if (_MSC_VER >= 1900)
      #include <immintrin.h> // avx

// #elif defined(MY_CPU_ARM_OR_ARM64)
#elif defined(MY_CPU_ARM64)

  #if defined(__clang__) && (__clang_major__ >= 8) \
    || defined(__GNUC__) && (__GNUC__ >= 8)
    #define USE_SATUR_SUB_128
    #ifdef MY_CPU_ARM64
      // #define ATTRIB_SSE41 __attribute__((__target__("")))
      // #define ATTRIB_SSE41 __attribute__((__target__("fpu=crypto-neon-fp-armv8")))
  #elif defined(_MSC_VER)
    #if (_MSC_VER >= 1910)
      #define USE_SATUR_SUB_128

  #if defined(_MSC_VER) && defined(MY_CPU_ARM64)
    #include <arm64_neon.h>
    #include <arm_neon.h>

#ifndef ATTRIB_SSE41
  #define ATTRIB_SSE41
#ifndef ATTRIB_AVX2
  #define ATTRIB_AVX2

#ifdef USE_SATUR_SUB_128

// #define _SHOW_HW_STATUS

#ifdef _SHOW_HW_STATUS

#ifdef MY_CPU_ARM_OR_ARM64

#ifdef MY_CPU_ARM64
// #define FORCE_SATUR_SUB_128

typedef uint32x4_t v128;
#define SASUB_128(i) \
  *(v128 *)(void *)(items + (i) * 4) = \
  vsubq_u32(vmaxq_u32(*(const v128 *)(const void *)(items + (i) * 4), sub2), sub2);

#include <smmintrin.h> // sse4.1

typedef __m128i v128;
#define SASUB_128(i) \
  *(v128 *)(void *)(items + (i) * 4) = \
  _mm_sub_epi32(_mm_max_epu32(*(const v128 *)(const void *)(items + (i) * 4), sub2), sub2); // SSE 4.1

#ifdef ATTRIB_SSE41

LzFind_SaturSub_128(UInt32 subValue, CLzRef *items, const CLzRef *lim)
  #ifdef MY_CPU_ARM_OR_ARM64
      vdupq_n_u32(subValue);
      _mm_set_epi32((Int32)subValue, (Int32)subValue, (Int32)subValue, (Int32)subValue);
  while (items != lim);

#include <immintrin.h> // avx

#define SASUB_256(i) *(__m256i *)(void *)(items + (i) * 8) = _mm256_sub_epi32(_mm256_max_epu32(*(const __m256i *)(const void *)(items + (i) * 8), sub2), sub2); // AVX2

LzFind_SaturSub_256(UInt32 subValue, CLzRef *items, const CLzRef *lim)
  __m256i sub2 = _mm256_set_epi32(
      (Int32)subValue, (Int32)subValue, (Int32)subValue, (Int32)subValue,
      (Int32)subValue, (Int32)subValue, (Int32)subValue, (Int32)subValue);
  while (items != lim);

#ifndef FORCE_SATUR_SUB_128
typedef void (MY_FAST_CALL *LZFIND_SATUR_SUB_CODE_FUNC)(
    UInt32 subValue, CLzRef *items, const CLzRef *lim);
static LZFIND_SATUR_SUB_CODE_FUNC g_LzFind_SaturSub;
#endif // FORCE_SATUR_SUB_128

#endif // USE_SATUR_SUB_128

// kEmptyHashValue must be zero
// #define SASUB_32(i) v = items[i]; m = v - subValue; if (v < subValue) m = kEmptyHashValue; items[i] = m;
#define SASUB_32(i) v = items[i]; if (v < subValue) v = subValue; items[i] = v - subValue;

#ifdef FORCE_SATUR_SUB_128
#define DEFAULT_SaturSub LzFind_SaturSub_128
#define DEFAULT_SaturSub LzFind_SaturSub_32

LzFind_SaturSub_32(UInt32 subValue, CLzRef *items, const CLzRef *lim)
  while (items != lim);
void MatchFinder_Normalize3(UInt32 subValue, CLzRef *items, size_t numItems)
  #define K_NORM_ALIGN_BLOCK_SIZE (1 << 6)
  for (; numItems != 0 && ((unsigned)(ptrdiff_t)items & (K_NORM_ALIGN_BLOCK_SIZE - 1)) != 0; numItems--)
  #define K_NORM_ALIGN_MASK (K_NORM_ALIGN_BLOCK_SIZE / 4 - 1)
    lim = items + (numItems & ~(size_t)K_NORM_ALIGN_MASK);
    numItems &= K_NORM_ALIGN_MASK;
    #if defined(USE_SATUR_SUB_128) && !defined(FORCE_SATUR_SUB_128)
    if (g_LzFind_SaturSub)
      g_LzFind_SaturSub(subValue, items, lim);
      DEFAULT_SaturSub(subValue, items, lim);
  for (; numItems != 0; numItems--)
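/* Added illustrative sketch (not part of the original source): the scalar
   saturating subtraction that SASUB_32 and the SIMD kernels above implement,
   run over a tiny table so the clamp is visible. Entries smaller than
   subValue become 0, i.e. kEmptyHashValue, so stale references turn into
   "empty" instead of wrapping around. The sample values are invented.
   Compile it separately. */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int main(void)
{
  uint32_t refs[6] = { 0, 5, 100, 4096, 4097, 70000 };
  const uint32_t subValue = 4096;
  size_t i;

  for (i = 0; i < 6; i++)
  {
    const uint32_t old = refs[i];
    uint32_t v = old;
    if (v < subValue)
      v = subValue;              /* clamp first, so the subtraction cannot wrap */
    refs[i] = v - subValue;
    printf("%u -> %u\n", (unsigned)old, (unsigned)refs[i]);
  }
  return 0;
}
#endif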
\r
// call MatchFinder_CheckLimits() only after (p->pos++) update

static void MatchFinder_CheckLimits(CMatchFinder *p)
  if (// !p->streamEndWasReached && p->result == SZ_OK &&
      p->keepSizeAfter == GET_AVAIL_BYTES(p))
    // we try to read only in exact state (p->keepSizeAfter == GET_AVAIL_BYTES(p))
    if (MatchFinder_NeedMove(p))
      MatchFinder_MoveBlock(p);
    MatchFinder_ReadBlock(p);

  if (p->pos == kMaxValForNormalize)
  if (GET_AVAIL_BYTES(p) >= p->numHashBytes) // optional optimization for last bytes of data.
    /*
       if we disable normalization for the last bytes of data, and
       if (data_size == 4 GiB), we don't call wasteful normalization,
       but (pos) will wrap over zero (0) in that case,
       and we cannot resume normal operation later.
    */
    // MatchFinder_Normalize(p);
    /* after normalization we need (p->pos >= p->historySize + 1); */
    /* we can reduce subValue to an aligned value, if we want to keep the alignment
       of (p->pos) and (p->buffer) for speculated accesses. */
    const UInt32 subValue = (p->pos - p->historySize - 1) /* & ~(UInt32)(kNormalizeAlign - 1) */;
    // const UInt32 subValue = (1 << 15); // for debug
    // printf("\nMatchFinder_Normalize() subValue == 0x%x\n", subValue);
    size_t numSonRefs = p->cyclicBufferSize;
    Inline_MatchFinder_ReduceOffsets(p, subValue);
    MatchFinder_Normalize3(subValue, p->hash, (size_t)p->hashSizeSum + numSonRefs);

  if (p->cyclicBufferPos == p->cyclicBufferSize)
    p->cyclicBufferPos = 0;

  MatchFinder_SetLimits(p);
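/* Added illustrative sketch (not part of the original source): what "reducing
   offsets" does during normalization. Inline_MatchFinder_ReduceOffsets() is
   assumed to subtract subValue from the pos / streamPos counters; the sketch
   shows the same idea with plain locals and invented values. Compile it
   separately. */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
  uint32_t pos = 0xFFFFFFF0u;            /* close to the 32-bit limit */
  uint32_t streamPos = 0xFFFFFFF8u;
  const uint32_t historySize = 1u << 26;

  /* same formula as above: after the shift, pos == historySize + 1 */
  const uint32_t subValue = pos - historySize - 1;

  pos -= subValue;
  streamPos -= subValue;                 /* relative distances are preserved */

  printf("subValue=0x%08X pos=0x%08X streamPos=0x%08X\n",
      (unsigned)subValue, (unsigned)pos, (unsigned)streamPos);
  return 0;
}
#endif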
\r
/*
  (lenLimit > maxLen)
*/
static UInt32 * Hc_GetMatchesSpec(size_t lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
    size_t _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
    UInt32 *d, unsigned maxLen)
  /*
  straightforward (reference) variant, kept disabled:

  son[_cyclicBufferPos] = curMatch;
    UInt32 delta = pos - curMatch;
    if (cutValue-- == 0 || delta >= _cyclicBufferSize)
      const Byte *pb = cur - delta;
      curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)];
      if (pb[maxLen] == cur[maxLen] && *pb == *cur)
        while (++len != lenLimit)
          if (pb[len] != cur[len])
        if (len == lenLimit)
  */

  const Byte *lim = cur + lenLimit;
  son[_cyclicBufferPos] = curMatch;

    // if (curMatch2 >= curMatch) return NULL;
    delta = pos - curMatch;
    if (delta >= _cyclicBufferSize)
      curMatch = son[_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)];
      diff = (ptrdiff_t)0 - (ptrdiff_t)delta;
      if (cur[maxLen] == cur[(ptrdiff_t)maxLen + diff])
        const Byte *c = cur;
        while (*c == c[diff])
            d[0] = (UInt32)(lim - cur);
        const unsigned len = (unsigned)(c - cur);
          d[0] = (UInt32)len;
  while (--cutValue);
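/* Added illustrative sketch (not part of the original source): the hash-chain
   idea behind Hc_GetMatchesSpec in its plainest form. One hash bucket holds
   the most recent position; son[pos % cyclicBufferSize] links each position to
   the previous one that hashed to the same bucket, and the search walks that
   chain for at most cutValue steps. Positions are 1-based as in LzFind
   (0 means "empty"). Everything here is invented demo data; compile it
   separately. */
#if 0
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define CYC 8                    /* toy cyclic buffer size */

int main(void)
{
  const uint8_t data[] = "abcabcabcab";
  uint32_t son[CYC] = { 0 };
  uint32_t head = 0;             /* single bucket: last position inserted */
  uint32_t pos;

  for (pos = 1; pos + 2 <= (uint32_t)(sizeof(data) - 1); pos++)
  {
    uint32_t curMatch = head;
    uint32_t cutValue = 4;
    head = pos;
    son[pos % CYC] = curMatch;   /* link to the previous position in the chain */

    while (curMatch != 0 && cutValue-- != 0 && pos - curMatch < CYC)
    {
      const uint8_t *cur = data + (pos - 1);
      const uint8_t *pb = data + (curMatch - 1);
      if (memcmp(pb, cur, 3) == 0)
        printf("pos %u: match (>= 3 bytes) at distance %u\n",
            (unsigned)pos, (unsigned)(pos - curMatch));
      curMatch = son[curMatch % CYC];
    }
  }
  return 0;
}
#endif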
\r
UInt32 * GetMatchesSpec1(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
    size_t _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue,
    UInt32 *d, UInt32 maxLen)
  CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1;
  CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
  unsigned len0 = 0, len1 = 0;

  // if (curMatch >= pos) { *ptr0 = *ptr1 = kEmptyHashValue; return NULL; }

  cmCheck = (UInt32)(pos - _cyclicBufferSize);
  if ((UInt32)pos <= _cyclicBufferSize)

  if (cmCheck < curMatch)
      const UInt32 delta = pos - curMatch;
        CLzRef *pair = son + ((size_t)(_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
        const Byte *pb = cur - delta;
        unsigned len = (len0 < len1 ? len0 : len1);
        const UInt32 pair0 = pair[0];
        if (pb[len] == cur[len])
          if (++len != lenLimit && pb[len] == cur[len])
            while (++len != lenLimit)
              if (pb[len] != cur[len])
            maxLen = (UInt32)len;
            *d++ = (UInt32)len;
            if (len == lenLimit)
        if (pb[len] < cur[len])
          // const UInt32 curMatch2 = pair[1];
          // if (curMatch2 >= curMatch) { *ptr0 = *ptr1 = kEmptyHashValue; return NULL; }
          // curMatch = curMatch2;
          curMatch = pair[1];
          curMatch = pair[0];
  while(--cutValue && cmCheck < curMatch);

  *ptr0 = *ptr1 = kEmptyHashValue;
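/* Added illustrative sketch (not part of the original source): the son[]
   addressing used by GetMatchesSpec1 / SkipMatchesSpec in bt mode. Every
   window position owns a pair of links (son[2*slot] and son[2*slot + 1]);
   the pair of the match at distance delta sits at the current cyclic slot
   minus delta, wrapped by cyclicBufferSize. The numbers below are invented;
   compile it separately. */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static size_t PairIndex(size_t cyclicBufferPos, uint32_t delta, uint32_t cyclicBufferSize)
{
  const size_t slot = (delta > cyclicBufferPos)
      ? cyclicBufferPos + cyclicBufferSize - delta   /* wrap backwards */
      : cyclicBufferPos - delta;
  return slot << 1;                                  /* two links per slot */
}

int main(void)
{
  const uint32_t cyclicBufferSize = 16;
  printf("%u\n", (unsigned)PairIndex(3, 1, cyclicBufferSize));   /* slot 2, index 4 */
  printf("%u\n", (unsigned)PairIndex(3, 5, cyclicBufferSize));   /* wraps to slot 14, index 28 */
  return 0;
}
#endif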
\r
static void SkipMatchesSpec(UInt32 lenLimit, UInt32 curMatch, UInt32 pos, const Byte *cur, CLzRef *son,
    size_t _cyclicBufferPos, UInt32 _cyclicBufferSize, UInt32 cutValue)
  CLzRef *ptr0 = son + ((size_t)_cyclicBufferPos << 1) + 1;
  CLzRef *ptr1 = son + ((size_t)_cyclicBufferPos << 1);
  unsigned len0 = 0, len1 = 0;

  cmCheck = (UInt32)(pos - _cyclicBufferSize);
  if ((UInt32)pos <= _cyclicBufferSize)

  if (// curMatch >= pos || // failure
      cmCheck < curMatch)
    const UInt32 delta = pos - curMatch;
      CLzRef *pair = son + ((size_t)(_cyclicBufferPos - delta + ((delta > _cyclicBufferPos) ? _cyclicBufferSize : 0)) << 1);
      const Byte *pb = cur - delta;
      unsigned len = (len0 < len1 ? len0 : len1);
      if (pb[len] == cur[len])
        while (++len != lenLimit)
          if (pb[len] != cur[len])
        if (len == lenLimit)
      if (pb[len] < cur[len])
        curMatch = pair[1];
        curMatch = pair[0];
  while(--cutValue && cmCheck < curMatch);

  *ptr0 = *ptr1 = kEmptyHashValue;
#define MOVE_POS \
  ++p->cyclicBufferPos; \
  { const UInt32 pos1 = p->pos + 1; p->pos = pos1; if (pos1 == p->posLimit) MatchFinder_CheckLimits(p); }

#define MOVE_POS_RET MOVE_POS return distances;

static void MatchFinder_MovePos(CMatchFinder *p)
  /* we go here at the end of stream data, when (avail < num_hash_bytes).
     We don't update sons[cyclicBufferPos << btMode].
     So the (sons) record will contain junk, and we cannot return match searching
     to normal operation, even if more input data is provided later.
     p->sons[p->cyclicBufferPos << p->btMode] = 0;  // kEmptyHashValue
     p->sons[(p->cyclicBufferPos << p->btMode) + 1] = 0;  // kEmptyHashValue
  */

#define GET_MATCHES_HEADER2(minLen, ret_op) \
  unsigned lenLimit; UInt32 hv; Byte *cur; UInt32 curMatch; \
  lenLimit = (unsigned)p->lenLimit; { if (lenLimit < minLen) { MatchFinder_MovePos(p); ret_op; }} \

#define GET_MATCHES_HEADER(minLen) GET_MATCHES_HEADER2(minLen, return distances)
#define SKIP_HEADER(minLen) do { GET_MATCHES_HEADER2(minLen, continue)

#define MF_PARAMS(p) lenLimit, curMatch, p->pos, p->buffer, p->son, p->cyclicBufferPos, p->cyclicBufferSize, p->cutValue

#define SKIP_FOOTER SkipMatchesSpec(MF_PARAMS(p)); MOVE_POS; } while (--num);

#define GET_MATCHES_FOOTER_BASE(_maxLen_, func) \
  distances = func(MF_PARAMS(p), \
      distances, (UInt32)_maxLen_); MOVE_POS_RET;

#define GET_MATCHES_FOOTER_BT(_maxLen_) \
  GET_MATCHES_FOOTER_BASE(_maxLen_, GetMatchesSpec1)

#define GET_MATCHES_FOOTER_HC(_maxLen_) \
  GET_MATCHES_FOOTER_BASE(_maxLen_, Hc_GetMatchesSpec)

#define UPDATE_maxLen { \
    const ptrdiff_t diff = (ptrdiff_t)0 - (ptrdiff_t)d2; \
    const Byte *c = cur + maxLen; \
    const Byte *lim = cur + lenLimit; \
    for (; c != lim; c++) if (*(c + diff) != *c) break; \
    maxLen = (unsigned)(c - cur); }
static UInt32* Bt2_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
  GET_MATCHES_HEADER(2)
  curMatch = p->hash[hv];
  p->hash[hv] = p->pos;
  GET_MATCHES_FOOTER_BT(1)

UInt32* Bt3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
  GET_MATCHES_HEADER(3)
  curMatch = p->hash[hv];
  p->hash[hv] = p->pos;
  GET_MATCHES_FOOTER_BT(2)

  mmm = p->cyclicBufferSize; \

static UInt32* Bt3_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
  UInt32 h2, d2, pos;
  GET_MATCHES_HEADER(3)
  d2 = pos - hash[h2];
  curMatch = (hash + kFix3HashSize)[hv];
  (hash + kFix3HashSize)[hv] = pos;
  if (d2 < mmm && *(cur - d2) == *cur)
    distances[0] = (UInt32)maxLen;
    distances[1] = d2 - 1;
    if (maxLen == lenLimit)
      SkipMatchesSpec(MF_PARAMS(p));
  GET_MATCHES_FOOTER_BT(maxLen)

static UInt32* Bt4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
  UInt32 h2, h3, d2, d3, pos;
  GET_MATCHES_HEADER(4)
  d2 = pos - hash [h2];
  d3 = pos - (hash + kFix3HashSize)[h3];
  curMatch = (hash + kFix4HashSize)[hv];
  (hash + kFix3HashSize)[h3] = pos;
  (hash + kFix4HashSize)[hv] = pos;
  if (d2 < mmm && *(cur - d2) == *cur)
    distances[1] = d2 - 1;
    if (*(cur - d2 + 2) == cur[2])
      // distances[-2] = 3;
  else if (d3 < mmm && *(cur - d3) == *cur)
    distances[1] = d3 - 1;
  else if (d3 < mmm && *(cur - d3) == *cur)
    distances[1] = d3 - 1;
    distances[-2] = (UInt32)maxLen;
    if (maxLen == lenLimit)
      SkipMatchesSpec(MF_PARAMS(p));
  GET_MATCHES_FOOTER_BT(maxLen)
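/* Added illustrative sketch (not part of the original source): how a caller
   reads what the *_GetMatches functions append to distances[]. Each match is
   a (length, distance - 1) pair, lengths increase from one pair to the next,
   and the returned pointer marks the end of the written pairs. The sample
   values below are invented; compile it separately. */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
  /* pretend a GetMatches call wrote these three pairs and returned `end` */
  uint32_t distances[8] = { 2, 11, 3, 499, 7, 499, 0, 0 };
  const uint32_t *d = distances;
  const uint32_t *end = distances + 6;

  while (d != end)
  {
    const uint32_t len = d[0];
    const uint32_t dist = d[1] + 1;   /* stored value is (distance - 1) */
    printf("len %u at distance %u\n", (unsigned)len, (unsigned)dist);
    d += 2;
  }
  return 0;
}
#endif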
\r
static UInt32* Bt5_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
  UInt32 h2, h3, d2, d3, maxLen, pos;
  GET_MATCHES_HEADER(5)
  d2 = pos - hash [h2];
  d3 = pos - (hash + kFix3HashSize)[h3];
  // d4 = pos - (hash + kFix4HashSize)[h4];
  curMatch = (hash + kFix5HashSize)[hv];
  (hash + kFix3HashSize)[h3] = pos;
  // (hash + kFix4HashSize)[h4] = pos;
  (hash + kFix5HashSize)[hv] = pos;
  if (d2 < mmm && *(cur - d2) == *cur)
    distances[1] = d2 - 1;
    if (*(cur - d2 + 2) == cur[2])
  else if (d3 < mmm && *(cur - d3) == *cur)
    distances[1] = d3 - 1;
  else if (d3 < mmm && *(cur - d3) == *cur)
    distances[1] = d3 - 1;
    distances[-2] = 3;
    if (*(cur - d2 + 3) != cur[3])
    distances[-2] = (UInt32)maxLen;
    if (maxLen == lenLimit)
      SkipMatchesSpec(MF_PARAMS(p));
  GET_MATCHES_FOOTER_BT(maxLen)

static UInt32* Hc4_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
  UInt32 h2, h3, d2, d3, pos;
  GET_MATCHES_HEADER(4)
  d2 = pos - hash [h2];
  d3 = pos - (hash + kFix3HashSize)[h3];
  curMatch = (hash + kFix4HashSize)[hv];
  (hash + kFix3HashSize)[h3] = pos;
  (hash + kFix4HashSize)[hv] = pos;
  if (d2 < mmm && *(cur - d2) == *cur)
    distances[1] = d2 - 1;
    if (*(cur - d2 + 2) == cur[2])
      // distances[-2] = 3;
  else if (d3 < mmm && *(cur - d3) == *cur)
    distances[1] = d3 - 1;
  else if (d3 < mmm && *(cur - d3) == *cur)
    distances[1] = d3 - 1;
    distances[-2] = (UInt32)maxLen;
    if (maxLen == lenLimit)
      p->son[p->cyclicBufferPos] = curMatch;
  GET_MATCHES_FOOTER_HC(maxLen);

static UInt32 * Hc5_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
  UInt32 h2, h3, d2, d3, maxLen, pos;
  GET_MATCHES_HEADER(5)
  d2 = pos - hash [h2];
  d3 = pos - (hash + kFix3HashSize)[h3];
  // d4 = pos - (hash + kFix4HashSize)[h4];
  curMatch = (hash + kFix5HashSize)[hv];
  (hash + kFix3HashSize)[h3] = pos;
  // (hash + kFix4HashSize)[h4] = pos;
  (hash + kFix5HashSize)[hv] = pos;
  if (d2 < mmm && *(cur - d2) == *cur)
    distances[1] = d2 - 1;
    if (*(cur - d2 + 2) == cur[2])
  else if (d3 < mmm && *(cur - d3) == *cur)
    distances[1] = d3 - 1;
  else if (d3 < mmm && *(cur - d3) == *cur)
    distances[1] = d3 - 1;
    distances[-2] = 3;
    if (*(cur - d2 + 3) != cur[3])
    distances[-2] = maxLen;
    if (maxLen == lenLimit)
      p->son[p->cyclicBufferPos] = curMatch;
  GET_MATCHES_FOOTER_HC(maxLen);
UInt32* Hc3Zip_MatchFinder_GetMatches(CMatchFinder *p, UInt32 *distances)
  GET_MATCHES_HEADER(3)
  curMatch = p->hash[hv];
  p->hash[hv] = p->pos;
  GET_MATCHES_FOOTER_HC(2)

static void Bt2_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
    curMatch = p->hash[hv];
    p->hash[hv] = p->pos;

void Bt3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
    curMatch = p->hash[hv];
    p->hash[hv] = p->pos;

static void Bt3_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
    curMatch = (hash + kFix3HashSize)[hv];
    (hash + kFix3HashSize)[hv] = p->pos;

static void Bt4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
    curMatch = (hash + kFix4HashSize)[hv];
    (hash + kFix3HashSize)[h3] =
    (hash + kFix4HashSize)[hv] = p->pos;

static void Bt5_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
    curMatch = (hash + kFix5HashSize)[hv];
    (hash + kFix3HashSize)[h3] =
    // (hash + kFix4HashSize)[h4] =
    (hash + kFix5HashSize)[hv] = p->pos;

#define HC_SKIP_HEADER(minLen) \
    do { if (p->lenLimit < minLen) { MatchFinder_MovePos(p); num--; continue; } { \
    UInt32 pos = p->pos; \
    UInt32 num2 = num; \
    /* (p->pos == p->posLimit) is not allowed here !!! */ \
    { const UInt32 rem = p->posLimit - pos; if (num2 > rem) num2 = rem; } \
    { const UInt32 cycPos = p->cyclicBufferPos; \
      son = p->son + cycPos; \
      p->cyclicBufferPos = cycPos + num2; } \
    cur = p->buffer; \
      UInt32 curMatch; \

#define HC_SKIP_FOOTER \
    cur++; pos++; *son++ = curMatch; \
    } while (--num2); \
    p->buffer = cur; \
    if (pos == p->posLimit) MatchFinder_CheckLimits(p); \

static void Hc4_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
    curMatch = (hash + kFix4HashSize)[hv];
    (hash + kFix3HashSize)[h3] =
    (hash + kFix4HashSize)[hv] = pos;

static void Hc5_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
    curMatch = (hash + kFix5HashSize)[hv];
    (hash + kFix3HashSize)[h3] =
    // (hash + kFix4HashSize)[h4] =
    (hash + kFix5HashSize)[hv] = pos;

void Hc3Zip_MatchFinder_Skip(CMatchFinder *p, UInt32 num)
    curMatch = hash[hv];
void MatchFinder_CreateVTable(CMatchFinder *p, IMatchFinder2 *vTable)
  vTable->Init = (Mf_Init_Func)MatchFinder_Init;
  vTable->GetNumAvailableBytes = (Mf_GetNumAvailableBytes_Func)MatchFinder_GetNumAvailableBytes;
  vTable->GetPointerToCurrentPos = (Mf_GetPointerToCurrentPos_Func)MatchFinder_GetPointerToCurrentPos;
    if (p->numHashBytes <= 4)
      vTable->GetMatches = (Mf_GetMatches_Func)Hc4_MatchFinder_GetMatches;
      vTable->Skip = (Mf_Skip_Func)Hc4_MatchFinder_Skip;
      vTable->GetMatches = (Mf_GetMatches_Func)Hc5_MatchFinder_GetMatches;
      vTable->Skip = (Mf_Skip_Func)Hc5_MatchFinder_Skip;
  else if (p->numHashBytes == 2)
    vTable->GetMatches = (Mf_GetMatches_Func)Bt2_MatchFinder_GetMatches;
    vTable->Skip = (Mf_Skip_Func)Bt2_MatchFinder_Skip;
  else if (p->numHashBytes == 3)
    vTable->GetMatches = (Mf_GetMatches_Func)Bt3_MatchFinder_GetMatches;
    vTable->Skip = (Mf_Skip_Func)Bt3_MatchFinder_Skip;
  else if (p->numHashBytes == 4)
    vTable->GetMatches = (Mf_GetMatches_Func)Bt4_MatchFinder_GetMatches;
    vTable->Skip = (Mf_Skip_Func)Bt4_MatchFinder_Skip;
    vTable->GetMatches = (Mf_GetMatches_Func)Bt5_MatchFinder_GetMatches;
    vTable->Skip = (Mf_Skip_Func)Bt5_MatchFinder_Skip;
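/* Added illustrative sketch (not part of the original source): roughly the
   call order a client of this vtable uses. The parameter values, the size of
   the distances[] buffer, and the exact field layout of CMatchFinder /
   IMatchFinder2 beyond what is set above are assumptions; error handling and
   the real encoder logic are omitted. Requires LzFind.h and an ISzAlloc
   implementation to build. */
#if 0
#include "LzFind.h"

static void Sketch_Drive(ISeqInStream *inStream, ISzAllocPtr alloc)
{
  CMatchFinder mf;
  IMatchFinder2 vt;
  UInt32 distances[2 * 274];            /* room for (len, dist - 1) pairs; assumed size */

  MatchFinder_Construct(&mf);
  mf.stream = inStream;
  /* historySize, keepAddBufferBefore, matchMaxLen, keepAddBufferAfter */
  if (!MatchFinder_Create(&mf, 1 << 22, 0, 273, 0, alloc))
    return;
  MatchFinder_CreateVTable(&mf, &vt);
  vt.Init(&mf);

  while (vt.GetNumAvailableBytes(&mf) != 0)
  {
    const UInt32 *end = vt.GetMatches(&mf, distances);
    /* ... feed the (len, dist) pairs in [distances, end) to an encoder ... */
    (void)end;
  }
  MatchFinder_Free(&mf, alloc);
}
#endif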
\r
void LzFindPrepare()
  #ifndef FORCE_SATUR_SUB_128
  #ifdef USE_SATUR_SUB_128
  LZFIND_SATUR_SUB_CODE_FUNC f = NULL;
  #ifdef MY_CPU_ARM_OR_ARM64
    if (CPU_IsSupported_NEON())
      // #pragma message ("=== LzFind NEON")
      _PRF(printf("\n=== LzFind NEON\n"));
      f = LzFind_SaturSub_128;
    // f = 0; // for debug
  #else // MY_CPU_ARM_OR_ARM64
  if (CPU_IsSupported_SSE41())
    // #pragma message ("=== LzFind SSE41")
    _PRF(printf("\n=== LzFind SSE41\n"));
    f = LzFind_SaturSub_128;
  if (CPU_IsSupported_AVX2())
    // #pragma message ("=== LzFind AVX2")
    _PRF(printf("\n=== LzFind AVX2\n"));
    f = LzFind_SaturSub_256;
  #endif // MY_CPU_ARM_OR_ARM64
  g_LzFind_SaturSub = f;
  #endif // USE_SATUR_SUB_128
  #endif // FORCE_SATUR_SUB_128
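/* Added usage note (not part of the original source): LzFindPrepare() only
   selects and caches the SIMD normalization kernel in g_LzFind_SaturSub.
   Calling it is optional - if the pointer stays NULL, MatchFinder_Normalize3()
   falls back to DEFAULT_SaturSub - and it is harmless to call it more than
   once. A minimal sketch of the intended call order, assuming the LzFind.h
   declarations: */
#if 0
#include "LzFind.h"

int main(void)
{
  LzFindPrepare();   /* pick the SSE4.1 / AVX2 / NEON kernel once, at startup */
  /* ... construct, create and run the match finder / encoder as usual ... */
  return 0;
}
#endif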
\r