/*
 * xxHash - Extremely Fast Hash algorithm
 * Header File
 * Copyright (c) Yann Collet - Meta Platforms, Inc
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* Local adaptations for Zstandard */

#ifndef XXH_NO_XXH3
# define XXH_NO_XXH3
#endif

#ifndef XXH_NAMESPACE
# define XXH_NAMESPACE ZSTD_
#endif

/*!
 * @mainpage xxHash
 *
 * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed
 * limits.
 *
 * It is proposed in four flavors, in three families:
 * 1. @ref XXH32_family
 *   - Classic 32-bit hash function. Simple, compact, and runs on almost all
 *     32-bit and 64-bit systems.
 * 2. @ref XXH64_family
 *   - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most
 *     64-bit systems (but _not_ 32-bit systems).
 * 3. @ref XXH3_family
 *   - Modern 64-bit and 128-bit hash function family which features improved
 *     strength and performance across the board, especially on smaller data.
 *     It benefits greatly from SIMD and 64-bit without requiring it.
 *
 * Benchmarks
 * ---
 * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04.
 * The open source benchmark program is compiled with clang v10.0 using -O3 flag.
 *
 * | Hash Name            | ISA ext | Width | Large Data Speed | Small Data Velocity |
 * | -------------------- | ------- | ----: | ---------------: | ------------------: |
 * | XXH3_64bits()        | @b AVX2 |    64 |        59.4 GB/s |               133.1 |
 * | MeowHash             | AES-NI  |   128 |        58.2 GB/s |                52.5 |
 * | XXH3_128bits()       | @b AVX2 |   128 |        57.9 GB/s |               118.1 |
 * | CLHash               | PCLMUL  |    64 |        37.1 GB/s |                58.1 |
 * | XXH3_64bits()        | @b SSE2 |    64 |        31.5 GB/s |               133.1 |
 * | XXH3_128bits()       | @b SSE2 |   128 |        29.6 GB/s |               118.1 |
 * | RAM sequential read  |         |   N/A |        28.0 GB/s |                 N/A |
 * | ahash                | AES-NI  |    64 |        22.5 GB/s |               107.2 |
 * | City64               |         |    64 |        22.0 GB/s |                76.6 |
 * | T1ha2                |         |    64 |        22.0 GB/s |                99.0 |
 * | City128              |         |   128 |        21.7 GB/s |                57.7 |
 * | FarmHash             | AES-NI  |    64 |        21.3 GB/s |                71.9 |
 * | XXH64()              |         |    64 |        19.4 GB/s |                71.0 |
 * | SpookyHash           |         |    64 |        19.3 GB/s |                53.2 |
 * | Mum                  |         |    64 |        18.0 GB/s |                67.0 |
 * | CRC32C               | SSE4.2  |    32 |        13.0 GB/s |                57.9 |
 * | XXH32()              |         |    32 |         9.7 GB/s |                71.9 |
 * | City32               |         |    32 |         9.1 GB/s |                66.0 |
 * | Blake3*              | @b AVX2 |   256 |         4.4 GB/s |                 8.1 |
 * | Murmur3              |         |    32 |         3.9 GB/s |                56.1 |
 * | SipHash*             |         |    64 |         3.0 GB/s |                43.2 |
 * | Blake3*              | @b SSE2 |   256 |         2.4 GB/s |                 8.1 |
 * | HighwayHash          |         |    64 |         1.4 GB/s |                 6.0 |
 * | FNV64                |         |    64 |         1.2 GB/s |                62.7 |
 * | Blake2*              |         |   256 |         1.1 GB/s |                 5.1 |
 * | SHA1*                |         |   160 |         0.8 GB/s |                 5.6 |
 * | MD5*                 |         |   128 |         0.6 GB/s |                 7.8 |
 * @note
 *   - Hashes which require a specific ISA extension are noted. SSE2 is also noted,
 *     even though it is mandatory on x64.
 *   - Hashes with an asterisk are cryptographic. Note that MD5 is non-cryptographic
 *     by modern standards.
 *   - Small data velocity is a rough average of an algorithm's efficiency on small
 *     data. For more accurate information, see the wiki.
 *   - More benchmarks and strength tests are found on the wiki:
 *     https://github.com/Cyan4973/xxHash/wiki
 *
 * Usage
 * ------
 * All xxHash variants use a similar API. Changing the algorithm is a trivial
 * substitution.
 *
 * @pre
 *   For functions which take an input and length parameter, the following
 *   requirements are assumed:
 *   - The range from [`input`, `input + length`) is valid, readable memory.
 *     - The only exception is that `input` may be `NULL` if `length` is `0`.
 *   - For C++, the objects must have the *TriviallyCopyable* property, as the
 *     functions access bytes directly as if it were an array of `unsigned char`.
 *
 * @anchor single_shot_example
 * **Single Shot**
 *
 * These functions are stateless functions which hash a contiguous block of memory,
 * immediately returning the result. They are the easiest and usually the fastest
 * option.
 *
 * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
 *
 * @code{.c}
 * #include <string.h>
 * #include "xxhash.h"
 *
 * // Example for a function which hashes a null terminated string with XXH32().
 * XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
 * {
 *     // NULL pointers are only valid if the length is zero
 *     size_t length = (string == NULL) ? 0 : strlen(string);
 *     return XXH32(string, length, seed);
 * }
 * @endcode
 *
 *
 * @anchor streaming_example
 * **Streaming**
 *
 * These groups of functions allow incremental hashing of an input of unknown
 * size, even larger than what would fit in a `size_t`.
 *
 * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
 *
 * @code{.c}
 * #include <stdio.h>
 * #include <assert.h>
 * #include "xxhash.h"
 * // Example for a function which hashes a FILE incrementally with XXH3_64bits().
 * XXH64_hash_t hashFile(FILE* f)
 * {
 *     // Allocate a state struct. Do not just use malloc() or new.
 *     XXH3_state_t* state = XXH3_createState();
 *     assert(state != NULL && "Out of memory!");
 *     // Reset the state to start a new hashing session.
 *     XXH3_64bits_reset(state);
 *     char buffer[4096];
 *     size_t count;
 *     // Read the file in chunks
 *     while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) {
 *         // Run update() as many times as necessary to process the data
 *         XXH3_64bits_update(state, buffer, count);
 *     }
 *     // Retrieve the finalized hash. This will not change the state.
 *     XXH64_hash_t result = XXH3_64bits_digest(state);
 *     // Free the state. Do not use free().
 *     XXH3_freeState(state);
 *     return result;
 * }
 * @endcode
 *
 * Streaming functions generate the xxHash value from an incremental input.
 * This method is slower than single-call functions, due to state management.
 * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
 *
 * An XXH state must first be allocated using `XXH*_createState()`.
 *
 * Start a new hash by initializing the state with a seed using `XXH*_reset()`.
 *
 * Then, feed the hash state by calling `XXH*_update()` as many times as necessary.
 *
 * The function returns an error code, with 0 meaning OK, and any other value
 * meaning there is an error.
 *
 * Finally, a hash value can be produced anytime, by using `XXH*_digest()`.
 * This function returns the nn-bit hash as an int or long long.
 *
 * It's still possible to continue inserting input into the hash state after a
 * digest, and generate new hash values later on by invoking `XXH*_digest()`.
 *
 * When done, release the state using `XXH*_freeState()`.
 *
 *
 * @anchor canonical_representation_example
 * **Canonical Representation**
 *
 * The default return values from XXH functions are unsigned 32, 64 and 128 bit
 * integers.
 * This is the simplest and fastest format for further post-processing.
 *
 * However, this leaves open the question of byte order, since little-endian and
 * big-endian conventions will store the same number differently in memory.
 *
 * The canonical representation settles this issue by mandating big-endian
 * convention, the same convention as human-readable numbers (large digits first).
 *
 * When writing hash values to storage, sending them over a network, or printing
 * them, it's highly recommended to use the canonical representation to ensure
 * portability across a wider range of systems, present and future.
 *
 * The following functions allow transformation of hash values to and from
 * canonical format.
 *
 * XXH32_canonicalFromHash(), XXH32_hashFromCanonical(),
 * XXH64_canonicalFromHash(), XXH64_hashFromCanonical(),
 * XXH128_canonicalFromHash(), XXH128_hashFromCanonical()
 *
 * @code{.c}
 * #include <stdio.h>
 * #include "xxhash.h"
 *
 * // Example for a function which prints XXH32_hash_t in human readable format
 * void printXxh32(XXH32_hash_t hash)
 * {
 *     XXH32_canonical_t cano;
 *     XXH32_canonicalFromHash(&cano, hash);
 *     size_t i;
 *     for(i = 0; i < sizeof(cano.digest); ++i) {
 *         printf("%02x", cano.digest[i]);
 *     }
 *     printf("\n");
 * }
 *
 * // Example for a function which converts XXH32_canonical_t to XXH32_hash_t
 * XXH32_hash_t convertCanonicalToXxh32(XXH32_canonical_t cano)
 * {
 *     XXH32_hash_t hash = XXH32_hashFromCanonical(&cano);
 *     return hash;
 * }
 * @endcode
 *
 *
 * @file xxhash.h
 * xxHash prototypes and implementation
 */

#if defined (__cplusplus)
extern "C" {
#endif

/* ****************************
 *  INLINE mode
 ******************************/
/*!
 * @defgroup public Public API
 *  Contains details on the public xxHash functions.
 * @{
 */
#ifdef XXH_DOXYGEN
/*!
 * @brief Gives access to internal state declaration, required for static allocation.
 *
 * Incompatible with dynamic linking, due to risks of ABI changes.
 *
 * Usage:
 * @code{.c}
 * #define XXH_STATIC_LINKING_ONLY
 * #include "xxhash.h"
 * @endcode
 */
# define XXH_STATIC_LINKING_ONLY
/* Do not undef XXH_STATIC_LINKING_ONLY for Doxygen */

/*!
 * @brief Gives access to internal definitions.
 *
 * Usage:
 * @code{.c}
 * #define XXH_STATIC_LINKING_ONLY
 * #define XXH_IMPLEMENTATION
 * #include "xxhash.h"
 * @endcode
 */
# define XXH_IMPLEMENTATION
/* Do not undef XXH_IMPLEMENTATION for Doxygen */

/*!
 * @brief Exposes the implementation and marks all functions as `inline`.
 *
 * Use these build macros to inline xxhash into the target unit.
 * Inlining improves performance on small inputs, especially when the length is
 * expressed as a compile-time constant:
 *
 * https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html
 *
 * It also keeps xxHash symbols private to the unit, so they are not exported.
 *
 * Usage:
 * @code{.c}
 * #define XXH_INLINE_ALL
 * #include "xxhash.h"
 * @endcode
 * Do not compile and link xxhash.o as a separate object, as it is not useful.
 */
# define XXH_INLINE_ALL
# undef XXH_INLINE_ALL
/*!
 * @brief Exposes the implementation without marking functions as inline.
 */
# define XXH_PRIVATE_API
# undef XXH_PRIVATE_API
/*!
 * @brief Emulate a namespace by transparently prefixing all symbols.
 *
 * If you want to include _and expose_ xxHash functions from within your own
 * library, but also want to avoid symbol collisions with other libraries which
 * may also include xxHash, you can use @ref XXH_NAMESPACE to automatically prefix
 * any public symbol from the xxhash library with the value of @ref XXH_NAMESPACE
 * (therefore, avoid empty or numeric values).
 *
 * Note that no change is required within the calling program as long as it
 * includes `xxhash.h`: Regular symbol names will be automatically translated
 * by this header.
 */
# define XXH_NAMESPACE /* YOUR NAME HERE */
# undef XXH_NAMESPACE
#endif
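/*
 * A minimal sketch of XXH_NAMESPACE in practice: a library that ships its own
 * copy of xxHash defines the macro before inclusion. The `MYLIB_` prefix and
 * `mylib_checksum` helper are illustrative, not part of the xxHash API.
 *
 * @code{.c}
 * #define XXH_NAMESPACE MYLIB_   // must appear before including xxhash.h
 * #include "xxhash.h"
 *
 * // The caller still writes XXH64(); the header translates it to the
 * // exported symbol MYLIB_XXH64, avoiding collisions with other copies.
 * unsigned long long mylib_checksum(const void* p, size_t n)
 * {
 *     return (unsigned long long)XXH64(p, n, 0);
 * }
 * @endcode
 */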

#if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
   && !defined(XXH_INLINE_ALL_31684351384)
   /* this section should be traversed only once */
#  define XXH_INLINE_ALL_31684351384
   /* give access to the advanced API, required to compile implementations */
#  undef XXH_STATIC_LINKING_ONLY   /* avoid macro redef */
#  define XXH_STATIC_LINKING_ONLY
   /* make all functions private */
#  undef XXH_PUBLIC_API
#  if defined(__GNUC__)
#    define XXH_PUBLIC_API static __inline __attribute__((unused))
#  elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
#    define XXH_PUBLIC_API static inline
#  elif defined(_MSC_VER)
#    define XXH_PUBLIC_API static __inline
#  else
     /* note: this version may generate warnings for unused static functions */
#    define XXH_PUBLIC_API static
#  endif

   /*
    * This part deals with the special case where a unit wants to inline xxHash,
    * but "xxhash.h" has previously been included without XXH_INLINE_ALL,
    * such as part of some previously included *.h header file.
    * Without further action, the new include would just be ignored,
    * and functions would effectively _not_ be inlined (silent failure).
    * The following macros solve this situation by prefixing all inlined names,
    * avoiding naming collision with previous inclusions.
    */
   /* Before that, we unconditionally #undef all symbols,
    * in case they were already defined with XXH_NAMESPACE.
    * They will then be redefined for XXH_INLINE_ALL
    */
#  undef XXH_versionNumber
   /* XXH32 */
#  undef XXH32
#  undef XXH32_createState
#  undef XXH32_freeState
#  undef XXH32_reset
#  undef XXH32_update
#  undef XXH32_digest
#  undef XXH32_copyState
#  undef XXH32_canonicalFromHash
#  undef XXH32_hashFromCanonical
   /* XXH64 */
#  undef XXH64
#  undef XXH64_createState
#  undef XXH64_freeState
#  undef XXH64_reset
#  undef XXH64_update
#  undef XXH64_digest
#  undef XXH64_copyState
#  undef XXH64_canonicalFromHash
#  undef XXH64_hashFromCanonical
   /* XXH3_64bits */
#  undef XXH3_64bits
#  undef XXH3_64bits_withSecret
#  undef XXH3_64bits_withSeed
#  undef XXH3_64bits_withSecretandSeed
#  undef XXH3_createState
#  undef XXH3_freeState
#  undef XXH3_copyState
#  undef XXH3_64bits_reset
#  undef XXH3_64bits_reset_withSeed
#  undef XXH3_64bits_reset_withSecret
#  undef XXH3_64bits_update
#  undef XXH3_64bits_digest
#  undef XXH3_generateSecret
   /* XXH3_128bits */
#  undef XXH128
#  undef XXH3_128bits
#  undef XXH3_128bits_withSeed
#  undef XXH3_128bits_withSecret
#  undef XXH3_128bits_reset
#  undef XXH3_128bits_reset_withSeed
#  undef XXH3_128bits_reset_withSecret
#  undef XXH3_128bits_reset_withSecretandSeed
#  undef XXH3_128bits_update
#  undef XXH3_128bits_digest
#  undef XXH128_isEqual
#  undef XXH128_cmp
#  undef XXH128_canonicalFromHash
#  undef XXH128_hashFromCanonical
   /* Finally, free the namespace itself */
#  undef XXH_NAMESPACE

   /* employ the namespace for XXH_INLINE_ALL */
#  define XXH_NAMESPACE XXH_INLINE_
   /*
    * Some identifiers (enums, type names) are not symbols,
    * but they must nonetheless be renamed to avoid redeclaration.
    * Alternative solution: do not redeclare them.
    * However, this requires some #ifdefs, and has a more dispersed impact.
    * Meanwhile, renaming can be achieved in a single place.
    */
#  define XXH_IPREF(Id) XXH_NAMESPACE ## Id
#  define XXH_OK XXH_IPREF(XXH_OK)
#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
#  define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t)
#  define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t)
#  define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
#  define XXH32_state_s XXH_IPREF(XXH32_state_s)
#  define XXH32_state_t XXH_IPREF(XXH32_state_t)
#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
#  define XXH3_state_s XXH_IPREF(XXH3_state_s)
#  define XXH3_state_t XXH_IPREF(XXH3_state_t)
#  define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
   /* Ensure the header is parsed again, even if it was previously included */
#  undef XXHASH_H_5627135585666179
#  undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */

/* ****************************************************************
 *  Stable API
 *****************************************************************/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1

/*! @brief Marks a global symbol. */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#    ifdef XXH_EXPORT
#      define XXH_PUBLIC_API __declspec(dllexport)
#    elif XXH_IMPORT
#      define XXH_PUBLIC_API __declspec(dllimport)
#    endif
#  else
#    define XXH_PUBLIC_API   /* do nothing */
#  endif
#endif

#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH32 */
#  define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
#  define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
#  define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
#  define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
#  define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
#  define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
#  define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
#  define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
#  define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
/* XXH64 */
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
/* XXH3_64bits */
#  define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
#  define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
#  define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
#  define XXH3_64bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecretandSeed)
#  define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
#  define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
#  define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
#  define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
#  define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
#  define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
#  define XXH3_64bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecretandSeed)
#  define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
#  define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
#  define XXH3_generateSecret XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret)
#  define XXH3_generateSecret_fromSeed XXH_NAME2(XXH_NAMESPACE, XXH3_generateSecret_fromSeed)
/* XXH3_128bits */
#  define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
#  define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
#  define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
#  define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
#  define XXH3_128bits_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecretandSeed)
#  define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
#  define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
#  define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
#  define XXH3_128bits_reset_withSecretandSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecretandSeed)
#  define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
#  define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
#  define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
#  define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
#  define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
#  define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
#endif


/* *************************************
*  Compiler specifics
***************************************/

#if defined (__GNUC__)
# define XXH_CONSTF  __attribute__((const))
# define XXH_PUREF   __attribute__((pure))
# define XXH_MALLOCF __attribute__((malloc))
#else
# define XXH_CONSTF  /* disable */
# define XXH_PUREF
# define XXH_MALLOCF
#endif

/* *************************************
*  Version
***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  2
/*! @brief Version number, encoded with two digits per component */
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)

/*!
 * @brief Obtains the xxHash version.
 *
 * This is mostly useful when xxHash is compiled as a shared library,
 * since the returned value comes from the library, as opposed to the header file.
 *
 * @return @ref XXH_VERSION_NUMBER of the invoked library.
 */
XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);

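/*
 * A quick worked example of the encoding above: with MAJOR=0, MINOR=8,
 * RELEASE=2, XXH_VERSION_NUMBER evaluates to 0*10000 + 8*100 + 2 = 802.
 * The sketch below (the `checkXxhVersion` helper is illustrative) verifies at
 * run time that a dynamically linked library matches this header:
 *
 * @code{.c}
 * #include <assert.h>
 * #include "xxhash.h"
 *
 * static void checkXxhVersion(void)
 * {
 *     // Header constant vs. value reported by the linked library.
 *     assert(XXH_versionNumber() == XXH_VERSION_NUMBER);
 * }
 * @endcode
 */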

/* ****************************
*  Common basic types
******************************/
#include <stddef.h>   /* size_t */
/*!
 * @brief Exit code for the streaming API.
 */
typedef enum {
    XXH_OK = 0, /*!< OK */
    XXH_ERROR   /*!< Error */
} XXH_errorcode;


/*-**********************************************************************
*  32-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* Don't show <stdint.h> include */
/*!
 * @brief An unsigned 32-bit integer.
 *
 * Not necessarily defined to `uint32_t` but functionally equivalent.
 */
typedef uint32_t XXH32_hash_t;

#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  ifdef _AIX
#    include <inttypes.h>
#  else
#    include <stdint.h>
#  endif
   typedef uint32_t XXH32_hash_t;

#else
#  include <limits.h>
#  if UINT_MAX == 0xFFFFFFFFUL
     typedef unsigned int XXH32_hash_t;
#  elif ULONG_MAX == 0xFFFFFFFFUL
     typedef unsigned long XXH32_hash_t;
#  else
#    error "unsupported platform: need a 32-bit type"
#  endif
#endif

/*!
 * @}
 *
 * @defgroup XXH32_family XXH32 family
 * @ingroup public
 * Contains functions used in the classic 32-bit xxHash algorithm.
 *
 * @note
 *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
 *   Note that the @ref XXH3_family provides competitive speed for both 32-bit
 *   and 64-bit systems, and offers true 64/128 bit hash results.
 *
 * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families
 * @see @ref XXH32_impl for implementation details
 * @{
 */

/*!
 * @brief Calculates the 32-bit hash of @p input using xxHash32.
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 32-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 32-bit xxHash32 value.
 *
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);

#ifndef XXH_NO_STREAM
/*!
 * @typedef struct XXH32_state_s XXH32_state_t
 * @brief The opaque state struct for the XXH32 streaming API.
 *
 * @see XXH32_state_s for details.
 */
typedef struct XXH32_state_s XXH32_state_t;

/*!
 * @brief Allocates an @ref XXH32_state_t.
 *
 * @return An allocated pointer of @ref XXH32_state_t on success.
 * @return `NULL` on failure.
 *
 * @note Must be freed with XXH32_freeState().
 */
XXH_PUBLIC_API XXH_MALLOCF XXH32_state_t* XXH32_createState(void);
/*!
 * @brief Frees an @ref XXH32_state_t.
 *
 * @param statePtr A pointer to an @ref XXH32_state_t allocated with @ref XXH32_createState().
 *
 * @return @ref XXH_OK.
 *
 * @note @p statePtr must be allocated with XXH32_createState().
 */
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
/*!
 * @brief Copies one @ref XXH32_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);

/*!
 * @brief Resets an @ref XXH32_state_t to begin a new hash.
 *
 * @param statePtr The state struct to reset.
 * @param seed The 32-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note This function resets and seeds a state. Call it before @ref XXH32_update().
 */
XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);

/*!
 * @brief Consumes a block of @p input to an @ref XXH32_state_t.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note Call this to incrementally consume blocks of data.
 */
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);

/*!
 * @brief Returns the calculated hash value from an @ref XXH32_state_t.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return The calculated 32-bit xxHash32 value from that state.
 *
 * @note
 *   Calling XXH32_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 */
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
#endif /* !XXH_NO_STREAM */
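/*
 * A minimal sketch of the XXH32 streaming round trip declared above, hashing
 * two buffers as if they were one contiguous input. The `hashTwoParts` helper
 * is illustrative; error handling is reduced to asserts for brevity.
 *
 * @code{.c}
 * #include <assert.h>
 * #include "xxhash.h"
 *
 * static XXH32_hash_t hashTwoParts(const void* a, size_t na,
 *                                  const void* b, size_t nb,
 *                                  XXH32_hash_t seed)
 * {
 *     XXH32_state_t* state = XXH32_createState();
 *     assert(state != NULL);
 *     // Seed the session, then feed the parts in order.
 *     assert(XXH32_reset(state, seed) == XXH_OK);
 *     assert(XXH32_update(state, a, na) == XXH_OK);
 *     assert(XXH32_update(state, b, nb) == XXH_OK);
 *     // Produces the same value as XXH32() over the concatenated input.
 *     XXH32_hash_t h = XXH32_digest(state);
 *     XXH32_freeState(state);
 *     return h;
 * }
 * @endcode
 */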

/******* Canonical representation *******/

/*!
 * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
 */
typedef struct {
    unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;

/*!
 * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
 *
 * @param dst The @ref XXH32_canonical_t pointer to be stored to.
 * @param hash The @ref XXH32_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 *
 * @see @ref canonical_representation_example "Canonical Representation Example"
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);

/*!
 * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
 *
 * @param src The @ref XXH32_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 *
 * @see @ref canonical_representation_example "Canonical Representation Example"
 */
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);


/*! @cond Doxygen ignores this part */
#ifdef __has_attribute
# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
# define XXH_HAS_ATTRIBUTE(x) 0
#endif
/*! @endcond */

/*! @cond Doxygen ignores this part */
/*
 * C23 __STDC_VERSION__ number hasn't been specified yet. For now
 * leave as `201711L` (C17 + 1).
 * TODO: Update to correct value when it's been specified.
 */
#define XXH_C23_VN 201711L
/*! @endcond */

/*! @cond Doxygen ignores this part */
/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN) && defined(__has_c_attribute)
# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
# define XXH_HAS_C_ATTRIBUTE(x) 0
#endif
/*! @endcond */

/*! @cond Doxygen ignores this part */
#if defined(__cplusplus) && defined(__has_cpp_attribute)
# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define XXH_HAS_CPP_ATTRIBUTE(x) 0
#endif
/*! @endcond */

/*! @cond Doxygen ignores this part */
/*
 * Define XXH_FALLTHROUGH macro for annotating switch cases with the
 * 'fallthrough' attribute introduced in C++17 and C23.
 * C++17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
 * C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
 */
#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
#else
# define XXH_FALLTHROUGH /* fallthrough */
#endif
/*! @endcond */

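/*
 * How XXH_FALLTHROUGH is meant to be used: placed between switch cases to
 * mark deliberate fall-through, silencing -Wimplicit-fallthrough portably.
 * The `roundsForLength` function is an illustrative sketch, not library code.
 *
 * @code{.c}
 * static int roundsForLength(size_t len)
 * {
 *     int rounds = 0;
 *     switch (len & 3) {
 *     case 3: rounds++;
 *             XXH_FALLTHROUGH;  // [[fallthrough]] where supported
 *     case 2: rounds++;
 *             XXH_FALLTHROUGH;
 *     case 1: rounds++;
 *             XXH_FALLTHROUGH;
 *     default:
 *             break;
 *     }
 *     return rounds;
 * }
 * @endcode
 */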
/*! @cond Doxygen ignores this part */
/*
 * Define XXH_NOESCAPE for annotated pointers in public API.
 * https://clang.llvm.org/docs/AttributeReference.html#noescape
 * As of writing this, only supported by clang.
 */
#if XXH_HAS_ATTRIBUTE(noescape)
# define XXH_NOESCAPE __attribute__((noescape))
#else
# define XXH_NOESCAPE
#endif
/*! @endcond */


/*!
 * @}
 * @ingroup public
 * @{
 */

#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
*  64-bit hash
************************************************************************/
#if defined(XXH_DOXYGEN) /* don't include <stdint.h> */
/*!
 * @brief An unsigned 64-bit integer.
 *
 * Not necessarily defined to `uint64_t` but functionally equivalent.
 */
typedef uint64_t XXH64_hash_t;
#elif !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  ifdef _AIX
#    include <inttypes.h>
#  else
#    include <stdint.h>
#  endif
   typedef uint64_t XXH64_hash_t;
#else
#  include <limits.h>
#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
     /* LP64 ABI says uint64_t is unsigned long */
     typedef unsigned long XXH64_hash_t;
#  else
     /* the following type must have a width of 64-bit */
     typedef unsigned long long XXH64_hash_t;
#  endif
#endif

/*!
 * @}
 *
 * @defgroup XXH64_family XXH64 family
 * @ingroup public
 * @{
 * Contains functions used in the classic 64-bit xxHash algorithm.
 *
 * @note
 *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
 *   and offers true 64/128 bit hash results.
 *   It provides better speed for systems with vector processing capabilities.
 */

/*!
 * @brief Calculates the 64-bit hash of @p input using xxHash64.
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 64-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 64-bit xxHash64 value.
 *
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);

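/*
 * A small usage sketch for XXH64(): hashing a struct's bytes in one shot,
 * relying on the TriviallyCopyable precondition above. The `Record` type and
 * `hashRecord` helper are illustrative. Beware that compiler-inserted padding
 * participates in the hash, so padding bytes must be initialized for
 * reproducible results.
 *
 * @code{.c}
 * #include "xxhash.h"
 *
 * typedef struct { int id; unsigned flags; } Record;
 *
 * static XXH64_hash_t hashRecord(const Record* r, XXH64_hash_t seed)
 * {
 *     return XXH64(r, sizeof(*r), seed);
 * }
 * @endcode
 */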
/******* Streaming *******/
#ifndef XXH_NO_STREAM
/*!
 * @brief The opaque state struct for the XXH64 streaming API.
 *
 * @see XXH64_state_s for details.
 */
typedef struct XXH64_state_s XXH64_state_t;   /* incomplete type */

/*!
 * @brief Allocates an @ref XXH64_state_t.
 *
 * @return An allocated pointer of @ref XXH64_state_t on success.
 * @return `NULL` on failure.
 *
 * @note Must be freed with XXH64_freeState().
 */
XXH_PUBLIC_API XXH_MALLOCF XXH64_state_t* XXH64_createState(void);

/*!
 * @brief Frees an @ref XXH64_state_t.
 *
 * @param statePtr A pointer to an @ref XXH64_state_t allocated with @ref XXH64_createState().
 *
 * @return @ref XXH_OK.
 *
 * @note @p statePtr must be allocated with XXH64_createState().
 */
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);

/*!
 * @brief Copies one @ref XXH64_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dst_state, const XXH64_state_t* src_state);

/*!
 * @brief Resets an @ref XXH64_state_t to begin a new hash.
 *
 * @param statePtr The state struct to reset.
 * @param seed The 64-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note This function resets and seeds a state. Call it before @ref XXH64_update().
 */
XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed);

/*!
 * @brief Consumes a block of @p input to an @ref XXH64_state_t.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note Call this to incrementally consume blocks of data.
 */
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH_NOESCAPE XXH64_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);

/*!
 * @brief Returns the calculated hash value from an @ref XXH64_state_t.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return The calculated 64-bit xxHash64 value from that state.
 *
 * @note
 *   Calling XXH64_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_digest (XXH_NOESCAPE const XXH64_state_t* statePtr);
#endif /* !XXH_NO_STREAM */
/******* Canonical representation *******/

/*!
 * @brief Canonical (big endian) representation of @ref XXH64_hash_t.
 */
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;

/*!
 * @brief Converts an @ref XXH64_hash_t to a big endian @ref XXH64_canonical_t.
 *
 * @param dst The @ref XXH64_canonical_t pointer to be stored to.
 * @param hash The @ref XXH64_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 *
 * @see @ref canonical_representation_example "Canonical Representation Example"
 */
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash);

/*!
 * @brief Converts an @ref XXH64_canonical_t to a native @ref XXH64_hash_t.
 *
 * @param src The @ref XXH64_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 *
 * @see @ref canonical_representation_example "Canonical Representation Example"
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src);

#ifndef XXH_NO_XXH3

/*!
 * @}
 * ************************************************************************
 * @defgroup XXH3_family XXH3 family
 * @ingroup public
 * @{
 *
 * XXH3 is a more recent hash algorithm featuring:
 *  - Improved speed for both small and large inputs
 *  - True 64-bit and 128-bit outputs
 *  - SIMD acceleration
 *  - Improved 32-bit viability
 *
 * Speed analysis methodology is explained here:
 *
 *    https://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
 *
 * Compared to XXH64, expect XXH3 to run approximately
 * ~2x faster on large inputs and >3x faster on small ones;
 * exact differences vary depending on platform.
 *
 * XXH3's speed benefits greatly from SIMD and 64-bit arithmetic,
 * but does not require it.
 * Most 32-bit and 64-bit targets that can run XXH32 smoothly can run XXH3
 * at competitive speeds, even without vector support. Further details are
 * explained in the implementation.
 *
 * XXH3 has a fast scalar implementation, but it also includes accelerated SIMD
 * implementations for many common platforms:
 *  - AVX512
 *  - AVX2
 *  - SSE2
 *  - ARM NEON
 *  - WebAssembly SIMD128
 *  - POWER8 VSX
 *  - s390x ZVector
 * This can be controlled via the @ref XXH_VECTOR macro, but it automatically
 * selects the best version according to predefined macros. For the x86 family, an
 * automatic runtime dispatcher is included separately in @ref xxh_x86dispatch.c.
 *
 * XXH3 implementation is portable:
 * it has a generic C90 formulation that can be compiled on any platform;
 * all implementations generate exactly the same hash value on all platforms.
 * Starting from v0.8.0, it's also labelled "stable", meaning that
 * any future version will also generate the same hash value.
 *
 * XXH3 offers 2 variants, _64bits and _128bits.
 *
 * When only 64 bits are needed, prefer invoking the _64bits variant, as it
 * reduces the amount of mixing, resulting in faster speed on small inputs.
 * It's also generally simpler to manipulate a scalar return type than a struct.
 *
 * The API supports one-shot hashing, streaming mode, and custom secrets.
 */
/*-**********************************************************************
*  XXH3 64-bit variant
************************************************************************/

/*!
 * @brief Calculates the 64-bit unseeded variant of the XXH3 hash of @p input.
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 64-bit XXH3 hash value.
 *
 * @note
 *   This is equivalent to @ref XXH3_64bits_withSeed() with a seed of `0`, however
 *   it may have slightly better performance due to constant propagation of the
 *   defaults.
 *
 * @see
 *   XXH3_64bits_withSeed(), XXH3_64bits_withSecret(): other seeding variants
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length);

/*!
 * @brief Calculates the 64-bit seeded variant of the XXH3 hash of @p input.
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 64-bit seed to alter the hash result predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 64-bit XXH3 hash value.
 *
 * @note
 *   seed == 0 produces the same results as @ref XXH3_64bits().
 *
 * This variant generates a custom secret on the fly based on the default secret
 * altered using the @p seed value.
 *
 * While this operation is decently fast, note that it's not completely free.
 *
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);

/*!
 * The bare minimum size for a custom secret.
 *
 * @see
 *   XXH3_64bits_withSecret(), XXH3_64bits_reset_withSecret(),
 *   XXH3_128bits_withSecret(), XXH3_128bits_reset_withSecret().
 */
#define XXH3_SECRET_SIZE_MIN 136

/*!
 * @brief Calculates the 64-bit variant of XXH3 with a custom "secret".
 *
 * @param data The block of data to be hashed, at least @p len bytes in size.
 * @param len The length of @p data, in bytes.
 * @param secret The secret data.
 * @param secretSize The length of @p secret, in bytes.
 *
 * @return The calculated 64-bit XXH3 hash value.
 *
 * @pre
 *   The memory between @p data and @p data + @p len must be valid,
 *   readable, contiguous memory. However, if @p len is `0`, @p data may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * It's possible to provide any blob of bytes as a "secret" to generate the hash.
 * This makes it more difficult for an external actor to prepare an intentional collision.
 * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN).
 * However, the quality of the secret impacts the dispersion of the hash algorithm.
 * Therefore, the secret _must_ look like a bunch of random bytes.
 * Avoid "trivial" or structured data such as repeated sequences or a text document.
 * Whenever in doubt about the "randomness" of the blob of bytes,
 * consider employing @ref XXH3_generateSecret() instead (see below).
 * It will generate a proper high entropy secret derived from the blob of bytes.
 * Another advantage of using XXH3_generateSecret() is that
 * it guarantees that all bits within the initial blob of bytes
 * will impact every bit of the output.
 * This is not necessarily the case when using the blob of bytes directly
 * because, when hashing _small_ inputs, only a portion of the secret is employed.
 *
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
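/*
 * A sketch of the recommended secret workflow: derive a well-dispersed secret
 * from an arbitrary seed blob with XXH3_generateSecret(), then hash with it.
 * The buffer size and `seedBlob` contents are illustrative assumptions.
 *
 * @code{.c}
 * #include "xxhash.h"
 *
 * static XXH64_hash_t hashWithCustomSecret(const void* data, size_t len)
 * {
 *     unsigned char secret[XXH3_SECRET_SIZE_MIN];  // 136 bytes, the legal minimum
 *     const char seedBlob[] = "application-specific salt";
 *     // Spreads the blob's entropy over the entire secret buffer.
 *     if (XXH3_generateSecret(secret, sizeof(secret), seedBlob, sizeof(seedBlob)) != XXH_OK)
 *         return 0;  // generation failed; handle as appropriate
 *     return XXH3_64bits_withSecret(data, len, secret, sizeof(secret));
 * }
 * @endcode
 */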


/******* Streaming *******/
#ifndef XXH_NO_STREAM
/*
 * Streaming requires state maintenance.
 * This operation costs memory and CPU.
 * As a consequence, streaming is slower than one-shot hashing.
 * For better performance, prefer one-shot functions whenever applicable.
 */

/*!
 * @brief The opaque state struct for the XXH3 streaming API.
 *
 * @see XXH3_state_s for details.
 */
typedef struct XXH3_state_s XXH3_state_t;
XXH_PUBLIC_API XXH_MALLOCF XXH3_state_t* XXH3_createState(void);
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);

/*!
 * @brief Copies one @ref XXH3_state_t to another.
 *
 * @param dst_state The state to copy to.
 * @param src_state The state to copy from.
 * @pre
 *   @p dst_state and @p src_state must not be `NULL` and must not overlap.
 */
XXH_PUBLIC_API void XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state);

/*!
 * @brief Resets an @ref XXH3_state_t to begin a new hash.
 *
 * @param statePtr The state struct to reset.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note
 *   - This function resets `statePtr` and generates a secret with default parameters.
 *   - Call this function before @ref XXH3_64bits_update().
 *   - Digest will be equivalent to `XXH3_64bits()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);

/*!
 * @brief Resets an @ref XXH3_state_t with a 64-bit seed to begin a new hash.
 *
 * @param statePtr The state struct to reset.
 * @param seed The 64-bit seed to alter the hash result predictably.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note
 *   - This function resets `statePtr` and generates a secret from `seed`.
 *   - Call this function before @ref XXH3_64bits_update().
 *   - Digest will be equivalent to `XXH3_64bits_withSeed()`.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);

/*!
 * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
 *
 * @param statePtr The state struct to reset.
 * @param secret The secret data.
 * @param secretSize The length of @p secret, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note
 *   `secret` is only referenced, not copied: it _must outlive_ the hash streaming session.
 *
 * Similar to the one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN,
 * and the quality of produced hash values depends on the secret's entropy
 * (the secret's content should look like a bunch of random bytes).
 * When in doubt about the randomness of a candidate `secret`,
 * consider employing `XXH3_generateSecret()` instead (see below).
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);

/*!
 * @brief Consumes a block of @p input to an @ref XXH3_state_t.
 *
 * @param statePtr The state struct to update.
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return @ref XXH_OK on success.
 * @return @ref XXH_ERROR on failure.
 *
 * @note Call this to incrementally consume blocks of data.
 */
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);

/*!
 * @brief Returns the calculated XXH3 64-bit hash value from an @ref XXH3_state_t.
 *
 * @param statePtr The state struct to calculate the hash from.
 *
 * @pre
 *   @p statePtr must not be `NULL`.
 *
 * @return The calculated XXH3 64-bit hash value from that state.
 *
 * @note
 *   Calling XXH3_64bits_digest() will not affect @p statePtr, so you can update,
 *   digest, and update again.
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
#endif /* !XXH_NO_STREAM */

/* note : canonical representation of XXH3 is the same as XXH64
 * since they both produce XXH64_hash_t values */


/*-**********************************************************************
*  XXH3 128-bit variant
************************************************************************/

/*!
 * @brief The return value from 128-bit hashes.
 *
 * Stored in little endian order, although the fields themselves are in native
 * endianness.
 */
typedef struct {
    XXH64_hash_t low64;  /*!< `value & 0xFFFFFFFFFFFFFFFF` */
    XXH64_hash_t high64; /*!< `value >> 64` */
} XXH128_hash_t;
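/*
 * A display sketch: printing high64 then low64 yields the conventional
 * hexadecimal rendering of the 128-bit value. The `printXxh128` helper is
 * illustrative; for storage or transmission, prefer the canonical form.
 *
 * @code{.c}
 * #include <stdio.h>
 * #include <inttypes.h>
 * #include "xxhash.h"
 *
 * static void printXxh128(XXH128_hash_t h)
 * {
 *     // high64 holds the most significant bits (`value >> 64`).
 *     printf("%016" PRIx64 "%016" PRIx64 "\n",
 *            (uint64_t)h.high64, (uint64_t)h.low64);
 * }
 * @endcode
 */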

/*!
 * @brief Calculates the 128-bit unseeded variant of XXH3 of @p data.
 *
 * @param data The block of data to be hashed, at least @p len bytes in size.
 * @param len The length of @p data, in bytes.
 *
 * @return The calculated 128-bit variant of XXH3 value.
 *
 * The 128-bit variant of XXH3 has more strength, but it has a bit of overhead
 * for shorter inputs.
 *
 * This is equivalent to @ref XXH3_128bits_withSeed() with a seed of `0`, however
 * it may have slightly better performance due to constant propagation of the
 * defaults.
 *
 * @see XXH3_128bits_withSeed(), XXH3_128bits_withSecret(): other seeding variants
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* data, size_t len);
/*! @brief Calculates the 128-bit seeded variant of the XXH3 hash of @p data.
 *
 * @param data The block of data to be hashed, at least @p len bytes in size.
 * @param len The length of @p data, in bytes.
 * @param seed The 64-bit seed to alter the hash result predictably.
 *
 * @return The calculated 128-bit variant of XXH3 value.
 *
 * @note
 *   seed == 0 produces the same results as @ref XXH3_128bits().
 *
 * This variant generates a custom secret on the fly based on the default secret
 * altered using the @p seed value.
 *
 * While this operation is decently fast, note that it's not completely free.
 *
 * @see XXH3_128bits(), XXH3_128bits_withSecret(): other seeding variants
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSeed(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
/*!
 * @brief Calculates the 128-bit variant of XXH3 with a custom "secret".
 *
 * @param data The block of data to be hashed, at least @p len bytes in size.
 * @param len The length of @p data, in bytes.
 * @param secret The secret data.
 * @param secretSize The length of @p secret, in bytes.
 *
 * @return The calculated 128-bit variant of XXH3 value.
 *
 * It's possible to provide any blob of bytes as a "secret" to generate the hash.
 * This makes it more difficult for an external actor to prepare an intentional collision.
 * The main condition is that @p secretSize *must* be large enough (>= @ref XXH3_SECRET_SIZE_MIN).
 * However, the quality of the secret impacts the dispersion of the hash algorithm.
 * Therefore, the secret _must_ look like a bunch of random bytes.
 * Avoid "trivial" or structured data such as repeated sequences or a text document.
 * Whenever in doubt about the "randomness" of the blob of bytes,
 * consider employing @ref XXH3_generateSecret() instead (see below).
 * It will generate a proper high entropy secret derived from the blob of bytes.
 * Another advantage of using XXH3_generateSecret() is that
 * it guarantees that all bits within the initial blob of bytes
 * will impact every bit of the output.
 * This is not necessarily the case when using the blob of bytes directly
 * because, when hashing _small_ inputs, only a portion of the secret is employed.
 *
 * @see @ref single_shot_example "Single Shot Example" for an example.
 */
XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_withSecret(XXH_NOESCAPE const void* data, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize);
648db22b 1390
1391/******* Streaming *******/
f535537f 1392#ifndef XXH_NO_STREAM
648db22b 1393/*
1394 * Streaming requires state maintenance.
1395 * This operation costs memory and CPU.
1396 * As a consequence, streaming is slower than one-shot hashing.
1397 * For better performance, prefer one-shot functions whenever applicable.
1398 *
1399 * XXH3_128bits uses the same XXH3_state_t as XXH3_64bits().
1400 * Use already declared XXH3_createState() and XXH3_freeState().
1401 *
f535537f 1402 * All reset and streaming functions have same meaning as their 64-bit counterpart.
1403 */
1404
1405/*!
1406 * @brief Resets an @ref XXH3_state_t to begin a new hash.
1407 *
1408 * @param statePtr The state struct to reset.
1409 *
1410 * @pre
1411 * @p statePtr must not be `NULL`.
1412 *
1413 * @return @ref XXH_OK on success.
1414 * @return @ref XXH_ERROR on failure.
1415 *
1416 * @note
1417 * - This function resets `statePtr` and generate a secret with default parameters.
1418 * - Call it before @ref XXH3_128bits_update().
1419 * - Digest will be equivalent to `XXH3_128bits()`.
1420 */
1421XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr);
1422
1423/*!
1424 * @brief Resets an @ref XXH3_state_t with 64-bit seed to begin a new hash.
1425 *
1426 * @param statePtr The state struct to reset.
1427 * @param seed The 64-bit seed to alter the hash result predictably.
1428 *
1429 * @pre
1430 * @p statePtr must not be `NULL`.
1431 *
1432 * @return @ref XXH_OK on success.
1433 * @return @ref XXH_ERROR on failure.
1434 *
1435 * @note
1436 * - This function resets `statePtr` and generates a secret from `seed`.
1437 * - Call it before @ref XXH3_128bits_update().
1438 * - Digest will be equivalent to `XXH3_128bits_withSeed()`.
1439 */
1440XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed);
1441/*!
1442 * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
1443 *
1444 * @param statePtr The state struct to reset.
1445 * @param secret The secret data.
1446 * @param secretSize The length of @p secret, in bytes.
1447 *
1448 * @pre
1449 * @p statePtr must not be `NULL`.
1450 *
1451 * @return @ref XXH_OK on success.
1452 * @return @ref XXH_ERROR on failure.
1453 *
1454 * `secret` is referenced, not copied: it _must outlive_ the hash streaming session.
1455 * Similar to one-shot API, `secretSize` must be >= @ref XXH3_SECRET_SIZE_MIN,
1456 * and the quality of produced hash values depends on secret's entropy
1457 * (secret's content should look like a bunch of random bytes).
1458 * When in doubt about the randomness of a candidate `secret`,
1459 * consider employing `XXH3_generateSecret()` instead (see below).
1460 */
1461XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize);
1462
1463/*!
1464 * @brief Consumes a block of @p input to an @ref XXH3_state_t.
1465 *
1466 * Call this to incrementally consume blocks of data.
1467 *
1468 * @param statePtr The state struct to update.
1469 * @param input The block of data to be hashed, at least @p length bytes in size.
1470 * @param length The length of @p input, in bytes.
1471 *
1472 * @pre
1473 * @p statePtr must not be `NULL`.
1474 *
1475 * @return @ref XXH_OK on success.
1476 * @return @ref XXH_ERROR on failure.
1477 *
1478 * @note
1479 * The memory between @p input and @p input + @p length must be valid,
1480 * readable, contiguous memory. However, if @p length is `0`, @p input may be
1481 * `NULL`. In C++, this also must be *TriviallyCopyable*.
1482 *
1483 */
1484XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* input, size_t length);
1485
1486/*!
1487 * @brief Returns the calculated XXH3 128-bit hash value from an @ref XXH3_state_t.
1488 *
1489 * @param statePtr The state struct to calculate the hash from.
1490 *
1491 * @pre
1492 * @p statePtr must not be `NULL`.
1493 *
1494 * @return The calculated XXH3 128-bit hash value from that state.
1495 *
1496 * @note
1497 * Calling XXH3_128bits_digest() will not affect @p statePtr, so you can update,
1498 * digest, and update again.
1499 *
648db22b 1500 */
f535537f 1501XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* statePtr);
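/*
 * Example code for the 128-bit streaming round-trip: a minimal sketch (the
 * helper name `hash128_stream` is illustrative; error handling is reduced
 * to the allocation check).
 * @code{.c}
 * #include "xxhash.h"
 * // Hashes `count` chunks incrementally; the result equals hashing their
 * // concatenation with XXH3_128bits().
 * static XXH128_hash_t hash128_stream(const void* const* chunks,
 *                                     const size_t* sizes, size_t count)
 * {
 *     XXH128_hash_t result = { 0, 0 };
 *     XXH3_state_t* const state = XXH3_createState();
 *     size_t i;
 *     if (state == NULL) return result;      // allocation failure
 *     (void)XXH3_128bits_reset(state);       // default secret, no seed
 *     for (i = 0; i < count; i++)
 *         (void)XXH3_128bits_update(state, chunks[i], sizes[i]);
 *     result = XXH3_128bits_digest(state);   // state remains usable
 *     XXH3_freeState(state);
 *     return result;
 * }
 * @endcode
 */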
1502#endif /* !XXH_NO_STREAM */
648db22b 1503
1504/* The following helper functions make it possible to compare XXH128_hash_t values.
1505 * Since XXH128_hash_t is a structure, this capability is not offered by the language.
1506 * Note: For better performance, these functions can be inlined using XXH_INLINE_ALL */
1507
1508/*!
f535537f 1509 * @brief Check equality of two XXH128_hash_t values
1510 *
1511 * @param h1 The 128-bit hash value.
1512 * @param h2 Another 128-bit hash value.
1513 *
1514 * @return `1` if `h1` and `h2` are equal.
1515 * @return `0` if they are not.
648db22b 1516 */
f535537f 1517XXH_PUBLIC_API XXH_PUREF int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
648db22b 1518
1519/*!
f535537f 1520 * @brief Compares two @ref XXH128_hash_t
648db22b 1521 *
1522 * This comparator is compatible with stdlib's `qsort()`/`bsearch()`.
1523 *
f535537f 1524 * @param h128_1 Left-hand side value
1525 * @param h128_2 Right-hand side value
1526 *
1527 * @return >0 if @p h128_1 > @p h128_2
1528 * @return =0 if @p h128_1 == @p h128_2
1529 * @return <0 if @p h128_1 < @p h128_2
648db22b 1530 */
f535537f 1531XXH_PUBLIC_API XXH_PUREF int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2);
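/*
 * Example code for XXH128_isEqual() and XXH128_cmp(): a minimal sketch of
 * sorting hashes and detecting duplicates (the helper name is illustrative).
 * @code{.c}
 * #include <stdlib.h>
 * #include "xxhash.h"
 * // Sorts `hashes` in place, then reports whether any two neighbors collide.
 * static int sort_and_find_duplicate(XXH128_hash_t* hashes, size_t count)
 * {
 *     size_t i;
 *     qsort(hashes, count, sizeof(XXH128_hash_t), XXH128_cmp);
 *     for (i = 1; i < count; i++)
 *         if (XXH128_isEqual(hashes[i-1], hashes[i])) return 1;
 *     return 0;
 * }
 * @endcode
 */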
648db22b 1532
1533
1534/******* Canonical representation *******/
1535typedef struct { unsigned char digest[sizeof(XXH128_hash_t)]; } XXH128_canonical_t;
f535537f 1536
1537
1538/*!
1539 * @brief Converts an @ref XXH128_hash_t to a big endian @ref XXH128_canonical_t.
1540 *
1541 * @param dst The @ref XXH128_canonical_t pointer to be stored to.
1542 * @param hash The @ref XXH128_hash_t to be converted.
1543 *
1544 * @pre
1545 * @p dst must not be `NULL`.
1546 * @see @ref canonical_representation_example "Canonical Representation Example"
1547 */
1548XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash);
1549
1550/*!
1551 * @brief Converts an @ref XXH128_canonical_t to a native @ref XXH128_hash_t.
1552 *
1553 * @param src The @ref XXH128_canonical_t to convert.
1554 *
1555 * @pre
1556 * @p src must not be `NULL`.
1557 *
1558 * @return The converted hash.
1559 * @see @ref canonical_representation_example "Canonical Representation Example"
1560 */
1561XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src);
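/*
 * Example code for the canonical representation: a minimal round-trip sketch
 * showing how a hash survives serialization in a fixed, big endian byte order.
 * @code{.c}
 * #include <string.h>
 * #include "xxhash.h"
 * // Writes `hash` into a portable 16-byte buffer, reads it back,
 * // and confirms the round-trip is lossless.
 * static int canonical_roundtrip(XXH128_hash_t hash, unsigned char* buf16)
 * {
 *     XXH128_canonical_t canonical;
 *     XXH128_canonicalFromHash(&canonical, hash);
 *     memcpy(buf16, canonical.digest, sizeof(canonical.digest));   // store
 *     memcpy(canonical.digest, buf16, sizeof(canonical.digest));   // load
 *     return XXH128_isEqual(hash, XXH128_hashFromCanonical(&canonical));
 * }
 * @endcode
 */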
648db22b 1562
1563
1564#endif /* !XXH_NO_XXH3 */
1565#endif /* XXH_NO_LONG_LONG */
1566
1567/*!
1568 * @}
1569 */
1570#endif /* XXHASH_H_5627135585666179 */
1571
1572
1573
1574#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
1575#define XXHASH_H_STATIC_13879238742
1576/* ****************************************************************************
1577 * This section contains declarations which are not guaranteed to remain stable.
1578 * They may change in future versions, becoming incompatible with a different
1579 * version of the library.
1580 * These declarations should only be used with static linking.
1581 * Never use them in association with dynamic linking!
1582 ***************************************************************************** */
1583
1584/*
1585 * These definitions are only present to allow static allocation
1586 * of XXH states, on stack or in a struct, for example.
1587 * Never **ever** access their members directly.
1588 */
1589
1590/*!
1591 * @internal
1592 * @brief Structure for XXH32 streaming API.
1593 *
1594 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
1595 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
1596 * an opaque type. This allows fields to safely be changed.
1597 *
1598 * Typedef'd to @ref XXH32_state_t.
1599 * Do not access the members of this struct directly.
1600 * @see XXH64_state_s, XXH3_state_s
1601 */
1602struct XXH32_state_s {
1603 XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
1604 XXH32_hash_t large_len; /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
1605 XXH32_hash_t v[4]; /*!< Accumulator lanes */
1606 XXH32_hash_t mem32[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
1607 XXH32_hash_t memsize; /*!< Amount of data in @ref mem32 */
1608 XXH32_hash_t reserved; /*!< Reserved field. Do not read nor write to it. */
1609}; /* typedef'd to XXH32_state_t */
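/*
 * Example code for static allocation enabled by the definition above: a
 * minimal sketch (the helper name is illustrative). Members are still
 * opaque; only the size and alignment are exposed.
 * @code{.c}
 * #define XXH_STATIC_LINKING_ONLY   // expose state definitions
 * #include "xxhash.h"
 * // Streams `data` through a stack-allocated XXH32 state, avoiding malloc().
 * static XXH32_hash_t xxh32_on_stack(const void* data, size_t len)
 * {
 *     XXH32_state_t state;              // valid because the layout is known
 *     (void)XXH32_reset(&state, 0);     // always reset before first use
 *     (void)XXH32_update(&state, data, len);
 *     return XXH32_digest(&state);
 * }
 * @endcode
 */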
1610
1611
1612#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */
1613
1614/*!
1615 * @internal
1616 * @brief Structure for XXH64 streaming API.
1617 *
1618 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
1619 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
1620 * an opaque type. This allows fields to safely be changed.
1621 *
1622 * Typedef'd to @ref XXH64_state_t.
1623 * Do not access the members of this struct directly.
1624 * @see XXH32_state_s, XXH3_state_s
1625 */
1626struct XXH64_state_s {
1627 XXH64_hash_t total_len; /*!< Total length hashed. This is always 64-bit. */
1628 XXH64_hash_t v[4]; /*!< Accumulator lanes */
1629 XXH64_hash_t mem64[4]; /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
1630 XXH32_hash_t memsize; /*!< Amount of data in @ref mem64 */
1631 XXH32_hash_t reserved32; /*!< Reserved field, needed for padding anyway. */
1632 XXH64_hash_t reserved64; /*!< Reserved field. Do not read or write to it. */
1633}; /* typedef'd to XXH64_state_t */
1634
648db22b 1635#ifndef XXH_NO_XXH3
1636
1637#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* >= C11 */
1638# include <stdalign.h>
1639# define XXH_ALIGN(n) alignas(n)
1640#elif defined(__cplusplus) && (__cplusplus >= 201103L) /* >= C++11 */
1641/* In C++ alignas() is a keyword */
1642# define XXH_ALIGN(n) alignas(n)
1643#elif defined(__GNUC__)
1644# define XXH_ALIGN(n) __attribute__ ((aligned(n)))
1645#elif defined(_MSC_VER)
1646# define XXH_ALIGN(n) __declspec(align(n))
1647#else
1648# define XXH_ALIGN(n) /* disabled */
1649#endif
1650
1651/* Old GCC versions only accept the attribute after the type in structures. */
1652#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)) /* C11+ */ \
1653 && ! (defined(__cplusplus) && (__cplusplus >= 201103L)) /* >= C++11 */ \
1654 && defined(__GNUC__)
1655# define XXH_ALIGN_MEMBER(align, type) type XXH_ALIGN(align)
1656#else
1657# define XXH_ALIGN_MEMBER(align, type) XXH_ALIGN(align) type
1658#endif
1659
1660/*!
1661 * @brief The size of the internal XXH3 buffer.
1662 *
1663 * This is the optimal update size for incremental hashing.
1664 *
1665 * @see XXH3_64b_update(), XXH3_128b_update().
1666 */
1667#define XXH3_INTERNALBUFFER_SIZE 256
1668
1669/*!
f535537f 1670 * @internal
648db22b 1671 * @brief Default size of the secret buffer (and @ref XXH3_kSecret).
1672 *
1673 * This is the size used in @ref XXH3_kSecret and the seeded functions.
1674 *
1675 * Not to be confused with @ref XXH3_SECRET_SIZE_MIN.
1676 */
1677#define XXH3_SECRET_DEFAULT_SIZE 192
1678
1679/*!
1680 * @internal
1681 * @brief Structure for XXH3 streaming API.
1682 *
1683 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
1684 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined.
1685 * Otherwise it is an opaque type.
1686 * Never use this definition in combination with a dynamic library.
1687 * This allows fields to safely be changed in the future.
1688 *
1689 * @note ** This structure has a strict alignment requirement of 64 bytes!! **
1690 * Do not allocate this with `malloc()` or `new`,
1691 * it will not be sufficiently aligned.
1692 * Use @ref XXH3_createState() and @ref XXH3_freeState(), or stack allocation.
1693 *
1694 * Typedef'd to @ref XXH3_state_t.
1695 * Never access the members of this struct directly.
1696 *
1697 * @see XXH3_INITSTATE() for stack initialization.
1698 * @see XXH3_createState(), XXH3_freeState().
1699 * @see XXH32_state_s, XXH64_state_s
1700 */
1701struct XXH3_state_s {
1702 XXH_ALIGN_MEMBER(64, XXH64_hash_t acc[8]);
f535537f 1703 /*!< The 8 accumulators. See @ref XXH32_state_s::v and @ref XXH64_state_s::v */
648db22b 1704 XXH_ALIGN_MEMBER(64, unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]);
1705 /*!< Used to store a custom secret generated from a seed. */
1706 XXH_ALIGN_MEMBER(64, unsigned char buffer[XXH3_INTERNALBUFFER_SIZE]);
1707 /*!< The internal buffer. @see XXH32_state_s::mem32 */
1708 XXH32_hash_t bufferedSize;
1709 /*!< The amount of memory in @ref buffer, @see XXH32_state_s::memsize */
1710 XXH32_hash_t useSeed;
1711 /*!< Reserved field. Needed for padding on 64-bit. */
1712 size_t nbStripesSoFar;
1713 /*!< Number of stripes processed. */
1714 XXH64_hash_t totalLen;
1715 /*!< Total length hashed. 64-bit even on 32-bit targets. */
1716 size_t nbStripesPerBlock;
1717 /*!< Number of stripes per block. */
1718 size_t secretLimit;
1719 /*!< Size of @ref customSecret or @ref extSecret */
1720 XXH64_hash_t seed;
1721 /*!< Seed for _withSeed variants. Must be zero otherwise, @see XXH3_INITSTATE() */
1722 XXH64_hash_t reserved64;
1723 /*!< Reserved field. */
1724 const unsigned char* extSecret;
1725 /*!< Reference to an external secret for the _withSecret variants, NULL
1726 * for other variants. */
1727 /* note: there may be some padding at the end due to alignment on 64 bytes */
1728}; /* typedef'd to XXH3_state_t */
1729
1730#undef XXH_ALIGN_MEMBER
1731
1732/*!
1733 * @brief Initializes a stack-allocated `XXH3_state_s`.
1734 *
1735 * When an @ref XXH3_state_t structure is merely placed on the stack,
1736 * it should be initialized with XXH3_INITSTATE() or a memset()
1737 * if its first reset uses XXH3_NNbits_reset_withSeed().
1738 * This init can be omitted if the first reset uses default or _withSecret mode.
1739 * This operation isn't necessary when the state is created with XXH3_createState().
1740 * Note that this doesn't prepare the state for a streaming operation,
1741 * it's still necessary to use XXH3_NNbits_reset*() afterwards.
1742 */
f535537f 1743#define XXH3_INITSTATE(XXH3_state_ptr) \
1744 do { \
1745 XXH3_state_t* tmp_xxh3_state_ptr = (XXH3_state_ptr); \
1746 tmp_xxh3_state_ptr->seed = 0; \
1747 tmp_xxh3_state_ptr->extSecret = NULL; \
1748 } while(0)
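/*
 * Example code for XXH3_INITSTATE(): a minimal sketch of a stack-allocated
 * state whose first reset uses a seed, which is exactly the case that
 * requires initialization (the helper name is illustrative).
 * @code{.c}
 * #define XXH_STATIC_LINKING_ONLY   // expose XXH3_state_t's definition
 * #include "xxhash.h"
 * static XXH64_hash_t xxh3_seeded_on_stack(const void* data, size_t len,
 *                                          XXH64_hash_t seed)
 * {
 *     XXH3_state_t state;          // 64-byte alignment comes from the type
 *     XXH3_INITSTATE(&state);      // required before _reset_withSeed()
 *     (void)XXH3_64bits_reset_withSeed(&state, seed);
 *     (void)XXH3_64bits_update(&state, data, len);
 *     return XXH3_64bits_digest(&state);
 * }
 * @endcode
 */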
648db22b 1749
1750
f535537f 1751/*!
1752 * @brief Calculates the 128-bit hash of @p data using XXH3.
1753 *
1754 * @param data The block of data to be hashed, at least @p len bytes in size.
1755 * @param len The length of @p data, in bytes.
1756 * @param seed The 64-bit seed to alter the hash's output predictably.
1757 *
1758 * @pre
1759 * The memory between @p data and @p data + @p len must be valid,
1760 * readable, contiguous memory. However, if @p len is `0`, @p data may be
1761 * `NULL`. In C++, this also must be *TriviallyCopyable*.
1762 *
1763 * @return The calculated 128-bit XXH3 value.
1764 *
1765 * @see @ref single_shot_example "Single Shot Example" for an example.
648db22b 1766 */
f535537f 1767XXH_PUBLIC_API XXH_PUREF XXH128_hash_t XXH128(XXH_NOESCAPE const void* data, size_t len, XXH64_hash_t seed);
648db22b 1768
1769
1770/* === Experimental API === */
1771/* Symbols defined below must be considered tied to a specific library version. */
1772
f535537f 1773/*!
1774 * @brief Derive a high-entropy secret from any user-defined content, named customSeed.
1775 *
1776 * @param secretBuffer A writable buffer for derived high-entropy secret data.
1777 * @param secretSize Size of secretBuffer, in bytes. Must be >= XXH3_SECRET_SIZE_MIN.
1778 * @param customSeed A user-defined content.
1779 * @param customSeedSize Size of customSeed, in bytes.
1780 *
1781 * @return @ref XXH_OK on success.
1782 * @return @ref XXH_ERROR on failure.
648db22b 1783 *
648db22b 1784 * The generated secret can be used in combination with `*_withSecret()` functions.
f535537f 1785 * The `_withSecret()` variants are useful for providing a higher level of protection
1786 * than a 64-bit seed, as it becomes much more difficult for an external actor to
1787 * guess how to impact the calculation logic.
648db22b 1788 *
1789 * The function accepts as input a custom seed of any length and any content,
f535537f 1790 * and derives from it a high-entropy secret of length @p secretSize into an
1791 * already allocated buffer @p secretBuffer.
648db22b 1792 *
1793 * The generated secret can then be used with any `*_withSecret()` variant.
f535537f 1794 * The functions @ref XXH3_128bits_withSecret(), @ref XXH3_64bits_withSecret(),
1795 * @ref XXH3_128bits_reset_withSecret() and @ref XXH3_64bits_reset_withSecret()
648db22b 1796 * are part of this list. They all accept a `secret` parameter
f535537f 1797 * which must be large enough for implementation reasons (>= @ref XXH3_SECRET_SIZE_MIN)
648db22b 1798 * _and_ feature very high entropy (consist of random-looking bytes).
f535537f 1799 * These conditions can be a high bar to meet, so @ref XXH3_generateSecret() can
1800 * be employed to ensure proper quality.
648db22b 1801 *
f535537f 1802 * @p customSeed can be anything. It can be of any size, even a small one,
1803 * and its content can be anything, even "poor entropy" sources such as a bunch
1804 * of zeroes. The resulting `secret` will nonetheless provide all required qualities.
648db22b 1805 *
f535537f 1806 * @pre
1807 * - @p secretSize must be >= @ref XXH3_SECRET_SIZE_MIN
1808 * - When @p customSeedSize > 0, supplying NULL as customSeed is undefined behavior.
1809 *
1810 * Example code:
1811 * @code{.c}
1812 * #include <stdio.h>
1813 * #include <stdlib.h>
1814 * #include <string.h>
1815 * #define XXH_STATIC_LINKING_ONLY // expose unstable API
1816 * #include "xxhash.h"
1817 * // Hashes argv[2] using the entropy from argv[1].
1818 * int main(int argc, char* argv[])
1819 * {
1820 * char secret[XXH3_SECRET_SIZE_MIN];
1821 * if (argc != 3) { return 1; }
1822 * XXH3_generateSecret(secret, sizeof(secret), argv[1], strlen(argv[1]));
1823 * XXH64_hash_t h = XXH3_64bits_withSecret(
1824 * argv[2], strlen(argv[2]),
1825 * secret, sizeof(secret)
1826 * );
1827 * printf("%016llx\n", (unsigned long long) h);
1828 * }
1829 * @endcode
648db22b 1830 */
f535537f 1831XXH_PUBLIC_API XXH_errorcode XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize);
648db22b 1832
f535537f 1833/*!
1834 * @brief Generate the same secret as the _withSeed() variants.
648db22b 1835 *
f535537f 1836 * @param secretBuffer A writable buffer of @ref XXH3_SECRET_DEFAULT_SIZE bytes
1837 * @param seed The 64-bit seed to alter the hash result predictably.
648db22b 1838 *
1839 * The generated secret can be used in combination with
1840 * `*_withSecret()` and `_withSecretandSeed()` variants.
f535537f 1841 *
1842 * Example C++ `std::string` hash class:
1843 * @code{.cpp}
1844 * #include <string>
1845 * #define XXH_STATIC_LINKING_ONLY // expose unstable API
1846 * #include "xxhash.h"
1847 * // Slow, seeds each time
1848 * class HashSlow {
1849 * XXH64_hash_t seed;
1850 * public:
1851 * HashSlow(XXH64_hash_t s) : seed{s} {}
1852 * size_t operator()(const std::string& x) const {
1853 * return size_t{XXH3_64bits_withSeed(x.c_str(), x.length(), seed)};
1854 * }
1855 * };
1856 * // Fast, caches the seeded secret for future uses.
1857 * class HashFast {
1858 * unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
1859 * public:
1860 * HashFast(XXH64_hash_t s) {
1861 * XXH3_generateSecret_fromSeed(secret, s);
1862 * }
1863 * size_t operator()(const std::string& x) const {
1864 * return size_t{
1865 * XXH3_64bits_withSecret(x.c_str(), x.length(), secret, sizeof(secret))
1866 * };
1867 * }
1868 * };
1869 * @endcode
648db22b 1870 */
f535537f 1871XXH_PUBLIC_API void XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed);
648db22b 1872
f535537f 1873/*!
1874 * @brief Calculates the 64-bit seeded variant of XXH3 hash of @p data.
1875 *
1876 * @param data The block of data to be hashed, at least @p len bytes in size.
1877 * @param len The length of @p data, in bytes.
1878 * @param secret The secret data.
1879 * @param secretSize The length of @p secret, in bytes.
1880 * @param seed The 64-bit seed to alter the hash result predictably.
1881 *
648db22b 1882 * These variants generate hash values using either
f535537f 1883 * @p seed for "short" keys (< @ref XXH3_MIDSIZE_MAX = 240 bytes)
1884 * or @p secret for "large" keys (>= @ref XXH3_MIDSIZE_MAX).
648db22b 1885 *
1886 * This generally benefits speed, compared to `_withSeed()` or `_withSecret()`.
1887 * `_withSeed()` has to generate the secret on the fly for "large" keys.
1888 * It's fast, but the cost can be perceptible for "not so large" keys (< 1 KB).
1889 * `_withSecret()` has to generate the masks on the fly for "small" keys,
1890 * which requires more instructions than _withSeed() variants.
1891 * Therefore, the _withSecretandSeed() variant combines the best of both worlds.
1892 *
f535537f 1893 * When @p secret has been generated by XXH3_generateSecret_fromSeed(),
648db22b 1894 * this variant produces *exactly* the same results as `_withSeed()` variant,
1895 * hence offering only a pure speed benefit on "large" input,
1896 * by skipping the need to regenerate the secret for every large input.
1897 *
1898 * Another usage scenario is to hash the secret to a 64-bit hash value,
1899 * for example with XXH3_64bits(), which then becomes the seed,
1900 * and then employ both the seed and the secret in _withSecretandSeed().
1901 * On top of speed, an added benefit is that each bit in the secret
f535537f 1902 * has a 50% chance to flip each bit of the output, via its impact on the seed.
1903 *
648db22b 1904 * This is not guaranteed when using the secret directly in "small data" scenarios,
1905 * because only portions of the secret are employed for small data.
1906 */
f535537f 1907XXH_PUBLIC_API XXH_PUREF XXH64_hash_t
1908XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* data, size_t len,
1909 XXH_NOESCAPE const void* secret, size_t secretSize,
648db22b 1910 XXH64_hash_t seed);
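/*
 * Example code for XXH3_64bits_withSecretandSeed(): a minimal sketch pairing
 * it with XXH3_generateSecret_fromSeed() (the helper name and the caching
 * strategy are illustrative).
 * @code{.c}
 * #define XXH_STATIC_LINKING_ONLY   // for XXH3_SECRET_DEFAULT_SIZE
 * #include "xxhash.h"
 * // In real code, generate the secret once and cache it per seed;
 * // it is regenerated on each call here only for brevity.
 * static XXH64_hash_t hash64_fast(const void* data, size_t len, XXH64_hash_t seed)
 * {
 *     unsigned char secret[XXH3_SECRET_DEFAULT_SIZE];
 *     XXH3_generateSecret_fromSeed(secret, seed);
 *     // Returns exactly the same value as XXH3_64bits_withSeed(data, len, seed),
 *     // while skipping secret regeneration for large inputs.
 *     return XXH3_64bits_withSecretandSeed(data, len, secret, sizeof(secret), seed);
 * }
 * @endcode
 */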
f535537f 1911/*!
1912 * @brief Calculates the 128-bit seeded variant of XXH3 hash of @p input.
1913 *
1914 * @param input The block of data to be hashed, at least @p length bytes in size.
1915 * @param length The length of @p input, in bytes.
1916 * @param secret The secret data.
1917 * @param secretSize The length of @p secret, in bytes.
1918 * @param seed64 The 64-bit seed to alter the hash result predictably.
1919 *
1920 * @return The calculated 128-bit variant of XXH3 value.
1922 *
1923 * @see XXH3_64bits_withSecretandSeed()
1924 */
1925XXH_PUBLIC_API XXH_PUREF XXH128_hash_t
1926XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length,
1927 XXH_NOESCAPE const void* secret, size_t secretSize,
648db22b 1928 XXH64_hash_t seed64);
f535537f 1929#ifndef XXH_NO_STREAM
1930/*!
1931 * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
1932 *
1933 * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
1934 * @param secret The secret data.
1935 * @param secretSize The length of @p secret, in bytes.
1936 * @param seed64 The 64-bit seed to alter the hash result predictably.
1937 *
1938 * @return @ref XXH_OK on success.
1939 * @return @ref XXH_ERROR on failure.
1940 *
1941 * @see XXH3_64bits_withSecretandSeed()
1942 */
648db22b 1943XXH_PUBLIC_API XXH_errorcode
f535537f 1944XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
1945 XXH_NOESCAPE const void* secret, size_t secretSize,
648db22b 1946 XXH64_hash_t seed64);
f535537f 1947/*!
1948 * @brief Resets an @ref XXH3_state_t with secret data to begin a new hash.
1949 *
1950 * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
1951 * @param secret The secret data.
1952 * @param secretSize The length of @p secret, in bytes.
1953 * @param seed64 The 64-bit seed to alter the hash result predictably.
1954 *
1955 * @return @ref XXH_OK on success.
1956 * @return @ref XXH_ERROR on failure.
1957 *
1958 * @see XXH3_64bits_withSecretandSeed()
1959 */
648db22b 1960XXH_PUBLIC_API XXH_errorcode
f535537f 1961XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr,
1962 XXH_NOESCAPE const void* secret, size_t secretSize,
648db22b 1963 XXH64_hash_t seed64);
f535537f 1964#endif /* !XXH_NO_STREAM */
648db22b 1965
f535537f 1966#endif /* !XXH_NO_XXH3 */
648db22b 1967#endif /* XXH_NO_LONG_LONG */
1968#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
1969# define XXH_IMPLEMENTATION
1970#endif
1971
1972#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
1973
1974
1975/* ======================================================================== */
1976/* ======================================================================== */
1977/* ======================================================================== */
1978
1979
1980/*-**********************************************************************
1981 * xxHash implementation
1982 *-**********************************************************************
1983 * xxHash's implementation used to be hosted inside xxhash.c.
1984 *
1985 * However, inlining requires the implementation to be visible to the compiler,
1986 * hence it must be included alongside the header.
1987 * Previously, implementation was hosted inside xxhash.c,
1988 * which was then #included when inlining was activated.
1989 * This construction created issues with a few build and install systems,
1990 * as it required xxhash.c to be stored in /include directory.
1991 *
1992 * xxHash implementation is now directly integrated within xxhash.h.
1993 * As a consequence, xxhash.c is no longer needed in /include.
1994 *
1995 * xxhash.c is still available and is still useful.
1996 * In a "normal" setup, when xxhash is not inlined,
1997 * xxhash.h only exposes the prototypes and public symbols,
1998 * while xxhash.c can be built into an object file xxhash.o
1999 * which can then be linked into the final binary.
2000 ************************************************************************/
2001
2002#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
2003 || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
2004# define XXH_IMPLEM_13a8737387
2005
2006/* *************************************
2007* Tuning parameters
2008***************************************/
2009
2010/*!
2011 * @defgroup tuning Tuning parameters
2012 * @{
2013 *
2014 * Various macros to control xxHash's behavior.
2015 */
2016#ifdef XXH_DOXYGEN
2017/*!
2018 * @brief Define this to disable 64-bit code.
2019 *
f535537f 2020 * Useful if you only use the @ref XXH32_family and have a strict C90 compiler.
648db22b 2021 */
2022# define XXH_NO_LONG_LONG
2023# undef XXH_NO_LONG_LONG /* don't actually */
2024/*!
2025 * @brief Controls how unaligned memory is accessed.
2026 *
2027 * By default, access to unaligned memory is controlled by `memcpy()`, which is
2028 * safe and portable.
2029 *
2030 * Unfortunately, on some target/compiler combinations, the generated assembly
2031 * is sub-optimal.
2032 *
2033 * The switch below allows selection of a different access method
2034 * in the search for improved performance.
2035 *
2036 * @par Possible options:
2037 *
2038 * - `XXH_FORCE_MEMORY_ACCESS=0` (default): `memcpy`
2039 * @par
2040 * Use `memcpy()`. Safe and portable. Note that most modern compilers will
2041 * eliminate the function call and treat it as an unaligned access.
2042 *
f535537f 2043 * - `XXH_FORCE_MEMORY_ACCESS=1`: `__attribute__((aligned(1)))`
648db22b 2044 * @par
2045 * Depends on compiler extensions and is therefore not portable.
2046 * This method is safe _if_ your compiler supports it,
2047 * and *generally* as fast or faster than `memcpy`.
2048 *
2049 * - `XXH_FORCE_MEMORY_ACCESS=2`: Direct cast
2050 * @par
2051 * Casts directly and dereferences. This method doesn't depend on the
2052 * compiler, but it violates the C standard as it directly dereferences an
2053 * unaligned pointer. It can generate buggy code on targets which do not
2054 * support unaligned memory accesses, but in some circumstances, it's the
2055 * only known way to get the most performance.
2056 *
2057 * - `XXH_FORCE_MEMORY_ACCESS=3`: Byteshift
2058 * @par
2059 * Also portable. This can generate the best code on old compilers which don't
2060 * inline small `memcpy()` calls, and it might also be faster on big-endian
2061 * systems which lack a native byteswap instruction. However, some compilers
2062 * will emit literal byteshifts even if the target supports unaligned access.
f535537f 2063 *
648db22b 2064 *
2065 * @warning
2066 * Methods 1 and 2 rely on implementation-defined behavior. Use these with
2067 * care, as what works on one compiler/platform/optimization level may cause
2068 * another to read garbage data or even crash.
2069 *
2070 * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
2071 *
2072 * Prefer these methods in priority order (0 > 3 > 1 > 2)
2073 */
2074# define XXH_FORCE_MEMORY_ACCESS 0
2075
f535537f 2076/*!
2077 * @def XXH_SIZE_OPT
2078 * @brief Controls how much xxHash optimizes for size.
2079 *
2080 * xxHash, when compiled, tends to result in a rather large binary size. This
2081 * is mostly due to heavy usage to forced inlining and constant folding of the
2082 * @ref XXH3_family to increase performance.
2083 *
2084 * However, some developers prefer size over speed. This option can
2085 * significantly reduce the size of the generated code. When using the `-Os`
2086 * or `-Oz` options on GCC or Clang, this is defined to 1 by default,
2087 * otherwise it is defined to 0.
2088 *
2089 * Most of these size optimizations can be controlled manually.
2090 *
2091 * This is a number from 0-2.
2092 * - `XXH_SIZE_OPT` == 0: Default. xxHash makes no size optimizations. Speed
2093 * comes first.
2094 * - `XXH_SIZE_OPT` == 1: Default for `-Os` and `-Oz`. xxHash is more
2095 * conservative and disables hacks that increase code size. It implies the
2096 * options @ref XXH_NO_INLINE_HINTS == 1, @ref XXH_FORCE_ALIGN_CHECK == 0,
2097 * and @ref XXH3_NEON_LANES == 8 if they are not already defined.
2098 * - `XXH_SIZE_OPT` == 2: xxHash tries to make itself as small as possible.
2099 * Performance may cry. For example, the single shot functions just use the
2100 * streaming API.
2101 */
2102# define XXH_SIZE_OPT 0
2103
648db22b 2104/*!
2105 * @def XXH_FORCE_ALIGN_CHECK
2106 * @brief If defined to non-zero, adds a special path for aligned inputs (XXH32()
2107 * and XXH64() only).
2108 *
2109 * This is an important performance trick for architectures without decent
2110 * unaligned memory access performance.
2111 *
2112 * It checks for input alignment, and when conditions are met, uses a "fast
2113 * path" employing direct 32-bit/64-bit reads, resulting in _dramatically
2114 * faster_ read speed.
2115 *
2116 * The check costs one initial branch per hash, which is generally negligible,
2117 * but not zero.
2118 *
2119 * Moreover, it's not useful to generate an additional code path if memory
2120 * access uses the same instruction for both aligned and unaligned
2121 * addresses (e.g. x86 and aarch64).
2122 *
2123 * In these cases, the alignment check can be removed by setting this macro to 0.
2124 * Then the code will always use unaligned memory access.
f535537f 2125 * Align check is automatically disabled on x86, x64, ARM64, and some ARM chips
648db22b 2126 * which are platforms known to offer good unaligned memory accesses performance.
2127 *
f535537f 2128 * It is also disabled by default when @ref XXH_SIZE_OPT >= 1.
2129 *
648db22b 2130 * This option does not affect XXH3 (only XXH32 and XXH64).
2131 */
2132# define XXH_FORCE_ALIGN_CHECK 0
2133
2134/*!
2135 * @def XXH_NO_INLINE_HINTS
2136 * @brief When non-zero, sets all functions to `static`.
2137 *
2138 * By default, xxHash tries to force the compiler to inline almost all internal
2139 * functions.
2140 *
2141 * This can usually improve performance due to reduced jumping and improved
2142 * constant folding, but significantly increases the size of the binary which
2143 * might not be favorable.
2144 *
2145 * Additionally, sometimes the forced inlining can be detrimental to performance,
2146 * depending on the architecture.
2147 *
2148 * XXH_NO_INLINE_HINTS marks all internal functions as static, giving the
2149 * compiler full control on whether to inline or not.
2150 *
f535537f 2151 * When not optimizing (-O0), using `-fno-inline` with GCC or Clang, or if
2152 * @ref XXH_SIZE_OPT >= 1, this will automatically be defined.
648db22b 2153 */
2154# define XXH_NO_INLINE_HINTS 0
2155
f535537f 2156/*!
2157 * @def XXH3_INLINE_SECRET
2158 * @brief Determines whether to inline the XXH3 withSecret code.
2159 *
2160 * When the secret size is known, the compiler can improve the performance
2161 * of XXH3_64bits_withSecret() and XXH3_128bits_withSecret().
2162 *
2163 * However, if the secret size is not known, it doesn't have any benefit. This
2164 * happens when xxHash is compiled into a global symbol. Therefore, if
2165 * @ref XXH_INLINE_ALL is *not* defined, this will be defined to 0.
2166 *
2167 * Additionally, this defaults to 0 on GCC 12+, which has an issue with function pointers
2168 * that are *sometimes* force-inlined on -Og, and it is impossible to automatically
2169 * detect this optimization level.
2170 */
2171# define XXH3_INLINE_SECRET 0
2172
648db22b 2173/*!
2174 * @def XXH32_ENDJMP
2175 * @brief Whether to use a jump for `XXH32_finalize`.
2176 *
2177 * For performance, `XXH32_finalize` uses multiple branches in the finalizer.
2178 * This is generally preferable for performance,
2179 * but depending on exact architecture, a jmp may be preferable.
2180 *
2181 * This setting can only possibly make a difference for very small inputs.
2182 */
2183# define XXH32_ENDJMP 0
2184
2185/*!
2186 * @internal
2187 * @brief Redefines old internal names.
2188 *
2189 * For compatibility with code that uses xxHash's internals before the names
2190 * were changed to improve namespacing. There is no other reason to use this.
2191 */
2192# define XXH_OLD_NAMES
2193# undef XXH_OLD_NAMES /* don't actually use, it is ugly. */
f535537f 2194
2195/*!
2196 * @def XXH_NO_STREAM
2197 * @brief Disables the streaming API.
2198 *
2199 * When xxHash is not inlined and the streaming functions are not used, disabling
2200 * the streaming functions can improve code size significantly, especially with
2201 * the @ref XXH3_family, which tends to make constant-folded copies of itself.
2202 */
2203# define XXH_NO_STREAM
2204# undef XXH_NO_STREAM /* don't actually */
648db22b 2205#endif /* XXH_DOXYGEN */
2206/*!
2207 * @}
2208 */
2209
2210#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
f535537f 2211 /* prefer __packed__ structures (method 1) for GCC;
2212 * however, GCC on < ARMv7 with unaligned access (e.g. Raspbian armhf) still emits byte shifting
2213 * for method 1, so we use memcpy, which for some reason does unaligned loads. */
2214# if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
648db22b 2215# define XXH_FORCE_MEMORY_ACCESS 1
2216# endif
2217#endif
2218
f535537f 2219#ifndef XXH_SIZE_OPT
2220 /* default to 1 for -Os or -Oz */
2221# if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
2222# define XXH_SIZE_OPT 1
2223# else
2224# define XXH_SIZE_OPT 0
2225# endif
2226#endif
2227
648db22b 2228#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
f535537f 2229 /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
2230# if XXH_SIZE_OPT >= 1 || \
2231 defined(__i386) || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
2232 || defined(_M_IX86) || defined(_M_X64) || defined(_M_ARM64) || defined(_M_ARM) /* visual */
648db22b 2233# define XXH_FORCE_ALIGN_CHECK 0
2234# else
2235# define XXH_FORCE_ALIGN_CHECK 1
2236# endif
2237#endif
2238
2239#ifndef XXH_NO_INLINE_HINTS
f535537f 2240# if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__) /* -O0, -fno-inline */
648db22b 2241# define XXH_NO_INLINE_HINTS 1
2242# else
2243# define XXH_NO_INLINE_HINTS 0
2244# endif
2245#endif
2246
f535537f 2247#ifndef XXH3_INLINE_SECRET
2248# if (defined(__GNUC__) && !defined(__clang__) && __GNUC__ >= 12) \
2249 || !defined(XXH_INLINE_ALL)
2250# define XXH3_INLINE_SECRET 0
2251# else
2252# define XXH3_INLINE_SECRET 1
2253# endif
2254#endif
2255
648db22b 2256#ifndef XXH32_ENDJMP
2257/* generally preferable for performance */
2258# define XXH32_ENDJMP 0
2259#endif
2260
2261/*!
2262 * @defgroup impl Implementation
2263 * @{
2264 */
2265
2266
2267/* *************************************
2268* Includes & Memory related functions
2269***************************************/
f535537f 2270#if defined(XXH_NO_STREAM)
2271/* nothing */
2272#elif defined(XXH_NO_STDLIB)
2273
2274/* When requested to disable any mention of stdlib,
2275 * the library loses the ability to invoke malloc() / free().
2276 * In practice, it means that functions like `XXH*_createState()`
2277 * will always fail, and return NULL.
2278 * This flag is useful in situations where
2279 * xxhash.h is integrated into some kernel, embedded or limited environment
2280 * without access to dynamic allocation.
2281 */
2282
2283static XXH_CONSTF void* XXH_malloc(size_t s) { (void)s; return NULL; }
2284static void XXH_free(void* p) { (void)p; }
2285
2286#else
2287
2288/*
2289 * Modify the local functions below should you wish to use
2290 * different memory routines for malloc() and free()
2291 */
2292#include <stdlib.h>
2293
2294/*!
2295 * @internal
2296 * @brief Modify this function to use a different routine than malloc().
2297 */
2298static XXH_MALLOCF void* XXH_malloc(size_t s) { return malloc(s); }
2299
2300/*!
2301 * @internal
2302 * @brief Modify this function to use a different routine than free().
2303 */
2304static void XXH_free(void* p) { free(p); }
2305
2306#endif /* XXH_NO_STDLIB */
2307
2308#include <string.h>
2309
2310/*!
2311 * @internal
2312 * @brief Modify this function to use a different routine than memcpy().
2313 */
2314static void* XXH_memcpy(void* dest, const void* src, size_t size)
2315{
2316 return memcpy(dest,src,size);
2317}
2318
2319#include <limits.h> /* ULLONG_MAX */
648db22b 2320
2321
2322/* *************************************
2323* Compiler Specific Options
2324***************************************/
2325#ifdef _MSC_VER /* Visual Studio warning fix */
2326# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
2327#endif
2328
2329#if XXH_NO_INLINE_HINTS /* disable inlining hints */
2330# if defined(__GNUC__) || defined(__clang__)
2331# define XXH_FORCE_INLINE static __attribute__((unused))
2332# else
2333# define XXH_FORCE_INLINE static
2334# endif
2335# define XXH_NO_INLINE static
2336/* enable inlining hints */
2337#elif defined(__GNUC__) || defined(__clang__)
2338# define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
2339# define XXH_NO_INLINE static __attribute__((noinline))
2340#elif defined(_MSC_VER) /* Visual Studio */
2341# define XXH_FORCE_INLINE static __forceinline
2342# define XXH_NO_INLINE static __declspec(noinline)
2343#elif defined (__cplusplus) \
2344 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)) /* C99 */
2345# define XXH_FORCE_INLINE static inline
2346# define XXH_NO_INLINE static
2347#else
2348# define XXH_FORCE_INLINE static
2349# define XXH_NO_INLINE static
2350#endif
2351
f535537f 2352#if XXH3_INLINE_SECRET
2353# define XXH3_WITH_SECRET_INLINE XXH_FORCE_INLINE
2354#else
2355# define XXH3_WITH_SECRET_INLINE XXH_NO_INLINE
2356#endif
648db22b 2357
2358
2359/* *************************************
2360* Debug
2361***************************************/
2362/*!
2363 * @ingroup tuning
2364 * @def XXH_DEBUGLEVEL
2365 * @brief Sets the debugging level.
2366 *
2367 * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
2368 * compiler's command line options. The value must be a number.
2369 */
2370#ifndef XXH_DEBUGLEVEL
2371# ifdef DEBUGLEVEL /* backwards compat */
2372# define XXH_DEBUGLEVEL DEBUGLEVEL
2373# else
2374# define XXH_DEBUGLEVEL 0
2375# endif
2376#endif
2377
2378#if (XXH_DEBUGLEVEL>=1)
2379# include <assert.h> /* note: can still be disabled with NDEBUG */
2380# define XXH_ASSERT(c) assert(c)
2381#else
f535537f 2382# if defined(__INTEL_COMPILER)
2383# define XXH_ASSERT(c) XXH_ASSUME((unsigned char) (c))
2384# else
2385# define XXH_ASSERT(c) XXH_ASSUME(c)
2386# endif
648db22b 2387#endif
2388
2389/* note: use after variable declarations */
2390#ifndef XXH_STATIC_ASSERT
2391# if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11 */
f535537f 2392# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
648db22b 2393# elif defined(__cplusplus) && (__cplusplus >= 201103L) /* C++11 */
2394# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
2395# else
2396# define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
2397# endif
2398# define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
2399#endif
2400
2401/*!
2402 * @internal
2403 * @def XXH_COMPILER_GUARD(var)
2404 * @brief Used to prevent unwanted optimizations for @p var.
2405 *
2406 * It uses an empty GCC inline assembly statement with a register constraint
f535537f 2407 * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
648db22b 2408 * on x86) and marks it as modified.
2409 *
2410 * This is used in a few places to avoid unwanted autovectorization (e.g.
2411 * XXH32_round()). All vectorization we want is explicit via intrinsics,
2412 * and _usually_ isn't wanted elsewhere.
2413 *
2414 * We also use it to prevent unwanted constant folding for AArch64 in
2415 * XXH3_initCustomSecret_scalar().
2416 */
2417#if defined(__GNUC__) || defined(__clang__)
f535537f 2418# define XXH_COMPILER_GUARD(var) __asm__("" : "+r" (var))
648db22b 2419#else
2420# define XXH_COMPILER_GUARD(var) ((void)0)
2421#endif
2422
f535537f 2423/* Specifically for NEON vectors which use the "w" constraint, on
2424 * Clang. */
2425#if defined(__clang__) && defined(__ARM_ARCH) && !defined(__wasm__)
2426# define XXH_COMPILER_GUARD_CLANG_NEON(var) __asm__("" : "+w" (var))
2427#else
2428# define XXH_COMPILER_GUARD_CLANG_NEON(var) ((void)0)
2429#endif
2430
648db22b 2431/* *************************************
2432* Basic Types
2433***************************************/
2434#if !defined (__VMS) \
2435 && (defined (__cplusplus) \
2436 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
f535537f 2437# ifdef _AIX
2438# include <inttypes.h>
2439# else
2440# include <stdint.h>
2441# endif
648db22b 2442 typedef uint8_t xxh_u8;
2443#else
2444 typedef unsigned char xxh_u8;
2445#endif
2446typedef XXH32_hash_t xxh_u32;
2447
2448#ifdef XXH_OLD_NAMES
f535537f 2449# warning "XXH_OLD_NAMES is planned to be removed starting v0.9. If the program depends on it, consider moving away from it by employing newer type names directly"
648db22b 2450# define BYTE xxh_u8
2451# define U8 xxh_u8
2452# define U32 xxh_u32
2453#endif
2454
2455/* *** Memory access *** */
2456
2457/*!
2458 * @internal
2459 * @fn xxh_u32 XXH_read32(const void* ptr)
2460 * @brief Reads an unaligned 32-bit integer from @p ptr in native endianness.
2461 *
2462 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
2463 *
2464 * @param ptr The pointer to read from.
2465 * @return The 32-bit native endian integer from the bytes at @p ptr.
2466 */
2467
2468/*!
2469 * @internal
2470 * @fn xxh_u32 XXH_readLE32(const void* ptr)
2471 * @brief Reads an unaligned 32-bit little endian integer from @p ptr.
2472 *
2473 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
2474 *
2475 * @param ptr The pointer to read from.
2476 * @return The 32-bit little endian integer from the bytes at @p ptr.
2477 */
2478
2479/*!
2480 * @internal
2481 * @fn xxh_u32 XXH_readBE32(const void* ptr)
2482 * @brief Reads an unaligned 32-bit big endian integer from @p ptr.
2483 *
2484 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
2485 *
2486 * @param ptr The pointer to read from.
2487 * @return The 32-bit big endian integer from the bytes at @p ptr.
2488 */
2489
2490/*!
2491 * @internal
2492 * @fn xxh_u32 XXH_readLE32_align(const void* ptr, XXH_alignment align)
2493 * @brief Like @ref XXH_readLE32(), but has an option for aligned reads.
2494 *
2495 * Affected by @ref XXH_FORCE_MEMORY_ACCESS.
2496 * Note that when @ref XXH_FORCE_ALIGN_CHECK == 0, the @p align parameter is
2497 * always @ref XXH_alignment::XXH_unaligned.
2498 *
2499 * @param ptr The pointer to read from.
2500 * @param align Whether @p ptr is aligned.
2501 * @pre
2502 * If @p align == @ref XXH_alignment::XXH_aligned, @p ptr must be 4 byte
2503 * aligned.
2504 * @return The 32-bit little endian integer from the bytes at @p ptr.
2505 */
2506
2507#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2508/*
2509 * Manual byteshift. Best for old compilers which don't inline memcpy.
2510 * We actually directly use XXH_readLE32 and XXH_readBE32.
2511 */
2512#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
2513
2514/*
2515 * Force direct memory access. Only works on CPU which support unaligned memory
2516 * access in hardware.
2517 */
2518static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
2519
2520#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
2521
2522/*
f535537f 2523 * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
2524 * documentation claimed that it only increased the alignment, but actually it
2525 * can decrease it on gcc, clang, and icc:
2526 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
2527 * https://gcc.godbolt.org/z/xYez1j67Y.
648db22b 2528 */
2529#ifdef XXH_OLD_NAMES
2530typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
2531#endif
2532static xxh_u32 XXH_read32(const void* ptr)
2533{
f535537f 2534 typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32;
2535 return *((const xxh_unalign32*)ptr);
648db22b 2536}
2537
2538#else
2539
2540/*
2541 * Portable and safe solution. Generally efficient.
2542 * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
2543 */
2544static xxh_u32 XXH_read32(const void* memPtr)
2545{
2546 xxh_u32 val;
2547 XXH_memcpy(&val, memPtr, sizeof(val));
2548 return val;
2549}
2550
2551#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
2552
2553
2554/* *** Endianness *** */
2555
2556/*!
2557 * @ingroup tuning
2558 * @def XXH_CPU_LITTLE_ENDIAN
2559 * @brief Whether the target is little endian.
2560 *
2561 * Defined to 1 if the target is little endian, or 0 if it is big endian.
2562 * It can be defined externally, for example on the compiler command line.
2563 *
2564 * If it is not defined,
2565 * a runtime check (which is usually constant folded) is used instead.
2566 *
2567 * @note
2568 * This is not necessarily defined to an integer constant.
2569 *
2570 * @see XXH_isLittleEndian() for the runtime check.
2571 */
2572#ifndef XXH_CPU_LITTLE_ENDIAN
2573/*
2574 * Try to detect endianness automatically, to avoid the nonstandard behavior
2575 * in `XXH_isLittleEndian()`
2576 */
2577# if defined(_WIN32) /* Windows is always little endian */ \
2578 || defined(__LITTLE_ENDIAN__) \
2579 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
2580# define XXH_CPU_LITTLE_ENDIAN 1
2581# elif defined(__BIG_ENDIAN__) \
2582 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
2583# define XXH_CPU_LITTLE_ENDIAN 0
2584# else
2585/*!
2586 * @internal
2587 * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
2588 *
2589 * Most compilers will constant fold this.
2590 */
2591static int XXH_isLittleEndian(void)
2592{
2593 /*
2594 * Portable and well-defined behavior.
2595 * Don't use static: it is detrimental to performance.
2596 */
2597 const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
2598 return one.c[0];
2599}
2600# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
2601# endif
2602#endif
2603
2604
2605
2606
2607/* ****************************************
2608* Compiler-specific Functions and Macros
2609******************************************/
2610#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
2611
2612#ifdef __has_builtin
2613# define XXH_HAS_BUILTIN(x) __has_builtin(x)
2614#else
2615# define XXH_HAS_BUILTIN(x) 0
2616#endif
2617
f535537f 2618
2619
2620/*
2621 * C23 and future versions have standard "unreachable()".
2622 * Once it has been implemented reliably we can add it as an
2623 * additional case:
2624 *
2625 * ```
2626 * #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= XXH_C23_VN)
2627 * # include <stddef.h>
2628 * # ifdef unreachable
2629 * # define XXH_UNREACHABLE() unreachable()
2630 * # endif
2631 * #endif
2632 * ```
2633 *
2634 * Note C++23 also has std::unreachable() which can be detected
2635 * as follows:
2636 * ```
2637 * #if defined(__cpp_lib_unreachable) && (__cpp_lib_unreachable >= 202202L)
2638 * # include <utility>
2639 * # define XXH_UNREACHABLE() std::unreachable()
2640 * #endif
2641 * ```
2642 * NB: `__cpp_lib_unreachable` is defined in the `<version>` header.
2643 * We don't use that as including `<utility>` in `extern "C"` blocks
2644 * doesn't work on GCC 12.
2645 */
2646
2647#if XXH_HAS_BUILTIN(__builtin_unreachable)
2648# define XXH_UNREACHABLE() __builtin_unreachable()
2649
2650#elif defined(_MSC_VER)
2651# define XXH_UNREACHABLE() __assume(0)
2652
2653#else
2654# define XXH_UNREACHABLE()
2655#endif
2656
2657#if XXH_HAS_BUILTIN(__builtin_assume)
2658# define XXH_ASSUME(c) __builtin_assume(c)
2659#else
2660# define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }
2661#endif
2662
648db22b 2663/*!
2664 * @internal
2665 * @def XXH_rotl32(x,r)
2666 * @brief 32-bit rotate left.
2667 *
2668 * @param x The 32-bit integer to be rotated.
2669 * @param r The number of bits to rotate.
2670 * @pre
2671 * @p r > 0 && @p r < 32
2672 * @note
2673 * @p x and @p r may be evaluated multiple times.
2674 * @return The rotated result.
2675 */
2676#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
2677 && XXH_HAS_BUILTIN(__builtin_rotateleft64)
2678# define XXH_rotl32 __builtin_rotateleft32
2679# define XXH_rotl64 __builtin_rotateleft64
2680/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
2681#elif defined(_MSC_VER)
2682# define XXH_rotl32(x,r) _rotl(x,r)
2683# define XXH_rotl64(x,r) _rotl64(x,r)
2684#else
2685# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
2686# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
2687#endif
2688
2689/*!
2690 * @internal
2691 * @fn xxh_u32 XXH_swap32(xxh_u32 x)
2692 * @brief A 32-bit byteswap.
2693 *
2694 * @param x The 32-bit integer to byteswap.
2695 * @return @p x, byteswapped.
2696 */
2697#if defined(_MSC_VER) /* Visual Studio */
2698# define XXH_swap32 _byteswap_ulong
2699#elif XXH_GCC_VERSION >= 403
2700# define XXH_swap32 __builtin_bswap32
2701#else
2702static xxh_u32 XXH_swap32 (xxh_u32 x)
2703{
2704 return ((x << 24) & 0xff000000 ) |
2705 ((x << 8) & 0x00ff0000 ) |
2706 ((x >> 8) & 0x0000ff00 ) |
2707 ((x >> 24) & 0x000000ff );
2708}
2709#endif
2710
2711
2712/* ***************************
2713* Memory reads
2714*****************************/
2715
2716/*!
2717 * @internal
2718 * @brief Enum to indicate whether a pointer is aligned.
2719 */
2720typedef enum {
2721 XXH_aligned, /*!< Aligned */
2722 XXH_unaligned /*!< Possibly unaligned */
2723} XXH_alignment;
2724
2725/*
2726 * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
2727 *
2728 * This is ideal for older compilers which don't inline memcpy.
2729 */
2730#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
2731
2732XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
2733{
2734 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2735 return bytePtr[0]
2736 | ((xxh_u32)bytePtr[1] << 8)
2737 | ((xxh_u32)bytePtr[2] << 16)
2738 | ((xxh_u32)bytePtr[3] << 24);
2739}
2740
2741XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
2742{
2743 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
2744 return bytePtr[3]
2745 | ((xxh_u32)bytePtr[2] << 8)
2746 | ((xxh_u32)bytePtr[1] << 16)
2747 | ((xxh_u32)bytePtr[0] << 24);
2748}
2749
2750#else
2751XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
2752{
2753 return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
2754}
2755
2756static xxh_u32 XXH_readBE32(const void* ptr)
2757{
2758 return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
2759}
2760#endif
2761
2762XXH_FORCE_INLINE xxh_u32
2763XXH_readLE32_align(const void* ptr, XXH_alignment align)
2764{
2765 if (align==XXH_unaligned) {
2766 return XXH_readLE32(ptr);
2767 } else {
2768 return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
2769 }
2770}
2771
2772
2773/* *************************************
2774* Misc
2775***************************************/
2776/*! @ingroup public */
2777XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
2778
2779
2780/* *******************************************************************
2781* 32-bit hash functions
2782*********************************************************************/
2783/*!
2784 * @}
f535537f 2785 * @defgroup XXH32_impl XXH32 implementation
648db22b 2786 * @ingroup impl
f535537f 2787 *
2788 * Details on the XXH32 implementation.
648db22b 2789 * @{
2790 */
2791 /* #define instead of static const, to be used as initializers */
2792#define XXH_PRIME32_1 0x9E3779B1U /*!< 0b10011110001101110111100110110001 */
2793#define XXH_PRIME32_2 0x85EBCA77U /*!< 0b10000101111010111100101001110111 */
2794#define XXH_PRIME32_3 0xC2B2AE3DU /*!< 0b11000010101100101010111000111101 */
2795#define XXH_PRIME32_4 0x27D4EB2FU /*!< 0b00100111110101001110101100101111 */
2796#define XXH_PRIME32_5 0x165667B1U /*!< 0b00010110010101100110011110110001 */
2797
2798#ifdef XXH_OLD_NAMES
2799# define PRIME32_1 XXH_PRIME32_1
2800# define PRIME32_2 XXH_PRIME32_2
2801# define PRIME32_3 XXH_PRIME32_3
2802# define PRIME32_4 XXH_PRIME32_4
2803# define PRIME32_5 XXH_PRIME32_5
2804#endif
2805
2806/*!
2807 * @internal
2808 * @brief Normal stripe processing routine.
2809 *
2810 * This shuffles the bits so that any bit from @p input impacts several bits in
2811 * @p acc.
2812 *
2813 * @param acc The accumulator lane.
2814 * @param input The stripe of input to mix.
2815 * @return The mixed accumulator lane.
2816 */
2817static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
2818{
2819 acc += input * XXH_PRIME32_2;
2820 acc = XXH_rotl32(acc, 13);
2821 acc *= XXH_PRIME32_1;
f535537f 2822#if (defined(__SSE4_1__) || defined(__aarch64__) || defined(__wasm_simd128__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
648db22b 2823 /*
2824 * UGLY HACK:
2825 * A compiler fence is the only thing that prevents GCC and Clang from
2826 * autovectorizing the XXH32 loop (pragmas and attributes don't work for some
2827 * reason) without globally disabling SSE4.1.
2828 *
2829 * The reason we want to avoid vectorization is because despite working on
2830 * 4 integers at a time, there are multiple factors slowing XXH32 down on
2831 * SSE4:
2832 * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on
2833 * newer chips!) making it slightly slower to multiply four integers at
2834 * once compared to four integers independently. Even where pmulld was
2835 * fastest, on Sandy/Ivy Bridge, it is still not worth it to go into SSE
2836 * just to multiply unless doing a long operation.
2837 *
2838 * - Four instructions are required to rotate,
2839 * movqda tmp, v // not required with VEX encoding
2840 * pslld tmp, 13 // tmp <<= 13
2841 * psrld v, 19 // v >>= 19
2842 * por v, tmp // v |= tmp
2843 * compared to one for scalar:
2844 * roll v, 13 // reliably fast across the board
2845 * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason
2846 *
2847 * - Instruction level parallelism is actually more beneficial here because
2848 * the SIMD actually serializes this operation: While v1 is rotating, v2
2849 * can load data, while v3 can multiply. SSE forces them to operate
2850 * together.
2851 *
f535537f 2852 * This is also enabled on AArch64, as Clang is *very aggressive* in vectorizing
2853 * the loop. NEON is only faster on the A53, and with the newer cores, it is less
2854 * than half the speed.
2855 *
2856 * Additionally, this is used on WASM SIMD128 because it JITs to the same
2857 * SIMD instructions and has the same issue.
648db22b 2858 */
2859 XXH_COMPILER_GUARD(acc);
2860#endif
2861 return acc;
2862}
2863
2864/*!
2865 * @internal
2866 * @brief Mixes all bits to finalize the hash.
2867 *
2868 * The final mix ensures that all input bits have a chance to impact any bit in
2869 * the output digest, resulting in an unbiased distribution.
2870 *
f535537f 2871 * @param hash The hash to avalanche.
648db22b 2872 * @return The avalanched hash.
2873 */
f535537f 2874static xxh_u32 XXH32_avalanche(xxh_u32 hash)
648db22b 2875{
f535537f 2876 hash ^= hash >> 15;
2877 hash *= XXH_PRIME32_2;
2878 hash ^= hash >> 13;
2879 hash *= XXH_PRIME32_3;
2880 hash ^= hash >> 16;
2881 return hash;
648db22b 2882}
2883
2884#define XXH_get32bits(p) XXH_readLE32_align(p, align)
2885
2886/*!
2887 * @internal
2888 * @brief Processes the last 0-15 bytes of @p ptr.
2889 *
2890 * There may be up to 15 bytes remaining to consume from the input.
2891 * This final stage will digest them to ensure that all input bytes are present
2892 * in the final mix.
2893 *
f535537f 2894 * @param hash The hash to finalize.
648db22b 2895 * @param ptr The pointer to the remaining input.
2896 * @param len The remaining length, modulo 16.
2897 * @param align Whether @p ptr is aligned.
2898 * @return The finalized hash.
f535537f 2899 * @see XXH64_finalize().
648db22b 2900 */
f535537f 2901static XXH_PUREF xxh_u32
2902XXH32_finalize(xxh_u32 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
648db22b 2903{
f535537f 2904#define XXH_PROCESS1 do { \
2905 hash += (*ptr++) * XXH_PRIME32_5; \
2906 hash = XXH_rotl32(hash, 11) * XXH_PRIME32_1; \
648db22b 2907} while (0)
2908
f535537f 2909#define XXH_PROCESS4 do { \
2910 hash += XXH_get32bits(ptr) * XXH_PRIME32_3; \
2911 ptr += 4; \
2912 hash = XXH_rotl32(hash, 17) * XXH_PRIME32_4; \
648db22b 2913} while (0)
2914
2915 if (ptr==NULL) XXH_ASSERT(len == 0);
2916
2917 /* Compact rerolled version; generally faster */
2918 if (!XXH32_ENDJMP) {
2919 len &= 15;
2920 while (len >= 4) {
2921 XXH_PROCESS4;
2922 len -= 4;
2923 }
2924 while (len > 0) {
2925 XXH_PROCESS1;
2926 --len;
2927 }
f535537f 2928 return XXH32_avalanche(hash);
648db22b 2929 } else {
2930 switch(len&15) /* or switch(bEnd - p) */ {
2931 case 12: XXH_PROCESS4;
f535537f 2932 XXH_FALLTHROUGH; /* fallthrough */
648db22b 2933 case 8: XXH_PROCESS4;
f535537f 2934 XXH_FALLTHROUGH; /* fallthrough */
648db22b 2935 case 4: XXH_PROCESS4;
f535537f 2936 return XXH32_avalanche(hash);
648db22b 2937
2938 case 13: XXH_PROCESS4;
f535537f 2939 XXH_FALLTHROUGH; /* fallthrough */
648db22b 2940 case 9: XXH_PROCESS4;
f535537f 2941 XXH_FALLTHROUGH; /* fallthrough */
648db22b 2942 case 5: XXH_PROCESS4;
2943 XXH_PROCESS1;
f535537f 2944 return XXH32_avalanche(hash);
648db22b 2945
2946 case 14: XXH_PROCESS4;
f535537f 2947 XXH_FALLTHROUGH; /* fallthrough */
648db22b 2948 case 10: XXH_PROCESS4;
f535537f 2949 XXH_FALLTHROUGH; /* fallthrough */
648db22b 2950 case 6: XXH_PROCESS4;
2951 XXH_PROCESS1;
2952 XXH_PROCESS1;
f535537f 2953 return XXH32_avalanche(hash);
648db22b 2954
2955 case 15: XXH_PROCESS4;
f535537f 2956 XXH_FALLTHROUGH; /* fallthrough */
648db22b 2957 case 11: XXH_PROCESS4;
f535537f 2958 XXH_FALLTHROUGH; /* fallthrough */
648db22b 2959 case 7: XXH_PROCESS4;
f535537f 2960 XXH_FALLTHROUGH; /* fallthrough */
648db22b 2961 case 3: XXH_PROCESS1;
f535537f 2962 XXH_FALLTHROUGH; /* fallthrough */
648db22b 2963 case 2: XXH_PROCESS1;
f535537f 2964 XXH_FALLTHROUGH; /* fallthrough */
648db22b 2965 case 1: XXH_PROCESS1;
f535537f 2966 XXH_FALLTHROUGH; /* fallthrough */
2967 case 0: return XXH32_avalanche(hash);
648db22b 2968 }
2969 XXH_ASSERT(0);
f535537f 2970 return hash; /* reaching this point is deemed impossible */
648db22b 2971 }
2972}
2973
2974#ifdef XXH_OLD_NAMES
2975# define PROCESS1 XXH_PROCESS1
2976# define PROCESS4 XXH_PROCESS4
2977#else
2978# undef XXH_PROCESS1
2979# undef XXH_PROCESS4
2980#endif
2981
2982/*!
2983 * @internal
2984 * @brief The implementation for @ref XXH32().
2985 *
2986 * @param input , len , seed Directly passed from @ref XXH32().
2987 * @param align Whether @p input is aligned.
2988 * @return The calculated hash.
2989 */
f535537f 2990XXH_FORCE_INLINE XXH_PUREF xxh_u32
648db22b 2991XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
2992{
2993 xxh_u32 h32;
2994
2995 if (input==NULL) XXH_ASSERT(len == 0);
2996
2997 if (len>=16) {
2998 const xxh_u8* const bEnd = input + len;
2999 const xxh_u8* const limit = bEnd - 15;
3000 xxh_u32 v1 = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
3001 xxh_u32 v2 = seed + XXH_PRIME32_2;
3002 xxh_u32 v3 = seed + 0;
3003 xxh_u32 v4 = seed - XXH_PRIME32_1;
3004
3005 do {
3006 v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
3007 v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
3008 v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
3009 v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
3010 } while (input < limit);
3011
3012 h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
3013 + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
3014 } else {
3015 h32 = seed + XXH_PRIME32_5;
3016 }
3017
3018 h32 += (xxh_u32)len;
3019
3020 return XXH32_finalize(h32, input, len&15, align);
3021}
3022
f535537f 3023/*! @ingroup XXH32_family */
648db22b 3024XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
3025{
f535537f 3026#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
648db22b 3027 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
3028 XXH32_state_t state;
3029 XXH32_reset(&state, seed);
3030 XXH32_update(&state, (const xxh_u8*)input, len);
3031 return XXH32_digest(&state);
3032#else
3033 if (XXH_FORCE_ALIGN_CHECK) {
3034 if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
3035 return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
3036 } }
3037
3038 return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
3039#endif
3040}
3041
3042
3043
3044/******* Hash streaming *******/
f535537f 3045#ifndef XXH_NO_STREAM
3046/*! @ingroup XXH32_family */
648db22b 3047XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
3048{
3049 return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
3050}
f535537f 3051/*! @ingroup XXH32_family */
648db22b 3052XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
3053{
3054 XXH_free(statePtr);
3055 return XXH_OK;
3056}
3057
f535537f 3058/*! @ingroup XXH32_family */
648db22b 3059XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
3060{
3061 XXH_memcpy(dstState, srcState, sizeof(*dstState));
3062}
3063
f535537f 3064/*! @ingroup XXH32_family */
648db22b 3065XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
3066{
3067 XXH_ASSERT(statePtr != NULL);
3068 memset(statePtr, 0, sizeof(*statePtr));
3069 statePtr->v[0] = seed + XXH_PRIME32_1 + XXH_PRIME32_2;
3070 statePtr->v[1] = seed + XXH_PRIME32_2;
3071 statePtr->v[2] = seed + 0;
3072 statePtr->v[3] = seed - XXH_PRIME32_1;
3073 return XXH_OK;
3074}
3075
3076
f535537f 3077/*! @ingroup XXH32_family */
648db22b 3078XXH_PUBLIC_API XXH_errorcode
3079XXH32_update(XXH32_state_t* state, const void* input, size_t len)
3080{
3081 if (input==NULL) {
3082 XXH_ASSERT(len == 0);
3083 return XXH_OK;
3084 }
3085
3086 { const xxh_u8* p = (const xxh_u8*)input;
3087 const xxh_u8* const bEnd = p + len;
3088
3089 state->total_len_32 += (XXH32_hash_t)len;
3090 state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
3091
3092 if (state->memsize + len < 16) { /* fill in tmp buffer */
3093 XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
3094 state->memsize += (XXH32_hash_t)len;
3095 return XXH_OK;
3096 }
3097
3098 if (state->memsize) { /* some data left from previous update */
3099 XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
3100 { const xxh_u32* p32 = state->mem32;
3101 state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p32)); p32++;
3102 state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p32)); p32++;
3103 state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p32)); p32++;
3104 state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p32));
3105 }
3106 p += 16-state->memsize;
3107 state->memsize = 0;
3108 }
3109
3110 if (p <= bEnd-16) {
3111 const xxh_u8* const limit = bEnd - 16;
3112
3113 do {
3114 state->v[0] = XXH32_round(state->v[0], XXH_readLE32(p)); p+=4;
3115 state->v[1] = XXH32_round(state->v[1], XXH_readLE32(p)); p+=4;
3116 state->v[2] = XXH32_round(state->v[2], XXH_readLE32(p)); p+=4;
3117 state->v[3] = XXH32_round(state->v[3], XXH_readLE32(p)); p+=4;
3118 } while (p<=limit);
3119
3120 }
3121
3122 if (p < bEnd) {
3123 XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
3124 state->memsize = (unsigned)(bEnd-p);
3125 }
3126 }
3127
3128 return XXH_OK;
3129}
3130
3131
f535537f 3132/*! @ingroup XXH32_family */
648db22b 3133XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t* state)
3134{
3135 xxh_u32 h32;
3136
3137 if (state->large_len) {
3138 h32 = XXH_rotl32(state->v[0], 1)
3139 + XXH_rotl32(state->v[1], 7)
3140 + XXH_rotl32(state->v[2], 12)
3141 + XXH_rotl32(state->v[3], 18);
3142 } else {
3143 h32 = state->v[2] /* == seed */ + XXH_PRIME32_5;
3144 }
3145
3146 h32 += state->total_len_32;
3147
3148 return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
3149}
f535537f 3150#endif /* !XXH_NO_STREAM */
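/*
 * Illustrative sketch (not part of the library; wrapped in `#if 0` so it
 * never compiles): the streaming API above is expected to produce the same
 * digest as the one-shot XXH32(), no matter where the input is split.
 * Error handling for XXH32_createState() is omitted for brevity.
 */
#if 0
static void XXH32_streaming_sketch(void)
{
    const char data[] = "hello, xxhash";
    size_t const len = sizeof(data) - 1;
    XXH32_hash_t oneshot, streamed;

    XXH32_state_t* const state = XXH32_createState();
    XXH32_reset(state, 0 /* seed */);
    XXH32_update(state, data, 5);           /* feed the input in two chunks */
    XXH32_update(state, data + 5, len - 5);
    streamed = XXH32_digest(state);
    XXH32_freeState(state);

    oneshot = XXH32(data, len, 0 /* seed */);
    XXH_ASSERT(oneshot == streamed);        /* both paths must agree */
}
#endif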
648db22b 3151
3152/******* Canonical representation *******/
3153
f535537f 3154/*! @ingroup XXH32_family */
648db22b 3155XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
3156{
f535537f 3157 XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
648db22b 3158 if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
3159 XXH_memcpy(dst, &hash, sizeof(*dst));
3160}
f535537f 3161/*! @ingroup XXH32_family */
648db22b 3162XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
3163{
3164 return XXH_readBE32(src);
3165}
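/*
 * Illustrative sketch (not part of the library; wrapped in `#if 0`): the
 * canonical form stores the hash big-endian, so a digest can be written to
 * disk or sent over the network and re-read on a machine of any endianness.
 */
#if 0
static void XXH32_canonical_sketch(XXH32_hash_t hash)
{
    XXH32_canonical_t canonical;
    XXH32_canonicalFromHash(&canonical, hash);                /* serialize */
    XXH_ASSERT(XXH32_hashFromCanonical(&canonical) == hash);  /* round-trip */
}
#endif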
3166
3167
3168#ifndef XXH_NO_LONG_LONG
3169
3170/* *******************************************************************
3171* 64-bit hash functions
3172*********************************************************************/
3173/*!
3174 * @}
3175 * @ingroup impl
3176 * @{
3177 */
3178/******* Memory access *******/
3179
3180typedef XXH64_hash_t xxh_u64;
3181
3182#ifdef XXH_OLD_NAMES
3183# define U64 xxh_u64
3184#endif
3185
3186#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
3187/*
3188 * Manual byteshift. Best for old compilers which don't inline memcpy.
3189 * We actually directly use XXH_readLE64 and XXH_readBE64.
3190 */
3191#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
3192
 3193/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
3194static xxh_u64 XXH_read64(const void* memPtr)
3195{
3196 return *(const xxh_u64*) memPtr;
3197}
3198
3199#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
3200
3201/*
f535537f 3202 * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
3203 * documentation claimed that it only increased the alignment, but actually it
3204 * can decrease it on gcc, clang, and icc:
3205 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
3206 * https://gcc.godbolt.org/z/xYez1j67Y.
648db22b 3207 */
3208#ifdef XXH_OLD_NAMES
3209typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
3210#endif
3211static xxh_u64 XXH_read64(const void* ptr)
3212{
f535537f 3213 typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64;
3214 return *((const xxh_unalign64*)ptr);
648db22b 3215}
3216
3217#else
3218
3219/*
3220 * Portable and safe solution. Generally efficient.
3221 * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
3222 */
3223static xxh_u64 XXH_read64(const void* memPtr)
3224{
3225 xxh_u64 val;
3226 XXH_memcpy(&val, memPtr, sizeof(val));
3227 return val;
3228}
3229
 3230#endif /* XXH_FORCE_MEMORY_ACCESS */
3231
3232#if defined(_MSC_VER) /* Visual Studio */
3233# define XXH_swap64 _byteswap_uint64
3234#elif XXH_GCC_VERSION >= 403
3235# define XXH_swap64 __builtin_bswap64
3236#else
3237static xxh_u64 XXH_swap64(xxh_u64 x)
3238{
3239 return ((x << 56) & 0xff00000000000000ULL) |
3240 ((x << 40) & 0x00ff000000000000ULL) |
3241 ((x << 24) & 0x0000ff0000000000ULL) |
3242 ((x << 8) & 0x000000ff00000000ULL) |
3243 ((x >> 8) & 0x00000000ff000000ULL) |
3244 ((x >> 24) & 0x0000000000ff0000ULL) |
3245 ((x >> 40) & 0x000000000000ff00ULL) |
3246 ((x >> 56) & 0x00000000000000ffULL);
3247}
3248#endif
3249
3250
3251/* XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load. */
3252#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
3253
3254XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
3255{
3256 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
3257 return bytePtr[0]
3258 | ((xxh_u64)bytePtr[1] << 8)
3259 | ((xxh_u64)bytePtr[2] << 16)
3260 | ((xxh_u64)bytePtr[3] << 24)
3261 | ((xxh_u64)bytePtr[4] << 32)
3262 | ((xxh_u64)bytePtr[5] << 40)
3263 | ((xxh_u64)bytePtr[6] << 48)
3264 | ((xxh_u64)bytePtr[7] << 56);
3265}
3266
3267XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
3268{
3269 const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
3270 return bytePtr[7]
3271 | ((xxh_u64)bytePtr[6] << 8)
3272 | ((xxh_u64)bytePtr[5] << 16)
3273 | ((xxh_u64)bytePtr[4] << 24)
3274 | ((xxh_u64)bytePtr[3] << 32)
3275 | ((xxh_u64)bytePtr[2] << 40)
3276 | ((xxh_u64)bytePtr[1] << 48)
3277 | ((xxh_u64)bytePtr[0] << 56);
3278}
3279
3280#else
3281XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
3282{
3283 return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
3284}
3285
3286static xxh_u64 XXH_readBE64(const void* ptr)
3287{
3288 return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
3289}
3290#endif
3291
3292XXH_FORCE_INLINE xxh_u64
3293XXH_readLE64_align(const void* ptr, XXH_alignment align)
3294{
3295 if (align==XXH_unaligned)
3296 return XXH_readLE64(ptr);
3297 else
3298 return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
3299}
3300
3301
3302/******* xxh64 *******/
3303/*!
3304 * @}
f535537f 3305 * @defgroup XXH64_impl XXH64 implementation
648db22b 3306 * @ingroup impl
f535537f 3307 *
3308 * Details on the XXH64 implementation.
648db22b 3309 * @{
3310 */
 3311/* #define rather than static const, to be used as initializers */
3312#define XXH_PRIME64_1 0x9E3779B185EBCA87ULL /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
3313#define XXH_PRIME64_2 0xC2B2AE3D27D4EB4FULL /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
3314#define XXH_PRIME64_3 0x165667B19E3779F9ULL /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
3315#define XXH_PRIME64_4 0x85EBCA77C2B2AE63ULL /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
3316#define XXH_PRIME64_5 0x27D4EB2F165667C5ULL /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */
3317
3318#ifdef XXH_OLD_NAMES
3319# define PRIME64_1 XXH_PRIME64_1
3320# define PRIME64_2 XXH_PRIME64_2
3321# define PRIME64_3 XXH_PRIME64_3
3322# define PRIME64_4 XXH_PRIME64_4
3323# define PRIME64_5 XXH_PRIME64_5
3324#endif
3325
f535537f 3326/*! @copydoc XXH32_round */
648db22b 3327static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
3328{
3329 acc += input * XXH_PRIME64_2;
3330 acc = XXH_rotl64(acc, 31);
3331 acc *= XXH_PRIME64_1;
f535537f 3332#if (defined(__AVX512F__)) && !defined(XXH_ENABLE_AUTOVECTORIZE)
3333 /*
3334 * DISABLE AUTOVECTORIZATION:
3335 * A compiler fence is used to prevent GCC and Clang from
3336 * autovectorizing the XXH64 loop (pragmas and attributes don't work for some
3337 * reason) without globally disabling AVX512.
3338 *
3339 * Autovectorization of XXH64 tends to be detrimental,
 3340 * though the exact outcome may change depending on the exact CPU and compiler version.
 3341 * For reference, it has been reported as detrimental for Skylake-X,
3342 * but possibly beneficial for Zen4.
3343 *
3344 * The default is to disable auto-vectorization,
 3345 * but it can be re-enabled with the `XXH_ENABLE_AUTOVECTORIZE` build macro.
3346 */
3347 XXH_COMPILER_GUARD(acc);
3348#endif
648db22b 3349 return acc;
3350}
3351
3352static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
3353{
3354 val = XXH64_round(0, val);
3355 acc ^= val;
3356 acc = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
3357 return acc;
3358}
3359
f535537f 3360/*! @copydoc XXH32_avalanche */
3361static xxh_u64 XXH64_avalanche(xxh_u64 hash)
648db22b 3362{
f535537f 3363 hash ^= hash >> 33;
3364 hash *= XXH_PRIME64_2;
3365 hash ^= hash >> 29;
3366 hash *= XXH_PRIME64_3;
3367 hash ^= hash >> 32;
3368 return hash;
648db22b 3369}
3370
3371
3372#define XXH_get64bits(p) XXH_readLE64_align(p, align)
3373
f535537f 3374/*!
3375 * @internal
3376 * @brief Processes the last 0-31 bytes of @p ptr.
3377 *
3378 * There may be up to 31 bytes remaining to consume from the input.
3379 * This final stage will digest them to ensure that all input bytes are present
3380 * in the final mix.
3381 *
3382 * @param hash The hash to finalize.
3383 * @param ptr The pointer to the remaining input.
3384 * @param len The remaining length, modulo 32.
3385 * @param align Whether @p ptr is aligned.
3386 * @return The finalized hash
3387 * @see XXH32_finalize().
3388 */
3389static XXH_PUREF xxh_u64
3390XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
648db22b 3391{
3392 if (ptr==NULL) XXH_ASSERT(len == 0);
3393 len &= 31;
3394 while (len >= 8) {
3395 xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
3396 ptr += 8;
f535537f 3397 hash ^= k1;
3398 hash = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
648db22b 3399 len -= 8;
3400 }
3401 if (len >= 4) {
f535537f 3402 hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
648db22b 3403 ptr += 4;
f535537f 3404 hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
648db22b 3405 len -= 4;
3406 }
3407 while (len > 0) {
f535537f 3408 hash ^= (*ptr++) * XXH_PRIME64_5;
3409 hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1;
648db22b 3410 --len;
3411 }
f535537f 3412 return XXH64_avalanche(hash);
648db22b 3413}
3414
3415#ifdef XXH_OLD_NAMES
3416# define PROCESS1_64 XXH_PROCESS1_64
3417# define PROCESS4_64 XXH_PROCESS4_64
3418# define PROCESS8_64 XXH_PROCESS8_64
3419#else
3420# undef XXH_PROCESS1_64
3421# undef XXH_PROCESS4_64
3422# undef XXH_PROCESS8_64
3423#endif
3424
f535537f 3425/*!
3426 * @internal
3427 * @brief The implementation for @ref XXH64().
3428 *
3429 * @param input , len , seed Directly passed from @ref XXH64().
3430 * @param align Whether @p input is aligned.
3431 * @return The calculated hash.
3432 */
3433XXH_FORCE_INLINE XXH_PUREF xxh_u64
648db22b 3434XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
3435{
3436 xxh_u64 h64;
3437 if (input==NULL) XXH_ASSERT(len == 0);
3438
3439 if (len>=32) {
3440 const xxh_u8* const bEnd = input + len;
3441 const xxh_u8* const limit = bEnd - 31;
3442 xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
3443 xxh_u64 v2 = seed + XXH_PRIME64_2;
3444 xxh_u64 v3 = seed + 0;
3445 xxh_u64 v4 = seed - XXH_PRIME64_1;
3446
3447 do {
3448 v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
3449 v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
3450 v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
3451 v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
3452 } while (input<limit);
3453
3454 h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
3455 h64 = XXH64_mergeRound(h64, v1);
3456 h64 = XXH64_mergeRound(h64, v2);
3457 h64 = XXH64_mergeRound(h64, v3);
3458 h64 = XXH64_mergeRound(h64, v4);
3459
3460 } else {
3461 h64 = seed + XXH_PRIME64_5;
3462 }
3463
3464 h64 += (xxh_u64) len;
3465
3466 return XXH64_finalize(h64, input, len, align);
3467}
3468
3469
f535537f 3470/*! @ingroup XXH64_family */
3471XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
648db22b 3472{
f535537f 3473#if !defined(XXH_NO_STREAM) && XXH_SIZE_OPT >= 2
648db22b 3474 /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
3475 XXH64_state_t state;
3476 XXH64_reset(&state, seed);
3477 XXH64_update(&state, (const xxh_u8*)input, len);
3478 return XXH64_digest(&state);
3479#else
3480 if (XXH_FORCE_ALIGN_CHECK) {
3481 if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
3482 return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
3483 } }
3484
3485 return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
3486
3487#endif
3488}
3489
3490/******* Hash Streaming *******/
f535537f 3491#ifndef XXH_NO_STREAM
3492/*! @ingroup XXH64_family*/
648db22b 3493XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
3494{
3495 return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
3496}
f535537f 3497/*! @ingroup XXH64_family */
648db22b 3498XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
3499{
3500 XXH_free(statePtr);
3501 return XXH_OK;
3502}
3503
f535537f 3504/*! @ingroup XXH64_family */
3505XXH_PUBLIC_API void XXH64_copyState(XXH_NOESCAPE XXH64_state_t* dstState, const XXH64_state_t* srcState)
648db22b 3506{
3507 XXH_memcpy(dstState, srcState, sizeof(*dstState));
3508}
3509
f535537f 3510/*! @ingroup XXH64_family */
3511XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH_NOESCAPE XXH64_state_t* statePtr, XXH64_hash_t seed)
648db22b 3512{
3513 XXH_ASSERT(statePtr != NULL);
3514 memset(statePtr, 0, sizeof(*statePtr));
3515 statePtr->v[0] = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
3516 statePtr->v[1] = seed + XXH_PRIME64_2;
3517 statePtr->v[2] = seed + 0;
3518 statePtr->v[3] = seed - XXH_PRIME64_1;
3519 return XXH_OK;
3520}
3521
f535537f 3522/*! @ingroup XXH64_family */
648db22b 3523XXH_PUBLIC_API XXH_errorcode
f535537f 3524XXH64_update (XXH_NOESCAPE XXH64_state_t* state, XXH_NOESCAPE const void* input, size_t len)
648db22b 3525{
3526 if (input==NULL) {
3527 XXH_ASSERT(len == 0);
3528 return XXH_OK;
3529 }
3530
3531 { const xxh_u8* p = (const xxh_u8*)input;
3532 const xxh_u8* const bEnd = p + len;
3533
3534 state->total_len += len;
3535
3536 if (state->memsize + len < 32) { /* fill in tmp buffer */
3537 XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
3538 state->memsize += (xxh_u32)len;
3539 return XXH_OK;
3540 }
3541
3542 if (state->memsize) { /* tmp buffer is full */
3543 XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
3544 state->v[0] = XXH64_round(state->v[0], XXH_readLE64(state->mem64+0));
3545 state->v[1] = XXH64_round(state->v[1], XXH_readLE64(state->mem64+1));
3546 state->v[2] = XXH64_round(state->v[2], XXH_readLE64(state->mem64+2));
3547 state->v[3] = XXH64_round(state->v[3], XXH_readLE64(state->mem64+3));
3548 p += 32 - state->memsize;
3549 state->memsize = 0;
3550 }
3551
3552 if (p+32 <= bEnd) {
3553 const xxh_u8* const limit = bEnd - 32;
3554
3555 do {
3556 state->v[0] = XXH64_round(state->v[0], XXH_readLE64(p)); p+=8;
3557 state->v[1] = XXH64_round(state->v[1], XXH_readLE64(p)); p+=8;
3558 state->v[2] = XXH64_round(state->v[2], XXH_readLE64(p)); p+=8;
3559 state->v[3] = XXH64_round(state->v[3], XXH_readLE64(p)); p+=8;
3560 } while (p<=limit);
3561
3562 }
3563
3564 if (p < bEnd) {
3565 XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
3566 state->memsize = (unsigned)(bEnd-p);
3567 }
3568 }
3569
3570 return XXH_OK;
3571}
3572
3573
f535537f 3574/*! @ingroup XXH64_family */
3575XXH_PUBLIC_API XXH64_hash_t XXH64_digest(XXH_NOESCAPE const XXH64_state_t* state)
648db22b 3576{
3577 xxh_u64 h64;
3578
3579 if (state->total_len >= 32) {
3580 h64 = XXH_rotl64(state->v[0], 1) + XXH_rotl64(state->v[1], 7) + XXH_rotl64(state->v[2], 12) + XXH_rotl64(state->v[3], 18);
3581 h64 = XXH64_mergeRound(h64, state->v[0]);
3582 h64 = XXH64_mergeRound(h64, state->v[1]);
3583 h64 = XXH64_mergeRound(h64, state->v[2]);
3584 h64 = XXH64_mergeRound(h64, state->v[3]);
3585 } else {
3586 h64 = state->v[2] /*seed*/ + XXH_PRIME64_5;
3587 }
3588
3589 h64 += (xxh_u64) state->total_len;
3590
3591 return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
3592}
f535537f 3593#endif /* !XXH_NO_STREAM */
648db22b 3594
3595/******* Canonical representation *******/
3596
f535537f 3597/*! @ingroup XXH64_family */
3598XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash)
648db22b 3599{
f535537f 3600 XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
648db22b 3601 if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
3602 XXH_memcpy(dst, &hash, sizeof(*dst));
3603}
3604
f535537f 3605/*! @ingroup XXH64_family */
3606XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src)
648db22b 3607{
3608 return XXH_readBE64(src);
3609}
3610
3611#ifndef XXH_NO_XXH3
3612
3613/* *********************************************************************
3614* XXH3
3615* New generation hash designed for speed on small keys and vectorization
3616************************************************************************ */
3617/*!
3618 * @}
f535537f 3619 * @defgroup XXH3_impl XXH3 implementation
648db22b 3620 * @ingroup impl
3621 * @{
3622 */
3623
3624/* === Compiler specifics === */
3625
3626#if ((defined(sun) || defined(__sun)) && __cplusplus) /* Solaris includes __STDC_VERSION__ with C++. Tested with GCC 5.5 */
f535537f 3627# define XXH_RESTRICT /* disable */
648db22b 3628#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* >= C99 */
3629# define XXH_RESTRICT restrict
f535537f 3630#elif (defined (__GNUC__) && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))) \
3631 || (defined (__clang__)) \
3632 || (defined (_MSC_VER) && (_MSC_VER >= 1400)) \
3633 || (defined (__INTEL_COMPILER) && (__INTEL_COMPILER >= 1300))
3634/*
3635 * There are a LOT more compilers that recognize __restrict but this
3636 * covers the major ones.
3637 */
3638# define XXH_RESTRICT __restrict
648db22b 3639#else
648db22b 3640# define XXH_RESTRICT /* disable */
3641#endif
3642
3643#if (defined(__GNUC__) && (__GNUC__ >= 3)) \
3644 || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) \
3645 || defined(__clang__)
3646# define XXH_likely(x) __builtin_expect(x, 1)
3647# define XXH_unlikely(x) __builtin_expect(x, 0)
3648#else
3649# define XXH_likely(x) (x)
3650# define XXH_unlikely(x) (x)
3651#endif
3652
f535537f 3653#ifndef XXH_HAS_INCLUDE
3654# ifdef __has_include
3655/*
3656 * Not defined as XXH_HAS_INCLUDE(x) (function-like) because
3657 * this causes segfaults in Apple Clang 4.2 (on Mac OS X 10.7 Lion)
3658 */
3659# define XXH_HAS_INCLUDE __has_include
3660# else
3661# define XXH_HAS_INCLUDE(x) 0
3662# endif
3663#endif
3664
648db22b 3665#if defined(__GNUC__) || defined(__clang__)
f535537f 3666# if defined(__ARM_FEATURE_SVE)
3667# include <arm_sve.h>
3668# endif
648db22b 3669# if defined(__ARM_NEON__) || defined(__ARM_NEON) \
f535537f 3670 || (defined(_M_ARM) && _M_ARM >= 7) \
3671 || defined(_M_ARM64) || defined(_M_ARM64EC) \
3672 || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* WASM SIMD128 via SIMDe */
648db22b 3673# define inline __inline__ /* circumvent a clang bug */
3674# include <arm_neon.h>
3675# undef inline
3676# elif defined(__AVX2__)
3677# include <immintrin.h>
3678# elif defined(__SSE2__)
3679# include <emmintrin.h>
3680# endif
3681#endif
3682
3683#if defined(_MSC_VER)
3684# include <intrin.h>
3685#endif
3686
3687/*
3688 * One goal of XXH3 is to make it fast on both 32-bit and 64-bit, while
3689 * remaining a true 64-bit/128-bit hash function.
3690 *
3691 * This is done by prioritizing a subset of 64-bit operations that can be
3692 * emulated without too many steps on the average 32-bit machine.
3693 *
3694 * For example, these two lines seem similar, and run equally fast on 64-bit:
3695 *
3696 * xxh_u64 x;
3697 * x ^= (x >> 47); // good
3698 * x ^= (x >> 13); // bad
3699 *
3700 * However, to a 32-bit machine, there is a major difference.
3701 *
3702 * x ^= (x >> 47) looks like this:
3703 *
3704 * x.lo ^= (x.hi >> (47 - 32));
3705 *
3706 * while x ^= (x >> 13) looks like this:
3707 *
3708 * // note: funnel shifts are not usually cheap.
3709 * x.lo ^= (x.lo >> 13) | (x.hi << (32 - 13));
3710 * x.hi ^= (x.hi >> 13);
3711 *
3712 * The first one is significantly faster than the second, simply because the
3713 * shift is larger than 32. This means:
3714 * - All the bits we need are in the upper 32 bits, so we can ignore the lower
3715 * 32 bits in the shift.
3716 * - The shift result will always fit in the lower 32 bits, and therefore,
3717 * we can ignore the upper 32 bits in the xor.
3718 *
3719 * Thanks to this optimization, XXH3 only requires these features to be efficient:
3720 *
3721 * - Usable unaligned access
3722 * - A 32-bit or 64-bit ALU
3723 * - If 32-bit, a decent ADC instruction
3724 * - A 32 or 64-bit multiply with a 64-bit result
3725 * - For the 128-bit variant, a decent byteswap helps short inputs.
3726 *
3727 * The first two are already required by XXH32, and almost all 32-bit and 64-bit
3728 * platforms which can run XXH32 can run XXH3 efficiently.
3729 *
3730 * Thumb-1, the classic 16-bit only subset of ARM's instruction set, is one
3731 * notable exception.
3732 *
3733 * First of all, Thumb-1 lacks support for the UMULL instruction which
3734 * performs the important long multiply. This means numerous __aeabi_lmul
3735 * calls.
3736 *
3737 * Second of all, the 8 functional registers are just not enough.
3738 * Setup for __aeabi_lmul, byteshift loads, pointers, and all arithmetic need
3739 * Lo registers, and this shuffling results in thousands more MOVs than A32.
3740 *
3741 * A32 and T32 don't have this limitation. They can access all 14 registers,
3742 * do a 32->64 multiply with UMULL, and the flexible operand allowing free
3743 * shifts is helpful, too.
3744 *
3745 * Therefore, we do a quick sanity check.
3746 *
3747 * If compiling Thumb-1 for a target which supports ARM instructions, we will
3748 * emit a warning, as it is not a "sane" platform to compile for.
3749 *
3750 * Usually, if this happens, it is because of an accident and you probably need
3751 * to specify -march, as you likely meant to compile for a newer architecture.
3752 *
3753 * Credit: large sections of the vectorial and asm source code paths
3754 * have been contributed by @easyaspi314
3755 */
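/*
 * Illustrative sketch of the shift argument above (not part of the library;
 * wrapped in `#if 0`). `hi`/`lo` are hypothetical names for the two halves
 * of a 64-bit value held in 32-bit registers.
 */
#if 0
static void XXH3_xorshift_halves_sketch(xxh_u32* hi, xxh_u32* lo)
{
    /* x ^= (x >> 47): the shift is >= 32, so only the high half feeds in
     * and only the low half changes. One shift, one xor. */
    *lo ^= (*hi >> (47 - 32));

    /* x ^= (x >> 13): the shift is < 32, so a funnel shift is required and
     * both halves change. Noticeably more work on a 32-bit ALU. */
    *lo ^= (*lo >> 13) | (*hi << (32 - 13));
    *hi ^= (*hi >> 13);
}
#endif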
3756#if defined(__thumb__) && !defined(__thumb2__) && defined(__ARM_ARCH_ISA_ARM)
3757# warning "XXH3 is highly inefficient without ARM or Thumb-2."
3758#endif
3759
3760/* ==========================================
3761 * Vectorization detection
3762 * ========================================== */
3763
3764#ifdef XXH_DOXYGEN
3765/*!
3766 * @ingroup tuning
3767 * @brief Overrides the vectorization implementation chosen for XXH3.
3768 *
3769 * Can be defined to 0 to disable SIMD or any of the values mentioned in
3770 * @ref XXH_VECTOR_TYPE.
3771 *
3772 * If this is not defined, it uses predefined macros to determine the best
3773 * implementation.
3774 */
3775# define XXH_VECTOR XXH_SCALAR
3776/*!
3777 * @ingroup tuning
3778 * @brief Possible values for @ref XXH_VECTOR.
3779 *
3780 * Note that these are actually implemented as macros.
3781 *
3782 * If this is not defined, it is detected automatically.
f535537f 3783 * The internal macro XXH_X86DISPATCH overrides this.
648db22b 3784 */
3785enum XXH_VECTOR_TYPE /* fake enum */ {
3786 XXH_SCALAR = 0, /*!< Portable scalar version */
3787 XXH_SSE2 = 1, /*!<
3788 * SSE2 for Pentium 4, Opteron, all x86_64.
3789 *
3790 * @note SSE2 is also guaranteed on Windows 10, macOS, and
3791 * Android x86.
3792 */
3793 XXH_AVX2 = 2, /*!< AVX2 for Haswell and Bulldozer */
3794 XXH_AVX512 = 3, /*!< AVX512 for Skylake and Icelake */
f535537f 3795 XXH_NEON = 4, /*!<
3796 * NEON for most ARMv7-A, all AArch64, and WASM SIMD128
3797 * via the SIMDeverywhere polyfill provided with the
3798 * Emscripten SDK.
3799 */
648db22b 3800 XXH_VSX = 5, /*!< VSX and ZVector for POWER8/z13 (64-bit) */
f535537f 3801 XXH_SVE = 6, /*!< SVE for some ARMv8-A and ARMv9-A */
648db22b 3802};
3803/*!
3804 * @ingroup tuning
3805 * @brief Selects the minimum alignment for XXH3's accumulators.
3806 *
3807 * When using SIMD, this should match the alignment required for said vector
3808 * type, so, for example, 32 for AVX2.
3809 *
3810 * Default: Auto detected.
3811 */
3812# define XXH_ACC_ALIGN 8
3813#endif
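/*
 * Hedged build-time example: XXH_VECTOR accepts the numeric values listed
 * above and can be pinned from the compiler command line, e.g.
 *
 *     cc -O3 -DXXH_VECTOR=0 ...   (0 == XXH_SCALAR: disable SIMD entirely)
 */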
3814
3815/* Actual definition */
3816#ifndef XXH_DOXYGEN
3817# define XXH_SCALAR 0
3818# define XXH_SSE2 1
3819# define XXH_AVX2 2
3820# define XXH_AVX512 3
3821# define XXH_NEON 4
3822# define XXH_VSX 5
f535537f 3823# define XXH_SVE 6
648db22b 3824#endif
3825
3826#ifndef XXH_VECTOR /* can be defined on command line */
f535537f 3827# if defined(__ARM_FEATURE_SVE)
3828# define XXH_VECTOR XXH_SVE
3829# elif ( \
648db22b 3830 defined(__ARM_NEON__) || defined(__ARM_NEON) /* gcc */ \
3831 || defined(_M_ARM) || defined(_M_ARM64) || defined(_M_ARM64EC) /* msvc */ \
f535537f 3832 || (defined(__wasm_simd128__) && XXH_HAS_INCLUDE(<arm_neon.h>)) /* wasm simd128 via SIMDe */ \
648db22b 3833 ) && ( \
3834 defined(_WIN32) || defined(__LITTLE_ENDIAN__) /* little endian only */ \
3835 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) \
3836 )
3837# define XXH_VECTOR XXH_NEON
3838# elif defined(__AVX512F__)
3839# define XXH_VECTOR XXH_AVX512
3840# elif defined(__AVX2__)
3841# define XXH_VECTOR XXH_AVX2
3842# elif defined(__SSE2__) || defined(_M_AMD64) || defined(_M_X64) || (defined(_M_IX86_FP) && (_M_IX86_FP == 2))
3843# define XXH_VECTOR XXH_SSE2
3844# elif (defined(__PPC64__) && defined(__POWER8_VECTOR__)) \
3845 || (defined(__s390x__) && defined(__VEC__)) \
3846 && defined(__GNUC__) /* TODO: IBM XL */
3847# define XXH_VECTOR XXH_VSX
3848# else
3849# define XXH_VECTOR XXH_SCALAR
3850# endif
3851#endif
3852
f535537f 3853/* __ARM_FEATURE_SVE is only supported by GCC & Clang. */
3854#if (XXH_VECTOR == XXH_SVE) && !defined(__ARM_FEATURE_SVE)
3855# ifdef _MSC_VER
3856# pragma warning(once : 4606)
3857# else
3858# warning "__ARM_FEATURE_SVE isn't supported. Use SCALAR instead."
3859# endif
3860# undef XXH_VECTOR
3861# define XXH_VECTOR XXH_SCALAR
3862#endif
3863
648db22b 3864/*
3865 * Controls the alignment of the accumulator,
3866 * for compatibility with aligned vector loads, which are usually faster.
3867 */
3868#ifndef XXH_ACC_ALIGN
3869# if defined(XXH_X86DISPATCH)
3870# define XXH_ACC_ALIGN 64 /* for compatibility with avx512 */
3871# elif XXH_VECTOR == XXH_SCALAR /* scalar */
3872# define XXH_ACC_ALIGN 8
3873# elif XXH_VECTOR == XXH_SSE2 /* sse2 */
3874# define XXH_ACC_ALIGN 16
3875# elif XXH_VECTOR == XXH_AVX2 /* avx2 */
3876# define XXH_ACC_ALIGN 32
3877# elif XXH_VECTOR == XXH_NEON /* neon */
3878# define XXH_ACC_ALIGN 16
3879# elif XXH_VECTOR == XXH_VSX /* vsx */
3880# define XXH_ACC_ALIGN 16
3881# elif XXH_VECTOR == XXH_AVX512 /* avx512 */
3882# define XXH_ACC_ALIGN 64
f535537f 3883# elif XXH_VECTOR == XXH_SVE /* sve */
3884# define XXH_ACC_ALIGN 64
648db22b 3885# endif
3886#endif
3887
3888#if defined(XXH_X86DISPATCH) || XXH_VECTOR == XXH_SSE2 \
3889 || XXH_VECTOR == XXH_AVX2 || XXH_VECTOR == XXH_AVX512
3890# define XXH_SEC_ALIGN XXH_ACC_ALIGN
f535537f 3891#elif XXH_VECTOR == XXH_SVE
3892# define XXH_SEC_ALIGN XXH_ACC_ALIGN
648db22b 3893#else
3894# define XXH_SEC_ALIGN 8
3895#endif
3896
f535537f 3897#if defined(__GNUC__) || defined(__clang__)
3898# define XXH_ALIASING __attribute__((may_alias))
3899#else
3900# define XXH_ALIASING /* nothing */
3901#endif
3902
648db22b 3903/*
3904 * UGLY HACK:
3905 * GCC usually generates the best code with -O3 for xxHash.
3906 *
3907 * However, when targeting AVX2, it is overzealous in its unrolling resulting
3908 * in code roughly 3/4 the speed of Clang.
3909 *
3910 * There are other issues, such as GCC splitting _mm256_loadu_si256 into
3911 * _mm_loadu_si128 + _mm256_inserti128_si256. This is an optimization which
3912 * only applies to Sandy and Ivy Bridge... which don't even support AVX2.
3913 *
3914 * That is why when compiling the AVX2 version, it is recommended to use either
f535537f 3915 * -O2 -mavx2 -march=haswell
3916 * or
3917 * -O2 -mavx2 -mno-avx256-split-unaligned-load
3918 * for decent performance, or to use Clang instead.
648db22b 3919 *
f535537f 3920 * Fortunately, we can control the first one with a pragma that forces GCC into
3921 * -O2, but the other one we can't control without "failed to inline always
3922 * inline function due to target mismatch" warnings.
3923 */
3924#if XXH_VECTOR == XXH_AVX2 /* AVX2 */ \
3925 && defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
3926 && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
3927# pragma GCC push_options
3928# pragma GCC optimize("-O2")
3929#endif
3930
3931#if XXH_VECTOR == XXH_NEON
3932
3933/*
3934 * UGLY HACK: While AArch64 GCC on Linux does not seem to care, on macOS, GCC -O3
3935 * optimizes out the entire hashLong loop because of the aliasing violation.
648db22b 3936 *
f535537f 3937 * However, GCC is also inefficient at load-store optimization with vld1q/vst1q,
3938 * so the only option is to mark it as aliasing.
3939 */
3940typedef uint64x2_t xxh_aliasing_uint64x2_t XXH_ALIASING;
3941
3942/*!
3943 * @internal
3944 * @brief `vld1q_u64` but faster and alignment-safe.
648db22b 3945 *
f535537f 3946 * On AArch64, unaligned access is always safe, but on ARMv7-a, it is only
3947 * *conditionally* safe (`vld1` has an alignment bit like `movdq[ua]` in x86).
648db22b 3948 *
f535537f 3949 * GCC for AArch64 sees `vld1q_u8` as an intrinsic instead of a load, so it
3950 * prohibits load-store optimizations. Therefore, a direct dereference is used.
648db22b 3951 *
f535537f 3952 * Otherwise, `vld1q_u8` is used with `vreinterpretq_u8_u64` to do a safe
3953 * unaligned load.
648db22b 3954 */
f535537f 3955#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__)
3956XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr) /* silence -Wcast-align */
3957{
3958 return *(xxh_aliasing_uint64x2_t const *)ptr;
3959}
3960#else
3961XXH_FORCE_INLINE uint64x2_t XXH_vld1q_u64(void const* ptr)
3962{
3963 return vreinterpretq_u64_u8(vld1q_u8((uint8_t const*)ptr));
3964}
3965#endif
648db22b 3966
3967/*!
f535537f 3968 * @internal
3969 * @brief `vmlal_u32` on low and high halves of a vector.
3970 *
3971 * This is a workaround for AArch64 GCC < 11 which implemented arm_neon.h with
 3972 * inline assembly and was therefore incapable of merging the `vget_{low, high}_u32`
3973 * with `vmlal_u32`.
648db22b 3974 */
f535537f 3975#if defined(__aarch64__) && defined(__GNUC__) && !defined(__clang__) && __GNUC__ < 11
3976XXH_FORCE_INLINE uint64x2_t
3977XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
3978{
3979 /* Inline assembly is the only way */
3980 __asm__("umlal %0.2d, %1.2s, %2.2s" : "+w" (acc) : "w" (lhs), "w" (rhs));
3981 return acc;
3982}
3983XXH_FORCE_INLINE uint64x2_t
3984XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
3985{
3986 /* This intrinsic works as expected */
3987 return vmlal_high_u32(acc, lhs, rhs);
3988}
3989#else
3990/* Portable intrinsic versions */
3991XXH_FORCE_INLINE uint64x2_t
3992XXH_vmlal_low_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
3993{
3994 return vmlal_u32(acc, vget_low_u32(lhs), vget_low_u32(rhs));
3995}
3996/*! @copydoc XXH_vmlal_low_u32
3997 * Assume the compiler converts this to vmlal_high_u32 on aarch64 */
3998XXH_FORCE_INLINE uint64x2_t
3999XXH_vmlal_high_u32(uint64x2_t acc, uint32x4_t lhs, uint32x4_t rhs)
4000{
4001 return vmlal_u32(acc, vget_high_u32(lhs), vget_high_u32(rhs));
4002}
4003#endif
648db22b 4004
4005/*!
4006 * @ingroup tuning
4007 * @brief Controls the NEON to scalar ratio for XXH3
4008 *
f535537f 4009 * This can be set to 2, 4, 6, or 8.
648db22b 4010 *
f535537f 4011 * ARM Cortex CPUs are _very_ sensitive to how their pipelines are used.
648db22b 4012 *
f535537f 4013 * For example, the Cortex-A73 can dispatch 3 micro-ops per cycle, but only 2 of those
4014 * can be NEON. If you are only using NEON instructions, you are only using 2/3 of the CPU
4015 * bandwidth.
648db22b 4016 *
f535537f 4017 * This is even more noticeable on the more advanced cores like the Cortex-A76 which
648db22b 4018 * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
4019 *
f535537f 4020 * Therefore, to make the most out of the pipeline, it is beneficial to run 6 NEON lanes
4021 * and 2 scalar lanes, which is chosen by default.
4022 *
4023 * This does not apply to Apple processors or 32-bit processors, which run better with
4024 * full NEON. These will default to 8. Additionally, size-optimized builds run 8 lanes.
648db22b 4025 *
4026 * This change benefits CPUs with large micro-op buffers without negatively affecting
f535537f 4027 * most other CPUs:
648db22b 4028 *
4029 * | Chipset | Dispatch type | NEON only | 6:2 hybrid | Diff. |
4030 * |:----------------------|:--------------------|----------:|-----------:|------:|
4031 * | Snapdragon 730 (A76) | 2 NEON/8 micro-ops | 8.8 GB/s | 10.1 GB/s | ~16% |
4032 * | Snapdragon 835 (A73) | 2 NEON/3 micro-ops | 5.1 GB/s | 5.3 GB/s | ~5% |
4033 * | Marvell PXA1928 (A53) | In-order dual-issue | 1.9 GB/s | 1.9 GB/s | 0% |
f535537f 4034 * | Apple M1 | 4 NEON/8 micro-ops | 37.3 GB/s | 36.1 GB/s | ~-3% |
648db22b 4035 *
4036 * It also seems to fix some bad codegen on GCC, making it almost as fast as clang.
4037 *
f535537f 4038 * When using WASM SIMD128, if this is 2 or 6, SIMDe will scalarize 2 of the lanes,
 4039 * meaning it effectively becomes a slower version of 4.
4040 *
648db22b 4041 * @see XXH3_accumulate_512_neon()
4042 */
4043# ifndef XXH3_NEON_LANES
4044# if (defined(__aarch64__) || defined(__arm64__) || defined(_M_ARM64) || defined(_M_ARM64EC)) \
f535537f 4045 && !defined(__APPLE__) && XXH_SIZE_OPT <= 0
648db22b 4046# define XXH3_NEON_LANES 6
4047# else
4048# define XXH3_NEON_LANES XXH_ACC_NB
4049# endif
4050# endif
4051#endif /* XXH_VECTOR == XXH_NEON */
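/*
 * Hedged build-time example: XXH3_NEON_LANES can likewise be pinned, e.g.
 * `-DXXH3_NEON_LANES=8` to run full NEON on cores where the 6:2 hybrid
 * split above is not beneficial.
 */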
4052
4053/*
4054 * VSX and Z Vector helpers.
4055 *
4056 * This is very messy, and any pull requests to clean this up are welcome.
4057 *
4058 * There are a lot of problems with supporting VSX and s390x, due to
4059 * inconsistent intrinsics, spotty coverage, and multiple endiannesses.
4060 */
4061#if XXH_VECTOR == XXH_VSX
f535537f 4062/* Annoyingly, these headers _may_ define three macros: `bool`, `vector`,
4063 * and `pixel`. This is a problem for obvious reasons.
4064 *
4065 * These keywords are unnecessary; the spec literally says they are
4066 * equivalent to `__bool`, `__vector`, and `__pixel` and may be undef'd
4067 * after including the header.
4068 *
4069 * We use pragma push_macro/pop_macro to keep the namespace clean. */
4070# pragma push_macro("bool")
4071# pragma push_macro("vector")
4072# pragma push_macro("pixel")
4073/* silence potential macro redefined warnings */
4074# undef bool
4075# undef vector
4076# undef pixel
4077
648db22b 4078# if defined(__s390x__)
4079# include <s390intrin.h>
4080# else
648db22b 4081# include <altivec.h>
4082# endif
4083
f535537f 4084/* Restore the original macro values, if applicable. */
4085# pragma pop_macro("pixel")
4086# pragma pop_macro("vector")
4087# pragma pop_macro("bool")
4088
648db22b 4089typedef __vector unsigned long long xxh_u64x2;
4090typedef __vector unsigned char xxh_u8x16;
4091typedef __vector unsigned xxh_u32x4;
4092
f535537f 4093/*
4094 * UGLY HACK: Similar to aarch64 macOS GCC, s390x GCC has the same aliasing issue.
4095 */
4096typedef xxh_u64x2 xxh_aliasing_u64x2 XXH_ALIASING;
4097
648db22b 4098# ifndef XXH_VSX_BE
4099# if defined(__BIG_ENDIAN__) \
4100 || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
4101# define XXH_VSX_BE 1
4102# elif defined(__VEC_ELEMENT_REG_ORDER__) && __VEC_ELEMENT_REG_ORDER__ == __ORDER_BIG_ENDIAN__
4103# warning "-maltivec=be is not recommended. Please use native endianness."
4104# define XXH_VSX_BE 1
4105# else
4106# define XXH_VSX_BE 0
4107# endif
4108# endif /* !defined(XXH_VSX_BE) */
4109
4110# if XXH_VSX_BE
4111# if defined(__POWER9_VECTOR__) || (defined(__clang__) && defined(__s390x__))
4112# define XXH_vec_revb vec_revb
4113# else
4114/*!
4115 * A polyfill for POWER9's vec_revb().
4116 */
4117XXH_FORCE_INLINE xxh_u64x2 XXH_vec_revb(xxh_u64x2 val)
4118{
4119 xxh_u8x16 const vByteSwap = { 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
4120 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08 };
4121 return vec_perm(val, val, vByteSwap);
4122}
4123# endif
4124# endif /* XXH_VSX_BE */
4125
4126/*!
4127 * Performs an unaligned vector load and byte swaps it on big endian.
4128 */
4129XXH_FORCE_INLINE xxh_u64x2 XXH_vec_loadu(const void *ptr)
4130{
4131 xxh_u64x2 ret;
4132 XXH_memcpy(&ret, ptr, sizeof(xxh_u64x2));
4133# if XXH_VSX_BE
4134 ret = XXH_vec_revb(ret);
4135# endif
4136 return ret;
4137}
4138
4139/*
4140 * vec_mulo and vec_mule are very problematic intrinsics on PowerPC
4141 *
4142 * These intrinsics weren't added until GCC 8, despite existing for a while,
 4143 * and they are endian-dependent. Also, their meanings swap depending on the version.
 4144 */
4145# if defined(__s390x__)
4146 /* s390x is always big endian, no issue on this platform */
4147# define XXH_vec_mulo vec_mulo
4148# define XXH_vec_mule vec_mule
f535537f 4149# elif defined(__clang__) && XXH_HAS_BUILTIN(__builtin_altivec_vmuleuw) && !defined(__ibmxl__)
648db22b 4150/* Clang has a better way to control this, we can just use the builtin which doesn't swap. */
f535537f 4151 /* The IBM XL Compiler (which defines __clang__) only implements the vec_* operations */
648db22b 4152# define XXH_vec_mulo __builtin_altivec_vmulouw
4153# define XXH_vec_mule __builtin_altivec_vmuleuw
4154# else
4155/* gcc needs inline assembly */
4156/* Adapted from https://github.com/google/highwayhash/blob/master/highwayhash/hh_vsx.h. */
4157XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mulo(xxh_u32x4 a, xxh_u32x4 b)
4158{
4159 xxh_u64x2 result;
4160 __asm__("vmulouw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
4161 return result;
4162}
4163XXH_FORCE_INLINE xxh_u64x2 XXH_vec_mule(xxh_u32x4 a, xxh_u32x4 b)
4164{
4165 xxh_u64x2 result;
4166 __asm__("vmuleuw %0, %1, %2" : "=v" (result) : "v" (a), "v" (b));
4167 return result;
4168}
4169# endif /* XXH_vec_mulo, XXH_vec_mule */
4170#endif /* XXH_VECTOR == XXH_VSX */
4171
f535537f 4172#if XXH_VECTOR == XXH_SVE
4173#define ACCRND(acc, offset) \
4174do { \
4175 svuint64_t input_vec = svld1_u64(mask, xinput + offset); \
4176 svuint64_t secret_vec = svld1_u64(mask, xsecret + offset); \
4177 svuint64_t mixed = sveor_u64_x(mask, secret_vec, input_vec); \
4178 svuint64_t swapped = svtbl_u64(input_vec, kSwap); \
4179 svuint64_t mixed_lo = svextw_u64_x(mask, mixed); \
4180 svuint64_t mixed_hi = svlsr_n_u64_x(mask, mixed, 32); \
4181 svuint64_t mul = svmad_u64_x(mask, mixed_lo, mixed_hi, swapped); \
4182 acc = svadd_u64_x(mask, acc, mul); \
4183} while (0)
4184#endif /* XXH_VECTOR == XXH_SVE */
648db22b 4185
4186/* prefetch
4187 * can be disabled, by declaring XXH_NO_PREFETCH build macro */
4188#if defined(XXH_NO_PREFETCH)
4189# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
4190#else
f535537f 4191# if XXH_SIZE_OPT >= 1
4192# define XXH_PREFETCH(ptr) (void)(ptr)
4193# elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() not defined outside of x86/x64 */
648db22b 4194# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
4195# define XXH_PREFETCH(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
4196# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
4197# define XXH_PREFETCH(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
4198# else
4199# define XXH_PREFETCH(ptr) (void)(ptr) /* disabled */
4200# endif
4201#endif /* XXH_NO_PREFETCH */
4202
4203
4204/* ==========================================
4205 * XXH3 default settings
4206 * ========================================== */
4207
4208#define XXH_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */
4209
4210#if (XXH_SECRET_DEFAULT_SIZE < XXH3_SECRET_SIZE_MIN)
4211# error "default keyset is not large enough"
4212#endif
4213
4214/*! Pseudorandom secret taken directly from FARSH. */
4215XXH_ALIGN(64) static const xxh_u8 XXH3_kSecret[XXH_SECRET_DEFAULT_SIZE] = {
4216 0xb8, 0xfe, 0x6c, 0x39, 0x23, 0xa4, 0x4b, 0xbe, 0x7c, 0x01, 0x81, 0x2c, 0xf7, 0x21, 0xad, 0x1c,
4217 0xde, 0xd4, 0x6d, 0xe9, 0x83, 0x90, 0x97, 0xdb, 0x72, 0x40, 0xa4, 0xa4, 0xb7, 0xb3, 0x67, 0x1f,
4218 0xcb, 0x79, 0xe6, 0x4e, 0xcc, 0xc0, 0xe5, 0x78, 0x82, 0x5a, 0xd0, 0x7d, 0xcc, 0xff, 0x72, 0x21,
4219 0xb8, 0x08, 0x46, 0x74, 0xf7, 0x43, 0x24, 0x8e, 0xe0, 0x35, 0x90, 0xe6, 0x81, 0x3a, 0x26, 0x4c,
4220 0x3c, 0x28, 0x52, 0xbb, 0x91, 0xc3, 0x00, 0xcb, 0x88, 0xd0, 0x65, 0x8b, 0x1b, 0x53, 0x2e, 0xa3,
4221 0x71, 0x64, 0x48, 0x97, 0xa2, 0x0d, 0xf9, 0x4e, 0x38, 0x19, 0xef, 0x46, 0xa9, 0xde, 0xac, 0xd8,
4222 0xa8, 0xfa, 0x76, 0x3f, 0xe3, 0x9c, 0x34, 0x3f, 0xf9, 0xdc, 0xbb, 0xc7, 0xc7, 0x0b, 0x4f, 0x1d,
4223 0x8a, 0x51, 0xe0, 0x4b, 0xcd, 0xb4, 0x59, 0x31, 0xc8, 0x9f, 0x7e, 0xc9, 0xd9, 0x78, 0x73, 0x64,
4224 0xea, 0xc5, 0xac, 0x83, 0x34, 0xd3, 0xeb, 0xc3, 0xc5, 0x81, 0xa0, 0xff, 0xfa, 0x13, 0x63, 0xeb,
4225 0x17, 0x0d, 0xdd, 0x51, 0xb7, 0xf0, 0xda, 0x49, 0xd3, 0x16, 0x55, 0x26, 0x29, 0xd4, 0x68, 0x9e,
4226 0x2b, 0x16, 0xbe, 0x58, 0x7d, 0x47, 0xa1, 0xfc, 0x8f, 0xf8, 0xb8, 0xd1, 0x7a, 0xd0, 0x31, 0xce,
4227 0x45, 0xcb, 0x3a, 0x8f, 0x95, 0x16, 0x04, 0x28, 0xaf, 0xd7, 0xfb, 0xca, 0xbb, 0x4b, 0x40, 0x7e,
4228};
4229
f535537f 4230static const xxh_u64 PRIME_MX1 = 0x165667919E3779F9ULL; /*!< 0b0001011001010110011001111001000110011110001101110111100111111001 */
4231static const xxh_u64 PRIME_MX2 = 0x9FB21C651E98DF25ULL; /*!< 0b1001111110110010000111000110010100011110100110001101111100100101 */
648db22b 4232
4233#ifdef XXH_OLD_NAMES
4234# define kSecret XXH3_kSecret
4235#endif
4236
4237#ifdef XXH_DOXYGEN
4238/*!
4239 * @brief Calculates a 32-bit to 64-bit long multiply.
4240 *
4241 * Implemented as a macro.
4242 *
4243 * Wraps `__emulu` on MSVC x86 because it tends to call `__allmul` when it doesn't
 4244 * need to (but it shouldn't need to anyway; it is about 7 instructions to do
4245 * a 64x64 multiply...). Since we know that this will _always_ emit `MULL`, we
4246 * use that instead of the normal method.
4247 *
4248 * If you are compiling for platforms like Thumb-1 and don't have a better option,
4249 * you may also want to write your own long multiply routine here.
4250 *
4251 * @param x, y Numbers to be multiplied
4252 * @return 64-bit product of the low 32 bits of @p x and @p y.
4253 */
4254XXH_FORCE_INLINE xxh_u64
4255XXH_mult32to64(xxh_u64 x, xxh_u64 y)
4256{
4257 return (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
4258}
4259#elif defined(_MSC_VER) && defined(_M_IX86)
4260# define XXH_mult32to64(x, y) __emulu((unsigned)(x), (unsigned)(y))
4261#else
4262/*
4263 * Downcast + upcast is usually better than masking on older compilers like
4264 * GCC 4.2 (especially 32-bit ones), all without affecting newer compilers.
4265 *
4266 * The other method, (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF), will AND both operands
4267 * and perform a full 64x64 multiply -- entirely redundant on 32-bit.
4268 */
4269# define XXH_mult32to64(x, y) ((xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y))
4270#endif
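/*
 * Illustrative sketch (not part of the library; wrapped in `#if 0`): both
 * spellings of the 32x32->64 multiply compute the same product of the low
 * 32 bits; the downcast+upcast form merely spares older 32-bit compilers
 * from emitting a full 64x64 multiply.
 */
#if 0
static void XXH_mult32to64_equivalence_sketch(xxh_u64 x, xxh_u64 y)
{
    xxh_u64 const masked = (x & 0xFFFFFFFF) * (y & 0xFFFFFFFF);
    xxh_u64 const casted = (xxh_u64)(xxh_u32)(x) * (xxh_u64)(xxh_u32)(y);
    XXH_ASSERT(masked == casted);
}
#endif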
4271
4272/*!
4273 * @brief Calculates a 64->128-bit long multiply.
4274 *
4275 * Uses `__uint128_t` and `_umul128` if available, otherwise uses a scalar
4276 * version.
4277 *
4278 * @param lhs , rhs The 64-bit integers to be multiplied
4279 * @return The 128-bit result represented in an @ref XXH128_hash_t.
4280 */
4281static XXH128_hash_t
4282XXH_mult64to128(xxh_u64 lhs, xxh_u64 rhs)
4283{
4284 /*
4285 * GCC/Clang __uint128_t method.
4286 *
4287 * On most 64-bit targets, GCC and Clang define a __uint128_t type.
4288 * This is usually the best way as it usually uses a native long 64-bit
4289 * multiply, such as MULQ on x86_64 or MUL + UMULH on aarch64.
4290 *
4291 * Usually.
4292 *
 4293 * However, on some 32-bit platforms, Clang (and Emscripten) define this
 4294 * type despite lacking native arithmetic for it. This results in a slow
 4295 * compiler builtin call which calculates a full 128-bit multiply.
4296 * In that case it is best to use the portable one.
4297 * https://github.com/Cyan4973/xxHash/issues/211#issuecomment-515575677
4298 */
4299#if (defined(__GNUC__) || defined(__clang__)) && !defined(__wasm__) \
4300 && defined(__SIZEOF_INT128__) \
4301 || (defined(_INTEGRAL_MAX_BITS) && _INTEGRAL_MAX_BITS >= 128)
4302
4303 __uint128_t const product = (__uint128_t)lhs * (__uint128_t)rhs;
4304 XXH128_hash_t r128;
4305 r128.low64 = (xxh_u64)(product);
4306 r128.high64 = (xxh_u64)(product >> 64);
4307 return r128;
4308
4309 /*
4310 * MSVC for x64's _umul128 method.
4311 *
4312 * xxh_u64 _umul128(xxh_u64 Multiplier, xxh_u64 Multiplicand, xxh_u64 *HighProduct);
4313 *
4314 * This compiles to single operand MUL on x64.
4315 */
4316#elif (defined(_M_X64) || defined(_M_IA64)) && !defined(_M_ARM64EC)
4317
4318#ifndef _MSC_VER
4319# pragma intrinsic(_umul128)
4320#endif
4321 xxh_u64 product_high;
4322 xxh_u64 const product_low = _umul128(lhs, rhs, &product_high);
4323 XXH128_hash_t r128;
4324 r128.low64 = product_low;
4325 r128.high64 = product_high;
4326 return r128;
4327
4328 /*
4329 * MSVC for ARM64's __umulh method.
4330 *
4331 * This compiles to the same MUL + UMULH as GCC/Clang's __uint128_t method.
4332 */
4333#elif defined(_M_ARM64) || defined(_M_ARM64EC)
4334
4335#ifndef _MSC_VER
4336# pragma intrinsic(__umulh)
4337#endif
4338 XXH128_hash_t r128;
4339 r128.low64 = lhs * rhs;
4340 r128.high64 = __umulh(lhs, rhs);
4341 return r128;
4342
4343#else
4344 /*
4345 * Portable scalar method. Optimized for 32-bit and 64-bit ALUs.
4346 *
4347 * This is a fast and simple grade school multiply, which is shown below
4348 * with base 10 arithmetic instead of base 0x100000000.
4349 *
4350 * 9 3 // D2 lhs = 93
4351 * x 7 5 // D2 rhs = 75
4352 * ----------
4353 * 1 5 // D2 lo_lo = (93 % 10) * (75 % 10) = 15
4354 * 4 5 | // D2 hi_lo = (93 / 10) * (75 % 10) = 45
4355 * 2 1 | // D2 lo_hi = (93 % 10) * (75 / 10) = 21
4356 * + 6 3 | | // D2 hi_hi = (93 / 10) * (75 / 10) = 63
4357 * ---------
4358 * 2 7 | // D2 cross = (15 / 10) + (45 % 10) + 21 = 27
4359 * + 6 7 | | // D2 upper = (27 / 10) + (45 / 10) + 63 = 67
4360 * ---------
4361 * 6 9 7 5 // D4 res = (27 * 10) + (15 % 10) + (67 * 100) = 6975
4362 *
4363 * The reasons for adding the products like this are:
4364 * 1. It avoids manual carry tracking. Just like how
4365 * (9 * 9) + 9 + 9 = 99, the same applies with this for UINT64_MAX.
4366 * This avoids a lot of complexity.
4367 *
4368 * 2. It hints for, and on Clang, compiles to, the powerful UMAAL
4369 * instruction available in ARM's Digital Signal Processing extension
4370 * in 32-bit ARMv6 and later, which is shown below:
4371 *
4372 * void UMAAL(xxh_u32 *RdLo, xxh_u32 *RdHi, xxh_u32 Rn, xxh_u32 Rm)
4373 * {
4374 * xxh_u64 product = (xxh_u64)*RdLo * (xxh_u64)*RdHi + Rn + Rm;
4375 * *RdLo = (xxh_u32)(product & 0xFFFFFFFF);
4376 * *RdHi = (xxh_u32)(product >> 32);
4377 * }
4378 *
4379 * This instruction was designed for efficient long multiplication, and
4380 * allows this to be calculated in only 4 instructions at speeds
4381 * comparable to some 64-bit ALUs.
4382 *
4383 * 3. It isn't terrible on other platforms. Usually this will be a couple
4384 * of 32-bit ADD/ADCs.
4385 */
4386
4387 /* First calculate all of the cross products. */
4388 xxh_u64 const lo_lo = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs & 0xFFFFFFFF);
4389 xxh_u64 const hi_lo = XXH_mult32to64(lhs >> 32, rhs & 0xFFFFFFFF);
4390 xxh_u64 const lo_hi = XXH_mult32to64(lhs & 0xFFFFFFFF, rhs >> 32);
4391 xxh_u64 const hi_hi = XXH_mult32to64(lhs >> 32, rhs >> 32);
4392
4393 /* Now add the products together. These will never overflow. */
4394 xxh_u64 const cross = (lo_lo >> 32) + (hi_lo & 0xFFFFFFFF) + lo_hi;
4395 xxh_u64 const upper = (hi_lo >> 32) + (cross >> 32) + hi_hi;
4396 xxh_u64 const lower = (cross << 32) | (lo_lo & 0xFFFFFFFF);
4397
4398 XXH128_hash_t r128;
4399 r128.low64 = lower;
4400 r128.high64 = upper;
4401 return r128;
4402#endif
4403}
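/*
 * Illustrative sanity check (not part of the library; wrapped in `#if 0`):
 * 2^32 * 2^32 = 2^64, so the 128-bit product must be high64 == 1, low64 == 0.
 */
#if 0
static void XXH_mult64to128_sketch(void)
{
    XXH128_hash_t const r = XXH_mult64to128((xxh_u64)1 << 32, (xxh_u64)1 << 32);
    XXH_ASSERT(r.high64 == 1 && r.low64 == 0);
}
#endif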
4404
4405/*!
4406 * @brief Calculates a 64-bit to 128-bit multiply, then XOR folds it.
4407 *
4408 * The reason for the separate function is to prevent passing too many structs
4409 * around by value. This will hopefully inline the multiply, but we don't force it.
4410 *
4411 * @param lhs , rhs The 64-bit integers to multiply
4412 * @return The low 64 bits of the product XOR'd by the high 64 bits.
4413 * @see XXH_mult64to128()
4414 */
4415static xxh_u64
4416XXH3_mul128_fold64(xxh_u64 lhs, xxh_u64 rhs)
4417{
4418 XXH128_hash_t product = XXH_mult64to128(lhs, rhs);
4419 return product.low64 ^ product.high64;
4420}
4421
4422/*! Seems to produce slightly better code on GCC for some reason. */
f535537f 4423XXH_FORCE_INLINE XXH_CONSTF xxh_u64 XXH_xorshift64(xxh_u64 v64, int shift)
648db22b 4424{
4425 XXH_ASSERT(0 <= shift && shift < 64);
4426 return v64 ^ (v64 >> shift);
4427}
4428
4429/*
4430 * This is a fast avalanche stage,
4431 * suitable when input bits are already partially mixed
4432 */
4433static XXH64_hash_t XXH3_avalanche(xxh_u64 h64)
4434{
4435 h64 = XXH_xorshift64(h64, 37);
f535537f 4436 h64 *= PRIME_MX1;
648db22b 4437 h64 = XXH_xorshift64(h64, 32);
4438 return h64;
4439}
4440
4441/*
4442 * This is a stronger avalanche,
4443 * inspired by Pelle Evensen's rrmxmx
4444 * preferable when input has not been previously mixed
4445 */
4446static XXH64_hash_t XXH3_rrmxmx(xxh_u64 h64, xxh_u64 len)
4447{
4448 /* this mix is inspired by Pelle Evensen's rrmxmx */
4449 h64 ^= XXH_rotl64(h64, 49) ^ XXH_rotl64(h64, 24);
f535537f 4450 h64 *= PRIME_MX2;
648db22b 4451 h64 ^= (h64 >> 35) + len;
f535537f 4452 h64 *= PRIME_MX2;
648db22b 4453 return XXH_xorshift64(h64, 28);
4454}
4455
4456
4457/* ==========================================
4458 * Short keys
4459 * ==========================================
4460 * One of the shortcomings of XXH32 and XXH64 was that their performance was
4461 * sub-optimal on short lengths. They used an iterative algorithm which strongly
4462 * favored lengths that were a multiple of 4 or 8.
4463 *
4464 * Instead of iterating over individual inputs, we use a set of single-shot
4465 * functions which piece together a range of lengths and operate in constant time.
4466 *
4467 * Additionally, the number of multiplies has been significantly reduced. This
4468 * reduces latency, especially when emulating 64-bit multiplies on 32-bit.
4469 *
4470 * Depending on the platform, this may or may not be faster than XXH32, but it
4471 * is almost guaranteed to be faster than XXH64.
4472 */
4473
4474/*
4475 * At very short lengths, there isn't enough input to fully hide secrets, or use
4476 * the entire secret.
4477 *
4478 * There is also only a limited amount of mixing we can do before significantly
4479 * impacting performance.
4480 *
4481 * Therefore, we use different sections of the secret and always mix two secret
4482 * samples with an XOR. This should have no effect on performance on the
4483 * seedless or withSeed variants because everything _should_ be constant folded
4484 * by modern compilers.
4485 *
4486 * The XOR mixing hides individual parts of the secret and increases entropy.
4487 *
4488 * This adds an extra layer of strength for custom secrets.
4489 */
f535537f 4490XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
648db22b 4491XXH3_len_1to3_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
4492{
4493 XXH_ASSERT(input != NULL);
4494 XXH_ASSERT(1 <= len && len <= 3);
4495 XXH_ASSERT(secret != NULL);
4496 /*
4497 * len = 1: combined = { input[0], 0x01, input[0], input[0] }
4498 * len = 2: combined = { input[1], 0x02, input[0], input[1] }
4499 * len = 3: combined = { input[2], 0x03, input[0], input[1] }
4500 */
4501 { xxh_u8 const c1 = input[0];
4502 xxh_u8 const c2 = input[len >> 1];
4503 xxh_u8 const c3 = input[len - 1];
4504 xxh_u32 const combined = ((xxh_u32)c1 << 16) | ((xxh_u32)c2 << 24)
4505 | ((xxh_u32)c3 << 0) | ((xxh_u32)len << 8);
4506 xxh_u64 const bitflip = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
4507 xxh_u64 const keyed = (xxh_u64)combined ^ bitflip;
4508 return XXH64_avalanche(keyed);
4509 }
4510}
4511
f535537f 4512XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
648db22b 4513XXH3_len_4to8_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
4514{
4515 XXH_ASSERT(input != NULL);
4516 XXH_ASSERT(secret != NULL);
4517 XXH_ASSERT(4 <= len && len <= 8);
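    /*
     * Added note: the next line folds a byte-swapped copy of the seed's low
     * 32 bits into its high 32 bits; e.g. seed 0x00000000AABBCCDD becomes
     * 0xDDCCBBAAAABBCCDD before the bitflip subtraction below.
     */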
4518 seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
4519 { xxh_u32 const input1 = XXH_readLE32(input);
4520 xxh_u32 const input2 = XXH_readLE32(input + len - 4);
4521 xxh_u64 const bitflip = (XXH_readLE64(secret+8) ^ XXH_readLE64(secret+16)) - seed;
4522 xxh_u64 const input64 = input2 + (((xxh_u64)input1) << 32);
4523 xxh_u64 const keyed = input64 ^ bitflip;
4524 return XXH3_rrmxmx(keyed, len);
4525 }
4526}
4527
f535537f 4528XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
648db22b 4529XXH3_len_9to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
4530{
4531 XXH_ASSERT(input != NULL);
4532 XXH_ASSERT(secret != NULL);
4533 XXH_ASSERT(9 <= len && len <= 16);
4534 { xxh_u64 const bitflip1 = (XXH_readLE64(secret+24) ^ XXH_readLE64(secret+32)) + seed;
4535 xxh_u64 const bitflip2 = (XXH_readLE64(secret+40) ^ XXH_readLE64(secret+48)) - seed;
4536 xxh_u64 const input_lo = XXH_readLE64(input) ^ bitflip1;
4537 xxh_u64 const input_hi = XXH_readLE64(input + len - 8) ^ bitflip2;
4538 xxh_u64 const acc = len
4539 + XXH_swap64(input_lo) + input_hi
4540 + XXH3_mul128_fold64(input_lo, input_hi);
4541 return XXH3_avalanche(acc);
4542 }
4543}
4544
f535537f 4545XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
648db22b 4546XXH3_len_0to16_64b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
4547{
4548 XXH_ASSERT(len <= 16);
4549 { if (XXH_likely(len > 8)) return XXH3_len_9to16_64b(input, len, secret, seed);
4550 if (XXH_likely(len >= 4)) return XXH3_len_4to8_64b(input, len, secret, seed);
4551 if (len) return XXH3_len_1to3_64b(input, len, secret, seed);
4552 return XXH64_avalanche(seed ^ (XXH_readLE64(secret+56) ^ XXH_readLE64(secret+64)));
4553 }
4554}
4555
4556/*
4557 * DISCLAIMER: There are known *seed-dependent* multicollisions here due to
4558 * multiplication by zero, affecting hashes of lengths 17 to 240.
4559 *
4560 * However, they are very unlikely.
4561 *
4562 * Keep this in mind when using the unseeded XXH3_64bits() variant: As with all
4563 * unseeded non-cryptographic hashes, it does not attempt to defend itself
4564 * against specially crafted inputs, only random inputs.
4565 *
4566 * Compared to classic UMAC where a 1 in 2^31 chance of 4 consecutive bytes
4567 * cancelling out the secret is taken an arbitrary number of times (addressed
4568 * in XXH3_accumulate_512), this collision is very unlikely with random inputs
4569 * and/or proper seeding:
4570 *
4571 * This only has a 1 in 2^63 chance of 8 consecutive bytes cancelling out, in a
4572 * function that is only called up to 16 times per hash with up to 240 bytes of
4573 * input.
4574 *
4575 * This is not too bad for a non-cryptographic hash function, especially with
4576 * only 64-bit outputs.
4577 *
4578 * The 128-bit variant (which trades some speed for strength) is NOT affected
4579 * by this, although it is always a good idea to use a proper seed if you care
4580 * about strength.
4581 */
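/*
 * Concretely (an illustrative sketch, not library code): in XXH3_mix16B()
 * below, if an input satisfies
 *
 *     XXH_readLE64(input) == XXH_readLE64(secret) + seed64
 *
 * then the first operand of XXH3_mul128_fold64() is 0, and since
 * XXH3_mul128_fold64(0, x) == 0 for any x, those 16 input bytes contribute
 * nothing. Either keyed half matching happens with probability about 2^-63
 * for random inputs, which is the figure quoted above.
 */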
4582XXH_FORCE_INLINE xxh_u64 XXH3_mix16B(const xxh_u8* XXH_RESTRICT input,
4583 const xxh_u8* XXH_RESTRICT secret, xxh_u64 seed64)
4584{
4585#if defined(__GNUC__) && !defined(__clang__) /* GCC, not Clang */ \
4586 && defined(__i386__) && defined(__SSE2__) /* x86 + SSE2 */ \
4587 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable like XXH32 hack */
4588 /*
4589 * UGLY HACK:
4590 * GCC for x86 tends to autovectorize the 128-bit multiply, resulting in
4591 * slower code.
4592 *
4593 * By forcing seed64 into a register, we disrupt the cost model and
4594 * cause it to scalarize. See `XXH32_round()`
4595 *
4596 * FIXME: Clang's output is still _much_ faster -- On an AMD Ryzen 3600,
4597 * XXH3_64bits @ len=240 runs at 4.6 GB/s with Clang 9, but 3.3 GB/s on
4598 * GCC 9.2, despite both emitting scalar code.
4599 *
4600 * GCC generates much better scalar code than Clang for the rest of XXH3,
4601 * which is why finding a more optimal codepath is of interest.
4602 */
4603 XXH_COMPILER_GUARD(seed64);
4604#endif
4605 { xxh_u64 const input_lo = XXH_readLE64(input);
4606 xxh_u64 const input_hi = XXH_readLE64(input+8);
4607 return XXH3_mul128_fold64(
4608 input_lo ^ (XXH_readLE64(secret) + seed64),
4609 input_hi ^ (XXH_readLE64(secret+8) - seed64)
4610 );
4611 }
4612}
4613
4614/* For mid range keys, XXH3 uses a Mum-hash variant. */
f535537f 4615XXH_FORCE_INLINE XXH_PUREF XXH64_hash_t
648db22b 4616XXH3_len_17to128_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
4617 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
4618 XXH64_hash_t seed)
4619{
4620 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
4621 XXH_ASSERT(16 < len && len <= 128);
4622
4623 { xxh_u64 acc = len * XXH_PRIME64_1;
f535537f 4624#if XXH_SIZE_OPT >= 1
4625 /* Smaller and cleaner, but slightly slower. */
4626 unsigned int i = (unsigned int)(len - 1) / 32;
4627 do {
4628            acc += XXH3_mix16B(input+16*i, secret+32*i, seed);
4629 acc += XXH3_mix16B(input+len-16*(i+1), secret+32*i+16, seed);
4630 } while (i-- != 0);
4631#else
648db22b 4632 if (len > 32) {
4633 if (len > 64) {
4634 if (len > 96) {
4635 acc += XXH3_mix16B(input+48, secret+96, seed);
4636 acc += XXH3_mix16B(input+len-64, secret+112, seed);
4637 }
4638 acc += XXH3_mix16B(input+32, secret+64, seed);
4639 acc += XXH3_mix16B(input+len-48, secret+80, seed);
4640 }
4641 acc += XXH3_mix16B(input+16, secret+32, seed);
4642 acc += XXH3_mix16B(input+len-32, secret+48, seed);
4643 }
4644 acc += XXH3_mix16B(input+0, secret+0, seed);
4645 acc += XXH3_mix16B(input+len-16, secret+16, seed);
f535537f 4646#endif
648db22b 4647 return XXH3_avalanche(acc);
4648 }
4649}
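/*
 * Worked example (added): for len == 100, all three nested ifs above fire,
 * producing eight XXH3_mix16B() calls: four from the front at input+0, +16,
 * +32, +48 (covering [0,64)) and four from the back at input+len-16, -32,
 * -48, -64 (covering [36,100)). The overlapping front/back windows guarantee
 * every byte is read at least once for any length in (16, 128].
 */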
4650
f535537f 4651/*!
4652 * @brief Maximum size of "short" key in bytes.
4653 */
648db22b 4654#define XXH3_MIDSIZE_MAX 240
4655
f535537f 4656XXH_NO_INLINE XXH_PUREF XXH64_hash_t
648db22b 4657XXH3_len_129to240_64b(const xxh_u8* XXH_RESTRICT input, size_t len,
4658 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
4659 XXH64_hash_t seed)
4660{
4661 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
4662 XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
4663
4664 #define XXH3_MIDSIZE_STARTOFFSET 3
4665 #define XXH3_MIDSIZE_LASTOFFSET 17
4666
4667 { xxh_u64 acc = len * XXH_PRIME64_1;
f535537f 4668 xxh_u64 acc_end;
4669 unsigned int const nbRounds = (unsigned int)len / 16;
4670 unsigned int i;
4671 XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);
648db22b 4672 for (i=0; i<8; i++) {
4673 acc += XXH3_mix16B(input+(16*i), secret+(16*i), seed);
4674 }
f535537f 4675 /* last bytes */
4676 acc_end = XXH3_mix16B(input + len - 16, secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET, seed);
648db22b 4677 XXH_ASSERT(nbRounds >= 8);
f535537f 4678 acc = XXH3_avalanche(acc);
648db22b 4679#if defined(__clang__) /* Clang */ \
4680 && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
4681 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
4682 /*
4683 * UGLY HACK:
4684 * Clang for ARMv7-A tries to vectorize this loop, similar to GCC x86.
4685     * Everywhere else, it uses scalar code.
4686 *
4687 * For 64->128-bit multiplies, even if the NEON was 100% optimal, it
4688 * would still be slower than UMAAL (see XXH_mult64to128).
4689 *
4690 * Unfortunately, Clang doesn't handle the long multiplies properly and
4691 * converts them to the nonexistent "vmulq_u64" intrinsic, which is then
4692 * scalarized into an ugly mess of VMOV.32 instructions.
4693 *
4694 * This mess is difficult to avoid without turning autovectorization
4695 * off completely, but they are usually relatively minor and/or not
4696 * worth it to fix.
4697 *
4698 * This loop is the easiest to fix, as unlike XXH32, this pragma
4699 * _actually works_ because it is a loop vectorization instead of an
4700 * SLP vectorization.
4701 */
4702 #pragma clang loop vectorize(disable)
4703#endif
4704 for (i=8 ; i < nbRounds; i++) {
f535537f 4705 /*
4706             * Prevents Clang from unrolling the acc loop and interleaving it with this one.
4707 */
4708 XXH_COMPILER_GUARD(acc);
4709 acc_end += XXH3_mix16B(input+(16*i), secret+(16*(i-8)) + XXH3_MIDSIZE_STARTOFFSET, seed);
648db22b 4710 }
f535537f 4711 return XXH3_avalanche(acc + acc_end);
648db22b 4712 }
4713}
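/*
 * Worked example (added): for len == 240, nbRounds == 15. Rounds 0..7 are
 * mixed with secret[0..128), then acc is avalanched once. acc_end starts from
 * the final 16 bytes keyed with secret + XXH3_SECRET_SIZE_MIN -
 * XXH3_MIDSIZE_LASTOFFSET, and rounds 8..14 are added on top, re-reading the
 * secret from offset XXH3_MIDSIZE_STARTOFFSET.
 */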
4714
4715
4716/* ======= Long Keys ======= */
4717
4718#define XXH_STRIPE_LEN 64
4719#define XXH_SECRET_CONSUME_RATE 8 /* nb of secret bytes consumed at each accumulation */
4720#define XXH_ACC_NB (XXH_STRIPE_LEN / sizeof(xxh_u64))
4721
4722#ifdef XXH_OLD_NAMES
4723# define STRIPE_LEN XXH_STRIPE_LEN
4724# define ACC_NB XXH_ACC_NB
4725#endif
4726
f535537f 4727#ifndef XXH_PREFETCH_DIST
4728# ifdef __clang__
4729# define XXH_PREFETCH_DIST 320
4730# else
4731# if (XXH_VECTOR == XXH_AVX512)
4732# define XXH_PREFETCH_DIST 512
4733# else
4734# define XXH_PREFETCH_DIST 384
4735# endif
4736# endif /* __clang__ */
4737#endif /* XXH_PREFETCH_DIST */
4738
4739/*
4740 * These macros are to generate an XXH3_accumulate() function.
4741 * The two arguments select the name suffix and target attribute.
4742 *
4743 * The name of this symbol is XXH3_accumulate_<name>() and it calls
4744 * XXH3_accumulate_512_<name>().
4745 *
4746 * It may be useful to hand implement this function if the compiler fails to
4747 * optimize the inline function.
4748 */
4749#define XXH3_ACCUMULATE_TEMPLATE(name) \
4750void \
4751XXH3_accumulate_##name(xxh_u64* XXH_RESTRICT acc, \
4752 const xxh_u8* XXH_RESTRICT input, \
4753 const xxh_u8* XXH_RESTRICT secret, \
4754 size_t nbStripes) \
4755{ \
4756 size_t n; \
4757 for (n = 0; n < nbStripes; n++ ) { \
4758 const xxh_u8* const in = input + n*XXH_STRIPE_LEN; \
4759 XXH_PREFETCH(in + XXH_PREFETCH_DIST); \
4760 XXH3_accumulate_512_##name( \
4761 acc, \
4762 in, \
4763 secret + n*XXH_SECRET_CONSUME_RATE); \
4764 } \
4765}
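/*
 * For illustration, XXH3_ACCUMULATE_TEMPLATE(scalar) expands to:
 *
 *     void XXH3_accumulate_scalar(xxh_u64* XXH_RESTRICT acc,
 *                                 const xxh_u8* XXH_RESTRICT input,
 *                                 const xxh_u8* XXH_RESTRICT secret,
 *                                 size_t nbStripes);
 *
 * i.e. a loop that prefetches XXH_PREFETCH_DIST bytes ahead and feeds one
 * 64-byte stripe per iteration to XXH3_accumulate_512_scalar(), advancing the
 * secret by XXH_SECRET_CONSUME_RATE (8) bytes per stripe.
 */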
4766
4767
648db22b 4768XXH_FORCE_INLINE void XXH_writeLE64(void* dst, xxh_u64 v64)
4769{
4770 if (!XXH_CPU_LITTLE_ENDIAN) v64 = XXH_swap64(v64);
4771 XXH_memcpy(dst, &v64, sizeof(v64));
4772}
4773
4774/* Several intrinsic functions below are supposed to accept __int64 as arguments,
4775 * as documented in https://software.intel.com/sites/landingpage/IntrinsicsGuide/ .
4776 * However, several environments do not define __int64 type,
4777 * requiring a workaround.
4778 */
4779#if !defined (__VMS) \
4780 && (defined (__cplusplus) \
4781 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
4782 typedef int64_t xxh_i64;
4783#else
4784 /* the following type must have a width of 64-bit */
4785 typedef long long xxh_i64;
4786#endif
4787
4788
4789/*
4790 * XXH3_accumulate_512 is the tightest loop for long inputs, and it is the most optimized.
4791 *
4792 * It is a hardened version of UMAC, based on FARSH's implementation.
4793 *
4794 * This was chosen because it adapts quite well to 32-bit, 64-bit, and SIMD
4795 * implementations, and it is ridiculously fast.
4796 *
4797 * We harden it by mixing the original input to the accumulators as well as the product.
4798 *
4799 * This means that in the (relatively likely) case of a multiply by zero, the
4800 * original input is preserved.
4801 *
4802 * On 128-bit inputs, we swap 64-bit pairs when we add the input to improve
4803 * cross-pollination, as otherwise the upper and lower halves would be
4804 * essentially independent.
4805 *
4806 * This doesn't matter on 64-bit hashes since they all get merged together in
4807 * the end, so we skip the extra step.
4808 *
4809 * Both XXH3_64bits and XXH3_128bits use this subroutine.
4810 */
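/*
 * In scalar terms, each of the 8 lanes of an accumulate step performs
 * (see XXH3_scalarRound() further below):
 *
 *     xxh_u64 data_key = input64[i] ^ secret64[i];
 *     acc[i ^ 1] += input64[i];                            // preserve raw input
 *     acc[i]     += (data_key & 0xFFFFFFFF) * (data_key >> 32);
 *
 * where input64[] and secret64[] stand for the little-endian 64-bit words of
 * the stripe and secret. The SIMD variants below compute exactly this in bulk.
 */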
4811
4812#if (XXH_VECTOR == XXH_AVX512) \
4813 || (defined(XXH_DISPATCH_AVX512) && XXH_DISPATCH_AVX512 != 0)
4814
4815#ifndef XXH_TARGET_AVX512
4816# define XXH_TARGET_AVX512 /* disable attribute target */
4817#endif
4818
4819XXH_FORCE_INLINE XXH_TARGET_AVX512 void
4820XXH3_accumulate_512_avx512(void* XXH_RESTRICT acc,
4821 const void* XXH_RESTRICT input,
4822 const void* XXH_RESTRICT secret)
4823{
4824 __m512i* const xacc = (__m512i *) acc;
4825 XXH_ASSERT((((size_t)acc) & 63) == 0);
4826 XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
4827
4828 {
4829 /* data_vec = input[0]; */
4830 __m512i const data_vec = _mm512_loadu_si512 (input);
4831 /* key_vec = secret[0]; */
4832 __m512i const key_vec = _mm512_loadu_si512 (secret);
4833 /* data_key = data_vec ^ key_vec; */
4834 __m512i const data_key = _mm512_xor_si512 (data_vec, key_vec);
4835 /* data_key_lo = data_key >> 32; */
f535537f 4836 __m512i const data_key_lo = _mm512_srli_epi64 (data_key, 32);
648db22b 4837 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
4838 __m512i const product = _mm512_mul_epu32 (data_key, data_key_lo);
4839 /* xacc[0] += swap(data_vec); */
4840 __m512i const data_swap = _mm512_shuffle_epi32(data_vec, (_MM_PERM_ENUM)_MM_SHUFFLE(1, 0, 3, 2));
4841 __m512i const sum = _mm512_add_epi64(*xacc, data_swap);
4842 /* xacc[0] += product; */
4843 *xacc = _mm512_add_epi64(product, sum);
4844 }
4845}
f535537f 4846XXH_FORCE_INLINE XXH_TARGET_AVX512 XXH3_ACCUMULATE_TEMPLATE(avx512)
648db22b 4847
4848/*
4849 * XXH3_scrambleAcc: Scrambles the accumulators to improve mixing.
4850 *
4851 * Multiplication isn't perfect, as explained by Google in HighwayHash:
4852 *
4853 * // Multiplication mixes/scrambles bytes 0-7 of the 64-bit result to
4854 * // varying degrees. In descending order of goodness, bytes
4855 * // 3 4 2 5 1 6 0 7 have quality 228 224 164 160 100 96 36 32.
4856 * // As expected, the upper and lower bytes are much worse.
4857 *
4858 * Source: https://github.com/google/highwayhash/blob/0aaf66b/highwayhash/hh_avx2.h#L291
4859 *
4860 * Since our algorithm uses a pseudorandom secret to add some variance into the
4861 * mix, we don't need to (or want to) mix as often or as much as HighwayHash does.
4862 *
4863 * This isn't as tight as XXH3_accumulate, but still written in SIMD to avoid
4864 * extraction.
4865 *
4866 * Both XXH3_64bits and XXH3_128bits use this subroutine.
4867 */
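/*
 * Per 64-bit lane, the scramble reduces to (see XXH3_scalarScrambleRound()):
 *
 *     acc[i] = (acc[i] ^ (acc[i] >> 47) ^ secret64[i]) * XXH_PRIME32_1;
 *
 * an xorshift, a secret XOR, and a 32-bit prime multiply.
 */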
4868
4869XXH_FORCE_INLINE XXH_TARGET_AVX512 void
4870XXH3_scrambleAcc_avx512(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4871{
4872 XXH_ASSERT((((size_t)acc) & 63) == 0);
4873 XXH_STATIC_ASSERT(XXH_STRIPE_LEN == sizeof(__m512i));
4874 { __m512i* const xacc = (__m512i*) acc;
4875 const __m512i prime32 = _mm512_set1_epi32((int)XXH_PRIME32_1);
4876
4877 /* xacc[0] ^= (xacc[0] >> 47) */
4878 __m512i const acc_vec = *xacc;
4879 __m512i const shifted = _mm512_srli_epi64 (acc_vec, 47);
648db22b 4880 /* xacc[0] ^= secret; */
4881 __m512i const key_vec = _mm512_loadu_si512 (secret);
f535537f 4882 __m512i const data_key = _mm512_ternarylogic_epi32(key_vec, acc_vec, shifted, 0x96 /* key_vec ^ acc_vec ^ shifted */);
648db22b 4883
4884 /* xacc[0] *= XXH_PRIME32_1; */
f535537f 4885 __m512i const data_key_hi = _mm512_srli_epi64 (data_key, 32);
648db22b 4886 __m512i const prod_lo = _mm512_mul_epu32 (data_key, prime32);
4887 __m512i const prod_hi = _mm512_mul_epu32 (data_key_hi, prime32);
4888 *xacc = _mm512_add_epi64(prod_lo, _mm512_slli_epi64(prod_hi, 32));
4889 }
4890}
4891
4892XXH_FORCE_INLINE XXH_TARGET_AVX512 void
4893XXH3_initCustomSecret_avx512(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4894{
4895 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 63) == 0);
4896 XXH_STATIC_ASSERT(XXH_SEC_ALIGN == 64);
4897 XXH_ASSERT(((size_t)customSecret & 63) == 0);
4898 (void)(&XXH_writeLE64);
4899 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m512i);
f535537f 4900 __m512i const seed_pos = _mm512_set1_epi64((xxh_i64)seed64);
4901 __m512i const seed = _mm512_mask_sub_epi64(seed_pos, 0xAA, _mm512_set1_epi8(0), seed_pos);
648db22b 4902
4903 const __m512i* const src = (const __m512i*) ((const void*) XXH3_kSecret);
4904 __m512i* const dest = ( __m512i*) customSecret;
4905 int i;
4906 XXH_ASSERT(((size_t)src & 63) == 0); /* control alignment */
4907 XXH_ASSERT(((size_t)dest & 63) == 0);
4908 for (i=0; i < nbRounds; ++i) {
f535537f 4909 dest[i] = _mm512_add_epi64(_mm512_load_si512(src + i), seed);
648db22b 4910 } }
4911}
4912
4913#endif
4914
4915#if (XXH_VECTOR == XXH_AVX2) \
4916 || (defined(XXH_DISPATCH_AVX2) && XXH_DISPATCH_AVX2 != 0)
4917
4918#ifndef XXH_TARGET_AVX2
4919# define XXH_TARGET_AVX2 /* disable attribute target */
4920#endif
4921
4922XXH_FORCE_INLINE XXH_TARGET_AVX2 void
4923XXH3_accumulate_512_avx2( void* XXH_RESTRICT acc,
4924 const void* XXH_RESTRICT input,
4925 const void* XXH_RESTRICT secret)
4926{
4927 XXH_ASSERT((((size_t)acc) & 31) == 0);
4928 { __m256i* const xacc = (__m256i *) acc;
4929 /* Unaligned. This is mainly for pointer arithmetic, and because
4930 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
4931 const __m256i* const xinput = (const __m256i *) input;
4932 /* Unaligned. This is mainly for pointer arithmetic, and because
4933 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
4934 const __m256i* const xsecret = (const __m256i *) secret;
4935
4936 size_t i;
4937 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
4938 /* data_vec = xinput[i]; */
4939 __m256i const data_vec = _mm256_loadu_si256 (xinput+i);
4940 /* key_vec = xsecret[i]; */
4941 __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
4942 /* data_key = data_vec ^ key_vec; */
4943 __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
4944 /* data_key_lo = data_key >> 32; */
f535537f 4945 __m256i const data_key_lo = _mm256_srli_epi64 (data_key, 32);
648db22b 4946 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
4947 __m256i const product = _mm256_mul_epu32 (data_key, data_key_lo);
4948 /* xacc[i] += swap(data_vec); */
4949 __m256i const data_swap = _mm256_shuffle_epi32(data_vec, _MM_SHUFFLE(1, 0, 3, 2));
4950 __m256i const sum = _mm256_add_epi64(xacc[i], data_swap);
4951 /* xacc[i] += product; */
4952 xacc[i] = _mm256_add_epi64(product, sum);
4953 } }
4954}
f535537f 4955XXH_FORCE_INLINE XXH_TARGET_AVX2 XXH3_ACCUMULATE_TEMPLATE(avx2)
648db22b 4956
4957XXH_FORCE_INLINE XXH_TARGET_AVX2 void
4958XXH3_scrambleAcc_avx2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
4959{
4960 XXH_ASSERT((((size_t)acc) & 31) == 0);
4961 { __m256i* const xacc = (__m256i*) acc;
4962 /* Unaligned. This is mainly for pointer arithmetic, and because
4963 * _mm256_loadu_si256 requires a const __m256i * pointer for some reason. */
4964 const __m256i* const xsecret = (const __m256i *) secret;
4965 const __m256i prime32 = _mm256_set1_epi32((int)XXH_PRIME32_1);
4966
4967 size_t i;
4968 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m256i); i++) {
4969 /* xacc[i] ^= (xacc[i] >> 47) */
4970 __m256i const acc_vec = xacc[i];
4971 __m256i const shifted = _mm256_srli_epi64 (acc_vec, 47);
4972 __m256i const data_vec = _mm256_xor_si256 (acc_vec, shifted);
4973 /* xacc[i] ^= xsecret; */
4974 __m256i const key_vec = _mm256_loadu_si256 (xsecret+i);
4975 __m256i const data_key = _mm256_xor_si256 (data_vec, key_vec);
4976
4977 /* xacc[i] *= XXH_PRIME32_1; */
f535537f 4978 __m256i const data_key_hi = _mm256_srli_epi64 (data_key, 32);
648db22b 4979 __m256i const prod_lo = _mm256_mul_epu32 (data_key, prime32);
4980 __m256i const prod_hi = _mm256_mul_epu32 (data_key_hi, prime32);
4981 xacc[i] = _mm256_add_epi64(prod_lo, _mm256_slli_epi64(prod_hi, 32));
4982 }
4983 }
4984}
4985
4986XXH_FORCE_INLINE XXH_TARGET_AVX2 void XXH3_initCustomSecret_avx2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
4987{
4988 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 31) == 0);
4989 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE / sizeof(__m256i)) == 6);
4990 XXH_STATIC_ASSERT(XXH_SEC_ALIGN <= 64);
4991 (void)(&XXH_writeLE64);
4992 XXH_PREFETCH(customSecret);
4993 { __m256i const seed = _mm256_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64, (xxh_i64)(0U - seed64), (xxh_i64)seed64);
4994
4995 const __m256i* const src = (const __m256i*) ((const void*) XXH3_kSecret);
4996 __m256i* dest = ( __m256i*) customSecret;
4997
4998# if defined(__GNUC__) || defined(__clang__)
4999 /*
5000     * On GCC & Clang, marking 'dest' as modified causes the compiler to:
5001     * - not extract the secret from SSE registers in the internal loop
5002     * - use fewer common registers, and avoid pushing these regs onto the stack
5003 */
5004 XXH_COMPILER_GUARD(dest);
5005# endif
5006 XXH_ASSERT(((size_t)src & 31) == 0); /* control alignment */
5007 XXH_ASSERT(((size_t)dest & 31) == 0);
5008
5009        /* GCC -O2 needs this loop unrolled manually */
f535537f 5010 dest[0] = _mm256_add_epi64(_mm256_load_si256(src+0), seed);
5011 dest[1] = _mm256_add_epi64(_mm256_load_si256(src+1), seed);
5012 dest[2] = _mm256_add_epi64(_mm256_load_si256(src+2), seed);
5013 dest[3] = _mm256_add_epi64(_mm256_load_si256(src+3), seed);
5014 dest[4] = _mm256_add_epi64(_mm256_load_si256(src+4), seed);
5015 dest[5] = _mm256_add_epi64(_mm256_load_si256(src+5), seed);
648db22b 5016 }
5017}
5018
5019#endif
5020
5021/* x86dispatch always generates SSE2 */
5022#if (XXH_VECTOR == XXH_SSE2) || defined(XXH_X86DISPATCH)
5023
5024#ifndef XXH_TARGET_SSE2
5025# define XXH_TARGET_SSE2 /* disable attribute target */
5026#endif
5027
5028XXH_FORCE_INLINE XXH_TARGET_SSE2 void
5029XXH3_accumulate_512_sse2( void* XXH_RESTRICT acc,
5030 const void* XXH_RESTRICT input,
5031 const void* XXH_RESTRICT secret)
5032{
5033 /* SSE2 is just a half-scale version of the AVX2 version. */
5034 XXH_ASSERT((((size_t)acc) & 15) == 0);
5035 { __m128i* const xacc = (__m128i *) acc;
5036 /* Unaligned. This is mainly for pointer arithmetic, and because
5037 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
5038 const __m128i* const xinput = (const __m128i *) input;
5039 /* Unaligned. This is mainly for pointer arithmetic, and because
5040 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
5041 const __m128i* const xsecret = (const __m128i *) secret;
5042
5043 size_t i;
5044 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
5045 /* data_vec = xinput[i]; */
5046 __m128i const data_vec = _mm_loadu_si128 (xinput+i);
5047 /* key_vec = xsecret[i]; */
5048 __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
5049 /* data_key = data_vec ^ key_vec; */
5050 __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
5051 /* data_key_lo = data_key >> 32; */
5052 __m128i const data_key_lo = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
5053 /* product = (data_key & 0xffffffff) * (data_key_lo & 0xffffffff); */
5054 __m128i const product = _mm_mul_epu32 (data_key, data_key_lo);
5055 /* xacc[i] += swap(data_vec); */
5056 __m128i const data_swap = _mm_shuffle_epi32(data_vec, _MM_SHUFFLE(1,0,3,2));
5057 __m128i const sum = _mm_add_epi64(xacc[i], data_swap);
5058 /* xacc[i] += product; */
5059 xacc[i] = _mm_add_epi64(product, sum);
5060 } }
5061}
f535537f 5062XXH_FORCE_INLINE XXH_TARGET_SSE2 XXH3_ACCUMULATE_TEMPLATE(sse2)
648db22b 5063
5064XXH_FORCE_INLINE XXH_TARGET_SSE2 void
5065XXH3_scrambleAcc_sse2(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
5066{
5067 XXH_ASSERT((((size_t)acc) & 15) == 0);
5068 { __m128i* const xacc = (__m128i*) acc;
5069 /* Unaligned. This is mainly for pointer arithmetic, and because
5070 * _mm_loadu_si128 requires a const __m128i * pointer for some reason. */
5071 const __m128i* const xsecret = (const __m128i *) secret;
5072 const __m128i prime32 = _mm_set1_epi32((int)XXH_PRIME32_1);
5073
5074 size_t i;
5075 for (i=0; i < XXH_STRIPE_LEN/sizeof(__m128i); i++) {
5076 /* xacc[i] ^= (xacc[i] >> 47) */
5077 __m128i const acc_vec = xacc[i];
5078 __m128i const shifted = _mm_srli_epi64 (acc_vec, 47);
5079 __m128i const data_vec = _mm_xor_si128 (acc_vec, shifted);
5080 /* xacc[i] ^= xsecret[i]; */
5081 __m128i const key_vec = _mm_loadu_si128 (xsecret+i);
5082 __m128i const data_key = _mm_xor_si128 (data_vec, key_vec);
5083
5084 /* xacc[i] *= XXH_PRIME32_1; */
5085 __m128i const data_key_hi = _mm_shuffle_epi32 (data_key, _MM_SHUFFLE(0, 3, 0, 1));
5086 __m128i const prod_lo = _mm_mul_epu32 (data_key, prime32);
5087 __m128i const prod_hi = _mm_mul_epu32 (data_key_hi, prime32);
5088 xacc[i] = _mm_add_epi64(prod_lo, _mm_slli_epi64(prod_hi, 32));
5089 }
5090 }
5091}
5092
5093XXH_FORCE_INLINE XXH_TARGET_SSE2 void XXH3_initCustomSecret_sse2(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
5094{
5095 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
5096 (void)(&XXH_writeLE64);
5097 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / sizeof(__m128i);
5098
5099# if defined(_MSC_VER) && defined(_M_IX86) && _MSC_VER < 1900
5100 /* MSVC 32bit mode does not support _mm_set_epi64x before 2015 */
5101 XXH_ALIGN(16) const xxh_i64 seed64x2[2] = { (xxh_i64)seed64, (xxh_i64)(0U - seed64) };
5102 __m128i const seed = _mm_load_si128((__m128i const*)seed64x2);
5103# else
5104 __m128i const seed = _mm_set_epi64x((xxh_i64)(0U - seed64), (xxh_i64)seed64);
5105# endif
5106 int i;
5107
5108 const void* const src16 = XXH3_kSecret;
5109 __m128i* dst16 = (__m128i*) customSecret;
5110# if defined(__GNUC__) || defined(__clang__)
5111 /*
5112     * On GCC & Clang, marking 'dst16' as modified causes the compiler to:
5113     * - not extract the secret from SSE registers in the internal loop
5114     * - use fewer common registers, and avoid pushing these regs onto the stack
5115 */
5116 XXH_COMPILER_GUARD(dst16);
5117# endif
5118 XXH_ASSERT(((size_t)src16 & 15) == 0); /* control alignment */
5119 XXH_ASSERT(((size_t)dst16 & 15) == 0);
5120
5121 for (i=0; i < nbRounds; ++i) {
5122 dst16[i] = _mm_add_epi64(_mm_load_si128((const __m128i *)src16+i), seed);
5123 } }
5124}
5125
5126#endif
5127
5128#if (XXH_VECTOR == XXH_NEON)
5129
5130/* forward declarations for the scalar routines */
5131XXH_FORCE_INLINE void
5132XXH3_scalarRound(void* XXH_RESTRICT acc, void const* XXH_RESTRICT input,
5133 void const* XXH_RESTRICT secret, size_t lane);
5134
5135XXH_FORCE_INLINE void
5136XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
5137 void const* XXH_RESTRICT secret, size_t lane);
5138
5139/*!
5140 * @internal
f535537f 5141 * @brief The bulk processing loop for NEON and WASM SIMD128.
648db22b 5142 *
5143 * The NEON code path is actually partially scalar when running on AArch64. This
5144 * is to optimize the pipelining and can give up to a 15% speedup depending on the
5145 * CPU, and it also mitigates some GCC codegen issues.
5146 *
5147 * @see XXH3_NEON_LANES for configuring this and details about this optimization.
f535537f 5148 *
5149 * NEON's 32-bit to 64-bit long multiply takes a half vector of 32-bit
5150 * integers instead of the other platforms which mask full 64-bit vectors,
5151 * so the setup is more complicated than just shifting right.
5152 *
5153 * Additionally, there is an optimization for 4 lanes at once noted below.
5154 *
5155 * Since, as stated, the optimal number of lanes for Cortex CPUs is 6,
5156 * there need to be *three* versions of the accumulate operation: one for
5157 * 4 NEON lanes at a time, one for the remaining 2 NEON lanes, and scalar.
5158 *
5159 * WASM's SIMD128 uses SIMDe's arm_neon.h polyfill because the intrinsics overlap
5160 * nearly perfectly.
648db22b 5161 */
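/*
 * Concretely, assuming the default XXH3_NEON_LANES == 6 on AArch64 (check its
 * definition for your target), the 8 accumulator lanes split as: lanes 0-3 in
 * the 4-at-a-time NEON loop, lanes 4-5 in the 2-at-a-time NEON loop, and
 * lanes 6-7 in XXH3_scalarRound().
 */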
f535537f 5162
648db22b 5163XXH_FORCE_INLINE void
5164XXH3_accumulate_512_neon( void* XXH_RESTRICT acc,
5165 const void* XXH_RESTRICT input,
5166 const void* XXH_RESTRICT secret)
5167{
5168 XXH_ASSERT((((size_t)acc) & 15) == 0);
5169 XXH_STATIC_ASSERT(XXH3_NEON_LANES > 0 && XXH3_NEON_LANES <= XXH_ACC_NB && XXH3_NEON_LANES % 2 == 0);
f535537f 5170 { /* GCC for darwin arm64 does not like aliasing here */
5171 xxh_aliasing_uint64x2_t* const xacc = (xxh_aliasing_uint64x2_t*) acc;
648db22b 5172 /* We don't use a uint32x4_t pointer because it causes bus errors on ARMv7. */
f535537f 5173 uint8_t const* xinput = (const uint8_t *) input;
5174 uint8_t const* xsecret = (const uint8_t *) secret;
648db22b 5175
5176 size_t i;
f535537f 5177#ifdef __wasm_simd128__
5178 /*
5179 * On WASM SIMD128, Clang emits direct address loads when XXH3_kSecret
5180 * is constant propagated, which results in it converting it to this
5181 * inside the loop:
5182 *
5183 * a = v128.load(XXH3_kSecret + 0 + $secret_offset, offset = 0)
5184 * b = v128.load(XXH3_kSecret + 16 + $secret_offset, offset = 0)
5185 * ...
5186 *
5187 * This requires a full 32-bit address immediate (and therefore a 6 byte
5188 * instruction) as well as an add for each offset.
5189 *
5190 * Putting an asm guard prevents it from folding (at the cost of losing
5191 * the alignment hint), and uses the free offset in `v128.load` instead
5192 * of adding secret_offset each time which overall reduces code size by
5193 * about a kilobyte and improves performance.
5194 */
5195 XXH_COMPILER_GUARD(xsecret);
5196#endif
5197 /* Scalar lanes use the normal scalarRound routine */
5198 for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
5199 XXH3_scalarRound(acc, input, secret, i);
5200 }
5201 i = 0;
5202 /* 4 NEON lanes at a time. */
5203 for (; i+1 < XXH3_NEON_LANES / 2; i+=2) {
648db22b 5204 /* data_vec = xinput[i]; */
f535537f 5205 uint64x2_t data_vec_1 = XXH_vld1q_u64(xinput + (i * 16));
5206 uint64x2_t data_vec_2 = XXH_vld1q_u64(xinput + ((i+1) * 16));
648db22b 5207 /* key_vec = xsecret[i]; */
f535537f 5208 uint64x2_t key_vec_1 = XXH_vld1q_u64(xsecret + (i * 16));
5209 uint64x2_t key_vec_2 = XXH_vld1q_u64(xsecret + ((i+1) * 16));
5210 /* data_swap = swap(data_vec) */
5211 uint64x2_t data_swap_1 = vextq_u64(data_vec_1, data_vec_1, 1);
5212 uint64x2_t data_swap_2 = vextq_u64(data_vec_2, data_vec_2, 1);
648db22b 5213 /* data_key = data_vec ^ key_vec; */
f535537f 5214 uint64x2_t data_key_1 = veorq_u64(data_vec_1, key_vec_1);
5215 uint64x2_t data_key_2 = veorq_u64(data_vec_2, key_vec_2);
648db22b 5216
f535537f 5217 /*
5218 * If we reinterpret the 64x2 vectors as 32x4 vectors, we can use a
5219 * de-interleave operation for 4 lanes in 1 step with `vuzpq_u32` to
5220 * get one vector with the low 32 bits of each lane, and one vector
5221 * with the high 32 bits of each lane.
5222 *
5223 * The intrinsic returns a double vector because the original ARMv7-a
5224 * instruction modified both arguments in place. AArch64 and SIMD128 emit
5225 * two instructions from this intrinsic.
5226 *
5227 * [ dk11L | dk11H | dk12L | dk12H ] -> [ dk11L | dk12L | dk21L | dk22L ]
5228 * [ dk21L | dk21H | dk22L | dk22H ] -> [ dk11H | dk12H | dk21H | dk22H ]
5229 */
5230 uint32x4x2_t unzipped = vuzpq_u32(
5231 vreinterpretq_u32_u64(data_key_1),
5232 vreinterpretq_u32_u64(data_key_2)
5233 );
5234 /* data_key_lo = data_key & 0xFFFFFFFF */
5235 uint32x4_t data_key_lo = unzipped.val[0];
5236 /* data_key_hi = data_key >> 32 */
5237 uint32x4_t data_key_hi = unzipped.val[1];
5238 /*
5239 * Then, we can split the vectors horizontally and multiply which, as for most
5240 * widening intrinsics, have a variant that works on both high half vectors
5241 * for free on AArch64. A similar instruction is available on SIMD128.
5242 *
5243 * sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi
5244 */
5245 uint64x2_t sum_1 = XXH_vmlal_low_u32(data_swap_1, data_key_lo, data_key_hi);
5246 uint64x2_t sum_2 = XXH_vmlal_high_u32(data_swap_2, data_key_lo, data_key_hi);
5247 /*
5248 * Clang reorders
5249             *   sum  = swap + dkl * dkh; // umlal   swap.2d, dkl.2s, dkh.2s
5250             *   acc += sum;              // add     acc.2d, acc.2d, swap.2d
5251             * to
5252             *   acc += swap;             // add     acc.2d, acc.2d, swap.2d
5253             *   acc += dkl * dkh;        // umlal   acc.2d, dkl.2s, dkh.2s
5254 *
5255 * While it would make sense in theory since the addition is faster,
5256 * for reasons likely related to umlal being limited to certain NEON
5257 * pipelines, this is worse. A compiler guard fixes this.
5258 */
5259 XXH_COMPILER_GUARD_CLANG_NEON(sum_1);
5260 XXH_COMPILER_GUARD_CLANG_NEON(sum_2);
5261 /* xacc[i] = acc_vec + sum; */
5262 xacc[i] = vaddq_u64(xacc[i], sum_1);
5263 xacc[i+1] = vaddq_u64(xacc[i+1], sum_2);
648db22b 5264 }
f535537f 5265 /* Operate on the remaining NEON lanes 2 at a time. */
5266 for (; i < XXH3_NEON_LANES / 2; i++) {
5267 /* data_vec = xinput[i]; */
5268 uint64x2_t data_vec = XXH_vld1q_u64(xinput + (i * 16));
5269 /* key_vec = xsecret[i]; */
5270 uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16));
5271 /* acc_vec_2 = swap(data_vec) */
5272 uint64x2_t data_swap = vextq_u64(data_vec, data_vec, 1);
5273 /* data_key = data_vec ^ key_vec; */
5274 uint64x2_t data_key = veorq_u64(data_vec, key_vec);
5275 /* For two lanes, just use VMOVN and VSHRN. */
5276 /* data_key_lo = data_key & 0xFFFFFFFF; */
5277 uint32x2_t data_key_lo = vmovn_u64(data_key);
5278 /* data_key_hi = data_key >> 32; */
5279 uint32x2_t data_key_hi = vshrn_n_u64(data_key, 32);
5280 /* sum = data_swap + (u64x2) data_key_lo * (u64x2) data_key_hi; */
5281 uint64x2_t sum = vmlal_u32(data_swap, data_key_lo, data_key_hi);
5282 /* Same Clang workaround as before */
5283 XXH_COMPILER_GUARD_CLANG_NEON(sum);
5284 /* xacc[i] = acc_vec + sum; */
5285 xacc[i] = vaddq_u64 (xacc[i], sum);
648db22b 5286 }
5287 }
5288}
f535537f 5289XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(neon)
648db22b 5290
5291XXH_FORCE_INLINE void
5292XXH3_scrambleAcc_neon(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
5293{
5294 XXH_ASSERT((((size_t)acc) & 15) == 0);
5295
f535537f 5296 { xxh_aliasing_uint64x2_t* xacc = (xxh_aliasing_uint64x2_t*) acc;
648db22b 5297 uint8_t const* xsecret = (uint8_t const*) secret;
648db22b 5298
5299 size_t i;
f535537f 5300 /* WASM uses operator overloads and doesn't need these. */
5301#ifndef __wasm_simd128__
5302 /* { prime32_1, prime32_1 } */
5303 uint32x2_t const kPrimeLo = vdup_n_u32(XXH_PRIME32_1);
5304 /* { 0, prime32_1, 0, prime32_1 } */
5305 uint32x4_t const kPrimeHi = vreinterpretq_u32_u64(vdupq_n_u64((xxh_u64)XXH_PRIME32_1 << 32));
5306#endif
5307
5308        /* AArch64 uses both scalar and NEON at the same time */
5309 for (i = XXH3_NEON_LANES; i < XXH_ACC_NB; i++) {
5310 XXH3_scalarScrambleRound(acc, secret, i);
5311 }
648db22b 5312 for (i=0; i < XXH3_NEON_LANES / 2; i++) {
5313 /* xacc[i] ^= (xacc[i] >> 47); */
5314 uint64x2_t acc_vec = xacc[i];
f535537f 5315 uint64x2_t shifted = vshrq_n_u64(acc_vec, 47);
5316 uint64x2_t data_vec = veorq_u64(acc_vec, shifted);
648db22b 5317
5318 /* xacc[i] ^= xsecret[i]; */
f535537f 5319 uint64x2_t key_vec = XXH_vld1q_u64(xsecret + (i * 16));
5320 uint64x2_t data_key = veorq_u64(data_vec, key_vec);
648db22b 5321 /* xacc[i] *= XXH_PRIME32_1 */
f535537f 5322#ifdef __wasm_simd128__
5323 /* SIMD128 has multiply by u64x2, use it instead of expanding and scalarizing */
5324 xacc[i] = data_key * XXH_PRIME32_1;
5325#else
5326 /*
5327 * Expanded version with portable NEON intrinsics
5328 *
5329 * lo(x) * lo(y) + (hi(x) * lo(y) << 32)
5330 *
5331 * prod_hi = hi(data_key) * lo(prime) << 32
5332 *
5333 * Since we only need 32 bits of this multiply a trick can be used, reinterpreting the vector
5334 * as a uint32x4_t and multiplying by { 0, prime, 0, prime } to cancel out the unwanted bits
5335 * and avoid the shift.
5336 */
5337 uint32x4_t prod_hi = vmulq_u32 (vreinterpretq_u32_u64(data_key), kPrimeHi);
5338 /* Extract low bits for vmlal_u32 */
5339 uint32x2_t data_key_lo = vmovn_u64(data_key);
5340 /* xacc[i] = prod_hi + lo(data_key) * XXH_PRIME32_1; */
5341 xacc[i] = vmlal_u32(vreinterpretq_u64_u32(prod_hi), data_key_lo, kPrimeLo);
5342#endif
648db22b 5343 }
5344 }
5345}
648db22b 5346#endif
5347
5348#if (XXH_VECTOR == XXH_VSX)
5349
5350XXH_FORCE_INLINE void
5351XXH3_accumulate_512_vsx( void* XXH_RESTRICT acc,
5352 const void* XXH_RESTRICT input,
5353 const void* XXH_RESTRICT secret)
5354{
5355 /* presumed aligned */
f535537f 5356 xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
5357 xxh_u8 const* const xinput = (xxh_u8 const*) input; /* no alignment restriction */
5358 xxh_u8 const* const xsecret = (xxh_u8 const*) secret; /* no alignment restriction */
648db22b 5359 xxh_u64x2 const v32 = { 32, 32 };
5360 size_t i;
5361 for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
5362 /* data_vec = xinput[i]; */
f535537f 5363 xxh_u64x2 const data_vec = XXH_vec_loadu(xinput + 16*i);
648db22b 5364 /* key_vec = xsecret[i]; */
f535537f 5365 xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i);
648db22b 5366 xxh_u64x2 const data_key = data_vec ^ key_vec;
5367 /* shuffled = (data_key << 32) | (data_key >> 32); */
5368 xxh_u32x4 const shuffled = (xxh_u32x4)vec_rl(data_key, v32);
5369 /* product = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)shuffled & 0xFFFFFFFF); */
5370 xxh_u64x2 const product = XXH_vec_mulo((xxh_u32x4)data_key, shuffled);
5371 /* acc_vec = xacc[i]; */
f535537f 5372 xxh_u64x2 acc_vec = xacc[i];
648db22b 5373 acc_vec += product;
5374
5375 /* swap high and low halves */
5376#ifdef __s390x__
5377 acc_vec += vec_permi(data_vec, data_vec, 2);
5378#else
5379 acc_vec += vec_xxpermdi(data_vec, data_vec, 2);
5380#endif
f535537f 5381 xacc[i] = acc_vec;
648db22b 5382 }
5383}
f535537f 5384XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(vsx)
648db22b 5385
5386XXH_FORCE_INLINE void
5387XXH3_scrambleAcc_vsx(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
5388{
5389 XXH_ASSERT((((size_t)acc) & 15) == 0);
5390
f535537f 5391 { xxh_aliasing_u64x2* const xacc = (xxh_aliasing_u64x2*) acc;
5392 const xxh_u8* const xsecret = (const xxh_u8*) secret;
648db22b 5393 /* constants */
5394 xxh_u64x2 const v32 = { 32, 32 };
5395 xxh_u64x2 const v47 = { 47, 47 };
5396 xxh_u32x4 const prime = { XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1, XXH_PRIME32_1 };
5397 size_t i;
5398 for (i = 0; i < XXH_STRIPE_LEN / sizeof(xxh_u64x2); i++) {
5399 /* xacc[i] ^= (xacc[i] >> 47); */
5400 xxh_u64x2 const acc_vec = xacc[i];
5401 xxh_u64x2 const data_vec = acc_vec ^ (acc_vec >> v47);
5402
5403 /* xacc[i] ^= xsecret[i]; */
f535537f 5404 xxh_u64x2 const key_vec = XXH_vec_loadu(xsecret + 16*i);
648db22b 5405 xxh_u64x2 const data_key = data_vec ^ key_vec;
5406
5407 /* xacc[i] *= XXH_PRIME32_1 */
5408 /* prod_lo = ((xxh_u64x2)data_key & 0xFFFFFFFF) * ((xxh_u64x2)prime & 0xFFFFFFFF); */
5409 xxh_u64x2 const prod_even = XXH_vec_mule((xxh_u32x4)data_key, prime);
5410 /* prod_hi = ((xxh_u64x2)data_key >> 32) * ((xxh_u64x2)prime >> 32); */
5411 xxh_u64x2 const prod_odd = XXH_vec_mulo((xxh_u32x4)data_key, prime);
5412 xacc[i] = prod_odd + (prod_even << v32);
5413 } }
5414}
5415
5416#endif
5417
f535537f 5418#if (XXH_VECTOR == XXH_SVE)
5419
5420XXH_FORCE_INLINE void
5421XXH3_accumulate_512_sve( void* XXH_RESTRICT acc,
5422 const void* XXH_RESTRICT input,
5423 const void* XXH_RESTRICT secret)
5424{
5425 uint64_t *xacc = (uint64_t *)acc;
5426 const uint64_t *xinput = (const uint64_t *)(const void *)input;
5427 const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
5428 svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
5429 uint64_t element_count = svcntd();
5430 if (element_count >= 8) {
5431 svbool_t mask = svptrue_pat_b64(SV_VL8);
5432 svuint64_t vacc = svld1_u64(mask, xacc);
5433 ACCRND(vacc, 0);
5434 svst1_u64(mask, xacc, vacc);
5435 } else if (element_count == 2) { /* sve128 */
5436 svbool_t mask = svptrue_pat_b64(SV_VL2);
5437 svuint64_t acc0 = svld1_u64(mask, xacc + 0);
5438 svuint64_t acc1 = svld1_u64(mask, xacc + 2);
5439 svuint64_t acc2 = svld1_u64(mask, xacc + 4);
5440 svuint64_t acc3 = svld1_u64(mask, xacc + 6);
5441 ACCRND(acc0, 0);
5442 ACCRND(acc1, 2);
5443 ACCRND(acc2, 4);
5444 ACCRND(acc3, 6);
5445 svst1_u64(mask, xacc + 0, acc0);
5446 svst1_u64(mask, xacc + 2, acc1);
5447 svst1_u64(mask, xacc + 4, acc2);
5448 svst1_u64(mask, xacc + 6, acc3);
5449 } else {
5450 svbool_t mask = svptrue_pat_b64(SV_VL4);
5451 svuint64_t acc0 = svld1_u64(mask, xacc + 0);
5452 svuint64_t acc1 = svld1_u64(mask, xacc + 4);
5453 ACCRND(acc0, 0);
5454 ACCRND(acc1, 4);
5455 svst1_u64(mask, xacc + 0, acc0);
5456 svst1_u64(mask, xacc + 4, acc1);
5457 }
5458}
5459
5460XXH_FORCE_INLINE void
5461XXH3_accumulate_sve(xxh_u64* XXH_RESTRICT acc,
5462 const xxh_u8* XXH_RESTRICT input,
5463 const xxh_u8* XXH_RESTRICT secret,
5464 size_t nbStripes)
5465{
5466 if (nbStripes != 0) {
5467 uint64_t *xacc = (uint64_t *)acc;
5468 const uint64_t *xinput = (const uint64_t *)(const void *)input;
5469 const uint64_t *xsecret = (const uint64_t *)(const void *)secret;
5470 svuint64_t kSwap = sveor_n_u64_z(svptrue_b64(), svindex_u64(0, 1), 1);
5471 uint64_t element_count = svcntd();
5472 if (element_count >= 8) {
5473 svbool_t mask = svptrue_pat_b64(SV_VL8);
5474 svuint64_t vacc = svld1_u64(mask, xacc + 0);
5475 do {
5476 /* svprfd(svbool_t, void *, enum svfprop); */
5477 svprfd(mask, xinput + 128, SV_PLDL1STRM);
5478 ACCRND(vacc, 0);
5479 xinput += 8;
5480 xsecret += 1;
5481 nbStripes--;
5482 } while (nbStripes != 0);
5483
5484 svst1_u64(mask, xacc + 0, vacc);
5485 } else if (element_count == 2) { /* sve128 */
5486 svbool_t mask = svptrue_pat_b64(SV_VL2);
5487 svuint64_t acc0 = svld1_u64(mask, xacc + 0);
5488 svuint64_t acc1 = svld1_u64(mask, xacc + 2);
5489 svuint64_t acc2 = svld1_u64(mask, xacc + 4);
5490 svuint64_t acc3 = svld1_u64(mask, xacc + 6);
5491 do {
5492 svprfd(mask, xinput + 128, SV_PLDL1STRM);
5493 ACCRND(acc0, 0);
5494 ACCRND(acc1, 2);
5495 ACCRND(acc2, 4);
5496 ACCRND(acc3, 6);
5497 xinput += 8;
5498 xsecret += 1;
5499 nbStripes--;
5500 } while (nbStripes != 0);
5501
5502 svst1_u64(mask, xacc + 0, acc0);
5503 svst1_u64(mask, xacc + 2, acc1);
5504 svst1_u64(mask, xacc + 4, acc2);
5505 svst1_u64(mask, xacc + 6, acc3);
5506 } else {
5507 svbool_t mask = svptrue_pat_b64(SV_VL4);
5508 svuint64_t acc0 = svld1_u64(mask, xacc + 0);
5509 svuint64_t acc1 = svld1_u64(mask, xacc + 4);
5510 do {
5511 svprfd(mask, xinput + 128, SV_PLDL1STRM);
5512 ACCRND(acc0, 0);
5513 ACCRND(acc1, 4);
5514 xinput += 8;
5515 xsecret += 1;
5516 nbStripes--;
5517 } while (nbStripes != 0);
5518
5519 svst1_u64(mask, xacc + 0, acc0);
5520 svst1_u64(mask, xacc + 4, acc1);
5521 }
5522 }
5523}
5524
5525#endif
5526
648db22b 5527/* scalar variants - universal */
5528
f535537f 5529#if defined(__aarch64__) && (defined(__GNUC__) || defined(__clang__))
5530/*
5531 * In XXH3_scalarRound(), GCC and Clang have a similar codegen issue, where they
5532 * emit an excess mask and a full 64-bit multiply-add (MADD X-form).
5533 *
5534 * While this might not seem like much, as AArch64 is a 64-bit architecture, only
5535 * big Cortex designs have a full 64-bit multiplier.
5536 *
5537 * On the little cores, the smaller 32-bit multiplier is used, and full 64-bit
5538 * multiplies expand to 2-3 multiplies in microcode. This has a major penalty
5539 * of up to 4 latency cycles and 2 stall cycles in the multiply pipeline.
5540 *
5541 * Thankfully, AArch64 still provides the 32-bit long multiply-add (UMADDL) which does
5542 * not have this penalty and does the mask automatically.
5543 */
5544XXH_FORCE_INLINE xxh_u64
5545XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
5546{
5547 xxh_u64 ret;
5548 /* note: %x = 64-bit register, %w = 32-bit register */
5549 __asm__("umaddl %x0, %w1, %w2, %x3" : "=r" (ret) : "r" (lhs), "r" (rhs), "r" (acc));
5550 return ret;
5551}
5552#else
5553XXH_FORCE_INLINE xxh_u64
5554XXH_mult32to64_add64(xxh_u64 lhs, xxh_u64 rhs, xxh_u64 acc)
5555{
5556 return XXH_mult32to64((xxh_u32)lhs, (xxh_u32)rhs) + acc;
5557}
5558#endif
5559
648db22b 5560/*!
5561 * @internal
5562 * @brief Scalar round for @ref XXH3_accumulate_512_scalar().
5563 *
5564 * This is extracted to its own function because the NEON path uses a combination
5565 * of NEON and scalar.
5566 */
5567XXH_FORCE_INLINE void
5568XXH3_scalarRound(void* XXH_RESTRICT acc,
5569 void const* XXH_RESTRICT input,
5570 void const* XXH_RESTRICT secret,
5571 size_t lane)
5572{
5573 xxh_u64* xacc = (xxh_u64*) acc;
5574 xxh_u8 const* xinput = (xxh_u8 const*) input;
5575 xxh_u8 const* xsecret = (xxh_u8 const*) secret;
5576 XXH_ASSERT(lane < XXH_ACC_NB);
5577 XXH_ASSERT(((size_t)acc & (XXH_ACC_ALIGN-1)) == 0);
5578 {
5579 xxh_u64 const data_val = XXH_readLE64(xinput + lane * 8);
5580 xxh_u64 const data_key = data_val ^ XXH_readLE64(xsecret + lane * 8);
5581 xacc[lane ^ 1] += data_val; /* swap adjacent lanes */
f535537f 5582 xacc[lane] = XXH_mult32to64_add64(data_key /* & 0xFFFFFFFF */, data_key >> 32, xacc[lane]);
648db22b 5583 }
5584}
5585
5586/*!
5587 * @internal
5588 * @brief Processes a 64 byte block of data using the scalar path.
5589 */
5590XXH_FORCE_INLINE void
5591XXH3_accumulate_512_scalar(void* XXH_RESTRICT acc,
5592 const void* XXH_RESTRICT input,
5593 const void* XXH_RESTRICT secret)
5594{
5595 size_t i;
f535537f 5596 /* ARM GCC refuses to unroll this loop, resulting in a 24% slowdown on ARMv6. */
5597#if defined(__GNUC__) && !defined(__clang__) \
5598 && (defined(__arm__) || defined(__thumb2__)) \
5599 && defined(__ARM_FEATURE_UNALIGNED) /* no unaligned access just wastes bytes */ \
5600 && XXH_SIZE_OPT <= 0
5601# pragma GCC unroll 8
5602#endif
648db22b 5603 for (i=0; i < XXH_ACC_NB; i++) {
5604 XXH3_scalarRound(acc, input, secret, i);
5605 }
5606}
f535537f 5607XXH_FORCE_INLINE XXH3_ACCUMULATE_TEMPLATE(scalar)
648db22b 5608
5609/*!
5610 * @internal
5611 * @brief Scalar scramble step for @ref XXH3_scrambleAcc_scalar().
5612 *
5613 * This is extracted to its own function because the NEON path uses a combination
5614 * of NEON and scalar.
5615 */
5616XXH_FORCE_INLINE void
5617XXH3_scalarScrambleRound(void* XXH_RESTRICT acc,
5618 void const* XXH_RESTRICT secret,
5619 size_t lane)
5620{
5621 xxh_u64* const xacc = (xxh_u64*) acc; /* presumed aligned */
5622 const xxh_u8* const xsecret = (const xxh_u8*) secret; /* no alignment restriction */
5623 XXH_ASSERT((((size_t)acc) & (XXH_ACC_ALIGN-1)) == 0);
5624 XXH_ASSERT(lane < XXH_ACC_NB);
5625 {
5626 xxh_u64 const key64 = XXH_readLE64(xsecret + lane * 8);
5627 xxh_u64 acc64 = xacc[lane];
5628 acc64 = XXH_xorshift64(acc64, 47);
5629 acc64 ^= key64;
5630 acc64 *= XXH_PRIME32_1;
5631 xacc[lane] = acc64;
5632 }
5633}
5634
5635/*!
5636 * @internal
5637 * @brief Scrambles the accumulators after a large chunk has been read
5638 */
5639XXH_FORCE_INLINE void
5640XXH3_scrambleAcc_scalar(void* XXH_RESTRICT acc, const void* XXH_RESTRICT secret)
5641{
5642 size_t i;
5643 for (i=0; i < XXH_ACC_NB; i++) {
5644 XXH3_scalarScrambleRound(acc, secret, i);
5645 }
5646}
5647
5648XXH_FORCE_INLINE void
5649XXH3_initCustomSecret_scalar(void* XXH_RESTRICT customSecret, xxh_u64 seed64)
5650{
5651 /*
5652 * We need a separate pointer for the hack below,
5653 * which requires a non-const pointer.
5654 * Any decent compiler will optimize this out otherwise.
5655 */
5656 const xxh_u8* kSecretPtr = XXH3_kSecret;
5657 XXH_STATIC_ASSERT((XXH_SECRET_DEFAULT_SIZE & 15) == 0);
5658
f535537f 5659#if defined(__GNUC__) && defined(__aarch64__)
648db22b 5660 /*
5661 * UGLY HACK:
f535537f 5662 * GCC and Clang generate a bunch of MOV/MOVK pairs for aarch64, and they are
648db22b 5663 * placed sequentially, in order, at the top of the unrolled loop.
5664 *
5665 * While MOVK is great for generating constants (2 cycles for a 64-bit
5666 * constant compared to 4 cycles for LDR), it fights for bandwidth with
5667 * the arithmetic instructions.
5668 *
5669 * I L S
5670 * MOVK
5671 * MOVK
5672 * MOVK
5673 * MOVK
5674 * ADD
5675 * SUB STR
5676 * STR
f535537f 5677 * By forcing loads from memory (as the asm line causes the compiler to assume
648db22b 5678     * that kSecretPtr has been changed), the pipelines are used more
5679 * efficiently:
5680 * I L S
5681 * LDR
5682 * ADD LDR
5683 * SUB STR
5684 * STR
5685 *
5686     * See XXH3_NEON_LANES for details on the pipeline.
5687 *
5688 * XXH3_64bits_withSeed, len == 256, Snapdragon 835
5689 * without hack: 2654.4 MB/s
5690 * with hack: 3202.9 MB/s
5691 */
5692 XXH_COMPILER_GUARD(kSecretPtr);
5693#endif
648db22b 5694 { int const nbRounds = XXH_SECRET_DEFAULT_SIZE / 16;
5695 int i;
5696 for (i=0; i < nbRounds; i++) {
5697 /*
f535537f 5698 * The asm hack causes the compiler to assume that kSecretPtr aliases with
648db22b 5699             * customSecret, and on aarch64, this prevents LDP from merging two
5700 * loads together for free. Putting the loads together before the stores
5701 * properly generates LDP.
5702 */
5703 xxh_u64 lo = XXH_readLE64(kSecretPtr + 16*i) + seed64;
5704 xxh_u64 hi = XXH_readLE64(kSecretPtr + 16*i + 8) - seed64;
5705 XXH_writeLE64((xxh_u8*)customSecret + 16*i, lo);
5706 XXH_writeLE64((xxh_u8*)customSecret + 16*i + 8, hi);
5707 } }
5708}
5709
5710
f535537f 5711typedef void (*XXH3_f_accumulate)(xxh_u64* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, const xxh_u8* XXH_RESTRICT, size_t);
648db22b 5712typedef void (*XXH3_f_scrambleAcc)(void* XXH_RESTRICT, const void*);
5713typedef void (*XXH3_f_initCustomSecret)(void* XXH_RESTRICT, xxh_u64);
5714
5715
5716#if (XXH_VECTOR == XXH_AVX512)
5717
5718#define XXH3_accumulate_512 XXH3_accumulate_512_avx512
f535537f 5719#define XXH3_accumulate XXH3_accumulate_avx512
648db22b 5720#define XXH3_scrambleAcc XXH3_scrambleAcc_avx512
5721#define XXH3_initCustomSecret XXH3_initCustomSecret_avx512
5722
5723#elif (XXH_VECTOR == XXH_AVX2)
5724
5725#define XXH3_accumulate_512 XXH3_accumulate_512_avx2
f535537f 5726#define XXH3_accumulate XXH3_accumulate_avx2
648db22b 5727#define XXH3_scrambleAcc XXH3_scrambleAcc_avx2
5728#define XXH3_initCustomSecret XXH3_initCustomSecret_avx2
5729
5730#elif (XXH_VECTOR == XXH_SSE2)
5731
5732#define XXH3_accumulate_512 XXH3_accumulate_512_sse2
f535537f 5733#define XXH3_accumulate XXH3_accumulate_sse2
648db22b 5734#define XXH3_scrambleAcc XXH3_scrambleAcc_sse2
5735#define XXH3_initCustomSecret XXH3_initCustomSecret_sse2
5736
5737#elif (XXH_VECTOR == XXH_NEON)
5738
5739#define XXH3_accumulate_512 XXH3_accumulate_512_neon
f535537f 5740#define XXH3_accumulate XXH3_accumulate_neon
648db22b 5741#define XXH3_scrambleAcc XXH3_scrambleAcc_neon
5742#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5743
5744#elif (XXH_VECTOR == XXH_VSX)
5745
5746#define XXH3_accumulate_512 XXH3_accumulate_512_vsx
f535537f 5747#define XXH3_accumulate XXH3_accumulate_vsx
648db22b 5748#define XXH3_scrambleAcc XXH3_scrambleAcc_vsx
5749#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5750
f535537f 5751#elif (XXH_VECTOR == XXH_SVE)
5752#define XXH3_accumulate_512 XXH3_accumulate_512_sve
5753#define XXH3_accumulate XXH3_accumulate_sve
5754#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
5755#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5756
648db22b 5757#else /* scalar */
5758
5759#define XXH3_accumulate_512 XXH3_accumulate_512_scalar
f535537f 5760#define XXH3_accumulate XXH3_accumulate_scalar
648db22b 5761#define XXH3_scrambleAcc XXH3_scrambleAcc_scalar
5762#define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5763
5764#endif
5765
f535537f 5766#if XXH_SIZE_OPT >= 1 /* don't do SIMD for initialization */
5767# undef XXH3_initCustomSecret
5768# define XXH3_initCustomSecret XXH3_initCustomSecret_scalar
5769#endif
648db22b 5770
5771XXH_FORCE_INLINE void
5772XXH3_hashLong_internal_loop(xxh_u64* XXH_RESTRICT acc,
5773 const xxh_u8* XXH_RESTRICT input, size_t len,
5774 const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
f535537f 5775 XXH3_f_accumulate f_acc,
648db22b 5776 XXH3_f_scrambleAcc f_scramble)
5777{
5778 size_t const nbStripesPerBlock = (secretSize - XXH_STRIPE_LEN) / XXH_SECRET_CONSUME_RATE;
5779 size_t const block_len = XXH_STRIPE_LEN * nbStripesPerBlock;
5780 size_t const nb_blocks = (len - 1) / block_len;
5781
5782 size_t n;
5783
5784 XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
5785
5786 for (n = 0; n < nb_blocks; n++) {
f535537f 5787 f_acc(acc, input + n*block_len, secret, nbStripesPerBlock);
648db22b 5788 f_scramble(acc, secret + secretSize - XXH_STRIPE_LEN);
5789 }
5790
5791 /* last partial block */
5792 XXH_ASSERT(len > XXH_STRIPE_LEN);
5793 { size_t const nbStripes = ((len - 1) - (block_len * nb_blocks)) / XXH_STRIPE_LEN;
5794 XXH_ASSERT(nbStripes <= (secretSize / XXH_SECRET_CONSUME_RATE));
f535537f 5795 f_acc(acc, input + nb_blocks*block_len, secret, nbStripes);
648db22b 5796
5797 /* last stripe */
5798 { const xxh_u8* const p = input + len - XXH_STRIPE_LEN;
5799#define XXH_SECRET_LASTACC_START 7 /* not aligned on 8, last secret is different from acc & scrambler */
f535537f 5800 XXH3_accumulate_512(acc, p, secret + secretSize - XXH_STRIPE_LEN - XXH_SECRET_LASTACC_START);
648db22b 5801 } }
5802}
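/*
 * Worked example (added): with the default 192-byte secret,
 * nbStripesPerBlock = (192 - 64) / 8 = 16 and block_len = 64 * 16 = 1024.
 * A 4 KiB input therefore runs 3 full blocks (each followed by a scramble),
 * then 15 stripes of the partial block, and finally the unaligned last stripe
 * at input + len - 64 using the dedicated XXH_SECRET_LASTACC_START offset.
 */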
5803
5804XXH_FORCE_INLINE xxh_u64
5805XXH3_mix2Accs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret)
5806{
5807 return XXH3_mul128_fold64(
5808 acc[0] ^ XXH_readLE64(secret),
5809 acc[1] ^ XXH_readLE64(secret+8) );
5810}
5811
5812static XXH64_hash_t
5813XXH3_mergeAccs(const xxh_u64* XXH_RESTRICT acc, const xxh_u8* XXH_RESTRICT secret, xxh_u64 start)
5814{
5815 xxh_u64 result64 = start;
5816 size_t i = 0;
5817
5818 for (i = 0; i < 4; i++) {
5819 result64 += XXH3_mix2Accs(acc+2*i, secret + 16*i);
5820#if defined(__clang__) /* Clang */ \
5821 && (defined(__arm__) || defined(__thumb__)) /* ARMv7 */ \
5822 && (defined(__ARM_NEON) || defined(__ARM_NEON__)) /* NEON */ \
5823 && !defined(XXH_ENABLE_AUTOVECTORIZE) /* Define to disable */
5824 /*
5825 * UGLY HACK:
5826 * Prevent autovectorization on Clang ARMv7-a. Exact same problem as
5827 * the one in XXH3_len_129to240_64b. Speeds up shorter keys > 240b.
5828 * XXH3_64bits, len == 256, Snapdragon 835:
5829 * without hack: 2063.7 MB/s
5830 * with hack: 2560.7 MB/s
5831 */
5832 XXH_COMPILER_GUARD(result64);
5833#endif
5834 }
5835
5836 return XXH3_avalanche(result64);
5837}
5838
5839#define XXH3_INIT_ACC { XXH_PRIME32_3, XXH_PRIME64_1, XXH_PRIME64_2, XXH_PRIME64_3, \
5840 XXH_PRIME64_4, XXH_PRIME32_2, XXH_PRIME64_5, XXH_PRIME32_1 }
5841
5842XXH_FORCE_INLINE XXH64_hash_t
5843XXH3_hashLong_64b_internal(const void* XXH_RESTRICT input, size_t len,
5844 const void* XXH_RESTRICT secret, size_t secretSize,
f535537f 5845 XXH3_f_accumulate f_acc,
648db22b 5846 XXH3_f_scrambleAcc f_scramble)
5847{
5848 XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;
5849
f535537f 5850 XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, (const xxh_u8*)secret, secretSize, f_acc, f_scramble);
648db22b 5851
5852 /* converge into final hash */
5853 XXH_STATIC_ASSERT(sizeof(acc) == 64);
5854 /* do not align on 8, so that the secret is different from the accumulator */
5855#define XXH_SECRET_MERGEACCS_START 11
5856 XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
5857 return XXH3_mergeAccs(acc, (const xxh_u8*)secret + XXH_SECRET_MERGEACCS_START, (xxh_u64)len * XXH_PRIME64_1);
5858}
5859
5860/*
5861 * It's important for performance to transmit secret's size (when it's static)
5862 * so that the compiler can properly optimize the vectorized loop.
5863 * This makes a big performance difference for "medium" keys (<1 KB) when using the AVX instruction set.
f535537f 5864 * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
5865 * breaks -Og, this is XXH_NO_INLINE.
648db22b 5866 */
XXH3_WITH_SECRET_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSecret(const void* XXH_RESTRICT input, size_t len,
                             XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_64b_internal(input, len, secret, secretLen, XXH3_accumulate, XXH3_scrambleAcc);
}

/*
 * It's preferable for performance that XXH3_hashLong is not inlined,
 * as it results in a smaller function for small data, which is easier on the instruction cache.
 * Note that inside this no_inline function, we do inline the internal loop,
 * and provide a statically defined secret size, to allow optimization of the vector loop.
 */
XXH_NO_INLINE XXH_PUREF XXH64_hash_t
XXH3_hashLong_64b_default(const void* XXH_RESTRICT input, size_t len,
                          XXH64_hash_t seed64, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_64b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_accumulate, XXH3_scrambleAcc);
}

/*
 * XXH3_hashLong_64b_withSeed():
 * Generate a custom key, based on an alteration of the default XXH3_kSecret with the seed,
 * and then use this key for long mode hashing.
 *
 * This operation is decently fast, but nonetheless costs a little bit of time.
 * Try to avoid it whenever possible (typically when seed==0).
 *
 * It's important for performance that XXH3_hashLong is not inlined. Not sure
 * why (uop cache maybe?), but the difference is large and easily measurable.
 */
XXH_FORCE_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed_internal(const void* input, size_t len,
                                    XXH64_hash_t seed,
                                    XXH3_f_accumulate f_acc,
                                    XXH3_f_scrambleAcc f_scramble,
                                    XXH3_f_initCustomSecret f_initSec)
{
#if XXH_SIZE_OPT <= 0
    if (seed == 0)
        return XXH3_hashLong_64b_internal(input, len,
                                          XXH3_kSecret, sizeof(XXH3_kSecret),
                                          f_acc, f_scramble);
#endif
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed);
        return XXH3_hashLong_64b_internal(input, len, secret, sizeof(secret),
                                          f_acc, f_scramble);
    }
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH64_hash_t
XXH3_hashLong_64b_withSeed(const void* XXH_RESTRICT input, size_t len,
                           XXH64_hash_t seed, const xxh_u8* XXH_RESTRICT secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_64b_withSeed_internal(input, len, seed,
                XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
}


typedef XXH64_hash_t (*XXH3_hashLong64_f)(const void* XXH_RESTRICT, size_t,
                                          XXH64_hash_t, const xxh_u8* XXH_RESTRICT, size_t);

XXH_FORCE_INLINE XXH64_hash_t
XXH3_64bits_internal(const void* XXH_RESTRICT input, size_t len,
                     XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                     XXH3_hashLong64_f f_hashLong)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If any action is to be taken when the `secretLen` precondition is not respected,
     * it should be done here.
     * For now, it's a contract pre-condition.
     * Adding a check and a branch here would cost performance at every hash.
     * Also, note that the function signature doesn't offer room to return an error.
     */
    if (len <= 16)
        return XXH3_len_0to16_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_64b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hashLong(input, len, seed64, (const xxh_u8*)secret, secretLen);
}


/* === Public entry point === */

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(XXH_NOESCAPE const void* input, size_t length)
{
    return XXH3_64bits_internal(input, length, 0, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_default);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecret(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize)
{
    return XXH3_64bits_internal(input, length, 0, secret, secretSize, XXH3_hashLong_64b_withSecret);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSeed(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed)
{
    return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), XXH3_hashLong_64b_withSeed);
}

XXH_PUBLIC_API XXH64_hash_t
XXH3_64bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t length, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    if (length <= XXH3_MIDSIZE_MAX)
        return XXH3_64bits_internal(input, length, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_64b_withSecret(input, length, seed, (const xxh_u8*)secret, secretSize);
}
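
/*!
 * Usage sketch (illustrative, not part of the library): one-shot hashing with
 * the 64-bit variant. The payload and seed value below are arbitrary examples.
 * @code{.c}
 *   #include <stdio.h>
 *   #include <string.h>
 *
 *   static void example_oneshot_64(void)
 *   {
 *       const char data[] = "example payload";
 *       size_t const len = strlen(data);
 *       // Default secret, no seed: the fastest entry point.
 *       XXH64_hash_t const h0 = XXH3_64bits(data, len);
 *       // Seeded variant: a non-zero seed pays a small one-time cost
 *       // to derive a custom secret (see XXH3_hashLong_64b_withSeed above).
 *       XXH64_hash_t const h1 = XXH3_64bits_withSeed(data, len, 12345);
 *       printf("%016llx %016llx\n",
 *              (unsigned long long)h0, (unsigned long long)h1);
 *   }
 * @endcode
 */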


/* === XXH3 streaming === */
#ifndef XXH_NO_STREAM
/*
 * Malloc's a pointer that is always aligned to @p align.
 *
 * This must be freed with `XXH_alignedFree()`.
 *
 * malloc typically guarantees 16 byte alignment on 64-bit systems and 8 byte
 * alignment on 32-bit. This isn't enough for the 32 byte aligned loads in AVX2,
 * nor, on 32-bit, for the 16 byte aligned loads in SSE2 and NEON.
 *
 * This underalignment previously caused a rather obvious crash which went
 * completely unnoticed due to XXH3_createState() not actually being tested.
 * Credit to RedSpah for noticing this bug.
 *
 * The alignment is done manually: functions like posix_memalign or _mm_malloc
 * are avoided: to maintain portability, we would have to write a fallback
 * like this anyway, and besides, testing for the existence of library
 * functions without relying on external build tools is impossible.
 *
 * The method is simple: overallocate, manually align, and store the offset
 * to the original pointer behind the returned pointer.
 *
 * Align must be a power of 2 and 8 <= align <= 128.
 */
static XXH_MALLOCF void* XXH_alignedMalloc(size_t s, size_t align)
{
    XXH_ASSERT(align <= 128 && align >= 8); /* range check */
    XXH_ASSERT((align & (align-1)) == 0);   /* power of 2 */
    XXH_ASSERT(s != 0 && s < (s + align));  /* empty/overflow */
    {   /* Overallocate to make room for manual realignment and an offset byte */
        xxh_u8* base = (xxh_u8*)XXH_malloc(s + align);
        if (base != NULL) {
            /*
             * Get the offset needed to align this pointer.
             *
             * Even if the returned pointer is aligned, there will always be
             * at least one byte to store the offset to the original pointer.
             */
            size_t offset = align - ((size_t)base & (align - 1)); /* base % align */
            /* Add the offset for the now-aligned pointer */
            xxh_u8* ptr = base + offset;

            XXH_ASSERT((size_t)ptr % align == 0);

            /* Store the offset immediately before the returned pointer. */
            ptr[-1] = (xxh_u8)offset;
            return ptr;
        }
        return NULL;
    }
}
/*
 * Frees an aligned pointer allocated by XXH_alignedMalloc(). Don't pass
 * normal malloc'd pointers, XXH_alignedMalloc has a specific data layout.
 */
static void XXH_alignedFree(void* p)
{
    if (p != NULL) {
        xxh_u8* ptr = (xxh_u8*)p;
        /* Get the offset byte we added in XXH_alignedMalloc. */
        xxh_u8 offset = ptr[-1];
        /* Free the original malloc'd pointer */
        xxh_u8* base = ptr - offset;
        XXH_free(base);
    }
}
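/*
 * Illustrative sketch (hypothetical internal usage): XXH_alignedMalloc() and
 * XXH_alignedFree() form a matched pair. The realignment offset is hidden in
 * the byte just before the returned pointer, so only XXH_alignedFree() may
 * release it:
 *
 *   xxh_u8* p = (xxh_u8*)XXH_alignedMalloc(1024, 64);
 *   if (p != NULL) {
 *       XXH_ASSERT(((size_t)p & 63) == 0);  // 64-byte aligned, as requested
 *       // ... use p ...
 *       XXH_alignedFree(p);                 // never XXH_free(p) directly
 *   }
 */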
/*! @ingroup XXH3_family */
/*!
 * @brief Allocate an @ref XXH3_state_t.
 *
 * @return An allocated pointer of @ref XXH3_state_t on success.
 * @return `NULL` on failure.
 *
 * @note Must be freed with XXH3_freeState().
 */
XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void)
{
    XXH3_state_t* const state = (XXH3_state_t*)XXH_alignedMalloc(sizeof(XXH3_state_t), 64);
    if (state==NULL) return NULL;
    XXH3_INITSTATE(state);
    return state;
}

/*! @ingroup XXH3_family */
/*!
 * @brief Frees an @ref XXH3_state_t.
 *
 * @param statePtr A pointer to an @ref XXH3_state_t allocated with @ref XXH3_createState().
 *
 * @return @ref XXH_OK.
 *
 * @note Must be allocated with XXH3_createState().
 */
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr)
{
    XXH_alignedFree(statePtr);
    return XXH_OK;
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API void
XXH3_copyState(XXH_NOESCAPE XXH3_state_t* dst_state, XXH_NOESCAPE const XXH3_state_t* src_state)
{
    XXH_memcpy(dst_state, src_state, sizeof(*dst_state));
}

static void
XXH3_reset_internal(XXH3_state_t* statePtr,
                    XXH64_hash_t seed,
                    const void* secret, size_t secretSize)
{
    size_t const initStart = offsetof(XXH3_state_t, bufferedSize);
    size_t const initLength = offsetof(XXH3_state_t, nbStripesPerBlock) - initStart;
    XXH_ASSERT(offsetof(XXH3_state_t, nbStripesPerBlock) > initStart);
    XXH_ASSERT(statePtr != NULL);
    /* set members from bufferedSize to nbStripesPerBlock (excluded) to 0 */
    memset((char*)statePtr + initStart, 0, initLength);
    statePtr->acc[0] = XXH_PRIME32_3;
    statePtr->acc[1] = XXH_PRIME64_1;
    statePtr->acc[2] = XXH_PRIME64_2;
    statePtr->acc[3] = XXH_PRIME64_3;
    statePtr->acc[4] = XXH_PRIME64_4;
    statePtr->acc[5] = XXH_PRIME32_2;
    statePtr->acc[6] = XXH_PRIME64_5;
    statePtr->acc[7] = XXH_PRIME32_1;
    statePtr->seed = seed;
    statePtr->useSeed = (seed != 0);
    statePtr->extSecret = (const unsigned char*)secret;
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
    statePtr->secretLimit = secretSize - XXH_STRIPE_LEN;
    statePtr->nbStripesPerBlock = statePtr->secretLimit / XXH_SECRET_CONSUME_RATE;
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
{
    if (statePtr == NULL) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, XXH3_kSecret, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
{
    if (statePtr == NULL) return XXH_ERROR;
    /* Validate the secret before touching the state:
     * XXH3_reset_internal() asserts secretSize >= XXH3_SECRET_SIZE_MIN. */
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    XXH3_reset_internal(statePtr, 0, secret, secretSize);
    return XXH_OK;
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (seed==0) return XXH3_64bits_reset(statePtr);
    if ((seed != statePtr->seed) || (statePtr->extSecret != NULL))
        XXH3_initCustomSecret(statePtr->customSecret, seed);
    XXH3_reset_internal(statePtr, seed, NULL, XXH_SECRET_DEFAULT_SIZE);
    return XXH_OK;
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed64)
{
    if (statePtr == NULL) return XXH_ERROR;
    if (secret == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
    XXH3_reset_internal(statePtr, seed64, secret, secretSize);
    statePtr->useSeed = 1; /* always, even if seed64==0 */
    return XXH_OK;
}

/*!
 * @internal
 * @brief Processes a large input for XXH3_update() and XXH3_digest_long().
 *
 * Unlike XXH3_hashLong_internal_loop(), this can process data that overlaps a block.
 *
 * @param acc               Pointer to the 8 accumulator lanes
 * @param nbStripesSoFarPtr In/out pointer to the number of leftover stripes in the block
 * @param nbStripesPerBlock Number of stripes in a block
 * @param input             Input pointer
 * @param nbStripes         Number of stripes to process
 * @param secret            Secret pointer
 * @param secretLimit       Offset of the last block in @p secret
 * @param f_acc             Pointer to an XXH3_accumulate implementation
 * @param f_scramble        Pointer to an XXH3_scrambleAcc implementation
 * @return                  Pointer past the end of @p input after processing
 */
XXH_FORCE_INLINE const xxh_u8 *
XXH3_consumeStripes(xxh_u64* XXH_RESTRICT acc,
                    size_t* XXH_RESTRICT nbStripesSoFarPtr, size_t nbStripesPerBlock,
                    const xxh_u8* XXH_RESTRICT input, size_t nbStripes,
                    const xxh_u8* XXH_RESTRICT secret, size_t secretLimit,
                    XXH3_f_accumulate f_acc,
                    XXH3_f_scrambleAcc f_scramble)
{
    const xxh_u8* initialSecret = secret + *nbStripesSoFarPtr * XXH_SECRET_CONSUME_RATE;
    /* Process full blocks */
    if (nbStripes >= (nbStripesPerBlock - *nbStripesSoFarPtr)) {
        /* Process the initial partial block... */
        size_t nbStripesThisIter = nbStripesPerBlock - *nbStripesSoFarPtr;

        do {
            /* Accumulate and scramble */
            f_acc(acc, input, initialSecret, nbStripesThisIter);
            f_scramble(acc, secret + secretLimit);
            input += nbStripesThisIter * XXH_STRIPE_LEN;
            nbStripes -= nbStripesThisIter;
            /* Then continue the loop with the full block size */
            nbStripesThisIter = nbStripesPerBlock;
            initialSecret = secret;
        } while (nbStripes >= nbStripesPerBlock);
        *nbStripesSoFarPtr = 0;
    }
    /* Process a partial block */
    if (nbStripes > 0) {
        f_acc(acc, input, initialSecret, nbStripes);
        input += nbStripes * XXH_STRIPE_LEN;
        *nbStripesSoFarPtr += nbStripes;
    }
    /* Return end pointer */
    return input;
}

#ifndef XXH3_STREAM_USE_STACK
# if XXH_SIZE_OPT <= 0 && !defined(__clang__) /* clang doesn't need additional stack space */
#   define XXH3_STREAM_USE_STACK 1
# endif
#endif
/*
 * Both XXH3_64bits_update and XXH3_128bits_update use this routine.
 */
XXH_FORCE_INLINE XXH_errorcode
XXH3_update(XXH3_state_t* XXH_RESTRICT const state,
            const xxh_u8* XXH_RESTRICT input, size_t len,
            XXH3_f_accumulate f_acc,
            XXH3_f_scrambleAcc f_scramble)
{
    if (input==NULL) {
        XXH_ASSERT(len == 0);
        return XXH_OK;
    }

    XXH_ASSERT(state != NULL);
    {   const xxh_u8* const bEnd = input + len;
        const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* For some reason, gcc and MSVC seem to suffer greatly
         * when operating on accumulators directly in state.
         * Operating on stack space seems to enable proper optimization.
         * clang, on the other hand, doesn't seem to need this trick. */
        XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[8];
        XXH_memcpy(acc, state->acc, sizeof(acc));
#else
        xxh_u64* XXH_RESTRICT const acc = state->acc;
#endif
        state->totalLen += len;
        XXH_ASSERT(state->bufferedSize <= XXH3_INTERNALBUFFER_SIZE);

        /* small input : just fill in tmp buffer */
        if (len <= XXH3_INTERNALBUFFER_SIZE - state->bufferedSize) {
            XXH_memcpy(state->buffer + state->bufferedSize, input, len);
            state->bufferedSize += (XXH32_hash_t)len;
            return XXH_OK;
        }

        /* total input is now > XXH3_INTERNALBUFFER_SIZE */
#define XXH3_INTERNALBUFFER_STRIPES (XXH3_INTERNALBUFFER_SIZE / XXH_STRIPE_LEN)
        XXH_STATIC_ASSERT(XXH3_INTERNALBUFFER_SIZE % XXH_STRIPE_LEN == 0);   /* clean multiple */

        /*
         * Internal buffer is partially filled (always, except at beginning)
         * Complete it, then consume it.
         */
        if (state->bufferedSize) {
            size_t const loadSize = XXH3_INTERNALBUFFER_SIZE - state->bufferedSize;
            XXH_memcpy(state->buffer + state->bufferedSize, input, loadSize);
            input += loadSize;
            XXH3_consumeStripes(acc,
                                &state->nbStripesSoFar, state->nbStripesPerBlock,
                                state->buffer, XXH3_INTERNALBUFFER_STRIPES,
                                secret, state->secretLimit,
                                f_acc, f_scramble);
            state->bufferedSize = 0;
        }
        XXH_ASSERT(input < bEnd);
        if (bEnd - input > XXH3_INTERNALBUFFER_SIZE) {
            size_t nbStripes = (size_t)(bEnd - 1 - input) / XXH_STRIPE_LEN;
            input = XXH3_consumeStripes(acc,
                                        &state->nbStripesSoFar, state->nbStripesPerBlock,
                                        input, nbStripes,
                                        secret, state->secretLimit,
                                        f_acc, f_scramble);
            XXH_memcpy(state->buffer + sizeof(state->buffer) - XXH_STRIPE_LEN, input - XXH_STRIPE_LEN, XXH_STRIPE_LEN);
        }
        /* Some remaining input (always) : buffer it */
        XXH_ASSERT(input < bEnd);
        XXH_ASSERT(bEnd - input <= XXH3_INTERNALBUFFER_SIZE);
        XXH_ASSERT(state->bufferedSize == 0);
        XXH_memcpy(state->buffer, input, (size_t)(bEnd-input));
        state->bufferedSize = (XXH32_hash_t)(bEnd-input);
#if defined(XXH3_STREAM_USE_STACK) && XXH3_STREAM_USE_STACK >= 1
        /* save stack accumulators into state */
        XXH_memcpy(state->acc, acc, sizeof(acc));
#endif
    }

    return XXH_OK;
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_64bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
{
    return XXH3_update(state, (const xxh_u8*)input, len,
                       XXH3_accumulate, XXH3_scrambleAcc);
}


XXH_FORCE_INLINE void
XXH3_digest_long (XXH64_hash_t* acc,
                  const XXH3_state_t* state,
                  const unsigned char* secret)
{
    xxh_u8 lastStripe[XXH_STRIPE_LEN];
    const xxh_u8* lastStripePtr;

    /*
     * Digest on a local copy. This way, the state remains unaltered, and it can
     * continue ingesting more input afterwards.
     */
    XXH_memcpy(acc, state->acc, sizeof(state->acc));
    if (state->bufferedSize >= XXH_STRIPE_LEN) {
        /* Consume remaining stripes then point to remaining data in buffer */
        size_t const nbStripes = (state->bufferedSize - 1) / XXH_STRIPE_LEN;
        size_t nbStripesSoFar = state->nbStripesSoFar;
        XXH3_consumeStripes(acc,
                            &nbStripesSoFar, state->nbStripesPerBlock,
                            state->buffer, nbStripes,
                            secret, state->secretLimit,
                            XXH3_accumulate, XXH3_scrambleAcc);
        lastStripePtr = state->buffer + state->bufferedSize - XXH_STRIPE_LEN;
    } else {  /* bufferedSize < XXH_STRIPE_LEN */
        /* Copy to temp buffer */
        size_t const catchupSize = XXH_STRIPE_LEN - state->bufferedSize;
        XXH_ASSERT(state->bufferedSize > 0);  /* there is always some input buffered */
        XXH_memcpy(lastStripe, state->buffer + sizeof(state->buffer) - catchupSize, catchupSize);
        XXH_memcpy(lastStripe + catchupSize, state->buffer, state->bufferedSize);
        lastStripePtr = lastStripe;
    }
    /* Last stripe */
    XXH3_accumulate_512(acc,
                        lastStripePtr,
                        secret + state->secretLimit - XXH_SECRET_LASTACC_START);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        return XXH3_mergeAccs(acc,
                              secret + XXH_SECRET_MERGEACCS_START,
                              (xxh_u64)state->totalLen * XXH_PRIME64_1);
    }
    /* totalLen <= XXH3_MIDSIZE_MAX: digesting a short input */
    if (state->useSeed)
        return XXH3_64bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_64bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                  secret, state->secretLimit + XXH_STRIPE_LEN);
}
#endif /* !XXH_NO_STREAM */
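
/*!
 * Usage sketch (illustrative, not part of the library): streaming the 64-bit
 * variant over several chunks produces the same hash as a single
 * XXH3_64bits() call over the whole input. The 4 KB chunk size is arbitrary.
 * @code{.c}
 *   XXH64_hash_t example_stream_64(const void* data, size_t len)
 *   {
 *       XXH3_state_t* const state = XXH3_createState();
 *       XXH64_hash_t h = 0;
 *       if (state == NULL) return 0;  // illustrative error handling
 *       if (XXH3_64bits_reset(state) == XXH_OK) {
 *           const char* p = (const char*)data;
 *           size_t remaining = len;
 *           while (remaining > 0) {   // feed arbitrary-sized chunks
 *               size_t const chunk = remaining > 4096 ? 4096 : remaining;
 *               (void)XXH3_64bits_update(state, p, chunk);
 *               p += chunk; remaining -= chunk;
 *           }
 *           // digest() doesn't consume the state: more updates may follow
 *           h = XXH3_64bits_digest(state);
 *       }
 *       XXH3_freeState(state);
 *       return h;
 *   }
 * @endcode
 */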


/* ==========================================
 * XXH3 128 bits (a.k.a XXH128)
 * ==========================================
 * XXH3's 128-bit variant has better mixing and strength than the 64-bit variant,
 * even without counting the significantly larger output size.
 *
 * For example, extra steps are taken to avoid the seed-dependent collisions
 * in 17-240 byte inputs (See XXH3_mix16B and XXH128_mix32B).
 *
 * This strength naturally comes at the cost of some speed, especially on short
 * lengths. Note that long inputs are hashed about as fast as with the 64-bit
 * version, as this uses only a slight modification of the 64-bit loop.
 *
 * XXH128 is also more oriented towards 64-bit machines. It is still extremely
 * fast for a _128-bit_ hash on 32-bit (it usually clears XXH64).
 */

XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_1to3_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    /* A doubled version of 1to3_64b with different constants. */
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(1 <= len && len <= 3);
    XXH_ASSERT(secret != NULL);
    /*
     * len = 1: combinedl = { input[0], 0x01, input[0], input[0] }
     * len = 2: combinedl = { input[1], 0x02, input[0], input[1] }
     * len = 3: combinedl = { input[2], 0x03, input[0], input[1] }
     */
    {   xxh_u8 const c1 = input[0];
        xxh_u8 const c2 = input[len >> 1];
        xxh_u8 const c3 = input[len - 1];
        xxh_u32 const combinedl = ((xxh_u32)c1 <<16) | ((xxh_u32)c2 << 24)
                                | ((xxh_u32)c3 <<  0) | ((xxh_u32)len << 8);
        xxh_u32 const combinedh = XXH_rotl32(XXH_swap32(combinedl), 13);
        xxh_u64 const bitflipl = (XXH_readLE32(secret) ^ XXH_readLE32(secret+4)) + seed;
        xxh_u64 const bitfliph = (XXH_readLE32(secret+8) ^ XXH_readLE32(secret+12)) - seed;
        xxh_u64 const keyed_lo = (xxh_u64)combinedl ^ bitflipl;
        xxh_u64 const keyed_hi = (xxh_u64)combinedh ^ bitfliph;
        XXH128_hash_t h128;
        h128.low64  = XXH64_avalanche(keyed_lo);
        h128.high64 = XXH64_avalanche(keyed_hi);
        return h128;
    }
}

XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_4to8_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(4 <= len && len <= 8);
    seed ^= (xxh_u64)XXH_swap32((xxh_u32)seed) << 32;
    {   xxh_u32 const input_lo = XXH_readLE32(input);
        xxh_u32 const input_hi = XXH_readLE32(input + len - 4);
        xxh_u64 const input_64 = input_lo + ((xxh_u64)input_hi << 32);
        xxh_u64 const bitflip = (XXH_readLE64(secret+16) ^ XXH_readLE64(secret+24)) + seed;
        xxh_u64 const keyed = input_64 ^ bitflip;

        /* Shift len to the left to ensure it is even: XXH_PRIME64_1 is odd,
         * so XXH_PRIME64_1 + (len << 2) stays odd, which avoids multiplying
         * by an even number (and losing the low bit). */
        XXH128_hash_t m128 = XXH_mult64to128(keyed, XXH_PRIME64_1 + (len << 2));

        m128.high64 += (m128.low64 << 1);
        m128.low64  ^= (m128.high64 >> 3);

        m128.low64   = XXH_xorshift64(m128.low64, 35);
        m128.low64  *= PRIME_MX2;
        m128.low64   = XXH_xorshift64(m128.low64, 28);
        m128.high64  = XXH3_avalanche(m128.high64);
        return m128;
    }
}

XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_9to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(input != NULL);
    XXH_ASSERT(secret != NULL);
    XXH_ASSERT(9 <= len && len <= 16);
    {   xxh_u64 const bitflipl = (XXH_readLE64(secret+32) ^ XXH_readLE64(secret+40)) - seed;
        xxh_u64 const bitfliph = (XXH_readLE64(secret+48) ^ XXH_readLE64(secret+56)) + seed;
        xxh_u64 const input_lo = XXH_readLE64(input);
        xxh_u64       input_hi = XXH_readLE64(input + len - 8);
        XXH128_hash_t m128 = XXH_mult64to128(input_lo ^ input_hi ^ bitflipl, XXH_PRIME64_1);
        /*
         * Put len in the middle of m128 to ensure that the length gets mixed to
         * both the low and high bits in the 128x64 multiply below.
         */
        m128.low64 += (xxh_u64)(len - 1) << 54;
        input_hi   ^= bitfliph;
        /*
         * Add the high 32 bits of input_hi to the high 32 bits of m128, then
         * add the long product of the low 32 bits of input_hi and XXH_PRIME32_2 to
         * the high 64 bits of m128.
         *
         * The best approach to this operation is different on 32-bit and 64-bit.
         */
        if (sizeof(void *) < sizeof(xxh_u64)) { /* 32-bit */
            /*
             * 32-bit optimized version, which is more readable.
             *
             * On 32-bit, it removes an ADC and delays a dependency between the two
             * halves of m128.high64, but it generates an extra mask on 64-bit.
             */
            m128.high64 += (input_hi & 0xFFFFFFFF00000000ULL) + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2);
        } else {
            /*
             * 64-bit optimized (albeit more confusing) version.
             *
             * Uses some properties of addition and multiplication to remove the mask:
             *
             * Let:
             *    a = input_hi.lo = (input_hi & 0x00000000FFFFFFFF)
             *    b = input_hi.hi = (input_hi & 0xFFFFFFFF00000000)
             *    c = XXH_PRIME32_2
             *
             *    a + (b * c)
             * Inverse Property: x + y - x == y
             *    a + (b * (1 + c - 1))
             * Distributive Property: x * (y + z) == (x * y) + (x * z)
             *    a + (b * 1) + (b * (c - 1))
             * Identity Property: x * 1 == x
             *    a + b + (b * (c - 1))
             *
             * Substitute a, b, and c:
             *    input_hi.hi + input_hi.lo + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             *
             * Since input_hi.hi + input_hi.lo == input_hi, we get this:
             *    input_hi + ((xxh_u64)input_hi.lo * (XXH_PRIME32_2 - 1))
             */
            m128.high64 += input_hi + XXH_mult32to64((xxh_u32)input_hi, XXH_PRIME32_2 - 1);
        }
        /* m128 ^= XXH_swap64(m128 >> 64); */
        m128.low64 ^= XXH_swap64(m128.high64);

        {   /* 128x64 multiply: h128 = m128 * XXH_PRIME64_2; */
            XXH128_hash_t h128 = XXH_mult64to128(m128.low64, XXH_PRIME64_2);
            h128.high64 += m128.high64 * XXH_PRIME64_2;

            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = XXH3_avalanche(h128.high64);
            return h128;
    }   }
}

/*
 * Assumption: `secret` size is >= XXH3_SECRET_SIZE_MIN
 */
XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_0to16_128b(const xxh_u8* input, size_t len, const xxh_u8* secret, XXH64_hash_t seed)
{
    XXH_ASSERT(len <= 16);
    {   if (len > 8) return XXH3_len_9to16_128b(input, len, secret, seed);
        if (len >= 4) return XXH3_len_4to8_128b(input, len, secret, seed);
        if (len) return XXH3_len_1to3_128b(input, len, secret, seed);
        {   XXH128_hash_t h128;
            xxh_u64 const bitflipl = XXH_readLE64(secret+64) ^ XXH_readLE64(secret+72);
            xxh_u64 const bitfliph = XXH_readLE64(secret+80) ^ XXH_readLE64(secret+88);
            h128.low64  = XXH64_avalanche(seed ^ bitflipl);
            h128.high64 = XXH64_avalanche(seed ^ bitfliph);
            return h128;
    }   }
}

/*
 * A bit slower than XXH3_mix16B, but handles multiply by zero better.
 */
XXH_FORCE_INLINE XXH128_hash_t
XXH128_mix32B(XXH128_hash_t acc, const xxh_u8* input_1, const xxh_u8* input_2,
              const xxh_u8* secret, XXH64_hash_t seed)
{
    acc.low64  += XXH3_mix16B (input_1, secret+0, seed);
    acc.low64  ^= XXH_readLE64(input_2) + XXH_readLE64(input_2 + 8);
    acc.high64 += XXH3_mix16B (input_2, secret+16, seed);
    acc.high64 ^= XXH_readLE64(input_1) + XXH_readLE64(input_1 + 8);
    return acc;
}


XXH_FORCE_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_17to128_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                      const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                      XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(16 < len && len <= 128);

    {   XXH128_hash_t acc;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;

#if XXH_SIZE_OPT >= 1
        {
            /* Smaller, but slightly slower. */
            unsigned int i = (unsigned int)(len - 1) / 32;
            do {
                acc = XXH128_mix32B(acc, input+16*i, input+len-16*(i+1), secret+32*i, seed);
            } while (i-- != 0);
        }
#else
        if (len > 32) {
            if (len > 64) {
                if (len > 96) {
                    acc = XXH128_mix32B(acc, input+48, input+len-64, secret+96, seed);
                }
                acc = XXH128_mix32B(acc, input+32, input+len-48, secret+64, seed);
            }
            acc = XXH128_mix32B(acc, input+16, input+len-32, secret+32, seed);
        }
        acc = XXH128_mix32B(acc, input, input+len-16, secret, seed);
#endif
        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
        }
    }
}

XXH_NO_INLINE XXH_PUREF XXH128_hash_t
XXH3_len_129to240_128b(const xxh_u8* XXH_RESTRICT input, size_t len,
                       const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                       XXH64_hash_t seed)
{
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN); (void)secretSize;
    XXH_ASSERT(128 < len && len <= XXH3_MIDSIZE_MAX);

    {   XXH128_hash_t acc;
        unsigned i;
        acc.low64 = len * XXH_PRIME64_1;
        acc.high64 = 0;
        /*
         * We set `i` to offset + 32, so that the unchanged `len` can be used
         * as the upper bound. This reaches a sweet spot where both x86 and
         * aarch64 get simple address generation and good codegen for the loop.
         */
        for (i = 32; i < 160; i += 32) {
            acc = XXH128_mix32B(acc,
                                input  + i - 32,
                                input  + i - 16,
                                secret + i - 32,
                                seed);
        }
        acc.low64  = XXH3_avalanche(acc.low64);
        acc.high64 = XXH3_avalanche(acc.high64);
        /*
         * NB: `i <= len` will duplicate the last 32 bytes if
         * `len % 32` is zero. This is an unfortunate necessity to keep
         * the hash result stable.
         */
        for (i=160; i <= len; i += 32) {
            acc = XXH128_mix32B(acc,
                                input + i - 32,
                                input + i - 16,
                                secret + XXH3_MIDSIZE_STARTOFFSET + i - 160,
                                seed);
        }
        /* last bytes */
        acc = XXH128_mix32B(acc,
                            input + len - 16,
                            input + len - 32,
                            secret + XXH3_SECRET_SIZE_MIN - XXH3_MIDSIZE_LASTOFFSET - 16,
                            (XXH64_hash_t)0 - seed);

        {   XXH128_hash_t h128;
            h128.low64  = acc.low64 + acc.high64;
            h128.high64 = (acc.low64    * XXH_PRIME64_1)
                        + (acc.high64   * XXH_PRIME64_4)
                        + ((len - seed) * XXH_PRIME64_2);
            h128.low64  = XXH3_avalanche(h128.low64);
            h128.high64 = (XXH64_hash_t)0 - XXH3_avalanche(h128.high64);
            return h128;
        }
    }
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_internal(const void* XXH_RESTRICT input, size_t len,
                            const xxh_u8* XXH_RESTRICT secret, size_t secretSize,
                            XXH3_f_accumulate f_acc,
                            XXH3_f_scrambleAcc f_scramble)
{
    XXH_ALIGN(XXH_ACC_ALIGN) xxh_u64 acc[XXH_ACC_NB] = XXH3_INIT_ACC;

    XXH3_hashLong_internal_loop(acc, (const xxh_u8*)input, len, secret, secretSize, f_acc, f_scramble);

    /* converge into final hash */
    XXH_STATIC_ASSERT(sizeof(acc) == 64);
    XXH_ASSERT(secretSize >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
    {   XXH128_hash_t h128;
        h128.low64  = XXH3_mergeAccs(acc,
                                     secret + XXH_SECRET_MERGEACCS_START,
                                     (xxh_u64)len * XXH_PRIME64_1);
        h128.high64 = XXH3_mergeAccs(acc,
                                     secret + secretSize
                                            - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                     ~((xxh_u64)len * XXH_PRIME64_2));
        return h128;
    }
}

/*
 * It's important for performance that XXH3_hashLong() is not inlined.
 */
XXH_NO_INLINE XXH_PUREF XXH128_hash_t
XXH3_hashLong_128b_default(const void* XXH_RESTRICT input, size_t len,
                           XXH64_hash_t seed64,
                           const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64; (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_internal(input, len, XXH3_kSecret, sizeof(XXH3_kSecret),
                                       XXH3_accumulate, XXH3_scrambleAcc);
}

/*
 * It's important for performance to pass @p secretLen (when it's static)
 * to the compiler, so that it can properly optimize the vectorized loop.
 *
 * When the secret size is unknown, or on GCC 12 where the mix of NO_INLINE and FORCE_INLINE
 * breaks -Og, this is XXH_NO_INLINE.
 */
XXH3_WITH_SECRET_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSecret(const void* XXH_RESTRICT input, size_t len,
                              XXH64_hash_t seed64,
                              const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)seed64;
    return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, secretLen,
                                       XXH3_accumulate, XXH3_scrambleAcc);
}

XXH_FORCE_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed_internal(const void* XXH_RESTRICT input, size_t len,
                                     XXH64_hash_t seed64,
                                     XXH3_f_accumulate f_acc,
                                     XXH3_f_scrambleAcc f_scramble,
                                     XXH3_f_initCustomSecret f_initSec)
{
    if (seed64 == 0)
        return XXH3_hashLong_128b_internal(input, len,
                                           XXH3_kSecret, sizeof(XXH3_kSecret),
                                           f_acc, f_scramble);
    {   XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
        f_initSec(secret, seed64);
        return XXH3_hashLong_128b_internal(input, len, (const xxh_u8*)secret, sizeof(secret),
                                           f_acc, f_scramble);
    }
}

/*
 * It's important for performance that XXH3_hashLong is not inlined.
 */
XXH_NO_INLINE XXH128_hash_t
XXH3_hashLong_128b_withSeed(const void* input, size_t len,
                            XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen)
{
    (void)secret; (void)secretLen;
    return XXH3_hashLong_128b_withSeed_internal(input, len, seed64,
                XXH3_accumulate, XXH3_scrambleAcc, XXH3_initCustomSecret);
}

typedef XXH128_hash_t (*XXH3_hashLong128_f)(const void* XXH_RESTRICT, size_t,
                                            XXH64_hash_t, const void* XXH_RESTRICT, size_t);

XXH_FORCE_INLINE XXH128_hash_t
XXH3_128bits_internal(const void* input, size_t len,
                      XXH64_hash_t seed64, const void* XXH_RESTRICT secret, size_t secretLen,
                      XXH3_hashLong128_f f_hl128)
{
    XXH_ASSERT(secretLen >= XXH3_SECRET_SIZE_MIN);
    /*
     * If any action is to be taken when the `secret` preconditions are not respected,
     * it should be done here.
     * For now, it's a contract pre-condition.
     * Adding a check and a branch here would cost performance at every hash.
     */
    if (len <= 16)
        return XXH3_len_0to16_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, seed64);
    if (len <= 128)
        return XXH3_len_17to128_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_len_129to240_128b((const xxh_u8*)input, len, (const xxh_u8*)secret, secretLen, seed64);
    return f_hl128(input, len, seed64, secret, secretLen);
}


/* === Public XXH128 API === */

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(XXH_NOESCAPE const void* input, size_t len)
{
    return XXH3_128bits_internal(input, len, 0,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_default);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecret(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize)
{
    return XXH3_128bits_internal(input, len, 0,
                                 (const xxh_u8*)secret, secretSize,
                                 XXH3_hashLong_128b_withSecret);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSeed(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_internal(input, len, seed,
                                 XXH3_kSecret, sizeof(XXH3_kSecret),
                                 XXH3_hashLong_128b_withSeed);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH3_128bits_withSecretandSeed(XXH_NOESCAPE const void* input, size_t len, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    if (len <= XXH3_MIDSIZE_MAX)
        return XXH3_128bits_internal(input, len, seed, XXH3_kSecret, sizeof(XXH3_kSecret), NULL);
    return XXH3_hashLong_128b_withSecret(input, len, seed, secret, secretSize);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128(XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
{
    return XXH3_128bits_withSeed(input, len, seed);
}
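
/*!
 * Usage sketch (illustrative, not part of the library): one-shot 128-bit
 * hashing. XXH128() is simply XXH3_128bits_withSeed() under a shorter name.
 * @code{.c}
 *   #include <stdio.h>
 *   #include <string.h>
 *
 *   static void example_oneshot_128(void)
 *   {
 *       const char data[] = "example payload";
 *       XXH128_hash_t const h = XXH128(data, strlen(data), 0);
 *       // Conventionally printed high half first.
 *       printf("%016llx%016llx\n",
 *              (unsigned long long)h.high64, (unsigned long long)h.low64);
 *   }
 * @endcode
 */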


/* === XXH3 128-bit streaming === */
#ifndef XXH_NO_STREAM
/*
 * All initialization and update functions are identical to the 64-bit streaming variant.
 * The only difference is the finalization routine.
 */

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset(XXH_NOESCAPE XXH3_state_t* statePtr)
{
    return XXH3_64bits_reset(statePtr);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecret(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize)
{
    return XXH3_64bits_reset_withSecret(statePtr, secret, secretSize);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH64_hash_t seed)
{
    return XXH3_64bits_reset_withSeed(statePtr, seed);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_reset_withSecretandSeed(XXH_NOESCAPE XXH3_state_t* statePtr, XXH_NOESCAPE const void* secret, size_t secretSize, XXH64_hash_t seed)
{
    return XXH3_64bits_reset_withSecretandSeed(statePtr, secret, secretSize, seed);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_128bits_update(XXH_NOESCAPE XXH3_state_t* state, XXH_NOESCAPE const void* input, size_t len)
{
    return XXH3_64bits_update(state, input, len);
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (XXH_NOESCAPE const XXH3_state_t* state)
{
    const unsigned char* const secret = (state->extSecret == NULL) ? state->customSecret : state->extSecret;
    if (state->totalLen > XXH3_MIDSIZE_MAX) {
        XXH_ALIGN(XXH_ACC_ALIGN) XXH64_hash_t acc[XXH_ACC_NB];
        XXH3_digest_long(acc, state, secret);
        XXH_ASSERT(state->secretLimit + XXH_STRIPE_LEN >= sizeof(acc) + XXH_SECRET_MERGEACCS_START);
        {   XXH128_hash_t h128;
            h128.low64  = XXH3_mergeAccs(acc,
                                         secret + XXH_SECRET_MERGEACCS_START,
                                         (xxh_u64)state->totalLen * XXH_PRIME64_1);
            h128.high64 = XXH3_mergeAccs(acc,
                                         secret + state->secretLimit + XXH_STRIPE_LEN
                                                - sizeof(acc) - XXH_SECRET_MERGEACCS_START,
                                         ~((xxh_u64)state->totalLen * XXH_PRIME64_2));
            return h128;
        }
    }
    /* len <= XXH3_MIDSIZE_MAX : short code */
    if (state->seed)
        return XXH3_128bits_withSeed(state->buffer, (size_t)state->totalLen, state->seed);
    return XXH3_128bits_withSecret(state->buffer, (size_t)(state->totalLen),
                                   secret, state->secretLimit + XXH_STRIPE_LEN);
}
#endif /* !XXH_NO_STREAM */
/* 128-bit utility functions */

#include <string.h>   /* memcmp, memcpy */

/* return : 1 if equal, 0 if different */
/*! @ingroup XXH3_family */
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
{
    /* note : XXH128_hash_t is compact, it has no padding byte */
    return !(memcmp(&h1, &h2, sizeof(h1)));
}

/* This prototype is compatible with stdlib's qsort().
 * @return : >0 if *h128_1 > *h128_2
 *           <0 if *h128_1 < *h128_2
 *           =0 if *h128_1 == *h128_2 */
/*! @ingroup XXH3_family */
XXH_PUBLIC_API int XXH128_cmp(XXH_NOESCAPE const void* h128_1, XXH_NOESCAPE const void* h128_2)
{
    XXH128_hash_t const h1 = *(const XXH128_hash_t*)h128_1;
    XXH128_hash_t const h2 = *(const XXH128_hash_t*)h128_2;
    int const hcmp = (h1.high64 > h2.high64) - (h2.high64 > h1.high64);
    /* note : this bets that, in most cases, hash values are different */
    if (hcmp) return hcmp;
    return (h1.low64 > h2.low64) - (h2.low64 > h1.low64);
}
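
/*!
 * Usage sketch (illustrative, not part of the library): since XXH128_cmp()
 * matches the qsort() comparator contract, an array of XXH128_hash_t can be
 * sorted directly.
 * @code{.c}
 *   #include <stdlib.h>
 *
 *   static void example_sort_hashes(XXH128_hash_t* hashes, size_t count)
 *   {
 *       // Orders by high64 first, then low64, as implemented above.
 *       qsort(hashes, count, sizeof(hashes[0]), XXH128_cmp);
 *   }
 * @endcode
 */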


/*====== Canonical representation ======*/
/*! @ingroup XXH3_family */
XXH_PUBLIC_API void
XXH128_canonicalFromHash(XXH_NOESCAPE XXH128_canonical_t* dst, XXH128_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH128_canonical_t) == sizeof(XXH128_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) {
        hash.high64 = XXH_swap64(hash.high64);
        hash.low64  = XXH_swap64(hash.low64);
    }
    XXH_memcpy(dst, &hash.high64, sizeof(hash.high64));
    XXH_memcpy((char*)dst + sizeof(hash.high64), &hash.low64, sizeof(hash.low64));
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH128_hash_t
XXH128_hashFromCanonical(XXH_NOESCAPE const XXH128_canonical_t* src)
{
    XXH128_hash_t h;
    h.high64 = XXH_readBE64(src);
    h.low64  = XXH_readBE64(src->digest + 8);
    return h;
}
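
/*!
 * Usage sketch (illustrative, not part of the library): the canonical form is
 * a stable big-endian byte layout, suitable for storage or transmission;
 * reading it back restores the original value on any platform.
 * @code{.c}
 *   #include <assert.h>
 *
 *   void example_canonical_roundtrip(XXH128_hash_t h)
 *   {
 *       XXH128_canonical_t canon;
 *       XXH128_canonicalFromHash(&canon, h);   // serialize (big-endian)
 *       {   XXH128_hash_t const back = XXH128_hashFromCanonical(&canon);
 *           assert(XXH128_isEqual(h, back));   // exact round-trip
 *       }
 *   }
 * @endcode
 */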



/* ==========================================
 * Secret generators
 * ==========================================
 */
#define XXH_MIN(x, y) (((x) > (y)) ? (y) : (x))

XXH_FORCE_INLINE void XXH3_combine16(void* dst, XXH128_hash_t h128)
{
    XXH_writeLE64( dst, XXH_readLE64(dst) ^ h128.low64 );
    XXH_writeLE64( (char*)dst+8, XXH_readLE64((char*)dst+8) ^ h128.high64 );
}

/*! @ingroup XXH3_family */
XXH_PUBLIC_API XXH_errorcode
XXH3_generateSecret(XXH_NOESCAPE void* secretBuffer, size_t secretSize, XXH_NOESCAPE const void* customSeed, size_t customSeedSize)
{
#if (XXH_DEBUGLEVEL >= 1)
    XXH_ASSERT(secretBuffer != NULL);
    XXH_ASSERT(secretSize >= XXH3_SECRET_SIZE_MIN);
#else
    /* production mode: assert() is disabled */
    if (secretBuffer == NULL) return XXH_ERROR;
    if (secretSize < XXH3_SECRET_SIZE_MIN) return XXH_ERROR;
#endif

    if (customSeedSize == 0) {
        customSeed = XXH3_kSecret;
        customSeedSize = XXH_SECRET_DEFAULT_SIZE;
    }
#if (XXH_DEBUGLEVEL >= 1)
    XXH_ASSERT(customSeed != NULL);
#else
    if (customSeed == NULL) return XXH_ERROR;
#endif

    /* Fill secretBuffer with a copy of customSeed - repeat as needed */
    {   size_t pos = 0;
        while (pos < secretSize) {
            size_t const toCopy = XXH_MIN((secretSize - pos), customSeedSize);
            memcpy((char*)secretBuffer + pos, customSeed, toCopy);
            pos += toCopy;
    }   }

    {   size_t const nbSeg16 = secretSize / 16;
        size_t n;
        XXH128_canonical_t scrambler;
        XXH128_canonicalFromHash(&scrambler, XXH128(customSeed, customSeedSize, 0));
        for (n=0; n<nbSeg16; n++) {
            XXH128_hash_t const h128 = XXH128(&scrambler, sizeof(scrambler), n);
            XXH3_combine16((char*)secretBuffer + n*16, h128);
        }
        /* last segment */
        XXH3_combine16((char*)secretBuffer + secretSize - 16, XXH128_hashFromCanonical(&scrambler));
    }
    return XXH_OK;
}
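
/*!
 * Usage sketch (illustrative, not part of the library): deriving a secret
 * from low-entropy seed material, then hashing with it. The buffer size just
 * meets XXH3_SECRET_SIZE_MIN; the seed material is an arbitrary example.
 * @code{.c}
 *   #include <string.h>
 *
 *   XXH64_hash_t example_with_generated_secret(const void* data, size_t len)
 *   {
 *       unsigned char secret[XXH3_SECRET_SIZE_MIN];
 *       const char seedMaterial[] = "application-specific seed";
 *       if (XXH3_generateSecret(secret, sizeof(secret),
 *                               seedMaterial, strlen(seedMaterial)) != XXH_OK)
 *           return 0;  // illustrative error handling
 *       return XXH3_64bits_withSecret(data, len, secret, sizeof(secret));
 *   }
 * @endcode
 */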

/*! @ingroup XXH3_family */
XXH_PUBLIC_API void
XXH3_generateSecret_fromSeed(XXH_NOESCAPE void* secretBuffer, XXH64_hash_t seed)
{
    XXH_ALIGN(XXH_SEC_ALIGN) xxh_u8 secret[XXH_SECRET_DEFAULT_SIZE];
    XXH3_initCustomSecret(secret, seed);
    XXH_ASSERT(secretBuffer != NULL);
    memcpy(secretBuffer, secret, XXH_SECRET_DEFAULT_SIZE);
}



/* Pop our optimization override from above */
#if XXH_VECTOR == XXH_AVX2                      /* AVX2 */ \
  && defined(__GNUC__) && !defined(__clang__)   /* GCC, not Clang */ \
  && defined(__OPTIMIZE__) && XXH_SIZE_OPT <= 0 /* respect -O0 and -Os */
#  pragma GCC pop_options
#endif

#endif  /* XXH_NO_LONG_LONG */

#endif  /* XXH_NO_XXH3 */

/*!
 * @}
 */
#endif  /* XXH_IMPLEMENTATION */


#if defined (__cplusplus)
} /* extern "C" */
#endif