/* xxhash.h — source listing extracted from the NAMD documentation. */
1 /*
2  xxHash - Extremely Fast Hash algorithm
3  Header File
4  Copyright (C) 2012-2016, Yann Collet.
5 
6  BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
7 
8  Redistribution and use in source and binary forms, with or without
9  modification, are permitted provided that the following conditions are
10  met:
11 
12  * Redistributions of source code must retain the above copyright
13  notice, this list of conditions and the following disclaimer.
14  * Redistributions in binary form must reproduce the above
15  copyright notice, this list of conditions and the following disclaimer
16  in the documentation and/or other materials provided with the
17  distribution.
18 
19  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31  You can contact the author at :
32  - xxHash source repository : https://github.com/Cyan4973/xxHash
33 */
34 
35 /* Notice extracted from xxHash homepage :
36 
37 xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
38 It also successfully passes all tests from the SMHasher suite.
39 
40 Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
41 
42 Name Speed Q.Score Author
43 xxHash 5.4 GB/s 10
44 CrapWow 3.2 GB/s 2 Andrew
45 MurmurHash 3a 2.7 GB/s 10 Austin Appleby
46 SpookyHash 2.0 GB/s 10 Bob Jenkins
47 SBox 1.4 GB/s 9 Bret Mulvey
48 Lookup3 1.2 GB/s 9 Bob Jenkins
49 SuperFastHash 1.2 GB/s 1 Paul Hsieh
50 CityHash64 1.05 GB/s 10 Pike & Alakuijala
51 FNV 0.55 GB/s 5 Fowler, Noll, Vo
52 CRC32 0.43 GB/s 9
53 MD5-32 0.33 GB/s 10 Ronald L. Rivest
54 SHA1-32 0.28 GB/s 10
55 
56 Q.Score is a measure of quality of the hash function.
57 It depends on successfully passing SMHasher test set.
58 10 is a perfect score.
59 
60 Note : SMHasher's CRC32 implementation is not the fastest one.
61 Other speed-oriented implementations can be faster,
62 especially in combination with PCLMUL instruction :
63 http://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735
64 
65 A 64-bit version, named XXH64, is available since r35.
66 It offers much better speed, but for 64-bit applications only.
67 Name Speed on 64 bits Speed on 32 bits
68 XXH64 13.8 GB/s 1.9 GB/s
69 XXH32 6.8 GB/s 6.0 GB/s
70 */
71 
72 #if defined (__cplusplus)
73 extern "C" {
74 #endif
75 
76 /* ****************************
77  * INLINE mode
78  ******************************/
90 #if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \
91  && !defined(XXH_INLINE_ALL_31684351384)
92  /* this section should be traversed only once */
93 # define XXH_INLINE_ALL_31684351384
94  /* give access to advanced API, required to compile implementations */
95 # undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */
96 # define XXH_STATIC_LINKING_ONLY
97  /* make functions private */
98 # undef XXH_PUBLIC_API
99 # if defined(__GNUC__)
100 # define XXH_PUBLIC_API static __inline __attribute__((unused))
101 # elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
102 # define XXH_PUBLIC_API static inline
103 # elif defined(_MSC_VER)
104 # define XXH_PUBLIC_API static __inline
105 # else
106  /* note : this version may generate warnings for unused static functions */
107 # define XXH_PUBLIC_API static
108 # endif
109 
110  /* prefix all names, to avoid symbol duplicates with potential library */
111 # ifdef XXH_NAMESPACE
112 # error "XXH_INLINE_ALL with XXH_NAMESPACE is not supported"
113 # /* Note : Alternative is to #undef all symbols (it's a pretty large list).
114  * If doing nothing : it compiles, but functions are actually Not inlined.
115  * */
116 # endif
117 # define XXH_NAMESPACE XXH_INLINE_
118  /* some identifiers are not symbols,
119  * they must nonetheless be renamed to avoid double declaration
120  * Alternative : do not redeclare them,
121  * which requires some #ifdef, and is more dispersed in the file
122  * while renaming can be achieved in a single place */
123 # define XXH_IPREF(Id) XXH_INLINE_ ## Id
124 # define XXH_OK XXH_IPREF(XXH_OK)
125 # define XXH_ERROR XXH_IPREF(XXH_ERROR)
126 # define XXH_errorcode XXH_IPREF(XXH_errorcode)
127 # define XXH32_canonical_t XXH_IPREF(XXH32_canonical_t)
128 # define XXH64_canonical_t XXH_IPREF(XXH64_canonical_t)
129 # define XXH128_canonical_t XXH_IPREF(XXH128_canonical_t)
130 # define XXH32_state_s XXH_IPREF(XXH32_state_s)
131 # define XXH32_state_t XXH_IPREF(XXH32_state_t)
132 # define XXH64_state_s XXH_IPREF(XXH64_state_s)
133 # define XXH64_state_t XXH_IPREF(XXH64_state_t)
134 # define XXH3_state_s XXH_IPREF(XXH3_state_s)
135 # define XXH3_state_t XXH_IPREF(XXH3_state_t)
136 # define XXH128_hash_t XXH_IPREF(XXH128_hash_t)
137  /* Ensure header is parsed again, even if it was previously included */
138 # undef XXHASH_H_5627135585666179
139 # undef XXHASH_H_STATIC_13879238742
140 #endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
141 
142 
143 
144 /* ****************************************************************
145  * Stable API
146  *****************************************************************/
147 #ifndef XXHASH_H_5627135585666179
148 #define XXHASH_H_5627135585666179 1
149 
150 /* specific declaration modes for Windows */
151 #if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
152 # if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
153 # ifdef XXH_EXPORT
154 # define XXH_PUBLIC_API __declspec(dllexport)
155 # elif XXH_IMPORT
156 # define XXH_PUBLIC_API __declspec(dllimport)
157 # endif
158 # else
159 # define XXH_PUBLIC_API /* do nothing */
160 # endif
161 #endif
162 
174 #ifdef XXH_NAMESPACE
175 # define XXH_CAT(A,B) A##B
176 # define XXH_NAME2(A,B) XXH_CAT(A,B)
177 # define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
178 # define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
179 # define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
180 # define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
181 # define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
182 # define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
183 # define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
184 # define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
185 # define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
186 # define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
187 # define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
188 # define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
189 # define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
190 # define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
191 # define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
192 # define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
193 # define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
194 # define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
195 # define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
196 #endif
197 
198 
199 /* *************************************
200 * Version
201 ***************************************/
202 #define XXH_VERSION_MAJOR 0
203 #define XXH_VERSION_MINOR 7
204 #define XXH_VERSION_RELEASE 2
205 #define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
206 XXH_PUBLIC_API unsigned XXH_versionNumber (void);
207 
208 
209 /* ****************************
210 * Definitions
211 ******************************/
212 #include <stddef.h> /* size_t */
213 typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
214 
215 
216 /*-**********************************************************************
217 * 32-bit hash
218 ************************************************************************/
219 #if !defined (__VMS) \
220  && (defined (__cplusplus) \
221  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
222 # include <stdint.h>
223  typedef uint32_t XXH32_hash_t;
224 #else
225 # include <limits.h>
226 # if UINT_MAX == 0xFFFFFFFFUL
227  typedef unsigned int XXH32_hash_t;
228 # else
229 # if ULONG_MAX == 0xFFFFFFFFUL
230  typedef unsigned long XXH32_hash_t;
231 # else
232 # error "unsupported platform : need a 32-bit type"
233 # endif
234 # endif
235 #endif
236 
242 XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
243 
244 /******* Streaming *******/
245 
246 /*
247  * Streaming functions generate the xxHash value from an incremental input.
248  * This method is slower than single-call functions, due to state management.
249  * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
250  *
251  * XXH state must first be allocated, using XXH*_createState() .
252  *
253  * Start a new hash by initializing state with a seed, using XXH*_reset().
254  *
255  * Then, feed the hash state by calling XXH*_update() as many times as necessary.
256  * The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
257  *
258  * Finally, a hash value can be produced anytime, by using XXH*_digest().
259  * This function returns the nn-bits hash as an int or long long.
260  *
261  * It's still possible to continue inserting input into the hash state after a digest,
262  * and generate some new hash values later on, by invoking again XXH*_digest().
263  *
264  * When done, release the state, using XXH*_freeState().
265  */
266 
267 typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */
270 XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
271 
272 XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);
273 XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
274 XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
275 
276 /******* Canonical representation *******/
277 
278 /* Default return values from XXH functions are basic unsigned 32 and 64 bits.
279  * This is the simplest and fastest format for further post-processing.
280  * However, this leaves open the question of what is the order of bytes,
281  * since little and big endian conventions will write the same number differently.
282  *
283  * The canonical representation settles this issue,
284  * by mandating big-endian convention,
285  * aka, the same convention as human-readable numbers (large digits first).
286  * When writing hash values to storage, sending them over a network, or printing them,
287  * it's highly recommended to use the canonical representation,
288  * to ensure portability across a wider range of systems, present and future.
289  *
290  * The following functions allow transformation of hash values into and from canonical format.
291  */
292 
293 typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
294 XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
296 
297 
298 #ifndef XXH_NO_LONG_LONG
299 /*-**********************************************************************
300 * 64-bit hash
301 ************************************************************************/
302 #if !defined (__VMS) \
303  && (defined (__cplusplus) \
304  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
305 # include <stdint.h>
306  typedef uint64_t XXH64_hash_t;
307 #else
308  /* the following type must have a width of 64-bit */
309  typedef unsigned long long XXH64_hash_t;
310 #endif
311 
317 XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, XXH64_hash_t seed);
318 
319 /******* Streaming *******/
320 typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
323 XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);
324 
326 XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
328 
329 /******* Canonical representation *******/
330 typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
333 
334 
335 #endif /* XXH_NO_LONG_LONG */
336 
337 #endif /* XXHASH_H_5627135585666179 */
338 
339 
340 
341 #if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
342 #define XXHASH_H_STATIC_13879238742
343 /* ************************************************************************************************
344  This section contains declarations which are not guaranteed to remain stable.
345  They may change in future versions, becoming incompatible with a different version of the library.
346  These declarations should only be used with static linking.
347  Never use them in association with dynamic linking !
348 *************************************************************************************************** */
349 
350 /* These definitions are only present to allow
351  * static allocation of XXH state, on stack or in a struct for example.
352  * Never **ever** use members directly. */
353 
354 struct XXH32_state_s {
355  XXH32_hash_t total_len_32;
356  XXH32_hash_t large_len;
357  XXH32_hash_t v1;
358  XXH32_hash_t v2;
359  XXH32_hash_t v3;
360  XXH32_hash_t v4;
361  XXH32_hash_t mem32[4];
362  XXH32_hash_t memsize;
363  XXH32_hash_t reserved; /* never read nor write, might be removed in a future version */
364 }; /* typedef'd to XXH32_state_t */
365 
366 
367 #ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */
368 
369 struct XXH64_state_s {
370  XXH64_hash_t total_len;
371  XXH64_hash_t v1;
372  XXH64_hash_t v2;
373  XXH64_hash_t v3;
374  XXH64_hash_t v4;
375  XXH64_hash_t mem64[4];
376  XXH32_hash_t memsize;
377  XXH32_hash_t reserved32; /* required for padding anyway */
378  XXH64_hash_t reserved64; /* never read nor write, might be removed in a future version */
379 }; /* typedef'd to XXH64_state_t */
380 
381 
382 /*-**********************************************************************
383 * XXH3
384 * New experimental hash
385 ************************************************************************/
386 
387 /* *********************************************
388  * XXH3 is a new hash algorithm,
389  * featuring improved speed performance for both small and large inputs.
390  * See full speed analysis at : http://fastcompression.blogspot.com/2019/03/presenting-xxh3.html
391  * In general, expect XXH3 to run about ~2x faster on large inputs,
392  * and >3x faster on small ones, though exact differences depend on platform.
393  *
394  * The algorithm is portable, will generate the same hash on all platforms.
395  * It benefits greatly from vectorization units, but does not require it.
396  *
397  * XXH3 offers 2 variants, _64bits and _128bits.
398  * When only 64 bits are needed, prefer calling the _64bits variant :
399  * it reduces the amount of mixing, resulting in faster speed on small inputs.
400  * It's also generally simpler to manipulate a scalar return type than a struct.
401  *
402  * The XXH3 algorithm is still considered experimental.
403  * Produced results can still change between versions.
404  * Results produced by v0.7.x are not comparable with results from v0.7.y .
405  * It's nonetheless possible to use XXH3 for ephemeral data (local sessions),
406  * but avoid storing values in long-term storage for later reads.
407  *
408  * The API supports one-shot hashing, streaming mode, and custom secrets.
409  *
410  * There are still a number of opened questions that community can influence during the experimental period.
411  * I'm trying to list a few of them below, though don't consider this list as complete.
412  *
413  * - 128-bits output type : currently defined as a structure of two 64-bits fields.
414  * That's because 128-bit values do not exist in C standard.
415  * Note that it means that, at byte level, result is not identical depending on endianness.
416  * However, at field level, they are identical on all platforms.
417  * The canonical representation solves the issue of identical byte-level representation across platforms,
418  * which is necessary for serialization.
419  * Q1 : Would there be a better representation for a 128-bit hash result ?
420  * Q2 : Are the names of the inner 64-bit fields important ? Should they be changed ?
421  *
422  * - Prototype XXH128() : XXH128() uses the same arguments as XXH64(), for consistency.
423  * It means it maps to XXH3_128bits_withSeed().
424  * This variant is slightly slower than XXH3_128bits(),
425  * because the seed is now part of the algorithm, and can't be simplified.
426  * Is that a good idea ?
427  *
428  * - Seed type for XXH128() : currently, it's a single 64-bit value, like the 64-bit variant.
429  * It could be argued that it's more logical to offer a 128-bit seed input parameter for a 128-bit hash.
430  * But 128-bit seed is more difficult to use, since it requires to pass a structure instead of a scalar value.
431  * Such a variant could either replace current one, or become an additional one.
432  * Farmhash, for example, offers both variants (the 128-bits seed variant is called `doubleSeed`).
433  * Follow up question : if both 64-bit and 128-bit seeds are allowed, which variant should be called XXH128 ?
434  *
435  * - Result for len==0 : Currently, the result of hashing a zero-length input is always `0`.
436  * It seems okay as a return value when using "default" secret and seed.
437  * But is it still fine to return `0` when secret or seed are non-default ?
438  * Are there use cases which could depend on generating a different hash result for zero-length input when the secret is different ?
439  *
440  * - Consistency (1) : Streaming XXH128 uses an XXH3 state, which is the same state as XXH3_64bits().
441  * It means a 128bit streaming loop must invoke the following symbols :
442  * XXH3_createState(), XXH3_128bits_reset(), XXH3_128bits_update() (loop), XXH3_128bits_digest(), XXH3_freeState().
443  * Is that consistent enough ?
444  *
445  * - Consistency (2) : The canonical representation of `XXH3_64bits` is provided by existing functions
446  * XXH64_canonicalFromHash(), and reverse operation XXH64_hashFromCanonical().
447  * As a mirror, canonical functions for XXH128_hash_t results generated by `XXH3_128bits`
448  * are XXH128_canonicalFromHash() and XXH128_hashFromCanonical().
449  * Which means, `XXH3` doesn't appear in the names, because canonical functions operate on a type,
450  * independently of which algorithm was used to generate that type.
451  * Is that consistent enough ?
452  */
453 
454 #ifdef XXH_NAMESPACE
455 # define XXH3_64bits XXH_NAME2(XXH_NAMESPACE, XXH3_64bits)
456 # define XXH3_64bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSecret)
457 # define XXH3_64bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_withSeed)
458 
459 # define XXH3_createState XXH_NAME2(XXH_NAMESPACE, XXH3_createState)
460 # define XXH3_freeState XXH_NAME2(XXH_NAMESPACE, XXH3_freeState)
461 # define XXH3_copyState XXH_NAME2(XXH_NAMESPACE, XXH3_copyState)
462 
463 # define XXH3_64bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset)
464 # define XXH3_64bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSeed)
465 # define XXH3_64bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_reset_withSecret)
466 # define XXH3_64bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_update)
467 # define XXH3_64bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_64bits_digest)
468 #endif
469 
470 /* XXH3_64bits() :
471  * default 64-bit variant, using default secret and default seed of 0.
472  * It's the fastest variant. */
473 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void* data, size_t len);
474 
475 /* XXH3_64bits_withSecret() :
476  * It's possible to provide any blob of bytes as a "secret" to generate the hash.
477  * This makes it more difficult for an external actor to prepare an intentional collision.
478  * The secret *must* be large enough (>= XXH3_SECRET_SIZE_MIN).
479  * It should consist of random bytes.
480  * Avoid repeating same character, or sequences of bytes,
481  * and especially avoid swathes of \0.
482  * Failure to respect these conditions will result in a poor quality hash.
483  */
484 #define XXH3_SECRET_SIZE_MIN 136
485 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
486 
487 /* XXH3_64bits_withSeed() :
488  * This variant generates on the fly a custom secret,
489  * based on the default secret, altered using the `seed` value.
490  * While this operation is decently fast, note that it's not completely free.
491  * note : seed==0 produces same results as XXH3_64bits() */
492 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void* data, size_t len, XXH64_hash_t seed);
493 
494 
495 /* streaming 64-bit */
496 
497 #if defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) /* C11+ */
498 # include <stdalign.h>
499 # define XXH_ALIGN(n) alignas(n)
500 #elif defined(__GNUC__)
501 # define XXH_ALIGN(n) __attribute__ ((aligned(n)))
502 #elif defined(_MSC_VER)
503 # define XXH_ALIGN(n) __declspec(align(n))
504 #else
505 # define XXH_ALIGN(n) /* disabled */
506 #endif
507 
508 typedef struct XXH3_state_s XXH3_state_t;
509 
510 #define XXH3_SECRET_DEFAULT_SIZE 192 /* minimum XXH3_SECRET_SIZE_MIN */
511 #define XXH3_INTERNALBUFFER_SIZE 256
512 struct XXH3_state_s {
513  XXH_ALIGN(64) XXH64_hash_t acc[8];
514  XXH_ALIGN(64) unsigned char customSecret[XXH3_SECRET_DEFAULT_SIZE]; /* used to store a custom secret generated from the seed. Makes state larger. Design might change */
515  XXH_ALIGN(64) unsigned char buffer[XXH3_INTERNALBUFFER_SIZE];
516  XXH32_hash_t bufferedSize;
517  XXH32_hash_t nbStripesPerBlock;
518  XXH32_hash_t nbStripesSoFar;
519  XXH32_hash_t secretLimit;
520  XXH32_hash_t reserved32;
521  XXH32_hash_t reserved32_2;
522  XXH64_hash_t totalLen;
523  XXH64_hash_t seed;
524  XXH64_hash_t reserved64;
525  const unsigned char* secret; /* note : there is some padding after, due to alignment on 64 bytes */
526 }; /* typedef'd to XXH3_state_t */
527 
528 /* Streaming requires state maintenance.
529  * This operation costs memory and cpu.
530  * As a consequence, streaming is slower than one-shot hashing.
531  * For better performance, prefer using one-shot functions whenever possible. */
532 
533 XXH_PUBLIC_API XXH3_state_t* XXH3_createState(void);
534 XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t* statePtr);
535 XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t* dst_state, const XXH3_state_t* src_state);
536 
537 
538 /* XXH3_64bits_reset() :
539  * initialize with default parameters.
540  * result will be equivalent to `XXH3_64bits()`. */
541 XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t* statePtr);
542 /* XXH3_64bits_reset_withSeed() :
543  * generate a custom secret from `seed`, and store it into state.
544  * digest will be equivalent to `XXH3_64bits_withSeed()`. */
546 /* XXH3_64bits_reset_withSecret() :
547  * `secret` is referenced, and must outlive the hash streaming session.
548  * secretSize must be >= XXH3_SECRET_SIZE_MIN.
549  */
550 XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);
551 
552 XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
553 XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest (const XXH3_state_t* statePtr);
554 
555 
556 /* 128-bit */
557 
558 #ifdef XXH_NAMESPACE
559 # define XXH128 XXH_NAME2(XXH_NAMESPACE, XXH128)
560 # define XXH3_128bits XXH_NAME2(XXH_NAMESPACE, XXH3_128bits)
561 # define XXH3_128bits_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSeed)
562 # define XXH3_128bits_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_withSecret)
563 
564 # define XXH3_128bits_reset XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset)
565 # define XXH3_128bits_reset_withSeed XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSeed)
566 # define XXH3_128bits_reset_withSecret XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_reset_withSecret)
567 # define XXH3_128bits_update XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_update)
568 # define XXH3_128bits_digest XXH_NAME2(XXH_NAMESPACE, XXH3_128bits_digest)
569 
570 # define XXH128_isEqual XXH_NAME2(XXH_NAMESPACE, XXH128_isEqual)
571 # define XXH128_cmp XXH_NAME2(XXH_NAMESPACE, XXH128_cmp)
572 # define XXH128_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH128_canonicalFromHash)
573 # define XXH128_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH128_hashFromCanonical)
574 #endif
575 
576 typedef struct {
577  XXH64_hash_t low64;
578  XXH64_hash_t high64;
579 } XXH128_hash_t;
580 
581 XXH_PUBLIC_API XXH128_hash_t XXH128(const void* data, size_t len, XXH64_hash_t seed);
582 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void* data, size_t len);
583 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void* data, size_t len, XXH64_hash_t seed); /* == XXH128() */
584 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void* data, size_t len, const void* secret, size_t secretSize);
585 
586 XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t* statePtr);
588 XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t* statePtr, const void* secret, size_t secretSize);
589 
590 XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update (XXH3_state_t* statePtr, const void* input, size_t length);
591 XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest (const XXH3_state_t* statePtr);
592 
593 
594 /* Note : for better performance, following functions can be inlined,
595  * using XXH_INLINE_ALL */
596 
597 /* return : 1 is equal, 0 if different */
598 XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2);
599 
600 /* This comparator is compatible with stdlib's qsort().
601  * return : >0 if *h128_1 > *h128_2
602  * <0 if *h128_1 < *h128_2
603  * =0 if *h128_1 == *h128_2 */
604 XXH_PUBLIC_API int XXH128_cmp(const void* h128_1, const void* h128_2);
605 
606 
607 /******* Canonical representation *******/
608 typedef struct { unsigned char digest[16]; } XXH128_canonical_t;
609 XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t* dst, XXH128_hash_t hash);
610 XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t* src);
611 
612 
613 #endif /* XXH_NO_LONG_LONG */
614 
615 #if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
616 # define XXH_IMPLEMENTATION
617 #endif
618 
619 #endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
620 
621 
622 /* ======================================================================== */
623 /* ======================================================================== */
624 /* ======================================================================== */
625 
626 
627 /*-**********************************************************************
628 * xxHash implementation
629 * -**********************************************************************
630 * Functions implementation used to be hosted within xxhash.c .
631 * However, code inlining requires to place implementation in the header file.
632 * As a consequence, xxhash.c used to be included within xxhash.h .
633 * But some build systems don't like *.c inclusions.
634 * So the implementation is now directly integrated within xxhash.h .
635 * Another small advantage is that xxhash.c is no longer required in /includes .
636 ************************************************************************/
637 
638 #if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
639  || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
640 # define XXH_IMPLEM_13a8737387
641 
642 /* *************************************
643 * Tuning parameters
644 ***************************************/
658 #ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
659 # if !defined(__clang__) && defined(__GNUC__) && defined(__ARM_FEATURE_UNALIGNED) && defined(__ARM_ARCH) && (__ARM_ARCH == 6)
660 # define XXH_FORCE_MEMORY_ACCESS 2
661 # elif !defined(__clang__) && ((defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
662  (defined(__GNUC__) && (defined(__ARM_ARCH) && __ARM_ARCH >= 7)))
663 # define XXH_FORCE_MEMORY_ACCESS 1
664 # endif
665 #endif
666 
672 #ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */
673 # define XXH_ACCEPT_NULL_INPUT_POINTER 0
674 #endif
675 
683 #ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
684 # if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
685 # define XXH_FORCE_ALIGN_CHECK 0
686 # else
687 # define XXH_FORCE_ALIGN_CHECK 1
688 # endif
689 #endif
690 
707 #ifndef XXH_NO_INLINE_HINTS
708 # if defined(__OPTIMIZE_SIZE__) /* -Os, -Oz */ \
709  || defined(__NO_INLINE__) /* -O0, -fno-inline */
710 # define XXH_NO_INLINE_HINTS 1
711 # else
712 # define XXH_NO_INLINE_HINTS 0
713 # endif
714 #endif
715 
721 #ifndef XXH_REROLL
722 # if defined(__OPTIMIZE_SIZE__)
723 # define XXH_REROLL 1
724 # else
725 # define XXH_REROLL 0
726 # endif
727 #endif
728 
729 
730 /* *************************************
731 * Includes & Memory related functions
732 ***************************************/
/* Memory management wrappers : all libc calls are funneled through these
 * thin helpers, so the underlying functions can be swapped in one place. */
#include <stdlib.h>   /* malloc, free */

static void* XXH_malloc(size_t s)
{
    return malloc(s);
}

static void XXH_free(void* p)
{
    free(p);
}

#include <string.h>   /* memcpy */

static void* XXH_memcpy(void* dest, const void* src, size_t size)
{
    return memcpy(dest, src, size);
}
741 
742 #include <limits.h> /* ULLONG_MAX */
743 
744 
745 /* *************************************
746 * Compiler Specific Options
747 ***************************************/
748 #ifdef _MSC_VER /* Visual Studio warning fix */
749 # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
750 #endif
751 
752 #if XXH_NO_INLINE_HINTS /* disable inlining hints */
753 # define XXH_FORCE_INLINE static
754 # define XXH_NO_INLINE static
755 #elif defined(_MSC_VER) /* Visual Studio */
756 # define XXH_FORCE_INLINE static __forceinline
757 # define XXH_NO_INLINE static __declspec(noinline)
758 #else
759 # if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
760 # ifdef __GNUC__
761 # define XXH_FORCE_INLINE static inline __attribute__((always_inline))
762 # define XXH_NO_INLINE static __attribute__((noinline))
763 # else
764 # define XXH_FORCE_INLINE static inline
765 # define XXH_NO_INLINE static
766 # endif
767 # else
768 # define XXH_FORCE_INLINE static
769 # define XXH_NO_INLINE static
770 # endif /* __STDC_VERSION__ */
771 #endif
772 
773 
774 
775 /* *************************************
776 * Debug
777 ***************************************/
778 /* DEBUGLEVEL is expected to be defined externally,
779  * typically through compiler command line.
780  * Value must be a number. */
781 #ifndef DEBUGLEVEL
782 # define DEBUGLEVEL 0
783 #endif
784 
785 #if (DEBUGLEVEL>=1)
786 # include <assert.h> /* note : can still be disabled with NDEBUG */
787 # define XXH_ASSERT(c) assert(c)
788 #else
789 # define XXH_ASSERT(c) ((void)0)
790 #endif
791 
792 /* note : use after variable declarations */
793 #define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; }
794 
795 
796 /* *************************************
797 * Basic Types
798 ***************************************/
799 #if !defined (__VMS) \
800  && (defined (__cplusplus) \
801  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
802 # include <stdint.h>
803  typedef uint8_t xxh_u8;
804 #else
805  typedef unsigned char xxh_u8;
806 #endif
807 typedef XXH32_hash_t xxh_u32;
808 
809 
810 /* *** Memory access *** */
811 
812 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
813 
814 /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
815 static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
816 
817 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
818 
819 /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
820 /* currently only defined for gcc and icc */
821 typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
822 static xxh_u32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
823 
824 #else
825 
826 /* portable and safe solution. Generally efficient.
827  * see : http://stackoverflow.com/a/32095106/646947
828  */
829 static xxh_u32 XXH_read32(const void* memPtr)
830 {
831  xxh_u32 val;
832  memcpy(&val, memPtr, sizeof(val));
833  return val;
834 }
835 
836 #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
837 
838 
839 /* *** Endianness *** */
840 typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
841 
842 /* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
843 #ifndef XXH_CPU_LITTLE_ENDIAN
844 # if defined(_WIN32) /* Windows is always little endian */ \
845  || defined(__LITTLE_ENDIAN__) \
846  || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
847 # define XXH_CPU_LITTLE_ENDIAN 1
848 # elif defined(__BIG_ENDIAN__) \
849  || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
850 # define XXH_CPU_LITTLE_ENDIAN 0
851 # else
852 static int XXH_isLittleEndian(void)
853 {
854  const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; /* don't use static : performance detrimental */
855  return one.c[0];
856 }
857 # define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
858 # endif
859 #endif
860 
861 
862 
863 
864 /* ****************************************
865 * Compiler-specific Functions and Macros
866 ******************************************/
867 #define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
868 
869 #ifndef __has_builtin
870 # define __has_builtin(x) 0
871 #endif
872 
873 #if !defined(NO_CLANG_BUILTIN) && __has_builtin(__builtin_rotateleft32) && __has_builtin(__builtin_rotateleft64)
874 # define XXH_rotl32 __builtin_rotateleft32
875 # define XXH_rotl64 __builtin_rotateleft64
876 /* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
877 #elif defined(_MSC_VER)
878 # define XXH_rotl32(x,r) _rotl(x,r)
879 # define XXH_rotl64(x,r) _rotl64(x,r)
880 #else
881 # define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
882 # define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
883 #endif
884 
885 #if defined(_MSC_VER) /* Visual Studio */
886 # define XXH_swap32 _byteswap_ulong
887 #elif XXH_GCC_VERSION >= 403
888 # define XXH_swap32 __builtin_bswap32
889 #else
890 static xxh_u32 XXH_swap32 (xxh_u32 x)
891 {
892  return ((x << 24) & 0xff000000 ) |
893  ((x << 8) & 0x00ff0000 ) |
894  ((x >> 8) & 0x0000ff00 ) |
895  ((x >> 24) & 0x000000ff );
896 }
897 #endif
898 
899 
900 /* ***************************
901 * Memory reads
902 *****************************/
903 typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
904 
905 XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
906 {
907  return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
908 }
909 
910 static xxh_u32 XXH_readBE32(const void* ptr)
911 {
912  return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
913 }
914 
915 XXH_FORCE_INLINE xxh_u32
916 XXH_readLE32_align(const void* ptr, XXH_alignment align)
917 {
918  if (align==XXH_unaligned) {
919  return XXH_readLE32(ptr);
920  } else {
921  return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
922  }
923 }
924 
925 
926 /* *************************************
927 * Misc
928 ***************************************/
929 XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
930 
931 
932 /* *******************************************************************
933 * 32-bit hash functions
934 *********************************************************************/
935 static const xxh_u32 PRIME32_1 = 0x9E3779B1U; /* 0b10011110001101110111100110110001 */
936 static const xxh_u32 PRIME32_2 = 0x85EBCA77U; /* 0b10000101111010111100101001110111 */
937 static const xxh_u32 PRIME32_3 = 0xC2B2AE3DU; /* 0b11000010101100101010111000111101 */
938 static const xxh_u32 PRIME32_4 = 0x27D4EB2FU; /* 0b00100111110101001110101100101111 */
939 static const xxh_u32 PRIME32_5 = 0x165667B1U; /* 0b00010110010101100110011110110001 */
940 
941 static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
942 {
943  acc += input * PRIME32_2;
944  acc = XXH_rotl32(acc, 13);
945  acc *= PRIME32_1;
946 #if defined(__GNUC__) && defined(__SSE4_1__) && !defined(XXH_ENABLE_AUTOVECTORIZE)
947  /* UGLY HACK:
948  * This inline assembly hack forces acc into a normal register. This is the
949  * only thing that prevents GCC and Clang from autovectorizing the XXH32 loop
950  * (pragmas and attributes don't work for some resason) without globally
951  * disabling SSE4.1.
952  *
953  * The reason we want to avoid vectorization is because despite working on
954  * 4 integers at a time, there are multiple factors slowing XXH32 down on
955  * SSE4:
956  * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on newer chips!)
957  * making it slightly slower to multiply four integers at once compared to four
958  * integers independently. Even when pmulld was fastest, Sandy/Ivy Bridge, it is
959  * still not worth it to go into SSE just to multiply unless doing a long operation.
960  *
961  * - Four instructions are required to rotate,
962  * movqda tmp, v // not required with VEX encoding
963  * pslld tmp, 13 // tmp <<= 13
964  * psrld v, 19 // x >>= 19
965  * por v, tmp // x |= tmp
966  * compared to one for scalar:
967  * roll v, 13 // reliably fast across the board
968  * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason
969  *
970  * - Instruction level parallelism is actually more beneficial here because the
971  * SIMD actually serializes this operation: While v1 is rotating, v2 can load data,
972  * while v3 can multiply. SSE forces them to operate together.
973  *
974  * How this hack works:
975  * __asm__("" // Declare an assembly block but don't declare any instructions
976  * : // However, as an Input/Output Operand,
977  * "+r" // constrain a read/write operand (+) as a general purpose register (r).
978  * (acc) // and set acc as the operand
979  * );
980  *
981  * Because of the 'r', the compiler has promised that seed will be in a
982  * general purpose register and the '+' says that it will be 'read/write',
983  * so it has to assume it has changed. It is like volatile without all the
984  * loads and stores.
985  *
986  * Since the argument has to be in a normal register (not an SSE register),
987  * each time XXH32_round is called, it is impossible to vectorize. */
988  __asm__("" : "+r" (acc));
989 #endif
990  return acc;
991 }
992 
993 /* mix all bits */
994 static xxh_u32 XXH32_avalanche(xxh_u32 h32)
995 {
996  h32 ^= h32 >> 15;
997  h32 *= PRIME32_2;
998  h32 ^= h32 >> 13;
999  h32 *= PRIME32_3;
1000  h32 ^= h32 >> 16;
1001  return(h32);
1002 }
1003 
1004 #define XXH_get32bits(p) XXH_readLE32_align(p, align)
1005 
1006 static xxh_u32
1007 XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
1008 {
1009 #define PROCESS1 \
1010  h32 += (*ptr++) * PRIME32_5; \
1011  h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
1012 
1013 #define PROCESS4 \
1014  h32 += XXH_get32bits(ptr) * PRIME32_3; \
1015  ptr+=4; \
1016  h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
1017 
1018  /* Compact rerolled version */
1019  if (XXH_REROLL) {
1020  len &= 15;
1021  while (len >= 4) {
1022  PROCESS4;
1023  len -= 4;
1024  }
1025  while (len > 0) {
1026  PROCESS1;
1027  --len;
1028  }
1029  return XXH32_avalanche(h32);
1030  } else {
1031  switch(len&15) /* or switch(bEnd - p) */ {
1032  case 12: PROCESS4;
1033  /* fallthrough */
1034  case 8: PROCESS4;
1035  /* fallthrough */
1036  case 4: PROCESS4;
1037  return XXH32_avalanche(h32);
1038 
1039  case 13: PROCESS4;
1040  /* fallthrough */
1041  case 9: PROCESS4;
1042  /* fallthrough */
1043  case 5: PROCESS4;
1044  PROCESS1;
1045  return XXH32_avalanche(h32);
1046 
1047  case 14: PROCESS4;
1048  /* fallthrough */
1049  case 10: PROCESS4;
1050  /* fallthrough */
1051  case 6: PROCESS4;
1052  PROCESS1;
1053  PROCESS1;
1054  return XXH32_avalanche(h32);
1055 
1056  case 15: PROCESS4;
1057  /* fallthrough */
1058  case 11: PROCESS4;
1059  /* fallthrough */
1060  case 7: PROCESS4;
1061  /* fallthrough */
1062  case 3: PROCESS1;
1063  /* fallthrough */
1064  case 2: PROCESS1;
1065  /* fallthrough */
1066  case 1: PROCESS1;
1067  /* fallthrough */
1068  case 0: return XXH32_avalanche(h32);
1069  }
1070  XXH_ASSERT(0);
1071  return h32; /* reaching this point is deemed impossible */
1072  }
1073 }
1074 
1075 XXH_FORCE_INLINE xxh_u32
1076 XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
1077 {
1078  const xxh_u8* bEnd = input + len;
1079  xxh_u32 h32;
1080 
1081 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
1082  if (input==NULL) {
1083  len=0;
1084  bEnd=input=(const xxh_u8*)(size_t)16;
1085  }
1086 #endif
1087 
1088  if (len>=16) {
1089  const xxh_u8* const limit = bEnd - 15;
1090  xxh_u32 v1 = seed + PRIME32_1 + PRIME32_2;
1091  xxh_u32 v2 = seed + PRIME32_2;
1092  xxh_u32 v3 = seed + 0;
1093  xxh_u32 v4 = seed - PRIME32_1;
1094 
1095  do {
1096  v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
1097  v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
1098  v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
1099  v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
1100  } while (input < limit);
1101 
1102  h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
1103  + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
1104  } else {
1105  h32 = seed + PRIME32_5;
1106  }
1107 
1108  h32 += (xxh_u32)len;
1109 
1110  return XXH32_finalize(h32, input, len&15, align);
1111 }
1112 
1113 
1114 XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
1115 {
1116 #if 0
1117  /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
1118  XXH32_state_t state;
1119  XXH32_reset(&state, seed);
1120  XXH32_update(&state, (const xxh_u8*)input, len);
1121  return XXH32_digest(&state);
1122 
1123 #else
1124 
1125  if (XXH_FORCE_ALIGN_CHECK) {
1126  if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
1127  return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
1128  } }
1129 
1130  return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
1131 #endif
1132 }
1133 
1134 
1135 
1136 /******* Hash streaming *******/
1137 
1139 {
1140  return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
1141 }
1143 {
1144  XXH_free(statePtr);
1145  return XXH_OK;
1146 }
1147 
1148 XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
1149 {
1150  memcpy(dstState, srcState, sizeof(*dstState));
1151 }
1152 
1153 XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
1154 {
1155  XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
1156  memset(&state, 0, sizeof(state));
1157  state.v1 = seed + PRIME32_1 + PRIME32_2;
1158  state.v2 = seed + PRIME32_2;
1159  state.v3 = seed + 0;
1160  state.v4 = seed - PRIME32_1;
1161  /* do not write into reserved, planned to be removed in a future version */
1162  memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
1163  return XXH_OK;
1164 }
1165 
1166 
1168 XXH32_update(XXH32_state_t* state, const void* input, size_t len)
1169 {
1170  if (input==NULL)
1171 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
1172  return XXH_OK;
1173 #else
1174  return XXH_ERROR;
1175 #endif
1176 
1177  { const xxh_u8* p = (const xxh_u8*)input;
1178  const xxh_u8* const bEnd = p + len;
1179 
1180  state->total_len_32 += (XXH32_hash_t)len;
1181  state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
1182 
1183  if (state->memsize + len < 16) { /* fill in tmp buffer */
1184  XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
1185  state->memsize += (XXH32_hash_t)len;
1186  return XXH_OK;
1187  }
1188 
1189  if (state->memsize) { /* some data left from previous update */
1190  XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
1191  { const xxh_u32* p32 = state->mem32;
1192  state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); p32++;
1193  state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); p32++;
1194  state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); p32++;
1195  state->v4 = XXH32_round(state->v4, XXH_readLE32(p32));
1196  }
1197  p += 16-state->memsize;
1198  state->memsize = 0;
1199  }
1200 
1201  if (p <= bEnd-16) {
1202  const xxh_u8* const limit = bEnd - 16;
1203  xxh_u32 v1 = state->v1;
1204  xxh_u32 v2 = state->v2;
1205  xxh_u32 v3 = state->v3;
1206  xxh_u32 v4 = state->v4;
1207 
1208  do {
1209  v1 = XXH32_round(v1, XXH_readLE32(p)); p+=4;
1210  v2 = XXH32_round(v2, XXH_readLE32(p)); p+=4;
1211  v3 = XXH32_round(v3, XXH_readLE32(p)); p+=4;
1212  v4 = XXH32_round(v4, XXH_readLE32(p)); p+=4;
1213  } while (p<=limit);
1214 
1215  state->v1 = v1;
1216  state->v2 = v2;
1217  state->v3 = v3;
1218  state->v4 = v4;
1219  }
1220 
1221  if (p < bEnd) {
1222  XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
1223  state->memsize = (unsigned)(bEnd-p);
1224  }
1225  }
1226 
1227  return XXH_OK;
1228 }
1229 
1230 
1231 XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* state)
1232 {
1233  xxh_u32 h32;
1234 
1235  if (state->large_len) {
1236  h32 = XXH_rotl32(state->v1, 1)
1237  + XXH_rotl32(state->v2, 7)
1238  + XXH_rotl32(state->v3, 12)
1239  + XXH_rotl32(state->v4, 18);
1240  } else {
1241  h32 = state->v3 /* == seed */ + PRIME32_5;
1242  }
1243 
1244  h32 += state->total_len_32;
1245 
1246  return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
1247 }
1248 
1249 
1250 /******* Canonical representation *******/
1251 
1258 XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
1259 {
1260  XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
1261  if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
1262  memcpy(dst, &hash, sizeof(*dst));
1263 }
1264 
1266 {
1267  return XXH_readBE32(src);
1268 }
1269 
1270 
1271 #ifndef XXH_NO_LONG_LONG
1272 
1273 /* *******************************************************************
1274 * 64-bit hash functions
1275 *********************************************************************/
1276 
1277 /******* Memory access *******/
1278 
1279 typedef XXH64_hash_t xxh_u64;
1280 
1281 
1295 #ifndef XXH_REROLL_XXH64
1296 # if (defined(__ILP32__) || defined(_ILP32)) /* ILP32 is often defined on 32-bit GCC family */ \
1297  || !(defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) /* x86-64 */ \
1298  || defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) /* aarch64 */ \
1299  || defined(__PPC64__) || defined(__PPC64LE__) || defined(__ppc64__) || defined(__powerpc64__) /* ppc64 */ \
1300  || defined(__mips64__) || defined(__mips64)) /* mips64 */ \
1301  || (!defined(SIZE_MAX) || SIZE_MAX < ULLONG_MAX) /* check limits */
1302 # define XXH_REROLL_XXH64 1
1303 # else
1304 # define XXH_REROLL_XXH64 0
1305 # endif
1306 #endif /* !defined(XXH_REROLL_XXH64) */
1307 
1308 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
1309 
1310 /* Force direct memory access. Only works on CPU which support unaligned memory access in hardware */
1311 static xxh_u64 XXH_read64(const void* memPtr) { return *(const xxh_u64*) memPtr; }
1312 
1313 #elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
1314 
1315 /* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
1316 /* currently only defined for gcc and icc */
1317 typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
1318 static xxh_u64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
1319 
1320 #else
1321 
1322 /* portable and safe solution. Generally efficient.
1323  * see : http://stackoverflow.com/a/32095106/646947
1324  */
1325 
1326 static xxh_u64 XXH_read64(const void* memPtr)
1327 {
1328  xxh_u64 val;
1329  memcpy(&val, memPtr, sizeof(val));
1330  return val;
1331 }
1332 
1333 #endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
1334 
1335 #if defined(_MSC_VER) /* Visual Studio */
1336 # define XXH_swap64 _byteswap_uint64
1337 #elif XXH_GCC_VERSION >= 403
1338 # define XXH_swap64 __builtin_bswap64
1339 #else
1340 static xxh_u64 XXH_swap64 (xxh_u64 x)
1341 {
1342  return ((x << 56) & 0xff00000000000000ULL) |
1343  ((x << 40) & 0x00ff000000000000ULL) |
1344  ((x << 24) & 0x0000ff0000000000ULL) |
1345  ((x << 8) & 0x000000ff00000000ULL) |
1346  ((x >> 8) & 0x00000000ff000000ULL) |
1347  ((x >> 24) & 0x0000000000ff0000ULL) |
1348  ((x >> 40) & 0x000000000000ff00ULL) |
1349  ((x >> 56) & 0x00000000000000ffULL);
1350 }
1351 #endif
1352 
1353 XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
1354 {
1355  return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
1356 }
1357 
1358 static xxh_u64 XXH_readBE64(const void* ptr)
1359 {
1360  return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
1361 }
1362 
1363 XXH_FORCE_INLINE xxh_u64
1364 XXH_readLE64_align(const void* ptr, XXH_alignment align)
1365 {
1366  if (align==XXH_unaligned)
1367  return XXH_readLE64(ptr);
1368  else
1369  return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
1370 }
1371 
1372 
1373 /******* xxh64 *******/
1374 
1375 static const xxh_u64 PRIME64_1 = 0x9E3779B185EBCA87ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
1376 static const xxh_u64 PRIME64_2 = 0xC2B2AE3D27D4EB4FULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
1377 static const xxh_u64 PRIME64_3 = 0x165667B19E3779F9ULL; /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
1378 static const xxh_u64 PRIME64_4 = 0x85EBCA77C2B2AE63ULL; /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
1379 static const xxh_u64 PRIME64_5 = 0x27D4EB2F165667C5ULL; /* 0b0010011111010100111010110010111100010110010101100110011111000101 */
1380 
1381 static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
1382 {
1383  acc += input * PRIME64_2;
1384  acc = XXH_rotl64(acc, 31);
1385  acc *= PRIME64_1;
1386  return acc;
1387 }
1388 
1389 static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
1390 {
1391  val = XXH64_round(0, val);
1392  acc ^= val;
1393  acc = acc * PRIME64_1 + PRIME64_4;
1394  return acc;
1395 }
1396 
1397 static xxh_u64 XXH64_avalanche(xxh_u64 h64)
1398 {
1399  h64 ^= h64 >> 33;
1400  h64 *= PRIME64_2;
1401  h64 ^= h64 >> 29;
1402  h64 *= PRIME64_3;
1403  h64 ^= h64 >> 32;
1404  return h64;
1405 }
1406 
1407 
1408 #define XXH_get64bits(p) XXH_readLE64_align(p, align)
1409 
1410 static xxh_u64
1411 XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
1412 {
1413 #define PROCESS1_64 \
1414  h64 ^= (*ptr++) * PRIME64_5; \
1415  h64 = XXH_rotl64(h64, 11) * PRIME64_1;
1416 
1417 #define PROCESS4_64 \
1418  h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * PRIME64_1; \
1419  ptr+=4; \
1420  h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
1421 
1422 #define PROCESS8_64 { \
1423  xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); \
1424  ptr+=8; \
1425  h64 ^= k1; \
1426  h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \
1427 }
1428 
1429  /* Rerolled version for 32-bit targets is faster and much smaller. */
1430  if (XXH_REROLL || XXH_REROLL_XXH64) {
1431  len &= 31;
1432  while (len >= 8) {
1433  PROCESS8_64;
1434  len -= 8;
1435  }
1436  if (len >= 4) {
1437  PROCESS4_64;
1438  len -= 4;
1439  }
1440  while (len > 0) {
1441  PROCESS1_64;
1442  --len;
1443  }
1444  return XXH64_avalanche(h64);
1445  } else {
1446  switch(len & 31) {
1447  case 24: PROCESS8_64;
1448  /* fallthrough */
1449  case 16: PROCESS8_64;
1450  /* fallthrough */
1451  case 8: PROCESS8_64;
1452  return XXH64_avalanche(h64);
1453 
1454  case 28: PROCESS8_64;
1455  /* fallthrough */
1456  case 20: PROCESS8_64;
1457  /* fallthrough */
1458  case 12: PROCESS8_64;
1459  /* fallthrough */
1460  case 4: PROCESS4_64;
1461  return XXH64_avalanche(h64);
1462 
1463  case 25: PROCESS8_64;
1464  /* fallthrough */
1465  case 17: PROCESS8_64;
1466  /* fallthrough */
1467  case 9: PROCESS8_64;
1468  PROCESS1_64;
1469  return XXH64_avalanche(h64);
1470 
1471  case 29: PROCESS8_64;
1472  /* fallthrough */
1473  case 21: PROCESS8_64;
1474  /* fallthrough */
1475  case 13: PROCESS8_64;
1476  /* fallthrough */
1477  case 5: PROCESS4_64;
1478  PROCESS1_64;
1479  return XXH64_avalanche(h64);
1480 
1481  case 26: PROCESS8_64;
1482  /* fallthrough */
1483  case 18: PROCESS8_64;
1484  /* fallthrough */
1485  case 10: PROCESS8_64;
1486  PROCESS1_64;
1487  PROCESS1_64;
1488  return XXH64_avalanche(h64);
1489 
1490  case 30: PROCESS8_64;
1491  /* fallthrough */
1492  case 22: PROCESS8_64;
1493  /* fallthrough */
1494  case 14: PROCESS8_64;
1495  /* fallthrough */
1496  case 6: PROCESS4_64;
1497  PROCESS1_64;
1498  PROCESS1_64;
1499  return XXH64_avalanche(h64);
1500 
1501  case 27: PROCESS8_64;
1502  /* fallthrough */
1503  case 19: PROCESS8_64;
1504  /* fallthrough */
1505  case 11: PROCESS8_64;
1506  PROCESS1_64;
1507  PROCESS1_64;
1508  PROCESS1_64;
1509  return XXH64_avalanche(h64);
1510 
1511  case 31: PROCESS8_64;
1512  /* fallthrough */
1513  case 23: PROCESS8_64;
1514  /* fallthrough */
1515  case 15: PROCESS8_64;
1516  /* fallthrough */
1517  case 7: PROCESS4_64;
1518  /* fallthrough */
1519  case 3: PROCESS1_64;
1520  /* fallthrough */
1521  case 2: PROCESS1_64;
1522  /* fallthrough */
1523  case 1: PROCESS1_64;
1524  /* fallthrough */
1525  case 0: return XXH64_avalanche(h64);
1526  }
1527  }
1528  /* impossible to reach */
1529  XXH_ASSERT(0);
1530  return 0; /* unreachable, but some compilers complain without it */
1531 }
1532 
1533 XXH_FORCE_INLINE xxh_u64
1534 XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
1535 {
1536  const xxh_u8* bEnd = input + len;
1537  xxh_u64 h64;
1538 
1539 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
1540  if (input==NULL) {
1541  len=0;
1542  bEnd=input=(const xxh_u8*)(size_t)32;
1543  }
1544 #endif
1545 
1546  if (len>=32) {
1547  const xxh_u8* const limit = bEnd - 32;
1548  xxh_u64 v1 = seed + PRIME64_1 + PRIME64_2;
1549  xxh_u64 v2 = seed + PRIME64_2;
1550  xxh_u64 v3 = seed + 0;
1551  xxh_u64 v4 = seed - PRIME64_1;
1552 
1553  do {
1554  v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
1555  v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
1556  v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
1557  v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
1558  } while (input<=limit);
1559 
1560  h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
1561  h64 = XXH64_mergeRound(h64, v1);
1562  h64 = XXH64_mergeRound(h64, v2);
1563  h64 = XXH64_mergeRound(h64, v3);
1564  h64 = XXH64_mergeRound(h64, v4);
1565 
1566  } else {
1567  h64 = seed + PRIME64_5;
1568  }
1569 
1570  h64 += (xxh_u64) len;
1571 
1572  return XXH64_finalize(h64, input, len, align);
1573 }
1574 
1575 
1576 XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed)
1577 {
1578 #if 0
1579  /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
1580  XXH64_state_t state;
1581  XXH64_reset(&state, seed);
1582  XXH64_update(&state, (const xxh_u8*)input, len);
1583  return XXH64_digest(&state);
1584 
1585 #else
1586 
1587  if (XXH_FORCE_ALIGN_CHECK) {
1588  if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
1589  return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
1590  } }
1591 
1592  return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
1593 
1594 #endif
1595 }
1596 
1597 /******* Hash Streaming *******/
1598 
1600 {
1601  return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
1602 }
1604 {
1605  XXH_free(statePtr);
1606  return XXH_OK;
1607 }
1608 
1609 XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
1610 {
1611  memcpy(dstState, srcState, sizeof(*dstState));
1612 }
1613 
1615 {
1616  XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
1617  memset(&state, 0, sizeof(state));
1618  state.v1 = seed + PRIME64_1 + PRIME64_2;
1619  state.v2 = seed + PRIME64_2;
1620  state.v3 = seed + 0;
1621  state.v4 = seed - PRIME64_1;
1622  /* do not write into reserved64, might be removed in a future version */
1623  memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64));
1624  return XXH_OK;
1625 }
1626 
1628 XXH64_update (XXH64_state_t* state, const void* input, size_t len)
1629 {
1630  if (input==NULL)
1631 #if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
1632  return XXH_OK;
1633 #else
1634  return XXH_ERROR;
1635 #endif
1636 
1637  { const xxh_u8* p = (const xxh_u8*)input;
1638  const xxh_u8* const bEnd = p + len;
1639 
1640  state->total_len += len;
1641 
1642  if (state->memsize + len < 32) { /* fill in tmp buffer */
1643  XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
1644  state->memsize += (xxh_u32)len;
1645  return XXH_OK;
1646  }
1647 
1648  if (state->memsize) { /* tmp buffer is full */
1649  XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
1650  state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0));
1651  state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1));
1652  state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2));
1653  state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3));
1654  p += 32-state->memsize;
1655  state->memsize = 0;
1656  }
1657 
1658  if (p+32 <= bEnd) {
1659  const xxh_u8* const limit = bEnd - 32;
1660  xxh_u64 v1 = state->v1;
1661  xxh_u64 v2 = state->v2;
1662  xxh_u64 v3 = state->v3;
1663  xxh_u64 v4 = state->v4;
1664 
1665  do {
1666  v1 = XXH64_round(v1, XXH_readLE64(p)); p+=8;
1667  v2 = XXH64_round(v2, XXH_readLE64(p)); p+=8;
1668  v3 = XXH64_round(v3, XXH_readLE64(p)); p+=8;
1669  v4 = XXH64_round(v4, XXH_readLE64(p)); p+=8;
1670  } while (p<=limit);
1671 
1672  state->v1 = v1;
1673  state->v2 = v2;
1674  state->v3 = v3;
1675  state->v4 = v4;
1676  }
1677 
1678  if (p < bEnd) {
1679  XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
1680  state->memsize = (unsigned)(bEnd-p);
1681  }
1682  }
1683 
1684  return XXH_OK;
1685 }
1686 
1687 
1689 {
1690  xxh_u64 h64;
1691 
1692  if (state->total_len >= 32) {
1693  xxh_u64 const v1 = state->v1;
1694  xxh_u64 const v2 = state->v2;
1695  xxh_u64 const v3 = state->v3;
1696  xxh_u64 const v4 = state->v4;
1697 
1698  h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
1699  h64 = XXH64_mergeRound(h64, v1);
1700  h64 = XXH64_mergeRound(h64, v2);
1701  h64 = XXH64_mergeRound(h64, v3);
1702  h64 = XXH64_mergeRound(h64, v4);
1703  } else {
1704  h64 = state->v3 /*seed*/ + PRIME64_5;
1705  }
1706 
1707  h64 += (xxh_u64) state->total_len;
1708 
1709  return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
1710 }
1711 
1712 
1713 /******* Canonical representation *******/
1714 
1716 {
1717  XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
1718  if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
1719  memcpy(dst, &hash, sizeof(*dst));
1720 }
1721 
1723 {
1724  return XXH_readBE64(src);
1725 }
1726 
1727 
1728 
1729 /* *********************************************************************
1730 * XXH3
1731 * New generation hash designed for speed on small keys and vectorization
1732 ************************************************************************ */
1733 
1734 #include "xxh3.h"
1735 
1736 
1737 #endif /* XXH_NO_LONG_LONG */
1738 
1739 
1740 #endif /* XXH_IMPLEMENTATION */
1741 
1742 
1743 #if defined (__cplusplus)
1744 }
1745 #endif
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dst_state, const XXH32_state_t *src_state)
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_digest(const XXH3_state_t *state)
Definition: xxh3.h:1661
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset(XXH3_state_t *statePtr)
Definition: xxh3.h:2006
struct XXH64_state_s XXH64_state_t
Definition: xxhash.h:320
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t *dst, XXH64_hash_t hash)
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset(XXH3_state_t *statePtr)
Definition: xxh3.h:1506
static void h2(float r, float r2, float ri, float rc, float r0, float rs, float &h)
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits(const void *input, size_t len)
Definition: xxh3.h:1951
XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *statePtr, const void *input, size_t length)
XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t *statePtr)
XXH_PUBLIC_API unsigned XXH_versionNumber(void)
#define XXH_VERSION_NUMBER
Definition: xxhash.h:205
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSeed(XXH3_state_t *statePtr, XXH64_hash_t seed)
Definition: xxh3.h:1524
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst, XXH32_hash_t hash)
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t *src)
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSeed(const void *input, size_t len, XXH64_hash_t seed)
Definition: xxh3.h:1976
static void h1(float r, float r2, float ri, float rc, float r0, float rs, float &h)
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_reset_withSecret(XXH3_state_t *statePtr, const void *secret, size_t secretSize)
Definition: xxh3.h:1514
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSecret(const void *input, size_t len, const void *secret, size_t secretSize)
Definition: xxh3.h:1439
unsigned long long XXH64_hash_t
Definition: xxhash.h:309
XXH_PUBLIC_API XXH64_hash_t XXH64(const void *input, size_t length, XXH64_hash_t seed)
XXH_ALIGN(64) static const xxh_u8 kSecret[XXH_SECRET_DEFAULT_SIZE]
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t *statePtr, XXH64_hash_t seed)
XXH_errorcode
Definition: xxhash.h:213
XXH_PUBLIC_API XXH_errorcode XXH3_64bits_update(XXH3_state_t *state, const void *input, size_t len)
Definition: xxh3.h:1620
XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t *statePtr)
XXH_PUBLIC_API XXH_errorcode XXH3_freeState(XXH3_state_t *statePtr)
Definition: xxh3.h:1470
XXH_PUBLIC_API int XXH128_cmp(const void *h128_1, const void *h128_2)
Definition: xxh3.h:2072
Definition: xxhash.h:213
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_digest(const XXH3_state_t *state)
Definition: xxh3.h:2039
XXH_PUBLIC_API XXH128_hash_t XXH128_hashFromCanonical(const XXH128_canonical_t *src)
Definition: xxh3.h:2097
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_update(XXH3_state_t *state, const void *input, size_t len)
Definition: xxh3.h:2034
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits_withSeed(const void *input, size_t len, XXH64_hash_t seed)
Definition: xxh3.h:1455
XXH_PUBLIC_API XXH32_hash_t XXH32(const void *input, size_t length, XXH32_hash_t seed)
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSeed(XXH3_state_t *statePtr, XXH64_hash_t seed)
Definition: xxh3.h:2024
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t *dst_state, const XXH64_state_t *src_state)
XXH_PUBLIC_API XXH3_state_t * XXH3_createState(void)
Definition: xxh3.h:1465
XXH_PUBLIC_API XXH64_state_t * XXH64_createState(void)
XXH_PUBLIC_API XXH64_hash_t XXH3_64bits(const void *input, size_t len)
Definition: xxh3.h:1430
XXH_PUBLIC_API int XXH128_isEqual(XXH128_hash_t h1, XXH128_hash_t h2)
Definition: xxh3.h:2062
XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr)
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr, XXH32_hash_t seed)
#define XXH_PUBLIC_API
Definition: xxhash.h:159
XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr)
XXH_PUBLIC_API XXH128_hash_t XXH3_128bits_withSecret(const void *input, size_t len, const void *secret, size_t secretSize)
Definition: xxh3.h:1960
XXH_PUBLIC_API XXH128_hash_t XXH128(const void *input, size_t len, XXH64_hash_t seed)
Definition: xxh3.h:1985
XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *statePtr, const void *input, size_t length)
struct XXH32_state_s XXH32_state_t
Definition: xxhash.h:267
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t *src)
XXH_PUBLIC_API void XXH3_copyState(XXH3_state_t *dst_state, const XXH3_state_t *src_state)
Definition: xxh3.h:1477
XXH_PUBLIC_API XXH32_state_t * XXH32_createState(void)
XXH_PUBLIC_API XXH_errorcode XXH3_128bits_reset_withSecret(XXH3_state_t *statePtr, const void *secret, size_t secretSize)
Definition: xxh3.h:2014
XXH_PUBLIC_API void XXH128_canonicalFromHash(XXH128_canonical_t *dst, XXH128_hash_t hash)
Definition: xxh3.h:2085