/* * xxHash - Extremely Fast Hash algorithm * Header File * Copyright (C) 2012-2021 Yann Collet * * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following disclaimer * in the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You can contact the author at: * - xxHash homepage: https://www.xxhash.com * - xxHash source repository: https://github.com/Cyan4973/xxHash */ /*! * @mainpage xxHash * * xxHash is an extremely fast non-cryptographic hash algorithm, working at RAM speed * limits. * * It is proposed in four flavors, in three families: * 1. @ref XXH32_family * - Classic 32-bit hash function. Simple, compact, and runs on almost all * 32-bit and 64-bit systems. * 2. @ref XXH64_family * - Classic 64-bit adaptation of XXH32. Just as simple, and runs well on most * 64-bit systems (but _not_ 32-bit systems). * 3. @ref XXH3_family * - Modern 64-bit and 128-bit hash function family which features improved * strength and performance across the board, especially on smaller data. * It benefits greatly from SIMD and 64-bit without requiring it. * * Benchmarks * --- * The reference system uses an Intel i7-9700K CPU, and runs Ubuntu x64 20.04. * The open source benchmark program is compiled with clang v10.0 using -O3 flag. 
 *
 * | Hash Name            | ISA ext | Width | Large Data Speed | Small Data Velocity |
 * | -------------------- | ------- | ----: | ---------------: | ------------------: |
 * | XXH3_64bits()        | @b AVX2 |    64 | 59.4 GB/s        | 133.1               |
 * | MeowHash             | AES-NI  |   128 | 58.2 GB/s        | 52.5                |
 * | XXH3_128bits()       | @b AVX2 |   128 | 57.9 GB/s        | 118.1               |
 * | CLHash               | PCLMUL  |    64 | 37.1 GB/s        | 58.1                |
 * | XXH3_64bits()        | @b SSE2 |    64 | 31.5 GB/s        | 133.1               |
 * | XXH3_128bits()       | @b SSE2 |   128 | 29.6 GB/s        | 118.1               |
 * | RAM sequential read  |         |   N/A | 28.0 GB/s        | N/A                 |
 * | ahash                | AES-NI  |    64 | 22.5 GB/s        | 107.2               |
 * | City64               |         |    64 | 22.0 GB/s        | 76.6                |
 * | T1ha2                |         |    64 | 22.0 GB/s        | 99.0                |
 * | City128              |         |   128 | 21.7 GB/s        | 57.7                |
 * | FarmHash             | AES-NI  |    64 | 21.3 GB/s        | 71.9                |
 * | XXH64()              |         |    64 | 19.4 GB/s        | 71.0                |
 * | SpookyHash           |         |    64 | 19.3 GB/s        | 53.2                |
 * | Mum                  |         |    64 | 18.0 GB/s        | 67.0                |
 * | CRC32C               | SSE4.2  |    32 | 13.0 GB/s        | 57.9                |
 * | XXH32()              |         |    32 |  9.7 GB/s        | 71.9                |
 * | City32               |         |    32 |  9.1 GB/s        | 66.0                |
 * | Blake3*              | @b AVX2 |   256 |  4.4 GB/s        | 8.1                 |
 * | Murmur3              |         |    32 |  3.9 GB/s        | 56.1                |
 * | SipHash*             |         |    64 |  3.0 GB/s        | 43.2                |
 * | Blake3*              | @b SSE2 |   256 |  2.4 GB/s        | 8.1                 |
 * | HighwayHash          |         |    64 |  1.4 GB/s        | 6.0                 |
 * | FNV64                |         |    64 |  1.2 GB/s        | 62.7                |
 * | Blake2*              |         |   256 |  1.1 GB/s        | 5.1                 |
 * | SHA1*                |         |   160 |  0.8 GB/s        | 5.6                 |
 * | MD5*                 |         |   128 |  0.6 GB/s        | 7.8                 |
 * @note
 *   - Hashes which require a specific ISA extension are noted. SSE2 is also noted,
 *     even though it is mandatory on x64.
 *   - Hashes with an asterisk are cryptographic. Note that MD5 is no longer
 *     considered cryptographically secure by modern standards.
 *   - Small data velocity is a rough average of the algorithm's efficiency for
 *     small data. For more accurate information, see the wiki.
 *   - More benchmarks and strength tests are found on the wiki:
 *     https://github.com/Cyan4973/xxHash/wiki
 *
 * Usage
 * ------
 * All xxHash variants use a similar API. Changing the algorithm is a trivial
 * substitution.
 *
 * @pre
 *   For functions which take an input and length parameter, the following
 *   requirements are assumed:
 *   - The range from [`input`, `input + length`) is valid, readable memory.
 *     - The only exception: if `length` is `0`, `input` may be `NULL`.
 *   - For C++, the objects must have the *TriviallyCopyable* property, as the
 *     functions access bytes directly as if it was an array of `unsigned char`.
 *
 * @anchor single_shot_example
 * **Single Shot**
 *
 * These functions are stateless functions which hash a contiguous block of memory,
 * immediately returning the result. They are the easiest and usually the fastest
 * option.
 *
 * XXH32(), XXH64(), XXH3_64bits(), XXH3_128bits()
 *
 * @code{.c}
 * #include <string.h>
 * #include "xxhash.h"
 *
 * // Example for a function which hashes a null terminated string with XXH32().
 * XXH32_hash_t hash_string(const char* string, XXH32_hash_t seed)
 * {
 *     // NULL pointers are only valid if the length is zero
 *     size_t length = (string == NULL) ? 0 : strlen(string);
 *     return XXH32(string, length, seed);
 * }
 * @endcode
 *
 * @anchor streaming_example
 * **Streaming**
 *
 * These groups of functions allow incremental hashing of data of unknown size,
 * even more than what would fit in a size_t.
 *
 * XXH32_reset(), XXH64_reset(), XXH3_64bits_reset(), XXH3_128bits_reset()
 *
 * @code{.c}
 * #include <stdio.h>
 * #include <assert.h>
 * #include "xxhash.h"
 * // Example for a function which hashes a FILE incrementally with XXH3_64bits().
 * XXH64_hash_t hashFile(FILE* f)
 * {
 *     // Allocate a state struct. Do not just use malloc() or new.
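 *     // (XXH3_state_t is opaque and has alignment requirements which a plain
 *     // malloc() or new does not guarantee, hence the dedicated allocator.)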
* XXH3_state_t* state = XXH3_createState(); * assert(state != NULL && "Out of memory!"); * // Reset the state to start a new hashing session. * XXH3_64bits_reset(state); * char buffer[4096]; * size_t count; * // Read the file in chunks * while ((count = fread(buffer, 1, sizeof(buffer), f)) != 0) { * // Run update() as many times as necessary to process the data * XXH3_64bits_update(state, buffer, count); * } * // Retrieve the finalized hash. This will not change the state. * XXH64_hash_t result = XXH3_64bits_digest(state); * // Free the state. Do not use free(). * XXH3_freeState(state); * return result; * } * @endcode * * @file xxhash.h * xxHash prototypes and implementation */ #if defined (__cplusplus) extern "C" { #endif /* **************************** * INLINE mode ******************************/ /*! * @defgroup public Public API * Contains details on the public xxHash functions. * @{ */ #if (defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)) \ && !defined(XXH_INLINE_ALL_31684351384) /* this section should be traversed only once */ # define XXH_INLINE_ALL_31684351384 /* give access to the advanced API, required to compile implementations */ # undef XXH_STATIC_LINKING_ONLY /* avoid macro redef */ # define XXH_STATIC_LINKING_ONLY /* make all functions private */ # undef XXH_PUBLIC_API # if defined(__GNUC__) # define XXH_PUBLIC_API static __inline __attribute__((unused)) # elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) # define XXH_PUBLIC_API static inline # elif defined(_MSC_VER) # define XXH_PUBLIC_API static __inline # else /* note: this version may generate warnings for unused static functions */ # define XXH_PUBLIC_API static # endif /* * This part deals with the special case where a unit wants to inline xxHash, * but "xxhash.h" has previously been included without XXH_INLINE_ALL, * such as part of some previously included *.h header file. * Without further action, the new include would just be ignored, * and functions would effectively _not_ be inlined (silent failure). * The following macros solve this situation by prefixing all inlined names, * avoiding naming collision with previous inclusions. */ /* Before that, we unconditionally #undef all symbols, * in case they were already defined with XXH_NAMESPACE. * They will then be redefined for XXH_INLINE_ALL */ # undef XXH_versionNumber /* XXH64 */ # undef XXH64 # undef XXH64_createState # undef XXH64_freeState # undef XXH64_reset # undef XXH64_update # undef XXH64_digest # undef XXH64_copyState # undef XXH64_canonicalFromHash # undef XXH64_hashFromCanonical /* Finally, free the namespace itself */ # undef XXH_NAMESPACE /* employ the namespace for XXH_INLINE_ALL */ # define XXH_NAMESPACE XXH_INLINE_ /* * Some identifiers (enums, type names) are not symbols, * but they must nonetheless be renamed to avoid redeclaration. * Alternative solution: do not redeclare them. * However, this requires some #ifdefs, and has a more dispersed impact. * Meanwhile, renaming can be achieved in a single place. 
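 *
 * For example, after this renaming the identifier XXH_OK expands to
 * XXH_INLINE_XXH_OK, so it cannot collide with an XXH_OK declared by an
 * earlier, non-inlined inclusion of "xxhash.h".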
 */
#  define XXH_IPREF(Id)   XXH_NAMESPACE ## Id
#  define XXH_OK XXH_IPREF(XXH_OK)
#  define XXH_ERROR XXH_IPREF(XXH_ERROR)
#  define XXH_errorcode XXH_IPREF(XXH_errorcode)
#  define XXH64_canonical_t  XXH_IPREF(XXH64_canonical_t)
#  define XXH64_state_s XXH_IPREF(XXH64_state_s)
#  define XXH64_state_t XXH_IPREF(XXH64_state_t)
   /* Ensure the header is parsed again, even if it was previously included */
#  undef XXHASH_H_5627135585666179
#  undef XXHASH_H_STATIC_13879238742
#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */

/* ****************************************************************
 *  Stable API
 *****************************************************************/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1

/*! @brief Marks a global symbol. */
#if !defined(XXH_INLINE_ALL) && !defined(XXH_PRIVATE_API)
#  if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
#    ifdef XXH_EXPORT
#      define XXH_PUBLIC_API __declspec(dllexport)
#    elif XXH_IMPORT
#      define XXH_PUBLIC_API __declspec(dllimport)
#    endif
#  else
#    define XXH_PUBLIC_API   /* do nothing */
#  endif
#endif

#ifdef XXH_NAMESPACE
#  define XXH_CAT(A,B) A##B
#  define XXH_NAME2(A,B) XXH_CAT(A,B)
#  define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
/* XXH64 */
#  define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
#  define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
#  define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
#  define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
#  define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
#  define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
#  define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
#  define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
#  define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
#endif

/* *************************************
 *  Compiler specifics
 ***************************************/
#if defined (__GNUC__)
#  define XXH_CONSTF  __attribute__((const))
#  define XXH_PUREF   __attribute__((pure))
#  define XXH_MALLOCF __attribute__((malloc))
#else
#  define XXH_CONSTF  /* disable */
#  define XXH_PUREF
#  define XXH_MALLOCF
#endif

/* *************************************
 *  Version
 ***************************************/
#define XXH_VERSION_MAJOR    0
#define XXH_VERSION_MINOR    8
#define XXH_VERSION_RELEASE  1
/*! @brief Version number, encoded as two digits each */
#define XXH_VERSION_NUMBER  (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)

/*!
 * @brief Obtains the xxHash version.
 *
 * This is mostly useful when xxHash is compiled as a shared library,
 * since the returned value comes from the library, as opposed to the header file.
 *
 * @return @ref XXH_VERSION_NUMBER of the invoked library.
 */
XXH_PUBLIC_API XXH_CONSTF unsigned XXH_versionNumber (void);

/* ****************************
 *  Common basic types
 ******************************/
#include <stddef.h>   /* size_t */

/*!
 * @brief Exit code for the streaming API.
 */
typedef enum {
    XXH_OK = 0, /*!< OK */
    XXH_ERROR   /*!< Error */
} XXH_errorcode;
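/*
 * Illustrative note: streaming functions report failure through this enum,
 * so return values can be checked, e.g.:
 *
 *     if (XXH64_reset(statePtr, seed) != XXH_OK) { handle_error(); }
 *
 * (handle_error() stands in for application-specific handling.)
 */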
/*-**********************************************************************
 *  32-bit hash
 ************************************************************************/
#if !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#   include <stdint.h>
    typedef uint32_t XXH32_hash_t;
#else
#   include <limits.h>
#   if UINT_MAX == 0xFFFFFFFFUL
      typedef unsigned int XXH32_hash_t;
#   elif ULONG_MAX == 0xFFFFFFFFUL
      typedef unsigned long XXH32_hash_t;
#   else
#     error "unsupported platform: need a 32-bit type"
#   endif
#endif

/*!
 * @}
 *
 * @defgroup XXH32_family XXH32 family
 * @ingroup public
 * Contains functions used in the classic 32-bit xxHash algorithm.
 *
 * @note
 *   XXH32 is useful for older platforms, with no or poor 64-bit performance.
 *   Note that the @ref XXH3_family provides competitive speed for both 32-bit
 *   and 64-bit systems, and offers true 64/128 bit hash results.
 *
 * @see @ref XXH64_family, @ref XXH3_family : Other xxHash families
 * @see @ref XXH32_impl for implementation details
 * @{
 */

/*!
 * @brief Calculates the 32-bit hash of @p input using xxHash32.
 *
 * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
 *
 * See @ref single_shot_example "Single Shot Example" for an example.
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 32-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 32-bit hash value.
 *
 * @see
 *    XXH64(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH32_createState(), XXH32_update(), XXH32_digest(): Streaming version.
 */
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);

/*******   Canonical representation   *******/
/*
 * The default return values from XXH functions are unsigned 32 and 64 bit
 * integers.
 * This is the simplest and fastest format for further post-processing.
 *
 * However, this leaves open the question of byte ordering, since little
 * endian and big endian conventions store the same number differently.
 *
 * The canonical representation settles this issue by mandating big-endian
 * convention, the same convention as human-readable numbers (large digits first).
 *
 * When writing hash values to storage, sending them over a network, or printing
 * them, it's highly recommended to use the canonical representation to ensure
 * portability across a wider range of systems, present and future.
 *
 * The following functions allow transformation of hash values to and from
 * canonical format.
 */

/*!
 * @brief Canonical (big endian) representation of @ref XXH32_hash_t.
 */
typedef struct {
    unsigned char digest[4]; /*!< Hash bytes, big endian */
} XXH32_canonical_t;

/*!
 * @brief Converts an @ref XXH32_hash_t to a big endian @ref XXH32_canonical_t.
 *
 * @param dst  The @ref XXH32_canonical_t pointer to be stored to.
 * @param hash The @ref XXH32_hash_t to be converted.
 *
 * @pre
 *   @p dst must not be `NULL`.
 */
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
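/*
 * Illustrative sketch: writing a hash to a file in canonical form, so readers
 * on little and big endian platforms decode identical bytes. The helper name
 * and the omitted fwrite() error handling are illustrative only.
 *
 *     void store_hash32(FILE* f, XXH32_hash_t hash)
 *     {
 *         XXH32_canonical_t canonical;
 *         XXH32_canonicalFromHash(&canonical, hash); // always big endian bytes
 *         fwrite(canonical.digest, 1, sizeof(canonical.digest), f);
 *     }
 */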
/*!
 * @brief Converts an @ref XXH32_canonical_t to a native @ref XXH32_hash_t.
 *
 * @param src The @ref XXH32_canonical_t to convert.
 *
 * @pre
 *   @p src must not be `NULL`.
 *
 * @return The converted hash.
 */
XXH_PUBLIC_API XXH_PUREF XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);


#ifdef __has_attribute
# define XXH_HAS_ATTRIBUTE(x) __has_attribute(x)
#else
# define XXH_HAS_ATTRIBUTE(x) 0
#endif

/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
# define XXH_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
# define XXH_HAS_C_ATTRIBUTE(x) 0
#endif

#if defined(__cplusplus) && defined(__has_cpp_attribute)
# define XXH_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define XXH_HAS_CPP_ATTRIBUTE(x) 0
#endif

/*
 * Define XXH_FALLTHROUGH macro for annotating switch case with the
 * 'fallthrough' attribute introduced in C++17 and C23.
 * C++17 : https://en.cppreference.com/w/cpp/language/attributes/fallthrough
 * C23   : https://en.cppreference.com/w/c/language/attributes/fallthrough
 */
#if XXH_HAS_C_ATTRIBUTE(fallthrough) || XXH_HAS_CPP_ATTRIBUTE(fallthrough)
# define XXH_FALLTHROUGH [[fallthrough]]
#elif XXH_HAS_ATTRIBUTE(__fallthrough__)
# define XXH_FALLTHROUGH __attribute__ ((__fallthrough__))
#else
# define XXH_FALLTHROUGH /* fallthrough */
#endif

/*
 * Define XXH_NOESCAPE for annotated pointers in public API.
 * https://clang.llvm.org/docs/AttributeReference.html#noescape
 * As of writing this, only supported by clang.
 */
#if XXH_HAS_ATTRIBUTE(noescape)
# define XXH_NOESCAPE __attribute__((noescape))
#else
# define XXH_NOESCAPE
#endif


/*!
 * @}
 * @ingroup public
 * @{
 */

#ifndef XXH_NO_LONG_LONG
/*-**********************************************************************
 *  64-bit hash
 ************************************************************************/
#if !defined (__VMS) \
  && (defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
#  include <stdint.h>
   typedef uint64_t XXH64_hash_t;
#else
#  include <limits.h>
#  if defined(__LP64__) && ULONG_MAX == 0xFFFFFFFFFFFFFFFFULL
     /* LP64 ABI says uint64_t is unsigned long */
     typedef unsigned long XXH64_hash_t;
#  else
     /* the following type must have a width of 64-bit */
     typedef unsigned long long XXH64_hash_t;
#  endif
#endif

/*!
 * @}
 *
 * @defgroup XXH64_family XXH64 family
 * @ingroup public
 * @{
 * Contains functions used in the classic 64-bit xxHash algorithm.
 *
 * @note
 *   XXH3 provides competitive speed for both 32-bit and 64-bit systems,
 *   and offers true 64/128 bit hash results.
 *   It provides better speed for systems with vector processing capabilities.
 */

/*!
 * @brief Calculates the 64-bit hash of @p input using xxHash64.
 *
 * This function usually runs faster on 64-bit systems, but slower on 32-bit
 * systems (see benchmark).
 *
 * @param input The block of data to be hashed, at least @p length bytes in size.
 * @param length The length of @p input, in bytes.
 * @param seed The 64-bit seed to alter the hash's output predictably.
 *
 * @pre
 *   The memory between @p input and @p input + @p length must be valid,
 *   readable, contiguous memory. However, if @p length is `0`, @p input may be
 *   `NULL`. In C++, this also must be *TriviallyCopyable*.
 *
 * @return The calculated 64-bit hash.
 *
 * @see
 *    XXH32(), XXH3_64bits_withSeed(), XXH3_128bits_withSeed(), XXH128():
 *    Direct equivalents for the other variants of xxHash.
 * @see
 *    XXH64_createState(), XXH64_update(), XXH64_digest(): Streaming version.
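 *
 * A minimal usage sketch (the helper name is illustrative, mirroring the
 * XXH32() example on the main page):
 * @code{.c}
 * #include <string.h>
 * #include "xxhash.h"
 *
 * // Hashes a null terminated string with XXH64().
 * XXH64_hash_t hash_string64(const char* string, XXH64_hash_t seed)
 * {
 *     // NULL pointers are only valid if the length is zero
 *     size_t length = (string == NULL) ? 0 : strlen(string);
 *     return XXH64(string, length, seed);
 * }
 * @endcode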
 */
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64(XXH_NOESCAPE const void* input, size_t length, XXH64_hash_t seed);

/*******   Canonical representation   *******/
typedef struct { unsigned char digest[sizeof(XXH64_hash_t)]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash);
XXH_PUBLIC_API XXH_PUREF XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src);

#endif /* XXH_NO_LONG_LONG */

/*!
 * @}
 */
#endif /* XXHASH_H_5627135585666179 */


#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
#define XXHASH_H_STATIC_13879238742

/* ****************************************************************************
 * This section contains declarations which are not guaranteed to remain stable.
 * They may change in future versions, becoming incompatible with a different
 * version of the library.
 * These declarations should only be used with static linking.
 * Never use them in association with dynamic linking!
 ***************************************************************************** */

/*
 * These definitions are only present to allow static allocation
 * of XXH states, on stack or in a struct, for example.
 * Never **ever** access their members directly.
 */

/*!
 * @internal
 * @brief Structure for XXH32 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH32_state_t.
 * Do not access the members of this struct directly.
 * @see XXH64_state_s, XXH3_state_s
 */
struct XXH32_state_s {
   XXH32_hash_t total_len_32; /*!< Total length hashed, modulo 2^32 */
   XXH32_hash_t large_len;    /*!< Whether the hash is >= 16 (handles @ref total_len_32 overflow) */
   XXH32_hash_t v[4];         /*!< Accumulator lanes */
   XXH32_hash_t mem32[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[16]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem32 */
   XXH32_hash_t reserved;     /*!< Reserved field. Do not read nor write to it. */
};   /* typedef'd to XXH32_state_t */


#ifndef XXH_NO_LONG_LONG  /* defined when there is no 64-bit support */

/*!
 * @internal
 * @brief Structure for XXH64 streaming API.
 *
 * @note This is only defined when @ref XXH_STATIC_LINKING_ONLY,
 * @ref XXH_INLINE_ALL, or @ref XXH_IMPLEMENTATION is defined. Otherwise it is
 * an opaque type. This allows fields to safely be changed.
 *
 * Typedef'd to @ref XXH64_state_t.
 * Do not access the members of this struct directly.
 * @see XXH32_state_s, XXH3_state_s
 */
struct XXH64_state_s {
   XXH64_hash_t total_len;    /*!< Total length hashed. This is always 64-bit. */
   XXH64_hash_t v[4];         /*!< Accumulator lanes */
   XXH64_hash_t mem64[4];     /*!< Internal buffer for partial reads. Treated as unsigned char[32]. */
   XXH32_hash_t memsize;      /*!< Amount of data in @ref mem64 */
   XXH32_hash_t reserved32;   /*!< Reserved field, needed for padding anyway */
   XXH64_hash_t reserved64;   /*!< Reserved field. Do not read or write to it. */
};   /* typedef'd to XXH64_state_t */
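/*
 * Illustrative sketch: with XXH_STATIC_LINKING_ONLY defined, a state can live
 * on the stack instead of being heap-allocated with XXH64_createState(); it
 * must still only be manipulated through the API. The helper name is
 * illustrative only.
 *
 *     #define XXH_STATIC_LINKING_ONLY
 *     #include "xxhash.h"
 *
 *     XXH64_hash_t hash_buffer(const void* data, size_t size, XXH64_hash_t seed)
 *     {
 *         XXH64_state_t state;   // statically allocated, contents stay opaque
 *         XXH64_reset(&state, seed);
 *         XXH64_update(&state, data, size);
 *         return XXH64_digest(&state);
 *     }
 */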
#endif /* XXH_NO_LONG_LONG */

#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
#  define XXH_IMPLEMENTATION
#endif

#endif  /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */


/* ======================================================================== */


/*-**********************************************************************
 * xxHash implementation
 *-**********************************************************************
 * xxHash's implementation used to be hosted inside xxhash.c,
 * which was then #included when inlining was activated.
 *
 * However, inlining requires the implementation to be visible to the
 * compiler, hence included alongside the header.
 * This construction created issues with a few build and install systems,
 * as it required xxhash.c to be stored in the /include directory.
 *
 * The xxHash implementation is now directly integrated within xxhash.h.
 * As a consequence, xxhash.c is no longer needed in /include.
 *
 * xxhash.c is still available and is still useful.
 * In a "normal" setup, when xxhash is not inlined,
 * xxhash.h only exposes the prototypes and public symbols,
 * while xxhash.c can be built into an object file xxhash.o
 * which can then be linked into the final binary.
 ************************************************************************/

#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
   || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
#  define XXH_IMPLEM_13a8737387

/* *************************************
 *  Tuning parameters
 ***************************************/

#ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
   /* prefer __packed__ structures (method 1) for GCC.
    * < ARMv7 with unaligned access (e.g. Raspbian armhf) still uses byte shifting,
    * so we use memcpy, which for some reason does unaligned loads. */
#  if defined(__GNUC__) && !(defined(__ARM_ARCH) && __ARM_ARCH < 7 && defined(__ARM_FEATURE_UNALIGNED))
#    define XXH_FORCE_MEMORY_ACCESS 1
#  endif
#endif

#ifndef XXH_SIZE_OPT
   /* default to 1 for -Os or -Oz */
#  if (defined(__GNUC__) || defined(__clang__)) && defined(__OPTIMIZE_SIZE__)
#    define XXH_SIZE_OPT 1
#  else
#    define XXH_SIZE_OPT 0
#  endif
#endif

#ifndef XXH_FORCE_ALIGN_CHECK  /* can be defined externally */
   /* don't check on sizeopt, x86, aarch64, or arm when unaligned access is available */
#  if XXH_SIZE_OPT >= 1 || \
      defined(__i386)  || defined(__x86_64__) || defined(__aarch64__) || defined(__ARM_FEATURE_UNALIGNED) \
   || defined(_M_IX86) || defined(_M_X64)     || defined(_M_ARM64)    || defined(_M_ARM) /* visual */
#    define XXH_FORCE_ALIGN_CHECK 0
#  else
#    define XXH_FORCE_ALIGN_CHECK 1
#  endif
#endif

#ifndef XXH_NO_INLINE_HINTS
#  if XXH_SIZE_OPT >= 1 || defined(__NO_INLINE__)  /* -O0, -fno-inline */
#    define XXH_NO_INLINE_HINTS 1
#  else
#    define XXH_NO_INLINE_HINTS 0
#  endif
#endif

#ifndef XXH32_ENDJMP
/* generally preferable for performance */
#  define XXH32_ENDJMP 0
#endif
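/*
 * Illustration: the tuning macros above are meant to be set on the compiler
 * command line rather than edited in this file, e.g. (values are examples,
 * not recommendations):
 *
 *     cc -O3 -DXXH_FORCE_MEMORY_ACCESS=2 -DXXH32_ENDJMP=1 -c xxhash.c
 */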
#include <string.h>

/*!
 * @internal
 * @brief Modify this function to use a different routine than memcpy().
 */
static void* XXH_memcpy(void* dest, const void* src, size_t size)
{
    return memcpy(dest,src,size);
}

#include <limits.h>   /* ULLONG_MAX */


/* *************************************
 *  Compiler Specific Options
 ***************************************/
#ifdef _MSC_VER /* Visual Studio warning fix */
#  pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
#endif

#if XXH_NO_INLINE_HINTS  /* disable inlining hints */
#  if defined(__GNUC__) || defined(__clang__)
#    define XXH_FORCE_INLINE static __attribute__((unused))
#  else
#    define XXH_FORCE_INLINE static
#  endif
#  define XXH_NO_INLINE static
/* enable inlining hints */
#elif defined(__GNUC__) || defined(__clang__)
#  define XXH_FORCE_INLINE static __inline__ __attribute__((always_inline, unused))
#  define XXH_NO_INLINE static __attribute__((noinline))
#elif defined(_MSC_VER)  /* Visual Studio */
#  define XXH_FORCE_INLINE static __forceinline
#  define XXH_NO_INLINE static __declspec(noinline)
#elif defined (__cplusplus) \
  || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L))   /* C99 */
#  define XXH_FORCE_INLINE static inline
#  define XXH_NO_INLINE static
#else
#  define XXH_FORCE_INLINE static
#  define XXH_NO_INLINE static
#endif


/* *************************************
 *  Debug
 ***************************************/
/*!
 * @ingroup tuning
 * @def XXH_DEBUGLEVEL
 * @brief Sets the debugging level.
 *
 * XXH_DEBUGLEVEL is expected to be defined externally, typically via the
 * compiler's command line options. The value must be a number.
 */
#ifndef XXH_DEBUGLEVEL
#  ifdef DEBUGLEVEL /* backwards compat */
#    define XXH_DEBUGLEVEL DEBUGLEVEL
#  else
#    define XXH_DEBUGLEVEL 0
#  endif
#endif

#if (XXH_DEBUGLEVEL>=1)
#  include <assert.h>   /* note: can still be disabled with NDEBUG */
#  define XXH_ASSERT(c)   assert(c)
#else
#  define XXH_ASSERT(c)   XXH_ASSUME(c)
#endif
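/*
 * Illustration: with XXH_DEBUGLEVEL >= 1, XXH_ASSERT() expands to assert()
 * and traps violations at runtime; at the default level 0 it expands to
 * XXH_ASSUME(), which instead lets the optimizer treat the condition as
 * always true. An example build line (flags illustrative):
 *
 *     cc -O3 -DXXH_DEBUGLEVEL=1 -c xxhash.c
 */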
/* note: use after variable declarations */
#ifndef XXH_STATIC_ASSERT
#  if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)   /* C11 */
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { _Static_assert((c),m); } while(0)
#  elif defined(__cplusplus) && (__cplusplus >= 201103L)           /* C++11 */
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { static_assert((c),m); } while(0)
#  else
#    define XXH_STATIC_ASSERT_WITH_MESSAGE(c,m) do { struct xxh_sa { char x[(c) ? 1 : -1]; }; } while(0)
#  endif
#  define XXH_STATIC_ASSERT(c) XXH_STATIC_ASSERT_WITH_MESSAGE((c),#c)
#endif

/*!
 * @internal
 * @def XXH_COMPILER_GUARD(var)
 * @brief Used to prevent unwanted optimizations for @p var.
 *
 * It uses an empty GCC inline assembly statement with a register constraint
 * which forces @p var into a general purpose register (eg eax, ebx, ecx
 * on x86) and marks it as modified.
 *
 * This is used in a few places to avoid unwanted autovectorization (e.g.
 * XXH32_round()). All vectorization we want is explicit via intrinsics,
 * and _usually_ isn't wanted elsewhere.
 *
 * We also use it to prevent unwanted constant folding for AArch64 in
 * XXH3_initCustomSecret_scalar().
 */
#if defined(__GNUC__) || defined(__clang__)
#  define XXH_COMPILER_GUARD(var) __asm__ __volatile__("" : "+r" (var))
#else
#  define XXH_COMPILER_GUARD(var) ((void)0)
#endif

#if defined(__clang__)
#  define XXH_COMPILER_GUARD_W(var) __asm__ __volatile__("" : "+w" (var))
#else
#  define XXH_COMPILER_GUARD_W(var) ((void)0)
#endif

/* *************************************
 *  Basic Types
 ***************************************/
#if !defined (__VMS) \
 && (defined (__cplusplus) \
 || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
# include <stdint.h>
  typedef uint8_t xxh_u8;
#else
  typedef unsigned char xxh_u8;
#endif
typedef XXH32_hash_t xxh_u32;

#ifdef XXH_OLD_NAMES
#  define BYTE xxh_u8
#  define U8   xxh_u8
#  define U32  xxh_u32
#endif

/* ***   Memory access   *** */

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE32 and XXH_readBE32.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/*
 * Force direct memory access. Only works on CPUs which support unaligned
 * memory access in hardware.
 */
static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/*
 * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
 * documentation claimed that it only increased the alignment, but actually it
 * can decrease it on gcc, clang, and icc:
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
 * https://gcc.godbolt.org/z/xYez1j67Y.
 */
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
#endif
static xxh_u32 XXH_read32(const void* ptr)
{
    typedef __attribute__((aligned(1))) xxh_u32 xxh_unalign32;
    return *((const xxh_unalign32*)ptr);
}

#else

/*
 * Portable and safe solution. Generally efficient.
 * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
 */
static xxh_u32 XXH_read32(const void* memPtr)
{
    xxh_u32 val;
    XXH_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif /* XXH_FORCE_MEMORY_ACCESS */


/* ***   Endianness   *** */

/*!
 * @ingroup tuning
 * @def XXH_CPU_LITTLE_ENDIAN
 * @brief Whether the target is little endian.
 *
 * Defined to 1 if the target is little endian, or 0 if it is big endian.
 * It can be defined externally, for example on the compiler command line.
 *
 * If it is not defined,
 * a runtime check (which is usually constant folded) is used instead.
 *
 * @note
 *   This is not necessarily defined to an integer constant.
 *
 * @see XXH_isLittleEndian() for the runtime check.
 */
#ifndef XXH_CPU_LITTLE_ENDIAN
/*
 * Try to detect endianness automatically, to avoid the nonstandard behavior
 * in `XXH_isLittleEndian()`
 */
#  if defined(_WIN32) /* Windows is always little endian */ \
     || defined(__LITTLE_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 1
#  elif defined(__BIG_ENDIAN__) \
     || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
#    define XXH_CPU_LITTLE_ENDIAN 0
#  else
/*!
 * @internal
 * @brief Runtime check for @ref XXH_CPU_LITTLE_ENDIAN.
 *
 * Most compilers will constant fold this.
 */
static int XXH_isLittleEndian(void)
{
    /*
     * Portable and well-defined behavior.
     * Don't use static: it is detrimental to performance.
     */
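    /*
     * Writing 1 into the 32-bit member and reading back the first byte
     * reveals the byte order: little endian machines store the least
     * significant byte first, so c[0] == 1 there, while big endian machines
     * yield c[0] == 0.
     */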
    const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 };
    return one.c[0];
}
#    define XXH_CPU_LITTLE_ENDIAN   XXH_isLittleEndian()
#  endif
#endif


/* ****************************************
 *  Compiler-specific Functions and Macros
 ******************************************/
#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)

#ifdef __has_builtin
#  define XXH_HAS_BUILTIN(x) __has_builtin(x)
#else
#  define XXH_HAS_BUILTIN(x) 0
#endif

#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L)
/* C23 and future versions have standard "unreachable()" */
#  include <stddef.h>
#  define XXH_UNREACHABLE() unreachable()

#elif defined(__cplusplus) && (__cplusplus > 202002L)
/* C++23 and future versions have std::unreachable() */
#  include <utility> /* std::unreachable() */
#  define XXH_UNREACHABLE() std::unreachable()

#elif XXH_HAS_BUILTIN(__builtin_unreachable)
#  define XXH_UNREACHABLE() __builtin_unreachable()

#elif defined(_MSC_VER)
#  define XXH_UNREACHABLE() __assume(0)

#else
#  define XXH_UNREACHABLE()
#endif

#define XXH_ASSUME(c) if (!(c)) { XXH_UNREACHABLE(); }

/*!
 * @internal
 * @def XXH_rotl32(x,r)
 * @brief 32-bit rotate left.
 *
 * @param x The 32-bit integer to be rotated.
 * @param r The number of bits to rotate.
 * @pre
 *   @p r > 0 && @p r < 32
 * @note
 *   @p x and @p r may be evaluated multiple times.
 * @return The rotated result.
 */
#if !defined(NO_CLANG_BUILTIN) && XXH_HAS_BUILTIN(__builtin_rotateleft32) \
                               && XXH_HAS_BUILTIN(__builtin_rotateleft64)
#  define XXH_rotl32 __builtin_rotateleft32
#  define XXH_rotl64 __builtin_rotateleft64
/* Note: although _rotl exists for minGW (GCC under windows), performance seems poor */
#elif defined(_MSC_VER)
#  define XXH_rotl32(x,r) _rotl(x,r)
#  define XXH_rotl64(x,r) _rotl64(x,r)
#else
#  define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
#  define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
#endif

/*!
 * @internal
 * @fn xxh_u32 XXH_swap32(xxh_u32 x)
 * @brief A 32-bit byteswap.
 *
 * @param x The 32-bit integer to byteswap.
 * @return @p x, byteswapped.
 */
#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap32 _byteswap_ulong
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap32 __builtin_bswap32
#else
static xxh_u32 XXH_swap32 (xxh_u32 x)
{
    return  ((x << 24) & 0xff000000 ) |
            ((x <<  8) & 0x00ff0000 ) |
            ((x >>  8) & 0x0000ff00 ) |
            ((x >> 24) & 0x000000ff );
}
#endif


/* ***************************
 *  Memory reads
 *****************************/

/*!
 * @internal
 * @brief Enum to indicate whether a pointer is aligned.
 */
typedef enum {
    XXH_aligned,  /*!< Aligned */
    XXH_unaligned /*!< Possibly unaligned */
} XXH_alignment;

/*
 * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
 *
 * This is ideal for older compilers which don't inline memcpy.
 */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u32)bytePtr[1] << 8)
         | ((xxh_u32)bytePtr[2] << 16)
         | ((xxh_u32)bytePtr[3] << 24);
}

XXH_FORCE_INLINE xxh_u32 XXH_readBE32(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[3]
         | ((xxh_u32)bytePtr[2] << 8)
         | ((xxh_u32)bytePtr[1] << 16)
         | ((xxh_u32)bytePtr[0] << 24);
}

#else
XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
}

static xxh_u32 XXH_readBE32(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ?
           XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u32
XXH_readLE32_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned) {
        return XXH_readLE32(ptr);
    } else {
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
    }
}


/* *************************************
 *  Misc
 ***************************************/
/*! @ingroup public */
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }

#define XXH_get32bits(p) XXH_readLE32_align(p, align)

#ifndef XXH_NO_LONG_LONG

/* *******************************************************************
 *  64-bit hash functions
 *********************************************************************/
/*******   Memory access   *******/

typedef XXH64_hash_t xxh_u64;

#ifdef XXH_OLD_NAMES
#  define U64 xxh_u64
#endif

#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))
/*
 * Manual byteshift. Best for old compilers which don't inline memcpy.
 * We actually directly use XXH_readLE64 and XXH_readBE64.
 */
#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
static xxh_u64 XXH_read64(const void* memPtr)
{
    return *(const xxh_u64*) memPtr;
}

#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))

/*
 * __attribute__((aligned(1))) is supported by gcc and clang. Originally the
 * documentation claimed that it only increased the alignment, but actually it
 * can decrease it on gcc, clang, and icc:
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=69502,
 * https://gcc.godbolt.org/z/xYez1j67Y.
 */
#ifdef XXH_OLD_NAMES
typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
#endif
static xxh_u64 XXH_read64(const void* ptr)
{
    typedef __attribute__((aligned(1))) xxh_u64 xxh_unalign64;
    return *((const xxh_unalign64*)ptr);
}

#else

/*
 * Portable and safe solution. Generally efficient.
 * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
 */
static xxh_u64 XXH_read64(const void* memPtr)
{
    xxh_u64 val;
    XXH_memcpy(&val, memPtr, sizeof(val));
    return val;
}

#endif /* XXH_FORCE_MEMORY_ACCESS */

#if defined(_MSC_VER)     /* Visual Studio */
#  define XXH_swap64 _byteswap_uint64
#elif XXH_GCC_VERSION >= 403
#  define XXH_swap64 __builtin_bswap64
#else
static xxh_u64 XXH_swap64(xxh_u64 x)
{
    return  ((x << 56) & 0xff00000000000000ULL) |
            ((x << 40) & 0x00ff000000000000ULL) |
            ((x << 24) & 0x0000ff0000000000ULL) |
            ((x << 8)  & 0x000000ff00000000ULL) |
            ((x >> 8)  & 0x00000000ff000000ULL) |
            ((x >> 24) & 0x0000000000ff0000ULL) |
            ((x >> 40) & 0x000000000000ff00ULL) |
            ((x >> 56) & 0x00000000000000ffULL);
}
#endif

/*
 * XXH_FORCE_MEMORY_ACCESS==3 is an endian-independent byteshift load.
 */
#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==3))

XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[0]
         | ((xxh_u64)bytePtr[1] << 8)
         | ((xxh_u64)bytePtr[2] << 16)
         | ((xxh_u64)bytePtr[3] << 24)
         | ((xxh_u64)bytePtr[4] << 32)
         | ((xxh_u64)bytePtr[5] << 40)
         | ((xxh_u64)bytePtr[6] << 48)
         | ((xxh_u64)bytePtr[7] << 56);
}

XXH_FORCE_INLINE xxh_u64 XXH_readBE64(const void* memPtr)
{
    const xxh_u8* bytePtr = (const xxh_u8 *)memPtr;
    return bytePtr[7]
         | ((xxh_u64)bytePtr[6] << 8)
         | ((xxh_u64)bytePtr[5] << 16)
         | ((xxh_u64)bytePtr[4] << 24)
         | ((xxh_u64)bytePtr[3] << 32)
         | ((xxh_u64)bytePtr[2] << 40)
         | ((xxh_u64)bytePtr[1] << 48)
         | ((xxh_u64)bytePtr[0] << 56);
}

#else
XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
}

static xxh_u64 XXH_readBE64(const void* ptr)
{
    return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
}
#endif

XXH_FORCE_INLINE xxh_u64
XXH_readLE64_align(const void* ptr, XXH_alignment align)
{
    if (align==XXH_unaligned)
        return XXH_readLE64(ptr);
    else
        return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
}


/*******   xxh64   *******/
/*!
 * @}
 * @defgroup XXH64_impl XXH64 implementation
 * @ingroup impl
 *
 * Details on the XXH64 implementation.
 * @{
 */
/* #define rather than static const, to be used as initializers */
#define XXH_PRIME64_1  0x9E3779B185EBCA87ULL  /*!< 0b1001111000110111011110011011000110000101111010111100101010000111 */
#define XXH_PRIME64_2  0xC2B2AE3D27D4EB4FULL  /*!< 0b1100001010110010101011100011110100100111110101001110101101001111 */
#define XXH_PRIME64_3  0x165667B19E3779F9ULL  /*!< 0b0001011001010110011001111011000110011110001101110111100111111001 */
#define XXH_PRIME64_4  0x85EBCA77C2B2AE63ULL  /*!< 0b1000010111101011110010100111011111000010101100101010111001100011 */
#define XXH_PRIME64_5  0x27D4EB2F165667C5ULL  /*!< 0b0010011111010100111010110010111100010110010101100110011111000101 */

#ifdef XXH_OLD_NAMES
#  define PRIME64_1 XXH_PRIME64_1
#  define PRIME64_2 XXH_PRIME64_2
#  define PRIME64_3 XXH_PRIME64_3
#  define PRIME64_4 XXH_PRIME64_4
#  define PRIME64_5 XXH_PRIME64_5
#endif

static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
{
    acc += input * XXH_PRIME64_2;
    acc  = XXH_rotl64(acc, 31);
    acc *= XXH_PRIME64_1;
    return acc;
}

static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
{
    val  = XXH64_round(0, val);
    acc ^= val;
    acc  = acc * XXH_PRIME64_1 + XXH_PRIME64_4;
    return acc;
}

static xxh_u64 XXH64_avalanche(xxh_u64 hash)
{
    hash ^= hash >> 33;
    hash *= XXH_PRIME64_2;
    hash ^= hash >> 29;
    hash *= XXH_PRIME64_3;
    hash ^= hash >> 32;
    return hash;
}
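/*
 * Note: XXH64_avalanche() above is the final bit-scrambling step. The
 * alternating xorshifts and multiplications by large primes spread the
 * influence of every input bit across all 64 output bits, so inputs that
 * differ only in their last few bytes still produce widely different hashes.
 */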
#define XXH_get64bits(p) XXH_readLE64_align(p, align)

/*!
 * @internal
 * @brief Processes the last 0-31 bytes of @p ptr.
 *
 * There may be up to 31 bytes remaining to consume from the input.
 * This final stage will digest them to ensure that all input bytes are present
 * in the final mix.
 *
 * @param hash The hash to finalize.
 * @param ptr The pointer to the remaining input.
 * @param len The remaining length, modulo 32.
 * @param align Whether @p ptr is aligned.
 * @return The finalized hash
 * @see XXH32_finalize().
 */
static XXH_PUREF xxh_u64
XXH64_finalize(xxh_u64 hash, const xxh_u8* ptr, size_t len, XXH_alignment align)
{
    if (ptr==NULL) XXH_ASSERT(len == 0);
    len &= 31;
    while (len >= 8) {
        xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr));
        ptr += 8;
        hash ^= k1;
        hash  = XXH_rotl64(hash,27) * XXH_PRIME64_1 + XXH_PRIME64_4;
        len -= 8;
    }
    if (len >= 4) {
        hash ^= (xxh_u64)(XXH_get32bits(ptr)) * XXH_PRIME64_1;
        ptr += 4;
        hash = XXH_rotl64(hash, 23) * XXH_PRIME64_2 + XXH_PRIME64_3;
        len -= 4;
    }
    while (len > 0) {
        hash ^= (*ptr++) * XXH_PRIME64_5;
        hash = XXH_rotl64(hash, 11) * XXH_PRIME64_1;
        --len;
    }
    return  XXH64_avalanche(hash);
}

#ifdef XXH_OLD_NAMES
#  define PROCESS1_64 XXH_PROCESS1_64
#  define PROCESS4_64 XXH_PROCESS4_64
#  define PROCESS8_64 XXH_PROCESS8_64
#else
#  undef XXH_PROCESS1_64
#  undef XXH_PROCESS4_64
#  undef XXH_PROCESS8_64
#endif

/*!
 * @internal
 * @brief The implementation for @ref XXH64().
 *
 * @param input , len , seed Directly passed from @ref XXH64().
 * @param align Whether @p input is aligned.
 * @return The calculated hash.
 */
XXH_FORCE_INLINE XXH_PUREF xxh_u64
XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
{
    xxh_u64 h64;
    if (input==NULL) XXH_ASSERT(len == 0);

    if (len>=32) {
        const xxh_u8* const bEnd = input + len;
        const xxh_u8* const limit = bEnd - 31;
        xxh_u64 v1 = seed + XXH_PRIME64_1 + XXH_PRIME64_2;
        xxh_u64 v2 = seed + XXH_PRIME64_2;
        xxh_u64 v3 = seed + 0;
        xxh_u64 v4 = seed - XXH_PRIME64_1;

        do {
            v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
            v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
            v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
            v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
        } while (input<limit);

        h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7)
            + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
        h64 = XXH64_mergeRound(h64, v1);
        h64 = XXH64_mergeRound(h64, v2);
        h64 = XXH64_mergeRound(h64, v3);
        h64 = XXH64_mergeRound(h64, v4);

    } else {
        h64  = seed + XXH_PRIME64_5;
    }

    h64 += (xxh_u64) len;

    return XXH64_finalize(h64, input, len, align);
}

/*! @ingroup XXH64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64 (XXH_NOESCAPE const void* input, size_t len, XXH64_hash_t seed)
{
#if XXH_SIZE_OPT >= 2
    /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
    XXH64_state_t state;
    XXH64_reset(&state, seed);
    XXH64_update(&state, (const xxh_u8*)input, len);
    return XXH64_digest(&state);
#else
    if (XXH_FORCE_ALIGN_CHECK) {
        if ((((size_t)input) & 7)==0) {  /* Input is aligned, let's leverage the speed advantage */
            return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
    }   }

    return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
#endif
}

/*******   Canonical representation   *******/

/*! @ingroup XXH64_family */
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH_NOESCAPE XXH64_canonical_t* dst, XXH64_hash_t hash)
{
    XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
    if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
    XXH_memcpy(dst, &hash, sizeof(*dst));
}

/*! @ingroup XXH64_family */
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(XXH_NOESCAPE const XXH64_canonical_t* src)
{
    return XXH_readBE64(src);
}

#endif  /* XXH_NO_LONG_LONG */

#endif  /* XXH_IMPLEMENTATION */

#if defined (__cplusplus)
}
#endif