10 changed files with 594 additions and 344 deletions
Before Width: | Height: | Size: 8.6 KiB After Width: | Height: | Size: 8.6 KiB |
@ -1,337 +1,355 @@
@@ -1,337 +1,355 @@
|
||||
//-----------------------------------------------------------------------------
|
||||
// MurmurHash3 was written by Austin Appleby, and is placed in the public
|
||||
// domain. The author hereby disclaims copyright to this source code.
|
||||
|
||||
// Note - The x86 and x64 versions do _not_ produce the same results, as the
|
||||
// algorithms are optimized for their respective platforms. You can still
|
||||
// compile and run any of them on any platform, but your performance with the
|
||||
// non-native version will be less than optimal.
|
||||
|
||||
#include "Murmur3.h" |
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Platform-specific functions and macros
|
||||
|
||||
// Microsoft Visual Studio
|
||||
|
||||
#if defined(_MSC_VER) |
||||
|
||||
#define FORCE_INLINE __forceinline |
||||
|
||||
#include <stdlib.h> |
||||
|
||||
#define ROTL32(x,y) _rotl(x,y) |
||||
#define ROTL64(x,y) _rotl64(x,y) |
||||
|
||||
#define BIG_CONSTANT(x) (x) |
||||
|
||||
// Other compilers
|
||||
|
||||
#else // defined(_MSC_VER)
|
||||
|
||||
#define FORCE_INLINE inline __attribute__((always_inline)) |
||||
|
||||
// Rotate a 32-bit value left by r bits.
// static: this helper is private to this translation unit (also avoids the
// C99/C11 "inline without external definition" linkage trap).
// Both shift counts are masked so that r == 0 (or r == 32) does not invoke
// the undefined behaviour of shifting a 32-bit value by 32 bits.
static inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
  return (x << (r & 31)) | (x >> ((32 - r) & 31));
}
||||
|
||||
// Rotate a 64-bit value left by r bits.
// static: private helper for this translation unit.
// Shift counts are masked so that r == 0 (or r == 64) does not invoke the
// undefined behaviour of shifting a 64-bit value by 64 bits.
static inline uint64_t rotl64 ( uint64_t x, int8_t r )
{
  return (x << (r & 63)) | (x >> ((64 - r) & 63));
}
||||
|
||||
#define ROTL32(x,y) rotl32(x,y) |
||||
#define ROTL64(x,y) rotl64(x,y) |
||||
|
||||
#define BIG_CONSTANT(x) (x##LLU) |
||||
|
||||
#endif // !defined(_MSC_VER)
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Block read - if your platform needs to do endian-swapping or can only
|
||||
// handle aligned reads, do the conversion here
|
||||
|
||||
// Read 32-bit block i from p.  Reads are native-endian and assumed aligned;
// if your platform needs endian-swapping or can only handle aligned reads,
// do the conversion here.
// NOTE: i may be negative — the x86 hash variants point p at the END of the
// whole blocks and index backwards with i in [-nblocks, 0).
FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i )
{
  return p[i];
}
||||
|
||||
// Read 64-bit block i from p.  Reads are native-endian and assumed aligned;
// if your platform needs endian-swapping or can only handle aligned reads,
// do the conversion here.
FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i )
{
  return p[i];
}
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Finalization mix - force all bits of a hash block to avalanche
|
||||
|
||||
// Finalization mix for one 32-bit lane: forces every input bit to affect
// every output bit ("avalanche").  The xorshift/multiply constants are
// Appleby's empirically-derived values; their order is significant and must
// not be changed.
FORCE_INLINE uint32_t fmix32 ( uint32_t h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;

  return h;
}
||||
|
||||
//----------
|
||||
|
||||
// Finalization mix for one 64-bit lane: forces every input bit to affect
// every output bit ("avalanche").  64-bit analogue of fmix32; constant order
// is significant and must not be changed.
FORCE_INLINE uint64_t fmix64 ( uint64_t k )
{
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xff51afd7ed558ccd);
  k ^= k >> 33;
  k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53);
  k ^= k >> 33;

  return k;
}
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
void MurmurHash3_x86_32 ( const void * key, int len, |
||||
uint32_t seed, void * out ) { |
||||
const uint8_t * data = static_cast<const uint8_t*>(key); |
||||
const int nblocks = len / 4; |
||||
|
||||
uint32_t h1 = seed; |
||||
|
||||
const uint32_t c1 = 0xcc9e2d51; |
||||
const uint32_t c2 = 0x1b873593; |
||||
|
||||
//----------
|
||||
// body
|
||||
|
||||
const uint32_t * blocks = reinterpret_cast<const unsigned int *>(data + nblocks*4); |
||||
|
||||
for(int i = -nblocks; i; i++) |
||||
{ |
||||
uint32_t k1 = getblock32(blocks,i); |
||||
|
||||
k1 *= c1; |
||||
k1 = ROTL32(k1,15); |
||||
k1 *= c2; |
||||
|
||||
h1 ^= k1; |
||||
h1 = ROTL32(h1,13); |
||||
h1 = h1*5+0xe6546b64; |
||||
} |
||||
|
||||
//----------
|
||||
// tail
|
||||
|
||||
const uint8_t * tail = (data + nblocks*4); |
||||
|
||||
uint32_t k1 = 0; |
||||
|
||||
switch(len & 3) { |
||||
case 3: k1 ^= static_cast<unsigned int>(tail[2]) << 16; // fall-through
|
||||
case 2: k1 ^= static_cast<unsigned int>(tail[1]) << 8; // fall-through
|
||||
case 1: k1 ^= static_cast<unsigned int>(tail[0]); |
||||
k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1; |
||||
break; |
||||
default: |
||||
break; |
||||
}; |
||||
|
||||
//----------
|
||||
// finalization
|
||||
|
||||
h1 ^= static_cast<unsigned int>(len); |
||||
|
||||
h1 = fmix32(h1); |
||||
|
||||
*static_cast<uint32_t*>(out) = h1; |
||||
} |
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
// MurmurHash3, 128-bit output, x86 (32-bit) optimized variant.
// Runs four interleaved 32-bit lanes over 16-byte blocks.
//
// key  - data to hash; len - length in bytes
// seed - initial state, applied to all four lanes
// out  - receives 16 bytes of hash (four uint32_t)
// Note - produces a different result than the x64 128-bit variant.
void MurmurHash3_x86_128 ( const void * key, const int len,
                           uint32_t seed, void * out )
{
  const uint8_t * data = static_cast<const uint8_t*>(key);
  const int nblocks = len / 16;

  uint32_t h1 = seed;
  uint32_t h2 = seed;
  uint32_t h3 = seed;
  uint32_t h4 = seed;

  const uint32_t c1 = 0x239b961b;
  const uint32_t c2 = 0xab0e9789;
  const uint32_t c3 = 0x38b34ae5;
  const uint32_t c4 = 0xa1e38b93;

  //----------
  // body

  // Points at the END of the whole 16-byte blocks; iterated with a negative
  // index, as in MurmurHash3_x86_32.
  const uint32_t * blocks = reinterpret_cast<const uint32_t *>(data + nblocks*16);

  for(int i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock32(blocks,i*4+0);
    uint32_t k2 = getblock32(blocks,i*4+1);
    uint32_t k3 = getblock32(blocks,i*4+2);
    uint32_t k4 = getblock32(blocks,i*4+3);

    k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;

    h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b;

    k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2;

    h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747;

    k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3;

    h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35;

    k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4;

    h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17;
  }

  //----------
  // tail: the remaining 0-15 bytes.  Every case deliberately falls through
  // so each lane (k4..k1) accumulates all of its remaining bytes.

  const uint8_t * tail = (data + nblocks*16);

  uint32_t k1 = 0;
  uint32_t k2 = 0;
  uint32_t k3 = 0;
  uint32_t k4 = 0;

  switch(len & 15) {
    case 15: k4 ^= static_cast<unsigned int>(tail[14]) << 16; // fall-through
    case 14: k4 ^= static_cast<unsigned int>(tail[13]) << 8; // fall-through
    case 13: k4 ^= static_cast<unsigned int>(tail[12]) << 0;
             k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4; // fall-through

    case 12: k3 ^= static_cast<unsigned int>(tail[11]) << 24; // fall-through
    case 11: k3 ^= static_cast<unsigned int>(tail[10]) << 16; // fall-through
    case 10: k3 ^= static_cast<unsigned int>(tail[ 9]) << 8; // fall-through
    case  9: k3 ^= static_cast<unsigned int>(tail[ 8]) << 0;
             k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3; // fall-through

    case  8: k2 ^= static_cast<unsigned int>(tail[ 7]) << 24; // fall-through
    case  7: k2 ^= static_cast<unsigned int>(tail[ 6]) << 16; // fall-through
    case  6: k2 ^= static_cast<unsigned int>(tail[ 5]) << 8; // fall-through
    case  5: k2 ^= static_cast<unsigned int>(tail[ 4]) << 0;
             k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2; // fall-through

    case  4: k1 ^= static_cast<unsigned int>(tail[ 3]) << 24; // fall-through
    case  3: k1 ^= static_cast<unsigned int>(tail[ 2]) << 16; // fall-through
    case  2: k1 ^= static_cast<unsigned int>(tail[ 1]) << 8; // fall-through
    case  1: k1 ^= static_cast<unsigned int>(tail[ 0]) << 0;
             k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
             break;
    default:
             break;
  };

  //----------
  // finalization: fold in the length, cross-mix the lanes, avalanche each,
  // then cross-mix again

  h1 ^= static_cast<unsigned int>(len); h2 ^= static_cast<unsigned int>(len); h3 ^= static_cast<unsigned int>(len); h4 ^= static_cast<unsigned int>(len);

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  h1 = fmix32(h1);
  h2 = fmix32(h2);
  h3 = fmix32(h3);
  h4 = fmix32(h4);

  h1 += h2; h1 += h3; h1 += h4;
  h2 += h1; h3 += h1; h4 += h1;

  (static_cast<uint32_t*>(out))[0] = h1;
  (static_cast<uint32_t*>(out))[1] = h2;
  (static_cast<uint32_t*>(out))[2] = h3;
  (static_cast<uint32_t*>(out))[3] = h4;
}
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
// MurmurHash3, 128-bit output, x64-optimized variant (two 64-bit lanes).
//
// key  - data to hash; len - length in bytes
// seed - initial state, applied to both lanes
// out  - receives 16 bytes of hash (two uint64_t)
// Note - produces a different result than the x86 128-bit variant.
void MurmurHash3_x64_128 ( const void * key, const int len,
                           const uint32_t seed, void * out ) {
  const uint8_t * data = reinterpret_cast<const uint8_t*>(key);
  const int nblocks = len / 16;

  uint64_t h1 = seed;
  uint64_t h2 = seed;

  const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5);
  const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f);

  //----------
  // body

  // Unlike the x86 variants, blocks are walked forward from the start.
  const uint64_t * blocks = reinterpret_cast<const uint64_t *>(data);

  for(int i = 0; i < nblocks; i++) {
    uint64_t k1 = getblock64(blocks,i*2+0);
    uint64_t k2 = getblock64(blocks,i*2+1);

    k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;

    h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729;

    k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2;

    h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5;
  }

  //----------
  // tail: the remaining 0-15 bytes.  Every case deliberately falls through
  // so each lane (k2, then k1) accumulates all of its remaining bytes.

  const uint8_t * tail = (data + nblocks*16);

  uint64_t k1 = 0;
  uint64_t k2 = 0;

  switch(len & 15) {
  case 15: k2 ^= (static_cast<uint64_t>(tail[14])) << 48; // fall-through
  case 14: k2 ^= (static_cast<uint64_t>(tail[13])) << 40; // fall-through
  case 13: k2 ^= (static_cast<uint64_t>(tail[12])) << 32; // fall-through
  case 12: k2 ^= (static_cast<uint64_t>(tail[11])) << 24; // fall-through
  case 11: k2 ^= (static_cast<uint64_t>(tail[10])) << 16; // fall-through
  case 10: k2 ^= (static_cast<uint64_t>(tail[ 9])) << 8; // fall-through
  case  9: k2 ^= (static_cast<uint64_t>(tail[ 8])) << 0;
           k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2; // fall-through

  case  8: k1 ^= (static_cast<uint64_t>(tail[ 7])) << 56; // fall-through
  case  7: k1 ^= (static_cast<uint64_t>(tail[ 6])) << 48; // fall-through
  case  6: k1 ^= (static_cast<uint64_t>(tail[ 5])) << 40; // fall-through
  case  5: k1 ^= (static_cast<uint64_t>(tail[ 4])) << 32; // fall-through
  case  4: k1 ^= (static_cast<uint64_t>(tail[ 3])) << 24; // fall-through
  case  3: k1 ^= (static_cast<uint64_t>(tail[ 2])) << 16; // fall-through
  case  2: k1 ^= (static_cast<uint64_t>(tail[ 1])) << 8; // fall-through
  case  1: k1 ^= (static_cast<uint64_t>(tail[ 0])) << 0;
           k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1;
           break;
  default:
           break;
  };

  //----------
  // finalization: fold in the length, cross-mix the lanes, avalanche each,
  // then cross-mix again

  h1 ^= static_cast<unsigned int>(len); h2 ^= static_cast<unsigned int>(len);

  h1 += h2;
  h2 += h1;

  h1 = fmix64(h1);
  h2 = fmix64(h2);

  h1 += h2;
  h2 += h1;

  (static_cast<uint64_t*>(out))[0] = h1;
  (static_cast<uint64_t*>(out))[1] = h2;
}
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
//-----------------------------------------------------------------------------
|
||||
// MurmurHash3 was written by Austin Appleby, and is placed in the public
|
||||
// domain. The author hereby disclaims copyright to this source code.
|
||||
|
||||
// Note - The x86 and x64 versions do _not_ produce the same results, as the
|
||||
// algorithms are optimized for their respective platforms. You can still
|
||||
// compile and run any of them on any platform, but your performance with the
|
||||
// non-native version will be less than optimal.
|
||||
|
||||
#include "Murmur3.h" |
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Platform-specific functions and macros
|
||||
|
||||
// Disable -Wimplicit-fallthrough for this file: the tail-handling switch
// statements below rely on deliberate case fall-through.
#define GCC_VERSION (__GNUC__ * 10000 \
                     + __GNUC_MINOR__ * 100 \
                     + __GNUC_PATCHLEVEL__)

/* Test for GCC > 6.0.0, which supports push/pop similar to MS C/C++ */
#if GCC_VERSION > 60000
// Save the diagnostic state BEFORE disabling the warning, so the matching
// "pop" at the end of the file restores the caller's settings.  (The
// original issued "ignored" first, so the pushed state already had the
// warning disabled and the pop could never re-enable it.)
#pragma GCC diagnostic push
#endif
#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
||||
|
||||
// Microsoft Visual Studio
|
||||
|
||||
#if defined(_MSC_VER) |
||||
|
||||
#define FORCE_INLINE __forceinline |
||||
|
||||
#include <stdlib.h> |
||||
|
||||
#define ROTL32(x,y) _rotl(x,y) |
||||
#define ROTL64(x,y) _rotl64(x,y) |
||||
|
||||
#define BIG_CONSTANT(x) (x) |
||||
|
||||
// Other compilers
|
||||
|
||||
#else // defined(_MSC_VER)
|
||||
|
||||
#define FORCE_INLINE inline __attribute__((always_inline)) |
||||
|
||||
inline uint32_t rotl32 ( uint32_t x, int8_t r ) |
||||
{ |
||||
return (x << r) | (x >> (32 - r)); |
||||
} |
||||
|
||||
inline uint64_t rotl64 ( uint64_t x, int8_t r ) |
||||
{ |
||||
return (x << r) | (x >> (64 - r)); |
||||
} |
||||
|
||||
#define ROTL32(x,y) rotl32(x,y) |
||||
#define ROTL64(x,y) rotl64(x,y) |
||||
|
||||
#define BIG_CONSTANT(x) (x##LLU) |
||||
|
||||
#endif // !defined(_MSC_VER)
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Block read - if your platform needs to do endian-swapping or can only
|
||||
// handle aligned reads, do the conversion here
|
||||
|
||||
FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i ) |
||||
{ |
||||
return p[i]; |
||||
} |
||||
|
||||
FORCE_INLINE uint64_t getblock64 ( const uint64_t * p, int i ) |
||||
{ |
||||
return p[i]; |
||||
} |
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Finalization mix - force all bits of a hash block to avalanche
|
||||
|
||||
FORCE_INLINE uint32_t fmix32 ( uint32_t h ) |
||||
{ |
||||
h ^= h >> 16; |
||||
h *= 0x85ebca6b; |
||||
h ^= h >> 13; |
||||
h *= 0xc2b2ae35; |
||||
h ^= h >> 16; |
||||
|
||||
return h; |
||||
} |
||||
|
||||
//----------
|
||||
|
||||
FORCE_INLINE uint64_t fmix64 ( uint64_t k ) |
||||
{ |
||||
k ^= k >> 33; |
||||
k *= BIG_CONSTANT(0xff51afd7ed558ccd); |
||||
k ^= k >> 33; |
||||
k *= BIG_CONSTANT(0xc4ceb9fe1a85ec53); |
||||
k ^= k >> 33; |
||||
|
||||
return k; |
||||
} |
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
void MurmurHash3_x86_32 ( const void * key, int len, |
||||
uint32_t seed, void * out ) { |
||||
const uint8_t * data = static_cast<const uint8_t*>(key); |
||||
const int nblocks = len / 4; |
||||
|
||||
uint32_t h1 = seed; |
||||
|
||||
const uint32_t c1 = 0xcc9e2d51; |
||||
const uint32_t c2 = 0x1b873593; |
||||
|
||||
//----------
|
||||
// body
|
||||
|
||||
const uint32_t * blocks = reinterpret_cast<const unsigned int *>(data + nblocks*4); |
||||
|
||||
for(int i = -nblocks; i; i++) |
||||
{ |
||||
uint32_t k1 = getblock32(blocks,i); |
||||
|
||||
k1 *= c1; |
||||
k1 = ROTL32(k1,15); |
||||
k1 *= c2; |
||||
|
||||
h1 ^= k1; |
||||
h1 = ROTL32(h1,13); |
||||
h1 = h1*5+0xe6546b64; |
||||
} |
||||
|
||||
//----------
|
||||
// tail
|
||||
|
||||
const uint8_t * tail = (data + nblocks*4); |
||||
|
||||
uint32_t k1 = 0; |
||||
|
||||
switch(len & 3) { |
||||
case 3: k1 ^= static_cast<unsigned int>(tail[2]) << 16; // fall-through
|
||||
case 2: k1 ^= static_cast<unsigned int>(tail[1]) << 8; // fall-through
|
||||
case 1: k1 ^= static_cast<unsigned int>(tail[0]); |
||||
k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1; |
||||
break; |
||||
default: |
||||
break; |
||||
}; |
||||
|
||||
//----------
|
||||
// finalization
|
||||
|
||||
h1 ^= static_cast<unsigned int>(len); |
||||
|
||||
h1 = fmix32(h1); |
||||
|
||||
*static_cast<uint32_t*>(out) = h1; |
||||
} |
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
void MurmurHash3_x86_128 ( const void * key, const int len, |
||||
uint32_t seed, void * out ) |
||||
{ |
||||
const uint8_t * data = static_cast<const uint8_t*>(key); |
||||
const int nblocks = len / 16; |
||||
|
||||
uint32_t h1 = seed; |
||||
uint32_t h2 = seed; |
||||
uint32_t h3 = seed; |
||||
uint32_t h4 = seed; |
||||
|
||||
const uint32_t c1 = 0x239b961b; |
||||
const uint32_t c2 = 0xab0e9789; |
||||
const uint32_t c3 = 0x38b34ae5; |
||||
const uint32_t c4 = 0xa1e38b93; |
||||
|
||||
//----------
|
||||
// body
|
||||
|
||||
const uint32_t * blocks = reinterpret_cast<const uint32_t *>(data + nblocks*16); |
||||
|
||||
for(int i = -nblocks; i; i++) |
||||
{ |
||||
uint32_t k1 = getblock32(blocks,i*4+0); |
||||
uint32_t k2 = getblock32(blocks,i*4+1); |
||||
uint32_t k3 = getblock32(blocks,i*4+2); |
||||
uint32_t k4 = getblock32(blocks,i*4+3); |
||||
|
||||
k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1; |
||||
|
||||
h1 = ROTL32(h1,19); h1 += h2; h1 = h1*5+0x561ccd1b; |
||||
|
||||
k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2; |
||||
|
||||
h2 = ROTL32(h2,17); h2 += h3; h2 = h2*5+0x0bcaa747; |
||||
|
||||
k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3; |
||||
|
||||
h3 = ROTL32(h3,15); h3 += h4; h3 = h3*5+0x96cd1c35; |
||||
|
||||
k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4; |
||||
|
||||
h4 = ROTL32(h4,13); h4 += h1; h4 = h4*5+0x32ac3b17; |
||||
} |
||||
|
||||
//----------
|
||||
// tail
|
||||
|
||||
const uint8_t * tail = (data + nblocks*16); |
||||
|
||||
uint32_t k1 = 0; |
||||
uint32_t k2 = 0; |
||||
uint32_t k3 = 0; |
||||
uint32_t k4 = 0; |
||||
|
||||
// These are supposed to fall through
|
||||
switch(len & 15) { |
||||
case 15: k4 ^= static_cast<unsigned int>(tail[14]) << 16; // fall-through
|
||||
case 14: k4 ^= static_cast<unsigned int>(tail[13]) << 8; // fall-through
|
||||
case 13: k4 ^= static_cast<unsigned int>(tail[12]) << 0; |
||||
k4 *= c4; k4 = ROTL32(k4,18); k4 *= c1; h4 ^= k4; |
||||
|
||||
case 12: k3 ^= static_cast<unsigned int>(tail[11]) << 24; // fall-through
|
||||
case 11: k3 ^= static_cast<unsigned int>(tail[10]) << 16; // fall-through
|
||||
case 10: k3 ^= static_cast<unsigned int>(tail[ 9]) << 8; // fall-through
|
||||
case 9: k3 ^= static_cast<unsigned int>(tail[ 8]) << 0; |
||||
k3 *= c3; k3 = ROTL32(k3,17); k3 *= c4; h3 ^= k3; |
||||
|
||||
case 8: k2 ^= static_cast<unsigned int>(tail[ 7]) << 24; // fall-through
|
||||
case 7: k2 ^= static_cast<unsigned int>(tail[ 6]) << 16; // fall-through
|
||||
case 6: k2 ^= static_cast<unsigned int>(tail[ 5]) << 8; // fall-through
|
||||
case 5: k2 ^= static_cast<unsigned int>(tail[ 4]) << 0; |
||||
k2 *= c2; k2 = ROTL32(k2,16); k2 *= c3; h2 ^= k2; |
||||
|
||||
case 4: k1 ^= static_cast<unsigned int>(tail[ 3]) << 24; // fall-through
|
||||
case 3: k1 ^= static_cast<unsigned int>(tail[ 2]) << 16; // fall-through
|
||||
case 2: k1 ^= static_cast<unsigned int>(tail[ 1]) << 8; // fall-through
|
||||
case 1: k1 ^= static_cast<unsigned int>(tail[ 0]) << 0; |
||||
k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1; |
||||
break; |
||||
default: |
||||
break; |
||||
}; |
||||
|
||||
//----------
|
||||
// finalization
|
||||
|
||||
h1 ^= static_cast<unsigned int>(len); h2 ^= static_cast<unsigned int>(len); h3 ^= static_cast<unsigned int>(len); h4 ^= static_cast<unsigned int>(len); |
||||
|
||||
h1 += h2; h1 += h3; h1 += h4; |
||||
h2 += h1; h3 += h1; h4 += h1; |
||||
|
||||
h1 = fmix32(h1); |
||||
h2 = fmix32(h2); |
||||
h3 = fmix32(h3); |
||||
h4 = fmix32(h4); |
||||
|
||||
h1 += h2; h1 += h3; h1 += h4; |
||||
h2 += h1; h3 += h1; h4 += h1; |
||||
|
||||
(static_cast<uint32_t*>(out))[0] = h1; |
||||
(static_cast<uint32_t*>(out))[1] = h2; |
||||
(static_cast<uint32_t*>(out))[2] = h3; |
||||
(static_cast<uint32_t*>(out))[3] = h4; |
||||
} |
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
|
||||
void MurmurHash3_x64_128 ( const void * key, const int len, |
||||
const uint32_t seed, void * out ) { |
||||
const uint8_t * data = reinterpret_cast<const uint8_t*>(key); |
||||
const int nblocks = len / 16; |
||||
|
||||
uint64_t h1 = seed; |
||||
uint64_t h2 = seed; |
||||
|
||||
const uint64_t c1 = BIG_CONSTANT(0x87c37b91114253d5); |
||||
const uint64_t c2 = BIG_CONSTANT(0x4cf5ad432745937f); |
||||
|
||||
//----------
|
||||
// body
|
||||
|
||||
const uint64_t * blocks = reinterpret_cast<const uint64_t *>(data); |
||||
|
||||
for(int i = 0; i < nblocks; i++) { |
||||
uint64_t k1 = getblock64(blocks,i*2+0); |
||||
uint64_t k2 = getblock64(blocks,i*2+1); |
||||
|
||||
k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1; |
||||
|
||||
h1 = ROTL64(h1,27); h1 += h2; h1 = h1*5+0x52dce729; |
||||
|
||||
k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2; |
||||
|
||||
h2 = ROTL64(h2,31); h2 += h1; h2 = h2*5+0x38495ab5; |
||||
} |
||||
|
||||
//----------
|
||||
// tail
|
||||
|
||||
const uint8_t * tail = (data + nblocks*16); |
||||
|
||||
uint64_t k1 = 0; |
||||
uint64_t k2 = 0; |
||||
|
||||
switch(len & 15) { |
||||
case 15: k2 ^= (static_cast<uint64_t>(tail[14])) << 48; // fall-through
|
||||
case 14: k2 ^= (static_cast<uint64_t>(tail[13])) << 40; // fall-through
|
||||
case 13: k2 ^= (static_cast<uint64_t>(tail[12])) << 32; // fall-through
|
||||
case 12: k2 ^= (static_cast<uint64_t>(tail[11])) << 24; // fall-through
|
||||
case 11: k2 ^= (static_cast<uint64_t>(tail[10])) << 16; // fall-through
|
||||
case 10: k2 ^= (static_cast<uint64_t>(tail[ 9])) << 8; // fall-through
|
||||
case 9: k2 ^= (static_cast<uint64_t>(tail[ 8])) << 0; |
||||
k2 *= c2; k2 = ROTL64(k2,33); k2 *= c1; h2 ^= k2; |
||||
|
||||
case 8: k1 ^= (static_cast<uint64_t>(tail[ 7])) << 56; // fall-through
|
||||
case 7: k1 ^= (static_cast<uint64_t>(tail[ 6])) << 48; // fall-through
|
||||
case 6: k1 ^= (static_cast<uint64_t>(tail[ 5])) << 40; // fall-through
|
||||
case 5: k1 ^= (static_cast<uint64_t>(tail[ 4])) << 32; // fall-through
|
||||
case 4: k1 ^= (static_cast<uint64_t>(tail[ 3])) << 24; // fall-through
|
||||
case 3: k1 ^= (static_cast<uint64_t>(tail[ 2])) << 16; // fall-through
|
||||
case 2: k1 ^= (static_cast<uint64_t>(tail[ 1])) << 8; // fall-through
|
||||
case 1: k1 ^= (static_cast<uint64_t>(tail[ 0])) << 0; |
||||
k1 *= c1; k1 = ROTL64(k1,31); k1 *= c2; h1 ^= k1; |
||||
break; |
||||
default: |
||||
break; |
||||
}; |
||||
|
||||
//----------
|
||||
// finalization
|
||||
|
||||
h1 ^= static_cast<unsigned int>(len); h2 ^= static_cast<unsigned int>(len); |
||||
|
||||
h1 += h2; |
||||
h2 += h1; |
||||
|
||||
h1 = fmix64(h1); |
||||
h2 = fmix64(h2); |
||||
|
||||
h1 += h2; |
||||
h2 += h1; |
||||
|
||||
(static_cast<uint64_t*>(out))[0] = h1; |
||||
(static_cast<uint64_t*>(out))[1] = h2; |
||||
} |
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
/* Test for GCC > 6.0.0 */
#if GCC_VERSION > 60000
// Restore the diagnostic state saved by the matching "push" at the top of
// the file.
#pragma GCC diagnostic pop
#else
// Otherwise, turn the warning back on manually.
#pragma GCC diagnostic warning "-Wimplicit-fallthrough"
#endif
@ -0,0 +1,169 @@
@@ -0,0 +1,169 @@
|
||||
/* libSoX minimal glob for MS-Windows: (c) 2009 SoX contributors
|
||||
* |
||||
* This library is free software; you can redistribute it and/or modify it |
||||
* under the terms of the GNU Lesser General Public License as published by |
||||
* the Free Software Foundation; either version 2.1 of the License, or (at |
||||
* your option) any later version. |
||||
* |
||||
* This library is distributed in the hope that it will be useful, but |
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of |
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser |
||||
* General Public License for more details. |
||||
* |
||||
* You should have received a copy of the GNU Lesser General Public License |
||||
* along with this library; if not, write to the Free Software Foundation, |
||||
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
||||
*/ |
||||
|
||||
#include "win32-glob.h" |
||||
#include <stdlib.h> |
||||
#include <stdio.h> |
||||
#include <errno.h> |
||||
#define WIN32_LEAN_AND_MEAN 1 |
||||
#include <windows.h> |
||||
|
||||
/* Singly-linked list node holding one matched path.  The path is stored
 * inline as the FIRST member (NUL-terminated, at most MAX_PATH bytes) so a
 * node pointer can be handed to the caller directly as a char* — see how
 * glob() fills gl_pathv with (char*)head. */
typedef struct file_entry
{
  char name[MAX_PATH];
  struct file_entry *next;
} file_entry;
||||
|
||||
static int |
||||
insert( |
||||
const char* path, |
||||
const char* name, |
||||
file_entry** phead) |
||||
{ |
||||
int len; |
||||
file_entry* cur = malloc(sizeof(file_entry)); |
||||
if (!cur) |
||||
{ |
||||
return ENOMEM; |
||||
} |
||||
|
||||
len = _snprintf(cur->name, MAX_PATH, "%s%s", path, name); |
||||
cur->name[MAX_PATH - 1] = 0; |
||||
cur->next = *phead; |
||||
*phead = cur; |
||||
|
||||
return len < 0 || len >= MAX_PATH ? ENAMETOOLONG : 0; |
||||
} |
||||
|
||||
static int |
||||
entry_comparer( |
||||
const void* pv1, |
||||
const void* pv2) |
||||
{ |
||||
const file_entry* const * pe1 = pv1; |
||||
const file_entry* const * pe2 = pv2; |
||||
return _stricmp((*pe1)->name, (*pe2)->name); |
||||
} |
||||
|
||||
int |
||||
glob( |
||||
const char *pattern, |
||||
int flags, |
||||
void *unused, |
||||
glob_t *pglob) |
||||
{ |
||||
char path[MAX_PATH]; |
||||
file_entry *head = NULL; |
||||
int err = 0; |
||||
size_t len; |
||||
unsigned entries = 0; |
||||
WIN32_FIND_DATAA finddata; |
||||
HANDLE hfindfile = FindFirstFileA(pattern, &finddata); |
||||
|
||||
if (!pattern || flags != (flags & GLOB_FLAGS) || unused || !pglob) |
||||
{ |
||||
errno = EINVAL; |
||||
return EINVAL; |
||||
} |
||||
|
||||
path[MAX_PATH - 1] = 0; |
||||
strncpy(path, pattern, MAX_PATH); |
||||
if (path[MAX_PATH - 1] != 0) |
||||
{ |
||||
errno = ENAMETOOLONG; |
||||
return ENAMETOOLONG; |
||||
} |
||||
|
||||
len = strlen(path); |
||||
while (len > 0 && path[len - 1] != '/' && path[len - 1] != '\\') |
||||
len--; |
||||
path[len] = 0; |
||||
|
||||
if (hfindfile == INVALID_HANDLE_VALUE) |
||||
{ |
||||
if (flags & GLOB_NOCHECK) |
||||
{ |
||||
err = insert("", pattern, &head); |
||||
entries++; |
||||
} |
||||
} |
||||
else |
||||
{ |
||||
do |
||||
{ |
||||
err = insert(path, finddata.cFileName, &head); |
||||
entries++; |
||||
} while (!err && FindNextFileA(hfindfile, &finddata)); |
||||
|
||||
FindClose(hfindfile); |
||||
} |
||||
|
||||
if (err == 0) |
||||
{ |
||||
pglob->gl_pathv = malloc((entries + 1) * sizeof(char*)); |
||||
if (pglob->gl_pathv) |
||||
{ |
||||
pglob->gl_pathc = entries; |
||||
pglob->gl_pathv[entries] = NULL; |
||||
for (; head; head = head->next, entries--) |
||||
pglob->gl_pathv[entries - 1] = (char*)head; |
||||
qsort(pglob->gl_pathv, pglob->gl_pathc, sizeof(char*), entry_comparer); |
||||
} |
||||
else |
||||
{ |
||||
pglob->gl_pathc = 0; |
||||
err = ENOMEM; |
||||
} |
||||
} |
||||
else if (pglob) |
||||
{ |
||||
pglob->gl_pathc = 0; |
||||
pglob->gl_pathv = NULL; |
||||
} |
||||
|
||||
if (err) |
||||
{ |
||||
file_entry *cur; |
||||
while (head) |
||||
{ |
||||
cur = head; |
||||
head = head->next; |
||||
free(cur); |
||||
} |
||||
|
||||
errno = err; |
||||
} |
||||
|
||||
return err; |
||||
} |
||||
|
||||
void |
||||
globfree( |
||||
glob_t* pglob) |
||||
{ |
||||
if (pglob) |
||||
{ |
||||
char** cur; |
||||
for (cur = pglob->gl_pathv; *cur; cur++) |
||||
{ |
||||
free(*cur); |
||||
} |
||||
|
||||
pglob->gl_pathc = 0; |
||||
pglob->gl_pathv = NULL; |
||||
} |
||||
} |
@ -0,0 +1,49 @@
@@ -0,0 +1,49 @@
|
||||
/* libSoX minimal glob for MS-Windows: (c) 2009 SoX contributors
|
||||
* |
||||
* This library is free software; you can redistribute it and/or modify it |
||||
* under the terms of the GNU Lesser General Public License as published by |
||||
* the Free Software Foundation; either version 2.1 of the License, or (at |
||||
* your option) any later version. |
||||
* |
||||
* This library is distributed in the hope that it will be useful, but |
||||
* WITHOUT ANY WARRANTY; without even the implied warranty of |
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser |
||||
* General Public License for more details. |
||||
* |
||||
* You should have received a copy of the GNU Lesser General Public License |
||||
* along with this library; if not, write to the Free Software Foundation, |
||||
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
||||
*/ |
||||
|
||||
#ifndef GLOB_H |
||||
#define GLOB_H 1 |
||||
|
||||
#define GLOB_NOCHECK (16) |
||||
#define GLOB_FLAGS (GLOB_NOCHECK) |
||||
|
||||
/* Result of a glob() call; release with globfree(). */
typedef struct glob_t
{
  unsigned gl_pathc;  /* number of matched paths in gl_pathv */
  char **gl_pathv;    /* NULL-terminated array of matched path strings */
} glob_t;
||||
|
||||
#ifdef __cplusplus |
||||
extern "C" { |
||||
#endif |
||||
|
||||
int |
||||
glob( |
||||
const char *pattern, |
||||
int flags, |
||||
void *unused, |
||||
glob_t *pglob); |
||||
|
||||
void |
||||
globfree( |
||||
glob_t* pglob); |
||||
|
||||
#ifdef __cplusplus |
||||
} |
||||
#endif |
||||
|
||||
#endif /* ifndef GLOB_H */ |
Loading…
Reference in new issue