1 | // Copyright (C) 1999 and onwards Google, Inc. |
2 | // |
3 | // |
// This file contains the core of Bob Jenkins' lookup2 algorithm.
5 | // |
6 | // This file contains the basic hash "mix" code which is widely referenced. |
7 | // |
8 | // This file also contains routines used to load an unaligned little-endian |
9 | // word from memory. This relatively generic functionality probably |
10 | // shouldn't live in this file. |
11 | |
12 | #ifndef UTIL_HASH_JENKINS_LOOKUP2_H__ |
13 | #define UTIL_HASH_JENKINS_LOOKUP2_H__ |
14 | |
15 | #include "base/port.h" |
16 | |
17 | // ---------------------------------------------------------------------- |
18 | // mix() |
19 | // The hash function I use is due to Bob Jenkins (see |
20 | // http://burtleburtle.net/bob/hash/index.html). |
// Each mix() takes 36 instructions, in 18 cycles if you're lucky.  On x86
// architectures, it comes to roughly 45 instructions in 27 cycles, again
// if you're lucky.
25 | // ---------------------------------------------------------------------- |
26 | |
27 | static inline void mix(uint32& a, uint32& b, uint32& c) { // 32bit version |
28 | a -= b; a -= c; a ^= (c>>13); |
29 | b -= c; b -= a; b ^= (a<<8); |
30 | c -= a; c -= b; c ^= (b>>13); |
31 | a -= b; a -= c; a ^= (c>>12); |
32 | b -= c; b -= a; b ^= (a<<16); |
33 | c -= a; c -= b; c ^= (b>>5); |
34 | a -= b; a -= c; a ^= (c>>3); |
35 | b -= c; b -= a; b ^= (a<<10); |
36 | c -= a; c -= b; c ^= (b>>15); |
37 | } |
38 | |
39 | static inline void mix(uint64& a, uint64& b, uint64& c) { // 64bit version |
40 | a -= b; a -= c; a ^= (c>>43); |
41 | b -= c; b -= a; b ^= (a<<9); |
42 | c -= a; c -= b; c ^= (b>>8); |
43 | a -= b; a -= c; a ^= (c>>38); |
44 | b -= c; b -= a; b ^= (a<<23); |
45 | c -= a; c -= b; c ^= (b>>5); |
46 | a -= b; a -= c; a ^= (c>>35); |
47 | b -= c; b -= a; b ^= (a<<49); |
48 | c -= a; c -= b; c ^= (b>>11); |
49 | a -= b; a -= c; a ^= (c>>12); |
50 | b -= c; b -= a; b ^= (a<<18); |
51 | c -= a; c -= b; c ^= (b>>22); |
52 | } |
53 | |
54 | |
// Load an unaligned little-endian word from memory.
//
// These routines are named Word32At(), Word64At() and Google1At().
// Long ago, the 32-bit version of this operation was implemented using
// signed characters, and hash values produced by the hash function built
// on that variant have been persisted.  That hash routine needs to remain
// backwards compatible, so we renamed its word-loading function to
// Google1At() to make it clear that it implements this special
// (sign-extending) behavior.
63 | // |
64 | // If a machine has alignment constraints or is big endian, we must |
65 | // load the word a byte at a time. Otherwise we can load the whole word |
66 | // from memory. |
67 | // |
68 | // [Plausibly, Word32At() and Word64At() should really be called |
69 | // UNALIGNED_LITTLE_ENDIAN_LOAD32() and UNALIGNED_LITTLE_ENDIAN_LOAD64() |
70 | // but that seems overly verbose.] |
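//
// For instance (an illustrative snippet, not code from this header), the two
// 32-bit loads differ exactly when one of the three lower bytes has its
// sign bit set:
//
//   const char buf[] = { '\x81', '\x02', '\x03', '\x04' };
//   uint32 w = Word32At(buf);    // 0x04030281: the plain little-endian word
//   uint32 g = Google1At(buf);   // 0x04030181: the low byte 0x81 was
//                                // sign-extended before being summed in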
71 | |
72 | #if !defined(NEED_ALIGNED_LOADS) && defined(IS_LITTLE_ENDIAN) |
73 | static inline uint64 Word64At(const char *ptr) { |
74 | return UNALIGNED_LOAD64(ptr); |
75 | } |
76 | |
77 | static inline uint32 Word32At(const char *ptr) { |
78 | return UNALIGNED_LOAD32(ptr); |
79 | } |
80 | |
81 | // This produces the same results as the byte-by-byte version below. |
82 | // Here, we mask off the sign bits and subtract off two copies. To |
83 | // see why this is the same as adding together the sign extensions, |
// start by considering the low-order byte.  If we load the byte unsigned
// and want to sign extend it, we can isolate its sign bit and subtract
// that from zero, which gives a run of one bits at and above the sign-bit
// position.  If we also subtract the sign bit from the byte itself and
// then add the two results together, we have a sign-extended byte as desired.
89 | // We can then operate on all four bytes in parallel because addition |
90 | // is associative and commutative. |
91 | // |
// For example, consider sign extending the bytes 0x01 and 0x81.  For 0x01,
// the sign bit is zero, and 0x01 - 0 - 0 == 0x01.  For 0x81, the sign bit
// is set, and we are computing 0x81 - 0x80 + (-0x80) == 0x01 + 0xFFFFFF80
// == 0xFFFFFF81.
95 | // |
// Similarly, if we start with 0x8200 and want to sign extend that, we end
// up calculating 0x8200 - 0x8000 + (-0x8000) == 0x0200 + 0xFFFF8000
// == 0xFFFF8200.
98 | // |
// Now suppose we have two such bytes at the same time.  Doesn't adding all
// those F's generate something weird?  Ignore the F's and reassociate
101 | // the addition. For 0x8281, processing the bytes one at a time (like |
102 | // we used to do) calculates |
103 | // [0x8200 - 0x8000 + (-0x8000)] + [0x0081 - 0x80 + (-0x80)] |
104 | // == 0x8281 - 0x8080 - 0x8000 - 0x80 |
105 | // == 0x8281 - 0x8080 - 0x8080 |
106 | |
107 | static inline uint32 Google1At(const char *ptr) { |
108 | uint32 t = UNALIGNED_LOAD32(ptr); |
109 | uint32 masked = t & 0x80808080; |
110 | return t - masked - masked; |
111 | } |
112 | |
113 | #else |
114 | |
115 | // NOTE: This code is not normally used or tested. |
116 | |
static inline uint64 Word64At(const char *ptr) {
  // Cast each byte through uint8 first so that a (possibly signed) char
  // does not sign-extend and corrupt the higher-order bytes of the result.
  return (static_cast<uint64>(static_cast<uint8>(ptr[0])) +
          (static_cast<uint64>(static_cast<uint8>(ptr[1])) << 8) +
          (static_cast<uint64>(static_cast<uint8>(ptr[2])) << 16) +
          (static_cast<uint64>(static_cast<uint8>(ptr[3])) << 24) +
          (static_cast<uint64>(static_cast<uint8>(ptr[4])) << 32) +
          (static_cast<uint64>(static_cast<uint8>(ptr[5])) << 40) +
          (static_cast<uint64>(static_cast<uint8>(ptr[6])) << 48) +
          (static_cast<uint64>(static_cast<uint8>(ptr[7])) << 56));
}
127 | |
static inline uint32 Word32At(const char *ptr) {
  // As above, cast through uint8 to avoid sign extension of negative chars.
  return (static_cast<uint32>(static_cast<uint8>(ptr[0])) +
          (static_cast<uint32>(static_cast<uint8>(ptr[1])) << 8) +
          (static_cast<uint32>(static_cast<uint8>(ptr[2])) << 16) +
          (static_cast<uint32>(static_cast<uint8>(ptr[3])) << 24));
}
134 | |
// Note: ptr2 is deliberately reinterpreted as pointing to signed characters;
// the per-byte sign extension below is what makes this fallback match the
// fast Google1At() above and the historical hash values that depend on it.
static inline uint32 Google1At(const char *ptr2) {
136 | const schar * ptr = reinterpret_cast<const schar *>(ptr2); |
137 | return (static_cast<schar>(ptr[0]) + |
138 | (static_cast<uint32>(ptr[1]) << 8) + |
139 | (static_cast<uint32>(ptr[2]) << 16) + |
140 | (static_cast<uint32>(ptr[3]) << 24)); |
141 | } |
142 | |
143 | #endif /* !NEED_ALIGNED_LOADS && IS_LITTLE_ENDIAN */ |
144 | |
// Historically, WORD_HASH has always been defined, because we have always
// run on machines that don't NEED_ALIGNED_LOADS and that are
// IS_LITTLE_ENDIAN.
//
// TODO(user): find occurrences of WORD_HASH and adjust the code to
// use more meaningful concepts.
#define WORD_HASH
151 | |
152 | #endif // UTIL_HASH_JENKINS_LOOKUP2_H__ |
153 | |
154 | |