1 | /* |
2 | * Copyright 2017-present Facebook, Inc. |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. |
6 | * You may obtain a copy of the License at |
7 | * |
8 | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | * |
10 | * Unless required by applicable law or agreed to in writing, software |
11 | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. |
15 | */ |
16 | |
17 | // This is version 2 of SpookyHash, incompatible with version 1. |
18 | // |
19 | // SpookyHash: a 128-bit noncryptographic hash function |
20 | // By Bob Jenkins, public domain |
21 | // Oct 31 2010: alpha, framework + SpookyHash::Mix appears right |
22 | // Oct 31 2011: alpha again, Mix only good to 2^^69 but rest appears right |
23 | // Dec 31 2011: beta, improved Mix, tested it for 2-bit deltas |
24 | // Feb 2 2012: production, same bits as beta |
25 | // Feb 5 2012: adjusted definitions of uint* to be more portable |
26 | // Mar 30 2012: 3 bytes/cycle, not 4. Alpha was 4 but wasn't thorough enough. |
27 | // August 5 2012: SpookyV2 (different results) |
28 | // |
29 | // Up to 3 bytes/cycle for long messages. Reasonably fast for short messages. |
30 | // All 1 or 2 bit deltas achieve avalanche within 1% bias per output bit. |
31 | // |
32 | // This was developed for and tested on 64-bit x86-compatible processors. |
33 | // It assumes the processor is little-endian. There is a macro |
34 | // controlling whether unaligned reads are allowed (by default they are). |
35 | // This should be an equally good hash on big-endian machines, but it will |
36 | // compute different results on them than on little-endian machines. |
37 | // |
38 | // Google's CityHash has similar specs to SpookyHash, and CityHash is faster |
39 | // on new Intel boxes. MD4 and MD5 also have similar specs, but they are orders |
40 | // of magnitude slower. CRCs are two or more times slower, but unlike |
41 | // SpookyHash, they have nice math for combining the CRCs of pieces to form |
42 | // the CRCs of wholes. There are also cryptographic hashes, but those are even |
43 | // slower than MD5. |
44 | // |
45 | |
46 | #pragma once |
47 | |
48 | #include <cstddef> |
49 | #include <cstdint> |
50 | |
51 | namespace folly { |
52 | namespace hash { |
53 | |
54 | // clang-format off |
55 | |
// SpookyHashV2: Bob Jenkins' 128-bit noncryptographic hash (version 2),
// exposing one-shot entry points (Hash128 / Hash64 / Hash32) and an
// incremental Init / Update / Final interface.  See the file header for
// provenance and portability notes (little-endian assumption; results are
// incompatible with SpookyHash V1).
class SpookyHashV2
{
public:
    //
    // SpookyHash: hash a single message in one call, produce 128-bit output
    //
    static void Hash128(
        const void *message,  // message to hash
        size_t length,        // length of message in bytes
        uint64_t *hash1,      // in/out: in seed 1, out hash value 1
        uint64_t *hash2);     // in/out: in seed 2, out hash value 2

    //
    // Hash64: hash a single message in one call, return 64-bit output
    //
    // Both 64-bit lanes are seeded from the same value; `&seed` doubles as
    // scratch space for the discarded second output word.
    //
    static uint64_t Hash64(
        const void *message,  // message to hash
        size_t length,        // length of message in bytes
        uint64_t seed)        // seed
    {
        uint64_t hash1 = seed;
        Hash128(message, length, &hash1, &seed);
        return hash1;
    }

    //
    // Hash32: hash a single message in one call, produce 32-bit output
    //
    // Both lanes are seeded with the zero-extended 32-bit seed; the result
    // is the low 32 bits of the first 64-bit hash word.
    //
    static uint32_t Hash32(
        const void *message,  // message to hash
        size_t length,        // length of message in bytes
        uint32_t seed)        // seed
    {
        uint64_t hash1 = seed, hash2 = seed;
        Hash128(message, length, &hash1, &hash2);
        return (uint32_t)hash1;
    }

    //
    // Init: initialize the context of a SpookyHash
    //
    void Init(
        uint64_t seed1,       // any 64-bit value will do, including 0
        uint64_t seed2);      // different seeds produce independent hashes

    //
    // Update: add a piece of a message to a SpookyHash state
    //
    void Update(
        const void *message,  // message fragment
        size_t length);       // length of message fragment in bytes


    //
    // Final: compute the hash for the current SpookyHash state
    //
    // This does not modify the state; you can keep updating it afterward
    //
    // The result is the same as if SpookyHash() had been called with
    // all the pieces concatenated into one message.
    //
    void Final(
        uint64_t *hash1,         // out only: first 64 bits of hash value.
        uint64_t *hash2) const;  // out only: second 64 bits of hash value.

    //
    // left rotate a 64-bit value by k bits
    //
    // NOTE(review): requires 0 < k < 64, otherwise the (64 - k) shift is
    // undefined behavior.  Every call site in this class passes a constant
    // in [5, 63], so the precondition always holds here.
    //
    static inline uint64_t Rot64(uint64_t x, int k)
    {
        return (x << k) | (x >> (64 - k));
    }

    //
    // This is used if the input is 96 bytes long or longer.
    //
    // The internal state is fully overwritten every 96 bytes.
    // Every input bit appears to cause at least 128 bits of entropy
    // before 96 other bytes are combined, when run forward or backward
    //   For every input bit,
    //   Two inputs differing in just that input bit
    //   Where "differ" means xor or subtraction
    //   And the base value is random
    //   When run forward or backwards one Mix
    // I tried 3 pairs of each; they all differed by at least 212 bits.
    //
    // (Rotation constants and the add/xor schedule are the author's
    // empirically-tested values; do not alter them — any change produces a
    // different, incompatible hash.)
    //
    static inline void Mix(
        const uint64_t *data,
        uint64_t &s0, uint64_t &s1, uint64_t &s2, uint64_t &s3,
        uint64_t &s4, uint64_t &s5, uint64_t &s6, uint64_t &s7,
        uint64_t &s8, uint64_t &s9, uint64_t &s10,uint64_t &s11)
    {
      s0 += data[0];    s2 ^= s10;   s11 ^= s0;   s0 = Rot64(s0,11);    s11 += s1;
      s1 += data[1];    s3 ^= s11;   s0 ^= s1;    s1 = Rot64(s1,32);    s0 += s2;
      s2 += data[2];    s4 ^= s0;    s1 ^= s2;    s2 = Rot64(s2,43);    s1 += s3;
      s3 += data[3];    s5 ^= s1;    s2 ^= s3;    s3 = Rot64(s3,31);    s2 += s4;
      s4 += data[4];    s6 ^= s2;    s3 ^= s4;    s4 = Rot64(s4,17);    s3 += s5;
      s5 += data[5];    s7 ^= s3;    s4 ^= s5;    s5 = Rot64(s5,28);    s4 += s6;
      s6 += data[6];    s8 ^= s4;    s5 ^= s6;    s6 = Rot64(s6,39);    s5 += s7;
      s7 += data[7];    s9 ^= s5;    s6 ^= s7;    s7 = Rot64(s7,57);    s6 += s8;
      s8 += data[8];    s10 ^= s6;   s7 ^= s8;    s8 = Rot64(s8,55);    s7 += s9;
      s9 += data[9];    s11 ^= s7;   s8 ^= s9;    s9 = Rot64(s9,54);    s8 += s10;
      s10 += data[10];  s0 ^= s8;    s9 ^= s10;   s10 = Rot64(s10,22);  s9 += s11;
      s11 += data[11];  s1 ^= s9;    s10 ^= s11;  s11 = Rot64(s11,46);  s10 += s0;
    }

    //
    // Mix all 12 inputs together so that h0, h1 are a hash of them all.
    //
    // For two inputs differing in just the input bits
    // Where "differ" means xor or subtraction
    // And the base value is random, or a counting value starting at that bit
    // The final result will have each bit of h0, h1 flip
    // For every input bit,
    // with probability 50 +- .3%
    // For every pair of input bits,
    // with probability 50 +- 3%
    //
    // This does not rely on the last Mix() call having already mixed some.
    // Two iterations was almost good enough for a 64-bit result, but a
    // 128-bit result is reported, so End() does three iterations.
    //
    static inline void EndPartial(
        uint64_t &h0, uint64_t &h1, uint64_t &h2, uint64_t &h3,
        uint64_t &h4, uint64_t &h5, uint64_t &h6, uint64_t &h7,
        uint64_t &h8, uint64_t &h9, uint64_t &h10,uint64_t &h11)
    {
        h11+= h1;    h2 ^= h11;   h1 = Rot64(h1,44);
        h0 += h2;    h3 ^= h0;    h2 = Rot64(h2,15);
        h1 += h3;    h4 ^= h1;    h3 = Rot64(h3,34);
        h2 += h4;    h5 ^= h2;    h4 = Rot64(h4,21);
        h3 += h5;    h6 ^= h3;    h5 = Rot64(h5,38);
        h4 += h6;    h7 ^= h4;    h6 = Rot64(h6,33);
        h5 += h7;    h8 ^= h5;    h7 = Rot64(h7,10);
        h6 += h8;    h9 ^= h6;    h8 = Rot64(h8,13);
        h7 += h9;    h10^= h7;    h9 = Rot64(h9,38);
        h8 += h10;   h11^= h8;    h10= Rot64(h10,53);
        h9 += h11;   h0 ^= h9;    h11= Rot64(h11,42);
        h10+= h0;    h1 ^= h10;   h0 = Rot64(h0,54);
    }

    // End: absorb the final 12-word block, then run three EndPartial rounds
    // to finalize h0..h11 (see quality notes on EndPartial above).
    static inline void End(
        const uint64_t *data,
        uint64_t &h0, uint64_t &h1, uint64_t &h2, uint64_t &h3,
        uint64_t &h4, uint64_t &h5, uint64_t &h6, uint64_t &h7,
        uint64_t &h8, uint64_t &h9, uint64_t &h10,uint64_t &h11)
    {
        h0 += data[0];   h1 += data[1];   h2 += data[2];   h3 += data[3];
        h4 += data[4];   h5 += data[5];   h6 += data[6];   h7 += data[7];
        h8 += data[8];   h9 += data[9];   h10 += data[10]; h11 += data[11];
        EndPartial(h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11);
        EndPartial(h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11);
        EndPartial(h0,h1,h2,h3,h4,h5,h6,h7,h8,h9,h10,h11);
    }

    //
    // The goal is for each bit of the input to expand into 128 bits of
    //   apparent entropy before it is fully overwritten.
    // n trials both set and cleared at least m bits of h0 h1 h2 h3
    //   n: 2   m: 29
    //   n: 3   m: 46
    //   n: 4   m: 57
    //   n: 5   m: 107
    //   n: 6   m: 146
    //   n: 7   m: 152
    // when run forwards or backwards
    // for all 1-bit and 2-bit diffs
    // with diffs defined by either xor or subtraction
    // with a base of all zeros plus a counter, or plus another bit, or random
    //
    static inline void ShortMix(uint64_t &h0, uint64_t &h1,
                                uint64_t &h2, uint64_t &h3)
    {
        h2 = Rot64(h2,50);  h2 += h3;  h0 ^= h2;
        h3 = Rot64(h3,52);  h3 += h0;  h1 ^= h3;
        h0 = Rot64(h0,30);  h0 += h1;  h2 ^= h0;
        h1 = Rot64(h1,41);  h1 += h2;  h3 ^= h1;
        h2 = Rot64(h2,54);  h2 += h3;  h0 ^= h2;
        h3 = Rot64(h3,48);  h3 += h0;  h1 ^= h3;
        h0 = Rot64(h0,38);  h0 += h1;  h2 ^= h0;
        h1 = Rot64(h1,37);  h1 += h2;  h3 ^= h1;
        h2 = Rot64(h2,62);  h2 += h3;  h0 ^= h2;
        h3 = Rot64(h3,34);  h3 += h0;  h1 ^= h3;
        h0 = Rot64(h0,5);   h0 += h1;  h2 ^= h0;
        h1 = Rot64(h1,36);  h1 += h2;  h3 ^= h1;
    }

    //
    // Mix all 4 inputs together so that h0, h1 are a hash of them all.
    //
    // For two inputs differing in just the input bits
    // Where "differ" means xor or subtraction
    // And the base value is random, or a counting value starting at that bit
    // The final result will have each bit of h0, h1 flip
    // For every input bit,
    // with probability 50 +- .3% (it is probably better than that)
    // For every pair of input bits,
    // with probability 50 +- .75% (the worst case is approximately that)
    //
    static inline void ShortEnd(uint64_t &h0, uint64_t &h1,
                                uint64_t &h2, uint64_t &h3)
    {
        h3 ^= h2;  h2 = Rot64(h2,15);  h3 += h2;
        h0 ^= h3;  h3 = Rot64(h3,52);  h0 += h3;
        h1 ^= h0;  h0 = Rot64(h0,26);  h1 += h0;
        h2 ^= h1;  h1 = Rot64(h1,51);  h2 += h1;
        h3 ^= h2;  h2 = Rot64(h2,28);  h3 += h2;
        h0 ^= h3;  h3 = Rot64(h3,9);   h0 += h3;
        h1 ^= h0;  h0 = Rot64(h0,47);  h1 += h0;
        h2 ^= h1;  h1 = Rot64(h1,54);  h2 += h1;
        h3 ^= h2;  h2 = Rot64(h2,32);  h3 += h2;
        h0 ^= h3;  h3 = Rot64(h3,25);  h0 += h3;
        h1 ^= h0;  h0 = Rot64(h0,63);  h1 += h0;
    }

private:

    //
    // Short is used for messages under 192 bytes in length
    // Short has a low startup cost, the normal mode is good for long
    // keys, the cost crossover is at about 192 bytes.  The two modes were
    // held to the same quality bar.
    //
    static void Short(
        const void *message,  // message (byte array, not necessarily aligned)
        size_t length,        // length of message (in bytes)
        uint64_t *hash1,      // in/out: in the seed, out the hash value
        uint64_t *hash2);     // in/out: in the seed, out the hash value

    // number of uint64_t's in internal state
    static constexpr size_t sc_numVars = 12;

    // size of the internal state, in bytes (12 * 8 = 96)
    static constexpr size_t sc_blockSize = sc_numVars*8;

    // size of buffer of unhashed data, in bytes (192; also the Short/long
    // crossover point documented on Short() above)
    static constexpr size_t sc_bufSize = 2*sc_blockSize;

    //
    // sc_const: a constant which:
    //  * is not zero
    //  * is odd
    //  * is a not-very-regular mix of 1's and 0's
    //  * does not need any other special mathematical properties
    //
    static constexpr uint64_t sc_const = 0xdeadbeefdeadbeefULL;

    uint64_t m_data[2*sc_numVars];  // unhashed data, for partial messages
    uint64_t m_state[sc_numVars];   // internal state of the hash
    size_t m_length;                // total length of the input so far
    uint8_t m_remainder;            // length of unhashed data stashed in m_data
                                    // (uint8_t suffices: always < sc_bufSize == 192)
};
308 | |
309 | // clang-format on |
310 | |
311 | } // namespace hash |
312 | } // namespace folly |
313 | |