/**************************************************************************/
/*  hashfuncs.h                                                           */
/**************************************************************************/
/*                         This file is part of:                          */
/*                             GODOT ENGINE                               */
/*                        https://godotengine.org                         */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur.                  */
/*                                                                        */
/* Permission is hereby granted, free of charge, to any person obtaining  */
/* a copy of this software and associated documentation files (the        */
/* "Software"), to deal in the Software without restriction, including    */
/* without limitation the rights to use, copy, modify, merge, publish,    */
/* distribute, sublicense, and/or sell copies of the Software, and to     */
/* permit persons to whom the Software is furnished to do so, subject to  */
/* the following conditions:                                              */
/*                                                                        */
/* The above copyright notice and this permission notice shall be         */
/* included in all copies or substantial portions of the Software.        */
/*                                                                        */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,        */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF     */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY   */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE      */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                 */
/**************************************************************************/

#ifndef HASHFUNCS_H
#define HASHFUNCS_H

#include "core/math/aabb.h"
#include "core/math/math_defs.h"
#include "core/math/math_funcs.h"
#include "core/math/rect2.h"
#include "core/math/rect2i.h"
#include "core/math/vector2.h"
#include "core/math/vector2i.h"
#include "core/math/vector3.h"
#include "core/math/vector3i.h"
#include "core/math/vector4.h"
#include "core/math/vector4i.h"
#include "core/object/object_id.h"
#include "core/string/node_path.h"
#include "core/string/string_name.h"
#include "core/string/ustring.h"
#include "core/templates/rid.h"
#include "core/typedefs.h"

/**
 * Hashing functions
 */

/**
 * DJB2 hash function (xor variant).
 * @param p_cstr Null-terminated C string to hash.
 * @return 32-bit hash code.
 */
static _FORCE_INLINE_ uint32_t hash_djb2(const char *p_cstr) {
	const unsigned char *chr = (const unsigned char *)p_cstr;
	uint32_t hash = 5381;
	uint32_t c = *chr++;

	while (c) {
		hash = ((hash << 5) + hash) ^ c; /* hash * 33 ^ c */
		c = *chr++;
	}

	return hash;
}

static _FORCE_INLINE_ uint32_t hash_djb2_buffer(const uint8_t *p_buff, int p_len, uint32_t p_prev = 5381) {
	uint32_t hash = p_prev;

	for (int i = 0; i < p_len; i++) {
		hash = ((hash << 5) + hash) ^ p_buff[i]; /* hash * 33 ^ c */
	}

	return hash;
}

static _FORCE_INLINE_ uint32_t hash_djb2_one_32(uint32_t p_in, uint32_t p_prev = 5381) {
	return ((p_prev << 5) + p_prev) ^ p_in;
}
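
// Usage sketch (illustrative only, not part of the engine API): the `p_prev`
// parameter lets the DJB2 helpers be chained, so several values can be folded
// into one running hash. The function and variable names below are hypothetical.
//
//     static uint32_t hash_name_and_id(const char *p_name, uint32_t p_id) {
//         uint32_t h = hash_djb2(p_name);         // hash the string first
//         h = hash_djb2_one_32(p_id, h);          // fold in a 32-bit value
//         return h;
//     }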

/**
 * Thomas Wang's 64-bit to 32-bit hash function:
 * https://web.archive.org/web/20071223173210/https:/www.concentric.net/~Ttwang/tech/inthash.htm
 *
 * @param p_int 64-bit unsigned integer key to be hashed.
 * @return Unsigned 32-bit hash code.
 */
static _FORCE_INLINE_ uint32_t hash_one_uint64(const uint64_t p_int) {
	uint64_t v = p_int;
	v = (~v) + (v << 18); // v = (v << 18) - v - 1;
	v = v ^ (v >> 31);
	v = v * 21; // v = (v + (v << 2)) + (v << 4);
	v = v ^ (v >> 11);
	v = v + (v << 6);
	v = v ^ (v >> 22);
	return uint32_t(v);
}

#define HASH_MURMUR3_SEED 0x7F07C65
// MurmurHash3, 32-bit version.
// All MurmurHash versions are public domain software, and the author disclaims all copyright to their code.

static _FORCE_INLINE_ uint32_t hash_murmur3_one_32(uint32_t p_in, uint32_t p_seed = HASH_MURMUR3_SEED) {
	p_in *= 0xcc9e2d51;
	p_in = (p_in << 15) | (p_in >> 17);
	p_in *= 0x1b873593;

	p_seed ^= p_in;
	p_seed = (p_seed << 13) | (p_seed >> 19);
	p_seed = p_seed * 5 + 0xe6546b64;

	return p_seed;
}

static _FORCE_INLINE_ uint32_t hash_murmur3_one_float(float p_in, uint32_t p_seed = HASH_MURMUR3_SEED) {
	union {
		float f;
		uint32_t i;
	} u;

	// Normalize +/- 0.0 and NaN values so they hash the same.
	if (p_in == 0.0f) {
		u.f = 0.0;
	} else if (Math::is_nan(p_in)) {
		u.f = NAN;
	} else {
		u.f = p_in;
	}

	return hash_murmur3_one_32(u.i, p_seed);
}

static _FORCE_INLINE_ uint32_t hash_murmur3_one_64(uint64_t p_in, uint32_t p_seed = HASH_MURMUR3_SEED) {
	p_seed = hash_murmur3_one_32(p_in & 0xFFFFFFFF, p_seed);
	return hash_murmur3_one_32(p_in >> 32, p_seed);
}

static _FORCE_INLINE_ uint32_t hash_murmur3_one_double(double p_in, uint32_t p_seed = HASH_MURMUR3_SEED) {
	union {
		double d;
		uint64_t i;
	} u;

	// Normalize +/- 0.0 and NaN values so they hash the same.
	if (p_in == 0.0f) {
		u.d = 0.0;
	} else if (Math::is_nan(p_in)) {
		u.d = NAN;
	} else {
		u.d = p_in;
	}

	return hash_murmur3_one_64(u.i, p_seed);
}

static _FORCE_INLINE_ uint32_t hash_murmur3_one_real(real_t p_in, uint32_t p_seed = HASH_MURMUR3_SEED) {
#ifdef REAL_T_IS_DOUBLE
	return hash_murmur3_one_double(p_in, p_seed);
#else
	return hash_murmur3_one_float(p_in, p_seed);
#endif
}

static _FORCE_INLINE_ uint32_t hash_rotl32(uint32_t x, int8_t r) {
	return (x << r) | (x >> (32 - r));
}

static _FORCE_INLINE_ uint32_t hash_fmix32(uint32_t h) {
	h ^= h >> 16;
	h *= 0x85ebca6b;
	h ^= h >> 13;
	h *= 0xc2b2ae35;
	h ^= h >> 16;

	return h;
}

static _FORCE_INLINE_ uint32_t hash_murmur3_buffer(const void *key, int length, const uint32_t seed = HASH_MURMUR3_SEED) {
	// Although not required, the default seed is a random prime number.
	const uint8_t *data = (const uint8_t *)key;
	const int nblocks = length / 4;

	uint32_t h1 = seed;

	const uint32_t c1 = 0xcc9e2d51;
	const uint32_t c2 = 0x1b873593;

	const uint32_t *blocks = (const uint32_t *)(data + nblocks * 4);

	for (int i = -nblocks; i; i++) {
		uint32_t k1 = blocks[i];

		k1 *= c1;
		k1 = hash_rotl32(k1, 15);
		k1 *= c2;

		h1 ^= k1;
		h1 = hash_rotl32(h1, 13);
		h1 = h1 * 5 + 0xe6546b64;
	}

	const uint8_t *tail = (const uint8_t *)(data + nblocks * 4);

	uint32_t k1 = 0;

	switch (length & 3) {
		case 3:
			k1 ^= tail[2] << 16;
			[[fallthrough]];
		case 2:
			k1 ^= tail[1] << 8;
			[[fallthrough]];
		case 1:
			k1 ^= tail[0];
			k1 *= c1;
			k1 = hash_rotl32(k1, 15);
			k1 *= c2;
			h1 ^= k1;
	}

	// Finalize with additional bit mixing.
	h1 ^= length;
	return hash_fmix32(h1);
}
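
// Usage sketch (illustrative only): hashing a raw block of memory in one call.
// In contrast with the hash_murmur3_one_* helpers above, the result is already
// finalized with hash_fmix32(); the one-at-a-time helpers leave finalization to
// the caller (see HashMapHasherDefault below). The buffer contents are hypothetical.
//
//     const uint8_t bytes[7] = { 1, 2, 3, 4, 5, 6, 7 };
//     uint32_t h = hash_murmur3_buffer(bytes, sizeof(bytes)); // uses HASH_MURMUR3_SEED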

static _FORCE_INLINE_ uint32_t hash_djb2_one_float(double p_in, uint32_t p_prev = 5381) {
	union {
		double d;
		uint64_t i;
	} u;

	// Normalize +/- 0.0 and NaN values so they hash the same.
	if (p_in == 0.0f) {
		u.d = 0.0;
	} else if (Math::is_nan(p_in)) {
		u.d = NAN;
	} else {
		u.d = p_in;
	}

	return ((p_prev << 5) + p_prev) + hash_one_uint64(u.i);
}

template <class T>
static _FORCE_INLINE_ uint32_t hash_make_uint32_t(T p_in) {
	union {
		T t;
		uint32_t _u32;
	} _u;
	_u._u32 = 0;
	_u.t = p_in;
	return _u._u32;
}

static _FORCE_INLINE_ uint64_t hash_djb2_one_float_64(double p_in, uint64_t p_prev = 5381) {
	union {
		double d;
		uint64_t i;
	} u;

	// Normalize +/- 0.0 and NaN values so they hash the same.
	if (p_in == 0.0f) {
		u.d = 0.0;
	} else if (Math::is_nan(p_in)) {
		u.d = NAN;
	} else {
		u.d = p_in;
	}

	return ((p_prev << 5) + p_prev) + u.i;
}

static _FORCE_INLINE_ uint64_t hash_djb2_one_64(uint64_t p_in, uint64_t p_prev = 5381) {
	return ((p_prev << 5) + p_prev) ^ p_in;
}

template <class T>
static _FORCE_INLINE_ uint64_t hash_make_uint64_t(T p_in) {
	union {
		T t;
		uint64_t _u64;
	} _u;
	_u._u64 = 0; // in case p_in is smaller

	_u.t = p_in;
	return _u._u64;
}
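
// Usage sketch (illustrative only): hash_make_uint32_t / hash_make_uint64_t
// reinterpret the bits of a small value as an unsigned integer (zero-padding
// when the type is narrower), so it can be fed to the integer hashers above.
// The variable names are hypothetical.
//
//     double hypothetical_value = 2.5;
//     uint64_t bits = hash_make_uint64_t(hypothetical_value);
//     uint64_t h = hash_djb2_one_64(bits); // or chain with a previous hash via p_prev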

template <class T>
class Ref;

struct HashMapHasherDefault {
	// Generic hash function for any pointer type (hashes the pointer value itself).
	template <class T>
	static _FORCE_INLINE_ uint32_t hash(const T *p_pointer) { return hash_one_uint64((uint64_t)p_pointer); }

	template <class T>
	static _FORCE_INLINE_ uint32_t hash(const Ref<T> &p_ref) { return hash_one_uint64((uint64_t)p_ref.operator->()); }

	static _FORCE_INLINE_ uint32_t hash(const String &p_string) { return p_string.hash(); }
	static _FORCE_INLINE_ uint32_t hash(const char *p_cstr) { return hash_djb2(p_cstr); }
	static _FORCE_INLINE_ uint32_t hash(const wchar_t p_wchar) { return hash_fmix32(p_wchar); }
	static _FORCE_INLINE_ uint32_t hash(const char16_t p_uchar) { return hash_fmix32(p_uchar); }
	static _FORCE_INLINE_ uint32_t hash(const char32_t p_uchar) { return hash_fmix32(p_uchar); }
	static _FORCE_INLINE_ uint32_t hash(const RID &p_rid) { return hash_one_uint64(p_rid.get_id()); }
	static _FORCE_INLINE_ uint32_t hash(const CharString &p_char_string) { return hash_djb2(p_char_string.ptr()); }
	static _FORCE_INLINE_ uint32_t hash(const StringName &p_string_name) { return p_string_name.hash(); }
	static _FORCE_INLINE_ uint32_t hash(const NodePath &p_path) { return p_path.hash(); }
	static _FORCE_INLINE_ uint32_t hash(const ObjectID &p_id) { return hash_one_uint64(p_id); }

	static _FORCE_INLINE_ uint32_t hash(const uint64_t p_int) { return hash_one_uint64(p_int); }
	static _FORCE_INLINE_ uint32_t hash(const int64_t p_int) { return hash_one_uint64(p_int); }
	static _FORCE_INLINE_ uint32_t hash(const float p_float) { return hash_murmur3_one_float(p_float); }
	static _FORCE_INLINE_ uint32_t hash(const double p_double) { return hash_murmur3_one_double(p_double); }
	static _FORCE_INLINE_ uint32_t hash(const uint32_t p_int) { return hash_fmix32(p_int); }
	static _FORCE_INLINE_ uint32_t hash(const int32_t p_int) { return hash_fmix32(p_int); }
	static _FORCE_INLINE_ uint32_t hash(const uint16_t p_int) { return hash_fmix32(p_int); }
	static _FORCE_INLINE_ uint32_t hash(const int16_t p_int) { return hash_fmix32(p_int); }
	static _FORCE_INLINE_ uint32_t hash(const uint8_t p_int) { return hash_fmix32(p_int); }
	static _FORCE_INLINE_ uint32_t hash(const int8_t p_int) { return hash_fmix32(p_int); }
	static _FORCE_INLINE_ uint32_t hash(const Vector2i &p_vec) {
		uint32_t h = hash_murmur3_one_32(p_vec.x);
		h = hash_murmur3_one_32(p_vec.y, h);
		return hash_fmix32(h);
	}
	static _FORCE_INLINE_ uint32_t hash(const Vector3i &p_vec) {
		uint32_t h = hash_murmur3_one_32(p_vec.x);
		h = hash_murmur3_one_32(p_vec.y, h);
		h = hash_murmur3_one_32(p_vec.z, h);
		return hash_fmix32(h);
	}
	static _FORCE_INLINE_ uint32_t hash(const Vector4i &p_vec) {
		uint32_t h = hash_murmur3_one_32(p_vec.x);
		h = hash_murmur3_one_32(p_vec.y, h);
		h = hash_murmur3_one_32(p_vec.z, h);
		h = hash_murmur3_one_32(p_vec.w, h);
		return hash_fmix32(h);
	}
	static _FORCE_INLINE_ uint32_t hash(const Vector2 &p_vec) {
		uint32_t h = hash_murmur3_one_real(p_vec.x);
		h = hash_murmur3_one_real(p_vec.y, h);
		return hash_fmix32(h);
	}
	static _FORCE_INLINE_ uint32_t hash(const Vector3 &p_vec) {
		uint32_t h = hash_murmur3_one_real(p_vec.x);
		h = hash_murmur3_one_real(p_vec.y, h);
		h = hash_murmur3_one_real(p_vec.z, h);
		return hash_fmix32(h);
	}
	static _FORCE_INLINE_ uint32_t hash(const Vector4 &p_vec) {
		uint32_t h = hash_murmur3_one_real(p_vec.x);
		h = hash_murmur3_one_real(p_vec.y, h);
		h = hash_murmur3_one_real(p_vec.z, h);
		h = hash_murmur3_one_real(p_vec.w, h);
		return hash_fmix32(h);
	}
	static _FORCE_INLINE_ uint32_t hash(const Rect2i &p_rect) {
		uint32_t h = hash_murmur3_one_32(p_rect.position.x);
		h = hash_murmur3_one_32(p_rect.position.y, h);
		h = hash_murmur3_one_32(p_rect.size.x, h);
		h = hash_murmur3_one_32(p_rect.size.y, h);
		return hash_fmix32(h);
	}
	static _FORCE_INLINE_ uint32_t hash(const Rect2 &p_rect) {
		uint32_t h = hash_murmur3_one_real(p_rect.position.x);
		h = hash_murmur3_one_real(p_rect.position.y, h);
		h = hash_murmur3_one_real(p_rect.size.x, h);
		h = hash_murmur3_one_real(p_rect.size.y, h);
		return hash_fmix32(h);
	}
	static _FORCE_INLINE_ uint32_t hash(const AABB &p_aabb) {
		uint32_t h = hash_murmur3_one_real(p_aabb.position.x);
		h = hash_murmur3_one_real(p_aabb.position.y, h);
		h = hash_murmur3_one_real(p_aabb.position.z, h);
		h = hash_murmur3_one_real(p_aabb.size.x, h);
		h = hash_murmur3_one_real(p_aabb.size.y, h);
		h = hash_murmur3_one_real(p_aabb.size.z, h);
		return hash_fmix32(h);
	}
};

// TODO: Fold this into HashMapHasherDefault once C++20 concepts are allowed
template <class T>
struct HashableHasher {
	static _FORCE_INLINE_ uint32_t hash(const T &hashable) { return hashable.hash(); }
};
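
// Usage sketch (illustrative only): HashableHasher forwards to a type's own
// hash() method, so any type exposing `uint32_t hash() const` can supply its
// hash through it. The type below is hypothetical, and pairing these hashers
// with Godot's hash-based containers (via a Hasher template parameter) is
// assumed behavior of headers not included here.
//
//     struct HypotheticalId {
//         uint64_t value;
//         uint32_t hash() const { return hash_one_uint64(value); }
//         bool operator==(const HypotheticalId &p_other) const { return value == p_other.value; }
//     };
//
//     HypotheticalId id;
//     id.value = 42;
//     uint32_t h = HashableHasher<HypotheticalId>::hash(id);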

template <typename T>
struct HashMapComparatorDefault {
	static bool compare(const T &p_lhs, const T &p_rhs) {
		return p_lhs == p_rhs;
	}
};

template <>
struct HashMapComparatorDefault<float> {
	static bool compare(const float &p_lhs, const float &p_rhs) {
		return (p_lhs == p_rhs) || (Math::is_nan(p_lhs) && Math::is_nan(p_rhs));
	}
};

template <>
struct HashMapComparatorDefault<double> {
	static bool compare(const double &p_lhs, const double &p_rhs) {
		return (p_lhs == p_rhs) || (Math::is_nan(p_lhs) && Math::is_nan(p_rhs));
	}
};

template <>
struct HashMapComparatorDefault<Vector2> {
	static bool compare(const Vector2 &p_lhs, const Vector2 &p_rhs) {
		return ((p_lhs.x == p_rhs.x) || (Math::is_nan(p_lhs.x) && Math::is_nan(p_rhs.x))) && ((p_lhs.y == p_rhs.y) || (Math::is_nan(p_lhs.y) && Math::is_nan(p_rhs.y)));
	}
};

template <>
struct HashMapComparatorDefault<Vector3> {
	static bool compare(const Vector3 &p_lhs, const Vector3 &p_rhs) {
		return ((p_lhs.x == p_rhs.x) || (Math::is_nan(p_lhs.x) && Math::is_nan(p_rhs.x))) && ((p_lhs.y == p_rhs.y) || (Math::is_nan(p_lhs.y) && Math::is_nan(p_rhs.y))) && ((p_lhs.z == p_rhs.z) || (Math::is_nan(p_lhs.z) && Math::is_nan(p_rhs.z)));
	}
};
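
// Informal note on the specializations above: under IEEE 754, `NAN == NAN` is
// false, so the generic operator== comparator would never consider two NaN
// keys equal, even though the murmur3 float hashers above normalize every NaN
// to the same bit pattern. The specializations make NaN compare equal to NaN
// so such keys behave consistently. A minimal sketch:
//
//     float a = NAN, b = NAN;
//     bool plain_equality = (a == b);                                       // false
//     bool comparator = HashMapComparatorDefault<float>::compare(a, b);     // true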

constexpr uint32_t HASH_TABLE_SIZE_MAX = 29;

const uint32_t hash_table_size_primes[HASH_TABLE_SIZE_MAX] = {
	5,
	13,
	23,
	47,
	97,
	193,
	389,
	769,
	1543,
	3079,
	6151,
	12289,
	24593,
	49157,
	98317,
	196613,
	393241,
	786433,
	1572869,
	3145739,
	6291469,
	12582917,
	25165843,
	50331653,
	100663319,
	201326611,
	402653189,
	805306457,
	1610612741,
};

// Computed with elem_i = UINT64_C(0xFFFFFFFFFFFFFFFF) / d_i + 1, where d_i is the i-th element of the above array.
const uint64_t hash_table_size_primes_inv[HASH_TABLE_SIZE_MAX] = {
	3689348814741910324,
	1418980313362273202,
	802032351030850071,
	392483916461905354,
	190172619316593316,
	95578984837873325,
	47420935922132524,
	23987963684927896,
	11955116055547344,
	5991147799191151,
	2998982941588287,
	1501077717772769,
	750081082979285,
	375261795343686,
	187625172388393,
	93822606204624,
	46909513691883,
	23456218233098,
	11728086747027,
	5864041509391,
	2932024948977,
	1466014921160,
	733007198436,
	366503839517,
	183251896093,
	91625960335,
	45812983922,
	22906489714,
	11453246088
};

/**
 * Fastmod computes (n mod d) given the precomputed constant c, and is much faster than computing n % d directly.
 * The implementation of fastmod is based on the following paper by Daniel Lemire et al.:
 * Faster Remainder by Direct Computation: Applications to Compilers and Software Libraries
 * https://arxiv.org/abs/1902.01961
 */
static _FORCE_INLINE_ uint32_t fastmod(const uint32_t n, const uint64_t c, const uint32_t d) {
#if defined(_MSC_VER)
	// Returns the upper 64 bits of the product of two 64-bit unsigned integers.
	// This intrinsic function is required since MSVC does not support unsigned 128-bit integers.
#if defined(_M_X64) || defined(_M_ARM64)
	return __umulh(c * n, d);
#else
	// Fall back to the slower method on 32-bit platforms.
	return n % d;
#endif // _M_X64 || _M_ARM64
#else
#ifdef __SIZEOF_INT128__
	// __extension__ suppresses the compiler warning about __int128 being a non-standard extension.
	uint64_t lowbits = c * n;
	__extension__ typedef unsigned __int128 uint128;
	return static_cast<uint64_t>(((uint128)lowbits * d) >> 64);
#else
	// Fall back to the slower method if no 128-bit unsigned integer type is available.
	return n % d;
#endif // __SIZEOF_INT128__
#endif // _MSC_VER
}
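
// Worked example (informal, values taken from the tables above): for the first
// table size d = hash_table_size_primes[0] = 5, the precomputed constant is
// c = hash_table_size_primes_inv[0] = UINT64_C(0xFFFFFFFFFFFFFFFF) / 5 + 1 = 3689348814741910324,
// and fastmod(n, c, d) returns the same result as n % d. For instance:
//
//     uint32_t r = fastmod(17, hash_table_size_primes_inv[0], hash_table_size_primes[0]); // r == 17 % 5 == 2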

#endif // HASHFUNCS_H