// Copyright (c) 2014, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#ifndef RUNTIME_VM_HASH_TABLE_H_
#define RUNTIME_VM_HASH_TABLE_H_

#include "platform/assert.h"
#include "vm/object.h"

namespace dart {

// OVERVIEW:
//
// Hash maps and hash sets all use an Array as backing storage. At the lowest
// level is a generic open-addressing table that supports deletion.
//  - HashTable
// The next layer provides ordering and iteration functionality:
//  - UnorderedHashTable
//  - LinkedListHashTable (TODO(koda): Implement.)
// The utility class HashTables handles growth and conversion.
// The next layer fixes the payload size and provides a natural interface:
//  - HashMap
//  - HashSet
// Combining either of these with an iteration strategy, we get the templates
// intended for use outside this file:
//  - UnorderedHashMap
//  - LinkedListHashMap
//  - UnorderedHashSet
//  - LinkedListHashSet
// Finally, each of these can be specialized with KeyTraits to support any
// set of lookup key types (e.g., looking up a char* key in a set of String
// objects) and any equality and hash code computation.
//
// The classes all wrap an Array handle, and methods like HashSet::Insert can
// trigger growth into a new Array, updating the handle. Debug mode asserts
// that 'Release' was called once to access the final array before destruction.
// NOTE: The handle returned by 'Release' is cleared by ~HashTable.
//
// Example use:
//  typedef UnorderedHashMap<FooTraits> FooMap;
//  ...
//  FooMap cache(get_foo_cache());
//  cache.UpdateOrInsert(name0, obj0);
//  cache.UpdateOrInsert(name1, obj1);
//  ...
//  set_foo_cache(cache.Release());
//
// If you *know* that no mutating operations were called, you can optimize:
//  ...
//  obj ^= cache.GetOrNull(name);
//  ASSERT(cache.Release().raw() == get_foo_cache());
//
// TODO(koda): When exposing these to Dart code, document and assert that
// KeyTraits methods must not run Dart code (since the C++ code doesn't check
// for concurrent modification).

// Open-addressing hash table template using an Array as backing storage.
//
// The elements of the array are partitioned into entries:
//  [ header | metadata | entry0 | entry1 | ... | entryN ]
// Each entry contains a key, followed by zero or more payload components,
// and has 3 possible states: unused, occupied, or deleted.
// The header tracks the number of entries in each state.
// Any object except the backing storage array and
// Object::transition_sentinel() may be stored as a key. Any object may be
// stored in a payload.
//
// Parameters
// KeyTraits: defines static methods
//   bool IsMatch(const Key& key, const Object& obj) and
//   uword Hash(const Key& key) for any number of desired lookup key types.
// kPayloadSize: number of components of the payload in each entry.
// kMetaDataSize: number of elements reserved (e.g., for iteration order data).
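//
// As an illustrative sketch (not part of this header), a traits class for
// String keys might look as follows. Note that the non-product stats code
// below also expects static Name() and ReportStats() methods, and the
// InsertNew* methods expect a static NewKey() factory:
//  class StringKeyTraits {
//   public:
//    static const char* Name() { return "StringKeyTraits"; }
//    static bool ReportStats() { return false; }
//    static bool IsMatch(const Object& a, const Object& b) {
//      return String::Cast(a).Equals(String::Cast(b));
//    }
//    static uword Hash(const Object& key) {
//      return String::Cast(key).Hash();
//    }
//  };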
template <typename KeyTraits, intptr_t kPayloadSize, intptr_t kMetaDataSize>
class HashTable : public ValueObject {
 public:
  typedef KeyTraits Traits;
  // Uses the passed-in handles for all handle operations.
  // 'Release' must be called at the end to obtain the final table
  // after potential growth/shrinkage.
  HashTable(Object* key, Smi* index, Array* data)
      : key_handle_(key),
        smi_handle_(index),
        data_(data),
        released_data_(NULL) {}
  // Uses 'zone' for handle allocation. 'Release' must be called at the end
  // to obtain the final table after potential growth/shrinkage.
  HashTable(Zone* zone, ArrayPtr data)
      : key_handle_(&Object::Handle(zone)),
        smi_handle_(&Smi::Handle(zone)),
        data_(&Array::Handle(zone, data)),
        released_data_(NULL) {}

  // Returns the final table. The handle is cleared when this HashTable is
  // destroyed.
  Array& Release() {
    ASSERT(data_ != NULL);
    ASSERT(released_data_ == NULL);
    // Ensure that no methods are called after 'Release'.
    released_data_ = data_;
    data_ = NULL;
    return *released_data_;
  }

  ~HashTable() {
    // In DEBUG mode, calling 'Release' is mandatory.
    ASSERT(data_ == NULL);
    if (released_data_ != NULL) {
      *released_data_ = Array::null();
    }
  }

  // Returns a backing storage size such that 'num_occupied' distinct keys can
  // be inserted into the table.
  static intptr_t ArrayLengthForNumOccupied(intptr_t num_occupied) {
    // Because we use quadratic (actually triangle number) probing it is
    // important that the size is a power of two (otherwise we could fail to
    // find an empty slot). This is described in Knuth's The Art of Computer
    // Programming Volume 2, Chapter 6.4, exercise 20 (solution in the
    // appendix, 2nd edition).
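    // For instance (illustrative), with num_entries == 8 and a hash of 0, the
    // probe sequence visits cumulative offsets 0, 1, 3, 6, 10, 15, 21, 28,
    // i.e. slots 0, 1, 3, 6, 2, 7, 5, 4 modulo 8: every slot exactly once.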
    intptr_t num_entries = Utils::RoundUpToPowerOfTwo(num_occupied + 1);
    return kFirstKeyIndex + (kEntrySize * num_entries);
  }

  // Initializes an empty table.
  void Initialize() const {
    ASSERT(data_->Length() >= ArrayLengthForNumOccupied(0));
    *smi_handle_ = Smi::New(0);
    data_->SetAt(kOccupiedEntriesIndex, *smi_handle_);
    data_->SetAt(kDeletedEntriesIndex, *smi_handle_);

#if !defined(PRODUCT)
    data_->SetAt(kNumGrowsIndex, *smi_handle_);
    data_->SetAt(kNumLT5LookupsIndex, *smi_handle_);
    data_->SetAt(kNumLT25LookupsIndex, *smi_handle_);
    data_->SetAt(kNumGT25LookupsIndex, *smi_handle_);
    data_->SetAt(kNumProbesIndex, *smi_handle_);
#endif  // !defined(PRODUCT)

    for (intptr_t i = kHeaderSize; i < data_->Length(); ++i) {
      data_->SetAt(i, UnusedMarker());
    }
  }

  // Returns whether 'key' matches any key in the table.
  template <typename Key>
  bool ContainsKey(const Key& key) const {
    return FindKey(key) != -1;
  }

  // Returns the entry that matches 'key', or -1 if none exists.
  template <typename Key>
  intptr_t FindKey(const Key& key) const {
    const intptr_t num_entries = NumEntries();
    ASSERT(NumOccupied() < num_entries);
    // TODO(koda): Add salt.
    NOT_IN_PRODUCT(intptr_t collisions = 0;)
    uword hash = KeyTraits::Hash(key);
    ASSERT(Utils::IsPowerOfTwo(num_entries));
    intptr_t probe = hash & (num_entries - 1);
    int probe_distance = 1;
    while (true) {
      if (IsUnused(probe)) {
        NOT_IN_PRODUCT(UpdateCollisions(collisions);)
        return -1;
      } else if (!IsDeleted(probe)) {
        *key_handle_ = GetKey(probe);
        if (KeyTraits::IsMatch(key, *key_handle_)) {
          NOT_IN_PRODUCT(UpdateCollisions(collisions);)
          return probe;
        }
        NOT_IN_PRODUCT(collisions += 1;)
      }
      // Advance probe. See ArrayLengthForNumOccupied comment for
      // explanation of how we know this hits all slots.
      probe = (probe + probe_distance) & (num_entries - 1);
      probe_distance++;
    }
    UNREACHABLE();
    return -1;
  }

  // Sets *entry to either:
  //  - an occupied entry matching 'key', and returns true, or
  //  - an unused/deleted entry where a matching key may be inserted,
  //    and returns false.
  template <typename Key>
  bool FindKeyOrDeletedOrUnused(const Key& key, intptr_t* entry) const {
    const intptr_t num_entries = NumEntries();
    ASSERT(entry != NULL);
    ASSERT(NumOccupied() < num_entries);
    NOT_IN_PRODUCT(intptr_t collisions = 0;)
    uword hash = KeyTraits::Hash(key);
    ASSERT(Utils::IsPowerOfTwo(num_entries));
    intptr_t probe = hash & (num_entries - 1);
    int probe_distance = 1;
    intptr_t deleted = -1;
    while (true) {
      if (IsUnused(probe)) {
        *entry = (deleted != -1) ? deleted : probe;
        NOT_IN_PRODUCT(UpdateCollisions(collisions);)
        return false;
      } else if (IsDeleted(probe)) {
        if (deleted == -1) {
          deleted = probe;
        }
      } else {
        *key_handle_ = GetKey(probe);
        if (KeyTraits::IsMatch(key, *key_handle_)) {
          *entry = probe;
          NOT_IN_PRODUCT(UpdateCollisions(collisions);)
          return true;
        }
        NOT_IN_PRODUCT(collisions += 1;)
      }
      // Advance probe. See ArrayLengthForNumOccupied comment for
      // explanation of how we know this hits all slots.
      probe = (probe + probe_distance) & (num_entries - 1);
      probe_distance++;
    }
    UNREACHABLE();
    return false;
  }

  // Sets the key of a previously unoccupied entry. This must not be the last
  // unoccupied entry.
  void InsertKey(intptr_t entry, const Object& key) const {
    ASSERT(key.raw() != UnusedMarker().raw());
    ASSERT(key.raw() != DeletedMarker().raw());
    ASSERT(!IsOccupied(entry));
    AdjustSmiValueAt(kOccupiedEntriesIndex, 1);
    if (IsDeleted(entry)) {
      AdjustSmiValueAt(kDeletedEntriesIndex, -1);
    } else {
      ASSERT(IsUnused(entry));
    }
    InternalSetKey(entry, key);
    ASSERT(IsOccupied(entry));
    ASSERT(NumOccupied() < NumEntries());
  }

  const Object& UnusedMarker() const { return Object::transition_sentinel(); }
  const Object& DeletedMarker() const { return *data_; }

  bool IsUnused(intptr_t entry) const {
    return InternalGetKey(entry) == UnusedMarker().raw();
  }
  bool IsOccupied(intptr_t entry) const {
    return !IsUnused(entry) && !IsDeleted(entry);
  }
  bool IsDeleted(intptr_t entry) const {
    return InternalGetKey(entry) == DeletedMarker().raw();
  }

  ObjectPtr GetKey(intptr_t entry) const {
    ASSERT(IsOccupied(entry));
    return InternalGetKey(entry);
  }
  ObjectPtr GetPayload(intptr_t entry, intptr_t component) const {
    ASSERT(IsOccupied(entry));
    return data_->At(PayloadIndex(entry, component));
  }
  void UpdatePayload(intptr_t entry,
                     intptr_t component,
                     const Object& value) const {
    ASSERT(IsOccupied(entry));
    ASSERT(0 <= component && component < kPayloadSize);
    data_->SetAt(PayloadIndex(entry, component), value);
  }
  // Deletes both the key and payload of the specified entry.
  void DeleteEntry(intptr_t entry) const {
    ASSERT(IsOccupied(entry));
    for (intptr_t i = 0; i < kPayloadSize; ++i) {
      UpdatePayload(entry, i, DeletedMarker());
    }
    InternalSetKey(entry, DeletedMarker());
    AdjustSmiValueAt(kOccupiedEntriesIndex, -1);
    AdjustSmiValueAt(kDeletedEntriesIndex, 1);
  }
  intptr_t NumEntries() const {
    return (data_->Length() - kFirstKeyIndex) / kEntrySize;
  }
  intptr_t NumUnused() const {
    return NumEntries() - NumOccupied() - NumDeleted();
  }
  intptr_t NumOccupied() const { return GetSmiValueAt(kOccupiedEntriesIndex); }
  intptr_t NumDeleted() const { return GetSmiValueAt(kDeletedEntriesIndex); }
  Object& KeyHandle() const { return *key_handle_; }
  Smi& SmiHandle() const { return *smi_handle_; }

#if !defined(PRODUCT)
  intptr_t NumGrows() const { return GetSmiValueAt(kNumGrowsIndex); }
  intptr_t NumLT5Collisions() const {
    return GetSmiValueAt(kNumLT5LookupsIndex);
  }
  intptr_t NumLT25Collisions() const {
    return GetSmiValueAt(kNumLT25LookupsIndex);
  }
  intptr_t NumGT25Collisions() const {
    return GetSmiValueAt(kNumGT25LookupsIndex);
  }
  intptr_t NumProbes() const { return GetSmiValueAt(kNumProbesIndex); }
  void UpdateGrowth() const {
    if (KeyTraits::ReportStats()) {
      AdjustSmiValueAt(kNumGrowsIndex, 1);
    }
  }
  void UpdateCollisions(intptr_t collisions) const {
    if (KeyTraits::ReportStats()) {
      if (data_->raw()->ptr()->InVMIsolateHeap()) {
        return;
      }
      AdjustSmiValueAt(kNumProbesIndex, collisions + 1);
      if (collisions < 5) {
        AdjustSmiValueAt(kNumLT5LookupsIndex, 1);
      } else if (collisions < 25) {
        AdjustSmiValueAt(kNumLT25LookupsIndex, 1);
      } else {
        AdjustSmiValueAt(kNumGT25LookupsIndex, 1);
      }
    }
  }
  void PrintStats() const {
    if (!KeyTraits::ReportStats()) {
      return;
    }
    const intptr_t num5 = NumLT5Collisions();
    const intptr_t num25 = NumLT25Collisions();
    const intptr_t num_more = NumGT25Collisions();
    // clang-format off
    OS::PrintErr("Stats for %s table:\n"
                 " Size of table = %" Pd ", Number of Occupied entries = %" Pd "\n"
                 " Number of Grows = %" Pd "\n"
                 " Number of lookups with < 5 collisions = %" Pd "\n"
                 " Number of lookups with < 25 collisions = %" Pd "\n"
                 " Number of lookups with > 25 collisions = %" Pd "\n"
                 " Average number of probes = %g\n",
                 KeyTraits::Name(),
                 NumEntries(), NumOccupied(), NumGrows(),
                 num5, num25, num_more,
                 static_cast<double>(NumProbes()) / (num5 + num25 + num_more));
    // clang-format on
  }
#endif  // !defined(PRODUCT)

 protected:
  static const intptr_t kOccupiedEntriesIndex = 0;
  static const intptr_t kDeletedEntriesIndex = 1;
#if defined(PRODUCT)
  static const intptr_t kHeaderSize = kDeletedEntriesIndex + 1;
#else
  static const intptr_t kNumGrowsIndex = 2;
  static const intptr_t kNumLT5LookupsIndex = 3;
  static const intptr_t kNumLT25LookupsIndex = 4;
  static const intptr_t kNumGT25LookupsIndex = 5;
  static const intptr_t kNumProbesIndex = 6;
  static const intptr_t kHeaderSize = kNumProbesIndex + 1;
#endif
  static const intptr_t kMetaDataIndex = kHeaderSize;
  static const intptr_t kFirstKeyIndex = kHeaderSize + kMetaDataSize;
  static const intptr_t kEntrySize = 1 + kPayloadSize;

  intptr_t KeyIndex(intptr_t entry) const {
    ASSERT(0 <= entry && entry < NumEntries());
    return kFirstKeyIndex + (kEntrySize * entry);
  }

  intptr_t PayloadIndex(intptr_t entry, intptr_t component) const {
    ASSERT(0 <= component && component < kPayloadSize);
    return KeyIndex(entry) + 1 + component;
  }

  ObjectPtr InternalGetKey(intptr_t entry) const {
    return data_->At(KeyIndex(entry));
  }

  void InternalSetKey(intptr_t entry, const Object& key) const {
    data_->SetAt(KeyIndex(entry), key);
  }

  intptr_t GetSmiValueAt(intptr_t index) const {
    ASSERT(!data_->IsNull());
    ASSERT(!data_->At(index)->IsHeapObject());
    return Smi::Value(Smi::RawCast(data_->At(index)));
  }

  void SetSmiValueAt(intptr_t index, intptr_t value) const {
    *smi_handle_ = Smi::New(value);
    data_->SetAt(index, *smi_handle_);
  }

  void AdjustSmiValueAt(intptr_t index, intptr_t delta) const {
    SetSmiValueAt(index, (GetSmiValueAt(index) + delta));
  }

  Object* key_handle_;
  Smi* smi_handle_;
  // Exactly one of these is non-NULL, depending on whether Release was
  // called.
  Array* data_;
  Array* released_data_;

  friend class HashTables;
};

// Table with unspecified iteration order. No payload overhead or metadata.
template <typename KeyTraits, intptr_t kUserPayloadSize>
class UnorderedHashTable : public HashTable<KeyTraits, kUserPayloadSize, 0> {
 public:
  typedef HashTable<KeyTraits, kUserPayloadSize, 0> BaseTable;
  static const intptr_t kPayloadSize = kUserPayloadSize;
  explicit UnorderedHashTable(ArrayPtr data)
      : BaseTable(Thread::Current()->zone(), data) {}
  UnorderedHashTable(Zone* zone, ArrayPtr data) : BaseTable(zone, data) {}
  UnorderedHashTable(Object* key, Smi* value, Array* data)
      : BaseTable(key, value, data) {}
  // Note: Does not check for concurrent modification.
  class Iterator {
   public:
    explicit Iterator(const UnorderedHashTable* table)
        : table_(table), entry_(-1) {}
    bool MoveNext() {
      while (entry_ < (table_->NumEntries() - 1)) {
        ++entry_;
        if (table_->IsOccupied(entry_)) {
          return true;
        }
      }
      return false;
    }
    intptr_t Current() { return entry_; }

   private:
    const UnorderedHashTable* table_;
    intptr_t entry_;
  };
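
  // Illustrative use of the iterator, with FooMap and 'cache' as in the
  // overview at the top of this file and 'key' a reusable Object handle:
  //  FooMap::Iterator it(&cache);
  //  while (it.MoveNext()) {
  //    key = cache.GetKey(it.Current());
  //  }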

  // No extra book-keeping needed for Initialize, InsertKey, DeleteEntry.
};

class HashTables : public AllStatic {
 public:
  // Allocates and initializes a table.
  template <typename Table>
  static ArrayPtr New(intptr_t initial_capacity,
                      Heap::Space space = Heap::kNew) {
    Table table(
        Thread::Current()->zone(),
        Array::New(Table::ArrayLengthForNumOccupied(initial_capacity), space));
    table.Initialize();
    return table.Release().raw();
  }
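
  // Illustrative use (FooMap as in the overview at the top of this file):
  //  FooMap cache(HashTables::New<FooMap>(4));  // Room for >= 4 keys.
  //  ...
  //  set_foo_cache(cache.Release());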

  template <typename Table>
  static ArrayPtr New(const Array& array) {
    Table table(Thread::Current()->zone(), array.raw());
    table.Initialize();
    return table.Release().raw();
  }

  // Clears 'to' and inserts all elements from 'from', in iteration order.
  // The tables must have the same user payload size.
  template <typename From, typename To>
  static void Copy(const From& from, const To& to) {
    COMPILE_ASSERT(From::kPayloadSize == To::kPayloadSize);
    to.Initialize();
    ASSERT(from.NumOccupied() < to.NumEntries());
    typename From::Iterator it(&from);
    Object& obj = Object::Handle();
    while (it.MoveNext()) {
      intptr_t from_entry = it.Current();
      obj = from.GetKey(from_entry);
      intptr_t to_entry = -1;
      const Object& key = obj;
      bool present = to.FindKeyOrDeletedOrUnused(key, &to_entry);
      ASSERT(!present);
      to.InsertKey(to_entry, obj);
      for (intptr_t i = 0; i < From::kPayloadSize; ++i) {
        obj = from.GetPayload(from_entry, i);
        to.UpdatePayload(to_entry, i, obj);
      }
    }
  }

  template <typename Table>
  static void EnsureLoadFactor(double high, const Table& table) {
    // Deleted elements are counted as well, since they take up space just
    // like occupied slots and should therefore also trigger rehashing.
    const double current = (1 + table.NumOccupied() + table.NumDeleted()) /
                           static_cast<double>(table.NumEntries());
    const bool too_many_deleted = table.NumOccupied() <= table.NumDeleted();
    if (current < high && !too_many_deleted) {
      return;
    }
    // Normally we double the size here, but if less than half are occupied
    // then it won't grow (this would imply that there were quite a lot of
    // deleted slots). We don't want to constantly rehash if we are adding
    // and deleting entries at just under the load factor limit, so we may
    // double the size even though the number of occupied slots would not
    // necessarily justify it. For example if the max load factor is 71% and
    // the table is 70% full we will double the size to avoid a rehash every
    // time 1% has been added and deleted.
    const intptr_t new_capacity = table.NumOccupied() * 2 + 1;
    ASSERT(table.NumOccupied() == 0 ||
           ((1.0 + table.NumOccupied()) /
            Utils::RoundUpToPowerOfTwo(new_capacity)) <= high);
    Table new_table(New<Table>(new_capacity,  // Is rounded up to power of 2.
                               table.data_->IsOld() ? Heap::kOld : Heap::kNew));
    Copy(table, new_table);
    *table.data_ = new_table.Release().raw();
    NOT_IN_PRODUCT(table.UpdateGrowth(); table.PrintStats();)
  }

  // Serializes a table by concatenating its entries as an array.
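  // With include_payload and a payload size of 1, for example, the resulting
  // layout is [key0, payload0, key1, payload1, ...].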
  template <typename Table>
  static ArrayPtr ToArray(const Table& table, bool include_payload) {
    const intptr_t entry_size = include_payload ? (1 + Table::kPayloadSize) : 1;
    Array& result =
        Array::Handle(Array::New(table.NumOccupied() * entry_size));
    typename Table::Iterator it(&table);
    Object& obj = Object::Handle();
    intptr_t result_index = 0;
    while (it.MoveNext()) {
      intptr_t entry = it.Current();
      obj = table.GetKey(entry);
      result.SetAt(result_index++, obj);
      if (include_payload) {
        for (intptr_t i = 0; i < Table::kPayloadSize; ++i) {
          obj = table.GetPayload(entry, i);
          result.SetAt(result_index++, obj);
        }
      }
    }
    return result.raw();
  }
};

template <typename BaseIterTable>
class HashMap : public BaseIterTable {
 public:
  explicit HashMap(ArrayPtr data)
      : BaseIterTable(Thread::Current()->zone(), data) {}
  HashMap(Zone* zone, ArrayPtr data) : BaseIterTable(zone, data) {}
  HashMap(Object* key, Smi* value, Array* data)
      : BaseIterTable(key, value, data) {}
  template <typename Key>
  ObjectPtr GetOrNull(const Key& key, bool* present = NULL) const {
    intptr_t entry = BaseIterTable::FindKey(key);
    if (present != NULL) {
      *present = (entry != -1);
    }
    return (entry == -1) ? Object::null() : BaseIterTable::GetPayload(entry, 0);
  }
  template <typename Key>
  ObjectPtr GetOrDie(const Key& key) const {
    intptr_t entry = BaseIterTable::FindKey(key);
    if (entry == -1) UNREACHABLE();
    return BaseIterTable::GetPayload(entry, 0);
  }
  bool UpdateOrInsert(const Object& key, const Object& value) const {
    EnsureCapacity();
    intptr_t entry = -1;
    bool present = BaseIterTable::FindKeyOrDeletedOrUnused(key, &entry);
    if (!present) {
      BaseIterTable::InsertKey(entry, key);
    }
    BaseIterTable::UpdatePayload(entry, 0, value);
    return present;
  }
  // Update the value of an existing key. Note that 'key' need not be an
  // Object.
  template <typename Key>
  void UpdateValue(const Key& key, const Object& value) const {
    intptr_t entry = BaseIterTable::FindKey(key);
    ASSERT(entry != -1);
    BaseIterTable::UpdatePayload(entry, 0, value);
  }
  // If 'key' is not present, maps it to 'value_if_absent'. Returns the final
  // value in the map.
  ObjectPtr InsertOrGetValue(const Object& key,
                             const Object& value_if_absent) const {
    EnsureCapacity();
    intptr_t entry = -1;
    if (!BaseIterTable::FindKeyOrDeletedOrUnused(key, &entry)) {
      BaseIterTable::InsertKey(entry, key);
      BaseIterTable::UpdatePayload(entry, 0, value_if_absent);
      return value_if_absent.raw();
    } else {
      return BaseIterTable::GetPayload(entry, 0);
    }
  }
  // Like InsertOrGetValue, but calls NewKey to allocate a key object if
  // needed.
  template <typename Key>
  ObjectPtr InsertNewOrGetValue(const Key& key,
                                const Object& value_if_absent) const {
    EnsureCapacity();
    intptr_t entry = -1;
    if (!BaseIterTable::FindKeyOrDeletedOrUnused(key, &entry)) {
      BaseIterTable::KeyHandle() =
          BaseIterTable::BaseTable::Traits::NewKey(key);
      BaseIterTable::InsertKey(entry, BaseIterTable::KeyHandle());
      BaseIterTable::UpdatePayload(entry, 0, value_if_absent);
      return value_if_absent.raw();
    } else {
      return BaseIterTable::GetPayload(entry, 0);
    }
  }

  template <typename Key>
  bool Remove(const Key& key) const {
    intptr_t entry = BaseIterTable::FindKey(key);
    if (entry == -1) {
      return false;
    } else {
      BaseIterTable::DeleteEntry(entry);
      return true;
    }
  }

  void Clear() const { BaseIterTable::Initialize(); }

 protected:
  void EnsureCapacity() const {
    static const double kMaxLoadFactor = 0.71;
    HashTables::EnsureLoadFactor(kMaxLoadFactor, *this);
  }
};

template <typename KeyTraits>
class UnorderedHashMap : public HashMap<UnorderedHashTable<KeyTraits, 1> > {
 public:
  typedef HashMap<UnorderedHashTable<KeyTraits, 1> > BaseMap;
  explicit UnorderedHashMap(ArrayPtr data)
      : BaseMap(Thread::Current()->zone(), data) {}
  UnorderedHashMap(Zone* zone, ArrayPtr data) : BaseMap(zone, data) {}
  UnorderedHashMap(Object* key, Smi* value, Array* data)
      : BaseMap(key, value, data) {}
};

template <typename BaseIterTable>
class HashSet : public BaseIterTable {
 public:
  explicit HashSet(ArrayPtr data)
      : BaseIterTable(Thread::Current()->zone(), data) {}
  HashSet(Zone* zone, ArrayPtr data) : BaseIterTable(zone, data) {}
  HashSet(Object* key, Smi* value, Array* data)
      : BaseIterTable(key, value, data) {}
  bool Insert(const Object& key) {
    EnsureCapacity();
    intptr_t entry = -1;
    bool present = BaseIterTable::FindKeyOrDeletedOrUnused(key, &entry);
    if (!present) {
      BaseIterTable::InsertKey(entry, key);
    }
    return present;
  }

  // If 'key' is not present, insert and return it. Else, return the existing
  // key in the set (useful for canonicalization).
  ObjectPtr InsertOrGet(const Object& key) const {
    EnsureCapacity();
    intptr_t entry = -1;
    if (!BaseIterTable::FindKeyOrDeletedOrUnused(key, &entry)) {
      BaseIterTable::InsertKey(entry, key);
      return key.raw();
    } else {
      return BaseIterTable::GetKey(entry);
    }
  }

  // Like InsertOrGet, but calls NewKey to allocate a key object if needed.
  template <typename Key>
  ObjectPtr InsertNewOrGet(const Key& key) const {
    EnsureCapacity();
    intptr_t entry = -1;
    if (!BaseIterTable::FindKeyOrDeletedOrUnused(key, &entry)) {
      BaseIterTable::KeyHandle() =
          BaseIterTable::BaseTable::Traits::NewKey(key);
      BaseIterTable::InsertKey(entry, BaseIterTable::KeyHandle());
      return BaseIterTable::KeyHandle().raw();
    } else {
      return BaseIterTable::GetKey(entry);
    }
  }

  template <typename Key>
  ObjectPtr GetOrNull(const Key& key, bool* present = NULL) const {
    intptr_t entry = BaseIterTable::FindKey(key);
    if (present != NULL) {
      *present = (entry != -1);
    }
    return (entry == -1) ? Object::null() : BaseIterTable::GetKey(entry);
  }

  template <typename Key>
  bool Remove(const Key& key) const {
    intptr_t entry = BaseIterTable::FindKey(key);
    if (entry == -1) {
      return false;
    } else {
      BaseIterTable::DeleteEntry(entry);
      return true;
    }
  }

  void Clear() const { BaseIterTable::Initialize(); }

 protected:
  void EnsureCapacity() const {
    static const double kMaxLoadFactor = 0.71;
    HashTables::EnsureLoadFactor(kMaxLoadFactor, *this);
  }
};

template <typename KeyTraits>
class UnorderedHashSet : public HashSet<UnorderedHashTable<KeyTraits, 0> > {
 public:
  typedef HashSet<UnorderedHashTable<KeyTraits, 0> > BaseSet;
  explicit UnorderedHashSet(ArrayPtr data)
      : BaseSet(Thread::Current()->zone(), data) {
    ASSERT(data != Array::null());
  }
  UnorderedHashSet(Zone* zone, ArrayPtr data) : BaseSet(zone, data) {}
  UnorderedHashSet(Object* key, Smi* value, Array* data)
      : BaseSet(key, value, data) {}

  void Dump() const {
    Object& entry = Object::Handle();
    for (intptr_t i = 0; i < this->data_->Length(); i++) {
      entry = this->data_->At(i);
      if (entry.raw() == BaseSet::UnusedMarker().raw() ||
          entry.raw() == BaseSet::DeletedMarker().raw() || entry.IsSmi()) {
        // empty, deleted, num_used/num_deleted
        OS::PrintErr("%" Pd ": %s\n", i, entry.ToCString());
      } else {
        intptr_t hash = KeyTraits::Hash(entry);
        OS::PrintErr("%" Pd ": %" Pd ", %s\n", i, hash, entry.ToCString());
      }
    }
  }
};

}  // namespace dart

#endif  // RUNTIME_VM_HASH_TABLE_H_