/*
 * Copyright 2017-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <folly/synchronization/Hazptr.h>
#include <atomic>
#include <mutex>

namespace folly {

namespace detail {

namespace concurrenthashmap {

// hazptr deleter that can use an allocator.
template <typename Allocator>
class HazptrDeleter {
 public:
  template <typename Node>
  void operator()(Node* node) {
    node->~Node();
    Allocator().deallocate((uint8_t*)node, sizeof(Node));
  }
};

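// hazptr deleter for bucket arrays. Buckets is a variable-length
// structure, so the bucket count is captured when the array is
// retired and passed back to destroy().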
template <typename Allocator>
class HazptrBucketDeleter {
  size_t count_;

 public:
  HazptrBucketDeleter(size_t count) : count_(count) {}
  HazptrBucketDeleter() = default;
  template <typename Bucket>
  void operator()(Bucket* bucket) {
    bucket->destroy(count_);
  }
};

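// ValueHolder stores the key/value pair inline. This primary template
// is used when both the key and the value are nothrow copy
// constructible; see the specialization below for the indirect case.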
template <
    typename KeyType,
    typename ValueType,
    typename Allocator,
    typename Enabled = void>
class ValueHolder {
 public:
  typedef std::pair<const KeyType, ValueType> value_type;

  explicit ValueHolder(const ValueHolder& other) : item_(other.item_) {}

  template <typename Arg, typename... Args>
  ValueHolder(std::piecewise_construct_t, Arg&& k, Args&&... args)
      : item_(
            std::piecewise_construct,
            std::forward_as_tuple(std::forward<Arg>(k)),
            std::forward_as_tuple(std::forward<Args>(args)...)) {}
  value_type& getItem() {
    return item_;
  }

 private:
  value_type item_;
};

// If the key or value is not nothrow copy constructible, we instead
// add an extra indirection. This costs extra allocations and
// deallocations, and pulls in an extra cacheline on access.
template <typename KeyType, typename ValueType, typename Allocator>
class ValueHolder<
    KeyType,
    ValueType,
    Allocator,
    std::enable_if_t<
        !std::is_nothrow_copy_constructible<ValueType>::value ||
        !std::is_nothrow_copy_constructible<KeyType>::value>> {
 public:
  typedef std::pair<const KeyType, ValueType> value_type;

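  // Copying transfers ownership of the out-of-line item: the source
  // holder is marked unowned, so only the new holder deallocates it.
  // Holders are copied only during rehash, under the segment lock.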
  explicit ValueHolder(const ValueHolder& other) {
    other.owned_ = false;
    item_ = other.item_;
  }

  template <typename Arg, typename... Args>
  ValueHolder(std::piecewise_construct_t, Arg&& k, Args&&... args) {
    item_ = (value_type*)Allocator().allocate(sizeof(value_type));
    new (item_) value_type(
        std::piecewise_construct,
        std::forward_as_tuple(std::forward<Arg>(k)),
        std::forward_as_tuple(std::forward<Args>(args)...));
  }

  ~ValueHolder() {
    if (owned_) {
      item_->~value_type();
      Allocator().deallocate((uint8_t*)item_, sizeof(value_type));
    }
  }

  value_type& getItem() {
    return *item_;
  }

 private:
  value_type* item_;
  mutable bool owned_{true};
};

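// NodeT is the hazptr-managed node of each bucket's singly linked
// list. Nodes are link counted via acquire_link / unlink, so a chain
// tail shared between the old and new table during rehash is
// reclaimed only once.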
template <
    typename KeyType,
    typename ValueType,
    typename Allocator,
    template <typename> class Atom = std::atomic>
class NodeT : public hazptr_obj_base_linked<
                  NodeT<KeyType, ValueType, Allocator, Atom>,
                  Atom,
                  concurrenthashmap::HazptrDeleter<Allocator>> {
 public:
  typedef std::pair<const KeyType, ValueType> value_type;

  explicit NodeT(hazptr_obj_batch<Atom>* batch, NodeT* other)
      : item_(other->item_) {
    init(batch);
  }

  template <typename Arg, typename... Args>
  NodeT(hazptr_obj_batch<Atom>* batch, Arg&& k, Args&&... args)
      : item_(
            std::piecewise_construct,
            std::forward<Arg>(k),
            std::forward<Args>(args)...) {
    init(batch);
  }

  void release() {
    this->unlink();
  }

  value_type& getItem() {
    return item_.getItem();
  }

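  // Called by the hazptr library during reclamation to enumerate
  // linked children. next_ is a mutable link, so it is pushed only
  // when mutable links are requested.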
  template <typename S>
  void push_links(bool m, S& s) {
    if (m) {
      auto p = next_.load(std::memory_order_acquire);
      if (p) {
        s.push(p);
      }
    }
  }

  Atom<NodeT*> next_{nullptr};

 private:
  void init(hazptr_obj_batch<Atom>* batch) {
    DCHECK(batch);
    this->set_deleter( // defined in hazptr_obj
        concurrenthashmap::HazptrDeleter<Allocator>());
    this->set_batch_tag(batch); // defined in hazptr_obj
    this->acquire_link_safe(); // defined in hazptr_obj_base_linked
  }

  ValueHolder<KeyType, ValueType, Allocator> item_;
};

} // namespace concurrenthashmap

/* A Segment is a single shard of the ConcurrentHashMap.
 * All writes take the segment lock, while readers are wait-free.
 * Readers always proceed in parallel with the single active writer.
 *
 *
 * Possible additional optimizations:
 *
 * * insert / erase could be made lock-free / wait-free. We would need
 *   to be careful that assign and rehash don't conflict (possibly with
 *   a reader/writer lock, or a microlock per node or per bucket, etc).
 *   Java 8 goes halfway: it locks per bucket, except for the first
 *   item, which is inserted with a CAS (this is somewhat specific to
 *   Java having a lock per object).
 *
 * * I tried using trylock() and find() to warm the cache for insert()
 *   and erase(), as Java 7 does, but didn't have much luck.
 *
 * * We could order elements using split ordering, for faster rehash
 *   and no need to ever copy nodes. Note that a full split ordering
 *   including dummy nodes increases memory usage by 2x, but we could
 *   split the difference and still require a lock to set bucket
 *   pointers.
 */
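/* A minimal usage sketch of the segment API defined below. This is
 * illustrative only (it assumes the default hazptr_obj_batch template
 * arguments); in practice segments are created and driven by
 * ConcurrentHashMap, which owns the hazptr batch and selects the
 * shard from the low ShardBits of the hash:
 *
 *   hazptr_obj_batch<> batch;
 *   ConcurrentHashMapSegment<int, int> seg(
 *       8, 0.75f, 0, &batch); // buckets, load factor, no max size
 *   ConcurrentHashMapSegment<int, int>::Iterator it;
 *   seg.insert(it, 1, 100); // writer path: takes the segment lock
 *   if (seg.find(it, 1)) { // reader path: wait-free, hazptr-protected
 *     int v = it->second;
 *     (void)v;
 *   }
 *   seg.erase(1); // the node is retired via hazptrs, not freed in place
 */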
template <
    typename KeyType,
    typename ValueType,
    uint8_t ShardBits = 0,
    typename HashFn = std::hash<KeyType>,
    typename KeyEqual = std::equal_to<KeyType>,
    typename Allocator = std::allocator<uint8_t>,
    template <typename> class Atom = std::atomic,
    class Mutex = std::mutex>
class alignas(64) ConcurrentHashMapSegment {
  enum class InsertType {
    DOES_NOT_EXIST, // insert/emplace operations. If key exists, return false.
    MUST_EXIST, // assign operations. If key does not exist, return false.
    ANY, // insert_or_assign.
    MATCH, // assign_if_equal (not in std). For concurrent maps, a
           // way to atomically change a value only if it equals some
           // expected value.
  };

 public:
  typedef KeyType key_type;
  typedef ValueType mapped_type;
  typedef std::pair<const KeyType, ValueType> value_type;
  typedef std::size_t size_type;

  using Node = concurrenthashmap::NodeT<KeyType, ValueType, Allocator, Atom>;
  class Iterator;

  ConcurrentHashMapSegment(
      size_t initial_buckets,
      float load_factor,
      size_t max_size,
      hazptr_obj_batch<Atom>* batch)
      : load_factor_(load_factor), max_size_(max_size), batch_(batch) {
    DCHECK(batch);
    initial_buckets = folly::nextPowTwo(initial_buckets);
    DCHECK(
        max_size_ == 0 ||
        (isPowTwo(max_size_) &&
         (folly::popcount(max_size_ - 1) + ShardBits <= 32)));
    auto buckets = Buckets::create(initial_buckets, batch);
    buckets_.store(buckets, std::memory_order_release);
    load_factor_nodes_ = initial_buckets * load_factor_;
    bucket_count_.store(initial_buckets, std::memory_order_relaxed);
  }

  ~ConcurrentHashMapSegment() {
    auto buckets = buckets_.load(std::memory_order_relaxed);
    // We can delete and not retire() here, since users must have
    // their own synchronization around destruction.
    auto count = bucket_count_.load(std::memory_order_relaxed);
    buckets->unlink_and_reclaim_nodes(count);
    buckets->destroy(count);
  }

  size_t size() {
    return size_;
  }

  bool empty() {
    return size() == 0;
  }

  bool insert(Iterator& it, std::pair<key_type, mapped_type>&& pair) {
    return insert(it, std::move(pair.first), std::move(pair.second));
  }

  template <typename Key, typename Value>
  bool insert(Iterator& it, Key&& k, Value&& v) {
    auto node = (Node*)Allocator().allocate(sizeof(Node));
    new (node) Node(batch_, std::forward<Key>(k), std::forward<Value>(v));
    auto res = insert_internal(
        it,
        node->getItem().first,
        InsertType::DOES_NOT_EXIST,
        [](const ValueType&) { return false; },
        node,
        node);
    if (!res) {
      node->~Node();
      Allocator().deallocate((uint8_t*)node, sizeof(Node));
    }
    return res;
  }

  template <typename Key, typename... Args>
  bool try_emplace(Iterator& it, Key&& k, Args&&... args) {
    // Note: the first forwarded key is only ever compared; the second
    // is moved in to create the node, and the first is never touched
    // after that move.
    return insert_internal(
        it,
        std::forward<Key>(k),
        InsertType::DOES_NOT_EXIST,
        [](const ValueType&) { return false; },
        nullptr,
        std::forward<Key>(k),
        std::forward<Args>(args)...);
  }

  template <typename... Args>
  bool emplace(Iterator& it, const KeyType& k, Node* node) {
    return insert_internal(
        it,
        k,
        InsertType::DOES_NOT_EXIST,
        [](const ValueType&) { return false; },
        node,
        node);
  }

  template <typename Key, typename Value>
  bool insert_or_assign(Iterator& it, Key&& k, Value&& v) {
    auto node = (Node*)Allocator().allocate(sizeof(Node));
    new (node) Node(batch_, std::forward<Key>(k), std::forward<Value>(v));
    auto res = insert_internal(
        it,
        node->getItem().first,
        InsertType::ANY,
        [](const ValueType&) { return false; },
        node,
        node);
    if (!res) {
      node->~Node();
      Allocator().deallocate((uint8_t*)node, sizeof(Node));
    }
    return res;
  }

  template <typename Key, typename Value>
  bool assign(Iterator& it, Key&& k, Value&& v) {
    auto node = (Node*)Allocator().allocate(sizeof(Node));
    new (node) Node(batch_, std::forward<Key>(k), std::forward<Value>(v));
    auto res = insert_internal(
        it,
        node->getItem().first,
        InsertType::MUST_EXIST,
        [](const ValueType&) { return false; },
        node,
        node);
    if (!res) {
      node->~Node();
      Allocator().deallocate((uint8_t*)node, sizeof(Node));
    }
    return res;
  }

  template <typename Key, typename Value>
  bool assign_if_equal(
      Iterator& it,
      Key&& k,
      const ValueType& expected,
      Value&& desired) {
    auto node = (Node*)Allocator().allocate(sizeof(Node));
    new (node) Node(batch_, std::forward<Key>(k), std::forward<Value>(desired));
    auto res = insert_internal(
        it,
        node->getItem().first,
        InsertType::MATCH,
        [&expected](const ValueType& v) { return v == expected; },
        node,
        node);
    if (!res) {
      node->~Node();
      Allocator().deallocate((uint8_t*)node, sizeof(Node));
    }
    return res;
  }

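  // Shared implementation of all insert variants. If cur is null, a
  // node is constructed from args only when an insert actually takes
  // place. On success the iterator's hazard pointers protect the
  // published node; on failure the caller deallocates any node it
  // preallocated.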
  template <typename MatchFunc, typename... Args>
  bool insert_internal(
      Iterator& it,
      const KeyType& k,
      InsertType type,
      MatchFunc match,
      Node* cur,
      Args&&... args) {
    auto h = HashFn()(k);
    std::unique_lock<Mutex> g(m_);

    size_t bcount = bucket_count_.load(std::memory_order_relaxed);
    auto buckets = buckets_.load(std::memory_order_relaxed);
    // Check if a rehash is needed for DOES_NOT_EXIST.
    if (size_ >= load_factor_nodes_ && type == InsertType::DOES_NOT_EXIST) {
      if (max_size_ && size_ << 1 > max_size_) {
        // Would exceed max size.
        throw std::bad_alloc();
      }
      rehash(bcount << 1);
      buckets = buckets_.load(std::memory_order_relaxed);
      bcount = bucket_count_.load(std::memory_order_relaxed);
    }

    auto idx = getIdx(bcount, h);
    auto head = &buckets->buckets_[idx]();
    auto node = head->load(std::memory_order_relaxed);
    auto headnode = node;
    auto prev = head;
    auto& hazbuckets = it.hazptrs_[0];
    auto& haznode = it.hazptrs_[1];
    hazbuckets.reset(buckets);
    while (node) {
      // Is the key already present?
      if (KeyEqual()(k, node->getItem().first)) {
        it.setNode(node, buckets, bcount, idx);
        haznode.reset(node);
        if (type == InsertType::MATCH) {
          if (!match(node->getItem().second)) {
            return false;
          }
        }
        if (type == InsertType::DOES_NOT_EXIST) {
          return false;
        } else {
          if (!cur) {
            cur = (Node*)Allocator().allocate(sizeof(Node));
            new (cur) Node(batch_, std::forward<Args>(args)...);
          }
          auto next = node->next_.load(std::memory_order_relaxed);
          cur->next_.store(next, std::memory_order_relaxed);
          if (next) {
            next->acquire_link(); // defined in hazptr_obj_base_linked
          }
          prev->store(cur, std::memory_order_release);
          g.unlock();
          // Release the replaced node while not under the lock.
          node->release();
          return true;
        }
      }

      prev = &node->next_;
      node = node->next_.load(std::memory_order_relaxed);
    }
    if (type != InsertType::DOES_NOT_EXIST && type != InsertType::ANY) {
      haznode.reset();
      hazbuckets.reset();
      return false;
    }
    // Node not found; check if a rehash is needed for ANY.
    if (size_ >= load_factor_nodes_ && type == InsertType::ANY) {
      if (max_size_ && size_ << 1 > max_size_) {
        // Would exceed max size.
        throw std::bad_alloc();
      }
      rehash(bcount << 1);

      // Reload the correct bucket.
      buckets = buckets_.load(std::memory_order_relaxed);
      bcount <<= 1;
      hazbuckets.reset(buckets);
      idx = getIdx(bcount, h);
      head = &buckets->buckets_[idx]();
      headnode = head->load(std::memory_order_relaxed);
    }

    // We found a slot to put the node.
    size_++;
    if (!cur) {
      // InsertType::ANY, or DOES_NOT_EXIST in the try_emplace case.
      DCHECK(type == InsertType::ANY || type == InsertType::DOES_NOT_EXIST);
      cur = (Node*)Allocator().allocate(sizeof(Node));
      new (cur) Node(batch_, std::forward<Args>(args)...);
    }
    cur->next_.store(headnode, std::memory_order_relaxed);
    head->store(cur, std::memory_order_release);
    it.setNode(cur, buckets, bcount, idx);
    return true;
  }

  // Must hold the segment lock.
  void rehash(size_t bucket_count) {
    auto buckets = buckets_.load(std::memory_order_relaxed);
    auto newbuckets = Buckets::create(bucket_count, batch_);

    load_factor_nodes_ = bucket_count * load_factor_;

    auto oldcount = bucket_count_.load(std::memory_order_relaxed);
    for (size_t i = 0; i < oldcount; i++) {
      auto bucket = &buckets->buckets_[i]();
      auto node = bucket->load(std::memory_order_relaxed);
      if (!node) {
        continue;
      }
      auto h = HashFn()(node->getItem().first);
      auto idx = getIdx(bucket_count, h);
      // Reuse as long a chain as possible from the end. Since the
      // nodes don't have previous pointers, the longest tail chain
      // whose elements all map to the same new bucket can be shared
      // between the previous hashmap and the new one.
      auto lastrun = node;
      auto lastidx = idx;
      auto count = 0;
      auto last = node->next_.load(std::memory_order_relaxed);
      for (; last != nullptr;
           last = last->next_.load(std::memory_order_relaxed)) {
        auto k = getIdx(bucket_count, HashFn()(last->getItem().first));
        if (k != lastidx) {
          lastidx = k;
          lastrun = last;
          count = 0;
        }
        count++;
      }
      // Set the longest last run in the new bucket, incrementing the
      // link count.
      lastrun->acquire_link(); // defined in hazptr_obj_base_linked
      newbuckets->buckets_[lastidx]().store(lastrun, std::memory_order_relaxed);
      // Clone the remaining nodes.
      for (; node != lastrun;
           node = node->next_.load(std::memory_order_relaxed)) {
        auto newnode = (Node*)Allocator().allocate(sizeof(Node));
        new (newnode) Node(batch_, node);
        auto k = getIdx(bucket_count, HashFn()(node->getItem().first));
        auto prevhead = &newbuckets->buckets_[k]();
        newnode->next_.store(prevhead->load(std::memory_order_relaxed));
        prevhead->store(newnode, std::memory_order_relaxed);
      }
    }

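    // Publish the new table. The seqlock is bumped to odd before the
    // swap and back to even after, so readers in getBucketsAndCount()
    // retry rather than pair a stale bucket count with the new bucket
    // array. The old table is retired, not freed, since readers may
    // still hold hazard pointers into it.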
    auto oldbuckets = buckets_.load(std::memory_order_relaxed);
    seqlock_.fetch_add(1, std::memory_order_release);
    bucket_count_.store(bucket_count, std::memory_order_release);
    buckets_.store(newbuckets, std::memory_order_release);
    seqlock_.fetch_add(1, std::memory_order_release);
    oldbuckets->retire(
        concurrenthashmap::HazptrBucketDeleter<Allocator>(oldcount));
  }

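  // Wait-free read path. Hazard pointers are advanced hand over hand
  // down the chain, so the current node stays protected while the
  // next one is acquired.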
  bool find(Iterator& res, const KeyType& k) {
    auto& hazcurr = res.hazptrs_[1];
    auto& haznext = res.hazptrs_[2];
    auto h = HashFn()(k);
    size_t bcount;
    Buckets* buckets;
    getBucketsAndCount(bcount, buckets, res.hazptrs_[0]);

    auto idx = getIdx(bcount, h);
    auto prev = &buckets->buckets_[idx]();
    auto node = hazcurr.get_protected(*prev);
    while (node) {
      if (KeyEqual()(k, node->getItem().first)) {
        res.setNode(node, buckets, bcount, idx);
        return true;
      }
      node = haznext.get_protected(node->next_);
      hazcurr.swap(haznext);
    }
    return false;
  }

  // Implemented as its own search rather than on top of find(),
  // because unlinking a node requires tracking a prev pointer.
  size_type erase(const key_type& key) {
    return erase_internal(key, nullptr, [](const ValueType&) { return true; });
  }

  size_type erase_if_equal(const key_type& key, const ValueType& expected) {
    return erase_internal(key, nullptr, [&expected](const ValueType& v) {
      return v == expected;
    });
  }

  template <typename MatchFunc>
  size_type
  erase_internal(const key_type& key, Iterator* iter, MatchFunc match) {
    Node* node{nullptr};
    auto h = HashFn()(key);
    {
      std::lock_guard<Mutex> g(m_);

      size_t bcount = bucket_count_.load(std::memory_order_relaxed);
      auto buckets = buckets_.load(std::memory_order_relaxed);
      auto idx = getIdx(bcount, h);
      auto head = &buckets->buckets_[idx]();
      node = head->load(std::memory_order_relaxed);
      Node* prev = nullptr;
      while (node) {
        if (KeyEqual()(key, node->getItem().first)) {
          if (!match(node->getItem().second)) {
            return 0;
          }
          auto next = node->next_.load(std::memory_order_relaxed);
          if (next) {
            next->acquire_link(); // defined in hazptr_obj_base_linked
          }
          if (prev) {
            prev->next_.store(next, std::memory_order_release);
          } else {
            // Must be the head of the list.
            head->store(next, std::memory_order_release);
          }

          if (iter) {
            iter->hazptrs_[0].reset(buckets);
            iter->setNode(
                node->next_.load(std::memory_order_acquire),
                buckets,
                bcount,
                idx);
            iter->next();
          }
          size_--;
          break;
        }
        prev = node;
        node = node->next_.load(std::memory_order_relaxed);
      }
    }
    // Release the node while not under the lock.
    if (node) {
      node->release();
      return 1;
    }

    return 0;
  }

  // Unfortunately, because we reuse nodes on rehash, we can't have
  // prev pointers in the bucket chain, so the search has to restart
  // from the bucket head.
  //
  // This is a small departure from standard STL containers: erase may
  // throw if the hash or key_eq functions throw.
  void erase(Iterator& res, Iterator& pos) {
    erase_internal(pos->first, &res, [](const ValueType&) { return true; });
    // Invalidate the iterator.
    pos = cend();
  }

  void clear() {
    size_t bcount = bucket_count_.load(std::memory_order_relaxed);
    Buckets* buckets;
    auto newbuckets = Buckets::create(bcount, batch_);
    {
      std::lock_guard<Mutex> g(m_);
      buckets = buckets_.load(std::memory_order_relaxed);
      buckets_.store(newbuckets, std::memory_order_release);
      size_ = 0;
    }
    buckets->retire(concurrenthashmap::HazptrBucketDeleter<Allocator>(bcount));
  }

  void max_load_factor(float factor) {
    std::lock_guard<Mutex> g(m_);
    load_factor_ = factor;
    load_factor_nodes_ =
        bucket_count_.load(std::memory_order_relaxed) * load_factor_;
  }

  Iterator cbegin() {
    Iterator res;
    size_t bcount;
    Buckets* buckets;
    getBucketsAndCount(bcount, buckets, res.hazptrs_[0]);
    res.setNode(nullptr, buckets, bcount, 0);
    res.next();
    return res;
  }

  Iterator cend() {
    return Iterator(nullptr);
  }

  // Could be optimized to avoid an extra pointer dereference by
  // allocating buckets_ at the same time.
  class Buckets : public hazptr_obj_base<
                      Buckets,
                      Atom,
                      concurrenthashmap::HazptrBucketDeleter<Allocator>> {
    using BucketRoot = hazptr_root<Node, Atom>;

    Buckets() {}
    ~Buckets() {}

   public:
    static Buckets* create(size_t count, hazptr_obj_batch<Atom>* batch) {
      auto buf =
          Allocator().allocate(sizeof(Buckets) + sizeof(BucketRoot) * count);
      auto buckets = new (buf) Buckets();
      DCHECK(batch);
      buckets->set_batch_tag(batch); // defined in hazptr_obj
      for (size_t i = 0; i < count; i++) {
        new (&buckets->buckets_[i]) BucketRoot;
      }
      return buckets;
    }

    void destroy(size_t count) {
      for (size_t i = 0; i < count; i++) {
        buckets_[i].~BucketRoot();
      }
      this->~Buckets();
      Allocator().deallocate(
          (uint8_t*)this, sizeof(BucketRoot) * count + sizeof(*this));
    }

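    // Destructor-only path: each chain is unlinked and reclaimed
    // immediately rather than retired, since the caller guarantees no
    // concurrent readers remain.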
    void unlink_and_reclaim_nodes(size_t count) {
      for (size_t i = 0; i < count; i++) {
        auto node = buckets_[i]().load(std::memory_order_relaxed);
        if (node) {
          buckets_[i]().store(nullptr, std::memory_order_relaxed);
          while (node) {
            auto next = node->next_.load(std::memory_order_relaxed);
            if (next) {
              node->next_.store(nullptr, std::memory_order_relaxed);
            }
            node->unlink_and_reclaim_unchecked();
            node = next;
          }
        }
      }
    }

    BucketRoot buckets_[0];
  };

 public:
  class Iterator {
   public:
    FOLLY_ALWAYS_INLINE Iterator() {}
    FOLLY_ALWAYS_INLINE explicit Iterator(std::nullptr_t) : hazptrs_(nullptr) {}
    FOLLY_ALWAYS_INLINE ~Iterator() {}

    void
    setNode(Node* node, Buckets* buckets, size_t bucket_count, uint64_t idx) {
      node_ = node;
      buckets_ = buckets;
      idx_ = idx;
      bucket_count_ = bucket_count;
    }

    const value_type& operator*() const {
      DCHECK(node_);
      return node_->getItem();
    }

    const value_type* operator->() const {
      DCHECK(node_);
      return &(node_->getItem());
    }

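    // Hand-over-hand traversal: protect the next node with
    // hazptrs_[2], then swap it into hazptrs_[1] as the new current
    // node.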
    const Iterator& operator++() {
      DCHECK(node_);
      node_ = hazptrs_[2].get_protected(node_->next_);
      hazptrs_[1].swap(hazptrs_[2]);
      if (!node_) {
        ++idx_;
        next();
      }
      return *this;
    }

    void next() {
      while (!node_) {
        if (idx_ >= bucket_count_) {
          break;
        }
        DCHECK(buckets_);
        node_ = hazptrs_[1].get_protected(buckets_->buckets_[idx_]());
        if (node_) {
          break;
        }
        ++idx_;
      }
    }

    bool operator==(const Iterator& o) const {
      return node_ == o.node_;
    }

    bool operator!=(const Iterator& o) const {
      return !(*this == o);
    }

    Iterator& operator=(const Iterator& o) = delete;

    Iterator& operator=(Iterator&& o) noexcept {
      if (this != &o) {
        hazptrs_ = std::move(o.hazptrs_);
        node_ = std::exchange(o.node_, nullptr);
        buckets_ = std::exchange(o.buckets_, nullptr);
        bucket_count_ = std::exchange(o.bucket_count_, 0);
        idx_ = std::exchange(o.idx_, 0);
      }
      return *this;
    }

    Iterator(const Iterator& o) = delete;

    Iterator(Iterator&& o) noexcept
        : hazptrs_(std::move(o.hazptrs_)),
          node_(std::exchange(o.node_, nullptr)),
          buckets_(std::exchange(o.buckets_, nullptr)),
          bucket_count_(std::exchange(o.bucket_count_, 0)),
          idx_(std::exchange(o.idx_, 0)) {}

    // These are accessed directly from the functions above:
    // hazptrs_[0] protects the bucket array, hazptrs_[1] the current
    // node, and hazptrs_[2] the next node during traversal.
    hazptr_array<3, Atom> hazptrs_;

   private:
    Node* node_{nullptr};
    Buckets* buckets_{nullptr};
    size_t bucket_count_{0};
    uint64_t idx_{0};
  };

 private:
  // The shard index has already consumed the low ShardBits of the
  // hash, so shift them off and index buckets with fresh bits.
  uint64_t getIdx(size_t bucket_count, size_t hash) {
    return (hash >> ShardBits) & (bucket_count - 1);
  }
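  // Snapshot a consistent (bucket array, bucket count) pair for
  // readers. The seqlock detects a concurrent rehash: retry while the
  // seqlock value is odd or changed between the two loads.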
  void getBucketsAndCount(
      size_t& bcount,
      Buckets*& buckets,
      hazptr_holder<Atom>& hazptr) {
    while (true) {
      auto seqlock = seqlock_.load(std::memory_order_acquire);
      bcount = bucket_count_.load(std::memory_order_acquire);
      buckets = hazptr.get_protected(buckets_);
      auto seqlock2 = seqlock_.load(std::memory_order_acquire);
      if (!(seqlock & 1) && (seqlock == seqlock2)) {
        break;
      }
    }
    DCHECK(buckets);
  }

  Mutex m_;
  float load_factor_;
  size_t load_factor_nodes_;
  size_t size_{0};
  size_t const max_size_;

  // Fields needed for read-only access, on a separate cacheline.
  alignas(64) Atom<Buckets*> buckets_{nullptr};
  std::atomic<uint64_t> seqlock_{0};
  Atom<size_t> bucket_count_;
  hazptr_obj_batch<Atom>* batch_;
};
} // namespace detail
} // namespace folly