/*
 * Copyright 2018-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <folly/synchronization/Hazptr-fwd.h>
#include <folly/synchronization/HazptrObj.h>
#include <folly/synchronization/HazptrRec.h>
#include <folly/synchronization/HazptrThrLocal.h>

#include <folly/Portability.h>
#include <folly/synchronization/AsymmetricMemoryBarrier.h>

#include <atomic>
#include <chrono> // for steady_clock in try_timed_cleanup
#include <thread> // for std::this_thread::yield
#include <unordered_set> // for hash set in bulk_reclaim

///
/// Classes related to hazard pointer domains.
///

namespace folly {

namespace detail {

constexpr int hazptr_domain_rcount_threshold() {
  return 1000;
}

} // namespace detail

/**
 * hazptr_domain
 *
 * A domain manages a set of hazard pointers and a set of retired objects.
 *
 * Most user code need not specify any domains.
 *
 * Notes on destruction order, tagged objects, locking and deadlock
 * avoidance:
 * - Tagged objects support reclamation order guarantees. A call to
 *   cleanup_batch_tag(tag) guarantees that all objects with the
 *   specified tag are reclaimed before the function returns.
 * - Due to the strict order, access to the set of tagged objects
 *   needs synchronization and care must be taken to avoid deadlock.
 * - There are two types of reclamation operations to consider:
 *   - Type A: A Type A reclamation operation is triggered by meeting
 *     some threshold. Reclaimed objects may have different
 *     tags. Hazard pointers are checked and only unprotected objects
 *     are reclaimed. This type is expected to be expensive but
 *     infrequent and the cost is amortized over a large number of
 *     reclaimed objects. This type is needed to guarantee an upper
 *     bound on unreclaimed reclaimable objects.
 *   - Type B: A Type B reclamation operation is triggered by a call
 *     to the function cleanup_batch_tag for a specific tag. All
 *     objects with the specified tag must be reclaimed
 *     unconditionally before returning from such a function
 *     call. Hazard pointers are not checked. This type of reclamation
 *     operation is expected to be inexpensive and may be invoked more
 *     frequently than Type A.
 * - Tagged retired objects are kept in a single list in the domain
 *   structure, named tagged_.
 * - Both Type A and Type B reclamation operations pop all the objects
 *   in tagged_ and sort them into two sets of reclaimable and
 *   unreclaimable objects. The objects in the reclaimable set are
 *   reclaimed and the objects in the unreclaimable set are pushed
 *   back into tagged_.
 * - The tagged_ list is locked between popping all objects and
 *   pushing back unreclaimable objects, in order to guarantee that
 *   Type B operations do not miss any objects that match the
 *   specified tag.
 * - A Type A operation cannot release the lock on the tagged_ list
 *   before reclaiming reclaimable objects, to prevent concurrent
 *   Type B operations from returning before the reclamation of
 *   objects with matching tags.
 * - A Type B operation can release the lock on tagged_ before
 *   reclaiming objects because the sets of objects reclaimable by
 *   different Type B operations are disjoint.
 * - The lock on the tagged_ list is re-entrant, to prevent deadlock
 *   when reclamation in a Type A operation requires a Type B
 *   reclamation operation to complete.
 * - The implementation allows only one pattern of re-entrance: an
 *   inner Type B operation inside an outer Type A operation.
 * - An inner Type B operation must be able to access and modify the
 *   outer Type A operation's set of reclaimable objects and their
 *   children objects in order not to miss objects that match the
 *   specified tag. Hence, Type A operations use the data members
 *   unprotected_ and children_ to keep track of these objects
 *   between reclamation steps and to provide inner Type B operations
 *   access to these objects.
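 *
 * Illustrative usage sketch (an assumption for exposition, not part of
 * this header): a type derived from hazptr_obj_base is retired to the
 * default domain and reclaimed later, once no hazard pointer protects it.
 *
 *   struct Node : hazptr_obj_base<Node> {
 *     int value;
 *   };
 *   Node* n = new Node();
 *   // ... publish n; readers protect it with a hazptr_holder ...
 *   n->retire();      // retire to the default domain
 *   hazptr_cleanup(); // optionally reclaim all reclaimable objects now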
 */
template <template <typename> class Atom>
class hazptr_domain {
  using Obj = hazptr_obj<Atom>;
  using ObjList = hazptr_obj_list<Atom>;
  using RetiredList = hazptr_obj_retired_list<Atom>;
  using Set = std::unordered_set<const void*>;

  static constexpr int kThreshold = detail::hazptr_domain_rcount_threshold();
  static constexpr int kMultiplier = 2;
  static constexpr uint64_t kSyncTimePeriod{2000000000}; // nanoseconds
  static constexpr uintptr_t kTagBit = hazptr_obj<Atom>::kTagBit;

  Atom<hazptr_rec<Atom>*> hazptrs_{nullptr};
  Atom<hazptr_obj<Atom>*> retired_{nullptr};
  Atom<uint64_t> sync_time_{0};
  /* Using signed int for rcount_ because it may transiently be negative.
     Using signed int for all integer variables that may be involved in
     calculations related to the value of rcount_. */
  Atom<int> hcount_{0};
  Atom<int> rcount_{0};
  Atom<uint16_t> num_bulk_reclaims_{0};
  bool shutdown_{false};

  RetiredList untagged_;
  RetiredList tagged_;
  Obj* unprotected_; // List of unprotected objects being reclaimed
  ObjList children_; // Children of unprotected objects being reclaimed

 public:
  /** Constructor */
  hazptr_domain() = default;

  /** Destructor */
  ~hazptr_domain() {
    shutdown_ = true;
    reclaim_all_objects();
    free_hazptr_recs();
    DCHECK(tagged_.empty());
  }

  hazptr_domain(const hazptr_domain&) = delete;
  hazptr_domain(hazptr_domain&&) = delete;
  hazptr_domain& operator=(const hazptr_domain&) = delete;
  hazptr_domain& operator=(hazptr_domain&&) = delete;

  /** retire - nonintrusive - allocates memory */
  template <typename T, typename D = std::default_delete<T>>
  void retire(T* obj, D reclaim = {}) {
    struct hazptr_retire_node : hazptr_obj<Atom> {
      std::unique_ptr<T, D> obj_;
      hazptr_retire_node(T* retireObj, D toReclaim)
          : obj_{retireObj, std::move(toReclaim)} {}
    };

    auto node = new hazptr_retire_node(obj, std::move(reclaim));
    node->reclaim_ = [](hazptr_obj<Atom>* p, hazptr_obj_list<Atom>&) {
      delete static_cast<hazptr_retire_node*>(p);
    };
    hazptr_obj_list<Atom> l(node);
    push_retired(l);
  }
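
  /* Illustrative sketch (hypothetical names, for exposition only): a
     specific domain may be given a custom reclamation function, e.g.
       my_domain.retire(node, [](Node* p) { node_pool.free(p); });
     With no second argument, std::default_delete<T> is used. */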

  /** cleanup */
  void cleanup() noexcept {
    relaxed_cleanup();
    wait_for_zero_bulk_reclaims(); // wait for concurrent bulk_reclaim calls
  }

  /** cleanup_batch_tag */
  void cleanup_batch_tag(const hazptr_obj_batch<Atom>* batch) noexcept {
    auto tag = reinterpret_cast<uintptr_t>(batch) + kTagBit;
    auto obj = tagged_.pop_all(RetiredList::kAlsoLock);
    ObjList match, nomatch;
    list_match_tag(tag, obj, match, nomatch);
    if (unprotected_) { // There must be an ongoing do_reclamation
      ObjList match2, nomatch2;
      list_match_tag(tag, unprotected_, match2, nomatch2);
      match.splice(match2);
      unprotected_ = nomatch2.head();
    }
    if (children_.head()) {
      ObjList match2, nomatch2;
      list_match_tag(tag, children_.head(), match2, nomatch2);
      match.splice(match2);
      children_ = std::move(nomatch2);
    }
    auto count = nomatch.count();
    nomatch.set_count(0);
    tagged_.push_unlock(nomatch);
    obj = match.head();
    reclaim_list_transitive(obj);
    if (count >= threshold()) {
      check_threshold_and_reclaim(tagged_, RetiredList::kAlsoLock);
    }
  }

  void
  list_match_tag(uintptr_t tag, Obj* obj, ObjList& match, ObjList& nomatch) {
    list_match_condition(
        obj, match, nomatch, [tag](Obj* o) { return o->batch_tag() == tag; });
  }

 private:
  friend void hazptr_domain_push_list<Atom>(
      hazptr_obj_list<Atom>&,
      hazptr_domain<Atom>&) noexcept;
  friend void hazptr_domain_push_retired<Atom>(
      hazptr_obj_list<Atom>&,
      bool check,
      hazptr_domain<Atom>&) noexcept;
  friend class hazptr_holder<Atom>;
  friend class hazptr_obj<Atom>;
  friend class hazptr_obj_batch<Atom>;
#if FOLLY_HAZPTR_THR_LOCAL
  friend class hazptr_tc<Atom>;
#endif

  /** hprec_acquire */
  hazptr_rec<Atom>* hprec_acquire() {
    auto rec = try_acquire_existing_hprec();
    return rec != nullptr ? rec : acquire_new_hprec();
  }

  /** hprec_release */
  void hprec_release(hazptr_rec<Atom>* hprec) noexcept {
    hprec->release();
  }

  /** push_retired */
  void push_retired(hazptr_obj_list<Atom>& l, bool check = true) {
    /*** Full fence ***/ asymmetricLightBarrier();
    while (true) {
      auto r = retired();
      l.tail()->set_next(r);
      if (retired_.compare_exchange_weak(
              r,
              l.head(),
              std::memory_order_release,
              std::memory_order_acquire)) {
        break;
      }
    }
    rcount_.fetch_add(l.count(), std::memory_order_release);
    if (check) {
      check_cleanup_and_reclaim();
    }
  }

  /** push_list */
  void push_list(ObjList& l) {
    if (l.empty()) {
      return;
    }
    uintptr_t btag = l.head()->batch_tag();
    bool tagged = ((btag & kTagBit) == kTagBit);
    RetiredList& rlist = tagged ? tagged_ : untagged_;
    /*** Full fence ***/ asymmetricLightBarrier();
    /* Only tagged lists need to be locked because tagging is used to
     * guarantee the identification of all objects with a specific
     * tag. Locking protects against concurrent hazptr_cleanup_batch_tag()
     * calls missing tagged objects. */
    bool lock =
        tagged ? RetiredList::kMayBeLocked : RetiredList::kMayNotBeLocked;
    rlist.push(l, lock);
    check_threshold_and_reclaim(rlist, lock);
  }

  /** threshold */
  int threshold() {
    auto thresh = kThreshold;
    return std::max(thresh, kMultiplier * hcount());
  }
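
  /* Worked example (illustrative): with kThreshold = 1000 and
     kMultiplier = 2, a domain with 600 hazard pointer records starts
     Type A reclamation only after max(1000, 2 * 600) = 1200 retired
     objects accumulate in the corresponding retired list. */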

  /** check_threshold_and_reclaim */
  void check_threshold_and_reclaim(RetiredList& rlist, bool lock) {
    if (!(lock && rlist.check_lock()) &&
        rlist.check_threshold_try_zero_count(threshold())) {
      do_reclamation(rlist, lock);
    }
  }

  /** do_reclamation */
  void do_reclamation(RetiredList& rlist, bool lock) {
    auto obj = rlist.pop_all(lock == RetiredList::kAlsoLock);
    /*** Full fence ***/ asymmetricHeavyBarrier(AMBFlags::EXPEDITED);
    auto hprec = hazptrs_.load(std::memory_order_acquire);
    /* Read hazard pointer values into private search structure */
    Set hs;
    for (; hprec; hprec = hprec->next()) {
      hs.insert(hprec->hazptr());
    }
    /* Check objects against hazard pointer values */
    ObjList match, nomatch;
    list_match_condition(obj, match, nomatch, [&](Obj* o) {
      return hs.count(o->raw_ptr()) > 0;
    });
    /* Reclaim unprotected objects and push back protected objects and
       children of reclaimed objects */
    if (lock) {
      unprotected_ = nomatch.head();
      DCHECK(children_.empty());
      reclaim_unprotected_safe();
      match.splice(children_);
      rlist.push_unlock(match);
    } else {
      ObjList children;
      reclaim_unprotected_unsafe(nomatch.head(), children);
      match.splice(children);
      rlist.push(match, false);
    }
  }

  /** lookup_and_reclaim */
  void lookup_and_reclaim(Obj* obj, const Set& hs, ObjList& keep) {
    while (obj) {
      auto next = obj->next();
      DCHECK_NE(obj, next);
      if (hs.count(obj->raw_ptr()) == 0) {
        (*(obj->reclaim()))(obj, keep);
      } else {
        keep.push(obj);
      }
      obj = next;
    }
  }

  /** list_match_condition */
  template <typename Cond>
  void list_match_condition(
      Obj* obj,
      ObjList& match,
      ObjList& nomatch,
      const Cond& cond) {
    while (obj) {
      auto next = obj->next();
      DCHECK_NE(obj, next);
      if (cond(obj)) {
        match.push(obj);
      } else {
        nomatch.push(obj);
      }
      obj = next;
    }
  }

  /** reclaim_unprotected_safe */
  void reclaim_unprotected_safe() {
    while (unprotected_) {
      auto obj = unprotected_;
      unprotected_ = obj->next();
      (*(obj->reclaim()))(obj, children_);
    }
  }

  /** reclaim_unprotected_unsafe */
  void reclaim_unprotected_unsafe(Obj* obj, ObjList& children) {
    while (obj) {
      auto next = obj->next();
      (*(obj->reclaim()))(obj, children);
      obj = next;
    }
  }

  /** reclaim_unconditional */
  void reclaim_unconditional(Obj* head, ObjList& children) {
    while (head) {
      auto next = head->next();
      (*(head->reclaim()))(head, children);
      head = next;
    }
  }

  hazptr_rec<Atom>* head() const noexcept {
    return hazptrs_.load(std::memory_order_acquire);
  }

  hazptr_obj<Atom>* retired() const noexcept {
    return retired_.load(std::memory_order_acquire);
  }

  int hcount() const noexcept {
    return hcount_.load(std::memory_order_acquire);
  }

  int rcount() const noexcept {
    return rcount_.load(std::memory_order_acquire);
  }

  bool reached_threshold(int rc, int hc) const noexcept {
    return rc >= kThreshold && rc >= kMultiplier * hc;
  }

  void reclaim_all_objects() {
    auto head = retired_.exchange(nullptr);
    reclaim_list_transitive(head);
    head = untagged_.pop_all(RetiredList::kDontLock);
    reclaim_list_transitive(head);
  }

  void reclaim_list_transitive(Obj* head) {
    while (head) {
      ObjList children;
      reclaim_unconditional(head, children);
      head = children.head();
    }
  }

  void free_hazptr_recs() {
    /* Leak the hazard pointers for the default domain to avoid
       destruction order issues with thread caches. */
    if (this == &default_hazptr_domain<Atom>()) {
      return;
    }
    auto rec = head();
    while (rec) {
      auto next = rec->next();
      DCHECK(!rec->active());
      delete rec;
      rec = next;
    }
  }

  void check_cleanup_and_reclaim() {
    if (try_timed_cleanup()) {
      return;
    }
    if (reached_threshold(rcount(), hcount())) {
      try_bulk_reclaim();
    }
  }

  void relaxed_cleanup() noexcept {
#if FOLLY_HAZPTR_THR_LOCAL
    hazptr_obj<Atom>* h = nullptr;
    hazptr_obj<Atom>* t = nullptr;
    for (hazptr_priv<Atom>& priv :
         hazptr_priv_singleton<Atom>::accessAllThreads()) {
      priv.collect(h, t);
    }
    if (h) {
      DCHECK(t);
      hazptr_obj_list<Atom> l(h, t, 0);
      push_retired(l);
    }
#endif
    rcount_.store(0, std::memory_order_release);
    bulk_reclaim(true);
  }

  void wait_for_zero_bulk_reclaims() {
    while (num_bulk_reclaims_.load(std::memory_order_acquire) > 0) {
      std::this_thread::yield();
    }
  }

  void try_bulk_reclaim() {
    auto hc = hcount();
    auto rc = rcount();
    if (!reached_threshold(rc, hc)) {
      return;
    }
    rc = rcount_.exchange(0, std::memory_order_release);
    if (!reached_threshold(rc, hc)) {
      /* No need to add rc back to rcount_. At least one concurrent
         try_bulk_reclaim will proceed to bulk_reclaim. */
      return;
    }
    bulk_reclaim();
  }

  void bulk_reclaim(bool transitive = false) {
    num_bulk_reclaims_.fetch_add(1, std::memory_order_acquire);
    while (true) {
      auto obj = retired_.exchange(nullptr, std::memory_order_acquire);
      /*** Full fence ***/ asymmetricHeavyBarrier(AMBFlags::EXPEDITED);
      auto rec = hazptrs_.load(std::memory_order_acquire);
      /* Part 1 - read hazard pointer values into private search structure */
      std::unordered_set<const void*> hashset; // TODO: lock-free fixed hash set
      for (; rec; rec = rec->next()) {
        hashset.insert(rec->hazptr());
      }
      /* Part 2 - for each retired object, reclaim if no match */
      if (bulk_lookup_and_reclaim(obj, hashset) || !transitive) {
        break;
      }
    }
    num_bulk_reclaims_.fetch_sub(1, std::memory_order_release);
  }

  bool bulk_lookup_and_reclaim(
      hazptr_obj<Atom>* obj,
      const std::unordered_set<const void*>& hashset) {
    hazptr_obj_list<Atom> children;
    hazptr_obj_list<Atom> matched;
    while (obj) {
      auto next = obj->next();
      DCHECK_NE(obj, next);
      if (hashset.count(obj->raw_ptr()) == 0) {
        (*(obj->reclaim()))(obj, children);
      } else {
        matched.push(obj);
      }
      obj = next;
    }
#if FOLLY_HAZPTR_THR_LOCAL
    if (!shutdown_) {
      hazptr_priv_tls<Atom>().push_all_to_domain(false);
    }
#endif
    bool done = ((children.count() == 0) && (retired() == nullptr));
    matched.splice(children);
    if (matched.count() > 0) {
      push_retired(matched, false /* don't call bulk_reclaim recursively */);
    }
    return done;
  }

  bool try_timed_cleanup() {
    uint64_t time = std::chrono::duration_cast<std::chrono::nanoseconds>(
                        std::chrono::steady_clock::now().time_since_epoch())
                        .count();
    auto prevtime = sync_time_.load(std::memory_order_relaxed);
    if (time < prevtime ||
        !sync_time_.compare_exchange_strong(
            prevtime, time + kSyncTimePeriod, std::memory_order_relaxed)) {
      return false;
    }
    relaxed_cleanup(); // calling the regular cleanup() here may self-deadlock
    return true;
  }

  hazptr_rec<Atom>* try_acquire_existing_hprec() {
    auto rec = head();
    while (rec) {
      auto next = rec->next();
      if (rec->try_acquire()) {
        return rec;
      }
      rec = next;
    }
    return nullptr;
  }

  hazptr_rec<Atom>* acquire_new_hprec() {
    auto rec = new hazptr_rec<Atom>;
    rec->set_active();
    rec->set_domain(this);
    while (true) {
      auto h = head();
      rec->set_next(h);
      if (hazptrs_.compare_exchange_weak(
              h, rec, std::memory_order_release, std::memory_order_acquire)) {
        break;
      }
    }
    hcount_.fetch_add(1);
    return rec;
  }
}; // hazptr_domain

/**
 * Free functions related to hazptr domains
 */

/** default_hazptr_domain: Returns reference to the default domain */

template <template <typename> class Atom>
struct hazptr_default_domain_helper {
  static FOLLY_ALWAYS_INLINE hazptr_domain<Atom>& get() {
    static hazptr_domain<Atom> domain;
    return domain;
  }
};

template <>
struct hazptr_default_domain_helper<std::atomic> {
  static FOLLY_ALWAYS_INLINE hazptr_domain<std::atomic>& get() {
    return default_domain;
  }
};

template <template <typename> class Atom>
FOLLY_ALWAYS_INLINE hazptr_domain<Atom>& default_hazptr_domain() {
  return hazptr_default_domain_helper<Atom>::get();
}

/** hazptr_domain_push_retired: push a list of retired objects into a domain */
template <template <typename> class Atom>
void hazptr_domain_push_retired(
    hazptr_obj_list<Atom>& l,
    bool check,
    hazptr_domain<Atom>& domain) noexcept {
  domain.push_retired(l, check);
}

/** hazptr_domain_push_list */
template <template <typename> class Atom>
void hazptr_domain_push_list(
    hazptr_obj_list<Atom>& l,
    hazptr_domain<Atom>& domain) noexcept {
  domain.push_list(l);
}

/** hazptr_retire */
template <template <typename> class Atom, typename T, typename D>
FOLLY_ALWAYS_INLINE void hazptr_retire(T* obj, D reclaim) {
  default_hazptr_domain<Atom>().retire(obj, std::move(reclaim));
}
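
/* Usage sketch (illustrative, with hypothetical names): hazptr_retire is
   the convenience entry point into the default domain's retire():

     folly::hazptr_retire(widget); // reclaim with std::default_delete
     folly::hazptr_retire(widget, [](Widget* w) { widget_pool.free(w); });

   Calling it with a single argument assumes the default template and
   function arguments declared in Hazptr-fwd.h. */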

/** hazptr_cleanup: Reclaims all reclaimable objects retired to the domain */
template <template <typename> class Atom>
void hazptr_cleanup(hazptr_domain<Atom>& domain) noexcept {
  domain.cleanup();
}

/** hazptr_cleanup_batch_tag: Reclaims objects associated with a tag */
template <template <typename> class Atom>
void hazptr_cleanup_batch_tag(
    const hazptr_obj_batch<Atom>* batch,
    hazptr_domain<Atom>& domain) noexcept {
  domain.cleanup_batch_tag(batch);
}
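
/* Sketch (an assumption for exposition, not a prescribed pattern): a data
   structure that owns a hazptr_obj_batch can call, e.g. in its destructor,

     hazptr_cleanup_batch_tag(&batch_, domain);

   which guarantees that every object retired with that batch's tag is
   reclaimed before the call returns, per the Type B notes above. */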

} // namespace folly