/*
 * Copyright 2018-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <folly/synchronization/Hazptr-fwd.h>
#include <folly/synchronization/HazptrObj.h>

#include <glog/logging.h>

#include <atomic>
#include <cstdint>
#include <stack>

///
/// Classes related to link counted objects and automatic retirement.
///

namespace folly {
/**
 * hazptr_root
 *
 * Link to counted objects. When destroyed, it unlinks the linked
 * object, if any.
 *
 * Template parameter T must support a member function unlink(),
 * inherited from hazptr_obj_base_linked.
 *
 * Use example: Bucket heads in ConcurrentHashMap.
 */
template <typename T, template <typename> class Atom>
class hazptr_root {
  Atom<T*> link_;

 public:
  explicit hazptr_root(T* p = nullptr) noexcept : link_(p) {}

  ~hazptr_root() {
    auto p = link_.load(std::memory_order_relaxed);
    if (p) {
      p->unlink();
    }
  }

  const Atom<T*>& operator()() const noexcept {
    return link_;
  }

  Atom<T*>& operator()() noexcept {
    return link_;
  }
}; // hazptr_root
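
/*
 * Usage sketch for hazptr_root (illustrative only; Node is a
 * hypothetical type derived from hazptr_obj_base_linked, and the
 * protocol shown is an assumption pieced together from the class
 * comments in this file, not code from the library):
 *
 *   folly::hazptr_root<Node> head_; // e.g., a bucket head
 *
 *   void replace_head(Node* node) { // node not yet shared
 *     node->acquire_link_safe(); // account for the inbound link
 *     auto old = head_().exchange(node, std::memory_order_acq_rel);
 *     if (old) {
 *       old->unlink(); // retires old once its link count reaches 0
 *     }
 *   }
 *
 * When head_ itself is destroyed, ~hazptr_root unlinks the currently
 * linked node, if any.
 */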

/**
 * hazptr_obj_linked
 *
 * Base class template for link counted objects.
 * Supports:
 * - Protecting descendants of protected objects.
 * - One-pass reclamation of long immutable chains of objects.
 * - Automatic reclamation of acyclic structures.
 *
 * Two inbound link counts are maintained per object:
 * - Link count: Represents the number of links from mutable paths.
 * - Ref count: Represents the number of links from immutable paths.
 *   [Note: The ref count is one less than such links plus one if
 *   the object hasn't gone through matching with hazard pointers
 *   without finding a match. That is, a new object without inbound
 *   links has a ref count of 0 and an about-to-be-reclaimed object
 *   can be viewed to have a ref count of -1.]
 *
 * User code can increment the link and ref counts by calling
 * acquire_link and acquire_ref or their variants that require the
 * user to guarantee thread safety. There are no public functions to
 * decrement the counts explicitly. Counts are decremented implicitly
 * as described in hazptr_obj_base_linked.
 */
template <template <typename> class Atom>
class hazptr_obj_linked : public hazptr_obj<Atom> {
  using Count = uint32_t;

  // Both counts are packed into one 32-bit word: the ref count in the
  // low 16 bits and the link count in the high 16 bits.
  static constexpr Count kRef = 1u;
  static constexpr Count kLink = 1u << 16;
  static constexpr Count kRefMask = kLink - 1u;
  static constexpr Count kLinkMask = ~kRefMask;

  Atom<Count> count_{0};

 public:
  /* Increment the link count. Thread-safe. */
  void acquire_link() noexcept {
    count_inc(kLink);
  }

  /* Increment the link count. The caller must guarantee that no
     other thread accesses the counts concurrently. */
  void acquire_link_safe() noexcept {
    count_inc_safe(kLink);
  }

  /* Increment the ref count. Thread-safe. */
  void acquire_ref() noexcept {
    count_inc(kRef);
  }

  /* Increment the ref count. The caller must guarantee that no
     other thread accesses the counts concurrently. */
  void acquire_ref_safe() noexcept {
    count_inc_safe(kRef);
  }

 private:
  template <typename, template <typename> class, typename>
  friend class hazptr_obj_base_linked;

  Count count() const noexcept {
    return count_.load(std::memory_order_acquire);
  }

  void count_set(Count val) noexcept {
    count_.store(val, std::memory_order_release);
  }

  void count_inc(Count add) noexcept {
    auto oldval = count_.fetch_add(add, std::memory_order_acq_rel);
    DCHECK_LT(oldval & kLinkMask, kLinkMask); // no link count overflow
    DCHECK_LT(oldval & kRefMask, kRefMask); // no ref count overflow
  }

  // Non-atomic increment using plain load/store; callers must
  // guarantee exclusive access to the counts.
  void count_inc_safe(Count add) noexcept {
    auto oldval = count();
    count_set(oldval + add);
    DCHECK_LT(oldval & kLinkMask, kLinkMask); // no link count overflow
    DCHECK_LT(oldval & kRefMask, kRefMask); // no ref count overflow
  }

  bool count_cas(Count& oldval, Count newval) noexcept {
    return count_.compare_exchange_weak(
        oldval, newval, std::memory_order_acq_rel, std::memory_order_acquire);
  }

  // Decrement the link count. Returns true iff this was the last
  // link and the ref count is zero, i.e., the object may be retired.
  bool release_link() noexcept {
    auto sub = kLink;
    auto oldval = count();
    while (true) {
      DCHECK_GT(oldval & kLinkMask, 0u);
      if (oldval == kLink) {
        // Exactly one link and no refs remain.
        count_set(0u);
        return true;
      }
      if (count_cas(oldval, oldval - sub)) {
        return false;
      }
    }
  }

  // Decrement the ref count. Returns true iff this was the last
  // reference; a count of zero stands for one outstanding ref (see
  // the note in the class comment above).
  bool release_ref() noexcept {
    auto sub = kRef;
    auto oldval = count();
    while (true) {
      if (oldval == 0u) {
        if (kIsDebug) {
          // Mark the conceptual ref count of -1 to help catch misuse.
          count_set(kRefMask);
        }
        return true;
      }
      DCHECK_GT(oldval & kRefMask, 0u);
      if (count_cas(oldval, oldval - sub)) {
        return false;
      }
    }
  }

  // Convert one link into one ref. Returns true iff the converted
  // link was the last link.
  bool downgrade_link() noexcept {
    auto oldval = count();
    auto sub = kLink - kRef;
    while (true) {
      if (oldval == kLink) {
        count_set(kRef);
        return true;
      }
      if (count_cas(oldval, oldval - sub)) {
        return (oldval & kLinkMask) == kLink;
      }
    }
  }
}; // hazptr_obj_linked
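
/*
 * Count packing example (for exposition): on a fresh object count_ is
 * 0. After obj->acquire_link() and two calls to obj->acquire_ref(),
 * count_ holds (1u << 16) | 2u, i.e., link count 1 in the high 16
 * bits and ref count 2 in the low 16 bits. The *_safe variants make
 * the same increments with plain load/store, so they are safe only
 * while the caller has exclusive access to the object, e.g., before
 * it is shared with other threads.
 */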

/**
 * hazptr_obj_base_linked
 *
 * Base class template for link counted objects.
 *
 * Supports both *explicit* and *implicit* object retirement, depending
 * on whether object removal is *certain* or *uncertain*.
 *
 * A derived object's removal is certain when it is always possible
 * to reason based only on the local state of user code when an
 * object is removed, i.e., becomes unreachable from static
 * roots. Otherwise, removal is uncertain.
 *
 * For example, removal in UnboundedQueue is certain, whereas removal
 * in ConcurrentHashMap is uncertain.
 *
 * If removal is certain, user code can call retire() explicitly.
 * Otherwise, user code should call unlink() whenever an inbound
 * link to the object is changed. Calls to unlink() automatically
 * retire the object when the link count is decremented to 0. [Note:
 * A ref count greater than 0 does not delay retiring an object.]
 *
 * Derived type T must define a member function template
 *   template <typename S>
 *   void push_links(bool m, S& s) {
 *     if (m) { // m stands for mutable links
 *       // for each outbound mutable pointer p call
 *       //   s.push(p);
 *     } else {
 *       // for each outbound immutable pointer p call
 *       //   s.push(p);
 *     }
 *   }
 *
 * T may have both, either, or none of the two types of outbound
 * links. For example, UnboundedQueue Segment has an immutable
 * link, and ConcurrentHashMap NodeT has a mutable link.
 */
template <typename T, template <typename> class Atom, typename D>
class hazptr_obj_base_linked : public hazptr_obj_linked<Atom>,
                               public hazptr_deleter<T, D> {
  using Stack = std::stack<hazptr_obj_base_linked<T, Atom, D>*>;

 public:
  /* retire: Retire object explicitly; to be used only when its
     removal is certain (see class comment above). */
  void retire() {
    this->pre_retire_check(); // defined in hazptr_obj
    set_reclaim();
    auto& domain = default_hazptr_domain<Atom>();
    this->push_obj(domain); // defined in hazptr_obj
  }

  /* unlink: Retire object if the last link is released. */
  void unlink() {
    if (this->release_link()) { // defined in hazptr_obj_linked
      downgrade_retire_immutable_descendants();
      retire();
    }
  }

  /* unlink_and_reclaim_unchecked: Reclaim object if the last link is
     released, without checking hazard pointers. To be called only
     when the object cannot possibly be protected by any hazard
     pointers. */
  void unlink_and_reclaim_unchecked() {
    if (this->release_link()) { // defined in hazptr_obj_linked
      DCHECK_EQ(this->count(), 0u);
      delete_self();
    }
  }

 private:
  // Install the reclamation function invoked when hazard pointer
  // matching finds no match for this object.
  void set_reclaim() noexcept {
    this->reclaim_ = [](hazptr_obj<Atom>* p, hazptr_obj_list<Atom>& l) {
      auto obj = static_cast<hazptr_obj_base_linked<T, Atom, D>*>(p);
      if (obj->release_ref()) { // defined in hazptr_obj_linked
        obj->release_delete_immutable_descendants();
        obj->release_retire_mutable_children(l);
        obj->delete_self();
      }
    };
  }

  // Downgrade links to immutable descendants into refs and retire
  // descendants whose last link was downgraded. Iterative, to avoid
  // deep recursion on long chains.
  void downgrade_retire_immutable_descendants() {
    Stack s;
    call_push_links(false, s);
    while (!s.empty()) {
      auto p = s.top();
      s.pop();
      if (p && p->downgrade_link()) {
        p->call_push_links(false, s);
        p->retire();
      }
    }
  }

  // Release refs to immutable descendants and delete descendants
  // whose last ref was released (one-pass reclamation of immutable
  // chains).
  void release_delete_immutable_descendants() {
    Stack s;
    call_push_links(false, s);
    while (!s.empty()) {
      auto p = s.top();
      s.pop();
      if (p && p->release_ref()) {
        p->call_push_links(false, s);
        p->delete_self();
      }
    }
  }

  // Release links to mutable children; children whose last link is
  // released are pushed on the list l as if retired.
  void release_retire_mutable_children(hazptr_obj_list<Atom>& l) {
    Stack s;
    call_push_links(true, s);
    while (!s.empty()) {
      auto p = s.top();
      s.pop();
      if (p->release_link()) {
        p->pre_retire_check(); // defined in hazptr_obj
        p->set_reclaim();
        l.push(p); // treated as if retired immediately
      }
    }
  }

  void call_push_links(bool m, Stack& s) {
    static_cast<T*>(this)->push_links(m, s); // to be defined in T
  }

  void delete_self() {
    this->delete_obj(static_cast<T*>(this)); // defined in hazptr_deleter
  }
}; // hazptr_obj_base_linked
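
/*
 * Sketch of a derived type with one mutable outbound link
 * (illustrative only; Node and its member next_ are hypothetical,
 * modeled on the push_links requirement documented above):
 *
 *   class Node : public folly::hazptr_obj_base_linked<Node> {
 *     std::atomic<Node*> next_{nullptr};
 *
 *    public:
 *     template <typename S>
 *     void push_links(bool m, S& s) {
 *       if (m) { // this type has only mutable outbound links
 *         auto p = next_.load(std::memory_order_acquire);
 *         if (p) {
 *           s.push(p);
 *         }
 *       }
 *     }
 *   };
 *
 * With this in place, unlink() retires a removed Node once its link
 * count reaches 0, and reclamation releases the link to next_
 * iteratively, without deep recursion on long chains.
 */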

} // namespace folly