1/*
2 * Copyright 2018-present Facebook, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16#pragma once
17
18#include <folly/Portability.h>
19#include <folly/synchronization/detail/Sleeper.h>
20
21#include <glog/logging.h>
22
23#include <atomic>
24#include <thread>
25
26/// Linked list class templates used in the hazard pointer library:
/// - linked_list: Sequential linked list that uses pre-existing
///   members next() and set_next().
29/// - shared_head_tail_list: Thread-safe linked list that maintains
30/// head and tail pointers. Supports push and pop_all.
31/// - shared_head_only_list: Thread-safe linked lockable list that
32/// maintains only a head pointer. Supports push and pop_all.
33
34namespace folly {
35namespace hazptr_detail {
36
37/**
38 * linked_list
39 *
40 * Template parameter Node must support set_next
41 *
42 */
template <typename Node>
class linked_list {
  Node* head_;
  Node* tail_;

 public:
  /// Constructs an empty list.
  linked_list() noexcept : head_(nullptr), tail_(nullptr) {}

  /// Constructs a list over an existing chain [head .. tail].
  /// The chain's links are taken as-is; tail's next should be null.
  explicit linked_list(Node* head, Node* tail) noexcept
      : head_(head), tail_(tail) {}

  Node* head() const noexcept {
    return head_;
  }

  Node* tail() const noexcept {
    return tail_;
  }

  bool empty() const noexcept {
    return head() == nullptr;
  }

  /// Appends node at the tail. Node must support set_next.
  void push(Node* node) noexcept {
    node->set_next(nullptr);
    if (tail_) {
      tail_->set_next(node);
    } else {
      head_ = node;
    }
    tail_ = node;
  }

  /// Appends all of l's nodes to this list and clears l.
  /// Splicing an empty list is a no-op. Without this guard, tail_
  /// would be set to nullptr while head_ stayed non-null, corrupting
  /// the list invariant (a later push would then overwrite head_ and
  /// lose the existing chain).
  void splice(linked_list& l) {
    if (l.empty()) {
      return; // nothing to splice; keep {head_, tail_} consistent
    }
    if (head() == nullptr) {
      head_ = l.head();
    } else {
      tail_->set_next(l.head());
    }
    tail_ = l.tail();
    l.clear();
  }

  /// Detaches this list from its nodes. Does not destroy nodes.
  void clear() {
    head_ = nullptr;
    tail_ = nullptr;
  }

}; // linked_list
92
93/**
94 * shared_head_tail_list
95 *
96 * Maintains head and tail pointers. Supports push and pop all
97 * operations. Pop all operation is wait-free.
98 */
template <typename Node, template <typename> class Atom = std::atomic>
class shared_head_tail_list {
  Atom<Node*> head_;
  Atom<Node*> tail_;

 public:
  shared_head_tail_list() noexcept : head_(nullptr), tail_(nullptr) {}

  // Move construction. Relaxed orders are used on the presumption that
  // a list being moved from/to is not concurrently accessed -- confirm
  // with callers.
  shared_head_tail_list(shared_head_tail_list&& o) noexcept {
    head_.store(o.head(), std::memory_order_relaxed);
    tail_.store(o.tail(), std::memory_order_relaxed);
    o.head_.store(nullptr, std::memory_order_relaxed);
    o.tail_.store(nullptr, std::memory_order_relaxed);
  }

  // Move assignment. NOTE(review): self-move-assignment would null out
  // this list's pointers; presumably callers never self-assign.
  shared_head_tail_list& operator=(shared_head_tail_list&& o) noexcept {
    head_.store(o.head(), std::memory_order_relaxed);
    tail_.store(o.tail(), std::memory_order_relaxed);
    o.head_.store(nullptr, std::memory_order_relaxed);
    o.tail_.store(nullptr, std::memory_order_relaxed);
    return *this;
  }

  // The list must have been fully popped before destruction.
  ~shared_head_tail_list() {
    DCHECK(head() == nullptr);
    DCHECK(tail() == nullptr);
  }

  // Pushes node onto the list. Loops until one of the two push paths
  // succeeds; either path can fail its CAS when racing with concurrent
  // pushes or with pop_all, in which case the state is re-examined.
  void push(Node* node) noexcept {
    bool done = false;
    while (!done) {
      if (tail()) {
        done = push_in_non_empty_list(node);
      } else {
        done = push_in_empty_list(node);
      }
    }
  }

  // Atomically takes all nodes. Wait-free: two unconditional exchanges,
  // no retry loop. Head is exchanged first; tail is taken only if a
  // non-null head was obtained.
  linked_list<Node> pop_all() noexcept {
    auto h = exchange_head();
    auto t = (h != nullptr) ? exchange_tail() : nullptr;
    return linked_list<Node>(h, t);
  }

  bool empty() const noexcept {
    return head() == nullptr;
  }

 private:
  Node* head() const noexcept {
    return head_.load(std::memory_order_acquire);
  }

  Node* tail() const noexcept {
    return tail_.load(std::memory_order_acquire);
  }

  void set_head(Node* node) noexcept {
    head_.store(node, std::memory_order_release);
  }

  bool cas_head(Node* expected, Node* node) noexcept {
    return head_.compare_exchange_weak(
        expected, node, std::memory_order_acq_rel, std::memory_order_relaxed);
  }

  bool cas_tail(Node* expected, Node* node) noexcept {
    return tail_.compare_exchange_weak(
        expected, node, std::memory_order_acq_rel, std::memory_order_relaxed);
  }

  Node* exchange_head() noexcept {
    return head_.exchange(nullptr, std::memory_order_acq_rel);
  }

  Node* exchange_tail() noexcept {
    return tail_.exchange(nullptr, std::memory_order_acq_rel);
  }

  // Links node in front of the current head. Returns false (caller
  // retries) if the head was concurrently cleared (by pop_all) or
  // changed (by another push).
  bool push_in_non_empty_list(Node* node) noexcept {
    auto h = head();
    if (h) {
      node->set_next(h); // Node must support set_next
      if (cas_head(h, node)) {
        return true;
      }
    }
    return false;
  }

  // Claims an empty list by installing node as tail, then publishes it
  // as head. Returns false (caller retries) if another thread won the
  // tail CAS first.
  bool push_in_empty_list(Node* node) noexcept {
    Node* t = nullptr;
    node->set_next(nullptr); // Node must support set_next
    if (cas_tail(t, node)) {
      set_head(node);
      return true;
    }
    return false;
  }
}; // shared_head_tail_list
200
201/**
202 * shared_head_only_list
203 *
204 * A shared singly linked list that maintains only a head pointer. It
205 * supports pop all and push list operations. Optionally the list may
206 * be locked for pop all operations. Pop all operations have locked
207 * and wait-free variants. Push operations are always lock-free.
208 *
 * Not all combinations of operations are mutually operable. The
 * following are valid combinations:
211 * - push(kMayBeLocked), pop_all(kAlsoLock), push_unlock
212 * - push(kMayNotBeLocked), pop_all(kDontLock)
213 *
214 * Locking is reentrant to prevent self deadlock.
215 */
template <typename Node, template <typename> class Atom = std::atomic>
class shared_head_only_list {
  Atom<uintptr_t> head_{0}; // lowest bit is a lock for pop all
  Atom<std::thread::id> owner_{std::thread::id()}; // id of lock-bit holder
  int reentrance_{0}; // reentrant lock depth; mutated only under the lock

  static constexpr uintptr_t kLockBit = 1u;
  static constexpr uintptr_t kUnlocked = 0u;

 public:
  static constexpr bool kAlsoLock = true;
  static constexpr bool kDontLock = false;
  static constexpr bool kMayBeLocked = true;
  static constexpr bool kMayNotBeLocked = false;

 public:
  // Prepends the nonempty list l to the shared list. Lock-free. If
  // may_be_locked, the lock bit of the observed head word is preserved
  // (stripped to recover the real pointer, re-added to the new head
  // word); otherwise the list must be unlocked (DCHECKed). Note: l is
  // not cleared here; its nodes are linked into the shared chain.
  void push(linked_list<Node>& l, bool may_be_locked) noexcept {
    if (l.empty()) {
      return;
    }
    auto oldval = head();
    while (true) {
      auto newval = reinterpret_cast<uintptr_t>(l.head());
      auto ptrval = oldval;
      auto lockbit = oldval & kLockBit;
      if (may_be_locked == kMayBeLocked) {
        // Carry the lock bit over so the push leaves the lock state
        // undisturbed.
        ptrval -= lockbit;
        newval += lockbit;
      } else {
        DCHECK_EQ(lockbit, kUnlocked);
      }
      auto ptr = reinterpret_cast<Node*>(ptrval);
      l.tail()->set_next(ptr); // Node must support set_next
      if (cas_head(oldval, newval)) {
        break;
      }
      // CAS failure refreshed oldval (passed by reference); retry with
      // the current head value.
    }
  }

  // Pops all nodes. kAlsoLock additionally acquires the pop-all lock
  // (possibly spinning); kDontLock is wait-free.
  Node* pop_all(bool lock) noexcept {
    return lock == kAlsoLock ? pop_all_lock() : pop_all_no_lock();
  }

  // Pushes list l (possibly empty) and releases the lock acquired by
  // pop_all(kAlsoLock). Must be called by the lock-holding thread
  // (DCHECKed). If the lock was taken reentrantly, only the inner
  // level is released and the head word keeps its lock bit.
  void push_unlock(linked_list<Node>& l) noexcept {
    DCHECK_EQ(owner(), std::this_thread::get_id());
    uintptr_t lockbit;
    if (reentrance_ > 0) {
      DCHECK_EQ(reentrance_, 1);
      --reentrance_;
      lockbit = kLockBit; // the outer level still holds the lock
    } else {
      clear_owner();
      lockbit = kUnlocked;
    }
    DCHECK_EQ(reentrance_, 0);
    while (true) {
      auto oldval = head();
      DCHECK_EQ(oldval & kLockBit, kLockBit); // Should be already locked
      auto ptrval = oldval - kLockBit;
      auto ptr = reinterpret_cast<Node*>(ptrval);
      auto t = l.tail();
      if (t) {
        t->set_next(ptr); // Node must support set_next
      }
      // Empty l: keep the current chain; nonempty l: l's head becomes
      // the new list head. Either way, lockbit encodes the new state.
      auto newval =
          (t == nullptr) ? ptrval : reinterpret_cast<uintptr_t>(l.head());
      newval += lockbit;
      if (cas_head(oldval, newval)) {
        break;
      }
    }
  }

  bool check_lock() const noexcept {
    return (head() & kLockBit) == kLockBit;
  }

  bool empty() const noexcept {
    return head() == 0u;
  }

 private:
  uintptr_t head() const noexcept {
    return head_.load(std::memory_order_acquire);
  }

  uintptr_t exchange_head() noexcept {
    auto newval = reinterpret_cast<uintptr_t>(nullptr);
    auto oldval = head_.exchange(newval, std::memory_order_acq_rel);
    return oldval;
  }

  // On failure, oldval is updated to the current head (acquire order)
  // so callers can retry without an extra load.
  bool cas_head(uintptr_t& oldval, uintptr_t newval) noexcept {
    return head_.compare_exchange_weak(
        oldval, newval, std::memory_order_acq_rel, std::memory_order_acquire);
  }

  // owner_ accesses use relaxed order; presumably ordering is provided
  // by the head_ lock-bit protocol -- confirm against the memory model
  // argument in the library's design notes.
  std::thread::id owner() {
    return owner_.load(std::memory_order_relaxed);
  }

  void set_owner() {
    DCHECK(owner() == std::thread::id());
    owner_.store(std::this_thread::get_id(), std::memory_order_relaxed);
  }

  void clear_owner() {
    owner_.store(std::thread::id(), std::memory_order_relaxed);
  }

  // Wait-free pop: exchange the head with null. Valid only in the
  // kMayNotBeLocked usage pattern, so the lock bit must be clear.
  Node* pop_all_no_lock() noexcept {
    auto oldval = exchange_head();
    DCHECK_EQ(oldval & kLockBit, kUnlocked);
    return reinterpret_cast<Node*>(oldval);
  }

  // Pops all nodes and acquires the lock, spinning (with Sleeper
  // backoff) while another thread holds it. Reentrant: if this thread
  // already holds the lock, reentrance_ is incremented instead of
  // re-taking ownership.
  Node* pop_all_lock() noexcept {
    folly::detail::Sleeper s;
    while (true) {
      auto oldval = head();
      auto lockbit = oldval & kLockBit;
      std::thread::id tid = std::this_thread::get_id();
      if (lockbit == kUnlocked || owner() == tid) {
        // New head word: null pointer with the lock bit set.
        auto newval = reinterpret_cast<uintptr_t>(nullptr) + kLockBit;
        if (cas_head(oldval, newval)) {
          DCHECK_EQ(reentrance_, 0);
          if (lockbit == kUnlocked) {
            set_owner();
          } else {
            ++reentrance_;
          }
          // Strip the lock bit (if any) to return the popped chain.
          auto ptrval = oldval - lockbit;
          return reinterpret_cast<Node*>(ptrval);
        }
      }
      s.sleep();
    }
  }
}; // shared_head_only_list
355
356} // namespace hazptr_detail
357} // namespace folly
358