/*
 * Copyright 2011-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// @author: Xin Liu <xliux@fb.com>

#pragma once

#include <algorithm>
#include <atomic>
#include <climits>
#include <cmath>
#include <limits>
#include <memory>
#include <mutex>
#include <type_traits>
#include <vector>

#include <boost/noncopyable.hpp>
#include <boost/random.hpp>
#include <boost/type_traits.hpp>
#include <glog/logging.h>

#include <folly/Likely.h>
#include <folly/Memory.h>
#include <folly/ThreadLocal.h>
#include <folly/synchronization/MicroSpinLock.h>

namespace folly {
namespace detail {

template <typename ValT, typename NodeT>
class csl_iterator;

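// A node in the skip list: stores one element of type T together with a
// variable-length trailing array of atomic forward pointers (one per
// layer). Because of that trailing array, nodes can only be created via
// create() and released via destroy().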
template <typename T>
class SkipListNode : private boost::noncopyable {
  enum : uint16_t {
    IS_HEAD_NODE = 1,
    MARKED_FOR_REMOVAL = (1 << 1),
    FULLY_LINKED = (1 << 2),
  };

 public:
  typedef T value_type;

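  // Allocate storage large enough for `height` layers and construct the
  // node in place; the trailing skip_[] array lives in the same allocation.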
  template <
      typename NodeAlloc,
      typename U,
      typename =
          typename std::enable_if<std::is_convertible<U, T>::value>::type>
  static SkipListNode*
  create(NodeAlloc& alloc, int height, U&& data, bool isHead = false) {
    DCHECK(height >= 1 && height < 64) << height;

    size_t size =
        sizeof(SkipListNode) + height * sizeof(std::atomic<SkipListNode*>);
    auto storage = std::allocator_traits<NodeAlloc>::allocate(alloc, size);
    // do placement new
    return new (storage)
        SkipListNode(uint8_t(height), std::forward<U>(data), isHead);
  }

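  // Destroy and deallocate a node previously obtained from create(), using
  // the same size computation as the allocation.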
  template <typename NodeAlloc>
  static void destroy(NodeAlloc& alloc, SkipListNode* node) {
    size_t size = sizeof(SkipListNode) +
        node->height_ * sizeof(std::atomic<SkipListNode*>);
    node->~SkipListNode();
    std::allocator_traits<NodeAlloc>::deallocate(alloc, node, size);
  }

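  // True when destroying a node is a no-op: both deallocation and the node
  // destructor do nothing (e.g. an arena allocator holding trivially
  // destructible data). NodeRecycler uses this to skip recycling entirely.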
  template <typename NodeAlloc>
  struct DestroyIsNoOp : StrictConjunction<
                             AllocatorHasTrivialDeallocate<NodeAlloc>,
                             boost::has_trivial_destructor<SkipListNode>> {};

  // Copy `node` into this (taller) head node; the caller must hold the lock.
  SkipListNode* copyHead(SkipListNode* node) {
    DCHECK(node != nullptr && height_ > node->height_);
    setFlags(node->getFlags());
    for (uint8_t i = 0; i < node->height_; ++i) {
      setSkip(i, node->skip(i));
    }
    return this;
  }

  inline SkipListNode* skip(int layer) const {
    DCHECK_LT(layer, height_);
    return skip_[layer].load(std::memory_order_consume);
  }

  // The next valid (not marked-for-removal) node in the bottom-layer list.
  SkipListNode* next() {
    SkipListNode* node;
    for (node = skip(0); (node != nullptr && node->markedForRemoval());
         node = node->skip(0)) {
    }
    return node;
  }

  void setSkip(uint8_t h, SkipListNode* next) {
    DCHECK_LT(h, height_);
    skip_[h].store(next, std::memory_order_release);
  }

  value_type& data() {
    return data_;
  }
  const value_type& data() const {
    return data_;
  }
  int maxLayer() const {
    return height_ - 1;
  }
  int height() const {
    return height_;
  }

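  // Lock the node's spin lock; the lock is released when the returned guard
  // goes out of scope.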
  std::unique_lock<MicroSpinLock> acquireGuard() {
    return std::unique_lock<MicroSpinLock>(spinLock_);
  }

  bool fullyLinked() const {
    return getFlags() & FULLY_LINKED;
  }
  bool markedForRemoval() const {
    return getFlags() & MARKED_FOR_REMOVAL;
  }
  bool isHeadNode() const {
    return getFlags() & IS_HEAD_NODE;
  }

  void setIsHeadNode() {
    setFlags(uint16_t(getFlags() | IS_HEAD_NODE));
  }
  void setFullyLinked() {
    setFlags(uint16_t(getFlags() | FULLY_LINKED));
  }
  void setMarkedForRemoval() {
    setFlags(uint16_t(getFlags() | MARKED_FOR_REMOVAL));
  }

 private:
  // Note: this may only be called from create(), via placement new.
  template <typename U>
  SkipListNode(uint8_t height, U&& data, bool isHead)
      : height_(height), data_(std::forward<U>(data)) {
    spinLock_.init();
    setFlags(0);
    if (isHead) {
      setIsHeadNode();
    }
    // explicitly construct the trailing array of atomic pointers
    for (uint8_t i = 0; i < height_; ++i) {
      new (&skip_[i]) std::atomic<SkipListNode*>(nullptr);
    }
  }

  ~SkipListNode() {
    for (uint8_t i = 0; i < height_; ++i) {
      skip_[i].~atomic();
    }
  }

  uint16_t getFlags() const {
    return flags_.load(std::memory_order_consume);
  }
  void setFlags(uint16_t flags) {
    flags_.store(flags, std::memory_order_release);
  }

  // TODO(xliu): on x86_64, it's possible to squeeze these into
  // skip_[0] to maybe save 8 bytes depending on the data alignments.
  // NOTE: currently this is x86_64 only anyway, due to the
  // MicroSpinLock.
  std::atomic<uint16_t> flags_;
  const uint8_t height_;
  MicroSpinLock spinLock_;

  value_type data_;

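  // Variable-length array of forward pointers, one per layer; the actual
  // length is height_. This must remain the last member, since the extra
  // elements live in the over-allocated storage from create().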
  std::atomic<SkipListNode*> skip_[0];
};

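// Generates random node heights following a geometric distribution with
// skip probability 1/e: height h is drawn with probability
// (1 - 1/e) * (1/e)^(h-1), capped at the requested maxHeight. The cumulative
// probabilities and a per-height size limit table are precomputed once in
// the singleton.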
class SkipListRandomHeight {
  enum { kMaxHeight = 64 };

 public:
  // make it a singleton.
  static SkipListRandomHeight* instance() {
    static SkipListRandomHeight instance_;
    return &instance_;
  }

  int getHeight(int maxHeight) const {
    DCHECK_LE(maxHeight, kMaxHeight) << "max height too big!";
    double p = randomProb();
    for (int i = 0; i < maxHeight; ++i) {
      if (p < lookupTable_[i]) {
        return i + 1;
      }
    }
    return maxHeight;
  }

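  // Size-limit table entry for the given height (roughly e^height); the skip
  // list consults it when deciding whether to grow to a taller head node.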
  size_t getSizeLimit(int height) const {
    DCHECK_LT(height, kMaxHeight);
    return sizeLimitTable_[height];
  }

 private:
  SkipListRandomHeight() {
    initLookupTable();
  }

  void initLookupTable() {
    // set skip prob = 1/e
    static const double kProbInv = exp(1);
    static const double kProb = 1.0 / kProbInv;
    static const size_t kMaxSizeLimit = std::numeric_limits<size_t>::max();

    double sizeLimit = 1;
    double p = lookupTable_[0] = (1 - kProb);
    sizeLimitTable_[0] = 1;
    for (int i = 1; i < kMaxHeight - 1; ++i) {
      p *= kProb;
      sizeLimit *= kProbInv;
      lookupTable_[i] = lookupTable_[i - 1] + p;
      sizeLimitTable_[i] = sizeLimit > kMaxSizeLimit
          ? kMaxSizeLimit
          : static_cast<size_t>(sizeLimit);
    }
    lookupTable_[kMaxHeight - 1] = 1;
    sizeLimitTable_[kMaxHeight - 1] = kMaxSizeLimit;
  }

  static double randomProb() {
    static ThreadLocal<boost::lagged_fibonacci2281> rng_;
    return (*rng_)();
  }

  double lookupTable_[kMaxHeight];
  size_t sizeLimitTable_[kMaxHeight];
};

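// NodeRecycler defers reclamation of nodes that have been unlinked from the
// list: every accessor holds a reference (addRef/releaseRef), and nodes
// queued via add() are only destroyed when the releasing accessor is the
// last one, so concurrent readers never touch freed memory.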
template <typename NodeType, typename NodeAlloc, typename = void>
class NodeRecycler;

template <typename NodeType, typename NodeAlloc>
class NodeRecycler<
    NodeType,
    NodeAlloc,
    typename std::enable_if<
        !NodeType::template DestroyIsNoOp<NodeAlloc>::value>::type> {
 public:
  explicit NodeRecycler(const NodeAlloc& alloc)
      : refs_(0), dirty_(false), alloc_(alloc) {
    lock_.init();
  }

  explicit NodeRecycler() : refs_(0), dirty_(false) {
    lock_.init();
  }

  ~NodeRecycler() {
    CHECK_EQ(refs(), 0);
    if (nodes_) {
      for (auto& node : *nodes_) {
        NodeType::destroy(alloc_, node);
      }
    }
  }

  void add(NodeType* node) {
    std::lock_guard<MicroSpinLock> g(lock_);
    if (nodes_.get() == nullptr) {
      nodes_ = std::make_unique<std::vector<NodeType*>>(1, node);
    } else {
      nodes_->push_back(node);
    }
    DCHECK_GT(refs(), 0);
    dirty_.store(true, std::memory_order_relaxed);
  }

  int addRef() {
    return refs_.fetch_add(1, std::memory_order_relaxed);
  }

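  // Typical accessor pattern (illustrative sketch, not an API contract):
  // call addRef() before traversing the list, pass any node that gets
  // unlinked to add(), and call releaseRef() when done; the last releasing
  // accessor frees the queued nodes.
  //
  //   recycler.addRef();
  //   // ... traverse; for an unlinked node n: recycler.add(n); ...
  //   recycler.releaseRef();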
  int releaseRef() {
    // We don't expect to clean the recycler immediately every time it is OK
    // to do so. It is possible that multiple accessors all release at the
    // same time and nobody cleans the recycler. If that happens, the
    // recycler will usually still get cleaned on a later release when such a
    // race doesn't occur. In the worst case the nodes are only reclaimed
    // when the skip list itself is destroyed.
    if (LIKELY(!dirty_.load(std::memory_order_relaxed) || refs() > 1)) {
      return refs_.fetch_add(-1, std::memory_order_relaxed);
    }

    std::unique_ptr<std::vector<NodeType*>> newNodes;
    {
      std::lock_guard<MicroSpinLock> g(lock_);
      if (nodes_.get() == nullptr || refs() > 1) {
        return refs_.fetch_add(-1, std::memory_order_relaxed);
      }
      // Once refs_ reaches 1 and there is no other accessor, it is safe to
      // remove all the current nodes in the recycler: we already hold the
      // lock here, so no more nodes can be added, even though new accessors
      // may show up afterwards.
      newNodes.swap(nodes_);
      dirty_.store(false, std::memory_order_relaxed);
    }

    // TODO(xliu) should we spawn a thread to do this when there is a large
    // number of nodes in the recycler?
    for (auto& node : *newNodes) {
      NodeType::destroy(alloc_, node);
    }

    // Decrease the ref count at the very end, to minimize the chance of
    // other threads acquiring lock_ to clear the deleted nodes again.
    return refs_.fetch_add(-1, std::memory_order_relaxed);
  }

  NodeAlloc& alloc() {
    return alloc_;
  }

 private:
  int refs() const {
    return refs_.load(std::memory_order_relaxed);
  }

  std::unique_ptr<std::vector<NodeType*>> nodes_;
  std::atomic<int32_t> refs_; // current number of visitors to the list
  std::atomic<bool> dirty_; // whether *nodes_ is non-empty
  MicroSpinLock lock_; // protects access to *nodes_
  NodeAlloc alloc_;
};

// In the case of an arena allocator, no recycling is necessary, and it's
// possible to save on the ConcurrentSkipList size.
template <typename NodeType, typename NodeAlloc>
class NodeRecycler<
    NodeType,
    NodeAlloc,
    typename std::enable_if<
        NodeType::template DestroyIsNoOp<NodeAlloc>::value>::type> {
 public:
  explicit NodeRecycler(const NodeAlloc& alloc) : alloc_(alloc) {}

  void addRef() {}
  void releaseRef() {}

  void add(NodeType* /* node */) {}

  NodeAlloc& alloc() {
    return alloc_;
  }

 private:
  NodeAlloc alloc_;
};

} // namespace detail
} // namespace folly