1 | /* |
2 | * Copyright 2013-present Facebook, Inc. |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. |
6 | * You may obtain a copy of the License at |
7 | * |
8 | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | * |
10 | * Unless required by applicable law or agreed to in writing, software |
11 | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. |
15 | */ |
16 | |
17 | #pragma once |
18 | |
19 | #include <array> |
20 | #include <atomic> |
21 | #include <cassert> |
22 | #include <cstddef> |
23 | #include <limits> |
24 | |
25 | #include <boost/noncopyable.hpp> |
26 | |
27 | #include <folly/Portability.h> |
28 | |
29 | namespace folly { |
30 | |
31 | /** |
32 | * An atomic bitset of fixed size (specified at compile time). |
33 | */ |
34 | template <size_t N> |
35 | class AtomicBitSet : private boost::noncopyable { |
36 | public: |
37 | /** |
38 | * Construct an AtomicBitSet; all bits are initially false. |
39 | */ |
40 | AtomicBitSet(); |
41 | |
42 | /** |
43 | * Set bit idx to true, using the given memory order. Returns the |
44 | * previous value of the bit. |
45 | * |
46 | * Note that the operation is a read-modify-write operation due to the use |
47 | * of fetch_or. |
48 | */ |
49 | bool set(size_t idx, std::memory_order order = std::memory_order_seq_cst); |
50 | |
51 | /** |
52 | * Set bit idx to false, using the given memory order. Returns the |
53 | * previous value of the bit. |
54 | * |
55 | * Note that the operation is a read-modify-write operation due to the use |
56 | * of fetch_and. |
57 | */ |
58 | bool reset(size_t idx, std::memory_order order = std::memory_order_seq_cst); |
59 | |
60 | /** |
61 | * Set bit idx to the given value, using the given memory order. Returns |
62 | * the previous value of the bit. |
63 | * |
64 | * Note that the operation is a read-modify-write operation due to the use |
65 | * of fetch_and or fetch_or. |
66 | * |
67 | * Yes, this is an overload of set(), to keep as close to std::bitset's |
68 | * interface as possible. |
69 | */ |
70 | bool set( |
71 | size_t idx, |
72 | bool value, |
73 | std::memory_order order = std::memory_order_seq_cst); |
74 | |
75 | /** |
76 | * Read bit idx. |
77 | */ |
78 | bool test(size_t idx, std::memory_order order = std::memory_order_seq_cst) |
79 | const; |
80 | |
81 | /** |
82 | * Same as test() with the default memory order. |
83 | */ |
84 | bool operator[](size_t idx) const; |
85 | |
86 | /** |
87 | * Return the size of the bitset. |
88 | */ |
89 | constexpr size_t size() const { |
90 | return N; |
91 | } |
92 | |
93 | private: |
94 | // Pick the largest lock-free type available |
95 | #if (ATOMIC_LLONG_LOCK_FREE == 2) |
96 | typedef unsigned long long BlockType; |
97 | #elif (ATOMIC_LONG_LOCK_FREE == 2) |
98 | typedef unsigned long BlockType; |
99 | #else |
100 | // Even if not lock free, what can we do? |
101 | typedef unsigned int BlockType; |
102 | #endif |
103 | typedef std::atomic<BlockType> AtomicBlockType; |
104 | |
105 | static constexpr size_t kBitsPerBlock = |
106 | std::numeric_limits<BlockType>::digits; |
107 | |
108 | static constexpr size_t blockIndex(size_t bit) { |
109 | return bit / kBitsPerBlock; |
110 | } |
111 | |
112 | static constexpr size_t bitOffset(size_t bit) { |
113 | return bit % kBitsPerBlock; |
114 | } |
115 | |
116 | // avoid casts |
117 | static constexpr BlockType kOne = 1; |
118 | |
119 | std::array<AtomicBlockType, N> data_; |
120 | }; |
121 | |
// value-initialize to zero
// The explicit `data_()` in the mem-initializer list value-initializes every
// std::atomic block (i.e. zeroes it), so all bits start out false. A
// defaulted constructor would default-initialize the atomics and leave the
// bits indeterminate (pre-C++20).
template <size_t N>
inline AtomicBitSet<N>::AtomicBitSet() : data_() {}
125 | |
126 | template <size_t N> |
127 | inline bool AtomicBitSet<N>::set(size_t idx, std::memory_order order) { |
128 | assert(idx < N * kBitsPerBlock); |
129 | BlockType mask = kOne << bitOffset(idx); |
130 | return data_[blockIndex(idx)].fetch_or(mask, order) & mask; |
131 | } |
132 | |
133 | template <size_t N> |
134 | inline bool AtomicBitSet<N>::reset(size_t idx, std::memory_order order) { |
135 | assert(idx < N * kBitsPerBlock); |
136 | BlockType mask = kOne << bitOffset(idx); |
137 | return data_[blockIndex(idx)].fetch_and(~mask, order) & mask; |
138 | } |
139 | |
140 | template <size_t N> |
141 | inline bool |
142 | AtomicBitSet<N>::set(size_t idx, bool value, std::memory_order order) { |
143 | return value ? set(idx, order) : reset(idx, order); |
144 | } |
145 | |
146 | template <size_t N> |
147 | inline bool AtomicBitSet<N>::test(size_t idx, std::memory_order order) const { |
148 | assert(idx < N * kBitsPerBlock); |
149 | BlockType mask = kOne << bitOffset(idx); |
150 | return data_[blockIndex(idx)].load(order) & mask; |
151 | } |
152 | |
153 | template <size_t N> |
154 | inline bool AtomicBitSet<N>::operator[](size_t idx) const { |
155 | return test(idx); |
156 | } |
157 | |
158 | } // namespace folly |
159 | |