1 | // |
2 | // immer: immutable data structures for C++ |
3 | // Copyright (C) 2016, 2017, 2018 Juan Pedro Bolivar Puente |
4 | // |
5 | // This software is distributed under the Boost Software License, Version 1.0. |
6 | // See accompanying file LICENSE or copy at http://boost.org/LICENSE_1_0.txt |
7 | // |
8 | |
9 | #pragma once |
10 | |
11 | #include <immer/box.hpp> |
12 | #include <immer/refcount/no_refcount_policy.hpp> |
13 | |
14 | #include <atomic> |
15 | #include <type_traits> |
16 | |
17 | namespace immer { |
18 | |
19 | namespace detail { |
20 | |
21 | template <typename T, typename MemoryPolicy> |
22 | struct refcount_atom_impl |
23 | { |
24 | using box_type = box<T, MemoryPolicy>; |
25 | using value_type = T; |
26 | using memory_policy = MemoryPolicy; |
27 | using spinlock_t = typename MemoryPolicy::refcount::spinlock_type; |
28 | using scoped_lock_t = typename spinlock_t::scoped_lock; |
29 | |
30 | refcount_atom_impl(const refcount_atom_impl&) = delete; |
31 | refcount_atom_impl(refcount_atom_impl&&) = delete; |
32 | refcount_atom_impl& operator=(const refcount_atom_impl&) = delete; |
33 | refcount_atom_impl& operator=(refcount_atom_impl&&) = delete; |
34 | |
35 | refcount_atom_impl(box_type b) |
36 | : impl_{std::move(b)} |
37 | {} |
38 | |
39 | box_type load() const |
40 | { |
41 | scoped_lock_t lock{lock_}; |
42 | return impl_; |
43 | } |
44 | |
45 | void store(box_type b) |
46 | { |
47 | scoped_lock_t lock{lock_}; |
48 | impl_ = std::move(b); |
49 | } |
50 | |
51 | box_type exchange(box_type b) |
52 | { |
53 | { |
54 | scoped_lock_t lock{lock_}; |
55 | swap(b, impl_); |
56 | } |
57 | return b; |
58 | } |
59 | |
60 | template <typename Fn> |
61 | box_type update(Fn&& fn) |
62 | { |
63 | while (true) { |
64 | auto oldv = load(); |
65 | auto newv = oldv.update(fn); |
66 | { |
67 | scoped_lock_t lock{lock_}; |
68 | if (oldv.impl_ == impl_.impl_) { |
69 | impl_ = newv; |
70 | return { newv }; |
71 | } |
72 | } |
73 | } |
74 | } |
75 | |
76 | private: |
77 | mutable spinlock_t lock_; |
78 | box_type impl_; |
79 | }; |
80 | |
81 | template <typename T, typename MemoryPolicy> |
82 | struct gc_atom_impl |
83 | { |
84 | using box_type = box<T, MemoryPolicy>; |
85 | using value_type = T; |
86 | using memory_policy = MemoryPolicy; |
87 | |
88 | static_assert( |
89 | std::is_same<typename MemoryPolicy::refcount, |
90 | no_refcount_policy>::value, |
91 | "gc_atom_impl can only be used when there is no refcount!" ); |
92 | |
93 | gc_atom_impl(const gc_atom_impl&) = delete; |
94 | gc_atom_impl(gc_atom_impl&&) = delete; |
95 | gc_atom_impl& operator=(const gc_atom_impl&) = delete; |
96 | gc_atom_impl& operator=(gc_atom_impl&&) = delete; |
97 | |
98 | gc_atom_impl(box_type b) |
99 | : impl_{b.impl_} |
100 | {} |
101 | |
102 | box_type load() const |
103 | { return {impl_.load()}; } |
104 | |
105 | void store(box_type b) |
106 | { impl_.store(b.impl_); } |
107 | |
108 | box_type exchange(box_type b) |
109 | { return {impl_.exchange(b.impl_)}; } |
110 | |
111 | template <typename Fn> |
112 | box_type update(Fn&& fn) |
113 | { |
114 | while (true) { |
115 | auto oldv = box_type{impl_.load()}; |
116 | auto newv = oldv.update(fn); |
117 | if (impl_.compare_exchange_weak(oldv.impl_, newv.impl_)) |
118 | return { newv }; |
119 | } |
120 | } |
121 | |
122 | private: |
123 | std::atomic<typename box_type::holder*> impl_; |
124 | }; |
125 | |
126 | } // namespace detail |
127 | |
128 | /*! |
129 | * Stores for boxed values of type `T` in a thread-safe manner. |
130 | * |
131 | * @see box |
132 | * |
133 | * @rst |
134 | * |
 * .. warning:: If the memory policy used includes thread-unsafe reference
 *    counting, no thread safety is assumed, and the atom becomes thread
 *    unsafe too!
137 | * |
138 | * .. note:: ``box<T>`` provides a value based box of type ``T``, this is, we can |
139 | * think about it as a value-based version of ``std::shared_ptr``. In a |
140 | * similar fashion, ``atom<T>`` is in spirit the value-based equivalent of |
141 | * C++20 ``std::atomic_shared_ptr``. However, the API does not follow |
142 | * ``std::atomic`` interface closely, since it attempts to be a higher level |
143 | * construction, most similar to Clojure's ``(atom)``. It is remarkable in |
 * particular that, since the ``box<T>`` underlying object is immutable, using
 * ``atom<T>`` is fully thread-safe in ways that ``std::atomic_shared_ptr`` is
 * not. This is so because dereferencing the underlying pointer in a
 * ``std::atomic_shared_ptr`` may require further synchronization, in particular
 * when invoking non-const methods.
149 | * |
150 | * @endrst |
151 | */ |
152 | template <typename T, |
153 | typename MemoryPolicy = default_memory_policy> |
154 | class atom |
155 | { |
156 | public: |
157 | using box_type = box<T, MemoryPolicy>; |
158 | using value_type = T; |
159 | using memory_policy = MemoryPolicy; |
160 | |
161 | atom(const atom&) = delete; |
162 | atom(atom&&) = delete; |
163 | void operator=(const atom&) = delete; |
164 | void operator=(atom&&) = delete; |
165 | |
166 | /*! |
167 | * Constructs an atom holding a value `b`; |
168 | */ |
169 | atom(box_type v={}) |
170 | : impl_{std::move(v)} |
171 | {} |
172 | |
173 | /*! |
174 | * Sets a new value in the atom. |
175 | */ |
176 | atom& operator=(box_type b) |
177 | { |
178 | impl_.store(std::move(b)); |
179 | return *this; |
180 | } |
181 | |
182 | /*! |
183 | * Reads the currently stored value in a thread-safe manner. |
184 | */ |
185 | operator box_type() const |
186 | { return impl_.load(); } |
187 | |
188 | /*! |
189 | * Reads the currently stored value in a thread-safe manner. |
190 | */ |
191 | operator value_type() const |
192 | { return *impl_.load(); } |
193 | |
194 | /*! |
195 | * Reads the currently stored value in a thread-safe manner. |
196 | */ |
197 | IMMER_NODISCARD box_type load() const |
198 | { return impl_.load(); } |
199 | |
200 | /*! |
201 | * Stores a new value in a thread-safe manner. |
202 | */ |
203 | void store(box_type b) |
204 | { impl_.store(std::move(b)); } |
205 | |
206 | /*! |
207 | * Stores a new value and returns the old value, in a thread-safe manner. |
208 | */ |
209 | IMMER_NODISCARD box_type exchange(box_type b) |
210 | { return impl_.exchange(std::move(b)); } |
211 | |
212 | /*! |
213 | * Stores the result of applying `fn` to the current value atomically and |
214 | * returns the new resulting value. |
215 | * |
216 | * @rst |
217 | * |
218 | * .. warning:: ``fn`` must be a pure function and have no side effects! The |
219 | * function might be evaluated multiple times when multiple threads |
220 | * content to update the value. |
221 | * |
222 | * @endrst |
223 | */ |
224 | template <typename Fn> |
225 | box_type update(Fn&& fn) |
226 | { return impl_.update(std::forward<Fn>(fn)); } |
227 | |
228 | private: |
229 | struct get_refcount_atom_impl |
230 | { |
231 | template <typename U, typename MP> |
232 | struct apply |
233 | { |
234 | using type = detail::refcount_atom_impl<U, MP>; |
235 | }; |
236 | }; |
237 | |
238 | struct get_gc_atom_impl |
239 | { |
240 | template <typename U, typename MP> |
241 | struct apply |
242 | { |
243 | using type = detail::gc_atom_impl<U, MP>; |
244 | }; |
245 | }; |
246 | |
247 | // If we are using "real" garbage collection (we assume this when we use |
248 | // `no_refcount_policy`), we just store the pointer in an atomic. If we use |
249 | // reference counting, we rely on the reference counting spinlock. |
250 | using impl_t = typename std::conditional_t< |
251 | std::is_same<typename MemoryPolicy::refcount, no_refcount_policy>::value, |
252 | get_gc_atom_impl, |
253 | get_refcount_atom_impl |
254 | >::template apply<T, MemoryPolicy>::type; |
255 | |
256 | impl_t impl_; |
257 | }; |
258 | |
259 | } |
260 | |