//===------------------------ memory.cpp ----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "memory"
#ifndef _LIBCPP_HAS_NO_THREADS
#include "mutex"
#include "thread"
#if defined(__ELF__) && defined(_LIBCPP_LINK_PTHREAD_LIB)
#pragma comment(lib, "pthread")
#endif
#endif
#include "include/atomic_support.h"

_LIBCPP_BEGIN_NAMESPACE_STD

const allocator_arg_t allocator_arg = allocator_arg_t();

bad_weak_ptr::~bad_weak_ptr() _NOEXCEPT {}

const char*
bad_weak_ptr::what() const _NOEXCEPT
{
    return "bad_weak_ptr";
}

__shared_count::~__shared_count()
{
}

__shared_weak_count::~__shared_weak_count()
{
}

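// These out-of-line definitions duplicate members that are defined inline in
// <memory>; they appear to be compiled in only when the legacy ABI macro is
// set, so that the symbols stay exported for code built against an older
// libc++ that still references them.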
#if defined(_LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS)
void
__shared_count::__add_shared() _NOEXCEPT
{
    __libcpp_atomic_refcount_increment(__shared_owners_);
}

bool
__shared_count::__release_shared() _NOEXCEPT
{
    if (__libcpp_atomic_refcount_decrement(__shared_owners_) == -1)
    {
        __on_zero_shared();
        return true;
    }
    return false;
}

void
__shared_weak_count::__add_shared() _NOEXCEPT
{
    __shared_count::__add_shared();
}

void
__shared_weak_count::__add_weak() _NOEXCEPT
{
    __libcpp_atomic_refcount_increment(__shared_weak_owners_);
}

void
__shared_weak_count::__release_shared() _NOEXCEPT
{
    if (__shared_count::__release_shared())
        __release_weak();
}

#endif // _LIBCPP_DEPRECATED_ABI_LEGACY_LIBRARY_DEFINITIONS_FOR_INLINE_FUNCTIONS

void
__shared_weak_count::__release_weak() _NOEXCEPT
{
    // NOTE: The acquire load here is an optimization of the very
    // common case where a shared pointer is being destructed while
    // having no other contended references.
    //
    // BENEFIT: We avoid expensive atomic read-modify-write operations
    // like XADD and STREX in the common case. Those instructions are
    // slow and do nasty things to caches.
    //
    // IS THIS SAFE? Yes. During weak destruction, if we see that we
    // are the last reference, we know that no-one else is accessing
    // us. If someone were accessing us, then they would be doing so
    // while the last shared / weak_ptr was being destructed, and
    // that's undefined anyway.
    //
    // If we see anything other than a 0, then we have possible
    // contention, and need to use an atomicrmw primitive.
    // The same arguments don't apply for increment, where it is legal
    // (though inadvisable) to share shared_ptr references between
    // threads, and have them all get copied at once. The argument
    // also doesn't apply for __release_shared, because an outstanding
    // weak_ptr::lock() could read / modify the shared count.
    if (__libcpp_atomic_load(&__shared_weak_owners_, _AO_Acquire) == 0)
    {
        // no need to do this store, because we are about
        // to destroy everything.
        //__libcpp_atomic_store(&__shared_weak_owners_, -1, _AO_Release);
        __on_zero_shared_weak();
    }
    else if (__libcpp_atomic_refcount_decrement(__shared_weak_owners_) == -1)
        __on_zero_shared_weak();
}

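// Implements the control-block side of weak_ptr::lock(): keep trying to bump
// the shared count with a CAS loop, but only while the object is still alive.
// The count is stored biased by one, so a value of -1 means there are no
// shared owners left and the managed object has already been destroyed.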
__shared_weak_count*
__shared_weak_count::lock() _NOEXCEPT
{
    long object_owners = __libcpp_atomic_load(&__shared_owners_);
    while (object_owners != -1)
    {
        if (__libcpp_atomic_compare_exchange(&__shared_owners_,
                                             &object_owners,
                                             object_owners+1))
            return this;
    }
    return nullptr;
}

#if !defined(_LIBCPP_NO_RTTI) || !defined(_LIBCPP_BUILD_STATIC)

const void*
__shared_weak_count::__get_deleter(const type_info&) const _NOEXCEPT
{
    return nullptr;
}

#endif // !defined(_LIBCPP_NO_RTTI) || !defined(_LIBCPP_BUILD_STATIC)

#if !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)

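// Pool of mutexes backing __sp_mut. The non-lock-free atomic_... free
// functions for shared_ptr in <memory> hash the address of the shared_ptr
// object to select one of these 16 mutexes via __get_sp_mut below, which
// bounds the static storage at the cost of occasionally sharing a lock
// between unrelated pointers.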
_LIBCPP_SAFE_STATIC static const std::size_t __sp_mut_count = 16;
_LIBCPP_SAFE_STATIC static __libcpp_mutex_t mut_back[__sp_mut_count] =
{
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER,
    _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER, _LIBCPP_MUTEX_INITIALIZER
};

_LIBCPP_CONSTEXPR __sp_mut::__sp_mut(void* p) _NOEXCEPT
    : __lx(p)
{
}

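// Spin briefly before blocking: attempt the lock up to 16 times, yielding to
// other threads between attempts, and only fall back to a blocking
// __libcpp_mutex_lock if the mutex is still contended. Lightly contended
// locks therefore stay on the trylock fast path.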
void
__sp_mut::lock() _NOEXCEPT
{
    auto m = static_cast<__libcpp_mutex_t*>(__lx);
    unsigned count = 0;
    while (!__libcpp_mutex_trylock(m))
    {
        if (++count > 16)
        {
            __libcpp_mutex_lock(m);
            break;
        }
        this_thread::yield();
    }
}

void
__sp_mut::unlock() _NOEXCEPT
{
    __libcpp_mutex_unlock(static_cast<__libcpp_mutex_t*>(__lx));
}

__sp_mut&
__get_sp_mut(const void* p)
{
    static __sp_mut muts[__sp_mut_count]
    {
        &mut_back[ 0], &mut_back[ 1], &mut_back[ 2], &mut_back[ 3],
        &mut_back[ 4], &mut_back[ 5], &mut_back[ 6], &mut_back[ 7],
        &mut_back[ 8], &mut_back[ 9], &mut_back[10], &mut_back[11],
        &mut_back[12], &mut_back[13], &mut_back[14], &mut_back[15]
    };
    return muts[hash<const void*>()(p) & (__sp_mut_count-1)];
}

#endif // !defined(_LIBCPP_HAS_NO_ATOMIC_HEADER)

void
declare_reachable(void*)
{
}

void
declare_no_pointers(char*, size_t)
{
}

void
undeclare_no_pointers(char*, size_t)
{
}

#if !defined(_LIBCPP_ABI_POINTER_SAFETY_ENUM_TYPE)
pointer_safety get_pointer_safety() _NOEXCEPT
{
    return pointer_safety::relaxed;
}
#endif

void*
__undeclare_reachable(void* p)
{
    return p;
}

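// std::align: round ptr up to the next multiple of alignment, provided an
// aligned block of `size` bytes still fits within `space`. On success ptr
// and space are updated and the aligned pointer is returned; on failure
// nullptr is returned and ptr/space are left unchanged. The standard
// requires alignment to be a power of two, which is what allows rounding
// up with `(p + alignment - 1) & -alignment`.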
void*
align(size_t alignment, size_t size, void*& ptr, size_t& space)
{
    void* r = nullptr;
    if (size <= space)
    {
        char* p1 = static_cast<char*>(ptr);
        char* p2 = reinterpret_cast<char*>(reinterpret_cast<size_t>(p1 + (alignment - 1)) & -alignment);
        size_t d = static_cast<size_t>(p2 - p1);
        if (d <= space - size)
        {
            r = p2;
            ptr = r;
            space -= d;
        }
    }
    return r;
}
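// Worked example (hypothetical values): with ptr == (void*)0x1003,
// alignment == 8, size == 8 and space == 16, p2 rounds up to 0x1008 and
// d == 5; since 5 <= 16 - 8 the call succeeds, ptr becomes 0x1008 and
// space shrinks to 11. With space == 12 instead, d > space - size, so the
// call returns nullptr and leaves ptr and space untouched.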

_LIBCPP_END_NAMESPACE_STD