/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkRefCnt_DEFINED
#define SkRefCnt_DEFINED

#include "include/core/SkTypes.h"

#include <atomic>      // std::atomic, std::memory_order_*
#include <cstddef>     // std::nullptr_t
#include <iosfwd>      // std::basic_ostream
#include <type_traits> // std::enable_if, std::is_convertible
#include <utility>     // std::forward, std::swap

/** \class SkRefCntBase

    SkRefCntBase is the base class for objects that may be shared by multiple
    objects. When an existing owner wants to share a reference, it calls ref().
    When an owner wants to release its reference, it calls unref(). When the
    shared object's reference count goes to zero as the result of an unref()
    call, its (virtual) destructor is called. It is an error for the
    destructor to be called explicitly (or via the object going out of scope on
    the stack or calling delete) if getRefCnt() > 1.
*/
class SK_API SkRefCntBase {
public:
    /** Default construct, initializing the reference count to 1.
    */
    SkRefCntBase() : fRefCnt(1) {}

    /** Destruct, asserting that the reference count is 1.
    */
    virtual ~SkRefCntBase() {
    #ifdef SK_DEBUG
        SkASSERTF(this->getRefCnt() == 1, "fRefCnt was %d", this->getRefCnt());
        // illegal value, to catch us if we reuse after delete
        fRefCnt.store(0, std::memory_order_relaxed);
    #endif
    }

    /** May return true if the caller is the only owner.
     *  Ensures that all previous owners' actions are complete.
     */
    bool unique() const {
        if (1 == fRefCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true. It
            // prevents code conditioned on the result of unique() from running
            // until previous owners are all totally done calling unref().
            return true;
        }
        return false;
    }

    /** Increment the reference count. Must be balanced by a call to unref().
    */
    void ref() const {
        SkASSERT(this->getRefCnt() > 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    /** Decrement the reference count. If the reference count is 1 before the
        decrement, then delete the object. Note that if this is the case, then
        the object needs to have been allocated via new, and not on the stack.
    */
    void unref() const {
        SkASSERT(this->getRefCnt() > 0);
        // A release here acts in place of all releases we "should" have been doing in ref().
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            // Like unique(), the acquire is only needed on success, to make sure
            // code in internal_dispose() doesn't happen before the decrement.
            this->internal_dispose();
        }
    }

private:

#ifdef SK_DEBUG
    /** Return the reference count. Use only for debugging. */
    int32_t getRefCnt() const {
        return fRefCnt.load(std::memory_order_relaxed);
    }
#endif

    /**
     *  Called when the ref count goes to 0.
     */
    virtual void internal_dispose() const {
    #ifdef SK_DEBUG
        SkASSERT(0 == this->getRefCnt());
        fRefCnt.store(1, std::memory_order_relaxed);
    #endif
        delete this;
    }

    // The following friends are those which override internal_dispose()
    // and conditionally call SkRefCnt::internal_dispose().
    friend class SkWeakRefCnt;

    mutable std::atomic<int32_t> fRefCnt;

    SkRefCntBase(SkRefCntBase&&) = delete;
    SkRefCntBase(const SkRefCntBase&) = delete;
    SkRefCntBase& operator=(SkRefCntBase&&) = delete;
    SkRefCntBase& operator=(const SkRefCntBase&) = delete;
};

#ifdef SK_REF_CNT_MIXIN_INCLUDE
// It is the responsibility of the following include to define the type SkRefCnt.
// This SkRefCnt should normally derive from SkRefCntBase.
#include SK_REF_CNT_MIXIN_INCLUDE
#else
class SK_API SkRefCnt : public SkRefCntBase {
    // "#include SK_REF_CNT_MIXIN_INCLUDE" doesn't work with this build system.
    #if defined(SK_BUILD_FOR_GOOGLE3)
public:
    void deref() const { this->unref(); }
    #endif
};
#endif
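
// A minimal usage sketch of the manual ref()/unref() protocol described above.
// MyBlob is a hypothetical client class, not part of this header; subclasses
// must be heap-allocated since unref() ultimately calls delete:
//
//     class MyBlob : public SkRefCnt {
//     public:
//         int fValue = 42;
//     };
//
//     MyBlob* blob = new MyBlob;   // reference count starts at 1
//     blob->ref();                 // a second owner: count is now 2
//     blob->unref();               // count back to 1
//     blob->unref();               // count hits 0: ~MyBlob() runs, memory freed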

///////////////////////////////////////////////////////////////////////////////

/** Call obj->ref() and return obj. The obj must not be nullptr.
*/
template <typename T> static inline T* SkRef(T* obj) {
    SkASSERT(obj);
    obj->ref();
    return obj;
}

/** Check if the argument is non-null, and if so, call obj->ref() and return obj.
*/
template <typename T> static inline T* SkSafeRef(T* obj) {
    if (obj) {
        obj->ref();
    }
    return obj;
}

/** Check if the argument is non-null, and if so, call obj->unref().
*/
template <typename T> static inline void SkSafeUnref(T* obj) {
    if (obj) {
        obj->unref();
    }
}
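
// A short sketch of these helpers in use (MyBlob as above is hypothetical).
// SkRef() asserts on null; the SkSafe* variants tolerate null pointers:
//
//     MyBlob* blob = new MyBlob;        // count 1
//     MyBlob* alias = SkRef(blob);      // asserts non-null, then ref(): count 2
//     SkSafeUnref(alias);               // non-null, so unref(): count 1
//     SkSafeUnref(blob);                // count 0: deleted
//
//     MyBlob* none = nullptr;
//     MyBlob* still = SkSafeRef(none);  // null in, null out, no crash
//     SkSafeUnref(still);               // no-op on null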

///////////////////////////////////////////////////////////////////////////////

// This is a variant of SkRefCnt that's Not Virtual, so weighs 4 bytes instead of 8 or 16.
// There's only benefit to using this if the deriving class does not otherwise need a vtable.
template <typename Derived>
class SkNVRefCnt {
public:
    SkNVRefCnt() : fRefCnt(1) {}
    ~SkNVRefCnt() {
    #ifdef SK_DEBUG
        int rc = fRefCnt.load(std::memory_order_relaxed);
        SkASSERTF(rc == 1, "NVRefCnt was %d", rc);
    #endif
    }

    // Implementation is pretty much the same as SkRefCntBase. All required barriers are the same:
    //   - unique() needs acquire when it returns true, and no barrier if it returns false;
    //   - ref() doesn't need any barrier;
    //   - unref() needs a release barrier, and an acquire if it's going to call delete.

    bool unique() const { return 1 == fRefCnt.load(std::memory_order_acquire); }
    void ref() const { (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed); }
    void unref() const {
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            // restore the 1 for our destructor's assert
            SkDEBUGCODE(fRefCnt.store(1, std::memory_order_relaxed));
            delete (const Derived*)this;
        }
    }
    void deref() const { this->unref(); }

    // This must be used with caution. It is only valid to call this when 'threadIsolatedTestCnt'
    // refs are known to be isolated to the current thread. That is, it is known that there are at
    // least 'threadIsolatedTestCnt' refs for which no other thread may make a balancing unref()
    // call. Assuming the contract is followed, if this returns false then no other thread has
    // ownership of this. If it returns true then another thread *may* have ownership.
    bool refCntGreaterThan(int32_t threadIsolatedTestCnt) const {
        int cnt = fRefCnt.load(std::memory_order_acquire);
        // If this fails then the above contract has been violated.
        SkASSERT(cnt >= threadIsolatedTestCnt);
        return cnt > threadIsolatedTestCnt;
    }

private:
    mutable std::atomic<int32_t> fRefCnt;

    SkNVRefCnt(SkNVRefCnt&&) = delete;
    SkNVRefCnt(const SkNVRefCnt&) = delete;
    SkNVRefCnt& operator=(SkNVRefCnt&&) = delete;
    SkNVRefCnt& operator=(const SkNVRefCnt&) = delete;
};
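
// SkNVRefCnt is a CRTP base: the deriving class passes itself as 'Derived' so
// that unref() can delete through the correct static type without a virtual
// destructor. A minimal sketch (MyPayload is a hypothetical client class):
//
//     class MyPayload : public SkNVRefCnt<MyPayload> {
//     public:
//         float fData = 0.0f;
//     };
//
//     MyPayload* p = new MyPayload;   // count 1
//     p->ref();                       // count 2
//     p->unref();                     // count 1
//     p->unref();                     // count 0: deleted as MyPayload, no vtable needed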

///////////////////////////////////////////////////////////////////////////////////////////////////

/**
 *  Shared pointer class to wrap classes that support a ref()/unref() interface.
 *
 *  This can be used for classes inheriting from SkRefCnt, but it also works for other
 *  classes that match the interface, but have different internal choices: e.g. the hosted class
 *  may have its ref/unref be thread-safe, but that is not assumed/imposed by sk_sp.
 */
template <typename T> class sk_sp {
public:
    using element_type = T;

    constexpr sk_sp() : fPtr(nullptr) {}
    constexpr sk_sp(std::nullptr_t) : fPtr(nullptr) {}
    /**
     *  Shares the underlying object by calling ref(), so that both the argument and the newly
     *  created sk_sp have a reference to it.
     */
    sk_sp(const sk_sp<T>& that) : fPtr(SkSafeRef(that.get())) {}
    template <typename U,
              typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
    sk_sp(const sk_sp<U>& that) : fPtr(SkSafeRef(that.get())) {}

    /**
     *  Move the underlying object from the argument to the newly created sk_sp. Afterwards only
     *  the new sk_sp will have a reference to the object, and the argument will point to null.
     *  No call to ref() or unref() will be made.
     */
    sk_sp(sk_sp<T>&& that) : fPtr(that.release()) {}
    template <typename U,
              typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
    sk_sp(sk_sp<U>&& that) : fPtr(that.release()) {}

    /**
     *  Adopt the bare pointer into the newly created sk_sp.
     *  No call to ref() or unref() will be made.
     */
    explicit sk_sp(T* obj) : fPtr(obj) {}

    /**
     *  Calls unref() on the underlying object pointer.
     */
    ~sk_sp() {
        SkSafeUnref(fPtr);
        SkDEBUGCODE(fPtr = nullptr);
    }

    sk_sp<T>& operator=(std::nullptr_t) { this->reset(); return *this; }

    /**
     *  Shares the underlying object referenced by the argument by calling ref() on it. If this
     *  sk_sp previously had a reference to an object (i.e. not null) it will call unref() on that
     *  object.
     */
    sk_sp<T>& operator=(const sk_sp<T>& that) {
        if (this != &that) {
            this->reset(SkSafeRef(that.get()));
        }
        return *this;
    }
    template <typename U,
              typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
    sk_sp<T>& operator=(const sk_sp<U>& that) {
        this->reset(SkSafeRef(that.get()));
        return *this;
    }

    /**
     *  Move the underlying object from the argument to the sk_sp. If the sk_sp previously held
     *  a reference to another object, unref() will be called on that object. No call to ref()
     *  will be made.
     */
    sk_sp<T>& operator=(sk_sp<T>&& that) {
        this->reset(that.release());
        return *this;
    }
    template <typename U,
              typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
    sk_sp<T>& operator=(sk_sp<U>&& that) {
        this->reset(that.release());
        return *this;
    }

    T& operator*() const {
        SkASSERT(this->get() != nullptr);
        return *this->get();
    }

    explicit operator bool() const { return this->get() != nullptr; }

    T* get() const { return fPtr; }
    T* operator->() const { return fPtr; }

    /**
     *  Adopt the new bare pointer, and call unref() on any previously held object (if not null).
     *  No call to ref() will be made.
     */
    void reset(T* ptr = nullptr) {
        // Calling fPtr->unref() may call this->~() or this->reset(T*).
        // http://wg21.cmeerw.net/lwg/issue998
        // http://wg21.cmeerw.net/lwg/issue2262
        T* oldPtr = fPtr;
        fPtr = ptr;
        SkSafeUnref(oldPtr);
    }

    /**
     *  Return the bare pointer, and set the internal object pointer to nullptr.
     *  The caller must assume ownership of the object, and manage its reference count directly.
     *  No call to unref() will be made.
     */
    T* SK_WARN_UNUSED_RESULT release() {
        T* ptr = fPtr;
        fPtr = nullptr;
        return ptr;
    }

    void swap(sk_sp<T>& that) /*noexcept*/ {
        using std::swap;
        swap(fPtr, that.fPtr);
    }

private:
    T* fPtr;
};
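
// A brief usage sketch (MyBlob as before is hypothetical). sk_sp automates the
// ref()/unref() bookkeeping shown earlier:
//
//     sk_sp<MyBlob> a(new MyBlob);    // adopts the initial ref: count 1
//     sk_sp<MyBlob> b = a;            // copy: calls ref(); count 2
//     sk_sp<MyBlob> c = std::move(a); // move: no ref/unref; 'a' is now null
//     b.reset();                      // unref(): count 1
//     c = nullptr;                    // unref(): count 0, object deleted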

template <typename T> inline void swap(sk_sp<T>& a, sk_sp<T>& b) /*noexcept*/ {
    a.swap(b);
}

template <typename T, typename U> inline bool operator==(const sk_sp<T>& a, const sk_sp<U>& b) {
    return a.get() == b.get();
}
template <typename T> inline bool operator==(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ {
    return !a;
}
template <typename T> inline bool operator==(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ {
    return !b;
}

template <typename T, typename U> inline bool operator!=(const sk_sp<T>& a, const sk_sp<U>& b) {
    return a.get() != b.get();
}
template <typename T> inline bool operator!=(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ {
    return static_cast<bool>(a);
}
template <typename T> inline bool operator!=(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ {
    return static_cast<bool>(b);
}

template <typename C, typename CT, typename T>
auto operator<<(std::basic_ostream<C, CT>& os, const sk_sp<T>& sp) -> decltype(os << sp.get()) {
    return os << sp.get();
}

template <typename T, typename... Args>
sk_sp<T> sk_make_sp(Args&&... args) {
    return sk_sp<T>(new T(std::forward<Args>(args)...));
}
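
// sk_make_sp forwards its arguments to T's constructor and adopts the result,
// so the object's initial ref is owned by the returned sk_sp. A sketch
// (MyNamedBlob and its constructor signature are hypothetical):
//
//     sk_sp<MyNamedBlob> blob = sk_make_sp<MyNamedBlob>("name", 42);
//     // exactly one ref outstanding; released when 'blob' goes out of scope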

/*
 *  Returns a sk_sp wrapping the provided ptr AND calls ref on it (if not null).
 *
 *  This is different from the semantics of the constructor for sk_sp, which just wraps the ptr,
 *  effectively "adopting" it.
 */
template <typename T> sk_sp<T> sk_ref_sp(T* obj) {
    return sk_sp<T>(SkSafeRef(obj));
}

template <typename T> sk_sp<T> sk_ref_sp(const T* obj) {
    return sk_sp<T>(const_cast<T*>(SkSafeRef(obj)));
}
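
// The distinction in practice (MyBlob hypothetical): sk_ref_sp shares, while
// the explicit sk_sp constructor adopts.
//
//     MyBlob* raw = new MyBlob;               // count 1; caller owns that ref
//     sk_sp<MyBlob> shared = sk_ref_sp(raw);  // calls ref(): count 2
//     sk_sp<MyBlob> adopted(raw);             // adopts the caller's ref: count
//                                             // still 2, and 'raw' must no
//                                             // longer be unref'ed directly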

#endif