/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrManagedResource_DEFINED
#define GrManagedResource_DEFINED

#include "include/private/GrTypesPriv.h"
#include "include/private/SkMutex.h"
#include "include/private/SkTHash.h"
#include "include/utils/SkRandom.h"
#include <atomic>

class GrTexture;

// Tracing of resource refs is enabled in debug builds.
#ifdef SK_DEBUG
#define SK_TRACE_MANAGED_RESOURCES
#endif

/** \class GrManagedResource

  GrManagedResource is the base class for GPU resources that may be shared by
  multiple objects, in particular objects that are tracked by a command buffer.
  When an existing owner wants to share a reference, it calls ref().
  When an owner wants to release its reference, it calls unref(). When the
  shared object's reference count goes to zero as the result of an unref()
  call, its (virtual) destructor is called. It is an error for the
  destructor to be called explicitly (or via the object going out of scope on
  the stack or calling delete) if getRefCnt() > 1.

  This is nearly identical to SkRefCntBase. The exception is that any derived
  class must implement freeGPUData(), which is called to release its GPU
  resources right before the object is deleted.
*/

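/*
 * A minimal usage sketch (the subclass and all names below are illustrative only,
 * not part of this header):
 *
 *   class ExampleResource : public GrManagedResource {
 *   public:
 *       ExampleResource() {}                     // constructed with a ref count of 1
 *   #ifdef SK_TRACE_MANAGED_RESOURCES
 *       void dumpInfo() const override { SkDebugf("ExampleResource\n"); }
 *   #endif
 *   private:
 *       void freeGPUData() const override {
 *           // Release the backing GPU object (e.g. destroy a Vulkan handle) here.
 *       }
 *   };
 *
 *   ExampleResource* res = new ExampleResource();   // creator owns the initial ref
 *   res->ref();                                     // a second owner, e.g. a command buffer
 *   res->unref();                                   // that owner is done
 *   res->unref();                                   // last ref: freeGPUData() runs, then delete
 */
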
class GrManagedResource : SkNoncopyable {
public:
    // Simple refCount tracing, to ensure that everything ref'ed is unref'ed.
#ifdef SK_TRACE_MANAGED_RESOURCES
    struct Hash {
        uint32_t operator()(const GrManagedResource* const& r) const {
            SkASSERT(r);
            return r->fKey;
        }
    };

    class Trace {
    public:
        ~Trace() {
            fHashSet.foreach([](const GrManagedResource* r) {
                r->dumpInfo();
            });
            SkASSERT(0 == fHashSet.count());
        }

        void add(const GrManagedResource* r) {
            SkAutoMutexExclusive locked(fLock);
            fHashSet.add(r);
        }

        void remove(const GrManagedResource* r) {
            SkAutoMutexExclusive locked(fLock);
            fHashSet.remove(r);
        }

    private:
        SkMutex fLock;
        SkTHashSet<const GrManagedResource*, GrManagedResource::Hash> fHashSet SK_GUARDED_BY(fLock);
    };

    static std::atomic<uint32_t> fKeyCounter;
#endif

    /** Default construct, initializing the reference count to 1.
     */
    GrManagedResource() : fRefCnt(1) {
#ifdef SK_TRACE_MANAGED_RESOURCES
        fKey = fKeyCounter.fetch_add(+1, std::memory_order_relaxed);
        GetTrace()->add(this);
#endif
    }

    /** Destruct, asserting that the reference count is 1.
     */
    virtual ~GrManagedResource() {
#ifdef SK_DEBUG
        auto count = this->getRefCnt();
        SkASSERTF(count == 1, "fRefCnt was %d", count);
        fRefCnt.store(0);    // illegal value, to catch us if we reuse after delete
#endif
    }

#ifdef SK_DEBUG
    /** Return the reference count. Use only for debugging. */
    int32_t getRefCnt() const { return fRefCnt.load(); }
#endif

    /** May return true if the caller is the only owner.
     *  Ensures that all previous owners' actions are complete.
     */
    bool unique() const {
        // The acquire barrier is only really needed if we return true. It
        // prevents code conditioned on the result of unique() from running
        // until previous owners are all totally done calling unref().
        return 1 == fRefCnt.load(std::memory_order_acquire);
    }

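    /*
     * Typical use of unique() (sketch): an owner that wants to repurpose the underlying GPU
     * object first checks that no other owner (e.g. an in-flight command buffer) still holds
     * a ref:
     *
     *   if (resource->unique()) {
     *       // Safe to reuse; the acquire load above guarantees all prior unref()s are visible.
     *   }
     */
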
    /** Increment the reference count.
        Must be balanced by a call to unref().
    */
    void ref() const {
        // No barrier required. fetch_add() returns the count prior to the increment.
        SkDEBUGCODE(int prevRefCount = )fRefCnt.fetch_add(+1, std::memory_order_relaxed);
        SkASSERT(prevRefCount >= 1);
    }

    /** Decrement the reference count. If the reference count is 1 before the
        decrement, then delete the object. Note that if this is the case, then
        the object needs to have been allocated via new, and not on the stack.
        Any GPU data associated with this resource will be freed before it's deleted.
    */
    void unref() const {
        // A release here acts in place of all releases we "should" have been doing in ref().
        // fetch_add() returns the count prior to the decrement.
        int prevRefCount = fRefCnt.fetch_add(-1, std::memory_order_acq_rel);
        SkASSERT(prevRefCount >= 1);
        if (prevRefCount == 1) {
            // Like unique(), the acquire is only needed on success, to make sure
            // code in internal_dispose() doesn't happen before the decrement.
            this->internal_dispose();
        }
    }

    // Called every time this resource is queued for use on the GPU (typically because
    // it was added to a command buffer).
    virtual void notifyQueuedForWorkOnGpu() const {}
    // Called every time this resource has finished its use on the GPU (typically because
    // the command buffer finished execution on the GPU).
    virtual void notifyFinishedWithWorkOnGpu() const {}

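    /*
     * Sketch of the expected pairing (ExampleCommandBuffer and its members are hypothetical;
     * in Skia the Vulkan command buffer classes play this role):
     *
     *   void ExampleCommandBuffer::addResource(const GrManagedResource* r) {
     *       r->ref();
     *       r->notifyQueuedForWorkOnGpu();
     *       fTrackedResources.push_back(r);
     *   }
     *   void ExampleCommandBuffer::onGpuWorkFinished() {
     *       for (const GrManagedResource* r : fTrackedResources) {
     *           r->notifyFinishedWithWorkOnGpu();
     *           r->unref();
     *       }
     *       fTrackedResources.clear();
     *   }
     */
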
#ifdef SK_DEBUG
    // This is used for validating in the vulkan backend when using a main command buffer and temp
    // command buffer at the same time. We need to validate that no images in the temp command
    // buffer have been used in the main command buffer.
    virtual const GrManagedResource* asVkImageResource() const { return nullptr; }
#endif

#ifdef SK_DEBUG
    void validate() const {
        SkASSERT(this->getRefCnt() > 0);
    }
#endif

#ifdef SK_TRACE_MANAGED_RESOURCES
    /** Output a human-readable dump of this resource's information.
     */
    virtual void dumpInfo() const = 0;
#endif

private:
#ifdef SK_TRACE_MANAGED_RESOURCES
    static Trace* GetTrace() {
        static Trace kTrace;
        return &kTrace;
    }
#endif

    /** Must be implemented by any subclasses.
     *  Deletes any GPU data associated with this resource.
     */
    virtual void freeGPUData() const = 0;

    /**
     * Called when the ref count goes to 0. Will free GPU resources.
     */
    void internal_dispose() const {
        this->freeGPUData();
#ifdef SK_TRACE_MANAGED_RESOURCES
        GetTrace()->remove(this);
#endif

#ifdef SK_DEBUG
        SkASSERT(0 == this->getRefCnt());
        fRefCnt.store(1);
#endif
        delete this;
    }

    mutable std::atomic<int32_t> fRefCnt;
#ifdef SK_TRACE_MANAGED_RESOURCES
    uint32_t fKey;
#endif

    typedef SkNoncopyable INHERITED;
};

// This subclass allows for recycling.
class GrRecycledResource : public GrManagedResource {
public:
    // When recycle is called and there is only one ref left on the resource, we will signal that
    // the resource can be recycled for reuse. If the subclass (or whoever is managing this
    // resource) decides not to recycle the object, it is their responsibility to call unref on
    // the object. A usage sketch follows this class.
    void recycle() const {
        if (this->unique()) {
            this->onRecycle();
        } else {
            this->unref();
        }
    }

private:
    virtual void onRecycle() const = 0;
};

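/*
 * Sketch of a recycled subclass (the pool and all names below are hypothetical):
 *
 *   class ExamplePooledBuffer : public GrRecycledResource {
 *   private:
 *       void onRecycle() const override {
 *           // We are the sole owner here. Hand the object back to its pool rather than
 *           // destroying it; the pool now holds the remaining ref and must eventually
 *           // call unref(), which frees the GPU data and deletes the object.
 *           fPool->returnToPool(this);
 *       }
 *       void freeGPUData() const override {
 *           // Release the backing GPU object here.
 *       }
 *       ExamplePool* fPool;
 *   };
 *
 *   // An owner that is finished with the object calls recycle() instead of unref():
 *   pooledBuffer->recycle();
 */
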
/** \class GrTextureResource

  GrTextureResource is the base class for managed texture resources, and implements the
  basic idleProc and releaseProc functionality for them. A usage sketch follows this class.
*/
class GrTextureResource : public GrManagedResource {
public:
    GrTextureResource() {}

    ~GrTextureResource() override {
        SkASSERT(!fReleaseHelper);
    }

    void setRelease(sk_sp<GrRefCntedCallback> releaseHelper) {
        fReleaseHelper = std::move(releaseHelper);
    }

    /**
     * These are used to coordinate calling the "finished" idle procs between the GrTexture
     * and the GrTextureResource. If the GrTexture becomes purgeable and there are no command
     * buffers referring to the GrTextureResource, then the GrTexture calls the procs.
     * Otherwise, the GrTextureResource calls them when the last command buffer reference goes
     * away and the GrTexture is purgeable.
     */
    void addIdleProc(GrTexture*, sk_sp<GrRefCntedCallback>) const;
    int idleProcCnt() const;
    sk_sp<GrRefCntedCallback> idleProc(int) const;
    void resetIdleProcs() const;
    void removeOwningTexture() const;

    /**
     * We track how many outstanding references this GrTextureResource has in command buffers and
     * when the count reaches zero we call the idle proc.
     */
    void notifyQueuedForWorkOnGpu() const override;
    void notifyFinishedWithWorkOnGpu() const override;
    bool isQueuedForWorkOnGpu() const { return fNumOwners > 0; }

protected:
    mutable sk_sp<GrRefCntedCallback> fReleaseHelper;
    mutable GrTexture* fOwningTexture = nullptr;

    void invokeReleaseProc() const {
        if (fReleaseHelper) {
            // Depending on the ref count of fReleaseHelper this may or may not actually trigger
            // the ReleaseProc to be called.
            fReleaseHelper.reset();
        }
    }

private:
    mutable int fNumOwners = 0;
    mutable SkTArray<sk_sp<GrRefCntedCallback>> fIdleProcs;

    typedef GrManagedResource INHERITED;
};

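/*
 * Sketch of how the pieces above fit together (the calling code is hypothetical; in Skia the
 * Vulkan texture and command buffer classes play these roles, and `releaseHelper` /
 * `idleCallback` stand for pre-built sk_sp<GrRefCntedCallback> objects):
 *
 *   // The client installs a release callback and idle callbacks via the owning texture.
 *   textureResource->setRelease(std::move(releaseHelper));
 *   textureResource->addIdleProc(owningTexture, std::move(idleCallback));
 *
 *   // While recording, a command buffer takes a ref and bumps the owner count.
 *   textureResource->ref();
 *   textureResource->notifyQueuedForWorkOnGpu();     // isQueuedForWorkOnGpu() == true
 *
 *   // When the GPU finishes, the command buffer undoes both. Once the owner count reaches
 *   // zero (and the owning GrTexture is purgeable), the idle procs are invoked.
 *   textureResource->notifyFinishedWithWorkOnGpu();
 *   textureResource->unref();
 */
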
#endif