1 | /* |
2 | * Copyright 2014 Google Inc. |
3 | * |
4 | * Use of this source code is governed by a BSD-style license that can be |
5 | * found in the LICENSE file. |
6 | */ |
7 | |
8 | #include "src/gpu/GrResourceCache.h" |
9 | #include <atomic> |
10 | #include "include/gpu/GrContext.h" |
11 | #include "include/private/GrSingleOwner.h" |
12 | #include "include/private/SkTo.h" |
13 | #include "include/utils/SkRandom.h" |
14 | #include "src/core/SkExchange.h" |
15 | #include "src/core/SkMessageBus.h" |
16 | #include "src/core/SkOpts.h" |
17 | #include "src/core/SkScopeExit.h" |
18 | #include "src/core/SkTSort.h" |
19 | #include "src/gpu/GrCaps.h" |
20 | #include "src/gpu/GrContextPriv.h" |
21 | #include "src/gpu/GrGpuResourceCacheAccess.h" |
22 | #include "src/gpu/GrProxyProvider.h" |
23 | #include "src/gpu/GrTexture.h" |
24 | #include "src/gpu/GrTextureProxyCacheAccess.h" |
25 | #include "src/gpu/GrTracing.h" |
26 | #include "src/gpu/SkGr.h" |
27 | |
28 | DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage); |
29 | |
30 | DECLARE_SKMESSAGEBUS_MESSAGE(GrTextureFreedMessage); |
31 | |
32 | #define ASSERT_SINGLE_OWNER \ |
33 | SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);) |
34 | |
35 | ////////////////////////////////////////////////////////////////////////////// |
36 | |
37 | GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() { |
38 | static std::atomic<int32_t> nextType{INHERITED::kInvalidDomain + 1}; |
39 | |
40 | int32_t type = nextType++; |
41 | if (type > SkTo<int32_t>(UINT16_MAX)) { |
42 | SK_ABORT("Too many Resource Types" ); |
43 | } |
44 | |
45 | return static_cast<ResourceType>(type); |
46 | } |
47 | |
48 | GrUniqueKey::Domain GrUniqueKey::GenerateDomain() { |
49 | static std::atomic<int32_t> nextDomain{INHERITED::kInvalidDomain + 1}; |
50 | |
51 | int32_t domain = nextDomain++; |
52 | if (domain > SkTo<int32_t>(UINT16_MAX)) { |
53 | SK_ABORT("Too many GrUniqueKey Domains" ); |
54 | } |
55 | |
56 | return static_cast<Domain>(domain); |
57 | } |
58 | |
// Hashes the raw key words of a resource key via Skia's optimized hash routine.
uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
    return SkOpts::hash(data, size);
}
62 | |
63 | ////////////////////////////////////////////////////////////////////////////// |
64 | |
// RAII helper: runs the cache's debug validation both on entry to and exit from a scope.
class GrResourceCache::AutoValidate : ::SkNoncopyable {
public:
    AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
    ~AutoValidate() { fCache->validate(); }
private:
    GrResourceCache* fCache;
};
72 | |
73 | ////////////////////////////////////////////////////////////////////////////// |
74 | |
// Default state: no texture and zero pending unrefs.
inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref() = default;
76 | |
// Wraps 'texture' with a single pending unref. The caller is expected to have taken the
// ref this entry will later release (see insertDelayedTextureUnref).
inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(GrTexture* texture)
        : fTexture(texture), fNumUnrefs(1) {}
79 | |
// Move: steals the texture pointer and pending-unref count, leaving 'that' empty so its
// destructor performs no unrefs.
inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(TextureAwaitingUnref&& that) {
    fTexture = skstd::exchange(that.fTexture, nullptr);
    fNumUnrefs = skstd::exchange(that.fNumUnrefs, 0);
}
84 | |
// Move-assign: steals 'that's state, leaving it empty.
// NOTE(review): any texture/unrefs already held by *this are overwritten without being
// released — safe only if assignment targets are empty/moved-from; confirm at call sites.
inline GrResourceCache::TextureAwaitingUnref& GrResourceCache::TextureAwaitingUnref::operator=(
        TextureAwaitingUnref&& that) {
    fTexture = skstd::exchange(that.fTexture, nullptr);
    fNumUnrefs = skstd::exchange(that.fNumUnrefs, 0);
    return *this;
}
91 | |
// Releases every pending ref on the texture (used when the cache is torn down before the
// matching freed-texture messages arrive).
inline GrResourceCache::TextureAwaitingUnref::~TextureAwaitingUnref() {
    if (fTexture) {
        for (int i = 0; i < fNumUnrefs; ++i) {
            fTexture->unref();
        }
    }
}
99 | |
100 | inline void GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref::addRef() { ++fNumUnrefs; } |
101 | |
// Releases exactly one pending ref on the texture.
inline void GrResourceCache::TextureAwaitingUnref::unref() {
    SkASSERT(fNumUnrefs > 0);
    fTexture->unref();
    --fNumUnrefs;
}
107 | |
// True once every pending unref has been performed; the map entry can then be removed.
inline bool GrResourceCache::TextureAwaitingUnref::finished() { return !fNumUnrefs; }
109 | |
110 | ////////////////////////////////////////////////////////////////////////////// |
111 | |
// Constructs a cache bound to one GrContext. The two message-bus inboxes are keyed by
// 'contextUniqueID' so posts targeted at other contexts are not delivered here.
// 'singleOwner' backs the debug-only ASSERT_SINGLE_OWNER checks.
GrResourceCache::GrResourceCache(const GrCaps* caps, GrSingleOwner* singleOwner,
                                 uint32_t contextUniqueID)
        : fInvalidUniqueKeyInbox(contextUniqueID)
        , fFreedTextureInbox(contextUniqueID)
        , fContextUniqueID(contextUniqueID)
        , fSingleOwner(singleOwner)
        , fPreferVRAMUseOverFlushes(caps->preferVRAMUseOverFlushes()) {
    SkASSERT(contextUniqueID != SK_InvalidUniqueID);
}
121 | |
// Releases every resource still held (both purgeable and non-purgeable) before the cache
// goes away.
GrResourceCache::~GrResourceCache() {
    this->releaseAll();
}
125 | |
// Sets the byte budget and immediately purges if the cache is now over it.
void GrResourceCache::setLimit(size_t bytes) {
    fMaxBytes = bytes;
    this->purgeAsNeeded();
}
130 | |
// Adds a newly created (and therefore non-purgeable) resource to the cache, updating the
// byte/count bookkeeping, budget stats, and — when the resource is scratch-only — the
// scratch map. Ends with a purge in case the addition pushed us over budget.
void GrResourceCache::insertResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->resourcePriv().isPurgeable());

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;
#if GR_CACHE_STATS
    fHighWaterCount = std::max(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = std::max(fBytes, fHighWaterBytes);
#endif
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache" , "skia budget" , "used" ,
                       fBudgetedBytes, "free" , fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    // Only resources without a unique key live in the scratch map (see willRemoveScratchKey /
    // removeUniqueKey for the transitions).
    if (resource->resourcePriv().getScratchKey().isValid() &&
        !resource->getUniqueKey().isValid()) {
        SkASSERT(!resource->resourcePriv().refsWrappedObjects());
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    this->purgeAsNeeded();
}
169 | |
// Removes a resource from all cache structures (purgeable queue or non-purgeable array,
// scratch map, unique-key hash) and reverses the byte/count bookkeeping done on insert.
void GrResourceCache::removeResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // A resource lives in exactly one of the purgeable queue or the non-purgeable array.
    if (resource->resourcePriv().isPurgeable()) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache" , "skia budget" , "used" ,
                       fBudgetedBytes, "free" , fMaxBytes - fBudgetedBytes);
    }

    if (resource->resourcePriv().getScratchKey().isValid() &&
        !resource->getUniqueKey().isValid()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}
201 | |
// Abandons every resource (GPU objects are NOT deleted — the underlying context is assumed
// gone) and empties all cache structures. Asserts the cache is fully drained afterwards.
void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    // abandon() removes each resource from the cache, so these loops run until empty.
    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}
231 | |
// Releases (frees the GPU objects of) every resource and empties all cache structures.
// Unlike abandonAll(), the context is still alive, so pending freed-texture messages are
// processed and proxy unique keys are detached first.
void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    this->processFreedGpuResources();

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    SkASSERT(fProxyProvider); // better have called setProxyProvider
    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    // release() removes each resource from the cache, so these loops run until empty.
    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}
268 | |
// Adds a ref to a cached resource. If this is the first ref, the resource must also be
// moved/accounted via refAndMakeResourceMRU (it may currently be purgeable).
void GrResourceCache::refResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(resource->getContext()->priv().getResourceCache() == this);
    if (resource->cacheAccess().hasRef()) {
        // Already ref'ed: a plain ref suffices, no cache bookkeeping changes.
        resource->ref();
    } else {
        this->refAndMakeResourceMRU(resource);
    }
    this->validate();
}
279 | |
280 | class GrResourceCache::AvailableForScratchUse { |
281 | public: |
282 | AvailableForScratchUse() { } |
283 | |
284 | bool operator()(const GrGpuResource* resource) const { |
285 | SkASSERT(!resource->getUniqueKey().isValid() && |
286 | resource->resourcePriv().getScratchKey().isValid()); |
287 | |
288 | // isScratch() also tests that the resource is budgeted. |
289 | if (resource->internalHasRef() || !resource->cacheAccess().isScratch()) { |
290 | return false; |
291 | } |
292 | return true; |
293 | } |
294 | }; |
295 | |
// Looks up an idle scratch resource matching 'scratchKey'; on a hit, refs it and makes it
// most-recently-used. Returns nullptr on a miss.
GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey) {
    SkASSERT(scratchKey.isValid());

    GrGpuResource* resource = fScratchMap.find(scratchKey, AvailableForScratchUse());
    if (resource) {
        this->refAndMakeResourceMRU(resource);
        this->validate();
    }
    return resource;
}
306 | |
// Called before a resource's scratch key is removed; drops it from the scratch map unless it
// also has a unique key (in which case it was never in the scratch map — see insertResource).
void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource->resourcePriv().getScratchKey().isValid());
    if (!resource->getUniqueKey().isValid()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
}
314 | |
// Strips a resource's unique key. If the resource also has a scratch key it becomes
// findable as scratch again (re-inserted into the scratch map).
void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();
    if (resource->resourcePriv().getScratchKey().isValid()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
    // require purging. However, the resource must be ref'ed to get here and therefore can't
    // be purgeable. We'll purge it when the refs reach zero.
    SkASSERT(!resource->resourcePriv().isPurgeable());
    this->validate();
}
334 | |
// Installs 'newKey' on 'resource', evicting or de-keying any other resource that currently
// holds that key, and keeping the unique-key hash and scratch map consistent. Passing an
// invalid key is equivalent to removeUniqueKey().
void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() &&
                old->resourcePriv().isPurgeable()) {
                old->cacheAccess().release();
            } else {
                // removeUniqueKey expects an external owner of the resource.
                this->removeUniqueKey(sk_ref_sp(old).get());
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap
            if (resource->resourcePriv().getScratchKey().isValid()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}
375 | |
// Refs a cached resource and stamps it most-recently-used. Handles the two accounting
// transitions: purgeable -> non-purgeable, and "flush would make purgeable" -> ref'ed.
void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->resourcePriv().isPurgeable()) {
        // It's about to become unpurgeable.
        fPurgeableBytes -= resource->gpuMemorySize();
        fPurgeableQueue.remove(resource);
        this->addToNonpurgeableArray(resource);
    } else if (!resource->cacheAccess().hasRef() &&
               resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        // It was counted as becoming purgeable at the next flush; a new ref cancels that.
        SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
        fNumBudgetedResourcesFlushWillMakePurgeable--;
    }
    resource->cacheAccess().ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}
396 | |
// Called when a resource's ref count hits zero. Decides, based on budget type and keys,
// whether the resource stays cached (moved to the purgeable queue), is converted to a
// budgeted scratch resource, or is released immediately.
void GrResourceCache::notifyRefCntReachedZero(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);

#ifdef SK_DEBUG
    // When the timestamp overflows validate() is called. validate() checks that resources in
    // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
    // the purgeable queue happens just below in this function. So we mark it as an exception.
    if (resource->resourcePriv().isPurgeable()) {
        fNewlyPurgeableResourceForValidation = resource;
    }
#endif
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);

    // Still held somewhere (e.g. pending command buffer use): a flush could make it purgeable.
    if (!resource->resourcePriv().isPurgeable() &&
        resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fNumBudgetedResourcesFlushWillMakePurgeable;
    }

    if (!resource->resourcePriv().isPurgeable()) {
        this->validate();
        return;
    }

    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    bool hasUniqueKey = resource->getUniqueKey().isValid();

    GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();

    if (budgetedType == GrBudgetedType::kBudgeted) {
        // Purge the resource immediately if we're over budget
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
        if (!this->overBudget() && hasKey) {
            return;
        }
    } else {
        // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
        // they can be reused again by the image connected to the unique key.
        if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
            return;
        }
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (this->wouldFit(resource->gpuMemorySize())) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}
466 | |
// Called after a resource flips between budgeted and unbudgeted; moves its bytes/count into
// or out of the budgeted totals and updates the flush-will-make-purgeable counter.
void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
    // resource become purgeable. However, we should never allow that transition. Wrapped
    // resources are the only resources that can be in that state and they aren't allowed to
    // transition from one budgeted state to another.
    SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
    if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        if (!resource->resourcePriv().isPurgeable() && !resource->cacheAccess().hasRef()) {
            ++fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        // Newly budgeted bytes may push us over budget.
        this->purgeAsNeeded();
    } else {
        SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
        --fBudgetedCount;
        fBudgetedBytes -= size;
        if (!resource->resourcePriv().isPurgeable() && !resource->cacheAccess().hasRef()) {
            --fNumBudgetedResourcesFlushWillMakePurgeable;
        }
    }
    SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
    TRACE_COUNTER2("skia.gpu.cache" , "skia budget" , "used" ,
                   fBudgetedBytes, "free" , fMaxBytes - fBudgetedBytes);

    this->validate();
}
503 | |
// Drains pending invalidated-key and freed-texture messages, then releases purgeable
// resources in LRU order until the cache is back under its byte budget.
void GrResourceCache::purgeAsNeeded() {
    SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
    fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
    if (invalidKeyMsgs.count()) {
        SkASSERT(fProxyProvider);

        for (int i = 0; i < invalidKeyMsgs.count(); ++i) {
            fProxyProvider->processInvalidUniqueKey(invalidKeyMsgs[i].key(), nullptr,
                                                    GrProxyProvider::InvalidateGPUResource::kYes);
            SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
        }
    }

    this->processFreedGpuResources();

    bool stillOverbudget = this->overBudget();
    while (stillOverbudget && fPurgeableQueue.count()) {
        // peek() is the LRU purgeable resource; release() also removes it from the queue.
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->resourcePriv().isPurgeable());
        resource->cacheAccess().release();
        stillOverbudget = this->overBudget();
    }

    this->validate();
}
529 | |
// Releases all purgeable resources, or — when 'scratchResourcesOnly' — only those without a
// unique key (scratch resources), regardless of the budget.
void GrResourceCache::purgeUnlockedResources(bool scratchResourcesOnly) {
    if (!scratchResourcesOnly) {
        // We could disable maintaining the heap property here, but it would add a lot of
        // complexity. Moreover, this is rarely called.
        while (fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();
            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
        }
    } else {
        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        for (int i = 0; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
    }

    this->validate();
}
562 | |
// Releases every purgeable resource that became purgeable before 'purgeTime'. Relies on the
// purgeable queue being ordered oldest-first.
void GrResourceCache::purgeResourcesNotUsedSince(GrStdSteadyClock::time_point purgeTime) {
    while (fPurgeableQueue.count()) {
        const GrStdSteadyClock::time_point resourceTime =
                fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable();
        if (resourceTime >= purgeTime) {
            // Resources were given both LRU timestamps and tagged with a frame number when
            // they first became purgeable. The LRU timestamp won't change again until the
            // resource is made non-purgeable again. So, at this point all the remaining
            // resources in the timestamp-sorted queue will have a frame number >= to this
            // one.
            break;
        }
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->resourcePriv().isPurgeable());
        resource->cacheAccess().release();
    }
}
580 | |
581 | void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) { |
582 | |
583 | const size_t tmpByteBudget = std::max((size_t)0, fBytes - bytesToPurge); |
584 | bool stillOverbudget = tmpByteBudget < fBytes; |
585 | |
586 | if (preferScratchResources && bytesToPurge < fPurgeableBytes) { |
587 | // Sort the queue |
588 | fPurgeableQueue.sort(); |
589 | |
590 | // Make a list of the scratch resources to delete |
591 | SkTDArray<GrGpuResource*> scratchResources; |
592 | size_t scratchByteCount = 0; |
593 | for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) { |
594 | GrGpuResource* resource = fPurgeableQueue.at(i); |
595 | SkASSERT(resource->resourcePriv().isPurgeable()); |
596 | if (!resource->getUniqueKey().isValid()) { |
597 | *scratchResources.append() = resource; |
598 | scratchByteCount += resource->gpuMemorySize(); |
599 | stillOverbudget = tmpByteBudget < fBytes - scratchByteCount; |
600 | } |
601 | } |
602 | |
603 | // Delete the scratch resources. This must be done as a separate pass |
604 | // to avoid messing up the sorted order of the queue |
605 | for (int i = 0; i < scratchResources.count(); i++) { |
606 | scratchResources.getAt(i)->cacheAccess().release(); |
607 | } |
608 | stillOverbudget = tmpByteBudget < fBytes; |
609 | |
610 | this->validate(); |
611 | } |
612 | |
613 | // Purge any remaining resources in LRU order |
614 | if (stillOverbudget) { |
615 | const size_t cachedByteCount = fMaxBytes; |
616 | fMaxBytes = tmpByteBudget; |
617 | this->purgeAsNeeded(); |
618 | fMaxBytes = cachedByteCount; |
619 | } |
620 | } |
621 | bool GrResourceCache::requestsFlush() const { |
622 | return this->overBudget() && !fPurgeableQueue.count() && |
623 | fNumBudgetedResourcesFlushWillMakePurgeable > 0; |
624 | } |
625 | |
626 | |
// Takes a ref on 'texture' that will only be released once a matching GrTextureFreedMessage
// arrives (see processFreedGpuResources). Multiple calls for the same texture accumulate
// pending unrefs on one map entry keyed by the texture's unique ID.
void GrResourceCache::insertDelayedTextureUnref(GrTexture* texture) {
    texture->ref();
    uint32_t id = texture->uniqueID().asUInt();
    if (auto* data = fTexturesAwaitingUnref.find(id)) {
        data->addRef();
    } else {
        fTexturesAwaitingUnref.set(id, {texture});
    }
}
636 | |
// Polls the freed-texture inbox and performs one pending unref per message, dropping a
// texture's map entry once all of its pending unrefs are done.
void GrResourceCache::processFreedGpuResources() {
    // Nothing is awaiting an unref, so any queued messages can wait; skip the poll entirely.
    if (!fTexturesAwaitingUnref.count()) {
        return;
    }

    SkTArray<GrTextureFreedMessage> msgs;
    fFreedTextureInbox.poll(&msgs);
    for (int i = 0; i < msgs.count(); ++i) {
        SkASSERT(msgs[i].fOwningUniqueID == fContextUniqueID);
        uint32_t id = msgs[i].fTexture->uniqueID().asUInt();
        TextureAwaitingUnref* info = fTexturesAwaitingUnref.find(id);
        // If the GrContext was released or abandoned then fTexturesAwaitingUnref should have been
        // empty and we would have returned early above. Thus, any texture from a message should be
        // in the list of fTexturesAwaitingUnref.
        SkASSERT(info);
        info->unref();
        if (info->finished()) {
            fTexturesAwaitingUnref.remove(id);
        }
    }
}
658 | |
// Appends a resource to the non-purgeable array and records its slot index on the resource
// so removeFromNonpurgeableArray() can find it in O(1).
void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
    int index = fNonpurgeableResources.count();
    *fNonpurgeableResources.append() = resource;
    *resource->cacheAccess().accessCacheIndex() = index;
}
664 | |
// O(1) removal from the non-purgeable array via swap-with-last, using the index cached on
// each resource.
void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
    int* index = resource->cacheAccess().accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array
    GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->cacheAccess().accessCacheIndex() = *index;
    fNonpurgeableResources.pop();
    // Poison the stored index in debug builds so stale use trips the assert above.
    SkDEBUGCODE(*index = -1);
}
676 | |
// Hands out the next LRU timestamp. On 32-bit wrap-around (fTimestamp == 0 with resources
// present) it compacts all existing timestamps to 0..count-1, preserving relative order,
// before continuing.
uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.setReserve(fPurgeableQueue.count());

            // Drain the queue; it pops in timestamp order, so this array comes out sorted.
            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end() - 1,
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.count() &&
                   currNP < fNonpurgeableResources.count()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.count()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.count()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}
738 | |
// Forwards the memory dump request to every cached resource, purgeable and not.
void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
    }
}
747 | |
748 | #if GR_CACHE_STATS |
// Populates 'stats' with counts and per-resource aggregates over the whole cache.
// Only compiled when GR_CACHE_STATS is enabled.
void GrResourceCache::getStats(Stats* stats) const {
    stats->reset();

    stats->fTotal = this->getResourceCount();
    stats->fNumNonPurgeable = fNonpurgeableResources.count();
    stats->fNumPurgeable = fPurgeableQueue.count();

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        stats->update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        stats->update(fPurgeableQueue.at(i));
    }
}
763 | |
764 | #if GR_TEST_UTILS |
// Appends a human-readable stats summary to 'out'. Test-only (GR_TEST_UTILS).
// NOTE(review): byteUtilization divides by fMaxBytes — a zero budget would divide by
// zero; presumably never hit in the test configurations that call this. Confirm.
void GrResourceCache::dumpStats(SkString* out) const {
    this->validate();

    Stats stats;

    this->getStats(&stats);

    float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;

    out->appendf("Budget: %d bytes\n" , (int)fMaxBytes);
    out->appendf("\t\tEntry Count: current %d"
                 " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n" ,
                 stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable,
                 stats.fScratch, fHighWaterCount);
    out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n" ,
                 SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization,
                 SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes));
}
783 | |
// Emits stats as parallel key/value arrays for test harness consumption (GR_TEST_UTILS).
void GrResourceCache::dumpStatsKeyValuePairs(SkTArray<SkString>* keys,
                                             SkTArray<double>* values) const {
    this->validate();

    Stats stats;
    this->getStats(&stats);

    keys->push_back(SkString("gpu_cache_purgable_entries" )); values->push_back(stats.fNumPurgeable);
}
793 | #endif |
794 | |
795 | #endif |
796 | |
797 | #ifdef SK_DEBUG |
// Debug-only consistency check. Re-derives the cache's aggregate counters
// (counts, byte totals, budgeted tallies, purgeable bytes) by walking both the
// nonpurgeable array and the purgeable queue, and asserts that they agree with
// the incrementally-maintained members. Also cross-checks each resource's
// scratch/unique keys against fScratchMap and fUniqueHash.
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    // mask grows with fCount; a random draw against it skips most runs once
    // the cache is large, so validation cost stays bounded.
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    // Local tally that mirrors the cache's aggregate counters while asserting
    // per-resource key invariants as it goes.
    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            // Zero every counter in one shot (the struct has no vtable or
            // non-trivial members), then point at the cache's lookup tables.
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->resourcePriv().isPurgeable()) {
                ++fLocked;
            }

            const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const GrUniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isScratch()) {
                // A scratch resource must not also carry a unique key, must be
                // findable in the scratch map, and must not wrap external objects.
                SkASSERT(!uniqueKey.isValid());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            } else if (scratchKey.isValid()) {
                // Has a scratch key but isn't currently scratch: if budgeted it
                // must have been promoted to a unique key.
                SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
                         uniqueKey.isValid());
                if (!uniqueKey.isValid()) {
                    ++fCouldBeScratch;
                    SkASSERT(fScratchMap->countForKey(scratchKey));
                }
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            }
            if (uniqueKey.isValid()) {
                // Uniquely-keyed resources must be indexed in fUniqueHash and be
                // either budgeted or wrapping an external object.
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
                         resource->resourcePriv().refsWrappedObjects());

                // A uniquely-keyed resource must not also be listed as scratch.
                if (scratchKey.isValid()) {
                    SkASSERT(!fScratchMap->has(resource, scratchKey));
                }
            }

            if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    {
        // Every scratch-map entry must carry a valid scratch key and no unique
        // key, and traversal must visit exactly count() entries.
        int count = 0;
        fScratchMap.foreach([&](const GrGpuResource& resource) {
            SkASSERT(resource.resourcePriv().getScratchKey().isValid());
            SkASSERT(!resource.getUniqueKey().isValid());
            count++;
        });
        SkASSERT(count == fScratchMap.count());
    }

    Stats stats(this);
    size_t purgeableBytes = 0;
    int numBudgetedResourcesFlushWillMakePurgeable = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        // A purgeable resource may appear here only transiently, while it is
        // the one being moved (fNewlyPurgeableResourceForValidation).
        SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        // Each resource's stored cache index must match its array position.
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
            !fNonpurgeableResources[i]->cacheAccess().hasRef() &&
            fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
            ++numBudgetedResourcesFlushWillMakePurgeable;
        }
        stats.update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
        // Queue entries also record their position via the cache index.
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
        purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
    }

    // The recomputed tallies must agree with the incrementally-kept members.
    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
             numBudgetedResourcesFlushWillMakePurgeable);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
    SkASSERT(purgeableBytes == fPurgeableBytes);
#if GR_CACHE_STATS
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch + stats.fCouldBeScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}
924 | |
925 | bool GrResourceCache::isInCache(const GrGpuResource* resource) const { |
926 | int index = *resource->cacheAccess().accessCacheIndex(); |
927 | if (index < 0) { |
928 | return false; |
929 | } |
930 | if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) { |
931 | return true; |
932 | } |
933 | if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) { |
934 | return true; |
935 | } |
936 | SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache." ); |
937 | return false; |
938 | } |
939 | |
940 | #endif |
941 | |