1/*
2 * Copyright 2014 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "src/gpu/GrResourceCache.h"
9#include <atomic>
10#include "include/gpu/GrDirectContext.h"
11#include "include/private/GrSingleOwner.h"
12#include "include/private/SkTo.h"
13#include "include/utils/SkRandom.h"
14#include "src/core/SkMessageBus.h"
15#include "src/core/SkOpts.h"
16#include "src/core/SkScopeExit.h"
17#include "src/core/SkTSort.h"
18#include "src/gpu/GrCaps.h"
19#include "src/gpu/GrContextPriv.h"
20#include "src/gpu/GrGpuResourceCacheAccess.h"
21#include "src/gpu/GrProxyProvider.h"
22#include "src/gpu/GrTexture.h"
23#include "src/gpu/GrTextureProxyCacheAccess.h"
24#include "src/gpu/GrTracing.h"
25#include "src/gpu/SkGr.h"
26
27DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage);
28
29DECLARE_SKMESSAGEBUS_MESSAGE(GrTextureFreedMessage);
30
31#define ASSERT_SINGLE_OWNER GR_ASSERT_SINGLE_OWNER(fSingleOwner)
32
33//////////////////////////////////////////////////////////////////////////////
34
35GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
36 static std::atomic<int32_t> nextType{INHERITED::kInvalidDomain + 1};
37
38 int32_t type = nextType++;
39 if (type > SkTo<int32_t>(UINT16_MAX)) {
40 SK_ABORT("Too many Resource Types");
41 }
42
43 return static_cast<ResourceType>(type);
44}
45
46GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
47 static std::atomic<int32_t> nextDomain{INHERITED::kInvalidDomain + 1};
48
49 int32_t domain = nextDomain++;
50 if (domain > SkTo<int32_t>(UINT16_MAX)) {
51 SK_ABORT("Too many GrUniqueKey Domains");
52 }
53
54 return static_cast<Domain>(domain);
55}
56
// Hash function for resource keys; delegates to Skia's optimized hash (SkOpts::hash).
uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
    return SkOpts::hash(data, size);
}
60
61//////////////////////////////////////////////////////////////////////////////
62
63class GrResourceCache::AutoValidate : ::SkNoncopyable {
64public:
65 AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
66 ~AutoValidate() { fCache->validate(); }
67private:
68 GrResourceCache* fCache;
69};
70
71//////////////////////////////////////////////////////////////////////////////
72
// Default-constructed entry; member default values come from the class declaration
// (not visible in this file).
inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref() = default;
74
// Tracks 'texture' with one outstanding unref owed. Does not take its own ref;
// the caller (insertDelayedTextureUnref) refs the texture before constructing this.
inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(GrTexture* texture)
        : fTexture(texture), fNumUnrefs(1) {}
77
// Move-construct: steal the tracked texture and pending-unref count, leaving 'that'
// empty so its destructor performs no unrefs.
inline GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref(TextureAwaitingUnref&& that) {
    fTexture = std::exchange(that.fTexture, nullptr);
    fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
}
82
// Move-assign: steal 'that''s state, leaving it empty.
// NOTE(review): if *this already tracks a texture, its pending unrefs are dropped
// without being delivered — callers appear to only move-assign into empty entries;
// confirm before reusing this type elsewhere.
inline GrResourceCache::TextureAwaitingUnref& GrResourceCache::TextureAwaitingUnref::operator=(
        TextureAwaitingUnref&& that) {
    fTexture = std::exchange(that.fTexture, nullptr);
    fNumUnrefs = std::exchange(that.fNumUnrefs, 0);
    return *this;
}
89
// Delivers every outstanding unref for the tracked texture (if any) on destruction.
inline GrResourceCache::TextureAwaitingUnref::~TextureAwaitingUnref() {
    if (fTexture) {
        for (int i = 0; i < fNumUnrefs; ++i) {
            fTexture->unref();
        }
    }
}
97
98inline void GrResourceCache::TextureAwaitingUnref::TextureAwaitingUnref::addRef() { ++fNumUnrefs; }
99
// Delivers one pending unref immediately and decrements the outstanding count.
inline void GrResourceCache::TextureAwaitingUnref::unref() {
    SkASSERT(fNumUnrefs > 0);
    fTexture->unref();
    --fNumUnrefs;
}
105
// True once every pending unref has been delivered.
inline bool GrResourceCache::TextureAwaitingUnref::finished() { return !fNumUnrefs; }
107
108//////////////////////////////////////////////////////////////////////////////
109
// The cache and its two message inboxes are bound to one context via
// 'contextUniqueID'; 'singleOwner' backs the ASSERT_SINGLE_OWNER checks used
// throughout this file.
GrResourceCache::GrResourceCache(const GrCaps* caps, GrSingleOwner* singleOwner,
                                 uint32_t contextUniqueID)
        : fInvalidUniqueKeyInbox(contextUniqueID)
        , fFreedTextureInbox(contextUniqueID)
        , fContextUniqueID(contextUniqueID)
        , fSingleOwner(singleOwner)
        , fPreferVRAMUseOverFlushes(caps->preferVRAMUseOverFlushes()) {
    SkASSERT(contextUniqueID != SK_InvalidUniqueID);
}
119
// Releases every resource still tracked by the cache before it goes away.
GrResourceCache::~GrResourceCache() {
    this->releaseAll();
}
123
// Sets the byte budget cap and immediately purges purgeable resources to fit it.
void GrResourceCache::setLimit(size_t bytes) {
    fMaxBytes = bytes;
    this->purgeAsNeeded();
}
128
// Registers a new resource with the cache. The resource must not already be tracked,
// destroyed, or purgeable. Updates size/count totals, budgeted totals, and the
// scratch map, then purges if this pushed the cache over budget.
void GrResourceCache::insertResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!this->isInCache(resource));
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(!resource->resourcePriv().isPurgeable());

    // We must set the timestamp before adding to the array in case the timestamp wraps and we wind
    // up iterating over all the resources that already have timestamps.
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());

    this->addToNonpurgeableArray(resource);

    size_t size = resource->gpuMemorySize();
    SkDEBUGCODE(++fCount;)
    fBytes += size;
#if GR_CACHE_STATS
    fHighWaterCount = std::max(this->getResourceCount(), fHighWaterCount);
    fHighWaterBytes = std::max(fBytes, fHighWaterBytes);
#endif
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
#if GR_CACHE_STATS
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
#endif
    }
    // Only resources without a valid unique key are registered in the scratch map
    // (validate() checks the same invariant).
    if (resource->resourcePriv().getScratchKey().isValid() &&
        !resource->getUniqueKey().isValid()) {
        SkASSERT(!resource->resourcePriv().refsWrappedObjects());
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    this->purgeAsNeeded();
}
167
// Stops tracking a resource (without destroying it), reversing the bookkeeping done
// in insertResource(): container membership, size/count totals, budgeted totals,
// and any scratch/unique key map entries.
void GrResourceCache::removeResource(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    this->validate();
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // A resource lives in exactly one of the purgeable queue or nonpurgeable array.
    if (resource->resourcePriv().isPurgeable()) {
        fPurgeableQueue.remove(resource);
        fPurgeableBytes -= size;
    } else {
        this->removeFromNonpurgeableArray(resource);
    }

    SkDEBUGCODE(--fCount;)
    fBytes -= size;
    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
        --fBudgetedCount;
        fBudgetedBytes -= size;
        TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                       fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);
    }

    if (resource->resourcePriv().getScratchKey().isValid() &&
        !resource->getUniqueKey().isValid()) {
        fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
    }
    if (resource->getUniqueKey().isValid()) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    this->validate();
}
199
// Abandons every tracked resource (the underlying GPU objects are NOT deleted via
// the API; cacheAccess().abandon() severs them). On return, the cache is empty.
void GrResourceCache::abandonAll() {
    AutoValidate av(this);

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    // Abandoning a resource removes it from the container, so keep taking the
    // current tail/top until each container drains.
    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().abandon();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().abandon();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}
229
// Releases (frees) every tracked resource. On return, the cache is empty.
void GrResourceCache::releaseAll() {
    AutoValidate av(this);

    this->processFreedGpuResources();

    // We need to make sure to free any resources that were waiting on a free message but never
    // received one.
    fTexturesAwaitingUnref.reset();

    SkASSERT(fProxyProvider); // better have called setProxyProvider
    // We must remove the uniqueKeys from the proxies here. While they possess a uniqueKey
    // they also have a raw pointer back to this class (which is presumably going away)!
    fProxyProvider->removeAllUniqueKeys();

    // Releasing a resource removes it from the container, so keep taking the
    // current tail/top until each container drains.
    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
        back->cacheAccess().release();
    }

    while (fPurgeableQueue.count()) {
        GrGpuResource* top = fPurgeableQueue.peek();
        SkASSERT(!top->wasDestroyed());
        top->cacheAccess().release();
    }

    SkASSERT(!fScratchMap.count());
    SkASSERT(!fUniqueHash.count());
    SkASSERT(!fCount);
    SkASSERT(!this->getResourceCount());
    SkASSERT(!fBytes);
    SkASSERT(!fBudgetedCount);
    SkASSERT(!fBudgetedBytes);
    SkASSERT(!fPurgeableBytes);
    SkASSERT(!fTexturesAwaitingUnref.count());
}
266
// Adds a ref to a cache-owned resource. If it already has a ref, just add another;
// otherwise go through refAndMakeResourceMRU(), which also moves the resource out
// of the purgeable queue and refreshes its timestamp.
void GrResourceCache::refResource(GrGpuResource* resource) {
    SkASSERT(resource);
    SkASSERT(resource->getContext()->priv().getResourceCache() == this);
    if (resource->cacheAccess().hasRef()) {
        resource->ref();
    } else {
        this->refAndMakeResourceMRU(resource);
    }
    this->validate();
}
277
278class GrResourceCache::AvailableForScratchUse {
279public:
280 AvailableForScratchUse() { }
281
282 bool operator()(const GrGpuResource* resource) const {
283 SkASSERT(!resource->getUniqueKey().isValid() &&
284 resource->resourcePriv().getScratchKey().isValid());
285
286 // isScratch() also tests that the resource is budgeted.
287 if (resource->internalHasRef() || !resource->cacheAccess().isScratch()) {
288 return false;
289 }
290 return true;
291 }
292};
293
294GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey) {
295 SkASSERT(scratchKey.isValid());
296
297 GrGpuResource* resource = fScratchMap.find(scratchKey, AvailableForScratchUse());
298 if (resource) {
299 this->refAndMakeResourceMRU(resource);
300 this->validate();
301 }
302 return resource;
303}
304
305void GrResourceCache::willRemoveScratchKey(const GrGpuResource* resource) {
306 ASSERT_SINGLE_OWNER
307 SkASSERT(resource->resourcePriv().getScratchKey().isValid());
308 if (!resource->getUniqueKey().isValid()) {
309 fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
310 }
311}
312
// Strips a resource's unique key. If it also carries a scratch key it becomes
// findable as scratch again (inserted into the scratch map).
void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    // Someone has a ref to this resource in order to have removed the key. When the ref count
    // reaches zero we will get a ref cnt notification and figure out what to do with it.
    if (resource->getUniqueKey().isValid()) {
        SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();
    if (resource->resourcePriv().getScratchKey().isValid()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

    // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
    // require purging. However, the resource must be ref'ed to get here and therefore can't
    // be purgeable. We'll purge it when the refs reach zero.
    SkASSERT(!resource->resourcePriv().isPurgeable());
    this->validate();
}
332
// Assigns 'newKey' to 'resource', evicting or de-keying any other resource that
// currently holds that key. An invalid 'newKey' simply removes the existing key.
void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    // If another resource has the new key, remove its key then install the key on this resource.
    if (newKey.isValid()) {
        if (GrGpuResource* old = fUniqueHash.find(newKey)) {
            // If the old resource using the key is purgeable and is unreachable, then remove it.
            if (!old->resourcePriv().getScratchKey().isValid() &&
                old->resourcePriv().isPurgeable()) {
                old->cacheAccess().release();
            } else {
                // removeUniqueKey expects an external owner of the resource.
                this->removeUniqueKey(sk_ref_sp(old).get());
            }
        }
        SkASSERT(nullptr == fUniqueHash.find(newKey));

        // Remove the entry for this resource if it already has a unique key.
        if (resource->getUniqueKey().isValid()) {
            SkASSERT(resource == fUniqueHash.find(resource->getUniqueKey()));
            fUniqueHash.remove(resource->getUniqueKey());
            SkASSERT(nullptr == fUniqueHash.find(resource->getUniqueKey()));
        } else {
            // 'resource' didn't have a valid unique key before so it is switching sides. Remove it
            // from the ScratchMap
            if (resource->resourcePriv().getScratchKey().isValid()) {
                fScratchMap.remove(resource->resourcePriv().getScratchKey(), resource);
            }
        }

        resource->cacheAccess().setUniqueKey(newKey);
        fUniqueHash.add(resource);
    } else {
        this->removeUniqueKey(resource);
    }

    this->validate();
}
373
// Refs an unreferenced resource and makes it most-recently-used (fresh timestamp).
// If it was purgeable it is moved back into the nonpurgeable array; otherwise, a
// budgeted zero-ref resource was being counted as flush-will-make-purgeable and
// that counter is corrected.
void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    if (resource->resourcePriv().isPurgeable()) {
        // It's about to become unpurgeable.
        fPurgeableBytes -= resource->gpuMemorySize();
        fPurgeableQueue.remove(resource);
        this->addToNonpurgeableArray(resource);
    } else if (!resource->cacheAccess().hasRef() &&
               resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
        fNumBudgetedResourcesFlushWillMakePurgeable--;
    }
    resource->cacheAccess().ref();

    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    this->validate();
}
394
// Called when a resource's last ref goes away. Decides whether the resource stays
// cached (moved to the purgeable queue), is re-budgeted for scratch reuse, or is
// released outright.
void GrResourceCache::notifyRefCntReachedZero(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(!resource->wasDestroyed());
    SkASSERT(this->isInCache(resource));
    // This resource should always be in the nonpurgeable array when this function is called. It
    // will be moved to the queue if it is newly purgeable.
    SkASSERT(fNonpurgeableResources[*resource->cacheAccess().accessCacheIndex()] == resource);

#ifdef SK_DEBUG
    // When the timestamp overflows validate() is called. validate() checks that resources in
    // the nonpurgeable array are indeed not purgeable. However, the movement from the array to
    // the purgeable queue happens just below in this function. So we mark it as an exception.
    if (resource->resourcePriv().isPurgeable()) {
        fNewlyPurgeableResourceForValidation = resource;
    }
#endif
    resource->cacheAccess().setTimestamp(this->getNextTimestamp());
    SkDEBUGCODE(fNewlyPurgeableResourceForValidation = nullptr);

    // A budgeted resource that's not yet purgeable (e.g. pending GPU work) will
    // become purgeable after a flush — track it for requestsFlush().
    if (!resource->resourcePriv().isPurgeable() &&
        resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fNumBudgetedResourcesFlushWillMakePurgeable;
    }

    if (!resource->resourcePriv().isPurgeable()) {
        this->validate();
        return;
    }

    // Newly purgeable: move it from the nonpurgeable array to the purgeable queue.
    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
    fPurgeableBytes += resource->gpuMemorySize();

    bool hasUniqueKey = resource->getUniqueKey().isValid();

    GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();

    if (budgetedType == GrBudgetedType::kBudgeted) {
        // Purge the resource immediately if we're over budget
        // Also purge if the resource has neither a valid scratch key nor a unique key.
        bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
        if (!this->overBudget() && hasKey) {
            return;
        }
    } else {
        // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
        // they can be reused again by the image connected to the unique key.
        if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
            return;
        }
        // Check whether this resource could still be used as a scratch resource.
        if (!resource->resourcePriv().refsWrappedObjects() &&
            resource->resourcePriv().getScratchKey().isValid()) {
            // We won't purge an existing resource to make room for this one.
            if (this->wouldFit(resource->gpuMemorySize())) {
                resource->resourcePriv().makeBudgeted();
                return;
            }
        }
    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
    SkASSERT(this->getResourceCount() < beforeCount);
    this->validate();
}
464
// Called after a resource flips between budgeted and unbudgeted; moves its size and
// count between the budgeted totals (and the flush-will-make-purgeable counter).
void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    SkASSERT(resource);
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();
    // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
    // resource become purgeable. However, we should never allow that transition. Wrapped
    // resources are the only resources that can be in that state and they aren't allowed to
    // transition from one budgeted state to another.
    SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
    if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
        fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
        fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
#endif
        if (!resource->resourcePriv().isPurgeable() && !resource->cacheAccess().hasRef()) {
            ++fNumBudgetedResourcesFlushWillMakePurgeable;
        }
        this->purgeAsNeeded();
    } else {
        SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
        --fBudgetedCount;
        fBudgetedBytes -= size;
        if (!resource->resourcePriv().isPurgeable() && !resource->cacheAccess().hasRef()) {
            --fNumBudgetedResourcesFlushWillMakePurgeable;
        }
    }
    SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

    this->validate();
}
501
// Drains the invalid-unique-key and freed-texture inboxes, then releases purgeable
// resources in LRU order until the cache fits its byte budget (or the queue empties).
void GrResourceCache::purgeAsNeeded() {
    SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
    fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
    if (invalidKeyMsgs.count()) {
        SkASSERT(fProxyProvider);

        for (int i = 0; i < invalidKeyMsgs.count(); ++i) {
            fProxyProvider->processInvalidUniqueKey(invalidKeyMsgs[i].key(), nullptr,
                                                   GrProxyProvider::InvalidateGPUResource::kYes);
            SkASSERT(!this->findAndRefUniqueResource(invalidKeyMsgs[i].key()));
        }
    }

    this->processFreedGpuResources();

    // Release the LRU (head-of-queue) purgeable resource until we're back on budget.
    bool stillOverbudget = this->overBudget();
    while (stillOverbudget && fPurgeableQueue.count()) {
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->resourcePriv().isPurgeable());
        resource->cacheAccess().release();
        stillOverbudget = this->overBudget();
    }

    this->validate();
}
527
// Releases all purgeable resources — or, when 'scratchResourcesOnly' is true, only
// those without a unique key.
void GrResourceCache::purgeUnlockedResources(bool scratchResourcesOnly) {
    if (!scratchResourcesOnly) {
        // We could disable maintaining the heap property here, but it would add a lot of
        // complexity. Moreover, this is rarely called.
        while (fPurgeableQueue.count()) {
            GrGpuResource* resource = fPurgeableQueue.peek();
            SkASSERT(resource->resourcePriv().isPurgeable());
            resource->cacheAccess().release();
        }
    } else {
        // Sort the queue
        fPurgeableQueue.sort();

        // Make a list of the scratch resources to delete
        SkTDArray<GrGpuResource*> scratchResources;
        for (int i = 0; i < fPurgeableQueue.count(); i++) {
            GrGpuResource* resource = fPurgeableQueue.at(i);
            SkASSERT(resource->resourcePriv().isPurgeable());
            if (!resource->getUniqueKey().isValid()) {
                *scratchResources.append() = resource;
            }
        }

        // Delete the scratch resources. This must be done as a separate pass
        // to avoid messing up the sorted order of the queue
        for (int i = 0; i < scratchResources.count(); i++) {
            scratchResources.getAt(i)->cacheAccess().release();
        }
    }

    this->validate();
}
560
// Releases every purgeable resource that became purgeable before 'purgeTime',
// walking the queue in LRU order and stopping at the first resource that is new enough.
void GrResourceCache::purgeResourcesNotUsedSince(GrStdSteadyClock::time_point purgeTime) {
    while (fPurgeableQueue.count()) {
        const GrStdSteadyClock::time_point resourceTime =
                fPurgeableQueue.peek()->cacheAccess().timeWhenResourceBecamePurgeable();
        if (resourceTime >= purgeTime) {
            // Resources were given both LRU timestamps and tagged with a frame number when
            // they first became purgeable. The LRU timestamp won't change again until the
            // resource is made non-purgeable again. So, at this point all the remaining
            // resources in the timestamp-sorted queue will have a frame number >= to this
            // one.
            break;
        }
        GrGpuResource* resource = fPurgeableQueue.peek();
        SkASSERT(resource->resourcePriv().isPurgeable());
        resource->cacheAccess().release();
    }
}
578
579void GrResourceCache::purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources) {
580
581 const size_t tmpByteBudget = std::max((size_t)0, fBytes - bytesToPurge);
582 bool stillOverbudget = tmpByteBudget < fBytes;
583
584 if (preferScratchResources && bytesToPurge < fPurgeableBytes) {
585 // Sort the queue
586 fPurgeableQueue.sort();
587
588 // Make a list of the scratch resources to delete
589 SkTDArray<GrGpuResource*> scratchResources;
590 size_t scratchByteCount = 0;
591 for (int i = 0; i < fPurgeableQueue.count() && stillOverbudget; i++) {
592 GrGpuResource* resource = fPurgeableQueue.at(i);
593 SkASSERT(resource->resourcePriv().isPurgeable());
594 if (!resource->getUniqueKey().isValid()) {
595 *scratchResources.append() = resource;
596 scratchByteCount += resource->gpuMemorySize();
597 stillOverbudget = tmpByteBudget < fBytes - scratchByteCount;
598 }
599 }
600
601 // Delete the scratch resources. This must be done as a separate pass
602 // to avoid messing up the sorted order of the queue
603 for (int i = 0; i < scratchResources.count(); i++) {
604 scratchResources.getAt(i)->cacheAccess().release();
605 }
606 stillOverbudget = tmpByteBudget < fBytes;
607
608 this->validate();
609 }
610
611 // Purge any remaining resources in LRU order
612 if (stillOverbudget) {
613 const size_t cachedByteCount = fMaxBytes;
614 fMaxBytes = tmpByteBudget;
615 this->purgeAsNeeded();
616 fMaxBytes = cachedByteCount;
617 }
618}
// True when the cache is over budget, nothing is currently purgeable, and at least
// one budgeted zero-ref resource would become purgeable after a GPU flush.
bool GrResourceCache::requestsFlush() const {
    return this->overBudget() && !fPurgeableQueue.count() &&
           fNumBudgetedResourcesFlushWillMakePurgeable > 0;
}
623
624
// Takes a ref on 'texture' to be released later, when a matching GrTextureFreedMessage
// arrives (see processFreedGpuResources). Multiple calls for the same texture
// accumulate pending unrefs on a single entry keyed by the texture's unique ID.
void GrResourceCache::insertDelayedTextureUnref(GrTexture* texture) {
    texture->ref();
    uint32_t id = texture->uniqueID().asUInt();
    if (auto* data = fTexturesAwaitingUnref.find(id)) {
        data->addRef();
    } else {
        fTexturesAwaitingUnref.set(id, {texture});
    }
}
634
// Drains the freed-texture inbox, delivering one pending unref per message and
// dropping an entry once all of its unrefs have been delivered.
void GrResourceCache::processFreedGpuResources() {
    // Nothing can be waiting if the map is empty; skip polling entirely.
    if (!fTexturesAwaitingUnref.count()) {
        return;
    }

    SkTArray<GrTextureFreedMessage> msgs;
    fFreedTextureInbox.poll(&msgs);
    for (int i = 0; i < msgs.count(); ++i) {
        SkASSERT(msgs[i].fOwningUniqueID == fContextUniqueID);
        uint32_t id = msgs[i].fTexture->uniqueID().asUInt();
        TextureAwaitingUnref* info = fTexturesAwaitingUnref.find(id);
        // If the GrContext was released or abandoned then fTexturesAwaitingUnref should have been
        // empty and we would have returned early above. Thus, any texture from a message should be
        // in the list of fTexturesAwaitingUnref.
        SkASSERT(info);
        info->unref();
        if (info->finished()) {
            fTexturesAwaitingUnref.remove(id);
        }
    }
}
656
657void GrResourceCache::addToNonpurgeableArray(GrGpuResource* resource) {
658 int index = fNonpurgeableResources.count();
659 *fNonpurgeableResources.append() = resource;
660 *resource->cacheAccess().accessCacheIndex() = index;
661}
662
// O(1) removal from the nonpurgeable array: swap the tail into the vacated slot,
// fix up the tail's stored index, and shrink the array.
void GrResourceCache::removeFromNonpurgeableArray(GrGpuResource* resource) {
    int* index = resource->cacheAccess().accessCacheIndex();
    // Fill the hole we will create in the array with the tail object, adjust its index, and
    // then pop the array
    GrGpuResource* tail = *(fNonpurgeableResources.end() - 1);
    SkASSERT(fNonpurgeableResources[*index] == resource);
    fNonpurgeableResources[*index] = tail;
    *tail->cacheAccess().accessCacheIndex() = *index;
    fNonpurgeableResources.pop();
    // -1 marks "not in either container" (checked by isInCache()).
    SkDEBUGCODE(*index = -1);
}
674
// Returns the next LRU timestamp. When the 32-bit counter wraps to 0, all live
// resources are re-stamped in their existing LRU order with sequential values
// starting at 0, so relative ordering survives the wrap.
uint32_t GrResourceCache::getNextTimestamp() {
    // If we wrap then all the existing resources will appear older than any resources that get
    // a timestamp after the wrap.
    if (0 == fTimestamp) {
        int count = this->getResourceCount();
        if (count) {
            // Reset all the timestamps. We sort the resources by timestamp and then assign
            // sequential timestamps beginning with 0. This is O(n*lg(n)) but it should be extremely
            // rare.
            SkTDArray<GrGpuResource*> sortedPurgeableResources;
            sortedPurgeableResources.setReserve(fPurgeableQueue.count());

            // Draining the heap in pop order yields the purgeable resources
            // already sorted by timestamp.
            while (fPurgeableQueue.count()) {
                *sortedPurgeableResources.append() = fPurgeableQueue.peek();
                fPurgeableQueue.pop();
            }

            SkTQSort(fNonpurgeableResources.begin(), fNonpurgeableResources.end(),
                     CompareTimestamp);

            // Pick resources out of the purgeable and non-purgeable arrays based on lowest
            // timestamp and assign new timestamps.
            int currP = 0;
            int currNP = 0;
            while (currP < sortedPurgeableResources.count() &&
                   currNP < fNonpurgeableResources.count()) {
                uint32_t tsP = sortedPurgeableResources[currP]->cacheAccess().timestamp();
                uint32_t tsNP = fNonpurgeableResources[currNP]->cacheAccess().timestamp();
                SkASSERT(tsP != tsNP);
                if (tsP < tsNP) {
                    sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
                } else {
                    // Correct the index in the nonpurgeable array stored on the resource post-sort.
                    *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                    fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
                }
            }

            // The above loop ended when we hit the end of one array. Finish the other one.
            while (currP < sortedPurgeableResources.count()) {
                sortedPurgeableResources[currP++]->cacheAccess().setTimestamp(fTimestamp++);
            }
            while (currNP < fNonpurgeableResources.count()) {
                *fNonpurgeableResources[currNP]->cacheAccess().accessCacheIndex() = currNP;
                fNonpurgeableResources[currNP++]->cacheAccess().setTimestamp(fTimestamp++);
            }

            // Rebuild the queue.
            for (int i = 0; i < sortedPurgeableResources.count(); ++i) {
                fPurgeableQueue.insert(sortedPurgeableResources[i]);
            }

            this->validate();
            SkASSERT(count == this->getResourceCount());

            // count should be the next timestamp we return.
            SkASSERT(fTimestamp == SkToU32(count));
        }
    }
    return fTimestamp++;
}
736
737void GrResourceCache::dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const {
738 for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
739 fNonpurgeableResources[i]->dumpMemoryStatistics(traceMemoryDump);
740 }
741 for (int i = 0; i < fPurgeableQueue.count(); ++i) {
742 fPurgeableQueue.at(i)->dumpMemoryStatistics(traceMemoryDump);
743 }
744}
745
746#if GR_CACHE_STATS
// Fills 'stats' with counts for the whole cache by visiting every tracked resource
// in both containers. (GR_CACHE_STATS builds only.)
void GrResourceCache::getStats(Stats* stats) const {
    stats->reset();

    stats->fTotal = this->getResourceCount();
    stats->fNumNonPurgeable = fNonpurgeableResources.count();
    stats->fNumPurgeable = fPurgeableQueue.count();

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        stats->update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        stats->update(fPurgeableQueue.at(i));
    }
}
761
762#if GR_TEST_UTILS
// Appends a human-readable summary of cache occupancy to 'out'. (Test builds only.)
void GrResourceCache::dumpStats(SkString* out) const {
    this->validate();

    Stats stats;

    this->getStats(&stats);

    // NOTE(review): if fMaxBytes is 0 this is a float divide by zero (yields inf);
    // presumably the budget is always nonzero when dumping — confirm.
    float byteUtilization = (100.f * fBudgetedBytes) / fMaxBytes;

    out->appendf("Budget: %d bytes\n", (int)fMaxBytes);
    out->appendf("\t\tEntry Count: current %d"
                 " (%d budgeted, %d wrapped, %d locked, %d scratch), high %d\n",
                 stats.fTotal, fBudgetedCount, stats.fWrapped, stats.fNumNonPurgeable,
                 stats.fScratch, fHighWaterCount);
    out->appendf("\t\tEntry Bytes: current %d (budgeted %d, %.2g%% full, %d unbudgeted) high %d\n",
                 SkToInt(fBytes), SkToInt(fBudgetedBytes), byteUtilization,
                 SkToInt(stats.fUnbudgetedSize), SkToInt(fHighWaterBytes));
}
781
// Emits cache statistics as parallel key/value arrays for test reporting.
// Note: the "purgable" spelling in the key string is an established external
// identifier; do not "fix" it without updating consumers.
void GrResourceCache::dumpStatsKeyValuePairs(SkTArray<SkString>* keys,
                                             SkTArray<double>* values) const {
    this->validate();

    Stats stats;
    this->getStats(&stats);

    keys->push_back(SkString("gpu_cache_purgable_entries")); values->push_back(stats.fNumPurgeable);
}
791#endif
792
793#endif
794
795#ifdef SK_DEBUG
// Debug-only consistency check: recomputes all aggregate counters by walking both
// containers and asserts they match the incrementally-maintained members, along
// with per-resource key/map invariants.
void GrResourceCache::validate() const {
    // Reduce the frequency of validations for large resource counts.
    static SkRandom gRandom;
    int mask = (SkNextPow2(fCount + 1) >> 5) - 1;
    if (~mask && (gRandom.nextU() & mask)) {
        return;
    }

    // Accumulator that re-derives the cache's aggregate state from scratch.
    struct Stats {
        size_t fBytes;
        int fBudgetedCount;
        size_t fBudgetedBytes;
        int fLocked;
        int fScratch;
        int fCouldBeScratch;
        int fContent;
        const ScratchMap* fScratchMap;
        const UniqueHash* fUniqueHash;

        Stats(const GrResourceCache* cache) {
            memset(this, 0, sizeof(*this));
            fScratchMap = &cache->fScratchMap;
            fUniqueHash = &cache->fUniqueHash;
        }

        // Tallies one resource and asserts its key-related invariants.
        void update(GrGpuResource* resource) {
            fBytes += resource->gpuMemorySize();

            if (!resource->resourcePriv().isPurgeable()) {
                ++fLocked;
            }

            const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
            const GrUniqueKey& uniqueKey = resource->getUniqueKey();

            if (resource->cacheAccess().isScratch()) {
                SkASSERT(!uniqueKey.isValid());
                ++fScratch;
                SkASSERT(fScratchMap->countForKey(scratchKey));
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            } else if (scratchKey.isValid()) {
                SkASSERT(GrBudgetedType::kBudgeted != resource->resourcePriv().budgetedType() ||
                         uniqueKey.isValid());
                if (!uniqueKey.isValid()) {
                    ++fCouldBeScratch;
                    SkASSERT(fScratchMap->countForKey(scratchKey));
                }
                SkASSERT(!resource->resourcePriv().refsWrappedObjects());
            }
            if (uniqueKey.isValid()) {
                ++fContent;
                SkASSERT(fUniqueHash->find(uniqueKey) == resource);
                SkASSERT(GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType() ||
                         resource->resourcePriv().refsWrappedObjects());

                if (scratchKey.isValid()) {
                    SkASSERT(!fScratchMap->has(resource, scratchKey));
                }
            }

            if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
                ++fBudgetedCount;
                fBudgetedBytes += resource->gpuMemorySize();
            }
        }
    };

    // Every scratch-map entry must have a scratch key and no unique key.
    {
        int count = 0;
        fScratchMap.foreach([&](const GrGpuResource& resource) {
            SkASSERT(resource.resourcePriv().getScratchKey().isValid());
            SkASSERT(!resource.getUniqueKey().isValid());
            count++;
        });
        SkASSERT(count == fScratchMap.count());
    }

    Stats stats(this);
    size_t purgeableBytes = 0;
    int numBudgetedResourcesFlushWillMakePurgeable = 0;

    for (int i = 0; i < fNonpurgeableResources.count(); ++i) {
        SkASSERT(!fNonpurgeableResources[i]->resourcePriv().isPurgeable() ||
                 fNewlyPurgeableResourceForValidation == fNonpurgeableResources[i]);
        SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
        if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
            !fNonpurgeableResources[i]->cacheAccess().hasRef() &&
            fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
            ++numBudgetedResourcesFlushWillMakePurgeable;
        }
        stats.update(fNonpurgeableResources[i]);
    }
    for (int i = 0; i < fPurgeableQueue.count(); ++i) {
        SkASSERT(fPurgeableQueue.at(i)->resourcePriv().isPurgeable());
        SkASSERT(*fPurgeableQueue.at(i)->cacheAccess().accessCacheIndex() == i);
        SkASSERT(!fPurgeableQueue.at(i)->wasDestroyed());
        stats.update(fPurgeableQueue.at(i));
        purgeableBytes += fPurgeableQueue.at(i)->gpuMemorySize();
    }

    // Recomputed aggregates must agree with the incrementally-maintained members.
    SkASSERT(fCount == this->getResourceCount());
    SkASSERT(fBudgetedCount <= fCount);
    SkASSERT(fBudgetedBytes <= fBytes);
    SkASSERT(stats.fBytes == fBytes);
    SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable ==
             numBudgetedResourcesFlushWillMakePurgeable);
    SkASSERT(stats.fBudgetedBytes == fBudgetedBytes);
    SkASSERT(stats.fBudgetedCount == fBudgetedCount);
    SkASSERT(purgeableBytes == fPurgeableBytes);
#if GR_CACHE_STATS
    SkASSERT(fBudgetedHighWaterCount <= fHighWaterCount);
    SkASSERT(fBudgetedHighWaterBytes <= fHighWaterBytes);
    SkASSERT(fBytes <= fHighWaterBytes);
    SkASSERT(fCount <= fHighWaterCount);
    SkASSERT(fBudgetedBytes <= fBudgetedHighWaterBytes);
    SkASSERT(fBudgetedCount <= fBudgetedHighWaterCount);
#endif
    SkASSERT(stats.fContent == fUniqueHash.count());
    SkASSERT(stats.fScratch + stats.fCouldBeScratch == fScratchMap.count());

    // This assertion is not currently valid because we can be in recursive notifyCntReachedZero()
    // calls. This will be fixed when subresource registration is explicit.
    // bool overBudget = budgetedBytes > fMaxBytes || budgetedCount > fMaxCount;
    // SkASSERT(!overBudget || locked == count || fPurging);
}
922
923bool GrResourceCache::isInCache(const GrGpuResource* resource) const {
924 int index = *resource->cacheAccess().accessCacheIndex();
925 if (index < 0) {
926 return false;
927 }
928 if (index < fPurgeableQueue.count() && fPurgeableQueue.at(index) == resource) {
929 return true;
930 }
931 if (index < fNonpurgeableResources.count() && fNonpurgeableResources[index] == resource) {
932 return true;
933 }
934 SkDEBUGFAIL("Resource index should be -1 or the resource should be in the cache.");
935 return false;
936}
937
938#endif
939