/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ccpr/GrCCPathCache.h"

#include "include/private/SkNx.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrProxyProvider.h"

static constexpr int kMaxKeyDataCountU32 = 256;  // 1kB of uint32_t's.

DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCache::Key>);

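// Returns a unique, non-zero ID for a new path cache. The ID doubles as the unique ID of the
// cache's SkMessageBus inbox, which is how invalidated-key messages find their way back to the
// cache that owns them. The loop skips SK_InvalidUniqueID in case the counter ever wraps.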
static inline uint32_t next_path_cache_id() {
    static std::atomic<uint32_t> gNextID(1);
    for (;;) {
        uint32_t id = gNextID.fetch_add(+1, std::memory_order_acquire);
        if (SK_InvalidUniqueID != id) {
            return id;
        }
    }
}

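// SkMessageBus routing filter: only deliver an invalidated key to the inbox of the path cache
// that created it.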
static inline bool SkShouldPostMessageToBus(
        const sk_sp<GrCCPathCache::Key>& key, uint32_t msgBusUniqueID) {
    return key->pathCacheUniqueID() == msgBusUniqueID;
}

// The maximum number of cache entries we allow in our own cache.
static constexpr int kMaxCacheCount = 1 << 16;

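// MaskTransform records only the parts of the view matrix that a cached mask depends on: the
// 2x2 linear portion exactly, plus the subpixel remainder of the translation. The integer part
// of the translation is returned in 'shift', allowing a cached mask to be reused at any integer
// device-space offset.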
GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
        : fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
    SkASSERT(!m.hasPerspective());
    Sk2f translate = Sk2f(m.getTranslateX(), m.getTranslateY());
    Sk2f transFloor;
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
    // On Android framework we pre-round view matrix translates to integers for better caching.
    transFloor = translate;
#else
    transFloor = translate.floor();
    (translate - transFloor).store(fSubpixelTranslate);
#endif
    shift->set((int)transFloor[0], (int)transFloor[1]);
    SkASSERT((float)shift->fX == transFloor[0]);  // Make sure transFloor had integer values.
    SkASSERT((float)shift->fY == transFloor[1]);
}

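// Two mask transforms match if their 2x2 parts are bit-equal and (outside the Android framework)
// their subpixel translates agree within 1/256 of a pixel, an error we assume is invisible in an
// 8-bit coverage mask.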
inline static bool fuzzy_equals(const GrCCPathCache::MaskTransform& a,
                                const GrCCPathCache::MaskTransform& b) {
    if ((Sk4f::Load(a.fMatrix2x2) != Sk4f::Load(b.fMatrix2x2)).anyTrue()) {
        return false;
    }
#ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK
    if (((Sk2f::Load(a.fSubpixelTranslate) -
          Sk2f::Load(b.fSubpixelTranslate)).abs() > 1.f/256).anyTrue()) {
        return false;
    }
#endif
    return true;
}

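// A Key and its variable-length data are created in a single over-sized allocation: the
// uint32_t key data begins immediately after the Key object itself (see data() below).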
sk_sp<GrCCPathCache::Key> GrCCPathCache::Key::Make(uint32_t pathCacheUniqueID,
                                                   int dataCountU32, const void* data) {
    void* memory = ::operator new (sizeof(Key) + dataCountU32 * sizeof(uint32_t));
    sk_sp<GrCCPathCache::Key> key(new (memory) Key(pathCacheUniqueID, dataCountU32));
    if (data) {
        memcpy(key->data(), data, key->dataSizeInBytes());
    }
    return key;
}

void GrCCPathCache::Key::operator delete(void* p) { ::operator delete(p); }

const uint32_t* GrCCPathCache::Key::data() const {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<const uint32_t*>(reinterpret_cast<const char*>(this) + sizeof(Key));
}

uint32_t* GrCCPathCache::Key::data() {
    // The shape key is a variable-length footer to the entry allocation.
    return reinterpret_cast<uint32_t*>(reinterpret_cast<char*>(this) + sizeof(Key));
}

void GrCCPathCache::Key::changed() {
    // Our key's corresponding path was invalidated. Post a thread-safe eviction message.
    SkMessageBus<sk_sp<Key>>::Post(sk_ref_sp(this));
}

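// fScratchKey is preallocated at the maximum supported key size so find() can write key data in
// place on every lookup without a heap allocation.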
GrCCPathCache::GrCCPathCache(uint32_t contextUniqueID)
        : fContextUniqueID(contextUniqueID)
        , fInvalidatedKeysInbox(next_path_cache_id())
        , fScratchKey(Key::Make(fInvalidatedKeysInbox.uniqueID(), kMaxKeyDataCountU32)) {
}

GrCCPathCache::~GrCCPathCache() {
    while (!fLRU.isEmpty()) {
        this->evict(*fLRU.tail()->fCacheKey, fLRU.tail());
    }
    SkASSERT(0 == fHashTable.count());  // Ensure the hash table and LRU list were coherent.

    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
    // We just purge via message bus since we don't have any access to the resource cache right now.
    for (const sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                GrUniqueKeyInvalidatedMessage(proxy->getUniqueKey(), fContextUniqueID));
    }
    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
                GrUniqueKeyInvalidatedMessage(key, fContextUniqueID));
    }
}

namespace {

// Produces a key that accounts for both a shape's path geometry and any stroke/style.
class WriteKeyHelper {
public:
    static constexpr int kStrokeWidthIdx = 0;
    static constexpr int kStrokeMiterIdx = 1;
    static constexpr int kStrokeCapJoinIdx = 2;
    static constexpr int kShapeUnstyledKeyIdx = 3;

    WriteKeyHelper(const GrStyledShape& shape) : fShapeUnstyledKeyCount(shape.unstyledKeySize()) {}

    // Returns the total number of uint32_t's to allocate for the key.
    int allocCountU32() const { return kShapeUnstyledKeyIdx + fShapeUnstyledKeyCount; }

    // Writes the key data to out[].
    void write(const GrStyledShape& shape, uint32_t* out) {
        // Stroke key.
        // We don't use GrStyle::WriteKey() because it does not account for hairlines.
        // http://skbug.com/8273
        SkASSERT(!shape.style().hasPathEffect());
        const SkStrokeRec& stroke = shape.style().strokeRec();
        if (stroke.isFillStyle()) {
            // Use a value for width that won't collide with a valid fp32 value >= 0.
            out[kStrokeWidthIdx] = ~0;
            out[kStrokeMiterIdx] = out[kStrokeCapJoinIdx] = 0;
        } else {
            float width = stroke.getWidth(), miterLimit = stroke.getMiter();
            memcpy(&out[kStrokeWidthIdx], &width, sizeof(float));
            memcpy(&out[kStrokeMiterIdx], &miterLimit, sizeof(float));
            out[kStrokeCapJoinIdx] = (stroke.getCap() << 16) | stroke.getJoin();
            static_assert(sizeof(out[kStrokeWidthIdx]) == sizeof(float));
        }

        // Shape unstyled key.
        shape.writeUnstyledKey(&out[kShapeUnstyledKeyIdx]);
    }

private:
    int fShapeUnstyledKeyCount;
};

}  // namespace

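// Finds (or creates) the cache entry for 'shape' drawn with 'viewMatrix': write the shape's key
// into the preallocated scratch key, probe the hash table, verify the mask transform is still
// compatible, then move the entry to the head of the LRU. On an entry's first hit of the flush,
// we also attempt to reattach its cached atlas proxy from the GrResourceCache.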
GrCCPathCache::OnFlushEntryRef GrCCPathCache::find(
        GrOnFlushResourceProvider* onFlushRP, const GrStyledShape& shape,
        const SkIRect& clippedDrawBounds, const SkMatrix& viewMatrix, SkIVector* maskShift) {
    if (!shape.hasUnstyledKey()) {
        return OnFlushEntryRef();
    }

    WriteKeyHelper writeKeyHelper(shape);
    if (writeKeyHelper.allocCountU32() > kMaxKeyDataCountU32) {
        return OnFlushEntryRef();
    }

    SkASSERT(fScratchKey->unique());
    fScratchKey->resetDataCountU32(writeKeyHelper.allocCountU32());
    writeKeyHelper.write(shape, fScratchKey->data());

    MaskTransform m(viewMatrix, maskShift);
    GrCCPathCacheEntry* entry = nullptr;
    if (HashNode* node = fHashTable.find(*fScratchKey)) {
        entry = node->entry();
        SkASSERT(fLRU.isInList(entry));

        if (!fuzzy_equals(m, entry->fMaskTransform)) {
            // The path was reused with an incompatible matrix.
            if (entry->unique()) {
                // This entry is unique: recycle it instead of deleting and malloc-ing a new one.
                SkASSERT(0 == entry->fOnFlushRefCnt);  // Because we are unique.
                entry->fMaskTransform = m;
                entry->fHitCount = 0;
                entry->fHitRect = SkIRect::MakeEmpty();
                entry->releaseCachedAtlas(this);
            } else {
                this->evict(*fScratchKey);
                entry = nullptr;
            }
        }
    }

    if (!entry) {
        if (fHashTable.count() >= kMaxCacheCount) {
            SkDEBUGCODE(HashNode* node = fHashTable.find(*fLRU.tail()->fCacheKey));
            SkASSERT(node && node->entry() == fLRU.tail());
            this->evict(*fLRU.tail()->fCacheKey);  // We've exceeded our limit.
        }

        // Create a new entry in the cache.
        sk_sp<Key> permanentKey = Key::Make(fInvalidatedKeysInbox.uniqueID(),
                                            writeKeyHelper.allocCountU32(), fScratchKey->data());
        SkASSERT(*permanentKey == *fScratchKey);
        SkASSERT(!fHashTable.find(*permanentKey));
        entry = fHashTable.set(HashNode(this, std::move(permanentKey), m, shape))->entry();

        SkASSERT(fHashTable.count() <= kMaxCacheCount);
    } else {
        fLRU.remove(entry);  // Will be re-added at head.
    }

    SkDEBUGCODE(HashNode* node = fHashTable.find(*fScratchKey));
    SkASSERT(node && node->entry() == entry);
    fLRU.addToHead(entry);

    if (0 == entry->fOnFlushRefCnt) {
        // Only update the time stamp and hit count if we haven't seen this entry yet during the
        // current flush.
        entry->fTimestamp = this->quickPerFlushTimestamp();
        ++entry->fHitCount;

        if (entry->fCachedAtlas) {
            SkASSERT(SkToBool(entry->fCachedAtlas->peekOnFlushRefCnt()) ==
                     SkToBool(entry->fCachedAtlas->getOnFlushProxy()));
            if (!entry->fCachedAtlas->getOnFlushProxy()) {
                if (sk_sp<GrTextureProxy> onFlushProxy = onFlushRP->findOrCreateProxyByUniqueKey(
                        entry->fCachedAtlas->textureKey(), GrSurfaceProxy::UseAllocator::kNo)) {
                    entry->fCachedAtlas->setOnFlushProxy(std::move(onFlushProxy));
                }
            }
            if (!entry->fCachedAtlas->getOnFlushProxy()) {
                // Our atlas's backing texture got purged from the GrResourceCache. Release the
                // cached atlas.
                entry->releaseCachedAtlas(this);
            }
        }
    }
    entry->fHitRect.join(clippedDrawBounds.makeOffset(-*maskShift));
    SkASSERT(!entry->fCachedAtlas || entry->fCachedAtlas->getOnFlushProxy());
    return OnFlushEntryRef::OnFlushRef(entry);
}

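// Removes an entry from both the hash table and the LRU list, detaches its path listener, and
// releases any cached atlas resources. Callers that have already located the entry may pass it
// in to skip the hash lookup.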
void GrCCPathCache::evict(const GrCCPathCache::Key& key, GrCCPathCacheEntry* entry) {
    if (!entry) {
        HashNode* node = fHashTable.find(key);
        SkASSERT(node);
        entry = node->entry();
    }
    SkASSERT(*entry->fCacheKey == key);
    SkASSERT(!entry->hasBeenEvicted());
    entry->fCacheKey->markShouldDeregister();  // Unregister the path listener.
    entry->releaseCachedAtlas(this);
    fLRU.remove(entry);
    fHashTable.remove(key);
}

void GrCCPathCache::doPreFlushProcessing() {
    this->evictInvalidatedCacheKeys();

    // Mark the per-flush timestamp as needing to be updated with a newer clock reading.
    fPerFlushTimestamp = GrStdSteadyClock::time_point::min();
}

void GrCCPathCache::purgeEntriesOlderThan(GrProxyProvider* proxyProvider,
                                          const GrStdSteadyClock::time_point& purgeTime) {
    this->evictInvalidatedCacheKeys();

#ifdef SK_DEBUG
    auto lastTimestamp = (fLRU.isEmpty())
            ? GrStdSteadyClock::time_point::max()
            : fLRU.tail()->fTimestamp;
#endif

    // Evict every entry from our local path cache whose timestamp is older than purgeTime.
    while (!fLRU.isEmpty() && fLRU.tail()->fTimestamp < purgeTime) {
#ifdef SK_DEBUG
        // Verify that fLRU is sorted by timestamp.
        auto timestamp = fLRU.tail()->fTimestamp;
        SkASSERT(timestamp >= lastTimestamp);
        lastTimestamp = timestamp;
#endif
        this->evict(*fLRU.tail()->fCacheKey);
    }

    // Now take all the atlas textures we just invalidated and purge them from the GrResourceCache.
    this->purgeInvalidatedAtlasTextures(proxyProvider);
}

void GrCCPathCache::purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider* onFlushRP) {
    for (const sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        onFlushRP->removeUniqueKeyFromProxy(proxy.get());
    }
    fInvalidatedProxies.reset();

    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        onFlushRP->processInvalidUniqueKey(key);
    }
    fInvalidatedProxyUniqueKeys.reset();
}

void GrCCPathCache::purgeInvalidatedAtlasTextures(GrProxyProvider* proxyProvider) {
    for (const sk_sp<GrTextureProxy>& proxy : fInvalidatedProxies) {
        proxyProvider->removeUniqueKeyFromProxy(proxy.get());
    }
    fInvalidatedProxies.reset();

    for (const GrUniqueKey& key : fInvalidatedProxyUniqueKeys) {
        proxyProvider->processInvalidUniqueKey(key, nullptr,
                                               GrProxyProvider::InvalidateGPUResource::kYes);
    }
    fInvalidatedProxyUniqueKeys.reset();
}

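// Drains the message-bus inbox of keys whose paths have been deleted or modified and evicts the
// corresponding entries. Keys that already left the cache (and are only awaiting deregistration)
// are skipped.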
void GrCCPathCache::evictInvalidatedCacheKeys() {
    SkTArray<sk_sp<Key>> invalidatedKeys;
    fInvalidatedKeysInbox.poll(&invalidatedKeys);
    for (const sk_sp<Key>& key : invalidatedKeys) {
        bool isInCache = !key->shouldDeregister();  // Gets set upon exiting the cache.
        if (isInCache) {
            this->evict(*key);
        }
    }
}

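// OnFlushEntryRef is an RAII reference that pins a cache entry, and its cached atlas if it has
// one, for the duration of the current flush. find() also consults the on-flush ref count to
// detect whether an entry has been seen yet this flush.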
GrCCPathCache::OnFlushEntryRef
GrCCPathCache::OnFlushEntryRef::OnFlushRef(GrCCPathCacheEntry* entry) {
    entry->ref();
    ++entry->fOnFlushRefCnt;
    if (entry->fCachedAtlas) {
        entry->fCachedAtlas->incrOnFlushRefCnt();
    }
    return OnFlushEntryRef(entry);
}

GrCCPathCache::OnFlushEntryRef::~OnFlushEntryRef() {
    if (!fEntry) {
        return;
    }
    --fEntry->fOnFlushRefCnt;
    SkASSERT(fEntry->fOnFlushRefCnt >= 0);
    if (fEntry->fCachedAtlas) {
        fEntry->fCachedAtlas->decrOnFlushRefCnt();
    }
    fEntry->unref();
}

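// Records the atlas that now contains this path's coverage-count mask, and where the mask lives
// within it. The stored bounds are translated by -maskShift so they are relative to the mask's
// own origin rather than to the original device-space draw.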
void GrCCPathCacheEntry::setCoverageCountAtlas(
        GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas, const SkIVector& atlasOffset,
        const GrOctoBounds& octoBounds, const SkIRect& devIBounds, const SkIVector& maskShift) {
    SkASSERT(fOnFlushRefCnt > 0);
    SkASSERT(!fCachedAtlas);  // Otherwise we would need to call releaseCachedAtlas().

    if (this->hasBeenEvicted()) {
        // This entry will never be found in the path cache again. Don't bother trying to save an
        // atlas texture for it in the GrResourceCache.
        return;
    }

    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
    fCachedAtlas->addPathPixels(devIBounds.height() * devIBounds.width());

    fAtlasOffset = atlasOffset + maskShift;

    fOctoBounds.setOffset(octoBounds, -maskShift.fX, -maskShift.fY);
    fDevIBounds = devIBounds.makeOffset(-maskShift);
}

GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::upgradeToLiteralCoverageAtlas(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCAtlas* atlas,
        const SkIVector& newAtlasOffset) {
    SkASSERT(!this->hasBeenEvicted());
    SkASSERT(fOnFlushRefCnt > 0);
    SkASSERT(fCachedAtlas);
    SkASSERT(GrCCAtlas::CoverageType::kA8_LiteralCoverage != fCachedAtlas->coverageType());

    ReleaseAtlasResult releaseAtlasResult = this->releaseCachedAtlas(pathCache);

    fCachedAtlas = atlas->refOrMakeCachedAtlas(onFlushRP);
    fCachedAtlas->incrOnFlushRefCnt(fOnFlushRefCnt);
    fCachedAtlas->addPathPixels(this->height() * this->width());

    fAtlasOffset = newAtlasOffset;
    return releaseAtlasResult;
}

GrCCPathCacheEntry::ReleaseAtlasResult GrCCPathCacheEntry::releaseCachedAtlas(
        GrCCPathCache* pathCache) {
    ReleaseAtlasResult result = ReleaseAtlasResult::kNone;
    if (fCachedAtlas) {
        result = fCachedAtlas->invalidatePathPixels(pathCache, this->height() * this->width());
        if (fOnFlushRefCnt) {
            SkASSERT(fOnFlushRefCnt > 0);
            fCachedAtlas->decrOnFlushRefCnt(fOnFlushRefCnt);
        }
        fCachedAtlas = nullptr;
    }
    return result;
}

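// Marks a path's pixels as no longer valid in this atlas. Once at least half of the atlas's path
// pixels have been invalidated, the atlas texture itself gets queued for purging from the
// GrResourceCache (at most once per atlas).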
GrCCPathCacheEntry::ReleaseAtlasResult GrCCCachedAtlas::invalidatePathPixels(
        GrCCPathCache* pathCache, int numPixels) {
    // Mark the pixels invalid in the cached atlas texture.
    fNumInvalidatedPathPixels += numPixels;
    SkASSERT(fNumInvalidatedPathPixels <= fNumPathPixels);
    if (!fIsInvalidatedFromResourceCache && fNumInvalidatedPathPixels >= fNumPathPixels / 2) {
        // Too many invalidated pixels: purge the atlas texture from the resource cache.
        if (fOnFlushProxy) {
            // Don't clear (or std::move) fOnFlushProxy. Other path cache entries might still have
            // a reference on this atlas and expect to use our proxy during the current flush.
            // fOnFlushProxy will be cleared once fOnFlushRefCnt decrements to zero.
            pathCache->fInvalidatedProxies.push_back(fOnFlushProxy);
        } else {
            pathCache->fInvalidatedProxyUniqueKeys.push_back(fTextureKey);
        }
        fIsInvalidatedFromResourceCache = true;
        return ReleaseAtlasResult::kDidInvalidateFromCache;
    }
    return ReleaseAtlasResult::kNone;
}

void GrCCCachedAtlas::decrOnFlushRefCnt(int count) const {
    SkASSERT(count > 0);
    fOnFlushRefCnt -= count;
    SkASSERT(fOnFlushRefCnt >= 0);
    if (0 == fOnFlushRefCnt) {
        // Don't hold the actual proxy past the end of the current flush.
        SkASSERT(fOnFlushProxy);
        fOnFlushProxy = nullptr;
    }
}