1 | /* |
2 | * Copyright 2018 Google Inc. |
3 | * |
4 | * Use of this source code is governed by a BSD-style license that can be |
5 | * found in the LICENSE file. |
6 | */ |
7 | |
8 | #ifndef GrCCPathCache_DEFINED |
9 | #define GrCCPathCache_DEFINED |
10 | |
11 | #include "include/private/SkIDChangeListener.h" |
12 | #include "include/private/SkTHash.h" |
13 | #include "src/core/SkExchange.h" |
14 | #include "src/core/SkTInternalLList.h" |
15 | #include "src/gpu/ccpr/GrCCAtlas.h" |
16 | #include "src/gpu/ccpr/GrCCPathProcessor.h" |
17 | #include "src/gpu/geometry/GrShape.h" |
18 | |
19 | class GrCCPathCacheEntry; |
20 | class GrShape; |
21 | |
22 | /** |
 * This class implements an LRU cache that maps from GrShape to GrCCPathCacheEntry objects. Each
 * shape is given only one entry in the cache, so any time a shape is accessed with a different
 * matrix, its old entry gets evicted.
26 | */ |
27 | class GrCCPathCache { |
28 | public: |
29 | GrCCPathCache(uint32_t contextUniqueID); |
30 | ~GrCCPathCache(); |
31 | |
32 | class Key : public SkIDChangeListener { |
33 | public: |
34 | static sk_sp<Key> Make(uint32_t pathCacheUniqueID, int dataCountU32, |
35 | const void* data = nullptr); |
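
        // For example, a caller might fill a freshly made key from a shape's unstyled key (a
        // sketch only; 'shape' and 'pathCacheID' are hypothetical locals, not part of this API):
        //
        //     int countU32 = shape.unstyledKeySize();
        //     sk_sp<Key> key = Key::Make(pathCacheID, countU32);
        //     shape.writeUnstyledKey(key->data());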
36 | |
37 | uint32_t pathCacheUniqueID() const { return fPathCacheUniqueID; } |
38 | |
39 | int dataSizeInBytes() const { return fDataSizeInBytes; } |
40 | const uint32_t* data() const; |
41 | |
42 | void resetDataCountU32(int dataCountU32) { |
43 | SkASSERT(dataCountU32 <= fDataReserveCountU32); |
44 | fDataSizeInBytes = dataCountU32 * sizeof(uint32_t); |
45 | } |
46 | uint32_t* data(); |
47 | |
48 | bool operator==(const Key& that) const { |
49 | return fDataSizeInBytes == that.fDataSizeInBytes && |
50 | !memcmp(this->data(), that.data(), fDataSizeInBytes); |
51 | } |
52 | |
53 | // Called when our corresponding path is modified or deleted. Not threadsafe. |
54 | void changed() override; |
55 | |
56 | // TODO(b/30449950): use sized delete once P0722R3 is available |
57 | static void operator delete(void* p); |
58 | |
59 | private: |
60 | Key(uint32_t pathCacheUniqueID, int dataCountU32) |
61 | : fPathCacheUniqueID(pathCacheUniqueID) |
62 | , fDataSizeInBytes(dataCountU32 * sizeof(uint32_t)) |
63 | SkDEBUGCODE(, fDataReserveCountU32(dataCountU32)) { |
64 | SkASSERT(SK_InvalidUniqueID != fPathCacheUniqueID); |
65 | } |
66 | |
67 | const uint32_t fPathCacheUniqueID; |
68 | int fDataSizeInBytes; |
69 | SkDEBUGCODE(const int fDataReserveCountU32); |
70 | // The GrShape's unstyled key is stored as a variable-length footer to this class. GetKey |
71 | // provides access to it. |
72 | }; |
73 | |
74 | // Stores the components of a transformation that affect a path mask (i.e. everything but |
75 | // integer translation). During construction, any integer portions of the matrix's translate are |
76 | // shaved off and returned to the caller. The caller is responsible for those integer shifts. |
77 | struct MaskTransform { |
78 | MaskTransform(const SkMatrix& m, SkIVector* shift); |
79 | float fMatrix2x2[4]; |
80 | #ifndef SK_BUILD_FOR_ANDROID_FRAMEWORK |
        // Except on AOSP, cache hits must have matching subpixel portions of their view matrix.
        // On AOSP we follow HWUI's lead and ignore the subpixel translate.
83 | float fSubpixelTranslate[2]; |
84 | #endif |
85 | }; |
86 | |
87 | // Represents a ref on a GrCCPathCacheEntry that should only be used during the current flush. |
88 | class OnFlushEntryRef : SkNoncopyable { |
89 | public: |
90 | static OnFlushEntryRef OnFlushRef(GrCCPathCacheEntry*); |
91 | OnFlushEntryRef() = default; |
92 | OnFlushEntryRef(OnFlushEntryRef&& ref) : fEntry(skstd::exchange(ref.fEntry, nullptr)) {} |
93 | ~OnFlushEntryRef(); |
94 | |
95 | GrCCPathCacheEntry* get() const { return fEntry; } |
96 | GrCCPathCacheEntry* operator->() const { return fEntry; } |
97 | GrCCPathCacheEntry& operator*() const { return *fEntry; } |
98 | explicit operator bool() const { return fEntry; } |
99 | void operator=(OnFlushEntryRef&& ref) { fEntry = skstd::exchange(ref.fEntry, nullptr); } |
100 | |
101 | private: |
102 | OnFlushEntryRef(GrCCPathCacheEntry* entry) : fEntry(entry) {} |
103 | GrCCPathCacheEntry* fEntry = nullptr; |
104 | }; |
105 | |
106 | // Finds an entry in the cache that matches the given shape and transformation matrix. |
107 | // 'maskShift' is filled with an integer post-translate that the caller must apply when drawing |
108 | // the entry's mask to the device. |
109 | // |
110 | // NOTE: Shapes are only given one entry, so any time they are accessed with a new |
111 | // transformation, the old entry gets evicted. |
112 | OnFlushEntryRef find(GrOnFlushResourceProvider*, const GrShape&, |
113 | const SkIRect& clippedDrawBounds, const SkMatrix& viewMatrix, |
114 | SkIVector* maskShift); |
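
    // For example, flush-time code might do (a sketch; the local names are hypothetical):
    //
    //     SkIVector maskShift;
    //     if (auto entry = pathCache.find(onFlushRP, shape, clippedDrawBounds, viewMatrix,
    //                                     &maskShift)) {
    //         // Draw using entry->cachedAtlas(), applying 'maskShift' as an integer
    //         // post-translate.
    //     }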
115 | |
116 | void doPreFlushProcessing(); |
117 | |
118 | void purgeEntriesOlderThan(GrProxyProvider*, const GrStdSteadyClock::time_point& purgeTime); |
119 | |
    // As we evict entries from our local path cache, we accumulate a list of invalidated atlas
    // textures. This call purges the invalidated atlas textures from the mainline GrResourceCache.
    // It is available with two different "provider" objects, to accommodate whatever might be
    // available at the call site.
124 | void purgeInvalidatedAtlasTextures(GrOnFlushResourceProvider*); |
125 | void purgeInvalidatedAtlasTextures(GrProxyProvider*); |
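
    // Taken together, a typical flush might drive the cache roughly like so (a sketch; the exact
    // call sites live in the CCPR flush code):
    //
    //     pathCache.doPreFlushProcessing();
    //     // ... pathCache.find(...) for each cacheable draw ...
    //     pathCache.purgeInvalidatedAtlasTextures(onFlushRP);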
126 | |
127 | private: |
128 | // This is a special ref ptr for GrCCPathCacheEntry, used by the hash table. It provides static |
129 | // methods for SkTHash, and can only be moved. This guarantees the hash table holds exactly one |
130 | // reference for each entry. Also, when a HashNode goes out of scope, that means it is exiting |
131 | // the hash table. We take that opportunity to remove it from the LRU list and do some cleanup. |
132 | class HashNode : SkNoncopyable { |
133 | public: |
134 | static const Key& GetKey(const HashNode&); |
135 | inline static uint32_t Hash(const Key& key) { |
136 | return GrResourceKeyHash(key.data(), key.dataSizeInBytes()); |
137 | } |
138 | |
139 | HashNode() = default; |
140 | HashNode(GrCCPathCache*, sk_sp<Key>, const MaskTransform&, const GrShape&); |
141 | HashNode(HashNode&& node) |
142 | : fPathCache(node.fPathCache), fEntry(std::move(node.fEntry)) { |
143 | SkASSERT(!node.fEntry); |
144 | } |
145 | |
146 | ~HashNode(); |
147 | |
148 | void operator=(HashNode&& node); |
149 | |
150 | GrCCPathCacheEntry* entry() const { return fEntry.get(); } |
151 | |
152 | private: |
153 | GrCCPathCache* fPathCache = nullptr; |
154 | sk_sp<GrCCPathCacheEntry> fEntry; |
155 | }; |
156 | |
157 | GrStdSteadyClock::time_point quickPerFlushTimestamp() { |
158 | // time_point::min() means it's time to update fPerFlushTimestamp with a newer clock read. |
159 | if (GrStdSteadyClock::time_point::min() == fPerFlushTimestamp) { |
160 | fPerFlushTimestamp = GrStdSteadyClock::now(); |
161 | } |
162 | return fPerFlushTimestamp; |
163 | } |
164 | |
165 | void evict(const GrCCPathCache::Key&, GrCCPathCacheEntry* = nullptr); |
166 | |
167 | // Evicts all the cache entries whose keys have been queued up in fInvalidatedKeysInbox via |
168 | // SkPath listeners. |
169 | void evictInvalidatedCacheKeys(); |
170 | |
171 | const uint32_t fContextUniqueID; |
172 | |
173 | SkTHashTable<HashNode, const Key&> fHashTable; |
174 | SkTInternalLList<GrCCPathCacheEntry> fLRU; |
175 | SkMessageBus<sk_sp<Key>>::Inbox fInvalidatedKeysInbox; |
176 | sk_sp<Key> fScratchKey; // Reused for creating a temporary key in the find() method. |
177 | |
    // We only read the clock once per flush, and cache it in this variable. This prevents
    // excessive clock reads for cache timestamps, which could degrade performance.
180 | GrStdSteadyClock::time_point fPerFlushTimestamp = GrStdSteadyClock::time_point::min(); |
181 | |
    // As we evict entries from our local path cache, we accumulate lists of invalidated atlas
    // textures in these two members. We hold these until we purge them from the GrResourceCache
    // (e.g., via purgeInvalidatedAtlasTextures()).
185 | SkSTArray<4, sk_sp<GrTextureProxy>> fInvalidatedProxies; |
186 | SkSTArray<4, GrUniqueKey> fInvalidatedProxyUniqueKeys; |
187 | |
188 | friend class GrCCCachedAtlas; // To append to fInvalidatedProxies, fInvalidatedProxyUniqueKeys. |
189 | |
190 | public: |
191 | const SkTHashTable<HashNode, const Key&>& testingOnly_getHashTable() const; |
192 | const SkTInternalLList<GrCCPathCacheEntry>& testingOnly_getLRU() const; |
193 | }; |
194 | |
195 | /** |
 * This class stores all the data necessary to draw a specific path + matrix combination from its
 * corresponding cached atlas.
198 | */ |
199 | class GrCCPathCacheEntry : public GrNonAtomicRef<GrCCPathCacheEntry> { |
200 | public: |
201 | SK_DECLARE_INTERNAL_LLIST_INTERFACE(GrCCPathCacheEntry); |
202 | |
203 | ~GrCCPathCacheEntry() { |
204 | SkASSERT(this->hasBeenEvicted()); // Should have called GrCCPathCache::evict(). |
205 | SkASSERT(!fCachedAtlas); |
206 | SkASSERT(0 == fOnFlushRefCnt); |
207 | } |
208 | |
209 | const GrCCPathCache::Key& cacheKey() const { SkASSERT(fCacheKey); return *fCacheKey; } |
210 | |
211 | // The number of flushes during which this specific entry (path + matrix combination) has been |
212 | // pulled from the path cache. If a path is pulled from the cache more than once in a single |
213 | // flush, the hit count is only incremented once. |
214 | // |
215 | // If the entry did not previously exist, its hit count will be 1. |
216 | int hitCount() const { return fHitCount; } |
217 | |
    // The cumulative region of the path that has been drawn during the lifetime of this cache
    // entry (as defined by the 'clippedDrawBounds' parameter for GrCCPathCache::find).
220 | const SkIRect& hitRect() const { return fHitRect; } |
221 | |
222 | const GrCCCachedAtlas* cachedAtlas() const { return fCachedAtlas.get(); } |
223 | |
224 | const SkIRect& devIBounds() const { return fDevIBounds; } |
225 | int width() const { return fDevIBounds.width(); } |
226 | int height() const { return fDevIBounds.height(); } |
227 | |
228 | enum class ReleaseAtlasResult : bool { |
229 | kNone, |
230 | kDidInvalidateFromCache |
231 | }; |
232 | |
233 | // Called once our path has been rendered into the mainline CCPR (fp16, coverage count) atlas. |
234 | // The caller will stash this atlas texture away after drawing, and during the next flush, |
235 | // recover it and attempt to copy any paths that got reused into permanent 8-bit atlases. |
236 | void setCoverageCountAtlas( |
237 | GrOnFlushResourceProvider*, GrCCAtlas*, const SkIVector& atlasOffset, |
238 | const GrOctoBounds& octoBounds, const SkIRect& devIBounds, const SkIVector& maskShift); |
239 | |
    // Called once our path mask has been copied into a permanent, 8-bit atlas. This method points
    // the entry at the new atlas and updates the GrCCCachedAtlas data.
242 | ReleaseAtlasResult upgradeToLiteralCoverageAtlas(GrCCPathCache*, GrOnFlushResourceProvider*, |
243 | GrCCAtlas*, const SkIVector& newAtlasOffset); |
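
    // Together, the two calls above model the two-stage atlas lifecycle (a sketch with
    // hypothetical names):
    //
    //     // Flush N: the path is rendered into a coverage-count (fp16) atlas.
    //     entry->setCoverageCountAtlas(onFlushRP, ccAtlas, atlasOffset, octoBounds, devIBounds,
    //                                  maskShift);
    //     // Flush N+1: the reused mask is copied into a permanent 8-bit atlas.
    //     entry->upgradeToLiteralCoverageAtlas(pathCache, onFlushRP, a8Atlas, newAtlasOffset);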
244 | |
245 | private: |
246 | using MaskTransform = GrCCPathCache::MaskTransform; |
247 | |
248 | GrCCPathCacheEntry(sk_sp<GrCCPathCache::Key> cacheKey, const MaskTransform& maskTransform) |
249 | : fCacheKey(std::move(cacheKey)), fMaskTransform(maskTransform) { |
250 | } |
251 | |
252 | bool hasBeenEvicted() const { return fCacheKey->shouldDeregister(); } |
253 | |
254 | // Resets this entry back to not having an atlas, and purges its previous atlas texture from the |
255 | // resource cache if needed. |
256 | ReleaseAtlasResult releaseCachedAtlas(GrCCPathCache*); |
257 | |
258 | sk_sp<GrCCPathCache::Key> fCacheKey; |
259 | GrStdSteadyClock::time_point fTimestamp; |
260 | int fHitCount = 0; |
261 | SkIRect fHitRect = SkIRect::MakeEmpty(); |
262 | |
263 | sk_sp<GrCCCachedAtlas> fCachedAtlas; |
264 | SkIVector fAtlasOffset; |
265 | |
266 | MaskTransform fMaskTransform; |
267 | GrOctoBounds fOctoBounds; |
268 | SkIRect fDevIBounds; |
269 | |
270 | int fOnFlushRefCnt = 0; |
271 | |
272 | friend class GrCCPathCache; |
273 | friend void GrCCPathProcessor::Instance::set(const GrCCPathCacheEntry&, const SkIVector&, |
274 | const SkPMColor4f&, GrFillRule); |
275 | |
276 | public: |
277 | int testingOnly_peekOnFlushRefCnt() const; |
278 | }; |
279 | |
280 | /** |
281 | * Encapsulates the data for an atlas whose texture is stored in the mainline GrResourceCache. Many |
282 | * instances of GrCCPathCacheEntry will reference the same GrCCCachedAtlas. |
283 | * |
 * We use this object to track the percentage of the original atlas pixels that could still
 * potentially be reused (i.e., those which still represent an extant path). When the percentage
 * of useful pixels drops below 50%, we purge the entire texture from the resource cache.
287 | * |
288 | * This object also holds a ref on the atlas's actual texture proxy during flush. When |
289 | * fOnFlushRefCnt decrements back down to zero, we release fOnFlushProxy and reset it back to null. |
290 | */ |
291 | class GrCCCachedAtlas : public GrNonAtomicRef<GrCCCachedAtlas> { |
292 | public: |
293 | using ReleaseAtlasResult = GrCCPathCacheEntry::ReleaseAtlasResult; |
294 | |
295 | GrCCCachedAtlas(GrCCAtlas::CoverageType type, const GrUniqueKey& textureKey, |
296 | sk_sp<GrTextureProxy> onFlushProxy) |
297 | : fCoverageType(type) |
298 | , fTextureKey(textureKey) |
299 | , fOnFlushProxy(std::move(onFlushProxy)) {} |
300 | |
301 | ~GrCCCachedAtlas() { |
302 | SkASSERT(!fOnFlushProxy); |
303 | SkASSERT(!fOnFlushRefCnt); |
304 | } |
305 | |
306 | GrCCAtlas::CoverageType coverageType() const { return fCoverageType; } |
307 | const GrUniqueKey& textureKey() const { return fTextureKey; } |
308 | |
309 | GrTextureProxy* getOnFlushProxy() const { return fOnFlushProxy.get(); } |
310 | |
311 | void setOnFlushProxy(sk_sp<GrTextureProxy> proxy) { |
312 | SkASSERT(!fOnFlushProxy); |
313 | fOnFlushProxy = std::move(proxy); |
314 | } |
315 | |
316 | void addPathPixels(int numPixels) { fNumPathPixels += numPixels; } |
317 | ReleaseAtlasResult invalidatePathPixels(GrCCPathCache*, int numPixels); |
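
    // For example (hypothetical numbers): with fNumPathPixels == 1000, once calls to
    // invalidatePathPixels() have accumulated roughly 500 invalidated pixels, the 50% threshold
    // described in the class comment is crossed, the texture is purged from the resource cache,
    // and kDidInvalidateFromCache is returned.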
318 | |
319 | int peekOnFlushRefCnt() const { return fOnFlushRefCnt; } |
320 | void incrOnFlushRefCnt(int count = 1) const { |
321 | SkASSERT(count > 0); |
322 | SkASSERT(fOnFlushProxy); |
323 | fOnFlushRefCnt += count; |
324 | } |
325 | void decrOnFlushRefCnt(int count = 1) const; |
326 | |
327 | private: |
328 | const GrCCAtlas::CoverageType fCoverageType; |
329 | const GrUniqueKey fTextureKey; |
330 | |
331 | int fNumPathPixels = 0; |
332 | int fNumInvalidatedPathPixels = 0; |
333 | bool fIsInvalidatedFromResourceCache = false; |
334 | |
335 | mutable sk_sp<GrTextureProxy> fOnFlushProxy; |
336 | mutable int fOnFlushRefCnt = 0; |
337 | |
338 | public: |
339 | int testingOnly_peekOnFlushRefCnt() const; |
340 | }; |
341 | |
342 | |
343 | inline GrCCPathCache::HashNode::HashNode(GrCCPathCache* pathCache, sk_sp<Key> key, |
344 | const MaskTransform& m, const GrShape& shape) |
345 | : fPathCache(pathCache) |
346 | , fEntry(new GrCCPathCacheEntry(key, m)) { |
347 | SkASSERT(shape.hasUnstyledKey()); |
348 | shape.addGenIDChangeListener(std::move(key)); |
349 | } |
350 | |
351 | inline const GrCCPathCache::Key& GrCCPathCache::HashNode::GetKey( |
352 | const GrCCPathCache::HashNode& node) { |
353 | return *node.entry()->fCacheKey; |
354 | } |
355 | |
356 | inline GrCCPathCache::HashNode::~HashNode() { |
357 | SkASSERT(!fEntry || fEntry->hasBeenEvicted()); // Should have called GrCCPathCache::evict(). |
358 | } |
359 | |
360 | inline void GrCCPathCache::HashNode::operator=(HashNode&& node) { |
361 | SkASSERT(!fEntry || fEntry->hasBeenEvicted()); // Should have called GrCCPathCache::evict(). |
362 | fEntry = skstd::exchange(node.fEntry, nullptr); |
363 | } |
364 | |
365 | inline void GrCCPathProcessor::Instance::set( |
366 | const GrCCPathCacheEntry& entry, const SkIVector& shift, const SkPMColor4f& color, |
367 | GrFillRule fillRule) { |
368 | float dx = (float)shift.fX, dy = (float)shift.fY; |
369 | this->set(entry.fOctoBounds.makeOffset(dx, dy), entry.fAtlasOffset - shift, color, fillRule); |
370 | } |
371 | |
372 | #endif |
373 | |