/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceAllocator.h"

#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrOpsTask.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSurfaceProxy.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTextureProxy.h"

#if GR_TRACK_INTERVAL_CREATION
#include <atomic>

uint32_t GrResourceAllocator::Interval::CreateUniqueID() {
    static std::atomic<uint32_t> nextID{1};
    uint32_t id;
    do {
        id = nextID++;
    } while (id == SK_InvalidUniqueID);
    return id;
}
#endif

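// Bind this interval's proxy to 's': keep a ref in 'fAssignedSurface' so the surface can be
// detached (and possibly recycled) when the interval expires, and instantiate the proxy with it.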
void GrResourceAllocator::Interval::assign(sk_sp<GrSurface> s) {
    SkASSERT(!fAssignedSurface);
    fAssignedSurface = s;
    fProxy->priv().assign(std::move(s));
}

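// Walk all the intervals and mark as recyclable those whose proxy's only outstanding refs are
// the uses the allocator itself has recorded. The backing surfaces of recyclable intervals can
// be returned to the free pool when their intervals expire.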
void GrResourceAllocator::determineRecyclability() {
    for (Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        if (cur->proxy()->canSkipResourceAllocator()) {
            // These types of proxies can slip in here if they require a stencil buffer
            continue;
        }

        if (!cur->proxy()->refCntGreaterThan(cur->uses())) {
            // All the refs on the proxy are known to the resource allocator thus no one
            // should be holding onto it outside of Ganesh.
            cur->markAsRecyclable();
        }
    }
}

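// Record that all ops preceding the current op index belong to the opsTask at 'opsTaskIndex'.
// The recorded index becomes the first op index of the next opsTask.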
void GrResourceAllocator::markEndOfOpsTask(int opsTaskIndex) {
    SkASSERT(!fAssigned); // We shouldn't be adding any opsTasks after (or during) assignment

    SkASSERT(fEndOfOpsTaskOpIndices.count() == opsTaskIndex);
    if (!fEndOfOpsTaskOpIndices.empty()) {
        SkASSERT(fEndOfOpsTaskOpIndices.back() < this->curOp());
    }

    // This is the first op index of the next opsTask
    fEndOfOpsTaskOpIndices.push_back(this->curOp());
    SkASSERT(fEndOfOpsTaskOpIndices.count() <= fNumOpsTasks);
}

GrResourceAllocator::~GrResourceAllocator() {
    SkASSERT(fIntvlList.empty());
    SkASSERT(fActiveIntvls.empty());
    SkASSERT(!fIntvlHash.count());
}

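// Record that 'proxy' is used over the op range [start, end]. If an interval already exists for
// the proxy it is extended; otherwise a new interval is created (reusing one from the free
// interval list when possible). Proxies that can skip the allocator, or that are read only, are
// handled up front and never receive an interval.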
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end,
                                      ActualUse actualUse
                                      SkDEBUGCODE(, bool isDirectDstRead)) {
    SkASSERT(start <= end);
    SkASSERT(!fAssigned); // We shouldn't be adding any intervals after (or during) assignment

    if (proxy->canSkipResourceAllocator()) {
        return;
    }

    // If a proxy is read only it must refer to a texture with specific content that cannot be
    // recycled. We don't need to assign a texture to it and no other proxy can be instantiated
    // with the same texture.
    if (proxy->readOnly()) {
        if (proxy->isLazy() && !proxy->priv().doLazyInstantiation(fResourceProvider)) {
            fLazyInstantiationError = true;
        } else {
            // Since we aren't going to add an interval we won't revisit this proxy in assign().
            // So it must already be instantiated or it must be a lazy proxy that we instantiated
            // above.
            SkASSERT(proxy->isInstantiated());
        }
        return;
    }
    if (Interval* intvl = fIntvlHash.find(proxy->uniqueID().asUInt())) {
        // Revise the interval for an existing use
#ifdef SK_DEBUG
        if (0 == start && 0 == end) {
            // This interval is for the initial upload to a deferred proxy. Due to the vagaries
            // of how deferred proxies are collected they can appear as uploads multiple times
            // in a single opsTask's list and as uploads in several opsTasks.
            SkASSERT(0 == intvl->start());
        } else if (isDirectDstRead) {
            // Direct reads from the render target itself should occur w/in the existing
            // interval
            SkASSERT(intvl->start() <= start && intvl->end() >= end);
        } else {
            SkASSERT(intvl->end() <= start && intvl->end() <= end);
        }
#endif
        if (ActualUse::kYes == actualUse) {
            intvl->addUse();
        }
        intvl->extendEnd(end);
        return;
    }
    Interval* newIntvl;
    if (fFreeIntervalList) {
        newIntvl = fFreeIntervalList;
        fFreeIntervalList = newIntvl->next();
        newIntvl->setNext(nullptr);
        newIntvl->resetTo(proxy, start, end);
    } else {
        newIntvl = fIntervalAllocator.make<Interval>(proxy, start, end);
    }

    if (ActualUse::kYes == actualUse) {
        newIntvl->addUse();
    }
    fIntvlList.insertByIncreasingStart(newIntvl);
    fIntvlHash.add(newIntvl);
}

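// Remove and return the head of the list (or nullptr if the list is empty).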
GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
    SkDEBUGCODE(this->validate());

    Interval* temp = fHead;
    if (temp) {
        fHead = temp->next();
        if (!fHead) {
            fTail = nullptr;
        }
        temp->setNext(nullptr);
    }

    SkDEBUGCODE(this->validate());
    return temp;
}

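// Insert 'intvl' so the list stays sorted by increasing start index. The percentage comments on
// the branches below appear to record roughly how often each case is hit in practice; appending
// at the tail is by far the most common case.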
// TODO: fuse this with insertByIncreasingEnd
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->start() <= fHead->start()) {
        // 3%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->start() <= intvl->start()) {
        // 83%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // almost never
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->start() > next->start(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

// TODO: fuse this with insertByIncreasingStart
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
    SkDEBUGCODE(this->validate());
    SkASSERT(!intvl->next());

    if (!fHead) {
        // 14%
        fHead = fTail = intvl;
    } else if (intvl->end() <= fHead->end()) {
        // 64%
        intvl->setNext(fHead);
        fHead = intvl;
    } else if (fTail->end() <= intvl->end()) {
        // 3%
        fTail->setNext(intvl);
        fTail = intvl;
    } else {
        // 19% but 81% of those land right after the list's head
        Interval* prev = fHead;
        Interval* next = prev->next();
        for (; intvl->end() > next->end(); prev = next, next = next->next()) {
        }

        SkASSERT(next);
        intvl->setNext(next);
        prev->setNext(intvl);
    }

    SkDEBUGCODE(this->validate());
}

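// Debug-only sanity check: fHead and fTail must be both set or both null, and fTail must be the
// last node reachable from fHead.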
#ifdef SK_DEBUG
void GrResourceAllocator::IntervalList::validate() const {
    SkASSERT(SkToBool(fHead) == SkToBool(fTail));

    Interval* prev = nullptr;
    for (Interval* cur = fHead; cur; prev = cur, cur = cur->next()) {
    }

    SkASSERT(fTail == prev);
}
#endif

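// Hand the entire list off to the caller, leaving this list empty.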
GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::detachAll() {
    Interval* tmp = fHead;
    fHead = nullptr;
    fTail = nullptr;
    return tmp;
}

// 'surface' can be reused. Add it back to the free pool.
void GrResourceAllocator::recycleSurface(sk_sp<GrSurface> surface) {
    const GrScratchKey &key = surface->resourcePriv().getScratchKey();

    if (!key.isValid()) {
        return; // can't do it w/o a valid scratch key
    }

    if (surface->getUniqueKey().isValid()) {
        // If the surface has a unique key we throw it back into the resource cache.
        // If things get really tight 'findSurfaceFor' may pull it back out but there is
        // no need to have it in tight rotation.
        return;
    }

#if GR_ALLOCATION_SPEW
    SkDebugf("putting surface %d back into pool\n", surface->uniqueID().asUInt());
#endif
    // TODO: fix this insertion so we get a more LRU-ish behavior
    fFreePool.insert(key, surface.release());
}

// First try to reuse one of the recently allocated/used GrSurfaces in the free pool.
// If we can't find a usable one, create a new one.
sk_sp<GrSurface> GrResourceAllocator::findSurfaceFor(const GrSurfaceProxy* proxy) {
    if (proxy->asTextureProxy() && proxy->asTextureProxy()->getUniqueKey().isValid()) {
        // First try to reattach to a cached version if the proxy is uniquely keyed
        if (sk_sp<GrSurface> surface = fResourceProvider->findByUniqueKey<GrSurface>(
                proxy->asTextureProxy()->getUniqueKey())) {
            return surface;
        }
    }

    // Next, look in the free pool
    GrScratchKey key;

    proxy->priv().computeScratchKey(*fResourceProvider->caps(), &key);

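    // No filtering criterion is currently applied to the scratch-surface search; any surface
    // matching the scratch key is acceptable.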
    auto filter = [] (const GrSurface* s) {
        return true;
    };
    sk_sp<GrSurface> surface(fFreePool.findAndRemove(key, filter));
    if (surface) {
        if (SkBudgeted::kYes == proxy->isBudgeted() &&
            GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
            // This gets the job done but isn't quite correct. It would be better to try to
            // match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
            surface->resourcePriv().makeBudgeted();
        }
        SkASSERT(!surface->getUniqueKey().isValid());
        return surface;
    }

    // Failing that, try to grab a new one from the resource cache
    return proxy->priv().createSurface(fResourceProvider);
}

// Remove any intervals that end before the current index. Return their GrSurfaces
// to the free pool if possible.
void GrResourceAllocator::expire(unsigned int curIndex) {
    while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
        Interval* temp = fActiveIntvls.popHead();
        SkASSERT(!temp->next());

        if (temp->wasAssignedSurface()) {
            sk_sp<GrSurface> surface = temp->detachSurface();

            if (temp->isRecyclable()) {
                this->recycleSurface(std::move(surface));
            }
        }

        // Add temp to the free interval list so it can be reused
        SkASSERT(!temp->wasAssignedSurface()); // it had better not have a ref on a surface
        temp->setNext(fFreeIntervalList);
        fFreeIntervalList = temp;
    }
}

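// Returns true if the next interval to be assigned starts in a later opsTask than the one
// currently being processed (i.e., we are at an opsTask boundary and could safely stop here).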
bool GrResourceAllocator::onOpsTaskBoundary() const {
    if (fIntvlList.empty()) {
        SkASSERT(fCurOpsTaskIndex+1 <= fNumOpsTasks);
        // Although we are technically on an opsTask boundary there is no need to force an
        // intermediate flush here
        return false;
    }

    const Interval* tmp = fIntvlList.peekHead();
    return fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start();
}

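// Stop assignment after the current opsTask by setting '*stopIndex', advance to the next
// opsTask, and expire any active intervals that do not extend past the partial flush.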
void GrResourceAllocator::forceIntermediateFlush(int* stopIndex) {
    *stopIndex = fCurOpsTaskIndex+1;

    // This is interrupting the allocation of resources for this flush. We need to
    // proactively clear the active interval list of any intervals that aren't
    // guaranteed to survive the partial flush lest they become zombies (i.e.,
    // holding a deleted surface proxy).
    const Interval* tmp = fIntvlList.peekHead();
    SkASSERT(fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= tmp->start());

    fCurOpsTaskIndex++;
    SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);

    this->expire(tmp->start());
}

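// Assign GrSurfaces to the proxies of all the recorded intervals, processing opsTasks
// [*startIndex, *stopIndex). Intervals are visited in increasing start order; expired intervals
// release (and possibly recycle) their surfaces as we go. If the resource provider goes over
// budget, assignment stops early at an opsTask boundary and '*stopIndex' is adjusted so the
// caller can perform an intermediate flush. Returns false only if there is nothing to render.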
bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* outError) {
    SkASSERT(outError);
    *outError = fLazyInstantiationError ? AssignError::kFailedProxyInstantiation
                                        : AssignError::kNoError;

    SkASSERT(fNumOpsTasks == fEndOfOpsTaskOpIndices.count());

    fIntvlHash.reset(); // we don't need the interval hash anymore

    if (fCurOpsTaskIndex >= fEndOfOpsTaskOpIndices.count()) {
        return false; // nothing to render
    }

    *startIndex = fCurOpsTaskIndex;
    *stopIndex = fEndOfOpsTaskOpIndices.count();

    if (fIntvlList.empty()) {
        fCurOpsTaskIndex = fEndOfOpsTaskOpIndices.count();
        return true; // no resources to assign
    }

#if GR_ALLOCATION_SPEW
    SkDebugf("assigning opsTasks %d through %d out of %d numOpsTasks\n",
             *startIndex, *stopIndex, fNumOpsTasks);
    SkDebugf("EndOfOpsTaskIndices: ");
    for (int i = 0; i < fEndOfOpsTaskOpIndices.count(); ++i) {
        SkDebugf("%d ", fEndOfOpsTaskOpIndices[i]);
    }
    SkDebugf("\n");
#endif

    SkDEBUGCODE(fAssigned = true;)

#if GR_ALLOCATION_SPEW
    this->dumpIntervals();
#endif
    while (Interval* cur = fIntvlList.popHead()) {
        while (fEndOfOpsTaskOpIndices[fCurOpsTaskIndex] <= cur->start()) {
            fCurOpsTaskIndex++;
            SkASSERT(fCurOpsTaskIndex < fNumOpsTasks);
        }

        this->expire(cur->start());

        if (cur->proxy()->isInstantiated()) {
            fActiveIntvls.insertByIncreasingEnd(cur);

            if (fResourceProvider->overBudget()) {
                // Only force intermediate flushes on opsTask boundaries
                if (this->onOpsTaskBoundary()) {
                    this->forceIntermediateFlush(stopIndex);
                    return true;
                }
            }

            continue;
        }

        if (cur->proxy()->isLazy()) {
            if (!cur->proxy()->priv().doLazyInstantiation(fResourceProvider)) {
                *outError = AssignError::kFailedProxyInstantiation;
            }
        } else if (sk_sp<GrSurface> surface = this->findSurfaceFor(cur->proxy())) {
            // TODO: make getUniqueKey virtual on GrSurfaceProxy
            GrTextureProxy* texProxy = cur->proxy()->asTextureProxy();

            if (texProxy && texProxy->getUniqueKey().isValid()) {
                if (!surface->getUniqueKey().isValid()) {
                    fResourceProvider->assignUniqueKeyToResource(texProxy->getUniqueKey(),
                                                                 surface.get());
                }
                SkASSERT(surface->getUniqueKey() == texProxy->getUniqueKey());
            }

#if GR_ALLOCATION_SPEW
            SkDebugf("Assigning %d to %d\n",
                     surface->uniqueID().asUInt(),
                     cur->proxy()->uniqueID().asUInt());
#endif

            cur->assign(std::move(surface));
        } else {
            SkASSERT(!cur->proxy()->isInstantiated());
            *outError = AssignError::kFailedProxyInstantiation;
        }

        fActiveIntvls.insertByIncreasingEnd(cur);

        if (fResourceProvider->overBudget()) {
            // Only force intermediate flushes on opsTask boundaries
            if (this->onOpsTaskBoundary()) {
                this->forceIntermediateFlush(stopIndex);
                return true;
            }
        }
    }

    // expire all the remaining intervals to drain the active interval list
    this->expire(std::numeric_limits<unsigned int>::max());
    return true;
}

#if GR_ALLOCATION_SPEW
void GrResourceAllocator::dumpIntervals() {
    // Print all the intervals while computing their range
    SkDebugf("------------------------------------------------------------\n");
    unsigned int min = std::numeric_limits<unsigned int>::max();
    unsigned int max = 0;
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: [%2d, %2d] - refProxys:%d surfaceRefs:%d\n",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1,
                 cur->start(),
                 cur->end(),
                 cur->proxy()->priv().getProxyRefCnt(),
                 cur->proxy()->testingOnly_getBackingRefCnt());
        min = std::min(min, cur->start());
        max = std::max(max, cur->end());
    }

    // Draw a graph of the usage intervals
    for (const Interval* cur = fIntvlList.peekHead(); cur; cur = cur->next()) {
        SkDebugf("{ %3d,%3d }: ",
                 cur->proxy()->uniqueID().asUInt(),
                 cur->proxy()->isInstantiated() ? cur->proxy()->underlyingUniqueID().asUInt() : -1);
        for (unsigned int i = min; i <= max; ++i) {
            if (i >= cur->start() && i <= cur->end()) {
                SkDebugf("x");
            } else {
                SkDebugf(" ");
            }
        }
        SkDebugf("\n");
    }
}
#endif