/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrDrawingManager.h"

#include <memory>

#include "include/core/SkDeferredDisplayList.h"
#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrDirectContext.h"
#include "include/gpu/GrRecordingContext.h"
#include "src/core/SkDeferredDisplayListPriv.h"
#include "src/core/SkTTopoSort.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrClientMappedBufferManager.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrCopyRenderTask.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrRenderTask.h"
#include "src/gpu/GrResourceAllocator.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrSoftwarePathRenderer.h"
#include "src/gpu/GrSurfaceContext.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/GrTextureProxy.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/GrTextureResolveRenderTask.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/GrTransferFromRenderTask.h"
#include "src/gpu/GrUnrefDDLTask.h"
#include "src/gpu/GrWaitRenderTask.h"
#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"
#include "src/gpu/text/GrSDFTOptions.h"
#include "src/image/SkSurface_Gpu.h"

GrDrawingManager::RenderTaskDAG::RenderTaskDAG(bool sortRenderTasks)
        : fSortRenderTasks(sortRenderTasks) {}

GrDrawingManager::RenderTaskDAG::~RenderTaskDAG() {}

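// Gather the unique IDs of the tasks currently in the DAG. The drawing manager hands these IDs
// to the onFlush callback objects (in preFlush/postFlush) to identify which tasks are being
// flushed.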
void GrDrawingManager::RenderTaskDAG::gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const {
    idArray->reset(fRenderTasks.count());
    for (int i = 0; i < fRenderTasks.count(); ++i) {
        if (fRenderTasks[i]) {
            (*idArray)[i] = fRenderTasks[i]->uniqueID();
        }
    }
}

void GrDrawingManager::RenderTaskDAG::reset() {
    fRenderTasks.reset();
}

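// Null out (rather than erase) the tasks in [startIndex, stopIndex) so that the indices of the
// remaining tasks stay stable while the flush code walks the DAG by index.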
void GrDrawingManager::RenderTaskDAG::rawRemoveRenderTasks(int startIndex, int stopIndex) {
    for (int i = startIndex; i < stopIndex; ++i) {
        fRenderTasks[i] = nullptr;
    }
}

bool GrDrawingManager::RenderTaskDAG::isUsed(GrSurfaceProxy* proxy) const {
    for (const auto& task : fRenderTasks) {
        if (task && task->isUsed(proxy)) {
            return true;
        }
    }

    return false;
}

GrRenderTask* GrDrawingManager::RenderTaskDAG::add(sk_sp<GrRenderTask> renderTask) {
    if (renderTask) {
        return fRenderTasks.emplace_back(std::move(renderTask)).get();
    }
    return nullptr;
}

GrRenderTask* GrDrawingManager::RenderTaskDAG::addBeforeLast(sk_sp<GrRenderTask> renderTask) {
    SkASSERT(!fRenderTasks.empty());
    if (renderTask) {
        // Release 'fRenderTasks.back()' and grab the raw pointer, in case the SkTArray grows
        // and reallocates during emplace_back.
        fRenderTasks.emplace_back(fRenderTasks.back().release());
        return (fRenderTasks[fRenderTasks.count() - 2] = std::move(renderTask)).get();
    }
    return nullptr;
}

void GrDrawingManager::RenderTaskDAG::add(const SkTArray<sk_sp<GrRenderTask>>& renderTasks) {
#ifdef SK_DEBUG
    for (auto& renderTask : renderTasks) {
        SkASSERT(renderTask->unique());
    }
#endif

    fRenderTasks.push_back_n(renderTasks.count(), renderTasks.begin());
}

void GrDrawingManager::RenderTaskDAG::swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks) {
    SkASSERT(renderTasks->empty());
    renderTasks->swap(fRenderTasks);
}

void GrDrawingManager::RenderTaskDAG::prepForFlush() {
    if (fSortRenderTasks) {
        SkDEBUGCODE(bool result =) SkTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>(
                &fRenderTasks);
        SkASSERT(result);
    }

#ifdef SK_DEBUG
    // This block checks for any unnecessary splits in the opsTasks. If two sequential opsTasks
    // share the same backing GrSurfaceProxy it means the opsTask was artificially split.
    if (fRenderTasks.count()) {
        GrOpsTask* prevOpsTask = fRenderTasks[0]->asOpsTask();
        for (int i = 1; i < fRenderTasks.count(); ++i) {
            GrOpsTask* curOpsTask = fRenderTasks[i]->asOpsTask();

            if (prevOpsTask && curOpsTask) {
                SkASSERT(prevOpsTask->target(0).proxy() != curOpsTask->target(0).proxy());
            }

            prevOpsTask = curOpsTask;
        }
    }
#endif
}

void GrDrawingManager::RenderTaskDAG::closeAll(const GrCaps* caps) {
    for (auto& task : fRenderTasks) {
        if (task) {
            task->makeClosed(*caps);
        }
    }
}

///////////////////////////////////////////////////////////////////////////////////////////////////
GrDrawingManager::GrDrawingManager(GrRecordingContext* context,
                                   const GrPathRendererChain::Options& optionsForPathRendererChain,
                                   bool sortRenderTasks,
                                   bool reduceOpsTaskSplitting)
        : fContext(context)
        , fOptionsForPathRendererChain(optionsForPathRendererChain)
        , fDAG(sortRenderTasks)
        , fPathRendererChain(nullptr)
        , fSoftwarePathRenderer(nullptr)
        , fFlushing(false)
        , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) { }

GrDrawingManager::~GrDrawingManager() {
    fDAG.closeAll(fContext->priv().caps());
    this->removeRenderTasks(0, fDAG.numRenderTasks());
}

bool GrDrawingManager::wasAbandoned() const {
    return fContext->abandoned();
}

void GrDrawingManager::freeGpuResources() {
    for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) {
        if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) {
            // it's safe to just do this because we're iterating in reverse
            fOnFlushCBObjects.removeShuffle(i);
        }
    }

    // a path renderer may be holding onto resources
    fPathRendererChain = nullptr;
    fSoftwarePathRenderer = nullptr;
}

// MDB TODO: make use of the 'proxy' parameter.
bool GrDrawingManager::flush(
        GrSurfaceProxy* proxies[],
        int numProxies,
        SkSurface::BackendSurfaceAccess access,
        const GrFlushInfo& info,
        const GrBackendSurfaceMutableState* newState) {
    SkASSERT(numProxies >= 0);
    SkASSERT(!numProxies || proxies);
    GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext);

    if (fFlushing || this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return false;
    }

    SkDEBUGCODE(this->validate());

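    // The flush can be skipped entirely when the caller isn't requesting semaphores, a finished
    // callback, backend-surface access, or a backend state change, and none of the given proxies
    // have pending work in the DAG or are the target of a recorded DDL.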
    if (!info.fNumSemaphores && !info.fFinishedProc &&
        access == SkSurface::BackendSurfaceAccess::kNoAccess && !newState) {
        bool canSkip = numProxies > 0;
        for (int i = 0; i < numProxies && canSkip; ++i) {
            canSkip = !fDAG.isUsed(proxies[i]) && !this->isDDLTarget(proxies[i]);
        }
        if (canSkip) {
            if (info.fSubmittedProc) {
                info.fSubmittedProc(info.fSubmittedContext, true);
            }
            return false;
        }
    }

    auto direct = fContext->asDirectContext();
    if (!direct) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return false; // Can't flush while DDL recording
    }
    direct->priv().clientMappedBufferManager()->process();

    GrGpu* gpu = direct->priv().getGpu();
    // We have a non-abandoned, direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    fFlushing = true;

    auto resourceProvider = direct->priv().resourceProvider();
    auto resourceCache = direct->priv().getResourceCache();

    // Usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs to
    // flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed yet but still need
    // to be flushed. Closing them here means new opsTasks will be created to replace them if the
    // SkGpuDevice(s) write to them again.
    fDAG.closeAll(fContext->priv().caps());
    fActiveOpsTask = nullptr;

    fDAG.prepForFlush();
    if (!fCpuBufferCache) {
        // We cache more buffers when the backend is using client side arrays. Otherwise, we
        // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU
        // buffer object. Each pool only requires one staging buffer at a time.
        int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 2 : 6;
        fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers);
    }

    GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache);

    GrOnFlushResourceProvider onFlushProvider(this);

    // Prepare any onFlush op lists (e.g. atlases).
    if (!fOnFlushCBObjects.empty()) {
        fDAG.gatherIDs(&fFlushingRenderTaskIDs);

        for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
            onFlushCBObject->preFlush(&onFlushProvider, fFlushingRenderTaskIDs.begin(),
                                      fFlushingRenderTaskIDs.count());
        }
        for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
            onFlushRenderTask->makeClosed(*fContext->priv().caps());
#ifdef SK_DEBUG
            // OnFlush callbacks are invoked during flush, and are therefore expected to handle
            // resource allocation & usage on their own. (No deferred or lazy proxies!)
            onFlushRenderTask->visitTargetAndSrcProxies_debugOnly(
                    [](GrSurfaceProxy* p, GrMipmapped mipMapped) {
                SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred());
                SkASSERT(!p->isLazy());
                if (p->requiresManualMSAAResolve()) {
                    // The onFlush callback is responsible for ensuring MSAA gets resolved.
                    SkASSERT(p->asRenderTargetProxy() && !p->asRenderTargetProxy()->isMSAADirty());
                }
                if (GrMipmapped::kYes == mipMapped) {
                    // The onFlush callback is responsible for regenerating mips if needed.
                    SkASSERT(p->asTextureProxy() && !p->asTextureProxy()->mipmapsAreDirty());
                }
            });
#endif
            onFlushRenderTask->prepare(&flushState);
        }
    }

#if 0
    // Enable this to print out verbose GrOp information
    SkDEBUGCODE(SkDebugf("onFlush renderTasks:"));
    for (const auto& onFlushRenderTask : fOnFlushRenderTasks) {
        SkDEBUGCODE(onFlushRenderTask->dump();)
    }
    SkDEBUGCODE(SkDebugf("Normal renderTasks:"));
    for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
        SkDEBUGCODE(fDAG.renderTask(i)->dump();)
    }
#endif

    int startIndex, stopIndex;
    bool flushed = false;

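    // Let the resource allocator assign backing resources to the proxies. It walks the tasks'
    // proxy intervals and hands back ranges [startIndex, stopIndex) of tasks whose proxies have
    // been dealt with; each range is then executed (or removed, on an allocation failure).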
    {
        GrResourceAllocator alloc(resourceProvider SkDEBUGCODE(, fDAG.numRenderTasks()));
        for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
            if (fDAG.renderTask(i)) {
                fDAG.renderTask(i)->gatherProxyIntervals(&alloc);
            }
            alloc.markEndOfOpsTask(i);
        }
        alloc.determineRecyclability();

        GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError;
        int numRenderTasksExecuted = 0;
        while (alloc.assign(&startIndex, &stopIndex, &error)) {
            if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) {
                for (int i = startIndex; i < stopIndex; ++i) {
                    GrRenderTask* renderTask = fDAG.renderTask(i);
                    if (!renderTask) {
                        continue;
                    }
                    if (!renderTask->isInstantiated()) {
                        // No need to call the renderTask's handleInternalAllocationFailure
                        // since we will already skip executing the renderTask since it is not
                        // instantiated.
                        continue;
                    }
                    renderTask->handleInternalAllocationFailure();
                }
                this->removeRenderTasks(startIndex, stopIndex);
            }

            if (this->executeRenderTasks(
                    startIndex, stopIndex, &flushState, &numRenderTasksExecuted)) {
                flushed = true;
            }
        }
    }

#ifdef SK_DEBUG
    for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
        // All render tasks should have been cleared out by now; we only reset the array below to
        // reclaim storage.
        SkASSERT(!fDAG.renderTask(i));
    }
#endif
    fLastRenderTasks.reset();
    fDAG.reset();
    this->clearDDLTargets();

#ifdef SK_DEBUG
    // In non-DDL mode this checks that all the flushed ops have been freed from the memory pool.
    // When we move to partial flushes this assert will no longer be valid.
    // In DDL mode this check is somewhat superfluous since the memory for most of the ops/opsTasks
    // will be stored in the DDL's GrOpMemoryPools.
    GrOpMemoryPool* opMemoryPool = fContext->priv().opMemoryPool();
    SkASSERT(opMemoryPool->isEmpty());
#endif

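    // Hand the flush info (semaphores, finished/submitted procs) and any requested backend
    // surface state change over to the GPU backend.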
    gpu->executeFlushInfo(proxies, numProxies, access, info, newState);

    // Give the cache a chance to purge resources that become purgeable due to flushing.
    if (flushed) {
        resourceCache->purgeAsNeeded();
        flushed = false;
    }
    for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) {
        onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingRenderTaskIDs.begin(),
                                   fFlushingRenderTaskIDs.count());
        flushed = true;
    }
    if (flushed) {
        resourceCache->purgeAsNeeded();
    }
    fFlushingRenderTaskIDs.reset();
    fFlushing = false;

    return true;
}

bool GrDrawingManager::submitToGpu(bool syncToCpu) {
    if (fFlushing || this->wasAbandoned()) {
        return false;
    }

    auto direct = fContext->asDirectContext();
    if (!direct) {
        return false; // Can't submit while DDL recording
    }
    GrGpu* gpu = direct->priv().getGpu();
    return gpu->submitToGpu(syncToCpu);
}

bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState* flushState,
                                          int* numRenderTasksExecuted) {
    SkASSERT(startIndex <= stopIndex && stopIndex <= fDAG.numRenderTasks());

#if GR_FLUSH_TIME_OP_SPEW
    SkDebugf("Flushing opsTask: %d to %d out of [%d, %d]\n",
             startIndex, stopIndex, 0, fDAG.numRenderTasks());
    for (int i = startIndex; i < stopIndex; ++i) {
        if (fDAG.renderTask(i)) {
            fDAG.renderTask(i)->dump(true);
        }
    }
#endif

    bool anyRenderTasksExecuted = false;

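    // First give each instantiated task a chance to record its preparatory work (e.g. uploads)
    // into the flush state before any task is executed.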
    for (int i = startIndex; i < stopIndex; ++i) {
        GrRenderTask* renderTask = fDAG.renderTask(i);
        if (!renderTask || !renderTask->isInstantiated()) {
            continue;
        }

        SkASSERT(renderTask->deferredProxiesAreInstantiated());

        renderTask->prepare(flushState);
    }

    // Upload all data to the GPU
    flushState->preExecuteDraws();

    // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of resources
    // for each command buffer associated with the oplists. If this gets too large we can cause the
    // devices to go OOM. In practice we usually only hit this case in our tests, but to be safe we
    // put a cap on the number of oplists we will execute before flushing to the GPU to relieve some
    // memory pressure.
    static constexpr int kMaxRenderTasksBeforeFlush = 100;

    // Execute the onFlush renderTasks first, if any.
    for (sk_sp<GrRenderTask>& onFlushRenderTask : fOnFlushRenderTasks) {
        if (!onFlushRenderTask->execute(flushState)) {
            SkDebugf("WARNING: onFlushRenderTask failed to execute.\n");
        }
        SkASSERT(onFlushRenderTask->unique());
        onFlushRenderTask->disown(this);
        onFlushRenderTask = nullptr;
        (*numRenderTasksExecuted)++;
        if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->submitToGpu(false);
            *numRenderTasksExecuted = 0;
        }
    }
    fOnFlushRenderTasks.reset();

    // Execute the normal op lists.
    for (int i = startIndex; i < stopIndex; ++i) {
        GrRenderTask* renderTask = fDAG.renderTask(i);
        if (!renderTask || !renderTask->isInstantiated()) {
            continue;
        }

        if (renderTask->execute(flushState)) {
            anyRenderTasksExecuted = true;
        }
        (*numRenderTasksExecuted)++;
        if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) {
            flushState->gpu()->submitToGpu(false);
            *numRenderTasksExecuted = 0;
        }
    }

    SkASSERT(!flushState->opsRenderPass());
    SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush());

    // We reset the flush state before the RenderTasks so that the last resources to be freed are
    // those that are written to in the RenderTasks. This helps to make sure the most recently used
    // resources are the last to be purged by the resource cache.
    flushState->reset();

    this->removeRenderTasks(startIndex, stopIndex);

    return anyRenderTasksExecuted;
}

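// Detach the tasks in [startIndex, stopIndex) from the drawing manager once they have been
// flushed (or discarded after a failed resource allocation), and null out their slots in the DAG.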
void GrDrawingManager::removeRenderTasks(int startIndex, int stopIndex) {
    for (int i = startIndex; i < stopIndex; ++i) {
        GrRenderTask* task = fDAG.renderTask(i);
        if (!task) {
            continue;
        }
        if (!task->unique()) {
            // TODO: Eventually this should be guaranteed unique: http://skbug.com/7111
            task->endFlush(this);
        }
        task->disown(this);
    }
    fDAG.rawRemoveRenderTasks(startIndex, stopIndex);
}

static void resolve_and_mipmap(GrGpu* gpu, GrSurfaceProxy* proxy) {
    if (!proxy->isInstantiated()) {
        return;
    }

    // In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is
    // because clients expect the flushed surface's backing texture to be fully resolved
    // upon return.
    if (proxy->requiresManualMSAAResolve()) {
        auto* rtProxy = proxy->asRenderTargetProxy();
        SkASSERT(rtProxy);
        if (rtProxy->isMSAADirty()) {
            SkASSERT(rtProxy->peekRenderTarget());
            gpu->resolveRenderTarget(rtProxy->peekRenderTarget(), rtProxy->msaaDirtyRect());
            gpu->submitToGpu(false);
            rtProxy->markMSAAResolved();
        }
    }
    // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in
    // case their backend textures are being stolen.
    // (This special case is exercised by the ReimportImageTextureWithMipLevels test.)
    // FIXME: It might be better to plumb down a "we're going to steal the backends" flag.
    if (auto* textureProxy = proxy->asTextureProxy()) {
        if (textureProxy->mipmapsAreDirty()) {
            SkASSERT(textureProxy->peekTexture());
            gpu->regenerateMipMapLevels(textureProxy->peekTexture());
            textureProxy->markMipmapsClean();
        }
    }
}

GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(
        GrSurfaceProxy* proxies[],
        int numProxies,
        SkSurface::BackendSurfaceAccess access,
        const GrFlushInfo& info,
        const GrBackendSurfaceMutableState* newState) {
    if (this->wasAbandoned()) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo;
    }
    SkDEBUGCODE(this->validate());
    SkASSERT(numProxies >= 0);
    SkASSERT(!numProxies || proxies);

    auto direct = fContext->asDirectContext();
    if (!direct) {
        if (info.fSubmittedProc) {
            info.fSubmittedProc(info.fSubmittedContext, false);
        }
        if (info.fFinishedProc) {
            info.fFinishedProc(info.fFinishedContext);
        }
        return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording
    }

    GrGpu* gpu = direct->priv().getGpu();
    // We have a non-abandoned, direct GrContext. It must have a GrGpu.
    SkASSERT(gpu);

    // TODO: It is important to upgrade the drawingmanager to just flushing the
    // portion of the DAG required by 'proxies' in order to restore some of the
    // semantics of this method.
    bool didFlush = this->flush(proxies, numProxies, access, info, newState);
    for (int i = 0; i < numProxies; ++i) {
        resolve_and_mipmap(gpu, proxies[i]);
    }

    SkDEBUGCODE(this->validate());

    if (!didFlush || (!direct->priv().caps()->semaphoreSupport() && info.fNumSemaphores)) {
        return GrSemaphoresSubmitted::kNo;
    }
    return GrSemaphoresSubmitted::kYes;
}

void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) {
    fOnFlushCBObjects.push_back(onFlushCBObject);
}

#if GR_TEST_UTILS
void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) {
    int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) -
            fOnFlushCBObjects.begin();
    SkASSERT(n < fOnFlushCBObjects.count());
    fOnFlushCBObjects.removeShuffle(n);
}
#endif

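// Record 'task' as the most recent task to target 'proxy'. Passing a null task removes any
// existing entry for the proxy.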
void GrDrawingManager::setLastRenderTask(const GrSurfaceProxy* proxy, GrRenderTask* task) {
#ifdef SK_DEBUG
    if (GrRenderTask* prior = this->getLastRenderTask(proxy)) {
        SkASSERT(prior->isClosed());
    }
#endif
    uint32_t key = proxy->uniqueID().asUInt();
    if (task) {
        fLastRenderTasks.set(key, task);
    } else if (fLastRenderTasks.find(key)) {
        fLastRenderTasks.remove(key);
    }
}

GrRenderTask* GrDrawingManager::getLastRenderTask(const GrSurfaceProxy* proxy) const {
    auto entry = fLastRenderTasks.find(proxy->uniqueID().asUInt());
    return entry ? *entry : nullptr;
}

GrOpsTask* GrDrawingManager::getLastOpsTask(const GrSurfaceProxy* proxy) const {
    GrRenderTask* task = this->getLastRenderTask(proxy);
    return task ? task->asOpsTask() : nullptr;
}

void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) {
    SkDEBUGCODE(this->validate());

    // no renderTask should receive a new command after this
    fDAG.closeAll(fContext->priv().caps());
    fActiveOpsTask = nullptr;

    fDAG.swap(&ddl->fRenderTasks);
    SkASSERT(!fDAG.numRenderTasks());

    for (auto& renderTask : ddl->fRenderTasks) {
        renderTask->disown(this);
        renderTask->prePrepare(fContext);
    }

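    // Transfer ownership of the recording-time arenas and cached program data to the DDL; the
    // recorded tasks' ops live in that memory, so the DDL must keep it alive.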
    ddl->fArenas = std::move(fContext->priv().detachArenas());

    fContext->priv().detachProgramData(&ddl->fProgramData);

    if (fPathRendererChain) {
        if (auto ccpr = fPathRendererChain->getCoverageCountingPathRenderer()) {
            ddl->fPendingPaths = ccpr->detachPendingPaths();
        }
    }

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::copyRenderTasksFromDDL(sk_sp<const SkDeferredDisplayList> ddl,
                                              GrRenderTargetProxy* newDest) {
    SkDEBUGCODE(this->validate());

    if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(*fContext->priv().caps());
        fActiveOpsTask = nullptr;
    }

    // Propagate the DDL target proxy's state information to the replay destination proxy.
    if (ddl->priv().targetProxy()->isMSAADirty()) {
        newDest->markMSAADirty(ddl->priv().targetProxy()->msaaDirtyRect(),
                               ddl->characterization().origin());
    }
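    // The replayed tasks will only write to the base level of the new target, so any mip levels
    // it already has must be marked dirty.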
    GrTextureProxy* newTextureProxy = newDest->asTextureProxy();
    if (newTextureProxy && GrMipmapped::kYes == newTextureProxy->mipmapped()) {
        newTextureProxy->markMipmapsDirty();
    }

    this->addDDLTarget(newDest, ddl->priv().targetProxy());

    // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData.
    // The lazy proxy that references it (in the copied opsTasks) will steal its GrTexture.
    ddl->fLazyProxyData->fReplayDest = newDest;

    if (ddl->fPendingPaths.size()) {
        GrCoverageCountingPathRenderer* ccpr = this->getCoverageCountingPathRenderer();

        ccpr->mergePendingPaths(ddl->fPendingPaths);
    }

    fDAG.add(ddl->fRenderTasks);

    // Add a task to unref the DDL after flush.
    GrRenderTask* unrefTask = fDAG.add(sk_make_sp<GrUnrefDDLTask>(std::move(ddl)));
    unrefTask->makeClosed(*fContext->priv().caps());

    SkDEBUGCODE(this->validate());
}

#ifdef SK_DEBUG
void GrDrawingManager::validate() const {
    if (fDAG.sortingRenderTasks() && fReduceOpsTaskSplitting) {
        SkASSERT(!fActiveOpsTask);
    } else {
        if (fActiveOpsTask) {
            SkASSERT(!fDAG.empty());
            SkASSERT(!fActiveOpsTask->isClosed());
            SkASSERT(fActiveOpsTask == fDAG.back());
        }

        for (int i = 0; i < fDAG.numRenderTasks(); ++i) {
            if (fActiveOpsTask != fDAG.renderTask(i)) {
                // The resolveTask associated with the activeTask remains open for as long as the
                // activeTask does.
                bool isActiveResolveTask =
                        fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG.renderTask(i);
                SkASSERT(isActiveResolveTask || fDAG.renderTask(i)->isClosed());
            }
        }

        if (!fDAG.empty() && !fDAG.back()->isClosed()) {
            SkASSERT(fActiveOpsTask == fDAG.back());
        }
    }
}
#endif

void GrDrawingManager::closeRenderTasksForNewRenderTask(GrSurfaceProxy* target) {
    if (target && fDAG.sortingRenderTasks() && fReduceOpsTaskSplitting) {
        // In this case we need to close all the renderTasks that rely on the current contents of
        // 'target'. That is because we're going to update the contents of the proxy, so those
        // tasks need to be split in case they use both the old and the new content. (This is a bit
        // of overkill: they really only need to be split if they ever reference the proxy's
        // contents again, but that is hard to predict/handle.)
        if (GrRenderTask* lastRenderTask = this->getLastRenderTask(target)) {
            lastRenderTask->closeThoseWhoDependOnMe(*fContext->priv().caps());
        }
    } else if (fActiveOpsTask) {
        // This is a temporary fix for the partial-MDB world. In that world we're not
        // reordering so ops that (in the single opsTask world) would've just glommed onto the
        // end of the single opsTask but referred to a far earlier RT need to appear in their
        // own opsTask.
        fActiveOpsTask->makeClosed(*fContext->priv().caps());
        fActiveOpsTask = nullptr;
    }
}

sk_sp<GrOpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView,
                                              bool managedOpsTask) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    GrSurfaceProxy* proxy = surfaceView.proxy();
    this->closeRenderTasksForNewRenderTask(proxy);

    sk_sp<GrOpsTask> opsTask(new GrOpsTask(this, fContext->priv().arenas(),
                                           std::move(surfaceView),
                                           fContext->priv().auditTrail()));
    SkASSERT(this->getLastRenderTask(proxy) == opsTask.get());

    if (managedOpsTask) {
        fDAG.add(opsTask);

        if (!fDAG.sortingRenderTasks() || !fReduceOpsTaskSplitting) {
            fActiveOpsTask = opsTask.get();
        }
    }

    SkDEBUGCODE(this->validate());
    return opsTask;
}

GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTask(const GrCaps& caps) {
    // Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we are
    // in sorting and opsTask reduction mode) the render tasks that depend on any proxy's current
    // state. This is because those opsTasks can still receive new ops and because if they refer to
    // the mipmapped version of 'proxy', they will then come to depend on the render task being
    // created here.
    //
    // Add the new textureResolveTask before the fActiveOpsTask (if not in
    // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task.
    // NOTE: Putting it here will also reduce the amount of work required by the topological sort.
    return static_cast<GrTextureResolveRenderTask*>(fDAG.addBeforeLast(
            sk_make_sp<GrTextureResolveRenderTask>()));
}

void GrDrawingManager::newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
                                         std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores,
                                         int numSemaphores) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    const GrCaps& caps = *fContext->priv().caps();

    sk_sp<GrWaitRenderTask> waitTask = sk_make_sp<GrWaitRenderTask>(GrSurfaceProxyView(proxy),
                                                                    std::move(semaphores),
                                                                    numSemaphores);
    if (fReduceOpsTaskSplitting) {
        GrRenderTask* lastTask = this->getLastRenderTask(proxy.get());
        if (lastTask && !lastTask->isClosed()) {
            // We directly make the currently open renderTask depend on waitTask instead of using
            // the proxy version of addDependency. The waitTask will never need to trigger any
            // resolves or mip map generation which is the main advantage of going through the proxy
            // version. Additionally we would've had to temporarily set the wait task as the
            // lastRenderTask on the proxy, add the dependency, and then reset the lastRenderTask to
            // lastTask. Additionally we add all dependencies of lastTask to waitTask so that the
            // waitTask doesn't get reordered before them and unnecessarily block those tasks.
            // Note: Any previous Ops already in lastTask will get blocked by the wait semaphore
            // even though they don't need to be for correctness.

            // Make sure we add the dependencies of lastTask to waitTask first or else we'll get a
            // circular self dependency of waitTask on waitTask.
            waitTask->addDependenciesFromOtherTask(lastTask);
            lastTask->addDependency(waitTask.get());
        } else {
            // If there is a last task we set the waitTask to depend on it so that it doesn't get
            // reordered in front of the lastTask causing the lastTask to be blocked by the
            // semaphore. Again we directly just go through adding the dependency to the task and
            // not the proxy since we don't need to worry about resolving anything.
            if (lastTask) {
                waitTask->addDependency(lastTask);
            }
            this->setLastRenderTask(proxy.get(), waitTask.get());
        }
        fDAG.add(waitTask);
    } else {
        if (fActiveOpsTask && (fActiveOpsTask->target(0).proxy() == proxy.get())) {
            SkASSERT(this->getLastRenderTask(proxy.get()) == fActiveOpsTask);
            fDAG.addBeforeLast(waitTask);
            // In this case we keep the current renderTask open but just insert the new waitTask
            // before it in the list. The waitTask will never need to trigger any resolves or mip
            // map generation which is the main advantage of going through the proxy version.
            // Additionally we would've had to temporarily set the wait task as the lastRenderTask
            // on the proxy, add the dependency, and then reset the lastRenderTask to
            // fActiveOpsTask. Additionally we make the waitTask depend on all of fActiveOpsTask
            // dependencies so that we don't unnecessarily reorder the waitTask before them.
            // Note: Any previous Ops already in fActiveOpsTask will get blocked by the wait
            // semaphore even though they don't need to be for correctness.

            // Make sure we add the dependencies of fActiveOpsTask to waitTask first or else we'll
            // get a circular self dependency of waitTask on waitTask.
            waitTask->addDependenciesFromOtherTask(fActiveOpsTask);
            fActiveOpsTask->addDependency(waitTask.get());
        } else {
            // In this case we just close the previous renderTask and append the waitTask to the
            // DAG. Since it is now the last task, we call setLastRenderTask on the proxy. If there
            // is a lastTask on the proxy we make waitTask depend on that task. This dependency
            // isn't strictly needed but it does keep the DAG from reordering the waitTask earlier
            // and blocking more tasks.
            if (GrRenderTask* lastTask = this->getLastRenderTask(proxy.get())) {
                waitTask->addDependency(lastTask);
            }
            this->setLastRenderTask(proxy.get(), waitTask.get());
            this->closeRenderTasksForNewRenderTask(proxy.get());
            fDAG.add(waitTask);
        }
    }
    waitTask->makeClosed(caps);

    SkDEBUGCODE(this->validate());
}

void GrDrawingManager::newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy,
                                                 const SkIRect& srcRect,
                                                 GrColorType surfaceColorType,
                                                 GrColorType dstColorType,
                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                 size_t dstOffset) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);
    // This copies from srcProxy to dstBuffer so it doesn't have a real target.
    this->closeRenderTasksForNewRenderTask(nullptr);

    GrRenderTask* task = fDAG.add(sk_make_sp<GrTransferFromRenderTask>(
            srcProxy, srcRect, surfaceColorType, dstColorType,
            std::move(dstBuffer), dstOffset));

    const GrCaps& caps = *fContext->priv().caps();

    // We always say GrMipmapped::kNo here since we are always just copying from the base layer. We
    // don't need to make sure the whole mip map chain is valid.
    task->addDependency(this, srcProxy.get(), GrMipmapped::kNo,
                        GrTextureResolveManager(this), caps);
    task->makeClosed(caps);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
}

bool GrDrawingManager::newCopyRenderTask(GrSurfaceProxyView srcView,
                                         const SkIRect& srcRect,
                                         GrSurfaceProxyView dstView,
                                         const SkIPoint& dstPoint) {
    SkDEBUGCODE(this->validate());
    SkASSERT(fContext);

    this->closeRenderTasksForNewRenderTask(dstView.proxy());
    const GrCaps& caps = *fContext->priv().caps();

    GrSurfaceProxy* srcProxy = srcView.proxy();

    GrRenderTask* task =
            fDAG.add(GrCopyRenderTask::Make(this, std::move(srcView), srcRect, std::move(dstView),
                                            dstPoint, &caps));
    if (!task) {
        return false;
    }

    // We always say GrMipmapped::kNo here since we are always just copying from the base layer to
    // another base layer. We don't need to make sure the whole mip map chain is valid.
    task->addDependency(this, srcProxy, GrMipmapped::kNo, GrTextureResolveManager(this), caps);
    task->makeClosed(caps);

    // We have closed the previous active oplist but since a new oplist isn't being added there
    // shouldn't be an active one.
    SkASSERT(!fActiveOpsTask);
    SkDEBUGCODE(this->validate());
    return true;
}

/*
 * This method finds a path renderer that can draw the specified path on
 * the provided target.
 * Due to its expense, the software path renderer has been split out so it
 * can be individually allowed/disallowed via the "allowSW" boolean.
 */
GrPathRenderer* GrDrawingManager::getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                                  bool allowSW,
                                                  GrPathRendererChain::DrawType drawType,
                                                  GrPathRenderer::StencilSupport* stencilSupport) {

    if (!fPathRendererChain) {
        fPathRendererChain =
                std::make_unique<GrPathRendererChain>(fContext, fOptionsForPathRendererChain);
    }

    GrPathRenderer* pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport);
    if (!pr && allowSW) {
        auto swPR = this->getSoftwarePathRenderer();
        if (GrPathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) {
            pr = swPR;
        }
    }

#if GR_PATH_RENDERER_SPEW
    if (pr) {
        SkDebugf("getPathRenderer: %s\n", pr->name());
    }
#endif

    return pr;
}

GrPathRenderer* GrDrawingManager::getSoftwarePathRenderer() {
    if (!fSoftwarePathRenderer) {
        fSoftwarePathRenderer.reset(
                new GrSoftwarePathRenderer(fContext->priv().proxyProvider(),
                                           fOptionsForPathRendererChain.fAllowPathMaskCaching));
    }
    return fSoftwarePathRenderer.get();
}

GrCoverageCountingPathRenderer* GrDrawingManager::getCoverageCountingPathRenderer() {
    if (!fPathRendererChain) {
        fPathRendererChain =
                std::make_unique<GrPathRendererChain>(fContext, fOptionsForPathRendererChain);
    }
    return fPathRendererChain->getCoverageCountingPathRenderer();
}

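// Flush (and submit) only if the resource cache has requested a flush, e.g. because it is over
// budget and needs outstanding work to complete before resources can be purged.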
void GrDrawingManager::flushIfNecessary() {
    auto direct = fContext->asDirectContext();
    if (!direct) {
        return;
    }

    auto resourceCache = direct->priv().getResourceCache();
    if (resourceCache && resourceCache->requestsFlush()) {
        if (this->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(),
                        nullptr)) {
            this->submitToGpu(false);
        }
        resourceCache->purgeAsNeeded();
    }
}