1 | /* |
2 | * Copyright 2015 Google Inc. |
3 | * |
4 | * Use of this source code is governed by a BSD-style license that can be |
5 | * found in the LICENSE file. |
6 | */ |
7 | |
8 | #include "src/gpu/GrDrawingManager.h" |
9 | |
10 | #include "include/core/SkDeferredDisplayList.h" |
11 | #include "include/gpu/GrBackendSemaphore.h" |
12 | #include "include/private/GrRecordingContext.h" |
13 | #include "src/core/SkTTopoSort.h" |
14 | #include "src/gpu/GrAuditTrail.h" |
15 | #include "src/gpu/GrClientMappedBufferManager.h" |
16 | #include "src/gpu/GrContextPriv.h" |
17 | #include "src/gpu/GrCopyRenderTask.h" |
18 | #include "src/gpu/GrGpu.h" |
19 | #include "src/gpu/GrMemoryPool.h" |
20 | #include "src/gpu/GrOnFlushResourceProvider.h" |
21 | #include "src/gpu/GrRecordingContextPriv.h" |
22 | #include "src/gpu/GrRenderTargetContext.h" |
23 | #include "src/gpu/GrRenderTargetProxy.h" |
24 | #include "src/gpu/GrRenderTask.h" |
25 | #include "src/gpu/GrResourceAllocator.h" |
26 | #include "src/gpu/GrResourceProvider.h" |
27 | #include "src/gpu/GrSoftwarePathRenderer.h" |
28 | #include "src/gpu/GrSurfaceContext.h" |
29 | #include "src/gpu/GrSurfaceProxyPriv.h" |
30 | #include "src/gpu/GrTexture.h" |
31 | #include "src/gpu/GrTexturePriv.h" |
32 | #include "src/gpu/GrTextureProxy.h" |
33 | #include "src/gpu/GrTextureProxyPriv.h" |
34 | #include "src/gpu/GrTextureResolveRenderTask.h" |
35 | #include "src/gpu/GrTracing.h" |
36 | #include "src/gpu/GrTransferFromRenderTask.h" |
37 | #include "src/gpu/GrWaitRenderTask.h" |
38 | #include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h" |
39 | #include "src/gpu/text/GrTextContext.h" |
40 | #include "src/image/SkSurface_Gpu.h" |
41 | |
42 | GrDrawingManager::RenderTaskDAG::RenderTaskDAG(bool sortRenderTasks) |
43 | : fSortRenderTasks(sortRenderTasks) {} |
44 | |
45 | GrDrawingManager::RenderTaskDAG::~RenderTaskDAG() {} |
46 | |
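// Collects the unique IDs of the render tasks currently in the DAG. This is used during flush
// to tell the onFlush callback objects which render tasks are being flushed.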
47 | void GrDrawingManager::RenderTaskDAG::gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const { |
48 | idArray->reset(fRenderTasks.count()); |
49 | for (int i = 0; i < fRenderTasks.count(); ++i) { |
50 | if (fRenderTasks[i]) { |
51 | (*idArray)[i] = fRenderTasks[i]->uniqueID(); |
52 | } |
53 | } |
54 | } |
55 | |
56 | void GrDrawingManager::RenderTaskDAG::reset() { |
57 | fRenderTasks.reset(); |
58 | } |
59 | |
60 | void GrDrawingManager::RenderTaskDAG::removeRenderTask(int index) { |
61 | if (!fRenderTasks[index]->unique()) { |
62 | // TODO: Eventually this should be guaranteed unique: http://skbug.com/7111 |
63 | fRenderTasks[index]->endFlush(); |
64 | } |
65 | |
66 | fRenderTasks[index] = nullptr; |
67 | } |
68 | |
69 | void GrDrawingManager::RenderTaskDAG::removeRenderTasks(int startIndex, int stopIndex) { |
70 | for (int i = startIndex; i < stopIndex; ++i) { |
71 | if (!fRenderTasks[i]) { |
72 | continue; |
73 | } |
74 | this->removeRenderTask(i); |
75 | } |
76 | } |
77 | |
78 | bool GrDrawingManager::RenderTaskDAG::isUsed(GrSurfaceProxy* proxy) const { |
79 | for (int i = 0; i < fRenderTasks.count(); ++i) { |
80 | if (fRenderTasks[i] && fRenderTasks[i]->isUsed(proxy)) { |
81 | return true; |
82 | } |
83 | } |
84 | |
85 | return false; |
86 | } |
87 | |
88 | GrRenderTask* GrDrawingManager::RenderTaskDAG::add(sk_sp<GrRenderTask> renderTask) { |
89 | if (renderTask) { |
90 | return fRenderTasks.emplace_back(std::move(renderTask)).get(); |
91 | } |
92 | return nullptr; |
93 | } |
94 | |
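// Inserts 'renderTask' immediately before the current final task in the DAG, e.g. so that a
// texture resolve or wait task can run before the still-open task that depends on it.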
95 | GrRenderTask* GrDrawingManager::RenderTaskDAG::addBeforeLast(sk_sp<GrRenderTask> renderTask) { |
96 | SkASSERT(!fRenderTasks.empty()); |
97 | if (renderTask) { |
98 | // Release 'fRenderTasks.back()' and grab the raw pointer, in case the SkTArray grows |
99 | // and reallocates during emplace_back. |
100 | fRenderTasks.emplace_back(fRenderTasks.back().release()); |
101 | return (fRenderTasks[fRenderTasks.count() - 2] = std::move(renderTask)).get(); |
102 | } |
103 | return nullptr; |
104 | } |
105 | |
106 | void GrDrawingManager::RenderTaskDAG::add(const SkTArray<sk_sp<GrRenderTask>>& renderTasks) { |
107 | #ifdef SK_DEBUG |
108 | for (auto& renderTask : renderTasks) { |
109 | SkASSERT(renderTask->unique()); |
110 | } |
111 | #endif |
112 | |
113 | fRenderTasks.push_back_n(renderTasks.count(), renderTasks.begin()); |
114 | } |
115 | |
116 | void GrDrawingManager::RenderTaskDAG::swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks) { |
117 | SkASSERT(renderTasks->empty()); |
118 | renderTasks->swap(fRenderTasks); |
119 | } |
120 | |
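// Topologically sorts the DAG (when sorting is enabled) so that every render task comes after
// the tasks it depends on; in debug builds, also checks for unnecessary opsTask splits.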
121 | void GrDrawingManager::RenderTaskDAG::prepForFlush() { |
122 | if (fSortRenderTasks) { |
123 | SkDEBUGCODE(bool result =) SkTTopoSort<GrRenderTask, GrRenderTask::TopoSortTraits>( |
124 | &fRenderTasks); |
125 | SkASSERT(result); |
126 | } |
127 | |
128 | #ifdef SK_DEBUG |
129 | // This block checks for any unnecessary splits in the opsTasks. If two sequential opsTasks |
130 | // share the same backing GrSurfaceProxy it means the opsTask was artificially split. |
131 | if (fRenderTasks.count()) { |
132 | GrOpsTask* prevOpsTask = fRenderTasks[0]->asOpsTask(); |
133 | for (int i = 1; i < fRenderTasks.count(); ++i) { |
134 | GrOpsTask* curOpsTask = fRenderTasks[i]->asOpsTask(); |
135 | |
136 | if (prevOpsTask && curOpsTask) { |
137 | SkASSERT(prevOpsTask->fTargetView != curOpsTask->fTargetView); |
138 | } |
139 | |
140 | prevOpsTask = curOpsTask; |
141 | } |
142 | } |
143 | #endif |
144 | } |
145 | |
146 | void GrDrawingManager::RenderTaskDAG::closeAll(const GrCaps* caps) { |
147 | for (int i = 0; i < fRenderTasks.count(); ++i) { |
148 | if (fRenderTasks[i]) { |
149 | fRenderTasks[i]->makeClosed(*caps); |
150 | } |
151 | } |
152 | } |
153 | |
154 | void GrDrawingManager::RenderTaskDAG::cleanup(const GrCaps* caps) { |
155 | for (int i = 0; i < fRenderTasks.count(); ++i) { |
156 | if (!fRenderTasks[i]) { |
157 | continue; |
158 | } |
159 | |
160 | // no renderTask should receive a new command after this |
161 | fRenderTasks[i]->makeClosed(*caps); |
162 | |
163 | // We shouldn't need to do this, but it turns out some clients still hold onto opsTasks |
164 | // after a cleanup. |
165 | // MDB TODO: is this still true? |
166 | if (!fRenderTasks[i]->unique()) { |
167 | // TODO: Eventually this should be guaranteed unique. |
168 | // https://bugs.chromium.org/p/skia/issues/detail?id=7111 |
169 | fRenderTasks[i]->endFlush(); |
170 | } |
171 | } |
172 | |
173 | fRenderTasks.reset(); |
174 | } |
175 | |
176 | /////////////////////////////////////////////////////////////////////////////////////////////////// |
177 | GrDrawingManager::GrDrawingManager(GrRecordingContext* context, |
178 | const GrPathRendererChain::Options& optionsForPathRendererChain, |
179 | const GrTextContext::Options& optionsForTextContext, |
180 | bool sortRenderTasks, |
181 | bool reduceOpsTaskSplitting) |
182 | : fContext(context) |
183 | , fOptionsForPathRendererChain(optionsForPathRendererChain) |
184 | , fOptionsForTextContext(optionsForTextContext) |
185 | , fDAG(sortRenderTasks) |
186 | , fTextContext(nullptr) |
187 | , fPathRendererChain(nullptr) |
188 | , fSoftwarePathRenderer(nullptr) |
189 | , fFlushing(false) |
190 | , fReduceOpsTaskSplitting(reduceOpsTaskSplitting) { |
191 | } |
192 | |
193 | void GrDrawingManager::cleanup() { |
194 | fDAG.cleanup(fContext->priv().caps()); |
195 | |
196 | fPathRendererChain = nullptr; |
197 | fSoftwarePathRenderer = nullptr; |
198 | |
199 | fOnFlushCBObjects.reset(); |
200 | } |
201 | |
202 | GrDrawingManager::~GrDrawingManager() { |
203 | this->cleanup(); |
204 | } |
205 | |
206 | bool GrDrawingManager::wasAbandoned() const { |
207 | return fContext->priv().abandoned(); |
208 | } |
209 | |
210 | void GrDrawingManager::freeGpuResources() { |
211 | for (int i = fOnFlushCBObjects.count() - 1; i >= 0; --i) { |
212 | if (!fOnFlushCBObjects[i]->retainOnFreeGpuResources()) { |
213 | // it's safe to just do this because we're iterating in reverse |
214 | fOnFlushCBObjects.removeShuffle(i); |
215 | } |
216 | } |
217 | |
218 | // a path renderer may be holding onto resources |
219 | fPathRendererChain = nullptr; |
220 | fSoftwarePathRenderer = nullptr; |
221 | } |
222 | |
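// Flushes the work recorded in the DAG: closes and sorts the render tasks, runs the onFlush
// callbacks, assigns backing GrSurfaces to proxies via GrResourceAllocator, and then prepares
// and executes the tasks.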
223 | // MDB TODO: make use of the 'proxy' parameter. |
224 | bool GrDrawingManager::flush(GrSurfaceProxy* proxies[], int numProxies, |
225 | SkSurface::BackendSurfaceAccess access, const GrFlushInfo& info, |
226 | const GrPrepareForExternalIORequests& externalRequests) { |
227 | SkASSERT(numProxies >= 0); |
228 | SkASSERT(!numProxies || proxies); |
229 | GR_CREATE_TRACE_MARKER_CONTEXT("GrDrawingManager", "flush", fContext); |
230 | |
231 | if (fFlushing || this->wasAbandoned()) { |
232 | if (info.fFinishedProc) { |
233 | info.fFinishedProc(info.fFinishedContext); |
234 | } |
235 | return false; |
236 | } |
237 | |
238 | SkDEBUGCODE(this->validate()); |
239 | |
240 | if (kNone_GrFlushFlags == info.fFlags && !info.fNumSemaphores && !info.fFinishedProc && |
241 | !externalRequests.hasRequests()) { |
242 | bool canSkip = numProxies > 0; |
243 | for (int i = 0; i < numProxies && canSkip; ++i) { |
244 | canSkip = !fDAG.isUsed(proxies[i]) && !this->isDDLTarget(proxies[i]); |
245 | } |
246 | if (canSkip) { |
247 | return false; |
248 | } |
249 | } |
250 | |
251 | auto direct = fContext->priv().asDirectContext(); |
252 | if (!direct) { |
253 | if (info.fFinishedProc) { |
254 | info.fFinishedProc(info.fFinishedContext); |
255 | } |
256 | return false; // Can't flush while DDL recording |
257 | } |
258 | direct->priv().clientMappedBufferManager()->process(); |
259 | |
260 | GrGpu* gpu = direct->priv().getGpu(); |
261 | if (!gpu) { |
262 | if (info.fFinishedProc) { |
263 | info.fFinishedProc(info.fFinishedContext); |
264 | } |
265 | return false; // Can't flush while DDL recording |
266 | } |
267 | |
268 | fFlushing = true; |
269 | |
270 | auto resourceProvider = direct->priv().resourceProvider(); |
271 | auto resourceCache = direct->priv().getResourceCache(); |
272 | |
273 | // Semi-usually the GrRenderTasks are already closed at this point, but sometimes Ganesh needs |
274 | // to flush mid-draw. In that case, the SkGpuDevice's opsTasks won't be closed but need to be |
275 | // flushed anyway. Closing such opsTasks here will mean new ones will be created to replace them |
276 | // if the SkGpuDevice(s) write to them again. |
277 | fDAG.closeAll(fContext->priv().caps()); |
278 | fActiveOpsTask = nullptr; |
279 | |
280 | fDAG.prepForFlush(); |
281 | if (!fCpuBufferCache) { |
282 | // We cache more buffers when the backend is using client side arrays. Otherwise, we |
283 | // expect each pool will use a CPU buffer as a staging buffer before uploading to a GPU |
284 | // buffer object. Each pool only requires one staging buffer at a time. |
285 | int maxCachedBuffers = fContext->priv().caps()->preferClientSideDynamicBuffers() ? 6 : 2; |
286 | fCpuBufferCache = GrBufferAllocPool::CpuBufferCache::Make(maxCachedBuffers); |
287 | } |
288 | |
289 | GrOpFlushState flushState(gpu, resourceProvider, &fTokenTracker, fCpuBufferCache); |
290 | |
291 | GrOnFlushResourceProvider onFlushProvider(this); |
292 | // TODO: AFAICT the only reason fFlushState is on GrDrawingManager rather than on the |
293 | // stack here is to preserve the flush tokens. |
294 | |
295 | // Prepare any onFlush op lists (e.g. atlases). |
296 | if (!fOnFlushCBObjects.empty()) { |
297 | fDAG.gatherIDs(&fFlushingRenderTaskIDs); |
298 | |
299 | for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) { |
300 | onFlushCBObject->preFlush(&onFlushProvider, fFlushingRenderTaskIDs.begin(), |
301 | fFlushingRenderTaskIDs.count()); |
302 | } |
303 | for (const auto& onFlushRenderTask : fOnFlushRenderTasks) { |
304 | onFlushRenderTask->makeClosed(*fContext->priv().caps()); |
305 | #ifdef SK_DEBUG |
306 | // OnFlush callbacks are invoked during flush, and are therefore expected to handle |
307 | // resource allocation & usage on their own. (No deferred or lazy proxies!) |
308 | onFlushRenderTask->visitTargetAndSrcProxies_debugOnly( |
309 | [](GrSurfaceProxy* p, GrMipMapped mipMapped) { |
310 | SkASSERT(!p->asTextureProxy() || !p->asTextureProxy()->texPriv().isDeferred()); |
311 | SkASSERT(!p->isLazy()); |
312 | if (p->requiresManualMSAAResolve()) { |
313 | // The onFlush callback is responsible for ensuring MSAA gets resolved. |
314 | SkASSERT(p->asRenderTargetProxy() && !p->asRenderTargetProxy()->isMSAADirty()); |
315 | } |
316 | if (GrMipMapped::kYes == mipMapped) { |
317 | // The onFlush callback is responsible for regenerating mips if needed. |
318 | SkASSERT(p->asTextureProxy() && !p->asTextureProxy()->mipMapsAreDirty()); |
319 | } |
320 | }); |
321 | #endif |
322 | onFlushRenderTask->prepare(&flushState); |
323 | } |
324 | } |
325 | |
326 | #if 0 |
327 | // Enable this to print out verbose GrOp information |
328 | SkDEBUGCODE(SkDebugf("onFlush renderTasks:")); |
329 | for (const auto& onFlushRenderTask : fOnFlushRenderTasks) { |
330 | SkDEBUGCODE(onFlushRenderTask->dump();) |
331 | } |
332 | SkDEBUGCODE(SkDebugf("Normal renderTasks:")); |
333 | for (int i = 0; i < fDAG.numRenderTasks(); ++i) { |
334 | SkDEBUGCODE(fDAG.renderTask(i)->dump();) |
335 | } |
336 | #endif |
337 | |
338 | int startIndex, stopIndex; |
339 | bool flushed = false; |
340 | |
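// Have the GrResourceAllocator assign GrSurfaces to the proxies the render tasks use. It hands
// back intervals of tasks ([startIndex, stopIndex)) that can be executed once their proxies
// have been instantiated.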
341 | { |
342 | GrResourceAllocator alloc(resourceProvider SkDEBUGCODE(, fDAG.numRenderTasks())); |
343 | for (int i = 0; i < fDAG.numRenderTasks(); ++i) { |
344 | if (fDAG.renderTask(i)) { |
345 | fDAG.renderTask(i)->gatherProxyIntervals(&alloc); |
346 | } |
347 | alloc.markEndOfOpsTask(i); |
348 | } |
349 | alloc.determineRecyclability(); |
350 | |
351 | GrResourceAllocator::AssignError error = GrResourceAllocator::AssignError::kNoError; |
352 | int numRenderTasksExecuted = 0; |
353 | while (alloc.assign(&startIndex, &stopIndex, &error)) { |
354 | if (GrResourceAllocator::AssignError::kFailedProxyInstantiation == error) { |
355 | for (int i = startIndex; i < stopIndex; ++i) { |
356 | GrRenderTask* renderTask = fDAG.renderTask(i); |
357 | if (!renderTask) { |
358 | continue; |
359 | } |
360 | if (!renderTask->isInstantiated()) { |
361 | // No need to call the renderTask's handleInternalAllocationFailure |
362 | // since we will already skip executing the renderTask since it is not |
363 | // instantiated. |
364 | continue; |
365 | } |
366 | renderTask->handleInternalAllocationFailure(); |
367 | } |
368 | } |
369 | |
370 | if (this->executeRenderTasks( |
371 | startIndex, stopIndex, &flushState, &numRenderTasksExecuted)) { |
372 | flushed = true; |
373 | } |
374 | } |
375 | } |
376 | |
377 | #ifdef SK_DEBUG |
378 | for (int i = 0; i < fDAG.numRenderTasks(); ++i) { |
379 | // If there are any remaining opsTasks at this point, make sure they will not survive the |
380 | // flush. Otherwise we need to call endFlush() on them. |
381 | // http://skbug.com/7111 |
382 | SkASSERT(!fDAG.renderTask(i) || fDAG.renderTask(i)->unique()); |
383 | } |
384 | #endif |
385 | fDAG.reset(); |
386 | this->clearDDLTargets(); |
387 | |
388 | #ifdef SK_DEBUG |
389 | // In non-DDL mode this checks that all the flushed ops have been freed from the memory pool. |
390 | // When we move to partial flushes this assert will no longer be valid. |
391 | // In DDL mode this check is somewhat superfluous since the memory for most of the ops/opsTasks |
392 | // will be stored in the DDL's GrOpMemoryPools. |
393 | GrOpMemoryPool* opMemoryPool = fContext->priv().opMemoryPool(); |
394 | opMemoryPool->isEmpty(); |
395 | #endif |
396 | |
397 | gpu->executeFlushInfo(proxies, numProxies, access, info, externalRequests); |
398 | |
399 | // Give the cache a chance to purge resources that become purgeable due to flushing. |
400 | if (flushed) { |
401 | resourceCache->purgeAsNeeded(); |
402 | flushed = false; |
403 | } |
404 | for (GrOnFlushCallbackObject* onFlushCBObject : fOnFlushCBObjects) { |
405 | onFlushCBObject->postFlush(fTokenTracker.nextTokenToFlush(), fFlushingRenderTaskIDs.begin(), |
406 | fFlushingRenderTaskIDs.count()); |
407 | flushed = true; |
408 | } |
409 | if (flushed) { |
410 | resourceCache->purgeAsNeeded(); |
411 | } |
412 | fFlushingRenderTaskIDs.reset(); |
413 | fFlushing = false; |
414 | |
415 | return true; |
416 | } |
417 | |
418 | bool GrDrawingManager::submitToGpu(bool syncToCpu) { |
419 | if (fFlushing || this->wasAbandoned()) { |
420 | return false; |
421 | } |
422 | |
423 | auto direct = fContext->priv().asDirectContext(); |
424 | if (!direct) { |
425 | return false; // Can't submit while DDL recording |
426 | } |
427 | GrGpu* gpu = direct->priv().getGpu(); |
428 | return gpu->submitToGpu(syncToCpu); |
429 | } |
430 | |
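// Prepares and executes the render tasks in [startIndex, stopIndex), periodically submitting to
// the GPU so a large number of pending command buffers doesn't exhaust memory.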
431 | bool GrDrawingManager::executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState* flushState, |
432 | int* numRenderTasksExecuted) { |
433 | SkASSERT(startIndex <= stopIndex && stopIndex <= fDAG.numRenderTasks()); |
434 | |
435 | #if GR_FLUSH_TIME_OP_SPEW |
436 | SkDebugf("Flushing opsTask: %d to %d out of [%d, %d]\n", |
437 | startIndex, stopIndex, 0, fDAG.numRenderTasks()); |
438 | for (int i = startIndex; i < stopIndex; ++i) { |
439 | if (fDAG.renderTask(i)) { |
440 | fDAG.renderTask(i)->dump(true); |
441 | } |
442 | } |
443 | #endif |
444 | |
445 | bool anyRenderTasksExecuted = false; |
446 | |
447 | for (int i = startIndex; i < stopIndex; ++i) { |
448 | GrRenderTask* renderTask = fDAG.renderTask(i); |
449 | if (!renderTask || !renderTask->isInstantiated()) { |
450 | continue; |
451 | } |
452 | |
453 | SkASSERT(renderTask->deferredProxiesAreInstantiated()); |
454 | |
455 | renderTask->prepare(flushState); |
456 | } |
457 | |
458 | // Upload all data to the GPU |
459 | flushState->preExecuteDraws(); |
460 | |
461 | // For Vulkan, if we have too many oplists to be flushed we end up allocating a lot of resources |
462 | // for each command buffer associated with the oplists. If this gets too large we can cause the |
463 | // devices to go OOM. In practice we usually only hit this case in our tests, but to be safe we |
464 | // put a cap on the number of oplists we will execute before flushing to the GPU to relieve some |
465 | // memory pressure. |
466 | static constexpr int kMaxRenderTasksBeforeFlush = 100; |
467 | |
468 | // Execute the onFlush renderTasks first, if any. |
469 | for (sk_sp<GrRenderTask>& onFlushRenderTask : fOnFlushRenderTasks) { |
470 | if (!onFlushRenderTask->execute(flushState)) { |
471 | SkDebugf("WARNING: onFlushRenderTask failed to execute.\n"); |
472 | } |
473 | SkASSERT(onFlushRenderTask->unique()); |
474 | onFlushRenderTask = nullptr; |
475 | (*numRenderTasksExecuted)++; |
476 | if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) { |
477 | flushState->gpu()->submitToGpu(false); |
478 | *numRenderTasksExecuted = 0; |
479 | } |
480 | } |
481 | fOnFlushRenderTasks.reset(); |
482 | |
483 | // Execute the normal op lists. |
484 | for (int i = startIndex; i < stopIndex; ++i) { |
485 | GrRenderTask* renderTask = fDAG.renderTask(i); |
486 | if (!renderTask || !renderTask->isInstantiated()) { |
487 | continue; |
488 | } |
489 | |
490 | if (renderTask->execute(flushState)) { |
491 | anyRenderTasksExecuted = true; |
492 | } |
493 | (*numRenderTasksExecuted)++; |
494 | if (*numRenderTasksExecuted >= kMaxRenderTasksBeforeFlush) { |
495 | flushState->gpu()->submitToGpu(false); |
496 | *numRenderTasksExecuted = 0; |
497 | } |
498 | } |
499 | |
500 | SkASSERT(!flushState->opsRenderPass()); |
501 | SkASSERT(fTokenTracker.nextDrawToken() == fTokenTracker.nextTokenToFlush()); |
502 | |
503 | // We reset the flush state before the RenderTasks so that the last resources to be freed are |
504 | // those that are written to in the RenderTasks. This helps to make sure the most recently used |
505 | // resources are the last to be purged by the resource cache. |
506 | flushState->reset(); |
507 | |
508 | fDAG.removeRenderTasks(startIndex, stopIndex); |
509 | |
510 | return anyRenderTasksExecuted; |
511 | } |
512 | |
513 | GrSemaphoresSubmitted GrDrawingManager::flushSurfaces(GrSurfaceProxy* proxies[], int numProxies, |
514 | SkSurface::BackendSurfaceAccess access, |
515 | const GrFlushInfo& info) { |
516 | if (this->wasAbandoned()) { |
517 | return GrSemaphoresSubmitted::kNo; |
518 | } |
519 | SkDEBUGCODE(this->validate()); |
520 | SkASSERT(numProxies >= 0); |
521 | SkASSERT(!numProxies || proxies); |
522 | |
523 | auto direct = fContext->priv().asDirectContext(); |
524 | if (!direct) { |
525 | return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording |
526 | } |
527 | |
528 | GrGpu* gpu = direct->priv().getGpu(); |
529 | if (!gpu) { |
530 | return GrSemaphoresSubmitted::kNo; // Can't flush while DDL recording |
531 | } |
532 | |
533 | // TODO: It is important to upgrade the drawingmanager to just flushing the |
534 | // portion of the DAG required by 'proxies' in order to restore some of the |
535 | // semantics of this method. |
536 | bool didFlush = this->flush(proxies, numProxies, access, info, |
537 | GrPrepareForExternalIORequests()); |
538 | for (int i = 0; i < numProxies; ++i) { |
539 | GrSurfaceProxy* proxy = proxies[i]; |
540 | if (!proxy->isInstantiated()) { |
541 | continue; |
542 | } |
543 | // In the flushSurfaces case, we need to resolve MSAA immediately after flush. This is |
544 | // because the client will call through to this method when drawing into a target created by |
545 | // wrapBackendTextureAsRenderTarget, and will expect the original texture to be fully |
546 | // resolved upon return. |
547 | if (proxy->requiresManualMSAAResolve()) { |
548 | auto* rtProxy = proxy->asRenderTargetProxy(); |
549 | SkASSERT(rtProxy); |
550 | if (rtProxy->isMSAADirty()) { |
551 | SkASSERT(rtProxy->peekRenderTarget()); |
552 | gpu->resolveRenderTarget(rtProxy->peekRenderTarget(), rtProxy->msaaDirtyRect(), |
553 | GrGpu::ForExternalIO::kYes); |
554 | rtProxy->markMSAAResolved(); |
555 | } |
556 | } |
557 | // If, after a flush, any of the proxies of interest have dirty mipmaps, regenerate them in |
558 | // case their backend textures are being stolen. |
559 | // (This special case is exercised by the ReimportImageTextureWithMipLevels test.) |
560 | // FIXME: It may be more ideal to plumb down a "we're going to steal the backends" flag. |
561 | if (auto* textureProxy = proxy->asTextureProxy()) { |
562 | if (textureProxy->mipMapsAreDirty()) { |
563 | SkASSERT(textureProxy->peekTexture()); |
564 | gpu->regenerateMipMapLevels(textureProxy->peekTexture()); |
565 | textureProxy->markMipMapsClean(); |
566 | } |
567 | } |
568 | } |
569 | |
570 | SkDEBUGCODE(this->validate()); |
571 | |
572 | bool submitted = false; |
573 | if (didFlush) { |
574 | submitted = this->submitToGpu(SkToBool(info.fFlags & kSyncCpu_GrFlushFlag)); |
575 | } |
576 | |
577 | if (!submitted || (!direct->priv().caps()->semaphoreSupport() && info.fNumSemaphores)) { |
578 | return GrSemaphoresSubmitted::kNo; |
579 | } |
580 | return GrSemaphoresSubmitted::kYes; |
581 | } |
582 | |
583 | void GrDrawingManager::addOnFlushCallbackObject(GrOnFlushCallbackObject* onFlushCBObject) { |
584 | fOnFlushCBObjects.push_back(onFlushCBObject); |
585 | } |
586 | |
587 | #if GR_TEST_UTILS |
588 | void GrDrawingManager::testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject* cb) { |
589 | int n = std::find(fOnFlushCBObjects.begin(), fOnFlushCBObjects.end(), cb) - |
590 | fOnFlushCBObjects.begin(); |
591 | SkASSERT(n < fOnFlushCBObjects.count()); |
592 | fOnFlushCBObjects.removeShuffle(n); |
593 | } |
594 | #endif |
595 | |
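// Moves the recorded render tasks (along with the arenas, program data, and any pending CCPR
// paths) out of this drawing manager and into 'ddl'.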
596 | void GrDrawingManager::moveRenderTasksToDDL(SkDeferredDisplayList* ddl) { |
597 | SkDEBUGCODE(this->validate()); |
598 | |
599 | // no renderTask should receive a new command after this |
600 | fDAG.closeAll(fContext->priv().caps()); |
601 | fActiveOpsTask = nullptr; |
602 | |
603 | fDAG.swap(&ddl->fRenderTasks); |
604 | SkASSERT(!fDAG.numRenderTasks()); |
605 | |
606 | for (auto& renderTask : ddl->fRenderTasks) { |
607 | renderTask->prePrepare(fContext); |
608 | } |
609 | |
610 | ddl->fArenas = std::move(fContext->priv().detachArenas()); |
611 | |
612 | fContext->priv().detachProgramData(&ddl->fProgramData); |
613 | |
614 | if (fPathRendererChain) { |
615 | if (auto ccpr = fPathRendererChain->getCoverageCountingPathRenderer()) { |
616 | ddl->fPendingPaths = ccpr->detachPendingPaths(); |
617 | } |
618 | } |
619 | |
620 | SkDEBUGCODE(this->validate()); |
621 | } |
622 | |
623 | void GrDrawingManager::copyRenderTasksFromDDL(const SkDeferredDisplayList* ddl, |
624 | GrRenderTargetProxy* newDest) { |
625 | SkDEBUGCODE(this->validate()); |
626 | |
627 | if (fActiveOpsTask) { |
628 | // This is a temporary fix for the partial-MDB world. In that world we're not |
629 | // reordering so ops that (in the single opsTask world) would've just glommed onto the |
630 | // end of the single opsTask but referred to a far earlier RT need to appear in their |
631 | // own opsTask. |
632 | fActiveOpsTask->makeClosed(*fContext->priv().caps()); |
633 | fActiveOpsTask = nullptr; |
634 | } |
635 | |
636 | this->addDDLTarget(newDest); |
637 | |
638 | // Here we jam the proxy that backs the current replay SkSurface into the LazyProxyData. |
639 | // The lazy proxy that references it (in the copied opsTasks) will steal its GrTexture. |
640 | ddl->fLazyProxyData->fReplayDest = newDest; |
641 | |
642 | if (ddl->fPendingPaths.size()) { |
643 | GrCoverageCountingPathRenderer* ccpr = this->getCoverageCountingPathRenderer(); |
644 | |
645 | ccpr->mergePendingPaths(ddl->fPendingPaths); |
646 | } |
647 | |
648 | fDAG.add(ddl->fRenderTasks); |
649 | |
650 | SkDEBUGCODE(this->validate()); |
651 | } |
652 | |
653 | #ifdef SK_DEBUG |
654 | void GrDrawingManager::validate() const { |
655 | if (fDAG.sortingRenderTasks() && fReduceOpsTaskSplitting) { |
656 | SkASSERT(!fActiveOpsTask); |
657 | } else { |
658 | if (fActiveOpsTask) { |
659 | SkASSERT(!fDAG.empty()); |
660 | SkASSERT(!fActiveOpsTask->isClosed()); |
661 | SkASSERT(fActiveOpsTask == fDAG.back()); |
662 | } |
663 | |
664 | for (int i = 0; i < fDAG.numRenderTasks(); ++i) { |
665 | if (fActiveOpsTask != fDAG.renderTask(i)) { |
666 | // The resolveTask associated with the activeTask remains open for as long as the |
667 | // activeTask does. |
668 | bool isActiveResolveTask = |
669 | fActiveOpsTask && fActiveOpsTask->fTextureResolveTask == fDAG.renderTask(i); |
670 | SkASSERT(isActiveResolveTask || fDAG.renderTask(i)->isClosed()); |
671 | } |
672 | } |
673 | |
674 | if (!fDAG.empty() && !fDAG.back()->isClosed()) { |
675 | SkASSERT(fActiveOpsTask == fDAG.back()); |
676 | } |
677 | } |
678 | } |
679 | #endif |
680 | |
681 | void GrDrawingManager::closeRenderTasksForNewRenderTask(GrSurfaceProxy* target) { |
682 | if (target && fDAG.sortingRenderTasks() && fReduceOpsTaskSplitting) { |
683 | // In this case we need to close all the renderTasks that rely on the current contents of |
684 | // 'target'. That is bc we're going to update the content of the proxy so they need to be |
685 | // split in case they use both the old and new content. (This is a bit of an overkill: they |
686 | // really only need to be split if they ever reference proxy's contents again but that is |
687 | // hard to predict/handle). |
688 | if (GrRenderTask* lastRenderTask = target->getLastRenderTask()) { |
689 | lastRenderTask->closeThoseWhoDependOnMe(*fContext->priv().caps()); |
690 | } |
691 | } else if (fActiveOpsTask) { |
692 | // This is a temporary fix for the partial-MDB world. In that world we're not |
693 | // reordering so ops that (in the single opsTask world) would've just glommed onto the |
694 | // end of the single opsTask but referred to a far earlier RT need to appear in their |
695 | // own opsTask. |
696 | fActiveOpsTask->makeClosed(*fContext->priv().caps()); |
697 | fActiveOpsTask = nullptr; |
698 | } |
699 | } |
700 | |
701 | sk_sp<GrOpsTask> GrDrawingManager::newOpsTask(GrSurfaceProxyView surfaceView, |
702 | bool managedOpsTask) { |
703 | SkDEBUGCODE(this->validate()); |
704 | SkASSERT(fContext); |
705 | |
706 | GrSurfaceProxy* proxy = surfaceView.proxy(); |
707 | this->closeRenderTasksForNewRenderTask(proxy); |
708 | |
709 | sk_sp<GrOpsTask> opsTask(new GrOpsTask(fContext->priv().arenas(), |
710 | std::move(surfaceView), |
711 | fContext->priv().auditTrail())); |
712 | SkASSERT(proxy->getLastRenderTask() == opsTask.get()); |
713 | |
714 | if (managedOpsTask) { |
715 | fDAG.add(opsTask); |
716 | |
717 | if (!fDAG.sortingRenderTasks() || !fReduceOpsTaskSplitting) { |
718 | fActiveOpsTask = opsTask.get(); |
719 | } |
720 | } |
721 | |
722 | SkDEBUGCODE(this->validate()); |
723 | return opsTask; |
724 | } |
725 | |
726 | GrTextureResolveRenderTask* GrDrawingManager::newTextureResolveRenderTask(const GrCaps& caps) { |
727 | // Unlike in the "new opsTask" case, we do not want to close the active opsTask, nor (if we are |
728 | // in sorting and opsTask reduction mode) the render tasks that depend on any proxy's current |
729 | // state. This is because those opsTasks can still receive new ops and because if they refer to |
730 | // the mipmapped version of 'proxy', they will then come to depend on the render task being |
731 | // created here. |
732 | // |
733 | // Add the new textureResolveTask before the fActiveOpsTask (if not in |
734 | // sorting/opsTask-splitting-reduction mode) because it will depend upon this resolve task. |
735 | // NOTE: Putting it here will also reduce the amount of work required by the topological sort. |
736 | return static_cast<GrTextureResolveRenderTask*>(fDAG.addBeforeLast( |
737 | sk_make_sp<GrTextureResolveRenderTask>())); |
738 | } |
739 | |
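// Adds a render task that waits on 'semaphores' before any subsequent work that uses 'proxy'
// is executed.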
740 | void GrDrawingManager::newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy, |
741 | std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores, |
742 | int numSemaphores) { |
743 | SkDEBUGCODE(this->validate()); |
744 | SkASSERT(fContext); |
745 | |
746 | const GrCaps& caps = *fContext->priv().caps(); |
747 | |
748 | sk_sp<GrWaitRenderTask> waitTask = sk_make_sp<GrWaitRenderTask>(GrSurfaceProxyView(proxy), |
749 | std::move(semaphores), |
750 | numSemaphores); |
751 | if (fReduceOpsTaskSplitting) { |
752 | GrRenderTask* lastTask = proxy->getLastRenderTask(); |
753 | if (lastTask && !lastTask->isClosed()) { |
754 | // We directly make the currently open renderTask depend on waitTask instead of using |
755 | // the proxy version of addDependency. The waitTask will never need to trigger any |
756 | // resolves or mip map generation which is the main advantage of going through the proxy |
757 | // version. Additionally we would've had to temporarily set the wait task as the |
758 | // lastRenderTask on the proxy, add the dependency, and then reset the lastRenderTask to |
759 | // lastTask. Additionally we add all dependencies of lastTask to waitTask so that the |
760 | // waitTask doesn't get reordered before them and unnecessarily block those tasks. |
761 | // Note: Any previous Ops already in lastTask will get blocked by the wait semaphore |
762 | // even though they don't need to be for correctness. |
763 | |
764 | // Make sure we add the dependencies of lastTask to waitTask first or else we'll get a |
765 | // circular self dependency of waitTask on waitTask. |
766 | waitTask->addDependenciesFromOtherTask(lastTask); |
767 | lastTask->addDependency(waitTask.get()); |
768 | } else { |
769 | // If there is a last task we set the waitTask to depend on it so that it doesn't get |
770 | // reordered in front of the lastTask causing the lastTask to be blocked by the |
771 | // semaphore. Again we directly just go through adding the dependency to the task and |
772 | // not the proxy since we don't need to worry about resolving anything. |
773 | if (lastTask) { |
774 | waitTask->addDependency(lastTask); |
775 | } |
776 | proxy->setLastRenderTask(waitTask.get()); |
777 | } |
778 | fDAG.add(waitTask); |
779 | } else { |
780 | if (fActiveOpsTask && (fActiveOpsTask->fTargetView.proxy() == proxy.get())) { |
781 | SkASSERT(proxy->getLastRenderTask() == fActiveOpsTask); |
782 | fDAG.addBeforeLast(waitTask); |
783 | // In this case we keep the current renderTask open but just insert the new waitTask |
784 | // before it in the list. The waitTask will never need to trigger any resolves or mip |
785 | // map generation which is the main advantage of going through the proxy version. |
786 | // Additionally we would've had to temporarily set the wait task as the lastRenderTask |
787 | // on the proxy, add the dependency, and then reset the lastRenderTask to |
788 | // fActiveOpsTask. Additionally we make the waitTask depend on all of fActiveOpsTask |
789 | // dependencies so that we don't unnecessarily reorder the waitTask before them. |
790 | // Note: Any previous Ops already in fActiveOpsTask will get blocked by the wait |
791 | // semaphore even though they don't need to be for correctness. |
792 | |
793 | // Make sure we add the dependencies of fActiveOpsTask to waitTask first or else we'll |
794 | // get a circular self dependency of waitTask on waitTask. |
795 | waitTask->addDependenciesFromOtherTask(fActiveOpsTask); |
796 | fActiveOpsTask->addDependency(waitTask.get()); |
797 | } else { |
798 | // In this case we just close the previous RenderTask and start and append the waitTask |
799 | // to the DAG. Since it is the last task now we call setLastRenderTask on the proxy. If |
800 | // there is a lastTask on the proxy we make waitTask depend on that task. This |
801 | // dependency isn't strictly needed but it does keep the DAG from reordering the |
802 | // waitTask earlier and blocking more tasks. |
803 | if (GrRenderTask* lastTask = proxy->getLastRenderTask()) { |
804 | waitTask->addDependency(lastTask); |
805 | } |
806 | proxy->setLastRenderTask(waitTask.get()); |
807 | this->closeRenderTasksForNewRenderTask(proxy.get()); |
808 | fDAG.add(waitTask); |
809 | } |
810 | } |
811 | waitTask->makeClosed(caps); |
812 | |
813 | SkDEBUGCODE(this->validate()); |
814 | } |
815 | |
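// Records a task that transfers the pixels in 'srcRect' of 'srcProxy' into 'dstBuffer' at
// 'dstOffset', converting from 'surfaceColorType' to 'dstColorType'.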
816 | void GrDrawingManager::newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy, |
817 | const SkIRect& srcRect, |
818 | GrColorType surfaceColorType, |
819 | GrColorType dstColorType, |
820 | sk_sp<GrGpuBuffer> dstBuffer, |
821 | size_t dstOffset) { |
822 | SkDEBUGCODE(this->validate()); |
823 | SkASSERT(fContext); |
824 | // This copies from srcProxy to dstBuffer so it doesn't have a real target. |
825 | this->closeRenderTasksForNewRenderTask(nullptr); |
826 | |
827 | GrRenderTask* task = fDAG.add(sk_make_sp<GrTransferFromRenderTask>( |
828 | srcProxy, srcRect, surfaceColorType, dstColorType, std::move(dstBuffer), dstOffset)); |
829 | |
830 | const GrCaps& caps = *fContext->priv().caps(); |
831 | |
832 | // We always say GrMipMapped::kNo here since we are always just copying from the base layer. We |
833 | // don't need to make sure the whole mip map chain is valid. |
834 | task->addDependency(srcProxy.get(), GrMipMapped::kNo, GrTextureResolveManager(this), caps); |
835 | task->makeClosed(caps); |
836 | |
837 | // We have closed the previous active oplist but since a new oplist isn't being added there |
838 | // shouldn't be an active one. |
839 | SkASSERT(!fActiveOpsTask); |
840 | SkDEBUGCODE(this->validate()); |
841 | } |
842 | |
843 | bool GrDrawingManager::newCopyRenderTask(GrSurfaceProxyView srcView, |
844 | const SkIRect& srcRect, |
845 | GrSurfaceProxyView dstView, |
846 | const SkIPoint& dstPoint) { |
847 | SkDEBUGCODE(this->validate()); |
848 | SkASSERT(fContext); |
849 | |
850 | this->closeRenderTasksForNewRenderTask(dstView.proxy()); |
851 | const GrCaps& caps = *fContext->priv().caps(); |
852 | |
853 | GrSurfaceProxy* srcProxy = srcView.proxy(); |
854 | |
855 | GrRenderTask* task = |
856 | fDAG.add(GrCopyRenderTask::Make(std::move(srcView), srcRect, std::move(dstView), |
857 | dstPoint, &caps)); |
858 | if (!task) { |
859 | return false; |
860 | } |
861 | |
862 | // We always say GrMipMapped::kNo here since we are always just copying from the base layer to |
863 | // another base layer. We don't need to make sure the whole mip map chain is valid. |
864 | task->addDependency(srcProxy, GrMipMapped::kNo, GrTextureResolveManager(this), caps); |
865 | task->makeClosed(caps); |
866 | |
867 | // We have closed the previous active oplist but since a new oplist isn't being added there |
868 | // shouldn't be an active one. |
869 | SkASSERT(!fActiveOpsTask); |
870 | SkDEBUGCODE(this->validate()); |
871 | return true; |
872 | } |
873 | |
874 | GrTextContext* GrDrawingManager::getTextContext() { |
875 | if (!fTextContext) { |
876 | fTextContext = GrTextContext::Make(fOptionsForTextContext); |
877 | } |
878 | |
879 | return fTextContext.get(); |
880 | } |
881 | |
882 | /* |
883 | * This method finds a path renderer that can draw the specified path on |
884 | * the provided target. |
885 | * Due to its expense, the software path renderer has been split out so it can |
886 | * be individually allowed/disallowed via the "allowSW" boolean. |
887 | */ |
888 | GrPathRenderer* GrDrawingManager::getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args, |
889 | bool allowSW, |
890 | GrPathRendererChain::DrawType drawType, |
891 | GrPathRenderer::StencilSupport* stencilSupport) { |
892 | |
893 | if (!fPathRendererChain) { |
894 | fPathRendererChain.reset(new GrPathRendererChain(fContext, fOptionsForPathRendererChain)); |
895 | } |
896 | |
897 | GrPathRenderer* pr = fPathRendererChain->getPathRenderer(args, drawType, stencilSupport); |
898 | if (!pr && allowSW) { |
899 | auto swPR = this->getSoftwarePathRenderer(); |
900 | if (GrPathRenderer::CanDrawPath::kNo != swPR->canDrawPath(args)) { |
901 | pr = swPR; |
902 | } |
903 | } |
904 | |
905 | return pr; |
906 | } |
907 | |
908 | GrPathRenderer* GrDrawingManager::getSoftwarePathRenderer() { |
909 | if (!fSoftwarePathRenderer) { |
910 | fSoftwarePathRenderer.reset( |
911 | new GrSoftwarePathRenderer(fContext->priv().proxyProvider(), |
912 | fOptionsForPathRendererChain.fAllowPathMaskCaching)); |
913 | } |
914 | return fSoftwarePathRenderer.get(); |
915 | } |
916 | |
917 | GrCoverageCountingPathRenderer* GrDrawingManager::getCoverageCountingPathRenderer() { |
918 | if (!fPathRendererChain) { |
919 | fPathRendererChain.reset(new GrPathRendererChain(fContext, fOptionsForPathRendererChain)); |
920 | } |
921 | return fPathRendererChain->getCoverageCountingPathRenderer(); |
922 | } |
923 | |
924 | void GrDrawingManager::flushIfNecessary() { |
925 | auto direct = fContext->priv().asDirectContext(); |
926 | if (!direct) { |
927 | return; |
928 | } |
929 | |
930 | auto resourceCache = direct->priv().getResourceCache(); |
931 | if (resourceCache && resourceCache->requestsFlush()) { |
932 | if (this->flush(nullptr, 0, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo(), |
933 | GrPrepareForExternalIORequests())) { |
934 | this->submitToGpu(false); |
935 | } |
936 | resourceCache->purgeAsNeeded(); |
937 | } |
938 | } |
939 | |
940 | |