/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDrawingManager_DEFINED
#define GrDrawingManager_DEFINED

#include "include/core/SkSurface.h"
#include "include/private/SkTArray.h"
#include "include/private/SkTHash.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrDeferredUpload.h"
#include "src/gpu/GrHashMapWithCache.h"
#include "src/gpu/GrPathRenderer.h"
#include "src/gpu/GrPathRendererChain.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrSurfaceProxy.h"

// Enabling this will print out which path renderers are being chosen
#define GR_PATH_RENDERER_SPEW 0

class GrCaps;
class GrCoverageCountingPathRenderer;
class GrDirectContext;
class GrGpuBuffer;
class GrOnFlushCallbackObject;
class GrOpFlushState;
class GrOpsTask;
class GrRecordingContext;
class GrRenderTargetContext;
class GrRenderTargetProxy;
class GrRenderTask;
class GrSemaphore;
class GrSoftwarePathRenderer;
class GrSurfaceContext;
class GrSurfaceProxyView;
class GrTextureResolveRenderTask;
class SkDeferredDisplayList;

class GrDrawingManager {
public:
    ~GrDrawingManager();

    void freeGpuResources();

    // A managed opsTask is controlled by the drawing manager (i.e., sorted & flushed with the
    // others). An unmanaged one is created and used by the onFlushCallback.
    sk_sp<GrOpsTask> newOpsTask(GrSurfaceProxyView, bool managedOpsTask);
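
    // A minimal usage sketch (hypothetical call site; 'view' is assumed to be a valid
    // GrSurfaceProxyView owned by the caller):
    //
    //   sk_sp<GrOpsTask> opsTask = drawingMgr->newOpsTask(std::move(view),
    //                                                     /*managedOpsTask=*/true);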

    // Create a render task that can resolve MSAA and/or regenerate mipmap levels on proxies.
    // This method will only add the new render task to the list. It is up to the caller to
    // call addProxy() on the returned object.
    GrTextureResolveRenderTask* newTextureResolveRenderTask(const GrCaps&);
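
    // A sketch of the intended pattern (hypothetical call site; see
    // GrTextureResolveRenderTask::addProxy() for the exact parameters):
    //
    //   GrTextureResolveRenderTask* resolveTask = this->newTextureResolveRenderTask(caps);
    //   resolveTask->addProxy(/* each proxy needing an MSAA resolve or mipmap regen */);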

    // Create a new render task that will cause the gpu to wait on semaphores before executing
    // any more RenderTasks that target 'proxy'. It is possible for this wait to also block
    // additional work (even to other proxies) that has already been recorded or will be
    // recorded later. The only guarantee is that future work to the passed in proxy will wait
    // on the semaphores to be signaled.
    void newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
                           std::unique_ptr<std::unique_ptr<GrSemaphore>[]>,
                           int numSemaphores);
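
    // Sketch (hypothetical call site; 'semaphores' is an owned array holding 'numSemaphores'
    // entries):
    //
    //   drawingMgr->newWaitRenderTask(std::move(proxy), std::move(semaphores), numSemaphores);
    //   // Later render tasks that target 'proxy' will wait for the semaphores to signal.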

    // Create a new render task which copies the pixels from srcProxy into dstBuffer. This is
    // used to support the asynchronous readback API. srcRect is the region of srcProxy to be
    // copied. surfaceColorType says how we should interpret the data when reading back from
    // the source. dstColorType describes how the data should be stored in dstBuffer.
    // dstOffset is the offset into dstBuffer where we will start writing data.
    void newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy, const SkIRect& srcRect,
                                   GrColorType surfaceColorType, GrColorType dstColorType,
                                   sk_sp<GrGpuBuffer> dstBuffer, size_t dstOffset);
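
    // Sketch of an async readback (hypothetical values; buffer creation and sizing elided):
    //
    //   drawingMgr->newTransferFromRenderTask(srcProxy, SkIRect::MakeWH(16, 16),
    //                                         GrColorType::kRGBA_8888, // read src as this
    //                                         GrColorType::kRGBA_8888, // store as this
    //                                         std::move(dstBuffer), /*dstOffset=*/0);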

    // Creates a new render task which copies a pixel rectangle from srcView into dstView. The
    // src pixels copied are specified by srcRect. They are copied to a rect of the same size
    // in dstView with top left at dstPoint. If srcRect is clipped by the src bounds, the
    // corresponding pixels in the dst rect are left unmodified. This method is not guaranteed
    // to succeed; it depends on the surface types, formats, and backend-specific limitations.
    bool newCopyRenderTask(GrSurfaceProxyView srcView, const SkIRect& srcRect,
                           GrSurfaceProxyView dstView, const SkIPoint& dstPoint);
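
    // Sketch (hypothetical views; on failure the dst is untouched and the caller should fall
    // back, e.g. to a draw):
    //
    //   if (!drawingMgr->newCopyRenderTask(std::move(srcView), SkIRect::MakeWH(32, 32),
    //                                      std::move(dstView), SkIPoint::Make(0, 0))) {
    //       // handle unsupported copy
    //   }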

    GrRecordingContext* getContext() { return fContext; }

    GrPathRenderer* getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                    bool allowSW,
                                    GrPathRendererChain::DrawType drawType,
                                    GrPathRenderer::StencilSupport* stencilSupport = nullptr);
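
    // Sketch (hypothetical call site; the CanDrawPathArgs fields are filled out by the caller
    // and elided here):
    //
    //   GrPathRenderer::StencilSupport support;
    //   GrPathRenderer* pr = drawingMgr->getPathRenderer(
    //           args, /*allowSW=*/true, GrPathRendererChain::DrawType::kColor, &support);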

    GrPathRenderer* getSoftwarePathRenderer();

    // Returns a direct pointer to the coverage counting path renderer, or null if it is not
    // supported or not enabled.
    GrCoverageCountingPathRenderer* getCoverageCountingPathRenderer();

    void flushIfNecessary();

    static bool ProgramUnitTest(GrDirectContext*, int maxStages, int maxLevels);

    GrSemaphoresSubmitted flushSurfaces(GrSurfaceProxy* proxies[],
                                        int cnt,
                                        SkSurface::BackendSurfaceAccess access,
                                        const GrFlushInfo& info,
                                        const GrBackendSurfaceMutableState* newState);
    GrSemaphoresSubmitted flushSurface(GrSurfaceProxy* proxy,
                                       SkSurface::BackendSurfaceAccess access,
                                       const GrFlushInfo& info,
                                       const GrBackendSurfaceMutableState* newState) {
        return this->flushSurfaces(&proxy, 1, access, info, newState);
    }
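
    // Sketch (hypothetical: flush a single proxy with default flush info and no change to the
    // backend surface state):
    //
    //   GrFlushInfo info;
    //   GrSemaphoresSubmitted submitted = drawingMgr->flushSurface(
    //           proxy, SkSurface::BackendSurfaceAccess::kNoAccess, info, nullptr);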

    void addOnFlushCallbackObject(GrOnFlushCallbackObject*);

#if GR_TEST_UTILS
    void testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject*);
#endif

    GrRenderTask* getLastRenderTask(const GrSurfaceProxy*) const;
    GrOpsTask* getLastOpsTask(const GrSurfaceProxy*) const;
    void setLastRenderTask(const GrSurfaceProxy*, GrRenderTask*);

    void moveRenderTasksToDDL(SkDeferredDisplayList* ddl);
    void copyRenderTasksFromDDL(sk_sp<const SkDeferredDisplayList>, GrRenderTargetProxy* newDest);

private:
    // This class encapsulates maintenance and manipulation of the drawing manager's DAG of
    // renderTasks.
    class RenderTaskDAG {
    public:
        RenderTaskDAG(bool sortRenderTasks);
        ~RenderTaskDAG();

        // Currently, when explicitly allocating resources, this call will topologically sort
        // the GrRenderTasks.
        // MDB TODO: remove once incremental GrRenderTask sorting is enabled
        void prepForFlush();

        void closeAll(const GrCaps* caps);

        void gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const;

        void reset();

        // This call forcibly removes GrRenderTasks from the DAG. It is problematic because it
        // just removes the GrRenderTasks but doesn't clean up any referring pointers (i.e.,
        // dependency pointers in the DAG). It works right now because it is only called after
        // the topological sort is complete (so the dangling pointers aren't used).
        void rawRemoveRenderTasks(int startIndex, int stopIndex);

        bool empty() const { return fRenderTasks.empty(); }
        int numRenderTasks() const { return fRenderTasks.count(); }

        bool isUsed(GrSurfaceProxy*) const;

        GrRenderTask* renderTask(int index) { return fRenderTasks[index].get(); }
        const GrRenderTask* renderTask(int index) const { return fRenderTasks[index].get(); }

        GrRenderTask* back() { return fRenderTasks.back().get(); }
        const GrRenderTask* back() const { return fRenderTasks.back().get(); }

        GrRenderTask* add(sk_sp<GrRenderTask>);
        GrRenderTask* addBeforeLast(sk_sp<GrRenderTask>);
        void add(const SkTArray<sk_sp<GrRenderTask>>&);

        void swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks);

        bool sortingRenderTasks() const { return fSortRenderTasks; }

    private:
        SkTArray<sk_sp<GrRenderTask>> fRenderTasks;
        bool fSortRenderTasks;
    };

    GrDrawingManager(GrRecordingContext*,
                     const GrPathRendererChain::Options&,
                     bool sortRenderTasks,
                     bool reduceOpsTaskSplitting);

    bool wasAbandoned() const;

    // Closes the target's dependent render tasks (or, if not in
    // sorting/opsTask-splitting-reduction mode, closes fActiveOpsTask) in preparation for us
    // opening a new opsTask that will write to 'target'.
    void closeRenderTasksForNewRenderTask(GrSurfaceProxy* target);

    // Returns true if any GrRenderTasks were actually executed; false otherwise.
    bool executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState*,
                            int* numRenderTasksExecuted);

    void removeRenderTasks(int startIndex, int stopIndex);

    bool flush(GrSurfaceProxy* proxies[],
               int numProxies,
               SkSurface::BackendSurfaceAccess access,
               const GrFlushInfo&,
               const GrBackendSurfaceMutableState* newState);

    bool submitToGpu(bool syncToCpu);

    SkDEBUGCODE(void validate() const);

    friend class GrContext; // access to: flush & cleanup
    friend class GrContextPriv; // access to: flush
    friend class GrOnFlushResourceProvider; // this is just a shallow wrapper around this class
    friend class GrRecordingContext; // access to: ctor
    friend class SkImage; // for access to: flush

    static const int kNumPixelGeometries = 5; // The different pixel geometries
    static const int kNumDFTOptions = 2;      // DFT or no DFT

    GrRecordingContext* fContext;
    GrPathRendererChain::Options fOptionsForPathRendererChain;

    // This cache is used by both the vertex and index pools. It reuses memory across multiple
    // flushes.
    sk_sp<GrBufferAllocPool::CpuBufferCache> fCpuBufferCache;

    RenderTaskDAG fDAG;
    GrOpsTask* fActiveOpsTask = nullptr;
    // These are the IDs of the render tasks currently being flushed (in internalFlush)
    SkSTArray<8, uint32_t, true> fFlushingRenderTaskIDs;
    // These are the new renderTasks generated by the onFlush CBs
    SkSTArray<4, sk_sp<GrRenderTask>> fOnFlushRenderTasks;

    std::unique_ptr<GrPathRendererChain> fPathRendererChain;
    sk_sp<GrSoftwarePathRenderer> fSoftwarePathRenderer;

    GrTokenTracker fTokenTracker;
    bool fFlushing;
    bool fReduceOpsTaskSplitting;

    SkTArray<GrOnFlushCallbackObject*> fOnFlushCBObjects;

    void addDDLTarget(GrSurfaceProxy* newTarget, GrRenderTargetProxy* ddlTarget) {
        fDDLTargets.set(newTarget->uniqueID().asUInt(), ddlTarget);
    }
    bool isDDLTarget(GrSurfaceProxy* newTarget) {
        return SkToBool(fDDLTargets.find(newTarget->uniqueID().asUInt()));
    }
    GrRenderTargetProxy* getDDLTarget(GrSurfaceProxy* newTarget) {
        auto entry = fDDLTargets.find(newTarget->uniqueID().asUInt());
        return entry ? *entry : nullptr;
    }
    void clearDDLTargets() { fDDLTargets.reset(); }

    // We play a trick with lazy proxies to retarget the base target of a DDL to the SkSurface
    // it is replayed on. 'fDDLTargets' stores this mapping from SkSurface unique proxy ID
    // to the DDL's lazy proxy.
    // Note: we do not expect a whole lot of these per flush
    SkTHashMap<uint32_t, GrRenderTargetProxy*> fDDLTargets;
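
    // Sketch of how the helpers above use this map during DDL replay (simplified,
    // hypothetical call sites):
    //
    //   this->addDDLTarget(surfaceProxy, ddlLazyTargetProxy); // record the stand-in
    //   ...
    //   if (this->isDDLTarget(proxy)) {
    //       GrRenderTargetProxy* ddlTarget = this->getDDLTarget(proxy);
    //   }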

    struct SurfaceIDKeyTraits {
        static uint32_t GetInvalidKey() {
            return GrSurfaceProxy::UniqueID::InvalidID().asUInt();
        }
    };

    GrHashMapWithCache<uint32_t, GrRenderTask*, SurfaceIDKeyTraits, GrCheapHash> fLastRenderTasks;
};

#endif