/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrDrawingManager_DEFINED
#define GrDrawingManager_DEFINED

#include <set>
#include "include/core/SkSurface.h"
#include "include/private/SkTArray.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrDeferredUpload.h"
#include "src/gpu/GrPathRenderer.h"
#include "src/gpu/GrPathRendererChain.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/text/GrTextContext.h"

class GrCoverageCountingPathRenderer;
class GrOnFlushCallbackObject;
class GrOpFlushState;
class GrOpsTask;
class GrRecordingContext;
class GrRenderTargetContext;
class GrRenderTargetProxy;
class GrSoftwarePathRenderer;
class GrSurfaceContext;
class GrSurfaceProxyView;
class GrTextureResolveRenderTask;
class SkDeferredDisplayList;

class GrDrawingManager {
public:
    ~GrDrawingManager();

    void freeGpuResources();

    // A managed opsTask is controlled by the drawing manager (i.e., sorted & flushed with the
    // others). An unmanaged one is created and used by the onFlushCallback.
    sk_sp<GrOpsTask> newOpsTask(GrSurfaceProxyView, bool managedOpsTask);
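
    // Example: recording a managed opsTask (a hedged sketch; 'drawingMgr' and 'writeView' are
    // hypothetical locals, and the GrSurfaceProxyView setup is elided):
    //
    //     sk_sp<GrOpsTask> opsTask =
    //             drawingMgr->newOpsTask(std::move(writeView), /*managedOpsTask=*/true);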

    // Create a render task that can resolve MSAA and/or regenerate mipmap levels on proxies. This
    // method will only add the new render task to the list. It is up to the caller to call
    // addProxy() on the returned object.
    GrTextureResolveRenderTask* newTextureResolveRenderTask(const GrCaps&);
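
    // Example: creating a resolve task and attaching a proxy to it (a hedged sketch;
    // 'drawingMgr', 'proxyRef', and 'caps' are hypothetical, and the exact addProxy()
    // signature and ResolveFlags value are assumptions):
    //
    //     GrTextureResolveRenderTask* resolveTask =
    //             drawingMgr->newTextureResolveRenderTask(caps);
    //     resolveTask->addProxy(std::move(proxyRef),
    //                           GrSurfaceProxy::ResolveFlags::kMSAA, caps);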

    // Create a new render task that will cause the gpu to wait on semaphores before executing
    // any more RenderTasks that target the proxy. It is possible for this wait to also block
    // additional work (even to other proxies) that has already been recorded or will be recorded
    // later. The only guarantee is that future work on the passed-in proxy will wait on the
    // semaphores to be signaled.
    void newWaitRenderTask(sk_sp<GrSurfaceProxy> proxy,
                           std::unique_ptr<std::unique_ptr<GrSemaphore>[]>,
                           int numSemaphores);
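
    // Example: making future work on 'proxyRef' wait on two semaphores (a hedged sketch;
    // semaphore creation is backend-specific and elided here):
    //
    //     auto semaphores = std::make_unique<std::unique_ptr<GrSemaphore>[]>(2);
    //     semaphores[0] = ...;  // backend-specific GrSemaphore
    //     semaphores[1] = ...;
    //     drawingMgr->newWaitRenderTask(proxyRef, std::move(semaphores), /*numSemaphores=*/2);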

    // Create a new render task which copies the pixels from the srcProxy into the dstBuffer. This
    // is used to support the asynchronous readback API. The srcRect is the region of the srcProxy
    // to be copied. The surfaceColorType says how we should interpret the data when reading back
    // from the source. The dstColorType describes how the data should be stored in the dstBuffer,
    // and dstOffset is the offset into the dstBuffer at which we begin writing.
    void newTransferFromRenderTask(sk_sp<GrSurfaceProxy> srcProxy, const SkIRect& srcRect,
                                   GrColorType surfaceColorType, GrColorType dstColorType,
                                   sk_sp<GrGpuBuffer> dstBuffer, size_t dstOffset);
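
    // Example: scheduling an async readback into a transfer buffer (a hedged sketch;
    // 'drawingMgr', 'srcProxyRef', 'xferBuffer', 'w', and 'h' are hypothetical, and both
    // color types are assumed to be RGBA_8888):
    //
    //     drawingMgr->newTransferFromRenderTask(srcProxyRef, SkIRect::MakeWH(w, h),
    //                                           GrColorType::kRGBA_8888,
    //                                           GrColorType::kRGBA_8888,
    //                                           std::move(xferBuffer), /*dstOffset=*/0);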

    // Creates a new render task which copies a pixel rectangle from srcView into dstView. The src
    // pixels copied are specified by srcRect. They are copied to a rect of the same size in
    // dstView with its top-left corner at dstPoint. If srcRect is clipped by the src bounds, the
    // pixels in the dst rect corresponding to the clipped-away area are left unmodified. This
    // method is not guaranteed to succeed; success depends on the surface types, formats, and
    // backend-specific limitations.
    bool newCopyRenderTask(GrSurfaceProxyView srcView, const SkIRect& srcRect,
                           GrSurfaceProxyView dstView, const SkIPoint& dstPoint);
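
    // Example: recording a surface-to-surface copy and checking for failure (a hedged sketch;
    // the views and src rect are hypothetical):
    //
    //     if (!drawingMgr->newCopyRenderTask(std::move(srcView), srcRect,
    //                                        std::move(dstView), SkIPoint::Make(0, 0))) {
    //         // Fall back, e.g., by drawing the src into the dst instead.
    //     }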

    GrRecordingContext* getContext() { return fContext; }

    GrTextContext* getTextContext();

    GrPathRenderer* getPathRenderer(const GrPathRenderer::CanDrawPathArgs& args,
                                    bool allowSW,
                                    GrPathRendererChain::DrawType drawType,
                                    GrPathRenderer::StencilSupport* stencilSupport = nullptr);
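
    // Example: picking a renderer for a color draw, allowing software fallback (a hedged
    // sketch; 'canDrawArgs' is a hypothetical, pre-filled CanDrawPathArgs):
    //
    //     GrPathRenderer* pr = drawingMgr->getPathRenderer(
    //             canDrawArgs, /*allowSW=*/true, GrPathRendererChain::DrawType::kColor);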

    GrPathRenderer* getSoftwarePathRenderer();

    // Returns a direct pointer to the coverage counting path renderer, or null if it is not
    // supported or not enabled.
    GrCoverageCountingPathRenderer* getCoverageCountingPathRenderer();

    void flushIfNecessary();

    static bool ProgramUnitTest(GrContext* context, int maxStages, int maxLevels);

    GrSemaphoresSubmitted flushSurfaces(GrSurfaceProxy* proxies[],
                                        int cnt,
                                        SkSurface::BackendSurfaceAccess access,
                                        const GrFlushInfo& info);
    GrSemaphoresSubmitted flushSurface(GrSurfaceProxy* proxy,
                                       SkSurface::BackendSurfaceAccess access,
                                       const GrFlushInfo& info) {
        return this->flushSurfaces(&proxy, 1, access, info);
    }
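
    // Example: flushing a single surface with no external backend access and default flush
    // info (a hedged sketch; 'proxy' is a hypothetical GrSurfaceProxy*):
    //
    //     GrSemaphoresSubmitted submitted = drawingMgr->flushSurface(
    //             proxy, SkSurface::BackendSurfaceAccess::kNoAccess, GrFlushInfo());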

    void addOnFlushCallbackObject(GrOnFlushCallbackObject*);

#if GR_TEST_UTILS
    void testingOnly_removeOnFlushCallbackObject(GrOnFlushCallbackObject*);
#endif

    void moveRenderTasksToDDL(SkDeferredDisplayList* ddl);
    void copyRenderTasksFromDDL(const SkDeferredDisplayList*, GrRenderTargetProxy* newDest);
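
    // Example: the intended DDL round trip (a hedged sketch; the recording-side and
    // replay-side managers are hypothetical):
    //
    //     recordingMgr->moveRenderTasksToDDL(ddl);              // when recording finishes
    //     replayMgr->copyRenderTasksFromDDL(ddl, targetProxy);  // when the DDL is drawn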

private:
    // This class encapsulates maintenance and manipulation of the drawing manager's DAG of
    // renderTasks.
    class RenderTaskDAG {
    public:
        RenderTaskDAG(bool sortRenderTasks);
        ~RenderTaskDAG();

        // Currently, when explicitly allocating resources, this call will topologically sort the
        // GrRenderTasks.
        // MDB TODO: remove once incremental GrRenderTask sorting is enabled
        void prepForFlush();

        void closeAll(const GrCaps* caps);

        // A yucky combination of closeAll and reset
        void cleanup(const GrCaps* caps);

        void gatherIDs(SkSTArray<8, uint32_t, true>* idArray) const;

        void reset();

        // These calls forcibly remove a GrRenderTask from the DAG. They are problematic because
        // they just remove the GrRenderTask without cleaning up any referring pointers (i.e.,
        // dependency pointers in the DAG). They work right now because they are only called at
        // flush time, after the topological sort is complete (so the dangling pointers aren't
        // used).
        void removeRenderTask(int index);
        void removeRenderTasks(int startIndex, int stopIndex);

        bool empty() const { return fRenderTasks.empty(); }
        int numRenderTasks() const { return fRenderTasks.count(); }

        bool isUsed(GrSurfaceProxy*) const;

        GrRenderTask* renderTask(int index) { return fRenderTasks[index].get(); }
        const GrRenderTask* renderTask(int index) const { return fRenderTasks[index].get(); }

        GrRenderTask* back() { return fRenderTasks.back().get(); }
        const GrRenderTask* back() const { return fRenderTasks.back().get(); }

        GrRenderTask* add(sk_sp<GrRenderTask>);
        GrRenderTask* addBeforeLast(sk_sp<GrRenderTask>);
        void add(const SkTArray<sk_sp<GrRenderTask>>&);

        void swap(SkTArray<sk_sp<GrRenderTask>>* renderTasks);

        bool sortingRenderTasks() const { return fSortRenderTasks; }

    private:
        SkTArray<sk_sp<GrRenderTask>> fRenderTasks;
        bool fSortRenderTasks;
    };

    GrDrawingManager(GrRecordingContext*, const GrPathRendererChain::Options&,
                     const GrTextContext::Options&,
                     bool sortRenderTasks,
                     bool reduceOpsTaskSplitting);

    bool wasAbandoned() const;

    void cleanup();

    // Closes the target's dependent render tasks (or, if not in sorting/opsTask-splitting-
    // reduction mode, closes fActiveOpsTask) in preparation for us opening a new opsTask that
    // will write to 'target'.
    void closeRenderTasksForNewRenderTask(GrSurfaceProxy* target);

    // Returns true if any GrRenderTasks were actually executed; false otherwise.
    bool executeRenderTasks(int startIndex, int stopIndex, GrOpFlushState*,
                            int* numRenderTasksExecuted);

    bool flush(GrSurfaceProxy* proxies[],
               int numProxies,
               SkSurface::BackendSurfaceAccess access,
               const GrFlushInfo&,
               const GrPrepareForExternalIORequests&);

    bool submitToGpu(bool syncToCpu);

    SkDEBUGCODE(void validate() const);

    friend class GrContext;                 // access to: flush & cleanup
    friend class GrContextPriv;             // access to: flush
    friend class GrOnFlushResourceProvider; // this is just a shallow wrapper around this class
    friend class GrRecordingContext;        // access to: ctor
    friend class SkImage;                   // for access to: flush

    static const int kNumPixelGeometries = 5; // The different pixel geometries
    static const int kNumDFTOptions = 2;      // DFT or no DFT

    GrRecordingContext* fContext;
    GrPathRendererChain::Options fOptionsForPathRendererChain;
    GrTextContext::Options fOptionsForTextContext;
    // This cache is used by both the vertex and index pools. It reuses memory across multiple
    // flushes.
    sk_sp<GrBufferAllocPool::CpuBufferCache> fCpuBufferCache;

    RenderTaskDAG fDAG;
    GrOpsTask* fActiveOpsTask = nullptr;
    // These are the IDs of the renderTasks currently being flushed (in internalFlush)
    SkSTArray<8, uint32_t, true> fFlushingRenderTaskIDs;
    // These are the new renderTasks generated by the onFlush CBs
    SkSTArray<4, sk_sp<GrRenderTask>> fOnFlushRenderTasks;

    std::unique_ptr<GrTextContext> fTextContext;

    std::unique_ptr<GrPathRendererChain> fPathRendererChain;
    sk_sp<GrSoftwarePathRenderer> fSoftwarePathRenderer;

    GrTokenTracker fTokenTracker;
    bool fFlushing;
    bool fReduceOpsTaskSplitting;

    SkTArray<GrOnFlushCallbackObject*> fOnFlushCBObjects;

    void addDDLTarget(GrSurfaceProxy* proxy) { fDDLTargets.insert(proxy); }
    bool isDDLTarget(GrSurfaceProxy* proxy) { return fDDLTargets.find(proxy) != fDDLTargets.end(); }
    void clearDDLTargets() { fDDLTargets.clear(); }

    // We play a trick with lazy proxies to retarget the base target of a DDL to the SkSurface
    // it is replayed on. Because of this remapping we need to explicitly store the targets of
    // DDL replaying.
    // Note: we do not expect a whole lot of these per flush
    std::set<GrSurfaceProxy*> fDDLTargets;
};

#endif