/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOpFlushState_DEFINED
#define GrOpFlushState_DEFINED

#include <utility>
#include "src/core/SkArenaAlloc.h"
#include "src/core/SkArenaAllocList.h"
#include "src/gpu/GrAppliedClip.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrDeferredUpload.h"
#include "src/gpu/GrProgramInfo.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrSurfaceProxyView.h"
#include "src/gpu/ops/GrMeshDrawOp.h"

class GrGpu;
class GrOpsRenderPass;
class GrResourceProvider;

/** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpsTask flush. */
class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawOp::Target {
public:
    // The CpuBufferCache may either be null or hold reusable CPU allocations of size
    // GrBufferAllocPool::kDefaultBufferSize. If the latter, then CPU memory is only allocated for
    // vertices/indices when a buffer larger than kDefaultBufferSize is required.
    GrOpFlushState(GrGpu*, GrResourceProvider*, GrTokenTracker*,
                   sk_sp<GrBufferAllocPool::CpuBufferCache> = nullptr);
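
    // A construction sketch (hypothetical caller; in practice the drawing manager owns these
    // objects, and the buffer cache may be nullptr):
    //
    //   GrOpFlushState flushState(gpu, resourceProvider, tokenTracker, cpuBufferCache);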

    ~GrOpFlushState() final { this->reset(); }

    /** This is called after each op has a chance to prepare its draws and before the draws are
        executed. */
    void preExecuteDraws();

    /** Called to upload data to a texture using the GrDeferredTextureUploadFn. If the uploaded
        surface needs to be prepared for being sampled in a draw after the upload, the caller
        should pass in true for shouldPrepareSurfaceForSampling. This feature is needed for Vulkan
        when doing inline uploads to reset the image layout back to sampled. */
    void doUpload(GrDeferredTextureUploadFn&, bool shouldPrepareSurfaceForSampling = false);
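
    // For example, an inline upload whose target texture will be sampled later in the same render
    // pass might be issued as (sketch; uploadFn is a hypothetical GrDeferredTextureUploadFn):
    //
    //   flushState->doUpload(uploadFn, /*shouldPrepareSurfaceForSampling=*/true);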

    /** Called as ops are executed. Must be called in the same order as the ops were prepared. */
    void executeDrawsAndUploadsForMeshDrawOp(const GrOp* op, const SkRect& chainBounds,
                                             const GrPipeline*);

    GrOpsRenderPass* opsRenderPass() { return fOpsRenderPass; }
    void setOpsRenderPass(GrOpsRenderPass* renderPass) { fOpsRenderPass = renderPass; }

    GrGpu* gpu() { return fGpu; }

    void reset();

    /** Additional data required on a per-op basis when executing GrOps. */
    struct OpArgs {
        // TODO: why does OpArgs have the op we're going to pass it to as a member? Remove it.
        explicit OpArgs(GrOp* op, GrSurfaceProxyView* surfaceView, GrAppliedClip* appliedClip,
                        const GrXferProcessor::DstProxyView& dstProxyView)
                : fOp(op)
                , fSurfaceView(surfaceView)
                , fRenderTargetProxy(surfaceView->asRenderTargetProxy())
                , fAppliedClip(appliedClip)
                , fDstProxyView(dstProxyView) {
            SkASSERT(surfaceView->asRenderTargetProxy());
        }

        GrSurfaceOrigin origin() const { return fSurfaceView->origin(); }
        GrSwizzle writeSwizzle() const { return fSurfaceView->swizzle(); }

        GrOp* op() { return fOp; }
        const GrSurfaceProxyView* writeView() const { return fSurfaceView; }
        GrRenderTargetProxy* proxy() const { return fRenderTargetProxy; }
        GrAppliedClip* appliedClip() { return fAppliedClip; }
        const GrAppliedClip* appliedClip() const { return fAppliedClip; }
        const GrXferProcessor::DstProxyView& dstProxyView() const { return fDstProxyView; }

#ifdef SK_DEBUG
        void validate() const {
            SkASSERT(fOp);
            SkASSERT(fSurfaceView);
        }
#endif

    private:
        GrOp* fOp;
        GrSurfaceProxyView* fSurfaceView;
        GrRenderTargetProxy* fRenderTargetProxy;
        GrAppliedClip* fAppliedClip;
        GrXferProcessor::DstProxyView fDstProxyView;  // TODO: do we still need the dst proxy here?
    };

    void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }
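
    // Sketch of how a flush-time caller might scope OpArgs around an op's execution (op,
    // writeView, appliedClip, dstProxyView, and chainBounds are assumed to be in hand):
    //
    //   GrOpFlushState::OpArgs opArgs(op, &writeView, appliedClip, dstProxyView);
    //   flushState->setOpArgs(&opArgs);
    //   op->execute(flushState, chainBounds);
    //   flushState->setOpArgs(nullptr);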

    const OpArgs& drawOpArgs() const {
        SkASSERT(fOpArgs);
        SkDEBUGCODE(fOpArgs->validate());
        return *fOpArgs;
    }

    void setSampledProxyArray(SkTArray<GrSurfaceProxy*, true>* sampledProxies) {
        fSampledProxies = sampledProxies;
    }

    SkTArray<GrSurfaceProxy*, true>* sampledProxyArray() override {
        return fSampledProxies;
    }

    /** Overrides of GrDeferredUploadTarget. */

    const GrTokenTracker* tokenTracker() final { return fTokenTracker; }
    GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
    GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&&) final;
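
    // Sketch: during onPrepare() an op can record an upload against this target; the returned
    // token orders the upload relative to the op's draws (the lambda body is hypothetical):
    //
    //   GrDeferredUploadToken token = target->deferredUploadTarget()->addInlineUpload(
    //           [](GrDeferredTextureUploadWritePixelsFn& writePixels) {
    //               // push the new pixel data via writePixels(...)
    //           });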

    /** Overrides of GrMeshDrawOp::Target. */
    void recordDraw(const GrGeometryProcessor*,
                    const GrSimpleMesh[],
                    int meshCnt,
                    const GrSurfaceProxy* const primProcProxies[],
                    GrPrimitiveType) final;
    void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
                          int* startVertex) final;
    uint16_t* makeIndexSpace(int indexCount, sk_sp<const GrBuffer>*, int* startIndex) final;
    void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
                                 sk_sp<const GrBuffer>*, int* startVertex,
                                 int* actualVertexCount) final;
    uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                    sk_sp<const GrBuffer>*, int* startIndex,
                                    int* actualIndexCount) final;
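
    // A typical onPrepare() allocation sketch (kVertexStride and fVertexCount are hypothetical
    // op state; a null return means the allocation failed and the draw should be skipped):
    //
    //   sk_sp<const GrBuffer> vertexBuffer;
    //   int firstVertex = 0;
    //   void* verts = target->makeVertexSpace(kVertexStride, fVertexCount,
    //                                         &vertexBuffer, &firstVertex);
    //   if (!verts) {
    //       return;
    //   }
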
    GrDrawIndirectCommand* makeDrawIndirectSpace(int drawCount, sk_sp<const GrBuffer>* buffer,
                                                 size_t* offset) override {
        return fDrawIndirectPool.makeSpace(drawCount, buffer, offset);
    }
    GrDrawIndexedIndirectCommand* makeDrawIndexedIndirectSpace(
            int drawCount, sk_sp<const GrBuffer>* buffer, size_t* offset) override {
        return fDrawIndirectPool.makeIndexedSpace(drawCount, buffer, offset);
    }
    void putBackIndices(int indexCount) final;
    void putBackVertices(int vertices, size_t vertexStride) final;
    const GrSurfaceProxyView* writeView() const final { return this->drawOpArgs().writeView(); }
    GrRenderTargetProxy* proxy() const final { return this->drawOpArgs().proxy(); }
    const GrAppliedClip* appliedClip() const final { return this->drawOpArgs().appliedClip(); }
    const GrAppliedHardClip& appliedHardClip() const {
        return (fOpArgs->appliedClip()) ?
                fOpArgs->appliedClip()->hardClip() : GrAppliedHardClip::Disabled();
    }
    GrAppliedClip detachAppliedClip() final;
    const GrXferProcessor::DstProxyView& dstProxyView() const final {
        return this->drawOpArgs().dstProxyView();
    }
    GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
    const GrCaps& caps() const final;
    GrResourceProvider* resourceProvider() const final { return fResourceProvider; }

    GrStrikeCache* strikeCache() const final;

    // At this point we know we're flushing so full access to the GrAtlasManager and
    // GrSmallPathAtlasMgr is required (and permissible).
    GrAtlasManager* atlasManager() const final;
    GrSmallPathAtlasMgr* smallPathAtlasManager() const final;

    /** GrMeshDrawOp::Target override. */
    SkArenaAlloc* allocator() override { return &fArena; }

    // This is a convenience method that binds the given pipeline, and then, if our applied clip has
    // a scissor, sets the scissor rect from the applied clip.
    void bindPipelineAndScissorClip(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        SkASSERT((programInfo.pipeline().isScissorTestEnabled()) ==
                 (this->appliedClip() && this->appliedClip()->scissorState().enabled()));
        this->bindPipeline(programInfo, drawBounds);
        if (programInfo.pipeline().isScissorTestEnabled()) {
            this->setScissorRect(this->appliedClip()->scissorState().rect());
        }
    }

    // This is a convenience method for when the primitive processor has exactly one texture. It
    // binds one texture for the primitive processor, and any others for FPs on the pipeline.
    void bindTextures(const GrPrimitiveProcessor& primProc,
                      const GrSurfaceProxy& singlePrimProcTexture, const GrPipeline& pipeline) {
        SkASSERT(primProc.numTextureSamplers() == 1);
        const GrSurfaceProxy* ptr = &singlePrimProcTexture;
        this->bindTextures(primProc, &ptr, pipeline);
    }

    // Makes the appropriate bindBuffers() and draw*() calls for the provided mesh.
    void drawMesh(const GrSimpleMesh& mesh);
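
    // A typical onExecute() sequence for a mesh op, using the conveniences above (sketch;
    // fProgramInfo, fGeomProcTexture, and fMesh are hypothetical op members):
    //
    //   flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
    //   flushState->bindTextures(fProgramInfo->primProc(), *fGeomProcTexture,
    //                            fProgramInfo->pipeline());
    //   flushState->drawMesh(*fMesh);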

    // Pass-through methods to GrOpsRenderPass.
    void bindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        fOpsRenderPass->bindPipeline(programInfo, drawBounds);
    }
    void setScissorRect(const SkIRect& scissorRect) {
        fOpsRenderPass->setScissorRect(scissorRect);
    }
    void bindTextures(const GrPrimitiveProcessor& primProc,
                      const GrSurfaceProxy* const primProcTextures[], const GrPipeline& pipeline) {
        fOpsRenderPass->bindTextures(primProc, primProcTextures, pipeline);
    }
    void bindBuffers(sk_sp<const GrBuffer> indexBuffer, sk_sp<const GrBuffer> instanceBuffer,
                     sk_sp<const GrBuffer> vertexBuffer,
                     GrPrimitiveRestart primitiveRestart = GrPrimitiveRestart::kNo) {
        fOpsRenderPass->bindBuffers(std::move(indexBuffer), std::move(instanceBuffer),
                                    std::move(vertexBuffer), primitiveRestart);
    }
    void draw(int vertexCount, int baseVertex) {
        fOpsRenderPass->draw(vertexCount, baseVertex);
    }
    void drawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue, uint16_t maxIndexValue,
                     int baseVertex) {
        fOpsRenderPass->drawIndexed(indexCount, baseIndex, minIndexValue, maxIndexValue,
                                    baseVertex);
    }
    void drawInstanced(int instanceCount, int baseInstance, int vertexCount, int baseVertex) {
        fOpsRenderPass->drawInstanced(instanceCount, baseInstance, vertexCount, baseVertex);
    }
    void drawIndexedInstanced(int indexCount, int baseIndex, int instanceCount, int baseInstance,
                              int baseVertex) {
        fOpsRenderPass->drawIndexedInstanced(indexCount, baseIndex, instanceCount, baseInstance,
                                             baseVertex);
    }
    void drawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndexedIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexPattern(int patternIndexCount, int patternRepeatCount,
                          int maxPatternRepetitionsInIndexBuffer, int patternVertexCount,
                          int baseVertex) {
        fOpsRenderPass->drawIndexPattern(patternIndexCount, patternRepeatCount,
                                         maxPatternRepetitionsInIndexBuffer, patternVertexCount,
                                         baseVertex);
    }
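
    // For instance, drawing quadCount quads from a shared quad index buffer reduces to (sketch;
    // 6 indices and 4 vertices per quad; maxQuads is set by the shared buffer's capacity):
    //
    //   flushState->drawIndexPattern(/*patternIndexCount=*/6, quadCount, maxQuads,
    //                                /*patternVertexCount=*/4, baseVertex);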

private:
    struct InlineUpload {
        InlineUpload(GrDeferredTextureUploadFn&& upload, GrDeferredUploadToken token)
                : fUpload(std::move(upload)), fUploadBeforeToken(token) {}
        GrDeferredTextureUploadFn fUpload;
        GrDeferredUploadToken fUploadBeforeToken;
    };

    // A set of contiguous draws that share a draw token, geometry processor, and pipeline. The
    // meshes for the draw are stored in the fMeshes array. The reason for coalescing meshes
    // that share a geometry processor into a Draw is that it allows the Gpu object to set up
    // the shared state once and then issue draws for each mesh.
    struct Draw {
        ~Draw();
        // The geometry processor is always forced to be in an arena allocation or appears on
        // the stack (for CCPR). In either case this object does not need to manage its
        // lifetime.
        const GrGeometryProcessor* fGeometryProcessor = nullptr;
        // Must have GrPrimitiveProcessor::numTextureSamplers() entries. Can be null if no samplers.
        const GrSurfaceProxy* const* fPrimProcProxies = nullptr;
        const GrSimpleMesh* fMeshes = nullptr;
        const GrOp* fOp = nullptr;
        int fMeshCnt = 0;
        GrPrimitiveType fPrimitiveType;
    };

    // Storage for ops' pipelines, draws, and inline uploads.
    SkArenaAllocWithReset fArena{sizeof(GrPipeline) * 100};

    // Store vertex and index data on behalf of ops that are flushed.
    GrVertexBufferAllocPool fVertexPool;
    GrIndexBufferAllocPool fIndexPool;
    GrDrawIndirectBufferAllocPool fDrawIndirectPool;

    // Data stored on behalf of the ops being flushed.
    SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
    SkArenaAllocList<InlineUpload> fInlineUploads;
    SkArenaAllocList<Draw> fDraws;

    // All draws we store have an implicit draw token. This is the draw token for the first draw
    // in fDraws.
    GrDeferredUploadToken fBaseDrawToken = GrDeferredUploadToken::AlreadyFlushedToken();

    // Info about the op that is currently preparing or executing using the flush state, or null
    // if no op is currently preparing or executing.
    OpArgs* fOpArgs = nullptr;

    // This field is only transiently set during flush. Each GrOpsTask will set it to point to an
    // array of the proxies it uses before calling onPrepare and onExecute.
    SkTArray<GrSurfaceProxy*, true>* fSampledProxies;

    GrGpu* fGpu;
    GrResourceProvider* fResourceProvider;
    GrTokenTracker* fTokenTracker;
    GrOpsRenderPass* fOpsRenderPass = nullptr;

    // Variables that are used to track where we are in lists as ops are executed.
    SkArenaAllocList<Draw>::Iter fCurrDraw;
    SkArenaAllocList<InlineUpload>::Iter fCurrUpload;
};

#endif