/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrOpFlushState_DEFINED
#define GrOpFlushState_DEFINED

#include <utility>
#include "src/core/SkArenaAlloc.h"
#include "src/core/SkArenaAllocList.h"
#include "src/gpu/GrAppliedClip.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrDeferredUpload.h"
#include "src/gpu/GrProgramInfo.h"
#include "src/gpu/GrRenderTargetProxy.h"
#include "src/gpu/GrSurfaceProxyView.h"
#include "src/gpu/ops/GrMeshDrawOp.h"

class GrGpu;
class GrOpsRenderPass;
class GrResourceProvider;

/** Tracks the state across all the GrOps (really just the GrDrawOps) in a GrOpsTask flush. */
class GrOpFlushState final : public GrDeferredUploadTarget, public GrMeshDrawOp::Target {
public:
    // The cpuBufferCache, if non-null, provides reusable CPU allocations of size
    // GrBufferAllocPool::kDefaultBufferSize for the vertex/index pools. When the cache is used,
    // new CPU memory is only allocated for vertices/indices when a buffer larger than
    // kDefaultBufferSize is required.
    GrOpFlushState(GrGpu*, GrResourceProvider*, GrTokenTracker*,
                   sk_sp<GrBufferAllocPool::CpuBufferCache> = nullptr);

    ~GrOpFlushState() final { this->reset(); }

    /** This is called after each op has a chance to prepare its draws and before the draws are
        executed. */
    void preExecuteDraws();

    /** Called to upload data to a texture using the GrDeferredTextureUploadFn. If the uploaded
        surface needs to be prepared for being sampled in a draw after the upload, the caller
        should pass in true for shouldPrepareSurfaceForSampling. This feature is needed for Vulkan
        when doing inline uploads to reset the image layout back to sampled. */
    void doUpload(GrDeferredTextureUploadFn&, bool shouldPrepareSurfaceForSampling = false);
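
    // For example, a Vulkan inline upload recorded by an op might be executed during flush as
    // (a hypothetical sketch; 'flushState' and 'upload' are illustrative names, not part of
    // this file):
    //
    //   flushState->doUpload(upload, /*shouldPrepareSurfaceForSampling=*/true);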

    /** Called as ops are executed. Must be called in the same order as the ops were prepared. */
    void executeDrawsAndUploadsForMeshDrawOp(const GrOp* op, const SkRect& chainBounds,
                                             const GrPipeline*);

    GrOpsRenderPass* opsRenderPass() { return fOpsRenderPass; }
    void setOpsRenderPass(GrOpsRenderPass* renderPass) { fOpsRenderPass = renderPass; }

    GrGpu* gpu() { return fGpu; }

    void reset();
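
    // A typical flush drives this object roughly as follows (a simplified sketch of how a
    // GrOpsTask uses it during execution; not code that lives in this file):
    //
    //   flushState->setOpsRenderPass(renderPass);
    //   for (each op chain) {
    //       GrOpFlushState::OpArgs opArgs(op, &surfaceView, appliedClip, dstProxyView);
    //       flushState->setOpArgs(&opArgs);
    //       op->execute(flushState, chainBounds);
    //       flushState->setOpArgs(nullptr);
    //   }
    //   flushState->setOpsRenderPass(nullptr);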

    /** Additional data required on a per-op basis when executing GrOps. */
    struct OpArgs {
        // TODO: why does OpArgs have the op we're going to pass it to as a member? Remove it.
        explicit OpArgs(GrOp* op, GrSurfaceProxyView* surfaceView, GrAppliedClip* appliedClip,
                        const GrXferProcessor::DstProxyView& dstProxyView)
                : fOp(op)
                , fSurfaceView(surfaceView)
                , fRenderTargetProxy(surfaceView->asRenderTargetProxy())
                , fAppliedClip(appliedClip)
                , fDstProxyView(dstProxyView) {
            SkASSERT(surfaceView->asRenderTargetProxy());
        }

        GrSurfaceOrigin origin() const { return fSurfaceView->origin(); }
        GrSwizzle writeSwizzle() const { return fSurfaceView->swizzle(); }

        GrOp* op() { return fOp; }
        const GrSurfaceProxyView* writeView() const { return fSurfaceView; }
        GrRenderTargetProxy* proxy() const { return fRenderTargetProxy; }
        GrAppliedClip* appliedClip() { return fAppliedClip; }
        const GrAppliedClip* appliedClip() const { return fAppliedClip; }
        const GrXferProcessor::DstProxyView& dstProxyView() const { return fDstProxyView; }

#ifdef SK_DEBUG
        void validate() const {
            SkASSERT(fOp);
            SkASSERT(fSurfaceView);
        }
#endif

    private:
        GrOp* fOp;
        GrSurfaceProxyView* fSurfaceView;
        GrRenderTargetProxy* fRenderTargetProxy;
        GrAppliedClip* fAppliedClip;
        GrXferProcessor::DstProxyView fDstProxyView;  // TODO: do we still need the dst proxy here?
    };

    void setOpArgs(OpArgs* opArgs) { fOpArgs = opArgs; }

    const OpArgs& drawOpArgs() const {
        SkASSERT(fOpArgs);
        SkDEBUGCODE(fOpArgs->validate());
        return *fOpArgs;
    }

    void setSampledProxyArray(SkTArray<GrSurfaceProxy*, true>* sampledProxies) {
        fSampledProxies = sampledProxies;
    }

    SkTArray<GrSurfaceProxy*, true>* sampledProxyArray() override {
        return fSampledProxies;
    }

    /** Overrides of GrDeferredUploadTarget. */

    const GrTokenTracker* tokenTracker() final { return fTokenTracker; }
    GrDeferredUploadToken addInlineUpload(GrDeferredTextureUploadFn&&) final;
    GrDeferredUploadToken addASAPUpload(GrDeferredTextureUploadFn&&) final;
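
    // For example, an op that must update a texture before its draw executes might record an
    // upload during onPrepare() (a hypothetical sketch; the lambda body is op-specific):
    //
    //   GrDeferredUploadToken token = target->deferredUploadTarget()->addInlineUpload(
    //           [](GrDeferredTextureUploadWritePixelsFn& writePixels) {
    //               // Write the new pixel data via writePixels(...).
    //           });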

    /** Overrides of GrMeshDrawOp::Target. */
    void recordDraw(const GrGeometryProcessor*,
                    const GrSimpleMesh[],
                    int meshCnt,
                    const GrSurfaceProxy* const primProcProxies[],
                    GrPrimitiveType) final;
    void* makeVertexSpace(size_t vertexSize, int vertexCount, sk_sp<const GrBuffer>*,
                          int* startVertex) final;
    uint16_t* makeIndexSpace(int indexCount, sk_sp<const GrBuffer>*, int* startIndex) final;
    void* makeVertexSpaceAtLeast(size_t vertexSize, int minVertexCount, int fallbackVertexCount,
                                 sk_sp<const GrBuffer>*, int* startVertex,
                                 int* actualVertexCount) final;
    uint16_t* makeIndexSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                    sk_sp<const GrBuffer>*, int* startIndex,
                                    int* actualIndexCount) final;
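
    // For example, a mesh op's onPrepareDraws() typically reserves vertex space like this
    // (a hypothetical sketch; 'fVertexStride' and 'fVertexCount' are illustrative members):
    //
    //   sk_sp<const GrBuffer> vertexBuffer;
    //   int firstVertex;
    //   void* verts = target->makeVertexSpace(fVertexStride, fVertexCount,
    //                                         &vertexBuffer, &firstVertex);
    //   if (!verts) {
    //       return;  // The allocation failed; skip recording the draw.
    //   }
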
    GrDrawIndirectCommand* makeDrawIndirectSpace(int drawCount, sk_sp<const GrBuffer>* buffer,
                                                 size_t* offset) {
        return fDrawIndirectPool.makeSpace(drawCount, buffer, offset);
    }
    GrDrawIndexedIndirectCommand* makeDrawIndexedIndirectSpace(
            int drawCount, sk_sp<const GrBuffer>* buffer, size_t* offset) {
        return fDrawIndirectPool.makeIndexedSpace(drawCount, buffer, offset);
    }
    void putBackIndices(int indexCount) final;
    void putBackVertices(int vertices, size_t vertexStride) final;
    const GrSurfaceProxyView* writeView() const final { return this->drawOpArgs().writeView(); }
    GrRenderTargetProxy* proxy() const final { return this->drawOpArgs().proxy(); }
    const GrAppliedClip* appliedClip() const final { return this->drawOpArgs().appliedClip(); }
    const GrAppliedHardClip& appliedHardClip() const {
        return (fOpArgs->appliedClip()) ? fOpArgs->appliedClip()->hardClip()
                                        : GrAppliedHardClip::Disabled();
    }
    GrAppliedClip detachAppliedClip() final;
    const GrXferProcessor::DstProxyView& dstProxyView() const final {
        return this->drawOpArgs().dstProxyView();
    }
    GrDeferredUploadTarget* deferredUploadTarget() final { return this; }
    const GrCaps& caps() const final;
    GrResourceProvider* resourceProvider() const final { return fResourceProvider; }

    GrStrikeCache* strikeCache() const final;

    // At this point we know we're flushing so full access to the GrAtlasManager is required (and
    // permissible).
    GrAtlasManager* atlasManager() const final;

    /** GrMeshDrawOp::Target override. */
    SkArenaAlloc* allocator() override { return &fArena; }

    // This is a convenience method that binds the given pipeline, and then, if our applied clip
    // has a scissor, sets the scissor rect from the applied clip.
    void bindPipelineAndScissorClip(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        SkASSERT((programInfo.pipeline().isScissorTestEnabled()) ==
                 (this->appliedClip() && this->appliedClip()->scissorState().enabled()));
        this->bindPipeline(programInfo, drawBounds);
        if (programInfo.pipeline().isScissorTestEnabled()) {
            this->setScissorRect(this->appliedClip()->scissorState().rect());
        }
    }

    // This is a convenience method for when the primitive processor has exactly one texture. It
    // binds one texture for the primitive processor, and any others for FPs on the pipeline.
    void bindTextures(const GrPrimitiveProcessor& primProc,
                      const GrSurfaceProxy& singlePrimProcTexture, const GrPipeline& pipeline) {
        SkASSERT(primProc.numTextureSamplers() == 1);
        const GrSurfaceProxy* ptr = &singlePrimProcTexture;
        this->bindTextures(primProc, &ptr, pipeline);
    }

    // Makes the appropriate bindBuffers() and draw*() calls for the provided mesh.
    void drawMesh(const GrSimpleMesh& mesh);
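
    // Taken together, a mesh op's onExecute() commonly looks like this (a hypothetical sketch;
    // 'fProgramInfo', 'fAtlasProxy', and 'fMesh' are illustrative members):
    //
    //   flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
    //   flushState->bindTextures(fProgramInfo->primProc(), *fAtlasProxy,
    //                            fProgramInfo->pipeline());
    //   flushState->drawMesh(*fMesh);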

    // Pass-through methods to GrOpsRenderPass.
    void bindPipeline(const GrProgramInfo& programInfo, const SkRect& drawBounds) {
        fOpsRenderPass->bindPipeline(programInfo, drawBounds);
    }
    void setScissorRect(const SkIRect& scissorRect) {
        fOpsRenderPass->setScissorRect(scissorRect);
    }
    void bindTextures(const GrPrimitiveProcessor& primProc,
                      const GrSurfaceProxy* const primProcTextures[], const GrPipeline& pipeline) {
        fOpsRenderPass->bindTextures(primProc, primProcTextures, pipeline);
    }
    void bindBuffers(const GrBuffer* indexBuffer, const GrBuffer* instanceBuffer,
                     const GrBuffer* vertexBuffer,
                     GrPrimitiveRestart primitiveRestart = GrPrimitiveRestart::kNo) {
        fOpsRenderPass->bindBuffers(indexBuffer, instanceBuffer, vertexBuffer, primitiveRestart);
    }
    void draw(int vertexCount, int baseVertex) {
        fOpsRenderPass->draw(vertexCount, baseVertex);
    }
    void drawIndexed(int indexCount, int baseIndex, uint16_t minIndexValue, uint16_t maxIndexValue,
                     int baseVertex) {
        fOpsRenderPass->drawIndexed(indexCount, baseIndex, minIndexValue, maxIndexValue,
                                    baseVertex);
    }
    void drawInstanced(int instanceCount, int baseInstance, int vertexCount, int baseVertex) {
        fOpsRenderPass->drawInstanced(instanceCount, baseInstance, vertexCount, baseVertex);
    }
    void drawIndexedInstanced(int indexCount, int baseIndex, int instanceCount, int baseInstance,
                              int baseVertex) {
        fOpsRenderPass->drawIndexedInstanced(indexCount, baseIndex, instanceCount, baseInstance,
                                             baseVertex);
    }
    void drawIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexedIndirect(const GrBuffer* drawIndirectBuffer, size_t offset, int drawCount) {
        fOpsRenderPass->drawIndexedIndirect(drawIndirectBuffer, offset, drawCount);
    }
    void drawIndexPattern(int patternIndexCount, int patternRepeatCount,
                          int maxPatternRepetitionsInIndexBuffer, int patternVertexCount,
                          int baseVertex) {
        fOpsRenderPass->drawIndexPattern(patternIndexCount, patternRepeatCount,
                                         maxPatternRepetitionsInIndexBuffer, patternVertexCount,
                                         baseVertex);
    }
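
    // For example, drawing 'n' quads from a shared quad index buffer (6 indices and 4 vertices
    // per quad) might look like this (a hypothetical sketch):
    //
    //   flushState->drawIndexPattern(/*patternIndexCount=*/6, /*patternRepeatCount=*/n,
    //                                maxQuadsInIndexBuffer, /*patternVertexCount=*/4,
    //                                baseVertex);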

private:
    struct InlineUpload {
        InlineUpload(GrDeferredTextureUploadFn&& upload, GrDeferredUploadToken token)
                : fUpload(std::move(upload)), fUploadBeforeToken(token) {}
        GrDeferredTextureUploadFn fUpload;
        GrDeferredUploadToken fUploadBeforeToken;
    };

    // A set of contiguous draws that share a draw token, geometry processor, and pipeline. The
    // meshes for the draw are stored in the fMeshes array. The reason for coalescing meshes
    // that share a geometry processor into a Draw is that it allows the GrGpu object to set up
    // the shared state once and then issue draws for each mesh.
    struct Draw {
        ~Draw();
        // The geometry processor is always forced to be in an arena allocation or appears on
        // the stack (for CCPR). In either case this object does not need to manage its
        // lifetime.
        const GrGeometryProcessor* fGeometryProcessor = nullptr;
        // Must have GrPrimitiveProcessor::numTextureSamplers() entries. Can be null if no samplers.
        const GrSurfaceProxy* const* fPrimProcProxies = nullptr;
        const GrSimpleMesh* fMeshes = nullptr;
        const GrOp* fOp = nullptr;
        int fMeshCnt = 0;
        GrPrimitiveType fPrimitiveType;
    };

    // Storage for ops' pipelines, draws, and inline uploads.
    SkArenaAlloc fArena{sizeof(GrPipeline) * 100};

    // Store vertex and index data on behalf of ops that are flushed.
    GrVertexBufferAllocPool fVertexPool;
    GrIndexBufferAllocPool fIndexPool;
    GrDrawIndirectBufferAllocPool fDrawIndirectPool;

    // Data stored on behalf of the ops being flushed.
    SkArenaAllocList<GrDeferredTextureUploadFn> fASAPUploads;
    SkArenaAllocList<InlineUpload> fInlineUploads;
    SkArenaAllocList<Draw> fDraws;

    // All draws we store have an implicit draw token. This is the draw token for the first draw
    // in fDraws.
    GrDeferredUploadToken fBaseDrawToken = GrDeferredUploadToken::AlreadyFlushedToken();

    // Info about the op that is currently preparing or executing using the flush state, or null
    // if no op is currently preparing or executing.
    OpArgs* fOpArgs = nullptr;

    // This field is only transiently set during flush. Each GrOpsTask will set it to point to an
    // array of the proxies it uses before calling onPrepare and onExecute.
    SkTArray<GrSurfaceProxy*, true>* fSampledProxies;

    GrGpu* fGpu;
    GrResourceProvider* fResourceProvider;
    GrTokenTracker* fTokenTracker;
    GrOpsRenderPass* fOpsRenderPass = nullptr;

    // Variables used to track where we are in the draw and inline-upload lists as ops execute.
    SkArenaAllocList<Draw>::Iter fCurrDraw;
    SkArenaAllocList<InlineUpload>::Iter fCurrUpload;
};

#endif