/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ccpr/GrCCPerFlushResources.h"

#include "include/gpu/GrRecordingContext.h"
#include "src/gpu/GrMemoryPool.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrSurfaceContextPriv.h"
#include "src/gpu/ccpr/GrCCPathCache.h"
#include "src/gpu/ccpr/GrGSCoverageProcessor.h"
#include "src/gpu/ccpr/GrSampleMaskProcessor.h"
#include "src/gpu/ccpr/GrVSCoverageProcessor.h"
#include "src/gpu/geometry/GrStyledShape.h"
#include <algorithm>

using CoverageType = GrCCAtlas::CoverageType;
using FillBatchID = GrCCFiller::BatchID;
using StrokeBatchID = GrCCStroker::BatchID;
using PathInstance = GrCCPathProcessor::Instance;

static constexpr int kFillIdx = GrCCPerFlushResourceSpecs::kFillIdx;
static constexpr int kStrokeIdx = GrCCPerFlushResourceSpecs::kStrokeIdx;

namespace {

// Base class for an Op that renders a CCPR atlas.
class AtlasOp : public GrDrawOp {
public:
    FixedFunctionFlags fixedFunctionFlags() const override { return FixedFunctionFlags::kNone; }
    GrProcessorSet::Analysis finalize(const GrCaps&, const GrAppliedClip*,
                                      bool hasMixedSampledCoverage, GrClampType) override {
        return GrProcessorSet::EmptySetAnalysis();
    }
    CombineResult onCombineIfPossible(GrOp* other, GrRecordingContext::Arenas*,
                                      const GrCaps&) override {
        // We will only make multiple copy ops if they have different source proxies.
        // TODO: make use of texture chaining.
        return CombineResult::kCannotCombine;
    }

protected:
    AtlasOp(uint32_t classID, sk_sp<const GrCCPerFlushResources> resources,
            const SkISize& drawBounds)
            : GrDrawOp(classID)
            , fResources(std::move(resources)) {
        this->setBounds(SkRect::MakeIWH(drawBounds.width(), drawBounds.height()),
                        GrOp::HasAABloat::kNo, GrOp::IsHairline::kNo);
    }

    const sk_sp<const GrCCPerFlushResources> fResources;

private:
    void onPrePrepare(GrRecordingContext*,
                      const GrSurfaceProxyView* writeView,
                      GrAppliedClip*,
                      const GrXferProcessor::DstProxyView&) final {}
    void onPrepare(GrOpFlushState*) final {}
};

// Copies paths from a cached coverage count or msaa atlas into an 8-bit literal-coverage atlas.
class CopyAtlasOp : public AtlasOp {
public:
    DEFINE_OP_CLASS_ID

    static std::unique_ptr<GrDrawOp> Make(
            GrRecordingContext* context, sk_sp<const GrCCPerFlushResources> resources,
            sk_sp<GrTextureProxy> copyProxy, int baseInstance, int endInstance,
            const SkISize& drawBounds) {
        GrOpMemoryPool* pool = context->priv().opMemoryPool();

        return pool->allocate<CopyAtlasOp>(std::move(resources), std::move(copyProxy),
                                           baseInstance, endInstance, drawBounds);
    }

    const char* name() const override { return "CopyAtlasOp (CCPR)"; }

    void visitProxies(const VisitProxyFunc& fn) const override {
        fn(fSrcProxy.get(), GrMipmapped::kNo);
    }

    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        SkASSERT(fSrcProxy);
        SkASSERT(fSrcProxy->isInstantiated());

        auto coverageMode = GrCCAtlas::CoverageTypeToPathCoverageMode(
                fResources->renderedPathCoverageType());
        GrColorType ct = GrCCAtlas::CoverageTypeToColorType(fResources->renderedPathCoverageType());
        GrSwizzle swizzle = flushState->caps().getReadSwizzle(fSrcProxy->backendFormat(), ct);
        GrCCPathProcessor pathProc(coverageMode, fSrcProxy->peekTexture(), swizzle,
                                   GrCCAtlas::kTextureOrigin);

        bool hasScissor = flushState->appliedClip() &&
                          flushState->appliedClip()->scissorState().enabled();
        GrPipeline pipeline(hasScissor ? GrScissorTest::kEnabled : GrScissorTest::kDisabled,
                            SkBlendMode::kSrc, flushState->drawOpArgs().writeSwizzle());

        pathProc.drawPaths(flushState, pipeline, *fSrcProxy, *fResources, fBaseInstance,
                           fEndInstance, this->bounds());
    }

private:
    friend class ::GrOpMemoryPool;  // for ctor

    CopyAtlasOp(sk_sp<const GrCCPerFlushResources> resources, sk_sp<GrTextureProxy> srcProxy,
                int baseInstance, int endInstance, const SkISize& drawBounds)
            : AtlasOp(ClassID(), std::move(resources), drawBounds)
            , fSrcProxy(srcProxy)
            , fBaseInstance(baseInstance)
            , fEndInstance(endInstance) {
    }
    sk_sp<GrTextureProxy> fSrcProxy;
    const int fBaseInstance;
    const int fEndInstance;
};

// Renders coverage counts to a CCPR atlas using the resources' pre-filled GrCCPathParser.
template<typename ProcessorType> class RenderAtlasOp : public AtlasOp {
public:
    DEFINE_OP_CLASS_ID

    static std::unique_ptr<GrDrawOp> Make(
            GrRecordingContext* context, sk_sp<const GrCCPerFlushResources> resources,
            FillBatchID fillBatchID, StrokeBatchID strokeBatchID, const SkISize& drawBounds) {
        GrOpMemoryPool* pool = context->priv().opMemoryPool();

        return pool->allocate<RenderAtlasOp>(
                std::move(resources), fillBatchID, strokeBatchID, drawBounds);
    }

    // GrDrawOp interface.
    const char* name() const override { return "RenderAtlasOp (CCPR)"; }

    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        ProcessorType proc;
        GrPipeline pipeline(GrScissorTest::kEnabled, SkBlendMode::kPlus,
                            flushState->drawOpArgs().writeSwizzle());
        fResources->filler().drawFills(flushState, &proc, pipeline, fFillBatchID, fDrawBounds);
        fResources->stroker().drawStrokes(flushState, &proc, fStrokeBatchID, fDrawBounds);
    }

private:
    friend class ::GrOpMemoryPool;  // for ctor

    RenderAtlasOp(sk_sp<const GrCCPerFlushResources> resources, FillBatchID fillBatchID,
                  StrokeBatchID strokeBatchID, const SkISize& drawBounds)
            : AtlasOp(ClassID(), std::move(resources), drawBounds)
            , fFillBatchID(fillBatchID)
            , fStrokeBatchID(strokeBatchID)
            , fDrawBounds(SkIRect::MakeWH(drawBounds.width(), drawBounds.height())) {
    }

    const FillBatchID fFillBatchID;
    const StrokeBatchID fStrokeBatchID;
    const SkIRect fDrawBounds;
};

}  // namespace

static int inst_buffer_count(const GrCCPerFlushResourceSpecs& specs) {
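    // Cached paths draw straight from their resident atlas and need a single instance each.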
    return specs.fNumCachedPaths +
           // Copies get two instances per draw: 1 copy + 1 draw.
           (specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]) * 2 +
           specs.fNumRenderedPaths[kFillIdx] + specs.fNumRenderedPaths[kStrokeIdx];
           // No clips in instance buffers.
}

GrCCPerFlushResources::GrCCPerFlushResources(
        GrOnFlushResourceProvider* onFlushRP, CoverageType coverageType,
        const GrCCPerFlushResourceSpecs& specs)
        // Overallocate by one point so we can call Sk4f::Store at the final SkPoint in the array.
        // (See transform_path_pts below.)
        // FIXME: instead use built-in instructions to write only the first two lanes of an Sk4f.
        : fLocalDevPtsBuffer(std::max(specs.fRenderedPathStats[kFillIdx].fMaxPointsPerPath,
                                      specs.fRenderedPathStats[kStrokeIdx].fMaxPointsPerPath) + 1)
        , fFiller((CoverageType::kFP16_CoverageCount == coverageType)
                          ? GrCCFiller::Algorithm::kCoverageCount
                          : GrCCFiller::Algorithm::kStencilWindingCount,
                  specs.fNumRenderedPaths[kFillIdx] + specs.fNumClipPaths,
                  specs.fRenderedPathStats[kFillIdx].fNumTotalSkPoints,
                  specs.fRenderedPathStats[kFillIdx].fNumTotalSkVerbs,
                  specs.fRenderedPathStats[kFillIdx].fNumTotalConicWeights)
        , fStroker(specs.fNumRenderedPaths[kStrokeIdx],
                   specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkPoints,
                   specs.fRenderedPathStats[kStrokeIdx].fNumTotalSkVerbs)
        , fCopyAtlasStack(CoverageType::kA8_LiteralCoverage, specs.fCopyAtlasSpecs,
                          onFlushRP->caps())
        , fRenderedAtlasStack(coverageType, specs.fRenderedAtlasSpecs, onFlushRP->caps())
        , fIndexBuffer(GrCCPathProcessor::FindIndexBuffer(onFlushRP))
        , fVertexBuffer(GrCCPathProcessor::FindVertexBuffer(onFlushRP))
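        // Copy instances are written at the front of the instance buffer; rendered-path instances
        // start immediately after them.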
        , fNextCopyInstanceIdx(0)
        , fNextPathInstanceIdx(
                  specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]) {
    if (!fIndexBuffer) {
        SkDebugf("WARNING: failed to allocate CCPR index buffer. No paths will be drawn.\n");
        return;
    }
    if (!fVertexBuffer) {
        SkDebugf("WARNING: failed to allocate CCPR vertex buffer. No paths will be drawn.\n");
        return;
    }
    fPathInstanceBuffer.resetAndMapBuffer(onFlushRP,
                                          inst_buffer_count(specs) * sizeof(PathInstance));
    if (!fPathInstanceBuffer.hasGpuBuffer()) {
        SkDebugf("WARNING: failed to allocate CCPR instance buffer. No paths will be drawn.\n");
        return;
    }

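    // In MSAA mode, allocate one resolve-rect instance for every rendered path (fills, strokes,
    // and clips). These are used to convert stencil winding values into coverage.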
    if (CoverageType::kA8_Multisample == coverageType) {
        int numRenderedPaths =
                specs.fNumRenderedPaths[kFillIdx] + specs.fNumRenderedPaths[kStrokeIdx] +
                specs.fNumClipPaths;
        fStencilResolveBuffer.resetAndMapBuffer(
                onFlushRP, numRenderedPaths * sizeof(GrStencilAtlasOp::ResolveRectInstance));
        if (!fStencilResolveBuffer.hasGpuBuffer()) {
            SkDebugf("WARNING: failed to allocate CCPR stencil resolve buffer. "
                     "No paths will be drawn.\n");
            return;
        }
        SkDEBUGCODE(fEndStencilResolveInstance = numRenderedPaths);
    }

    SkDEBUGCODE(fEndCopyInstance =
                        specs.fNumCopiedPaths[kFillIdx] + specs.fNumCopiedPaths[kStrokeIdx]);
    SkDEBUGCODE(fEndPathInstance = inst_buffer_count(specs));
}

void GrCCPerFlushResources::upgradeEntryToLiteralCoverageAtlas(
        GrCCPathCache* pathCache, GrOnFlushResourceProvider* onFlushRP, GrCCPathCacheEntry* entry,
        GrFillRule fillRule) {
    using ReleaseAtlasResult = GrCCPathCacheEntry::ReleaseAtlasResult;
    SkASSERT(this->isMapped());
    SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);

    const GrCCCachedAtlas* cachedAtlas = entry->cachedAtlas();
    SkASSERT(cachedAtlas);
    SkASSERT(cachedAtlas->getOnFlushProxy());

    if (CoverageType::kA8_LiteralCoverage == cachedAtlas->coverageType()) {
        // This entry has already been upgraded to literal coverage. The path must have been drawn
        // multiple times during the flush.
        SkDEBUGCODE(--fEndCopyInstance);
        return;
    }

    SkIVector newAtlasOffset;
    if (GrCCAtlas* retiredAtlas = fCopyAtlasStack.addRect(entry->devIBounds(), &newAtlasOffset)) {
        // We did not fit in the previous copy atlas and it was retired. We will render the ranges
        // up until fCopyPathRanges.count() into the retired atlas during finalize().
        retiredAtlas->setFillBatchID(fCopyPathRanges.count());
        fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
    }

    this->recordCopyPathInstance(
            *entry, newAtlasOffset, fillRule, sk_ref_sp(cachedAtlas->getOnFlushProxy()));

    sk_sp<GrTexture> previousAtlasTexture =
            sk_ref_sp(cachedAtlas->getOnFlushProxy()->peekTexture());
    GrCCAtlas* newAtlas = &fCopyAtlasStack.current();
    if (ReleaseAtlasResult::kDidInvalidateFromCache ==
            entry->upgradeToLiteralCoverageAtlas(pathCache, onFlushRP, newAtlas, newAtlasOffset)) {
        // This texture just got booted out of the cache. Keep it around, in case we might be able
        // to recycle it for a new atlas. We can recycle it because copying happens before rendering
        // new paths, and every path from the atlas that we're planning to use this flush will be
        // copied to a new atlas. We'll never copy some and leave others.
        fRecyclableAtlasTextures.push_back(std::move(previousAtlasTexture));
    }
}

void GrCCPerFlushResources::recordCopyPathInstance(
        const GrCCPathCacheEntry& entry, const SkIVector& newAtlasOffset, GrFillRule fillRule,
        sk_sp<GrTextureProxy> srcProxy) {
    SkASSERT(fNextCopyInstanceIdx < fEndCopyInstance);

    // Write the instance at the back of the array.
    int currentInstanceIdx = fNextCopyInstanceIdx++;
    fPathInstanceBuffer[currentInstanceIdx].set(entry, newAtlasOffset, SK_PMColor4fWHITE, fillRule);

    // Percolate the instance forward until it's contiguous with other instances that share the same
    // proxy.
    for (int i = fCopyPathRanges.count() - 1; i >= fCurrCopyAtlasRangesIdx; --i) {
        if (fCopyPathRanges[i].fSrcProxy == srcProxy) {
            ++fCopyPathRanges[i].fCount;
            return;
        }
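        // Range i uses a different proxy. Hop over it: swap the new instance with that range's
        // first instance so the range stays contiguous, then keep percolating forward.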
        int rangeFirstInstanceIdx = currentInstanceIdx - fCopyPathRanges[i].fCount;
        std::swap(fPathInstanceBuffer[rangeFirstInstanceIdx],
                  fPathInstanceBuffer[currentInstanceIdx]);
        currentInstanceIdx = rangeFirstInstanceIdx;
    }

    // An instance with this particular proxy did not yet exist in the array. Add a range for it,
    // first moving any later ranges back to make space for it at fCurrCopyAtlasRangesIdx.
    fCopyPathRanges.push_back();
    std::move_backward(fCopyPathRanges.begin() + fCurrCopyAtlasRangesIdx,
                       fCopyPathRanges.end() - 1,
                       fCopyPathRanges.end());
    fCopyPathRanges[fCurrCopyAtlasRangesIdx] = {std::move(srcProxy), 1};
}

static bool transform_path_pts(
        const SkMatrix& m, const SkPath& path, const SkAutoSTArray<32, SkPoint>& outDevPts,
        GrOctoBounds* octoBounds) {
    const SkPoint* pts = SkPathPriv::PointData(path);
    int numPts = path.countPoints();
    SkASSERT(numPts + 1 <= outDevPts.count());
    SkASSERT(numPts);

    // m45 transforms path points into "45 degree" device space. A bounding box in this space gives
    // the circumscribing octagon's diagonals. We could use SK_ScalarRoot2Over2, but an orthonormal
    // transform is not necessary as long as the shader uses the correct inverse.
    SkMatrix m45;
    m45.setSinCos(1, 1);
    m45.preConcat(m);

    // X,Y,T are two parallel view matrices that accumulate two bounding boxes as they map points:
    // device-space bounds and "45 degree" device-space bounds (| 1 -1 | * devCoords).
    //                                                          | 1  1 |
    Sk4f X = Sk4f(m.getScaleX(), m.getSkewY(), m45.getScaleX(), m45.getSkewY());
    Sk4f Y = Sk4f(m.getSkewX(), m.getScaleY(), m45.getSkewX(), m45.getScaleY());
    Sk4f T = Sk4f(m.getTranslateX(), m.getTranslateY(), m45.getTranslateX(), m45.getTranslateY());

    // Map the path's points to device space and accumulate bounding boxes.
    Sk4f devPt = SkNx_fma(Y, Sk4f(pts[0].y()), T);
    devPt = SkNx_fma(X, Sk4f(pts[0].x()), devPt);
    Sk4f topLeft = devPt;
    Sk4f bottomRight = devPt;

    // Store all 4 values [dev.x, dev.y, dev45.x, dev45.y]. We are only interested in the first two,
    // and will overwrite [dev45.x, dev45.y] with the next point. This is why the dst buffer must
    // be at least one larger than the number of points.
    devPt.store(&outDevPts[0]);

    for (int i = 1; i < numPts; ++i) {
        devPt = SkNx_fma(Y, Sk4f(pts[i].y()), T);
        devPt = SkNx_fma(X, Sk4f(pts[i].x()), devPt);
        topLeft = Sk4f::Min(topLeft, devPt);
        bottomRight = Sk4f::Max(bottomRight, devPt);
        devPt.store(&outDevPts[i]);
    }

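    // A finite float times 0 is exactly 0, while infinities and NaNs propagate to NaN. So the
    // compares below only fail when some accumulated bound is non-finite.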
    if (!(Sk4f(0) == topLeft*0).allTrue() || !(Sk4f(0) == bottomRight*0).allTrue()) {
        // The bounds are infinite or NaN.
        return false;
    }

    SkPoint topLeftPts[2], bottomRightPts[2];
    topLeft.store(topLeftPts);
    bottomRight.store(bottomRightPts);

    const SkRect& devBounds = SkRect::MakeLTRB(
            topLeftPts[0].x(), topLeftPts[0].y(), bottomRightPts[0].x(), bottomRightPts[0].y());
    const SkRect& devBounds45 = SkRect::MakeLTRB(
            topLeftPts[1].x(), topLeftPts[1].y(), bottomRightPts[1].x(), bottomRightPts[1].y());

    octoBounds->set(devBounds, devBounds45);
    return true;
}

GrCCAtlas* GrCCPerFlushResources::renderShapeInAtlas(
        const SkIRect& clipIBounds, const SkMatrix& m, const GrStyledShape& shape,
        float strokeDevWidth, GrOctoBounds* octoBounds, SkIRect* devIBounds,
        SkIVector* devToAtlasOffset) {
    SkASSERT(this->isMapped());
    SkASSERT(fNextPathInstanceIdx < fEndPathInstance);

    SkPath path;
    shape.asPath(&path);
    if (path.isEmpty()) {
        SkDEBUGCODE(--fEndPathInstance);
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }
    if (!transform_path_pts(m, path, fLocalDevPtsBuffer, octoBounds)) {
        // The transformed path had infinite or NaN bounds.
        SkDEBUGCODE(--fEndPathInstance);
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }

    const SkStrokeRec& stroke = shape.style().strokeRec();
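    // For strokes, outset the octagonal bounds by the stroke's device-space inflation radius,
    // which accounts for the stroke width plus joins, caps, and the miter limit.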
    if (!stroke.isFillStyle()) {
        float r = SkStrokeRec::GetInflationRadius(
                stroke.getJoin(), stroke.getMiter(), stroke.getCap(), strokeDevWidth);
        octoBounds->outset(r);
    }

    GrScissorTest enableScissorInAtlas;
    if (clipIBounds.contains(octoBounds->bounds())) {
        enableScissorInAtlas = GrScissorTest::kDisabled;
    } else if (octoBounds->clip(clipIBounds)) {
        enableScissorInAtlas = GrScissorTest::kEnabled;
    } else {
        // The clip and octo bounds do not intersect. Draw nothing.
        SkDEBUGCODE(--fEndPathInstance);
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }
    octoBounds->roundOut(devIBounds);
    SkASSERT(clipIBounds.contains(*devIBounds));

    this->placeRenderedPathInAtlas(*devIBounds, enableScissorInAtlas, devToAtlasOffset);

    GrFillRule fillRule;
    if (stroke.isFillStyle()) {
        SkASSERT(0 == strokeDevWidth);
        fFiller.parseDeviceSpaceFill(path, fLocalDevPtsBuffer.begin(), enableScissorInAtlas,
                                     *devIBounds, *devToAtlasOffset);
        fillRule = GrFillRuleForSkPath(path);
    } else {
        // Stroke-and-fill is not yet supported.
        SkASSERT(SkStrokeRec::kStroke_Style == stroke.getStyle() || stroke.isHairlineStyle());
        SkASSERT(!stroke.isHairlineStyle() || 1 == strokeDevWidth);
        fStroker.parseDeviceSpaceStroke(
                path, fLocalDevPtsBuffer.begin(), stroke, strokeDevWidth, enableScissorInAtlas,
                *devIBounds, *devToAtlasOffset);
        fillRule = GrFillRule::kNonzero;
    }

    if (GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType()) {
        this->recordStencilResolveInstance(*devIBounds, *devToAtlasOffset, fillRule);
    }

    return &fRenderedAtlasStack.current();
}

const GrCCAtlas* GrCCPerFlushResources::renderDeviceSpacePathInAtlas(
        const SkIRect& clipIBounds, const SkPath& devPath, const SkIRect& devPathIBounds,
        GrFillRule fillRule, SkIVector* devToAtlasOffset) {
    SkASSERT(this->isMapped());

    if (devPath.isEmpty()) {
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }

    GrScissorTest enableScissorInAtlas;
    SkIRect clippedPathIBounds;
    if (clipIBounds.contains(devPathIBounds)) {
        clippedPathIBounds = devPathIBounds;
        enableScissorInAtlas = GrScissorTest::kDisabled;
    } else if (clippedPathIBounds.intersect(clipIBounds, devPathIBounds)) {
        enableScissorInAtlas = GrScissorTest::kEnabled;
    } else {
        // The clip and path bounds do not intersect. Draw nothing.
        SkDEBUGCODE(--fEndStencilResolveInstance);
        return nullptr;
    }

    this->placeRenderedPathInAtlas(clippedPathIBounds, enableScissorInAtlas, devToAtlasOffset);
    fFiller.parseDeviceSpaceFill(devPath, SkPathPriv::PointData(devPath), enableScissorInAtlas,
                                 clippedPathIBounds, *devToAtlasOffset);

    // In MSAA mode we also record an internal draw instance that will be used to resolve stencil
    // winding values to coverage when the atlas is generated.
    if (GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType()) {
        this->recordStencilResolveInstance(clippedPathIBounds, *devToAtlasOffset, fillRule);
    }

    return &fRenderedAtlasStack.current();
}

void GrCCPerFlushResources::placeRenderedPathInAtlas(
        const SkIRect& clippedPathIBounds, GrScissorTest scissorTest, SkIVector* devToAtlasOffset) {
    if (GrCCAtlas* retiredAtlas =
                fRenderedAtlasStack.addRect(clippedPathIBounds, devToAtlasOffset)) {
        // We did not fit in the previous coverage count atlas and it was retired. Close the path
        // parser's current batch (which does not yet include the path we just parsed). We will
        // render this batch into the retired atlas during finalize().
        retiredAtlas->setFillBatchID(fFiller.closeCurrentBatch());
        retiredAtlas->setStrokeBatchID(fStroker.closeCurrentBatch());
        retiredAtlas->setEndStencilResolveInstance(fNextStencilResolveInstanceIdx);
    }
}

void GrCCPerFlushResources::recordStencilResolveInstance(
        const SkIRect& clippedPathIBounds, const SkIVector& devToAtlasOffset, GrFillRule fillRule) {
    SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType());
    SkASSERT(fNextStencilResolveInstanceIdx < fEndStencilResolveInstance);

    SkIRect atlasIBounds = clippedPathIBounds.makeOffset(devToAtlasOffset);
    if (GrFillRule::kEvenOdd == fillRule) {
        // Make even/odd fills counterclockwise. The resolve draw uses two-sided stencil, with
        // "nonzero" settings in front and "even/odd" settings in back.
        std::swap(atlasIBounds.fLeft, atlasIBounds.fRight);
    }
    fStencilResolveBuffer[fNextStencilResolveInstanceIdx++] = {
            (int16_t)atlasIBounds.left(), (int16_t)atlasIBounds.top(),
            (int16_t)atlasIBounds.right(), (int16_t)atlasIBounds.bottom()};
}
bool GrCCPerFlushResources::finalize(GrOnFlushResourceProvider* onFlushRP) {
    SkASSERT(this->isMapped());
    SkASSERT(fNextPathInstanceIdx == fEndPathInstance);
    SkASSERT(fNextCopyInstanceIdx == fEndCopyInstance);
    SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample != this->renderedPathCoverageType() ||
             fNextStencilResolveInstanceIdx == fEndStencilResolveInstance);

    fPathInstanceBuffer.unmapBuffer();

    if (fStencilResolveBuffer.hasGpuBuffer()) {
        fStencilResolveBuffer.unmapBuffer();
    }

    if (!fCopyAtlasStack.empty()) {
        fCopyAtlasStack.current().setFillBatchID(fCopyPathRanges.count());
        fCurrCopyAtlasRangesIdx = fCopyPathRanges.count();
    }
    if (!fRenderedAtlasStack.empty()) {
        fRenderedAtlasStack.current().setFillBatchID(fFiller.closeCurrentBatch());
        fRenderedAtlasStack.current().setStrokeBatchID(fStroker.closeCurrentBatch());
        fRenderedAtlasStack.current().setEndStencilResolveInstance(fNextStencilResolveInstanceIdx);
    }

    // Build the GPU buffers to render path coverage counts. (This must not happen until after the
    // final calls to fFiller/fStroker.closeCurrentBatch().)
    if (!fFiller.prepareToDraw(onFlushRP)) {
        return false;
    }
    if (!fStroker.prepareToDraw(onFlushRP)) {
        return false;
    }

    // Draw the copies from coverage count or msaa atlas(es) into 8-bit cached atlas(es).
    int copyRangeIdx = 0;
    int baseCopyInstance = 0;
    for (GrCCAtlas& atlas : fCopyAtlasStack.atlases()) {
        int endCopyRange = atlas.getFillBatchID();
        SkASSERT(endCopyRange > copyRangeIdx);

        auto rtc = atlas.instantiate(onFlushRP);
        for (; copyRangeIdx < endCopyRange; ++copyRangeIdx) {
            const CopyPathRange& copyRange = fCopyPathRanges[copyRangeIdx];
            int endCopyInstance = baseCopyInstance + copyRange.fCount;
            if (rtc) {
                auto op = CopyAtlasOp::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), copyRange.fSrcProxy,
                        baseCopyInstance, endCopyInstance, atlas.drawBounds());
                rtc->addDrawOp(nullptr, std::move(op));
            }
            baseCopyInstance = endCopyInstance;
        }
    }
    SkASSERT(fCopyPathRanges.count() == copyRangeIdx);
    SkASSERT(fNextCopyInstanceIdx == baseCopyInstance);
    SkASSERT(baseCopyInstance == fEndCopyInstance);

    // Render the coverage count atlas(es).
    int baseStencilResolveInstance = 0;
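    // In MSAA mode, each atlas recorded the end of its stencil-resolve instance range when it was
    // retired (or just above for the final atlas); each atlas's range begins where the previous
    // one ended.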
    for (GrCCAtlas& atlas : fRenderedAtlasStack.atlases()) {
        // Copies will be finished by the time we get to rendering new atlases. See if we can
        // recycle any previous invalidated atlas textures instead of creating new ones.
        sk_sp<GrTexture> backingTexture;
        for (sk_sp<GrTexture>& texture : fRecyclableAtlasTextures) {
            if (texture && atlas.currentHeight() == texture->height() &&
                    atlas.currentWidth() == texture->width()) {
                backingTexture = std::exchange(texture, nullptr);
                break;
            }
        }

        if (auto rtc = atlas.instantiate(onFlushRP, std::move(backingTexture))) {
            std::unique_ptr<GrDrawOp> op;
            if (CoverageType::kA8_Multisample == fRenderedAtlasStack.coverageType()) {
                op = GrStencilAtlasOp::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), atlas.getFillBatchID(),
                        atlas.getStrokeBatchID(), baseStencilResolveInstance,
                        atlas.getEndStencilResolveInstance(), atlas.drawBounds());
            } else if (onFlushRP->caps()->shaderCaps()->geometryShaderSupport()) {
                op = RenderAtlasOp<GrGSCoverageProcessor>::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), atlas.getFillBatchID(),
                        atlas.getStrokeBatchID(), atlas.drawBounds());
            } else {
                op = RenderAtlasOp<GrVSCoverageProcessor>::Make(
                        rtc->surfPriv().getContext(), sk_ref_sp(this), atlas.getFillBatchID(),
                        atlas.getStrokeBatchID(), atlas.drawBounds());
            }
            rtc->addDrawOp(nullptr, std::move(op));
            if (rtc->asSurfaceProxy()->requiresManualMSAAResolve()) {
                onFlushRP->addTextureResolveTask(sk_ref_sp(rtc->asTextureProxy()),
                                                 GrSurfaceProxy::ResolveFlags::kMSAA);
            }
        }

        SkASSERT(atlas.getEndStencilResolveInstance() >= baseStencilResolveInstance);
        baseStencilResolveInstance = atlas.getEndStencilResolveInstance();
    }
    SkASSERT(GrCCAtlas::CoverageType::kA8_Multisample != this->renderedPathCoverageType() ||
             baseStencilResolveInstance == fEndStencilResolveInstance);

    return true;
}

void GrCCPerFlushResourceSpecs::cancelCopies() {
    // Convert copies to cached draws.
    fNumCachedPaths += fNumCopiedPaths[kFillIdx] + fNumCopiedPaths[kStrokeIdx];
    fNumCopiedPaths[kFillIdx] = fNumCopiedPaths[kStrokeIdx] = 0;
    fCopyPathStats[kFillIdx] = fCopyPathStats[kStrokeIdx] = GrCCRenderedPathStats();
    fCopyAtlasSpecs = GrCCAtlas::Specs();
}