/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ccpr/GrCoverageCountingPathRenderer.h"

#include "include/pathops/SkPathOps.h"
#include "src/core/SkPathPriv.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrClip.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/ccpr/GrCCClipProcessor.h"
#include "src/gpu/ccpr/GrCCDrawPathsOp.h"
#include "src/gpu/ccpr/GrCCPathCache.h"

using PathInstance = GrCCPathProcessor::Instance;

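// CCPR supports two atlas modes: an fp16 "coverage count" atlas (preferred), and an A8
// multisampled atlas used as a fallback when the coverage-count mode is unavailable.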
bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps, CoverageType* coverageType) {
    const GrShaderCaps& shaderCaps = *caps.shaderCaps();
    GrBackendFormat defaultA8Format = caps.getDefaultBackendFormat(GrColorType::kAlpha_8,
                                                                   GrRenderable::kYes);
    if (caps.driverBlacklistCCPR() || !shaderCaps.integerSupport() ||
        !caps.drawInstancedSupport() || !shaderCaps.floatIs32Bits() ||
        !defaultA8Format.isValid() || // This checks both texturable and renderable
        !caps.halfFloatVertexAttributeSupport()) {
        return false;
    }

    GrBackendFormat defaultAHalfFormat = caps.getDefaultBackendFormat(GrColorType::kAlpha_F16,
                                                                      GrRenderable::kYes);
    if (caps.allowCoverageCounting() &&
        defaultAHalfFormat.isValid()) { // This checks both texturable and renderable
        if (coverageType) {
            *coverageType = CoverageType::kFP16_CoverageCount;
        }
        return true;
    }

    if (!caps.driverBlacklistMSAACCPR() &&
        caps.internalMultisampleCount(defaultA8Format) > 1 &&
        caps.sampleLocationsSupport() &&
        shaderCaps.sampleMaskSupport()) {
        if (coverageType) {
            *coverageType = CoverageType::kA8_Multisample;
        }
        return true;
    }

    return false;
}

sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
        const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
    CoverageType coverageType;
    if (IsSupported(caps, &coverageType)) {
        return sk_sp<GrCoverageCountingPathRenderer>(new GrCoverageCountingPathRenderer(
                coverageType, allowCaching, contextUniqueID));
    }
    return nullptr;
}

GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(
        CoverageType coverageType, AllowCaching allowCaching, uint32_t contextUniqueID)
        : fCoverageType(coverageType) {
    if (AllowCaching::kYes == allowCaching) {
        fPathCache = std::make_unique<GrCCPathCache>(contextUniqueID);
    }
}

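// Returns the pending-path container for the given opsTask, creating it on first use.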
GrCCPerOpsTaskPaths* GrCoverageCountingPathRenderer::lookupPendingPaths(uint32_t opsTaskID) {
    auto it = fPendingPaths.find(opsTaskID);
    if (fPendingPaths.end() == it) {
        sk_sp<GrCCPerOpsTaskPaths> paths = sk_make_sp<GrCCPerOpsTaskPaths>();
        it = fPendingPaths.insert(std::make_pair(opsTaskID, std::move(paths))).first;
    }
    return it->second.get();
}

GrPathRenderer::CanDrawPath GrCoverageCountingPathRenderer::onCanDrawPath(
        const CanDrawPathArgs& args) const {
    const GrShape& shape = *args.fShape;
    // We use "kCoverage", or analytic AA, no matter what the coverage type of our atlas is: Even
    // if the atlas is multisampled, that resolves into analytic coverage before we draw the path
    // to the main canvas.
    if (GrAAType::kCoverage != args.fAAType || shape.style().hasPathEffect() ||
        args.fViewMatrix->hasPerspective() || shape.inverseFilled()) {
        return CanDrawPath::kNo;
    }

    SkPath path;
    shape.asPath(&path);

    const SkStrokeRec& stroke = shape.style().strokeRec();
    switch (stroke.getStyle()) {
        case SkStrokeRec::kFill_Style: {
            SkRect devBounds;
            args.fViewMatrix->mapRect(&devBounds, path.getBounds());

            SkIRect clippedIBounds;
            devBounds.roundOut(&clippedIBounds);
            if (!clippedIBounds.intersect(*args.fClipConservativeBounds)) {
                // The path is completely clipped away. Our code will eventually notice this
                // before doing any real work.
                return CanDrawPath::kYes;
            }

            int64_t numPixels = sk_64_mul(clippedIBounds.height(), clippedIBounds.width());
            if (path.countVerbs() > 1000 && path.countPoints() > numPixels) {
                // This is a complicated path that has more vertices than pixels! Let's let the
                // SW renderer have this one: It will probably be faster, and a bitmap will
                // require less total memory on the GPU than CCPR instance buffers would for the
                // raw path data.
                return CanDrawPath::kNo;
            }

            if (numPixels > 256 * 256) {
                // Large paths can blow up the atlas fast, and they are not ideal for a two-pass
                // rendering algorithm. Give the simpler direct renderers a chance before we
                // commit to drawing it.
                return CanDrawPath::kAsBackup;
            }

            if (args.fShape->hasUnstyledKey() && path.countVerbs() > 50) {
                // Complex paths do better cached in an SDF, if the renderer will accept them.
                return CanDrawPath::kAsBackup;
            }

            return CanDrawPath::kYes;
        }

        case SkStrokeRec::kStroke_Style:
            if (!args.fViewMatrix->isSimilarity()) {
                // The stroker currently only supports rigid-body transforms for the stroke lines
                // themselves. This limitation doesn't affect hairlines since their stroke lines
                // are defined relative to device space.
                return CanDrawPath::kNo;
            }
            // fallthru
        case SkStrokeRec::kHairline_Style: {
            if (CoverageType::kFP16_CoverageCount != fCoverageType) {
                // Stroking is not yet supported in MSAA atlas mode.
                return CanDrawPath::kNo;
            }
            float inflationRadius;
            GetStrokeDevWidth(*args.fViewMatrix, stroke, &inflationRadius);
            if (!(inflationRadius <= kMaxBoundsInflationFromStroke)) {
                // Let extremely wide strokes be converted to fill paths and drawn by the CCPR
                // filler instead. (Cast the logic negatively in order to also catch r=NaN.)
                return CanDrawPath::kNo;
            }
            SkASSERT(!SkScalarIsNaN(inflationRadius));
            if (SkPathPriv::ConicWeightCnt(path)) {
                // The stroker does not support conics yet.
                return CanDrawPath::kNo;
            }
            return CanDrawPath::kYes;
        }

        case SkStrokeRec::kStrokeAndFill_Style:
            return CanDrawPath::kNo;
    }

    SK_ABORT("Invalid stroke style.");
}

bool GrCoverageCountingPathRenderer::onDrawPath(const DrawPathArgs& args) {
    SkASSERT(!fFlushing);

    SkIRect clipIBounds;
    GrRenderTargetContext* rtc = args.fRenderTargetContext;
    args.fClip->getConservativeBounds(rtc->width(), rtc->height(), &clipIBounds, nullptr);

    auto op = GrCCDrawPathsOp::Make(args.fContext, clipIBounds, *args.fViewMatrix, *args.fShape,
                                    std::move(args.fPaint));
    this->recordOp(std::move(op), args);
    return true;
}

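// Hands the op off to the render target context. The callback passed to addDrawOp links the op
// to its owning GrCCPerOpsTaskPaths once the opsTask ID is known.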
void GrCoverageCountingPathRenderer::recordOp(std::unique_ptr<GrCCDrawPathsOp> op,
                                              const DrawPathArgs& args) {
    if (op) {
        auto addToOwningPerOpsTaskPaths = [this](GrOp* op, uint32_t opsTaskID) {
            op->cast<GrCCDrawPathsOp>()->addToOwningPerOpsTaskPaths(
                    sk_ref_sp(this->lookupPendingPaths(opsTaskID)));
        };
        args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op),
                                             addToOwningPerOpsTaskPaths);
    }
}

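// Creates a fragment processor that clips against the given device-space path. The path is
// rendered into an atlas at flush time and sampled by the processor as a coverage mask.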
std::unique_ptr<GrFragmentProcessor> GrCoverageCountingPathRenderer::makeClipProcessor(
        uint32_t opsTaskID, const SkPath& deviceSpacePath, const SkIRect& accessRect,
        const GrCaps& caps) {
    SkASSERT(!fFlushing);

    uint32_t key = deviceSpacePath.getGenerationID();
    if (CoverageType::kA8_Multisample == fCoverageType) {
        // We only need to consider fill rule in MSAA mode. In coverage count mode, Even/Odd and
        // Nonzero both reference the same coverage count mask.
        key = (key << 1) | (uint32_t)GrFillRuleForSkPath(deviceSpacePath);
    }
    GrCCClipPath& clipPath = this->lookupPendingPaths(opsTaskID)->fClipPaths[key];
    if (!clipPath.isInitialized()) {
        // This ClipPath was just created during lookup. Initialize it.
        const SkRect& pathDevBounds = deviceSpacePath.getBounds();
        if (std::max(pathDevBounds.height(), pathDevBounds.width()) > kPathCropThreshold) {
            // The path is too large. Crop it, or analytic AA can run out of fp32 precision.
            SkPath croppedPath;
            int maxRTSize = caps.maxRenderTargetSize();
            CropPath(deviceSpacePath, SkIRect::MakeWH(maxRTSize, maxRTSize), &croppedPath);
            clipPath.init(croppedPath, accessRect, fCoverageType, caps);
        } else {
            clipPath.init(deviceSpacePath, accessRect, fCoverageType, caps);
        }
    } else {
        clipPath.addAccess(accessRect);
    }

    auto isCoverageCount = GrCCClipProcessor::IsCoverageCount(
            CoverageType::kFP16_CoverageCount == fCoverageType);
    auto mustCheckBounds = GrCCClipProcessor::MustCheckBounds(
            !clipPath.pathDevIBounds().contains(accessRect));
    return std::make_unique<GrCCClipProcessor>(caps, &clipPath, isCoverageCount, mustCheckBounds);
}

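// Called at the start of a flush. Gathers all the paths (draws and clips) recorded for the
// flushing opsTasks, lays them out into per-flush atlases, and renders those atlases so the
// draw ops and clip FPs can sample them.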
void GrCoverageCountingPathRenderer::preFlush(
        GrOnFlushResourceProvider* onFlushRP, const uint32_t* opsTaskIDs, int numOpsTaskIDs) {
    using DoCopiesToA8Coverage = GrCCDrawPathsOp::DoCopiesToA8Coverage;
    SkASSERT(!fFlushing);
    SkASSERT(fFlushingPaths.empty());
    SkDEBUGCODE(fFlushing = true);

    if (fPathCache) {
        fPathCache->doPreFlushProcessing();
    }

    if (fPendingPaths.empty()) {
        return;  // Nothing to draw.
    }

    GrCCPerFlushResourceSpecs specs;
    int maxPreferredRTSize = onFlushRP->caps()->maxPreferredRenderTargetSize();
    specs.fCopyAtlasSpecs.fMaxPreferredTextureSize = std::min(2048, maxPreferredRTSize);
    SkASSERT(0 == specs.fCopyAtlasSpecs.fMinTextureSize);
    specs.fRenderedAtlasSpecs.fMaxPreferredTextureSize = maxPreferredRTSize;
    specs.fRenderedAtlasSpecs.fMinTextureSize = std::min(512, maxPreferredRTSize);

    // Move the per-opsTask paths that are about to be flushed from fPendingPaths to
    // fFlushingPaths, and count them up so we can preallocate buffers.
    fFlushingPaths.reserve(numOpsTaskIDs);
    for (int i = 0; i < numOpsTaskIDs; ++i) {
        auto iter = fPendingPaths.find(opsTaskIDs[i]);
        if (fPendingPaths.end() == iter) {
            continue;  // No paths on this opsTask.
        }

        fFlushingPaths.push_back(std::move(iter->second));
        fPendingPaths.erase(iter);

        for (GrCCDrawPathsOp* op : fFlushingPaths.back()->fDrawOps) {
            op->accountForOwnPaths(fPathCache.get(), onFlushRP, &specs);
        }
        for (const auto& clipsIter : fFlushingPaths.back()->fClipPaths) {
            clipsIter.second.accountForOwnPath(&specs);
        }
    }

    if (specs.isEmpty()) {
        return;  // Nothing to draw.
    }

    // Determine if there are enough reusable paths from last flush for it to be worth our time to
    // copy them to cached atlas(es).
    int numCopies = specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kFillIdx] +
                    specs.fNumCopiedPaths[GrCCPerFlushResourceSpecs::kStrokeIdx];
    auto doCopies = DoCopiesToA8Coverage(numCopies > 100 ||
                                         specs.fCopyAtlasSpecs.fApproxNumPixels > 256 * 256);
    if (numCopies && DoCopiesToA8Coverage::kNo == doCopies) {
        specs.cancelCopies();
    }

    auto resources = sk_make_sp<GrCCPerFlushResources>(onFlushRP, fCoverageType, specs);
    if (!resources->isMapped()) {
        return;  // Some allocation failed.
    }

    // Lay out the atlas(es) and parse paths.
    for (const auto& flushingPaths : fFlushingPaths) {
        for (GrCCDrawPathsOp* op : flushingPaths->fDrawOps) {
            op->setupResources(fPathCache.get(), onFlushRP, resources.get(), doCopies);
        }
        for (auto& clipsIter : flushingPaths->fClipPaths) {
            clipsIter.second.renderPathInAtlas(resources.get(), onFlushRP);
        }
    }

    if (fPathCache) {
        // Purge invalidated textures from previous atlases *before* calling finalize(). That way,
        // the underlying texture objects can be freed up and reused for the next atlases.
        fPathCache->purgeInvalidatedAtlasTextures(onFlushRP);
    }

    // Allocate resources and then render the atlas(es).
    if (!resources->finalize(onFlushRP)) {
        return;
    }

    // Commit the flushing paths to the resources once they have been successfully completed.
    for (auto& flushingPaths : fFlushingPaths) {
        SkASSERT(!flushingPaths->fFlushResources);
        flushingPaths->fFlushResources = resources;
    }
}

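// Called after the flush has completed. Drops the references to this flush's resources and
// resets state for the next flush.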
void GrCoverageCountingPathRenderer::postFlush(GrDeferredUploadToken, const uint32_t* opsTaskIDs,
                                               int numOpsTaskIDs) {
    SkASSERT(fFlushing);

    if (!fFlushingPaths.empty()) {
        // In DDL mode these aren't guaranteed to be deleted, so we must clear out the per-flush
        // resources manually.
        for (auto& flushingPaths : fFlushingPaths) {
            flushingPaths->fFlushResources = nullptr;
        }

        // We wait to erase these until after flush, once Ops and FPs are done accessing their
        // data.
        fFlushingPaths.reset();
    }

    SkDEBUGCODE(fFlushing = false);
}

void GrCoverageCountingPathRenderer::purgeCacheEntriesOlderThan(
        GrProxyProvider* proxyProvider, const GrStdSteadyClock::time_point& purgeTime) {
    if (fPathCache) {
        fPathCache->purgeEntriesOlderThan(proxyProvider, purgeTime);
    }
}

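// Intersects 'path' with 'cropbox' and writes the result to 'out'. Used to keep oversized paths
// within bounds that fp32 analytic AA can handle.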
void GrCoverageCountingPathRenderer::CropPath(const SkPath& path, const SkIRect& cropbox,
                                              SkPath* out) {
    SkPath cropboxPath;
    cropboxPath.addRect(SkRect::Make(cropbox));
    if (!Op(cropboxPath, path, kIntersect_SkPathOp, out)) {
        // This can fail if the PathOps encounter NaN or infinities.
        out->reset();
    }
    out->setIsVolatile(true);
}

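// Returns the stroke width transformed into device space (hairlines report a width of 1). If
// 'inflationRadius' is non-null, also returns the corresponding bounds inflation radius.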
float GrCoverageCountingPathRenderer::GetStrokeDevWidth(const SkMatrix& m,
                                                        const SkStrokeRec& stroke,
                                                        float* inflationRadius) {
    float strokeDevWidth;
    if (stroke.isHairlineStyle()) {
        strokeDevWidth = 1;
    } else {
        SkASSERT(SkStrokeRec::kStroke_Style == stroke.getStyle());
        SkASSERT(m.isSimilarity());  // Otherwise matrixScaleFactor = m.getMaxScale().
        float matrixScaleFactor = SkVector::Length(m.getScaleX(), m.getSkewY());
        strokeDevWidth = stroke.getWidth() * matrixScaleFactor;
    }
    if (inflationRadius) {
        // Inflate for a minimum stroke width of 1. In some cases when the stroke is less than 1px
        // wide, we may inflate it to 1px and instead reduce the opacity.
        *inflationRadius = SkStrokeRec::GetInflationRadius(
                stroke.getJoin(), stroke.getMiter(), stroke.getCap(), std::max(strokeDevWidth, 1.f));
    }
    return strokeDevWidth;
}