/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

8 | #include "src/gpu/GrSoftwarePathRenderer.h" |
9 | |
10 | #include "include/gpu/GrDirectContext.h" |
11 | #include "include/private/SkSemaphore.h" |
12 | #include "src/core/SkTaskGroup.h" |
13 | #include "src/core/SkTraceEvent.h" |
14 | #include "src/gpu/GrAuditTrail.h" |
15 | #include "src/gpu/GrCaps.h" |
16 | #include "src/gpu/GrClip.h" |
17 | #include "src/gpu/GrContextPriv.h" |
18 | #include "src/gpu/GrDeferredProxyUploader.h" |
19 | #include "src/gpu/GrGpuResourcePriv.h" |
20 | #include "src/gpu/GrOpFlushState.h" |
21 | #include "src/gpu/GrProxyProvider.h" |
22 | #include "src/gpu/GrRecordingContextPriv.h" |
23 | #include "src/gpu/GrRenderTargetContextPriv.h" |
24 | #include "src/gpu/GrSWMaskHelper.h" |
25 | #include "src/gpu/GrSurfaceContextPriv.h" |
26 | #include "src/gpu/SkGr.h" |
27 | #include "src/gpu/geometry/GrStyledShape.h" |
28 | #include "src/gpu/ops/GrDrawOp.h" |

////////////////////////////////////////////////////////////////////////////////
GrPathRenderer::CanDrawPath
GrSoftwarePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    // Pass on any style that applies. The caller will apply the style if a suitable renderer is
    // not found and try again with the new GrStyledShape.
    if (!args.fShape->style().applies() && SkToBool(fProxyProvider) &&
        (args.fAAType == GrAAType::kCoverage || args.fAAType == GrAAType::kNone)) {
        // This is the fallback renderer for when a path is too complicated for the GPU ones.
        return CanDrawPath::kAsBackup;
    }
    return CanDrawPath::kNo;
}

////////////////////////////////////////////////////////////////////////////////
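// Computes the device-space integer bounds of the styled shape under the given matrix. Returns
// false if the shape's bounds are empty or the mapped bounds cannot be represented as an SkIRect.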
static bool get_unclipped_shape_dev_bounds(const GrStyledShape& shape, const SkMatrix& matrix,
                                           SkIRect* devBounds) {
    SkRect shapeBounds = shape.styledBounds();
    if (shapeBounds.isEmpty()) {
        return false;
    }
    SkRect shapeDevBounds;
    matrix.mapRect(&shapeDevBounds, shapeBounds);
    // Even though these are "unclipped" bounds we still clip to the int32_t range.
    // This is the largest int32_t that is representable exactly as a float. The next 63 larger
    // ints would round down to this value when cast to a float, but who really cares.
    // INT32_MIN is exactly representable.
    static constexpr int32_t kMaxInt = 2147483520;
    if (!shapeDevBounds.intersect(SkRect::MakeLTRB(INT32_MIN, INT32_MIN, kMaxInt, kMaxInt))) {
        return false;
    }
    // Make sure that the resulting SkIRect can have representable width and height.
    if (SkScalarRoundToInt(shapeDevBounds.width()) > kMaxInt ||
        SkScalarRoundToInt(shapeDevBounds.height()) > kMaxInt) {
        return false;
    }
    shapeDevBounds.roundOut(devBounds);
    return true;
}

// Gets the shape bounds, the clip bounds, and the intersection (if any). Returns false if there
// is no intersection.
bool GrSoftwarePathRenderer::GetShapeAndClipBounds(GrRenderTargetContext* renderTargetContext,
                                                   const GrClip* clip,
                                                   const GrStyledShape& shape,
                                                   const SkMatrix& matrix,
                                                   SkIRect* unclippedDevShapeBounds,
                                                   SkIRect* clippedDevShapeBounds,
                                                   SkIRect* devClipBounds) {
    // compute bounds as intersection of rt size, clip, and path
    *devClipBounds = clip ? clip->getConservativeBounds()
                          : SkIRect::MakeWH(renderTargetContext->width(),
                                            renderTargetContext->height());

    if (!get_unclipped_shape_dev_bounds(shape, matrix, unclippedDevShapeBounds)) {
        *unclippedDevShapeBounds = SkIRect::MakeEmpty();
        *clippedDevShapeBounds = SkIRect::MakeEmpty();
        return false;
    }
    if (!clippedDevShapeBounds->intersect(*devClipBounds, *unclippedDevShapeBounds)) {
        *clippedDevShapeBounds = SkIRect::MakeEmpty();
        return false;
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

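// Draws a non-antialiased rect with the given paint and stencil settings. The localMatrix maps
// the rect back into the shape's source space so any local coords in the paint come out right.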
void GrSoftwarePathRenderer::DrawNonAARect(GrRenderTargetContext* renderTargetContext,
                                           GrPaint&& paint,
                                           const GrUserStencilSettings& userStencilSettings,
                                           const GrClip* clip,
                                           const SkMatrix& viewMatrix,
                                           const SkRect& rect,
                                           const SkMatrix& localMatrix) {
    renderTargetContext->priv().stencilRect(clip, &userStencilSettings, std::move(paint), GrAA::kNo,
                                            viewMatrix, rect, &localMatrix);
}

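// For inverse fills, paints the region of the clip that lies outside the path's device bounds by
// drawing up to four rects (above, left of, right of, and below the path bounds).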
void GrSoftwarePathRenderer::DrawAroundInvPath(GrRenderTargetContext* renderTargetContext,
                                               GrPaint&& paint,
                                               const GrUserStencilSettings& userStencilSettings,
                                               const GrClip* clip,
                                               const SkMatrix& viewMatrix,
                                               const SkIRect& devClipBounds,
                                               const SkIRect& devPathBounds) {
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }

    SkRect rect;
    if (devClipBounds.fTop < devPathBounds.fTop) {
        rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devClipBounds.fTop),
                     SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devPathBounds.fTop));
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fLeft < devPathBounds.fLeft) {
        rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devPathBounds.fTop),
                     SkIntToScalar(devPathBounds.fLeft), SkIntToScalar(devPathBounds.fBottom));
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fRight > devPathBounds.fRight) {
        rect.setLTRB(SkIntToScalar(devPathBounds.fRight), SkIntToScalar(devPathBounds.fTop),
                     SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devPathBounds.fBottom));
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fBottom > devPathBounds.fBottom) {
        rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devPathBounds.fBottom),
                     SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devClipBounds.fBottom));
        DrawNonAARect(renderTargetContext, std::move(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
}

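// Draws the mask texture over deviceSpaceRectToDraw. The net effect of the matrices below is
// that a device-space point P samples the mask at texel P - textureOriginInDeviceSpace.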
void GrSoftwarePathRenderer::DrawToTargetWithShapeMask(
        GrSurfaceProxyView view,
        GrRenderTargetContext* renderTargetContext,
        GrPaint&& paint,
        const GrUserStencilSettings& userStencilSettings,
        const GrClip* clip,
        const SkMatrix& viewMatrix,
        const SkIPoint& textureOriginInDeviceSpace,
        const SkIRect& deviceSpaceRectToDraw) {
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }

    SkRect dstRect = SkRect::Make(deviceSpaceRectToDraw);

    // We use device coords to compute the texture coordinates. We take the device coords and
    // apply a translation so that the top-left of the device bounds maps to 0,0, and then a
    // scaling matrix to normalized coords.
    SkMatrix maskMatrix = SkMatrix::Translate(SkIntToScalar(-textureOriginInDeviceSpace.fX),
                                              SkIntToScalar(-textureOriginInDeviceSpace.fY));
    maskMatrix.preConcat(viewMatrix);

    paint.setCoverageFragmentProcessor(GrTextureEffect::Make(
            std::move(view), kPremul_SkAlphaType, maskMatrix, GrSamplerState::Filter::kNearest));
    DrawNonAARect(renderTargetContext, std::move(paint), userStencilSettings, clip, SkMatrix::I(),
                  dstRect, invert);
}

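// Creates an uninstantiated A8 proxy view sized for the mask. When paired with the deferred
// uploader below (in onDrawPath), a worker thread fills in the pixels before the proxy is
// instantiated at flush time.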
static GrSurfaceProxyView make_deferred_mask_texture_view(GrRecordingContext* context,
                                                          SkBackingFit fit,
                                                          SkISize dimensions) {
    GrProxyProvider* proxyProvider = context->priv().proxyProvider();
    const GrCaps* caps = context->priv().caps();

    const GrBackendFormat format = caps->getDefaultBackendFormat(GrColorType::kAlpha_8,
                                                                 GrRenderable::kNo);

    GrSwizzle swizzle = caps->getReadSwizzle(format, GrColorType::kAlpha_8);

    auto proxy =
            proxyProvider->createProxy(format, dimensions, GrRenderable::kNo, 1, GrMipmapped::kNo,
                                       fit, SkBudgeted::kYes, GrProtected::kNo);
    return {std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle};
}

namespace {

/**
 * Payload class for use with GrTDeferredProxyUploader. The software path renderer only draws
 * a single path into the mask texture. This stores all of the information needed by the worker
 * thread's call to drawShape (see below, in onDrawPath).
 */
class SoftwarePathData {
public:
    SoftwarePathData(const SkIRect& maskBounds, const SkMatrix& viewMatrix,
                     const GrStyledShape& shape, GrAA aa)
            : fMaskBounds(maskBounds)
            , fViewMatrix(viewMatrix)
            , fShape(shape)
            , fAA(aa) {}

    const SkIRect& getMaskBounds() const { return fMaskBounds; }
    const SkMatrix* getViewMatrix() const { return &fViewMatrix; }
    const GrStyledShape& getShape() const { return fShape; }
    GrAA getAA() const { return fAA; }

private:
    SkIRect fMaskBounds;
    SkMatrix fViewMatrix;
    GrStyledShape fShape;
    GrAA fAA;
};

} // namespace

////////////////////////////////////////////////////////////////////////////////
// Returns true on success; false on failure.
bool GrSoftwarePathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
                              "GrSoftwarePathRenderer::onDrawPath");
    if (!fProxyProvider) {
        return false;
    }

    SkASSERT(!args.fShape->style().applies());
    // We really need to know if the shape will be inverse filled or not.
    // If the path is a hairline, ignore inverse fill.
    bool inverseFilled = args.fShape->inverseFilled() &&
                         !IsStrokeHairlineOrEquivalent(args.fShape->style(),
                                                       *args.fViewMatrix, nullptr);

    SkIRect unclippedDevShapeBounds, clippedDevShapeBounds, devClipBounds;
    // To prevent overloading the cache with entries during animations we limit the cache of masks
    // to cases where the matrix preserves axis alignment.
    bool useCache = fAllowCaching && !inverseFilled && args.fViewMatrix->preservesAxisAlignment() &&
                    args.fShape->hasUnstyledKey() && (GrAAType::kCoverage == args.fAAType);

    if (!GetShapeAndClipBounds(args.fRenderTargetContext,
                               args.fClip, *args.fShape,
                               *args.fViewMatrix, &unclippedDevShapeBounds,
                               &clippedDevShapeBounds,
                               &devClipBounds)) {
        if (inverseFilled) {
            DrawAroundInvPath(args.fRenderTargetContext, std::move(args.fPaint),
                              *args.fUserStencilSettings, args.fClip, *args.fViewMatrix,
                              devClipBounds, unclippedDevShapeBounds);
        }
        return true;
    }

    const SkIRect* boundsForMask = &clippedDevShapeBounds;
    if (useCache) {
        // Use the cache only if >50% of the path is visible.
        int unclippedWidth = unclippedDevShapeBounds.width();
        int unclippedHeight = unclippedDevShapeBounds.height();
        int64_t unclippedArea = sk_64_mul(unclippedWidth, unclippedHeight);
        int64_t clippedArea = sk_64_mul(clippedDevShapeBounds.width(),
                                        clippedDevShapeBounds.height());
        int maxTextureSize = args.fRenderTargetContext->caps()->maxTextureSize();
        if (unclippedArea > 2 * clippedArea || unclippedWidth > maxTextureSize ||
            unclippedHeight > maxTextureSize) {
            useCache = false;
        } else {
            boundsForMask = &unclippedDevShapeBounds;
        }
    }

    GrUniqueKey maskKey;
    if (useCache) {
        // We require the upper left 2x2 of the matrix to match exactly for a cache hit.
        SkScalar sx = args.fViewMatrix->get(SkMatrix::kMScaleX);
        SkScalar sy = args.fViewMatrix->get(SkMatrix::kMScaleY);
        SkScalar kx = args.fViewMatrix->get(SkMatrix::kMSkewX);
        SkScalar ky = args.fViewMatrix->get(SkMatrix::kMSkewY);
        static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
        GrUniqueKey::Builder builder(&maskKey, kDomain, 7 + args.fShape->unstyledKeySize(),
                                     "SW Path Mask");
        builder[0] = boundsForMask->width();
        builder[1] = boundsForMask->height();

#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
        // Fractional translate does not affect caching on Android. This is done for better cache
        // hit ratio and speed, but it is matching HWUI behavior, which doesn't consider the matrix
        // at all when caching paths.
        SkFixed fracX = 0;
        SkFixed fracY = 0;
#else
        SkScalar tx = args.fViewMatrix->get(SkMatrix::kMTransX);
        SkScalar ty = args.fViewMatrix->get(SkMatrix::kMTransY);
        // Allow 8 bits each in x and y of subpixel positioning.
        SkFixed fracX = SkScalarToFixed(SkScalarFraction(tx)) & 0x0000FF00;
        SkFixed fracY = SkScalarToFixed(SkScalarFraction(ty)) & 0x0000FF00;
#endif
        builder[2] = SkFloat2Bits(sx);
        builder[3] = SkFloat2Bits(sy);
        builder[4] = SkFloat2Bits(kx);
        builder[5] = SkFloat2Bits(ky);
        // Distinguish between hairline and filled paths. For hairlines, we also need to include
        // the cap. (SW grows hairlines by 0.5 pixel with round and square caps). Note that
        // stroke-and-fill of hairlines is turned into pure fill by SkStrokeRec, so this covers
        // all cases we might see.
        uint32_t styleBits = args.fShape->style().isSimpleHairline() ?
                ((args.fShape->style().strokeRec().getCap() << 1) | 1) : 0;
        builder[6] = fracX | (fracY >> 8) | (styleBits << 16);
        args.fShape->writeUnstyledKey(&builder[7]);
    }

    GrSurfaceProxyView view;
    if (useCache) {
        auto proxy = fProxyProvider->findOrCreateProxyByUniqueKey(maskKey);
        if (proxy) {
            GrSwizzle swizzle = args.fRenderTargetContext->caps()->getReadSwizzle(
                    proxy->backendFormat(), GrColorType::kAlpha_8);
            view = {std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle};
            args.fContext->priv().stats()->incNumPathMasksCacheHits();
        }
    }
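    // No cached mask was found (or caching is disabled), so rasterize one now.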
    if (!view) {
        SkBackingFit fit = useCache ? SkBackingFit::kExact : SkBackingFit::kApprox;
        GrAA aa = GrAA(GrAAType::kCoverage == args.fAAType);

        SkTaskGroup* taskGroup = nullptr;
        if (auto direct = args.fContext->asDirectContext()) {
            taskGroup = direct->priv().getTaskGroup();
        }

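        // With a task group available, rasterize the mask on a worker thread and defer the
        // upload; otherwise rasterize it synchronously on this thread.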
        if (taskGroup) {
            view = make_deferred_mask_texture_view(args.fContext, fit, boundsForMask->size());
            if (!view) {
                return false;
            }

            auto uploader = std::make_unique<GrTDeferredProxyUploader<SoftwarePathData>>(
                    *boundsForMask, *args.fViewMatrix, *args.fShape, aa);
            GrTDeferredProxyUploader<SoftwarePathData>* uploaderRaw = uploader.get();

            auto drawAndUploadMask = [uploaderRaw] {
                TRACE_EVENT0("skia.gpu", "Threaded SW Mask Render");
                GrSWMaskHelper helper(uploaderRaw->getPixels());
                if (helper.init(uploaderRaw->data().getMaskBounds())) {
                    helper.drawShape(uploaderRaw->data().getShape(),
                                     *uploaderRaw->data().getViewMatrix(),
                                     SkRegion::kReplace_Op, uploaderRaw->data().getAA(), 0xFF);
                } else {
                    SkDEBUGFAIL("Unable to allocate SW mask.");
                }
                uploaderRaw->signalAndFreeData();
            };
            taskGroup->add(std::move(drawAndUploadMask));
            view.asTextureProxy()->texPriv().setDeferredUploader(std::move(uploader));
        } else {
            GrSWMaskHelper helper;
            if (!helper.init(*boundsForMask)) {
                return false;
            }
            helper.drawShape(*args.fShape, *args.fViewMatrix, SkRegion::kReplace_Op, aa, 0xFF);
            view = helper.toTextureView(args.fContext, fit);
        }

        if (!view) {
            return false;
        }
        if (useCache) {
            SkASSERT(view.origin() == kTopLeft_GrSurfaceOrigin);

            // We will add an invalidator to the path so that if the path goes away we will
            // delete or recycle the mask texture.
            auto listener = GrMakeUniqueKeyInvalidationListener(&maskKey,
                                                                args.fContext->priv().contextID());
            fProxyProvider->assignUniqueKeyToProxy(maskKey, view.asTextureProxy());
            args.fShape->addGenIDChangeListener(std::move(listener));
        }

        args.fContext->priv().stats()->incNumPathMasksGenerated();
    }
    SkASSERT(view);
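    // For an inverse fill, first cover the parts of the clip that lie outside the mask's bounds,
    // then draw the mask itself.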
    if (inverseFilled) {
        DrawAroundInvPath(args.fRenderTargetContext, GrPaint::Clone(args.fPaint),
                          *args.fUserStencilSettings, args.fClip, *args.fViewMatrix, devClipBounds,
                          unclippedDevShapeBounds);
    }
    DrawToTargetWithShapeMask(std::move(view), args.fRenderTargetContext, std::move(args.fPaint),
                              *args.fUserStencilSettings, args.fClip, *args.fViewMatrix,
                              SkIPoint{boundsForMask->fLeft, boundsForMask->fTop}, *boundsForMask);

    return true;
}