/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrSoftwarePathRenderer.h"

#include "include/private/SkSemaphore.h"
#include "src/core/SkTaskGroup.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrClip.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrDeferredProxyUploader.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContextPriv.h"
#include "src/gpu/GrSWMaskHelper.h"
#include "src/gpu/GrSurfaceContextPriv.h"
#include "src/gpu/SkGr.h"
#include "src/gpu/effects/GrTextureEffect.h"  // needed for GrTextureEffect::Make below
#include "src/gpu/geometry/GrShape.h"
#include "src/gpu/ops/GrDrawOp.h"

////////////////////////////////////////////////////////////////////////////////
GrPathRenderer::CanDrawPath
GrSoftwarePathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    // Pass on any style that applies. The caller will apply the style if a suitable renderer is
    // not found and try again with the new GrShape.
    if (!args.fShape->style().applies() && SkToBool(fProxyProvider) &&
        (args.fAAType == GrAAType::kCoverage || args.fAAType == GrAAType::kNone)) {
        // This is the fallback renderer for when a path is too complicated for the GPU ones.
        return CanDrawPath::kAsBackup;
    }
    return CanDrawPath::kNo;
}

////////////////////////////////////////////////////////////////////////////////
static bool get_unclipped_shape_dev_bounds(const GrShape& shape, const SkMatrix& matrix,
                                           SkIRect* devBounds) {
    SkRect shapeBounds = shape.styledBounds();
    if (shapeBounds.isEmpty()) {
        return false;
    }
    SkRect shapeDevBounds;
    matrix.mapRect(&shapeDevBounds, shapeBounds);
    // Even though these are "unclipped" bounds we still clip to the int32_t range.
    // This is the largest int32_t that is representable exactly as a float. The next 63 larger
    // ints would round down to this value when cast to a float, but who really cares.
    // INT32_MIN is exactly representable.
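    // (2147483520 == 2^31 - 128: single-precision floats in [2^30, 2^31) are spaced 128 apart,
    // so this is the largest float below 2^31, and hence the largest exactly-representable
    // int32_t.)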
    static constexpr int32_t kMaxInt = 2147483520;
    if (!shapeDevBounds.intersect(SkRect::MakeLTRB(INT32_MIN, INT32_MIN, kMaxInt, kMaxInt))) {
        return false;
    }
    // Make sure that the resulting SkIRect can have representable width and height
    if (SkScalarRoundToInt(shapeDevBounds.width()) > kMaxInt ||
        SkScalarRoundToInt(shapeDevBounds.height()) > kMaxInt) {
        return false;
    }
    shapeDevBounds.roundOut(devBounds);
    return true;
}

// Gets the shape bounds, the clip bounds, and the intersection (if any). Returns false if there
// is no intersection.
bool GrSoftwarePathRenderer::GetShapeAndClipBounds(GrRenderTargetContext* renderTargetContext,
                                                   const GrClip& clip,
                                                   const GrShape& shape,
                                                   const SkMatrix& matrix,
                                                   SkIRect* unclippedDevShapeBounds,
                                                   SkIRect* clippedDevShapeBounds,
                                                   SkIRect* devClipBounds) {
    // compute bounds as intersection of rt size, clip, and path
    clip.getConservativeBounds(renderTargetContext->width(),
                               renderTargetContext->height(),
                               devClipBounds);

    if (!get_unclipped_shape_dev_bounds(shape, matrix, unclippedDevShapeBounds)) {
        *unclippedDevShapeBounds = SkIRect::MakeEmpty();
        *clippedDevShapeBounds = SkIRect::MakeEmpty();
        return false;
    }
    if (!clippedDevShapeBounds->intersect(*devClipBounds, *unclippedDevShapeBounds)) {
        *clippedDevShapeBounds = SkIRect::MakeEmpty();
        return false;
    }
    return true;
}

////////////////////////////////////////////////////////////////////////////////

void GrSoftwarePathRenderer::DrawNonAARect(GrRenderTargetContext* renderTargetContext,
                                           GrPaint&& paint,
                                           const GrUserStencilSettings& userStencilSettings,
                                           const GrClip& clip,
                                           const SkMatrix& viewMatrix,
                                           const SkRect& rect,
                                           const SkMatrix& localMatrix) {
    renderTargetContext->priv().stencilRect(clip, &userStencilSettings, std::move(paint), GrAA::kNo,
                                            viewMatrix, rect, &localMatrix);
}

void GrSoftwarePathRenderer::DrawAroundInvPath(GrRenderTargetContext* renderTargetContext,
                                               GrPaint&& paint,
                                               const GrUserStencilSettings& userStencilSettings,
                                               const GrClip& clip,
                                               const SkMatrix& viewMatrix,
                                               const SkIRect& devClipBounds,
                                               const SkIRect& devPathBounds) {
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }

    SkRect rect;
    if (devClipBounds.fTop < devPathBounds.fTop) {
        rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devClipBounds.fTop),
                     SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devPathBounds.fTop));
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fLeft < devPathBounds.fLeft) {
        rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devPathBounds.fTop),
                     SkIntToScalar(devPathBounds.fLeft), SkIntToScalar(devPathBounds.fBottom));
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fRight > devPathBounds.fRight) {
        rect.setLTRB(SkIntToScalar(devPathBounds.fRight), SkIntToScalar(devPathBounds.fTop),
                     SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devPathBounds.fBottom));
        DrawNonAARect(renderTargetContext, GrPaint::Clone(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
    if (devClipBounds.fBottom > devPathBounds.fBottom) {
        rect.setLTRB(SkIntToScalar(devClipBounds.fLeft), SkIntToScalar(devPathBounds.fBottom),
                     SkIntToScalar(devClipBounds.fRight), SkIntToScalar(devClipBounds.fBottom));
        DrawNonAARect(renderTargetContext, std::move(paint), userStencilSettings, clip,
                      SkMatrix::I(), rect, invert);
    }
}

void GrSoftwarePathRenderer::DrawToTargetWithShapeMask(
        GrSurfaceProxyView view,
        GrRenderTargetContext* renderTargetContext,
        GrPaint&& paint,
        const GrUserStencilSettings& userStencilSettings,
        const GrClip& clip,
        const SkMatrix& viewMatrix,
        const SkIPoint& textureOriginInDeviceSpace,
        const SkIRect& deviceSpaceRectToDraw) {
    SkMatrix invert;
    if (!viewMatrix.invert(&invert)) {
        return;
    }

    SkRect dstRect = SkRect::Make(deviceSpaceRectToDraw);

    // We use device coords to compute the texture coordinates. We take the device coords and
    // apply a translation so that the top-left of the device bounds maps to 0,0; GrTextureEffect
    // then handles the scale to normalized texture coords internally.
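    // (For example, the device-space point at textureOriginInDeviceSpace lands on texel (0, 0)
    // of the mask.)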
    SkMatrix maskMatrix = SkMatrix::MakeTrans(SkIntToScalar(-textureOriginInDeviceSpace.fX),
                                              SkIntToScalar(-textureOriginInDeviceSpace.fY));
    maskMatrix.preConcat(viewMatrix);

    paint.addCoverageFragmentProcessor(GrTextureEffect::Make(
            std::move(view), kPremul_SkAlphaType, maskMatrix, GrSamplerState::Filter::kNearest));
    DrawNonAARect(renderTargetContext, std::move(paint), userStencilSettings, clip, SkMatrix::I(),
                  dstRect, invert);
}

static GrSurfaceProxyView make_deferred_mask_texture_view(GrRecordingContext* context,
                                                          SkBackingFit fit,
                                                          SkISize dimensions) {
    GrProxyProvider* proxyProvider = context->priv().proxyProvider();
    const GrCaps* caps = context->priv().caps();

    const GrBackendFormat format = caps->getDefaultBackendFormat(GrColorType::kAlpha_8,
                                                                 GrRenderable::kNo);

    GrSwizzle swizzle = caps->getReadSwizzle(format, GrColorType::kAlpha_8);

    auto proxy =
            proxyProvider->createProxy(format, dimensions, GrRenderable::kNo, 1, GrMipMapped::kNo,
                                       fit, SkBudgeted::kYes, GrProtected::kNo);
    return {std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle};
}

namespace {

/**
 * Payload class for use with GrTDeferredProxyUploader. The software path renderer only draws
 * a single path into the mask texture. This stores all of the information needed by the worker
 * thread's call to drawShape (see below, in onDrawPath).
 */
class SoftwarePathData {
public:
    SoftwarePathData(const SkIRect& maskBounds, const SkMatrix& viewMatrix, const GrShape& shape,
                     GrAA aa)
            : fMaskBounds(maskBounds)
            , fViewMatrix(viewMatrix)
            , fShape(shape)
            , fAA(aa) {}

    const SkIRect& getMaskBounds() const { return fMaskBounds; }
    const SkMatrix* getViewMatrix() const { return &fViewMatrix; }
    const GrShape& getShape() const { return fShape; }
    GrAA getAA() const { return fAA; }

private:
    SkIRect fMaskBounds;
    SkMatrix fViewMatrix;
    GrShape fShape;
    GrAA fAA;
};

}  // namespace

////////////////////////////////////////////////////////////////////////////////
// return true on success; false on failure
bool GrSoftwarePathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
                              "GrSoftwarePathRenderer::onDrawPath");
    if (!fProxyProvider) {
        return false;
    }

    SkASSERT(!args.fShape->style().applies());
    // We really need to know if the shape will be inverse filled or not. If the path is a
    // hairline, ignore inverse fill.
    bool inverseFilled = args.fShape->inverseFilled() &&
                         !IsStrokeHairlineOrEquivalent(args.fShape->style(),
                                                       *args.fViewMatrix, nullptr);

    SkIRect unclippedDevShapeBounds, clippedDevShapeBounds, devClipBounds;
    // To prevent overloading the cache with entries during animations we limit the cache of masks
    // to cases where the matrix preserves axis alignment.
    bool useCache = fAllowCaching && !inverseFilled && args.fViewMatrix->preservesAxisAlignment() &&
                    args.fShape->hasUnstyledKey() && (GrAAType::kCoverage == args.fAAType);

    if (!GetShapeAndClipBounds(args.fRenderTargetContext,
                               *args.fClip, *args.fShape,
                               *args.fViewMatrix, &unclippedDevShapeBounds,
                               &clippedDevShapeBounds,
                               &devClipBounds)) {
        if (inverseFilled) {
            DrawAroundInvPath(args.fRenderTargetContext, std::move(args.fPaint),
                              *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix,
                              devClipBounds, unclippedDevShapeBounds);
        }
        return true;
    }

    const SkIRect* boundsForMask = &clippedDevShapeBounds;
    if (useCache) {
        // Use the cache only if >50% of the path is visible.
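        // (Equivalently: keep caching only when clippedArea * 2 >= unclippedArea, so at least
        // half of the cached mask's area would actually be drawn.)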
        int unclippedWidth = unclippedDevShapeBounds.width();
        int unclippedHeight = unclippedDevShapeBounds.height();
        int64_t unclippedArea = sk_64_mul(unclippedWidth, unclippedHeight);
        int64_t clippedArea = sk_64_mul(clippedDevShapeBounds.width(),
                                        clippedDevShapeBounds.height());
        int maxTextureSize = args.fRenderTargetContext->caps()->maxTextureSize();
        if (unclippedArea > 2 * clippedArea || unclippedWidth > maxTextureSize ||
            unclippedHeight > maxTextureSize) {
            useCache = false;
        } else {
            boundsForMask = &unclippedDevShapeBounds;
        }
    }

    GrUniqueKey maskKey;
    if (useCache) {
        // We require the upper left 2x2 of the matrix to match exactly for a cache hit.
        SkScalar sx = args.fViewMatrix->get(SkMatrix::kMScaleX);
        SkScalar sy = args.fViewMatrix->get(SkMatrix::kMScaleY);
        SkScalar kx = args.fViewMatrix->get(SkMatrix::kMSkewX);
        SkScalar ky = args.fViewMatrix->get(SkMatrix::kMSkewY);
        static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
        GrUniqueKey::Builder builder(&maskKey, kDomain, 5 + args.fShape->unstyledKeySize(),
                                     "SW Path Mask");
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
        // Fractional translate does not affect caching on Android. This improves the cache hit
        // ratio and speed, and it matches HWUI's behavior, which does not consider the matrix
        // at all when caching paths.
        SkFixed fracX = 0;
        SkFixed fracY = 0;
#else
        SkScalar tx = args.fViewMatrix->get(SkMatrix::kMTransX);
        SkScalar ty = args.fViewMatrix->get(SkMatrix::kMTransY);
        // Allow 8 bits each in x and y of subpixel positioning.
        SkFixed fracX = SkScalarToFixed(SkScalarFraction(tx)) & 0x0000FF00;
        SkFixed fracY = SkScalarToFixed(SkScalarFraction(ty)) & 0x0000FF00;
#endif
        builder[0] = SkFloat2Bits(sx);
        builder[1] = SkFloat2Bits(sy);
        builder[2] = SkFloat2Bits(kx);
        builder[3] = SkFloat2Bits(ky);
        // Distinguish between hairline and filled paths. For hairlines, we also need to include
        // the cap. (SW grows hairlines by 0.5 pixel with round and square caps). Note that
        // stroke-and-fill of hairlines is turned into pure fill by SkStrokeRec, so this covers
        // all cases we might see.
        uint32_t styleBits = args.fShape->style().isSimpleHairline() ?
                             ((args.fShape->style().strokeRec().getCap() << 1) | 1) : 0;
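        // Pack builder[4] as: fracY's high byte shifted down into bits 0..7, fracX's high byte
        // kept in bits 8..15 (it is already masked to 0x0000FF00), and styleBits in bits 16+.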
        builder[4] = fracX | (fracY >> 8) | (styleBits << 16);
        args.fShape->writeUnstyledKey(&builder[5]);
    }

    GrSurfaceProxyView view;
    if (useCache) {
        auto proxy = fProxyProvider->findOrCreateProxyByUniqueKey(maskKey);
        if (proxy) {
            GrSwizzle swizzle = args.fRenderTargetContext->caps()->getReadSwizzle(
                    proxy->backendFormat(), GrColorType::kAlpha_8);
            view = {std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle};
        }
    }
    if (!view) {
        SkBackingFit fit = useCache ? SkBackingFit::kExact : SkBackingFit::kApprox;
        GrAA aa = GrAA(GrAAType::kCoverage == args.fAAType);

        SkTaskGroup* taskGroup = nullptr;
        if (auto direct = args.fContext->priv().asDirectContext()) {
            taskGroup = direct->priv().getTaskGroup();
        }

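        // With a task group available, rasterize the mask on a worker thread: the lambda below
        // draws into the deferred uploader's pixel buffer and signals when finished, and those
        // pixels are uploaded to the proxy later, when it is instantiated at flush time.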
        if (taskGroup) {
            view = make_deferred_mask_texture_view(args.fContext, fit, boundsForMask->size());
            if (!view) {
                return false;
            }

            auto uploader = std::make_unique<GrTDeferredProxyUploader<SoftwarePathData>>(
                    *boundsForMask, *args.fViewMatrix, *args.fShape, aa);
            GrTDeferredProxyUploader<SoftwarePathData>* uploaderRaw = uploader.get();

            auto drawAndUploadMask = [uploaderRaw] {
                TRACE_EVENT0("skia.gpu", "Threaded SW Mask Render");
                GrSWMaskHelper helper(uploaderRaw->getPixels());
                if (helper.init(uploaderRaw->data().getMaskBounds())) {
                    helper.drawShape(uploaderRaw->data().getShape(),
                                     *uploaderRaw->data().getViewMatrix(),
                                     SkRegion::kReplace_Op, uploaderRaw->data().getAA(), 0xFF);
                } else {
                    SkDEBUGFAIL("Unable to allocate SW mask.");
                }
                uploaderRaw->signalAndFreeData();
            };
            taskGroup->add(std::move(drawAndUploadMask));
            view.asTextureProxy()->texPriv().setDeferredUploader(std::move(uploader));
        } else {
            GrSWMaskHelper helper;
            if (!helper.init(*boundsForMask)) {
                return false;
            }
            helper.drawShape(*args.fShape, *args.fViewMatrix, SkRegion::kReplace_Op, aa, 0xFF);
            view = helper.toTextureView(args.fContext, fit);
        }

        if (!view) {
            return false;
        }
        if (useCache) {
            SkASSERT(view.origin() == kTopLeft_GrSurfaceOrigin);

            // We will add an invalidator to the path so that if the path goes away we will
            // delete or recycle the mask texture.
            auto listener = GrMakeUniqueKeyInvalidationListener(&maskKey,
                                                                args.fContext->priv().contextID());
            fProxyProvider->assignUniqueKeyToProxy(maskKey, view.asTextureProxy());
            args.fShape->addGenIDChangeListener(std::move(listener));
        }
    }
    SkASSERT(view);
    if (inverseFilled) {
        DrawAroundInvPath(args.fRenderTargetContext, GrPaint::Clone(args.fPaint),
                          *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix, devClipBounds,
                          unclippedDevShapeBounds);
    }
    DrawToTargetWithShapeMask(std::move(view), args.fRenderTargetContext, std::move(args.fPaint),
                              *args.fUserStencilSettings, *args.fClip, *args.fViewMatrix,
                              SkIPoint{boundsForMask->fLeft, boundsForMask->fTop}, *boundsForMask);

    return true;
}