1/*
2 * Copyright 2006 The Android Open Source Project
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "include/core/SkMaskFilter.h"
9#include "include/core/SkRRect.h"
10#include "include/core/SkStrokeRec.h"
11#include "include/core/SkVertices.h"
12#include "src/core/SkBlurMask.h"
13#include "src/core/SkBlurPriv.h"
14#include "src/core/SkGpuBlurUtils.h"
15#include "src/core/SkMaskFilterBase.h"
16#include "src/core/SkRRectPriv.h"
17#include "src/core/SkReadBuffer.h"
18#include "src/core/SkStringUtils.h"
19#include "src/core/SkWriteBuffer.h"
20
21#if SK_SUPPORT_GPU
22#include "include/private/GrRecordingContext.h"
23#include "src/gpu/GrClip.h"
24#include "src/gpu/GrFragmentProcessor.h"
25#include "src/gpu/GrRecordingContextPriv.h"
26#include "src/gpu/GrRenderTargetContext.h"
27#include "src/gpu/GrResourceProvider.h"
28#include "src/gpu/GrShaderCaps.h"
29#include "src/gpu/GrStyle.h"
30#include "src/gpu/GrTextureProxy.h"
31#include "src/gpu/effects/GrTextureEffect.h"
32#include "src/gpu/effects/generated/GrCircleBlurFragmentProcessor.h"
33#include "src/gpu/effects/generated/GrRRectBlurEffect.h"
34#include "src/gpu/effects/generated/GrRectBlurEffect.h"
35#include "src/gpu/geometry/GrShape.h"
36#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
37#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
38#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
39#include "src/gpu/glsl/GrGLSLUniformHandler.h"
40#endif
41
// The concrete mask filter behind SkMaskFilter::MakeBlur. Holds the blur
// style, the sigma in source space, and whether that sigma should be scaled
// by the CTM at draw time (fRespectCTM).
class SkBlurMaskFilterImpl : public SkMaskFilterBase {
public:
    SkBlurMaskFilterImpl(SkScalar sigma, SkBlurStyle, bool respectCTM);

    // overrides from SkMaskFilter
    SkMask::Format getFormat() const override;
    bool filterMask(SkMask* dst, const SkMask& src, const SkMatrix&,
                    SkIPoint* margin) const override;

#if SK_SUPPORT_GPU
    bool canFilterMaskGPU(const GrShape& shape,
                          const SkIRect& devSpaceShapeBounds,
                          const SkIRect& clipBounds,
                          const SkMatrix& ctm,
                          SkIRect* maskRect) const override;
    bool directFilterMaskGPU(GrRecordingContext*,
                             GrRenderTargetContext* renderTargetContext,
                             GrPaint&&,
                             const GrClip&,
                             const SkMatrix& viewMatrix,
                             const GrShape& shape) const override;
    GrSurfaceProxyView filterMaskGPU(GrRecordingContext*,
                                     GrSurfaceProxyView srcView,
                                     GrColorType srcColorType,
                                     SkAlphaType srcAlphaType,
                                     const SkMatrix& ctm,
                                     const SkIRect& maskRect) const override;
#endif

    void computeFastBounds(const SkRect&, SkRect*) const override;
    bool asABlur(BlurRec*) const override;


protected:
    // Nine-patch fast paths for blurred rects/rrects. Either may return
    // kUnimplemented_FilterReturn to fall back to the general filterMask() path.
    FilterReturn filterRectsToNine(const SkRect[], int count, const SkMatrix&,
                                   const SkIRect& clipBounds,
                                   NinePatch*) const override;

    FilterReturn filterRRectToNine(const SkRRect&, const SkMatrix&,
                                   const SkIRect& clipBounds,
                                   NinePatch*) const override;

    // Analytic (closed-form) blurs of a single rect / rrect; cheaper than
    // rasterize-then-box-blur when they apply.
    bool filterRectMask(SkMask* dstM, const SkRect& r, const SkMatrix& matrix,
                        SkIPoint* margin, SkMask::CreateMode createMode) const;
    bool filterRRectMask(SkMask* dstM, const SkRRect& r, const SkMatrix& matrix,
                         SkIPoint* margin, SkMask::CreateMode createMode) const;

    bool ignoreXform() const { return !fRespectCTM; }

private:
    SK_FLATTENABLE_HOOKS(SkBlurMaskFilterImpl)
    // To avoid unseemly allocation requests (esp. for finite platforms like
    // handset) we limit the radius so something manageable. (as opposed to
    // a request like 10,000)
    static const SkScalar kMAX_BLUR_SIGMA;

    SkScalar    fSigma;       // source-space sigma; factory guarantees > 0
    SkBlurStyle fBlurStyle;   // normal / solid / outer / inner
    bool        fRespectCTM;  // if false, fSigma is used unscaled at draw time

    SkBlurMaskFilterImpl(SkReadBuffer&);
    void flatten(SkWriteBuffer&) const override;

    // Device-space sigma: mapped through the CTM (unless ignored) and clamped
    // to kMAX_BLUR_SIGMA.
    SkScalar computeXformedSigma(const SkMatrix& ctm) const {
        SkScalar xformedSigma = this->ignoreXform() ? fSigma : ctm.mapRadius(fSigma);
        return std::min(xformedSigma, kMAX_BLUR_SIGMA);
    }

    friend class SkBlurMaskFilter;

    typedef SkMaskFilter INHERITED;
    friend void sk_register_blur_maskfilter_createproc();
};
115
// Cap on the device-space sigma; see the comment on the declaration above.
const SkScalar SkBlurMaskFilterImpl::kMAX_BLUR_SIGMA = SkIntToScalar(128);
117
118// linearly interpolate between y1 & y3 to match x2's position between x1 & x3
119static SkScalar interp(SkScalar x1, SkScalar x2, SkScalar x3, SkScalar y1, SkScalar y3) {
120 SkASSERT(x1 <= x2 && x2 <= x3);
121 SkASSERT(y1 <= y3);
122
123 SkScalar t = (x2 - x1) / (x3 - x1);
124 return y1 + t * (y3 - y1);
125}
126
127// Insert 'lower' and 'higher' into 'array1' and insert a new value at each matching insertion
128// point in 'array2' that linearly interpolates between the existing values.
129// Return a bit mask which contains a copy of 'inputMask' for all the cells between the two
130// insertion points.
131static uint32_t insert_into_arrays(SkScalar* array1, SkScalar* array2,
132 SkScalar lower, SkScalar higher,
133 int* num, uint32_t inputMask, int maskSize) {
134 SkASSERT(lower < higher);
135 SkASSERT(lower >= array1[0] && higher <= array1[*num-1]);
136
137 int32_t skipMask = 0x0;
138 int i;
139 for (i = 0; i < *num; ++i) {
140 if (lower >= array1[i] && lower < array1[i+1]) {
141 if (!SkScalarNearlyEqual(lower, array1[i])) {
142 memmove(&array1[i+2], &array1[i+1], (*num-i-1)*sizeof(SkScalar));
143 array1[i+1] = lower;
144 memmove(&array2[i+2], &array2[i+1], (*num-i-1)*sizeof(SkScalar));
145 array2[i+1] = interp(array1[i], lower, array1[i+2], array2[i], array2[i+2]);
146 i++;
147 (*num)++;
148 }
149 break;
150 }
151 }
152 for ( ; i < *num; ++i) {
153 skipMask |= inputMask << (i*maskSize);
154 if (higher > array1[i] && higher <= array1[i+1]) {
155 if (!SkScalarNearlyEqual(higher, array1[i+1])) {
156 memmove(&array1[i+2], &array1[i+1], (*num-i-1)*sizeof(SkScalar));
157 array1[i+1] = higher;
158 memmove(&array2[i+2], &array2[i+1], (*num-i-1)*sizeof(SkScalar));
159 array2[i+1] = interp(array1[i], higher, array1[i+2], array2[i], array2[i+2]);
160 (*num)++;
161 }
162 break;
163 }
164 }
165
166 return skipMask;
167}
168
// Computes the parameters needed to draw a blurred rrect as a nine-patch-style
// mesh: the shrunken rrect to actually blur ('rrectToDraw'), the size of its
// blurred texture ('widthHeight'), and the parallel dest ('rectXs'/'rectYs')
// and texture ('texXs'/'texYs') coordinate arrays used to stitch the patches
// back together. If 'occluder' overlaps the blurred bounds, extra rows/columns
// are spliced into the arrays and 'skipMask' marks the fully-occluded cells so
// they can be skipped at draw time. Returns false when the rrect is too small,
// relative to the blur, to be nine-patched.
bool SkComputeBlurredRRectParams(const SkRRect& srcRRect, const SkRRect& devRRect,
                                 const SkRect& occluder,
                                 SkScalar sigma, SkScalar xformedSigma,
                                 SkRRect* rrectToDraw,
                                 SkISize* widthHeight,
                                 SkScalar rectXs[kSkBlurRRectMaxDivisions],
                                 SkScalar rectYs[kSkBlurRRectMaxDivisions],
                                 SkScalar texXs[kSkBlurRRectMaxDivisions],
                                 SkScalar texYs[kSkBlurRRectMaxDivisions],
                                 int* numXs, int* numYs, uint32_t* skipMask) {
    // Pixel radius of the blur in device space (3 sigma covers the kernel).
    unsigned int devBlurRadius = 3*SkScalarCeilToInt(xformedSigma-1/6.0f);
    SkScalar srcBlurRadius = 3.0f * sigma;

    const SkRect& devOrig = devRRect.getBounds();
    const SkVector& devRadiiUL = devRRect.radii(SkRRect::kUpperLeft_Corner);
    const SkVector& devRadiiUR = devRRect.radii(SkRRect::kUpperRight_Corner);
    const SkVector& devRadiiLR = devRRect.radii(SkRRect::kLowerRight_Corner);
    const SkVector& devRadiiLL = devRRect.radii(SkRRect::kLowerLeft_Corner);

    // Largest corner radius on each side, in device space.
    const int devLeft = SkScalarCeilToInt(std::max<SkScalar>(devRadiiUL.fX, devRadiiLL.fX));
    const int devTop = SkScalarCeilToInt(std::max<SkScalar>(devRadiiUL.fY, devRadiiUR.fY));
    const int devRight = SkScalarCeilToInt(std::max<SkScalar>(devRadiiUR.fX, devRadiiLR.fX));
    const int devBot = SkScalarCeilToInt(std::max<SkScalar>(devRadiiLL.fY, devRadiiLR.fY));

    // This is a conservative check for nine-patchability: the corner+blur
    // regions on opposite sides must not overlap.
    if (devOrig.fLeft + devLeft + devBlurRadius >= devOrig.fRight - devRight - devBlurRadius ||
        devOrig.fTop + devTop + devBlurRadius >= devOrig.fBottom - devBot - devBlurRadius) {
        return false;
    }

    const SkVector& srcRadiiUL = srcRRect.radii(SkRRect::kUpperLeft_Corner);
    const SkVector& srcRadiiUR = srcRRect.radii(SkRRect::kUpperRight_Corner);
    const SkVector& srcRadiiLR = srcRRect.radii(SkRRect::kLowerRight_Corner);
    const SkVector& srcRadiiLL = srcRRect.radii(SkRRect::kLowerLeft_Corner);

    const SkScalar srcLeft = std::max<SkScalar>(srcRadiiUL.fX, srcRadiiLL.fX);
    const SkScalar srcTop = std::max<SkScalar>(srcRadiiUL.fY, srcRadiiUR.fY);
    const SkScalar srcRight = std::max<SkScalar>(srcRadiiUR.fX, srcRadiiLR.fX);
    const SkScalar srcBot = std::max<SkScalar>(srcRadiiLL.fY, srcRadiiLR.fY);

    // Shrunken rrect: room for the corners, the blur on each side, and a
    // 1-pixel center strip to stretch; the texture adds the outer blur ring.
    int newRRWidth = 2*devBlurRadius + devLeft + devRight + 1;
    int newRRHeight = 2*devBlurRadius + devTop + devBot + 1;
    widthHeight->fWidth = newRRWidth + 2 * devBlurRadius;
    widthHeight->fHeight = newRRHeight + 2 * devBlurRadius;

    // Full extent of the blurred geometry in source space.
    const SkRect srcProxyRect = srcRRect.getBounds().makeOutset(srcBlurRadius, srcBlurRadius);

    // Dest-space column/row boundaries of the nine-patch cells...
    rectXs[0] = srcProxyRect.fLeft;
    rectXs[1] = srcProxyRect.fLeft + 2*srcBlurRadius + srcLeft;
    rectXs[2] = srcProxyRect.fRight - 2*srcBlurRadius - srcRight;
    rectXs[3] = srcProxyRect.fRight;

    rectYs[0] = srcProxyRect.fTop;
    rectYs[1] = srcProxyRect.fTop + 2*srcBlurRadius + srcTop;
    rectYs[2] = srcProxyRect.fBottom - 2*srcBlurRadius - srcBot;
    rectYs[3] = srcProxyRect.fBottom;

    // ...and the matching boundaries in the blurred texture.
    texXs[0] = 0.0f;
    texXs[1] = 2.0f*devBlurRadius + devLeft;
    texXs[2] = 2.0f*devBlurRadius + devLeft + 1;
    texXs[3] = SkIntToScalar(widthHeight->fWidth);

    texYs[0] = 0.0f;
    texYs[1] = 2.0f*devBlurRadius + devTop;
    texYs[2] = 2.0f*devBlurRadius + devTop + 1;
    texYs[3] = SkIntToScalar(widthHeight->fHeight);

    SkRect temp = occluder;

    *numXs = 4;
    *numYs = 4;
    *skipMask = 0;
    // If the occluder overlaps the blurred bounds, splice its edges into the
    // coordinate arrays and record which cells it fully covers.
    if (!temp.isEmpty() && (srcProxyRect.contains(temp) || temp.intersect(srcProxyRect))) {
        *skipMask = insert_into_arrays(rectXs, texXs, temp.fLeft, temp.fRight, numXs, 0x1, 1);
        *skipMask = insert_into_arrays(rectYs, texYs, temp.fTop, temp.fBottom,
                                       numYs, *skipMask, *numXs-1);
    }

    const SkRect newRect = SkRect::MakeXYWH(SkIntToScalar(devBlurRadius),
                                            SkIntToScalar(devBlurRadius),
                                            SkIntToScalar(newRRWidth),
                                            SkIntToScalar(newRRHeight));
    SkVector newRadii[4];
    newRadii[0] = { SkScalarCeilToScalar(devRadiiUL.fX), SkScalarCeilToScalar(devRadiiUL.fY) };
    newRadii[1] = { SkScalarCeilToScalar(devRadiiUR.fX), SkScalarCeilToScalar(devRadiiUR.fY) };
    newRadii[2] = { SkScalarCeilToScalar(devRadiiLR.fX), SkScalarCeilToScalar(devRadiiLR.fY) };
    newRadii[3] = { SkScalarCeilToScalar(devRadiiLL.fX), SkScalarCeilToScalar(devRadiiLL.fY) };

    rrectToDraw->setRectRadii(newRect, newRadii);
    return true;
}
260
261///////////////////////////////////////////////////////////////////////////////
262
// 'sigma' must be positive and 'style' a valid SkBlurStyle; both are
// validated by the factory (SkMaskFilter::MakeBlur) before reaching here.
SkBlurMaskFilterImpl::SkBlurMaskFilterImpl(SkScalar sigma, SkBlurStyle style, bool respectCTM)
    : fSigma(sigma)
    , fBlurStyle(style)
    , fRespectCTM(respectCTM) {
    SkASSERT(fSigma > 0);
    SkASSERT((unsigned)style <= kLastEnum_SkBlurStyle);
}
270
// The blur consumes and produces 8-bit alpha (coverage) masks.
SkMask::Format SkBlurMaskFilterImpl::getFormat() const {
    return SkMask::kA8_Format;
}
274
275bool SkBlurMaskFilterImpl::asABlur(BlurRec* rec) const {
276 if (this->ignoreXform()) {
277 return false;
278 }
279
280 if (rec) {
281 rec->fSigma = fSigma;
282 rec->fStyle = fBlurStyle;
283 }
284 return true;
285}
286
287bool SkBlurMaskFilterImpl::filterMask(SkMask* dst, const SkMask& src,
288 const SkMatrix& matrix,
289 SkIPoint* margin) const {
290 SkScalar sigma = this->computeXformedSigma(matrix);
291 return SkBlurMask::BoxBlur(dst, src, sigma, fBlurStyle, margin);
292}
293
294bool SkBlurMaskFilterImpl::filterRectMask(SkMask* dst, const SkRect& r,
295 const SkMatrix& matrix,
296 SkIPoint* margin, SkMask::CreateMode createMode) const {
297 SkScalar sigma = computeXformedSigma(matrix);
298
299 return SkBlurMask::BlurRect(sigma, dst, r, fBlurStyle, margin, createMode);
300}
301
302bool SkBlurMaskFilterImpl::filterRRectMask(SkMask* dst, const SkRRect& r,
303 const SkMatrix& matrix,
304 SkIPoint* margin, SkMask::CreateMode createMode) const {
305 SkScalar sigma = computeXformedSigma(matrix);
306
307 return SkBlurMask::BlurRRect(sigma, dst, r, fBlurStyle, margin, createMode);
308}
309
310#include "include/core/SkCanvas.h"
311
312static bool prepare_to_draw_into_mask(const SkRect& bounds, SkMask* mask) {
313 SkASSERT(mask != nullptr);
314
315 mask->fBounds = bounds.roundOut();
316 mask->fRowBytes = SkAlign4(mask->fBounds.width());
317 mask->fFormat = SkMask::kA8_Format;
318 const size_t size = mask->computeImageSize();
319 mask->fImage = SkMask::AllocImage(size, SkMask::kZeroInit_Alloc);
320 if (nullptr == mask->fImage) {
321 return false;
322 }
323 return true;
324}
325
326static bool draw_rrect_into_mask(const SkRRect rrect, SkMask* mask) {
327 if (!prepare_to_draw_into_mask(rrect.rect(), mask)) {
328 return false;
329 }
330
331 // FIXME: This code duplicates code in draw_rects_into_mask, below. Is there a
332 // clean way to share more code?
333 SkBitmap bitmap;
334 bitmap.installMaskPixels(*mask);
335
336 SkCanvas canvas(bitmap);
337 canvas.translate(-SkIntToScalar(mask->fBounds.left()),
338 -SkIntToScalar(mask->fBounds.top()));
339
340 SkPaint paint;
341 paint.setAntiAlias(true);
342 canvas.drawRRect(rrect, paint);
343 return true;
344}
345
// Rasterizes one rect (count == 1) or the even-odd difference of two rects
// (count == 2, producing a frame) into 'mask' as an anti-aliased A8 coverage
// mask. The mask bounds come from rects[0] alone — presumably rects[1] is
// nested inside rects[0]; callers appear to guarantee this. Returns false if
// the mask image allocation fails.
static bool draw_rects_into_mask(const SkRect rects[], int count, SkMask* mask) {
    if (!prepare_to_draw_into_mask(rects[0], mask)) {
        return false;
    }

    SkBitmap bitmap;
    bitmap.installPixels(SkImageInfo::Make(mask->fBounds.width(),
                                           mask->fBounds.height(),
                                           kAlpha_8_SkColorType,
                                           kPremul_SkAlphaType),
                         mask->fImage, mask->fRowBytes);

    SkCanvas canvas(bitmap);
    // Shift so the mask's top-left corner lands at (0, 0) of the bitmap.
    canvas.translate(-SkIntToScalar(mask->fBounds.left()),
                     -SkIntToScalar(mask->fBounds.top()));

    SkPaint paint;
    paint.setAntiAlias(true);

    if (1 == count) {
        canvas.drawRect(rects[0], paint);
    } else {
        // todo: do I need a fast way to do this?
        // Even-odd fill of both rects yields rects[0] minus rects[1].
        SkPath path;
        path.addRect(rects[0]);
        path.addRect(rects[1]);
        path.setFillType(SkPathFillType::kEvenOdd);
        canvas.drawPath(path, paint);
    }
    return true;
}
377
378static bool rect_exceeds(const SkRect& r, SkScalar v) {
379 return r.fLeft < -v || r.fTop < -v || r.fRight > v || r.fBottom > v ||
380 r.width() > v || r.height() > v;
381}
382
383#include "src/core/SkMaskCache.h"
384
// Copies the mask's pixels into a new SkCachedData (so they can live in the
// resource cache), frees the original image, and repoints mask->fImage at the
// cached copy. Returns null — leaving the mask untouched — if the cache
// allocation fails.
static SkCachedData* copy_mask_to_cacheddata(SkMask* mask) {
    const size_t size = mask->computeTotalImageSize();
    SkCachedData* data = SkResourceCache::NewCachedData(size);
    if (data) {
        memcpy(data->writable_data(), mask->fImage, size);
        SkMask::FreeImage(mask->fImage);
        mask->fImage = (uint8_t*)data->data();
    }
    return data;
}
395
// Looks up a previously-blurred mask keyed by (sigma, style, rrect) in the
// global mask cache; on a hit, 'mask' is filled in sharing the cached pixels.
static SkCachedData* find_cached_rrect(SkMask* mask, SkScalar sigma, SkBlurStyle style,
                                       const SkRRect& rrect) {
    return SkMaskCache::FindAndRef(sigma, style, rrect, mask);
}
400
// Moves the mask's pixels into the cache keyed by (sigma, style, rrect).
// Returns the cache entry, or null if the cache allocation failed.
static SkCachedData* add_cached_rrect(SkMask* mask, SkScalar sigma, SkBlurStyle style,
                                      const SkRRect& rrect) {
    SkCachedData* cache = copy_mask_to_cacheddata(mask);
    if (cache) {
        SkMaskCache::Add(sigma, style, rrect, *mask, cache);
    }
    return cache;
}
409
// Rect-array analogue of find_cached_rrect, keyed by (sigma, style, rects).
static SkCachedData* find_cached_rects(SkMask* mask, SkScalar sigma, SkBlurStyle style,
                                       const SkRect rects[], int count) {
    return SkMaskCache::FindAndRef(sigma, style, rects, count, mask);
}
414
// Rect-array analogue of add_cached_rrect, keyed by (sigma, style, rects).
static SkCachedData* add_cached_rects(SkMask* mask, SkScalar sigma, SkBlurStyle style,
                                      const SkRect rects[], int count) {
    SkCachedData* cache = copy_mask_to_cacheddata(mask);
    if (cache) {
        SkMaskCache::Add(sigma, style, rects, count, *mask, cache);
    }
    return cache;
}
423
// Use the faster analytic blur approach for round rects (cf. the matching
// flag for ninepatch rects below).
static const bool c_analyticBlurRRect{true};
425
// Attempts to blur 'rrect' as a nine-patch: a small blurred mask
// (patch->fMask) plus the outer rect and stretch center needed to tile it back
// up to full size. Returns kUnimplemented_FilterReturn for shapes/styles this
// fast path can't handle, which sends the caller down the general filterMask()
// path instead.
SkMaskFilterBase::FilterReturn
SkBlurMaskFilterImpl::filterRRectToNine(const SkRRect& rrect, const SkMatrix& matrix,
                                        const SkIRect& clipBounds,
                                        NinePatch* patch) const {
    SkASSERT(patch != nullptr);
    switch (rrect.getType()) {
        case SkRRect::kEmpty_Type:
            // Nothing to draw.
            return kFalse_FilterReturn;

        case SkRRect::kRect_Type:
            // We should have caught this earlier.
            SkASSERT(false);
            // Fall through.
        case SkRRect::kOval_Type:
            // The nine patch special case does not handle ovals, and we
            // already have code for rectangles.
            return kUnimplemented_FilterReturn;

        // These three can take advantage of this fast path.
        case SkRRect::kSimple_Type:
        case SkRRect::kNinePatch_Type:
        case SkRRect::kComplex_Type:
            break;
    }

    // TODO: report correct metrics for innerstyle, where we do not grow the
    // total bounds, but we do need an inset the size of our blur-radius
    if (kInner_SkBlurStyle == fBlurStyle) {
        return kUnimplemented_FilterReturn;
    }

    // TODO: take clipBounds into account to limit our coordinates up front
    // for now, just skip too-large src rects (to take the old code path).
    if (rect_exceeds(rrect.rect(), SkIntToScalar(32767))) {
        return kUnimplemented_FilterReturn;
    }

    SkIPoint margin;
    SkMask srcM, dstM;
    srcM.fBounds = rrect.rect().roundOut();
    srcM.fFormat = SkMask::kA8_Format;
    srcM.fRowBytes = 0;

    bool filterResult = false;
    if (c_analyticBlurRRect) {
        // special case for fast round rect blur
        // don't actually do the blur the first time, just compute the correct size
        filterResult = this->filterRRectMask(&dstM, rrect, matrix, &margin,
                                            SkMask::kJustComputeBounds_CreateMode);
    }

    if (!filterResult) {
        filterResult = this->filterMask(&dstM, srcM, matrix, &margin);
    }

    if (!filterResult) {
        return kFalse_FilterReturn;
    }

    // Now figure out the appropriate width and height of the smaller round rectangle
    // to stretch. It will take into account the larger radius per side as well as double
    // the margin, to account for inner and outer blur.
    const SkVector& UL = rrect.radii(SkRRect::kUpperLeft_Corner);
    const SkVector& UR = rrect.radii(SkRRect::kUpperRight_Corner);
    const SkVector& LR = rrect.radii(SkRRect::kLowerRight_Corner);
    const SkVector& LL = rrect.radii(SkRRect::kLowerLeft_Corner);

    const SkScalar leftUnstretched = std::max(UL.fX, LL.fX) + SkIntToScalar(2 * margin.fX);
    const SkScalar rightUnstretched = std::max(UR.fX, LR.fX) + SkIntToScalar(2 * margin.fX);

    // Extra space in the middle to ensure an unchanging piece for stretching. Use 3 to cover
    // any fractional space on either side plus 1 for the part to stretch.
    const SkScalar stretchSize = SkIntToScalar(3);

    const SkScalar totalSmallWidth = leftUnstretched + rightUnstretched + stretchSize;
    if (totalSmallWidth >= rrect.rect().width()) {
        // There is no valid piece to stretch.
        return kUnimplemented_FilterReturn;
    }

    const SkScalar topUnstretched = std::max(UL.fY, UR.fY) + SkIntToScalar(2 * margin.fY);
    const SkScalar bottomUnstretched = std::max(LL.fY, LR.fY) + SkIntToScalar(2 * margin.fY);

    const SkScalar totalSmallHeight = topUnstretched + bottomUnstretched + stretchSize;
    if (totalSmallHeight >= rrect.rect().height()) {
        // There is no valid piece to stretch.
        return kUnimplemented_FilterReturn;
    }

    // Shrunken rrect that keeps each corner's original radii; blurring this and
    // stretching the center row/column reproduces the full blurred rrect.
    SkRect smallR = SkRect::MakeWH(totalSmallWidth, totalSmallHeight);

    SkRRect smallRR;
    SkVector radii[4];
    radii[SkRRect::kUpperLeft_Corner] = UL;
    radii[SkRRect::kUpperRight_Corner] = UR;
    radii[SkRRect::kLowerRight_Corner] = LR;
    radii[SkRRect::kLowerLeft_Corner] = LL;
    smallRR.setRectRadii(smallR, radii);

    const SkScalar sigma = this->computeXformedSigma(matrix);
    // Check the mask cache first; identical small blurred rrects recur often.
    SkCachedData* cache = find_cached_rrect(&patch->fMask, sigma, fBlurStyle, smallRR);
    if (!cache) {
        bool analyticBlurWorked = false;
        if (c_analyticBlurRRect) {
            analyticBlurWorked =
                this->filterRRectMask(&patch->fMask, smallRR, matrix, &margin,
                                      SkMask::kComputeBoundsAndRenderImage_CreateMode);
        }

        if (!analyticBlurWorked) {
            // Fallback: rasterize the small rrect and run the generic mask blur.
            if (!draw_rrect_into_mask(smallRR, &srcM)) {
                return kFalse_FilterReturn;
            }

            SkAutoMaskFreeImage amf(srcM.fImage);

            if (!this->filterMask(&patch->fMask, srcM, matrix, &margin)) {
                return kFalse_FilterReturn;
            }
        }
        cache = add_cached_rrect(&patch->fMask, sigma, fBlurStyle, smallRR);
    }

    patch->fMask.fBounds.offsetTo(0, 0);
    patch->fOuterRect = dstM.fBounds;
    patch->fCenter.fX = SkScalarCeilToInt(leftUnstretched) + 1;
    patch->fCenter.fY = SkScalarCeilToInt(topUnstretched) + 1;
    SkASSERT(nullptr == patch->fCache);
    patch->fCache = cache; // transfer ownership to patch
    return kTrue_FilterReturn;
}
558
559// Use the faster analytic blur approach for ninepatch rects
560static const bool c_analyticBlurNinepatch{true};
561
// Attempts to blur one rect (or, when count == 2, a rect-with-hole drawn
// even-odd) as a nine-patch: a small blurred mask plus the outer rect and
// stretch center needed to tile it back up to full size. Returns
// kUnimplemented_FilterReturn to send the caller down the general
// filterMask() path instead.
SkMaskFilterBase::FilterReturn
SkBlurMaskFilterImpl::filterRectsToNine(const SkRect rects[], int count,
                                        const SkMatrix& matrix,
                                        const SkIRect& clipBounds,
                                        NinePatch* patch) const {
    if (count < 1 || count > 2) {
        return kUnimplemented_FilterReturn;
    }

    // TODO: report correct metrics for innerstyle, where we do not grow the
    // total bounds, but we do need an inset the size of our blur-radius
    if (kInner_SkBlurStyle == fBlurStyle || kOuter_SkBlurStyle == fBlurStyle) {
        return kUnimplemented_FilterReturn;
    }

    // TODO: take clipBounds into account to limit our coordinates up front
    // for now, just skip too-large src rects (to take the old code path).
    if (rect_exceeds(rects[0], SkIntToScalar(32767))) {
        return kUnimplemented_FilterReturn;
    }

    SkIPoint margin;
    SkMask srcM, dstM;
    srcM.fBounds = rects[0].roundOut();
    srcM.fFormat = SkMask::kA8_Format;
    srcM.fRowBytes = 0;

    bool filterResult = false;
    if (count == 1 && c_analyticBlurNinepatch) {
        // special case for fast rect blur
        // don't actually do the blur the first time, just compute the correct size
        filterResult = this->filterRectMask(&dstM, rects[0], matrix, &margin,
                                            SkMask::kJustComputeBounds_CreateMode);
    } else {
        filterResult = this->filterMask(&dstM, srcM, matrix, &margin);
    }

    if (!filterResult) {
        return kFalse_FilterReturn;
    }

    /*
     *  smallR is the smallest version of 'rect' that will still guarantee that
     *  we get the same blur results on all edges, plus 1 center row/col that is
     *  representative of the extendible/stretchable edges of the ninepatch.
     *  Since our actual edge may be fractional we inset 1 more to be sure we
     *  don't miss any interior blur.
     *  x is an added pixel of blur, and { and } are the (fractional) edge
     *  pixels from the original rect.
     *
     *   x x { x x .... x x } x x
     *
     *  Thus, in this case, we inset by a total of 5 (on each side) beginning
     *  with our outer-rect (dstM.fBounds)
     */
    SkRect smallR[2];
    SkIPoint center;

    // +2 is from +1 for each edge (to account for possible fractional edges
    int smallW = dstM.fBounds.width() - srcM.fBounds.width() + 2;
    int smallH = dstM.fBounds.height() - srcM.fBounds.height() + 2;
    SkIRect innerIR;

    if (1 == count) {
        innerIR = srcM.fBounds;
        center.set(smallW, smallH);
    } else {
        SkASSERT(2 == count);
        rects[1].roundIn(&innerIR);
        center.set(smallW + (innerIR.left() - srcM.fBounds.left()),
                   smallH + (innerIR.top() - srcM.fBounds.top()));
    }

    // +1 so we get a clean, stretchable, center row/col
    smallW += 1;
    smallH += 1;

    // we want the inset amounts to be integral, so we don't change any
    // fractional phase on the fRight or fBottom of our smallR.
    const SkScalar dx = SkIntToScalar(innerIR.width() - smallW);
    const SkScalar dy = SkIntToScalar(innerIR.height() - smallH);
    if (dx < 0 || dy < 0) {
        // we're too small, relative to our blur, to break into nine-patch,
        // so we ask to have our normal filterMask() be called.
        return kUnimplemented_FilterReturn;
    }

    smallR[0].setLTRB(rects[0].left(), rects[0].top(),
                      rects[0].right() - dx, rects[0].bottom() - dy);
    if (smallR[0].width() < 2 || smallR[0].height() < 2) {
        return kUnimplemented_FilterReturn;
    }
    if (2 == count) {
        smallR[1].setLTRB(rects[1].left(), rects[1].top(),
                          rects[1].right() - dx, rects[1].bottom() - dy);
        SkASSERT(!smallR[1].isEmpty());
    }

    const SkScalar sigma = this->computeXformedSigma(matrix);
    // Check the mask cache first; identical small blurred rects recur often.
    SkCachedData* cache = find_cached_rects(&patch->fMask, sigma, fBlurStyle, smallR, count);
    if (!cache) {
        if (count > 1 || !c_analyticBlurNinepatch) {
            // Rasterize-then-blur path (needed for the two-rect frame case).
            if (!draw_rects_into_mask(smallR, count, &srcM)) {
                return kFalse_FilterReturn;
            }

            SkAutoMaskFreeImage amf(srcM.fImage);

            if (!this->filterMask(&patch->fMask, srcM, matrix, &margin)) {
                return kFalse_FilterReturn;
            }
        } else {
            // Single rect: use the analytic rect blur directly.
            if (!this->filterRectMask(&patch->fMask, smallR[0], matrix, &margin,
                                      SkMask::kComputeBoundsAndRenderImage_CreateMode)) {
                return kFalse_FilterReturn;
            }
        }
        cache = add_cached_rects(&patch->fMask, sigma, fBlurStyle, smallR, count);
    }
    patch->fMask.fBounds.offsetTo(0, 0);
    patch->fOuterRect = dstM.fBounds;
    patch->fCenter = center;
    SkASSERT(nullptr == patch->fCache);
    patch->fCache = cache; // transfer ownership to patch
    return kTrue_FilterReturn;
}
688
689void SkBlurMaskFilterImpl::computeFastBounds(const SkRect& src,
690 SkRect* dst) const {
691 SkScalar pad = 3.0f * fSigma;
692
693 dst->setLTRB(src.fLeft - pad, src.fTop - pad,
694 src.fRight + pad, src.fBottom + pad);
695}
696
// Deserialization counterpart of flatten(), below.
sk_sp<SkFlattenable> SkBlurMaskFilterImpl::CreateProc(SkReadBuffer& buffer) {
    const SkScalar sigma = buffer.readScalar();
    SkBlurStyle style = buffer.read32LE(kLastEnum_SkBlurStyle);

    uint32_t flags = buffer.read32LE(0x3); // historically we only recorded 2 bits
    bool respectCTM = !(flags & 1); // historically we stored ignoreCTM in low bit

    // Older pictures also serialized an occluder rect; read and discard it.
    if (buffer.isVersionLT(SkPicturePriv::kRemoveOccluderFromBlurMaskFilter)) {
        SkRect unused;
        buffer.readRect(&unused);
    }

    // MakeBlur re-validates sigma, so a corrupt buffer yields nullptr here.
    return SkMaskFilter::MakeBlur((SkBlurStyle)style, sigma, respectCTM);
}
711
// Serialization counterpart of CreateProc, above.
void SkBlurMaskFilterImpl::flatten(SkWriteBuffer& buffer) const {
    buffer.writeScalar(fSigma);
    buffer.writeUInt(fBlurStyle);
    buffer.writeUInt(!fRespectCTM); // historically we recorded ignoreCTM
}
717
718
719#if SK_SUPPORT_GPU
720
// Draws the blurred shape directly into the render target — no intermediate
// mask — when the shape is a filled, non-inverted rrect (incl. rect/circle)
// with circular corners, the style is kNormal, and the CTM is scale+translate.
// Returns false to make the caller fall back to the mask-based path.
bool SkBlurMaskFilterImpl::directFilterMaskGPU(GrRecordingContext* context,
                                               GrRenderTargetContext* renderTargetContext,
                                               GrPaint&& paint,
                                               const GrClip& clip,
                                               const SkMatrix& viewMatrix,
                                               const GrShape& shape) const {
    SkASSERT(renderTargetContext);

    if (fBlurStyle != kNormal_SkBlurStyle) {
        return false;
    }

    if (!viewMatrix.isScaleTranslate()) {
        return false;
    }

    // TODO: we could handle blurred stroked circles
    if (!shape.style().isSimpleFill()) {
        return false;
    }

    SkScalar xformedSigma = this->computeXformedSigma(viewMatrix);
    if (xformedSigma <= 0) {
        return false;
    }

    SkRRect srcRRect;
    bool inverted;
    if (!shape.asRRect(&srcRRect, nullptr, nullptr, &inverted) || inverted) {
        return false;
    }

    SkRRect devRRect;
    if (!srcRRect.transform(viewMatrix, &devRRect)) {
        return false;
    }

    if (!SkRRectPriv::AllCornersCircular(devRRect)) {
        return false;
    }

    std::unique_ptr<GrFragmentProcessor> fp;

    if (devRRect.isRect() || SkRRectPriv::IsCircle(devRRect)) {
        // Dedicated effects exist for blurred rects and circles.
        if (devRRect.isRect()) {
            fp = GrRectBlurEffect::Make(context, *context->priv().caps()->shaderCaps(),
                                        devRRect.rect(), xformedSigma);
        } else {
            fp = GrCircleBlurFragmentProcessor::Make(context, devRRect.rect(), xformedSigma);
        }

        if (!fp) {
            return false;
        }
        paint.addCoverageFragmentProcessor(std::move(fp));

        // Draw a rect large enough (3 sigma outset) to cover the whole blur.
        SkRect srcProxyRect = srcRRect.rect();
        SkScalar outsetX = 3.0f*fSigma;
        SkScalar outsetY = 3.0f*fSigma;
        if (this->ignoreXform()) {
            // When we're ignoring the CTM the padding added to the source rect also needs to ignore
            // the CTM. The matrix passed in here is guaranteed to be just scale and translate so we
            // can just grab the X and Y scales off the matrix and pre-undo the scale.
            outsetX /= SkScalarAbs(viewMatrix.getScaleX());
            outsetY /= SkScalarAbs(viewMatrix.getScaleY());
        }
        srcProxyRect.outset(outsetX, outsetY);

        renderTargetContext->drawRect(clip, std::move(paint), GrAA::kNo, viewMatrix, srcProxyRect);
        return true;
    }

    // General rrect case.
    fp = GrRRectBlurEffect::Make(context, fSigma, xformedSigma, srcRRect, devRRect);
    if (!fp) {
        return false;
    }

    if (!this->ignoreXform()) {
        SkRect srcProxyRect = srcRRect.rect();
        srcProxyRect.outset(3.0f*fSigma, 3.0f*fSigma);

        // Draw the coverage as a two-triangle quad over the padded source rect.
        SkVertices::Builder builder(SkVertices::kTriangles_VertexMode, 4, 6, 0);
        srcProxyRect.toQuad(builder.positions());

        static const uint16_t fullIndices[6] = { 0, 1, 2, 0, 2, 3 };
        memcpy(builder.indices(), fullIndices, sizeof(fullIndices));
        sk_sp<SkVertices> vertices = builder.detach();

        paint.addCoverageFragmentProcessor(std::move(fp));
        renderTargetContext->drawVertices(clip, std::move(paint), viewMatrix, std::move(vertices));
    } else {
        // Sigma ignores the CTM, so pad in device space and use an inverse
        // local matrix to keep the effect's coordinates in device space.
        SkMatrix inverse;
        if (!viewMatrix.invert(&inverse)) {
            return false;
        }

        float extra=3.f*SkScalarCeilToScalar(xformedSigma-1/6.0f);
        SkRect proxyRect = devRRect.rect();
        proxyRect.outset(extra, extra);

        paint.addCoverageFragmentProcessor(std::move(fp));
        renderTargetContext->fillRectWithLocalMatrix(clip, std::move(paint), GrAA::kNo,
                                                     SkMatrix::I(), proxyRect, inverse);
    }

    return true;
}
828
829bool SkBlurMaskFilterImpl::canFilterMaskGPU(const GrShape& shape,
830 const SkIRect& devSpaceShapeBounds,
831 const SkIRect& clipBounds,
832 const SkMatrix& ctm,
833 SkIRect* maskRect) const {
834 SkScalar xformedSigma = this->computeXformedSigma(ctm);
835 if (xformedSigma <= 0) {
836 maskRect->setEmpty();
837 return false;
838 }
839
840 if (maskRect) {
841 float sigma3 = 3 * SkScalarToFloat(xformedSigma);
842
843 // Outset srcRect and clipRect by 3 * sigma, to compute affected blur area.
844 SkIRect clipRect = clipBounds.makeOutset(sigma3, sigma3);
845 SkIRect srcRect = devSpaceShapeBounds.makeOutset(sigma3, sigma3);
846
847 if (!srcRect.intersect(clipRect)) {
848 srcRect.setEmpty();
849 }
850 *maskRect = srcRect;
851 }
852
853 // We prefer to blur paths with small blur radii on the CPU.
854 if (ctm.rectStaysRect()) {
855 static const SkScalar kMIN_GPU_BLUR_SIZE = SkIntToScalar(64);
856 static const SkScalar kMIN_GPU_BLUR_SIGMA = SkIntToScalar(32);
857
858 if (devSpaceShapeBounds.width() <= kMIN_GPU_BLUR_SIZE &&
859 devSpaceShapeBounds.height() <= kMIN_GPU_BLUR_SIZE &&
860 xformedSigma <= kMIN_GPU_BLUR_SIGMA) {
861 return false;
862 }
863 }
864
865 return true;
866}
867
// Blurs an already-rasterized device-space coverage mask ('srcView') on the
// GPU. For non-normal styles the original mask is composited back over the
// blur with a set-op XP to produce the inner/outer/solid variants. Returns an
// empty view on failure.
GrSurfaceProxyView SkBlurMaskFilterImpl::filterMaskGPU(GrRecordingContext* context,
                                                       GrSurfaceProxyView srcView,
                                                       GrColorType srcColorType,
                                                       SkAlphaType srcAlphaType,
                                                       const SkMatrix& ctm,
                                                       const SkIRect& maskRect) const {
    // 'maskRect' isn't snapped to the UL corner but the mask in 'src' is.
    const SkIRect clipRect = SkIRect::MakeWH(maskRect.width(), maskRect.height());

    SkScalar xformedSigma = this->computeXformedSigma(ctm);
    SkASSERT(xformedSigma > 0);

    // If we're doing a normal blur, we can clobber the pathTexture in the
    // gaussianBlur. Otherwise, we need to save it for later compositing.
    bool isNormalBlur = (kNormal_SkBlurStyle == fBlurStyle);
    auto srcBounds = SkIRect::MakeSize(srcView.proxy()->dimensions());
    auto renderTargetContext = SkGpuBlurUtils::GaussianBlur(context,
                                                            srcView,
                                                            srcColorType,
                                                            srcAlphaType,
                                                            nullptr,
                                                            clipRect,
                                                            srcBounds,
                                                            xformedSigma,
                                                            xformedSigma,
                                                            SkTileMode::kClamp);
    if (!renderTargetContext || !renderTargetContext->asTextureProxy()) {
        return {};
    }

    if (!isNormalBlur) {
        GrPaint paint;
        // Blend pathTexture over blurTexture.
        paint.addCoverageFragmentProcessor(GrTextureEffect::Make(std::move(srcView), srcAlphaType));
        if (kInner_SkBlurStyle == fBlurStyle) {
            // inner:  dst = dst * src
            paint.setCoverageSetOpXPFactory(SkRegion::kIntersect_Op);
        } else if (kSolid_SkBlurStyle == fBlurStyle) {
            // solid:  dst = src + dst - src * dst
            //             = src + (1 - src) * dst
            paint.setCoverageSetOpXPFactory(SkRegion::kUnion_Op);
        } else if (kOuter_SkBlurStyle == fBlurStyle) {
            // outer:  dst = dst * (1 - src)
            //             = 0 * src + (1 - src) * dst
            paint.setCoverageSetOpXPFactory(SkRegion::kDifference_Op);
        } else {
            paint.setCoverageSetOpXPFactory(SkRegion::kReplace_Op);
        }

        renderTargetContext->drawRect(GrNoClip(), std::move(paint), GrAA::kNo, SkMatrix::I(),
                                      SkRect::Make(clipRect));
    }

    return renderTargetContext->readSurfaceView();
}
923
924#endif // SK_SUPPORT_GPU
925
// Registers SkBlurMaskFilterImpl's CreateProc with the flattenable factory.
void sk_register_blur_maskfilter_createproc() { SK_REGISTER_FLATTENABLE(SkBlurMaskFilterImpl); }
927
928sk_sp<SkMaskFilter> SkMaskFilter::MakeBlur(SkBlurStyle style, SkScalar sigma, bool respectCTM) {
929 if (SkScalarIsFinite(sigma) && sigma > 0) {
930 return sk_sp<SkMaskFilter>(new SkBlurMaskFilterImpl(sigma, style, respectCTM));
931 }
932 return nullptr;
933}
934