1/*
2 * Copyright 2006 The Android Open Source Project
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "src/core/SkScanPriv.h"
9
10#include "include/core/SkMatrix.h"
11#include "include/core/SkPath.h"
12#include "include/core/SkRegion.h"
13#include "include/private/SkTo.h"
14#include "src/core/SkAntiRun.h"
15#include "src/core/SkBlitter.h"
16#include "src/core/SkPathPriv.h"
17
18#define SHIFT SK_SUPERSAMPLE_SHIFT
19#define SCALE (1 << SHIFT)
20#define MASK (SCALE - 1)
21
22/** @file
23 We have two techniques for capturing the output of the supersampler:
24 - SUPERMASK, which records a large mask-bitmap
25 this is often faster for small, complex objects
26 - RLE, which records a rle-encoded scanline
27 this is often faster for large objects with big spans
28
29 These blitters use two coordinate systems:
30 - destination coordinates, scale equal to the output - often
31 abbreviated with 'i' or 'I' in variable names
32 - supersampled coordinates, scale equal to the output * SCALE
33 */
34
35//#define FORCE_SUPERMASK
36//#define FORCE_RLE
37
38///////////////////////////////////////////////////////////////////////////////
39
/// Base class for a single-pass supersampled blitter.
class BaseSuperBlitter : public SkBlitter {
public:
    BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkIRect& clipBounds, bool isInverse);

    /// Must be explicitly defined on subclasses; the base version must never run.
    void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
        SkDEBUGFAIL("How did I get here?");
    }
    /// May not be called on BaseSuperBlitter because it blits out of order.
    void blitV(int x, int y, int height, SkAlpha alpha) override {
        SkDEBUGFAIL("How did I get here?");
    }

protected:
    // The wrapped blitter that receives the resolved (destination-resolution) output.
    SkBlitter* fRealBlitter;
    /// Current y coordinate, in destination coordinates.
    int fCurrIY;
    /// Widest row of region to be blitted, in destination coordinates.
    int fWidth;
    /// Leftmost x coordinate in any row, in destination coordinates.
    int fLeft;
    /// Leftmost x coordinate in any row, in supersampled coordinates.
    int fSuperLeft;

    SkDEBUGCODE(int fCurrX;)  // rightmost x blitted so far on the current row (debug only)
    /// Current y coordinate in supersampled coordinates.
    int fCurrY;
    /// Initial y coordinate (top of bounds).
    int fTop;

    SkIRect fSectBounds;
};
74
75BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlit, const SkIRect& ir,
76 const SkIRect& clipBounds, bool isInverse) {
77 fRealBlitter = realBlit;
78
79 SkIRect sectBounds;
80 if (isInverse) {
81 // We use the clip bounds instead of the ir, since we may be asked to
82 //draw outside of the rect when we're a inverse filltype
83 sectBounds = clipBounds;
84 } else {
85 if (!sectBounds.intersect(ir, clipBounds)) {
86 sectBounds.setEmpty();
87 }
88 }
89
90 const int left = sectBounds.left();
91 const int right = sectBounds.right();
92
93 fLeft = left;
94 fSuperLeft = SkLeftShift(left, SHIFT);
95 fWidth = right - left;
96 fTop = sectBounds.top();
97 fCurrIY = fTop - 1;
98 fCurrY = SkLeftShift(fTop, SHIFT) - 1;
99
100 SkDEBUGCODE(fCurrX = -1;)
101}
102
/// Run-length-encoded supersampling antialiased blitter.
class SuperBlitter : public BaseSuperBlitter {
public:
    SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
                 bool isInverse);

    ~SuperBlitter() override {
        // Emit any partially-accumulated final row before we go away.
        this->flush();
    }

    /// Once fRuns contains a complete supersampled row, flush() blits
    /// it out through the wrapped blitter.
    void flush();

    /// Blits a row of pixels, with location and width specified
    /// in supersampled coordinates.
    void blitH(int x, int y, int width) override;
    /// Blits a rectangle of pixels, with location and size specified
    /// in supersampled coordinates.
    void blitRect(int x, int y, int width, int height) override;

private:
    // The next three variables are used to track a circular buffer that
    // contains the values used in SkAlphaRuns. These variables should only
    // ever be updated in advanceRuns(), and fRuns should always point to
    // a valid SkAlphaRuns...
    int fRunsToBuffer;       // number of run slots in fRunsBuffer
    void* fRunsBuffer;       // raw storage for fRunsToBuffer slots
    int fCurrentRun;         // index of the slot fRuns currently points into
    SkAlphaRuns fRuns;

    // Bytes needed for one slot: (fWidth + 1) int16_t run counts (the extra
    // one stores the terminating zero) plus fWidth + 1 alpha bytes, rounded
    // up into int16_t units via (fWidth + 2) / 2.
    int getRunsSz() const { return (fWidth + 1 + (fWidth + 2)/2) * sizeof(int16_t); }

    // This function updates the fRuns variable to point to the next buffer space
    // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
    // and resets fRuns to point to an empty scanline.
    void advanceRuns() {
        const size_t kRunsSz = this->getRunsSz();
        fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
        fRuns.fRuns = reinterpret_cast<int16_t*>(
            reinterpret_cast<uint8_t*>(fRunsBuffer) + fCurrentRun * kRunsSz);
        // The alpha array lives immediately after the run-count array.
        fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
        fRuns.reset(fWidth);
    }

    // x offset hint passed back into SkAlphaRuns::add to resume where the
    // previous add on this row left off.
    int fOffsetX;
};
151
152SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect& clipBounds,
153 bool isInverse)
154 : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
155{
156 fRunsToBuffer = realBlitter->requestRowsPreserved();
157 fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
158 fCurrentRun = -1;
159
160 this->advanceRuns();
161
162 fOffsetX = 0;
163}
164
165void SuperBlitter::flush() {
166 if (fCurrIY >= fTop) {
167
168 SkASSERT(fCurrentRun < fRunsToBuffer);
169 if (!fRuns.empty()) {
170 // SkDEBUGCODE(fRuns.dump();)
171 fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
172 this->advanceRuns();
173 fOffsetX = 0;
174 }
175
176 fCurrIY = fTop - 1;
177 SkDEBUGCODE(fCurrX = -1;)
178 }
179}
180
181/** coverage_to_partial_alpha() is being used by SkAlphaRuns, which
182 *accumulates* SCALE pixels worth of "alpha" in [0,(256/SCALE)]
183 to produce a final value in [0, 255] and handles clamping 256->255
184 itself, with the same (alpha - (alpha >> 8)) correction as
185 coverage_to_exact_alpha().
186*/
187static inline int coverage_to_partial_alpha(int aa) {
188 aa <<= 8 - 2*SHIFT;
189 return aa;
190}
191
192/** coverage_to_exact_alpha() is being used by our blitter, which wants
193 a final value in [0, 255].
194*/
195static inline int coverage_to_exact_alpha(int aa) {
196 int alpha = (256 >> SHIFT) * aa;
197 // clamp 256->255
198 return alpha - (alpha >> 8);
199}
200
// Accumulate one supersampled span (x, width in supersampled coords) into the
// current row's SkAlphaRuns, flushing the previous destination row first when
// this span starts a new one.
void SuperBlitter::blitH(int x, int y, int width) {
    SkASSERT(width > 0);

    // Destination row this supersampled row resolves into.
    int iy = y >> SHIFT;
    SkASSERT(iy >= fCurrIY);

    x -= fSuperLeft;
    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

#ifdef SK_DEBUG
    // Within a single supersampled row, spans must arrive left-to-right.
    SkASSERT(y != fCurrY || x >= fCurrX);
#endif
    SkASSERT(y >= fCurrY);
    if (fCurrY != y) {
        // New supersampled row: the resume-offset hint no longer applies.
        fOffsetX = 0;
        fCurrY = y;
    }

    if (iy != fCurrIY) {  // new scanline
        this->flush();
        fCurrIY = iy;
    }

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    // integer-pixel-aligned ends of blit, rounded out
    // fb = partial coverage in the first destination pixel,
    // fe = partial coverage in the last, n = fully-covered pixels between.
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;

    if (n < 0) {
        // Span begins and ends inside the same destination pixel.
        fb = fe - fb;
        n = 0;
        fe = 0;
    } else {
        if (fb == 0) {
            n += 1;  // first pixel is actually fully covered
        } else {
            fb = SCALE - fb;  // convert offset into covered sample count
        }
    }

    // The last argument is the max alpha a single supersampled row may add,
    // reduced on the final row of a destination pixel (y & MASK accounting).
    fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
                         n, coverage_to_partial_alpha(fe),
                         (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
                         fOffsetX);

#ifdef SK_DEBUG
    fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
    fCurrX = x + width;
#endif
}
259
#if 0 // UNUSED
// NOTE: dead code, compiled out. Kept for reference only: builds an SkAlphaRuns
// row directly from a left partial column, n opaque pixels, and a right
// partial column.
static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
                               int n, U8CPU riteA) {
    SkASSERT(leftA <= 0xFF);
    SkASSERT(riteA <= 0xFF);

    int16_t* run = runs.fRuns;
    uint8_t* aa = runs.fAlpha;

    if (ileft > 0) {
        // leading fully-transparent gap
        run[0] = ileft;
        aa[0] = 0;
        run += ileft;
        aa += ileft;
    }

    SkASSERT(leftA < 0xFF);
    if (leftA > 0) {
        *run++ = 1;
        *aa++ = leftA;
    }

    if (n > 0) {
        // opaque middle
        run[0] = n;
        aa[0] = 0xFF;
        run += n;
        aa += n;
    }

    SkASSERT(riteA < 0xFF);
    if (riteA > 0) {
        *run++ = 1;
        *aa++ = riteA;
    }
    run[0] = 0;  // terminator
}
#endif
297
// Blit a rectangle given in supersampled coordinates. Full destination rows
// inside the rect are resolved directly (blitV/blitAntiRect) instead of being
// accumulated one supersampled row at a time; only the partial leading and
// trailing rows go through blitH().
void SuperBlitter::blitRect(int x, int y, int width, int height) {
    SkASSERT(width > 0);
    SkASSERT(height > 0);

    // blit leading rows until y is aligned to a destination row boundary
    while ((y & MASK)) {
        this->blitH(x, y++, width);
        if (--height <= 0) {
            return;
        }
    }
    SkASSERT(height > 0);

    // Since this is a rect, instead of blitting supersampled rows one at a
    // time and then resolving to the destination canvas, we can blit
    // directly to the destination canvas one row per SCALE supersampled rows.
    int start_y = y >> SHIFT;
    int stop_y = (y + height) >> SHIFT;
    int count = stop_y - start_y;  // number of full destination rows
    if (count > 0) {
        y += count << SHIFT;
        height -= count << SHIFT;

        // save original X for our tail blitH() loop at the bottom
        int origX = x;

        x -= fSuperLeft;
        // hack, until I figure out why my cubics (I think) go beyond the bounds
        if (x < 0) {
            width += x;
            x = 0;
        }

        // There is always a left column, a middle, and a right column.
        // ileft is the destination x of the first pixel of the entire rect.
        // xleft is (SCALE - # of covered supersampled pixels) in that
        // destination pixel.
        int ileft = x >> SHIFT;
        int xleft = x & MASK;
        // irite is the destination x of the last pixel of the OPAQUE section.
        // xrite is the number of supersampled pixels extending beyond irite;
        // xrite/SCALE should give us alpha.
        int irite = (x + width) >> SHIFT;
        int xrite = (x + width) & MASK;
        if (!xrite) {
            // right edge is pixel-aligned: the last opaque pixel is one back
            xrite = SCALE;
            irite--;
        }

        // Need to call flush() to clean up pending draws before we
        // even consider blitV(), since otherwise it can look nonmonotonic.
        SkASSERT(start_y > fCurrIY);
        this->flush();

        int n = irite - ileft - 1;  // fully-opaque destination columns
        if (n < 0) {
            // If n < 0, we'll only have a single partially-transparent column
            // of pixels to render.
            xleft = xrite - xleft;
            SkASSERT(xleft <= SCALE);
            SkASSERT(xleft > 0);
            fRealBlitter->blitV(ileft + fLeft, start_y, count,
                coverage_to_exact_alpha(xleft));
        } else {
            // With n = 0, we have two possibly-transparent columns of pixels
            // to render; with n > 0, we have opaque columns between them.

            xleft = SCALE - xleft;

            // Using coverage_to_exact_alpha is not consistent with blitH()
            const int coverageL = coverage_to_exact_alpha(xleft);
            const int coverageR = coverage_to_exact_alpha(xrite);

            SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
            SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);

            fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
                                       coverageL, coverageR);
        }

        // preamble for our next call to blitH(): pretend we just finished the
        // last full destination row so the trailing rows accumulate cleanly.
        fCurrIY = stop_y - 1;
        fOffsetX = 0;
        fCurrY = y - 1;
        fRuns.reset(fWidth);
        x = origX;
    }

    // catch any remaining few rows
    SkASSERT(height <= MASK);
    while (--height >= 0) {
        this->blitH(x, y++, width);
    }
}
392
393///////////////////////////////////////////////////////////////////////////////
394
/// Masked supersampling antialiased blitter. Accumulates coverage into a
/// small A8 mask held in fStorage, and blits the whole mask once on
/// destruction. Only usable for small, non-inverse fills (see CanHandleRect).
class MaskSuperBlitter : public BaseSuperBlitter {
public:
    MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkIRect&, bool isInverse);
    ~MaskSuperBlitter() override {
        // Resolve the accumulated mask through the wrapped blitter.
        fRealBlitter->blitMask(fMask, fClipRect);
    }

    void blitH(int x, int y, int width) override;

    // Returns true if bounds is small enough for the fixed-size fStorage
    // mask; otherwise the RLE SuperBlitter must be used instead.
    static bool CanHandleRect(const SkIRect& bounds) {
#ifdef FORCE_RLE
        return false;
#endif
        int width = bounds.width();
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow of 32bits
        int64_t storage = rb * bounds.height();

        return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
               (storage <= MaskSuperBlitter::kMAX_STORAGE);
    }

private:
    enum {
#ifdef FORCE_SUPERMASK
        kMAX_WIDTH = 2048,
        kMAX_STORAGE = 1024 * 1024 * 2
#else
        kMAX_WIDTH = 32,    // so we don't try to do very wide things, where the RLE blitter would be faster
        kMAX_STORAGE = 1024
#endif
    };

    SkMask      fMask;
    SkIRect     fClipRect;
    // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
    // perform a test to see if stopAlpha != 0
    uint32_t    fStorage[(kMAX_STORAGE >> 2) + 1];
};
435
MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                                   const SkIRect& clipBounds, bool isInverse)
    : BaseSuperBlitter(realBlitter, ir, clipBounds, isInverse)
{
    // Caller must have checked CanHandleRect(); inverse fills can draw
    // outside ir and therefore cannot use the mask path.
    SkASSERT(CanHandleRect(ir));
    SkASSERT(!isInverse);

    fMask.fImage    = (uint8_t*)fStorage;
    fMask.fBounds   = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat   = SkMask::kA8_Format;

    fClipRect = ir;
    if (!fClipRect.intersect(clipBounds)) {
        SkASSERT(0);
        fClipRect.setEmpty();
    }

    // For valgrind, write 1 extra byte at the end so we don't read
    // uninitialized memory. See comment in add_aa_span and fStorage[].
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
}
458
459static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
460 /* I should be able to just add alpha[x] + startAlpha.
461 However, if the trailing edge of the previous span and the leading
462 edge of the current span round to the same super-sampled x value,
463 I might overflow to 256 with this add, hence the funny subtract.
464 */
465 unsigned tmp = *alpha + startAlpha;
466 SkASSERT(tmp <= 256);
467 *alpha = SkToU8(tmp - (tmp >> 8));
468}
469
470static inline uint32_t quadplicate_byte(U8CPU value) {
471 uint32_t pair = (value << 8) | value;
472 return (pair << 16) | pair;
473}
474
475// Perform this tricky subtract, to avoid overflowing to 256. Our caller should
476// only ever call us with at most enough to hit 256 (never larger), so it is
477// enough to just subtract the high-bit. Actually clamping with a branch would
478// be slower (e.g. if (tmp > 255) tmp = 255;)
479//
480static inline void saturated_add(uint8_t* ptr, U8CPU add) {
481 unsigned tmp = *ptr + add;
482 SkASSERT(tmp <= 256);
483 *ptr = SkToU8(tmp - (tmp >> 8));
484}
485
// minimum count before we want to setup an inner loop, adding 4-at-a-time
#define MIN_COUNT_FOR_QUAD_LOOP 16

// Accumulate one supersampled span into a mask row: a partial left pixel
// (startAlpha), middleCount fully-covered pixels (each gains maxValue), and a
// partial right pixel (stopAlpha). maxValue is the per-supersampled-row
// coverage increment for a fully-covered pixel.
static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
                        U8CPU stopAlpha, U8CPU maxValue) {
    SkASSERT(middleCount >= 0);

    saturated_add(alpha, startAlpha);
    alpha += 1;

    if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
        // loop until we're quad-byte aligned
        while (reinterpret_cast<intptr_t>(alpha) & 0x3) {
            alpha[0] = SkToU8(alpha[0] + maxValue);
            alpha += 1;
            middleCount -= 1;
        }

        // Add maxValue to 4 bytes per iteration with a single 32-bit add.
        // NOTE(review): this assumes no per-byte sum can carry into the
        // neighboring lane — callers keep each byte's total <= 255 here.
        int bigCount = middleCount >> 2;
        uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
        uint32_t qval = quadplicate_byte(maxValue);
        do {
            *qptr++ += qval;
        } while (--bigCount > 0);

        middleCount &= 3;
        alpha = reinterpret_cast<uint8_t*> (qptr);
        // fall through to the following while-loop
    }

    // remaining (or all, for short spans) middle pixels one at a time
    while (--middleCount >= 0) {
        alpha[0] = SkToU8(alpha[0] + maxValue);
        alpha += 1;
    }

    // potentially this can be off the end of our "legal" alpha values, but that
    // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
    // every time (slow), we just do it, and ensure that we've allocated extra space
    // (see the + 1 comment in fStorage[])
    saturated_add(alpha, stopAlpha);
}
527
// Accumulate one supersampled span (x, width in supersampled coords) into the
// A8 mask row for the corresponding destination row.
void MaskSuperBlitter::blitH(int x, int y, int width) {
    int iy = (y >> SHIFT);  // destination row

    SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
    iy -= fMask.fBounds.fTop;   // make it relative to 0

    // This should never happen, but it does. Until the true cause is
    // discovered, let's skip this span instead of crashing.
    // See http://crbug.com/17569.
    if (iy < 0) {
        return;
    }

#ifdef SK_DEBUG
    {
        int ix = x >> SHIFT;
        SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
    }
#endif

    x -= SkLeftShift(fMask.fBounds.fLeft, SHIFT);

    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

    // First mask byte this span touches.
    uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    // fb = partial coverage in the first destination pixel,
    // fe = partial coverage in the last, n = fully-covered pixels between.
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;


    if (n < 0) {
        // Span begins and ends inside a single destination pixel.
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fe - fb));
    } else {
        fb = SCALE - fb;  // convert offset into covered sample count
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row,  coverage_to_partial_alpha(fb),
                    n, coverage_to_partial_alpha(fe),
                    (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
    }

#ifdef SK_DEBUG
    fCurrX = x + width;
#endif
}
584
585///////////////////////////////////////////////////////////////////////////////
586
587static SkIRect safeRoundOut(const SkRect& src) {
588 // roundOut will pin huge floats to max/min int
589 SkIRect dst = src.roundOut();
590
591 // intersect with a smaller huge rect, so the rect will not be considered empty for being
592 // too large. e.g. { -SK_MaxS32 ... SK_MaxS32 } is considered empty because its width
593 // exceeds signed 32bit.
594 const int32_t limit = SK_MaxS32 >> SK_SUPERSAMPLE_SHIFT;
595 (void)dst.intersect({ -limit, -limit, limit, limit});
596
597 return dst;
598}
599
// Number of leading path points sampled by compute_complexity().
constexpr int kSampleSize = 8;
#if !defined(SK_DISABLE_AAA)
    // Estimated intersections-per-scanline above which AAA is not used.
    constexpr SkScalar kComplexityThreshold = 0.25;
#endif
604
605static void compute_complexity(const SkPathView& path, SkScalar& avgLength, SkScalar& complexity) {
606 int n = path.fPoints.count();
607 if (n < kSampleSize || path.fBounds.isEmpty()) {
608 // set to invalid value to indicate that we failed to compute
609 avgLength = complexity = -1;
610 return;
611 }
612
613 SkScalar sumLength = 0;
614 SkPoint lastPoint = path.fPoints[0];
615 for(int i = 1; i < kSampleSize; ++i) {
616 SkPoint point = path.fPoints[i];
617 sumLength += SkPoint::Distance(lastPoint, point);
618 lastPoint = point;
619 }
620 avgLength = sumLength / (kSampleSize - 1);
621
622 auto sqr = [](SkScalar x) { return x*x; };
623
624 SkScalar diagonalSqr = sqr(path.fBounds.width()) + sqr(path.fBounds.height());
625
626 // If the path consists of random line segments, the number of intersections should be
627 // proportional to this.
628 SkScalar intersections = sk_ieee_float_divide(sqr(n) * sqr(avgLength), diagonalSqr);
629
630 // The number of intersections per scanline should be proportional to this number.
631 complexity = sk_ieee_float_divide(intersections, path.fBounds.height());
632
633 if (sk_float_isnan(complexity)) { // it may be possible to have 0.0 / 0.0; inf is fine for us.
634 complexity = -1;
635 }
636}
637
// Decide between Analytic AA (AAA) and supersampled AA (SAA) for this path,
// using the precomputed avgLength/complexity estimates (may be -1 when
// compute_complexity() could not produce them).
static bool ShouldUseAAA(const SkPathView& path, SkScalar avgLength, SkScalar complexity) {
#if defined(SK_DISABLE_AAA)
    return false;
#else
    if (gSkForceAnalyticAA) {
        return true;
    }
    if (!gSkUseAnalyticAA) {
        return false;
    }
    if (path.isRect(nullptr)) {
        // Rects are AAA's best case.
        return true;
    }

    #ifdef SK_SUPPORT_LEGACY_AAA_CHOICE
        const SkRect& bounds = path.fBounds;
        // When the path has so many points compared to the size of its
        // bounds/resolution, it indicates that the path is not quite smooth in
        // the current resolution: the expected number of turning points in
        // every pixel row/column is significantly greater than zero. Hence
        // Analytic AA is not likely to produce visible quality improvements,
        // and Analytic AA might be slower than supersampling.
        return path.fPoints.count() < std::max(bounds.width(), bounds.height()) / 2 - 10;
    #else
        if (path.fPoints.count() >= path.fBounds.height()) {
            // SAA is faster than AAA in this case even if there are no
            // intersections because AAA will have too many scan lines. See
            // skbug.com/8272
            return false;
        }
        // We will use AAA if the number of verbs < kSampleSize and therefore complexity < 0
        return complexity < kComplexityThreshold;
    #endif
#endif
}
673
674void SkScan::SAAFillPath(const SkPathView& path, SkBlitter* blitter, const SkIRect& ir,
675 const SkIRect& clipBounds, bool forceRLE) {
676 bool containedInClip = clipBounds.contains(ir);
677 bool isInverse = path.isInverseFillType();
678
679 // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
680 // if we're an inverse filltype
681 if (!isInverse && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
682 MaskSuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
683 SkASSERT(SkIntToScalar(ir.fTop) <= path.fBounds.fTop);
684 sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
685 } else {
686 SuperBlitter superBlit(blitter, ir, clipBounds, isInverse);
687 sk_fill_path(path, clipBounds, &superBlit, ir.fTop, ir.fBottom, SHIFT, containedInClip);
688 }
689}
690
// Returns 0 iff (value << shift) still fits in a signed 16-bit value.
// Shifting up by (16 + shift) and arithmetic-shifting back down round-trips
// exactly when no significant bits were lost, so the difference is non-zero
// precisely on overflow.
static int overflows_short_shift(int value, int shift) {
    const int s = 16 + shift;
    return (SkLeftShift(value, s) >> s) - value;
}
695
696/**
697 Would any of the coordinates of this rectangle not fit in a short,
698 when left-shifted by shift?
699*/
700static int rect_overflows_short_shift(SkIRect rect, int shift) {
701 SkASSERT(!overflows_short_shift(8191, shift));
702 SkASSERT(overflows_short_shift(8192, shift));
703 SkASSERT(!overflows_short_shift(32767, 0));
704 SkASSERT(overflows_short_shift(32768, 0));
705
706 // Since we expect these to succeed, we bit-or together
707 // for a tiny extra bit of speed.
708 return overflows_short_shift(rect.fLeft, shift) |
709 overflows_short_shift(rect.fRight, shift) |
710 overflows_short_shift(rect.fTop, shift) |
711 overflows_short_shift(rect.fBottom, shift);
712}
713
// Antialiased fill entry point (region clip). Handles inverse fills, falls
// back to non-AA FillPath when coordinates would overflow the supersampled
// int16 range, restricts oversized clips, then dispatches to AAA or SAA.
void SkScan::AntiFillPath(const SkPathView& path, const SkRegion& origClip,
                          SkBlitter* blitter, bool forceRLE) {
    if (origClip.isEmpty()) {
        return;
    }

    const bool isInverse = path.isInverseFillType();
    SkIRect ir = safeRoundOut(path.fBounds);
    if (ir.isEmpty()) {
        // Empty path: an inverse fill still covers the whole clip.
        if (isInverse) {
            blitter->blitRegion(origClip);
        }
        return;
    }

    // If the intersection of the path bounds and the clip bounds
    // will overflow 32767 when << by SHIFT, we can't supersample,
    // so draw without antialiasing.
    SkIRect clippedIR;
    if (isInverse) {
       // If the path is an inverse fill, it's going to fill the entire
       // clip, and we care whether the entire clip exceeds our limits.
       clippedIR = origClip.getBounds();
    } else {
       if (!clippedIR.intersect(ir, origClip.getBounds())) {
           return;
       }
    }
    if (rect_overflows_short_shift(clippedIR, SHIFT)) {
        SkScan::FillPath(path, origClip, blitter);
        return;
    }

    // Our antialiasing can't handle a clip larger than 32767, so we restrict
    // the clip to that limit here. (the runs[] uses int16_t for its index).
    //
    // A more general solution (one that could also eliminate the need to
    // disable aa based on ir bounds (see overflows_short_shift) would be
    // to tile the clip/target...
    SkRegion tmpClipStorage;
    const SkRegion* clipRgn = &origClip;
    {
        static const int32_t kMaxClipCoord = 32767;
        const SkIRect& bounds = origClip.getBounds();
        if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
            SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
            tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
            clipRgn = &tmpClipStorage;
        }
    }
    // for here down, use clipRgn, not origClip

    SkScanClipper clipper(blitter, clipRgn, ir);

    if (clipper.getBlitter() == nullptr) { // clipped out
        if (isInverse) {
            blitter->blitRegion(*clipRgn);
        }
        return;
    }

    SkASSERT(clipper.getClipRect() == nullptr ||
            *clipper.getClipRect() == clipRgn->getBounds());

    // now use the (possibly wrapped) blitter
    blitter = clipper.getBlitter();

    if (isInverse) {
        // Fill everything above the path bounds; below is done after.
        sk_blit_above(blitter, ir, *clipRgn);
    }

    SkScalar avgLength, complexity;
    compute_complexity(path, avgLength, complexity);

    if (ShouldUseAAA(path, avgLength, complexity)) {
        // Do not use AAA if path is too complicated:
        // there won't be any speedup or significant visual improvement.
        SkScan::AAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
    } else {
        SkScan::SAAFillPath(path, blitter, ir, clipRgn->getBounds(), forceRLE);
    }

    if (isInverse) {
        sk_blit_below(blitter, ir, *clipRgn);
    }
}
800
801///////////////////////////////////////////////////////////////////////////////
802
803#include "src/core/SkRasterClip.h"
804
805void SkScan::FillPath(const SkPathView& path, const SkRasterClip& clip, SkBlitter* blitter) {
806 if (clip.isEmpty() || !path.isFinite()) {
807 return;
808 }
809
810 if (clip.isBW()) {
811 FillPath(path, clip.bwRgn(), blitter);
812 } else {
813 SkRegion tmp;
814 SkAAClipBlitter aaBlitter;
815
816 tmp.setRect(clip.getBounds());
817 aaBlitter.init(blitter, &clip.aaRgn());
818 SkScan::FillPath(path, tmp, &aaBlitter);
819 }
820}
821
822void SkScan::AntiFillPath(const SkPathView& path, const SkRasterClip& clip, SkBlitter* blitter) {
823 if (clip.isEmpty() || !path.isFinite()) {
824 return;
825 }
826
827 if (clip.isBW()) {
828 AntiFillPath(path, clip.bwRgn(), blitter, false);
829 } else {
830 SkRegion tmp;
831 SkAAClipBlitter aaBlitter;
832
833 tmp.setRect(clip.getBounds());
834 aaBlitter.init(blitter, &clip.aaRgn());
835 AntiFillPath(path, tmp, &aaBlitter, true); // SkAAClipBlitter can blitMask, why forceRLE?
836 }
837}
838