1/*
2 * Copyright 2016 The Android Open Source Project
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "include/core/SkPath.h"
9#include "include/core/SkRegion.h"
10#include "include/private/SkTemplates.h"
11#include "include/private/SkTo.h"
12#include "src/core/SkAnalyticEdge.h"
13#include "src/core/SkAntiRun.h"
14#include "src/core/SkAutoMalloc.h"
15#include "src/core/SkBlitter.h"
16#include "src/core/SkEdge.h"
17#include "src/core/SkEdgeBuilder.h"
18#include "src/core/SkGeometry.h"
19#include "src/core/SkQuadClipper.h"
20#include "src/core/SkRasterClip.h"
21#include "src/core/SkScan.h"
22#include "src/core/SkScanPriv.h"
23#include "src/core/SkTSort.h"
24
25#include <utility>
26
27#if defined(SK_DISABLE_AAA)
// Stub used when analytic anti-aliasing is compiled out (SK_DISABLE_AAA):
// asserts in debug builds and draws nothing in release builds.
void SkScan::AAAFillPath(const SkPath&, SkBlitter*, const SkIRect&, const SkIRect&, bool) {
    SkDEBUGFAIL("AAA Disabled");
    return;
}
32#else
33
34/*
35
36The following is a high-level overview of our analytic anti-aliasing
37algorithm. We consider a path as a collection of line segments, as
38quadratic/cubic curves are converted to small line segments. Without loss of
39generality, let's assume that the draw region is [0, W] x [0, H].
40
41Our algorithm is based on horizontal scan lines (y = c_i) as the previous
42sampling-based algorithm did. However, our algorithm uses non-equal-spaced
43scan lines, while the previous method always uses equal-spaced scan lines,
44such as (y = 1/2 + 0, 1/2 + 1, 1/2 + 2, ...) in the previous non-AA algorithm,
45and (y = 1/8 + 1/4, 1/8 + 2/4, 1/8 + 3/4, ...) in the previous
4616-supersampling AA algorithm.
47
48Our algorithm contains scan lines y = c_i for c_i that is either:
49
501. an integer between [0, H]
51
522. the y value of a line segment endpoint
53
543. the y value of an intersection of two line segments
55
For two consecutive scan lines y = c_i, y = c_{i+1}, we analytically compute
the coverage of this horizontal strip of our path on each pixel. This can be
58done very efficiently because the strip of our path now only consists of
59trapezoids whose top and bottom edges are y = c_i, y = c_{i+1} (this includes
60rectangles and triangles as special cases).
61
We now describe how the coverage of a single pixel is computed against such a
trapezoid. That coverage is essentially the intersection area of a rectangle
64(e.g., [0, 1] x [c_i, c_{i+1}]) and our trapezoid. However, that intersection
65could be complicated, as shown in the example region A below:
66
67+-----------\----+
68| \ C|
69| \ |
70\ \ |
71|\ A \|
72| \ \
73| \ |
74| B \ |
75+----\-----------+
76
77However, we don't have to compute the area of A directly. Instead, we can
78compute the excluded area, which are B and C, quite easily, because they're
79just triangles. In fact, we can prove that an excluded region (take B as an
80example) is either itself a simple trapezoid (including rectangles, triangles,
81and empty regions), or its opposite (the opposite of B is A + C) is a simple
82trapezoid. In any case, we can compute its area efficiently.
83
84In summary, our algorithm has a higher quality because it generates ground-
truth coverages analytically. It is also faster because it has far fewer
unnecessary horizontal scan lines. For example, given a triangle path, the
87number of scan lines in our algorithm is only about 3 + H while the
8816-supersampling algorithm has about 4H scan lines.
89
90*/
91
92static void add_alpha(SkAlpha* alpha, SkAlpha delta) {
93 SkASSERT(*alpha + delta <= 256);
94 *alpha = SkAlphaRuns::CatchOverflow(*alpha + delta);
95}
96
97static void safely_add_alpha(SkAlpha* alpha, SkAlpha delta) {
98 *alpha = std::min(0xFF, *alpha + delta);
99}
100
// A blitter that *accumulates* (adds) partial coverage into a row instead of
// overwriting it like a normal SkBlitter. The AAA walker emits per-strip
// coverage deltas; subclasses buffer them and forward completed rows to the
// real blitter.
class AdditiveBlitter : public SkBlitter {
public:
    ~AdditiveBlitter() override {}

    // Return the underlying destination blitter. With forceRealBlitter set,
    // always return the wrapped blitter (used when falling back to non-AAA
    // code paths); otherwise a subclass may return itself.
    virtual SkBlitter* getRealBlitter(bool forceRealBlitter = false) = 0;

    // Add `len` coverage deltas to row y starting at x.
    virtual void blitAntiH(int x, int y, const SkAlpha antialias[], int len) = 0;
    // Add a single coverage delta at (x, y).
    virtual void blitAntiH(int x, int y, const SkAlpha alpha) = 0;
    // Add the same coverage delta to `width` pixels of row y starting at x.
    virtual void blitAntiH(int x, int y, int width, const SkAlpha alpha) = 0;

    // The run-based and rect SkBlitter entry points below are not meaningful
    // for an additive blitter; they must be invoked on the real blitter.
    void blitAntiH(int x, int y, const SkAlpha antialias[], const int16_t runs[]) override {
        SkDEBUGFAIL("Please call real blitter's blitAntiH instead.");
    }

    void blitV(int x, int y, int height, SkAlpha alpha) override {
        SkDEBUGFAIL("Please call real blitter's blitV instead.");
    }

    void blitH(int x, int y, int width) override {
        SkDEBUGFAIL("Please call real blitter's blitH instead.");
    }

    void blitRect(int x, int y, int width, int height) override {
        SkDEBUGFAIL("Please call real blitter's blitRect instead.");
    }

    void blitAntiRect(int x, int y, int width, int height, SkAlpha leftAlpha, SkAlpha rightAlpha)
            override {
        SkDEBUGFAIL("Please call real blitter's blitAntiRect instead.");
    }

    // Width (in pixels) of the region being blitted.
    virtual int getWidth() = 0;

    // Flush the additive alpha cache if floor(y) and floor(nextY) is different
    // (i.e., we'll start working on a new pixel row).
    virtual void flush_if_y_changed(SkFixed y, SkFixed nextY) = 0;
};
138
// We need this mask blitter because it significantly accelerates small path
// filling: coverage is accumulated directly into a small A8 mask held in
// fStorage and blitted once, in the destructor, via blitMask().
class MaskAdditiveBlitter : public AdditiveBlitter {
public:
    MaskAdditiveBlitter(SkBlitter* realBlitter,
                        const SkIRect& ir,
                        const SkIRect& clipBounds,
                        bool isInverse);
    // The accumulated mask is delivered to the real blitter exactly once, here.
    ~MaskAdditiveBlitter() override { fRealBlitter->blitMask(fMask, fClipRect); }

    // Most of the time, we still consider this mask blitter as the real blitter
    // so we can accelerate blitRect and others. But sometimes we want to return
    // the absolute real blitter (e.g., when we fall back to the old code path).
    SkBlitter* getRealBlitter(bool forceRealBlitter) override {
        return forceRealBlitter ? fRealBlitter : this;
    }

    // Virtual function is slow. So don't use this. Directly add alpha to the mask instead.
    void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;

    // The following methods are used to blit rectangles during aaa_walk_convex_edges.
    // Since there aren't many rectangles, we can still bear the slow speed of virtual functions.
    void blitAntiH(int x, int y, const SkAlpha alpha) override;
    void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
    void blitV(int x, int y, int height, SkAlpha alpha) override;
    void blitRect(int x, int y, int width, int height) override;
    void blitAntiRect(int x, int y, int width, int height, SkAlpha leftAlpha, SkAlpha rightAlpha)
            override;

    // The flush is only needed for RLE (RunBasedAdditiveBlitter)
    void flush_if_y_changed(SkFixed y, SkFixed nextY) override {}

    int getWidth() override { return fClipRect.width(); }

    // True if `bounds` is small enough for the fixed-size mask storage.
    static bool CanHandleRect(const SkIRect& bounds) {
        int width = bounds.width();
        if (width > MaskAdditiveBlitter::kMAX_WIDTH) {
            return false;
        }
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow
        int64_t storage = rb * bounds.height();

        return (width <= MaskAdditiveBlitter::kMAX_WIDTH) &&
               (storage <= MaskAdditiveBlitter::kMAX_STORAGE);
    }

    // Return a pointer where pointer[x] corresponds to the alpha of (x, y)
    uint8_t* getRow(int y) {
        if (y != fY) {
            fY = y;
            fRow = fMask.fImage + (y - fMask.fBounds.fTop) * fMask.fRowBytes - fMask.fBounds.fLeft;
        }
        return fRow;
    }

private:
    // so we don't try to do very wide things, where the RLE blitter would be faster
    static const int kMAX_WIDTH = 32;
    static const int kMAX_STORAGE = 1024;

    SkBlitter* fRealBlitter;
    SkMask fMask;
    SkIRect fClipRect;
    // we add 2 because we can write 1 extra byte at either end due to precision error
    uint32_t fStorage[(kMAX_STORAGE >> 2) + 2];

    uint8_t* fRow;  // cached pointer for the row fY (see getRow())
    int fY;         // y coordinate that fRow corresponds to
};
208
MaskAdditiveBlitter::MaskAdditiveBlitter(SkBlitter* realBlitter,
                                         const SkIRect& ir,
                                         const SkIRect& clipBounds,
                                         bool isInverse) {
    SkASSERT(CanHandleRect(ir));
    SkASSERT(!isInverse);  // inverse fills never reach the mask blitter

    fRealBlitter = realBlitter;

    // Offset by 1 so a precision-error write at bounds.fLeft - 1 still lands
    // inside fStorage (which carries 1 byte of padding at each end).
    fMask.fImage = (uint8_t*)fStorage + 1;  // There's 1 extra byte at either end of fStorage
    fMask.fBounds = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat = SkMask::kA8_Format;

    // Invalidate the row cache so the first getRow() recomputes fRow.
    fY = ir.fTop - 1;
    fRow = nullptr;

    fClipRect = ir;
    if (!fClipRect.intersect(clipBounds)) {
        SkASSERT(0);
        fClipRect.setEmpty();
    }

    // Zero the used portion of the mask plus the 2 padding bytes.
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 2);
}
234
// Intentionally unimplemented: callers on the mask path write into getRow()
// directly to avoid virtual-call overhead.
void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
    SK_ABORT("Don't use this; directly add alphas to the mask.");
}
238
// Accumulate a single alpha at (x, y); x may be bounds.fLeft - 1 due to
// precision error (covered by the storage padding).
void MaskAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
    SkASSERT(x >= fMask.fBounds.fLeft - 1);
    add_alpha(&this->getRow(y)[x], alpha);
}
243
244void MaskAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
245 SkASSERT(x >= fMask.fBounds.fLeft - 1);
246 uint8_t* row = this->getRow(y);
247 for (int i = 0; i < width; ++i) {
248 add_alpha(&row[x + i], alpha);
249 }
250}
251
252void MaskAdditiveBlitter::blitV(int x, int y, int height, SkAlpha alpha) {
253 if (alpha == 0) {
254 return;
255 }
256 SkASSERT(x >= fMask.fBounds.fLeft - 1);
257 // This must be called as if this is a real blitter.
258 // So we directly set alpha rather than adding it.
259 uint8_t* row = this->getRow(y);
260 for (int i = 0; i < height; ++i) {
261 row[x] = alpha;
262 row += fMask.fRowBytes;
263 }
264}
265
266void MaskAdditiveBlitter::blitRect(int x, int y, int width, int height) {
267 SkASSERT(x >= fMask.fBounds.fLeft - 1);
268 // This must be called as if this is a real blitter.
269 // So we directly set alpha rather than adding it.
270 uint8_t* row = this->getRow(y);
271 for (int i = 0; i < height; ++i) {
272 memset(row + x, 0xFF, width);
273 row += fMask.fRowBytes;
274 }
275}
276
// Blit a rect with anti-aliased left/right columns: the interior
// [x+1, x+width] is opaque, flanked by columns of leftAlpha and rightAlpha.
void MaskAdditiveBlitter::blitAntiRect(int x,
                                       int y,
                                       int width,
                                       int height,
                                       SkAlpha leftAlpha,
                                       SkAlpha rightAlpha) {
    blitV(x, y, height, leftAlpha);
    blitV(x + 1 + width, y, height, rightAlpha);
    blitRect(x + 1, y, width, height);
}
287
// Additive blitter that accumulates coverage into an SkAlphaRuns (RLE) row
// and flushes each completed row to the real blitter; used when the path is
// too large for MaskAdditiveBlitter's fixed-size mask.
class RunBasedAdditiveBlitter : public AdditiveBlitter {
public:
    RunBasedAdditiveBlitter(SkBlitter* realBlitter,
                            const SkIRect& ir,
                            const SkIRect& clipBounds,
                            bool isInverse);

    // Deliver any still-buffered row before tearing down.
    ~RunBasedAdditiveBlitter() override { this->flush(); }

    SkBlitter* getRealBlitter(bool forceRealBlitter) override { return fRealBlitter; }

    void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
    void blitAntiH(int x, int y, const SkAlpha alpha) override;
    void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;

    int getWidth() override { return fWidth; }

    // Flush the buffered row when y and nextY land on different pixel rows.
    void flush_if_y_changed(SkFixed y, SkFixed nextY) override {
        if (SkFixedFloorToInt(y) != SkFixedFloorToInt(nextY)) {
            this->flush();
        }
    }

protected:
    SkBlitter* fRealBlitter;

    int fCurrY; // Current y coordinate.
    int fWidth; // Widest row of region to be blitted
    int fLeft; // Leftmost x coordinate in any row
    int fTop; // Initial y coordinate (top of bounds)

    // The next three variables are used to track a circular buffer that
    // contains the values used in SkAlphaRuns. These variables should only
    // ever be updated in advanceRuns(), and fRuns should always point to
    // a valid SkAlphaRuns...
    int fRunsToBuffer;
    void* fRunsBuffer;
    int fCurrentRun;
    SkAlphaRuns fRuns;

    // Hint passed to (and returned from) fRuns.add() to speed up sequential
    // adds within a row; reset to 0 whenever we move left or flush.
    int fOffsetX;

    // True iff [x, x + width) lies inside this blitter's row.
    bool check(int x, int width) const { return x >= 0 && x + width <= fWidth; }

    // extra one to store the zero at the end
    int getRunsSz() const { return (fWidth + 1 + (fWidth + 2) / 2) * sizeof(int16_t); }

    // This function updates the fRuns variable to point to the next buffer space
    // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
    // and resets fRuns to point to an empty scanline.
    void advanceRuns() {
        const size_t kRunsSz = this->getRunsSz();
        fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
        fRuns.fRuns = reinterpret_cast<int16_t*>(reinterpret_cast<uint8_t*>(fRunsBuffer) +
                                                 fCurrentRun * kRunsSz);
        fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
        fRuns.reset(fWidth);
    }

    // Blitting 0xFF and 0 is much faster so we snap alphas close to them
    SkAlpha snapAlpha(SkAlpha alpha) { return alpha > 247 ? 0xFF : alpha < 8 ? 0x00 : alpha; }

    // Send the currently accumulated row (if any) to the real blitter and
    // reset state so the next blit starts a fresh row.
    void flush() {
        if (fCurrY >= fTop) {
            SkASSERT(fCurrentRun < fRunsToBuffer);
            for (int x = 0; fRuns.fRuns[x]; x += fRuns.fRuns[x]) {
                // It seems that blitting 255 or 0 is much faster than blitting 254 or 1
                fRuns.fAlpha[x] = snapAlpha(fRuns.fAlpha[x]);
            }
            if (!fRuns.empty()) {
                // SkDEBUGCODE(fRuns.dump();)
                fRealBlitter->blitAntiH(fLeft, fCurrY, fRuns.fAlpha, fRuns.fRuns);
                this->advanceRuns();
                fOffsetX = 0;
            }
            fCurrY = fTop - 1;  // mark "no row in progress"
        }
    }

    // Flush if we're moving to a different row than the buffered one.
    void checkY(int y) {
        if (y != fCurrY) {
            this->flush();
            fCurrY = y;
        }
    }
};
374
RunBasedAdditiveBlitter::RunBasedAdditiveBlitter(SkBlitter* realBlitter,
                                                 const SkIRect& ir,
                                                 const SkIRect& clipBounds,
                                                 bool isInverse) {
    fRealBlitter = realBlitter;

    SkIRect sectBounds;
    if (isInverse) {
        // We use the clip bounds instead of the ir, since we may be asked to
        // draw outside of the rect when we're a inverse filltype
        sectBounds = clipBounds;
    } else {
        if (!sectBounds.intersect(ir, clipBounds)) {
            sectBounds.setEmpty();
        }
    }

    const int left = sectBounds.left();
    const int right = sectBounds.right();

    fLeft = left;
    fWidth = right - left;
    fTop = sectBounds.top();
    fCurrY = fTop - 1;  // no row buffered yet

    // Borrow row storage from the real blitter for the circular run buffer.
    fRunsToBuffer = realBlitter->requestRowsPreserved();
    fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
    fCurrentRun = -1;  // advanceRuns() below moves this to slot 0

    this->advanceRuns();

    fOffsetX = 0;
}
408
// Accumulate `len` per-pixel alphas into the buffered row, clipping the span
// to [fLeft, fLeft + fWidth).
void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
    checkY(y);
    x -= fLeft;

    // Clip on the left: shorten the span and skip the clipped alphas.
    if (x < 0) {
        len += x;
        antialias -= x;
        x = 0;
    }
    // Clip on the right.
    len = std::min(len, fWidth - x);
    SkASSERT(check(x, len));

    // The add() hint is only valid if we keep moving right.
    if (x < fOffsetX) {
        fOffsetX = 0;
    }

    fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run
    // Split every run covering [x, x + len) into 1-pixel runs so each pixel's
    // alpha can be adjusted independently below.
    for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
        for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
            fRuns.fRuns[x + i + j] = 1;
            fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
        }
        fRuns.fRuns[x + i] = 1;
    }
    for (int i = 0; i < len; ++i) {
        add_alpha(&fRuns.fAlpha[x + i], antialias[i]);
    }
}
437
438void RunBasedAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
439 checkY(y);
440 x -= fLeft;
441
442 if (x < fOffsetX) {
443 fOffsetX = 0;
444 }
445
446 if (this->check(x, 1)) {
447 fOffsetX = fRuns.add(x, 0, 1, 0, alpha, fOffsetX);
448 }
449}
450
451void RunBasedAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
452 checkY(y);
453 x -= fLeft;
454
455 if (x < fOffsetX) {
456 fOffsetX = 0;
457 }
458
459 if (this->check(x, width)) {
460 fOffsetX = fRuns.add(x, 0, width, 0, alpha, fOffsetX);
461 }
462}
463
// This exists specifically for concave path filling.
// In those cases, we can easily accumulate alpha greater than 0xFF.
// It overrides only the blitAntiH family, replacing add_alpha with the
// saturating safely_add_alpha.
class SafeRLEAdditiveBlitter : public RunBasedAdditiveBlitter {
public:
    SafeRLEAdditiveBlitter(SkBlitter* realBlitter,
                           const SkIRect& ir,
                           const SkIRect& clipBounds,
                           bool isInverse)
            : RunBasedAdditiveBlitter(realBlitter, ir, clipBounds, isInverse) {}

    void blitAntiH(int x, int y, const SkAlpha antialias[], int len) override;
    void blitAntiH(int x, int y, const SkAlpha alpha) override;
    void blitAntiH(int x, int y, int width, const SkAlpha alpha) override;
};
478
// Same as RunBasedAdditiveBlitter::blitAntiH(array), but accumulates with
// saturating adds since concave fills can push coverage past 0xFF.
void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha antialias[], int len) {
    checkY(y);
    x -= fLeft;

    // Clip on the left: shorten the span and skip the clipped alphas.
    if (x < 0) {
        len += x;
        antialias -= x;
        x = 0;
    }
    // Clip on the right.
    len = std::min(len, fWidth - x);
    SkASSERT(check(x, len));

    // The add() hint is only valid if we keep moving right.
    if (x < fOffsetX) {
        fOffsetX = 0;
    }

    fOffsetX = fRuns.add(x, 0, len, 0, 0, fOffsetX); // Break the run
    // Split every run covering [x, x + len) into 1-pixel runs so each pixel's
    // alpha can be adjusted independently below.
    for (int i = 0; i < len; i += fRuns.fRuns[x + i]) {
        for (int j = 1; j < fRuns.fRuns[x + i]; j++) {
            fRuns.fRuns[x + i + j] = 1;
            fRuns.fAlpha[x + i + j] = fRuns.fAlpha[x + i];
        }
        fRuns.fRuns[x + i] = 1;
    }
    for (int i = 0; i < len; ++i) {
        safely_add_alpha(&fRuns.fAlpha[x + i], antialias[i]);
    }
}
507
508void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, const SkAlpha alpha) {
509 checkY(y);
510 x -= fLeft;
511
512 if (x < fOffsetX) {
513 fOffsetX = 0;
514 }
515
516 if (check(x, 1)) {
517 // Break the run
518 fOffsetX = fRuns.add(x, 0, 1, 0, 0, fOffsetX);
519 safely_add_alpha(&fRuns.fAlpha[x], alpha);
520 }
521}
522
523void SafeRLEAdditiveBlitter::blitAntiH(int x, int y, int width, const SkAlpha alpha) {
524 checkY(y);
525 x -= fLeft;
526
527 if (x < fOffsetX) {
528 fOffsetX = 0;
529 }
530
531 if (check(x, width)) {
532 // Break the run
533 fOffsetX = fRuns.add(x, 0, width, 0, 0, fOffsetX);
534 for (int i = x; i < x + width; i += fRuns.fRuns[i]) {
535 safely_add_alpha(&fRuns.fAlpha[i], alpha);
536 }
537 }
538}
539
540// Return the alpha of a trapezoid whose height is 1
541static SkAlpha trapezoid_to_alpha(SkFixed l1, SkFixed l2) {
542 SkASSERT(l1 >= 0 && l2 >= 0);
543 SkFixed area = (l1 + l2) / 2;
544 return SkTo<SkAlpha>(area >> 8);
545}
546
// The alpha of right-triangle (a, a*b): legs of length a and a*b, both in
// SkFixed. Area = a * (a*b) / 2, approximated below with coarse shifts.
static SkAlpha partial_triangle_to_alpha(SkFixed a, SkFixed b) {
    SkASSERT(a <= SK_Fixed1);
#if 0
    // TODO(mtklein): skia:8877
    SkASSERT(b <= SK_Fixed1);
#endif

    // Approximating...
    // SkFixed area = SkFixedMul(a, SkFixedMul(a,b)) / 2;
    SkFixed area = (a >> 11) * (a >> 11) * (b >> 11);

#if 0
    // TODO(mtklein): skia:8877
    return SkTo<SkAlpha>(area >> 8);
#else
    // Masking (instead of asserting) tolerates b > SK_Fixed1 overflow; see
    // skia:8877 above.
    return SkTo<SkAlpha>((area >> 8) & 0xFF);
#endif
}
566
567static SkAlpha get_partial_alpha(SkAlpha alpha, SkFixed partialHeight) {
568 return SkToU8(SkFixedRoundToInt(alpha * partialHeight));
569}
570
571static SkAlpha get_partial_alpha(SkAlpha alpha, SkAlpha fullAlpha) {
572 return (alpha * fullAlpha) >> 8;
573}
574
// For SkFixed that's close to SK_Fixed1, we can't convert it to alpha by just shifting right.
// For example, when f = SK_Fixed1, right shifting 8 will get 256, but we need 255.
// This is rarely the problem so we'll only use this for blitting rectangles.
static SkAlpha fixed_to_alpha(SkFixed f) {
    SkASSERT(f <= SK_Fixed1);
    // Resolves to the (SkAlpha, SkFixed) overload: rounds 0xFF * f to int.
    return get_partial_alpha(0xFF, f);
}
582
583// Suppose that line (l1, y)-(r1, y+1) intersects with (l2, y)-(r2, y+1),
584// approximate (very coarsely) the x coordinate of the intersection.
585static SkFixed approximate_intersection(SkFixed l1, SkFixed r1, SkFixed l2, SkFixed r2) {
586 if (l1 > r1) {
587 std::swap(l1, r1);
588 }
589 if (l2 > r2) {
590 std::swap(l2, r2);
591 }
592 return (std::max(l1, l2) + std::min(r1, r2)) / 2;
593}
594
// Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0].
// Fill alphas[0 .. ceil(r)-1] with the per-pixel coverage of the area *above*
// the line whose x-span is [l, r]; dY is the line's |dy/dx| (vertical rise
// per full pixel of horizontal run — see the SkFixedMul(first, dY) below),
// and fullAlpha is the alpha of complete strip coverage.
static void compute_alpha_above_line(SkAlpha* alphas,
                                     SkFixed l,
                                     SkFixed r,
                                     SkFixed dY,
                                     SkAlpha fullAlpha) {
    SkASSERT(l <= r);
    SkASSERT(l >> 16 == 0);
    int R = SkFixedCeilToInt(r);
    if (R == 0) {
        return;
    } else if (R == 1) {
        // Line fits in one pixel: coverage is a trapezoid with horizontal
        // sides (1 - l) and (1 - r), scaled by fullAlpha.
        alphas[0] = get_partial_alpha(((R << 17) - l - r) >> 9, fullAlpha);
    } else {
        SkFixed first = SK_Fixed1 - l; // horizontal edge length of the left-most triangle
        SkFixed last = r - ((R - 1) << 16); // horizontal edge length of the right-most triangle
        SkFixed firstH = SkFixedMul(first, dY); // vertical edge of the left-most triangle
        alphas[0] = SkFixedMul(first, firstH) >> 9; // triangle alpha
        SkFixed alpha16 = firstH + (dY >> 1); // rectangle plus triangle
        // Interior pixels each add one more full column (dY) of coverage.
        for (int i = 1; i < R - 1; ++i) {
            alphas[i] = alpha16 >> 8;
            alpha16 += dY;
        }
        // Right-most pixel: everything except the triangle below the line.
        alphas[R - 1] = fullAlpha - partial_triangle_to_alpha(last, dY);
    }
}
621
// Here we always send in l < SK_Fixed1, and the first alpha we want to compute is alphas[0].
// Mirror of compute_alpha_above_line: fill alphas[0 .. ceil(r)-1] with the
// per-pixel coverage of the area *below* the line whose x-span is [l, r].
// dY is the line's |dy/dx|; fullAlpha is the alpha of complete coverage.
static void compute_alpha_below_line(SkAlpha* alphas,
                                     SkFixed l,
                                     SkFixed r,
                                     SkFixed dY,
                                     SkAlpha fullAlpha) {
    SkASSERT(l <= r);
    SkASSERT(l >> 16 == 0);
    int R = SkFixedCeilToInt(r);
    if (R == 0) {
        return;
    } else if (R == 1) {
        // Line fits in one pixel: coverage is a trapezoid with sides l and r.
        alphas[0] = get_partial_alpha(trapezoid_to_alpha(l, r), fullAlpha);
    } else {
        SkFixed first = SK_Fixed1 - l; // horizontal edge length of the left-most triangle
        SkFixed last = r - ((R - 1) << 16); // horizontal edge length of the right-most triangle
        SkFixed lastH = SkFixedMul(last, dY); // vertical edge of the right-most triangle
        alphas[R - 1] = SkFixedMul(last, lastH) >> 9; // triangle alpha
        SkFixed alpha16 = lastH + (dY >> 1); // rectangle plus triangle
        // Walk right-to-left; each interior pixel adds one column (dY) more.
        for (int i = R - 2; i > 0; i--) {
            alphas[i] = (alpha16 >> 8) & 0xFF;
            alpha16 += dY;
        }
        // Left-most pixel: everything except the triangle above the line.
        alphas[0] = fullAlpha - partial_triangle_to_alpha(first, dY);
    }
}
648
649// Note that if fullAlpha != 0xFF, we'll multiply alpha by fullAlpha
650static SK_ALWAYS_INLINE void blit_single_alpha(AdditiveBlitter* blitter,
651 int y,
652 int x,
653 SkAlpha alpha,
654 SkAlpha fullAlpha,
655 SkAlpha* maskRow,
656 bool isUsingMask,
657 bool noRealBlitter,
658 bool needSafeCheck) {
659 if (isUsingMask) {
660 if (fullAlpha == 0xFF && !noRealBlitter) { // noRealBlitter is needed for concave paths
661 maskRow[x] = alpha;
662 } else if (needSafeCheck) {
663 safely_add_alpha(&maskRow[x], get_partial_alpha(alpha, fullAlpha));
664 } else {
665 add_alpha(&maskRow[x], get_partial_alpha(alpha, fullAlpha));
666 }
667 } else {
668 if (fullAlpha == 0xFF && !noRealBlitter) {
669 blitter->getRealBlitter()->blitV(x, y, 1, alpha);
670 } else {
671 blitter->blitAntiH(x, y, get_partial_alpha(alpha, fullAlpha));
672 }
673 }
674}
675
// Blit two adjacent pixels (x and x + 1) on row y with alphas a1 and a2.
// Unlike blit_single_alpha, a1/a2 are applied as-is (callers fold fullAlpha
// in); fullAlpha here only selects the fast real-blitter path.
static SK_ALWAYS_INLINE void blit_two_alphas(AdditiveBlitter* blitter,
                                             int y,
                                             int x,
                                             SkAlpha a1,
                                             SkAlpha a2,
                                             SkAlpha fullAlpha,
                                             SkAlpha* maskRow,
                                             bool isUsingMask,
                                             bool noRealBlitter,
                                             bool needSafeCheck) {
    if (isUsingMask) {
        if (needSafeCheck) {
            safely_add_alpha(&maskRow[x], a1);
            safely_add_alpha(&maskRow[x + 1], a2);
        } else {
            add_alpha(&maskRow[x], a1);
            add_alpha(&maskRow[x + 1], a2);
        }
    } else {
        if (fullAlpha == 0xFF && !noRealBlitter) {
            blitter->getRealBlitter()->blitAntiH2(x, y, a1, a2);
        } else {
            blitter->blitAntiH(x, y, a1);
            blitter->blitAntiH(x + 1, y, a2);
        }
    }
}
703
704static SK_ALWAYS_INLINE void blit_full_alpha(AdditiveBlitter* blitter,
705 int y,
706 int x,
707 int len,
708 SkAlpha fullAlpha,
709 SkAlpha* maskRow,
710 bool isUsingMask,
711 bool noRealBlitter,
712 bool needSafeCheck) {
713 if (isUsingMask) {
714 for (int i = 0; i < len; ++i) {
715 if (needSafeCheck) {
716 safely_add_alpha(&maskRow[x + i], fullAlpha);
717 } else {
718 add_alpha(&maskRow[x + i], fullAlpha);
719 }
720 }
721 } else {
722 if (fullAlpha == 0xFF && !noRealBlitter) {
723 blitter->getRealBlitter()->blitH(x, y, len);
724 } else {
725 blitter->blitAntiH(x, y, len, fullAlpha);
726 }
727 }
728}
729
// Blit one strip row y of the trapezoid whose top edge runs ul -> ur and
// bottom edge ll -> lr (all x coordinates in SkFixed), with lDY/rDY the
// |dy/dx| of the left/right edges. Every covered pixel starts at fullAlpha
// and the areas excluded by the left and right edges are subtracted.
static void blit_aaa_trapezoid_row(AdditiveBlitter* blitter,
                                   int y,
                                   SkFixed ul,
                                   SkFixed ur,
                                   SkFixed ll,
                                   SkFixed lr,
                                   SkFixed lDY,
                                   SkFixed rDY,
                                   SkAlpha fullAlpha,
                                   SkAlpha* maskRow,
                                   bool isUsingMask,
                                   bool noRealBlitter,
                                   bool needSafeCheck) {
    int L = SkFixedFloorToInt(ul), R = SkFixedCeilToInt(lr);
    int len = R - L;

    if (len == 1) {
        // Whole trapezoid fits in one pixel column.
        SkAlpha alpha = trapezoid_to_alpha(ur - ul, lr - ll);
        blit_single_alpha(blitter,
                          y,
                          L,
                          alpha,
                          fullAlpha,
                          maskRow,
                          isUsingMask,
                          noRealBlitter,
                          needSafeCheck);
        return;
    }

    // Scratch layout: alphas[len+1], tempAlphas[len+1], runs[len+1] (int16).
    // Short rows use the stack; long rows heap-allocate the same layout.
    const int kQuickLen = 31;
    char quickMemory[(sizeof(SkAlpha) * 2 + sizeof(int16_t)) * (kQuickLen + 1)];
    SkAlpha* alphas;

    if (len <= kQuickLen) {
        alphas = (SkAlpha*)quickMemory;
    } else {
        alphas = new SkAlpha[(len + 1) * (sizeof(SkAlpha) * 2 + sizeof(int16_t))];
    }

    SkAlpha* tempAlphas = alphas + len + 1;
    int16_t* runs = (int16_t*)(alphas + (len + 1) * 2);

    // Start fully covered; the edge passes below carve coverage away.
    for (int i = 0; i < len; ++i) {
        runs[i] = 1;
        alphas[i] = fullAlpha;
    }
    runs[len] = 0;

    // Subtract the area left of the left edge (ul -> ll).
    int uL = SkFixedFloorToInt(ul);
    int lL = SkFixedCeilToInt(ll);
    if (uL + 2 == lL) { // We only need to compute two triangles, accelerate this special case
        SkFixed first = SkIntToFixed(uL) + SK_Fixed1 - ul;
        SkFixed second = ll - ul - first;
        SkAlpha a1 = fullAlpha - partial_triangle_to_alpha(first, lDY);
        SkAlpha a2 = partial_triangle_to_alpha(second, lDY);
        alphas[0] = alphas[0] > a1 ? alphas[0] - a1 : 0;
        alphas[1] = alphas[1] > a2 ? alphas[1] - a2 : 0;
    } else {
        compute_alpha_below_line(
                tempAlphas + uL - L, ul - SkIntToFixed(uL), ll - SkIntToFixed(uL), lDY, fullAlpha);
        for (int i = uL; i < lL; ++i) {
            // Subtract with clamping at 0 (precision error may over-subtract).
            if (alphas[i - L] > tempAlphas[i - L]) {
                alphas[i - L] -= tempAlphas[i - L];
            } else {
                alphas[i - L] = 0;
            }
        }
    }

    // Subtract the area right of the right edge (ur -> lr).
    int uR = SkFixedFloorToInt(ur);
    int lR = SkFixedCeilToInt(lr);
    if (uR + 2 == lR) { // We only need to compute two triangles, accelerate this special case
        SkFixed first = SkIntToFixed(uR) + SK_Fixed1 - ur;
        SkFixed second = lr - ur - first;
        SkAlpha a1 = partial_triangle_to_alpha(first, rDY);
        SkAlpha a2 = fullAlpha - partial_triangle_to_alpha(second, rDY);
        alphas[len - 2] = alphas[len - 2] > a1 ? alphas[len - 2] - a1 : 0;
        alphas[len - 1] = alphas[len - 1] > a2 ? alphas[len - 1] - a2 : 0;
    } else {
        compute_alpha_above_line(
                tempAlphas + uR - L, ur - SkIntToFixed(uR), lr - SkIntToFixed(uR), rDY, fullAlpha);
        for (int i = uR; i < lR; ++i) {
            if (alphas[i - L] > tempAlphas[i - L]) {
                alphas[i - L] -= tempAlphas[i - L];
            } else {
                alphas[i - L] = 0;
            }
        }
    }

    if (isUsingMask) {
        for (int i = 0; i < len; ++i) {
            if (needSafeCheck) {
                safely_add_alpha(&maskRow[L + i], alphas[i]);
            } else {
                add_alpha(&maskRow[L + i], alphas[i]);
            }
        }
    } else {
        if (fullAlpha == 0xFF && !noRealBlitter) {
            // Real blitter is faster than RunBasedAdditiveBlitter
            blitter->getRealBlitter()->blitAntiH(L, y, alphas, runs);
        } else {
            blitter->blitAntiH(L, y, alphas, len);
        }
    }

    if (len > kQuickLen) {
        delete[] alphas;
    }
}
842
// Blit one strip row y of the trapezoid with top edge ul -> ur and bottom
// edge ll -> lr. Splits the row into a fractional left part, a fully-covered
// middle run [joinLeft, joinRite), and a fractional right part, blitting each
// with the cheapest routine available (single pixel, two pixels, or the
// general blit_aaa_trapezoid_row).
static SK_ALWAYS_INLINE void blit_trapezoid_row(AdditiveBlitter* blitter,
                                                int y,
                                                SkFixed ul,
                                                SkFixed ur,
                                                SkFixed ll,
                                                SkFixed lr,
                                                SkFixed lDY,
                                                SkFixed rDY,
                                                SkAlpha fullAlpha,
                                                SkAlpha* maskRow,
                                                bool isUsingMask,
                                                bool noRealBlitter = false,
                                                bool needSafeCheck = false) {
    SkASSERT(lDY >= 0 && rDY >= 0); // We should only send in the absolute value

    if (ul > ur) {
        return;
    }

    // Edge crosses. Approximate it. This should only happen due to precision limit,
    // so the approximation could be very coarse.
    if (ll > lr) {
        ll = lr = approximate_intersection(ul, ll, ur, lr);
    }

    if (ul == ur && ll == lr) {
        return; // empty trapezoid
    }

    // We're going to use the left line ul-ll and the rite line ur-lr
    // to exclude the area that's not covered by the path.
    // Swapping (ul, ll) or (ur, lr) won't affect that exclusion
    // so we'll do that for simplicity.
    if (ul > ll) {
        std::swap(ul, ll);
    }
    if (ur > lr) {
        std::swap(ur, lr);
    }

    SkFixed joinLeft = SkFixedCeilToFixed(ll);
    SkFixed joinRite = SkFixedFloorToFixed(ur);
    if (joinLeft <= joinRite) { // There's a rect from joinLeft to joinRite that we can blit
        // Left fractional part: [ul, joinLeft].
        if (ul < joinLeft) {
            int len = SkFixedCeilToInt(joinLeft - ul);
            if (len == 1) {
                SkAlpha alpha = trapezoid_to_alpha(joinLeft - ul, joinLeft - ll);
                blit_single_alpha(blitter,
                                  y,
                                  ul >> 16,
                                  alpha,
                                  fullAlpha,
                                  maskRow,
                                  isUsingMask,
                                  noRealBlitter,
                                  needSafeCheck);
            } else if (len == 2) {
                SkFixed first = joinLeft - SK_Fixed1 - ul;
                SkFixed second = ll - ul - first;
                SkAlpha a1 = partial_triangle_to_alpha(first, lDY);
                SkAlpha a2 = fullAlpha - partial_triangle_to_alpha(second, lDY);
                blit_two_alphas(blitter,
                                y,
                                ul >> 16,
                                a1,
                                a2,
                                fullAlpha,
                                maskRow,
                                isUsingMask,
                                noRealBlitter,
                                needSafeCheck);
            } else {
                // SK_MaxS32 as rDY marks the (vertical) right boundary.
                blit_aaa_trapezoid_row(blitter,
                                       y,
                                       ul,
                                       joinLeft,
                                       ll,
                                       joinLeft,
                                       lDY,
                                       SK_MaxS32,
                                       fullAlpha,
                                       maskRow,
                                       isUsingMask,
                                       noRealBlitter,
                                       needSafeCheck);
            }
        }
        // SkAAClip requires that we blit from left to right.
        // Hence we must blit [ul, joinLeft] before blitting [joinLeft, joinRite]
        if (joinLeft < joinRite) {
            blit_full_alpha(blitter,
                            y,
                            SkFixedFloorToInt(joinLeft),
                            SkFixedFloorToInt(joinRite - joinLeft),
                            fullAlpha,
                            maskRow,
                            isUsingMask,
                            noRealBlitter,
                            needSafeCheck);
        }
        // Right fractional part: [joinRite, lr].
        if (lr > joinRite) {
            int len = SkFixedCeilToInt(lr - joinRite);
            if (len == 1) {
                SkAlpha alpha = trapezoid_to_alpha(ur - joinRite, lr - joinRite);
                blit_single_alpha(blitter,
                                  y,
                                  joinRite >> 16,
                                  alpha,
                                  fullAlpha,
                                  maskRow,
                                  isUsingMask,
                                  noRealBlitter,
                                  needSafeCheck);
            } else if (len == 2) {
                SkFixed first = joinRite + SK_Fixed1 - ur;
                SkFixed second = lr - ur - first;
                SkAlpha a1 = fullAlpha - partial_triangle_to_alpha(first, rDY);
                SkAlpha a2 = partial_triangle_to_alpha(second, rDY);
                blit_two_alphas(blitter,
                                y,
                                joinRite >> 16,
                                a1,
                                a2,
                                fullAlpha,
                                maskRow,
                                isUsingMask,
                                noRealBlitter,
                                needSafeCheck);
            } else {
                // SK_MaxS32 as lDY marks the (vertical) left boundary.
                blit_aaa_trapezoid_row(blitter,
                                       y,
                                       joinRite,
                                       ur,
                                       joinRite,
                                       lr,
                                       SK_MaxS32,
                                       rDY,
                                       fullAlpha,
                                       maskRow,
                                       isUsingMask,
                                       noRealBlitter,
                                       needSafeCheck);
            }
        }
    } else {
        // No fully-covered middle: hand the whole row to the general routine.
        blit_aaa_trapezoid_row(blitter,
                               y,
                               ul,
                               ur,
                               ll,
                               lr,
                               lDY,
                               rDY,
                               fullAlpha,
                               maskRow,
                               isUsingMask,
                               noRealBlitter,
                               needSafeCheck);
    }
}
1003
1004static bool operator<(const SkAnalyticEdge& a, const SkAnalyticEdge& b) {
1005 int valuea = a.fUpperY;
1006 int valueb = b.fUpperY;
1007
1008 if (valuea == valueb) {
1009 valuea = a.fX;
1010 valueb = b.fX;
1011 }
1012
1013 if (valuea == valueb) {
1014 valuea = a.fDX;
1015 valueb = b.fDX;
1016 }
1017
1018 return valuea < valueb;
1019}
1020
// Sort the edge pointer array (using operator< above) and thread the edges
// into a doubly-linked list in sorted order. Returns the head; *last receives
// the tail. Requires count >= 1.
static SkAnalyticEdge* sort_edges(SkAnalyticEdge* list[], int count, SkAnalyticEdge** last) {
    // Note: SkTQSort takes an inclusive range, hence list + count - 1.
    SkTQSort(list, list + count - 1);

    // now make the edges linked in sorted order
    for (int i = 1; i < count; ++i) {
        list[i - 1]->fNext = list[i];
        list[i]->fPrev = list[i - 1];
    }

    *last = list[count - 1];
    return list[0];
}
1033
// Debug-only: walk the linked edge list (terminated by the SK_MaxS32
// sentinel) and verify each edge validates and fUpperY is non-decreasing.
static void validate_sort(const SkAnalyticEdge* edge) {
#ifdef SK_DEBUG
    SkFixed y = SkIntToFixed(-32768);

    while (edge->fUpperY != SK_MaxS32) {
        edge->validate();
        SkASSERT(y <= edge->fUpperY);

        y = edge->fUpperY;
        edge = (SkAnalyticEdge*)edge->fNext;
    }
#endif
}
1047
// For an edge, we consider it smooth if the Dx doesn't change much, and Dy is large enough
// For curves that are updating, the Dx is not changing much if fQDx/fCDx and fQDy/fCDy are
// relatively large compared to fQDDx/QCDDx and fQDDy/fCDDy
static bool is_smooth_enough(SkAnalyticEdge* thisEdge, SkAnalyticEdge* nextEdge, int stop_y) {
    if (thisEdge->fCurveCount < 0) {
        // Negative fCurveCount marks a cubic edge.
        const SkCubicEdge& cEdge = static_cast<SkAnalyticCubicEdge*>(thisEdge)->fCEdge;
        int ddshift = cEdge.fCurveShift;
        return SkAbs32(cEdge.fCDx) >> 1 >= SkAbs32(cEdge.fCDDx) >> ddshift &&
               SkAbs32(cEdge.fCDy) >> 1 >= SkAbs32(cEdge.fCDDy) >> ddshift &&
               // current Dy is (fCDy - (fCDDy >> ddshift)) >> dshift
               (cEdge.fCDy - (cEdge.fCDDy >> ddshift)) >> cEdge.fCubicDShift >= SK_Fixed1;
    } else if (thisEdge->fCurveCount > 0) {
        // Positive fCurveCount marks a quadratic edge.
        const SkQuadraticEdge& qEdge = static_cast<SkAnalyticQuadraticEdge*>(thisEdge)->fQEdge;
        return SkAbs32(qEdge.fQDx) >> 1 >= SkAbs32(qEdge.fQDDx) &&
               SkAbs32(qEdge.fQDy) >> 1 >= SkAbs32(qEdge.fQDDy) &&
               // current Dy is (fQDy - fQDDy) >> shift
               (qEdge.fQDy - qEdge.fQDDy) >> qEdge.fCurveShift >= SK_Fixed1;
    }
    // Line edge: compare against the next edge's slope instead.
    return SkAbs32(nextEdge->fDX - thisEdge->fDX) <= SK_Fixed1 && // DDx should be small
           nextEdge->fLowerY - nextEdge->fUpperY >= SK_Fixed1;    // Dy should be large
}
1069
1070// Check if the leftE and riteE are changing smoothly in terms of fDX.
1071// If yes, we can later skip the fractional y and directly jump to integer y.
1072static bool is_smooth_enough(SkAnalyticEdge* leftE,
1073 SkAnalyticEdge* riteE,
1074 SkAnalyticEdge* currE,
1075 int stop_y) {
1076 if (currE->fUpperY >= SkLeftShift(stop_y, 16)) {
1077 return false; // We're at the end so we won't skip anything
1078 }
1079 if (leftE->fLowerY + SK_Fixed1 < riteE->fLowerY) {
1080 return is_smooth_enough(leftE, currE, stop_y); // Only leftE is changing
1081 } else if (leftE->fLowerY > riteE->fLowerY + SK_Fixed1) {
1082 return is_smooth_enough(riteE, currE, stop_y); // Only riteE is changing
1083 }
1084
1085 // Now both edges are changing, find the second next edge
1086 SkAnalyticEdge* nextCurrE = currE->fNext;
1087 if (nextCurrE->fUpperY >= stop_y << 16) { // Check if we're at the end
1088 return false;
1089 }
1090 // Ensure that currE is the next left edge and nextCurrE is the next right edge. Swap if not.
1091 if (nextCurrE->fUpperX < currE->fUpperX) {
1092 std::swap(currE, nextCurrE);
1093 }
1094 return is_smooth_enough(leftE, currE, stop_y) && is_smooth_enough(riteE, nextCurrE, stop_y);
1095}
1096
// Walk the sorted edge list for a convex path. A convex path has exactly one
// left edge and one right edge active per scan line, so we track just
// leftE/riteE and the next unused edge currE. Rectangular spans (both slopes
// zero) are blitted with an optimized rect path; otherwise we blit trapezoid
// rows between consecutive (possibly fractional) y values.
static void aaa_walk_convex_edges(SkAnalyticEdge* prevHead,
                                  AdditiveBlitter* blitter,
                                  int start_y,
                                  int stop_y,
                                  SkFixed leftBound,
                                  SkFixed riteBound,
                                  bool isUsingMask) {
    validate_sort((SkAnalyticEdge*)prevHead->fNext);

    SkAnalyticEdge* leftE = (SkAnalyticEdge*)prevHead->fNext;
    SkAnalyticEdge* riteE = (SkAnalyticEdge*)leftE->fNext;
    SkAnalyticEdge* currE = (SkAnalyticEdge*)riteE->fNext;

    // Start at the lower of the two tops (both edges must be active).
    SkFixed y = std::max(leftE->fUpperY, riteE->fUpperY);

    for (;;) {
        // We have to check fLowerY first because some edges might be alone (e.g., there's only
        // a left edge but no right edge in a given y scan line) due to precision limit.
        while (leftE->fLowerY <= y) {  // Due to smooth jump, we may pass multiple short edges
            if (!leftE->update(y)) {
                if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
                    goto END_WALK;
                }
                leftE = currE;
                currE = (SkAnalyticEdge*)currE->fNext;
            }
        }
        while (riteE->fLowerY <= y) {  // Due to smooth jump, we may pass multiple short edges
            if (!riteE->update(y)) {
                if (SkFixedFloorToInt(currE->fUpperY) >= stop_y) {
                    goto END_WALK;
                }
                riteE = currE;
                currE = (SkAnalyticEdge*)currE->fNext;
            }
        }

        SkASSERT(leftE);
        SkASSERT(riteE);

        // check our bottom clip
        if (SkFixedFloorToInt(y) >= stop_y) {
            break;
        }

        SkASSERT(SkFixedFloorToInt(leftE->fUpperY) <= stop_y);
        SkASSERT(SkFixedFloorToInt(riteE->fUpperY) <= stop_y);

        leftE->goY(y);
        riteE->goY(y);

        // Keep leftE actually on the left (ties broken by slope).
        if (leftE->fX > riteE->fX || (leftE->fX == riteE->fX && leftE->fDX > riteE->fDX)) {
            std::swap(leftE, riteE);
        }

        SkFixed local_bot_fixed = std::min(leftE->fLowerY, riteE->fLowerY);
        if (is_smooth_enough(leftE, riteE, currE, stop_y)) {
            // Edges are smooth: skip fractional stops and jump to integer y.
            local_bot_fixed = SkFixedCeilToFixed(local_bot_fixed);
        }
        local_bot_fixed = std::min(local_bot_fixed, SkIntToFixed(stop_y));

        SkFixed left  = std::max(leftBound, leftE->fX);
        SkFixed dLeft = leftE->fDX;
        SkFixed rite  = std::min(riteBound, riteE->fX);
        SkFixed dRite = riteE->fDX;
        if (0 == (dLeft | dRite)) {
            // Both edges are vertical: the strip is an axis-aligned rectangle.
            // Decompose it into full-pixel center plus partial edges/corners.
            int     fullLeft    = SkFixedCeilToInt(left);
            int     fullRite    = SkFixedFloorToInt(rite);
            SkFixed partialLeft = SkIntToFixed(fullLeft) - left;
            SkFixed partialRite = rite - SkIntToFixed(fullRite);
            int     fullTop     = SkFixedCeilToInt(y);
            int     fullBot     = SkFixedFloorToInt(local_bot_fixed);
            SkFixed partialTop  = SkIntToFixed(fullTop) - y;
            SkFixed partialBot  = local_bot_fixed - SkIntToFixed(fullBot);
            if (fullTop > fullBot) {  // The rectangle is within one pixel height...
                partialTop -= (SK_Fixed1 - partialBot);
                partialBot = 0;
            }

            if (fullRite >= fullLeft) {
                if (partialTop > 0) {  // blit first partial row
                    if (partialLeft > 0) {
                        blitter->blitAntiH(fullLeft - 1,
                                           fullTop - 1,
                                           fixed_to_alpha(SkFixedMul(partialTop, partialLeft)));
                    }
                    blitter->blitAntiH(
                            fullLeft, fullTop - 1, fullRite - fullLeft, fixed_to_alpha(partialTop));
                    if (partialRite > 0) {
                        blitter->blitAntiH(fullRite,
                                           fullTop - 1,
                                           fixed_to_alpha(SkFixedMul(partialTop, partialRite)));
                    }
                    blitter->flush_if_y_changed(y, y + partialTop);
                }

                // Blit all full-height rows from fullTop to fullBot
                if (fullBot > fullTop &&
                    // SkAAClip cannot handle the empty rect so check the non-emptiness here
                    // (bug chromium:662800)
                    (fullRite > fullLeft || fixed_to_alpha(partialLeft) > 0 ||
                     fixed_to_alpha(partialRite) > 0)) {
                    blitter->getRealBlitter()->blitAntiRect(fullLeft - 1,
                                                            fullTop,
                                                            fullRite - fullLeft,
                                                            fullBot - fullTop,
                                                            fixed_to_alpha(partialLeft),
                                                            fixed_to_alpha(partialRite));
                }

                if (partialBot > 0) {  // blit last partial row
                    if (partialLeft > 0) {
                        blitter->blitAntiH(fullLeft - 1,
                                           fullBot,
                                           fixed_to_alpha(SkFixedMul(partialBot, partialLeft)));
                    }
                    blitter->blitAntiH(
                            fullLeft, fullBot, fullRite - fullLeft, fixed_to_alpha(partialBot));
                    if (partialRite > 0) {
                        blitter->blitAntiH(fullRite,
                                           fullBot,
                                           fixed_to_alpha(SkFixedMul(partialBot, partialRite)));
                    }
                }
            } else {  // left and rite are within the same pixel
                if (partialTop > 0) {
                    blitter->blitAntiH(fullLeft - 1,
                                       fullTop - 1,
                                       1,
                                       fixed_to_alpha(SkFixedMul(partialTop, rite - left)));
                    blitter->flush_if_y_changed(y, y + partialTop);
                }
                if (fullBot > fullTop) {
                    blitter->getRealBlitter()->blitV(
                            fullLeft - 1, fullTop, fullBot - fullTop, fixed_to_alpha(rite - left));
                }
                if (partialBot > 0) {
                    blitter->blitAntiH(fullLeft - 1,
                                       fullBot,
                                       1,
                                       fixed_to_alpha(SkFixedMul(partialBot, rite - left)));
                }
            }

            y = local_bot_fixed;
        } else {
            // The following constant are used to snap X
            // We snap X mainly for speedup (no tiny triangle) and
            // avoiding edge cases caused by precision errors
            const SkFixed kSnapDigit = SK_Fixed1 >> 4;
            const SkFixed kSnapHalf  = kSnapDigit >> 1;
            const SkFixed kSnapMask  = (-1 ^ (kSnapDigit - 1));
            left += kSnapHalf;
            rite += kSnapHalf;  // For fast rounding

            // Number of blit_trapezoid_row calls we'll have
            int count = SkFixedCeilToInt(local_bot_fixed) - SkFixedFloorToInt(y);

            // If we're using mask blitter, we advance the mask row in this function
            // to save some "if" condition checks.
            SkAlpha* maskRow = nullptr;
            if (isUsingMask) {
                maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
            }

            // Instead of writing one loop that handles both partial-row blit_trapezoid_row
            // and full-row trapezoid_row together, we use the following 3-stage flow to
            // handle partial-row blit and full-row blit separately. It will save us much time
            // on changing y, left, and rite.
            if (count > 1) {
                if ((int)(y & 0xFFFF0000) != y) {  // There's a partial-row on the top
                    count--;
                    SkFixed nextY     = SkFixedCeilToFixed(y + 1);
                    SkFixed dY        = nextY - y;
                    SkFixed nextLeft  = left + SkFixedMul(dLeft, dY);
                    SkFixed nextRite  = rite + SkFixedMul(dRite, dY);
                    SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                             (nextLeft & kSnapMask) >= leftBound &&
                             (nextRite & kSnapMask) <= riteBound);
                    blit_trapezoid_row(blitter,
                                       y >> 16,
                                       left & kSnapMask,
                                       rite & kSnapMask,
                                       nextLeft & kSnapMask,
                                       nextRite & kSnapMask,
                                       leftE->fDY,
                                       riteE->fDY,
                                       get_partial_alpha(0xFF, dY),
                                       maskRow,
                                       isUsingMask);
                    blitter->flush_if_y_changed(y, nextY);
                    left = nextLeft;
                    rite = nextRite;
                    y    = nextY;
                }

                while (count > 1) {  // Full rows in the middle
                    count--;
                    if (isUsingMask) {
                        maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
                    }
                    SkFixed nextY = y + SK_Fixed1, nextLeft = left + dLeft, nextRite = rite + dRite;
                    SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                             (nextLeft & kSnapMask) >= leftBound &&
                             (nextRite & kSnapMask) <= riteBound);
                    blit_trapezoid_row(blitter,
                                       y >> 16,
                                       left & kSnapMask,
                                       rite & kSnapMask,
                                       nextLeft & kSnapMask,
                                       nextRite & kSnapMask,
                                       leftE->fDY,
                                       riteE->fDY,
                                       0xFF,
                                       maskRow,
                                       isUsingMask);
                    blitter->flush_if_y_changed(y, nextY);
                    left = nextLeft;
                    rite = nextRite;
                    y    = nextY;
                }
            }

            if (isUsingMask) {
                maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(y >> 16);
            }

            SkFixed dY = local_bot_fixed - y;  // partial-row on the bottom
            SkASSERT(dY <= SK_Fixed1);
            // Smooth jumping to integer y may make the last nextLeft/nextRite out of bound.
            // Take them back into the bound here.
            // Note that we substract kSnapHalf later so we have to add them to leftBound/riteBound
            SkFixed nextLeft = std::max(left + SkFixedMul(dLeft, dY), leftBound + kSnapHalf);
            SkFixed nextRite = std::min(rite + SkFixedMul(dRite, dY), riteBound + kSnapHalf);
            SkASSERT((left & kSnapMask) >= leftBound && (rite & kSnapMask) <= riteBound &&
                     (nextLeft & kSnapMask) >= leftBound && (nextRite & kSnapMask) <= riteBound);
            blit_trapezoid_row(blitter,
                               y >> 16,
                               left & kSnapMask,
                               rite & kSnapMask,
                               nextLeft & kSnapMask,
                               nextRite & kSnapMask,
                               leftE->fDY,
                               riteE->fDY,
                               get_partial_alpha(0xFF, dY),
                               maskRow,
                               isUsingMask);
            blitter->flush_if_y_changed(y, local_bot_fixed);
            left = nextLeft;
            rite = nextRite;
            y    = local_bot_fixed;
            left -= kSnapHalf;
            rite -= kSnapHalf;
        }

        // Record the current position so the next iteration (or edge update)
        // continues from a consistent state.
        leftE->fX = left;
        riteE->fX = rite;
        leftE->fY = riteE->fY = y;
    }

END_WALK:;
}
1359
1360static void update_next_next_y(SkFixed y, SkFixed nextY, SkFixed* nextNextY) {
1361 *nextNextY = y > nextY && y < *nextNextY ? y : *nextNextY;
1362}
1363
1364static void check_intersection(const SkAnalyticEdge* edge, SkFixed nextY, SkFixed* nextNextY) {
1365 if (edge->fPrev->fPrev && edge->fPrev->fX + edge->fPrev->fDX > edge->fX + edge->fDX) {
1366 *nextNextY = nextY + (SK_Fixed1 >> SkAnalyticEdge::kDefaultAccuracy);
1367 }
1368}
1369
// Insert all edges starting at or above y (beginning at newEdge) into the
// active list, keeping it sorted by fX. For each inserted edge we also fold
// its fLowerY (and any detected intersection) into *nextNextY, and finally
// fold in the fUpperY of the first not-yet-active edge.
static void insert_new_edges(SkAnalyticEdge* newEdge, SkFixed y, SkFixed* nextNextY) {
    if (newEdge->fUpperY > y) {
        // Nothing to activate yet; just record when the next edge starts.
        update_next_next_y(newEdge->fUpperY, y, nextNextY);
        return;
    }
    SkAnalyticEdge* prev = newEdge->fPrev;
    if (prev->fX <= newEdge->fX) {
        // Already x-sorted relative to its predecessor: no relinking needed,
        // just account for each newly active edge.
        while (newEdge->fUpperY <= y) {
            check_intersection(newEdge, y, nextNextY);
            update_next_next_y(newEdge->fLowerY, y, nextNextY);
            newEdge = newEdge->fNext;
        }
        update_next_next_y(newEdge->fUpperY, y, nextNextY);
        return;
    }
    // find first x pos to insert
    SkAnalyticEdge* start = backward_insert_start(prev, newEdge->fX);
    // insert the lot, fixing up the links as we go
    do {
        SkAnalyticEdge* next = newEdge->fNext;
        do {
            // Already in position: skip the relink for this edge.
            if (start->fNext == newEdge) {
                goto nextEdge;
            }
            SkAnalyticEdge* after = start->fNext;
            if (after->fX >= newEdge->fX) {
                break;
            }
            SkASSERT(start != after);
            start = after;
        } while (true);
        remove_edge(newEdge);
        insert_edge_after(newEdge, start);
    nextEdge:
        check_intersection(newEdge, y, nextNextY);
        update_next_next_y(newEdge->fLowerY, y, nextNextY);
        start   = newEdge;
        newEdge = next;
    } while (newEdge->fUpperY <= y);
    update_next_next_y(newEdge->fUpperY, y, nextNextY);
}
1411
1412static void validate_edges_for_y(const SkAnalyticEdge* edge, SkFixed y) {
1413#ifdef SK_DEBUG
1414 while (edge->fUpperY <= y) {
1415 SkASSERT(edge->fPrev && edge->fNext);
1416 SkASSERT(edge->fPrev->fNext == edge);
1417 SkASSERT(edge->fNext->fPrev == edge);
1418 SkASSERT(edge->fUpperY <= edge->fLowerY);
1419 SkASSERT(edge->fPrev->fPrev == nullptr || edge->fPrev->fX <= edge->fX);
1420 edge = edge->fNext;
1421 }
1422#endif
1423}
1424
1425// Return true if prev->fX, next->fX are too close in the current pixel row.
1426static bool edges_too_close(SkAnalyticEdge* prev, SkAnalyticEdge* next, SkFixed lowerY) {
1427 // When next->fDX == 0, prev->fX >= next->fX - SkAbs32(next->fDX) would be false
1428 // even if prev->fX and next->fX are close and within one pixel (e.g., prev->fX == 0.1,
1429 // next->fX == 0.9). Adding SLACK = 1 to the formula would guarantee it to be true if two
1430 // edges prev and next are within one pixel.
1431 constexpr SkFixed SLACK = SK_Fixed1;
1432
1433 // Note that even if the following test failed, the edges might still be very close to each
1434 // other at some point within the current pixel row because of prev->fDX and next->fDX.
1435 // However, to handle that case, we have to sacrafice more performance.
1436 // I think the current quality is good enough (mainly by looking at Nebraska-StateSeal.svg)
1437 // so I'll ignore fDX for performance tradeoff.
1438 return next && prev && next->fUpperY < lowerY &&
1439 prev->fX + SLACK >= next->fX - SkAbs32(next->fDX);
1440 // The following is more accurate but also slower.
1441 // return (prev && prev->fPrev && next && next->fNext != nullptr && next->fUpperY < lowerY &&
1442 // prev->fX + SkAbs32(prev->fDX) + SLACK >= next->fX - SkAbs32(next->fDX));
1443}
1444
1445// This function exists for the case where the previous rite edge is removed because
1446// its fLowerY <= nextY
1447static bool edges_too_close(int prevRite, SkFixed ul, SkFixed ll) {
1448 return prevRite > SkFixedFloorToInt(ul) || prevRite > SkFixedFloorToInt(ll);
1449}
1450
// Blit the trapezoid that was saved on leftE (between leftE->fSavedY and
// lowerY) against its paired right edge leftE->fRiteE, clipped horizontally to
// [leftClip, rightClip]. Clears the pairing (fRiteE) when done.
static void blit_saved_trapezoid(SkAnalyticEdge* leftE,
                                 SkFixed lowerY,
                                 SkFixed lowerLeft,
                                 SkFixed lowerRite,
                                 AdditiveBlitter* blitter,
                                 SkAlpha* maskRow,
                                 bool isUsingMask,
                                 bool noRealBlitter,
                                 SkFixed leftClip,
                                 SkFixed rightClip) {
    SkAnalyticEdge* riteE = leftE->fRiteE;
    SkASSERT(riteE);
    SkASSERT(riteE->fNext == nullptr || leftE->fSavedY == riteE->fSavedY);
    SkASSERT(SkFixedFloorToInt(lowerY - 1) == SkFixedFloorToInt(leftE->fSavedY));
    int y = SkFixedFloorToInt(leftE->fSavedY);
    // Instead of using fixed_to_alpha(lowerY - leftE->fSavedY), we use the following fullAlpha
    // to elimiate cumulative error: if there are many fractional y scan lines within the
    // same row, the former may accumulate the rounding error while the later won't.
    SkAlpha fullAlpha = fixed_to_alpha(lowerY - SkIntToFixed(y)) -
                        fixed_to_alpha(leftE->fSavedY - SkIntToFixed(y));
    // We need fSavedDY because the (quad or cubic) edge might be updated
    blit_trapezoid_row(
            blitter,
            y,
            std::max(leftE->fSavedX, leftClip),
            std::min(riteE->fSavedX, rightClip),
            std::max(lowerLeft, leftClip),
            std::min(lowerRite, rightClip),
            leftE->fSavedDY,
            riteE->fSavedDY,
            fullAlpha,
            maskRow,
            isUsingMask,
            // Suppress the real blitter when a full-alpha row would overlap a
            // too-close neighboring edge (the SkAAClip left-to-right issue).
            noRealBlitter || (fullAlpha == 0xFF && (edges_too_close(leftE->fPrev, leftE, lowerY) ||
                                                    edges_too_close(riteE, riteE->fNext, lowerY))),
            true);
    leftE->fRiteE = nullptr;
}
1489
// Deferred blitting: instead of blitting every fractional strip immediately,
// pair leftE with riteE (saving x/y/dy on the edges) and only blit when the
// pairing changes, an edge ends, or nextY reaches an integer scan line. This
// merges consecutive fractional strips within one pixel row into one blit.
static void deferred_blit(SkAnalyticEdge* leftE,
                          SkAnalyticEdge* riteE,
                          SkFixed left,
                          SkFixed leftDY,  // don't save leftE->fX/fDY as they may have been updated
                          SkFixed y,
                          SkFixed nextY,
                          bool isIntegralNextY,
                          bool leftEnds,
                          bool riteEnds,
                          AdditiveBlitter* blitter,
                          SkAlpha* maskRow,
                          bool isUsingMask,
                          bool noRealBlitter,
                          SkFixed leftClip,
                          SkFixed rightClip,
                          int yShift) {
    if (leftE->fRiteE && leftE->fRiteE != riteE) {
        // leftE's right edge changed. Blit the saved trapezoid.
        SkASSERT(leftE->fRiteE->fNext == nullptr || leftE->fRiteE->fY == y);
        blit_saved_trapezoid(leftE,
                             y,
                             left,
                             leftE->fRiteE->fX,
                             blitter,
                             maskRow,
                             isUsingMask,
                             noRealBlitter,
                             leftClip,
                             rightClip);
    }
    if (!leftE->fRiteE) {
        // Save and defer blitting the trapezoid
        SkASSERT(riteE->fRiteE == nullptr);
        SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
        SkASSERT(riteE->fNext == nullptr || riteE->fY == y);
        leftE->saveXY(left, y, leftDY);
        riteE->saveXY(riteE->fX, y, riteE->fDY);
        leftE->fRiteE = riteE;
    }
    SkASSERT(leftE->fPrev == nullptr || leftE->fY == nextY);
    riteE->goY(nextY, yShift);
    // Always blit when edges end or nextY is integral
    if (isIntegralNextY || leftEnds || riteEnds) {
        blit_saved_trapezoid(leftE,
                             nextY,
                             leftE->fX,
                             riteE->fX,
                             blitter,
                             maskRow,
                             isUsingMask,
                             noRealBlitter,
                             leftClip,
                             rightClip);
    }
}
1545
// General (possibly non-convex / inverse) edge walker. Maintains a winding
// count across the x-sorted active edge list for each (possibly fractional)
// scan line [y, nextY), blitting one trapezoid per in-interval span. nextNextY
// tracks the next y where the active set changes (edge start/end or
// intersection). useDeferred enables the deferred trapezoid merging of
// deferred_blit(); forceRLE means SkAAClip is driving us.
static void aaa_walk_edges(SkAnalyticEdge* prevHead,
                           SkAnalyticEdge* nextTail,
                           SkPathFillType fillType,
                           AdditiveBlitter* blitter,
                           int start_y,
                           int stop_y,
                           SkFixed leftClip,
                           SkFixed rightClip,
                           bool isUsingMask,
                           bool forceRLE,
                           bool useDeferred,
                           bool skipIntersect) {
    prevHead->fX = prevHead->fUpperX = leftClip;
    nextTail->fX = nextTail->fUpperX = rightClip;
    SkFixed y        = std::max(prevHead->fNext->fUpperY, SkIntToFixed(start_y));
    SkFixed nextNextY = SK_MaxS32;

    {
        // Advance edges that already start above y, and seed nextNextY.
        SkAnalyticEdge* edge;
        for (edge = prevHead->fNext; edge->fUpperY <= y; edge = edge->fNext) {
            edge->goY(y);
            update_next_next_y(edge->fLowerY, y, &nextNextY);
        }
        update_next_next_y(edge->fUpperY, y, &nextNextY);
    }

    int  windingMask = SkPathFillType_IsEvenOdd(fillType) ? 1 : -1;
    bool isInverse   = SkPathFillType_IsInverse(fillType);

    if (isInverse && SkIntToFixed(start_y) != y) {
        // Inverse fill: the region above the first edge is filled; blit the
        // full rows and the partial row between start_y and y.
        int width = SkFixedFloorToInt(rightClip - leftClip);
        if (SkFixedFloorToInt(y) != start_y) {
            blitter->getRealBlitter()->blitRect(
                    SkFixedFloorToInt(leftClip), start_y, width, SkFixedFloorToInt(y) - start_y);
            start_y = SkFixedFloorToInt(y);
        }
        SkAlpha* maskRow =
                isUsingMask ? static_cast<MaskAdditiveBlitter*>(blitter)->getRow(start_y) : nullptr;
        blit_full_alpha(blitter,
                        start_y,
                        SkFixedFloorToInt(leftClip),
                        width,
                        fixed_to_alpha(y - SkIntToFixed(start_y)),
                        maskRow,
                        isUsingMask,
                        false,
                        false);
    }

    while (true) {
        int     w           = 0;            // winding count
        bool    in_interval = isInverse;
        SkFixed prevX       = prevHead->fX;
        SkFixed nextY       = std::min(nextNextY, SkFixedCeilToFixed(y + 1));
        bool    isIntegralNextY = (nextY & (SK_Fixed1 - 1)) == 0;
        SkAnalyticEdge* currE = prevHead->fNext;
        SkAnalyticEdge* leftE = prevHead;
        SkFixed left     = leftClip;
        SkFixed leftDY   = 0;
        bool    leftEnds = false;
        int     prevRite = SkFixedFloorToInt(leftClip);

        nextNextY = SK_MaxS32;

        // Quantize the strip height to 1, 1/2, or 1/4 of a pixel so goY can
        // advance edges with a cheap shift (yShift) instead of a multiply.
        SkASSERT((nextY & ((SK_Fixed1 >> 2) - 1)) == 0);
        int yShift = 0;
        if ((nextY - y) & (SK_Fixed1 >> 2)) {
            yShift = 2;
            nextY  = y + (SK_Fixed1 >> 2);
        } else if ((nextY - y) & (SK_Fixed1 >> 1)) {
            yShift = 1;
            SkASSERT(nextY == y + (SK_Fixed1 >> 1));
        }

        SkAlpha fullAlpha = fixed_to_alpha(nextY - y);

        // If we're using mask blitter, we advance the mask row in this function
        // to save some "if" condition checks.
        SkAlpha* maskRow = nullptr;
        if (isUsingMask) {
            maskRow = static_cast<MaskAdditiveBlitter*>(blitter)->getRow(SkFixedFloorToInt(y));
        }

        SkASSERT(currE->fPrev == prevHead);
        validate_edges_for_y(currE, y);

        // Even if next - y == SK_Fixed1, we can still break the left-to-right order requirement
        // of the SKAAClip: |\| (two trapezoids with overlapping middle wedges)
        bool noRealBlitter = forceRLE;  // forceRLE && (nextY - y != SK_Fixed1);

        while (currE->fUpperY <= y) {
            SkASSERT(currE->fLowerY >= nextY);
            SkASSERT(currE->fY == y);

            w += currE->fWinding;
            bool prev_in_interval = in_interval;
            in_interval           = !(w & windingMask) == isInverse;

            bool isLeft   = in_interval && !prev_in_interval;
            bool isRite   = !in_interval && prev_in_interval;
            bool currEnds = currE->fLowerY == nextY;

            if (useDeferred) {
                // Flush any saved trapezoid whose left/right pairing is about
                // to change before we process the new role of currE.
                if (currE->fRiteE && !isLeft) {
                    // currE is a left edge previously, but now it's not.
                    // Blit the trapezoid between fSavedY and y.
                    SkASSERT(currE->fRiteE->fY == y);
                    blit_saved_trapezoid(currE,
                                         y,
                                         currE->fX,
                                         currE->fRiteE->fX,
                                         blitter,
                                         maskRow,
                                         isUsingMask,
                                         noRealBlitter,
                                         leftClip,
                                         rightClip);
                }
                if (leftE->fRiteE == currE && !isRite) {
                    // currE is a right edge previously, but now it's not.
                    // Moreover, its corresponding leftE doesn't change (otherwise we'll handle it
                    // in the previous if clause). Hence we blit the trapezoid.
                    blit_saved_trapezoid(leftE,
                                         y,
                                         left,
                                         currE->fX,
                                         blitter,
                                         maskRow,
                                         isUsingMask,
                                         noRealBlitter,
                                         leftClip,
                                         rightClip);
                }
            }

            if (isRite) {
                if (useDeferred) {
                    deferred_blit(leftE,
                                  currE,
                                  left,
                                  leftDY,
                                  y,
                                  nextY,
                                  isIntegralNextY,
                                  leftEnds,
                                  currEnds,
                                  blitter,
                                  maskRow,
                                  isUsingMask,
                                  noRealBlitter,
                                  leftClip,
                                  rightClip,
                                  yShift);
                } else {
                    // Immediate mode: blit the trapezoid between y and nextY
                    // bounded by leftE and currE, clipped horizontally.
                    SkFixed rite = currE->fX;
                    currE->goY(nextY, yShift);
                    SkFixed nextLeft = std::max(leftClip, leftE->fX);
                    rite             = std::min(rightClip, rite);
                    SkFixed nextRite = std::min(rightClip, currE->fX);
                    blit_trapezoid_row(
                            blitter,
                            y >> 16,
                            left,
                            rite,
                            nextLeft,
                            nextRite,
                            leftDY,
                            currE->fDY,
                            fullAlpha,
                            maskRow,
                            isUsingMask,
                            noRealBlitter || (fullAlpha == 0xFF &&
                                              (edges_too_close(prevRite, left, leftE->fX) ||
                                               edges_too_close(currE, currE->fNext, nextY))),
                            true);
                    prevRite = SkFixedCeilToInt(std::max(rite, currE->fX));
                }
            } else {
                if (isLeft) {
                    left     = std::max(currE->fX, leftClip);
                    leftDY   = currE->fDY;
                    leftE    = currE;
                    leftEnds = leftE->fLowerY == nextY;
                }
                currE->goY(nextY, yShift);
            }

            SkAnalyticEdge* next = currE->fNext;
            SkFixed         newX;

            // Advance curve edges (quad/cubic) through any sub-segments that
            // end at or before nextY.
            while (currE->fLowerY <= nextY) {
                if (currE->fCurveCount < 0) {
                    SkAnalyticCubicEdge* cubicEdge = (SkAnalyticCubicEdge*)currE;
                    cubicEdge->keepContinuous();
                    if (!cubicEdge->updateCubic()) {
                        break;
                    }
                } else if (currE->fCurveCount > 0) {
                    SkAnalyticQuadraticEdge* quadEdge = (SkAnalyticQuadraticEdge*)currE;
                    quadEdge->keepContinuous();
                    if (!quadEdge->updateQuadratic()) {
                        break;
                    }
                } else {
                    break;
                }
            }
            SkASSERT(currE->fY == nextY);

            if (currE->fLowerY <= nextY) {
                remove_edge(currE);
            } else {
                update_next_next_y(currE->fLowerY, nextY, &nextNextY);
                newX = currE->fX;
                SkASSERT(currE->fLowerY > nextY);
                if (newX < prevX) {  // ripple currE backwards until it is x-sorted
                    // If the crossing edge is a right edge, blit the saved trapezoid.
                    if (leftE->fRiteE == currE && useDeferred) {
                        SkASSERT(leftE->fY == nextY && currE->fY == nextY);
                        blit_saved_trapezoid(leftE,
                                             nextY,
                                             leftE->fX,
                                             currE->fX,
                                             blitter,
                                             maskRow,
                                             isUsingMask,
                                             noRealBlitter,
                                             leftClip,
                                             rightClip);
                    }
                    backward_insert_edge_based_on_x(currE);
                } else {
                    prevX = newX;
                }
                if (!skipIntersect) {
                    check_intersection(currE, nextY, &nextNextY);
                }
            }

            currE = next;
            SkASSERT(currE);
        }

        // was our right-edge culled away?
        if (in_interval) {
            if (useDeferred) {
                deferred_blit(leftE,
                              nextTail,
                              left,
                              leftDY,
                              y,
                              nextY,
                              isIntegralNextY,
                              leftEnds,
                              false,
                              blitter,
                              maskRow,
                              isUsingMask,
                              noRealBlitter,
                              leftClip,
                              rightClip,
                              yShift);
            } else {
                blit_trapezoid_row(blitter,
                                   y >> 16,
                                   left,
                                   rightClip,
                                   std::max(leftClip, leftE->fX),
                                   rightClip,
                                   leftDY,
                                   0,
                                   fullAlpha,
                                   maskRow,
                                   isUsingMask,
                                   noRealBlitter || (fullAlpha == 0xFF &&
                                                     edges_too_close(leftE->fPrev, leftE, nextY)),
                                   true);
            }
        }

        if (forceRLE) {
            ((RunBasedAdditiveBlitter*)blitter)->flush_if_y_changed(y, nextY);
        }

        y = nextY;
        if (y >= SkIntToFixed(stop_y)) {
            break;
        }

        // now currE points to the first edge with a fUpperY larger than the previous y
        insert_new_edges(currE, y, &nextNextY);
    }
}
1839
// Build analytic edges for the path, wrap them with head/tail sentinels, and
// dispatch to the convex or general walker. Handles the zero-edge inverse-fill
// case by blitting the clipped rectangle directly.
static SK_ALWAYS_INLINE void aaa_fill_path(
        const SkPath& path,
        const SkIRect& clipRect,
        AdditiveBlitter* blitter,
        int start_y,
        int stop_y,
        bool pathContainedInClip,
        bool isUsingMask,
        bool forceRLE) {  // forceRLE implies that SkAAClip is calling us
    SkASSERT(blitter);

    SkAnalyticEdgeBuilder builder;
    int count = builder.buildEdges(path, pathContainedInClip ? nullptr : &clipRect);
    SkAnalyticEdge** list = builder.analyticEdgeList();

    SkIRect rect = clipRect;
    if (0 == count) {
        if (path.isInverseFillType()) {
            /*
             * Since we are in inverse-fill, our caller has already drawn above
             * our top (start_y) and will draw below our bottom (stop_y). Thus
             * we need to restrict our drawing to the intersection of the clip
             * and those two limits.
             */
            if (rect.fTop < start_y) {
                rect.fTop = start_y;
            }
            if (rect.fBottom > stop_y) {
                rect.fBottom = stop_y;
            }
            if (!rect.isEmpty()) {
                blitter->getRealBlitter()->blitRect(
                        rect.fLeft, rect.fTop, rect.width(), rect.height());
            }
        }
        return;
    }

    // Head and tail sentinels bracket the sorted edge list so the walkers
    // never have to null-check fPrev/fNext at the ends.
    SkAnalyticEdge headEdge, tailEdge, *last;
    // this returns the first and last edge after they're sorted into a dlink list
    SkAnalyticEdge* edge = sort_edges(list, count, &last);

    headEdge.fRiteE  = nullptr;
    headEdge.fPrev   = nullptr;
    headEdge.fNext   = edge;
    headEdge.fUpperY = headEdge.fLowerY = SK_MinS32;
    headEdge.fX      = SK_MinS32;
    headEdge.fDX     = 0;
    headEdge.fDY     = SK_MaxS32;
    headEdge.fUpperX = SK_MinS32;
    edge->fPrev      = &headEdge;

    tailEdge.fRiteE  = nullptr;
    tailEdge.fPrev   = last;
    tailEdge.fNext   = nullptr;
    tailEdge.fUpperY = tailEdge.fLowerY = SK_MaxS32;
    tailEdge.fX      = SK_MaxS32;
    tailEdge.fDX     = 0;
    tailEdge.fDY     = SK_MaxS32;
    tailEdge.fUpperX = SK_MaxS32;
    last->fNext      = &tailEdge;

    // now edge is the head of the sorted linklist

    if (!pathContainedInClip && start_y < clipRect.fTop) {
        start_y = clipRect.fTop;
    }
    if (!pathContainedInClip && stop_y > clipRect.fBottom) {
        stop_y = clipRect.fBottom;
    }

    SkFixed leftBound  = SkIntToFixed(rect.fLeft);
    SkFixed rightBound = SkIntToFixed(rect.fRight);
    if (isUsingMask) {
        // If we're using mask, then we have to limit the bound within the path bounds.
        // Otherwise, the edge drift may access an invalid address inside the mask.
        SkIRect ir;
        path.getBounds().roundOut(&ir);
        leftBound  = std::max(leftBound, SkIntToFixed(ir.fLeft));
        rightBound = std::min(rightBound, SkIntToFixed(ir.fRight));
    }

    if (!path.isInverseFillType() && path.isConvex() && count >= 2) {
        aaa_walk_convex_edges(
                &headEdge, blitter, start_y, stop_y, leftBound, rightBound, isUsingMask);
    } else {
        // Only use deferred blitting if there are many edges.
        bool useDeferred =
                count >
                (SkFixedFloorToInt(tailEdge.fPrev->fLowerY - headEdge.fNext->fUpperY) + 1) * 4;

        // We skip intersection computation if there are many points which probably already
        // give us enough fractional scan lines.
        bool skipIntersect = path.countPoints() > (stop_y - start_y) * 2;

        aaa_walk_edges(&headEdge,
                       &tailEdge,
                       path.getFillType(),
                       blitter,
                       start_y,
                       stop_y,
                       leftBound,
                       rightBound,
                       isUsingMask,
                       forceRLE,
                       useDeferred,
                       skipIntersect);
    }
}
1949
// Public entry point for analytic anti-aliased path filling. Chooses among
// three additive blitters (mask, run-based, safe run-based) based on the blit
// region size, fill inversion, convexity, and the forceRLE flag, then calls
// aaa_fill_path with the chosen blitter.
void SkScan::AAAFillPath(const SkPath& path,
                         SkBlitter* blitter,
                         const SkIRect& ir,
                         const SkIRect& clipBounds,
                         bool forceRLE) {
    bool containedInClip = clipBounds.contains(ir);
    bool isInverse       = path.isInverseFillType();

    // The mask blitter (where we store intermediate alpha values directly in a mask, and then call
    // the real blitter once in the end to blit the whole mask) is faster than the RLE blitter when
    // the blit region is small enough (i.e., CanHandleRect(ir)). When isInverse is true, the blit
    // region is no longer the rectangle ir so we won't use the mask blitter. The caller may also
    // use the forceRLE flag to force not using the mask blitter. Also, when the path is a simple
    // rect, preparing a mask and blitting it might have too much overhead. Hence we'll use
    // blitFatAntiRect to avoid the mask and its overhead.
    if (MaskAdditiveBlitter::CanHandleRect(ir) && !isInverse && !forceRLE) {
        // blitFatAntiRect is slower than the normal AAA flow without MaskAdditiveBlitter.
        // Hence only tryBlitFatAntiRect when MaskAdditiveBlitter would have been used.
        if (!TryBlitFatAntiRect(blitter, path, clipBounds)) {
            MaskAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
            aaa_fill_path(path,
                          clipBounds,
                          &additiveBlitter,
                          ir.fTop,
                          ir.fBottom,
                          containedInClip,
                          true,
                          forceRLE);
        }
    } else if (!isInverse && path.isConvex()) {
        // If the filling area is convex (i.e., path.isConvex && !isInverse), our simpler
        // aaa_walk_convex_edges won't generate alphas above 255. Hence we don't need
        // SafeRLEAdditiveBlitter (which is slow due to clamping). The basic RLE blitter
        // RunBasedAdditiveBlitter would suffice.
        RunBasedAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
        aaa_fill_path(path,
                      clipBounds,
                      &additiveBlitter,
                      ir.fTop,
                      ir.fBottom,
                      containedInClip,
                      false,
                      forceRLE);
    } else {
        // If the filling area might not be convex, the more involved aaa_walk_edges would
        // be called and we have to clamp the alpha downto 255. The SafeRLEAdditiveBlitter
        // does that at a cost of performance.
        SafeRLEAdditiveBlitter additiveBlitter(blitter, ir, clipBounds, isInverse);
        aaa_fill_path(path,
                      clipBounds,
                      &additiveBlitter,
                      ir.fTop,
                      ir.fBottom,
                      containedInClip,
                      false,
                      forceRLE);
    }
}
2008#endif // defined(SK_DISABLE_AAA)
2009