1 | /* |
2 | * Copyright 2012 Google Inc. |
3 | * |
4 | * Use of this source code is governed by a BSD-style license that can be |
5 | * found in the LICENSE file. |
6 | */ |
7 | |
8 | #include "include/core/SkString.h" |
9 | #include "include/core/SkTypes.h" |
10 | #include "src/core/SkGeometry.h" |
11 | #include "src/core/SkMatrixPriv.h" |
12 | #include "src/core/SkPathPriv.h" |
13 | #include "src/core/SkPointPriv.h" |
14 | #include "src/gpu/GrAuditTrail.h" |
15 | #include "src/gpu/GrCaps.h" |
16 | #include "src/gpu/GrDrawOpTest.h" |
17 | #include "src/gpu/GrGeometryProcessor.h" |
18 | #include "src/gpu/GrProcessor.h" |
19 | #include "src/gpu/GrProgramInfo.h" |
20 | #include "src/gpu/GrRenderTargetContext.h" |
21 | #include "src/gpu/GrVertexWriter.h" |
22 | #include "src/gpu/geometry/GrPathUtils.h" |
23 | #include "src/gpu/geometry/GrStyledShape.h" |
24 | #include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h" |
25 | #include "src/gpu/glsl/GrGLSLGeometryProcessor.h" |
26 | #include "src/gpu/glsl/GrGLSLProgramDataManager.h" |
27 | #include "src/gpu/glsl/GrGLSLUniformHandler.h" |
28 | #include "src/gpu/glsl/GrGLSLVarying.h" |
29 | #include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h" |
30 | #include "src/gpu/ops/GrAAConvexPathRenderer.h" |
31 | #include "src/gpu/ops/GrMeshDrawOp.h" |
32 | #include "src/gpu/ops/GrSimpleMeshDrawOpHelperWithStencil.h" |
33 | |
34 | GrAAConvexPathRenderer::GrAAConvexPathRenderer() { |
35 | } |
36 | |
37 | struct Segment { |
38 | enum { |
39 | // These enum values are assumed in member functions below. |
40 | kLine = 0, |
41 | kQuad = 1, |
42 | } fType; |
43 | |
44 | // line uses one pt, quad uses 2 pts |
45 | SkPoint fPts[2]; |
46 | // normal to edge ending at each pt |
47 | SkVector fNorms[2]; |
    // If the corner where the previous segment meets this segment is sharp,
    // fMid is a normalized bisector facing outward.
50 | SkVector fMid; |
51 | |
52 | int countPoints() { |
53 | static_assert(0 == kLine && 1 == kQuad); |
54 | return fType + 1; |
55 | } |
56 | const SkPoint& endPt() const { |
57 | static_assert(0 == kLine && 1 == kQuad); |
58 | return fPts[fType]; |
59 | } |
60 | const SkPoint& endNorm() const { |
61 | static_assert(0 == kLine && 1 == kQuad); |
62 | return fNorms[fType]; |
63 | } |
64 | }; |
65 | |
66 | typedef SkTArray<Segment, true> SegmentArray; |
67 | |
68 | static bool center_of_mass(const SegmentArray& segments, SkPoint* c) { |
69 | SkScalar area = 0; |
70 | SkPoint center = {0, 0}; |
71 | int count = segments.count(); |
72 | SkPoint p0 = {0, 0}; |
73 | if (count > 2) { |
74 | // We translate the polygon so that the first point is at the origin. |
75 | // This avoids some precision issues with small area polygons far away |
76 | // from the origin. |
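        // Standard polygon centroid: with 2A = sum_i cross(p_i, p_i+1), the centroid is
        // C = (1 / (3 * 2A)) * sum_i (p_i + p_i+1) * cross(p_i, p_i+1). Below, 'area'
        // accumulates 2A and 'center' the weighted sum; the final scale divides by 6A.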
77 | p0 = segments[0].endPt(); |
78 | SkPoint pi; |
79 | SkPoint pj; |
80 | // the first and last iteration of the below loop would compute |
81 | // zeros since the starting / ending point is (0,0). So instead we start |
82 | // at i=1 and make the last iteration i=count-2. |
83 | pj = segments[1].endPt() - p0; |
84 | for (int i = 1; i < count - 1; ++i) { |
85 | pi = pj; |
86 | pj = segments[i + 1].endPt() - p0; |
87 | |
88 | SkScalar t = SkPoint::CrossProduct(pi, pj); |
89 | area += t; |
90 | center.fX += (pi.fX + pj.fX) * t; |
91 | center.fY += (pi.fY + pj.fY) * t; |
92 | } |
93 | } |
94 | |
95 | // If the poly has no area then we instead return the average of |
96 | // its points. |
97 | if (SkScalarNearlyZero(area)) { |
98 | SkPoint avg; |
99 | avg.set(0, 0); |
100 | for (int i = 0; i < count; ++i) { |
101 | const SkPoint& pt = segments[i].endPt(); |
102 | avg.fX += pt.fX; |
103 | avg.fY += pt.fY; |
104 | } |
105 | SkScalar denom = SK_Scalar1 / count; |
106 | avg.scale(denom); |
107 | *c = avg; |
108 | } else { |
109 | area *= 3; |
110 | area = SkScalarInvert(area); |
111 | center.scale(area); |
112 | // undo the translate of p0 to the origin. |
113 | *c = center + p0; |
114 | } |
115 | return !SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY) && c->isFinite(); |
116 | } |
117 | |
118 | static bool compute_vectors(SegmentArray* segments, |
119 | SkPoint* fanPt, |
120 | SkPathPriv::FirstDirection dir, |
121 | int* vCount, |
122 | int* iCount) { |
123 | if (!center_of_mass(*segments, fanPt)) { |
124 | return false; |
125 | } |
126 | int count = segments->count(); |
127 | |
128 | // Make the normals point towards the outside |
129 | SkPointPriv::Side normSide; |
130 | if (dir == SkPathPriv::kCCW_FirstDirection) { |
131 | normSide = SkPointPriv::kRight_Side; |
132 | } else { |
133 | normSide = SkPointPriv::kLeft_Side; |
134 | } |
135 | |
136 | int64_t vCount64 = 0; |
137 | int64_t iCount64 = 0; |
138 | // compute normals at all points |
139 | for (int a = 0; a < count; ++a) { |
140 | Segment& sega = (*segments)[a]; |
141 | int b = (a + 1) % count; |
142 | Segment& segb = (*segments)[b]; |
143 | |
144 | const SkPoint* prevPt = &sega.endPt(); |
145 | int n = segb.countPoints(); |
146 | for (int p = 0; p < n; ++p) { |
147 | segb.fNorms[p] = segb.fPts[p] - *prevPt; |
148 | segb.fNorms[p].normalize(); |
149 | segb.fNorms[p] = SkPointPriv::MakeOrthog(segb.fNorms[p], normSide); |
150 | prevPt = &segb.fPts[p]; |
151 | } |
152 | if (Segment::kLine == segb.fType) { |
153 | vCount64 += 5; |
154 | iCount64 += 9; |
155 | } else { |
156 | vCount64 += 6; |
157 | iCount64 += 12; |
158 | } |
159 | } |
160 | |
161 | // compute mid-vectors where segments meet. TODO: Detect shallow corners |
162 | // and leave out the wedges and close gaps by stitching segments together. |
163 | for (int a = 0; a < count; ++a) { |
164 | const Segment& sega = (*segments)[a]; |
165 | int b = (a + 1) % count; |
166 | Segment& segb = (*segments)[b]; |
167 | segb.fMid = segb.fNorms[0] + sega.endNorm(); |
168 | segb.fMid.normalize(); |
169 | // corner wedges |
170 | vCount64 += 4; |
171 | iCount64 += 6; |
172 | } |
173 | if (vCount64 > SK_MaxS32 || iCount64 > SK_MaxS32) { |
174 | return false; |
175 | } |
176 | *vCount = vCount64; |
177 | *iCount = iCount64; |
178 | return true; |
179 | } |
180 | |
181 | struct DegenerateTestData { |
182 | DegenerateTestData() { fStage = kInitial; } |
183 | bool isDegenerate() const { return kNonDegenerate != fStage; } |
184 | enum { |
185 | kInitial, |
186 | kPoint, |
187 | kLine, |
188 | kNonDegenerate |
189 | } fStage; |
190 | SkPoint fFirstPoint; |
191 | SkVector fLineNormal; |
192 | SkScalar fLineC; |
193 | }; |
194 | |
195 | static const SkScalar kClose = (SK_Scalar1 / 16); |
196 | static const SkScalar kCloseSqd = kClose * kClose; |
197 | |
198 | static void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) { |
199 | switch (data->fStage) { |
200 | case DegenerateTestData::kInitial: |
201 | data->fFirstPoint = pt; |
202 | data->fStage = DegenerateTestData::kPoint; |
203 | break; |
204 | case DegenerateTestData::kPoint: |
205 | if (SkPointPriv::DistanceToSqd(pt, data->fFirstPoint) > kCloseSqd) { |
206 | data->fLineNormal = pt - data->fFirstPoint; |
207 | data->fLineNormal.normalize(); |
208 | data->fLineNormal = SkPointPriv::MakeOrthog(data->fLineNormal); |
209 | data->fLineC = -data->fLineNormal.dot(data->fFirstPoint); |
210 | data->fStage = DegenerateTestData::kLine; |
211 | } |
212 | break; |
213 | case DegenerateTestData::kLine: |
214 | if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) { |
215 | data->fStage = DegenerateTestData::kNonDegenerate; |
216 | } |
217 | break; |
218 | case DegenerateTestData::kNonDegenerate: |
219 | break; |
220 | default: |
            SK_ABORT("Unexpected degenerate test stage.");
222 | } |
223 | } |
224 | |
225 | static inline bool get_direction(const SkPath& path, const SkMatrix& m, |
226 | SkPathPriv::FirstDirection* dir) { |
227 | // At this point, we've already returned true from canDraw(), which checked that the path's |
228 | // direction could be determined, so this should just be fetching the cached direction. |
229 | // However, if perspective is involved, we're operating on a transformed path, which may no |
230 | // longer have a computable direction. |
231 | if (!SkPathPriv::CheapComputeFirstDirection(path, dir)) { |
232 | return false; |
233 | } |
234 | |
235 | // check whether m reverses the orientation |
236 | SkASSERT(!m.hasPerspective()); |
237 | SkScalar det2x2 = m.get(SkMatrix::kMScaleX) * m.get(SkMatrix::kMScaleY) - |
238 | m.get(SkMatrix::kMSkewX) * m.get(SkMatrix::kMSkewY); |
239 | if (det2x2 < 0) { |
240 | *dir = SkPathPriv::OppositeFirstDirection(*dir); |
241 | } |
242 | |
243 | return true; |
244 | } |
245 | |
246 | static inline void add_line_to_segment(const SkPoint& pt, |
247 | SegmentArray* segments) { |
248 | segments->push_back(); |
249 | segments->back().fType = Segment::kLine; |
250 | segments->back().fPts[0] = pt; |
251 | } |
252 | |
253 | static inline void add_quad_segment(const SkPoint pts[3], |
254 | SegmentArray* segments) { |
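    // If the control point is within kClose of the chord, the quad is flat enough to treat
    // as a line; drop it entirely when its endpoints also coincide.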
255 | if (SkPointPriv::DistanceToLineSegmentBetweenSqd(pts[1], pts[0], pts[2]) < kCloseSqd) { |
256 | if (pts[0] != pts[2]) { |
257 | add_line_to_segment(pts[2], segments); |
258 | } |
259 | } else { |
260 | segments->push_back(); |
261 | segments->back().fType = Segment::kQuad; |
262 | segments->back().fPts[0] = pts[1]; |
263 | segments->back().fPts[1] = pts[2]; |
264 | } |
265 | } |
266 | |
267 | static inline void add_cubic_segments(const SkPoint pts[4], |
268 | SkPathPriv::FirstDirection dir, |
269 | SegmentArray* segments) { |
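    // Chop the cubic into quads whose control points are constrained by the cubic's
    // tangents for the given winding direction, so the approximation stays on the correct
    // side of the true curve. The output stores 3 points per quad, hence the stride of 3.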
270 | SkSTArray<15, SkPoint, true> quads; |
271 | GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads); |
272 | int count = quads.count(); |
273 | for (int q = 0; q < count; q += 3) { |
274 | add_quad_segment(&quads[q], segments); |
275 | } |
276 | } |
277 | |
278 | static bool get_segments(const SkPath& path, |
279 | const SkMatrix& m, |
280 | SegmentArray* segments, |
281 | SkPoint* fanPt, |
282 | int* vCount, |
283 | int* iCount) { |
284 | SkPath::Iter iter(path, true); |
    // This renderer over-emphasizes very thin path regions. We use the distance
    // from the sample to the path to compute coverage. Every pixel intersected
    // by the path will be hit, and the maximum distance is sqrt(2)/2. We don't
    // account for the sample being close to a very thin area of the path, in
    // which case it should receive very little coverage. This is particularly
    // egregious for degenerate line paths, so we detect paths that are very
    // close to a line (zero area) and draw nothing.
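    // update_degenerate_test() below advances through point -> line -> non-degenerate as
    // successive device-space points land farther than kClose from the first point, and
    // then farther than kClose from the implicit line through the first two distinct points.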
292 | DegenerateTestData degenerateData; |
293 | SkPathPriv::FirstDirection dir; |
294 | if (!get_direction(path, m, &dir)) { |
295 | return false; |
296 | } |
297 | |
298 | for (;;) { |
299 | SkPoint pts[4]; |
300 | SkPath::Verb verb = iter.next(pts); |
301 | switch (verb) { |
302 | case SkPath::kMove_Verb: |
303 | m.mapPoints(pts, 1); |
304 | update_degenerate_test(°enerateData, pts[0]); |
305 | break; |
306 | case SkPath::kLine_Verb: { |
307 | if (!SkPathPriv::AllPointsEq(pts, 2)) { |
308 | m.mapPoints(&pts[1], 1); |
309 | update_degenerate_test(°enerateData, pts[1]); |
310 | add_line_to_segment(pts[1], segments); |
311 | } |
312 | break; |
313 | } |
314 | case SkPath::kQuad_Verb: |
315 | if (!SkPathPriv::AllPointsEq(pts, 3)) { |
316 | m.mapPoints(pts, 3); |
317 | update_degenerate_test(°enerateData, pts[1]); |
318 | update_degenerate_test(°enerateData, pts[2]); |
319 | add_quad_segment(pts, segments); |
320 | } |
321 | break; |
322 | case SkPath::kConic_Verb: { |
323 | if (!SkPathPriv::AllPointsEq(pts, 3)) { |
324 | m.mapPoints(pts, 3); |
325 | SkScalar weight = iter.conicWeight(); |
326 | SkAutoConicToQuads converter; |
327 | const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.25f); |
328 | for (int i = 0; i < converter.countQuads(); ++i) { |
329 | update_degenerate_test(°enerateData, quadPts[2*i + 1]); |
330 | update_degenerate_test(°enerateData, quadPts[2*i + 2]); |
331 | add_quad_segment(quadPts + 2*i, segments); |
332 | } |
333 | } |
334 | break; |
335 | } |
336 | case SkPath::kCubic_Verb: { |
337 | if (!SkPathPriv::AllPointsEq(pts, 4)) { |
338 | m.mapPoints(pts, 4); |
339 | update_degenerate_test(°enerateData, pts[1]); |
340 | update_degenerate_test(°enerateData, pts[2]); |
341 | update_degenerate_test(°enerateData, pts[3]); |
342 | add_cubic_segments(pts, dir, segments); |
343 | } |
344 | break; |
345 | } |
346 | case SkPath::kDone_Verb: |
347 | if (degenerateData.isDegenerate()) { |
348 | return false; |
349 | } else { |
350 | return compute_vectors(segments, fanPt, dir, vCount, iCount); |
351 | } |
352 | default: |
353 | break; |
354 | } |
355 | } |
356 | } |
357 | |
358 | struct Draw { |
359 | Draw() : fVertexCnt(0), fIndexCnt(0) {} |
360 | int fVertexCnt; |
361 | int fIndexCnt; |
362 | }; |
363 | |
364 | typedef SkTArray<Draw, true> DrawArray; |
365 | |
366 | static void create_vertices(const SegmentArray& segments, |
367 | const SkPoint& fanPt, |
368 | const GrVertexColor& color, |
369 | DrawArray* draws, |
370 | GrVertexWriter& verts, |
371 | uint16_t* idxs, |
372 | size_t vertexStride) { |
373 | Draw* draw = &draws->push_back(); |
374 | // alias just to make vert/index assignments easier to read. |
375 | int* v = &draw->fVertexCnt; |
376 | int* i = &draw->fIndexCnt; |
377 | const size_t uvOffset = sizeof(SkPoint) + color.size(); |
378 | |
379 | int count = segments.count(); |
380 | for (int a = 0; a < count; ++a) { |
381 | const Segment& sega = segments[a]; |
382 | int b = (a + 1) % count; |
383 | const Segment& segb = segments[b]; |
384 | |
385 | // Check whether adding the verts for this segment to the current draw would cause index |
386 | // values to overflow. |
387 | int vCount = 4; |
388 | if (Segment::kLine == segb.fType) { |
389 | vCount += 5; |
390 | } else { |
391 | vCount += 6; |
392 | } |
393 | if (draw->fVertexCnt + vCount > (1 << 16)) { |
394 | idxs += *i; |
395 | draw = &draws->push_back(); |
396 | v = &draw->fVertexCnt; |
397 | i = &draw->fIndexCnt; |
398 | } |
399 | |
400 | const SkScalar negOneDists[2] = { -SK_Scalar1, -SK_Scalar1 }; |
401 | |
402 | // FIXME: These tris are inset in the 1 unit arc around the corner |
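        // Corner wedge: two triangles fanning from the on-edge corner point out one unit
        // along the previous edge's end normal, the outward bisector, and the next edge's
        // first normal. UV (0, 0) puts the corner on the curve; (0, -1) puts the outer
        // ring one unit outside, so coverage falls to zero across the wedge.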
403 | SkPoint p0 = sega.endPt(); |
404 | // Position, Color, UV, D0, D1 |
405 | verts.write(p0, color, SkPoint{0, 0}, negOneDists); |
406 | verts.write(p0 + sega.endNorm(), color, SkPoint{0, -SK_Scalar1}, negOneDists); |
407 | verts.write(p0 + segb.fMid, color, SkPoint{0, -SK_Scalar1}, negOneDists); |
408 | verts.write(p0 + segb.fNorms[0], color, SkPoint{0, -SK_Scalar1}, negOneDists); |
409 | |
410 | idxs[*i + 0] = *v + 0; |
411 | idxs[*i + 1] = *v + 2; |
412 | idxs[*i + 2] = *v + 1; |
413 | idxs[*i + 3] = *v + 0; |
414 | idxs[*i + 4] = *v + 3; |
415 | idxs[*i + 5] = *v + 2; |
416 | |
417 | *v += 4; |
418 | *i += 6; |
419 | |
420 | if (Segment::kLine == segb.fType) { |
421 | // we draw the line edge as a degenerate quad (u is 0, v is the |
422 | // signed distance to the edge) |
423 | SkPoint v1Pos = sega.endPt(); |
424 | SkPoint v2Pos = segb.fPts[0]; |
425 | SkScalar dist = SkPointPriv::DistanceToLineBetween(fanPt, v1Pos, v2Pos); |
426 | |
427 | verts.write(fanPt, color, SkPoint{0, dist}, negOneDists); |
428 | verts.write(v1Pos, color, SkPoint{0, 0}, negOneDists); |
429 | verts.write(v2Pos, color, SkPoint{0, 0}, negOneDists); |
430 | verts.write(v1Pos + segb.fNorms[0], color, SkPoint{0, -SK_Scalar1}, negOneDists); |
431 | verts.write(v2Pos + segb.fNorms[0], color, SkPoint{0, -SK_Scalar1}, negOneDists); |
432 | |
433 | idxs[*i + 0] = *v + 3; |
434 | idxs[*i + 1] = *v + 1; |
435 | idxs[*i + 2] = *v + 2; |
436 | |
437 | idxs[*i + 3] = *v + 4; |
438 | idxs[*i + 4] = *v + 3; |
439 | idxs[*i + 5] = *v + 2; |
440 | |
441 | *i += 6; |
442 | |
443 | // Draw the interior fan if it exists. |
            // TODO: Detect and combine collinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
446 | if (count >= 3) { |
447 | idxs[*i + 0] = *v + 0; |
448 | idxs[*i + 1] = *v + 2; |
449 | idxs[*i + 2] = *v + 1; |
450 | |
451 | *i += 3; |
452 | } |
453 | |
454 | *v += 5; |
455 | } else { |
456 | void* quadVertsBegin = verts.fPtr; |
457 | |
458 | SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]}; |
459 | |
460 | SkScalar c0 = segb.fNorms[0].dot(qpts[0]); |
461 | SkScalar c1 = segb.fNorms[1].dot(qpts[2]); |
462 | GrVertexWriter::Skip<SkPoint> skipUVs; |
463 | |
464 | verts.write(fanPt, |
465 | color, skipUVs, |
466 | -segb.fNorms[0].dot(fanPt) + c0, |
467 | -segb.fNorms[1].dot(fanPt) + c1); |
468 | |
469 | verts.write(qpts[0], |
470 | color, skipUVs, |
471 | 0.0f, |
472 | -segb.fNorms[1].dot(qpts[0]) + c1); |
473 | |
474 | verts.write(qpts[2], |
475 | color, skipUVs, |
476 | -segb.fNorms[0].dot(qpts[2]) + c0, |
477 | 0.0f); |
            // We need a negative value that is large enough that it won't affect results when
            // interpolated with. However, it can't be so large a negative value that it hurts
            // numerical precision on less powerful GPUs.
481 | static const SkScalar kStableLargeNegativeValue = -SK_ScalarMax/1000000; |
482 | verts.write(qpts[0] + segb.fNorms[0], |
483 | color, skipUVs, |
484 | kStableLargeNegativeValue, |
485 | kStableLargeNegativeValue); |
486 | |
487 | verts.write(qpts[2] + segb.fNorms[1], |
488 | color, skipUVs, |
489 | kStableLargeNegativeValue, |
490 | kStableLargeNegativeValue); |
491 | |
492 | SkVector midVec = segb.fNorms[0] + segb.fNorms[1]; |
493 | midVec.normalize(); |
494 | |
495 | verts.write(qpts[1] + midVec, |
496 | color, skipUVs, |
497 | kStableLargeNegativeValue, |
498 | kStableLargeNegativeValue); |
499 | |
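            // Build the matrix that maps device-space positions into the quad's canonical
            // (u, v) space (where the curve is u^2 = v) and apply it to the six vertices
            // just written, filling in the UVs skipped above at uvOffset.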
500 | GrPathUtils::QuadUVMatrix toUV(qpts); |
501 | toUV.apply(quadVertsBegin, 6, vertexStride, uvOffset); |
502 | |
503 | idxs[*i + 0] = *v + 3; |
504 | idxs[*i + 1] = *v + 1; |
505 | idxs[*i + 2] = *v + 2; |
506 | idxs[*i + 3] = *v + 4; |
507 | idxs[*i + 4] = *v + 3; |
508 | idxs[*i + 5] = *v + 2; |
509 | |
510 | idxs[*i + 6] = *v + 5; |
511 | idxs[*i + 7] = *v + 3; |
512 | idxs[*i + 8] = *v + 4; |
513 | |
514 | *i += 9; |
515 | |
516 | // Draw the interior fan if it exists. |
            // TODO: Detect and combine collinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
519 | if (count >= 3) { |
520 | idxs[*i + 0] = *v + 0; |
521 | idxs[*i + 1] = *v + 2; |
522 | idxs[*i + 2] = *v + 1; |
523 | |
524 | *i += 3; |
525 | } |
526 | |
527 | *v += 6; |
528 | } |
529 | } |
530 | } |
531 | |
532 | /////////////////////////////////////////////////////////////////////////////// |
533 | |
534 | /* |
535 | * Quadratic specified by 0=u^2-v canonical coords. u and v are the first |
536 | * two components of the vertex attribute. Coverage is based on signed |
537 | * distance with negative being inside, positive outside. The edge is specified in |
538 | * window space (y-down). If either the third or fourth component of the interpolated |
539 | * vertex coord is > 0 then the pixel is considered outside the edge. This is used to |
540 | * attempt to trim to a portion of the infinite quad. |
541 | * Requires shader derivative instruction support. |
542 | */ |
543 | |
544 | class QuadEdgeEffect : public GrGeometryProcessor { |
545 | public: |
546 | static GrGeometryProcessor* Make(SkArenaAlloc* arena, |
547 | const SkMatrix& localMatrix, |
548 | bool usesLocalCoords, |
549 | bool wideColor) { |
550 | return arena->make<QuadEdgeEffect>(localMatrix, usesLocalCoords, wideColor); |
551 | } |
552 | |
553 | ~QuadEdgeEffect() override {} |
554 | |
    const char* name() const override { return "QuadEdge"; }
556 | |
557 | class GLSLProcessor : public GrGLSLGeometryProcessor { |
558 | public: |
559 | GLSLProcessor() {} |
560 | |
561 | void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override { |
562 | const QuadEdgeEffect& qe = args.fGP.cast<QuadEdgeEffect>(); |
563 | GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder; |
564 | GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler; |
565 | GrGLSLUniformHandler* uniformHandler = args.fUniformHandler; |
566 | |
567 | // emit attributes |
568 | varyingHandler->emitAttributes(qe); |
569 | |
570 | GrGLSLVarying v(kHalf4_GrSLType); |
            varyingHandler->addVarying("QuadEdge", &v);
            vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.fInQuadEdge.name());
573 | |
574 | // Setup pass through color |
575 | varyingHandler->addPassThroughAttribute(qe.fInColor, args.fOutputColor); |
576 | |
577 | GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder; |
578 | |
579 | // Setup position |
580 | this->writeOutputPosition(vertBuilder, gpArgs, qe.fInPosition.name()); |
581 | if (qe.fUsesLocalCoords) { |
582 | this->writeLocalCoord(vertBuilder, uniformHandler, gpArgs, |
583 | qe.fInPosition.asShaderVar(), qe.fLocalMatrix, |
584 | &fLocalMatrixUniform); |
585 | } |
586 | |
587 | fragBuilder->codeAppendf("half edgeAlpha;" ); |
588 | |
589 | // keep the derivative instructions outside the conditional |
590 | fragBuilder->codeAppendf("half2 duvdx = half2(dFdx(%s.xy));" , v.fsIn()); |
591 | fragBuilder->codeAppendf("half2 duvdy = half2(dFdy(%s.xy));" , v.fsIn()); |
592 | fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {" , v.fsIn(), v.fsIn()); |
593 | // today we know z and w are in device space. We could use derivatives |
594 | fragBuilder->codeAppendf("edgeAlpha = min(min(%s.z, %s.w) + 0.5, 1.0);" , v.fsIn(), |
595 | v.fsIn()); |
            fragBuilder->codeAppendf("} else {");
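            // Evaluate the implicit quad function f = u^2 - v and approximate the signed
            // distance to the curve as f / |grad f|, where
            // grad f = (2u*du/dx - dv/dx, 2u*du/dy - dv/dy) comes from the derivatives above.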
597 | fragBuilder->codeAppendf("half2 gF = half2(2.0*%s.x*duvdx.x - duvdx.y," |
598 | " 2.0*%s.x*duvdy.x - duvdy.y);" , |
599 | v.fsIn(), v.fsIn()); |
600 | fragBuilder->codeAppendf("edgeAlpha = (%s.x*%s.x - %s.y);" , v.fsIn(), v.fsIn(), |
601 | v.fsIn()); |
602 | fragBuilder->codeAppendf("edgeAlpha = " |
603 | "saturate(0.5 - edgeAlpha / length(gF));}" ); |
604 | |
605 | fragBuilder->codeAppendf("%s = half4(edgeAlpha);" , args.fOutputCoverage); |
606 | } |
607 | |
608 | static inline void GenKey(const GrGeometryProcessor& gp, |
609 | const GrShaderCaps&, |
610 | GrProcessorKeyBuilder* b) { |
611 | const QuadEdgeEffect& qee = gp.cast<QuadEdgeEffect>(); |
612 | uint32_t key = (uint32_t) qee.fUsesLocalCoords; |
613 | key |= ComputeMatrixKey(qee.fLocalMatrix) << 1; |
614 | b->add32(key); |
615 | } |
616 | |
617 | void setData(const GrGLSLProgramDataManager& pdman, |
618 | const GrPrimitiveProcessor& gp) override { |
619 | const QuadEdgeEffect& qe = gp.cast<QuadEdgeEffect>(); |
620 | this->setTransform(pdman, fLocalMatrixUniform, qe.fLocalMatrix, &fLocalMatrix); |
621 | } |
622 | |
623 | private: |
624 | typedef GrGLSLGeometryProcessor INHERITED; |
625 | |
626 | SkMatrix fLocalMatrix = SkMatrix::InvalidMatrix(); |
627 | UniformHandle fLocalMatrixUniform; |
628 | }; |
629 | |
630 | void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override { |
631 | GLSLProcessor::GenKey(*this, caps, b); |
632 | } |
633 | |
634 | GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override { |
635 | return new GLSLProcessor(); |
636 | } |
637 | |
638 | private: |
639 | friend class ::SkArenaAlloc; // for access to ctor |
640 | |
641 | QuadEdgeEffect(const SkMatrix& localMatrix, bool usesLocalCoords, bool wideColor) |
642 | : INHERITED(kQuadEdgeEffect_ClassID) |
643 | , fLocalMatrix(localMatrix) |
644 | , fUsesLocalCoords(usesLocalCoords) { |
        fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
        fInColor = MakeColorAttribute("inColor", wideColor);
        fInQuadEdge = {"inQuadEdge", kFloat4_GrVertexAttribType, kHalf4_GrSLType};
648 | this->setVertexAttributes(&fInPosition, 3); |
649 | } |
650 | |
651 | Attribute fInPosition; |
652 | Attribute fInColor; |
653 | Attribute fInQuadEdge; |
654 | |
655 | SkMatrix fLocalMatrix; |
656 | bool fUsesLocalCoords; |
657 | |
658 | GR_DECLARE_GEOMETRY_PROCESSOR_TEST |
659 | |
660 | typedef GrGeometryProcessor INHERITED; |
661 | }; |
662 | |
663 | GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect); |
664 | |
665 | #if GR_TEST_UTILS |
666 | GrGeometryProcessor* QuadEdgeEffect::TestCreate(GrProcessorTestData* d) { |
667 | // Doesn't work without derivative instructions. |
668 | return d->caps()->shaderCaps()->shaderDerivativeSupport() |
669 | ? QuadEdgeEffect::Make(d->allocator(), GrTest::TestMatrix(d->fRandom), |
670 | d->fRandom->nextBool(), d->fRandom->nextBool()) |
671 | : nullptr; |
672 | } |
673 | #endif |
674 | |
675 | /////////////////////////////////////////////////////////////////////////////// |
676 | |
677 | GrPathRenderer::CanDrawPath |
678 | GrAAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const { |
679 | // This check requires convexity and known direction, since the direction is used to build |
680 | // the geometry segments. Degenerate convex paths will fall through to some other path renderer. |
681 | if (args.fCaps->shaderCaps()->shaderDerivativeSupport() && |
682 | (GrAAType::kCoverage == args.fAAType) && args.fShape->style().isSimpleFill() && |
683 | !args.fShape->inverseFilled() && args.fShape->knownToBeConvex() && |
684 | args.fShape->knownDirection()) { |
685 | return CanDrawPath::kYes; |
686 | } |
687 | return CanDrawPath::kNo; |
688 | } |
689 | |
690 | namespace { |
691 | |
692 | class AAConvexPathOp final : public GrMeshDrawOp { |
693 | private: |
694 | using Helper = GrSimpleMeshDrawOpHelperWithStencil; |
695 | |
696 | public: |
697 | DEFINE_OP_CLASS_ID |
698 | |
699 | static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context, |
700 | GrPaint&& paint, |
701 | const SkMatrix& viewMatrix, |
702 | const SkPath& path, |
703 | const GrUserStencilSettings* stencilSettings) { |
704 | return Helper::FactoryHelper<AAConvexPathOp>(context, std::move(paint), viewMatrix, path, |
705 | stencilSettings); |
706 | } |
707 | |
708 | AAConvexPathOp(const Helper::MakeArgs& helperArgs, const SkPMColor4f& color, |
709 | const SkMatrix& viewMatrix, const SkPath& path, |
710 | const GrUserStencilSettings* stencilSettings) |
711 | : INHERITED(ClassID()), fHelper(helperArgs, GrAAType::kCoverage, stencilSettings) { |
712 | fPaths.emplace_back(PathData{viewMatrix, path, color}); |
713 | this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes, |
714 | IsHairline::kNo); |
715 | } |
716 | |
    const char* name() const override { return "AAConvexPathOp"; }
718 | |
719 | void visitProxies(const VisitProxyFunc& func) const override { |
720 | if (fProgramInfo) { |
721 | fProgramInfo->visitFPProxies(func); |
722 | } else { |
723 | fHelper.visitProxies(func); |
724 | } |
725 | } |
726 | |
727 | FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); } |
728 | |
729 | GrProcessorSet::Analysis finalize( |
730 | const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage, |
731 | GrClampType clampType) override { |
732 | return fHelper.finalizeProcessors( |
733 | caps, clip, hasMixedSampledCoverage, clampType, |
734 | GrProcessorAnalysisCoverage::kSingleChannel, &fPaths.back().fColor, &fWideColor); |
735 | } |
736 | |
737 | private: |
738 | GrProgramInfo* programInfo() override { return fProgramInfo; } |
739 | |
740 | void onCreateProgramInfo(const GrCaps* caps, |
741 | SkArenaAlloc* arena, |
742 | const GrSurfaceProxyView* writeView, |
743 | GrAppliedClip&& appliedClip, |
744 | const GrXferProcessor::DstProxyView& dstProxyView) override { |
745 | SkMatrix invert; |
746 | if (fHelper.usesLocalCoords() && !fPaths.back().fViewMatrix.invert(&invert)) { |
747 | return; |
748 | } |
749 | |
750 | GrGeometryProcessor* quadProcessor = QuadEdgeEffect::Make(arena, invert, |
751 | fHelper.usesLocalCoords(), |
752 | fWideColor); |
753 | |
754 | fProgramInfo = fHelper.createProgramInfoWithStencil(caps, arena, writeView, |
755 | std::move(appliedClip), |
756 | dstProxyView, quadProcessor, |
757 | GrPrimitiveType::kTriangles); |
758 | } |
759 | |
760 | void onPrepareDraws(Target* target) override { |
761 | int instanceCount = fPaths.count(); |
762 | |
763 | if (!fProgramInfo) { |
764 | this->createProgramInfo(target); |
765 | if (!fProgramInfo) { |
766 | return; |
767 | } |
768 | } |
769 | |
770 | const size_t kVertexStride = fProgramInfo->primProc().vertexStride(); |
771 | |
772 | fDraws.reserve(instanceCount); |
773 | |
774 | // TODO generate all segments for all paths and use one vertex buffer |
775 | for (int i = 0; i < instanceCount; i++) { |
776 | const PathData& args = fPaths[i]; |
777 | |
            // We use the fact that SkPath::transform performs subdivision when the matrix
            // has perspective. Otherwise, we apply the view matrix when copying to the
            // segment representation.
781 | const SkMatrix* viewMatrix = &args.fViewMatrix; |
782 | |
783 | // We avoid initializing the path unless we have to |
784 | const SkPath* pathPtr = &args.fPath; |
785 | SkTLazy<SkPath> tmpPath; |
786 | if (viewMatrix->hasPerspective()) { |
787 | SkPath* tmpPathPtr = tmpPath.init(*pathPtr); |
788 | tmpPathPtr->setIsVolatile(true); |
789 | tmpPathPtr->transform(*viewMatrix); |
790 | viewMatrix = &SkMatrix::I(); |
791 | pathPtr = tmpPathPtr; |
792 | } |
793 | |
794 | int vertexCount; |
795 | int indexCount; |
796 | enum { |
797 | kPreallocSegmentCnt = 512 / sizeof(Segment), |
798 | kPreallocDrawCnt = 4, |
799 | }; |
800 | SkSTArray<kPreallocSegmentCnt, Segment, true> segments; |
801 | SkPoint fanPt; |
802 | |
803 | if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount, |
804 | &indexCount)) { |
805 | continue; |
806 | } |
807 | |
808 | sk_sp<const GrBuffer> vertexBuffer; |
809 | int firstVertex; |
810 | |
811 | GrVertexWriter verts{target->makeVertexSpace(kVertexStride, vertexCount, |
812 | &vertexBuffer, &firstVertex)}; |
813 | |
814 | if (!verts.fPtr) { |
815 | SkDebugf("Could not allocate vertices\n" ); |
816 | return; |
817 | } |
818 | |
819 | sk_sp<const GrBuffer> indexBuffer; |
820 | int firstIndex; |
821 | |
822 | uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex); |
823 | if (!idxs) { |
824 | SkDebugf("Could not allocate indices\n" ); |
825 | return; |
826 | } |
827 | |
828 | SkSTArray<kPreallocDrawCnt, Draw, true> draws; |
829 | GrVertexColor color(args.fColor, fWideColor); |
830 | create_vertices(segments, fanPt, color, &draws, verts, idxs, kVertexStride); |
831 | |
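            // create_vertices() splits the geometry so that each Draw holds at most 2^16
            // vertices; the uint16_t indices written above therefore stay valid as each
            // mesh is offset by firstVertex below.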
832 | GrSimpleMesh* meshes = target->allocMeshes(draws.count()); |
833 | for (int j = 0; j < draws.count(); ++j) { |
834 | const Draw& draw = draws[j]; |
835 | meshes[j].setIndexed(indexBuffer, draw.fIndexCnt, firstIndex, 0, |
836 | draw.fVertexCnt - 1, GrPrimitiveRestart::kNo, vertexBuffer, |
837 | firstVertex); |
838 | firstIndex += draw.fIndexCnt; |
839 | firstVertex += draw.fVertexCnt; |
840 | } |
841 | |
842 | fDraws.push_back({ meshes, draws.count() }); |
843 | } |
844 | } |
845 | |
846 | void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override { |
847 | if (!fProgramInfo || fDraws.isEmpty()) { |
848 | return; |
849 | } |
850 | |
851 | flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds); |
852 | flushState->bindTextures(fProgramInfo->primProc(), nullptr, fProgramInfo->pipeline()); |
853 | for (int i = 0; i < fDraws.count(); ++i) { |
854 | for (int j = 0; j < fDraws[i].fMeshCount; ++j) { |
855 | flushState->drawMesh(fDraws[i].fMeshes[j]); |
856 | } |
857 | } |
858 | } |
859 | |
860 | CombineResult onCombineIfPossible(GrOp* t, GrRecordingContext::Arenas*, |
861 | const GrCaps& caps) override { |
862 | AAConvexPathOp* that = t->cast<AAConvexPathOp>(); |
863 | if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) { |
864 | return CombineResult::kCannotCombine; |
865 | } |
866 | if (fHelper.usesLocalCoords() && |
867 | !SkMatrixPriv::CheapEqual(fPaths[0].fViewMatrix, that->fPaths[0].fViewMatrix)) { |
868 | return CombineResult::kCannotCombine; |
869 | } |
870 | |
871 | fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin()); |
872 | fWideColor |= that->fWideColor; |
873 | return CombineResult::kMerged; |
874 | } |
875 | |
876 | #if GR_TEST_UTILS |
877 | SkString onDumpInfo() const override { |
878 | return SkStringPrintf("Count: %d\n%s" , fPaths.count(), fHelper.dumpInfo().c_str()); |
879 | } |
880 | #endif |
881 | |
882 | struct PathData { |
883 | SkMatrix fViewMatrix; |
884 | SkPath fPath; |
885 | SkPMColor4f fColor; |
886 | }; |
887 | |
888 | Helper fHelper; |
889 | SkSTArray<1, PathData, true> fPaths; |
890 | bool fWideColor; |
891 | |
892 | struct MeshDraw { |
893 | GrSimpleMesh* fMeshes; |
894 | int fMeshCount; |
895 | }; |
896 | |
897 | SkTDArray<MeshDraw> fDraws; |
898 | GrProgramInfo* fProgramInfo = nullptr; |
899 | |
900 | typedef GrMeshDrawOp INHERITED; |
901 | }; |
902 | |
903 | } // anonymous namespace |
904 | |
905 | bool GrAAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) { |
906 | GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(), |
907 | "GrAAConvexPathRenderer::onDrawPath" ); |
908 | SkASSERT(args.fRenderTargetContext->numSamples() <= 1); |
909 | SkASSERT(!args.fShape->isEmpty()); |
910 | |
911 | SkPath path; |
912 | args.fShape->asPath(&path); |
913 | |
914 | std::unique_ptr<GrDrawOp> op = AAConvexPathOp::Make(args.fContext, std::move(args.fPaint), |
915 | *args.fViewMatrix, |
916 | path, args.fUserStencilSettings); |
917 | args.fRenderTargetContext->addDrawOp(args.fClip, std::move(op)); |
918 | return true; |
919 | } |
920 | |
921 | /////////////////////////////////////////////////////////////////////////////////////////////////// |
922 | |
923 | #if GR_TEST_UTILS |
924 | |
925 | GR_DRAW_OP_TEST_DEFINE(AAConvexPathOp) { |
926 | SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random); |
927 | const SkPath& path = GrTest::TestPathConvex(random); |
928 | const GrUserStencilSettings* stencilSettings = GrGetRandomStencil(random, context); |
929 | return AAConvexPathOp::Make(context, std::move(paint), viewMatrix, path, stencilSettings); |
930 | } |
931 | |
932 | #endif |
933 | |