/*
 * Copyright 2012 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/core/SkString.h"
#include "include/core/SkTypes.h"
#include "src/core/SkGeometry.h"
#include "src/core/SkMatrixPriv.h"
#include "src/core/SkPathPriv.h"
#include "src/core/SkPointPriv.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrDrawOpTest.h"
#include "src/gpu/GrGeometryProcessor.h"
#include "src/gpu/GrProcessor.h"
#include "src/gpu/GrProgramInfo.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrVertexWriter.h"
#include "src/gpu/geometry/GrPathUtils.h"
#include "src/gpu/geometry/GrShape.h"
#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
#include "src/gpu/glsl/GrGLSLProgramDataManager.h"
#include "src/gpu/glsl/GrGLSLUniformHandler.h"
#include "src/gpu/glsl/GrGLSLVarying.h"
#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
#include "src/gpu/ops/GrAAConvexPathRenderer.h"
#include "src/gpu/ops/GrMeshDrawOp.h"
#include "src/gpu/ops/GrSimpleMeshDrawOpHelperWithStencil.h"

GrAAConvexPathRenderer::GrAAConvexPathRenderer() {
}

struct Segment {
    enum {
        // These enum values are assumed in member functions below.
        kLine = 0,
        kQuad = 1,
    } fType;

    // line uses one pt, quad uses 2 pts
    SkPoint fPts[2];
    // normal to edge ending at each pt
    SkVector fNorms[2];
    // Is the corner where the previous segment meets this segment sharp?
    // If so, fMid is a normalized bisector facing outward.
    SkVector fMid;

    int countPoints() {
        static_assert(0 == kLine && 1 == kQuad);
        return fType + 1;
    }
    const SkPoint& endPt() const {
        static_assert(0 == kLine && 1 == kQuad);
        return fPts[fType];
    }
    const SkPoint& endNorm() const {
        static_assert(0 == kLine && 1 == kQuad);
        return fNorms[fType];
    }
};

typedef SkTArray<Segment, true> SegmentArray;

static bool center_of_mass(const SegmentArray& segments, SkPoint* c) {
    SkScalar area = 0;
    SkPoint center = {0, 0};
    int count = segments.count();
    SkPoint p0 = {0, 0};
    if (count > 2) {
        // We translate the polygon so that the first point is at the origin.
        // This avoids some precision issues with small area polygons far away
        // from the origin.
        p0 = segments[0].endPt();
        SkPoint pi;
        SkPoint pj;
        // the first and last iteration of the below loop would compute
        // zeros since the starting / ending point is (0,0). So instead we start
        // at i=1 and make the last iteration i=count-2.
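        // This is the standard polygon centroid formula: with A = 1/2 * sum(cross(pi, pj)),
        // the centroid is 1/(6A) * sum((pi + pj) * cross(pi, pj)). Here 'area' accumulates
        // sum(cross(pi, pj)) = 2A, so scaling 'center' by 1/(3 * area) below divides by 6A.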
        pj = segments[1].endPt() - p0;
        for (int i = 1; i < count - 1; ++i) {
            pi = pj;
            pj = segments[i + 1].endPt() - p0;

            SkScalar t = SkPoint::CrossProduct(pi, pj);
            area += t;
            center.fX += (pi.fX + pj.fX) * t;
            center.fY += (pi.fY + pj.fY) * t;
        }
    }

    // If the poly has no area then we instead return the average of
    // its points.
    if (SkScalarNearlyZero(area)) {
        SkPoint avg;
        avg.set(0, 0);
        for (int i = 0; i < count; ++i) {
            const SkPoint& pt = segments[i].endPt();
            avg.fX += pt.fX;
            avg.fY += pt.fY;
        }
        SkScalar denom = SK_Scalar1 / count;
        avg.scale(denom);
        *c = avg;
    } else {
        area *= 3;
        area = SkScalarInvert(area);
        center.scale(area);
        // undo the translate of p0 to the origin.
        *c = center + p0;
    }
    return !SkScalarIsNaN(c->fX) && !SkScalarIsNaN(c->fY) && c->isFinite();
}

static bool compute_vectors(SegmentArray* segments,
                            SkPoint* fanPt,
                            SkPathPriv::FirstDirection dir,
                            int* vCount,
                            int* iCount) {
    if (!center_of_mass(*segments, fanPt)) {
        return false;
    }
    int count = segments->count();

    // Make the normals point towards the outside
    SkPointPriv::Side normSide;
    if (dir == SkPathPriv::kCCW_FirstDirection) {
        normSide = SkPointPriv::kRight_Side;
    } else {
        normSide = SkPointPriv::kLeft_Side;
    }

    int64_t vCount64 = 0;
    int64_t iCount64 = 0;
    // compute normals at all points
    for (int a = 0; a < count; ++a) {
        Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];

        const SkPoint* prevPt = &sega.endPt();
        int n = segb.countPoints();
        for (int p = 0; p < n; ++p) {
            segb.fNorms[p] = segb.fPts[p] - *prevPt;
            segb.fNorms[p].normalize();
            segb.fNorms[p] = SkPointPriv::MakeOrthog(segb.fNorms[p], normSide);
            prevPt = &segb.fPts[p];
        }
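        // These per-segment totals must match what create_vertices() emits: each line edge
        // adds 5 verts / 9 indices (counting its interior-fan triangle) and each quad edge
        // adds 6 verts / 12 indices. The corner wedges are counted in the next loop.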
        if (Segment::kLine == segb.fType) {
            vCount64 += 5;
            iCount64 += 9;
        } else {
            vCount64 += 6;
            iCount64 += 12;
        }
    }

    // compute mid-vectors where segments meet. TODO: Detect shallow corners
    // and leave out the wedges and close gaps by stitching segments together.
    for (int a = 0; a < count; ++a) {
        const Segment& sega = (*segments)[a];
        int b = (a + 1) % count;
        Segment& segb = (*segments)[b];
        segb.fMid = segb.fNorms[0] + sega.endNorm();
        segb.fMid.normalize();
        // corner wedges
        vCount64 += 4;
        iCount64 += 6;
    }
    if (vCount64 > SK_MaxS32 || iCount64 > SK_MaxS32) {
        return false;
    }
    *vCount = vCount64;
    *iCount = iCount64;
    return true;
}

struct DegenerateTestData {
    DegenerateTestData() { fStage = kInitial; }
    bool isDegenerate() const { return kNonDegenerate != fStage; }
    enum {
        kInitial,
        kPoint,
        kLine,
        kNonDegenerate
    } fStage;
    SkPoint fFirstPoint;
    SkVector fLineNormal;
    SkScalar fLineC;
};

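// Tolerances used by the degeneracy test below and by add_quad_segment() to flatten nearly
// straight quads. The points involved have already been mapped by the view matrix, so these
// are device-space distances.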
static const SkScalar kClose = (SK_Scalar1 / 16);
static const SkScalar kCloseSqd = kClose * kClose;

static void update_degenerate_test(DegenerateTestData* data, const SkPoint& pt) {
    switch (data->fStage) {
        case DegenerateTestData::kInitial:
            data->fFirstPoint = pt;
            data->fStage = DegenerateTestData::kPoint;
            break;
        case DegenerateTestData::kPoint:
            if (SkPointPriv::DistanceToSqd(pt, data->fFirstPoint) > kCloseSqd) {
                data->fLineNormal = pt - data->fFirstPoint;
                data->fLineNormal.normalize();
                data->fLineNormal = SkPointPriv::MakeOrthog(data->fLineNormal);
                data->fLineC = -data->fLineNormal.dot(data->fFirstPoint);
                data->fStage = DegenerateTestData::kLine;
            }
            break;
        case DegenerateTestData::kLine:
            if (SkScalarAbs(data->fLineNormal.dot(pt) + data->fLineC) > kClose) {
                data->fStage = DegenerateTestData::kNonDegenerate;
            }
            break;
        case DegenerateTestData::kNonDegenerate:
            break;
        default:
            SK_ABORT("Unexpected degenerate test stage.");
    }
}

static inline bool get_direction(const SkPath& path, const SkMatrix& m,
                                 SkPathPriv::FirstDirection* dir) {
    // At this point, we've already returned true from canDraw(), which checked that the path's
    // direction could be determined, so this should just be fetching the cached direction.
    // However, if perspective is involved, we're operating on a transformed path, which may no
    // longer have a computable direction.
    if (!SkPathPriv::CheapComputeFirstDirection(path, dir)) {
        return false;
    }

    // check whether m reverses the orientation
    SkASSERT(!m.hasPerspective());
    SkScalar det2x2 = m.get(SkMatrix::kMScaleX) * m.get(SkMatrix::kMScaleY) -
                      m.get(SkMatrix::kMSkewX) * m.get(SkMatrix::kMSkewY);
    if (det2x2 < 0) {
        *dir = SkPathPriv::OppositeFirstDirection(*dir);
    }

    return true;
}

static inline void add_line_to_segment(const SkPoint& pt,
                                       SegmentArray* segments) {
    segments->push_back();
    segments->back().fType = Segment::kLine;
    segments->back().fPts[0] = pt;
}

static inline void add_quad_segment(const SkPoint pts[3],
                                    SegmentArray* segments) {
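    // Quads whose control point lies within kClose of the chord between the endpoints are
    // flattened to a line (or dropped entirely if the endpoints coincide).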
    if (SkPointPriv::DistanceToLineSegmentBetweenSqd(pts[1], pts[0], pts[2]) < kCloseSqd) {
        if (pts[0] != pts[2]) {
            add_line_to_segment(pts[2], segments);
        }
    } else {
        segments->push_back();
        segments->back().fType = Segment::kQuad;
        segments->back().fPts[0] = pts[1];
        segments->back().fPts[1] = pts[2];
    }
}

static inline void add_cubic_segments(const SkPoint pts[4],
                                      SkPathPriv::FirstDirection dir,
                                      SegmentArray* segments) {
    SkSTArray<15, SkPoint, true> quads;
    GrPathUtils::convertCubicToQuadsConstrainToTangents(pts, SK_Scalar1, dir, &quads);
    int count = quads.count();
    for (int q = 0; q < count; q += 3) {
        add_quad_segment(&quads[q], segments);
    }
}

static bool get_segments(const SkPath& path,
                         const SkMatrix& m,
                         SegmentArray* segments,
                         SkPoint* fanPt,
                         int* vCount,
                         int* iCount) {
    SkPath::Iter iter(path, true);
    // This renderer over-emphasizes very thin path regions. We use the distance
    // to the path from the sample to compute coverage. Every pixel intersected
    // by the path will be hit and the maximum distance is sqrt(2)/2. We don't
    // notice that the sample may be close to a very thin area of the path and
    // thus should be very light. This is particularly egregious for degenerate
    // line paths. We detect paths that are very close to a line (zero area) and
    // draw nothing.
    DegenerateTestData degenerateData;
    SkPathPriv::FirstDirection dir;
    if (!get_direction(path, m, &dir)) {
        return false;
    }

    for (;;) {
        SkPoint pts[4];
        SkPath::Verb verb = iter.next(pts);
        switch (verb) {
            case SkPath::kMove_Verb:
                m.mapPoints(pts, 1);
                update_degenerate_test(&degenerateData, pts[0]);
                break;
            case SkPath::kLine_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 2)) {
                    m.mapPoints(&pts[1], 1);
                    update_degenerate_test(&degenerateData, pts[1]);
                    add_line_to_segment(pts[1], segments);
                }
                break;
            }
            case SkPath::kQuad_Verb:
                if (!SkPathPriv::AllPointsEq(pts, 3)) {
                    m.mapPoints(pts, 3);
                    update_degenerate_test(&degenerateData, pts[1]);
                    update_degenerate_test(&degenerateData, pts[2]);
                    add_quad_segment(pts, segments);
                }
                break;
            case SkPath::kConic_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 3)) {
                    m.mapPoints(pts, 3);
                    SkScalar weight = iter.conicWeight();
                    SkAutoConicToQuads converter;
                    const SkPoint* quadPts = converter.computeQuads(pts, weight, 0.25f);
                    for (int i = 0; i < converter.countQuads(); ++i) {
                        update_degenerate_test(&degenerateData, quadPts[2*i + 1]);
                        update_degenerate_test(&degenerateData, quadPts[2*i + 2]);
                        add_quad_segment(quadPts + 2*i, segments);
                    }
                }
                break;
            }
            case SkPath::kCubic_Verb: {
                if (!SkPathPriv::AllPointsEq(pts, 4)) {
                    m.mapPoints(pts, 4);
                    update_degenerate_test(&degenerateData, pts[1]);
                    update_degenerate_test(&degenerateData, pts[2]);
                    update_degenerate_test(&degenerateData, pts[3]);
                    add_cubic_segments(pts, dir, segments);
                }
                break;
            }
            case SkPath::kDone_Verb:
                if (degenerateData.isDegenerate()) {
                    return false;
                } else {
                    return compute_vectors(segments, fanPt, dir, vCount, iCount);
                }
            default:
                break;
        }
    }
}

struct Draw {
    Draw() : fVertexCnt(0), fIndexCnt(0) {}
    int fVertexCnt;
    int fIndexCnt;
};

typedef SkTArray<Draw, true> DrawArray;

static void create_vertices(const SegmentArray& segments,
                            const SkPoint& fanPt,
                            const GrVertexColor& color,
                            DrawArray* draws,
                            GrVertexWriter& verts,
                            uint16_t* idxs,
                            size_t vertexStride) {
    Draw* draw = &draws->push_back();
    // alias just to make vert/index assignments easier to read.
    int* v = &draw->fVertexCnt;
    int* i = &draw->fIndexCnt;
    const size_t uvOffset = sizeof(SkPoint) + color.size();

    int count = segments.count();
    for (int a = 0; a < count; ++a) {
        const Segment& sega = segments[a];
        int b = (a + 1) % count;
        const Segment& segb = segments[b];

        // Check whether adding the verts for this segment to the current draw would cause index
        // values to overflow.
        int vCount = 4;
        if (Segment::kLine == segb.fType) {
            vCount += 5;
        } else {
            vCount += 6;
        }
        if (draw->fVertexCnt + vCount > (1 << 16)) {
            idxs += *i;
            draw = &draws->push_back();
            v = &draw->fVertexCnt;
            i = &draw->fIndexCnt;
        }

        const SkScalar negOneDists[2] = { -SK_Scalar1, -SK_Scalar1 };
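        // Corner and line-edge verts carry -1 in both trim-distance slots, so the interpolated
        // z/w never satisfy the shader's "z > 0 && w > 0" test and coverage always comes from
        // the gradient-based formula.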

        // FIXME: These tris are inset in the 1 unit arc around the corner
        SkPoint p0 = sega.endPt();
        // Position, Color, UV, D0, D1
        verts.write(p0, color, SkPoint{0, 0}, negOneDists);
        verts.write(p0 + sega.endNorm(), color, SkPoint{0, -SK_Scalar1}, negOneDists);
        verts.write(p0 + segb.fMid, color, SkPoint{0, -SK_Scalar1}, negOneDists);
        verts.write(p0 + segb.fNorms[0], color, SkPoint{0, -SK_Scalar1}, negOneDists);

        idxs[*i + 0] = *v + 0;
        idxs[*i + 1] = *v + 2;
        idxs[*i + 2] = *v + 1;
        idxs[*i + 3] = *v + 0;
        idxs[*i + 4] = *v + 3;
        idxs[*i + 5] = *v + 2;

        *v += 4;
        *i += 6;

        if (Segment::kLine == segb.fType) {
            // we draw the line edge as a degenerate quad (u is 0, v is the
            // signed distance to the edge)
            SkPoint v1Pos = sega.endPt();
            SkPoint v2Pos = segb.fPts[0];
            SkScalar dist = SkPointPriv::DistanceToLineBetween(fanPt, v1Pos, v2Pos);

            verts.write(fanPt, color, SkPoint{0, dist}, negOneDists);
            verts.write(v1Pos, color, SkPoint{0, 0}, negOneDists);
            verts.write(v2Pos, color, SkPoint{0, 0}, negOneDists);
            verts.write(v1Pos + segb.fNorms[0], color, SkPoint{0, -SK_Scalar1}, negOneDists);
            verts.write(v2Pos + segb.fNorms[0], color, SkPoint{0, -SK_Scalar1}, negOneDists);

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;

            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            *i += 6;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 5;
        } else {
            void* quadVertsBegin = verts.fPtr;

            SkPoint qpts[] = {sega.endPt(), segb.fPts[0], segb.fPts[1]};

            SkScalar c0 = segb.fNorms[0].dot(qpts[0]);
            SkScalar c1 = segb.fNorms[1].dot(qpts[2]);
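            // c0/c1 are the line-equation constants (n . p == c) for the outward normals at the
            // quad's endpoints. The two distance values written below are positive on the
            // interior side of those lines; the shader uses them to trim the infinite quad.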
            GrVertexWriter::Skip<SkPoint> skipUVs;

            verts.write(fanPt,
                        color, skipUVs,
                        -segb.fNorms[0].dot(fanPt) + c0,
                        -segb.fNorms[1].dot(fanPt) + c1);

            verts.write(qpts[0],
                        color, skipUVs,
                        0.0f,
                        -segb.fNorms[1].dot(qpts[0]) + c1);

            verts.write(qpts[2],
                        color, skipUVs,
                        -segb.fNorms[0].dot(qpts[2]) + c0,
                        0.0f);

            verts.write(qpts[0] + segb.fNorms[0],
                        color, skipUVs,
                        -SK_ScalarMax/100,
                        -SK_ScalarMax/100);

            verts.write(qpts[2] + segb.fNorms[1],
                        color, skipUVs,
                        -SK_ScalarMax/100,
                        -SK_ScalarMax/100);

            SkVector midVec = segb.fNorms[0] + segb.fNorms[1];
            midVec.normalize();

            verts.write(qpts[1] + midVec,
                        color, skipUVs,
                        -SK_ScalarMax/100,
                        -SK_ScalarMax/100);

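            // toUV maps the six device-space positions written above into the canonical (u, v)
            // space in which the quad is u^2 - v = 0, overwriting the UV slot at uvOffset in
            // each vertex.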
            GrPathUtils::QuadUVMatrix toUV(qpts);
            toUV.apply(quadVertsBegin, 6, vertexStride, uvOffset);

            idxs[*i + 0] = *v + 3;
            idxs[*i + 1] = *v + 1;
            idxs[*i + 2] = *v + 2;
            idxs[*i + 3] = *v + 4;
            idxs[*i + 4] = *v + 3;
            idxs[*i + 5] = *v + 2;

            idxs[*i + 6] = *v + 5;
            idxs[*i + 7] = *v + 3;
            idxs[*i + 8] = *v + 4;

            *i += 9;

            // Draw the interior fan if it exists.
            // TODO: Detect and combine colinear segments. This will ensure we catch every case
            // with no interior, and that the resulting shared edge uses the same endpoints.
            if (count >= 3) {
                idxs[*i + 0] = *v + 0;
                idxs[*i + 1] = *v + 2;
                idxs[*i + 2] = *v + 1;

                *i += 3;
            }

            *v += 6;
        }
    }
}

///////////////////////////////////////////////////////////////////////////////

/*
 * Quadratic specified by 0=u^2-v canonical coords. u and v are the first
 * two components of the vertex attribute. Coverage is based on signed
 * distance with negative being inside, positive outside. The edge is specified in
 * window space (y-down). The third and fourth components of the vertex attribute are
 * signed distances to the lines through the quad's endpoints, positive on the interior
 * side. When both interpolated values are > 0 the pixel lies inside the trimmed region
 * and coverage is taken from the smaller of the two; this is used to attempt to trim
 * to a portion of the infinite quad.
 * Requires shader derivative instruction support.
 */
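// Outside the trimmed region the fragment shader approximates the signed distance to the curve
// as f / |grad f|, where f(u, v) = u*u - v and grad f is reconstructed from the screen-space
// derivatives of (u, v) via the chain rule, then maps it to coverage with
// saturate(0.5 - f / |grad f|).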

class QuadEdgeEffect : public GrGeometryProcessor {
public:
    static GrGeometryProcessor* Make(SkArenaAlloc* arena,
                                     const SkMatrix& localMatrix,
                                     bool usesLocalCoords,
                                     bool wideColor) {
        return arena->make<QuadEdgeEffect>(localMatrix, usesLocalCoords, wideColor);
    }

    ~QuadEdgeEffect() override {}

    const char* name() const override { return "QuadEdge"; }

    class GLSLProcessor : public GrGLSLGeometryProcessor {
    public:
        GLSLProcessor() {}

        void onEmitCode(EmitArgs& args, GrGPArgs* gpArgs) override {
            const QuadEdgeEffect& qe = args.fGP.cast<QuadEdgeEffect>();
            GrGLSLVertexBuilder* vertBuilder = args.fVertBuilder;
            GrGLSLVaryingHandler* varyingHandler = args.fVaryingHandler;
            GrGLSLUniformHandler* uniformHandler = args.fUniformHandler;

            // emit attributes
            varyingHandler->emitAttributes(qe);

            GrGLSLVarying v(kHalf4_GrSLType);
            varyingHandler->addVarying("QuadEdge", &v);
            vertBuilder->codeAppendf("%s = %s;", v.vsOut(), qe.fInQuadEdge.name());

            // Setup pass through color
            varyingHandler->addPassThroughAttribute(qe.fInColor, args.fOutputColor);

            GrGLSLFPFragmentBuilder* fragBuilder = args.fFragBuilder;

            // Setup position
            this->writeOutputPosition(vertBuilder, gpArgs, qe.fInPosition.name());

            // emit transforms
            this->emitTransforms(vertBuilder,
                                 varyingHandler,
                                 uniformHandler,
                                 qe.fInPosition.asShaderVar(),
                                 qe.fLocalMatrix,
                                 args.fFPCoordTransformHandler);

            fragBuilder->codeAppendf("half edgeAlpha;");

            // keep the derivative instructions outside the conditional
            fragBuilder->codeAppendf("half2 duvdx = half2(dFdx(%s.xy));", v.fsIn());
            fragBuilder->codeAppendf("half2 duvdy = half2(dFdy(%s.xy));", v.fsIn());
            fragBuilder->codeAppendf("if (%s.z > 0.0 && %s.w > 0.0) {", v.fsIn(), v.fsIn());
            // today we know z and w are in device space. We could use derivatives
            fragBuilder->codeAppendf("edgeAlpha = min(min(%s.z, %s.w) + 0.5, 1.0);",
                                     v.fsIn(), v.fsIn());
            fragBuilder->codeAppendf("} else {");
            fragBuilder->codeAppendf("half2 gF = half2(2.0*%s.x*duvdx.x - duvdx.y,"
                                     " 2.0*%s.x*duvdy.x - duvdy.y);",
                                     v.fsIn(), v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = (%s.x*%s.x - %s.y);",
                                     v.fsIn(), v.fsIn(), v.fsIn());
            fragBuilder->codeAppendf("edgeAlpha = "
                                     "saturate(0.5 - edgeAlpha / length(gF));}");

            fragBuilder->codeAppendf("%s = half4(edgeAlpha);", args.fOutputCoverage);
        }

        static inline void GenKey(const GrGeometryProcessor& gp,
                                  const GrShaderCaps&,
                                  GrProcessorKeyBuilder* b) {
            const QuadEdgeEffect& qee = gp.cast<QuadEdgeEffect>();
            b->add32(SkToBool(qee.fUsesLocalCoords && qee.fLocalMatrix.hasPerspective()));
        }

        void setData(const GrGLSLProgramDataManager& pdman,
                     const GrPrimitiveProcessor& gp,
                     const CoordTransformRange& transformRange) override {
            const QuadEdgeEffect& qe = gp.cast<QuadEdgeEffect>();
            this->setTransformDataHelper(qe.fLocalMatrix, pdman, transformRange);
        }

    private:
        typedef GrGLSLGeometryProcessor INHERITED;
    };

    void getGLSLProcessorKey(const GrShaderCaps& caps, GrProcessorKeyBuilder* b) const override {
        GLSLProcessor::GenKey(*this, caps, b);
    }

    GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const override {
        return new GLSLProcessor();
    }

private:
    friend class ::SkArenaAlloc; // for access to ctor

    QuadEdgeEffect(const SkMatrix& localMatrix, bool usesLocalCoords, bool wideColor)
            : INHERITED(kQuadEdgeEffect_ClassID)
            , fLocalMatrix(localMatrix)
            , fUsesLocalCoords(usesLocalCoords) {
        fInPosition = {"inPosition", kFloat2_GrVertexAttribType, kFloat2_GrSLType};
        fInColor = MakeColorAttribute("inColor", wideColor);
        fInQuadEdge = {"inQuadEdge", kFloat4_GrVertexAttribType, kHalf4_GrSLType};
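        // The three attributes are declared as contiguous members, so a pointer to the first
        // one plus a count fully describes the vertex layout.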
        this->setVertexAttributes(&fInPosition, 3);
    }

    Attribute fInPosition;
    Attribute fInColor;
    Attribute fInQuadEdge;

    SkMatrix fLocalMatrix;
    bool fUsesLocalCoords;

    GR_DECLARE_GEOMETRY_PROCESSOR_TEST

    typedef GrGeometryProcessor INHERITED;
};

GR_DEFINE_GEOMETRY_PROCESSOR_TEST(QuadEdgeEffect);

#if GR_TEST_UTILS
GrGeometryProcessor* QuadEdgeEffect::TestCreate(GrProcessorTestData* d) {
    // Doesn't work without derivative instructions.
    return d->caps()->shaderCaps()->shaderDerivativeSupport()
                   ? QuadEdgeEffect::Make(d->allocator(), GrTest::TestMatrix(d->fRandom),
                                          d->fRandom->nextBool(), d->fRandom->nextBool())
                   : nullptr;
}
#endif

///////////////////////////////////////////////////////////////////////////////

GrPathRenderer::CanDrawPath
GrAAConvexPathRenderer::onCanDrawPath(const CanDrawPathArgs& args) const {
    // This check requires convexity and known direction, since the direction is used to build
    // the geometry segments. Degenerate convex paths will fall through to some other path renderer.
    if (args.fCaps->shaderCaps()->shaderDerivativeSupport() &&
        (GrAAType::kCoverage == args.fAAType) && args.fShape->style().isSimpleFill() &&
        !args.fShape->inverseFilled() && args.fShape->knownToBeConvex() &&
        args.fShape->knownDirection()) {
        return CanDrawPath::kYes;
    }
    return CanDrawPath::kNo;
}

namespace {

class AAConvexPathOp final : public GrMeshDrawOp {
private:
    using Helper = GrSimpleMeshDrawOpHelperWithStencil;

public:
    DEFINE_OP_CLASS_ID

    static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
                                          GrPaint&& paint,
                                          const SkMatrix& viewMatrix,
                                          const SkPath& path,
                                          const GrUserStencilSettings* stencilSettings) {
        return Helper::FactoryHelper<AAConvexPathOp>(context, std::move(paint), viewMatrix, path,
                                                     stencilSettings);
    }

    AAConvexPathOp(const Helper::MakeArgs& helperArgs, const SkPMColor4f& color,
                   const SkMatrix& viewMatrix, const SkPath& path,
                   const GrUserStencilSettings* stencilSettings)
            : INHERITED(ClassID()), fHelper(helperArgs, GrAAType::kCoverage, stencilSettings) {
        fPaths.emplace_back(PathData{viewMatrix, path, color});
        this->setTransformedBounds(path.getBounds(), viewMatrix, HasAABloat::kYes,
                                   IsHairline::kNo);
    }

    const char* name() const override { return "AAConvexPathOp"; }

    void visitProxies(const VisitProxyFunc& func) const override {
        if (fProgramInfo) {
            fProgramInfo->visitFPProxies(func);
        } else {
            fHelper.visitProxies(func);
        }
    }

#ifdef SK_DEBUG
    SkString dumpInfo() const override {
        SkString string;
        string.appendf("Count: %d\n", fPaths.count());
        string += fHelper.dumpInfo();
        string += INHERITED::dumpInfo();
        return string;
    }
#endif

    FixedFunctionFlags fixedFunctionFlags() const override { return fHelper.fixedFunctionFlags(); }

    GrProcessorSet::Analysis finalize(
            const GrCaps& caps, const GrAppliedClip* clip, bool hasMixedSampledCoverage,
            GrClampType clampType) override {
        return fHelper.finalizeProcessors(
                caps, clip, hasMixedSampledCoverage, clampType,
                GrProcessorAnalysisCoverage::kSingleChannel, &fPaths.back().fColor, &fWideColor);
    }

private:
    GrProgramInfo* programInfo() override { return fProgramInfo; }

    void onCreateProgramInfo(const GrCaps* caps,
                             SkArenaAlloc* arena,
                             const GrSurfaceProxyView* writeView,
                             GrAppliedClip&& appliedClip,
                             const GrXferProcessor::DstProxyView& dstProxyView) override {
        SkMatrix invert;
        if (fHelper.usesLocalCoords() && !fPaths.back().fViewMatrix.invert(&invert)) {
            return;
        }

        GrGeometryProcessor* quadProcessor = QuadEdgeEffect::Make(arena, invert,
                                                                  fHelper.usesLocalCoords(),
                                                                  fWideColor);

        fProgramInfo = fHelper.createProgramInfoWithStencil(caps, arena, writeView,
                                                            std::move(appliedClip),
                                                            dstProxyView, quadProcessor,
                                                            GrPrimitiveType::kTriangles);
    }

    void onPrepareDraws(Target* target) override {
        int instanceCount = fPaths.count();

        if (!fProgramInfo) {
            this->createProgramInfo(target);
            if (!fProgramInfo) {
                return;
            }
        }

        const size_t kVertexStride = fProgramInfo->primProc().vertexStride();

        fDraws.reserve(instanceCount);

        // TODO generate all segments for all paths and use one vertex buffer
        for (int i = 0; i < instanceCount; i++) {
            const PathData& args = fPaths[i];

            // We rely on SkPath::transform() to subdivide the path when the matrix has
            // perspective, so we transform the path up front in that case. Otherwise, we
            // apply the view matrix when copying to the segment representation.
            const SkMatrix* viewMatrix = &args.fViewMatrix;

            // We avoid initializing the path unless we have to
            const SkPath* pathPtr = &args.fPath;
            SkTLazy<SkPath> tmpPath;
            if (viewMatrix->hasPerspective()) {
                SkPath* tmpPathPtr = tmpPath.init(*pathPtr);
                tmpPathPtr->setIsVolatile(true);
                tmpPathPtr->transform(*viewMatrix);
                viewMatrix = &SkMatrix::I();
                pathPtr = tmpPathPtr;
            }

            int vertexCount;
            int indexCount;
            enum {
                kPreallocSegmentCnt = 512 / sizeof(Segment),
                kPreallocDrawCnt = 4,
            };
            SkSTArray<kPreallocSegmentCnt, Segment, true> segments;
            SkPoint fanPt;

            if (!get_segments(*pathPtr, *viewMatrix, &segments, &fanPt, &vertexCount,
                              &indexCount)) {
                continue;
            }

            sk_sp<const GrBuffer> vertexBuffer;
            int firstVertex;

            GrVertexWriter verts{target->makeVertexSpace(kVertexStride, vertexCount,
                                                         &vertexBuffer, &firstVertex)};

            if (!verts.fPtr) {
                SkDebugf("Could not allocate vertices\n");
                return;
            }

            sk_sp<const GrBuffer> indexBuffer;
            int firstIndex;

            uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
            if (!idxs) {
                SkDebugf("Could not allocate indices\n");
                return;
            }

            SkSTArray<kPreallocDrawCnt, Draw, true> draws;
            GrVertexColor color(args.fColor, fWideColor);
            create_vertices(segments, fanPt, color, &draws, verts, idxs, kVertexStride);

            GrSimpleMesh* meshes = target->allocMeshes(draws.count());
            for (int j = 0; j < draws.count(); ++j) {
                const Draw& draw = draws[j];
                meshes[j].setIndexed(indexBuffer, draw.fIndexCnt, firstIndex, 0,
                                     draw.fVertexCnt - 1, GrPrimitiveRestart::kNo, vertexBuffer,
                                     firstVertex);
                firstIndex += draw.fIndexCnt;
                firstVertex += draw.fVertexCnt;
            }

            fDraws.push_back({ meshes, draws.count() });
        }
    }

    void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override {
        if (!fProgramInfo || fDraws.isEmpty()) {
            return;
        }

        flushState->bindPipelineAndScissorClip(*fProgramInfo, chainBounds);
        flushState->bindTextures(fProgramInfo->primProc(), nullptr, fProgramInfo->pipeline());
        for (int i = 0; i < fDraws.count(); ++i) {
            for (int j = 0; j < fDraws[i].fMeshCount; ++j) {
                flushState->drawMesh(fDraws[i].fMeshes[j]);
            }
        }
    }

    CombineResult onCombineIfPossible(GrOp* t, GrRecordingContext::Arenas*,
                                      const GrCaps& caps) override {
        AAConvexPathOp* that = t->cast<AAConvexPathOp>();
        if (!fHelper.isCompatible(that->fHelper, caps, this->bounds(), that->bounds())) {
            return CombineResult::kCannotCombine;
        }
        if (fHelper.usesLocalCoords() &&
            !SkMatrixPriv::CheapEqual(fPaths[0].fViewMatrix, that->fPaths[0].fViewMatrix)) {
            return CombineResult::kCannotCombine;
        }

        fPaths.push_back_n(that->fPaths.count(), that->fPaths.begin());
        fWideColor |= that->fWideColor;
        return CombineResult::kMerged;
    }

    struct PathData {
        SkMatrix fViewMatrix;
        SkPath fPath;
        SkPMColor4f fColor;
    };

    Helper fHelper;
    SkSTArray<1, PathData, true> fPaths;
    bool fWideColor;

    struct MeshDraw {
        GrSimpleMesh* fMeshes;
        int fMeshCount;
    };

    SkTDArray<MeshDraw> fDraws;
    GrProgramInfo* fProgramInfo = nullptr;

    typedef GrMeshDrawOp INHERITED;
};

} // anonymous namespace

bool GrAAConvexPathRenderer::onDrawPath(const DrawPathArgs& args) {
    GR_AUDIT_TRAIL_AUTO_FRAME(args.fRenderTargetContext->auditTrail(),
                              "GrAAConvexPathRenderer::onDrawPath");
    SkASSERT(args.fRenderTargetContext->numSamples() <= 1);
    SkASSERT(!args.fShape->isEmpty());

    SkPath path;
    args.fShape->asPath(&path);

    std::unique_ptr<GrDrawOp> op = AAConvexPathOp::Make(args.fContext, std::move(args.fPaint),
                                                        *args.fViewMatrix,
                                                        path, args.fUserStencilSettings);
    args.fRenderTargetContext->addDrawOp(*args.fClip, std::move(op));
    return true;
}

///////////////////////////////////////////////////////////////////////////////////////////////////

#if GR_TEST_UTILS

GR_DRAW_OP_TEST_DEFINE(AAConvexPathOp) {
    SkMatrix viewMatrix = GrTest::TestMatrixInvertible(random);
    SkPath path = GrTest::TestPathConvex(random);
    const GrUserStencilSettings* stencilSettings = GrGetRandomStencil(random, context);
    return AAConvexPathOp::Make(context, std::move(paint), viewMatrix, path, stencilSettings);
}

#endif