/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/ccpr/GrCCFiller.h"

#include "include/core/SkPath.h"
#include "include/core/SkPoint.h"
#include "src/core/SkMathPriv.h"
#include "src/core/SkPathPriv.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrOnFlushResourceProvider.h"
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrProgramInfo.h"
#include <cstdlib>

using TriPointInstance = GrCCCoverageProcessor::TriPointInstance;
using QuadPointInstance = GrCCCoverageProcessor::QuadPointInstance;

GrCCFiller::GrCCFiller(Algorithm algorithm, int numPaths, int numSkPoints, int numSkVerbs,
                       int numConicWeights)
        : fAlgorithm(algorithm)
        , fGeometry(numSkPoints, numSkVerbs, numConicWeights)
        , fPathInfos(numPaths)
        , fScissorSubBatches(numPaths)
        , fTotalPrimitiveCounts{PrimitiveTallies(), PrimitiveTallies()} {
    // Batches decide what to draw by looking where the previous one ended. Define initial batches
    // that "end" at the beginning of the data. These will not be drawn, but will only be read by
    // the first actual batch.
    fScissorSubBatches.push_back() = {PrimitiveTallies(), SkIRect::MakeEmpty()};
    fBatches.push_back() = {PrimitiveTallies(), fScissorSubBatches.count(), PrimitiveTallies()};
}

void GrCCFiller::parseDeviceSpaceFill(const SkPath& path, const SkPoint* deviceSpacePts,
                                      GrScissorTest scissorTest, const SkIRect& clippedDevIBounds,
                                      const SkIVector& devToAtlasOffset) {
    SkASSERT(!fInstanceBuffer.hasGpuBuffer());  // Can't call after prepareToDraw().
    SkASSERT(!path.isEmpty());

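    // Note where this path's points and verbs begin within fGeometry, in case we decide to
    // tessellate its fan below.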
    int currPathPointsIdx = fGeometry.points().count();
    int currPathVerbsIdx = fGeometry.verbs().count();
    PrimitiveTallies currPathPrimitiveCounts = PrimitiveTallies();

    fGeometry.beginPath();

    const float* conicWeights = SkPathPriv::ConicWeightData(path);
    int ptsIdx = 0;
    int conicWeightsIdx = 0;
    bool insideContour = false;

    for (SkPath::Verb verb : SkPathPriv::Verbs(path)) {
        switch (verb) {
            case SkPath::kMove_Verb:
                if (insideContour) {
                    currPathPrimitiveCounts += fGeometry.endContour();
                }
                fGeometry.beginContour(deviceSpacePts[ptsIdx]);
                ++ptsIdx;
                insideContour = true;
                continue;
            case SkPath::kClose_Verb:
                if (insideContour) {
                    currPathPrimitiveCounts += fGeometry.endContour();
                }
                insideContour = false;
                continue;
            case SkPath::kLine_Verb:
                fGeometry.lineTo(&deviceSpacePts[ptsIdx - 1]);
                ++ptsIdx;
                continue;
            case SkPath::kQuad_Verb:
                fGeometry.quadraticTo(&deviceSpacePts[ptsIdx - 1]);
                ptsIdx += 2;
                continue;
            case SkPath::kCubic_Verb:
                fGeometry.cubicTo(&deviceSpacePts[ptsIdx - 1]);
                ptsIdx += 3;
                continue;
            case SkPath::kConic_Verb:
                fGeometry.conicTo(&deviceSpacePts[ptsIdx - 1], conicWeights[conicWeightsIdx]);
                ptsIdx += 2;
                ++conicWeightsIdx;
                continue;
            default:
                SK_ABORT("Unexpected path verb.");
        }
    }
    SkASSERT(ptsIdx == path.countPoints());
    SkASSERT(conicWeightsIdx == SkPathPriv::ConicWeightCnt(path));

    if (insideContour) {
        currPathPrimitiveCounts += fGeometry.endContour();
    }

    fPathInfos.emplace_back(scissorTest, devToAtlasOffset);

    // Tessellate fans from very large and/or simple paths, in order to reduce overdraw.
    int numVerbs = fGeometry.verbs().count() - currPathVerbsIdx - 1;
    int64_t tessellationWork = (int64_t)numVerbs * (32 - SkCLZ(numVerbs));  // N log N.
    int64_t fanningWork = (int64_t)clippedDevIBounds.height() * clippedDevIBounds.width();
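    // Cost heuristic: each unit of N*log(N) tessellation work is weighted against 50x50 pixels of
    // potential overdraw, and fans whose clipped device bounds cover less than roughly 100x100
    // pixels are never tessellated.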
    if (tessellationWork * (50*50) + (100*100) < fanningWork) {  // Don't tessellate under 100x100.
        fPathInfos.back().tessellateFan(
                fAlgorithm, path, fGeometry, currPathVerbsIdx, currPathPointsIdx, clippedDevIBounds,
                &currPathPrimitiveCounts);
    }

    fTotalPrimitiveCounts[(int)scissorTest] += currPathPrimitiveCounts;

    if (GrScissorTest::kEnabled == scissorTest) {
        fScissorSubBatches.push_back() = {fTotalPrimitiveCounts[(int)GrScissorTest::kEnabled],
                                          clippedDevIBounds.makeOffset(devToAtlasOffset)};
    }
}

void GrCCFiller::PathInfo::tessellateFan(
        Algorithm algorithm, const SkPath& originalPath, const GrCCFillGeometry& geometry,
        int verbsIdx, int ptsIdx, const SkIRect& clippedDevIBounds,
        PrimitiveTallies* newTriangleCounts) {
    using Verb = GrCCFillGeometry::Verb;
    SkASSERT(-1 == fFanTessellationCount);
    SkASSERT(!fFanTessellation);

    const SkTArray<Verb, true>& verbs = geometry.verbs();
    const SkTArray<SkPoint, true>& pts = geometry.points();

    newTriangleCounts->fTriangles =
            newTriangleCounts->fWeightedTriangles = 0;

    // Build an SkPath of the Redbook fan.
    SkPath fan;
    if (Algorithm::kCoverageCount == algorithm) {
        // We use "winding" fill type right now because we are producing a coverage count, and must
        // fill in every region that has non-zero wind. The path processor will convert coverage
        // count to the appropriate fill type later.
        fan.setFillType(SkPathFillType::kWinding);
    } else {
        // When counting winding numbers in the stencil buffer, it works to use even/odd for the fan
        // tessellation (where applicable). But we need to strip out inverse fill info because
        // inverse-ness gets accounted for later on.
        fan.setFillType(SkPathFillType_ConvertToNonInverse(originalPath.getFillType()));
    }
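    // Replay the parsed geometry into the fan, replacing each (monotonic) curve with a straight
    // line to its endpoint. The fan only needs the curve endpoints; the curves themselves are
    // emitted separately as quadratic/cubic/conic instances.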
    SkASSERT(Verb::kBeginPath == verbs[verbsIdx]);
    for (int i = verbsIdx + 1; i < verbs.count(); ++i) {
        switch (verbs[i]) {
            case Verb::kBeginPath:
                SK_ABORT("Invalid GrCCFillGeometry");
                continue;

            case Verb::kBeginContour:
                fan.moveTo(pts[ptsIdx++]);
                continue;

            case Verb::kLineTo:
                fan.lineTo(pts[ptsIdx++]);
                continue;

            case Verb::kMonotonicQuadraticTo:
            case Verb::kMonotonicConicTo:
                fan.lineTo(pts[ptsIdx + 1]);
                ptsIdx += 2;
                continue;

            case Verb::kMonotonicCubicTo:
                fan.lineTo(pts[ptsIdx + 2]);
                ptsIdx += 3;
                continue;

            case Verb::kEndClosedContour:
            case Verb::kEndOpenContour:
                fan.close();
                continue;
        }
    }

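    // Triangulate the fan. The fan contains only line segments, so the linearization tolerance
    // (passed as infinity) has no effect on it. The triangulator reports each triangle's winding
    // number, which we fold into a per-triangle coverage weight below.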
    GrTriangulator::WindingVertex* vertices = nullptr;
    SkASSERT(!fan.isInverseFillType());
    fFanTessellationCount = GrTriangulator::PathToVertices(
            fan, std::numeric_limits<float>::infinity(), SkRect::Make(clippedDevIBounds),
            &vertices);
    if (fFanTessellationCount <= 0) {
        SkASSERT(0 == fFanTessellationCount);
        SkASSERT(nullptr == vertices);
        return;
    }

    SkASSERT(0 == fFanTessellationCount % 3);
    for (int i = 0; i < fFanTessellationCount; i += 3) {
        int weight = abs(vertices[i].fWinding);
        if (SkPathFillType::kEvenOdd == fan.getFillType()) {
            // The tessellator doesn't wrap weights modulo 2 when we request even/odd fill type.
            SkASSERT(weight & 1);
            weight = 1;
        }
        if (weight > 1 && Algorithm::kCoverageCount == algorithm) {
            ++newTriangleCounts->fWeightedTriangles;
        } else {
            newTriangleCounts->fTriangles += weight;
        }
        vertices[i].fWinding = weight;
    }

    fFanTessellation.reset(vertices);
}

GrCCFiller::BatchID GrCCFiller::closeCurrentBatch() {
    SkASSERT(!fInstanceBuffer.hasGpuBuffer());
    SkASSERT(!fBatches.empty());

    const auto& lastBatch = fBatches.back();
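    // A batch issues at most one non-scissored draw plus one draw per scissor sub-batch added
    // since the previous batch closed.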
    int maxMeshes = 1 + fScissorSubBatches.count() - lastBatch.fEndScissorSubBatchIdx;
    fMaxMeshesPerDraw = std::max(fMaxMeshesPerDraw, maxMeshes);

    const auto& lastScissorSubBatch = fScissorSubBatches[lastBatch.fEndScissorSubBatchIdx - 1];
    PrimitiveTallies batchTotalCounts = fTotalPrimitiveCounts[(int)GrScissorTest::kDisabled] -
                                        lastBatch.fEndNonScissorIndices;
    batchTotalCounts += fTotalPrimitiveCounts[(int)GrScissorTest::kEnabled] -
                        lastScissorSubBatch.fEndPrimitiveIndices;

    // This will invalidate lastBatch.
    fBatches.push_back() = {
        fTotalPrimitiveCounts[(int)GrScissorTest::kDisabled],
        fScissorSubBatches.count(),
        batchTotalCounts
    };
    return fBatches.count() - 1;
}

// Emits a contour's triangle fan.
//
// Classic Redbook fanning would be the triangles: [0 1 2], [0 2 3], ..., [0 n-2 n-1].
//
// This function emits the triangle: [0 n/3 n*2/3], and then recurses on all three sides. The
// advantage to this approach is that for a convex-ish contour, it generates larger triangles.
// Classic fanning tends to generate long, skinny triangles, which are expensive to draw since they
// have a longer perimeter to rasterize and antialias.
//
// The indices array indexes the fan's points (think: glDrawElements), and must have at least
// log3(indexCount) elements past the end for this method to use as scratch space.
//
// Returns the next triangle instance after the final one emitted.
static TriPointInstance* emit_recursive_fan(
        const SkTArray<SkPoint, true>& pts, SkTArray<int32_t, true>& indices, int firstIndex,
        int indexCount, const Sk2f& devToAtlasOffset, TriPointInstance::Ordering ordering,
        TriPointInstance out[]) {
    if (indexCount < 3) {
        return out;
    }

    int32_t oneThirdCount = indexCount / 3;
    int32_t twoThirdsCount = (2 * indexCount) / 3;
    out++->set(pts[indices[firstIndex]], pts[indices[firstIndex + oneThirdCount]],
               pts[indices[firstIndex + twoThirdsCount]], devToAtlasOffset, ordering);

    out = emit_recursive_fan(
            pts, indices, firstIndex, oneThirdCount + 1, devToAtlasOffset, ordering, out);
    out = emit_recursive_fan(
            pts, indices, firstIndex + oneThirdCount, twoThirdsCount - oneThirdCount + 1,
            devToAtlasOffset, ordering, out);

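    // The third sub-fan wraps around from the last point back to the first. Temporarily stash the
    // first index in the slot just past this sub-fan's range (scratch space at the top level, which
    // is why callers reserve log3(n) extra elements), then restore the original value afterward.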
    int endIndex = firstIndex + indexCount;
    int32_t oldValue = indices[endIndex];
    indices[endIndex] = indices[firstIndex];
    out = emit_recursive_fan(
            pts, indices, firstIndex + twoThirdsCount, indexCount - twoThirdsCount + 1,
            devToAtlasOffset, ordering, out);
    indices[endIndex] = oldValue;

    return out;
}

void GrCCFiller::emitTessellatedFan(
        const GrTriangulator::WindingVertex* vertices, int numVertices,
        const Sk2f& devToAtlasOffset, TriPointInstance::Ordering ordering,
        TriPointInstance* triPointInstanceData, QuadPointInstance* quadPointInstanceData,
        GrCCFillGeometry::PrimitiveTallies* indices) {
    for (int i = 0; i < numVertices; i += 3) {
        int weight = vertices[i].fWinding;
        SkASSERT(weight >= 1);
        if (weight > 1 && Algorithm::kStencilWindingCount != fAlgorithm) {
            quadPointInstanceData[indices->fWeightedTriangles++].setW(
                    vertices[i].fPos, vertices[i + 1].fPos, vertices[i + 2].fPos, devToAtlasOffset,
                    static_cast<float>(abs(vertices[i].fWinding)));
        } else for (int j = 0; j < weight; ++j) {
            // Unfortunately, there is no way to increment stencil values by an amount larger
            // than 1. Instead we draw the triangle 'weight' times.
            triPointInstanceData[indices->fTriangles++].set(
                    vertices[i].fPos, vertices[i + 1].fPos, vertices[i + 2].fPos, devToAtlasOffset,
                    ordering);
        }
    }
}

bool GrCCFiller::prepareToDraw(GrOnFlushResourceProvider* onFlushRP) {
    using Verb = GrCCFillGeometry::Verb;
    SkASSERT(!fInstanceBuffer.hasGpuBuffer());
    SkASSERT(fBatches.back().fEndNonScissorIndices ==  // Call closeCurrentBatch().
             fTotalPrimitiveCounts[(int)GrScissorTest::kDisabled]);
    SkASSERT(fBatches.back().fEndScissorSubBatchIdx == fScissorSubBatches.count());

    auto triangleOrdering = (Algorithm::kCoverageCount == fAlgorithm)
            ? TriPointInstance::Ordering::kXYTransposed
            : TriPointInstance::Ordering::kXYInterleaved;

    // Here we build a single instance buffer to share with every internal batch.
    //
    // CCPR processes 5 different types of primitives: triangles, weighted triangles, quadratics,
    // cubics, and conics. Each primitive type is further divided into instances that require a
    // scissor and those that don't. This leaves us with 5*2 = 10 independent instance arrays to
    // build for the GPU.
    //
    // Rather than place each instance array in its own GPU buffer, we allocate a single
    // megabuffer and lay them all out side-by-side. We can offset the "baseInstance" parameter in
    // our draw calls to direct the GPU to the applicable elements within a given array.
    //
    // We already know how big to make each of the arrays from fTotalPrimitiveCounts, so layout is
    // straightforward. Start with triangles and quadratics. They both view the instance buffer as
    // an array of TriPointInstance[], so we can begin at zero and lay them out one after the other.
    fBaseInstances[0].fTriangles = 0;
    fBaseInstances[1].fTriangles = fBaseInstances[0].fTriangles +
                                   fTotalPrimitiveCounts[0].fTriangles;
    fBaseInstances[0].fQuadratics = fBaseInstances[1].fTriangles +
                                    fTotalPrimitiveCounts[1].fTriangles;
    fBaseInstances[1].fQuadratics = fBaseInstances[0].fQuadratics +
                                    fTotalPrimitiveCounts[0].fQuadratics;
    int triEndIdx = fBaseInstances[1].fQuadratics + fTotalPrimitiveCounts[1].fQuadratics;

    // Wound triangles and cubics both view the same instance buffer as an array of
    // QuadPointInstance[]. So, reinterpreting the instance data as QuadPointInstance[], we start
    // them on the first index that will not overwrite previous TriPointInstance data.
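    // GrSizeDivRoundUp converts the byte offset at the end of the TriPointInstance data into
    // QuadPointInstance units, rounding up so the quad data begins past everything written so far.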
    int quadBaseIdx =
            GrSizeDivRoundUp(triEndIdx * sizeof(TriPointInstance), sizeof(QuadPointInstance));
    fBaseInstances[0].fWeightedTriangles = quadBaseIdx;
    fBaseInstances[1].fWeightedTriangles = fBaseInstances[0].fWeightedTriangles +
                                           fTotalPrimitiveCounts[0].fWeightedTriangles;
    fBaseInstances[0].fCubics = fBaseInstances[1].fWeightedTriangles +
                                fTotalPrimitiveCounts[1].fWeightedTriangles;
    fBaseInstances[1].fCubics = fBaseInstances[0].fCubics + fTotalPrimitiveCounts[0].fCubics;
    fBaseInstances[0].fConics = fBaseInstances[1].fCubics + fTotalPrimitiveCounts[1].fCubics;
    fBaseInstances[1].fConics = fBaseInstances[0].fConics + fTotalPrimitiveCounts[0].fConics;
    int quadEndIdx = fBaseInstances[1].fConics + fTotalPrimitiveCounts[1].fConics;

    fInstanceBuffer.resetAndMapBuffer(onFlushRP, quadEndIdx * sizeof(QuadPointInstance));
    if (!fInstanceBuffer.hasGpuBuffer()) {
        SkDebugf("WARNING: failed to allocate CCPR fill instance buffer.\n");
        return false;
    }

    auto triPointInstanceData = reinterpret_cast<TriPointInstance*>(fInstanceBuffer.data());
    auto quadPointInstanceData = reinterpret_cast<QuadPointInstance*>(fInstanceBuffer.data());
    SkASSERT(quadPointInstanceData);

    PathInfo* nextPathInfo = fPathInfos.begin();
    Sk2f devToAtlasOffset;
    PrimitiveTallies instanceIndices[2] = {fBaseInstances[0], fBaseInstances[1]};
    PrimitiveTallies* currIndices = nullptr;
    SkSTArray<256, int32_t, true> currFan;
    bool currFanIsTessellated = false;
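    // When a path's fan was pre-tessellated, its triangles are emitted up front on kBeginPath.
    // Otherwise 'currFan' accumulates the current contour's point indices and the fan is emitted
    // recursively when the contour ends.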

    const SkTArray<SkPoint, true>& pts = fGeometry.points();
    int ptsIdx = -1;
    int nextConicWeightIdx = 0;

    // Expand the ccpr verbs into GPU instance buffers.
    for (Verb verb : fGeometry.verbs()) {
        switch (verb) {
            case Verb::kBeginPath:
                SkASSERT(currFan.empty());
                currIndices = &instanceIndices[(int)nextPathInfo->scissorTest()];
                devToAtlasOffset = Sk2f(static_cast<float>(nextPathInfo->devToAtlasOffset().fX),
                                        static_cast<float>(nextPathInfo->devToAtlasOffset().fY));
                currFanIsTessellated = nextPathInfo->hasFanTessellation();
                if (currFanIsTessellated) {
                    this->emitTessellatedFan(
                            nextPathInfo->fanTessellation(), nextPathInfo->fanTessellationCount(),
                            devToAtlasOffset, triangleOrdering, triPointInstanceData,
                            quadPointInstanceData, currIndices);
                }
                ++nextPathInfo;
                continue;

            case Verb::kBeginContour:
                SkASSERT(currFan.empty());
                ++ptsIdx;
                if (!currFanIsTessellated) {
                    currFan.push_back(ptsIdx);
                }
                continue;

            case Verb::kLineTo:
                ++ptsIdx;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case Verb::kMonotonicQuadraticTo:
                triPointInstanceData[currIndices->fQuadratics++].set(
                        &pts[ptsIdx], devToAtlasOffset, TriPointInstance::Ordering::kXYTransposed);
                ptsIdx += 2;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case Verb::kMonotonicCubicTo:
                quadPointInstanceData[currIndices->fCubics++].set(
                        &pts[ptsIdx], devToAtlasOffset[0], devToAtlasOffset[1]);
                ptsIdx += 3;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case Verb::kMonotonicConicTo:
                quadPointInstanceData[currIndices->fConics++].setW(
                        &pts[ptsIdx], devToAtlasOffset,
                        fGeometry.getConicWeight(nextConicWeightIdx));
                ptsIdx += 2;
                ++nextConicWeightIdx;
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.push_back(ptsIdx);
                }
                continue;

            case Verb::kEndClosedContour:  // endPt == startPt.
                if (!currFanIsTessellated) {
                    SkASSERT(!currFan.empty());
                    currFan.pop_back();
                }
                [[fallthrough]];
            case Verb::kEndOpenContour:  // endPt != startPt.
                SkASSERT(!currFanIsTessellated || currFan.empty());
                if (!currFanIsTessellated && currFan.count() >= 3) {
                    int fanSize = currFan.count();
                    // Reserve space for emit_recursive_fan. Technically this can grow to
                    // fanSize + log3(fanSize), but we approximate with log2.
                    currFan.push_back_n(SkNextLog2(fanSize));
                    SkDEBUGCODE(TriPointInstance* end =) emit_recursive_fan(
                            pts, currFan, 0, fanSize, devToAtlasOffset, triangleOrdering,
                            triPointInstanceData + currIndices->fTriangles);
                    currIndices->fTriangles += fanSize - 2;
                    SkASSERT(triPointInstanceData + currIndices->fTriangles == end);
                }
                currFan.reset();
                continue;
        }
    }

    fInstanceBuffer.unmapBuffer();

    SkASSERT(nextPathInfo == fPathInfos.end());
    SkASSERT(ptsIdx == pts.count() - 1);
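    // Every instance array should now be filled exactly: each running tally must have landed at
    // the start of the next array (or at the very end of the buffer).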
    SkASSERT(instanceIndices[0].fTriangles == fBaseInstances[1].fTriangles);
    SkASSERT(instanceIndices[1].fTriangles == fBaseInstances[0].fQuadratics);
    SkASSERT(instanceIndices[0].fQuadratics == fBaseInstances[1].fQuadratics);
    SkASSERT(instanceIndices[1].fQuadratics == triEndIdx);
    SkASSERT(instanceIndices[0].fWeightedTriangles == fBaseInstances[1].fWeightedTriangles);
    SkASSERT(instanceIndices[1].fWeightedTriangles == fBaseInstances[0].fCubics);
    SkASSERT(instanceIndices[0].fCubics == fBaseInstances[1].fCubics);
    SkASSERT(instanceIndices[1].fCubics == fBaseInstances[0].fConics);
    SkASSERT(instanceIndices[0].fConics == fBaseInstances[1].fConics);
    SkASSERT(instanceIndices[1].fConics == quadEndIdx);

    return true;
}

void GrCCFiller::drawFills(
        GrOpFlushState* flushState, GrCCCoverageProcessor* proc, const GrPipeline& pipeline,
        BatchID batchID, const SkIRect& drawBounds) const {
    using PrimitiveType = GrCCCoverageProcessor::PrimitiveType;

    SkASSERT(fInstanceBuffer.hasGpuBuffer());

    GrResourceProvider* rp = flushState->resourceProvider();
    const PrimitiveTallies& batchTotalCounts = fBatches[batchID].fTotalPrimitiveCounts;

    int numSubpasses = proc->numSubpasses();
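    // Draw each primitive type present in this batch, resetting the processor for every subpass.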

    if (batchTotalCounts.fTriangles) {
        for (int i = 0; i < numSubpasses; ++i) {
            proc->reset(PrimitiveType::kTriangles, i, rp);
            this->drawPrimitives(flushState, *proc, pipeline, batchID,
                                 &PrimitiveTallies::fTriangles, drawBounds);
        }
    }

    if (batchTotalCounts.fWeightedTriangles) {
        SkASSERT(Algorithm::kStencilWindingCount != fAlgorithm);
        for (int i = 0; i < numSubpasses; ++i) {
            proc->reset(PrimitiveType::kWeightedTriangles, i, rp);
            this->drawPrimitives(flushState, *proc, pipeline, batchID,
                                 &PrimitiveTallies::fWeightedTriangles, drawBounds);
        }
    }

    if (batchTotalCounts.fQuadratics) {
        for (int i = 0; i < numSubpasses; ++i) {
            proc->reset(PrimitiveType::kQuadratics, i, rp);
            this->drawPrimitives(flushState, *proc, pipeline, batchID,
                                 &PrimitiveTallies::fQuadratics, drawBounds);
        }
    }

    if (batchTotalCounts.fCubics) {
        for (int i = 0; i < numSubpasses; ++i) {
            proc->reset(PrimitiveType::kCubics, i, rp);
            this->drawPrimitives(flushState, *proc, pipeline, batchID, &PrimitiveTallies::fCubics,
                                 drawBounds);
        }
    }

    if (batchTotalCounts.fConics) {
        for (int i = 0; i < numSubpasses; ++i) {
            proc->reset(PrimitiveType::kConics, i, rp);
            this->drawPrimitives(flushState, *proc, pipeline, batchID, &PrimitiveTallies::fConics,
                                 drawBounds);
        }
    }
}

void GrCCFiller::drawPrimitives(
        GrOpFlushState* flushState, const GrCCCoverageProcessor& proc, const GrPipeline& pipeline,
        BatchID batchID, int PrimitiveTallies::*instanceType, const SkIRect& drawBounds) const {
    SkASSERT(pipeline.isScissorTestEnabled());

    GrOpsRenderPass* renderPass = flushState->opsRenderPass();
    proc.bindPipeline(flushState, pipeline, SkRect::Make(drawBounds));
    proc.bindBuffers(renderPass, fInstanceBuffer.gpuBuffer());

    SkASSERT(batchID > 0);
    SkASSERT(batchID < fBatches.count());
    const Batch& previousBatch = fBatches[batchID - 1];
    const Batch& batch = fBatches[batchID];
    SkDEBUGCODE(int totalInstanceCount = 0);

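    // First, draw the non-scissored instances this batch added since the previous batch closed.
    // The pipeline always has the scissor test enabled, so set the scissor to the full drawBounds.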
    if (int instanceCount = batch.fEndNonScissorIndices.*instanceType -
                            previousBatch.fEndNonScissorIndices.*instanceType) {
        SkASSERT(instanceCount > 0);
        int baseInstance = fBaseInstances[(int)GrScissorTest::kDisabled].*instanceType +
                           previousBatch.fEndNonScissorIndices.*instanceType;
        renderPass->setScissorRect(SkIRect::MakeXYWH(0, 0, drawBounds.width(),
                                                     drawBounds.height()));
        proc.drawInstances(renderPass, instanceCount, baseInstance);
        SkDEBUGCODE(totalInstanceCount += instanceCount);
    }

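    // Next, draw the scissored instances: one draw per scissor sub-batch owned by this batch, each
    // with its own scissor rect.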
    SkASSERT(previousBatch.fEndScissorSubBatchIdx > 0);
    SkASSERT(batch.fEndScissorSubBatchIdx <= fScissorSubBatches.count());
    int baseScissorInstance = fBaseInstances[(int)GrScissorTest::kEnabled].*instanceType;
    for (int i = previousBatch.fEndScissorSubBatchIdx; i < batch.fEndScissorSubBatchIdx; ++i) {
        const ScissorSubBatch& previousSubBatch = fScissorSubBatches[i - 1];
        const ScissorSubBatch& scissorSubBatch = fScissorSubBatches[i];
        int startIndex = previousSubBatch.fEndPrimitiveIndices.*instanceType;
        int instanceCount = scissorSubBatch.fEndPrimitiveIndices.*instanceType - startIndex;
        if (!instanceCount) {
            continue;
        }
        SkASSERT(instanceCount > 0);
        renderPass->setScissorRect(scissorSubBatch.fScissor);
        proc.drawInstances(renderPass, instanceCount, baseScissorInstance + startIndex);
        SkDEBUGCODE(totalInstanceCount += instanceCount);
    }

    SkASSERT(totalInstanceCount == batch.fTotalPrimitiveCounts.*instanceType);
}