/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrCCPerFlushResources_DEFINED
#define GrCCPerFlushResources_DEFINED

#include "src/core/SkPathPriv.h"
#include "src/gpu/GrNonAtomicRef.h"
#include "src/gpu/ccpr/GrAutoMapVertexBuffer.h"
#include "src/gpu/ccpr/GrCCAtlas.h"
#include "src/gpu/ccpr/GrCCFiller.h"
#include "src/gpu/ccpr/GrCCPathProcessor.h"
#include "src/gpu/ccpr/GrCCStroker.h"
#include "src/gpu/ccpr/GrStencilAtlasOp.h"

class GrCCPathCache;
class GrCCPathCacheEntry;
class GrOctoBounds;
class GrOnFlushResourceProvider;
class GrShape;

/**
 * This struct counts values that help us preallocate buffers for rendered path geometry.
 */
struct GrCCRenderedPathStats {
    int fMaxPointsPerPath = 0;
    int fNumTotalSkPoints = 0;
    int fNumTotalSkVerbs = 0;
    int fNumTotalConicWeights = 0;

    void statPath(const SkPath&);
};
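
// A minimal accumulation sketch (hypothetical caller code; `pathsToFlush` is an assumed
// collection, not part of this header): each path is statted up front so the per-flush
// geometry buffers can be sized before any rendering begins.
//
//     GrCCRenderedPathStats stats;
//     for (const SkPath& path : pathsToFlush) {
//         stats.statPath(path);
//     }
//     // stats.fNumTotalSkPoints and stats.fNumTotalSkVerbs now bound the required buffer
//     // sizes, and stats.fMaxPointsPerPath bounds any single path's point data.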

/**
 * This struct encapsulates the minimum and desired requirements for the GPU resources that CCPR
 * needs in a given flush.
 */
struct GrCCPerFlushResourceSpecs {
    static constexpr int kFillIdx = 0;
    static constexpr int kStrokeIdx = 1;

    int fNumCachedPaths = 0;

    int fNumCopiedPaths[2] = {0, 0};
    GrCCRenderedPathStats fCopyPathStats[2];
    GrCCAtlas::Specs fCopyAtlasSpecs;

    int fNumRenderedPaths[2] = {0, 0};
    int fNumClipPaths = 0;
    GrCCRenderedPathStats fRenderedPathStats[2];
    GrCCAtlas::Specs fRenderedAtlasSpecs;

    bool isEmpty() const {
        return 0 == fNumCachedPaths + fNumCopiedPaths[kFillIdx] + fNumCopiedPaths[kStrokeIdx] +
                    fNumRenderedPaths[kFillIdx] + fNumRenderedPaths[kStrokeIdx] + fNumClipPaths;
    }
    // Converts the copies to normal cached draws.
    void cancelCopies();
};
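
// Sketch of how a caller might accumulate specs ahead of a flush (`shape` and `path` are
// illustrative caller state; kFillIdx/kStrokeIdx come from the struct above):
//
//     GrCCPerFlushResourceSpecs specs;
//     int idx = shape.style().isSimpleFill() ? GrCCPerFlushResourceSpecs::kFillIdx
//                                            : GrCCPerFlushResourceSpecs::kStrokeIdx;
//     ++specs.fNumRenderedPaths[idx];
//     specs.fRenderedPathStats[idx].statPath(path);
//     if (!specs.isEmpty()) {
//         // Build a GrCCPerFlushResources from these specs at flush time.
//     }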

/**
 * This class wraps all the GPU resources that CCPR builds at flush time. It is allocated in CCPR's
 * preFlush() method, and referenced by all the GrCCPerOpsTaskPaths objects that are being flushed.
 * It is deleted in postFlush() once all the flushing GrCCPerOpsTaskPaths objects are deleted.
 */
class GrCCPerFlushResources : public GrNonAtomicRef<GrCCPerFlushResources> {
public:
    GrCCPerFlushResources(
            GrOnFlushResourceProvider*, GrCCAtlas::CoverageType, const GrCCPerFlushResourceSpecs&);

    bool isMapped() const { return fPathInstanceBuffer.isMapped(); }

    GrCCAtlas::CoverageType renderedPathCoverageType() const {
        return fRenderedAtlasStack.coverageType();
    }

    // Copies a coverage-counted path out of the given texture proxy, and into a cached, 8-bit,
    // literal coverage atlas. Updates the cache entry to reference the new atlas.
    void upgradeEntryToLiteralCoverageAtlas(GrCCPathCache*, GrOnFlushResourceProvider*,
                                            GrCCPathCacheEntry*, GrFillRule);

    // These two methods render a path into a temporary coverage count atlas. See
    // GrCCPathProcessor::Instance for a description of the outputs.
    //
    // strokeDevWidth must be 0 for fills, 1 for hairlines, or the stroke width in device-space
    // pixels for non-hairline strokes (implicitly requiring a rigid-body transform).
    GrCCAtlas* renderShapeInAtlas(
            const SkIRect& clipIBounds, const SkMatrix&, const GrShape&, float strokeDevWidth,
            GrOctoBounds*, SkIRect* devIBounds, SkIVector* devToAtlasOffset);
    const GrCCAtlas* renderDeviceSpacePathInAtlas(
            const SkIRect& clipIBounds, const SkPath& devPath, const SkIRect& devPathIBounds,
            GrFillRule fillRule, SkIVector* devToAtlasOffset);
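
    // Hypothetical call-site sketch for the fill case (`clipIBounds`, `viewMatrix`, and `shape`
    // are assumed caller state, not defined in this header):
    //
    //     GrOctoBounds octoBounds;
    //     SkIRect devIBounds;
    //     SkIVector devToAtlasOffset;
    //     if (GrCCAtlas* atlas = resources->renderShapeInAtlas(
    //                 clipIBounds, viewMatrix, shape, /*strokeDevWidth=*/0,
    //                 &octoBounds, &devIBounds, &devToAtlasOffset)) {
    //         // Rasterized successfully; record a draw instance that references `atlas`.
    //     }  // A null return indicates the path didn't make it into an atlas (e.g. clipped out).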

    // Returns the index in instanceBuffer() of the next instance that will be added by
    // appendDrawPathInstance().
    int nextPathInstanceIdx() const { return fNextPathInstanceIdx; }

    // Appends an instance to instanceBuffer() that will draw a path to the destination render
    // target. The caller is responsible for calling set() on the returned instance, for keeping
    // track of its atlas and index (see nextPathInstanceIdx()), and for issuing the actual draw
    // call.
    GrCCPathProcessor::Instance& appendDrawPathInstance() {
        SkASSERT(this->isMapped());
        SkASSERT(fNextPathInstanceIdx < fEndPathInstance);
        return fPathInstanceBuffer[fNextPathInstanceIdx++];
    }
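
    // Sketch of the expected call pattern (the argument list for set() is illustrative only):
    //
    //     int idx = resources->nextPathInstanceIdx();
    //     resources->appendDrawPathInstance().set(octoBounds, devToAtlasOffset, color, fillRule);
    //     // Remember `idx` and the current atlas so the eventual draw op can bind the correct
    //     // atlas texture and address this instance within instanceBuffer().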

    // Finishes off the GPU buffers and renders the atlas(es).
    bool finalize(GrOnFlushResourceProvider*);

    // Accessors used by draw calls, once the resources have been finalized.
    const GrCCFiller& filler() const { SkASSERT(!this->isMapped()); return fFiller; }
    const GrCCStroker& stroker() const { SkASSERT(!this->isMapped()); return fStroker; }
    const GrGpuBuffer* indexBuffer() const {
        SkASSERT(!this->isMapped());
        return fIndexBuffer.get();
    }
    const GrGpuBuffer* instanceBuffer() const {
        SkASSERT(!this->isMapped());
        return fPathInstanceBuffer.gpuBuffer();
    }
    const GrGpuBuffer* vertexBuffer() const {
        SkASSERT(!this->isMapped());
        return fVertexBuffer.get();
    }
    const GrGpuBuffer* stencilResolveBuffer() const {
        SkASSERT(!this->isMapped());
        return fStencilResolveBuffer.gpuBuffer();
    }
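
    // Typical consumption order (a sketch, not mandated by this header): finalize() is called
    // once to unmap the buffers and render the atlases, after which the accessors above feed
    // the draw calls:
    //
    //     if (resources->finalize(onFlushRP)) {
    //         // e.g. a path draw binds resources->indexBuffer(), resources->vertexBuffer(),
    //         // and resources->instanceBuffer(), addressing instances by the
    //         // nextPathInstanceIdx() values recorded earlier.
    //     }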

private:
    void recordCopyPathInstance(const GrCCPathCacheEntry&, const SkIVector& newAtlasOffset,
                                GrFillRule, sk_sp<GrTextureProxy> srcProxy);
    void placeRenderedPathInAtlas(
            const SkIRect& clippedPathIBounds, GrScissorTest, SkIVector* devToAtlasOffset);

    // In MSAA mode we record an additional instance per path that draws a rectangle on top of its
    // corresponding path in the atlas and resolves stencil winding values to coverage.
    void recordStencilResolveInstance(
            const SkIRect& clippedPathIBounds, const SkIVector& devToAtlasOffset, GrFillRule);

    const SkAutoSTArray<32, SkPoint> fLocalDevPtsBuffer;
    GrCCFiller fFiller;
    GrCCStroker fStroker;
    GrCCAtlasStack fCopyAtlasStack;
    GrCCAtlasStack fRenderedAtlasStack;

    const sk_sp<const GrGpuBuffer> fIndexBuffer;
    const sk_sp<const GrGpuBuffer> fVertexBuffer;

    GrTAutoMapVertexBuffer<GrCCPathProcessor::Instance> fPathInstanceBuffer;
    int fNextCopyInstanceIdx;
    SkDEBUGCODE(int fEndCopyInstance);
    int fNextPathInstanceIdx;
    int fBasePathInstanceIdx;
    SkDEBUGCODE(int fEndPathInstance);

    // Represents a range of copy-path instances that all share the same source proxy (i.e., draw
    // instances that copy a path mask from a 16-bit coverage count atlas into an 8-bit literal
    // coverage atlas).
    struct CopyPathRange {
        sk_sp<GrTextureProxy> fSrcProxy;
        int fCount;
    };
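
    // A plausible coalescing scheme this grouping enables (an assumption about the .cpp, not
    // verified here): consecutive copy instances that share a source proxy extend the last
    // range instead of appending a new one.
    //
    //     if (!fCopyPathRanges.empty() && fCopyPathRanges.back().fSrcProxy == srcProxy) {
    //         ++fCopyPathRanges.back().fCount;
    //     } else {
    //         fCopyPathRanges.push_back({std::move(srcProxy), 1});
    //     }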

    SkSTArray<4, CopyPathRange> fCopyPathRanges;
    int fCurrCopyAtlasRangesIdx = 0;

    // This is a list of coverage count atlas textures that have been invalidated because we
    // copied their paths into new 8-bit literal coverage atlases. Since copying is finished by
    // the time we begin rendering new atlases, we can recycle these textures for the rendered
    // atlases rather than allocating new texture objects upon instantiation.
    SkSTArray<2, sk_sp<GrTexture>> fRecyclableAtlasTextures;

    // Used in MSAA mode to make an intermediate draw that resolves stencil winding values to
    // coverage.
    GrTAutoMapVertexBuffer<GrStencilAtlasOp::ResolveRectInstance> fStencilResolveBuffer;
    int fNextStencilResolveInstanceIdx = 0;
    SkDEBUGCODE(int fEndStencilResolveInstance);

public:
#ifdef SK_DEBUG
    void debugOnly_didReuseRenderedPath() {
        if (GrCCAtlas::CoverageType::kA8_Multisample == this->renderedPathCoverageType()) {
            --fEndStencilResolveInstance;
        }
    }
#endif
    const GrTexture* testingOnly_frontCopyAtlasTexture() const;
    const GrTexture* testingOnly_frontRenderedAtlasTexture() const;
};

inline void GrCCRenderedPathStats::statPath(const SkPath& path) {
    fMaxPointsPerPath = std::max(fMaxPointsPerPath, path.countPoints());
    fNumTotalSkPoints += path.countPoints();
    fNumTotalSkVerbs += path.countVerbs();
    fNumTotalConicWeights += SkPathPriv::ConicWeightCnt(path);
}

#endif