/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrPrimitiveProcessor_DEFINED
#define GrPrimitiveProcessor_DEFINED

#include "src/gpu/GrColor.h"
#include "src/gpu/GrNonAtomicRef.h"
#include "src/gpu/GrProcessor.h"
#include "src/gpu/GrShaderVar.h"
#include "src/gpu/GrSwizzle.h"

/*
 * The GrPrimitiveProcessor represents some kind of geometric primitive. This includes the shape
 * of the primitive and the inherent color of the primitive. The GrPrimitiveProcessor is
 * responsible for providing a color and coverage input into the Ganesh rendering pipeline. Through
 * optimization, Ganesh may decide a different color, no color, and / or no coverage are required
 * from the GrPrimitiveProcessor, so the GrPrimitiveProcessor must be able to support this
 * functionality.
 *
 * There are two feedback loops between the GrFragmentProcessors, the GrXferProcessor, and the
 * GrPrimitiveProcessor. These loops run on the CPU to determine known properties of the final
 * color and coverage inputs to the GrXferProcessor in order to perform optimizations that preserve
 * correctness. The GrDrawOp seeds these loops with initial color and coverage in its
 * getProcessorAnalysisInputs implementation. These seed values are processed by the subsequent
 * stages of the rendering pipeline and the output is then fed back into the GrDrawOp in the
 * applyPipelineOptimizations call, where the op can use the information to inform decisions about
 * GrPrimitiveProcessor creation.
 */

class GrGLSLPrimitiveProcessor;
class GrGLSLUniformHandler;

/**
 * GrPrimitiveProcessor defines an interface which all subclasses must implement. All
 * GrPrimitiveProcessors must provide seed color and coverage for the Ganesh color / coverage
 * pipelines, and they must provide some notion of equality.
 *
 * TODO: This class does not really need to be ref counted. Instances should be allocated using
 * GrOpFlushState's arena and destroyed when the arena is torn down.
 */
class GrPrimitiveProcessor : public GrProcessor, public GrNonAtomicRef<GrPrimitiveProcessor> {
public:
    class TextureSampler;

    /** Describes a vertex or instance attribute. */
    class Attribute {
    public:
        constexpr Attribute() = default;
        constexpr Attribute(const char* name,
                            GrVertexAttribType cpuType,
                            GrSLType gpuType)
                : fName(name), fCPUType(cpuType), fGPUType(gpuType) {
            SkASSERT(name && gpuType != kVoid_GrSLType);
        }
        constexpr Attribute(const Attribute&) = default;

        Attribute& operator=(const Attribute&) = default;

        constexpr bool isInitialized() const { return fGPUType != kVoid_GrSLType; }

        constexpr const char* name() const { return fName; }
        constexpr GrVertexAttribType cpuType() const { return fCPUType; }
        constexpr GrSLType gpuType() const { return fGPUType; }

        inline constexpr size_t size() const;
        constexpr size_t sizeAlign4() const { return SkAlign4(this->size()); }

        GrShaderVar asShaderVar() const {
            return {fName, fGPUType, GrShaderVar::TypeModifier::In};
        }

    private:
        const char* fName = nullptr;
        GrVertexAttribType fCPUType = kFloat_GrVertexAttribType;
        GrSLType fGPUType = kVoid_GrSLType;
    };
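    // For illustration, a minimal sketch of declaring an attribute (the name used here is
    // hypothetical, not taken from any particular subclass): a position supplied as two floats
    // per vertex and read in the shader as a float2 would be described as
    //     static constexpr Attribute kPos{"position", kFloat2_GrVertexAttribType,
    //                                     kFloat2_GrSLType};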

    class Iter {
    public:
        Iter() : fCurr(nullptr), fRemaining(0) {}
        Iter(const Iter& iter) : fCurr(iter.fCurr), fRemaining(iter.fRemaining) {}
        Iter& operator= (const Iter& iter) {
            fCurr = iter.fCurr;
            fRemaining = iter.fRemaining;
            return *this;
        }
        Iter(const Attribute* attrs, int count) : fCurr(attrs), fRemaining(count) {
            this->skipUninitialized();
        }

        bool operator!=(const Iter& that) const { return fCurr != that.fCurr; }
        const Attribute& operator*() const { return *fCurr; }
        void operator++() {
            if (fRemaining) {
                fRemaining--;
                fCurr++;
                this->skipUninitialized();
            }
        }

    private:
        void skipUninitialized() {
            if (!fRemaining) {
                fCurr = nullptr;
            } else {
                // fRemaining counts only initialized attributes, so a non-zero count guarantees
                // an initialized attribute lies at or ahead of fCurr.
                while (!fCurr->isInitialized()) {
                    ++fCurr;
                }
            }
        }

        const Attribute* fCurr;
        int fRemaining;
    };

    class AttributeSet {
    public:
        Iter begin() const { return Iter(fAttributes, fCount); }
        Iter end() const { return Iter(); }

    private:
        friend class GrPrimitiveProcessor;

        void init(const Attribute* attrs, int count) {
            fAttributes = attrs;
            fRawCount = count;
            fCount = 0;
            fStride = 0;
            for (int i = 0; i < count; ++i) {
                if (attrs[i].isInitialized()) {
                    fCount++;
                    fStride += attrs[i].sizeAlign4();
                }
            }
        }

        const Attribute* fAttributes = nullptr;
        int fRawCount = 0;
        int fCount = 0;
        size_t fStride = 0;
    };

    GrPrimitiveProcessor(ClassID);

    int numTextureSamplers() const { return fTextureSamplerCnt; }
    const TextureSampler& textureSampler(int index) const;
    int numVertexAttributes() const { return fVertexAttributes.fCount; }
    const AttributeSet& vertexAttributes() const { return fVertexAttributes; }
    int numInstanceAttributes() const { return fInstanceAttributes.fCount; }
    const AttributeSet& instanceAttributes() const { return fInstanceAttributes; }
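    // Both attribute sets can be traversed with a range-based for loop; the iterator visits only
    // the attributes that were actually initialized, e.g. (with "proc" standing in for some
    // GrPrimitiveProcessor instance):
    //     for (const Attribute& attr : proc.vertexAttributes()) { /* ... */ }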

    bool hasVertexAttributes() const { return SkToBool(fVertexAttributes.fCount); }
    bool hasInstanceAttributes() const { return SkToBool(fInstanceAttributes.fCount); }

    /**
     * A common practice is to populate the vertex/instance's memory using an implicit array of
     * structs. In this case, it is best to assert that:
     *     stride == sizeof(struct)
     */
    size_t vertexStride() const { return fVertexAttributes.fStride; }
    size_t instanceStride() const { return fInstanceAttributes.fStride; }
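    // A minimal sketch of that practice (the Vertex struct and attribute names below are
    // hypothetical, not taken from any particular subclass). Each attribute's size is rounded up
    // to a 4-byte multiple when computing the stride, so the struct layout must match:
    //
    //     struct Vertex { SkPoint fPosition; GrColor fColor; };  // 8 + 4 = 12 bytes
    //     static constexpr Attribute kAttrs[] = {
    //             {"position", kFloat2_GrVertexAttribType, kFloat2_GrSLType},
    //             {"color", kUByte4_norm_GrVertexAttribType, kHalf4_GrSLType},
    //     };
    //     this->setVertexAttributes(kAttrs, SK_ARRAY_COUNT(kAttrs));
    //     SkASSERT(this->vertexStride() == sizeof(Vertex));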

    bool willUseTessellationShaders() const {
        return fShaders & (kTessControl_GrShaderFlag | kTessEvaluation_GrShaderFlag);
    }

    bool willUseGeoShader() const {
        return fShaders & kGeometry_GrShaderFlag;
    }

    /**
     * Computes a key for the transforms owned by an FP based on the shader code that will be
     * emitted by the primitive processor to implement them.
     */
    uint32_t computeCoordTransformsKey(const GrFragmentProcessor& fp) const;

    /**
     * Sets a unique key on the GrProcessorKeyBuilder that is directly associated with this
     * geometry processor's GL backend implementation.
     *
     * TODO: A better name for this function would be "compute" instead of "get".
     */
    virtual void getGLSLProcessorKey(const GrShaderCaps&, GrProcessorKeyBuilder*) const = 0;

    void getAttributeKey(GrProcessorKeyBuilder* b) const {
        // Ensure that our CPU and GPU type fields fit together in a 32-bit value, and we never
        // collide with the "uninitialized" value.
        static_assert(kGrVertexAttribTypeCount < (1 << 8), "");
        static_assert(kGrSLTypeCount < (1 << 8), "");

        auto add_attributes = [=](const Attribute* attrs, int attrCount) {
            for (int i = 0; i < attrCount; ++i) {
                b->add32(attrs[i].isInitialized() ? (attrs[i].cpuType() << 16) | attrs[i].gpuType()
                                                  : ~0);
            }
        };
        add_attributes(fVertexAttributes.fAttributes, fVertexAttributes.fRawCount);
        add_attributes(fInstanceAttributes.fAttributes, fInstanceAttributes.fRawCount);
    }
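    // For reference, an initialized attribute with cpuType kFloat2_GrVertexAttribType and gpuType
    // kFloat2_GrSLType contributes the 32-bit word
    // (kFloat2_GrVertexAttribType << 16) | kFloat2_GrSLType to the key, while an uninitialized
    // slot contributes ~0, which cannot collide with any valid type pair.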

    /** Returns a new instance of the appropriate *GL* implementation class
        for the given GrProcessor; caller is responsible for deleting
        the object. */
    virtual GrGLSLPrimitiveProcessor* createGLSLInstance(const GrShaderCaps&) const = 0;

    virtual bool isPathRendering() const { return false; }

    // We use these methods as a temporary back door to inject OpenGL tessellation code. Once
    // tessellation is supported by SkSL we can remove these.
    virtual SkString getTessControlShaderGLSL(const GrGLSLPrimitiveProcessor*,
                                              const char* versionAndExtensionDecls,
                                              const GrGLSLUniformHandler&,
                                              const GrShaderCaps&) const {
        SK_ABORT("Not implemented.");
    }
    virtual SkString getTessEvaluationShaderGLSL(const GrGLSLPrimitiveProcessor*,
                                                 const char* versionAndExtensionDecls,
                                                 const GrGLSLUniformHandler&,
                                                 const GrShaderCaps&) const {
        SK_ABORT("Not implemented.");
    }

protected:
    void setVertexAttributes(const Attribute* attrs, int attrCount) {
        fVertexAttributes.init(attrs, attrCount);
    }
    void setInstanceAttributes(const Attribute* attrs, int attrCount) {
        SkASSERT(attrCount >= 0);
        fInstanceAttributes.init(attrs, attrCount);
    }
    void setWillUseTessellationShaders() {
        fShaders |= kTessControl_GrShaderFlag | kTessEvaluation_GrShaderFlag;
    }
    void setWillUseGeoShader() { fShaders |= kGeometry_GrShaderFlag; }
    void setTextureSamplerCnt(int cnt) {
        SkASSERT(cnt >= 0);
        fTextureSamplerCnt = cnt;
    }

    /**
     * Helper for implementing onTextureSampler(). E.g.:
     * return IthTextureSampler(i, fMyFirstSampler, fMySecondSampler, fMyThirdSampler);
     */
    template <typename... Args>
    static const TextureSampler& IthTextureSampler(int i, const TextureSampler& samp0,
                                                   const Args&... samps) {
        return (0 == i) ? samp0 : IthTextureSampler(i - 1, samps...);
    }
    inline static const TextureSampler& IthTextureSampler(int i);

private:
    virtual const TextureSampler& onTextureSampler(int) const { return IthTextureSampler(0); }

    GrShaderFlags fShaders = kVertex_GrShaderFlag | kFragment_GrShaderFlag;

    AttributeSet fVertexAttributes;
    AttributeSet fInstanceAttributes;

    int fTextureSamplerCnt = 0;
    typedef GrProcessor INHERITED;
};

//////////////////////////////////////////////////////////////////////////////

/**
 * Used to capture the properties of the GrTextureProxies required/expected by a
 * GrPrimitiveProcessor along with an associated GrSamplerState. The actual proxies used are stored
 * in either the fixed or dynamic state arrays. TextureSamplers don't perform any coord
 * manipulation to account for texture origin.
 */
class GrPrimitiveProcessor::TextureSampler {
public:
    TextureSampler() = default;

    TextureSampler(GrSamplerState, const GrBackendFormat&, const GrSwizzle&);

    TextureSampler(const TextureSampler&) = delete;
    TextureSampler& operator=(const TextureSampler&) = delete;

    void reset(GrSamplerState, const GrBackendFormat&, const GrSwizzle&);

    const GrBackendFormat& backendFormat() const { return fBackendFormat; }
    GrTextureType textureType() const { return fBackendFormat.textureType(); }

    GrSamplerState samplerState() const { return fSamplerState; }
    const GrSwizzle& swizzle() const { return fSwizzle; }

    bool isInitialized() const { return fIsInitialized; }

private:
    GrSamplerState fSamplerState;
    GrBackendFormat fBackendFormat;
    GrSwizzle fSwizzle;
    bool fIsInitialized = false;
};
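
// A rough usage sketch (the member and variable names here are hypothetical): a processor
// typically owns TextureSampler members, reports their count via setTextureSamplerCnt(), returns
// them from onTextureSampler(), and initializes each one from the sampled proxy's backend format,
// e.g.
//     fSampler.reset(GrSamplerState(), proxy->backendFormat(), swizzle);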

const GrPrimitiveProcessor::TextureSampler& GrPrimitiveProcessor::IthTextureSampler(int i) {
    SK_ABORT("Illegal texture sampler index");
    static const TextureSampler kBogus;
    return kBogus;
}

//////////////////////////////////////////////////////////////////////////////

/**
 * Returns the size of the attrib type in bytes.
 * This was moved from include/private/GrTypesPriv.h in service of Skia dependents that build
 * with C++11.
 */
static constexpr inline size_t GrVertexAttribTypeSize(GrVertexAttribType type) {
    switch (type) {
        case kFloat_GrVertexAttribType:
            return sizeof(float);
        case kFloat2_GrVertexAttribType:
            return 2 * sizeof(float);
        case kFloat3_GrVertexAttribType:
            return 3 * sizeof(float);
        case kFloat4_GrVertexAttribType:
            return 4 * sizeof(float);
        case kHalf_GrVertexAttribType:
            return sizeof(uint16_t);
        case kHalf2_GrVertexAttribType:
            return 2 * sizeof(uint16_t);
        case kHalf4_GrVertexAttribType:
            return 4 * sizeof(uint16_t);
        case kInt2_GrVertexAttribType:
            return 2 * sizeof(int32_t);
        case kInt3_GrVertexAttribType:
            return 3 * sizeof(int32_t);
        case kInt4_GrVertexAttribType:
            return 4 * sizeof(int32_t);
        case kByte_GrVertexAttribType:
            return 1 * sizeof(char);
        case kByte2_GrVertexAttribType:
            return 2 * sizeof(char);
        case kByte4_GrVertexAttribType:
            return 4 * sizeof(char);
        case kUByte_GrVertexAttribType:
            return 1 * sizeof(char);
        case kUByte2_GrVertexAttribType:
            return 2 * sizeof(char);
        case kUByte4_GrVertexAttribType:
            return 4 * sizeof(char);
        case kUByte_norm_GrVertexAttribType:
            return 1 * sizeof(char);
        case kUByte4_norm_GrVertexAttribType:
            return 4 * sizeof(char);
        case kShort2_GrVertexAttribType:
            return 2 * sizeof(int16_t);
        case kShort4_GrVertexAttribType:
            return 4 * sizeof(int16_t);
        case kUShort2_GrVertexAttribType: // fall through
        case kUShort2_norm_GrVertexAttribType:
            return 2 * sizeof(uint16_t);
        case kInt_GrVertexAttribType:
            return sizeof(int32_t);
        case kUint_GrVertexAttribType:
            return sizeof(uint32_t);
        case kUShort_norm_GrVertexAttribType:
            return sizeof(uint16_t);
        case kUShort4_norm_GrVertexAttribType:
            return 4 * sizeof(uint16_t);
    }
    // GCC fails because SK_ABORT evaluates to non constexpr. clang and cl.exe think this is
    // unreachable and don't complain.
#if defined(__clang__) || !defined(__GNUC__)
    SK_ABORT("Unsupported type conversion");
#endif
    return 0;
}

constexpr size_t GrPrimitiveProcessor::Attribute::size() const {
    return GrVertexAttribTypeSize(fCPUType);
}

#endif