1/*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#ifndef GrOp_DEFINED
9#define GrOp_DEFINED
10
11#include "include/core/SkMatrix.h"
12#include "include/core/SkRect.h"
13#include "include/core/SkString.h"
14#include "include/gpu/GrRecordingContext.h"
15#include "src/gpu/GrGpuResource.h"
16#include "src/gpu/GrNonAtomicRef.h"
17#include "src/gpu/GrTracing.h"
18#include "src/gpu/GrXferProcessor.h"
19#include <atomic>
20#include <new>
21
22class GrAppliedClip;
23class GrCaps;
24class GrOpFlushState;
25class GrOpsRenderPass;
26
27/**
28 * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reordering and to
29 * minimize draw calls, Ganesh does not generate geometry inline with draw calls. Instead, it
30 * captures the arguments to the draw and then generates the geometry when flushing. This gives GrOp
31 * subclasses complete freedom to decide how/when to combine in order to produce fewer draw calls
32 * and minimize state changes.
33 *
34 * Ops of the same subclass may be merged or chained using combineIfPossible. When two ops merge,
35 * one takes on the union of the data and the other is left empty. The merged op becomes responsible
36 * for drawing the data from both the original ops. When ops are chained each op maintains its own
37 * data but they are linked in a list and the head op becomes responsible for executing the work for
38 * the chain.
39 *
40 * It is required that chainability is transitive. Moreover, if op A is able to merge with B then
41 * it must be the case that any op that can chain with A will either merge or chain with any op
42 * that can chain to B.
43 *
44 * The bounds of the op must contain all the vertices in device space *irrespective* of the clip.
45 * The bounds are used in determining which clip elements must be applied and thus the bounds cannot
46 * in turn depend upon the clip.
47 */
// Debug-spew switches: when GR_OP_SPEW is enabled, GrOP_SPEW(code) compiles the wrapped
// statement(s) and GrOP_INFO forwards to SkDebugf; when disabled both expand to nothing.
#define GR_OP_SPEW 0
#if GR_OP_SPEW
    #define GrOP_SPEW(code) code
    #define GrOP_INFO(...) SkDebugf(__VA_ARGS__)
#else
    #define GrOP_SPEW(code)
    #define GrOP_INFO(...)
#endif

// Print out op information at flush time
#define GR_FLUSH_TIME_OP_SPEW 0

// A helper macro to generate a class static id. Expands to a ClassID() static method whose
// id is lazily assigned (via GenOpClassID) on first call and is unique per GrOp subclass.
#define DEFINE_OP_CLASS_ID \
    static uint32_t ClassID() { \
        static uint32_t kClassID = GenOpClassID(); \
        return kClassID; \
    }
66
class GrOp : private SkNoncopyable {
public:
    virtual ~GrOp() = default;

    /** Human-readable name for this op, used by tracing and debug output. */
    virtual const char* name() const = 0;

    /** Callback type used by visitProxies() to report each proxy an op references. */
    using VisitProxyFunc = std::function<void(GrSurfaceProxy*, GrMipmapped)>;

    virtual void visitProxies(const VisitProxyFunc&) const {
        // This default implementation assumes the op has no proxies
    }

    /** Result of attempting to combine two ops via combineIfPossible(). */
    enum class CombineResult {
        /**
         * The op that combineIfPossible was called on now represents its own work plus that of
         * the passed op. The passed op should be destroyed without being flushed. Currently it
         * is not legal to merge an op passed to combineIfPossible() if the passed op is already
         * in a chain (though the op on which combineIfPossible() was called may be).
         */
        kMerged,
        /**
         * The caller *may* (but is not required to) chain these ops together. If they are
         * chained then prepare() and execute() will be called on the head op but not the other
         * ops in the chain. The head op will prepare and execute on behalf of all the ops in
         * the chain.
         */
        kMayChain,
        /**
         * The ops cannot be combined.
         */
        kCannotCombine
    };

    // The arenas are the same as what was available when the op was created.
    CombineResult combineIfPossible(GrOp* that, GrRecordingContext::Arenas* arena,
                                    const GrCaps& caps);

    /** Device-space bounds; asserts that setBounds/setTransformedBounds was called first. */
    const SkRect& bounds() const {
        SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags);
        return fBounds;
    }

    /** Replaces the bounds with bounds that have already been intersected with the clip. */
    void setClippedBounds(const SkRect& clippedBounds) {
        fBounds = clippedBounds;
        // The clipped bounds already incorporate any effect of the bounds flags.
        fBoundsFlags = 0;
    }

    /** True if the op was flagged as producing geometry that bloats beyond its bounds for AA. */
    bool hasAABloat() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag & kAABloat_BoundsFlag);
    }

    /** True if the op was flagged as drawing hairline/zero-area geometry. */
    bool hasZeroArea() const {
        SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag);
        return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag);
    }

#ifdef SK_DEBUG
    // All GrOp-derived classes should be allocated in and deleted from a GrMemoryPool
    void* operator new(size_t size);
    void operator delete(void* target);

    // Placement forms simply forward to the global placement operators.
    void* operator new(size_t size, void* placement) {
        return ::operator new(size, placement);
    }
    void operator delete(void* target, void* placement) {
        ::operator delete(target, placement);
    }
#endif

    /**
     * Helper for safely down-casting to a GrOp subclass
     */
    template <typename T> const T& cast() const {
        SkASSERT(T::ClassID() == this->classID());
        return *static_cast<const T*>(this);
    }

    template <typename T> T* cast() {
        SkASSERT(T::ClassID() == this->classID());
        return static_cast<T*>(this);
    }

    /** Id shared by all instances of the same concrete subclass (see DEFINE_OP_CLASS_ID). */
    uint32_t classID() const { SkASSERT(kIllegalOpID != fClassID); return fClassID; }

    // We lazily initialize the uniqueID because currently the only user is GrAuditTrail
    uint32_t uniqueID() const {
        if (kIllegalOpID == fUniqueID) {
            fUniqueID = GenOpID();
        }
        return fUniqueID;
    }

    /**
     * This can optionally be called before 'prepare' (but after sorting). Each op that overrides
     * onPrePrepare must be prepared to handle both cases (when onPrePrepare has been called
     * ahead of time and when it has not been called).
     */
    void prePrepare(GrRecordingContext* context, GrSurfaceProxyView* dstView, GrAppliedClip* clip,
                    const GrXferProcessor::DstProxyView& dstProxyView) {
        this->onPrePrepare(context, dstView, clip, dstProxyView);
    }

    /**
     * Called prior to executing. The op should perform any resource creation or data transfers
     * necessary before execute() is called.
     */
    void prepare(GrOpFlushState* state) { this->onPrepare(state); }

    /** Issues the op's commands to GrGpu. */
    void execute(GrOpFlushState* state, const SkRect& chainBounds) {
        TRACE_EVENT0("skia.gpu", name());
        this->onExecute(state, chainBounds);
    }

    /** Used for spewing information about ops when debugging. */
#if GR_TEST_UTILS
    virtual SkString dumpInfo() const final {
        return SkStringPrintf("%s\nOpBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]",
                              this->onDumpInfo().c_str(), fBounds.fLeft, fBounds.fTop,
                              fBounds.fRight, fBounds.fBottom);
    }
#endif

    /**
     * A helper for iterating over an op chain in a range for loop that also downcasts to a GrOp
     * subclass. E.g.:
     *     for (MyOpSubClass& op : ChainRange<MyOpSubClass>(this)) {
     *         // ...
     *     }
     */
    template <typename OpSubclass = GrOp> class ChainRange {
    private:
        // Minimal forward iterator over the chain; walks nextInChain() until null.
        class Iter {
        public:
            explicit Iter(const OpSubclass* head) : fCurr(head) {}
            inline Iter& operator++() {
                return *this = Iter(static_cast<const OpSubclass*>(fCurr->nextInChain()));
            }
            const OpSubclass& operator*() const { return *fCurr; }
            bool operator!=(const Iter& that) const { return fCurr != that.fCurr; }

        private:
            const OpSubclass* fCurr;
        };
        const OpSubclass* fHead;

    public:
        explicit ChainRange(const OpSubclass* head) : fHead(head) {}
        Iter begin() { return Iter(fHead); }
        Iter end() { return Iter(nullptr); }
    };

    /**
     * Concatenates two op chains. This op must be a tail and the passed op must be a head. The ops
     * must be of the same subclass.
     */
    void chainConcat(std::unique_ptr<GrOp>);
    /** Returns true if this is the head of a chain (including a length 1 chain). */
    bool isChainHead() const { return !fPrevInChain; }
    /** Returns true if this is the tail of a chain (including a length 1 chain). */
    bool isChainTail() const { return !fNextInChain; }
    /** The next op in the chain. */
    GrOp* nextInChain() const { return fNextInChain.get(); }
    /** The previous op in the chain. */
    GrOp* prevInChain() const { return fPrevInChain; }
    /**
     * Cuts the chain after this op. The returned op is the op that was previously next in the
     * chain or null if this was already a tail.
     */
    std::unique_ptr<GrOp> cutChain();
    SkDEBUGCODE(void validateChain(GrOp* expectedTail = nullptr) const);

#ifdef SK_DEBUG
    virtual void validate() const {}
#endif

protected:
    // Subclasses pass their DEFINE_OP_CLASS_ID-generated id here.
    GrOp(uint32_t classID);

    /**
     * Indicates that the op will produce geometry that extends beyond its bounds for the
     * purpose of ensuring that the fragment shader runs on partially covered pixels for
     * non-MSAA antialiasing.
     */
    enum class HasAABloat : bool {
        kNo = false,
        kYes = true
    };
    /**
     * Indicates that the geometry being drawn is a hairline stroke. A point that is drawn in
     * device space is also considered a hairline.
     */
    enum class IsHairline : bool {
        kNo = false,
        kYes = true
    };

    /** Sets the device-space bounds along with the AA-bloat/zero-area flags. */
    void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsHairline zeroArea) {
        fBounds = newBounds;
        this->setBoundsFlags(aabloat, zeroArea);
    }
    /** Sets the bounds to srcBounds mapped through the matrix m, plus the flags. */
    void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m,
                              HasAABloat aabloat, IsHairline zeroArea) {
        m.mapRect(&fBounds, srcBounds);
        this->setBoundsFlags(aabloat, zeroArea);
    }
    /** Sets the bounds to cover the entire proxy, with no AA bloat and non-hairline flags. */
    void makeFullScreen(GrSurfaceProxy* proxy) {
        this->setBounds(proxy->getBoundsRect(), HasAABloat::kNo, IsHairline::kNo);
    }

    /** Generates a new subclass id (used by DEFINE_OP_CLASS_ID). */
    static uint32_t GenOpClassID() { return GenID(&gCurrOpClassID); }

private:
    /** Unions this op's bounds and bounds flags with those of 'that' (merge/chain support). */
    void joinBounds(const GrOp& that) {
        if (that.hasAABloat()) {
            fBoundsFlags |= kAABloat_BoundsFlag;
        }
        if (that.hasZeroArea()) {
            fBoundsFlags |= kZeroArea_BoundsFlag;
        }
        return fBounds.joinPossiblyEmptyRect(that.fBounds);
    }

    // Default: ops of this class never merge or chain.
    virtual CombineResult onCombineIfPossible(GrOp*, GrRecordingContext::Arenas*, const GrCaps&) {
        return CombineResult::kCannotCombine;
    }

    // TODO: the parameters to onPrePrepare mirror GrOpFlushState::OpArgs - fuse the two?
    virtual void onPrePrepare(GrRecordingContext*,
                              const GrSurfaceProxyView* writeView,
                              GrAppliedClip*,
                              const GrXferProcessor::DstProxyView&) = 0;
    virtual void onPrepare(GrOpFlushState*) = 0;
    // If this op is chained then chainBounds is the union of the bounds of all ops in the chain.
    // Otherwise, this op's bounds.
    virtual void onExecute(GrOpFlushState*, const SkRect& chainBounds) = 0;
#if GR_TEST_UTILS
    virtual SkString onDumpInfo() const { return SkString(); }
#endif

    // Post-increments the counter; aborts if the fetched value is 0 (i.e. the counter wrapped).
    static uint32_t GenID(std::atomic<uint32_t>* idCounter) {
        uint32_t id = (*idCounter)++;
        if (id == 0) {
            SK_ABORT("This should never wrap as it should only be called once for each GrOp "
                     "subclass.");
        }
        return id;
    }

    void setBoundsFlags(HasAABloat aabloat, IsHairline zeroArea) {
        fBoundsFlags = 0;
        fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0;
        fBoundsFlags |= (IsHairline ::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0;
    }

    enum {
        kIllegalOpID = 0,    // sentinel: "no id assigned yet"
    };

    enum BoundsFlags {
        kAABloat_BoundsFlag = 0x1,
        kZeroArea_BoundsFlag = 0x2,
        SkDEBUGCODE(kUninitialized_BoundsFlag = 0x4)
    };

    std::unique_ptr<GrOp> fNextInChain;             // owns the remainder of the chain
    GrOp* fPrevInChain = nullptr;                   // non-owning back pointer
    // NOTE(review): class ids are generated as uint32_t but stored narrowed to 16 bits;
    // presumably the number of GrOp subclasses stays below 2^16 — confirm.
    const uint16_t fClassID;
    uint16_t fBoundsFlags;

    static uint32_t GenOpID() { return GenID(&gCurrOpUniqueID); }
    mutable uint32_t fUniqueID = SK_InvalidUniqueID; // lazily assigned in uniqueID()
    SkRect fBounds;

    static std::atomic<uint32_t> gCurrOpUniqueID;
    static std::atomic<uint32_t> gCurrOpClassID;
};
345
346#endif
347