1 | /* |
2 | * Copyright 2015 Google Inc. |
3 | * |
4 | * Use of this source code is governed by a BSD-style license that can be |
5 | * found in the LICENSE file. |
6 | */ |
7 | |
8 | #ifndef GrOp_DEFINED |
9 | #define GrOp_DEFINED |
10 | |
#include "include/core/SkMatrix.h"
#include "include/core/SkRect.h"
#include "include/core/SkString.h"
#include "include/private/GrRecordingContext.h"
#include "src/gpu/GrGpuResource.h"
#include "src/gpu/GrNonAtomicRef.h"
#include "src/gpu/GrTracing.h"
#include "src/gpu/GrXferProcessor.h"
#include <atomic>
#include <functional>
#include <new>
21 | |
22 | class GrAppliedClip; |
23 | class GrCaps; |
24 | class GrOpFlushState; |
25 | class GrOpsRenderPass; |
26 | |
27 | /** |
28 | * GrOp is the base class for all Ganesh deferred GPU operations. To facilitate reordering and to |
29 | * minimize draw calls, Ganesh does not generate geometry inline with draw calls. Instead, it |
30 | * captures the arguments to the draw and then generates the geometry when flushing. This gives GrOp |
31 | * subclasses complete freedom to decide how/when to combine in order to produce fewer draw calls |
32 | * and minimize state changes. |
33 | * |
34 | * Ops of the same subclass may be merged or chained using combineIfPossible. When two ops merge, |
35 | * one takes on the union of the data and the other is left empty. The merged op becomes responsible |
36 | * for drawing the data from both the original ops. When ops are chained each op maintains its own |
37 | * data but they are linked in a list and the head op becomes responsible for executing the work for |
38 | * the chain. |
39 | * |
40 | * It is required that chainability is transitive. Moreover, if op A is able to merge with B then |
41 | * it must be the case that any op that can chain with A will either merge or chain with any op |
42 | * that can chain to B. |
43 | * |
44 | * The bounds of the op must contain all the vertices in device space *irrespective* of the clip. |
45 | * The bounds are used in determining which clip elements must be applied and thus the bounds cannot |
46 | * in turn depend upon the clip. |
47 | */ |
// Compile-time switch for verbose GrOp debug logging. When 0 (the default), the
// GrOP_SPEW/GrOP_INFO macros below compile away to nothing.
#define GR_OP_SPEW 0
#if GR_OP_SPEW
#define GrOP_SPEW(code) code
#define GrOP_INFO(...) SkDebugf(__VA_ARGS__)
#else
#define GrOP_SPEW(code)
#define GrOP_INFO(...)
#endif

// Print out op information at flush time
#define GR_FLUSH_TIME_OP_SPEW 0

// A helper macro to generate a class static id. Each GrOp subclass that expands this gets a
// unique id, lazily assigned on the first ClassID() call (via GrOp::GenOpClassID) and used by
// GrOp::classID()/cast<>() for safe down-casting.
#define DEFINE_OP_CLASS_ID \
    static uint32_t ClassID() { \
        static uint32_t kClassID = GenOpClassID(); \
        return kClassID; \
    }
66 | |
67 | class GrOp : private SkNoncopyable { |
68 | public: |
69 | virtual ~GrOp() = default; |
70 | |
71 | virtual const char* name() const = 0; |
72 | |
73 | using VisitProxyFunc = std::function<void(GrSurfaceProxy*, GrMipMapped)>; |
74 | |
75 | virtual void visitProxies(const VisitProxyFunc&) const { |
76 | // This default implementation assumes the op has no proxies |
77 | } |
78 | |
79 | enum class CombineResult { |
80 | /** |
81 | * The op that combineIfPossible was called on now represents its own work plus that of |
82 | * the passed op. The passed op should be destroyed without being flushed. Currently it |
83 | * is not legal to merge an op passed to combineIfPossible() the passed op is already in a |
84 | * chain (though the op on which combineIfPossible() was called may be). |
85 | */ |
86 | kMerged, |
87 | /** |
88 | * The caller *may* (but is not required) to chain these ops together. If they are chained |
89 | * then prepare() and execute() will be called on the head op but not the other ops in the |
90 | * chain. The head op will prepare and execute on behalf of all the ops in the chain. |
91 | */ |
92 | kMayChain, |
93 | /** |
94 | * The ops cannot be combined. |
95 | */ |
96 | kCannotCombine |
97 | }; |
98 | |
99 | // The arenas are the same as what was available when the op was created. |
100 | CombineResult combineIfPossible(GrOp* that, GrRecordingContext::Arenas* arena, |
101 | const GrCaps& caps); |
102 | |
103 | const SkRect& bounds() const { |
104 | SkASSERT(kUninitialized_BoundsFlag != fBoundsFlags); |
105 | return fBounds; |
106 | } |
107 | |
108 | void setClippedBounds(const SkRect& clippedBounds) { |
109 | fBounds = clippedBounds; |
110 | // The clipped bounds already incorporate any effect of the bounds flags. |
111 | fBoundsFlags = 0; |
112 | } |
113 | |
114 | bool hasAABloat() const { |
115 | SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag); |
116 | return SkToBool(fBoundsFlags & kAABloat_BoundsFlag); |
117 | } |
118 | |
119 | bool hasZeroArea() const { |
120 | SkASSERT(fBoundsFlags != kUninitialized_BoundsFlag); |
121 | return SkToBool(fBoundsFlags & kZeroArea_BoundsFlag); |
122 | } |
123 | |
124 | #ifdef SK_DEBUG |
125 | // All GrOp-derived classes should be allocated in and deleted from a GrMemoryPool |
126 | void* operator new(size_t size); |
127 | void operator delete(void* target); |
128 | |
129 | void* operator new(size_t size, void* placement) { |
130 | return ::operator new(size, placement); |
131 | } |
132 | void operator delete(void* target, void* placement) { |
133 | ::operator delete(target, placement); |
134 | } |
135 | #endif |
136 | |
137 | /** |
138 | * Helper for safely down-casting to a GrOp subclass |
139 | */ |
140 | template <typename T> const T& cast() const { |
141 | SkASSERT(T::ClassID() == this->classID()); |
142 | return *static_cast<const T*>(this); |
143 | } |
144 | |
145 | template <typename T> T* cast() { |
146 | SkASSERT(T::ClassID() == this->classID()); |
147 | return static_cast<T*>(this); |
148 | } |
149 | |
150 | uint32_t classID() const { SkASSERT(kIllegalOpID != fClassID); return fClassID; } |
151 | |
152 | // We lazily initialize the uniqueID because currently the only user is GrAuditTrail |
153 | uint32_t uniqueID() const { |
154 | if (kIllegalOpID == fUniqueID) { |
155 | fUniqueID = GenOpID(); |
156 | } |
157 | return fUniqueID; |
158 | } |
159 | |
160 | /** |
161 | * This can optionally be called before 'prepare' (but after sorting). Each op that overrides |
162 | * onPrePrepare must be prepared to handle both cases (when onPrePrepare has been called |
163 | * ahead of time and when it has not been called). |
164 | */ |
165 | void prePrepare(GrRecordingContext* context, GrSurfaceProxyView* dstView, GrAppliedClip* clip, |
166 | const GrXferProcessor::DstProxyView& dstProxyView) { |
167 | this->onPrePrepare(context, dstView, clip, dstProxyView); |
168 | } |
169 | |
170 | /** |
171 | * Called prior to executing. The op should perform any resource creation or data transfers |
172 | * necessary before execute() is called. |
173 | */ |
174 | void prepare(GrOpFlushState* state) { this->onPrepare(state); } |
175 | |
176 | /** Issues the op's commands to GrGpu. */ |
177 | void execute(GrOpFlushState* state, const SkRect& chainBounds) { |
178 | TRACE_EVENT0("skia.gpu" , name()); |
179 | this->onExecute(state, chainBounds); |
180 | } |
181 | |
182 | /** Used for spewing information about ops when debugging. */ |
183 | #ifdef SK_DEBUG |
184 | virtual SkString dumpInfo() const { |
185 | SkString string; |
186 | string.appendf("OpBounds: [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n" , |
187 | fBounds.fLeft, fBounds.fTop, fBounds.fRight, fBounds.fBottom); |
188 | return string; |
189 | } |
190 | #else |
191 | SkString dumpInfo() const { return SkString("<Op information unavailable>" ); } |
192 | #endif |
193 | |
194 | /** |
195 | * A helper for iterating over an op chain in a range for loop that also downcasts to a GrOp |
196 | * subclass. E.g.: |
197 | * for (MyOpSubClass& op : ChainRange<MyOpSubClass>(this)) { |
198 | * // ... |
199 | * } |
200 | */ |
201 | template <typename OpSubclass = GrOp> class ChainRange { |
202 | private: |
203 | class Iter { |
204 | public: |
205 | explicit Iter(const OpSubclass* head) : fCurr(head) {} |
206 | inline Iter& operator++() { |
207 | return *this = Iter(static_cast<const OpSubclass*>(fCurr->nextInChain())); |
208 | } |
209 | const OpSubclass& operator*() const { return *fCurr; } |
210 | bool operator!=(const Iter& that) const { return fCurr != that.fCurr; } |
211 | |
212 | private: |
213 | const OpSubclass* fCurr; |
214 | }; |
215 | const OpSubclass* fHead; |
216 | |
217 | public: |
218 | explicit ChainRange(const OpSubclass* head) : fHead(head) {} |
219 | Iter begin() { return Iter(fHead); } |
220 | Iter end() { return Iter(nullptr); } |
221 | }; |
222 | |
223 | /** |
224 | * Concatenates two op chains. This op must be a tail and the passed op must be a head. The ops |
225 | * must be of the same subclass. |
226 | */ |
227 | void chainConcat(std::unique_ptr<GrOp>); |
228 | /** Returns true if this is the head of a chain (including a length 1 chain). */ |
229 | bool isChainHead() const { return !fPrevInChain; } |
230 | /** Returns true if this is the tail of a chain (including a length 1 chain). */ |
231 | bool isChainTail() const { return !fNextInChain; } |
232 | /** The next op in the chain. */ |
233 | GrOp* nextInChain() const { return fNextInChain.get(); } |
234 | /** The previous op in the chain. */ |
235 | GrOp* prevInChain() const { return fPrevInChain; } |
236 | /** |
237 | * Cuts the chain after this op. The returned op is the op that was previously next in the |
238 | * chain or null if this was already a tail. |
239 | */ |
240 | std::unique_ptr<GrOp> cutChain(); |
241 | SkDEBUGCODE(void validateChain(GrOp* expectedTail = nullptr) const); |
242 | |
243 | #ifdef SK_DEBUG |
244 | virtual void validate() const {} |
245 | #endif |
246 | |
247 | protected: |
248 | GrOp(uint32_t classID); |
249 | |
250 | /** |
251 | * Indicates that the op will produce geometry that extends beyond its bounds for the |
252 | * purpose of ensuring that the fragment shader runs on partially covered pixels for |
253 | * non-MSAA antialiasing. |
254 | */ |
255 | enum class HasAABloat : bool { |
256 | kNo = false, |
257 | kYes = true |
258 | }; |
259 | /** |
260 | * Indicates that the geometry being drawn in a hairline stroke. A point that is drawn in device |
261 | * space is also considered a hairline. |
262 | */ |
263 | enum class IsHairline : bool { |
264 | kNo = false, |
265 | kYes = true |
266 | }; |
267 | |
268 | void setBounds(const SkRect& newBounds, HasAABloat aabloat, IsHairline zeroArea) { |
269 | fBounds = newBounds; |
270 | this->setBoundsFlags(aabloat, zeroArea); |
271 | } |
272 | void setTransformedBounds(const SkRect& srcBounds, const SkMatrix& m, |
273 | HasAABloat aabloat, IsHairline zeroArea) { |
274 | m.mapRect(&fBounds, srcBounds); |
275 | this->setBoundsFlags(aabloat, zeroArea); |
276 | } |
277 | void makeFullScreen(GrSurfaceProxy* proxy) { |
278 | this->setBounds(proxy->getBoundsRect(), HasAABloat::kNo, IsHairline::kNo); |
279 | } |
280 | |
281 | static uint32_t GenOpClassID() { return GenID(&gCurrOpClassID); } |
282 | |
283 | private: |
284 | void joinBounds(const GrOp& that) { |
285 | if (that.hasAABloat()) { |
286 | fBoundsFlags |= kAABloat_BoundsFlag; |
287 | } |
288 | if (that.hasZeroArea()) { |
289 | fBoundsFlags |= kZeroArea_BoundsFlag; |
290 | } |
291 | return fBounds.joinPossiblyEmptyRect(that.fBounds); |
292 | } |
293 | |
294 | virtual CombineResult onCombineIfPossible(GrOp*, GrRecordingContext::Arenas*, const GrCaps&) { |
295 | return CombineResult::kCannotCombine; |
296 | } |
297 | |
298 | // TODO: the parameters to onPrePrepare mirror GrOpFlushState::OpArgs - fuse the two? |
299 | virtual void onPrePrepare(GrRecordingContext*, |
300 | const GrSurfaceProxyView* writeView, |
301 | GrAppliedClip*, |
302 | const GrXferProcessor::DstProxyView&) = 0; |
303 | virtual void onPrepare(GrOpFlushState*) = 0; |
304 | // If this op is chained then chainBounds is the union of the bounds of all ops in the chain. |
305 | // Otherwise, this op's bounds. |
306 | virtual void onExecute(GrOpFlushState*, const SkRect& chainBounds) = 0; |
307 | |
308 | static uint32_t GenID(std::atomic<uint32_t>* idCounter) { |
309 | uint32_t id = (*idCounter)++; |
310 | if (id == 0) { |
311 | SK_ABORT("This should never wrap as it should only be called once for each GrOp " |
312 | "subclass." ); |
313 | } |
314 | return id; |
315 | } |
316 | |
317 | void setBoundsFlags(HasAABloat aabloat, IsHairline zeroArea) { |
318 | fBoundsFlags = 0; |
319 | fBoundsFlags |= (HasAABloat::kYes == aabloat) ? kAABloat_BoundsFlag : 0; |
320 | fBoundsFlags |= (IsHairline ::kYes == zeroArea) ? kZeroArea_BoundsFlag : 0; |
321 | } |
322 | |
323 | enum { |
324 | kIllegalOpID = 0, |
325 | }; |
326 | |
327 | enum BoundsFlags { |
328 | kAABloat_BoundsFlag = 0x1, |
329 | kZeroArea_BoundsFlag = 0x2, |
330 | SkDEBUGCODE(kUninitialized_BoundsFlag = 0x4) |
331 | }; |
332 | |
333 | std::unique_ptr<GrOp> fNextInChain; |
334 | GrOp* fPrevInChain = nullptr; |
335 | const uint16_t fClassID; |
336 | uint16_t fBoundsFlags; |
337 | |
338 | static uint32_t GenOpID() { return GenID(&gCurrOpUniqueID); } |
339 | mutable uint32_t fUniqueID = SK_InvalidUniqueID; |
340 | SkRect fBounds; |
341 | |
342 | static std::atomic<uint32_t> gCurrOpUniqueID; |
343 | static std::atomic<uint32_t> gCurrOpClassID; |
344 | }; |
345 | |
346 | #endif |
347 | |