/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrTypes_DEFINED
#define GrTypes_DEFINED

#include "include/core/SkMath.h"
#include "include/core/SkTypes.h"
#include "include/gpu/GrConfig.h"

class GrBackendSemaphore;
class SkImage;
class SkSurface;

////////////////////////////////////////////////////////////////////////////////

/**
 * Defines overloaded bitwise operators to make it easier to use an enum as a
 * bitfield.
 */
#define GR_MAKE_BITFIELD_OPS(X) \
    inline X operator |(X a, X b) { \
        return (X) (+a | +b); \
    } \
    inline X& operator |=(X& a, X b) { \
        return (a = a | b); \
    } \
    inline X operator &(X a, X b) { \
        return (X) (+a & +b); \
    } \
    inline X& operator &=(X& a, X b) { \
        return (a = a & b); \
    } \
    template <typename T> \
    inline X operator &(T a, X b) { \
        return (X) (+a & +b); \
    } \
    template <typename T> \
    inline X operator &(X a, T b) { \
        return (X) (+a & +b); \
    }

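/**
 * A minimal usage sketch (GrExampleFlags and its values are hypothetical, not part of this
 * header): the macro gives a plain enum the |, |=, &, and &= operators so its values compose
 * as a bitfield.
 *
 *   enum GrExampleFlags {
 *       kNone_GrExampleFlags   = 0,
 *       kFirst_GrExampleFlags  = 1 << 0,
 *       kSecond_GrExampleFlags = 1 << 1,
 *   };
 *   GR_MAKE_BITFIELD_OPS(GrExampleFlags)
 *
 *   GrExampleFlags flags = kFirst_GrExampleFlags | kSecond_GrExampleFlags;
 *   flags &= kFirst_GrExampleFlags;                       // masks away kSecond_GrExampleFlags
 *   bool hasFirst = SkToBool(flags & kFirst_GrExampleFlags);
 */
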
#define GR_DECL_BITFIELD_OPS_FRIENDS(X) \
    friend X operator |(X a, X b); \
    friend X& operator |=(X& a, X b); \
    \
    friend X operator &(X a, X b); \
    friend X& operator &=(X& a, X b); \
    \
    template <typename T> \
    friend X operator &(T a, X b); \
    \
    template <typename T> \
    friend X operator &(X a, T b);

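/**
 * A sketch of the friend form (GrExampleObject is hypothetical): when the enum is a private
 * member of a class, the class befriends the operators and GR_MAKE_BITFIELD_OPS then defines
 * them at namespace scope.
 *
 *   class GrExampleObject {
 *   private:
 *       enum Flags {
 *           kDirty_Flag = 1 << 0,
 *       };
 *       GR_DECL_BITFIELD_OPS_FRIENDS(Flags)
 *   };
 *   GR_MAKE_BITFIELD_OPS(GrExampleObject::Flags)
 */
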
/**
 * Wraps a C++11 enum that we use as a bitfield, and enables a limited amount of
 * masking with type safety. Instantiated by the ~ operator that
 * GR_MAKE_BITFIELD_CLASS_OPS defines below.
 */
template<typename TFlags> class GrTFlagsMask {
public:
    constexpr explicit GrTFlagsMask(TFlags value) : GrTFlagsMask(static_cast<int>(value)) {}
    constexpr explicit GrTFlagsMask(int value) : fValue(value) {}
    constexpr int value() const { return fValue; }
private:
    const int fValue;
};

// Or-ing a mask always returns another mask.
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(GrTFlagsMask<TFlags> a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(a.value() | b.value());
}
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(GrTFlagsMask<TFlags> a,
                                                                   TFlags b) {
    return GrTFlagsMask<TFlags>(a.value() | static_cast<int>(b));
}
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator|(TFlags a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(static_cast<int>(a) | b.value());
}
template<typename TFlags> inline GrTFlagsMask<TFlags>& operator|=(GrTFlagsMask<TFlags>& a,
                                                                  GrTFlagsMask<TFlags> b) {
    return (a = a | b);
}

// And-ing two masks returns another mask; and-ing one with regular flags returns flags.
template<typename TFlags> constexpr GrTFlagsMask<TFlags> operator&(GrTFlagsMask<TFlags> a,
                                                                   GrTFlagsMask<TFlags> b) {
    return GrTFlagsMask<TFlags>(a.value() & b.value());
}
template<typename TFlags> constexpr TFlags operator&(GrTFlagsMask<TFlags> a, TFlags b) {
    return static_cast<TFlags>(a.value() & static_cast<int>(b));
}
template<typename TFlags> constexpr TFlags operator&(TFlags a, GrTFlagsMask<TFlags> b) {
    return static_cast<TFlags>(static_cast<int>(a) & b.value());
}
template<typename TFlags> inline TFlags& operator&=(TFlags& a, GrTFlagsMask<TFlags> b) {
    return (a = a & b);
}

/**
 * Defines bitwise operators that make it possible to use an enum class as a
 * basic bitfield.
 */
#define GR_MAKE_BITFIELD_CLASS_OPS(X) \
    constexpr GrTFlagsMask<X> operator~(X a) { \
        return GrTFlagsMask<X>(~static_cast<int>(a)); \
    } \
    constexpr X operator|(X a, X b) { \
        return static_cast<X>(static_cast<int>(a) | static_cast<int>(b)); \
    } \
    inline X& operator|=(X& a, X b) { \
        return (a = a | b); \
    } \
    constexpr bool operator&(X a, X b) { \
        return SkToBool(static_cast<int>(a) & static_cast<int>(b)); \
    }

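/**
 * A minimal usage sketch (GrExampleFlag is hypothetical): the ~ operator produces a
 * GrTFlagsMask, which can be and-ed back with flags to clear bits with type safety.
 *
 *   enum class GrExampleFlag : int {
 *       kNone   = 0,
 *       kFirst  = 1 << 0,
 *       kSecond = 1 << 1,
 *   };
 *   GR_MAKE_BITFIELD_CLASS_OPS(GrExampleFlag)
 *
 *   GrExampleFlag flags = GrExampleFlag::kFirst | GrExampleFlag::kSecond;
 *   flags &= ~GrExampleFlag::kFirst;                    // clears kFirst, keeps kSecond
 *   bool hasSecond = flags & GrExampleFlag::kSecond;    // operator& on two flags yields bool
 */
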
#define GR_DECL_BITFIELD_CLASS_OPS_FRIENDS(X) \
    friend constexpr GrTFlagsMask<X> operator ~(X); \
    friend constexpr X operator |(X, X); \
    friend X& operator |=(X&, X); \
    friend constexpr bool operator &(X, X)

///////////////////////////////////////////////////////////////////////////////

/**
 * Possible 3D APIs that may be used by Ganesh.
 */
enum class GrBackendApi : unsigned {
    kOpenGL,
    kVulkan,
    kMetal,
    kDirect3D,
    kDawn,
    /**
     * Mock is a backend that does not draw anything. It is used for unit tests
     * and to measure CPU overhead.
     */
    kMock,

    /**
     * Added here to support the legacy GrBackend enum value and clients who referenced it using
     * GrBackend::kOpenGL_GrBackend.
     */
    kOpenGL_GrBackend = kOpenGL,
};

/**
 * Previously the above enum was a regular enum rather than an enum class. To support legacy
 * uses of its values we define aliases for them below so that existing clients do not break.
 */
typedef GrBackendApi GrBackend;

static constexpr GrBackendApi kMetal_GrBackend = GrBackendApi::kMetal;
static constexpr GrBackendApi kVulkan_GrBackend = GrBackendApi::kVulkan;
static constexpr GrBackendApi kMock_GrBackend = GrBackendApi::kMock;

///////////////////////////////////////////////////////////////////////////////

/**
 * Used to say whether a texture has mip levels allocated or not.
 */
enum class GrMipMapped : bool {
    kNo = false,
    kYes = true
};

/*
 * Can a GrBackendObject be rendered to?
 */
enum class GrRenderable : bool {
    kNo = false,
    kYes = true
};

/*
 * Used to say whether a texture is backed by protected memory.
 */
enum class GrProtected : bool {
    kNo = false,
    kYes = true
};

///////////////////////////////////////////////////////////////////////////////

/**
 * GPU SkImages and SkSurfaces can be stored such that (0, 0) in texture space may correspond to
 * either the top-left or bottom-left content pixel.
 */
enum GrSurfaceOrigin : int {
    kTopLeft_GrSurfaceOrigin,
    kBottomLeft_GrSurfaceOrigin,
};

/**
 * A GrContext's cache of backend context state can be partially invalidated.
 * These enums are specific to the GL backend; a new set would be added for an alternative
 * backend.
 */
enum GrGLBackendState {
    kRenderTarget_GrGLBackendState = 1 << 0,
    // Also includes samplers bound to texture units.
    kTextureBinding_GrGLBackendState = 1 << 1,
    // View state includes the scissor and viewport.
    kView_GrGLBackendState = 1 << 2,
    kBlend_GrGLBackendState = 1 << 3,
    kMSAAEnable_GrGLBackendState = 1 << 4,
    kVertex_GrGLBackendState = 1 << 5,
    kStencil_GrGLBackendState = 1 << 6,
    kPixelStore_GrGLBackendState = 1 << 7,
    kProgram_GrGLBackendState = 1 << 8,
    kFixedFunction_GrGLBackendState = 1 << 9,
    kMisc_GrGLBackendState = 1 << 10,
    kPathRendering_GrGLBackendState = 1 << 11,
    kALL_GrGLBackendState = 0xffff
};
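
/**
 * A sketch of how a client might use these bits (assumes a GrContext* named context, whose
 * resetContext(uint32_t) accepts this mask): after touching GL state behind Skia's back,
 * invalidate only the state that was actually disturbed.
 *
 *   // The client changed GL texture bindings and the scissor/viewport directly.
 *   context->resetContext(kTextureBinding_GrGLBackendState | kView_GrGLBackendState);
 */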

/**
 * This value translates to resetting all of the context state for any backend.
 */
static const uint32_t kAll_GrBackendState = 0xffffffff;

enum GrFlushFlags {
    kNone_GrFlushFlags = 0,
    // The flush call will wait until all submitted GPU work is finished before returning.
    kSyncCpu_GrFlushFlag = 0x1,
};

typedef void* GrGpuFinishedContext;
typedef void (*GrGpuFinishedProc)(GrGpuFinishedContext finishedContext);

/**
 * Struct to supply options to flush calls.
 *
 * After issuing all commands, fNumSemaphores semaphores will be signaled by the GPU. The client
 * passes in an array of fNumSemaphores GrBackendSemaphores. In general these GrBackendSemaphores
 * can be either initialized or not. If they are initialized, the backend uses the passed-in
 * semaphore. If a semaphore is not initialized, a new semaphore is created and the
 * GrBackendSemaphore object is initialized with that semaphore.
 *
 * The client will own and be responsible for deleting the underlying semaphores that are stored
 * and returned in initialized GrBackendSemaphore objects. The GrBackendSemaphore objects
 * themselves can be deleted as soon as this function returns.
 *
 * If a finishedProc is provided, it will be called when all work submitted to the GPU from this
 * flush call and all previous flush calls has finished on the GPU. If the flush call fails due to
 * an error and nothing ends up being sent to the GPU, the finished proc is called immediately.
 */
struct GrFlushInfo {
    GrFlushFlags fFlags = kNone_GrFlushFlags;
    int fNumSemaphores = 0;
    GrBackendSemaphore* fSignalSemaphores = nullptr;
    GrGpuFinishedProc fFinishedProc = nullptr;
    GrGpuFinishedContext fFinishedContext = nullptr;
};
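
/**
 * A minimal sketch of filling out a GrFlushInfo (onGpuWorkDone, myClientState, and the
 * GrContext* named context are hypothetical client code; the flush entry point itself lives on
 * GrContext):
 *
 *   static void onGpuWorkDone(GrGpuFinishedContext finishedContext) {
 *       // Safe to recycle client resources tied to the flushed work here.
 *   }
 *
 *   GrFlushInfo info;
 *   info.fFlags = kSyncCpu_GrFlushFlag;       // block until the GPU work completes
 *   info.fFinishedProc = onGpuWorkDone;
 *   info.fFinishedContext = myClientState;    // handed back to onGpuWorkDone verbatim
 *   GrSemaphoresSubmitted result = context->flush(info);
 */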

/**
 * Enum used as the return value of flush calls that take semaphores, so the client knows whether
 * the semaphores were actually submitted to the GPU.
 */
enum class GrSemaphoresSubmitted : bool {
    kNo = false,
    kYes = true
};

/**
 * Array of SkImages and SkSurfaces which Skia will prepare for external use when passed into a
 * flush call on GrContext. All the SkImages and SkSurfaces must be GPU backed.
 *
 * If fPrepareSurfaceForPresent is not nullptr, then it must be an array the size of fNumSurfaces.
 * Each entry in the array corresponds to the SkSurface at the same index in the fSurfaces array. If
 * an entry is true, then that surface will be prepared for both external use and present.
 *
 * Currently this only has an effect if the backend API is Vulkan. In this case, all the underlying
 * VkImages associated with the SkImages and SkSurfaces will be transitioned into the VkQueueFamily
 * with which they were originally wrapped or created. This allows a client to wrap a VkImage from
 * a queue that is different from the graphics queue and then have Skia transition it back to that
 * queue without needing to delete the SkImage or SkSurface. If an SkSurface is also flagged to be
 * prepared for present, then its VkImageLayout will be set to
 * VK_IMAGE_LAYOUT_PRESENT_SRC_KHR if the VK_KHR_swapchain extension has been enabled for the
 * GrContext and the original queue is not VK_QUEUE_FAMILY_EXTERNAL or VK_QUEUE_FAMILY_FOREIGN_EXT.
 *
 * If an SkSurface or SkImage is used again, it will be transitioned back to the graphics queue and
 * whatever layout is needed for its use.
 */
struct GrPrepareForExternalIORequests {
    int fNumImages = 0;
    SkImage** fImages = nullptr;
    int fNumSurfaces = 0;
    SkSurface** fSurfaces = nullptr;
    bool* fPrepareSurfaceForPresent = nullptr;

    bool hasRequests() const { return fNumImages || fNumSurfaces; }
};
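
/**
 * A minimal sketch of building a request (mySurface, presentFlags, and the GrContext* named
 * context are hypothetical; per the comment above, the struct is passed into a flush call on
 * GrContext alongside a GrFlushInfo):
 *
 *   SkSurface* surfaces[] = { mySurface.get() };
 *   bool presentFlags[] = { true };            // also prepare this surface for present
 *
 *   GrPrepareForExternalIORequests requests;
 *   requests.fNumSurfaces = 1;
 *   requests.fSurfaces = surfaces;
 *   requests.fPrepareSurfaceForPresent = presentFlags;
 *   context->flush(GrFlushInfo(), requests);
 */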

#endif