1 | /* |
2 | * Copyright 2006 The Android Open Source Project |
3 | * |
4 | * Use of this source code is governed by a BSD-style license that can be |
5 | * found in the LICENSE file. |
6 | */ |
7 | |
8 | #ifndef SkTypes_DEFINED |
9 | #define SkTypes_DEFINED |
10 | |
11 | /** \file SkTypes.h |
12 | */ |
13 | |
14 | // Pre-SkUserConfig.h setup. |
15 | |
// Allows embedders that want to disable macros that take arguments to simply
// define those macros as one of these no-op forms.
18 | #define SK_NOTHING_ARG1(arg1) |
19 | #define SK_NOTHING_ARG2(arg1, arg2) |
20 | #define SK_NOTHING_ARG3(arg1, arg2, arg3) |
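// For example (a sketch), an embedder's SkUserConfig.h could turn the
// two-argument histogram macro declared later in this header into an
// explicit no-op:
//     #define SK_HISTOGRAM_BOOLEAN SK_NOTHING_ARG2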
21 | |
22 | #if !defined(SK_BUILD_FOR_ANDROID) && !defined(SK_BUILD_FOR_IOS) && !defined(SK_BUILD_FOR_WIN) && \ |
23 | !defined(SK_BUILD_FOR_UNIX) && !defined(SK_BUILD_FOR_MAC) |
24 | |
25 | #ifdef __APPLE__ |
26 | #include "TargetConditionals.h" |
27 | #endif |
28 | |
29 | #if defined(_WIN32) || defined(__SYMBIAN32__) |
30 | #define SK_BUILD_FOR_WIN |
31 | #elif defined(ANDROID) || defined(__ANDROID__) |
32 | #define SK_BUILD_FOR_ANDROID |
33 | #elif defined(linux) || defined(__linux) || defined(__FreeBSD__) || \ |
34 | defined(__OpenBSD__) || defined(__sun) || defined(__NetBSD__) || \ |
35 | defined(__DragonFly__) || defined(__Fuchsia__) || \ |
36 | defined(__GLIBC__) || defined(__GNU__) || defined(__unix__) |
37 | #define SK_BUILD_FOR_UNIX |
38 | #elif TARGET_OS_IPHONE || TARGET_IPHONE_SIMULATOR |
39 | #define SK_BUILD_FOR_IOS |
40 | #else |
41 | #define SK_BUILD_FOR_MAC |
42 | #endif |
43 | |
44 | #endif |
45 | |
46 | #if defined(SK_BUILD_FOR_WIN) && !defined(__clang__) |
47 | #if !defined(SK_RESTRICT) |
48 | #define SK_RESTRICT __restrict |
49 | #endif |
50 | #if !defined(SK_WARN_UNUSED_RESULT) |
51 | #define SK_WARN_UNUSED_RESULT |
52 | #endif |
53 | #endif |
54 | |
55 | #if !defined(SK_RESTRICT) |
56 | #define SK_RESTRICT __restrict__ |
57 | #endif |
58 | |
59 | #if !defined(SK_WARN_UNUSED_RESULT) |
60 | #define SK_WARN_UNUSED_RESULT __attribute__((warn_unused_result)) |
61 | #endif |
62 | |
63 | #if !defined(SK_CPU_BENDIAN) && !defined(SK_CPU_LENDIAN) |
64 | #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) |
65 | #define SK_CPU_BENDIAN |
66 | #elif defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) |
67 | #define SK_CPU_LENDIAN |
68 | #elif defined(__sparc) || defined(__sparc__) || \ |
69 | defined(_POWER) || defined(__powerpc__) || \ |
70 | defined(__ppc__) || defined(__hppa) || \ |
71 | defined(__PPC__) || defined(__PPC64__) || \ |
72 | defined(_MIPSEB) || defined(__ARMEB__) || \ |
73 | defined(__s390__) || \ |
74 | (defined(__sh__) && defined(__BIG_ENDIAN__)) || \ |
75 | (defined(__ia64) && defined(__BIG_ENDIAN__)) |
76 | #define SK_CPU_BENDIAN |
77 | #else |
78 | #define SK_CPU_LENDIAN |
79 | #endif |
80 | #endif |
81 | |
82 | #if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64) |
83 | #define SK_CPU_X86 1 |
84 | #endif |
85 | |
86 | /** |
87 | * SK_CPU_SSE_LEVEL |
88 | * |
89 | * If defined, SK_CPU_SSE_LEVEL should be set to the highest supported level. |
 *  On non-Intel CPUs this should be left undefined.
91 | */ |
92 | #define SK_CPU_SSE_LEVEL_SSE1 10 |
93 | #define SK_CPU_SSE_LEVEL_SSE2 20 |
94 | #define SK_CPU_SSE_LEVEL_SSE3 30 |
95 | #define SK_CPU_SSE_LEVEL_SSSE3 31 |
96 | #define SK_CPU_SSE_LEVEL_SSE41 41 |
97 | #define SK_CPU_SSE_LEVEL_SSE42 42 |
98 | #define SK_CPU_SSE_LEVEL_AVX 51 |
99 | #define SK_CPU_SSE_LEVEL_AVX2 52 |
100 | #define SK_CPU_SSE_LEVEL_AVX512 60 |
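// For example (a sketch), code can guard compile-time-selected intrinsics on
// this value:
//     #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
//         #include <smmintrin.h>   // SSE4.1 intrinsics
//         // ... SSE4.1 path ...
//     #else
//         // ... portable fallback ...
//     #endif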
101 | |
// When targeting iOS and using gyp to generate the build files, it is not
// possible to select files to build depending on the architecture (i.e. it
// is not possible to use hand-optimized assembly implementations). In that
// configuration SK_BUILD_NO_OPTS is defined, so disable these optimizations.
106 | #ifdef SK_BUILD_NO_OPTS |
107 | #define SK_CPU_SSE_LEVEL 0 |
108 | #endif |
109 | |
110 | // Are we in GCC/Clang? |
111 | #ifndef SK_CPU_SSE_LEVEL |
112 | // These checks must be done in descending order to ensure we set the highest |
113 | // available SSE level. |
114 | #if defined(__AVX512F__) |
115 | #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX512 |
116 | #elif defined(__AVX2__) |
117 | #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX2 |
118 | #elif defined(__AVX__) |
119 | #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX |
120 | #elif defined(__SSE4_2__) |
121 | #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE42 |
122 | #elif defined(__SSE4_1__) |
123 | #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE41 |
124 | #elif defined(__SSSE3__) |
125 | #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSSE3 |
126 | #elif defined(__SSE3__) |
127 | #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE3 |
128 | #elif defined(__SSE2__) |
129 | #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2 |
130 | #endif |
131 | #endif |
132 | |
// Are we in Visual Studio?
134 | #ifndef SK_CPU_SSE_LEVEL |
135 | // These checks must be done in descending order to ensure we set the highest |
// available SSE level. 64-bit x86 guarantees at least SSE2 support.
137 | #if defined(__AVX2__) |
138 | #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX2 |
139 | #elif defined(__AVX__) |
140 | #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_AVX |
141 | #elif defined(_M_X64) || defined(_M_AMD64) |
142 | #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2 |
143 | #elif defined(_M_IX86_FP) |
144 | #if _M_IX86_FP >= 2 |
145 | #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE2 |
146 | #elif _M_IX86_FP == 1 |
147 | #define SK_CPU_SSE_LEVEL SK_CPU_SSE_LEVEL_SSE1 |
148 | #endif |
149 | #endif |
150 | #endif |
151 | |
152 | // ARM defines |
153 | #if defined(__arm__) && (!defined(__APPLE__) || !TARGET_IPHONE_SIMULATOR) |
154 | #define SK_CPU_ARM32 |
155 | #elif defined(__aarch64__) && !defined(SK_BUILD_NO_OPTS) |
156 | #define SK_CPU_ARM64 |
157 | #endif |
158 | |
159 | // All 64-bit ARM chips have NEON. Many 32-bit ARM chips do too. |
160 | #if !defined(SK_ARM_HAS_NEON) && !defined(SK_BUILD_NO_OPTS) && defined(__ARM_NEON) |
161 | #define SK_ARM_HAS_NEON |
162 | #endif |
163 | |
164 | // Really this __APPLE__ check shouldn't be necessary, but it seems that Apple's Clang defines |
165 | // __ARM_FEATURE_CRC32 for -arch arm64, even though their chips don't support those instructions! |
166 | #if defined(__ARM_FEATURE_CRC32) && !defined(__APPLE__) |
167 | #define SK_ARM_HAS_CRC32 |
168 | #endif |
169 | |
170 | |
171 | // DLL/.so exports. |
172 | #if !defined(SKIA_IMPLEMENTATION) |
173 | #define SKIA_IMPLEMENTATION 0 |
174 | #endif |
175 | #if !defined(SK_API) |
176 | #if defined(SKIA_DLL) |
177 | #if defined(_MSC_VER) |
178 | #if SKIA_IMPLEMENTATION |
179 | #define SK_API __declspec(dllexport) |
180 | #else |
181 | #define SK_API __declspec(dllimport) |
182 | #endif |
183 | #else |
184 | #define SK_API __attribute__((visibility("default"))) |
185 | #endif |
186 | #else |
187 | #define SK_API |
188 | #endif |
189 | #endif |
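// For example (a sketch; exact flags depend on the build system), a shared
// library build defines SKIA_DLL for both the library and its clients, and
// SKIA_IMPLEMENTATION=1 only while compiling the library itself:
//     library TUs: -DSKIA_DLL -DSKIA_IMPLEMENTATION=1   -> SK_API exports
//     client  TUs: -DSKIA_DLL                           -> SK_API imports (MSVC)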
190 | |
191 | // SK_SPI is functionally identical to SK_API, but used within src to clarify that it's less stable |
192 | #if !defined(SK_SPI) |
193 | #define SK_SPI SK_API |
194 | #endif |
195 | |
196 | // IWYU pragma: begin_exports |
197 | #if defined (SK_USER_CONFIG_HEADER) |
198 | #include SK_USER_CONFIG_HEADER |
199 | #else |
200 | #include "include/config/SkUserConfig.h" |
201 | #endif |
202 | #include <stddef.h> |
203 | #include <stdint.h> |
204 | // IWYU pragma: end_exports |
205 | |
206 | // Post SkUserConfig.h checks and such. |
207 | #if !defined(SK_DEBUG) && !defined(SK_RELEASE) |
208 | #ifdef NDEBUG |
209 | #define SK_RELEASE |
210 | #else |
211 | #define SK_DEBUG |
212 | #endif |
213 | #endif |
214 | |
215 | #if defined(SK_DEBUG) && defined(SK_RELEASE) |
216 | # error "cannot define both SK_DEBUG and SK_RELEASE" |
217 | #elif !defined(SK_DEBUG) && !defined(SK_RELEASE) |
218 | # error "must define either SK_DEBUG or SK_RELEASE" |
219 | #endif |
220 | |
221 | #if defined(SK_CPU_LENDIAN) && defined(SK_CPU_BENDIAN) |
222 | # error "cannot define both SK_CPU_LENDIAN and SK_CPU_BENDIAN" |
223 | #elif !defined(SK_CPU_LENDIAN) && !defined(SK_CPU_BENDIAN) |
224 | # error "must define either SK_CPU_LENDIAN or SK_CPU_BENDIAN" |
225 | #endif |
226 | |
227 | #if defined(SK_CPU_BENDIAN) && !defined(I_ACKNOWLEDGE_SKIA_DOES_NOT_SUPPORT_BIG_ENDIAN) |
228 | #error "The Skia team is not endian-savvy enough to support big-endian CPUs." |
229 | #error "If you still want to use Skia," |
230 | #error "please define I_ACKNOWLEDGE_SKIA_DOES_NOT_SUPPORT_BIG_ENDIAN." |
231 | #endif |
232 | |
233 | #if !defined(SK_ATTRIBUTE) |
234 | # if defined(__clang__) || defined(__GNUC__) |
235 | # define SK_ATTRIBUTE(attr) __attribute__((attr)) |
236 | # else |
237 | # define SK_ATTRIBUTE(attr) |
238 | # endif |
239 | #endif |
240 | |
241 | #if !defined(SK_SUPPORT_GPU) |
242 | # define SK_SUPPORT_GPU 1 |
243 | #endif |
244 | |
245 | /** |
246 | * If GPU is enabled but no GPU backends are enabled then enable GL by default. |
247 | * Traditionally clients have relied on Skia always building with the GL backend |
248 | * and opting in to additional backends. TODO: Require explicit opt in for GL. |
249 | */ |
250 | #if SK_SUPPORT_GPU |
251 | # if !defined(SK_GL) && !defined(SK_VULKAN) && !defined(SK_METAL) && !defined(SK_DAWN) && !defined(SK_DIRECT3D) |
252 | # define SK_GL |
253 | # endif |
254 | #else |
255 | # undef SK_GL |
256 | # undef SK_VULKAN |
257 | # undef SK_METAL |
258 | # undef SK_DAWN |
259 | # undef SK_DIRECT3D |
260 | #endif |
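// For example (a sketch), a client that wants only the Vulkan backend would
// define SK_VULKAN in its build; the GL default above is then not applied.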
261 | |
262 | #if !defined(SK_SUPPORT_ATLAS_TEXT) |
263 | # define SK_SUPPORT_ATLAS_TEXT 0 |
264 | #elif SK_SUPPORT_ATLAS_TEXT && !SK_SUPPORT_GPU |
265 | # error "SK_SUPPORT_ATLAS_TEXT requires SK_SUPPORT_GPU" |
266 | #endif |
267 | |
268 | #if !defined(SkUNREACHABLE) |
269 | # if defined(_MSC_VER) && !defined(__clang__) |
270 | # define SkUNREACHABLE __assume(false) |
271 | # else |
272 | # define SkUNREACHABLE __builtin_unreachable() |
273 | # endif |
274 | #endif |
275 | |
276 | #if defined(SK_BUILD_FOR_GOOGLE3) |
277 | void SkDebugfForDumpStackTrace(const char* data, void* unused); |
278 | void DumpStackTrace(int skip_count, void w(const char*, void*), void* arg); |
279 | # define SK_DUMP_GOOGLE3_STACK() DumpStackTrace(0, SkDebugfForDumpStackTrace, nullptr) |
280 | #else |
281 | # define SK_DUMP_GOOGLE3_STACK() |
282 | #endif |
283 | |
284 | #ifdef SK_BUILD_FOR_WIN |
// Lets Visual Studio follow the error back to the source.
286 | #define SK_DUMP_LINE_FORMAT(message) \ |
287 | SkDebugf("%s(%d): fatal error: \"%s\"\n", __FILE__, __LINE__, message) |
288 | #else |
289 | #define SK_DUMP_LINE_FORMAT(message) \ |
290 | SkDebugf("%s:%d: fatal error: \"%s\"\n", __FILE__, __LINE__, message) |
291 | #endif |
292 | |
293 | #ifndef SK_ABORT |
294 | # define SK_ABORT(message) \ |
295 | do { \ |
296 | SK_DUMP_LINE_FORMAT(message); \ |
297 | SK_DUMP_GOOGLE3_STACK(); \ |
298 | sk_abort_no_print(); \ |
299 | SkUNREACHABLE; \ |
300 | } while (false) |
301 | #endif |
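// For example (a sketch with a hypothetical field name), unrecoverable states
// can be reported with:
//     if (nullptr == fRequiredResource) {
//         SK_ABORT("required resource missing");
//     }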
302 | |
303 | // If SK_R32_SHIFT is set, we'll use that to choose RGBA or BGRA. |
304 | // If not, we'll default to RGBA everywhere except BGRA on Windows. |
305 | #if defined(SK_R32_SHIFT) |
306 | static_assert(SK_R32_SHIFT == 0 || SK_R32_SHIFT == 16, "" ); |
307 | #elif defined(SK_BUILD_FOR_WIN) |
308 | #define SK_R32_SHIFT 16 |
309 | #else |
310 | #define SK_R32_SHIFT 0 |
311 | #endif |
312 | |
313 | #if defined(SK_B32_SHIFT) |
314 | static_assert(SK_B32_SHIFT == (16-SK_R32_SHIFT), "" ); |
315 | #else |
316 | #define SK_B32_SHIFT (16-SK_R32_SHIFT) |
317 | #endif |
318 | |
319 | #define SK_G32_SHIFT 8 |
320 | #define SK_A32_SHIFT 24 |
321 | |
322 | |
323 | /** |
324 | * SK_PMCOLOR_BYTE_ORDER can be used to query the byte order of SkPMColor at compile time. The |
325 | * relationship between the byte order and shift values depends on machine endianness. If the shift |
326 | * order is R=0, G=8, B=16, A=24 then ((char*)&pmcolor)[0] will produce the R channel on a little |
 * endian machine and the A channel on a big endian machine. Thus, given those shift values,
328 | * SK_PMCOLOR_BYTE_ORDER(R,G,B,A) will be true on a little endian machine and |
329 | * SK_PMCOLOR_BYTE_ORDER(A,B,G,R) will be true on a big endian machine. |
330 | */ |
331 | #ifdef SK_CPU_BENDIAN |
332 | # define SK_PMCOLOR_BYTE_ORDER(C0, C1, C2, C3) \ |
333 | (SK_ ## C3 ## 32_SHIFT == 0 && \ |
334 | SK_ ## C2 ## 32_SHIFT == 8 && \ |
335 | SK_ ## C1 ## 32_SHIFT == 16 && \ |
336 | SK_ ## C0 ## 32_SHIFT == 24) |
337 | #else |
338 | # define SK_PMCOLOR_BYTE_ORDER(C0, C1, C2, C3) \ |
339 | (SK_ ## C0 ## 32_SHIFT == 0 && \ |
340 | SK_ ## C1 ## 32_SHIFT == 8 && \ |
341 | SK_ ## C2 ## 32_SHIFT == 16 && \ |
342 | SK_ ## C3 ## 32_SHIFT == 24) |
343 | #endif |
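// For example (a sketch), code that touches SkPMColor bytes directly can
// branch on the in-memory channel order:
//     #if SK_PMCOLOR_BYTE_ORDER(R,G,B,A)
//         // byte 0 is R, byte 3 is A
//     #elif SK_PMCOLOR_BYTE_ORDER(B,G,R,A)
//         // byte 0 is B, byte 3 is A (e.g. the Windows default on little-endian)
//     #endif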
344 | |
345 | #if defined SK_DEBUG && defined SK_BUILD_FOR_WIN |
346 | #ifdef free |
347 | #undef free |
348 | #endif |
349 | #include <crtdbg.h> |
350 | #undef free |
351 | #endif |
352 | |
353 | #if !defined(SK_UNUSED) |
354 | # if !defined(__clang__) && defined(_MSC_VER) |
355 | # define SK_UNUSED __pragma(warning(suppress:4189)) |
356 | # else |
357 | # define SK_UNUSED SK_ATTRIBUTE(unused) |
358 | # endif |
359 | #endif |
360 | |
361 | /** |
362 | * If your judgment is better than the compiler's (i.e. you've profiled it), |
363 | * you can use SK_ALWAYS_INLINE to force inlining. E.g. |
364 | * inline void someMethod() { ... } // may not be inlined |
365 | * SK_ALWAYS_INLINE void someMethod() { ... } // should always be inlined |
366 | */ |
367 | #if !defined(SK_ALWAYS_INLINE) |
368 | # if defined(SK_BUILD_FOR_WIN) |
369 | # define SK_ALWAYS_INLINE __forceinline |
370 | # else |
371 | # define SK_ALWAYS_INLINE SK_ATTRIBUTE(always_inline) inline |
372 | # endif |
373 | #endif |
374 | |
375 | /** |
376 | * If your judgment is better than the compiler's (i.e. you've profiled it), |
377 | * you can use SK_NEVER_INLINE to prevent inlining. |
378 | */ |
379 | #if !defined(SK_NEVER_INLINE) |
380 | # if defined(SK_BUILD_FOR_WIN) |
381 | # define SK_NEVER_INLINE __declspec(noinline) |
382 | # else |
383 | # define SK_NEVER_INLINE SK_ATTRIBUTE(noinline) |
384 | # endif |
385 | #endif |
386 | |
387 | #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE1 |
388 | #define SK_PREFETCH(ptr) _mm_prefetch(reinterpret_cast<const char*>(ptr), _MM_HINT_T0) |
389 | #elif defined(__GNUC__) |
390 | #define SK_PREFETCH(ptr) __builtin_prefetch(ptr) |
391 | #else |
392 | #define SK_PREFETCH(ptr) |
393 | #endif |
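// For example (a sketch with a hypothetical pointer), a scanline loop might
// hint that the next cache line will be read soon: SK_PREFETCH(src + 64);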
394 | |
395 | #ifndef SK_PRINTF_LIKE |
396 | # if defined(__clang__) || defined(__GNUC__) |
397 | # define SK_PRINTF_LIKE(A, B) __attribute__((format(printf, (A), (B)))) |
398 | # else |
399 | # define SK_PRINTF_LIKE(A, B) |
400 | # endif |
401 | #endif |
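// For example (a sketch with a hypothetical function), annotating a
// printf-style logger lets GCC/Clang type-check its arguments:
//     void MyTracef(const char format[], ...) SK_PRINTF_LIKE(1, 2);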
402 | |
403 | #ifndef SK_SIZE_T_SPECIFIER |
404 | # if defined(_MSC_VER) && !defined(__clang__) |
405 | # define SK_SIZE_T_SPECIFIER "%Iu" |
406 | # else |
407 | # define SK_SIZE_T_SPECIFIER "%zu" |
408 | # endif |
409 | #endif |
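// For example (a sketch), the specifier is spliced into format strings:
//     SkDebugf("allocated " SK_SIZE_T_SPECIFIER " bytes\n", byteCount);
// where byteCount is a size_t (hypothetical variable).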
410 | |
411 | #ifndef SK_ALLOW_STATIC_GLOBAL_INITIALIZERS |
412 | #define SK_ALLOW_STATIC_GLOBAL_INITIALIZERS 0 |
413 | #endif |
414 | |
415 | #if !defined(SK_GAMMA_EXPONENT) |
#define SK_GAMMA_EXPONENT (0.0f) // sRGB
417 | #endif |
418 | |
419 | #ifndef GR_TEST_UTILS |
420 | # define GR_TEST_UTILS 0 |
421 | #endif |
422 | |
423 | #if defined(SK_HISTOGRAM_ENUMERATION) && defined(SK_HISTOGRAM_BOOLEAN) |
424 | # define SK_HISTOGRAMS_ENABLED 1 |
425 | #else |
426 | # define SK_HISTOGRAMS_ENABLED 0 |
427 | #endif |
428 | |
429 | #ifndef SK_HISTOGRAM_BOOLEAN |
430 | # define SK_HISTOGRAM_BOOLEAN(name, value) |
431 | #endif |
432 | |
433 | #ifndef SK_HISTOGRAM_ENUMERATION |
434 | # define SK_HISTOGRAM_ENUMERATION(name, value, boundary_value) |
435 | #endif |
436 | |
437 | #ifndef SK_DISABLE_LEGACY_SHADERCONTEXT |
438 | #define SK_ENABLE_LEGACY_SHADERCONTEXT |
439 | #endif |
440 | |
441 | #ifdef SK_ENABLE_API_AVAILABLE |
442 | #define SK_API_AVAILABLE API_AVAILABLE |
443 | #else |
444 | #define SK_API_AVAILABLE(...) |
445 | #endif |
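// For example (a sketch), when SK_ENABLE_API_AVAILABLE is defined this lets a
// declaration carry Apple availability information:
//     void someMetalHelper() SK_API_AVAILABLE(macos(10.11), ios(9.0));
// (someMetalHelper and the version numbers are illustrative only.)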
446 | |
447 | /** Called internally if we hit an unrecoverable error. |
    The platform implementation must not return; it should either throw
    an exception or otherwise terminate the process.
450 | */ |
451 | SK_API extern void sk_abort_no_print(void); |
452 | |
453 | #ifndef SkDebugf |
454 | SK_API void SkDebugf(const char format[], ...); |
455 | #endif |
456 | |
// SkASSERT, SkASSERTF and SkASSERT_RELEASE can be used as stand-alone assertion expressions, e.g.
458 | // uint32_t foo(int x) { |
459 | // SkASSERT(x > 4); |
460 | // return x - 4; |
461 | // } |
462 | // and are also written to be compatible with constexpr functions: |
463 | // constexpr uint32_t foo(int x) { |
464 | // return SkASSERT(x > 4), |
465 | // x - 4; |
466 | // } |
467 | #define SkASSERT_RELEASE(cond) \ |
468 | static_cast<void>( (cond) ? (void)0 : []{ SK_ABORT("assert(" #cond ")"); }() ) |
469 | |
470 | #ifdef SK_DEBUG |
471 | #define SkASSERT(cond) SkASSERT_RELEASE(cond) |
472 | #define SkASSERTF(cond, fmt, ...) static_cast<void>( (cond) ? (void)0 : [&]{ \ |
473 | SkDebugf(fmt"\n", __VA_ARGS__); \ |
474 | SK_ABORT("assert(" #cond ")"); \ |
475 | }() ) |
476 | #define SkDEBUGFAIL(message) SK_ABORT(message) |
477 | #define SkDEBUGFAILF(fmt, ...) SkASSERTF(false, fmt, ##__VA_ARGS__) |
478 | #define SkDEBUGCODE(...) __VA_ARGS__ |
479 | #define SkDEBUGF(...) SkDebugf(__VA_ARGS__) |
480 | #define SkAssertResult(cond) SkASSERT(cond) |
481 | #else |
482 | #define SkASSERT(cond) static_cast<void>(0) |
483 | #define SkASSERTF(cond, fmt, ...) static_cast<void>(0) |
484 | #define SkDEBUGFAIL(message) |
485 | #define SkDEBUGFAILF(fmt, ...) |
486 | #define SkDEBUGCODE(...) |
487 | #define SkDEBUGF(...) |
488 | |
// Unlike SkASSERT, this macro evaluates its condition even in a non-debug build.
// The 'if' is present so that this can be used with functions marked SK_WARN_UNUSED_RESULT.
491 | #define SkAssertResult(cond) if (cond) {} do {} while(false) |
492 | #endif |
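// For example (a sketch with hypothetical values):
//     SkASSERTF(index < count, "index=%d count=%d", index, count);
//     SkAssertResult(doSetupThatMustSucceed());  // still evaluated in release builds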
493 | |
494 | //////////////////////////////////////////////////////////////////////////////// |
495 | |
496 | /** Fast type for unsigned 8 bits. Use for parameter passing and local |
497 | variables, not for storage |
498 | */ |
499 | typedef unsigned U8CPU; |
500 | |
501 | /** Fast type for unsigned 16 bits. Use for parameter passing and local |
502 | variables, not for storage |
503 | */ |
504 | typedef unsigned U16CPU; |
505 | |
/** @return true if the argument is non-zero, false otherwise
507 | */ |
508 | template <typename T> static constexpr bool SkToBool(const T& x) { return 0 != x; } |
509 | |
510 | static constexpr int16_t SK_MaxS16 = INT16_MAX; |
511 | static constexpr int16_t SK_MinS16 = -SK_MaxS16; |
512 | |
513 | static constexpr int32_t SK_MaxS32 = INT32_MAX; |
514 | static constexpr int32_t SK_MinS32 = -SK_MaxS32; |
515 | static constexpr int32_t SK_NaN32 = INT32_MIN; |
516 | |
517 | static constexpr int64_t SK_MaxS64 = INT64_MAX; |
518 | static constexpr int64_t SK_MinS64 = -SK_MaxS64; |
519 | |
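/** Shifts through the unsigned representation so that left-shifting a negative
    value (undefined behavior for plain '<<' on signed integers before C++20)
    is well defined; e.g. SkLeftShift(int32_t(-1), 4) yields -16 on
    two's-complement targets.
*/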
520 | static inline constexpr int32_t SkLeftShift(int32_t value, int32_t shift) { |
521 | return (int32_t) ((uint32_t) value << shift); |
522 | } |
523 | |
524 | static inline constexpr int64_t SkLeftShift(int64_t value, int32_t shift) { |
525 | return (int64_t) ((uint64_t) value << shift); |
526 | } |
527 | |
528 | //////////////////////////////////////////////////////////////////////////////// |
529 | |
530 | /** @return the number of entries in an array (not a pointer) |
531 | */ |
532 | template <typename T, size_t N> char (&SkArrayCountHelper(T (&array)[N]))[N]; |
533 | #define SK_ARRAY_COUNT(array) (sizeof(SkArrayCountHelper(array))) |
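// For example (a sketch):
//     static constexpr int kTable[] = { 1, 2, 3 };
//     static_assert(SK_ARRAY_COUNT(kTable) == 3, "");
// Passing a pointer rather than an actual array fails to compile.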
534 | |
535 | //////////////////////////////////////////////////////////////////////////////// |
536 | |
537 | template <typename T> static constexpr T SkAlign2(T x) { return (x + 1) >> 1 << 1; } |
538 | template <typename T> static constexpr T SkAlign4(T x) { return (x + 3) >> 2 << 2; } |
539 | template <typename T> static constexpr T SkAlign8(T x) { return (x + 7) >> 3 << 3; } |
540 | |
541 | template <typename T> static constexpr bool SkIsAlign2(T x) { return 0 == (x & 1); } |
542 | template <typename T> static constexpr bool SkIsAlign4(T x) { return 0 == (x & 3); } |
543 | template <typename T> static constexpr bool SkIsAlign8(T x) { return 0 == (x & 7); } |
544 | |
545 | template <typename T> static constexpr T SkAlignPtr(T x) { |
546 | return sizeof(void*) == 8 ? SkAlign8(x) : SkAlign4(x); |
547 | } |
548 | template <typename T> static constexpr bool SkIsAlignPtr(T x) { |
549 | return sizeof(void*) == 8 ? SkIsAlign8(x) : SkIsAlign4(x); |
550 | } |
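// For example: SkAlign4(10) == 12, SkIsAlign8(24) is true, and SkAlignPtr
// rounds up to a multiple of 8 on 64-bit targets and of 4 on 32-bit targets.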
551 | |
552 | typedef uint32_t SkFourByteTag; |
553 | static inline constexpr SkFourByteTag SkSetFourByteTag(char a, char b, char c, char d) { |
554 | return (((uint8_t)a << 24) | ((uint8_t)b << 16) | ((uint8_t)c << 8) | (uint8_t)d); |
555 | } |
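// For example (a sketch), OpenType-style tags pack four characters with the
// first character in the most significant byte:
//     constexpr SkFourByteTag kHeadTag = SkSetFourByteTag('h', 'e', 'a', 'd');
//     // kHeadTag == 0x68656164
// (kHeadTag is an illustrative name.)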
556 | |
557 | //////////////////////////////////////////////////////////////////////////////// |
558 | |
/** 32 bit integer to hold a Unicode code point
560 | */ |
561 | typedef int32_t SkUnichar; |
562 | |
563 | /** 16 bit unsigned integer to hold a glyph index |
564 | */ |
565 | typedef uint16_t SkGlyphID; |
566 | |
567 | /** 32 bit value to hold a millisecond duration |
568 | Note that SK_MSecMax is about 25 days. |
569 | */ |
570 | typedef uint32_t SkMSec; |
571 | |
572 | /** Maximum representable milliseconds; 24d 20h 31m 23.647s. |
573 | */ |
574 | static constexpr SkMSec SK_MSecMax = INT32_MAX; |
575 | |
/** The generation IDs in Skia reserve 0 as an invalid marker.
577 | */ |
578 | static constexpr uint32_t SK_InvalidGenID = 0; |
579 | |
/** The unique IDs in Skia reserve 0 as an invalid marker.
581 | */ |
582 | static constexpr uint32_t SK_InvalidUniqueID = 0; |
583 | |
584 | static inline int32_t SkAbs32(int32_t value) { |
585 | SkASSERT(value != SK_NaN32); // The most negative int32_t can't be negated. |
586 | if (value < 0) { |
587 | value = -value; |
588 | } |
589 | return value; |
590 | } |
591 | |
592 | template <typename T> static inline T SkTAbs(T value) { |
593 | if (value < 0) { |
594 | value = -value; |
595 | } |
596 | return value; |
597 | } |
598 | |
/** @return value pinned (clamped) between min and max, inclusive.
600 | |
601 | NOTE: Unlike std::clamp, SkTPin has well-defined behavior if 'value' is a |
602 | floating point NaN. In that case, 'max' is returned. |
603 | */ |
604 | template <typename T> static constexpr const T& SkTPin(const T& value, const T& min, const T& max) { |
605 | return value < min ? min : (value < max ? value : max); |
606 | } |
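// For example (a sketch with hypothetical variables):
//     float t = SkTPin(v, 0.0f, 1.0f);        // clamps v to [0, 1]
//     SkTPin(nanValue, 0.0f, 1.0f) == 1.0f    // a NaN input pins to 'max'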
607 | |
608 | //////////////////////////////////////////////////////////////////////////////// |
609 | |
610 | /** Indicates whether an allocation should count against a cache budget. |
611 | */ |
612 | enum class SkBudgeted : bool { |
613 | kNo = false, |
614 | kYes = true |
615 | }; |
616 | |
617 | /** Indicates whether a backing store needs to be an exact match or can be |
618 | larger than is strictly necessary |
619 | */ |
620 | enum class SkBackingFit { |
621 | kApprox, |
622 | kExact |
623 | }; |
624 | |
625 | #endif |
626 | |