/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkColorPriv_DEFINED
#define SkColorPriv_DEFINED

#include "include/core/SkColor.h"
#include "include/core/SkMath.h"
#include "include/private/SkTo.h"

/** Turn 0..255 into 0..256 by adding 1. Used to turn a byte into a scale
    value, so that we can say scale * value >> 8 instead of
    alpha * value / 255.

    In debug builds, asserts that alpha is 0..255.
*/
static inline unsigned SkAlpha255To256(U8CPU alpha) {
    SkASSERT(SkToU8(alpha) == alpha);
    // This one assumes that blending on top of an opaque dst keeps it that
    // way, even though it is less accurate than a+(a>>7) for non-opaque dsts.
    return alpha + 1;
}

/** Multiply value by 0..256, and shift the result down 8
    (i.e. return (value * alpha256) >> 8).
*/
#define SkAlphaMul(value, alpha256) (((value) * (alpha256)) >> 8)
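
// Example (a hypothetical caller, not part of this header): scaling a color
// component by a 0..255 coverage value using the 0..256 scale above.
//
//     U8CPU coverage  = 128;                        // 50% coverage
//     unsigned scale  = SkAlpha255To256(coverage);  // 129
//     unsigned scaled = SkAlphaMul(200, scale);     // (200 * 129) >> 8 == 100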

static inline U8CPU SkUnitScalarClampToByte(SkScalar x) {
    return static_cast<U8CPU>(SkTPin(x, 0.0f, 1.0f) * 255 + 0.5);
}
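
// Example (hypothetical inputs): mapping a unit scalar to a byte with
// clamping and round-to-nearest.
//
//     SkUnitScalarClampToByte(0.5f);  // 128 (0.5 * 255 + 0.5 == 128.0)
//     SkUnitScalarClampToByte(1.5f);  // 255 (pinned to 1.0 first)
//     SkUnitScalarClampToByte(-1.f);  // 0   (pinned to 0.0 first)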

#define SK_A32_BITS 8
#define SK_R32_BITS 8
#define SK_G32_BITS 8
#define SK_B32_BITS 8

#define SK_A32_MASK ((1 << SK_A32_BITS) - 1)
#define SK_R32_MASK ((1 << SK_R32_BITS) - 1)
#define SK_G32_MASK ((1 << SK_G32_BITS) - 1)
#define SK_B32_MASK ((1 << SK_B32_BITS) - 1)

/*
 * Skia's 32-bit backend only supports one swizzle order at a time; it is
 * chosen at compile time by setting SK_R32_SHIFT to either 0 or 16.
 *
 * For easier compatibility with Skia's GPU backend, we further restrict these
 * to either (in memory-byte-order) RGBA or BGRA. Note that this "order" does
 * not directly correspond to the same shift order, since we have to take
 * endianness into account.
 *
 * Here we enforce this constraint.
 */

#define SK_RGBA_R32_SHIFT   0
#define SK_RGBA_G32_SHIFT   8
#define SK_RGBA_B32_SHIFT   16
#define SK_RGBA_A32_SHIFT   24

#define SK_BGRA_B32_SHIFT   0
#define SK_BGRA_G32_SHIFT   8
#define SK_BGRA_R32_SHIFT   16
#define SK_BGRA_A32_SHIFT   24
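
// For example (a sketch, not code from this header): on a little-endian
// machine the RGBA shifts above put R in the lowest byte of the packed word,
// which is the first byte in memory.
//
//     uint32_t p = (0x44 << SK_RGBA_A32_SHIFT) | (0x33 << SK_RGBA_B32_SHIFT) |
//                  (0x22 << SK_RGBA_G32_SHIFT) | (0x11 << SK_RGBA_R32_SHIFT);
//     // p == 0x44332211; memory bytes (little-endian): 11 22 33 44 == R G B A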

#if defined(SK_PMCOLOR_IS_RGBA) || defined(SK_PMCOLOR_IS_BGRA)
#error "Configure PMCOLOR by setting SK_R32_SHIFT."
#endif

// Deduce which SK_PMCOLOR_IS_ to define from the _SHIFT defines.

#if (SK_A32_SHIFT == SK_RGBA_A32_SHIFT && \
     SK_R32_SHIFT == SK_RGBA_R32_SHIFT && \
     SK_G32_SHIFT == SK_RGBA_G32_SHIFT && \
     SK_B32_SHIFT == SK_RGBA_B32_SHIFT)
    #define SK_PMCOLOR_IS_RGBA
#elif (SK_A32_SHIFT == SK_BGRA_A32_SHIFT && \
       SK_R32_SHIFT == SK_BGRA_R32_SHIFT && \
       SK_G32_SHIFT == SK_BGRA_G32_SHIFT && \
       SK_B32_SHIFT == SK_BGRA_B32_SHIFT)
    #define SK_PMCOLOR_IS_BGRA
#else
#error "need 32bit packing to be either RGBA or BGRA"
#endif

#define SkGetPackedA32(packed)  ((uint32_t)((packed) << (24 - SK_A32_SHIFT)) >> 24)
#define SkGetPackedR32(packed)  ((uint32_t)((packed) << (24 - SK_R32_SHIFT)) >> 24)
#define SkGetPackedG32(packed)  ((uint32_t)((packed) << (24 - SK_G32_SHIFT)) >> 24)
#define SkGetPackedB32(packed)  ((uint32_t)((packed) << (24 - SK_B32_SHIFT)) >> 24)
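
// Example (hypothetical packed value, assuming the RGBA configuration, i.e.
// SK_R32_SHIFT == 0):
//
//     SkPMColor c = 0x44332211;  // A == 0x44, B == 0x33, G == 0x22, R == 0x11
//     SkGetPackedA32(c);         // 0x44
//     SkGetPackedR32(c);         // 0x11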

#define SkA32Assert(a)  SkASSERT((unsigned)(a) <= SK_A32_MASK)
#define SkR32Assert(r)  SkASSERT((unsigned)(r) <= SK_R32_MASK)
#define SkG32Assert(g)  SkASSERT((unsigned)(g) <= SK_G32_MASK)
#define SkB32Assert(b)  SkASSERT((unsigned)(b) <= SK_B32_MASK)

/**
 *  Pack the components into a SkPMColor, checking (in the debug version) that
 *  the components are 0..255, and are already premultiplied (i.e. alpha >= color).
 */
static inline SkPMColor SkPackARGB32(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
    SkA32Assert(a);
    SkASSERT(r <= a);
    SkASSERT(g <= a);
    SkASSERT(b <= a);

    return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
           (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
}
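
// Example (hypothetical, already-premultiplied components): packing a
// half-opaque gray; note that r, g, and b must not exceed a.
//
//     SkPMColor c = SkPackARGB32(0x80, 0x40, 0x40, 0x40);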

/**
 *  Same as SkPackARGB32, except that this version does not check, even in the
 *  debug build, that the values are premultiplied.
 */
static inline SkPMColor SkPackARGB32NoCheck(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
    return (a << SK_A32_SHIFT) | (r << SK_R32_SHIFT) |
           (g << SK_G32_SHIFT) | (b << SK_B32_SHIFT);
}

static inline
SkPMColor SkPremultiplyARGBInline(U8CPU a, U8CPU r, U8CPU g, U8CPU b) {
    SkA32Assert(a);
    SkR32Assert(r);
    SkG32Assert(g);
    SkB32Assert(b);

    if (a != 255) {
        r = SkMulDiv255Round(r, a);
        g = SkMulDiv255Round(g, a);
        b = SkMulDiv255Round(b, a);
    }
    return SkPackARGB32(a, r, g, b);
}
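
// Example (hypothetical unpremultiplied input): premultiplying half-opaque
// red. Each color channel is scaled by alpha with rounding:
// SkMulDiv255Round(255, 128) == 128.
//
//     SkPMColor c = SkPremultiplyARGBInline(128, 255, 0, 0);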

// When Android is compiled optimizing for size, SkAlphaMulQ doesn't get
// inlined; forcing inlining significantly improves performance.
static SK_ALWAYS_INLINE uint32_t SkAlphaMulQ(uint32_t c, unsigned scale) {
    uint32_t mask = 0xFF00FF;

    // Scale the R|B and A|G channel pairs in parallel, two lanes at a time,
    // then merge the two results back into one pixel.
    uint32_t rb = ((c & mask) * scale) >> 8;
    uint32_t ag = ((c >> 8) & mask) * scale;
    return (rb & mask) | (ag & ~mask);
}
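
// Example (hypothetical values): SkAlphaMulQ scales all four channels at once
// by a 0..256 scale.
//
//     SkAlphaMulQ(0xFFFFFFFF, 256);  // 0xFFFFFFFF (256 is the identity scale)
//     SkAlphaMulQ(0xFFFFFFFF, 128);  // 0x7F7F7F7F (every channel halved)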

/** Src-over blend of two premultiplied colors: src + dst * (1 - srcA),
    with dst scaled by the 0..256 value SkAlpha255To256(255 - srcA).
*/
static inline SkPMColor SkPMSrcOver(SkPMColor src, SkPMColor dst) {
    return src + SkAlphaMulQ(dst, SkAlpha255To256(255 - SkGetPackedA32(src)));
}
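
// Example (hypothetical premultiplied colors): with an opaque src,
// 255 - srcA == 0, the dst scale is SkAlpha255To256(0) == 1, and every dst
// channel becomes (channel * 1) >> 8 == 0, so the result is src itself.
//
//     SkPMSrcOver(0xFF112233, 0xFFFFFFFF);  // == 0xFF112233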

#endif