/*
 * Copyright (c) 2021 - 2023 the ThorVG project. All rights reserved.

 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:

 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.

 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
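
//NEON fast paths for the software (SW) rasterizer. Everything here is compiled
//only when THORVG_NEON_VECTOR_SUPPORT is defined, and this header is presumably
//#included from the rasterizer translation unit that provides SwSurface,
//SwRleData, TVGERR(), IALPHA() and the scalar uint32_t ALPHA_BLEND() overload.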

#ifdef THORVG_NEON_VECTOR_SUPPORT

#include <arm_neon.h>

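//Per-lane (c * a) >> 8 across eight 8-bit channels: a cheap approximation of
//c * a / 255. vmull_u8() widens to 16 bits so the product cannot overflow;
//vshrn_n_u16() shifts right by 8 and narrows back down to 8 bits.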
static inline uint8x8_t ALPHA_BLEND(uint8x8_t c, uint8x8_t a)
{
    uint16x8_t t = vmull_u8(c, a);
    return vshrn_n_u16(t, 8);
}


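//Fills len consecutive 32-bit pixels at dst + offset with val: four pixels per
//128-bit vst1q_u32() store, with the remaining 0 - 3 pixels written scalar.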
static void neonRasterPixel32(uint32_t *dst, uint32_t val, uint32_t offset, int32_t len)
{
    uint32_t iterations = len / 4;
    uint32_t neonFilled = iterations * 4;

    dst += offset;
    uint32x4_t vectorVal = vdupq_n_u32(val);

    for (uint32_t i = 0; i < iterations; ++i) {
        vst1q_u32(dst, vectorVal);
        dst += 4;
    }

    int32_t leftovers = len - neonFilled;
    while (leftovers--) *dst++ = val;
}


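//Blends a translucent color over each RLE span (premultiplied source-over:
//dst = src + dst * (255 - alpha)), with the span color first weighted by its
//coverage. A leading pixel is blended scalar when dst is not 8-byte aligned,
//the bulk runs two pixels per uint8x8_t, and an odd trailing pixel falls back
//to scalar as well.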
static bool neonRasterTranslucentRle(SwSurface* surface, const SwRleData* rle, uint8_t r, uint8_t g, uint8_t b, uint8_t a)
{
    if (surface->channelSize != sizeof(uint32_t)) {
        TVGERR("SW_ENGINE", "Unsupported Channel Size = %d", surface->channelSize);
        return false;
    }

    auto color = surface->blender.join(r, g, b, a);
    auto span = rle->spans;
    uint32_t src;
    uint8x8_t* vDst = nullptr;
    uint16_t align;

    for (uint32_t i = 0; i < rle->size; ++i) {
        //weight the color by the span coverage (scalar ALPHA_BLEND overload)
        if (span->coverage < 255) src = ALPHA_BLEND(color, span->coverage);
        else src = color;

        auto dst = &surface->buf32[span->y * surface->stride + span->x];
        auto ialpha = IALPHA(src);

        //uintptr_t keeps the alignment check correct on 64-bit pointers
        if ((((uintptr_t) dst) & 0x7) != 0) {
            //fill the not-aligned leading pixel scalar
            *dst = src + ALPHA_BLEND(*dst, ialpha);
            vDst = (uint8x8_t*)(dst + 1);
            align = 1;
        } else {
            vDst = (uint8x8_t*) dst;
            align = 0;
        }

        uint8x8_t vSrc = vreinterpret_u8_u32(vdup_n_u32(src));
        uint8x8_t vIalpha = vdup_n_u8((uint8_t) ialpha);

        //two pixels (8 bytes) per iteration
        for (uint32_t x = 0; x < (span->len - align) / 2; ++x)
            vDst[x] = vadd_u8(vSrc, ALPHA_BLEND(vDst[x], vIalpha));

        auto leftovers = (span->len - align) % 2;
        if (leftovers > 0) dst[span->len - 1] = src + ALPHA_BLEND(dst[span->len - 1], ialpha);

        ++span;
    }
    return true;
}


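//Same source-over blend as the RLE case, applied row by row over a w x h
//rectangle. ialpha is the constant 255 - a here, since a rect has no per-span
//coverage to fold in.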
static bool neonRasterTranslucentRect(SwSurface* surface, const SwBBox& region, uint8_t r, uint8_t g, uint8_t b, uint8_t a)
{
    if (surface->channelSize != sizeof(uint32_t)) {
        TVGERR("SW_ENGINE", "Unsupported Channel Size = %d", surface->channelSize);
        return false;
    }

    auto color = surface->blender.join(r, g, b, a);
    auto buffer = surface->buf32 + (region.min.y * surface->stride) + region.min.x;
    auto h = static_cast<uint32_t>(region.max.y - region.min.y);
    auto w = static_cast<uint32_t>(region.max.x - region.min.x);
    auto ialpha = 255 - a;

    auto vColor = vreinterpret_u8_u32(vdup_n_u32(color));
    auto vIalpha = vdup_n_u8((uint8_t) ialpha);

    uint8x8_t* vDst = nullptr;
    uint32_t align;

    for (uint32_t y = 0; y < h; ++y) {
        auto dst = &buffer[y * surface->stride];

        //uintptr_t keeps the alignment check correct on 64-bit pointers
        if ((((uintptr_t) dst) & 0x7) != 0) {
            //fill the not-aligned leading pixel scalar
            *dst = color + ALPHA_BLEND(*dst, ialpha);
            vDst = (uint8x8_t*) (dst + 1);
            align = 1;
        } else {
            vDst = (uint8x8_t*) dst;
            align = 0;
        }

        //two pixels (8 bytes) per iteration
        for (uint32_t x = 0; x < (w - align) / 2; ++x)
            vDst[x] = vadd_u8(vColor, ALPHA_BLEND(vDst[x], vIalpha));

        auto leftovers = (w - align) % 2;
        if (leftovers > 0) dst[w - 1] = color + ALPHA_BLEND(dst[w - 1], ialpha);
    }
    return true;
}

#endif