1 | // Copyright 2011 Google Inc. All Rights Reserved. |
2 | // |
3 | // Use of this source code is governed by a BSD-style license |
4 | // that can be found in the COPYING file in the root of the source |
5 | // tree. An additional intellectual property rights grant can be found |
6 | // in the file PATENTS. All contributing project authors may |
7 | // be found in the AUTHORS file in the root of the source tree. |
8 | // ----------------------------------------------------------------------------- |
9 | // |
10 | // SSE2 version of YUV to RGB upsampling functions. |
11 | // |
12 | // Author: somnath@google.com (Somnath Banerjee) |
13 | |
14 | #include "./dsp.h" |
15 | |
16 | #if defined(WEBP_USE_SSE2) |
17 | |
18 | #include <assert.h> |
19 | #include <emmintrin.h> |
20 | #include <string.h> |
21 | #include "./yuv.h" |
22 | |
23 | #ifdef FANCY_UPSAMPLING |
24 | |
25 | // We compute (9*a + 3*b + 3*c + d + 8) / 16 as follows |
26 | // u = (9*a + 3*b + 3*c + d + 8) / 16 |
27 | // = (a + (a + 3*b + 3*c + d) / 8 + 1) / 2 |
28 | // = (a + m + 1) / 2 |
29 | // where m = (a + 3*b + 3*c + d) / 8 |
30 | // = ((a + b + c + d) / 2 + b + c) / 4 |
31 | // |
32 | // Let's say k = (a + b + c + d) / 4. |
33 | // We can compute k as |
34 | // k = (s + t + 1) / 2 - ((a^d) | (b^c) | (s^t)) & 1 |
35 | // where s = (a + d + 1) / 2 and t = (b + c + 1) / 2 |
36 | // |
37 | // Then m can be written as |
38 | // m = (k + t + 1) / 2 - (((b^c) & (s^t)) | (k^t)) & 1 |
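//
// As a plain-C reference for the filter above (a hypothetical helper, not
// used by the SIMD code below), the weighted average is simply:
//
//   static WEBP_INLINE uint8_t FancyBlend(uint8_t a, uint8_t b,
//                                         uint8_t c, uint8_t d) {
//     return (uint8_t)((9 * a + 3 * b + 3 * c + d + 8) >> 4);
//   }
//
// The SSE2 code below reaches the same value using only byte-wise averages
// (_mm_avg_epu8 rounds up), with the xor/and terms above subtracting the
// rounding bias those averages would otherwise accumulate.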
39 | |
40 | // Computes out = (k + in + 1) / 2 - ((ij & (s^t)) | (k^in)) & 1 |
41 | #define GET_M(ij, in, out) do { \ |
42 | const __m128i tmp0 = _mm_avg_epu8(k, (in)); /* (k + in + 1) / 2 */ \ |
43 | const __m128i tmp1 = _mm_and_si128((ij), st); /* (ij) & (s^t) */ \ |
44 | const __m128i tmp2 = _mm_xor_si128(k, (in)); /* (k^in) */ \ |
45 | const __m128i tmp3 = _mm_or_si128(tmp1, tmp2); /* ((ij) & (s^t)) | (k^in) */\ |
46 | const __m128i tmp4 = _mm_and_si128(tmp3, one); /* & 1 -> lsb_correction */ \ |
47 | (out) = _mm_sub_epi8(tmp0, tmp4); /* (k + in + 1) / 2 - lsb_correction */ \ |
48 | } while (0) |
49 | |
50 | // pack and store two alternating pixel rows |
51 | #define PACK_AND_STORE(a, b, da, db, out) do { \ |
52 | const __m128i t_a = _mm_avg_epu8(a, da); /* (9a + 3b + 3c + d + 8) / 16 */ \ |
53 | const __m128i t_b = _mm_avg_epu8(b, db); /* (3a + 9b + c + 3d + 8) / 16 */ \ |
54 | const __m128i t_1 = _mm_unpacklo_epi8(t_a, t_b); \ |
55 | const __m128i t_2 = _mm_unpackhi_epi8(t_a, t_b); \ |
56 | _mm_store_si128(((__m128i*)(out)) + 0, t_1); \ |
57 | _mm_store_si128(((__m128i*)(out)) + 1, t_2); \ |
58 | } while (0) |
59 | |
60 | // Loads 17 pixels each from rows r1 and r2 and generates 32 pixels. |
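// The two loads from &(r1)[0] and &(r1)[1] (and likewise for r2) overlap by
// 15 bytes, so lane i of (a, b, c, d) holds the 2x2 input block
// a = r1[i], b = r1[i + 1], c = r2[i], d = r2[i + 1].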
61 | #define UPSAMPLE_32PIXELS(r1, r2, out) { \ |
62 | const __m128i one = _mm_set1_epi8(1); \ |
63 | const __m128i a = _mm_loadu_si128((const __m128i*)&(r1)[0]); \ |
64 | const __m128i b = _mm_loadu_si128((const __m128i*)&(r1)[1]); \ |
65 | const __m128i c = _mm_loadu_si128((const __m128i*)&(r2)[0]); \ |
66 | const __m128i d = _mm_loadu_si128((const __m128i*)&(r2)[1]); \ |
67 | \ |
68 | const __m128i s = _mm_avg_epu8(a, d); /* s = (a + d + 1) / 2 */ \ |
69 | const __m128i t = _mm_avg_epu8(b, c); /* t = (b + c + 1) / 2 */ \ |
70 | const __m128i st = _mm_xor_si128(s, t); /* st = s^t */ \ |
71 | \ |
72 | const __m128i ad = _mm_xor_si128(a, d); /* ad = a^d */ \ |
73 | const __m128i bc = _mm_xor_si128(b, c); /* bc = b^c */ \ |
74 | \ |
75 | const __m128i t1 = _mm_or_si128(ad, bc); /* (a^d) | (b^c) */ \ |
76 | const __m128i t2 = _mm_or_si128(t1, st); /* (a^d) | (b^c) | (s^t) */ \ |
const __m128i t3 = _mm_and_si128(t2, one); /* ((a^d) | (b^c) | (s^t)) & 1 */ \
78 | const __m128i t4 = _mm_avg_epu8(s, t); \ |
79 | const __m128i k = _mm_sub_epi8(t4, t3); /* k = (a + b + c + d) / 4 */ \ |
80 | __m128i diag1, diag2; \ |
81 | \ |
82 | GET_M(bc, t, diag1); /* diag1 = (a + 3b + 3c + d) / 8 */ \ |
83 | GET_M(ad, s, diag2); /* diag2 = (3a + b + c + 3d) / 8 */ \ |
84 | \ |
85 | /* pack the alternate pixels */ \ |
86 | PACK_AND_STORE(a, b, diag1, diag2, out + 0); /* store top */ \ |
87 | PACK_AND_STORE(c, d, diag2, diag1, out + 2 * 32); /* store bottom */ \ |
88 | } |
89 | |
// Turn the macro into a function to reduce code size where speed is not critical.
91 | static void Upsample32Pixels(const uint8_t r1[], const uint8_t r2[], |
92 | uint8_t* const out) { |
93 | UPSAMPLE_32PIXELS(r1, r2, out); |
94 | } |
95 | |
96 | #define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) { \ |
97 | uint8_t r1[17], r2[17]; \ |
98 | memcpy(r1, (tb), (num_pixels)); \ |
99 | memcpy(r2, (bb), (num_pixels)); \ |
100 | /* replicate last byte */ \ |
101 | memset(r1 + (num_pixels), r1[(num_pixels) - 1], 17 - (num_pixels)); \ |
102 | memset(r2 + (num_pixels), r2[(num_pixels) - 1], 17 - (num_pixels)); \ |
103 | /* using the shared function instead of the macro saves ~3k code size */ \ |
104 | Upsample32Pixels(r1, r2, out); \ |
105 | } |
106 | |
107 | #define CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, \ |
108 | top_dst, bottom_dst, cur_x, num_pixels) { \ |
109 | int n; \ |
110 | for (n = 0; n < (num_pixels); ++n) { \ |
111 | FUNC(top_y[(cur_x) + n], r_u[n], r_v[n], \ |
112 | top_dst + ((cur_x) + n) * XSTEP); \ |
113 | } \ |
114 | if (bottom_y != NULL) { \ |
115 | for (n = 0; n < (num_pixels); ++n) { \ |
116 | FUNC(bottom_y[(cur_x) + n], r_u[64 + n], r_v[64 + n], \ |
117 | bottom_dst + ((cur_x) + n) * XSTEP); \ |
118 | } \ |
119 | } \ |
120 | } |
121 | |
122 | #define CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, \ |
123 | top_dst, bottom_dst, cur_x) do { \ |
124 | FUNC##32(top_y + (cur_x), r_u, r_v, top_dst + (cur_x) * XSTEP); \ |
125 | if (bottom_y != NULL) { \ |
126 | FUNC##32(bottom_y + (cur_x), r_u + 64, r_v + 64, \ |
127 | bottom_dst + (cur_x) * XSTEP); \ |
128 | } \ |
129 | } while (0) |
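
// SSE2_UPSAMPLE_FUNC emits one full line-pair converter: the first column is
// handled with scalar code, the bulk is converted 32 RGB pixels (16 u/v
// samples) per iteration, and the remaining columns go through a last block
// whose u/v input is padded by replicating the final sample.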
130 | |
131 | #define SSE2_UPSAMPLE_FUNC(FUNC_NAME, FUNC, XSTEP) \ |
132 | static void FUNC_NAME(const uint8_t* top_y, const uint8_t* bottom_y, \ |
133 | const uint8_t* top_u, const uint8_t* top_v, \ |
134 | const uint8_t* cur_u, const uint8_t* cur_v, \ |
135 | uint8_t* top_dst, uint8_t* bottom_dst, int len) { \ |
136 | int uv_pos, pos; \ |
  /* 16-byte-aligned array to cache reconstructed u and v */                  \
138 | uint8_t uv_buf[4 * 32 + 15]; \ |
139 | uint8_t* const r_u = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \ |
140 | uint8_t* const r_v = r_u + 32; \ |
141 | \ |
142 | assert(top_y != NULL); \ |
  { /* Treat the first pixel in the regular (scalar) way */                   \
144 | const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \ |
145 | const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \ |
146 | const int u0_t = (top_u[0] + u_diag) >> 1; \ |
147 | const int v0_t = (top_v[0] + v_diag) >> 1; \ |
148 | FUNC(top_y[0], u0_t, v0_t, top_dst); \ |
149 | if (bottom_y != NULL) { \ |
150 | const int u0_b = (cur_u[0] + u_diag) >> 1; \ |
151 | const int v0_b = (cur_v[0] + v_diag) >> 1; \ |
152 | FUNC(bottom_y[0], u0_b, v0_b, bottom_dst); \ |
153 | } \ |
154 | } \ |
  /* For UPSAMPLE_32PIXELS, 17 u/v values must be readable for each block */  \
156 | for (pos = 1, uv_pos = 0; pos + 32 + 1 <= len; pos += 32, uv_pos += 16) { \ |
157 | UPSAMPLE_32PIXELS(top_u + uv_pos, cur_u + uv_pos, r_u); \ |
158 | UPSAMPLE_32PIXELS(top_v + uv_pos, cur_v + uv_pos, r_v); \ |
159 | CONVERT2RGB_32(FUNC, XSTEP, top_y, bottom_y, top_dst, bottom_dst, pos); \ |
160 | } \ |
161 | if (len > 1) { \ |
162 | const int left_over = ((len + 1) >> 1) - (pos >> 1); \ |
163 | assert(left_over > 0); \ |
164 | UPSAMPLE_LAST_BLOCK(top_u + uv_pos, cur_u + uv_pos, left_over, r_u); \ |
165 | UPSAMPLE_LAST_BLOCK(top_v + uv_pos, cur_v + uv_pos, left_over, r_v); \ |
166 | CONVERT2RGB(FUNC, XSTEP, top_y, bottom_y, top_dst, bottom_dst, \ |
167 | pos, len - pos); \ |
168 | } \ |
169 | } |
170 | |
171 | // SSE2 variants of the fancy upsampler. |
172 | SSE2_UPSAMPLE_FUNC(UpsampleRgbLinePair, VP8YuvToRgb, 3) |
173 | SSE2_UPSAMPLE_FUNC(UpsampleBgrLinePair, VP8YuvToBgr, 3) |
174 | SSE2_UPSAMPLE_FUNC(UpsampleRgbaLinePair, VP8YuvToRgba, 4) |
175 | SSE2_UPSAMPLE_FUNC(UpsampleBgraLinePair, VP8YuvToBgra, 4) |
176 | SSE2_UPSAMPLE_FUNC(UpsampleArgbLinePair, VP8YuvToArgb, 4) |
177 | SSE2_UPSAMPLE_FUNC(UpsampleRgba4444LinePair, VP8YuvToRgba4444, 2) |
178 | SSE2_UPSAMPLE_FUNC(UpsampleRgb565LinePair, VP8YuvToRgb565, 2) |
179 | |
180 | #undef GET_M |
181 | #undef PACK_AND_STORE |
182 | #undef UPSAMPLE_32PIXELS |
183 | #undef UPSAMPLE_LAST_BLOCK |
184 | #undef CONVERT2RGB |
185 | #undef CONVERT2RGB_32 |
186 | #undef SSE2_UPSAMPLE_FUNC |
187 | |
188 | //------------------------------------------------------------------------------ |
189 | // Entry point |
190 | |
191 | extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */]; |
192 | |
193 | extern void WebPInitUpsamplersSSE2(void); |
194 | |
WEBP_TSAN_IGNORE_FUNCTION void WebPInitUpsamplersSSE2(void) {
196 | WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePair; |
197 | WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePair; |
198 | WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePair; |
199 | WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePair; |
200 | WebPUpsamplers[MODE_ARGB] = UpsampleArgbLinePair; |
201 | WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePair; |
202 | WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePair; |
203 | WebPUpsamplers[MODE_Argb] = UpsampleArgbLinePair; |
204 | WebPUpsamplers[MODE_RGB_565] = UpsampleRgb565LinePair; |
205 | WebPUpsamplers[MODE_RGBA_4444] = UpsampleRgba4444LinePair; |
206 | WebPUpsamplers[MODE_rgbA_4444] = UpsampleRgba4444LinePair; |
207 | } |
208 | |
209 | #endif // FANCY_UPSAMPLING |
210 | |
211 | //------------------------------------------------------------------------------ |
212 | |
213 | extern WebPYUV444Converter WebPYUV444Converters[/* MODE_LAST */]; |
214 | extern void WebPInitYUV444ConvertersSSE2(void); |
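
// YUV444_FUNC processes the bulk of the line 32 pixels at a time with the
// SSE2 'CALL' routine, then hands the remaining (len & 31) pixels to the
// matching plain-C converter.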
215 | |
216 | #define YUV444_FUNC(FUNC_NAME, CALL, XSTEP) \ |
217 | extern void WebP##FUNC_NAME##C(const uint8_t* y, const uint8_t* u, \ |
218 | const uint8_t* v, uint8_t* dst, int len); \ |
219 | static void FUNC_NAME(const uint8_t* y, const uint8_t* u, const uint8_t* v, \ |
220 | uint8_t* dst, int len) { \ |
221 | int i; \ |
222 | const int max_len = len & ~31; \ |
223 | for (i = 0; i < max_len; i += 32) CALL(y + i, u + i, v + i, dst + i * XSTEP);\ |
224 | if (i < len) { /* C-fallback */ \ |
225 | WebP##FUNC_NAME##C(y + i, u + i, v + i, dst + i * XSTEP, len - i); \ |
226 | } \ |
227 | } |
228 | |
229 | YUV444_FUNC(Yuv444ToRgba, VP8YuvToRgba32, 4); |
230 | YUV444_FUNC(Yuv444ToBgra, VP8YuvToBgra32, 4); |
231 | YUV444_FUNC(Yuv444ToRgb, VP8YuvToRgb32, 3); |
232 | YUV444_FUNC(Yuv444ToBgr, VP8YuvToBgr32, 3); |
233 | |
WEBP_TSAN_IGNORE_FUNCTION void WebPInitYUV444ConvertersSSE2(void) {
235 | WebPYUV444Converters[MODE_RGBA] = Yuv444ToRgba; |
236 | WebPYUV444Converters[MODE_BGRA] = Yuv444ToBgra; |
237 | WebPYUV444Converters[MODE_RGB] = Yuv444ToRgb; |
238 | WebPYUV444Converters[MODE_BGR] = Yuv444ToBgr; |
239 | } |
240 | |
241 | #else |
242 | |
243 | WEBP_DSP_INIT_STUB(WebPInitYUV444ConvertersSSE2) |
244 | |
245 | #endif // WEBP_USE_SSE2 |
246 | |
247 | #if !(defined(FANCY_UPSAMPLING) && defined(WEBP_USE_SSE2)) |
248 | WEBP_DSP_INIT_STUB(WebPInitUpsamplersSSE2) |
249 | #endif |
250 | |