// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// NEON version of YUV to RGB upsampling functions.
//
// Author: mans@mansr.com (Mans Rullgard)
// Based on SSE code by: somnath@google.com (Somnath Banerjee)

#include "./dsp.h"

#if defined(WEBP_USE_NEON)

#include <assert.h>
#include <arm_neon.h>
#include <string.h>
#include "./neon.h"
#include "./yuv.h"

#ifdef FANCY_UPSAMPLING

//-----------------------------------------------------------------------------
// U/V upsampling

// Loads 9 pixels each from rows r1 and r2 and generates 16 pixels.
#define UPSAMPLE_16PIXELS(r1, r2, out) do { \
  const uint8x8_t a = vld1_u8(r1 + 0); \
  const uint8x8_t b = vld1_u8(r1 + 1); \
  const uint8x8_t c = vld1_u8(r2 + 0); \
  const uint8x8_t d = vld1_u8(r2 + 1); \
  /* a + b + c + d */ \
  const uint16x8_t ad = vaddl_u8(a, d); \
  const uint16x8_t bc = vaddl_u8(b, c); \
  const uint16x8_t abcd = vaddq_u16(ad, bc); \
  /* 3a + b + c + 3d */ \
  const uint16x8_t al = vaddq_u16(abcd, vshlq_n_u16(ad, 1)); \
  /* a + 3b + 3c + d */ \
  const uint16x8_t bl = vaddq_u16(abcd, vshlq_n_u16(bc, 1)); \
  \
  const uint8x8_t diag2 = vshrn_n_u16(al, 3); \
  const uint8x8_t diag1 = vshrn_n_u16(bl, 3); \
  \
  const uint8x8_t A = vrhadd_u8(a, diag1); \
  const uint8x8_t B = vrhadd_u8(b, diag2); \
  const uint8x8_t C = vrhadd_u8(c, diag2); \
  const uint8x8_t D = vrhadd_u8(d, diag1); \
  \
  uint8x8x2_t A_B, C_D; \
  INIT_VECTOR2(A_B, A, B); \
  INIT_VECTOR2(C_D, C, D); \
  vst2_u8(out + 0, A_B); \
  vst2_u8(out + 32, C_D); \
} while (0)
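
// A sketch of the arithmetic above: with diag1 = (a + 3b + 3c + d) >> 3 and
// diag2 = (3a + b + c + 3d) >> 3, the rounding halving-adds yield the usual
// 'fancy upsampling' weights {9, 3, 3, 1} / 16. E.g., up to truncation in
// the narrowing shifts:
//   A = (a + diag1 + 1) >> 1  ~=  (9a + 3b + 3c + d + 8) >> 4
// and symmetrically for B, C and D.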

// Turn the macro into a function, to reduce code size when speed is
// non-critical.
static void Upsample16Pixels(const uint8_t *r1, const uint8_t *r2,
                             uint8_t *out) {
  UPSAMPLE_16PIXELS(r1, r2, out);
}

#define UPSAMPLE_LAST_BLOCK(tb, bb, num_pixels, out) { \
  uint8_t r1[9], r2[9]; \
  memcpy(r1, (tb), (num_pixels)); \
  memcpy(r2, (bb), (num_pixels)); \
  /* replicate last byte */ \
  memset(r1 + (num_pixels), r1[(num_pixels) - 1], 9 - (num_pixels)); \
  memset(r2 + (num_pixels), r2[(num_pixels) - 1], 9 - (num_pixels)); \
  Upsample16Pixels(r1, r2, out); \
}
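
// Note: UPSAMPLE_16PIXELS always reads 9 bytes per row (vld1_u8 at offsets
// 0 and 1), so when fewer than 9 source bytes remain, the leftovers are
// copied into local 9-byte buffers and the last byte is replicated to keep
// the loads in bounds.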

//-----------------------------------------------------------------------------
// YUV->RGB conversion

// note: the large constant 33050 is represented as 32768 + 282
static const int16_t kCoeffs1[4] = { 19077, 26149, 6419, 13320 };
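
// How the fixed-point multiplies work (a reference note, not used by the
// build): vqdmulhq_lane_s16() computes (x * c * 2) >> 16, and the y/u/v
// inputs are pre-shifted left by 7 bits in CONVERT8 below, so each multiply
// amounts to MultHi(v, c) := (v * c) >> 8 on the original 8-bit value. The
// B-channel U coefficient 33050 does not fit in an int16_t lane; its 282
// part goes through vqdmulhq_n_s16() while the 32768 part contributes
// (u * 32768) >> 8 == u << 7, i.e. exactly U0, which is why CONVERT8 adds
// U0 back when forming B.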

#define v255 vdup_n_u8(255)

#define STORE_Rgb(out, r, g, b) do { \
  uint8x8x3_t r_g_b; \
  INIT_VECTOR3(r_g_b, r, g, b); \
  vst3_u8(out, r_g_b); \
} while (0)

#define STORE_Bgr(out, r, g, b) do { \
  uint8x8x3_t b_g_r; \
  INIT_VECTOR3(b_g_r, b, g, r); \
  vst3_u8(out, b_g_r); \
} while (0)

#define STORE_Rgba(out, r, g, b) do { \
  uint8x8x4_t r_g_b_v255; \
  INIT_VECTOR4(r_g_b_v255, r, g, b, v255); \
  vst4_u8(out, r_g_b_v255); \
} while (0)

#define STORE_Bgra(out, r, g, b) do { \
  uint8x8x4_t b_g_r_v255; \
  INIT_VECTOR4(b_g_r_v255, b, g, r, v255); \
  vst4_u8(out, b_g_r_v255); \
} while (0)

#define STORE_Argb(out, r, g, b) do { \
  uint8x8x4_t v255_r_g_b; \
  INIT_VECTOR4(v255_r_g_b, v255, r, g, b); \
  vst4_u8(out, v255_r_g_b); \
} while (0)

#if !defined(WEBP_SWAP_16BIT_CSP)
#define ZIP_U8(lo, hi) vzip_u8((lo), (hi))
#else
#define ZIP_U8(lo, hi) vzip_u8((hi), (lo))
#endif

#define STORE_Rgba4444(out, r, g, b) do { \
  const uint8x8_t rg = vsri_n_u8(r, g, 4);     /* shift g, insert r */ \
  const uint8x8_t ba = vsri_n_u8(b, v255, 4);  /* shift a, insert b */ \
  const uint8x8x2_t rgba4444 = ZIP_U8(rg, ba); \
  vst1q_u8(out, vcombine_u8(rgba4444.val[0], rgba4444.val[1])); \
} while (0)

#define STORE_Rgb565(out, r, g, b) do { \
  const uint8x8_t rg = vsri_n_u8(r, g, 5);   /* shift g and insert r */ \
  const uint8x8_t g1 = vshl_n_u8(g, 3);      /* pre-shift g: 3bits */ \
  const uint8x8_t gb = vsri_n_u8(g1, b, 3);  /* shift b and insert g */ \
  const uint8x8x2_t rgb565 = ZIP_U8(rg, gb); \
  vst1q_u8(out, vcombine_u8(rgb565.val[0], rgb565.val[1])); \
} while (0)
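
// Per-byte layout produced by the vsri_n_u8() merges above (before the
// optional WEBP_SWAP_16BIT_CSP byte swap):
//   RGBA4444: byte0 = (r & 0xf0) | (g >> 4), byte1 = (b & 0xf0) | 0x0f
//   RGB565:   byte0 = (r & 0xf8) | (g >> 5),
//             byte1 = ((g << 3) & 0xe0) | (b >> 3)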

#define CONVERT8(FMT, XSTEP, N, src_y, src_uv, out, cur_x) do { \
  int i; \
  for (i = 0; i < N; i += 8) { \
    const int off = ((cur_x) + i) * XSTEP; \
    const uint8x8_t y = vld1_u8((src_y) + (cur_x) + i); \
    const uint8x8_t u = vld1_u8((src_uv) + i + 0); \
    const uint8x8_t v = vld1_u8((src_uv) + i + 16); \
    const int16x8_t Y0 = vreinterpretq_s16_u16(vshll_n_u8(y, 7)); \
    const int16x8_t U0 = vreinterpretq_s16_u16(vshll_n_u8(u, 7)); \
    const int16x8_t V0 = vreinterpretq_s16_u16(vshll_n_u8(v, 7)); \
    const int16x8_t Y1 = vqdmulhq_lane_s16(Y0, coeff1, 0); \
    const int16x8_t R0 = vqdmulhq_lane_s16(V0, coeff1, 1); \
    const int16x8_t G0 = vqdmulhq_lane_s16(U0, coeff1, 2); \
    const int16x8_t G1 = vqdmulhq_lane_s16(V0, coeff1, 3); \
    const int16x8_t B0 = vqdmulhq_n_s16(U0, 282); \
    const int16x8_t R1 = vqaddq_s16(Y1, R_Rounder); \
    const int16x8_t G2 = vqaddq_s16(Y1, G_Rounder); \
    const int16x8_t B1 = vqaddq_s16(Y1, B_Rounder); \
    const int16x8_t R2 = vqaddq_s16(R0, R1); \
    const int16x8_t G3 = vqaddq_s16(G0, G1); \
    const int16x8_t B2 = vqaddq_s16(B0, B1); \
    const int16x8_t G4 = vqsubq_s16(G2, G3); \
    const int16x8_t B3 = vqaddq_s16(B2, U0); \
    const uint8x8_t R = vqshrun_n_s16(R2, YUV_FIX2); \
    const uint8x8_t G = vqshrun_n_s16(G4, YUV_FIX2); \
    const uint8x8_t B = vqshrun_n_s16(B3, YUV_FIX2); \
    STORE_ ## FMT(out + off, R, G, B); \
  } \
} while (0)
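
// coeff1 and the *_Rounder vectors above are locals of NEON_UPSAMPLE_FUNC
// below. In scalar terms (with MultHi(v, c) := (v * c) >> 8 and YUV_FIX2
// being 6, as defined in yuv.h, modulo fixed-point truncation), each lane
// computes:
//   R = clamp8((MultHi(y, 19077) + MultHi(v, 26149) - 14234) >> 6)
//   G = clamp8((MultHi(y, 19077) - MultHi(u, 6419) - MultHi(v, 13320)
//               + 8708) >> 6)
//   B = clamp8((MultHi(y, 19077) + MultHi(u, 33050) - 17685) >> 6)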

#define CONVERT1(FUNC, XSTEP, N, src_y, src_uv, rgb, cur_x) { \
  int i; \
  for (i = 0; i < N; i++) { \
    const int off = ((cur_x) + i) * XSTEP; \
    const int y = src_y[(cur_x) + i]; \
    const int u = (src_uv)[i]; \
    const int v = (src_uv)[i + 16]; \
    FUNC(y, u, v, rgb + off); \
  } \
}

#define CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, uv, \
                      top_dst, bottom_dst, cur_x, len) { \
  CONVERT8(FMT, XSTEP, len, top_y, uv, top_dst, cur_x); \
  if (bottom_y != NULL) { \
    CONVERT8(FMT, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x); \
  } \
}

#define CONVERT2RGB_1(FUNC, XSTEP, top_y, bottom_y, uv, \
                      top_dst, bottom_dst, cur_x, len) { \
  CONVERT1(FUNC, XSTEP, len, top_y, uv, top_dst, cur_x); \
  if (bottom_y != NULL) { \
    CONVERT1(FUNC, XSTEP, len, bottom_y, (uv) + 32, bottom_dst, cur_x); \
  } \
}

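// The reconstructed u/v samples are cached in r_uv as four 16-byte rows:
// top-row u at +0, top-row v at +16, bottom-row u at +32 and bottom-row v
// at +48; this is why the CONVERT* macros read v at (uv) + 16 and the
// bottom image row at (uv) + 32.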
#define NEON_UPSAMPLE_FUNC(FUNC_NAME, FMT, XSTEP) \
static void FUNC_NAME(const uint8_t *top_y, const uint8_t *bottom_y, \
                      const uint8_t *top_u, const uint8_t *top_v, \
                      const uint8_t *cur_u, const uint8_t *cur_v, \
                      uint8_t *top_dst, uint8_t *bottom_dst, int len) { \
  int block; \
  /* 16-byte-aligned array to cache reconstructed u and v */ \
  uint8_t uv_buf[2 * 32 + 15]; \
  uint8_t *const r_uv = (uint8_t*)((uintptr_t)(uv_buf + 15) & ~15); \
  const int uv_len = (len + 1) >> 1; \
  /* 9 pixels must be readable for each block */ \
  const int num_blocks = (uv_len - 1) >> 3; \
  const int leftover = uv_len - num_blocks * 8; \
  const int last_pos = 1 + 16 * num_blocks; \
  \
  const int u_diag = ((top_u[0] + cur_u[0]) >> 1) + 1; \
  const int v_diag = ((top_v[0] + cur_v[0]) >> 1) + 1; \
  \
  const int16x4_t coeff1 = vld1_s16(kCoeffs1); \
  const int16x8_t R_Rounder = vdupq_n_s16(-14234); \
  const int16x8_t G_Rounder = vdupq_n_s16(8708); \
  const int16x8_t B_Rounder = vdupq_n_s16(-17685); \
  \
  /* Treat the first pixel in the regular way */ \
  assert(top_y != NULL); \
  { \
    const int u0 = (top_u[0] + u_diag) >> 1; \
    const int v0 = (top_v[0] + v_diag) >> 1; \
    VP8YuvTo ## FMT(top_y[0], u0, v0, top_dst); \
  } \
  if (bottom_y != NULL) { \
    const int u0 = (cur_u[0] + u_diag) >> 1; \
    const int v0 = (cur_v[0] + v_diag) >> 1; \
    VP8YuvTo ## FMT(bottom_y[0], u0, v0, bottom_dst); \
  } \
  \
  for (block = 0; block < num_blocks; ++block) { \
    UPSAMPLE_16PIXELS(top_u, cur_u, r_uv); \
    UPSAMPLE_16PIXELS(top_v, cur_v, r_uv + 16); \
    CONVERT2RGB_8(FMT, XSTEP, top_y, bottom_y, r_uv, \
                  top_dst, bottom_dst, 16 * block + 1, 16); \
    top_u += 8; \
    cur_u += 8; \
    top_v += 8; \
    cur_v += 8; \
  } \
  \
  UPSAMPLE_LAST_BLOCK(top_u, cur_u, leftover, r_uv); \
  UPSAMPLE_LAST_BLOCK(top_v, cur_v, leftover, r_uv + 16); \
  CONVERT2RGB_1(VP8YuvTo ## FMT, XSTEP, top_y, bottom_y, r_uv, \
                top_dst, bottom_dst, last_pos, len - last_pos); \
}
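
// The alignment trick above: over-allocate 15 bytes and round the pointer
// up. (p + 15) & ~15 clears the low four address bits, e.g. an address
// ending in 0x...9 becomes 0x...10. The 64 bytes actually used by r_uv
// always fit within the 79-byte uv_buf[].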

// NEON variants of the fancy upsampler.
NEON_UPSAMPLE_FUNC(UpsampleRgbLinePair, Rgb, 3)
NEON_UPSAMPLE_FUNC(UpsampleBgrLinePair, Bgr, 3)
NEON_UPSAMPLE_FUNC(UpsampleRgbaLinePair, Rgba, 4)
NEON_UPSAMPLE_FUNC(UpsampleBgraLinePair, Bgra, 4)
NEON_UPSAMPLE_FUNC(UpsampleArgbLinePair, Argb, 4)
NEON_UPSAMPLE_FUNC(UpsampleRgba4444LinePair, Rgba4444, 2)
NEON_UPSAMPLE_FUNC(UpsampleRgb565LinePair, Rgb565, 2)

//-----------------------------------------------------------------------------
// Entry point

extern WebPUpsampleLinePairFunc WebPUpsamplers[/* MODE_LAST */];

extern void WebPInitUpsamplersNEON(void);

WEBP_TSAN_IGNORE_FUNCTION void WebPInitUpsamplersNEON(void) {
  WebPUpsamplers[MODE_RGB] = UpsampleRgbLinePair;
  WebPUpsamplers[MODE_RGBA] = UpsampleRgbaLinePair;
  WebPUpsamplers[MODE_BGR] = UpsampleBgrLinePair;
  WebPUpsamplers[MODE_BGRA] = UpsampleBgraLinePair;
  WebPUpsamplers[MODE_ARGB] = UpsampleArgbLinePair;
  WebPUpsamplers[MODE_rgbA] = UpsampleRgbaLinePair;
  WebPUpsamplers[MODE_bgrA] = UpsampleBgraLinePair;
  WebPUpsamplers[MODE_Argb] = UpsampleArgbLinePair;
  WebPUpsamplers[MODE_RGB_565] = UpsampleRgb565LinePair;
  WebPUpsamplers[MODE_RGBA_4444] = UpsampleRgba4444LinePair;
  WebPUpsamplers[MODE_rgbA_4444] = UpsampleRgba4444LinePair;
}

#endif  // FANCY_UPSAMPLING

#endif  // WEBP_USE_NEON

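// When either NEON or fancy upsampling is compiled out, WEBP_DSP_INIT_STUB
// provides an empty WebPInitUpsamplersNEON() so the initializer can be
// called unconditionally by the generic dispatcher.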
#if !(defined(FANCY_UPSAMPLING) && defined(WEBP_USE_NEON))
WEBP_DSP_INIT_STUB(WebPInitUpsamplersNEON)
#endif