// Copyright 2022 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Speed-critical functions for Sharp YUV.
//
// Author: Skal (pascal.massimino@gmail.com)

#include "sharpyuv/sharpyuv_dsp.h"

#if defined(WEBP_USE_NEON)
#include <assert.h>
#include <stdlib.h>
#include <arm_neon.h>

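// Clamps 'v' to [0, max] before narrowing it to uint16_t.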
static uint16_t clip_NEON(int v, int max) {
  return (v < 0) ? 0 : (v > max) ? max : (uint16_t)v;
}

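// Moves each dst[] luma sample by the difference (ref[] - src[]), clamped to
// [0, (1 << bit_depth) - 1], and returns the accumulated absolute difference
// so the caller can track convergence.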
static uint64_t SharpYuvUpdateY_NEON(const uint16_t* ref, const uint16_t* src,
                                     uint16_t* dst, int len, int bit_depth) {
  const int max_y = (1 << bit_depth) - 1;
  int i;
  const int16x8_t zero = vdupq_n_s16(0);
  const int16x8_t max = vdupq_n_s16(max_y);
  uint64x2_t sum = vdupq_n_u64(0);
  uint64_t diff;

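  // Process 8 pixels per iteration; |diff_y| values are widened pairwise and
  // accumulated into two 64-bit lanes to avoid overflow.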
  for (i = 0; i + 8 <= len; i += 8) {
    const int16x8_t A = vreinterpretq_s16_u16(vld1q_u16(ref + i));
    const int16x8_t B = vreinterpretq_s16_u16(vld1q_u16(src + i));
    const int16x8_t C = vreinterpretq_s16_u16(vld1q_u16(dst + i));
    const int16x8_t D = vsubq_s16(A, B);             // diff_y
    const int16x8_t F = vaddq_s16(C, D);             // new_y
    const uint16x8_t H =
        vreinterpretq_u16_s16(vmaxq_s16(vminq_s16(F, max), zero));
    const int16x8_t I = vabsq_s16(D);                // abs(diff_y)
    vst1q_u16(dst + i, H);
    sum = vpadalq_u32(sum, vpaddlq_u16(vreinterpretq_u16_s16(I)));
  }
  diff = vgetq_lane_u64(sum, 0) + vgetq_lane_u64(sum, 1);
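  // Scalar code for the remaining pixels.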
  for (; i < len; ++i) {
    const int diff_y = ref[i] - src[i];
    const int new_y = (int)(dst[i]) + diff_y;
    dst[i] = clip_NEON(new_y, max_y);
    diff += (uint64_t)(abs(diff_y));
  }
  return diff;
}

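// Same update for signed 16-bit samples: dst[] += (ref[] - src[]). No clamping
// or error accumulation is needed here.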
static void SharpYuvUpdateRGB_NEON(const int16_t* ref, const int16_t* src,
                                   int16_t* dst, int len) {
  int i;
  for (i = 0; i + 8 <= len; i += 8) {
    const int16x8_t A = vld1q_s16(ref + i);
    const int16x8_t B = vld1q_s16(src + i);
    const int16x8_t C = vld1q_s16(dst + i);
    const int16x8_t D = vsubq_s16(A, B);   // diff_uv
    const int16x8_t E = vaddq_s16(C, D);   // new_uv
    vst1q_s16(dst + i, E);
  }
  for (; i < len; ++i) {
    const int diff_uv = ref[i] - src[i];
    dst[i] += diff_uv;
  }
}

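// Filters a pair of rows (A, B) into 2x-upsampled values with the 9:3:3:1
// kernel (9 * A0 + 3 * A1 + 3 * B0 + B1 + 8) >> 4 and its mirrored form, adds
// the result to best_y[] and clamps. Intermediate sums stay within int16_t
// range, which is why this variant is only used for bit_depth <= 10.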
static void SharpYuvFilterRow16_NEON(const int16_t* A, const int16_t* B,
                                     int len, const uint16_t* best_y,
                                     uint16_t* out, int bit_depth) {
  const int max_y = (1 << bit_depth) - 1;
  int i;
  const int16x8_t max = vdupq_n_s16(max_y);
  const int16x8_t zero = vdupq_n_s16(0);
  for (i = 0; i + 8 <= len; i += 8) {
    const int16x8_t a0 = vld1q_s16(A + i + 0);
    const int16x8_t a1 = vld1q_s16(A + i + 1);
    const int16x8_t b0 = vld1q_s16(B + i + 0);
    const int16x8_t b1 = vld1q_s16(B + i + 1);
    const int16x8_t a0b1 = vaddq_s16(a0, b1);
    const int16x8_t a1b0 = vaddq_s16(a1, b0);
    const int16x8_t a0a1b0b1 = vaddq_s16(a0b1, a1b0);  // A0+A1+B0+B1
    const int16x8_t a0b1_2 = vaddq_s16(a0b1, a0b1);    // 2*(A0+B1)
    const int16x8_t a1b0_2 = vaddq_s16(a1b0, a1b0);    // 2*(A1+B0)
    const int16x8_t c0 = vshrq_n_s16(vaddq_s16(a0b1_2, a0a1b0b1), 3);
    const int16x8_t c1 = vshrq_n_s16(vaddq_s16(a1b0_2, a0a1b0b1), 3);
    const int16x8_t e0 = vrhaddq_s16(c1, a0);   // (c1 + a0 + 1) >> 1
    const int16x8_t e1 = vrhaddq_s16(c0, a1);   // (c0 + a1 + 1) >> 1
    const int16x8x2_t f = vzipq_s16(e0, e1);    // interleave even/odd outputs
    const int16x8_t g0 = vreinterpretq_s16_u16(vld1q_u16(best_y + 2 * i + 0));
    const int16x8_t g1 = vreinterpretq_s16_u16(vld1q_u16(best_y + 2 * i + 8));
    const int16x8_t h0 = vaddq_s16(g0, f.val[0]);
    const int16x8_t h1 = vaddq_s16(g1, f.val[1]);
    const int16x8_t i0 = vmaxq_s16(vminq_s16(h0, max), zero);
    const int16x8_t i1 = vmaxq_s16(vminq_s16(h1, max), zero);
    vst1q_u16(out + 2 * i + 0, vreinterpretq_u16_s16(i0));
    vst1q_u16(out + 2 * i + 8, vreinterpretq_u16_s16(i1));
  }
  for (; i < len; ++i) {
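    // (9 * A0 + 3 * A1 + 3 * B0 + B1 + 8) >> 4 =
    //    = (8 * A0 + 2 * (A1 + B0) + (A0 + A1 + B0 + B1 + 8)) >> 4
    // The common sub-expressions are shared between v0 and v1.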
    const int a0b1 = A[i + 0] + B[i + 1];
    const int a1b0 = A[i + 1] + B[i + 0];
    const int a0a1b0b1 = a0b1 + a1b0 + 8;
    const int v0 = (8 * A[i + 0] + 2 * a1b0 + a0a1b0b1) >> 4;
    const int v1 = (8 * A[i + 1] + 2 * a0b1 + a0a1b0b1) >> 4;
    out[2 * i + 0] = clip_NEON(best_y[2 * i + 0] + v0, max_y);
    out[2 * i + 1] = clip_NEON(best_y[2 * i + 1] + v1, max_y);
  }
}

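// Same 9:3:3:1 filter as above, but with 32-bit intermediates so that inputs
// with bit_depth > 10 cannot overflow.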
static void SharpYuvFilterRow32_NEON(const int16_t* A, const int16_t* B,
                                     int len, const uint16_t* best_y,
                                     uint16_t* out, int bit_depth) {
  const int max_y = (1 << bit_depth) - 1;
  int i;
  const uint16x8_t max = vdupq_n_u16(max_y);
  for (i = 0; i + 4 <= len; i += 4) {
    const int16x4_t a0 = vld1_s16(A + i + 0);
    const int16x4_t a1 = vld1_s16(A + i + 1);
    const int16x4_t b0 = vld1_s16(B + i + 0);
    const int16x4_t b1 = vld1_s16(B + i + 1);
    const int32x4_t a0b1 = vaddl_s16(a0, b1);
    const int32x4_t a1b0 = vaddl_s16(a1, b0);
    const int32x4_t a0a1b0b1 = vaddq_s32(a0b1, a1b0);  // A0+A1+B0+B1
    const int32x4_t a0b1_2 = vaddq_s32(a0b1, a0b1);    // 2*(A0+B1)
    const int32x4_t a1b0_2 = vaddq_s32(a1b0, a1b0);    // 2*(A1+B0)
    const int32x4_t c0 = vshrq_n_s32(vaddq_s32(a0b1_2, a0a1b0b1), 3);
    const int32x4_t c1 = vshrq_n_s32(vaddq_s32(a1b0_2, a0a1b0b1), 3);
    const int32x4_t e0 = vrhaddq_s32(c1, vmovl_s16(a0));  // rounding average
    const int32x4_t e1 = vrhaddq_s32(c0, vmovl_s16(a1));  // rounding average
    const int32x4x2_t f = vzipq_s32(e0, e1);  // interleave even/odd outputs

    const int16x8_t g = vreinterpretq_s16_u16(vld1q_u16(best_y + 2 * i));
    const int32x4_t h0 = vaddw_s16(f.val[0], vget_low_s16(g));
    const int32x4_t h1 = vaddw_s16(f.val[1], vget_high_s16(g));
    const uint16x8_t i_16 = vcombine_u16(vqmovun_s32(h0), vqmovun_s32(h1));
    const uint16x8_t i_clamped = vminq_u16(i_16, max);
    vst1q_u16(out + 2 * i + 0, i_clamped);
  }
  for (; i < len; ++i) {
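    // (9 * A0 + 3 * A1 + 3 * B0 + B1 + 8) >> 4 =
    //    = (8 * A0 + 2 * (A1 + B0) + (A0 + A1 + B0 + B1 + 8)) >> 4
    // The common sub-expressions are shared between v0 and v1.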
    const int a0b1 = A[i + 0] + B[i + 1];
    const int a1b0 = A[i + 1] + B[i + 0];
    const int a0a1b0b1 = a0b1 + a1b0 + 8;
    const int v0 = (8 * A[i + 0] + 2 * a1b0 + a0a1b0b1) >> 4;
    const int v1 = (8 * A[i + 1] + 2 * a0b1 + a0a1b0b1) >> 4;
    out[2 * i + 0] = clip_NEON(best_y[2 * i + 0] + v0, max_y);
    out[2 * i + 1] = clip_NEON(best_y[2 * i + 1] + v1, max_y);
  }
}

static void SharpYuvFilterRow_NEON(const int16_t* A, const int16_t* B, int len,
                                   const uint16_t* best_y, uint16_t* out,
                                   int bit_depth) {
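  // The 16-bit path assumes all intermediate sums fit in int16_t, which holds
  // for bit depths up to 10; larger depths use 32-bit arithmetic.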
  if (bit_depth <= 10) {
    SharpYuvFilterRow16_NEON(A, B, len, best_y, out, bit_depth);
  } else {
    SharpYuvFilterRow32_NEON(A, B, len, best_y, out, bit_depth);
  }
}

//------------------------------------------------------------------------------

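// Entry point: installs the NEON implementations of the SharpYuv* function
// pointers.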
extern void InitSharpYuvNEON(void);

WEBP_TSAN_IGNORE_FUNCTION void InitSharpYuvNEON(void) {
  SharpYuvUpdateY = SharpYuvUpdateY_NEON;
  SharpYuvUpdateRGB = SharpYuvUpdateRGB_NEON;
  SharpYuvFilterRow = SharpYuvFilterRow_NEON;
}

#else  // !WEBP_USE_NEON

extern void InitSharpYuvNEON(void);

void InitSharpYuvNEON(void) {}

#endif  // WEBP_USE_NEON