// Copyright 2020 Google LLC.
// Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.

#ifndef SkVM_opts_DEFINED
#define SkVM_opts_DEFINED

#include "include/private/SkVx.h"
#include "src/core/SkVM.h"

namespace SK_OPTS_NS {

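    // Portable interpreter for SkVM programs: it executes the instruction list K values
    // at a time, and is used when a program is not run through the JIT.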
    inline void interpret_skvm(const skvm::InterpreterInstruction insts[], const int ninsts,
                               const int nregs, const int loop,
                               const int strides[], const int nargs,
                               int n, void* args[]) {
        using namespace skvm;

        // We'll operate in SIMT style, knocking off K-size chunks from n while possible.
        // We noticed quad-pumping is slower than single-pumping and both were slower than double.
    #if defined(__AVX2__)
        constexpr int K = 16;
    #else
        constexpr int K = 8;
    #endif
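        // (K=16 is two 8-lane AVX2 registers' worth of lanes; K=8 is typically two 4-lane
        // SSE/NEON registers' worth, i.e. the "double-pumping" mentioned above.)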
        using I32   = skvx::Vec<K, int>;
        using F32   = skvx::Vec<K, float>;
        using U32   = skvx::Vec<K, uint32_t>;
        using U16   = skvx::Vec<K, uint16_t>;
        using U8    = skvx::Vec<K, uint8_t>;

        using I16x2 = skvx::Vec<2*K, int16_t>;
        using U16x2 = skvx::Vec<2*K, uint16_t>;

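        // Each virtual register is one Slot: the same K 32-bit lanes, reinterpretable as
        // float, signed or unsigned int, or as 2*K packed 16-bit halves.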
        union Slot {
            F32   f32;
            I32   i32;
            U32   u32;
            I16x2 i16x2;
            U16x2 u16x2;
        };

        Slot                     few_regs[16];
        std::unique_ptr<char[]>  many_regs;

        Slot* regs = few_regs;

        if (nregs > (int)SK_ARRAY_COUNT(few_regs)) {
            // Annoyingly we can't trust that malloc() or new will work with Slot because
            // the skvx::Vec types may have alignment greater than what they provide.
            // We'll overallocate one extra register so we can align manually.
            many_regs.reset(new char[ sizeof(Slot) * (nregs + 1) ]);

            uintptr_t addr = (uintptr_t)many_regs.get();
            addr += alignof(Slot) -
                     (addr & (alignof(Slot) - 1));
            SkASSERT((addr & (alignof(Slot) - 1)) == 0);
            regs = (Slot*)addr;
        }
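        // Either way, regs now points to storage for nregs Slots with the alignment skvx needs.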

        auto r = [&](Reg id) -> Slot& {
            SkASSERT(0 <= id && id < nregs);
            return regs[id];
        };
        auto arg = [&](int ix) {
            SkASSERT(0 <= ix && ix < nargs);
            return args[ix];
        };

        // Step each argument pointer ahead by its stride a number of times.
        auto step_args = [&](int times) {
            for (int i = 0; i < nargs; i++) {
                args[i] = (void*)( (char*)args[i] + times * strides[i] );
            }
        };

        int start = 0,
            stride;
        for ( ; n > 0; start = loop, n -= stride, step_args(stride)) {
            stride = n >= K ? K : 1;
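            // Take full K-wide chunks while n >= K, then finish one value at a time.
            // After the first pass, start jumps to loop, so the loop-invariant (hoisted)
            // instructions before it run only once.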

            for (int i = start; i < ninsts; i++) {
                InterpreterInstruction inst = insts[i];

                // d = op(x,y/imm,z/imm)
                Reg   d = inst.d,
                      x = inst.x,
                      y = inst.y,
                      z = inst.z;
                int immy = inst.immy,
                    immz = inst.immz;

                // Ops that interact with memory need to know whether we're stride=1 or K,
                // but all non-memory ops can run the same code no matter the stride.
                switch (2*(int)inst.op + (stride == K ? 1 : 0)) {
                    default: SkUNREACHABLE;

                #define STRIDE_1(op) case 2*(int)op
                #define STRIDE_K(op) case 2*(int)op + 1
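                    // STRIDE_1 and STRIDE_K expand to the even and odd case labels matching
                    // the 2*op + (stride==K) switch key above.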
                    STRIDE_1(Op::store8 ): memcpy(arg(immy), &r(x).i32, 1); break;
                    STRIDE_1(Op::store16): memcpy(arg(immy), &r(x).i32, 2); break;
                    STRIDE_1(Op::store32): memcpy(arg(immy), &r(x).i32, 4); break;

                    STRIDE_K(Op::store8 ): skvx::cast<uint8_t> (r(x).i32).store(arg(immy)); break;
                    STRIDE_K(Op::store16): skvx::cast<uint16_t>(r(x).i32).store(arg(immy)); break;
                    STRIDE_K(Op::store32):                     (r(x).i32).store(arg(immy)); break;

                    STRIDE_1(Op::load8 ): r(d).i32 = 0; memcpy(&r(d).i32, arg(immy), 1); break;
                    STRIDE_1(Op::load16): r(d).i32 = 0; memcpy(&r(d).i32, arg(immy), 2); break;
                    STRIDE_1(Op::load32): r(d).i32 = 0; memcpy(&r(d).i32, arg(immy), 4); break;

                    STRIDE_K(Op::load8 ): r(d).i32 = skvx::cast<int>(U8 ::Load(arg(immy))); break;
                    STRIDE_K(Op::load16): r(d).i32 = skvx::cast<int>(U16::Load(arg(immy))); break;
                    STRIDE_K(Op::load32): r(d).i32 =                 I32::Load(arg(immy)) ; break;

                    // The pointer we base our gather on is loaded indirectly from a uniform:
                    //     - arg(immy) is the uniform holding our gather base pointer somewhere;
                    //     - (const uint8_t*)arg(immy) + immz points to the gather base pointer;
                    //     - memcpy() loads that gather base into a pointer of the right type.
                    // After all that we have an ordinary (uniform) pointer `ptr` to load from,
                    // and we then gather from it using the varying indices in r(x).
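                    // In the 1-at-a-time tail only lane 0 of r(x) holds a valid index, so we
                    // gather just that lane and zero the rest rather than index with garbage.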
                    STRIDE_1(Op::gather8):
                        for (int i = 0; i < K; i++) {
                            const uint8_t* ptr;
                            memcpy(&ptr, (const uint8_t*)arg(immy) + immz, sizeof(ptr));
                            r(d).i32[i] = (i==0) ? ptr[ r(x).i32[i] ] : 0;
                        } break;
                    STRIDE_1(Op::gather16):
                        for (int i = 0; i < K; i++) {
                            const uint16_t* ptr;
                            memcpy(&ptr, (const uint8_t*)arg(immy) + immz, sizeof(ptr));
                            r(d).i32[i] = (i==0) ? ptr[ r(x).i32[i] ] : 0;
                        } break;
                    STRIDE_1(Op::gather32):
                        for (int i = 0; i < K; i++) {
                            const int* ptr;
                            memcpy(&ptr, (const uint8_t*)arg(immy) + immz, sizeof(ptr));
                            r(d).i32[i] = (i==0) ? ptr[ r(x).i32[i] ] : 0;
                        } break;

                    STRIDE_K(Op::gather8):
                        for (int i = 0; i < K; i++) {
                            const uint8_t* ptr;
                            memcpy(&ptr, (const uint8_t*)arg(immy) + immz, sizeof(ptr));
                            r(d).i32[i] = ptr[ r(x).i32[i] ];
                        } break;
                    STRIDE_K(Op::gather16):
                        for (int i = 0; i < K; i++) {
                            const uint16_t* ptr;
                            memcpy(&ptr, (const uint8_t*)arg(immy) + immz, sizeof(ptr));
                            r(d).i32[i] = ptr[ r(x).i32[i] ];
                        } break;
                    STRIDE_K(Op::gather32):
                        for (int i = 0; i < K; i++) {
                            const int* ptr;
                            memcpy(&ptr, (const uint8_t*)arg(immy) + immz, sizeof(ptr));
                            r(d).i32[i] = ptr[ r(x).i32[i] ];
                        } break;

                #undef STRIDE_1
                #undef STRIDE_K

                    // Ops that don't interact with memory should never care about the stride.
                #define CASE(op) case 2*(int)op: /*fallthrough*/ case 2*(int)op+1

                    CASE(Op::assert_true):
                    #ifdef SK_DEBUG
                        if (!all(r(x).i32)) {
                            SkDebugf("inst %d, register %d\n", i, y);
                            for (int i = 0; i < K; i++) {
                                SkDebugf("\t%2d: %08x (%g)\n", i, r(y).i32[i], r(y).f32[i]);
                            }
                        }
                        SkASSERT(all(r(x).i32));
                    #endif
                        break;

                    CASE(Op::index): {
                        const int iota[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,
                                            16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31};
                        static_assert(K <= SK_ARRAY_COUNT(iota), "");

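                        // n counts the values still to process, so lane i gets n - i.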
                        r(d).i32 = n - I32::Load(iota);
                    } break;

                    CASE(Op::uniform8):
                        r(d).i32 = *(const uint8_t* )( (const char*)arg(immy) + immz );
                        break;
                    CASE(Op::uniform16):
                        r(d).i32 = *(const uint16_t*)( (const char*)arg(immy) + immz );
                        break;
                    CASE(Op::uniform32):
                        r(d).i32 = *(const int*     )( (const char*)arg(immy) + immz );
                        break;

                    CASE(Op::splat): r(d).i32 = immy; break;

                    CASE(Op::add_f32): r(d).f32 = r(x).f32 + r(y).f32; break;
                    CASE(Op::sub_f32): r(d).f32 = r(x).f32 - r(y).f32; break;
                    CASE(Op::mul_f32): r(d).f32 = r(x).f32 * r(y).f32; break;
                    CASE(Op::div_f32): r(d).f32 = r(x).f32 / r(y).f32; break;
                    CASE(Op::min_f32): r(d).f32 = min(r(x).f32, r(y).f32); break;
                    CASE(Op::max_f32): r(d).f32 = max(r(x).f32, r(y).f32); break;

                    // These _imm instructions are all x86/JIT only; the interpreter should
                    // never see them.
                    CASE(Op::add_f32_imm):
                    CASE(Op::sub_f32_imm):
                    CASE(Op::mul_f32_imm):
                    CASE(Op::min_f32_imm):
                    CASE(Op::max_f32_imm):
                    CASE(Op::bit_and_imm):
                    CASE(Op::bit_or_imm ):
                    CASE(Op::bit_xor_imm): SkUNREACHABLE; break;

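                    // fma = x*y + z,  fms = x*y - z,  fnma = z - x*y.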
                    CASE(Op::fma_f32):  r(d).f32 = fma( r(x).f32, r(y).f32,  r(z).f32); break;
                    CASE(Op::fms_f32):  r(d).f32 = fma( r(x).f32, r(y).f32, -r(z).f32); break;
                    CASE(Op::fnma_f32): r(d).f32 = fma(-r(x).f32, r(y).f32,  r(z).f32); break;

                    CASE(Op::sqrt_f32): r(d).f32 = sqrt(r(x).f32); break;

                    CASE(Op::add_i32): r(d).i32 = r(x).i32 + r(y).i32; break;
                    CASE(Op::sub_i32): r(d).i32 = r(x).i32 - r(y).i32; break;
                    CASE(Op::mul_i32): r(d).i32 = r(x).i32 * r(y).i32; break;

                    CASE(Op::add_i16x2): r(d).i16x2 = r(x).i16x2 + r(y).i16x2; break;
                    CASE(Op::sub_i16x2): r(d).i16x2 = r(x).i16x2 - r(y).i16x2; break;
                    CASE(Op::mul_i16x2): r(d).i16x2 = r(x).i16x2 * r(y).i16x2; break;

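                    // Immediate shifts: sra is arithmetic (sign-extending), shr is logical
                    // (zero-filling) via the unsigned view.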
                    CASE(Op::shl_i32): r(d).i32 = r(x).i32 << immy; break;
                    CASE(Op::sra_i32): r(d).i32 = r(x).i32 >> immy; break;
                    CASE(Op::shr_i32): r(d).u32 = r(x).u32 >> immy; break;

                    CASE(Op::shl_i16x2): r(d).i16x2 = r(x).i16x2 << immy; break;
                    CASE(Op::sra_i16x2): r(d).i16x2 = r(x).i16x2 >> immy; break;
                    CASE(Op::shr_i16x2): r(d).u16x2 = r(x).u16x2 >> immy; break;

                    CASE(Op:: eq_f32): r(d).i32 = r(x).f32 == r(y).f32; break;
                    CASE(Op::neq_f32): r(d).i32 = r(x).f32 != r(y).f32; break;
                    CASE(Op:: gt_f32): r(d).i32 = r(x).f32 >  r(y).f32; break;
                    CASE(Op::gte_f32): r(d).i32 = r(x).f32 >= r(y).f32; break;

                    CASE(Op:: eq_i32): r(d).i32 = r(x).i32 == r(y).i32; break;
                    CASE(Op::neq_i32): r(d).i32 = r(x).i32 != r(y).i32; break;
                    CASE(Op:: gt_i32): r(d).i32 = r(x).i32 >  r(y).i32; break;
                    CASE(Op::gte_i32): r(d).i32 = r(x).i32 >= r(y).i32; break;

                    CASE(Op:: eq_i16x2): r(d).i16x2 = r(x).i16x2 == r(y).i16x2; break;
                    CASE(Op::neq_i16x2): r(d).i16x2 = r(x).i16x2 != r(y).i16x2; break;
                    CASE(Op:: gt_i16x2): r(d).i16x2 = r(x).i16x2 >  r(y).i16x2; break;
                    CASE(Op::gte_i16x2): r(d).i16x2 = r(x).i16x2 >= r(y).i16x2; break;

                    CASE(Op::bit_and  ): r(d).i32 = r(x).i32 &  r(y).i32; break;
                    CASE(Op::bit_or   ): r(d).i32 = r(x).i32 |  r(y).i32; break;
                    CASE(Op::bit_xor  ): r(d).i32 = r(x).i32 ^  r(y).i32; break;
                    CASE(Op::bit_clear): r(d).i32 = r(x).i32 & ~r(y).i32; break;

                    CASE(Op::select): r(d).i32 = skvx::if_then_else(r(x).i32, r(y).i32, r(z).i32);
                                      break;

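                    // pack() ORs x with y shifted up by immz bits; it's only lossless when
                    // the shifted y doesn't overlap any set bits of x.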
                    CASE(Op::pack):   r(d).u32 = r(x).u32 | (r(y).u32 << immz); break;

                    // trunc truncates toward zero; round rounds to nearest via lrint().
                    CASE(Op::floor):  r(d).f32 = skvx::floor(r(x).f32)                   ; break;
                    CASE(Op::to_f32): r(d).f32 = skvx::cast<float>( r(x).i32 )           ; break;
                    CASE(Op::trunc):  r(d).i32 = skvx::cast<int>  ( r(x).f32 )           ; break;
                    CASE(Op::round):  r(d).i32 = skvx::cast<int>  (skvx::lrint(r(x).f32)); break;
                #undef CASE
                }
            }
        }
    }

}  // namespace SK_OPTS_NS

#endif  // SkVM_opts_DEFINED