/*
 * Copyright 2019 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkVM_DEFINED
#define SkVM_DEFINED

#include "include/core/SkBlendMode.h"
#include "include/core/SkColor.h"
#include "include/private/SkMacros.h"
#include "include/private/SkTArray.h"
#include "include/private/SkTHash.h"
#include "src/core/SkSpan.h"
#include "src/core/SkVM_fwd.h"
#include <cstring>  // memcpy
#include <memory>   // std::unique_ptr
#include <vector>   // std::vector

class SkWStream;

// Flip this to #if 1 to define SKVM_LLVM and build the optional LLVM-based
// JIT backend (see Program::setupLLVM() below).
#if 0
    #define SKVM_LLVM
#endif

namespace skvm {

    class Assembler {
    public:
        explicit Assembler(void* buf);

        size_t size() const;

        // Order matters... GP64, Xmm, Ymm values match the 4-bit register encoding for each.
        enum GP64 {
            rax, rcx, rdx, rbx, rsp, rbp, rsi, rdi,
            r8 , r9 , r10, r11, r12, r13, r14, r15,
        };
        enum Xmm {
            xmm0, xmm1, xmm2 , xmm3 , xmm4 , xmm5 , xmm6 , xmm7 ,
            xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,
        };
        enum Ymm {
            ymm0, ymm1, ymm2 , ymm3 , ymm4 , ymm5 , ymm6 , ymm7 ,
            ymm8, ymm9, ymm10, ymm11, ymm12, ymm13, ymm14, ymm15,
        };

        // X and V values match the 5-bit encoding for each (nothing tricky).
        enum X {
            x0 , x1 , x2 , x3 , x4 , x5 , x6 , x7 ,
            x8 , x9 , x10, x11, x12, x13, x14, x15,
            x16, x17, x18, x19, x20, x21, x22, x23,
            x24, x25, x26, x27, x28, x29, x30, xzr,
        };
        enum V {
            v0 , v1 , v2 , v3 , v4 , v5 , v6 , v7 ,
            v8 , v9 , v10, v11, v12, v13, v14, v15,
            v16, v17, v18, v19, v20, v21, v22, v23,
            v24, v25, v26, v27, v28, v29, v30, v31,
        };

        void bytes(const void*, int);
        void byte(uint8_t);
        void word(uint32_t);

        // x86-64

        void align(int mod);

        void int3();
        void vzeroupper();
        void ret();

        void add(GP64, int imm);
        void sub(GP64, int imm);

        void movq(GP64 dst, GP64 src, int off);  // dst = *(src+off)

        struct Label {
            int offset = 0;
            enum { NotYetSet, ARMDisp19, X86Disp32 } kind = NotYetSet;
            SkSTArray<1, int> references;
        };

        struct YmmOrLabel {
            Ymm    ymm   = ymm0;
            Label* label = nullptr;

            /*implicit*/ YmmOrLabel(Ymm    y) : ymm  (y) { SkASSERT(!label); }
            /*implicit*/ YmmOrLabel(Label* l) : label(l) { SkASSERT( label); }
        };

        // All dst = x op y.
        using DstEqXOpY = void(Ymm dst, Ymm x, Ymm y);
        DstEqXOpY vpandn,
                  vpmulld,
                  vpsubw, vpmullw,
                  vdivps,
                  vfmadd132ps, vfmadd213ps, vfmadd231ps,
                  vfmsub132ps, vfmsub213ps, vfmsub231ps,
                  vfnmadd132ps, vfnmadd213ps, vfnmadd231ps,
                  vpackusdw, vpackuswb,
                  vpcmpeqd, vpcmpgtd;

        using DstEqXOpYOrLabel = void(Ymm dst, Ymm x, YmmOrLabel y);
        DstEqXOpYOrLabel vpand, vpor, vpxor,
                         vpaddd, vpsubd,
                         vaddps, vsubps, vmulps, vminps, vmaxps;

        // Floating point comparisons are all the same instruction with varying imm.
        void vcmpps(Ymm dst, Ymm x, Ymm y, int imm);
        void vcmpeqps (Ymm dst, Ymm x, Ymm y) { this->vcmpps(dst,x,y,0); }
        void vcmpltps (Ymm dst, Ymm x, Ymm y) { this->vcmpps(dst,x,y,1); }
        void vcmpleps (Ymm dst, Ymm x, Ymm y) { this->vcmpps(dst,x,y,2); }
        void vcmpneqps(Ymm dst, Ymm x, Ymm y) { this->vcmpps(dst,x,y,4); }

        using DstEqXOpImm = void(Ymm dst, Ymm x, int imm);
        DstEqXOpImm vpslld, vpsrld, vpsrad,
                    vpsrlw,
                    vpermq,
                    vroundps;

        enum { NEAREST, FLOOR, CEIL, TRUNC };  // vroundps immediates

        using DstEqOpX = void(Ymm dst, Ymm x);
        DstEqOpX vmovdqa, vcvtdq2ps, vcvttps2dq, vcvtps2dq, vsqrtps;

        void vpblendvb(Ymm dst, Ymm x, Ymm y, Ymm z);

        Label here();
        void label(Label*);

        void jmp(Label*);
        void je (Label*);
        void jne(Label*);
        void jl (Label*);
        void jc (Label*);
        void cmp(GP64, int imm);

        void vpshufb(Ymm dst, Ymm x, Label*);
        void vptest(Ymm dst, Label*);
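
        // A rough usage sketch of the Label/jump helpers above, given some writable
        // executable buffer 'buf' (registers and the loop body are illustrative only):
        //
        //     Assembler a{buf};
        //     Assembler::Label top;
        //     a.label(&top);                     // bind 'top' to the current position
        //     /* ...emit one loop iteration... */
        //     a.sub(Assembler::rdi, 1);          // n -= 1
        //     a.cmp(Assembler::rdi, 0);
        //     a.jne(&top);                       // keep looping while n != 0
        //     a.ret();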

        void vbroadcastss(Ymm dst, Label*);
        void vbroadcastss(Ymm dst, Xmm src);
        void vbroadcastss(Ymm dst, GP64 ptr, int off);  // dst = *(ptr+off)

        void vmovups  (Ymm dst, int imm);   // dst = *(sp + imm)
        void vmovups  (Ymm dst, GP64 ptr);  // dst = *ptr, 256-bit
        void vpmovzxwd(Ymm dst, GP64 ptr);  // dst = *ptr, 128-bit, each uint16_t expanded to int
        void vpmovzxbd(Ymm dst, GP64 ptr);  // dst = *ptr,  64-bit, each uint8_t expanded to int
        void vmovd    (Xmm dst, GP64 ptr);  // dst = *ptr,  32-bit

        enum Scale { ONE, TWO, FOUR, EIGHT };
        void vmovd(Xmm dst, Scale, GP64 index, GP64 base);  // dst = *(base + scale*index), 32-bit

        void vmovups(int imm, Ymm src);   // *(sp + imm) = src
        void vmovups(GP64 ptr, Ymm src);  // *ptr = src, 256-bit
        void vmovups(GP64 ptr, Xmm src);  // *ptr = src, 128-bit
        void vmovq  (GP64 ptr, Xmm src);  // *ptr = src,  64-bit
        void vmovd  (GP64 ptr, Xmm src);  // *ptr = src,  32-bit

        void movzbl(GP64 dst, GP64 ptr, int off);  // dst = *(ptr+off), uint8_t -> int
        void movb  (GP64 ptr, GP64 src);           // *ptr = src, 8-bit

        void vmovd_direct(GP64 dst, Xmm src);  // dst = src, 32-bit
        void vmovd_direct(Xmm dst, GP64 src);  // dst = src, 32-bit

        void vpinsrw(Xmm dst, Xmm src, GP64 ptr, int imm);  // dst = src; dst[imm] = *ptr, 16-bit
        void vpinsrb(Xmm dst, Xmm src, GP64 ptr, int imm);  // dst = src; dst[imm] = *ptr,  8-bit

        void vpextrw(GP64 ptr, Xmm src, int imm);  // *ptr = src[imm], 16-bit
        void vpextrb(GP64 ptr, Xmm src, int imm);  // *ptr = src[imm],  8-bit

        // if (mask & 0x8000'0000) {
        //     dst = base[scale*ix];
        // }
        // mask = 0;
        void vgatherdps(Ymm dst, Scale scale, Ymm ix, GP64 base, Ymm mask);

        // aarch64

        // d = op(n,m)
        using DOpNM = void(V d, V n, V m);
        DOpNM and16b, orr16b, eor16b, bic16b, bsl16b,
              add4s, sub4s, mul4s,
              cmeq4s, cmgt4s,
              sub8h, mul8h,
              fadd4s, fsub4s, fmul4s, fdiv4s, fmin4s, fmax4s,
              fcmeq4s, fcmgt4s, fcmge4s,
              tbl;

        // TODO: there are also float ==,<,<=,>,>= instructions with an immediate 0.0f,
        // and the register comparisons > and >= can also compare absolute values. Interesting.

        // d += n*m
        void fmla4s(V d, V n, V m);

        // d -= n*m
        void fmls4s(V d, V n, V m);

        // d = op(n,imm)
        using DOpNImm = void(V d, V n, int imm);
        DOpNImm sli4s,
                shl4s, sshr4s, ushr4s,
                ushr8h;

        // d = op(n)
        using DOpN = void(V d, V n);
        DOpN not16b,    // d = ~n
             fneg4s,    // d = -n
             scvtf4s,   // int -> float
             fcvtzs4s,  // truncate float -> int
             fcvtns4s,  // round float -> int (nearest even)
             xtns2h,    // u32 -> u16
             xtnh2b,    // u16 -> u8
             uxtlb2h,   // u8 -> u16
             uxtlh2s,   // u16 -> u32
             uminv4s;   // dst[0] = min(n[0],n[1],n[2],n[3]), n as unsigned

        void brk (int imm16);
        void ret (X);
        void add (X d, X n, int imm12);
        void sub (X d, X n, int imm12);
        void subs(X d, X n, int imm12);  // subtract, setting condition flags

        // There's another encoding for unconditional branches that can jump further,
        // but this one, encoded as b.al, is simple to implement and should be fine.
        void b  (Label* l) { this->b(Condition::al, l); }
        void bne(Label* l) { this->b(Condition::ne, l); }
        void blt(Label* l) { this->b(Condition::lt, l); }

        // "cmp ..." is just an assembler mnemonic for "subs xzr, ..."!
        void cmp(X n, int imm12) { this->subs(xzr, n, imm12); }

        // Compare and branch if zero/non-zero, as if
        //     cmp(t,0)
        //     beq/bne(l)
        // but without setting condition flags.
        void cbz (X t, Label* l);
        void cbnz(X t, Label* l);

        void ldrq(V dst, Label*);  // 128-bit PC-relative load

        void ldrq(V dst, X src);   // 128-bit dst = *src
        void ldrs(V dst, X src);   //  32-bit dst = *src
        void ldrb(V dst, X src);   //   8-bit dst = *src

        void strq(V src, X dst);   // 128-bit *dst = src
        void strs(V src, X dst);   //  32-bit *dst = src
        void strb(V src, X dst);   //   8-bit *dst = src

        void fmovs(X dst, V src);  // dst = 32-bit src[0]

    private:
        // dst = op(dst, imm)
        void op(int opcode, int opcode_ext, GP64 dst, int imm);


        // dst = op(x,y) or op(x)
        void op(int prefix, int map, int opcode, Ymm dst, Ymm x, Ymm y, bool W=false);
        void op(int prefix, int map, int opcode, Ymm dst, Ymm x, bool W=false) {
            // Two-argument ops seem to pass them in dst and y, forcing x to 0 so VEX.vvvv == 1111.
            this->op(prefix, map, opcode, dst,(Ymm)0,x, W);
        }

        // dst = op(x,imm)
        void op(int prefix, int map, int opcode, int opcode_ext, Ymm dst, Ymm x, int imm);

        // dst = op(x,label) or op(label)
        void op(int prefix, int map, int opcode, Ymm dst, Ymm x, Label* l);
        void op(int prefix, int map, int opcode, Ymm dst, Ymm x, YmmOrLabel);

        // *ptr = ymm or ymm = *ptr, depending on opcode.
        void load_store(int prefix, int map, int opcode, Ymm ymm, GP64 ptr);
        // *(sp+off) = ymm or ymm = *(sp+off), depending on opcode.
        void stack_load_store(int prefix, int map, int opcode, Ymm ymm, int off);

        // The opcode for 3-argument ops is split between hi and lo:
        //    [11 bits hi] [5 bits m] [6 bits lo] [5 bits n] [5 bits d]
        void op(uint32_t hi, V m, uint32_t lo, V n, V d);

        // 2-argument ops, with or without an immediate.
        void op(uint32_t op22, int imm, V n, V d);
        void op(uint32_t op22, V n, V d) { this->op(op22,0,n,d); }
        void op(uint32_t op22, X x, V v) { this->op(op22,0,(V)x,v); }

        // Order matters... value is the 4-bit encoding for each condition code.
        enum class Condition { eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,al };
        void b(Condition, Label*);

        void jump(uint8_t condition, Label*);

        int disp19(Label*);
        int disp32(Label*);

        uint8_t* fCode;
        uint8_t* fCurr;
        size_t   fSize;
    };

    // Order matters a little: Ops <= store32 are treated as having side effects.
    #define SKVM_OPS(M)                       \
        M(assert_true)                        \
        M(store8) M(store16) M(store32)       \
        M(index)                              \
        M(load8) M(load16) M(load32)          \
        M(gather8) M(gather16) M(gather32)    \
        M(uniform8) M(uniform16) M(uniform32) \
        M(splat)                              \
        M(add_f32) M(add_i32) M(add_i16x2)    \
        M(sub_f32) M(sub_i32) M(sub_i16x2)    \
        M(mul_f32) M(mul_i32) M(mul_i16x2)    \
        M(div_f32)                            \
        M(min_f32)                            \
        M(max_f32)                            \
        M(fma_f32) M(fms_f32) M(fnma_f32)     \
        M(sqrt_f32)                           \
        M(shl_i32) M(shl_i16x2)               \
        M(shr_i32) M(shr_i16x2)               \
        M(sra_i32) M(sra_i16x2)               \
        M(add_f32_imm)                        \
        M(sub_f32_imm)                        \
        M(mul_f32_imm)                        \
        M(min_f32_imm)                        \
        M(max_f32_imm)                        \
        M(floor) M(trunc) M(round) M(to_f32)  \
        M( eq_f32) M( eq_i32) M( eq_i16x2)    \
        M(neq_f32) M(neq_i32) M(neq_i16x2)    \
        M( gt_f32) M( gt_i32) M( gt_i16x2)    \
        M(gte_f32) M(gte_i32) M(gte_i16x2)    \
        M(bit_and)                            \
        M(bit_or)                             \
        M(bit_xor)                            \
        M(bit_clear)                          \
        M(bit_and_imm)                        \
        M(bit_or_imm)                         \
        M(bit_xor_imm)                        \
        M(select) M(pack)                     \
    // End of SKVM_OPS

    enum class Op : int {
    #define M(op) op,
        SKVM_OPS(M)
    #undef M
    };

    static inline bool has_side_effect(Op op) {
        return op <= Op::store32;
    }
    static inline bool is_always_varying(Op op) {
        return op <= Op::gather32 && op != Op::assert_true;
    }

    using Val = int;
    // We reserve an impossible Val ID as a sentinel:
    // NA meaning none, n/a, null, nil, etc.
    static const Val NA = -1;

    struct Arg { int ix; };

    struct I32 {
        Builder* builder = nullptr;
        Val      id      = NA;
        explicit operator bool() const { return id != NA; }
        Builder* operator->()    const { return builder; }
    };

    struct F32 {
        Builder* builder = nullptr;
        Val      id      = NA;
        explicit operator bool() const { return id != NA; }
        Builder* operator->()    const { return builder; }
    };

    // Some operations make sense with immediate arguments,
    // so we use I32a and F32a to receive them transparently.
    //
    // We omit overloads that may indicate a bug or performance issue.
    // In general it does not make sense to pass immediates to unary operations,
    // and sometimes not even to binary operations, e.g.
    //
    //   div(x,y)    -- normal everyday divide
    //   div(3.0f,y) -- yep, makes sense
    //   div(x,3.0f) -- omitted as a reminder you probably want mul(x, 1/3.0f).
    //
    // You can of course always splat() to override these opinions.
    struct I32a {
        I32a(I32 v) : SkDEBUGCODE(builder(v.builder),) id(v.id) {}
        I32a(int v) : imm(v) {}

        SkDEBUGCODE(Builder* builder = nullptr;)
        Val id  = NA;
        int imm = 0;
    };

    struct F32a {
        F32a(F32   v) : SkDEBUGCODE(builder(v.builder),) id(v.id) {}
        F32a(float v) : imm(v) {}

        SkDEBUGCODE(Builder* builder = nullptr;)
        Val   id  = NA;
        float imm = 0;
    };

    struct Color {
        skvm::F32 r,g,b,a;
        explicit operator bool() const { return r && g && b && a; }
        Builder* operator->()    const { return a.operator->(); }
    };

    struct HSLA {
        skvm::F32 h,s,l,a;
        explicit operator bool() const { return h && s && l && a; }
        Builder* operator->()    const { return a.operator->(); }
    };

    struct Uniform {
        Arg ptr;
        int offset;
    };
    struct Uniforms {
        Arg              base;
        std::vector<int> buf;

        explicit Uniforms(int init) : base(Arg{0}), buf(init) {}

        Uniform push(int val) {
            buf.push_back(val);
            return {base, (int)( sizeof(int)*(buf.size() - 1) )};
        }

        Uniform pushF(float val) {
            int bits;
            memcpy(&bits, &val, sizeof(int));
            return this->push(bits);
        }

        Uniform pushPtr(const void* ptr) {
            // Jam the pointer into 1 or 2 ints.
            int ints[sizeof(ptr) / sizeof(int)];
            memcpy(ints, &ptr, sizeof(ptr));
            for (int bits : ints) {
                buf.push_back(bits);
            }
            return {base, (int)( sizeof(int)*(buf.size() - SK_ARRAY_COUNT(ints)) )};
        }
    };
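
    // A rough usage sketch (not part of this API): assuming a Builder 'b' whose first
    // declared argument is the uniform pointer, so Uniforms::base stays Arg{0}.
    //
    //     skvm::Uniforms uniforms{0};
    //     skvm::Uniform scale = uniforms.pushF(0.5f);      // a float uniform
    //     skvm::Uniform table = uniforms.pushPtr(lutPtr);  // a pointer uniform (1 or 2 ints)
    //     skvm::F32 s = b.uniformF(scale);                 // read it inside the program
    //     ...
    //     program.eval(n, uniforms.buf.data(), ...);       // supply the packed ints at eval time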

    SK_BEGIN_REQUIRE_DENSE
    struct Instruction {
        Op  op;         // v* = op(x,y,z,imm), where * == index of this Instruction.
        Val x,y,z;      // Enough arguments for mad().
        int immy,immz;  // Immediate bit pattern, shift count, argument index, etc.
    };
    SK_END_REQUIRE_DENSE

    bool operator==(const Instruction&, const Instruction&);
    struct InstructionHash {
        uint32_t operator()(const Instruction&, uint32_t seed=0) const;
    };

    struct OptimizedInstruction {
        Op  op;
        Val x,y,z;
        int immy,immz;

        Val  death;
        bool can_hoist;
        bool used_in_loop;
    };

    class Builder {
    public:

        Program done(const char* debug_name = nullptr) const;

        // Mostly for debugging, tests, etc.
        std::vector<Instruction> program() const { return fProgram; }
        std::vector<OptimizedInstruction> optimize(bool for_jit=false) const;

        // Declare an argument with given stride (use stride=0 for uniforms).
        // TODO: different types for varying and uniforms?
        Arg arg(int stride);

        // Convenience arg() wrappers for the most common strides, sizeof(T) and 0.
        template <typename T>
        Arg varying() { return this->arg(sizeof(T)); }
        Arg uniform() { return this->arg(0); }
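
        // A rough end-to-end sketch (names like srcPtr/dstPtr are illustrative only):
        //
        //     skvm::Builder b;
        //     skvm::Arg src = b.varying<int>(),     // one int loaded per lane
        //               dst = b.varying<int>();     // one int stored per lane
        //     skvm::I32 x = b.load32(src);
        //     b.store32(dst, b.add(x, 1));
        //     skvm::Program p = b.done();
        //     p.eval(n, srcPtr, dstPtr);            // run the program over n lanes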

        // TODO: allow uniform (i.e. Arg) offsets to store* and load*?
        // TODO: sign extension (signed types) for <32-bit loads?
        // TODO: unsigned integer operations where relevant (just comparisons?)?

        // Assert cond is true, printing debug when not.
        void assert_true(I32 cond, I32 debug);
        void assert_true(I32 cond, F32 debug) { assert_true(cond, bit_cast(debug)); }
        void assert_true(I32 cond)            { assert_true(cond, cond); }

        // Store {8,16,32}-bit varying.
        void store8 (Arg ptr, I32 val);
        void store16(Arg ptr, I32 val);
        void store32(Arg ptr, I32 val);
        void storeF (Arg ptr, F32 val) { store32(ptr, bit_cast(val)); }

        // Returns varying {n, n-1, n-2, ..., 1}, where n is the argument to Program::eval().
        I32 index();

        // Load u8,u16,i32 varying.
        I32 load8 (Arg ptr);
        I32 load16(Arg ptr);
        I32 load32(Arg ptr);
        F32 loadF (Arg ptr) { return bit_cast(load32(ptr)); }

        // Load u8,u16,i32 uniform with byte-count offset.
        I32 uniform8 (Arg ptr, int offset);
        I32 uniform16(Arg ptr, int offset);
        I32 uniform32(Arg ptr, int offset);
        F32 uniformF (Arg ptr, int offset) { return this->bit_cast(this->uniform32(ptr,offset)); }

        // Load this color as a uniform, premultiplied and converted to dst SkColorSpace.
        Color uniformPremul(SkColor4f, SkColorSpace* src,
                            Uniforms*, SkColorSpace* dst);

        // Gather u8,u16,i32 with varying element-count index from *(ptr + byte-count offset).
        I32 gather8 (Arg ptr, int offset, I32 index);
        I32 gather16(Arg ptr, int offset, I32 index);
        I32 gather32(Arg ptr, int offset, I32 index);
        F32 gatherF (Arg ptr, int offset, I32 index) {
            return bit_cast(gather32(ptr, offset, index));
        }

        // Convenience methods for working with skvm::Uniform(s).
        I32 uniform8 (Uniform u)            { return this->uniform8 (u.ptr, u.offset); }
        I32 uniform16(Uniform u)            { return this->uniform16(u.ptr, u.offset); }
        I32 uniform32(Uniform u)            { return this->uniform32(u.ptr, u.offset); }
        F32 uniformF (Uniform u)            { return this->uniformF (u.ptr, u.offset); }
        I32 gather8  (Uniform u, I32 index) { return this->gather8  (u.ptr, u.offset, index); }
        I32 gather16 (Uniform u, I32 index) { return this->gather16 (u.ptr, u.offset, index); }
        I32 gather32 (Uniform u, I32 index) { return this->gather32 (u.ptr, u.offset, index); }
        F32 gatherF  (Uniform u, I32 index) { return this->gatherF  (u.ptr, u.offset, index); }

        // Load an immediate constant.
        I32 splat(int      n);
        I32 splat(unsigned u) { return splat((int)u); }
        F32 splat(float    f);

        // float math, comparisons, etc.
        F32 add(F32, F32);  F32 add(F32a x, F32a y) { return add(_(x), _(y)); }
        F32 sub(F32, F32);  F32 sub(F32a x, F32a y) { return sub(_(x), _(y)); }
        F32 mul(F32, F32);  F32 mul(F32a x, F32a y) { return mul(_(x), _(y)); }
        F32 div(F32, F32);  F32 div(F32a x, F32  y) { return div(_(x),    y); }
        F32 min(F32, F32);  F32 min(F32a x, F32a y) { return min(_(x), _(y)); }
        F32 max(F32, F32);  F32 max(F32a x, F32a y) { return max(_(x), _(y)); }

        F32 mad(F32  x, F32  y, F32  z) { return add(mul(x,y), z); }
        F32 mad(F32a x, F32a y, F32a z) { return mad(_(x), _(y), _(z)); }

        F32 sqrt(F32);
        F32 approx_log2(F32);
        F32 approx_pow2(F32);
        F32 approx_log (F32 x) { return mul(0.69314718f, approx_log2(x)); }
        F32 approx_exp (F32 x) { return approx_pow2(mul(x, 1.4426950408889634074f)); }
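
        // approx_log() and approx_exp() above rely on the identities
        //     ln(x) = ln(2) * log2(x)      (ln(2)   ~= 0.69314718f)
        //     e^x   = 2^(x * log2(e))      (log2(e) ~= 1.4426950408889634074f)
        // so their accuracy is bounded by approx_log2() and approx_pow2().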

        F32 approx_powf(F32  base, F32  exp);
        F32 approx_powf(F32a base, F32a exp) { return approx_powf(_(base), _(exp)); }

        F32 approx_sin(F32 radians);
        F32 approx_cos(F32 radians) { return approx_sin(add(radians, SK_ScalarPI/2)); }
        F32 approx_tan(F32 radians);

        F32 lerp(F32  lo, F32  hi, F32  t) { return mad(sub(hi, lo), t, lo); }
        F32 lerp(F32a lo, F32a hi, F32a t) { return lerp(_(lo), _(hi), _(t)); }

        F32 clamp(F32  x, F32  lo, F32  hi) { return max(lo, min(x, hi)); }
        F32 clamp(F32a x, F32a lo, F32a hi) { return clamp(_(x), _(lo), _(hi)); }
        F32 clamp01(F32 x) { return clamp(x, 0.0f, 1.0f); }

        F32    abs(F32 x) { return bit_cast(bit_and(bit_cast(x), 0x7fff'ffff)); }
        F32  fract(F32 x) { return sub(x, floor(x)); }
        F32  floor(F32);
        I32 is_NaN(F32 x) { return neq(x,x); }

        I32 trunc(F32 x);
        I32 round(F32 x);  // Round to int using the current rounding mode (as if lrintf()).
        I32 bit_cast(F32 x) { return {x.builder, x.id}; }

        F32 norm(F32 x, F32 y) {
            return sqrt(add(mul(x,x),
                            mul(y,y)));
        }
        F32 norm(F32a x, F32a y) { return norm(_(x), _(y)); }

        I32 eq (F32, F32);  I32 eq (F32a x, F32a y) { return eq (_(x), _(y)); }
        I32 neq(F32, F32);  I32 neq(F32a x, F32a y) { return neq(_(x), _(y)); }
        I32 lt (F32, F32);  I32 lt (F32a x, F32a y) { return lt (_(x), _(y)); }
        I32 lte(F32, F32);  I32 lte(F32a x, F32a y) { return lte(_(x), _(y)); }
        I32 gt (F32, F32);  I32 gt (F32a x, F32a y) { return gt (_(x), _(y)); }
        I32 gte(F32, F32);  I32 gte(F32a x, F32a y) { return gte(_(x), _(y)); }

        // int math, comparisons, etc.
        I32 add(I32, I32);  I32 add(I32a x, I32a y) { return add(_(x), _(y)); }
        I32 sub(I32, I32);  I32 sub(I32a x, I32a y) { return sub(_(x), _(y)); }
        I32 mul(I32, I32);  I32 mul(I32a x, I32a y) { return mul(_(x), _(y)); }

        I32 shl(I32 x, int bits);
        I32 shr(I32 x, int bits);
        I32 sra(I32 x, int bits);

        I32 eq (I32 x, I32 y);  I32 eq (I32a x, I32a y) { return eq (_(x), _(y)); }
        I32 neq(I32 x, I32 y);  I32 neq(I32a x, I32a y) { return neq(_(x), _(y)); }
        I32 lt (I32 x, I32 y);  I32 lt (I32a x, I32a y) { return lt (_(x), _(y)); }
        I32 lte(I32 x, I32 y);  I32 lte(I32a x, I32a y) { return lte(_(x), _(y)); }
        I32 gt (I32 x, I32 y);  I32 gt (I32a x, I32a y) { return gt (_(x), _(y)); }
        I32 gte(I32 x, I32 y);  I32 gte(I32a x, I32a y) { return gte(_(x), _(y)); }

        F32 to_f32(I32 x);
        F32 bit_cast(I32 x) { return {x.builder, x.id}; }

        // Treat each 32-bit lane as a pair of 16-bit ints.
        I32 add_16x2(I32, I32);  I32 add_16x2(I32a x, I32a y) { return add_16x2(_(x), _(y)); }
        I32 sub_16x2(I32, I32);  I32 sub_16x2(I32a x, I32a y) { return sub_16x2(_(x), _(y)); }
        I32 mul_16x2(I32, I32);  I32 mul_16x2(I32a x, I32a y) { return mul_16x2(_(x), _(y)); }

        I32 shl_16x2(I32 x, int bits);
        I32 shr_16x2(I32 x, int bits);
        I32 sra_16x2(I32 x, int bits);

        I32 eq_16x2 (I32, I32);  I32 eq_16x2 (I32a x, I32a y) { return eq_16x2 (_(x), _(y)); }
        I32 neq_16x2(I32, I32);  I32 neq_16x2(I32a x, I32a y) { return neq_16x2(_(x), _(y)); }
        I32 lt_16x2 (I32, I32);  I32 lt_16x2 (I32a x, I32a y) { return lt_16x2 (_(x), _(y)); }
        I32 lte_16x2(I32, I32);  I32 lte_16x2(I32a x, I32a y) { return lte_16x2(_(x), _(y)); }
        I32 gt_16x2 (I32, I32);  I32 gt_16x2 (I32a x, I32a y) { return gt_16x2 (_(x), _(y)); }
        I32 gte_16x2(I32, I32);  I32 gte_16x2(I32a x, I32a y) { return gte_16x2(_(x), _(y)); }

        // Bitwise operations.
        I32 bit_and  (I32, I32);  I32 bit_and  (I32a x, I32a y) { return bit_and  (_(x), _(y)); }
        I32 bit_or   (I32, I32);  I32 bit_or   (I32a x, I32a y) { return bit_or   (_(x), _(y)); }
        I32 bit_xor  (I32, I32);  I32 bit_xor  (I32a x, I32a y) { return bit_xor  (_(x), _(y)); }
        I32 bit_clear(I32, I32);  I32 bit_clear(I32a x, I32a y) { return bit_clear(_(x), _(y)); }

        I32 min(I32 x, I32 y) { return select(lte(x,y), x, y); }
        I32 max(I32 x, I32 y) { return select(gte(x,y), x, y); }

        I32 min(I32a x, I32a y) { return min(_(x), _(y)); }
        I32 max(I32a x, I32a y) { return max(_(x), _(y)); }

        I32 select(I32 cond, I32 t, I32 f);  // cond ? t : f
        F32 select(I32 cond, F32 t, F32 f) {
            return bit_cast(select(cond, bit_cast(t)
                                       , bit_cast(f)));
        }

        I32 select(I32a cond, I32a t, I32a f) { return select(_(cond), _(t), _(f)); }
        F32 select(I32a cond, F32a t, F32a f) { return select(_(cond), _(t), _(f)); }

        I32 extract(I32 x, int bits, I32 z);  // (x>>bits) & z
        I32 pack   (I32 x, I32 y, int bits);  // x | (y << bits), assuming (x & (y << bits)) == 0

        I32 extract(I32a x, int bits, I32a z) { return extract(_(x), bits, _(z)); }
        I32 pack   (I32a x, I32a y, int bits) { return pack   (_(x), _(y), bits); }
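
        // E.g., a rough sketch of splitting and re-packing two 8-bit channels of a
        // packed 32-bit value 'px' (see also unpack_8888() below):
        //
        //     I32 r  = extract(px, 0, 0xff);    // (px >> 0) & 0xff
        //     I32 g  = extract(px, 8, 0xff);    // (px >> 8) & 0xff
        //     I32 rg = pack(r, g, 8);           // r | (g << 8)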

        // Common idioms used in several places, worth centralizing for consistency.
        F32 from_unorm(int bits, I32);  // E.g. from_unorm(8, x) -> x * (1/255.0f)
        I32   to_unorm(int bits, F32);  // E.g.   to_unorm(8, x) -> round(x * 255)

        Color unpack_1010102(I32 rgba);
        Color unpack_8888   (I32 rgba);
        Color unpack_565    (I32 bgr );  // bottom 16 bits

        void   premul(F32* r, F32* g, F32* b, F32 a);
        void unpremul(F32* r, F32* g, F32* b, F32 a);

        Color   premul(Color c) { this->  premul(&c.r, &c.g, &c.b, c.a); return c; }
        Color unpremul(Color c) { this->unpremul(&c.r, &c.g, &c.b, c.a); return c; }
        Color lerp(Color lo, Color hi, F32 t);
        Color blend(SkBlendMode, Color src, Color dst);

        HSLA  to_hsla(Color);
        Color to_rgba(HSLA);
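
        // A rough sketch tying these color helpers together, assuming srcArg/dstArg
        // hold unpremultiplied RGBA_8888 pixels (names are illustrative only):
        //
        //     Color src = premul(unpack_8888(load32(srcArg))),
        //           dst = premul(unpack_8888(load32(dstArg))),
        //           out = blend(SkBlendMode::kSrcOver, src, dst);
        //     // ...then unpremul(out) and repack with to_unorm() and pack() before storing.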

        void dump(SkWStream* = nullptr) const;
        void dot (SkWStream* = nullptr, bool for_jit=false) const;

        uint64_t hash() const;

        Val push(Instruction);
    private:
        Val push(Op op, Val x, Val y=NA, Val z=NA, int immy=0, int immz=0) {
            return this->push(Instruction{op, x,y,z, immy,immz});
        }

        I32 _(I32a x) {
            if (x.id != NA) {
                SkASSERT(x.builder == this);
                return {this, x.id};
            }
            return splat(x.imm);
        }

        F32 _(F32a x) {
            if (x.id != NA) {
                SkASSERT(x.builder == this);
                return {this, x.id};
            }
            return splat(x.imm);
        }

        bool allImm() const;

        template <typename T, typename... Rest>
        bool allImm(Val, T* imm, Rest...) const;

        template <typename T>
        bool isImm(Val id, T want) const {
            T imm = 0;
            return this->allImm(id, &imm) && imm == want;
        }

        SkTHashMap<Instruction, Val, InstructionHash> fIndex;
        std::vector<Instruction>                      fProgram;
        std::vector<int>                              fStrides;
    };

    // Optimization passes and data structures normally used by Builder::optimize(),
    // extracted here so they can be unit tested.
    std::vector<Instruction>          specialize_for_jit (std::vector<Instruction>);
    std::vector<Instruction>          eliminate_dead_code(std::vector<Instruction>);
    std::vector<Instruction>          schedule           (std::vector<Instruction>);
    std::vector<OptimizedInstruction> finalize           (std::vector<Instruction>);

    class Usage {
    public:
        Usage(const std::vector<Instruction>&);

        // Returns a sorted span of the Vals which use the result of Instruction id.
        SkSpan<const Val> operator[](Val id) const;

    private:
        std::vector<int> fIndex;
        std::vector<Val> fTable;
    };

    using Reg = int;

    // d = op(x, y/imm, z/imm)
    struct InterpreterInstruction {
        Op  op;
        Reg d,x;
        union { Reg y; int immy; };
        union { Reg z; int immz; };
    };

    class Program {
    public:
        Program(const std::vector<OptimizedInstruction>& interpreter,
                const std::vector<int>& strides);

        Program(const std::vector<OptimizedInstruction>& interpreter,
                const std::vector<OptimizedInstruction>& jit,
                const std::vector<int>& strides,
                const char* debug_name);

        Program();
        ~Program();

        Program(Program&&);
        Program& operator=(Program&&);

        Program(const Program&) = delete;
        Program& operator=(const Program&) = delete;

        void eval(int n, void* args[]) const;

        template <typename... T>
        void eval(int n, T*... arg) const {
            SkASSERT(sizeof...(arg) == this->nargs());
            // This nullptr isn't important except that it makes args[] non-empty if you pass none.
            void* args[] = { (void*)arg..., nullptr };
            this->eval(n, args);
        }
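
        // Pointers must be passed in the same order their Args were declared on the
        // Builder, one per Arg. E.g. (a rough sketch, names illustrative only):
        //
        //     // Builder side:  Arg uni = b.uniform(), src = b.varying<int>();
        //     float scale = 0.5f;
        //     program.eval(n, &scale, srcPtr);   // args[0] = &scale, args[1] = srcPtr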

        std::vector<InterpreterInstruction> instructions() const;
        int  nargs() const;
        int  nregs() const;
        int  loop () const;
        bool empty() const;

        bool hasJIT() const;  // Has this Program been JITted?
        void dropJIT();       // If hasJIT(), drop it, forcing interpreter fallback.

        void dump(SkWStream* = nullptr) const;

    private:
        void setupInterpreter(const std::vector<OptimizedInstruction>&);
        void setupJIT        (const std::vector<OptimizedInstruction>&, const char* debug_name);
        void setupLLVM       (const std::vector<OptimizedInstruction>&, const char* debug_name);

        enum class JITMode {
            // Fastest but most fragile strategy: values in registers, loop-invariant work hoisted.
            Register,
            // Usually next best: values in registers, loop-invariant work not hoisted.
            RegisterNoHoist,
            // Backup plan: values on the stack, loop-invariant work hoisted.
            Stack,
        };
        bool jit(const std::vector<OptimizedInstruction>&, JITMode, Assembler*) const;

        void waitForLLVM() const;

        struct Impl;
        std::unique_ptr<Impl> fImpl;
    };

    // TODO: control flow
    // TODO: 64-bit values?

    static inline I32 operator+(I32 x, I32a y) { return x->add(x,y); }
    static inline I32 operator+(int x, I32  y) { return y->add(x,y); }

    static inline I32 operator-(I32 x, I32a y) { return x->sub(x,y); }
    static inline I32 operator-(int x, I32  y) { return y->sub(x,y); }

    static inline I32 operator*(I32 x, I32a y) { return x->mul(x,y); }
    static inline I32 operator*(int x, I32  y) { return y->mul(x,y); }

    static inline I32 min(I32 x, I32a y) { return x->min(x,y); }
    static inline I32 min(int x, I32  y) { return y->min(x,y); }

    static inline I32 max(I32 x, I32a y) { return x->max(x,y); }
    static inline I32 max(int x, I32  y) { return y->max(x,y); }

    static inline I32 operator==(I32 x, I32a y) { return x->eq(x,y); }
    static inline I32 operator==(int x, I32  y) { return y->eq(x,y); }

    static inline I32 operator!=(I32 x, I32a y) { return x->neq(x,y); }
    static inline I32 operator!=(int x, I32  y) { return y->neq(x,y); }

    static inline I32 operator< (I32 x, I32a y) { return x->lt(x,y); }
    static inline I32 operator< (int x, I32  y) { return y->lt(x,y); }

    static inline I32 operator<=(I32 x, I32a y) { return x->lte(x,y); }
    static inline I32 operator<=(int x, I32  y) { return y->lte(x,y); }

    static inline I32 operator> (I32 x, I32a y) { return x->gt(x,y); }
    static inline I32 operator> (int x, I32  y) { return y->gt(x,y); }

    static inline I32 operator>=(I32 x, I32a y) { return x->gte(x,y); }
    static inline I32 operator>=(int x, I32  y) { return y->gte(x,y); }


    static inline F32 operator+(F32   x, F32a y) { return x->add(x,y); }
    static inline F32 operator+(float x, F32  y) { return y->add(x,y); }

    static inline F32 operator-(F32   x, F32a y) { return x->sub(x,y); }
    static inline F32 operator-(float x, F32  y) { return y->sub(x,y); }

    static inline F32 operator*(F32   x, F32a y) { return x->mul(x,y); }
    static inline F32 operator*(float x, F32  y) { return y->mul(x,y); }

    static inline F32 operator/(F32   x, F32  y) { return x->div(x,y); }
    static inline F32 operator/(float x, F32  y) { return y->div(x,y); }

    static inline F32 min(F32   x, F32a y) { return x->min(x,y); }
    static inline F32 min(float x, F32  y) { return y->min(x,y); }

    static inline F32 max(F32   x, F32a y) { return x->max(x,y); }
    static inline F32 max(float x, F32  y) { return y->max(x,y); }

    static inline I32 operator==(F32   x, F32a y) { return x->eq(x,y); }
    static inline I32 operator==(float x, F32  y) { return y->eq(x,y); }

    static inline I32 operator!=(F32   x, F32a y) { return x->neq(x,y); }
    static inline I32 operator!=(float x, F32  y) { return y->neq(x,y); }

    static inline I32 operator< (F32   x, F32a y) { return x->lt(x,y); }
    static inline I32 operator< (float x, F32  y) { return y->lt(x,y); }

    static inline I32 operator<=(F32   x, F32a y) { return x->lte(x,y); }
    static inline I32 operator<=(float x, F32  y) { return y->lte(x,y); }

    static inline I32 operator> (F32   x, F32a y) { return x->gt(x,y); }
    static inline I32 operator> (float x, F32  y) { return y->gt(x,y); }

    static inline I32 operator>=(F32   x, F32a y) { return x->gte(x,y); }
    static inline I32 operator>=(float x, F32  y) { return y->gte(x,y); }


    static inline I32& operator+=(I32& x, I32a y) { return (x = x + y); }
    static inline I32& operator-=(I32& x, I32a y) { return (x = x - y); }
    static inline I32& operator*=(I32& x, I32a y) { return (x = x * y); }

    static inline F32& operator+=(F32& x, F32a y) { return (x = x + y); }
    static inline F32& operator-=(F32& x, F32a y) { return (x = x - y); }
    static inline F32& operator*=(F32& x, F32a y) { return (x = x * y); }

    static inline I32 operator-(I32 x) { return 0-x; }
    static inline F32 operator-(F32 x) { return 0-x; }
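
    // With these operators, programs can be written in ordinary expression syntax,
    // e.g. (a rough sketch, assuming x and y are skvm::F32 built on the same Builder):
    //
    //     F32 z = clamp01(2.0f*x - y + 0.25f);
    //     I32 m = (z > 0.5f) & (x != y);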

    static inline void assert_true(I32 cond, I32 debug) { cond->assert_true(cond,debug); }
    static inline void assert_true(I32 cond, F32 debug) { cond->assert_true(cond,debug); }
    static inline void assert_true(I32 cond)            { cond->assert_true(cond); }

    static inline void store8 (Arg ptr, I32 val) { val->store8 (ptr, val); }
    static inline void store16(Arg ptr, I32 val) { val->store16(ptr, val); }
    static inline void store32(Arg ptr, I32 val) { val->store32(ptr, val); }
    static inline void storeF (Arg ptr, F32 val) { val->storeF (ptr, val); }

    static inline I32 gather8 (Arg ptr, int off, I32 ix) { return ix->gather8 (ptr, off, ix); }
    static inline I32 gather16(Arg ptr, int off, I32 ix) { return ix->gather16(ptr, off, ix); }
    static inline I32 gather32(Arg ptr, int off, I32 ix) { return ix->gather32(ptr, off, ix); }
    static inline F32 gatherF (Arg ptr, int off, I32 ix) { return ix->gatherF (ptr, off, ix); }

    static inline I32 gather8 (Uniform u, I32 ix) { return ix->gather8 (u, ix); }
    static inline I32 gather16(Uniform u, I32 ix) { return ix->gather16(u, ix); }
    static inline I32 gather32(Uniform u, I32 ix) { return ix->gather32(u, ix); }
    static inline F32 gatherF (Uniform u, I32 ix) { return ix->gatherF (u, ix); }

    static inline F32        sqrt(F32 x) { return x->       sqrt(x); }
    static inline F32 approx_log2(F32 x) { return x->approx_log2(x); }
    static inline F32 approx_pow2(F32 x) { return x->approx_pow2(x); }
    static inline F32 approx_log (F32 x) { return x->approx_log (x); }
    static inline F32 approx_exp (F32 x) { return x->approx_exp (x); }

    static inline F32 approx_powf(F32   base, F32a exp) { return base->approx_powf(base, exp); }
    static inline F32 approx_powf(float base, F32  exp) { return  exp->approx_powf(base, exp); }

    static inline F32 approx_sin(F32 radians) { return radians->approx_sin(radians); }
    static inline F32 approx_cos(F32 radians) { return radians->approx_cos(radians); }
    static inline F32 approx_tan(F32 radians) { return radians->approx_tan(radians); }

    static inline F32 clamp01(F32 x) { return x->clamp01(x); }
    static inline F32     abs(F32 x) { return x->    abs(x); }
    static inline F32   fract(F32 x) { return x->  fract(x); }
    static inline F32   floor(F32 x) { return x->  floor(x); }
    static inline I32  is_NaN(F32 x) { return x-> is_NaN(x); }

    static inline I32    trunc(F32 x) { return x->   trunc(x); }
    static inline I32    round(F32 x) { return x->   round(x); }
    static inline I32 bit_cast(F32 x) { return x->bit_cast(x); }
    static inline F32 bit_cast(I32 x) { return x->bit_cast(x); }
    static inline F32   to_f32(I32 x) { return x->  to_f32(x); }

    static inline F32 lerp(F32   lo, F32a  hi, F32a t) { return lo->lerp(lo,hi,t); }
    static inline F32 lerp(float lo, F32   hi, F32a t) { return hi->lerp(lo,hi,t); }
    static inline F32 lerp(float lo, float hi, F32  t) { return  t->lerp(lo,hi,t); }

    static inline F32 clamp(F32   x, F32a  lo, F32a hi) { return  x->clamp(x,lo,hi); }
    static inline F32 clamp(float x, F32   lo, F32a hi) { return lo->clamp(x,lo,hi); }
    static inline F32 clamp(float x, float lo, F32  hi) { return hi->clamp(x,lo,hi); }

    static inline F32 norm(F32   x, F32a y) { return x->norm(x,y); }
    static inline F32 norm(float x, F32  y) { return y->norm(x,y); }

    static inline I32 operator<<(I32 x, int bits) { return x->shl(x, bits); }
    static inline I32        shl(I32 x, int bits) { return x->shl(x, bits); }
    static inline I32        shr(I32 x, int bits) { return x->shr(x, bits); }
    static inline I32        sra(I32 x, int bits) { return x->sra(x, bits); }

    static inline I32 operator&(I32 x, I32a y) { return x->bit_and(x,y); }
    static inline I32 operator&(int x, I32  y) { return y->bit_and(x,y); }

    static inline I32 operator|(I32 x, I32a y) { return x->bit_or (x,y); }
    static inline I32 operator|(int x, I32  y) { return y->bit_or (x,y); }

    static inline I32 operator^(I32 x, I32a y) { return x->bit_xor(x,y); }
    static inline I32 operator^(int x, I32  y) { return y->bit_xor(x,y); }

    static inline I32& operator&=(I32& x, I32a y) { return (x = x & y); }
    static inline I32& operator|=(I32& x, I32a y) { return (x = x | y); }
    static inline I32& operator^=(I32& x, I32a y) { return (x = x ^ y); }

    static inline I32 select(I32 cond, I32a t, I32a f) { return cond->select(cond,t,f); }
    static inline F32 select(I32 cond, F32a t, F32a f) { return cond->select(cond,t,f); }

    static inline I32 extract(I32 x, int bits, I32a z) { return x->extract(x,bits,z); }
    static inline I32 extract(int x, int bits, I32  z) { return z->extract(x,bits,z); }
    static inline I32 pack   (I32 x, I32a y, int bits) { return x->pack   (x,y,bits); }
    static inline I32 pack   (int x, I32  y, int bits) { return y->pack   (x,y,bits); }

    static inline F32 from_unorm(int bits, I32 x) { return x->from_unorm(bits,x); }
    static inline I32   to_unorm(int bits, F32 x) { return x->  to_unorm(bits,x); }

    static inline Color unpack_1010102(I32 rgba) { return rgba->unpack_1010102(rgba); }
    static inline Color unpack_8888   (I32 rgba) { return rgba->unpack_8888   (rgba); }
    static inline Color unpack_565    (I32 bgr ) { return bgr ->unpack_565    (bgr ); }

    static inline void   premul(F32* r, F32* g, F32* b, F32 a) { a->  premul(r,g,b,a); }
    static inline void unpremul(F32* r, F32* g, F32* b, F32 a) { a->unpremul(r,g,b,a); }

    static inline Color   premul(Color c) { return c->  premul(c); }
    static inline Color unpremul(Color c) { return c->unpremul(c); }

    static inline Color lerp(Color lo, Color hi, F32 t) { return t->lerp(lo,hi,t); }

    static inline Color blend(SkBlendMode m, Color s, Color d) { return s->blend(m,s,d); }

    static inline HSLA  to_hsla(Color c) { return c->to_hsla(c); }
    static inline Color to_rgba(HSLA  c) { return c->to_rgba(c); }

    // Evaluate polynomials: ax^n + bx^(n-1) + ... for n >= 1
    template <typename... Rest>
    static inline F32 poly(F32 x, F32a a, F32a b, Rest... rest) {
        if constexpr (sizeof...(rest) == 0) {
            return x*a+b;
        } else {
            return poly(x, x*a+b, rest...);
        }
    }
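
    // E.g. poly(x, 3.0f, -2.0f, 1.0f) evaluates 3x^2 - 2x + 1
    // via Horner's method: (3*x + -2)*x + 1.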
}  // namespace skvm

#endif  // SkVM_DEFINED