/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SKVX_DEFINED
#define SKVX_DEFINED

// skvx::Vec<N,T> are SIMD vectors of N T's, a v1.5 successor to SkNx<N,T>.
//
// This time we're leaning a bit less on platform-specific intrinsics and a bit
// more on Clang/GCC vector extensions, but still keeping the option open to
// drop in platform-specific intrinsics, actually more easily than before.
//
// We've also fixed a few of the caveats that used to make SkNx awkward to work
// with across translation units. skvx::Vec<N,T> always has N*sizeof(T) size
// and alignment[1][2] and is safe to use across translation units freely.
//
// [1] Ideally we'd only align to T, but that tanks ARMv7 NEON codegen.
// [2] Some compilers barf if we try to use N*sizeof(T), so instead we leave them at T.
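//
// A quick usage sketch (illustrative only, not extra API):
//     skvx::Vec<4,float> x = {1,2,3,4},
//                        y = 2*x + 1;                  // lanewise math: y == {3,5,7,9}
//     float buf[4];
//     y.store(buf);                                    // writes {3,5,7,9} to buf
//     auto z = skvx::Vec<4,float>::Load(buf);          // and Load() round-trips it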

// Please try to keep this file independent of Skia headers.
#include <algorithm>         // std::min, std::max
#include <cmath>             // std::ceil, std::floor, std::trunc, std::round, std::sqrt, etc.
#include <cstdint>           // intXX_t
#include <cstring>           // memcpy()
#include <initializer_list>  // std::initializer_list
#include <type_traits>       // std::enable_if, std::is_convertible

#if defined(__SSE__) || defined(__AVX__) || defined(__AVX2__)
    #include <immintrin.h>
#elif defined(__ARM_NEON)
    #include <arm_neon.h>
#endif

#if defined __wasm_simd128__
    // WASM SIMD intrinsics definitions: https://github.com/llvm/llvm-project/blob/master/clang/lib/Headers/wasm_simd128.h
    #include <wasm_simd128.h>
#endif

#if !defined(__clang__) && defined(__GNUC__) && defined(__mips64)
    // GCC 7 hits an internal compiler error when targeting MIPS64.
    #define SKVX_ALIGNMENT
#elif !defined(__clang__) && defined(_MSC_VER) && defined(_M_IX86)
    // Our SkVx unit tests fail when built by MSVC for 32-bit x86.
    #define SKVX_ALIGNMENT
#else
    #define SKVX_ALIGNMENT alignas(N * sizeof(T))
#endif

#if defined(__GNUC__) && !defined(__clang__) && defined(__SSE__)
    // GCC warns about ABI changes when returning >= 32 byte vectors when -mavx is not enabled.
    // This only happens for types like VExt whose ABI we don't care about, not for Vec itself.
    #pragma GCC diagnostic ignored "-Wpsabi"
#endif

// To avoid ODR violations, all methods must be force-inlined,
// and all standalone functions must be static, perhaps using these helpers.
#if defined(_MSC_VER)
    #define SKVX_ALWAYS_INLINE __forceinline
#else
    #define SKVX_ALWAYS_INLINE __attribute__((always_inline))
#endif

#define SIT   template <       typename T> static inline
#define SINT  template <int N, typename T> static inline
#define SINTU template <int N, typename T, typename U, \
                        typename=typename std::enable_if<std::is_convertible<U,T>::value>::type> \
              static inline

namespace skvx {

// All Vec have the same simple memory layout, the same as `T vec[N]`.
template <int N, typename T>
struct SKVX_ALIGNMENT Vec {
    static_assert((N & (N-1)) == 0,        "N must be a power of 2.");
    static_assert(sizeof(T) >= alignof(T), "What kind of crazy T is this?");

    Vec<N/2,T> lo, hi;

    // Methods belong here in the class declaration of Vec only if:
    //   - they must be here, like constructors or operator[];
    //   - they'll definitely never want a specialized implementation.
    // Other operations on Vec should be defined outside the type.

    SKVX_ALWAYS_INLINE Vec() = default;

    template <typename U,
              typename=typename std::enable_if<std::is_convertible<U,T>::value>::type>
    SKVX_ALWAYS_INLINE
    Vec(U x) : lo(x), hi(x) {}

    SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) {
        T vals[N] = {0};
        memcpy(vals, xs.begin(), std::min(xs.size(), (size_t)N)*sizeof(T));

        lo = Vec<N/2,T>::Load(vals + 0);
        hi = Vec<N/2,T>::Load(vals + N/2);
    }

    SKVX_ALWAYS_INLINE T  operator[](int i) const { return i < N/2 ? lo[i] : hi[i-N/2]; }
    SKVX_ALWAYS_INLINE T& operator[](int i)       { return i < N/2 ? lo[i] : hi[i-N/2]; }

    SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
        Vec v;
        memcpy(&v, ptr, sizeof(Vec));
        return v;
    }
    SKVX_ALWAYS_INLINE void store(void* ptr) const {
        memcpy(ptr, this, sizeof(Vec));
    }
};

template <typename T>
struct Vec<1,T> {
    T val;

    SKVX_ALWAYS_INLINE Vec() = default;

    template <typename U,
              typename=typename std::enable_if<std::is_convertible<U,T>::value>::type>
    SKVX_ALWAYS_INLINE
    Vec(U x) : val(x) {}

    SKVX_ALWAYS_INLINE Vec(std::initializer_list<T> xs) : val(xs.size() ? *xs.begin() : 0) {}

    SKVX_ALWAYS_INLINE T  operator[](int) const { return val; }
    SKVX_ALWAYS_INLINE T& operator[](int)       { return val; }

    SKVX_ALWAYS_INLINE static Vec Load(const void* ptr) {
        Vec v;
        memcpy(&v, ptr, sizeof(Vec));
        return v;
    }
    SKVX_ALWAYS_INLINE void store(void* ptr) const {
        memcpy(ptr, this, sizeof(Vec));
    }
};

template <typename D, typename S>
static inline D unchecked_bit_pun(const S& s) {
    D d;
    memcpy(&d, &s, sizeof(D));
    return d;
}

template <typename D, typename S>
static inline D bit_pun(const S& s) {
    static_assert(sizeof(D) == sizeof(S), "");
    return unchecked_bit_pun<D>(s);
}

// Translate from a value type T to its corresponding Mask, the result of a comparison.
template <typename T> struct Mask { using type = T; };
template <> struct Mask<float > { using type = int32_t; };
template <> struct Mask<double> { using type = int64_t; };
template <typename T> using M = typename Mask<T>::type;
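//
// For example (a sketch of intent, not extra API): comparing two Vec<4,float> produces a
// Vec<4,int32_t> mask whose lanes are ~0 (all bits set) where true and 0 where false:
//     Vec<4,float> a = {1,2,3,4},
//                  b = {4,3,2,1};
//     Vec<4,int32_t> m = a < b;   // ~> {~0, ~0, 0, 0}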

// Join two Vec<N,T> into one Vec<2N,T>.
SINT Vec<2*N,T> join(const Vec<N,T>& lo, const Vec<N,T>& hi) {
    Vec<2*N,T> v;
    v.lo = lo;
    v.hi = hi;
    return v;
}

// We have two default strategies for implementing most operations:
//    1) lean on Clang/GCC vector extensions when available;
//    2) recurse to scalar portable implementations when not.
// At the end we can drop in platform-specific implementations that override either default.

#if !defined(SKNX_NO_SIMD) && (defined(__clang__) || defined(__GNUC__))

    // VExt<N,T> types have the same size as Vec<N,T> and support most operations directly.
    // N.B. VExt<N,T> alignment is N*alignof(T), stricter than Vec<N,T>'s alignof(T).
    #if defined(__clang__)
        template <int N, typename T>
        using VExt = T __attribute__((ext_vector_type(N)));

    #elif defined(__GNUC__)
        template <int N, typename T>
        struct VExtHelper {
            typedef T __attribute__((vector_size(N*sizeof(T)))) type;
        };

        template <int N, typename T>
        using VExt = typename VExtHelper<N,T>::type;

        // For some reason some (new!) versions of GCC cannot seem to deduce N in the generic
        // to_vec<N,T>() below for N=4 and T=float.  This workaround seems to help...
        static inline Vec<4,float> to_vec(VExt<4,float> v) { return bit_pun<Vec<4,float>>(v); }
    #endif

    SINT VExt<N,T> to_vext(const Vec<N,T>& v) { return bit_pun<VExt<N,T>>(v); }
    SINT Vec <N,T> to_vec(const VExt<N,T>& v) { return bit_pun<Vec <N,T>>(v); }

    SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) + to_vext(y)); }
    SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) - to_vext(y)); }
    SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) * to_vext(y)); }
    SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) / to_vext(y)); }

    SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) ^ to_vext(y)); }
    SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) & to_vext(y)); }
    SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) { return to_vec<N,T>(to_vext(x) | to_vext(y)); }

    SINT Vec<N,T> operator!(const Vec<N,T>& x) { return to_vec<N,T>(!to_vext(x)); }
    SINT Vec<N,T> operator-(const Vec<N,T>& x) { return to_vec<N,T>(-to_vext(x)); }
    SINT Vec<N,T> operator~(const Vec<N,T>& x) { return to_vec<N,T>(~to_vext(x)); }

    SINT Vec<N,T> operator<<(const Vec<N,T>& x, int bits) { return to_vec<N,T>(to_vext(x) << bits); }
    SINT Vec<N,T> operator>>(const Vec<N,T>& x, int bits) { return to_vec<N,T>(to_vext(x) >> bits); }

    SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) { return bit_pun<Vec<N,M<T>>>(to_vext(x) == to_vext(y)); }
    SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) { return bit_pun<Vec<N,M<T>>>(to_vext(x) != to_vext(y)); }
    SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) { return bit_pun<Vec<N,M<T>>>(to_vext(x) <= to_vext(y)); }
    SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) { return bit_pun<Vec<N,M<T>>>(to_vext(x) >= to_vext(y)); }
    SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) { return bit_pun<Vec<N,M<T>>>(to_vext(x) <  to_vext(y)); }
    SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) { return bit_pun<Vec<N,M<T>>>(to_vext(x) >  to_vext(y)); }

#else

    // Either SKNX_NO_SIMD is defined, or Clang/GCC vector extensions are not available.
    // We'll implement things portably, in a way that should be easily autovectorizable.

    // N == 1 scalar implementations.
    SIT Vec<1,T> operator+(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val + y.val; }
    SIT Vec<1,T> operator-(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val - y.val; }
    SIT Vec<1,T> operator*(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val * y.val; }
    SIT Vec<1,T> operator/(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val / y.val; }

    SIT Vec<1,T> operator^(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val ^ y.val; }
    SIT Vec<1,T> operator&(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val & y.val; }
    SIT Vec<1,T> operator|(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val | y.val; }

    SIT Vec<1,T> operator!(const Vec<1,T>& x) { return !x.val; }
    SIT Vec<1,T> operator-(const Vec<1,T>& x) { return -x.val; }
    SIT Vec<1,T> operator~(const Vec<1,T>& x) { return ~x.val; }

    SIT Vec<1,T> operator<<(const Vec<1,T>& x, int bits) { return x.val << bits; }
    SIT Vec<1,T> operator>>(const Vec<1,T>& x, int bits) { return x.val >> bits; }

    SIT Vec<1,M<T>> operator==(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val == y.val ? ~0 : 0; }
    SIT Vec<1,M<T>> operator!=(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val != y.val ? ~0 : 0; }
    SIT Vec<1,M<T>> operator<=(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val <= y.val ? ~0 : 0; }
    SIT Vec<1,M<T>> operator>=(const Vec<1,T>& x, const Vec<1,T>& y) { return x.val >= y.val ? ~0 : 0; }
    SIT Vec<1,M<T>> operator< (const Vec<1,T>& x, const Vec<1,T>& y) { return x.val <  y.val ? ~0 : 0; }
    SIT Vec<1,M<T>> operator> (const Vec<1,T>& x, const Vec<1,T>& y) { return x.val >  y.val ? ~0 : 0; }

    // All default N != 1 implementations just recurse on lo and hi halves.
    SINT Vec<N,T> operator+(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo + y.lo, x.hi + y.hi); }
    SINT Vec<N,T> operator-(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo - y.lo, x.hi - y.hi); }
    SINT Vec<N,T> operator*(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo * y.lo, x.hi * y.hi); }
    SINT Vec<N,T> operator/(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo / y.lo, x.hi / y.hi); }

    SINT Vec<N,T> operator^(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo ^ y.lo, x.hi ^ y.hi); }
    SINT Vec<N,T> operator&(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo & y.lo, x.hi & y.hi); }
    SINT Vec<N,T> operator|(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo | y.lo, x.hi | y.hi); }

    SINT Vec<N,T> operator!(const Vec<N,T>& x) { return join(!x.lo, !x.hi); }
    SINT Vec<N,T> operator-(const Vec<N,T>& x) { return join(-x.lo, -x.hi); }
    SINT Vec<N,T> operator~(const Vec<N,T>& x) { return join(~x.lo, ~x.hi); }

    SINT Vec<N,T> operator<<(const Vec<N,T>& x, int bits) { return join(x.lo << bits, x.hi << bits); }
    SINT Vec<N,T> operator>>(const Vec<N,T>& x, int bits) { return join(x.lo >> bits, x.hi >> bits); }

    SINT Vec<N,M<T>> operator==(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo == y.lo, x.hi == y.hi); }
    SINT Vec<N,M<T>> operator!=(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo != y.lo, x.hi != y.hi); }
    SINT Vec<N,M<T>> operator<=(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo <= y.lo, x.hi <= y.hi); }
    SINT Vec<N,M<T>> operator>=(const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo >= y.lo, x.hi >= y.hi); }
    SINT Vec<N,M<T>> operator< (const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo <  y.lo, x.hi <  y.hi); }
    SINT Vec<N,M<T>> operator> (const Vec<N,T>& x, const Vec<N,T>& y) { return join(x.lo >  y.lo, x.hi >  y.hi); }
#endif

// Some operations we want are not expressible with Clang/GCC vector
// extensions, so we implement them using the recursive approach.

// N == 1 scalar implementations.
SIT Vec<1,T> if_then_else(const Vec<1,M<T>>& cond, const Vec<1,T>& t, const Vec<1,T>& e) {
    // In practice this scalar implementation is unlikely to be used.  See if_then_else() below.
    return bit_pun<Vec<1,T>>(( cond & bit_pun<Vec<1, M<T>>>(t)) |
                             (~cond & bit_pun<Vec<1, M<T>>>(e)) );
}

SIT bool any(const Vec<1,T>& x) { return x.val != 0; }
SIT bool all(const Vec<1,T>& x) { return x.val != 0; }

SIT T min(const Vec<1,T>& x) { return x.val; }
SIT T max(const Vec<1,T>& x) { return x.val; }

SIT Vec<1,T> min(const Vec<1,T>& x, const Vec<1,T>& y) { return std::min(x.val, y.val); }
SIT Vec<1,T> max(const Vec<1,T>& x, const Vec<1,T>& y) { return std::max(x.val, y.val); }
SIT Vec<1,T> pow(const Vec<1,T>& x, const Vec<1,T>& y) { return std::pow(x.val, y.val); }

SIT Vec<1,T>  atan(const Vec<1,T>& x) { return std:: atan(x.val); }
SIT Vec<1,T>  ceil(const Vec<1,T>& x) { return std:: ceil(x.val); }
SIT Vec<1,T> floor(const Vec<1,T>& x) { return std::floor(x.val); }
SIT Vec<1,T> trunc(const Vec<1,T>& x) { return std::trunc(x.val); }
SIT Vec<1,T> round(const Vec<1,T>& x) { return std::round(x.val); }
SIT Vec<1,T>  sqrt(const Vec<1,T>& x) { return std:: sqrt(x.val); }
SIT Vec<1,T>   abs(const Vec<1,T>& x) { return std::  abs(x.val); }
SIT Vec<1,T>   sin(const Vec<1,T>& x) { return std::  sin(x.val); }
SIT Vec<1,T>   cos(const Vec<1,T>& x) { return std::  cos(x.val); }
SIT Vec<1,T>   tan(const Vec<1,T>& x) { return std::  tan(x.val); }

SIT Vec<1,int> lrint(const Vec<1,T>& x) { return (int)std::lrint(x.val); }

SIT Vec<1,T>   rcp(const Vec<1,T>& x) { return 1 / x.val; }
SIT Vec<1,T> rsqrt(const Vec<1,T>& x) { return rcp(sqrt(x)); }

// All default N != 1 implementations just recurse on lo and hi halves.
SINT Vec<N,T> if_then_else(const Vec<N,M<T>>& cond, const Vec<N,T>& t, const Vec<N,T>& e) {
    // Specializations inline here so they can generalize what types they apply to.
    // (This header is used in C++14 contexts, so we have to kind of fake constexpr if.)
#if defined(__AVX__)
    if /*constexpr*/ (N == 8 && sizeof(T) == 4) {
        return unchecked_bit_pun<Vec<N,T>>(_mm256_blendv_ps(unchecked_bit_pun<__m256>(e),
                                                            unchecked_bit_pun<__m256>(t),
                                                            unchecked_bit_pun<__m256>(cond)));
    }
#endif
#if defined(__SSE4_1__)
    if /*constexpr*/ (N == 4 && sizeof(T) == 4) {
        return unchecked_bit_pun<Vec<N,T>>(_mm_blendv_ps(unchecked_bit_pun<__m128>(e),
                                                         unchecked_bit_pun<__m128>(t),
                                                         unchecked_bit_pun<__m128>(cond)));
    }
#endif
#if defined(__ARM_NEON)
    if /*constexpr*/ (N == 4 && sizeof(T) == 4) {
        return unchecked_bit_pun<Vec<N,T>>(vbslq_f32(unchecked_bit_pun< uint32x4_t>(cond),
                                                     unchecked_bit_pun<float32x4_t>(t),
                                                     unchecked_bit_pun<float32x4_t>(e)));
    }
#endif
    // Recurse for large vectors to try to hit the specializations above.
    if /*constexpr*/ (N > 4) {
        return join(if_then_else(cond.lo, t.lo, e.lo),
                    if_then_else(cond.hi, t.hi, e.hi));
    }
    // This default can lead to better code than recursing all the way down to scalars.
    return bit_pun<Vec<N,T>>(( cond & bit_pun<Vec<N, M<T>>>(t)) |
                             (~cond & bit_pun<Vec<N, M<T>>>(e)) );
}
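
// For illustration (not extra API), if_then_else() is a lanewise select, e.g.
//     Vec<4,float> clamped = if_then_else(x < 0.0f, Vec<4,float>(0.0f), x);
// zeroes any negative lanes of x and passes the rest through unchanged.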

SINT bool any(const Vec<N,T>& x) { return any(x.lo) || any(x.hi); }
SINT bool all(const Vec<N,T>& x) { return all(x.lo) && all(x.hi); }

SINT T min(const Vec<N,T>& x) { return std::min(min(x.lo), min(x.hi)); }
SINT T max(const Vec<N,T>& x) { return std::max(max(x.lo), max(x.hi)); }

SINT Vec<N,T> min(const Vec<N,T>& x, const Vec<N,T>& y) { return join(min(x.lo, y.lo), min(x.hi, y.hi)); }
SINT Vec<N,T> max(const Vec<N,T>& x, const Vec<N,T>& y) { return join(max(x.lo, y.lo), max(x.hi, y.hi)); }
SINT Vec<N,T> pow(const Vec<N,T>& x, const Vec<N,T>& y) { return join(pow(x.lo, y.lo), pow(x.hi, y.hi)); }

SINT Vec<N,T>  atan(const Vec<N,T>& x) { return join( atan(x.lo),  atan(x.hi)); }
SINT Vec<N,T>  ceil(const Vec<N,T>& x) { return join( ceil(x.lo),  ceil(x.hi)); }
SINT Vec<N,T> floor(const Vec<N,T>& x) { return join(floor(x.lo), floor(x.hi)); }
SINT Vec<N,T> trunc(const Vec<N,T>& x) { return join(trunc(x.lo), trunc(x.hi)); }
SINT Vec<N,T> round(const Vec<N,T>& x) { return join(round(x.lo), round(x.hi)); }
SINT Vec<N,T>  sqrt(const Vec<N,T>& x) { return join( sqrt(x.lo),  sqrt(x.hi)); }
SINT Vec<N,T>   abs(const Vec<N,T>& x) { return join(  abs(x.lo),   abs(x.hi)); }
SINT Vec<N,T>   sin(const Vec<N,T>& x) { return join(  sin(x.lo),   sin(x.hi)); }
SINT Vec<N,T>   cos(const Vec<N,T>& x) { return join(  cos(x.lo),   cos(x.hi)); }
SINT Vec<N,T>   tan(const Vec<N,T>& x) { return join(  tan(x.lo),   tan(x.hi)); }

SINT Vec<N,int> lrint(const Vec<N,T>& x) { return join(lrint(x.lo), lrint(x.hi)); }

SINT Vec<N,T>   rcp(const Vec<N,T>& x) { return join(  rcp(x.lo),   rcp(x.hi)); }
SINT Vec<N,T> rsqrt(const Vec<N,T>& x) { return join(rsqrt(x.lo), rsqrt(x.hi)); }


// Scalar/vector operations just splat the scalar to a vector...
SINTU Vec<N,T>    operator+ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) +  y; }
SINTU Vec<N,T>    operator- (U x, const Vec<N,T>& y) { return Vec<N,T>(x) -  y; }
SINTU Vec<N,T>    operator* (U x, const Vec<N,T>& y) { return Vec<N,T>(x) *  y; }
SINTU Vec<N,T>    operator/ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) /  y; }
SINTU Vec<N,T>    operator^ (U x, const Vec<N,T>& y) { return Vec<N,T>(x) ^  y; }
SINTU Vec<N,T>    operator& (U x, const Vec<N,T>& y) { return Vec<N,T>(x) &  y; }
SINTU Vec<N,T>    operator| (U x, const Vec<N,T>& y) { return Vec<N,T>(x) |  y; }
SINTU Vec<N,M<T>> operator==(U x, const Vec<N,T>& y) { return Vec<N,T>(x) == y; }
SINTU Vec<N,M<T>> operator!=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) != y; }
SINTU Vec<N,M<T>> operator<=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) <= y; }
SINTU Vec<N,M<T>> operator>=(U x, const Vec<N,T>& y) { return Vec<N,T>(x) >= y; }
SINTU Vec<N,M<T>> operator< (U x, const Vec<N,T>& y) { return Vec<N,T>(x) <  y; }
SINTU Vec<N,M<T>> operator> (U x, const Vec<N,T>& y) { return Vec<N,T>(x) >  y; }
SINTU Vec<N,T>           min(U x, const Vec<N,T>& y) { return min(Vec<N,T>(x), y); }
SINTU Vec<N,T>           max(U x, const Vec<N,T>& y) { return max(Vec<N,T>(x), y); }
SINTU Vec<N,T>           pow(U x, const Vec<N,T>& y) { return pow(Vec<N,T>(x), y); }

// ... and same deal for vector/scalar operations.
SINTU Vec<N,T>    operator+ (const Vec<N,T>& x, U y) { return x +  Vec<N,T>(y); }
SINTU Vec<N,T>    operator- (const Vec<N,T>& x, U y) { return x -  Vec<N,T>(y); }
SINTU Vec<N,T>    operator* (const Vec<N,T>& x, U y) { return x *  Vec<N,T>(y); }
SINTU Vec<N,T>    operator/ (const Vec<N,T>& x, U y) { return x /  Vec<N,T>(y); }
SINTU Vec<N,T>    operator^ (const Vec<N,T>& x, U y) { return x ^  Vec<N,T>(y); }
SINTU Vec<N,T>    operator& (const Vec<N,T>& x, U y) { return x &  Vec<N,T>(y); }
SINTU Vec<N,T>    operator| (const Vec<N,T>& x, U y) { return x |  Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator==(const Vec<N,T>& x, U y) { return x == Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator!=(const Vec<N,T>& x, U y) { return x != Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator<=(const Vec<N,T>& x, U y) { return x <= Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator>=(const Vec<N,T>& x, U y) { return x >= Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator< (const Vec<N,T>& x, U y) { return x <  Vec<N,T>(y); }
SINTU Vec<N,M<T>> operator> (const Vec<N,T>& x, U y) { return x >  Vec<N,T>(y); }
SINTU Vec<N,T>           min(const Vec<N,T>& x, U y) { return min(x, Vec<N,T>(y)); }
SINTU Vec<N,T>           max(const Vec<N,T>& x, U y) { return max(x, Vec<N,T>(y)); }
SINTU Vec<N,T>           pow(const Vec<N,T>& x, U y) { return pow(x, Vec<N,T>(y)); }
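
// For instance (illustrative only), these overloads let scalars mix in on either side:
//     Vec<4,float> v = {1,2,3,4};
//     auto lo = min(v, 2.0f),   // ~> {1,2,2,2}
//          hi = 10.0f - v;      // ~> {9,8,7,6}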

// The various op= operators, for vectors...
SINT Vec<N,T>& operator+=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x + y); }
SINT Vec<N,T>& operator-=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x - y); }
SINT Vec<N,T>& operator*=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x * y); }
SINT Vec<N,T>& operator/=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x / y); }
SINT Vec<N,T>& operator^=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x ^ y); }
SINT Vec<N,T>& operator&=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x & y); }
SINT Vec<N,T>& operator|=(Vec<N,T>& x, const Vec<N,T>& y) { return (x = x | y); }

// ... for scalars...
SINTU Vec<N,T>& operator+=(Vec<N,T>& x, U y) { return (x = x + Vec<N,T>(y)); }
SINTU Vec<N,T>& operator-=(Vec<N,T>& x, U y) { return (x = x - Vec<N,T>(y)); }
SINTU Vec<N,T>& operator*=(Vec<N,T>& x, U y) { return (x = x * Vec<N,T>(y)); }
SINTU Vec<N,T>& operator/=(Vec<N,T>& x, U y) { return (x = x / Vec<N,T>(y)); }
SINTU Vec<N,T>& operator^=(Vec<N,T>& x, U y) { return (x = x ^ Vec<N,T>(y)); }
SINTU Vec<N,T>& operator&=(Vec<N,T>& x, U y) { return (x = x & Vec<N,T>(y)); }
SINTU Vec<N,T>& operator|=(Vec<N,T>& x, U y) { return (x = x | Vec<N,T>(y)); }

// ... and for shifts.
SINT Vec<N,T>& operator<<=(Vec<N,T>& x, int bits) { return (x = x << bits); }
SINT Vec<N,T>& operator>>=(Vec<N,T>& x, int bits) { return (x = x >> bits); }

// cast() Vec<N,S> to Vec<N,D>, as if applying a C-cast to each lane.
template <typename D, typename S>
static inline Vec<1,D> cast(const Vec<1,S>& src) { return (D)src.val; }

template <typename D, int N, typename S>
static inline Vec<N,D> cast(const Vec<N,S>& src) {
#if !defined(SKNX_NO_SIMD) && defined(__clang__)
    return to_vec(__builtin_convertvector(to_vext(src), VExt<N,D>));
#else
    return join(cast<D>(src.lo), cast<D>(src.hi));
#endif
}
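
// For example (sketching the semantics only):
//     cast<int>(Vec<4,float>{1.5f, -2.5f, 3.0f, 4.9f})  ~>  {1, -2, 3, 4}, truncating like a C-cast.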

// Shuffle values from a vector pretty arbitrarily:
//    skvx::Vec<4,float> rgba = {R,G,B,A};
//    shuffle<2,1,0,3>        (rgba) ~> {B,G,R,A}
//    shuffle<2,1>            (rgba) ~> {B,G}
//    shuffle<2,1,2,1,2,1,2,1>(rgba) ~> {B,G,B,G,B,G,B,G}
//    shuffle<3,3,3,3>        (rgba) ~> {A,A,A,A}
// The only real restriction is that the output also be a legal N=power-of-two skvx::Vec.
template <int... Ix, int N, typename T>
static inline Vec<sizeof...(Ix),T> shuffle(const Vec<N,T>& x) {
#if !defined(SKNX_NO_SIMD) && defined(__clang__)
    return to_vec<sizeof...(Ix),T>(__builtin_shufflevector(to_vext(x), to_vext(x), Ix...));
#else
    return { x[Ix]... };
#endif
}

// fma() delivers a fused mul-add, even if that's really expensive.  Call it when you know it's not.
static inline Vec<1,float> fma(const Vec<1,float>& x,
                               const Vec<1,float>& y,
                               const Vec<1,float>& z) {
    return std::fma(x.val, y.val, z.val);
}
template <int N>
static inline Vec<N,float> fma(const Vec<N,float>& x,
                               const Vec<N,float>& y,
                               const Vec<N,float>& z) {
    return join(fma(x.lo, y.lo, z.lo),
                fma(x.hi, y.hi, z.hi));
}

template <int N>
static inline Vec<N,float> fract(const Vec<N,float>& x) {
    return x - floor(x);
}
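
// Since fract() subtracts floor(), results land in [0,1) even for negative inputs, e.g.
// fract(2.75f) == 0.75f and fract(-0.25f) == 0.75f (-0.25 - floor(-0.25) = -0.25 + 1).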

// The default cases for to_half/from_half are borrowed from skcms,
// and assume inputs are finite and treat/flush denorm half floats as/to zero.
// Key constants to watch for:
//   - a float is 32-bit, 1-8-23 sign-exponent-mantissa, with 127 exponent bias;
//   - a half  is 16-bit, 1-5-10 sign-exponent-mantissa, with  15 exponent bias.
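//
// A worked example of the arithmetic below (for illustration): 1.0f is 0x3f80'0000;
// its sign bit is 0, so em == 0x3f80'0000 (not denormal), and
// (em>>13) - ((127-15)<<10) == 0x1fc00 - 0x1c000 == 0x3c00, the half encoding of 1.0.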
template <int N>
static inline Vec<N,uint16_t> to_half_finite_ftz(const Vec<N,float>& x) {
    Vec<N,uint32_t> sem = bit_pun<Vec<N,uint32_t>>(x),
                    s   = sem & 0x8000'0000,
                    em  = sem ^ s,
              is_denorm =  em < 0x3880'0000;
    return cast<uint16_t>(if_then_else(is_denorm, Vec<N,uint32_t>(0)
                                                , (s>>16) + (em>>13) - ((127-15)<<10)));
}
template <int N>
static inline Vec<N,float> from_half_finite_ftz(const Vec<N,uint16_t>& x) {
    Vec<N,uint32_t> wide = cast<uint32_t>(x),
                      s  = wide & 0x8000,
                      em = wide ^ s;
    auto is_denorm = bit_pun<Vec<N,int32_t>>(em < 0x0400);
    return if_then_else(is_denorm, Vec<N,float>(0)
                                 , bit_pun<Vec<N,float>>( (s<<16) + (em<<13) + ((127-15)<<23) ));
}

// Like if_then_else(), these N=1 base cases won't actually be used unless explicitly called.
static inline Vec<1,uint16_t> to_half(const Vec<1,float   >& x) { return   to_half_finite_ftz(x); }
static inline Vec<1,float > from_half(const Vec<1,uint16_t>& x) { return from_half_finite_ftz(x); }

template <int N>
static inline Vec<N,uint16_t> to_half(const Vec<N,float>& x) {
#if defined(__F16C__)
    if /*constexpr*/ (N == 8) {
        return unchecked_bit_pun<Vec<N,uint16_t>>(_mm256_cvtps_ph(unchecked_bit_pun<__m256>(x),
                                                                  _MM_FROUND_CUR_DIRECTION));
    }
#endif
#if defined(__aarch64__)
    if /*constexpr*/ (N == 4) {
        return unchecked_bit_pun<Vec<N,uint16_t>>(vcvt_f16_f32(unchecked_bit_pun<float32x4_t>(x)));
    }
#endif
    if /*constexpr*/ (N > 4) {
        return join(to_half(x.lo),
                    to_half(x.hi));
    }
    return to_half_finite_ftz(x);
}

template <int N>
static inline Vec<N,float> from_half(const Vec<N,uint16_t>& x) {
#if defined(__F16C__)
    if /*constexpr*/ (N == 8) {
        return unchecked_bit_pun<Vec<N,float>>(_mm256_cvtph_ps(unchecked_bit_pun<__m128i>(x)));
    }
#endif
#if defined(__aarch64__)
    if /*constexpr*/ (N == 4) {
        return unchecked_bit_pun<Vec<N,float>>(vcvt_f32_f16(unchecked_bit_pun<float16x4_t>(x)));
    }
#endif
    if /*constexpr*/ (N > 4) {
        return join(from_half(x.lo),
                    from_half(x.hi));
    }
    return from_half_finite_ftz(x);
}


// div255(x) = (x + 127) / 255 is a bit-exact rounding divide-by-255, packing down to 8-bit.
template <int N>
static inline Vec<N,uint8_t> div255(const Vec<N,uint16_t>& x) {
    return cast<uint8_t>( (x+127)/255 );
}
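
// E.g. the largest product of two 8-bit values, 255*255 = 65025, divides back to exactly 255:
// (65025 + 127)/255 = 65152/255 = 255 in integer math, matching 65025/255.0 = 255.0 rounded.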

// approx_scale(x,y) approximates div255(cast<uint16_t>(x)*cast<uint16_t>(y)) within a bit,
// and is always perfect when x or y is 0 or 255.
template <int N>
static inline Vec<N,uint8_t> approx_scale(const Vec<N,uint8_t>& x, const Vec<N,uint8_t>& y) {
    // All of (x*y+x)/256, (x*y+y)/256, and (x*y+255)/256 meet the criteria above.
    // We happen to have historically picked (x*y+x)/256.
    auto X = cast<uint16_t>(x),
         Y = cast<uint16_t>(y);
    return cast<uint8_t>( (X*Y+X)/256 );
}
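
// A quick spot check: approx_scale(255,255) = (65025+255)/256 = 255 exactly, while middling
// values can differ from div255() by one, e.g. approx_scale(127,128) = 63 vs div255(16256) = 64.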

#if !defined(SKNX_NO_SIMD) && defined(__ARM_NEON)
    // With NEON we can do eight u8*u8 -> u16 in one instruction, vmull_u8 (read, mul-long).
    static inline Vec<8,uint16_t> mull(const Vec<8,uint8_t>& x,
                                       const Vec<8,uint8_t>& y) {
        return to_vec<8,uint16_t>(vmull_u8(to_vext(x),
                                           to_vext(y)));
    }

    template <int N>
    static inline typename std::enable_if<(N < 8),
                                          Vec<N,uint16_t>>::type mull(const Vec<N,uint8_t>& x,
                                                                      const Vec<N,uint8_t>& y) {
        // N < 8 --> double up data until N == 8, returning the part we need.
        return mull(join(x,x),
                    join(y,y)).lo;
    }

    template <int N>
    static inline typename std::enable_if<(N > 8),
                                          Vec<N,uint16_t>>::type mull(const Vec<N,uint8_t>& x,
                                                                      const Vec<N,uint8_t>& y) {
        // N > 8 --> usual join(lo,hi) strategy to recurse down to N == 8.
        return join(mull(x.lo, y.lo),
                    mull(x.hi, y.hi));
    }
#else
    // Nothing special when we don't have NEON... just cast up to 16-bit and multiply.
    template <int N>
    static inline Vec<N,uint16_t> mull(const Vec<N,uint8_t>& x,
                                       const Vec<N,uint8_t>& y) {
        return cast<uint16_t>(x)
             * cast<uint16_t>(y);
    }
#endif

#if !defined(SKNX_NO_SIMD)

    // Platform-specific specializations and overloads can now drop in here.

    #if defined(__AVX__)
        static inline Vec<8,float> sqrt(const Vec<8,float>& x) {
            return bit_pun<Vec<8,float>>(_mm256_sqrt_ps(bit_pun<__m256>(x)));
        }
        static inline Vec<8,float> rsqrt(const Vec<8,float>& x) {
            return bit_pun<Vec<8,float>>(_mm256_rsqrt_ps(bit_pun<__m256>(x)));
        }
        static inline Vec<8,float> rcp(const Vec<8,float>& x) {
            return bit_pun<Vec<8,float>>(_mm256_rcp_ps(bit_pun<__m256>(x)));
        }
        static inline Vec<8,int> lrint(const Vec<8,float>& x) {
            return bit_pun<Vec<8,int>>(_mm256_cvtps_epi32(bit_pun<__m256>(x)));
        }
    #endif

    #if defined(__SSE__)
        static inline Vec<4,float> sqrt(const Vec<4,float>& x) {
            return bit_pun<Vec<4,float>>(_mm_sqrt_ps(bit_pun<__m128>(x)));
        }
        static inline Vec<4,float> rsqrt(const Vec<4,float>& x) {
            return bit_pun<Vec<4,float>>(_mm_rsqrt_ps(bit_pun<__m128>(x)));
        }
        static inline Vec<4,float> rcp(const Vec<4,float>& x) {
            return bit_pun<Vec<4,float>>(_mm_rcp_ps(bit_pun<__m128>(x)));
        }
        static inline Vec<4,int> lrint(const Vec<4,float>& x) {
            return bit_pun<Vec<4,int>>(_mm_cvtps_epi32(bit_pun<__m128>(x)));
        }

        static inline Vec<2,float> sqrt(const Vec<2,float>& x) {
            return shuffle<0,1>( sqrt(shuffle<0,1,0,1>(x)));
        }
        static inline Vec<2,float> rsqrt(const Vec<2,float>& x) {
            return shuffle<0,1>(rsqrt(shuffle<0,1,0,1>(x)));
        }
        static inline Vec<2,float> rcp(const Vec<2,float>& x) {
            return shuffle<0,1>(  rcp(shuffle<0,1,0,1>(x)));
        }
        static inline Vec<2,int> lrint(const Vec<2,float>& x) {
            return shuffle<0,1>(lrint(shuffle<0,1,0,1>(x)));
        }
    #endif

    #if defined(__AVX2__)
        static inline Vec<4,float> fma(const Vec<4,float>& x,
                                       const Vec<4,float>& y,
                                       const Vec<4,float>& z) {
            return bit_pun<Vec<4,float>>(_mm_fmadd_ps(bit_pun<__m128>(x),
                                                      bit_pun<__m128>(y),
                                                      bit_pun<__m128>(z)));
        }

        static inline Vec<8,float> fma(const Vec<8,float>& x,
                                       const Vec<8,float>& y,
                                       const Vec<8,float>& z) {
            return bit_pun<Vec<8,float>>(_mm256_fmadd_ps(bit_pun<__m256>(x),
                                                         bit_pun<__m256>(y),
                                                         bit_pun<__m256>(z)));
        }
    #elif defined(__aarch64__)
        static inline Vec<4,float> fma(const Vec<4,float>& x,
                                       const Vec<4,float>& y,
                                       const Vec<4,float>& z) {
            // These instructions tend to work like z += xy, so the order here is z,x,y.
            return bit_pun<Vec<4,float>>(vfmaq_f32(bit_pun<float32x4_t>(z),
                                                   bit_pun<float32x4_t>(x),
                                                   bit_pun<float32x4_t>(y)));
        }
    #endif

    // WASM SIMD compatible operations which are not automatically compiled to SIMD commands
    // by emscripten:
    #if defined __wasm_simd128__
        static inline Vec<4,float> min(const Vec<4,float>& x, const Vec<4,float>& y) {
            return to_vec<4,float>(wasm_f32x4_min(to_vext(x), to_vext(y)));
        }
        static inline Vec<4,float> max(const Vec<4,float>& x, const Vec<4,float>& y) {
            return to_vec<4,float>(wasm_f32x4_max(to_vext(x), to_vext(y)));
        }
        static inline Vec<4,float> sqrt(const Vec<4,float>& x) {
            return to_vec<4,float>(wasm_f32x4_sqrt(to_vext(x)));
        }
        static inline Vec<4,float> abs(const Vec<4,float>& x) {
            return to_vec<4,float>(wasm_f32x4_abs(to_vext(x)));
        }
        static inline Vec<4,float> rcp(const Vec<4,float>& x) {
            return 1.0f / x;
        }
        static inline Vec<4,float> rsqrt(const Vec<4,float>& x) {
            return 1.0f / sqrt(x);
        }

        static inline Vec<2,double> min(const Vec<2,double>& x, const Vec<2,double>& y) {
            return to_vec<2,double>(wasm_f64x2_min(to_vext(x), to_vext(y)));
        }
        static inline Vec<2,double> max(const Vec<2,double>& x, const Vec<2,double>& y) {
            return to_vec<2,double>(wasm_f64x2_max(to_vext(x), to_vext(y)));
        }
        static inline Vec<2,double> sqrt(const Vec<2,double>& x) {
            return to_vec<2,double>(wasm_f64x2_sqrt(to_vext(x)));
        }
        static inline Vec<2,double> abs(const Vec<2,double>& x) {
            return to_vec<2,double>(wasm_f64x2_abs(to_vext(x)));
        }
        static inline Vec<2,double> rcp(const Vec<2,double>& x) {
            return 1.0f / x;
        }
        static inline Vec<2,double> rsqrt(const Vec<2,double>& x) {
            return 1.0f / sqrt(x);
        }

        static inline bool any(const Vec<4,int32_t>& x) {
            return wasm_i32x4_any_true(to_vext(x));
        }
        static inline bool all(const Vec<4,int32_t>& x) {
            return wasm_i32x4_all_true(to_vext(x));
        }
        static inline Vec<4,int32_t> min(const Vec<4,int32_t>& x, const Vec<4,int32_t>& y) {
            return to_vec<4,int32_t>(wasm_i32x4_min(to_vext(x), to_vext(y)));
        }
        static inline Vec<4,int32_t> max(const Vec<4,int32_t>& x, const Vec<4,int32_t>& y) {
            return to_vec<4,int32_t>(wasm_i32x4_max(to_vext(x), to_vext(y)));
        }
        static inline Vec<4,int32_t> abs(const Vec<4,int32_t>& x) {
            return to_vec<4,int32_t>(wasm_i32x4_abs(to_vext(x)));
        }

        static inline bool any(const Vec<4,uint32_t>& x) {
            return wasm_i32x4_any_true(to_vext(x));
        }
        static inline bool all(const Vec<4,uint32_t>& x) {
            return wasm_i32x4_all_true(to_vext(x));
        }
        static inline Vec<4,uint32_t> min(const Vec<4,uint32_t>& x,
                                          const Vec<4,uint32_t>& y) {
            return to_vec<4,uint32_t>(wasm_u32x4_min(to_vext(x), to_vext(y)));
        }
        static inline Vec<4,uint32_t> max(const Vec<4,uint32_t>& x,
                                          const Vec<4,uint32_t>& y) {
            return to_vec<4,uint32_t>(wasm_u32x4_max(to_vext(x), to_vext(y)));
        }
    #endif

#endif // !defined(SKNX_NO_SIMD)

}  // namespace skvx

#undef SINTU
#undef SINT
#undef SIT
#undef SKVX_ALIGNMENT

#endif//SKVX_DEFINED