// Copyright 2009-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0

#pragma once

#include "../math/math.h"

#define vboolf vboolf_impl
#define vboold vboold_impl
#define vint vint_impl
#define vuint vuint_impl
#define vllong vllong_impl
#define vfloat vfloat_impl
#define vdouble vdouble_impl
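
// The *_impl renames follow the pattern of the other embree SIMD headers: they
// keep these per-ISA definitions from clashing with the user-facing type names
// and are #undef'ed again at the end of the file.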

namespace embree
{
  /* 4-wide SSE integer type */
  template<>
  struct vint<4>
  {
    ALIGNED_STRUCT_(16);

    typedef vboolf4 Bool;
    typedef vint4 Int;
    typedef vfloat4 Float;

    enum { size = 4 }; // number of SIMD elements
    union { __m128i v; int i[4]; }; // data

    ////////////////////////////////////////////////////////////////////////////////
    /// Constructors, Assignment & Cast Operators
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vint() {}
    __forceinline vint(const vint4& a) { v = a.v; }
    __forceinline vint4& operator =(const vint4& a) { v = a.v; return *this; }

    __forceinline vint(__m128i a) : v(a) {}
    __forceinline operator const __m128i&() const { return v; }
    __forceinline operator __m128i&() { return v; }

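    // note: _mm_set_epi32 takes its arguments from the highest lane down,
    // so (d, c, b, a) places a in lane 0 and d in lane 3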
    __forceinline vint(int a) : v(_mm_set1_epi32(a)) {}
    __forceinline vint(int a, int b, int c, int d) : v(_mm_set_epi32(d, c, b, a)) {}

    __forceinline explicit vint(__m128 a) : v(_mm_cvtps_epi32(a)) {}
#if defined(__AVX512VL__)
    __forceinline explicit vint(const vboolf4& a) : v(_mm_movm_epi32(a)) {}
#else
    __forceinline explicit vint(const vboolf4& a) : v(_mm_castps_si128((__m128)a)) {}
#endif

    __forceinline vint(long long a, long long b) : v(_mm_set_epi64x(b,a)) {}

    ////////////////////////////////////////////////////////////////////////////////
    /// Constants
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline vint(ZeroTy) : v(_mm_setzero_si128()) {}
    __forceinline vint(OneTy) : v(_mm_set1_epi32(1)) {}
    __forceinline vint(PosInfTy) : v(_mm_set1_epi32(pos_inf)) {}
    __forceinline vint(NegInfTy) : v(_mm_set1_epi32(neg_inf)) {}
    __forceinline vint(StepTy) : v(_mm_set_epi32(3, 2, 1, 0)) {}
    __forceinline vint(ReverseStepTy) : v(_mm_set_epi32(0, 1, 2, 3)) {}

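    // comparing the (uninitialized) register with itself yields all-ones lanes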
    __forceinline vint(TrueTy) { v = _mm_cmpeq_epi32(v,v); }
    __forceinline vint(UndefinedTy) : v(_mm_castps_si128(_mm_undefined_ps())) {}

    ////////////////////////////////////////////////////////////////////////////////
    /// Loads and Stores
    ////////////////////////////////////////////////////////////////////////////////

    static __forceinline vint4 load (const void* a) { return _mm_load_si128((__m128i*)a); }
    static __forceinline vint4 loadu(const void* a) { return _mm_loadu_si128((__m128i*)a); }

    static __forceinline void store (void* ptr, const vint4& v) { _mm_store_si128((__m128i*)ptr,v); }
    static __forceinline void storeu(void* ptr, const vint4& v) { _mm_storeu_si128((__m128i*)ptr,v); }

#if defined(__AVX512VL__)

    static __forceinline vint4 compact(const vboolf4& mask, vint4& v) {
      return _mm_mask_compress_epi32(v, mask, v);
    }
    static __forceinline vint4 compact(const vboolf4& mask, vint4& a, const vint4& b) {
      return _mm_mask_compress_epi32(a, mask, b);
    }

    static __forceinline vint4 load (const vboolf4& mask, const void* ptr) { return _mm_mask_load_epi32 (_mm_setzero_si128(),mask,ptr); }
    static __forceinline vint4 loadu(const vboolf4& mask, const void* ptr) { return _mm_mask_loadu_epi32(_mm_setzero_si128(),mask,ptr); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vint4& v) { _mm_mask_store_epi32 (ptr,mask,v); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vint4& v) { _mm_mask_storeu_epi32(ptr,mask,v); }
#elif defined(__AVX__)
    static __forceinline vint4 load (const vboolf4& mask, const void* a) { return _mm_castps_si128(_mm_maskload_ps((float*)a,mask)); }
    static __forceinline vint4 loadu(const vboolf4& mask, const void* a) { return _mm_castps_si128(_mm_maskload_ps((float*)a,mask)); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vint4& i) { _mm_maskstore_ps((float*)ptr,(__m128i)mask,_mm_castsi128_ps(i)); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vint4& i) { _mm_maskstore_ps((float*)ptr,(__m128i)mask,_mm_castsi128_ps(i)); }
#else
    static __forceinline vint4 load (const vboolf4& mask, const void* a) { return _mm_and_si128(_mm_load_si128 ((__m128i*)a),mask); }
    static __forceinline vint4 loadu(const vboolf4& mask, const void* a) { return _mm_and_si128(_mm_loadu_si128((__m128i*)a),mask); }

    static __forceinline void store (const vboolf4& mask, void* ptr, const vint4& i) { store (ptr,select(mask,i,load (ptr))); }
    static __forceinline void storeu(const vboolf4& mask, void* ptr, const vint4& i) { storeu(ptr,select(mask,i,loadu(ptr))); }
#endif

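    // load 4 unsigned chars and zero-extend each to a 32-bit lane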
#if defined(__aarch64__)
    static __forceinline vint4 load(const unsigned char* ptr) {
      return _mm_load4epu8_epi32((__m128i*)ptr);
    }
    static __forceinline vint4 loadu(const unsigned char* ptr) {
      return _mm_load4epu8_epi32((__m128i*)ptr);
    }
#elif defined(__SSE4_1__)
    static __forceinline vint4 load(const unsigned char* ptr) {
      return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr));
    }

    static __forceinline vint4 loadu(const unsigned char* ptr) {
      return _mm_cvtepu8_epi32(_mm_loadl_epi64((__m128i*)ptr));
    }
#else
    static __forceinline vint4 load(const unsigned char* ptr) {
      return vint4(ptr[0],ptr[1],ptr[2],ptr[3]);
    }

    static __forceinline vint4 loadu(const unsigned char* ptr) {
      return vint4(ptr[0],ptr[1],ptr[2],ptr[3]);
    }
#endif

    static __forceinline vint4 load(const unsigned short* ptr) {
#if defined(__aarch64__)
      return __m128i(vmovl_u16(vld1_u16(ptr)));
#elif defined(__SSE4_1__)
      return _mm_cvtepu16_epi32(_mm_loadu_si128((__m128i*)ptr));
#else
      return vint4(ptr[0],ptr[1],ptr[2],ptr[3]);
#endif
    }

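    // narrow the 4 lanes for storing; the SIMD paths saturate to the destination
    // range (packus/vqmovn), while the scalar fallback truncates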
    static __forceinline void store(unsigned char* ptr, const vint4& v) {
#if defined(__aarch64__)
      int32x4_t x = v;
      uint16x4_t y = vqmovn_u32(uint32x4_t(x));
      uint8x8_t z = vqmovn_u16(vcombine_u16(y, y));
      vst1_lane_u32((uint32_t*)ptr, uint32x2_t(z), 0);
#elif defined(__SSE4_1__)
      __m128i x = v;
      x = _mm_packus_epi32(x, x);
      x = _mm_packus_epi16(x, x);
      *(int*)ptr = _mm_cvtsi128_si32(x);
#else
      for (size_t i=0;i<4;i++)
        ptr[i] = (unsigned char)v[i];
#endif
    }

    static __forceinline void store(unsigned short* ptr, const vint4& v) {
#if defined(__aarch64__)
      uint32x4_t x = uint32x4_t(v.v);
      uint16x4_t y = vqmovn_u32(x);
      vst1_u16(ptr, y);
#else
      for (size_t i=0;i<4;i++)
        ptr[i] = (unsigned short)v[i];
#endif
    }

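    // non-temporal variants: streaming loads/stores hint the CPU to bypass the
    // cache hierarchy for data that is touched only once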
    static __forceinline vint4 load_nt(void* ptr) {
#if defined(__aarch64__) || defined(__SSE4_1__)
      return _mm_stream_load_si128((__m128i*)ptr);
#else
      return _mm_load_si128((__m128i*)ptr);
#endif
    }

    static __forceinline void store_nt(void* ptr, const vint4& v) {
#if !defined(__aarch64__) && defined(__SSE4_1__)
      _mm_stream_ps((float*)ptr, _mm_castsi128_ps(v));
#else
      _mm_store_si128((__m128i*)ptr,v);
#endif
    }

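    // gather/scatter: lane i accesses *(int*)((char*)ptr + scale*index[i]);
    // the byte-valued scale matches the AVX2/AVX-512 gather/scatter intrinsics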
    template<int scale = 4>
    static __forceinline vint4 gather(const int* ptr, const vint4& index) {
#if defined(__AVX2__) && !defined(__aarch64__)
      return _mm_i32gather_epi32(ptr, index, scale);
#else
      return vint4(
          *(int*)(((char*)ptr)+scale*index[0]),
          *(int*)(((char*)ptr)+scale*index[1]),
          *(int*)(((char*)ptr)+scale*index[2]),
          *(int*)(((char*)ptr)+scale*index[3]));
#endif
    }

    template<int scale = 4>
    static __forceinline vint4 gather(const vboolf4& mask, const int* ptr, const vint4& index) {
      vint4 r = zero;
#if defined(__AVX512VL__)
      return _mm_mmask_i32gather_epi32(r, mask, index, ptr, scale);
#elif defined(__AVX2__) && !defined(__aarch64__)
      return _mm_mask_i32gather_epi32(r, ptr, index, mask, scale);
#else
      if (likely(mask[0])) r[0] = *(int*)(((char*)ptr)+scale*index[0]);
      if (likely(mask[1])) r[1] = *(int*)(((char*)ptr)+scale*index[1]);
      if (likely(mask[2])) r[2] = *(int*)(((char*)ptr)+scale*index[2]);
      if (likely(mask[3])) r[3] = *(int*)(((char*)ptr)+scale*index[3]);
      return r;
#endif
    }

    template<int scale = 4>
    static __forceinline void scatter(void* ptr, const vint4& index, const vint4& v)
    {
#if defined(__AVX512VL__)
      _mm_i32scatter_epi32((int*)ptr, index, v, scale);
#else
      *(int*)(((char*)ptr)+scale*index[0]) = v[0];
      *(int*)(((char*)ptr)+scale*index[1]) = v[1];
      *(int*)(((char*)ptr)+scale*index[2]) = v[2];
      *(int*)(((char*)ptr)+scale*index[3]) = v[3];
#endif
    }

    template<int scale = 4>
    static __forceinline void scatter(const vboolf4& mask, void* ptr, const vint4& index, const vint4& v)
    {
#if defined(__AVX512VL__)
      _mm_mask_i32scatter_epi32((int*)ptr, mask, index, v, scale);
#else
      if (likely(mask[0])) *(int*)(((char*)ptr)+scale*index[0]) = v[0];
      if (likely(mask[1])) *(int*)(((char*)ptr)+scale*index[1]) = v[1];
      if (likely(mask[2])) *(int*)(((char*)ptr)+scale*index[2]) = v[2];
      if (likely(mask[3])) *(int*)(((char*)ptr)+scale*index[3]) = v[3];
#endif
    }

#if defined(__x86_64__) || defined(__aarch64__)
    static __forceinline vint4 broadcast64(long long a) { return _mm_set1_epi64x(a); }
#endif

    ////////////////////////////////////////////////////////////////////////////////
    /// Array Access
    ////////////////////////////////////////////////////////////////////////////////

    __forceinline const int& operator [](size_t index) const { assert(index < 4); return i[index]; }
    __forceinline int& operator [](size_t index) { assert(index < 4); return i[index]; }

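    // lanewise m ? t : f; the blendv paths key on the sign bit of each mask
    // lane, which comparison results set along with all other lane bits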
    friend __forceinline vint4 select(const vboolf4& m, const vint4& t, const vint4& f) {
#if defined(__AVX512VL__)
      return _mm_mask_blend_epi32(m, (__m128i)f, (__m128i)t);
#elif defined(__aarch64__)
      return _mm_castps_si128(_mm_blendv_ps((__m128)f.v, (__m128)t.v, (__m128)m.v));
#elif defined(__SSE4_1__)
      return _mm_castps_si128(_mm_blendv_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), m));
#else
      return _mm_or_si128(_mm_and_si128(m, t), _mm_andnot_si128(m, f));
#endif
    }
  };
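
  /* usage sketch (illustrative only, not part of the original header):
       vint4 a(step);              // <0,1,2,3>
       vint4 b = a + vint4(one);   // <1,2,3,4>
       vboolf4 m = a < b;          // all lanes true
       vint4 c = select(m, a, b);  // picks a in every lane
  */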

  ////////////////////////////////////////////////////////////////////////////////
  /// Unary Operators
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__AVX512VL__)
  __forceinline vboolf4 asBool(const vint4& a) { return _mm_movepi32_mask(a); }
#else
  __forceinline vboolf4 asBool(const vint4& a) { return _mm_castsi128_ps(a); }
#endif

  __forceinline vint4 operator +(const vint4& a) { return a; }
  __forceinline vint4 operator -(const vint4& a) { return _mm_sub_epi32(_mm_setzero_si128(), a); }
#if defined(__aarch64__)
  __forceinline vint4 abs(const vint4& a) { return vabsq_s32(a.v); }
#elif defined(__SSSE3__)
  __forceinline vint4 abs(const vint4& a) { return _mm_abs_epi32(a); }
#endif

  ////////////////////////////////////////////////////////////////////////////////
  /// Binary Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vint4 operator +(const vint4& a, const vint4& b) { return _mm_add_epi32(a, b); }
  __forceinline vint4 operator +(const vint4& a, int b) { return a + vint4(b); }
  __forceinline vint4 operator +(int a, const vint4& b) { return vint4(a) + b; }

  __forceinline vint4 operator -(const vint4& a, const vint4& b) { return _mm_sub_epi32(a, b); }
  __forceinline vint4 operator -(const vint4& a, int b) { return a - vint4(b); }
  __forceinline vint4 operator -(int a, const vint4& b) { return vint4(a) - b; }

#if defined(__aarch64__) || defined(__SSE4_1__)
  __forceinline vint4 operator *(const vint4& a, const vint4& b) { return _mm_mullo_epi32(a, b); }
#else
  __forceinline vint4 operator *(const vint4& a, const vint4& b) { return vint4(a[0]*b[0],a[1]*b[1],a[2]*b[2],a[3]*b[3]); }
#endif
  __forceinline vint4 operator *(const vint4& a, int b) { return a * vint4(b); }
  __forceinline vint4 operator *(int a, const vint4& b) { return vint4(a) * b; }

  __forceinline vint4 operator &(const vint4& a, const vint4& b) { return _mm_and_si128(a, b); }
  __forceinline vint4 operator &(const vint4& a, int b) { return a & vint4(b); }
  __forceinline vint4 operator &(int a, const vint4& b) { return vint4(a) & b; }

  __forceinline vint4 operator |(const vint4& a, const vint4& b) { return _mm_or_si128(a, b); }
  __forceinline vint4 operator |(const vint4& a, int b) { return a | vint4(b); }
  __forceinline vint4 operator |(int a, const vint4& b) { return vint4(a) | b; }

  __forceinline vint4 operator ^(const vint4& a, const vint4& b) { return _mm_xor_si128(a, b); }
  __forceinline vint4 operator ^(const vint4& a, int b) { return a ^ vint4(b); }
  __forceinline vint4 operator ^(int a, const vint4& b) { return vint4(a) ^ b; }

  __forceinline vint4 operator <<(const vint4& a, const int n) { return _mm_slli_epi32(a, n); }
  __forceinline vint4 operator >>(const vint4& a, const int n) { return _mm_srai_epi32(a, n); }

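  // explicit shift flavors: sll = shift left logical, sra = shift right
  // arithmetic (sign-extending, same as operator >>), srl = shift right logical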
  __forceinline vint4 sll (const vint4& a, int b) { return _mm_slli_epi32(a, b); }
  __forceinline vint4 sra (const vint4& a, int b) { return _mm_srai_epi32(a, b); }
  __forceinline vint4 srl (const vint4& a, int b) { return _mm_srli_epi32(a, b); }

  ////////////////////////////////////////////////////////////////////////////////
  /// Assignment Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vint4& operator +=(vint4& a, const vint4& b) { return a = a + b; }
  __forceinline vint4& operator +=(vint4& a, int b) { return a = a + b; }

  __forceinline vint4& operator -=(vint4& a, const vint4& b) { return a = a - b; }
  __forceinline vint4& operator -=(vint4& a, int b) { return a = a - b; }

#if defined(__aarch64__) || defined(__SSE4_1__)
  __forceinline vint4& operator *=(vint4& a, const vint4& b) { return a = a * b; }
  __forceinline vint4& operator *=(vint4& a, int b) { return a = a * b; }
#endif

  __forceinline vint4& operator &=(vint4& a, const vint4& b) { return a = a & b; }
  __forceinline vint4& operator &=(vint4& a, int b) { return a = a & b; }

  __forceinline vint4& operator |=(vint4& a, const vint4& b) { return a = a | b; }
  __forceinline vint4& operator |=(vint4& a, int b) { return a = a | b; }

  __forceinline vint4& operator <<=(vint4& a, int b) { return a = a << b; }
  __forceinline vint4& operator >>=(vint4& a, int b) { return a = a >> b; }

  ////////////////////////////////////////////////////////////////////////////////
  /// Comparison Operators + Select
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__AVX512VL__)
  __forceinline vboolf4 operator ==(const vint4& a, const vint4& b) { return _mm_cmp_epi32_mask(a,b,_MM_CMPINT_EQ); }
  __forceinline vboolf4 operator !=(const vint4& a, const vint4& b) { return _mm_cmp_epi32_mask(a,b,_MM_CMPINT_NE); }
  __forceinline vboolf4 operator < (const vint4& a, const vint4& b) { return _mm_cmp_epi32_mask(a,b,_MM_CMPINT_LT); }
  __forceinline vboolf4 operator >=(const vint4& a, const vint4& b) { return _mm_cmp_epi32_mask(a,b,_MM_CMPINT_GE); }
  __forceinline vboolf4 operator > (const vint4& a, const vint4& b) { return _mm_cmp_epi32_mask(a,b,_MM_CMPINT_GT); }
  __forceinline vboolf4 operator <=(const vint4& a, const vint4& b) { return _mm_cmp_epi32_mask(a,b,_MM_CMPINT_LE); }
#else
  __forceinline vboolf4 operator ==(const vint4& a, const vint4& b) { return _mm_castsi128_ps(_mm_cmpeq_epi32(a, b)); }
  __forceinline vboolf4 operator !=(const vint4& a, const vint4& b) { return !(a == b); }
  __forceinline vboolf4 operator < (const vint4& a, const vint4& b) { return _mm_castsi128_ps(_mm_cmplt_epi32(a, b)); }
  __forceinline vboolf4 operator >=(const vint4& a, const vint4& b) { return !(a < b); }
  __forceinline vboolf4 operator > (const vint4& a, const vint4& b) { return _mm_castsi128_ps(_mm_cmpgt_epi32(a, b)); }
  __forceinline vboolf4 operator <=(const vint4& a, const vint4& b) { return !(a > b); }
#endif

  __forceinline vboolf4 operator ==(const vint4& a, int b) { return a == vint4(b); }
  __forceinline vboolf4 operator ==(int a, const vint4& b) { return vint4(a) == b; }

  __forceinline vboolf4 operator !=(const vint4& a, int b) { return a != vint4(b); }
  __forceinline vboolf4 operator !=(int a, const vint4& b) { return vint4(a) != b; }

  __forceinline vboolf4 operator < (const vint4& a, int b) { return a < vint4(b); }
  __forceinline vboolf4 operator < (int a, const vint4& b) { return vint4(a) < b; }

  __forceinline vboolf4 operator >=(const vint4& a, int b) { return a >= vint4(b); }
  __forceinline vboolf4 operator >=(int a, const vint4& b) { return vint4(a) >= b; }

  __forceinline vboolf4 operator > (const vint4& a, int b) { return a > vint4(b); }
  __forceinline vboolf4 operator > (int a, const vint4& b) { return vint4(a) > b; }

  __forceinline vboolf4 operator <=(const vint4& a, int b) { return a <= vint4(b); }
  __forceinline vboolf4 operator <=(int a, const vint4& b) { return vint4(a) <= b; }

  __forceinline vboolf4 eq(const vint4& a, const vint4& b) { return a == b; }
  __forceinline vboolf4 ne(const vint4& a, const vint4& b) { return a != b; }
  __forceinline vboolf4 lt(const vint4& a, const vint4& b) { return a <  b; }
  __forceinline vboolf4 ge(const vint4& a, const vint4& b) { return a >= b; }
  __forceinline vboolf4 gt(const vint4& a, const vint4& b) { return a >  b; }
  __forceinline vboolf4 le(const vint4& a, const vint4& b) { return a <= b; }

#if defined(__AVX512VL__)
  __forceinline vboolf4 eq(const vboolf4& mask, const vint4& a, const vint4& b) { return _mm_mask_cmp_epi32_mask(mask, a, b, _MM_CMPINT_EQ); }
  __forceinline vboolf4 ne(const vboolf4& mask, const vint4& a, const vint4& b) { return _mm_mask_cmp_epi32_mask(mask, a, b, _MM_CMPINT_NE); }
  __forceinline vboolf4 lt(const vboolf4& mask, const vint4& a, const vint4& b) { return _mm_mask_cmp_epi32_mask(mask, a, b, _MM_CMPINT_LT); }
  __forceinline vboolf4 ge(const vboolf4& mask, const vint4& a, const vint4& b) { return _mm_mask_cmp_epi32_mask(mask, a, b, _MM_CMPINT_GE); }
  __forceinline vboolf4 gt(const vboolf4& mask, const vint4& a, const vint4& b) { return _mm_mask_cmp_epi32_mask(mask, a, b, _MM_CMPINT_GT); }
  __forceinline vboolf4 le(const vboolf4& mask, const vint4& a, const vint4& b) { return _mm_mask_cmp_epi32_mask(mask, a, b, _MM_CMPINT_LE); }
#else
  __forceinline vboolf4 eq(const vboolf4& mask, const vint4& a, const vint4& b) { return mask & (a == b); }
  __forceinline vboolf4 ne(const vboolf4& mask, const vint4& a, const vint4& b) { return mask & (a != b); }
  __forceinline vboolf4 lt(const vboolf4& mask, const vint4& a, const vint4& b) { return mask & (a <  b); }
  __forceinline vboolf4 ge(const vboolf4& mask, const vint4& a, const vint4& b) { return mask & (a >= b); }
  __forceinline vboolf4 gt(const vboolf4& mask, const vint4& a, const vint4& b) { return mask & (a >  b); }
  __forceinline vboolf4 le(const vboolf4& mask, const vint4& a, const vint4& b) { return mask & (a <= b); }
#endif

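  // compile-time select: bit i of the immediate mask picks t (bit set) or
  // f (bit clear) for lane i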
  template<int mask>
  __forceinline vint4 select(const vint4& t, const vint4& f) {
#if defined(__SSE4_1__)
    return _mm_castps_si128(_mm_blend_ps(_mm_castsi128_ps(f), _mm_castsi128_ps(t), mask));
#else
    return select(vboolf4(mask), t, f);
#endif
  }

#if defined(__aarch64__) || defined(__SSE4_1__)
  __forceinline vint4 min(const vint4& a, const vint4& b) { return _mm_min_epi32(a, b); }
  __forceinline vint4 max(const vint4& a, const vint4& b) { return _mm_max_epi32(a, b); }

  __forceinline vint4 umin(const vint4& a, const vint4& b) { return _mm_min_epu32(a, b); }
  __forceinline vint4 umax(const vint4& a, const vint4& b) { return _mm_max_epu32(a, b); }
#else
  __forceinline vint4 min(const vint4& a, const vint4& b) { return select(a < b,a,b); }
  __forceinline vint4 max(const vint4& a, const vint4& b) { return select(a < b,b,a); }
#endif

  __forceinline vint4 min(const vint4& a, int b) { return min(a,vint4(b)); }
  __forceinline vint4 min(int a, const vint4& b) { return min(vint4(a),b); }
  __forceinline vint4 max(const vint4& a, int b) { return max(a,vint4(b)); }
  __forceinline vint4 max(int a, const vint4& b) { return max(vint4(a),b); }

  ////////////////////////////////////////////////////////////////////////////////
  // Movement/Shifting/Shuffling Functions
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline vint4 unpacklo(const vint4& a, const vint4& b) { return _mm_castps_si128(_mm_unpacklo_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); }
  __forceinline vint4 unpackhi(const vint4& a, const vint4& b) { return _mm_castps_si128(_mm_unpackhi_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b))); }

#if defined(__aarch64__)
  template<int i0, int i1, int i2, int i3>
  __forceinline vint4 shuffle(const vint4& v) {
    return vreinterpretq_s32_u8(vqtbl1q_u8((uint8x16_t)v.v, _MN_SHUFFLE(i0, i1, i2, i3)));
  }
  template<int i0, int i1, int i2, int i3>
  __forceinline vint4 shuffle(const vint4& a, const vint4& b) {
    return vreinterpretq_s32_u8(vqtbl2q_u8((uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3)));
  }
#else
  template<int i0, int i1, int i2, int i3>
  __forceinline vint4 shuffle(const vint4& v) {
    return _mm_shuffle_epi32(v, _MM_SHUFFLE(i3, i2, i1, i0));
  }

  template<int i0, int i1, int i2, int i3>
  __forceinline vint4 shuffle(const vint4& a, const vint4& b) {
    return _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(a), _mm_castsi128_ps(b), _MM_SHUFFLE(i3, i2, i1, i0)));
  }
#endif
#if defined(__SSE3__)
  template<> __forceinline vint4 shuffle<0, 0, 2, 2>(const vint4& v) { return _mm_castps_si128(_mm_moveldup_ps(_mm_castsi128_ps(v))); }
  template<> __forceinline vint4 shuffle<1, 1, 3, 3>(const vint4& v) { return _mm_castps_si128(_mm_movehdup_ps(_mm_castsi128_ps(v))); }
  template<> __forceinline vint4 shuffle<0, 1, 0, 1>(const vint4& v) { return _mm_castpd_si128(_mm_movedup_pd (_mm_castsi128_pd(v))); }
#endif

  template<int i>
  __forceinline vint4 shuffle(const vint4& v) {
    return shuffle<i,i,i,i>(v);
  }

#if defined(__SSE4_1__) && !defined(__aarch64__)
  template<int src> __forceinline int extract(const vint4& b) { return _mm_extract_epi32(b, src); }
  template<int dst> __forceinline vint4 insert(const vint4& a, const int b) { return _mm_insert_epi32(a, b, dst); }
#else
  template<int src> __forceinline int extract(const vint4& b) { return b[src&3]; }
  template<int dst> __forceinline vint4 insert(const vint4& a, int b) { vint4 c = a; c[dst&3] = b; return c; }
#endif

  template<> __forceinline int extract<0>(const vint4& b) { return _mm_cvtsi128_si32(b); }

  __forceinline int toScalar(const vint4& v) { return _mm_cvtsi128_si32(v); }

#if defined(__aarch64__)
  __forceinline size_t toSizeT(const vint4& v) {
    uint64x2_t x = uint64x2_t(v.v);
    return x[0];
  }
#else
  __forceinline size_t toSizeT(const vint4& v) {
#if defined(__WIN32__) && !defined(__X86_64__) // win32 workaround
    return toScalar(v);
#elif defined(__ARM_NEON)
    // FIXME(LTE): Do we need a swap (i.e. use lane 1)?
    return vgetq_lane_u64(*(reinterpret_cast<const uint64x2_t*>(&v)), 0);
#else
    return _mm_cvtsi128_si64(v);
#endif
  }
#endif

#if defined(__AVX512VL__)

  __forceinline vint4 permute(const vint4& a, const vint4& index) {
    return _mm_castps_si128(_mm_permutevar_ps(_mm_castsi128_ps(a), index));
  }

  template<int i>
  __forceinline vint4 align_shift_right(const vint4& a, const vint4& b) {
    return _mm_alignr_epi32(a, b, i);
  }
#endif

  ////////////////////////////////////////////////////////////////////////////////
  /// Reductions
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__aarch64__) || defined(__SSE4_1__)

#if defined(__aarch64__)
  __forceinline vint4 vreduce_min(const vint4& v) { int h = vminvq_s32(v); return vdupq_n_s32(h); }
  __forceinline vint4 vreduce_max(const vint4& v) { int h = vmaxvq_s32(v); return vdupq_n_s32(h); }
  __forceinline vint4 vreduce_add(const vint4& v) { int h = vaddvq_s32(v); return vdupq_n_s32(h); }

  __forceinline int reduce_min(const vint4& v) { return vminvq_s32(v); }
  __forceinline int reduce_max(const vint4& v) { return vmaxvq_s32(v); }
  __forceinline int reduce_add(const vint4& v) { return vaddvq_s32(v); }
#else
  __forceinline vint4 vreduce_min(const vint4& v) { vint4 h = min(shuffle<1,0,3,2>(v),v); return min(shuffle<2,3,0,1>(h),h); }
  __forceinline vint4 vreduce_max(const vint4& v) { vint4 h = max(shuffle<1,0,3,2>(v),v); return max(shuffle<2,3,0,1>(h),h); }
  __forceinline vint4 vreduce_add(const vint4& v) { vint4 h = shuffle<1,0,3,2>(v) + v; return shuffle<2,3,0,1>(h) + h; }

  __forceinline int reduce_min(const vint4& v) { return toScalar(vreduce_min(v)); }
  __forceinline int reduce_max(const vint4& v) { return toScalar(vreduce_max(v)); }
  __forceinline int reduce_add(const vint4& v) { return toScalar(vreduce_add(v)); }
#endif

  __forceinline size_t select_min(const vint4& v) { return bsf(movemask(v == vreduce_min(v))); }
  __forceinline size_t select_max(const vint4& v) { return bsf(movemask(v == vreduce_max(v))); }

  __forceinline size_t select_min(const vboolf4& valid, const vint4& v) { const vint4 a = select(valid,v,vint4(pos_inf)); return bsf(movemask(valid & (a == vreduce_min(a)))); }
  __forceinline size_t select_max(const vboolf4& valid, const vint4& v) { const vint4 a = select(valid,v,vint4(neg_inf)); return bsf(movemask(valid & (a == vreduce_max(a)))); }

#else

  __forceinline int reduce_min(const vint4& v) { return min(v[0],v[1],v[2],v[3]); }
  __forceinline int reduce_max(const vint4& v) { return max(v[0],v[1],v[2],v[3]); }
  __forceinline int reduce_add(const vint4& v) { return v[0]+v[1]+v[2]+v[3]; }

#endif

  ////////////////////////////////////////////////////////////////////////////////
  /// Sorting networks
  ////////////////////////////////////////////////////////////////////////////////

#if defined(__aarch64__) || defined(__SSE4_1__)

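  // 4-wide unsigned sorting network: compare-exchange lane pairs (0,1),(2,3),
  // then (0,2),(1,3), then (1,2); the select masks merge the mins and maxes
  // back into their lanes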
  __forceinline vint4 usort_ascending(const vint4& v)
  {
    const vint4 a0 = v;
    const vint4 b0 = shuffle<1,0,3,2>(a0);
    const vint4 c0 = umin(a0,b0);
    const vint4 d0 = umax(a0,b0);
    const vint4 a1 = select<0x5 /* 0b0101 */>(c0,d0);
    const vint4 b1 = shuffle<2,3,0,1>(a1);
    const vint4 c1 = umin(a1,b1);
    const vint4 d1 = umax(a1,b1);
    const vint4 a2 = select<0x3 /* 0b0011 */>(c1,d1);
    const vint4 b2 = shuffle<0,2,1,3>(a2);
    const vint4 c2 = umin(a2,b2);
    const vint4 d2 = umax(a2,b2);
    const vint4 a3 = select<0x2 /* 0b0010 */>(c2,d2);
    return a3;
  }

  __forceinline vint4 usort_descending(const vint4& v)
  {
    const vint4 a0 = v;
    const vint4 b0 = shuffle<1,0,3,2>(a0);
    const vint4 c0 = umax(a0,b0);
    const vint4 d0 = umin(a0,b0);
    const vint4 a1 = select<0x5 /* 0b0101 */>(c0,d0);
    const vint4 b1 = shuffle<2,3,0,1>(a1);
    const vint4 c1 = umax(a1,b1);
    const vint4 d1 = umin(a1,b1);
    const vint4 a2 = select<0x3 /* 0b0011 */>(c1,d1);
    const vint4 b2 = shuffle<0,2,1,3>(a2);
    const vint4 c2 = umax(a2,b2);
    const vint4 d2 = umin(a2,b2);
    const vint4 a3 = select<0x2 /* 0b0010 */>(c2,d2);
    return a3;
  }

#else

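  // SSE2 fallback: without unsigned min/max, flipping the sign bit (subtracting
  // 0x80000000) lets signed comparisons order unsigned values; the bias is
  // removed again before returning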
  __forceinline vint4 usort_ascending(const vint4& v)
  {
    const vint4 a0 = v-vint4(0x80000000);
    const vint4 b0 = shuffle<1,0,3,2>(a0);
    const vint4 c0 = min(a0,b0);
    const vint4 d0 = max(a0,b0);
    const vint4 a1 = select<0x5 /* 0b0101 */>(c0,d0);
    const vint4 b1 = shuffle<2,3,0,1>(a1);
    const vint4 c1 = min(a1,b1);
    const vint4 d1 = max(a1,b1);
    const vint4 a2 = select<0x3 /* 0b0011 */>(c1,d1);
    const vint4 b2 = shuffle<0,2,1,3>(a2);
    const vint4 c2 = min(a2,b2);
    const vint4 d2 = max(a2,b2);
    const vint4 a3 = select<0x2 /* 0b0010 */>(c2,d2);
    return a3+vint4(0x80000000);
  }

  __forceinline vint4 usort_descending(const vint4& v)
  {
    const vint4 a0 = v-vint4(0x80000000);
    const vint4 b0 = shuffle<1,0,3,2>(a0);
    const vint4 c0 = max(a0,b0);
    const vint4 d0 = min(a0,b0);
    const vint4 a1 = select<0x5 /* 0b0101 */>(c0,d0);
    const vint4 b1 = shuffle<2,3,0,1>(a1);
    const vint4 c1 = max(a1,b1);
    const vint4 d1 = min(a1,b1);
    const vint4 a2 = select<0x3 /* 0b0011 */>(c1,d1);
    const vint4 b2 = shuffle<0,2,1,3>(a2);
    const vint4 c2 = max(a2,b2);
    const vint4 d2 = min(a2,b2);
    const vint4 a3 = select<0x2 /* 0b0010 */>(c2,d2);
    return a3+vint4(0x80000000);
  }

#endif

  ////////////////////////////////////////////////////////////////////////////////
  /// Output Operators
  ////////////////////////////////////////////////////////////////////////////////

  __forceinline embree_ostream operator <<(embree_ostream cout, const vint4& a) {
    return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ">";
  }
}

#undef vboolf
#undef vboold
#undef vint
#undef vuint
#undef vllong
#undef vfloat
#undef vdouble