1 | // Copyright 2009-2021 Intel Corporation |
2 | // SPDX-License-Identifier: Apache-2.0 |
3 | |
4 | #pragma once |
5 | |
6 | #define vboolf vboolf_impl |
7 | #define vboold vboold_impl |
8 | #define vint vint_impl |
9 | #define vuint vuint_impl |
10 | #define vllong vllong_impl |
11 | #define vfloat vfloat_impl |
12 | #define vdouble vdouble_impl |
13 | |
14 | namespace embree |
15 | { |
16 | /* 4-wide SSE float type */ |
17 | template<> |
18 | struct vfloat<4> |
19 | { |
20 | ALIGNED_STRUCT_(16); |
21 | |
22 | typedef vboolf4 Bool; |
23 | typedef vint4 Int; |
24 | typedef vfloat4 Float; |
25 | |
26 | enum { size = 4 }; // number of SIMD elements |
27 | union { __m128 v; float f[4]; int i[4]; }; // data |
28 | |
29 | //////////////////////////////////////////////////////////////////////////////// |
30 | /// Constructors, Assignment & Cast Operators |
31 | //////////////////////////////////////////////////////////////////////////////// |
32 | |
33 | __forceinline vfloat() {} |
34 | __forceinline vfloat(const vfloat4& other) { v = other.v; } |
35 | __forceinline vfloat4& operator =(const vfloat4& other) { v = other.v; return *this; } |
36 | |
37 | __forceinline vfloat(__m128 a) : v(a) {} |
38 | __forceinline operator const __m128&() const { return v; } |
39 | __forceinline operator __m128&() { return v; } |
40 | |
41 | __forceinline vfloat(float a) : v(_mm_set1_ps(a)) {} |
42 | __forceinline vfloat(float a, float b, float c, float d) : v(_mm_set_ps(d, c, b, a)) {} |
43 | |
44 | __forceinline explicit vfloat(const vint4& a) : v(_mm_cvtepi32_ps(a)) {} |
45 | #if defined(__aarch64__) |
46 | __forceinline explicit vfloat(const vuint4& x) { |
47 | v = vcvtq_f32_u32(vreinterpretq_u32_s32(x.v)); |
48 | } |
49 | #else |
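    // SSE has no unsigned-to-float conversion. Convert the low 31 bits with
    // the signed conversion, and if the sign bit was set add 2^31, built
    // directly from its float bit pattern (0x4F000000).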
50 | __forceinline explicit vfloat(const vuint4& x) { |
51 | const __m128i a = _mm_and_si128(x,_mm_set1_epi32(0x7FFFFFFF)); |
      const __m128i b = _mm_and_si128(_mm_srai_epi32(x,31),_mm_set1_epi32(0x4F000000)); // 0x4F000000 is 2^31 as a float bit pattern
53 | const __m128 af = _mm_cvtepi32_ps(a); |
54 | const __m128 bf = _mm_castsi128_ps(b); |
55 | v = _mm_add_ps(af,bf); |
56 | } |
#endif

    ////////////////////////////////////////////////////////////////////////////////
59 | /// Constants |
60 | //////////////////////////////////////////////////////////////////////////////// |
61 | |
62 | __forceinline vfloat(ZeroTy) : v(_mm_setzero_ps()) {} |
63 | __forceinline vfloat(OneTy) : v(_mm_set1_ps(1.0f)) {} |
64 | __forceinline vfloat(PosInfTy) : v(_mm_set1_ps(pos_inf)) {} |
65 | __forceinline vfloat(NegInfTy) : v(_mm_set1_ps(neg_inf)) {} |
66 | __forceinline vfloat(StepTy) : v(_mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f)) {} |
67 | __forceinline vfloat(NaNTy) : v(_mm_set1_ps(nan)) {} |
68 | __forceinline vfloat(UndefinedTy) : v(_mm_undefined_ps()) {} |
69 | |
70 | //////////////////////////////////////////////////////////////////////////////// |
71 | /// Loads and Stores |
72 | //////////////////////////////////////////////////////////////////////////////// |
73 | |
74 | static __forceinline vfloat4 load (const void* a) { return _mm_load_ps((float*)a); } |
75 | static __forceinline vfloat4 loadu(const void* a) { return _mm_loadu_ps((float*)a); } |
76 | |
77 | static __forceinline void store (void* ptr, const vfloat4& v) { _mm_store_ps((float*)ptr,v); } |
78 | static __forceinline void storeu(void* ptr, const vfloat4& v) { _mm_storeu_ps((float*)ptr,v); } |
79 | |
80 | #if defined(__AVX512VL__) |
81 | |
82 | static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return _mm_mask_load_ps (_mm_setzero_ps(),mask,(float*)ptr); } |
83 | static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return _mm_mask_loadu_ps(_mm_setzero_ps(),mask,(float*)ptr); } |
84 | |
85 | static __forceinline void store (const vboolf4& mask, void* ptr, const vfloat4& v) { _mm_mask_store_ps ((float*)ptr,mask,v); } |
86 | static __forceinline void storeu(const vboolf4& mask, void* ptr, const vfloat4& v) { _mm_mask_storeu_ps((float*)ptr,mask,v); } |
87 | #elif defined(__AVX__) |
    static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return _mm_maskload_ps((float*)ptr,(__m128i)mask); }
    static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return _mm_maskload_ps((float*)ptr,(__m128i)mask); }
90 | |
91 | static __forceinline void store (const vboolf4& mask, void* ptr, const vfloat4& v) { _mm_maskstore_ps((float*)ptr,(__m128i)mask,v); } |
92 | static __forceinline void storeu(const vboolf4& mask, void* ptr, const vfloat4& v) { _mm_maskstore_ps((float*)ptr,(__m128i)mask,v); } |
93 | #else |
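    // Pre-AVX fallback: the masked load ANDs the loaded lanes with the mask
    // (inactive lanes become +0.0f), and the masked store is a full-width
    // read-modify-write. Both touch all 16 bytes, unlike the hardware-masked
    // variants above.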
94 | static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return _mm_and_ps(_mm_load_ps ((float*)ptr),mask); } |
95 | static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return _mm_and_ps(_mm_loadu_ps((float*)ptr),mask); } |
96 | |
97 | static __forceinline void store (const vboolf4& mask, void* ptr, const vfloat4& v) { store (ptr,select(mask,v,load (ptr))); } |
98 | static __forceinline void storeu(const vboolf4& mask, void* ptr, const vfloat4& v) { storeu(ptr,select(mask,v,loadu(ptr))); } |
99 | #endif |
100 | |
101 | #if defined(__AVX__) |
102 | static __forceinline vfloat4 broadcast(const void* a) { return _mm_broadcast_ss((float*)a); } |
103 | #else |
104 | static __forceinline vfloat4 broadcast(const void* a) { return _mm_set1_ps(*(float*)a); } |
105 | #endif |
106 | |
107 | static __forceinline vfloat4 load_nt (const float* ptr) { |
108 | #if defined (__SSE4_1__) |
109 | return _mm_castsi128_ps(_mm_stream_load_si128((__m128i*)ptr)); |
110 | #else |
111 | return _mm_load_ps(ptr); |
112 | #endif |
113 | } |
114 | |
115 | #if defined(__aarch64__) |
116 | static __forceinline vfloat4 load(const char* ptr) { |
117 | return __m128(_mm_load4epi8_f32(((__m128i*)ptr))); |
118 | } |
119 | #elif defined(__SSE4_1__) |
120 | static __forceinline vfloat4 load(const char* ptr) { |
121 | return _mm_cvtepi32_ps(_mm_cvtepi8_epi32(_mm_loadu_si128((__m128i*)ptr))); |
122 | } |
123 | #else |
124 | static __forceinline vfloat4 load(const char* ptr) { |
125 | return vfloat4(ptr[0],ptr[1],ptr[2],ptr[3]); |
126 | } |
127 | #endif |
128 | |
129 | #if defined(__aarch64__) |
130 | static __forceinline vfloat4 load(const unsigned char* ptr) { |
131 | return __m128(_mm_load4epu8_f32(((__m128i*)ptr))); |
132 | } |
133 | #elif defined(__SSE4_1__) |
134 | static __forceinline vfloat4 load(const unsigned char* ptr) { |
135 | return _mm_cvtepi32_ps(_mm_cvtepu8_epi32(_mm_loadu_si128((__m128i*)ptr))); |
136 | } |
137 | #else |
138 | static __forceinline vfloat4 load(const unsigned char* ptr) { |
139 | //return _mm_cvtpu8_ps(*(__m64*)ptr); // don't enable, will use MMX instructions |
140 | return vfloat4(ptr[0],ptr[1],ptr[2],ptr[3]); |
141 | } |
142 | #endif |
143 | |
144 | #if defined(__aarch64__) |
145 | static __forceinline vfloat4 load(const short* ptr) { |
146 | return __m128(_mm_load4epi16_f32(((__m128i*)ptr))); |
147 | } |
148 | #elif defined(__SSE4_1__) |
149 | static __forceinline vfloat4 load(const short* ptr) { |
150 | return _mm_cvtepi32_ps(_mm_cvtepi16_epi32(_mm_loadu_si128((__m128i*)ptr))); |
151 | } |
152 | #else |
153 | static __forceinline vfloat4 load(const short* ptr) { |
154 | return vfloat4(ptr[0],ptr[1],ptr[2],ptr[3]); |
155 | } |
156 | #endif |
157 | |
158 | static __forceinline vfloat4 load(const unsigned short* ptr) { |
159 | return _mm_mul_ps(vfloat4(vint4::load(ptr)),vfloat4(1.0f/65535.0f)); |
160 | } |
161 | |
162 | static __forceinline void store_nt(void* ptr, const vfloat4& v) |
163 | { |
#if defined (__SSE4_1__)
      _mm_stream_ps((float*)ptr,v);
170 | #else |
171 | _mm_store_ps((float*)ptr,v); |
172 | #endif |
173 | } |
174 | |
175 | template<int scale = 4> |
176 | static __forceinline vfloat4 gather(const float* ptr, const vint4& index) { |
177 | #if defined(__AVX2__) && !defined(__aarch64__) |
178 | return _mm_i32gather_ps(ptr, index, scale); |
179 | #else |
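      // Scalar fallback: 'scale' is a byte stride applied to each 32-bit
      // index, mirroring the AVX2 gather semantics above.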
180 | return vfloat4( |
181 | *(float*)(((char*)ptr)+scale*index[0]), |
182 | *(float*)(((char*)ptr)+scale*index[1]), |
183 | *(float*)(((char*)ptr)+scale*index[2]), |
184 | *(float*)(((char*)ptr)+scale*index[3])); |
185 | #endif |
186 | } |
187 | |
188 | template<int scale = 4> |
189 | static __forceinline vfloat4 gather(const vboolf4& mask, const float* ptr, const vint4& index) { |
190 | vfloat4 r = zero; |
191 | #if defined(__AVX512VL__) |
192 | return _mm_mmask_i32gather_ps(r, mask, index, ptr, scale); |
193 | #elif defined(__AVX2__) && !defined(__aarch64__) |
194 | return _mm_mask_i32gather_ps(r, ptr, index, mask, scale); |
195 | #else |
196 | if (likely(mask[0])) r[0] = *(float*)(((char*)ptr)+scale*index[0]); |
197 | if (likely(mask[1])) r[1] = *(float*)(((char*)ptr)+scale*index[1]); |
198 | if (likely(mask[2])) r[2] = *(float*)(((char*)ptr)+scale*index[2]); |
199 | if (likely(mask[3])) r[3] = *(float*)(((char*)ptr)+scale*index[3]); |
200 | return r; |
201 | #endif |
202 | } |
203 | |
204 | template<int scale = 4> |
205 | static __forceinline void scatter(void* ptr, const vint4& index, const vfloat4& v) |
206 | { |
207 | #if defined(__AVX512VL__) |
208 | _mm_i32scatter_ps((float*)ptr, index, v, scale); |
209 | #else |
210 | *(float*)(((char*)ptr)+scale*index[0]) = v[0]; |
211 | *(float*)(((char*)ptr)+scale*index[1]) = v[1]; |
212 | *(float*)(((char*)ptr)+scale*index[2]) = v[2]; |
213 | *(float*)(((char*)ptr)+scale*index[3]) = v[3]; |
214 | #endif |
215 | } |
216 | |
217 | template<int scale = 4> |
218 | static __forceinline void scatter(const vboolf4& mask, void* ptr, const vint4& index, const vfloat4& v) |
219 | { |
220 | #if defined(__AVX512VL__) |
221 | _mm_mask_i32scatter_ps((float*)ptr ,mask, index, v, scale); |
222 | #else |
223 | if (likely(mask[0])) *(float*)(((char*)ptr)+scale*index[0]) = v[0]; |
224 | if (likely(mask[1])) *(float*)(((char*)ptr)+scale*index[1]) = v[1]; |
225 | if (likely(mask[2])) *(float*)(((char*)ptr)+scale*index[2]) = v[2]; |
226 | if (likely(mask[3])) *(float*)(((char*)ptr)+scale*index[3]) = v[3]; |
227 | #endif |
228 | } |
229 | |
230 | static __forceinline void store(const vboolf4& mask, char* ptr, const vint4& ofs, const vfloat4& v) { |
231 | scatter<1>(mask,ptr,ofs,v); |
232 | } |
233 | static __forceinline void store(const vboolf4& mask, float* ptr, const vint4& ofs, const vfloat4& v) { |
234 | scatter<4>(mask,ptr,ofs,v); |
235 | } |
236 | |
237 | //////////////////////////////////////////////////////////////////////////////// |
238 | /// Array Access |
239 | //////////////////////////////////////////////////////////////////////////////// |
240 | |
241 | __forceinline const float& operator [](size_t index) const { assert(index < 4); return f[index]; } |
242 | __forceinline float& operator [](size_t index) { assert(index < 4); return f[index]; } |
243 | |
244 | friend __forceinline vfloat4 select(const vboolf4& m, const vfloat4& t, const vfloat4& f) { |
245 | #if defined(__AVX512VL__) |
246 | return _mm_mask_blend_ps(m, f, t); |
#elif defined(__SSE4_1__) || defined(__aarch64__)
248 | return _mm_blendv_ps(f, t, m); |
249 | #else |
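      // SSE2 fallback: bitwise blend; assumes each mask lane is all-ones or
      // all-zeros, as produced by the comparison operators.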
250 | return _mm_or_ps(_mm_and_ps(m, t), _mm_andnot_ps(m, f)); |
251 | #endif |
252 | } |
253 | }; |
254 | |
255 | //////////////////////////////////////////////////////////////////////////////// |
256 | /// Load/Store |
257 | //////////////////////////////////////////////////////////////////////////////// |
258 | |
259 | template<> struct mem<vfloat4> |
260 | { |
261 | static __forceinline vfloat4 load (const vboolf4& mask, const void* ptr) { return vfloat4::load (mask,ptr); } |
262 | static __forceinline vfloat4 loadu(const vboolf4& mask, const void* ptr) { return vfloat4::loadu(mask,ptr); } |
263 | |
264 | static __forceinline void store (const vboolf4& mask, void* ptr, const vfloat4& v) { vfloat4::store (mask,ptr,v); } |
265 | static __forceinline void storeu(const vboolf4& mask, void* ptr, const vfloat4& v) { vfloat4::storeu(mask,ptr,v); } |
266 | }; |
267 | |
268 | //////////////////////////////////////////////////////////////////////////////// |
269 | /// Unary Operators |
270 | //////////////////////////////////////////////////////////////////////////////// |
271 | |
272 | __forceinline vfloat4 asFloat(const vint4& a) { return _mm_castsi128_ps(a); } |
273 | __forceinline vint4 asInt (const vfloat4& a) { return _mm_castps_si128(a); } |
274 | __forceinline vuint4 asUInt (const vfloat4& a) { return _mm_castps_si128(a); } |
275 | |
276 | __forceinline vint4 toInt (const vfloat4& a) { return vint4(a); } |
277 | __forceinline vfloat4 toFloat(const vint4& a) { return vfloat4(a); } |
278 | |
279 | __forceinline vfloat4 operator +(const vfloat4& a) { return a; } |
280 | #if defined(__aarch64__) |
281 | __forceinline vfloat4 operator -(const vfloat4& a) { |
282 | return vnegq_f32(a); |
283 | } |
284 | #else |
285 | __forceinline vfloat4 operator -(const vfloat4& a) { return _mm_xor_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x80000000))); } |
286 | #endif |
287 | |
288 | #if defined(__aarch64__) |
289 | __forceinline vfloat4 abs(const vfloat4& a) { return _mm_abs_ps(a); } |
290 | #else |
291 | __forceinline vfloat4 abs(const vfloat4& a) { return _mm_and_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff))); } |
292 | #endif |
293 | |
294 | #if defined(__AVX512VL__) |
295 | __forceinline vfloat4 sign(const vfloat4& a) { return _mm_mask_blend_ps(_mm_cmp_ps_mask(a, vfloat4(zero), _CMP_LT_OQ), vfloat4(one), -vfloat4(one)); } |
296 | #else |
297 | __forceinline vfloat4 sign(const vfloat4& a) { return blendv_ps(vfloat4(one), -vfloat4(one), _mm_cmplt_ps(a, vfloat4(zero))); } |
298 | #endif |
299 | |
300 | __forceinline vfloat4 signmsk(const vfloat4& a) { return _mm_and_ps(a,_mm_castsi128_ps(_mm_set1_epi32(0x80000000))); } |
301 | |
302 | __forceinline vfloat4 rcp(const vfloat4& a) |
303 | { |
304 | #if defined(__aarch64__) |
305 | return vfloat4(vdivq_f32(vdupq_n_f32(1.0f),a.v)); |
306 | #else |
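  // x86 path: refine the hardware reciprocal estimate (~12 bits from
  // _mm_rcp_ps, ~14 bits from _mm_rcp14_ps) with one Newton-Raphson step,
  // roughly doubling the number of correct bits.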
307 | |
308 | #if defined(__AVX512VL__) |
309 | const vfloat4 r = _mm_rcp14_ps(a); |
310 | #else |
311 | const vfloat4 r = _mm_rcp_ps(a); |
312 | #endif |
313 | |
314 | #if defined(__AVX2__) |
315 | return _mm_fmadd_ps(r, _mm_fnmadd_ps(a, r, vfloat4(1.0f)), r); // computes r + r * (1 - a * r) |
316 | #else |
317 | return _mm_add_ps(r,_mm_mul_ps(r, _mm_sub_ps(vfloat4(1.0f), _mm_mul_ps(a, r)))); // computes r + r * (1 - a * r) |
318 | #endif |
319 | |
320 | #endif //defined(__aarch64__) |
321 | } |
322 | __forceinline vfloat4 sqr (const vfloat4& a) { return _mm_mul_ps(a,a); } |
323 | __forceinline vfloat4 sqrt(const vfloat4& a) { return _mm_sqrt_ps(a); } |
324 | |
325 | __forceinline vfloat4 rsqrt(const vfloat4& a) |
326 | { |
327 | #if defined(__aarch64__) |
328 | vfloat4 r = _mm_rsqrt_ps(a); |
329 | r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r)); |
330 | r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r)); |
331 | r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(a, r), r)); |
332 | return r; |
333 | #else |
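  // x86 path: one Newton-Raphson step for 1/sqrt(a),
  // r' = r*(1.5 - 0.5*a*r*r), applied to the hardware estimate.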
334 | |
335 | #if defined(__AVX512VL__) |
336 | vfloat4 r = _mm_rsqrt14_ps(a); |
337 | #else |
338 | vfloat4 r = _mm_rsqrt_ps(a); |
339 | #endif |
340 | |
341 | #if defined(__AVX2__) |
342 | r = _mm_fmadd_ps(_mm_set1_ps(1.5f), r, _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r))); |
343 | #else |
344 | r = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(1.5f), r), _mm_mul_ps(_mm_mul_ps(_mm_mul_ps(a, _mm_set1_ps(-0.5f)), r), _mm_mul_ps(r, r))); |
345 | #endif |
346 | |
347 | #endif |
348 | return r; |
349 | } |
350 | |
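  // isnan: clear the sign bit and compare the remaining bits as integers
  // against the bit pattern of +inf (0x7f800000); only NaNs compare greater.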
351 | __forceinline vboolf4 isnan(const vfloat4& a) { |
352 | const vfloat4 b = _mm_and_ps(a, _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff))); |
353 | #if defined(__AVX512VL__) |
354 | return _mm_cmp_epi32_mask(_mm_castps_si128(b), _mm_set1_epi32(0x7f800000), _MM_CMPINT_GT); |
355 | #else |
356 | return _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_castps_si128(b), _mm_set1_epi32(0x7f800000))); |
357 | #endif |
358 | } |
359 | |
360 | //////////////////////////////////////////////////////////////////////////////// |
361 | /// Binary Operators |
362 | //////////////////////////////////////////////////////////////////////////////// |
363 | |
364 | __forceinline vfloat4 operator +(const vfloat4& a, const vfloat4& b) { return _mm_add_ps(a, b); } |
365 | __forceinline vfloat4 operator +(const vfloat4& a, float b) { return a + vfloat4(b); } |
366 | __forceinline vfloat4 operator +(float a, const vfloat4& b) { return vfloat4(a) + b; } |
367 | |
368 | __forceinline vfloat4 operator -(const vfloat4& a, const vfloat4& b) { return _mm_sub_ps(a, b); } |
369 | __forceinline vfloat4 operator -(const vfloat4& a, float b) { return a - vfloat4(b); } |
370 | __forceinline vfloat4 operator -(float a, const vfloat4& b) { return vfloat4(a) - b; } |
371 | |
372 | __forceinline vfloat4 operator *(const vfloat4& a, const vfloat4& b) { return _mm_mul_ps(a, b); } |
373 | __forceinline vfloat4 operator *(const vfloat4& a, float b) { return a * vfloat4(b); } |
374 | __forceinline vfloat4 operator *(float a, const vfloat4& b) { return vfloat4(a) * b; } |
375 | |
376 | __forceinline vfloat4 operator /(const vfloat4& a, const vfloat4& b) { return _mm_div_ps(a,b); } |
377 | __forceinline vfloat4 operator /(const vfloat4& a, float b) { return a/vfloat4(b); } |
378 | __forceinline vfloat4 operator /(float a, const vfloat4& b) { return vfloat4(a)/b; } |
379 | |
380 | __forceinline vfloat4 operator &(const vfloat4& a, const vfloat4& b) { return _mm_and_ps(a,b); } |
381 | __forceinline vfloat4 operator |(const vfloat4& a, const vfloat4& b) { return _mm_or_ps(a,b); } |
382 | __forceinline vfloat4 operator ^(const vfloat4& a, const vfloat4& b) { return _mm_xor_ps(a,b); } |
383 | __forceinline vfloat4 operator ^(const vfloat4& a, const vint4& b) { return _mm_xor_ps(a,_mm_castsi128_ps(b)); } |
384 | |
385 | __forceinline vfloat4 min(const vfloat4& a, const vfloat4& b) { return _mm_min_ps(a,b); } |
386 | __forceinline vfloat4 min(const vfloat4& a, float b) { return _mm_min_ps(a,vfloat4(b)); } |
387 | __forceinline vfloat4 min(float a, const vfloat4& b) { return _mm_min_ps(vfloat4(a),b); } |
388 | |
389 | __forceinline vfloat4 max(const vfloat4& a, const vfloat4& b) { return _mm_max_ps(a,b); } |
390 | __forceinline vfloat4 max(const vfloat4& a, float b) { return _mm_max_ps(a,vfloat4(b)); } |
391 | __forceinline vfloat4 max(float a, const vfloat4& b) { return _mm_max_ps(vfloat4(a),b); } |
392 | |
393 | #if defined(__SSE4_1__) || defined(__aarch64__) |
394 | |
395 | __forceinline vfloat4 mini(const vfloat4& a, const vfloat4& b) { |
396 | const vint4 ai = _mm_castps_si128(a); |
397 | const vint4 bi = _mm_castps_si128(b); |
398 | const vint4 ci = _mm_min_epi32(ai,bi); |
399 | return _mm_castsi128_ps(ci); |
400 | } |
401 | |
402 | __forceinline vfloat4 maxi(const vfloat4& a, const vfloat4& b) { |
403 | const vint4 ai = _mm_castps_si128(a); |
404 | const vint4 bi = _mm_castps_si128(b); |
405 | const vint4 ci = _mm_max_epi32(ai,bi); |
406 | return _mm_castsi128_ps(ci); |
407 | } |
408 | |
409 | __forceinline vfloat4 minui(const vfloat4& a, const vfloat4& b) { |
410 | const vint4 ai = _mm_castps_si128(a); |
411 | const vint4 bi = _mm_castps_si128(b); |
412 | const vint4 ci = _mm_min_epu32(ai,bi); |
413 | return _mm_castsi128_ps(ci); |
414 | } |
415 | |
416 | __forceinline vfloat4 maxui(const vfloat4& a, const vfloat4& b) { |
417 | const vint4 ai = _mm_castps_si128(a); |
418 | const vint4 bi = _mm_castps_si128(b); |
419 | const vint4 ci = _mm_max_epu32(ai,bi); |
420 | return _mm_castsi128_ps(ci); |
421 | } |
422 | #else |
423 | __forceinline vfloat4 mini(const vfloat4& a, const vfloat4& b) { |
424 | return min(a,b); |
425 | } |
426 | |
427 | __forceinline vfloat4 maxi(const vfloat4& a, const vfloat4& b) { |
428 | return max(a,b); |
429 | } |
430 | #endif |
431 | |
432 | //////////////////////////////////////////////////////////////////////////////// |
433 | /// Ternary Operators |
434 | //////////////////////////////////////////////////////////////////////////////// |
435 | |
436 | #if defined(__AVX2__) || defined(__ARM_NEON) |
437 | __forceinline vfloat4 madd (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fmadd_ps(a,b,c); } |
438 | __forceinline vfloat4 msub (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fmsub_ps(a,b,c); } |
439 | __forceinline vfloat4 nmadd(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fnmadd_ps(a,b,c); } |
440 | __forceinline vfloat4 nmsub(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return _mm_fnmsub_ps(a,b,c); } |
441 | #else |
  __forceinline vfloat4 madd (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return a*b+c; }
  __forceinline vfloat4 msub (const vfloat4& a, const vfloat4& b, const vfloat4& c) { return a*b-c; }
  __forceinline vfloat4 nmadd(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return -a*b+c; }
  __forceinline vfloat4 nmsub(const vfloat4& a, const vfloat4& b, const vfloat4& c) { return -a*b-c; }
447 | #endif |
448 | |
449 | //////////////////////////////////////////////////////////////////////////////// |
450 | /// Assignment Operators |
451 | //////////////////////////////////////////////////////////////////////////////// |
452 | |
453 | __forceinline vfloat4& operator +=(vfloat4& a, const vfloat4& b) { return a = a + b; } |
454 | __forceinline vfloat4& operator +=(vfloat4& a, float b) { return a = a + b; } |
455 | |
456 | __forceinline vfloat4& operator -=(vfloat4& a, const vfloat4& b) { return a = a - b; } |
457 | __forceinline vfloat4& operator -=(vfloat4& a, float b) { return a = a - b; } |
458 | |
459 | __forceinline vfloat4& operator *=(vfloat4& a, const vfloat4& b) { return a = a * b; } |
460 | __forceinline vfloat4& operator *=(vfloat4& a, float b) { return a = a * b; } |
461 | |
462 | __forceinline vfloat4& operator /=(vfloat4& a, const vfloat4& b) { return a = a / b; } |
463 | __forceinline vfloat4& operator /=(vfloat4& a, float b) { return a = a / b; } |
464 | |
465 | //////////////////////////////////////////////////////////////////////////////// |
466 | /// Comparison Operators + Select |
467 | //////////////////////////////////////////////////////////////////////////////// |
468 | |
469 | #if defined(__AVX512VL__) |
470 | __forceinline vboolf4 operator ==(const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_EQ); } |
471 | __forceinline vboolf4 operator !=(const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_NE); } |
472 | __forceinline vboolf4 operator < (const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_LT); } |
473 | __forceinline vboolf4 operator >=(const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_GE); } |
474 | __forceinline vboolf4 operator > (const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_GT); } |
475 | __forceinline vboolf4 operator <=(const vfloat4& a, const vfloat4& b) { return _mm_cmp_ps_mask(a, b, _MM_CMPINT_LE); } |
476 | #else |
477 | __forceinline vboolf4 operator ==(const vfloat4& a, const vfloat4& b) { return _mm_cmpeq_ps (a, b); } |
478 | __forceinline vboolf4 operator !=(const vfloat4& a, const vfloat4& b) { return _mm_cmpneq_ps(a, b); } |
479 | __forceinline vboolf4 operator < (const vfloat4& a, const vfloat4& b) { return _mm_cmplt_ps (a, b); } |
480 | #if defined(__aarch64__) |
481 | __forceinline vboolf4 operator >=(const vfloat4& a, const vfloat4& b) { return _mm_cmpge_ps (a, b); } |
482 | __forceinline vboolf4 operator > (const vfloat4& a, const vfloat4& b) { return _mm_cmpgt_ps (a, b); } |
483 | #else |
484 | __forceinline vboolf4 operator >=(const vfloat4& a, const vfloat4& b) { return _mm_cmpnlt_ps(a, b); } |
485 | __forceinline vboolf4 operator > (const vfloat4& a, const vfloat4& b) { return _mm_cmpnle_ps(a, b); } |
486 | #endif |
487 | __forceinline vboolf4 operator <=(const vfloat4& a, const vfloat4& b) { return _mm_cmple_ps (a, b); } |
488 | #endif |
489 | |
490 | __forceinline vboolf4 operator ==(const vfloat4& a, float b) { return a == vfloat4(b); } |
491 | __forceinline vboolf4 operator ==(float a, const vfloat4& b) { return vfloat4(a) == b; } |
492 | |
493 | __forceinline vboolf4 operator !=(const vfloat4& a, float b) { return a != vfloat4(b); } |
494 | __forceinline vboolf4 operator !=(float a, const vfloat4& b) { return vfloat4(a) != b; } |
495 | |
496 | __forceinline vboolf4 operator < (const vfloat4& a, float b) { return a < vfloat4(b); } |
497 | __forceinline vboolf4 operator < (float a, const vfloat4& b) { return vfloat4(a) < b; } |
498 | |
499 | __forceinline vboolf4 operator >=(const vfloat4& a, float b) { return a >= vfloat4(b); } |
500 | __forceinline vboolf4 operator >=(float a, const vfloat4& b) { return vfloat4(a) >= b; } |
501 | |
502 | __forceinline vboolf4 operator > (const vfloat4& a, float b) { return a > vfloat4(b); } |
503 | __forceinline vboolf4 operator > (float a, const vfloat4& b) { return vfloat4(a) > b; } |
504 | |
505 | __forceinline vboolf4 operator <=(const vfloat4& a, float b) { return a <= vfloat4(b); } |
506 | __forceinline vboolf4 operator <=(float a, const vfloat4& b) { return vfloat4(a) <= b; } |
507 | |
508 | __forceinline vboolf4 eq(const vfloat4& a, const vfloat4& b) { return a == b; } |
509 | __forceinline vboolf4 ne(const vfloat4& a, const vfloat4& b) { return a != b; } |
510 | __forceinline vboolf4 lt(const vfloat4& a, const vfloat4& b) { return a < b; } |
511 | __forceinline vboolf4 ge(const vfloat4& a, const vfloat4& b) { return a >= b; } |
512 | __forceinline vboolf4 gt(const vfloat4& a, const vfloat4& b) { return a > b; } |
513 | __forceinline vboolf4 le(const vfloat4& a, const vfloat4& b) { return a <= b; } |
514 | |
515 | #if defined(__AVX512VL__) |
516 | __forceinline vboolf4 eq(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_EQ); } |
517 | __forceinline vboolf4 ne(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_NE); } |
518 | __forceinline vboolf4 lt(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_LT); } |
519 | __forceinline vboolf4 ge(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_GE); } |
520 | __forceinline vboolf4 gt(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_GT); } |
521 | __forceinline vboolf4 le(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return _mm_mask_cmp_ps_mask(mask, a, b, _MM_CMPINT_LE); } |
522 | #else |
523 | __forceinline vboolf4 eq(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a == b); } |
524 | __forceinline vboolf4 ne(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a != b); } |
525 | __forceinline vboolf4 lt(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a < b); } |
526 | __forceinline vboolf4 ge(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a >= b); } |
527 | __forceinline vboolf4 gt(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a > b); } |
528 | __forceinline vboolf4 le(const vboolf4& mask, const vfloat4& a, const vfloat4& b) { return mask & (a <= b); } |
529 | #endif |
530 | |
531 | template<int mask> |
532 | __forceinline vfloat4 select(const vfloat4& t, const vfloat4& f) |
533 | { |
534 | #if defined(__SSE4_1__) |
535 | return _mm_blend_ps(f, t, mask); |
536 | #else |
537 | return select(vboolf4(mask), t, f); |
538 | #endif |
539 | } |
540 | |
541 | __forceinline vfloat4 lerp(const vfloat4& a, const vfloat4& b, const vfloat4& t) { |
542 | return madd(t,b-a,a); |
543 | } |
544 | |
545 | __forceinline bool isvalid(const vfloat4& v) { |
546 | return all((v > vfloat4(-FLT_LARGE)) & (v < vfloat4(+FLT_LARGE))); |
547 | } |
548 | |
549 | __forceinline bool is_finite(const vfloat4& a) { |
550 | return all((a >= vfloat4(-FLT_MAX)) & (a <= vfloat4(+FLT_MAX))); |
551 | } |
552 | |
553 | __forceinline bool is_finite(const vboolf4& valid, const vfloat4& a) { |
554 | return all(valid, (a >= vfloat4(-FLT_MAX)) & (a <= vfloat4(+FLT_MAX))); |
555 | } |
556 | |
557 | //////////////////////////////////////////////////////////////////////////////// |
558 | /// Rounding Functions |
559 | //////////////////////////////////////////////////////////////////////////////// |
560 | |
561 | #if defined(__aarch64__) |
562 | __forceinline vfloat4 floor(const vfloat4& a) { return vrndmq_f32(a.v); } // towards -inf |
563 | __forceinline vfloat4 ceil (const vfloat4& a) { return vrndpq_f32(a.v); } // toward +inf |
564 | __forceinline vfloat4 trunc(const vfloat4& a) { return vrndq_f32(a.v); } // towards 0 |
565 | __forceinline vfloat4 round(const vfloat4& a) { return vrndnq_f32(a.v); } // to nearest, ties to even. NOTE(LTE): arm clang uses vrndnq, old gcc uses vrndqn? |
566 | #elif defined (__SSE4_1__) |
567 | __forceinline vfloat4 floor(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEG_INF ); } |
568 | __forceinline vfloat4 ceil (const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_POS_INF ); } |
569 | __forceinline vfloat4 trunc(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_ZERO ); } |
570 | __forceinline vfloat4 round(const vfloat4& a) { return _mm_round_ps(a, _MM_FROUND_TO_NEAREST_INT); } |
571 | #else |
572 | __forceinline vfloat4 floor(const vfloat4& a) { return vfloat4(floorf(a[0]),floorf(a[1]),floorf(a[2]),floorf(a[3])); } |
573 | __forceinline vfloat4 ceil (const vfloat4& a) { return vfloat4(ceilf (a[0]),ceilf (a[1]),ceilf (a[2]),ceilf (a[3])); } |
574 | __forceinline vfloat4 trunc(const vfloat4& a) { return vfloat4(truncf(a[0]),truncf(a[1]),truncf(a[2]),truncf(a[3])); } |
575 | __forceinline vfloat4 round(const vfloat4& a) { return vfloat4(roundf(a[0]),roundf(a[1]),roundf(a[2]),roundf(a[3])); } |
576 | #endif |
577 | __forceinline vfloat4 frac(const vfloat4& a) { return a-floor(a); } |
578 | |
579 | __forceinline vint4 floori(const vfloat4& a) { |
580 | #if defined(__aarch64__) |
581 | return vcvtq_s32_f32(floor(a)); |
582 | #elif defined(__SSE4_1__) |
583 | return vint4(floor(a)); |
584 | #else |
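    // cvtps fallback: rounds (a - 0.5) to nearest, which matches floor for
    // most inputs but can be off by one at exact integers (ties round to
    // even); callers presumably tolerate this.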
585 | return vint4(a-vfloat4(0.5f)); |
586 | #endif |
587 | } |
588 | |
589 | //////////////////////////////////////////////////////////////////////////////// |
590 | /// Movement/Shifting/Shuffling Functions |
591 | //////////////////////////////////////////////////////////////////////////////// |
592 | |
593 | __forceinline vfloat4 unpacklo(const vfloat4& a, const vfloat4& b) { return _mm_unpacklo_ps(a, b); } |
594 | __forceinline vfloat4 unpackhi(const vfloat4& a, const vfloat4& b) { return _mm_unpackhi_ps(a, b); } |
595 | |
596 | #if defined(__aarch64__) |
597 | template<int i0, int i1, int i2, int i3> |
598 | __forceinline vfloat4 shuffle(const vfloat4& v) { |
599 | return vreinterpretq_f32_u8(vqtbl1q_u8( (uint8x16_t)v.v, _MN_SHUFFLE(i0, i1, i2, i3))); |
600 | } |
601 | template<int i0, int i1, int i2, int i3> |
602 | __forceinline vfloat4 shuffle(const vfloat4& a, const vfloat4& b) { |
603 | return vreinterpretq_f32_u8(vqtbl2q_u8( (uint8x16x2_t){(uint8x16_t)a.v, (uint8x16_t)b.v}, _MF_SHUFFLE(i0, i1, i2, i3))); |
604 | } |
605 | #else |
606 | template<int i0, int i1, int i2, int i3> |
607 | __forceinline vfloat4 shuffle(const vfloat4& v) { |
608 | return _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(v), _MM_SHUFFLE(i3, i2, i1, i0))); |
609 | } |
610 | |
611 | template<int i0, int i1, int i2, int i3> |
612 | __forceinline vfloat4 shuffle(const vfloat4& a, const vfloat4& b) { |
613 | return _mm_shuffle_ps(a, b, _MM_SHUFFLE(i3, i2, i1, i0)); |
614 | } |
615 | #endif |
616 | |
617 | #if defined(__SSE3__) && !defined(__aarch64__) |
618 | template<> __forceinline vfloat4 shuffle<0, 0, 2, 2>(const vfloat4& v) { return _mm_moveldup_ps(v); } |
619 | template<> __forceinline vfloat4 shuffle<1, 1, 3, 3>(const vfloat4& v) { return _mm_movehdup_ps(v); } |
620 | template<> __forceinline vfloat4 shuffle<0, 1, 0, 1>(const vfloat4& v) { return _mm_castpd_ps(_mm_movedup_pd(_mm_castps_pd(v))); } |
621 | #endif |
622 | |
623 | template<int i> |
624 | __forceinline vfloat4 shuffle(const vfloat4& v) { |
625 | return shuffle<i,i,i,i>(v); |
626 | } |
627 | |
628 | #if defined(__aarch64__) |
629 | template<int i> __forceinline float extract(const vfloat4& a) { return a[i]; } |
630 | #else |
  template<int i> __forceinline float extract(const vfloat4& a) { return _mm_cvtss_f32(shuffle<i>(a)); }
  template<> __forceinline float extract<0>(const vfloat4& a) { return _mm_cvtss_f32(a); }
633 | #endif |
634 | |
635 | #if defined (__SSE4_1__) && !defined(__aarch64__) |
636 | template<int dst, int src, int clr> __forceinline vfloat4 insert(const vfloat4& a, const vfloat4& b) { return _mm_insert_ps(a, b, (dst << 4) | (src << 6) | clr); } |
637 | template<int dst, int src> __forceinline vfloat4 insert(const vfloat4& a, const vfloat4& b) { return insert<dst, src, 0>(a, b); } |
638 | template<int dst> __forceinline vfloat4 insert(const vfloat4& a, const float b) { return insert<dst, 0>(a, _mm_set_ss(b)); } |
639 | #else |
640 | template<int dst, int src> __forceinline vfloat4 insert(const vfloat4& a, const vfloat4& b) { vfloat4 c = a; c[dst&3] = b[src&3]; return c; } |
641 | template<int dst> __forceinline vfloat4 insert(const vfloat4& a, float b) { vfloat4 c = a; c[dst&3] = b; return c; } |
642 | #endif |
643 | |
644 | __forceinline float toScalar(const vfloat4& v) { return _mm_cvtss_f32(v); } |
645 | |
646 | __forceinline vfloat4 shift_right_1(const vfloat4& x) { |
647 | return _mm_castsi128_ps(_mm_srli_si128(_mm_castps_si128(x), 4)); |
648 | } |
649 | |
650 | #if defined (__AVX2__) |
651 | __forceinline vfloat4 permute(const vfloat4 &a, const __m128i &index) { |
652 | return _mm_permutevar_ps(a,index); |
653 | } |
654 | |
655 | __forceinline vfloat4 broadcast1f(const void* a) { return _mm_broadcast_ss((float*)a); } |
656 | |
657 | #endif |
658 | |
659 | #if defined(__AVX512VL__) |
660 | template<int i> |
661 | __forceinline vfloat4 align_shift_right(const vfloat4& a, const vfloat4& b) { |
662 | return _mm_castsi128_ps(_mm_alignr_epi32(_mm_castps_si128(a), _mm_castps_si128(b), i)); |
663 | } |
664 | #endif |
665 | |
666 | |
667 | //////////////////////////////////////////////////////////////////////////////// |
668 | /// Sorting Network |
669 | //////////////////////////////////////////////////////////////////////////////// |
670 | |
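  // 4-wide sorting network: three compare-exchange stages; each stage
  // computes per-lane min/max against a shuffled copy and merges the two
  // with a compile-time blend mask.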
671 | __forceinline vfloat4 sort_ascending(const vfloat4& v) |
672 | { |
673 | const vfloat4 a0 = v; |
674 | const vfloat4 b0 = shuffle<1,0,3,2>(a0); |
675 | const vfloat4 c0 = min(a0,b0); |
676 | const vfloat4 d0 = max(a0,b0); |
677 | const vfloat4 a1 = select<0x5 /* 0b0101 */>(c0,d0); |
678 | const vfloat4 b1 = shuffle<2,3,0,1>(a1); |
679 | const vfloat4 c1 = min(a1,b1); |
680 | const vfloat4 d1 = max(a1,b1); |
681 | const vfloat4 a2 = select<0x3 /* 0b0011 */>(c1,d1); |
682 | const vfloat4 b2 = shuffle<0,2,1,3>(a2); |
683 | const vfloat4 c2 = min(a2,b2); |
684 | const vfloat4 d2 = max(a2,b2); |
685 | const vfloat4 a3 = select<0x2 /* 0b0010 */>(c2,d2); |
686 | return a3; |
687 | } |
688 | |
689 | __forceinline vfloat4 sort_descending(const vfloat4& v) |
690 | { |
691 | const vfloat4 a0 = v; |
692 | const vfloat4 b0 = shuffle<1,0,3,2>(a0); |
693 | const vfloat4 c0 = max(a0,b0); |
694 | const vfloat4 d0 = min(a0,b0); |
695 | const vfloat4 a1 = select<0x5 /* 0b0101 */>(c0,d0); |
696 | const vfloat4 b1 = shuffle<2,3,0,1>(a1); |
697 | const vfloat4 c1 = max(a1,b1); |
698 | const vfloat4 d1 = min(a1,b1); |
699 | const vfloat4 a2 = select<0x3 /* 0b0011 */>(c1,d1); |
700 | const vfloat4 b2 = shuffle<0,2,1,3>(a2); |
701 | const vfloat4 c2 = max(a2,b2); |
702 | const vfloat4 d2 = min(a2,b2); |
703 | const vfloat4 a3 = select<0x2 /* 0b0010 */>(c2,d2); |
704 | return a3; |
705 | } |
706 | |
707 | //////////////////////////////////////////////////////////////////////////////// |
708 | /// Transpose |
709 | //////////////////////////////////////////////////////////////////////////////// |
710 | |
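  // Standard 4x4 transpose: two rounds of unpacklo/unpackhi interleaving
  // turn four row registers into four column registers.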
711 | __forceinline void transpose(const vfloat4& r0, const vfloat4& r1, const vfloat4& r2, const vfloat4& r3, vfloat4& c0, vfloat4& c1, vfloat4& c2, vfloat4& c3) |
712 | { |
713 | vfloat4 l02 = unpacklo(r0,r2); |
714 | vfloat4 h02 = unpackhi(r0,r2); |
715 | vfloat4 l13 = unpacklo(r1,r3); |
716 | vfloat4 h13 = unpackhi(r1,r3); |
717 | c0 = unpacklo(l02,l13); |
718 | c1 = unpackhi(l02,l13); |
719 | c2 = unpacklo(h02,h13); |
720 | c3 = unpackhi(h02,h13); |
721 | } |
722 | |
723 | __forceinline void transpose(const vfloat4& r0, const vfloat4& r1, const vfloat4& r2, const vfloat4& r3, vfloat4& c0, vfloat4& c1, vfloat4& c2) |
724 | { |
725 | vfloat4 l02 = unpacklo(r0,r2); |
726 | vfloat4 h02 = unpackhi(r0,r2); |
727 | vfloat4 l13 = unpacklo(r1,r3); |
728 | vfloat4 h13 = unpackhi(r1,r3); |
729 | c0 = unpacklo(l02,l13); |
730 | c1 = unpackhi(l02,l13); |
731 | c2 = unpacklo(h02,h13); |
732 | } |
733 | |
734 | //////////////////////////////////////////////////////////////////////////////// |
735 | /// Reductions |
736 | //////////////////////////////////////////////////////////////////////////////// |
737 | #if defined(__aarch64__) |
738 | __forceinline vfloat4 vreduce_min(const vfloat4& v) { float h = vminvq_f32(v); return vdupq_n_f32(h); } |
739 | __forceinline vfloat4 vreduce_max(const vfloat4& v) { float h = vmaxvq_f32(v); return vdupq_n_f32(h); } |
740 | __forceinline vfloat4 vreduce_add(const vfloat4& v) { float h = vaddvq_f32(v); return vdupq_n_f32(h); } |
741 | #else |
742 | __forceinline vfloat4 vreduce_min(const vfloat4& v) { vfloat4 h = min(shuffle<1,0,3,2>(v),v); return min(shuffle<2,3,0,1>(h),h); } |
743 | __forceinline vfloat4 vreduce_max(const vfloat4& v) { vfloat4 h = max(shuffle<1,0,3,2>(v),v); return max(shuffle<2,3,0,1>(h),h); } |
744 | __forceinline vfloat4 vreduce_add(const vfloat4& v) { vfloat4 h = shuffle<1,0,3,2>(v) + v ; return shuffle<2,3,0,1>(h) + h ; } |
745 | #endif |
746 | |
747 | #if defined(__aarch64__) |
748 | __forceinline float reduce_min(const vfloat4& v) { return vminvq_f32(v); } |
749 | __forceinline float reduce_max(const vfloat4& v) { return vmaxvq_f32(v); } |
750 | __forceinline float reduce_add(const vfloat4& v) { return vaddvq_f32(v); } |
751 | #else |
752 | __forceinline float reduce_min(const vfloat4& v) { return _mm_cvtss_f32(vreduce_min(v)); } |
753 | __forceinline float reduce_max(const vfloat4& v) { return _mm_cvtss_f32(vreduce_max(v)); } |
754 | __forceinline float reduce_add(const vfloat4& v) { return _mm_cvtss_f32(vreduce_add(v)); } |
755 | #endif |
756 | |
757 | __forceinline size_t select_min(const vboolf4& valid, const vfloat4& v) |
758 | { |
759 | const vfloat4 a = select(valid,v,vfloat4(pos_inf)); |
760 | const vbool4 valid_min = valid & (a == vreduce_min(a)); |
761 | return bsf(movemask(any(valid_min) ? valid_min : valid)); |
762 | } |
763 | __forceinline size_t select_max(const vboolf4& valid, const vfloat4& v) |
764 | { |
765 | const vfloat4 a = select(valid,v,vfloat4(neg_inf)); |
766 | const vbool4 valid_max = valid & (a == vreduce_max(a)); |
767 | return bsf(movemask(any(valid_max) ? valid_max : valid)); |
768 | } |
769 | |
770 | //////////////////////////////////////////////////////////////////////////////// |
771 | /// Euclidean Space Operators |
772 | //////////////////////////////////////////////////////////////////////////////// |
773 | |
774 | __forceinline float dot(const vfloat4& a, const vfloat4& b) { |
775 | return reduce_add(a*b); |
776 | } |
777 | |
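  // Cross product via the shuffle identity
  //   cross(a,b) = shuffle<1,2,0,3>(a*shuffle<1,2,0,3>(b) - shuffle<1,2,0,3>(a)*b);
  // the w lane comes out as a[3]*b[3] - a[3]*b[3] (zero for finite inputs).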
778 | __forceinline vfloat4 cross(const vfloat4& a, const vfloat4& b) |
779 | { |
780 | const vfloat4 a0 = a; |
781 | const vfloat4 b0 = shuffle<1,2,0,3>(b); |
782 | const vfloat4 a1 = shuffle<1,2,0,3>(a); |
783 | const vfloat4 b1 = b; |
784 | return shuffle<1,2,0,3>(msub(a0,b0,a1*b1)); |
785 | } |
786 | |
787 | //////////////////////////////////////////////////////////////////////////////// |
788 | /// Output Operators |
789 | //////////////////////////////////////////////////////////////////////////////// |
790 | |
791 | __forceinline embree_ostream operator <<(embree_ostream cout, const vfloat4& a) { |
792 | return cout << "<" << a[0] << ", " << a[1] << ", " << a[2] << ", " << a[3] << ">" ; |
793 | } |
794 | |
795 | } |
796 | |
797 | #undef vboolf |
798 | #undef vboold |
799 | #undef vint |
800 | #undef vuint |
801 | #undef vllong |
802 | #undef vfloat |
803 | #undef vdouble |
804 | |