| 1 | // Copyright 2016 The SwiftShader Authors. All Rights Reserved. |
| 2 | // |
| 3 | // Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | // you may not use this file except in compliance with the License. |
| 5 | // You may obtain a copy of the License at |
| 6 | // |
| 7 | // http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | // |
| 9 | // Unless required by applicable law or agreed to in writing, software |
| 10 | // distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | // See the License for the specific language governing permissions and |
| 13 | // limitations under the License. |
| 14 | |
| 15 | #include "SamplerCore.hpp" |
| 16 | |
| 17 | #include "PixelRoutine.hpp" |
| 18 | #include "Constants.hpp" |
| 19 | #include "Vulkan/VkSampler.hpp" |
| 20 | #include "Vulkan/VkDebug.hpp" |
| 21 | |
| 22 | #include <limits> |
| 23 | |
| 24 | namespace |
| 25 | { |
| 26 | void applySwizzle(VkComponentSwizzle swizzle, sw::Float4& f, const sw::Vector4f& c, bool integer) |
| 27 | { |
| 28 | switch(swizzle) |
| 29 | { |
| 30 | case VK_COMPONENT_SWIZZLE_R: f = c.x; break; |
| 31 | case VK_COMPONENT_SWIZZLE_G: f = c.y; break; |
| 32 | case VK_COMPONENT_SWIZZLE_B: f = c.z; break; |
| 33 | case VK_COMPONENT_SWIZZLE_A: f = c.w; break; |
| 34 | case VK_COMPONENT_SWIZZLE_ZERO: f = sw::Float4(0.0f, 0.0f, 0.0f, 0.0f); break; |
| 35 | case VK_COMPONENT_SWIZZLE_ONE: |
| 36 | 			if(integer)
| 37 | { |
| 38 | f = rr::As<sw::Float4>(sw::Int4(1, 1, 1, 1)); |
| 39 | } |
| 40 | else |
| 41 | { |
| 42 | f = sw::Float4(1.0f, 1.0f, 1.0f, 1.0f); |
| 43 | } |
| 44 | break; |
| 45 | default: ASSERT(false); |
| 46 | } |
| 47 | } |
| 48 | |
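|  | 	// Remaps (x, y) for the 2x2 quad texel layout: each quad's four texels end up at consecutive addresses on the even row.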
| 49 | template <typename T> |
| 50 | void applyQuadLayout(T& x, T& y) |
| 51 | { |
| 52 | x = (((y & T(1)) + x) << 1) - (x & T(1)); |
| 53 | y &= T(~1); |
| 54 | } |
| 55 | } |
| 56 | |
| 57 | namespace sw |
| 58 | { |
| 59 | SamplerCore::SamplerCore(Pointer<Byte> &constants, const Sampler &state) : constants(constants), state(state) |
| 60 | { |
| 61 | } |
| 62 | |
| 63 | 	Vector4f SamplerCore::sampleTexture(Pointer<Byte> &texture, Pointer<Byte> &sampler, Float4 &u, Float4 &v, Float4 &w, Float4 &q, Float &lodOrBias, Float4 &dsx, Float4 &dsy, Vector4f &offset, SamplerFunction function)
| 64 | { |
| 65 | Vector4f c; |
| 66 | |
| 67 | Float4 uuuu = u; |
| 68 | Float4 vvvv = v; |
| 69 | Float4 wwww = w; |
| 70 | Float4 qqqq = q; |
| 71 | |
| 72 | Float lod; |
| 73 | Float anisotropy; |
| 74 | Float4 uDelta; |
| 75 | Float4 vDelta; |
| 76 | Float4 M; // Major axis |
| 77 | |
| 78 | if(state.textureType == VK_IMAGE_VIEW_TYPE_CUBE) |
| 79 | { |
| 80 | Int4 face = cubeFace(uuuu, vvvv, u, v, w, M); |
| 81 | wwww = As<Float4>(face); |
| 82 | } |
| 83 | |
| 84 | if(function == Implicit || function == Bias || function == Grad || function == Query) |
| 85 | { |
| 86 | if(state.textureType != VK_IMAGE_VIEW_TYPE_3D) |
| 87 | { |
| 88 | if(state.textureType != VK_IMAGE_VIEW_TYPE_CUBE) |
| 89 | { |
| 90 | computeLod(texture, sampler, lod, anisotropy, uDelta, vDelta, uuuu, vvvv, dsx, dsy, function); |
| 91 | } |
| 92 | else |
| 93 | { |
| 94 | computeLodCube(texture, sampler, lod, u, v, w, dsx, dsy, M, function); |
| 95 | } |
| 96 | } |
| 97 | else |
| 98 | { |
| 99 | computeLod3D(texture, sampler, lod, uuuu, vvvv, wwww, dsx, dsy, function); |
| 100 | } |
| 101 | |
| 102 | Float bias = *Pointer<Float>(sampler + OFFSET(vk::Sampler, mipLodBias)); |
| 103 | |
| 104 | if(function == Bias) |
| 105 | { |
| 106 | // Add SPIR-V Bias operand to the sampler provided bias and clamp to maxSamplerLodBias limit. |
| 107 | bias = Min(Max(bias + lodOrBias, -vk::MAX_SAMPLER_LOD_BIAS), vk::MAX_SAMPLER_LOD_BIAS); |
| 108 | } |
| 109 | |
| 110 | lod += bias; |
| 111 | } |
| 112 | else if(function == Lod) |
| 113 | { |
| 114 | // Vulkan 1.1: "The absolute value of mipLodBias must be less than or equal to VkPhysicalDeviceLimits::maxSamplerLodBias" |
| 115 | // Hence no explicit clamping to maxSamplerLodBias is required in this case. |
| 116 | lod = lodOrBias + *Pointer<Float>(sampler + OFFSET(vk::Sampler, mipLodBias)); |
| 117 | } |
| 118 | else if(function == Fetch) |
| 119 | { |
| 120 | // TODO: Eliminate int-float-int conversion. |
| 121 | lod = Float(As<Int>(lodOrBias)); |
| 122 | } |
| 123 | else if(function == Base || function == Gather) |
| 124 | { |
| 125 | lod = Float(0); |
| 126 | } |
| 127 | 		else UNREACHABLE("Sampler function %d", int(function));
| 128 | |
| 129 | if(function != Base && function != Fetch && function != Gather) |
| 130 | { |
| 131 | if(function == Query) |
| 132 | { |
| 133 | c.y = Float4(lod); // Unclamped LOD. |
| 134 | } |
| 135 | |
| 136 | lod = Max(lod, *Pointer<Float>(sampler + OFFSET(vk::Sampler, minLod))); |
| 137 | lod = Min(lod, *Pointer<Float>(sampler + OFFSET(vk::Sampler, maxLod))); |
| 138 | |
| 139 | if(function == Query) |
| 140 | { |
| 141 | if(state.mipmapFilter == MIPMAP_POINT) |
| 142 | { |
| 143 | lod = Round(lod); // TODO: Preferred formula is ceil(lod + 0.5) - 1 |
| 144 | } |
| 145 | |
| 146 | c.x = lod; |
| 147 | // c.y contains unclamped LOD. |
| 148 | |
| 149 | return c; |
| 150 | } |
| 151 | } |
| 152 | |
| 153 | bool force32BitFiltering = state.highPrecisionFiltering && !isYcbcrFormat() && (state.textureFilter != FILTER_POINT); |
| 154 | bool seamlessCube = (state.addressingModeU == ADDRESSING_SEAMLESS); |
| 155 | bool use32BitFiltering = hasFloatTexture() || hasUnnormalizedIntegerTexture() || force32BitFiltering || |
| 156 | seamlessCube || state.unnormalizedCoordinates || state.compareEnable || state.largeTexture || |
| 157 | borderModeActive() || (function == Gather); |
| 158 | |
| 159 | if(use32BitFiltering) |
| 160 | { |
| 161 | c = sampleFloatFilter(texture, uuuu, vvvv, wwww, qqqq, offset, lod, anisotropy, uDelta, vDelta, function); |
| 162 | |
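|  | 			// Components of normalized formats come back left-aligned within 16 bits, so scale them to normalized floating point here.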
| 163 | 			if(!hasFloatTexture() && !hasUnnormalizedIntegerTexture() && !state.compareEnable)
| 164 | 			{
| 165 | 				switch(state.textureFormat)
| 166 | { |
| 167 | case VK_FORMAT_R5G6B5_UNORM_PACK16: |
| 168 | c.x *= Float4(1.0f / 0xF800); |
| 169 | c.y *= Float4(1.0f / 0xFC00); |
| 170 | c.z *= Float4(1.0f / 0xF800); |
| 171 | break; |
| 172 | case VK_FORMAT_B4G4R4A4_UNORM_PACK16: |
| 173 | c.x *= Float4(1.0f / 0xF000); |
| 174 | c.y *= Float4(1.0f / 0xF000); |
| 175 | c.z *= Float4(1.0f / 0xF000); |
| 176 | c.w *= Float4(1.0f / 0xF000); |
| 177 | break; |
| 178 | case VK_FORMAT_A1R5G5B5_UNORM_PACK16: |
| 179 | c.x *= Float4(1.0f / 0xF800); |
| 180 | c.y *= Float4(1.0f / 0xF800); |
| 181 | c.z *= Float4(1.0f / 0xF800); |
| 182 | c.w *= Float4(1.0f / 0x8000); |
| 183 | break; |
| 184 | case VK_FORMAT_R8_SNORM: |
| 185 | case VK_FORMAT_R8G8_SNORM: |
| 186 | case VK_FORMAT_R8G8B8A8_SNORM: |
| 187 | case VK_FORMAT_A8B8G8R8_SNORM_PACK32: |
| 188 | c.x *= Float4(1.0f / 0x7F00); |
| 189 | c.y *= Float4(1.0f / 0x7F00); |
| 190 | c.z *= Float4(1.0f / 0x7F00); |
| 191 | c.w *= Float4(1.0f / 0x7F00); |
| 192 | break; |
| 193 | case VK_FORMAT_R8_UNORM: |
| 194 | case VK_FORMAT_R8G8_UNORM: |
| 195 | case VK_FORMAT_R8G8B8A8_UNORM: |
| 196 | case VK_FORMAT_B8G8R8A8_UNORM: |
| 197 | case VK_FORMAT_A8B8G8R8_UNORM_PACK32: |
| 198 | case VK_FORMAT_B8G8R8A8_SRGB: |
| 199 | case VK_FORMAT_R8G8B8A8_SRGB: |
| 200 | case VK_FORMAT_R8_SRGB: |
| 201 | case VK_FORMAT_R8G8_SRGB: |
| 202 | c.x *= Float4(1.0f / 0xFF00u); |
| 203 | c.y *= Float4(1.0f / 0xFF00u); |
| 204 | c.z *= Float4(1.0f / 0xFF00u); |
| 205 | c.w *= Float4(1.0f / 0xFF00u); |
| 206 | break; |
| 207 | default: |
| 208 | 					for(int component = 0; component < textureComponentCount(); component++)
| 209 | { |
| 210 | c[component] *= Float4(hasUnsignedTextureComponent(component) ? 1.0f / 0xFFFF : 1.0f / 0x7FFF); |
| 211 | } |
| 212 | } |
| 213 | } |
| 214 | } |
| 215 | else // 16-bit filtering. |
| 216 | { |
| 217 | Vector4s cs = sampleFilter(texture, uuuu, vvvv, wwww, offset, lod, anisotropy, uDelta, vDelta, function); |
| 218 | |
| 219 | 			switch(state.textureFormat)
| 220 | { |
| 221 | case VK_FORMAT_R5G6B5_UNORM_PACK16: |
| 222 | c.x = Float4(As<UShort4>(cs.x)) * Float4(1.0f / 0xF800); |
| 223 | c.y = Float4(As<UShort4>(cs.y)) * Float4(1.0f / 0xFC00); |
| 224 | c.z = Float4(As<UShort4>(cs.z)) * Float4(1.0f / 0xF800); |
| 225 | break; |
| 226 | case VK_FORMAT_B4G4R4A4_UNORM_PACK16: |
| 227 | c.x = Float4(As<UShort4>(cs.x)) * Float4(1.0f / 0xF000); |
| 228 | c.y = Float4(As<UShort4>(cs.y)) * Float4(1.0f / 0xF000); |
| 229 | c.z = Float4(As<UShort4>(cs.z)) * Float4(1.0f / 0xF000); |
| 230 | c.w = Float4(As<UShort4>(cs.w)) * Float4(1.0f / 0xF000); |
| 231 | break; |
| 232 | case VK_FORMAT_A1R5G5B5_UNORM_PACK16: |
| 233 | c.x = Float4(As<UShort4>(cs.x)) * Float4(1.0f / 0xF800); |
| 234 | c.y = Float4(As<UShort4>(cs.y)) * Float4(1.0f / 0xF800); |
| 235 | c.z = Float4(As<UShort4>(cs.z)) * Float4(1.0f / 0xF800); |
| 236 | c.w = Float4(As<UShort4>(cs.w)) * Float4(1.0f / 0x8000); |
| 237 | break; |
| 238 | case VK_FORMAT_R8_SNORM: |
| 239 | case VK_FORMAT_R8G8_SNORM: |
| 240 | case VK_FORMAT_R8G8B8A8_SNORM: |
| 241 | case VK_FORMAT_A8B8G8R8_SNORM_PACK32: |
| 242 | c.x = Float4(cs.x) * Float4(1.0f / 0x7F00); |
| 243 | c.y = Float4(cs.y) * Float4(1.0f / 0x7F00); |
| 244 | c.z = Float4(cs.z) * Float4(1.0f / 0x7F00); |
| 245 | c.w = Float4(cs.w) * Float4(1.0f / 0x7F00); |
| 246 | break; |
| 247 | case VK_FORMAT_R8_UNORM: |
| 248 | case VK_FORMAT_R8G8_UNORM: |
| 249 | case VK_FORMAT_R8G8B8A8_UNORM: |
| 250 | case VK_FORMAT_B8G8R8A8_UNORM: |
| 251 | case VK_FORMAT_A8B8G8R8_UNORM_PACK32: |
| 252 | case VK_FORMAT_B8G8R8A8_SRGB: |
| 253 | case VK_FORMAT_R8G8B8A8_SRGB: |
| 254 | case VK_FORMAT_R8_SRGB: |
| 255 | case VK_FORMAT_R8G8_SRGB: |
| 256 | c.x = Float4(As<UShort4>(cs.x)) * Float4(1.0f / 0xFF00u); |
| 257 | c.y = Float4(As<UShort4>(cs.y)) * Float4(1.0f / 0xFF00u); |
| 258 | c.z = Float4(As<UShort4>(cs.z)) * Float4(1.0f / 0xFF00u); |
| 259 | c.w = Float4(As<UShort4>(cs.w)) * Float4(1.0f / 0xFF00u); |
| 260 | break; |
| 261 | default: |
| 262 | for(int component = 0; component < textureComponentCount(); component++) |
| 263 | { |
| 264 | if(hasUnsignedTextureComponent(component)) |
| 265 | { |
| 266 | convertUnsigned16(c[component], cs[component]); |
| 267 | } |
| 268 | else |
| 269 | { |
| 270 | convertSigned15(c[component], cs[component]); |
| 271 | } |
| 272 | } |
| 273 | } |
| 274 | } |
| 275 | |
| 276 | if(state.textureFilter != FILTER_GATHER) |
| 277 | { |
| 278 | if((state.swizzle.r != VK_COMPONENT_SWIZZLE_R) || |
| 279 | (state.swizzle.g != VK_COMPONENT_SWIZZLE_G) || |
| 280 | (state.swizzle.b != VK_COMPONENT_SWIZZLE_B) || |
| 281 | (state.swizzle.a != VK_COMPONENT_SWIZZLE_A)) |
| 282 | { |
| 283 | const Vector4f col(c); |
| 284 | bool integer = hasUnnormalizedIntegerTexture(); |
| 285 | applySwizzle(state.swizzle.r, c.x, col, integer); |
| 286 | applySwizzle(state.swizzle.g, c.y, col, integer); |
| 287 | applySwizzle(state.swizzle.b, c.z, col, integer); |
| 288 | applySwizzle(state.swizzle.a, c.w, col, integer); |
| 289 | } |
| 290 | } |
| 291 | else // Gather |
| 292 | { |
| 293 | VkComponentSwizzle swizzle = gatherSwizzle(); |
| 294 | |
| 295 | // R/G/B/A swizzles affect the component collected from each texel earlier. |
| 296 | // Handle the ZERO and ONE cases here because we don't need to know the format. |
| 297 | |
| 298 | if(swizzle == VK_COMPONENT_SWIZZLE_ZERO) |
| 299 | { |
| 300 | c.x = c.y = c.z = c.w = Float4(0); |
| 301 | } |
| 302 | else if(swizzle == VK_COMPONENT_SWIZZLE_ONE) |
| 303 | { |
| 304 | bool integer = hasUnnormalizedIntegerTexture(); |
| 305 | c.x = c.y = c.z = c.w = integer ? As<Float4>(Int4(1)) : RValue<Float4>(Float4(1.0f)); |
| 306 | } |
| 307 | } |
| 308 | |
| 309 | return c; |
| 310 | } |
| 311 | |
| 312 | Short4 SamplerCore::offsetSample(Short4 &uvw, Pointer<Byte> &mipmap, int halfOffset, bool wrap, int count, Float &lod) |
| 313 | { |
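|  | 		// The offset read below is half a texel at this mip level, in the same 16-bit fixed-point space as the coordinates.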
| 314 | Short4 offset = *Pointer<Short4>(mipmap + halfOffset); |
| 315 | |
| 316 | if(state.textureFilter == FILTER_MIN_LINEAR_MAG_POINT) |
| 317 | { |
| 318 | offset &= Short4(CmpNLE(Float4(lod), Float4(0.0f))); |
| 319 | } |
| 320 | else if(state.textureFilter == FILTER_MIN_POINT_MAG_LINEAR) |
| 321 | { |
| 322 | offset &= Short4(CmpLE(Float4(lod), Float4(0.0f))); |
| 323 | } |
| 324 | |
| 325 | if(wrap) |
| 326 | { |
| 327 | switch(count) |
| 328 | { |
| 329 | case -1: return uvw - offset; |
| 330 | case 0: return uvw; |
| 331 | case +1: return uvw + offset; |
| 332 | case 2: return uvw + offset + offset; |
| 333 | } |
| 334 | } |
| 335 | else // Clamp or mirror |
| 336 | { |
| 337 | switch(count) |
| 338 | { |
| 339 | case -1: return SubSat(As<UShort4>(uvw), As<UShort4>(offset)); |
| 340 | case 0: return uvw; |
| 341 | case +1: return AddSat(As<UShort4>(uvw), As<UShort4>(offset)); |
| 342 | case 2: return AddSat(AddSat(As<UShort4>(uvw), As<UShort4>(offset)), As<UShort4>(offset)); |
| 343 | } |
| 344 | } |
| 345 | |
| 346 | return uvw; |
| 347 | } |
| 348 | |
| 349 | Vector4s SamplerCore::sampleFilter(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, Vector4f &offset, Float &lod, Float &anisotropy, Float4 &uDelta, Float4 &vDelta, SamplerFunction function) |
| 350 | { |
| 351 | Vector4s c = sampleAniso(texture, u, v, w, offset, lod, anisotropy, uDelta, vDelta, false, function); |
| 352 | |
| 353 | if(function == Fetch) |
| 354 | { |
| 355 | return c; |
| 356 | } |
| 357 | |
| 358 | if(state.mipmapFilter == MIPMAP_LINEAR) |
| 359 | { |
| 360 | Vector4s cc = sampleAniso(texture, u, v, w, offset, lod, anisotropy, uDelta, vDelta, true, function); |
| 361 | |
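|  | 			// Blend the two mip levels using the fractional part of the LOD as a 0.16 fixed-point weight.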
| 362 | lod *= Float(1 << 16); |
| 363 | |
| 364 | UShort4 utri = UShort4(Float4(lod)); // FIXME: Optimize |
| 365 | Short4 stri = utri >> 1; // FIXME: Optimize |
| 366 | |
| 367 | if(hasUnsignedTextureComponent(0)) cc.x = MulHigh(As<UShort4>(cc.x), utri); else cc.x = MulHigh(cc.x, stri); |
| 368 | if(hasUnsignedTextureComponent(1)) cc.y = MulHigh(As<UShort4>(cc.y), utri); else cc.y = MulHigh(cc.y, stri); |
| 369 | if(hasUnsignedTextureComponent(2)) cc.z = MulHigh(As<UShort4>(cc.z), utri); else cc.z = MulHigh(cc.z, stri); |
| 370 | if(hasUnsignedTextureComponent(3)) cc.w = MulHigh(As<UShort4>(cc.w), utri); else cc.w = MulHigh(cc.w, stri); |
| 371 | |
| 372 | utri = ~utri; |
| 373 | stri = Short4(0x7FFF) - stri; |
| 374 | |
| 375 | if(hasUnsignedTextureComponent(0)) c.x = MulHigh(As<UShort4>(c.x), utri); else c.x = MulHigh(c.x, stri); |
| 376 | if(hasUnsignedTextureComponent(1)) c.y = MulHigh(As<UShort4>(c.y), utri); else c.y = MulHigh(c.y, stri); |
| 377 | if(hasUnsignedTextureComponent(2)) c.z = MulHigh(As<UShort4>(c.z), utri); else c.z = MulHigh(c.z, stri); |
| 378 | if(hasUnsignedTextureComponent(3)) c.w = MulHigh(As<UShort4>(c.w), utri); else c.w = MulHigh(c.w, stri); |
| 379 | |
| 380 | c.x += cc.x; |
| 381 | c.y += cc.y; |
| 382 | c.z += cc.z; |
| 383 | c.w += cc.w; |
| 384 | |
| 385 | if(!hasUnsignedTextureComponent(0)) c.x += c.x; |
| 386 | if(!hasUnsignedTextureComponent(1)) c.y += c.y; |
| 387 | if(!hasUnsignedTextureComponent(2)) c.z += c.z; |
| 388 | if(!hasUnsignedTextureComponent(3)) c.w += c.w; |
| 389 | } |
| 390 | |
| 391 | return c; |
| 392 | } |
| 393 | |
| 394 | Vector4s SamplerCore::sampleAniso(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, Vector4f &offset, Float &lod, Float &anisotropy, Float4 &uDelta, Float4 &vDelta, bool secondLOD, SamplerFunction function) |
| 395 | { |
| 396 | Vector4s c; |
| 397 | |
| 398 | if(state.textureFilter != FILTER_ANISOTROPIC || function == Lod || function == Fetch) |
| 399 | { |
| 400 | c = sampleQuad(texture, u, v, w, offset, lod, secondLOD, function); |
| 401 | } |
| 402 | else |
| 403 | { |
| 404 | Int a = RoundInt(anisotropy); |
| 405 | |
| 406 | Vector4s cSum; |
| 407 | |
| 408 | cSum.x = Short4(0); |
| 409 | cSum.y = Short4(0); |
| 410 | cSum.z = Short4(0); |
| 411 | cSum.w = Short4(0); |
| 412 | |
| 413 | Float4 A = *Pointer<Float4>(constants + OFFSET(Constants,uvWeight) + 16 * a); |
| 414 | Float4 B = *Pointer<Float4>(constants + OFFSET(Constants,uvStart) + 16 * a); |
| 415 | UShort4 cw = *Pointer<UShort4>(constants + OFFSET(Constants,cWeight) + 8 * a); |
| 416 | Short4 sw = Short4(cw >> 1); |
| 417 | |
| 418 | Float4 du = uDelta; |
| 419 | Float4 dv = vDelta; |
| 420 | |
| 421 | Float4 u0 = u + B * du; |
| 422 | Float4 v0 = v + B * dv; |
| 423 | |
| 424 | du *= A; |
| 425 | dv *= A; |
| 426 | |
| 427 | Int i = 0; |
| 428 | |
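|  | 			// Accumulate 'a' samples along the major axis, weighted by the fixed-point weights cw (unsigned) / sw (signed).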
| 429 | Do |
| 430 | { |
| 431 | c = sampleQuad(texture, u0, v0, w, offset, lod, secondLOD, function); |
| 432 | |
| 433 | u0 += du; |
| 434 | v0 += dv; |
| 435 | |
| 436 | if(hasUnsignedTextureComponent(0)) cSum.x += As<Short4>(MulHigh(As<UShort4>(c.x), cw)); else cSum.x += MulHigh(c.x, sw); |
| 437 | if(hasUnsignedTextureComponent(1)) cSum.y += As<Short4>(MulHigh(As<UShort4>(c.y), cw)); else cSum.y += MulHigh(c.y, sw); |
| 438 | if(hasUnsignedTextureComponent(2)) cSum.z += As<Short4>(MulHigh(As<UShort4>(c.z), cw)); else cSum.z += MulHigh(c.z, sw); |
| 439 | if(hasUnsignedTextureComponent(3)) cSum.w += As<Short4>(MulHigh(As<UShort4>(c.w), cw)); else cSum.w += MulHigh(c.w, sw); |
| 440 | |
| 441 | i++; |
| 442 | } |
| 443 | Until(i >= a) |
| 444 | |
| 445 | if(hasUnsignedTextureComponent(0)) c.x = cSum.x; else c.x = AddSat(cSum.x, cSum.x); |
| 446 | if(hasUnsignedTextureComponent(1)) c.y = cSum.y; else c.y = AddSat(cSum.y, cSum.y); |
| 447 | if(hasUnsignedTextureComponent(2)) c.z = cSum.z; else c.z = AddSat(cSum.z, cSum.z); |
| 448 | if(hasUnsignedTextureComponent(3)) c.w = cSum.w; else c.w = AddSat(cSum.w, cSum.w); |
| 449 | } |
| 450 | |
| 451 | return c; |
| 452 | } |
| 453 | |
| 454 | Vector4s SamplerCore::sampleQuad(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, Vector4f &offset, Float &lod, bool secondLOD, SamplerFunction function) |
| 455 | { |
| 456 | if(state.textureType != VK_IMAGE_VIEW_TYPE_3D) |
| 457 | { |
| 458 | return sampleQuad2D(texture, u, v, w, offset, lod, secondLOD, function); |
| 459 | } |
| 460 | else |
| 461 | { |
| 462 | return sample3D(texture, u, v, w, offset, lod, secondLOD, function); |
| 463 | } |
| 464 | } |
| 465 | |
| 466 | Vector4s SamplerCore::sampleQuad2D(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, Vector4f &offset, Float &lod, bool secondLOD, SamplerFunction function) |
| 467 | { |
| 468 | Vector4s c; |
| 469 | |
| 470 | int componentCount = textureComponentCount(); |
| 471 | bool gather = (state.textureFilter == FILTER_GATHER); |
| 472 | |
| 473 | Pointer<Byte> mipmap; |
| 474 | Pointer<Byte> buffer; |
| 475 | selectMipmap(texture, mipmap, buffer, lod, secondLOD); |
| 476 | |
| 477 | bool texelFetch = (function == Fetch); |
| 478 | |
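|  | 		// Texel fetch already receives integer texel coordinates; otherwise address() converts to 16-bit fixed point and applies the addressing mode.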
| 479 | Short4 uuuu = texelFetch ? Short4(As<Int4>(u)) : address(u, state.addressingModeU, mipmap); |
| 480 | Short4 vvvv = texelFetch ? Short4(As<Int4>(v)) : address(v, state.addressingModeV, mipmap); |
| 481 | Short4 wwww = texelFetch ? Short4(As<Int4>(w)) : address(w, state.addressingModeW, mipmap); |
| 482 | |
| 483 | if(state.textureFilter == FILTER_POINT || texelFetch) |
| 484 | { |
| 485 | c = sampleTexel(uuuu, vvvv, wwww, offset, mipmap, buffer, function); |
| 486 | } |
| 487 | else |
| 488 | { |
| 489 | Short4 uuuu0 = offsetSample(uuuu, mipmap, OFFSET(Mipmap,uHalf), state.addressingModeU == ADDRESSING_WRAP, -1, lod); |
| 490 | Short4 vvvv0 = offsetSample(vvvv, mipmap, OFFSET(Mipmap,vHalf), state.addressingModeV == ADDRESSING_WRAP, -1, lod); |
| 491 | Short4 uuuu1 = offsetSample(uuuu, mipmap, OFFSET(Mipmap,uHalf), state.addressingModeU == ADDRESSING_WRAP, +1, lod); |
| 492 | Short4 vvvv1 = offsetSample(vvvv, mipmap, OFFSET(Mipmap,vHalf), state.addressingModeV == ADDRESSING_WRAP, +1, lod); |
| 493 | |
| 494 | Vector4s c00 = sampleTexel(uuuu0, vvvv0, wwww, offset, mipmap, buffer, function); |
| 495 | Vector4s c10 = sampleTexel(uuuu1, vvvv0, wwww, offset, mipmap, buffer, function); |
| 496 | Vector4s c01 = sampleTexel(uuuu0, vvvv1, wwww, offset, mipmap, buffer, function); |
| 497 | Vector4s c11 = sampleTexel(uuuu1, vvvv1, wwww, offset, mipmap, buffer, function); |
| 498 | |
| 499 | if(!gather) // Blend |
| 500 | { |
| 501 | // Fractions |
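|  | 				// The low 16 bits of coordinate * dimension hold the sub-texel fraction; ~f approximates (1 - f) in unsigned fixed point.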
| 502 | UShort4 f0u = As<UShort4>(uuuu0) * UShort4(*Pointer<Int4>(mipmap + OFFSET(Mipmap,width))); |
| 503 | UShort4 f0v = As<UShort4>(vvvv0) * UShort4(*Pointer<Int4>(mipmap + OFFSET(Mipmap,height))); |
| 504 | |
| 505 | UShort4 f1u = ~f0u; |
| 506 | UShort4 f1v = ~f0v; |
| 507 | |
| 508 | UShort4 f0u0v = MulHigh(f0u, f0v); |
| 509 | UShort4 f1u0v = MulHigh(f1u, f0v); |
| 510 | UShort4 f0u1v = MulHigh(f0u, f1v); |
| 511 | UShort4 f1u1v = MulHigh(f1u, f1v); |
| 512 | |
| 513 | // Signed fractions |
| 514 | Short4 f1u1vs; |
| 515 | Short4 f0u1vs; |
| 516 | Short4 f1u0vs; |
| 517 | Short4 f0u0vs; |
| 518 | |
| 519 | if(!hasUnsignedTextureComponent(0) || !hasUnsignedTextureComponent(1) || !hasUnsignedTextureComponent(2) || !hasUnsignedTextureComponent(3)) |
| 520 | { |
| 521 | f1u1vs = f1u1v >> 1; |
| 522 | f0u1vs = f0u1v >> 1; |
| 523 | f1u0vs = f1u0v >> 1; |
| 524 | f0u0vs = f0u0v >> 1; |
| 525 | } |
| 526 | |
| 527 | // Bilinear interpolation |
| 528 | if(componentCount >= 1) |
| 529 | { |
| 530 | if(has16bitTextureComponents() && hasUnsignedTextureComponent(0)) |
| 531 | { |
| 532 | c00.x = As<UShort4>(c00.x) - MulHigh(As<UShort4>(c00.x), f0u) + MulHigh(As<UShort4>(c10.x), f0u); |
| 533 | c01.x = As<UShort4>(c01.x) - MulHigh(As<UShort4>(c01.x), f0u) + MulHigh(As<UShort4>(c11.x), f0u); |
| 534 | c.x = As<UShort4>(c00.x) - MulHigh(As<UShort4>(c00.x), f0v) + MulHigh(As<UShort4>(c01.x), f0v); |
| 535 | } |
| 536 | else |
| 537 | { |
| 538 | if(hasUnsignedTextureComponent(0)) |
| 539 | { |
| 540 | c00.x = MulHigh(As<UShort4>(c00.x), f1u1v); |
| 541 | c10.x = MulHigh(As<UShort4>(c10.x), f0u1v); |
| 542 | c01.x = MulHigh(As<UShort4>(c01.x), f1u0v); |
| 543 | c11.x = MulHigh(As<UShort4>(c11.x), f0u0v); |
| 544 | } |
| 545 | else |
| 546 | { |
| 547 | c00.x = MulHigh(c00.x, f1u1vs); |
| 548 | c10.x = MulHigh(c10.x, f0u1vs); |
| 549 | c01.x = MulHigh(c01.x, f1u0vs); |
| 550 | c11.x = MulHigh(c11.x, f0u0vs); |
| 551 | } |
| 552 | |
| 553 | c.x = (c00.x + c10.x) + (c01.x + c11.x); |
| 554 | if(!hasUnsignedTextureComponent(0)) c.x = AddSat(c.x, c.x); // Correct for signed fractions |
| 555 | } |
| 556 | } |
| 557 | |
| 558 | if(componentCount >= 2) |
| 559 | { |
| 560 | if(has16bitTextureComponents() && hasUnsignedTextureComponent(1)) |
| 561 | { |
| 562 | c00.y = As<UShort4>(c00.y) - MulHigh(As<UShort4>(c00.y), f0u) + MulHigh(As<UShort4>(c10.y), f0u); |
| 563 | c01.y = As<UShort4>(c01.y) - MulHigh(As<UShort4>(c01.y), f0u) + MulHigh(As<UShort4>(c11.y), f0u); |
| 564 | c.y = As<UShort4>(c00.y) - MulHigh(As<UShort4>(c00.y), f0v) + MulHigh(As<UShort4>(c01.y), f0v); |
| 565 | } |
| 566 | else |
| 567 | { |
| 568 | if(hasUnsignedTextureComponent(1)) |
| 569 | { |
| 570 | c00.y = MulHigh(As<UShort4>(c00.y), f1u1v); |
| 571 | c10.y = MulHigh(As<UShort4>(c10.y), f0u1v); |
| 572 | c01.y = MulHigh(As<UShort4>(c01.y), f1u0v); |
| 573 | c11.y = MulHigh(As<UShort4>(c11.y), f0u0v); |
| 574 | } |
| 575 | else |
| 576 | { |
| 577 | c00.y = MulHigh(c00.y, f1u1vs); |
| 578 | c10.y = MulHigh(c10.y, f0u1vs); |
| 579 | c01.y = MulHigh(c01.y, f1u0vs); |
| 580 | c11.y = MulHigh(c11.y, f0u0vs); |
| 581 | } |
| 582 | |
| 583 | c.y = (c00.y + c10.y) + (c01.y + c11.y); |
| 584 | if(!hasUnsignedTextureComponent(1)) c.y = AddSat(c.y, c.y); // Correct for signed fractions |
| 585 | } |
| 586 | } |
| 587 | |
| 588 | if(componentCount >= 3) |
| 589 | { |
| 590 | if(has16bitTextureComponents() && hasUnsignedTextureComponent(2)) |
| 591 | { |
| 592 | c00.z = As<UShort4>(c00.z) - MulHigh(As<UShort4>(c00.z), f0u) + MulHigh(As<UShort4>(c10.z), f0u); |
| 593 | c01.z = As<UShort4>(c01.z) - MulHigh(As<UShort4>(c01.z), f0u) + MulHigh(As<UShort4>(c11.z), f0u); |
| 594 | c.z = As<UShort4>(c00.z) - MulHigh(As<UShort4>(c00.z), f0v) + MulHigh(As<UShort4>(c01.z), f0v); |
| 595 | } |
| 596 | else |
| 597 | { |
| 598 | if(hasUnsignedTextureComponent(2)) |
| 599 | { |
| 600 | c00.z = MulHigh(As<UShort4>(c00.z), f1u1v); |
| 601 | c10.z = MulHigh(As<UShort4>(c10.z), f0u1v); |
| 602 | c01.z = MulHigh(As<UShort4>(c01.z), f1u0v); |
| 603 | c11.z = MulHigh(As<UShort4>(c11.z), f0u0v); |
| 604 | } |
| 605 | else |
| 606 | { |
| 607 | c00.z = MulHigh(c00.z, f1u1vs); |
| 608 | c10.z = MulHigh(c10.z, f0u1vs); |
| 609 | c01.z = MulHigh(c01.z, f1u0vs); |
| 610 | c11.z = MulHigh(c11.z, f0u0vs); |
| 611 | } |
| 612 | |
| 613 | c.z = (c00.z + c10.z) + (c01.z + c11.z); |
| 614 | if(!hasUnsignedTextureComponent(2)) c.z = AddSat(c.z, c.z); // Correct for signed fractions |
| 615 | } |
| 616 | } |
| 617 | |
| 618 | if(componentCount >= 4) |
| 619 | { |
| 620 | if(has16bitTextureComponents() && hasUnsignedTextureComponent(3)) |
| 621 | { |
| 622 | c00.w = As<UShort4>(c00.w) - MulHigh(As<UShort4>(c00.w), f0u) + MulHigh(As<UShort4>(c10.w), f0u); |
| 623 | c01.w = As<UShort4>(c01.w) - MulHigh(As<UShort4>(c01.w), f0u) + MulHigh(As<UShort4>(c11.w), f0u); |
| 624 | c.w = As<UShort4>(c00.w) - MulHigh(As<UShort4>(c00.w), f0v) + MulHigh(As<UShort4>(c01.w), f0v); |
| 625 | } |
| 626 | else |
| 627 | { |
| 628 | if(hasUnsignedTextureComponent(3)) |
| 629 | { |
| 630 | c00.w = MulHigh(As<UShort4>(c00.w), f1u1v); |
| 631 | c10.w = MulHigh(As<UShort4>(c10.w), f0u1v); |
| 632 | c01.w = MulHigh(As<UShort4>(c01.w), f1u0v); |
| 633 | c11.w = MulHigh(As<UShort4>(c11.w), f0u0v); |
| 634 | } |
| 635 | else |
| 636 | { |
| 637 | c00.w = MulHigh(c00.w, f1u1vs); |
| 638 | c10.w = MulHigh(c10.w, f0u1vs); |
| 639 | c01.w = MulHigh(c01.w, f1u0vs); |
| 640 | c11.w = MulHigh(c11.w, f0u0vs); |
| 641 | } |
| 642 | |
| 643 | c.w = (c00.w + c10.w) + (c01.w + c11.w); |
| 644 | if(!hasUnsignedTextureComponent(3)) c.w = AddSat(c.w, c.w); // Correct for signed fractions |
| 645 | } |
| 646 | } |
| 647 | } |
| 648 | else // Gather |
| 649 | { |
| 650 | VkComponentSwizzle swizzle = gatherSwizzle(); |
| 651 | switch(swizzle) |
| 652 | { |
| 653 | case VK_COMPONENT_SWIZZLE_ZERO: |
| 654 | case VK_COMPONENT_SWIZZLE_ONE: |
| 655 | // Handled at the final component swizzle. |
| 656 | break; |
| 657 | default: |
| 658 | c.x = c01[swizzle - VK_COMPONENT_SWIZZLE_R]; |
| 659 | c.y = c11[swizzle - VK_COMPONENT_SWIZZLE_R]; |
| 660 | c.z = c10[swizzle - VK_COMPONENT_SWIZZLE_R]; |
| 661 | c.w = c00[swizzle - VK_COMPONENT_SWIZZLE_R]; |
| 662 | break; |
| 663 | } |
| 664 | } |
| 665 | } |
| 666 | |
| 667 | return c; |
| 668 | } |
| 669 | |
| 670 | Vector4s SamplerCore::sample3D(Pointer<Byte> &texture, Float4 &u_, Float4 &v_, Float4 &w_, Vector4f &offset, Float &lod, bool secondLOD, SamplerFunction function) |
| 671 | { |
| 672 | Vector4s c_; |
| 673 | |
| 674 | int componentCount = textureComponentCount(); |
| 675 | |
| 676 | Pointer<Byte> mipmap; |
| 677 | Pointer<Byte> buffer; |
| 678 | selectMipmap(texture, mipmap, buffer, lod, secondLOD); |
| 679 | |
| 680 | bool texelFetch = (function == Fetch); |
| 681 | |
| 682 | Short4 uuuu = texelFetch ? Short4(As<Int4>(u_)) : address(u_, state.addressingModeU, mipmap); |
| 683 | Short4 vvvv = texelFetch ? Short4(As<Int4>(v_)) : address(v_, state.addressingModeV, mipmap); |
| 684 | Short4 wwww = texelFetch ? Short4(As<Int4>(w_)) : address(w_, state.addressingModeW, mipmap); |
| 685 | |
| 686 | if(state.textureFilter == FILTER_POINT || texelFetch) |
| 687 | { |
| 688 | c_ = sampleTexel(uuuu, vvvv, wwww, offset, mipmap, buffer, function); |
| 689 | } |
| 690 | else |
| 691 | { |
| 692 | Vector4s c[2][2][2]; |
| 693 | |
| 694 | Short4 u[2][2][2]; |
| 695 | Short4 v[2][2][2]; |
| 696 | Short4 s[2][2][2]; |
| 697 | |
| 698 | for(int i = 0; i < 2; i++) |
| 699 | { |
| 700 | for(int j = 0; j < 2; j++) |
| 701 | { |
| 702 | for(int k = 0; k < 2; k++) |
| 703 | { |
| 704 | u[i][j][k] = offsetSample(uuuu, mipmap, OFFSET(Mipmap,uHalf), state.addressingModeU == ADDRESSING_WRAP, i * 2 - 1, lod); |
| 705 | v[i][j][k] = offsetSample(vvvv, mipmap, OFFSET(Mipmap,vHalf), state.addressingModeV == ADDRESSING_WRAP, j * 2 - 1, lod); |
| 706 | s[i][j][k] = offsetSample(wwww, mipmap, OFFSET(Mipmap,wHalf), state.addressingModeW == ADDRESSING_WRAP, k * 2 - 1, lod); |
| 707 | } |
| 708 | } |
| 709 | } |
| 710 | |
| 711 | // Fractions |
| 712 | UShort4 f0u = As<UShort4>(u[0][0][0]) * UShort4(*Pointer<Int4>(mipmap + OFFSET(Mipmap,width))); |
| 713 | UShort4 f0v = As<UShort4>(v[0][0][0]) * UShort4(*Pointer<Int4>(mipmap + OFFSET(Mipmap,height))); |
| 714 | UShort4 f0s = As<UShort4>(s[0][0][0]) * UShort4(*Pointer<Int4>(mipmap + OFFSET(Mipmap,depth))); |
| 715 | |
| 716 | UShort4 f1u = ~f0u; |
| 717 | UShort4 f1v = ~f0v; |
| 718 | UShort4 f1s = ~f0s; |
| 719 | |
| 720 | UShort4 f[2][2][2]; |
| 721 | Short4 fs[2][2][2]; |
| 722 | |
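|  | 			// Per-corner weight products in 0.16 fixed point; texel (i, j, k) is weighted by f[1-i][1-j][1-k] below.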
| 723 | f[1][1][1] = MulHigh(f1u, f1v); |
| 724 | f[0][1][1] = MulHigh(f0u, f1v); |
| 725 | f[1][0][1] = MulHigh(f1u, f0v); |
| 726 | f[0][0][1] = MulHigh(f0u, f0v); |
| 727 | f[1][1][0] = MulHigh(f1u, f1v); |
| 728 | f[0][1][0] = MulHigh(f0u, f1v); |
| 729 | f[1][0][0] = MulHigh(f1u, f0v); |
| 730 | f[0][0][0] = MulHigh(f0u, f0v); |
| 731 | |
| 732 | f[1][1][1] = MulHigh(f[1][1][1], f1s); |
| 733 | f[0][1][1] = MulHigh(f[0][1][1], f1s); |
| 734 | f[1][0][1] = MulHigh(f[1][0][1], f1s); |
| 735 | f[0][0][1] = MulHigh(f[0][0][1], f1s); |
| 736 | f[1][1][0] = MulHigh(f[1][1][0], f0s); |
| 737 | f[0][1][0] = MulHigh(f[0][1][0], f0s); |
| 738 | f[1][0][0] = MulHigh(f[1][0][0], f0s); |
| 739 | f[0][0][0] = MulHigh(f[0][0][0], f0s); |
| 740 | |
| 741 | // Signed fractions |
| 742 | if(!hasUnsignedTextureComponent(0) || !hasUnsignedTextureComponent(1) || !hasUnsignedTextureComponent(2) || !hasUnsignedTextureComponent(3)) |
| 743 | { |
| 744 | fs[0][0][0] = f[0][0][0] >> 1; |
| 745 | fs[0][0][1] = f[0][0][1] >> 1; |
| 746 | fs[0][1][0] = f[0][1][0] >> 1; |
| 747 | fs[0][1][1] = f[0][1][1] >> 1; |
| 748 | fs[1][0][0] = f[1][0][0] >> 1; |
| 749 | fs[1][0][1] = f[1][0][1] >> 1; |
| 750 | fs[1][1][0] = f[1][1][0] >> 1; |
| 751 | fs[1][1][1] = f[1][1][1] >> 1; |
| 752 | } |
| 753 | |
| 754 | for(int i = 0; i < 2; i++) |
| 755 | { |
| 756 | for(int j = 0; j < 2; j++) |
| 757 | { |
| 758 | for(int k = 0; k < 2; k++) |
| 759 | { |
| 760 | c[i][j][k] = sampleTexel(u[i][j][k], v[i][j][k], s[i][j][k], offset, mipmap, buffer, function); |
| 761 | |
| 762 | if(componentCount >= 1) { if(hasUnsignedTextureComponent(0)) c[i][j][k].x = MulHigh(As<UShort4>(c[i][j][k].x), f[1 - i][1 - j][1 - k]); else c[i][j][k].x = MulHigh(c[i][j][k].x, fs[1 - i][1 - j][1 - k]); } |
| 763 | if(componentCount >= 2) { if(hasUnsignedTextureComponent(1)) c[i][j][k].y = MulHigh(As<UShort4>(c[i][j][k].y), f[1 - i][1 - j][1 - k]); else c[i][j][k].y = MulHigh(c[i][j][k].y, fs[1 - i][1 - j][1 - k]); } |
| 764 | if(componentCount >= 3) { if(hasUnsignedTextureComponent(2)) c[i][j][k].z = MulHigh(As<UShort4>(c[i][j][k].z), f[1 - i][1 - j][1 - k]); else c[i][j][k].z = MulHigh(c[i][j][k].z, fs[1 - i][1 - j][1 - k]); } |
| 765 | if(componentCount >= 4) { if(hasUnsignedTextureComponent(3)) c[i][j][k].w = MulHigh(As<UShort4>(c[i][j][k].w), f[1 - i][1 - j][1 - k]); else c[i][j][k].w = MulHigh(c[i][j][k].w, fs[1 - i][1 - j][1 - k]); } |
| 766 | |
| 767 | if(i != 0 || j != 0 || k != 0) |
| 768 | { |
| 769 | if(componentCount >= 1) c[0][0][0].x += c[i][j][k].x; |
| 770 | if(componentCount >= 2) c[0][0][0].y += c[i][j][k].y; |
| 771 | if(componentCount >= 3) c[0][0][0].z += c[i][j][k].z; |
| 772 | if(componentCount >= 4) c[0][0][0].w += c[i][j][k].w; |
| 773 | } |
| 774 | } |
| 775 | } |
| 776 | } |
| 777 | |
| 778 | if(componentCount >= 1) c_.x = c[0][0][0].x; |
| 779 | if(componentCount >= 2) c_.y = c[0][0][0].y; |
| 780 | if(componentCount >= 3) c_.z = c[0][0][0].z; |
| 781 | if(componentCount >= 4) c_.w = c[0][0][0].w; |
| 782 | |
| 783 | // Correct for signed fractions |
| 784 | if(componentCount >= 1) if(!hasUnsignedTextureComponent(0)) c_.x = AddSat(c_.x, c_.x); |
| 785 | if(componentCount >= 2) if(!hasUnsignedTextureComponent(1)) c_.y = AddSat(c_.y, c_.y); |
| 786 | if(componentCount >= 3) if(!hasUnsignedTextureComponent(2)) c_.z = AddSat(c_.z, c_.z); |
| 787 | if(componentCount >= 4) if(!hasUnsignedTextureComponent(3)) c_.w = AddSat(c_.w, c_.w); |
| 788 | } |
| 789 | |
| 790 | return c_; |
| 791 | } |
| 792 | |
| 793 | Vector4f SamplerCore::sampleFloatFilter(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, Float4 &q, Vector4f &offset, Float &lod, Float &anisotropy, Float4 &uDelta, Float4 &vDelta, SamplerFunction function) |
| 794 | { |
| 795 | Vector4f c = sampleFloatAniso(texture, u, v, w, q, offset, lod, anisotropy, uDelta, vDelta, false, function); |
| 796 | |
| 797 | if(function == Fetch) |
| 798 | { |
| 799 | return c; |
| 800 | } |
| 801 | |
| 802 | if(state.mipmapFilter == MIPMAP_LINEAR) |
| 803 | { |
| 804 | Vector4f cc = sampleFloatAniso(texture, u, v, w, q, offset, lod, anisotropy, uDelta, vDelta, true, function); |
| 805 | |
| 806 | Float4 lod4 = Float4(Frac(lod)); |
| 807 | |
| 808 | c.x = (cc.x - c.x) * lod4 + c.x; |
| 809 | c.y = (cc.y - c.y) * lod4 + c.y; |
| 810 | c.z = (cc.z - c.z) * lod4 + c.z; |
| 811 | c.w = (cc.w - c.w) * lod4 + c.w; |
| 812 | } |
| 813 | |
| 814 | return c; |
| 815 | } |
| 816 | |
| 817 | Vector4f SamplerCore::sampleFloatAniso(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, Float4 &q, Vector4f &offset, Float &lod, Float &anisotropy, Float4 &uDelta, Float4 &vDelta, bool secondLOD, SamplerFunction function) |
| 818 | { |
| 819 | Vector4f c; |
| 820 | |
| 821 | if(state.textureFilter != FILTER_ANISOTROPIC || function == Lod || function == Fetch) |
| 822 | { |
| 823 | c = sampleFloat(texture, u, v, w, q, offset, lod, secondLOD, function); |
| 824 | } |
| 825 | else |
| 826 | { |
| 827 | Int a = RoundInt(anisotropy); |
| 828 | |
| 829 | Vector4f cSum; |
| 830 | |
| 831 | cSum.x = Float4(0.0f); |
| 832 | cSum.y = Float4(0.0f); |
| 833 | cSum.z = Float4(0.0f); |
| 834 | cSum.w = Float4(0.0f); |
| 835 | |
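|  | 			// Per-sample weight (A) and start offset (B) along the anisotropy axis, from tables indexed by the sample count.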
| 836 | Float4 A = *Pointer<Float4>(constants + OFFSET(Constants,uvWeight) + 16 * a); |
| 837 | Float4 B = *Pointer<Float4>(constants + OFFSET(Constants,uvStart) + 16 * a); |
| 838 | |
| 839 | Float4 du = uDelta; |
| 840 | Float4 dv = vDelta; |
| 841 | |
| 842 | Float4 u0 = u + B * du; |
| 843 | Float4 v0 = v + B * dv; |
| 844 | |
| 845 | du *= A; |
| 846 | dv *= A; |
| 847 | |
| 848 | Int i = 0; |
| 849 | |
| 850 | Do |
| 851 | { |
| 852 | c = sampleFloat(texture, u0, v0, w, q, offset, lod, secondLOD, function); |
| 853 | |
| 854 | u0 += du; |
| 855 | v0 += dv; |
| 856 | |
| 857 | cSum.x += c.x * A; |
| 858 | cSum.y += c.y * A; |
| 859 | cSum.z += c.z * A; |
| 860 | cSum.w += c.w * A; |
| 861 | |
| 862 | i++; |
| 863 | } |
| 864 | Until(i >= a) |
| 865 | |
| 866 | c.x = cSum.x; |
| 867 | c.y = cSum.y; |
| 868 | c.z = cSum.z; |
| 869 | c.w = cSum.w; |
| 870 | } |
| 871 | |
| 872 | return c; |
| 873 | } |
| 874 | |
| 875 | Vector4f SamplerCore::sampleFloat(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, Float4 &q, Vector4f &offset, Float &lod, bool secondLOD, SamplerFunction function) |
| 876 | { |
| 877 | if(state.textureType != VK_IMAGE_VIEW_TYPE_3D) |
| 878 | { |
| 879 | return sampleFloat2D(texture, u, v, w, q, offset, lod, secondLOD, function); |
| 880 | } |
| 881 | else |
| 882 | { |
| 883 | return sampleFloat3D(texture, u, v, w, offset, lod, secondLOD, function); |
| 884 | } |
| 885 | } |
| 886 | |
| 887 | Vector4f SamplerCore::sampleFloat2D(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, Float4 &q, Vector4f &offset, Float &lod, bool secondLOD, SamplerFunction function) |
| 888 | { |
| 889 | Vector4f c; |
| 890 | |
| 891 | int componentCount = textureComponentCount(); |
| 892 | bool gather = (state.textureFilter == FILTER_GATHER); |
| 893 | |
| 894 | Pointer<Byte> mipmap; |
| 895 | Pointer<Byte> buffer; |
| 896 | selectMipmap(texture, mipmap, buffer, lod, secondLOD); |
| 897 | |
| 898 | Int4 x0, x1, y0, y1, z0; |
| 899 | Float4 fu, fv, fw; |
| 900 | Int4 filter = computeFilterOffset(lod); |
| 901 | address(u, x0, x1, fu, mipmap, offset.x, filter, OFFSET(Mipmap, width), state.addressingModeU, function); |
| 902 | address(v, y0, y1, fv, mipmap, offset.y, filter, OFFSET(Mipmap, height), state.addressingModeV, function); |
| 903 | address(w, z0, z0, fw, mipmap, offset.z, filter, OFFSET(Mipmap, depth), state.addressingModeW, function); |
| 904 | |
| 905 | if(hasQuadLayout()) |
| 906 | { |
| 907 | ::applyQuadLayout(x0, y0); |
| 908 | } |
| 909 | |
| 910 | Int4 pitchP = *Pointer<Int4>(mipmap + OFFSET(Mipmap, pitchP), 16); |
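|  | 		// Pre-scale the row and slice coordinates so texel addressing reduces to adding x, y and z.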
| 911 | y0 *= pitchP; |
| 912 | if(state.addressingModeW != ADDRESSING_UNUSED) |
| 913 | { |
| 914 | z0 *= *Pointer<Int4>(mipmap + OFFSET(Mipmap, sliceP), 16); |
| 915 | } |
| 916 | |
| 917 | if(state.textureFilter == FILTER_POINT || (function == Fetch)) |
| 918 | { |
| 919 | c = sampleTexel(x0, y0, z0, q, mipmap, buffer, function); |
| 920 | } |
| 921 | else |
| 922 | { |
| 923 | if(hasQuadLayout()) |
| 924 | { |
| 925 | ::applyQuadLayout(x1, y1); |
| 926 | } |
| 927 | |
| 928 | y1 *= pitchP; |
| 929 | |
| 930 | Vector4f c00 = sampleTexel(x0, y0, z0, q, mipmap, buffer, function); |
| 931 | Vector4f c10 = sampleTexel(x1, y0, z0, q, mipmap, buffer, function); |
| 932 | Vector4f c01 = sampleTexel(x0, y1, z0, q, mipmap, buffer, function); |
| 933 | Vector4f c11 = sampleTexel(x1, y1, z0, q, mipmap, buffer, function); |
| 934 | |
| 935 | if(!gather) // Blend |
| 936 | { |
| 937 | if(componentCount >= 1) c00.x = c00.x + fu * (c10.x - c00.x); |
| 938 | if(componentCount >= 2) c00.y = c00.y + fu * (c10.y - c00.y); |
| 939 | if(componentCount >= 3) c00.z = c00.z + fu * (c10.z - c00.z); |
| 940 | if(componentCount >= 4) c00.w = c00.w + fu * (c10.w - c00.w); |
| 941 | |
| 942 | if(componentCount >= 1) c01.x = c01.x + fu * (c11.x - c01.x); |
| 943 | if(componentCount >= 2) c01.y = c01.y + fu * (c11.y - c01.y); |
| 944 | if(componentCount >= 3) c01.z = c01.z + fu * (c11.z - c01.z); |
| 945 | if(componentCount >= 4) c01.w = c01.w + fu * (c11.w - c01.w); |
| 946 | |
| 947 | if(componentCount >= 1) c.x = c00.x + fv * (c01.x - c00.x); |
| 948 | if(componentCount >= 2) c.y = c00.y + fv * (c01.y - c00.y); |
| 949 | if(componentCount >= 3) c.z = c00.z + fv * (c01.z - c00.z); |
| 950 | if(componentCount >= 4) c.w = c00.w + fv * (c01.w - c00.w); |
| 951 | } |
| 952 | else // Gather |
| 953 | { |
| 954 | VkComponentSwizzle swizzle = gatherSwizzle(); |
| 955 | switch(swizzle) |
| 956 | { |
| 957 | case VK_COMPONENT_SWIZZLE_ZERO: |
| 958 | case VK_COMPONENT_SWIZZLE_ONE: |
| 959 | // Handled at the final component swizzle. |
| 960 | break; |
| 961 | default: |
| 962 | c.x = c01[swizzle - VK_COMPONENT_SWIZZLE_R]; |
| 963 | c.y = c11[swizzle - VK_COMPONENT_SWIZZLE_R]; |
| 964 | c.z = c10[swizzle - VK_COMPONENT_SWIZZLE_R]; |
| 965 | c.w = c00[swizzle - VK_COMPONENT_SWIZZLE_R]; |
| 966 | break; |
| 967 | } |
| 968 | } |
| 969 | } |
| 970 | |
| 971 | return c; |
| 972 | } |
| 973 | |
| 974 | Vector4f SamplerCore::sampleFloat3D(Pointer<Byte> &texture, Float4 &u, Float4 &v, Float4 &w, Vector4f &offset, Float &lod, bool secondLOD, SamplerFunction function) |
| 975 | { |
| 976 | Vector4f c; |
| 977 | |
| 978 | int componentCount = textureComponentCount(); |
| 979 | |
| 980 | Pointer<Byte> mipmap; |
| 981 | Pointer<Byte> buffer; |
| 982 | selectMipmap(texture, mipmap, buffer, lod, secondLOD); |
| 983 | |
| 984 | Int4 x0, x1, y0, y1, z0, z1; |
| 985 | Float4 fu, fv, fw; |
| 986 | Int4 filter = computeFilterOffset(lod); |
| 987 | address(u, x0, x1, fu, mipmap, offset.x, filter, OFFSET(Mipmap, width), state.addressingModeU, function); |
| 988 | address(v, y0, y1, fv, mipmap, offset.y, filter, OFFSET(Mipmap, height), state.addressingModeV, function); |
| 989 | address(w, z0, z1, fw, mipmap, offset.z, filter, OFFSET(Mipmap, depth), state.addressingModeW, function); |
| 990 | |
| 991 | if(hasQuadLayout()) |
| 992 | { |
| 993 | ::applyQuadLayout(x0, y0); |
| 994 | } |
| 995 | |
| 996 | Int4 pitchP = *Pointer<Int4>(mipmap + OFFSET(Mipmap, pitchP), 16); |
| 997 | Int4 sliceP = *Pointer<Int4>(mipmap + OFFSET(Mipmap, sliceP), 16); |
| 998 | y0 *= pitchP; |
| 999 | z0 *= sliceP; |
| 1000 | |
| 1001 | if(state.textureFilter == FILTER_POINT || (function == Fetch)) |
| 1002 | { |
| 1003 | c = sampleTexel(x0, y0, z0, w, mipmap, buffer, function); |
| 1004 | } |
| 1005 | else |
| 1006 | { |
| 1007 | if(hasQuadLayout()) |
| 1008 | { |
| 1009 | ::applyQuadLayout(x1, y1); |
| 1010 | } |
| 1011 | |
| 1012 | y1 *= pitchP; |
| 1013 | z1 *= sliceP; |
| 1014 | |
| 1015 | Vector4f c000 = sampleTexel(x0, y0, z0, w, mipmap, buffer, function); |
| 1016 | Vector4f c100 = sampleTexel(x1, y0, z0, w, mipmap, buffer, function); |
| 1017 | Vector4f c010 = sampleTexel(x0, y1, z0, w, mipmap, buffer, function); |
| 1018 | Vector4f c110 = sampleTexel(x1, y1, z0, w, mipmap, buffer, function); |
| 1019 | Vector4f c001 = sampleTexel(x0, y0, z1, w, mipmap, buffer, function); |
| 1020 | Vector4f c101 = sampleTexel(x1, y0, z1, w, mipmap, buffer, function); |
| 1021 | Vector4f c011 = sampleTexel(x0, y1, z1, w, mipmap, buffer, function); |
| 1022 | Vector4f c111 = sampleTexel(x1, y1, z1, w, mipmap, buffer, function); |
| 1023 | |
| 1024 | // Blend first slice |
| 1025 | if(componentCount >= 1) c000.x = c000.x + fu * (c100.x - c000.x); |
| 1026 | if(componentCount >= 2) c000.y = c000.y + fu * (c100.y - c000.y); |
| 1027 | if(componentCount >= 3) c000.z = c000.z + fu * (c100.z - c000.z); |
| 1028 | if(componentCount >= 4) c000.w = c000.w + fu * (c100.w - c000.w); |
| 1029 | |
| 1030 | if(componentCount >= 1) c010.x = c010.x + fu * (c110.x - c010.x); |
| 1031 | if(componentCount >= 2) c010.y = c010.y + fu * (c110.y - c010.y); |
| 1032 | if(componentCount >= 3) c010.z = c010.z + fu * (c110.z - c010.z); |
| 1033 | if(componentCount >= 4) c010.w = c010.w + fu * (c110.w - c010.w); |
| 1034 | |
| 1035 | if(componentCount >= 1) c000.x = c000.x + fv * (c010.x - c000.x); |
| 1036 | if(componentCount >= 2) c000.y = c000.y + fv * (c010.y - c000.y); |
| 1037 | if(componentCount >= 3) c000.z = c000.z + fv * (c010.z - c000.z); |
| 1038 | if(componentCount >= 4) c000.w = c000.w + fv * (c010.w - c000.w); |
| 1039 | |
| 1040 | // Blend second slice |
| 1041 | if(componentCount >= 1) c001.x = c001.x + fu * (c101.x - c001.x); |
| 1042 | if(componentCount >= 2) c001.y = c001.y + fu * (c101.y - c001.y); |
| 1043 | if(componentCount >= 3) c001.z = c001.z + fu * (c101.z - c001.z); |
| 1044 | if(componentCount >= 4) c001.w = c001.w + fu * (c101.w - c001.w); |
| 1045 | |
| 1046 | if(componentCount >= 1) c011.x = c011.x + fu * (c111.x - c011.x); |
| 1047 | if(componentCount >= 2) c011.y = c011.y + fu * (c111.y - c011.y); |
| 1048 | if(componentCount >= 3) c011.z = c011.z + fu * (c111.z - c011.z); |
| 1049 | if(componentCount >= 4) c011.w = c011.w + fu * (c111.w - c011.w); |
| 1050 | |
| 1051 | if(componentCount >= 1) c001.x = c001.x + fv * (c011.x - c001.x); |
| 1052 | if(componentCount >= 2) c001.y = c001.y + fv * (c011.y - c001.y); |
| 1053 | if(componentCount >= 3) c001.z = c001.z + fv * (c011.z - c001.z); |
| 1054 | if(componentCount >= 4) c001.w = c001.w + fv * (c011.w - c001.w); |
| 1055 | |
| 1056 | // Blend slices |
| 1057 | if(componentCount >= 1) c.x = c000.x + fw * (c001.x - c000.x); |
| 1058 | if(componentCount >= 2) c.y = c000.y + fw * (c001.y - c000.y); |
| 1059 | if(componentCount >= 3) c.z = c000.z + fw * (c001.z - c000.z); |
| 1060 | if(componentCount >= 4) c.w = c000.w + fw * (c001.w - c000.w); |
| 1061 | } |
| 1062 | |
| 1063 | return c; |
| 1064 | } |
| 1065 | |
| 1066 | Float SamplerCore::log2sqrt(Float lod) |
| 1067 | { |
| 1068 | // log2(sqrt(lod)) // Equals 0.25 * log2(lod^2). |
| 1069 | lod *= lod; // Squaring doubles the exponent and produces an extra bit of precision. |
| 1070 | lod = Float(As<Int>(lod)) - Float(0x3F800000); // Interpret as integer and subtract the exponent bias. |
| 1071 | lod *= As<Float>(Int(0x33000000)); // Scale by 0.25 * 2^-23 (mantissa length). |
| 1072 | |
| 1073 | return lod; |
| 1074 | } |
| 1075 | |
| 1076 | Float SamplerCore::log2(Float lod) |
| 1077 | { |
| 1078 | lod *= lod; // Squaring doubles the exponent and produces an extra bit of precision. |
| 1079 | lod = Float(As<Int>(lod)) - Float(0x3F800000); // Interpret as integer and subtract the exponent bias. |
| 1080 | lod *= As<Float>(Int(0x33800000)); // Scale by 0.5 * 2^-23 (mantissa length). |
| 1081 | |
| 1082 | return lod; |
| 1083 | } |
| 1084 | |
| 1085 | void SamplerCore::computeLod(Pointer<Byte> &texture, Pointer<Byte> &sampler, Float &lod, Float &anisotropy, Float4 &uDelta, Float4 &vDelta, Float4 &uuuu, Float4 &vvvv, Float4 &dsx, Float4 &dsy, SamplerFunction function) |
| 1086 | { |
| 1087 | Float4 duvdxy; |
| 1088 | |
| 1089 | if(function != Grad) // Implicit |
| 1090 | { |
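|  | 			// Implicit derivatives come from differencing the 2x2 pixel quad lanes, yielding (du/dx, du/dy, dv/dx, dv/dy).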
| 1091 | duvdxy = Float4(uuuu.yz, vvvv.yz) - Float4(uuuu.xx, vvvv.xx); |
| 1092 | } |
| 1093 | else |
| 1094 | { |
| 1095 | Float4 dudxy = Float4(dsx.xx, dsy.xx); |
| 1096 | Float4 dvdxy = Float4(dsx.yy, dsy.yy); |
| 1097 | |
| 1098 | duvdxy = Float4(dudxy.xz, dvdxy.xz); |
| 1099 | } |
| 1100 | |
| 1101 | // Scale by texture dimensions. |
| 1102 | Float4 dUVdxy = duvdxy * *Pointer<Float4>(texture + OFFSET(Texture, widthWidthHeightHeight)); |
| 1103 | |
| 1104 | Float4 dUV2dxy = dUVdxy * dUVdxy; |
| 1105 | Float4 dUV2 = dUV2dxy.xy + dUV2dxy.zw; |
| 1106 | |
| 1107 | lod = Max(Float(dUV2.x), Float(dUV2.y)); // Square length of major axis |
| 1108 | |
| 1109 | if(state.textureFilter == FILTER_ANISOTROPIC) |
| 1110 | { |
| 1111 | Float det = Abs(Float(dUVdxy.x) * Float(dUVdxy.w) - Float(dUVdxy.y) * Float(dUVdxy.z)); |
| 1112 | |
| 1113 | Float4 dudx = duvdxy.xxxx; |
| 1114 | Float4 dudy = duvdxy.yyyy; |
| 1115 | Float4 dvdx = duvdxy.zzzz; |
| 1116 | Float4 dvdy = duvdxy.wwww; |
| 1117 | |
| 1118 | Int4 mask = As<Int4>(CmpNLT(dUV2.x, dUV2.y)); |
| 1119 | uDelta = As<Float4>((As<Int4>(dudx) & mask) | ((As<Int4>(dudy) & ~mask))); |
| 1120 | vDelta = As<Float4>((As<Int4>(dvdx) & mask) | ((As<Int4>(dvdy) & ~mask))); |
| 1121 | |
| 1122 | anisotropy = lod * Rcp_pp(det); |
| 1123 | anisotropy = Min(anisotropy, *Pointer<Float>(sampler + OFFSET(vk::Sampler,maxAnisotropy))); |
| 1124 | |
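|  | 			// Dividing by the squared anisotropy ratio bases the mip level on the footprint's minor axis.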
| 1125 | lod *= Rcp_pp(anisotropy * anisotropy); |
| 1126 | } |
| 1127 | |
| 1128 | lod = log2sqrt(lod); // log2(sqrt(lod)) |
| 1129 | } |
| 1130 | |
| 1131 | void SamplerCore::computeLodCube(Pointer<Byte> &texture, Pointer<Byte> &sampler, Float &lod, Float4 &u, Float4 &v, Float4 &w, Float4 &dsx, Float4 &dsy, Float4 &M, SamplerFunction function) |
| 1132 | { |
| 1133 | Float4 dudxy, dvdxy, dsdxy; |
| 1134 | |
| 1135 | if(function != Grad) // Implicit |
| 1136 | { |
| 1137 | Float4 U = u * M; |
| 1138 | Float4 V = v * M; |
| 1139 | Float4 W = w * M; |
| 1140 | |
| 1141 | dudxy = Abs(U - U.xxxx); |
| 1142 | dvdxy = Abs(V - V.xxxx); |
| 1143 | dsdxy = Abs(W - W.xxxx); |
| 1144 | } |
| 1145 | else |
| 1146 | { |
| 1147 | dudxy = Float4(dsx.xx, dsy.xx); |
| 1148 | dvdxy = Float4(dsx.yy, dsy.yy); |
| 1149 | dsdxy = Float4(dsx.zz, dsy.zz); |
| 1150 | |
| 1151 | dudxy = Abs(dudxy * Float4(M.x)); |
| 1152 | dvdxy = Abs(dvdxy * Float4(M.x)); |
| 1153 | dsdxy = Abs(dsdxy * Float4(M.x)); |
| 1154 | } |
| 1155 | |
| 1156 | // Compute the largest Manhattan distance in two dimensions. |
| 1157 | // This takes the footprint across adjacent faces into account. |
| 1158 | Float4 duvdxy = dudxy + dvdxy; |
| 1159 | Float4 dusdxy = dudxy + dsdxy; |
| 1160 | Float4 dvsdxy = dvdxy + dsdxy; |
| 1161 | |
| 1162 | dudxy = Max(Max(duvdxy, dusdxy), dvsdxy); |
| 1163 | |
| 1164 | lod = Max(Float(dudxy.y), Float(dudxy.z)); // FIXME: Max(dudxy.y, dudxy.z); |
| 1165 | |
| 1166 | // Scale by texture dimension. |
| 1167 | lod *= *Pointer<Float>(texture + OFFSET(Texture,width)); |
| 1168 | |
| 1169 | lod = log2(lod); |
| 1170 | } |
| 1171 | |
| 1172 | void SamplerCore::computeLod3D(Pointer<Byte> &texture, Pointer<Byte> &sampler, Float &lod, Float4 &uuuu, Float4 &vvvv, Float4 &wwww, Float4 &dsx, Float4 &dsy, SamplerFunction function) |
| 1173 | { |
| 1174 | Float4 dudxy, dvdxy, dsdxy; |
| 1175 | |
| 1176 | if(function != Grad) // Implicit |
| 1177 | { |
| 1178 | dudxy = uuuu - uuuu.xxxx; |
| 1179 | dvdxy = vvvv - vvvv.xxxx; |
| 1180 | dsdxy = wwww - wwww.xxxx; |
| 1181 | } |
| 1182 | else |
| 1183 | { |
| 1184 | dudxy = Float4(dsx.xx, dsy.xx); |
| 1185 | dvdxy = Float4(dsx.yy, dsy.yy); |
| 1186 | dsdxy = Float4(dsx.zz, dsy.zz); |
| 1187 | } |
| 1188 | |
| 1189 | // Scale by texture dimensions. |
| 1190 | dudxy *= *Pointer<Float4>(texture + OFFSET(Texture, width)); |
| 1191 | dvdxy *= *Pointer<Float4>(texture + OFFSET(Texture, height)); |
| 1192 | dsdxy *= *Pointer<Float4>(texture + OFFSET(Texture, depth)); |
| 1193 | |
| 1194 | dudxy *= dudxy; |
| 1195 | dvdxy *= dvdxy; |
| 1196 | dsdxy *= dsdxy; |
| 1197 | |
| 1198 | dudxy += dvdxy; |
| 1199 | dudxy += dsdxy; |
| 1200 | |
| 1201 | lod = Max(Float(dudxy.y), Float(dudxy.z)); // FIXME: Max(dudxy.y, dudxy.z); |
| 1202 | |
| 1203 | lod = log2sqrt(lod); // log2(sqrt(lod)) |
| 1204 | } |
| 1205 | |
| 1206 | Int4 SamplerCore::cubeFace(Float4 &U, Float4 &V, Float4 &x, Float4 &y, Float4 &z, Float4 &M) |
| 1207 | { |
| 1208 | // TODO: Comply with Vulkan recommendation: |
| 1209 | // Vulkan 1.1: "The rules should have as the first rule that rz wins over ry and rx, and the second rule that ry wins over rx." |
| 1210 | |
| 1211 | Int4 xn = CmpLT(x, Float4(0.0f)); // x < 0 |
| 1212 | Int4 yn = CmpLT(y, Float4(0.0f)); // y < 0 |
| 1213 | Int4 zn = CmpLT(z, Float4(0.0f)); // z < 0 |
| 1214 | |
| 1215 | Float4 absX = Abs(x); |
| 1216 | Float4 absY = Abs(y); |
| 1217 | Float4 absZ = Abs(z); |
| 1218 | |
| 1219 | Int4 xy = CmpNLE(absX, absY); // abs(x) > abs(y) |
| 1220 | Int4 yz = CmpNLE(absY, absZ); // abs(y) > abs(z) |
| 1221 | Int4 zx = CmpNLE(absZ, absX); // abs(z) > abs(x) |
| 1222 | Int4 xMajor = xy & ~zx; // abs(x) > abs(y) && abs(x) > abs(z) |
| 1223 | Int4 yMajor = yz & ~xy; // abs(y) > abs(z) && abs(y) > abs(x) |
| 1224 | Int4 zMajor = zx & ~yz; // abs(z) > abs(x) && abs(z) > abs(y) |
| 1225 | |
| 1226 | // FACE_POSITIVE_X = 000b |
| 1227 | // FACE_NEGATIVE_X = 001b |
| 1228 | // FACE_POSITIVE_Y = 010b |
| 1229 | // FACE_NEGATIVE_Y = 011b |
| 1230 | // FACE_POSITIVE_Z = 100b |
| 1231 | // FACE_NEGATIVE_Z = 101b |
| 1232 | |
| 1233 | Int yAxis = SignMask(yMajor); |
| 1234 | Int zAxis = SignMask(zMajor); |
| 1235 | |
| 1236 | Int4 n = ((xn & xMajor) | (yn & yMajor) | (zn & zMajor)) & Int4(0x80000000); |
| 1237 | Int negative = SignMask(n); |
| 1238 | |
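|  | 		// Assemble each lane's 3-bit face index from lookup tables indexed by the sign and major-axis lane masks; 'faces' holds one nibble per lane.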
| 1239 | Int faces = *Pointer<Int>(constants + OFFSET(Constants,transposeBit0) + negative * 4); |
| 1240 | faces |= *Pointer<Int>(constants + OFFSET(Constants,transposeBit1) + yAxis * 4); |
| 1241 | faces |= *Pointer<Int>(constants + OFFSET(Constants,transposeBit2) + zAxis * 4); |
| 1242 | |
| 1243 | Int4 face; |
| 1244 | face.x = faces & 0x7; |
| 1245 | face.y = (faces >> 4) & 0x7; |
| 1246 | face.z = (faces >> 8) & 0x7; |
| 1247 | face.w = (faces >> 12) & 0x7; |
| 1248 | |
| 1249 | M = Max(Max(absX, absY), Max(absZ, Float4(std::numeric_limits<float>::min()))); |
| 1250 | |
| 1251 | // U = xMajor ? (neg ^ -z) : ((zMajor & neg) ^ x) |
| 1252 | U = As<Float4>((xMajor & (n ^ As<Int4>(-z))) | (~xMajor & ((zMajor & n) ^ As<Int4>(x)))); |
| 1253 | |
| 1254 | // V = !yMajor ? -y : (n ^ z) |
| 1255 | V = As<Float4>((~yMajor & As<Int4>(-y)) | (yMajor & (n ^ As<Int4>(z)))); |
| 1256 | |
| 1257 | M = reciprocal(M) * Float4(0.5f); |
| 1258 | U = U * M + Float4(0.5f); |
| 1259 | V = V * M + Float4(0.5f); |
| 1260 | |
| 1261 | return face; |
| 1262 | } |
| 1263 | |
| 1264 | Short4 SamplerCore::applyOffset(Short4 &uvw, Float4 &offset, const Int4 &whd, AddressingMode mode) |
| 1265 | { |
| 1266 | Int4 tmp = Int4(As<UShort4>(uvw)); |
| 1267 | tmp = tmp + As<Int4>(offset); |
| 1268 | |
| 1269 | switch(mode) |
| 1270 | { |
| 1271 | case AddressingMode::ADDRESSING_WRAP: |
| 1272 | tmp = (tmp + whd * Int4(-MIN_TEXEL_OFFSET)) % whd; |
| 1273 | break; |
| 1274 | case AddressingMode::ADDRESSING_CLAMP: |
| 1275 | case AddressingMode::ADDRESSING_MIRROR: |
| 1276 | case AddressingMode::ADDRESSING_MIRRORONCE: |
| 1277 | case AddressingMode::ADDRESSING_BORDER: // FIXME: Implement and test ADDRESSING_MIRROR, ADDRESSING_MIRRORONCE, ADDRESSING_BORDER |
| 1278 | tmp = Min(Max(tmp, Int4(0)), whd - Int4(1)); |
| 1279 | break; |
| 1280 | case ADDRESSING_TEXELFETCH: |
| 1281 | break; |
| 1282 | case AddressingMode::ADDRESSING_SEAMLESS: |
| 1283 | ASSERT(false); // Cube sampling doesn't support offset. |
| 1284 | default: |
| 1285 | ASSERT(false); |
| 1286 | } |
| 1287 | |
| 1288 | return As<Short4>(UShort4(tmp)); |
| 1289 | } |
| 1290 | |
| 1291 | void SamplerCore::computeIndices(UInt index[4], Short4 uuuu, Short4 vvvv, Short4 wwww, Vector4f &offset, const Pointer<Byte> &mipmap, SamplerFunction function) |
| 1292 | { |
| 1293 | bool texelFetch = (function == Fetch); |
| 1294 | bool hasOffset = (function.offset != 0); |
| 1295 | |
| 1296 | if(!texelFetch) |
| 1297 | { |
| 1298 | uuuu = MulHigh(As<UShort4>(uuuu), UShort4(*Pointer<Int4>(mipmap + OFFSET(Mipmap, width)))); |
| 1299 | vvvv = MulHigh(As<UShort4>(vvvv), UShort4(*Pointer<Int4>(mipmap + OFFSET(Mipmap, height)))); |
| 1300 | } |
| 1301 | |
| 1302 | if(hasOffset) |
| 1303 | { |
| 1304 | uuuu = applyOffset(uuuu, offset.x, *Pointer<Int4>(mipmap + OFFSET(Mipmap, width)), |
| 1305 | texelFetch ? ADDRESSING_TEXELFETCH : state.addressingModeU); |
| 1306 | vvvv = applyOffset(vvvv, offset.y, *Pointer<Int4>(mipmap + OFFSET(Mipmap, height)), |
| 1307 | texelFetch ? ADDRESSING_TEXELFETCH : state.addressingModeV); |
| 1308 | } |
| 1309 | |
| 1310 | if(hasQuadLayout()) |
| 1311 | { |
| 1312 | ::applyQuadLayout(uuuu, vvvv); |
| 1313 | } |
| 1314 | |
| 1315 | Short4 uuu2 = uuuu; |
| 1316 | uuuu = As<Short4>(UnpackLow(uuuu, vvvv)); |
| 1317 | uuu2 = As<Short4>(UnpackHigh(uuu2, vvvv)); |
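|  | 		// MulAdd against the (1, pitchP) pairs yields the linear texel index u + v * pitchP for each sample.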
| 1318 | uuuu = As<Short4>(MulAdd(uuuu, *Pointer<Short4>(mipmap + OFFSET(Mipmap,onePitchP)))); |
| 1319 | uuu2 = As<Short4>(MulAdd(uuu2, *Pointer<Short4>(mipmap + OFFSET(Mipmap,onePitchP)))); |
| 1320 | |
| 1321 | if(hasThirdCoordinate()) |
| 1322 | { |
| 1323 | if(state.textureType == VK_IMAGE_VIEW_TYPE_3D) |
| 1324 | { |
| 1325 | if(!texelFetch) |
| 1326 | { |
| 1327 | wwww = MulHigh(As<UShort4>(wwww), UShort4(*Pointer<Int4>(mipmap + OFFSET(Mipmap, depth)))); |
| 1328 | } |
| 1329 | |
| 1330 | if(hasOffset) |
| 1331 | { |
| 1332 | wwww = applyOffset(wwww, offset.z, *Pointer<Int4>(mipmap + OFFSET(Mipmap, depth)), |
| 1333 | texelFetch ? ADDRESSING_TEXELFETCH : state.addressingModeW); |
| 1334 | } |
| 1335 | } |
| 1336 | |
| 1337 | UInt4 uv(As<UInt2>(uuuu), As<UInt2>(uuu2)); |
| 1338 | uv += As<UInt4>(Int4(As<UShort4>(wwww))) * *Pointer<UInt4>(mipmap + OFFSET(Mipmap, sliceP)); |
| 1339 | |
| 1340 | index[0] = Extract(As<Int4>(uv), 0); |
| 1341 | index[1] = Extract(As<Int4>(uv), 1); |
| 1342 | index[2] = Extract(As<Int4>(uv), 2); |
| 1343 | index[3] = Extract(As<Int4>(uv), 3); |
| 1344 | } |
| 1345 | else |
| 1346 | { |
| 1347 | index[0] = Extract(As<Int2>(uuuu), 0); |
| 1348 | index[1] = Extract(As<Int2>(uuuu), 1); |
| 1349 | index[2] = Extract(As<Int2>(uuu2), 0); |
| 1350 | index[3] = Extract(As<Int2>(uuu2), 1); |
| 1351 | } |
| 1352 | |
| 1353 | if(texelFetch) |
| 1354 | { |
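|  | 			// Clamp texel-fetch indices to the mip's addressable range so out-of-bounds fetches stay within the buffer.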
| 1355 | Int size = *Pointer<Int>(mipmap + OFFSET(Mipmap, sliceP)); |
| 1356 | if(hasThirdCoordinate()) |
| 1357 | { |
| 1358 | size *= *Pointer<Int>(mipmap + OFFSET(Mipmap, depth)); |
| 1359 | } |
| 1360 | UInt min = 0; |
| 1361 | UInt max = size - 1; |
| 1362 | |
| 1363 | for(int i = 0; i < 4; i++) |
| 1364 | { |
| 1365 | index[i] = Min(Max(index[i], min), max); |
| 1366 | } |
| 1367 | } |
| 1368 | } |
| 1369 | |
| 1370 | void SamplerCore::computeIndices(UInt index[4], Int4 uuuu, Int4 vvvv, Int4 wwww, Int4 valid, const Pointer<Byte> &mipmap, SamplerFunction function) |
| 1371 | { |
| 1372 | UInt4 indices = uuuu + vvvv; |
| 1373 | |
| 1374 | if(state.addressingModeW != ADDRESSING_UNUSED) |
| 1375 | { |
| 1376 | indices += As<UInt4>(wwww); |
| 1377 | } |
| 1378 | |
| 1379 | if(borderModeActive()) |
| 1380 | { |
| 1381 | // Texels out of range are still sampled before being replaced |
| 1382 | // with the border color, so sample them at linear index 0. |
| 1383 | indices &= As<UInt4>(valid); |
| 1384 | } |
| 1385 | |
| 1386 | for(int i = 0; i < 4; i++) |
| 1387 | { |
| 1388 | index[i] = Extract(As<Int4>(indices), i); |
| 1389 | } |
| 1390 | } |
| 1391 | |
| 1392 | Vector4s SamplerCore::sampleTexel(UInt index[4], Pointer<Byte> buffer) |
| 1393 | { |
| 1394 | Vector4s c; |
| 1395 | |
| 1396 | if(has16bitTextureFormat()) |
| 1397 | { |
| 1398 | c.x = Insert(c.x, Pointer<Short>(buffer)[index[0]], 0); |
| 1399 | c.x = Insert(c.x, Pointer<Short>(buffer)[index[1]], 1); |
| 1400 | c.x = Insert(c.x, Pointer<Short>(buffer)[index[2]], 2); |
| 1401 | c.x = Insert(c.x, Pointer<Short>(buffer)[index[3]], 3); |
| 1402 | |
| 1403 | switch(state.textureFormat) |
| 1404 | { |
| 1405 | case VK_FORMAT_R5G6B5_UNORM_PACK16: |
| 1406 | c.z = (c.x & Short4(0x001Fu)) << 11; |
| 1407 | c.y = (c.x & Short4(0x07E0u)) << 5; |
| 1408 | c.x = (c.x & Short4(0xF800u)); |
| 1409 | break; |
| 1410 | case VK_FORMAT_B4G4R4A4_UNORM_PACK16: |
| 1411 | c.w = (c.x << 12) & Short4(0xF000u); |
| 1412 | c.z = (c.x) & Short4(0xF000u); |
| 1413 | c.y = (c.x << 4) & Short4(0xF000u); |
| 1414 | c.x = (c.x << 8) & Short4(0xF000u); |
| 1415 | break; |
| 1416 | case VK_FORMAT_A1R5G5B5_UNORM_PACK16: |
| 1417 | c.w = (c.x) & Short4(0x8000u); |
| 1418 | c.z = (c.x << 11) & Short4(0xF800u); |
| 1419 | c.y = (c.x << 6) & Short4(0xF800u); |
| 1420 | c.x = (c.x << 1) & Short4(0xF800u); |
| 1421 | break; |
| 1422 | default: |
| 1423 | ASSERT(false); |
| 1424 | } |
| 1425 | } |
| 1426 | else if(has8bitTextureComponents()) |
| 1427 | { |
| 1428 | switch(textureComponentCount()) |
| 1429 | { |
| 1430 | case 4: |
| 1431 | { |
| 1432 | Byte4 c0 = Pointer<Byte4>(buffer)[index[0]]; |
| 1433 | Byte4 c1 = Pointer<Byte4>(buffer)[index[1]]; |
| 1434 | Byte4 c2 = Pointer<Byte4>(buffer)[index[2]]; |
| 1435 | Byte4 c3 = Pointer<Byte4>(buffer)[index[3]]; |
| 1436 | c.x = Unpack(c0, c1); |
| 1437 | c.y = Unpack(c2, c3); |
| 1438 | |
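// c.x and c.y now hold the byte-interleaved texel pairs; the unpack sequences below transpose them
// into per-component vectors, placing each 8-bit value in the high byte of its 16-bit lane
// (8.8 fixed point) for normalized formats, or in the low byte for UINT. SINT is placed high and
// then arithmetically shifted down to sign-extend.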
| 1439 | switch(state.textureFormat) |
| 1440 | { |
| 1441 | case VK_FORMAT_B8G8R8A8_UNORM: |
| 1442 | case VK_FORMAT_B8G8R8A8_SRGB: |
| 1443 | c.z = As<Short4>(UnpackLow(c.x, c.y)); |
| 1444 | c.x = As<Short4>(UnpackHigh(c.x, c.y)); |
| 1445 | c.y = c.z; |
| 1446 | c.w = c.x; |
| 1447 | c.z = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.z)); |
| 1448 | c.y = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.y)); |
| 1449 | c.x = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.x)); |
| 1450 | c.w = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.w)); |
| 1451 | break; |
| 1452 | case VK_FORMAT_R8G8B8A8_UNORM: |
| 1453 | case VK_FORMAT_R8G8B8A8_SINT: |
| 1454 | case VK_FORMAT_R8G8B8A8_SNORM: |
| 1455 | case VK_FORMAT_R8G8B8A8_SRGB: |
| 1456 | c.z = As<Short4>(UnpackHigh(c.x, c.y)); |
| 1457 | c.x = As<Short4>(UnpackLow(c.x, c.y)); |
| 1458 | c.y = c.x; |
| 1459 | c.w = c.z; |
| 1460 | c.x = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.x)); |
| 1461 | c.y = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.y)); |
| 1462 | c.z = UnpackLow(As<Byte8>(Short4(0)), As<Byte8>(c.z)); |
| 1463 | c.w = UnpackHigh(As<Byte8>(Short4(0)), As<Byte8>(c.w)); |
| 1464 | // Propagate sign bit |
| 1465 | if(state.textureFormat == VK_FORMAT_R8G8B8A8_SINT) |
| 1466 | { |
| 1467 | c.x >>= 8; |
| 1468 | c.y >>= 8; |
| 1469 | c.z >>= 8; |
| 1470 | c.w >>= 8; |
| 1471 | } |
| 1472 | break; |
| 1473 | case VK_FORMAT_R8G8B8A8_UINT: |
| 1474 | c.z = As<Short4>(UnpackHigh(c.x, c.y)); |
| 1475 | c.x = As<Short4>(UnpackLow(c.x, c.y)); |
| 1476 | c.y = c.x; |
| 1477 | c.w = c.z; |
| 1478 | c.x = UnpackLow(As<Byte8>(c.x), As<Byte8>(Short4(0))); |
| 1479 | c.y = UnpackHigh(As<Byte8>(c.y), As<Byte8>(Short4(0))); |
| 1480 | c.z = UnpackLow(As<Byte8>(c.z), As<Byte8>(Short4(0))); |
| 1481 | c.w = UnpackHigh(As<Byte8>(c.w), As<Byte8>(Short4(0))); |
| 1482 | break; |
| 1483 | default: |
| 1484 | ASSERT(false); |
| 1485 | } |
| 1486 | } |
| 1487 | break; |
| 1488 | case 2: |
| 1489 | c.x = Insert(c.x, Pointer<Short>(buffer)[index[0]], 0); |
| 1490 | c.x = Insert(c.x, Pointer<Short>(buffer)[index[1]], 1); |
| 1491 | c.x = Insert(c.x, Pointer<Short>(buffer)[index[2]], 2); |
| 1492 | c.x = Insert(c.x, Pointer<Short>(buffer)[index[3]], 3); |
| 1493 | |
| 1494 | switch(state.textureFormat) |
| 1495 | { |
| 1496 | case VK_FORMAT_R8G8_UNORM: |
| 1497 | case VK_FORMAT_R8G8_SNORM: |
| 1498 | case VK_FORMAT_R8G8_SRGB: |
| 1499 | c.y = (c.x & Short4(0xFF00u)); |
| 1500 | c.x = (c.x << 8); |
| 1501 | break; |
| 1502 | case VK_FORMAT_R8G8_SINT: |
| 1503 | c.y = c.x >> 8; |
| 1504 | c.x = (c.x << 8) >> 8; // Propagate sign bit |
| 1505 | break; |
| 1506 | case VK_FORMAT_R8G8_UINT: |
| 1507 | c.y = As<Short4>(As<UShort4>(c.x) >> 8); |
| 1508 | c.x &= Short4(0x00FFu); |
| 1509 | break; |
| 1510 | default: |
| 1511 | ASSERT(false); |
| 1512 | } |
| 1513 | break; |
| 1514 | case 1: |
| 1515 | { |
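// Gather the four single-byte texels into one 32-bit word; Unpack below expands each byte to a
// 16-bit lane (zero-extended or replicated into 8.8 fixed point, depending on the format).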
| 1516 | Int c0 = Int(*Pointer<Byte>(buffer + index[0])); |
| 1517 | Int c1 = Int(*Pointer<Byte>(buffer + index[1])); |
| 1518 | Int c2 = Int(*Pointer<Byte>(buffer + index[2])); |
| 1519 | Int c3 = Int(*Pointer<Byte>(buffer + index[3])); |
| 1520 | c0 = c0 | (c1 << 8) | (c2 << 16) | (c3 << 24); |
| 1521 | |
| 1522 | switch(state.textureFormat) |
| 1523 | { |
| 1524 | case VK_FORMAT_R8_SINT: |
| 1525 | case VK_FORMAT_R8_UINT: |
| 1526 | case VK_FORMAT_S8_UINT: |
| 1527 | { |
| 1528 | Int zero(0); |
| 1529 | c.x = Unpack(As<Byte4>(c0), As<Byte4>(zero)); |
| 1530 | // Propagate sign bit |
| 1531 | if(state.textureFormat == VK_FORMAT_R8_SINT) |
| 1532 | { |
| 1533 | c.x = (c.x << 8) >> 8; |
| 1534 | } |
| 1535 | } |
| 1536 | break; |
| 1537 | case VK_FORMAT_R8_SNORM: |
| 1538 | case VK_FORMAT_R8_UNORM: |
| 1539 | case VK_FORMAT_R8_SRGB: |
| 1540 | // TODO: avoid populating the low bits at all. |
| 1541 | c.x = Unpack(As<Byte4>(c0)); |
| 1542 | c.x &= Short4(0xFF00u); |
| 1543 | break; |
| 1544 | default: |
| 1545 | c.x = Unpack(As<Byte4>(c0)); |
| 1546 | break; |
| 1547 | } |
| 1548 | } |
| 1549 | break; |
| 1550 | default: |
| 1551 | ASSERT(false); |
| 1552 | } |
| 1553 | } |
| 1554 | else if(has16bitTextureComponents()) |
| 1555 | { |
| 1556 | switch(textureComponentCount()) |
| 1557 | { |
| 1558 | case 4: |
| 1559 | c.x = Pointer<Short4>(buffer)[index[0]]; |
| 1560 | c.y = Pointer<Short4>(buffer)[index[1]]; |
| 1561 | c.z = Pointer<Short4>(buffer)[index[2]]; |
| 1562 | c.w = Pointer<Short4>(buffer)[index[3]]; |
| 1563 | transpose4x4(c.x, c.y, c.z, c.w); |
| 1564 | break; |
| 1565 | case 3: |
| 1566 | c.x = Pointer<Short4>(buffer)[index[0]]; |
| 1567 | c.y = Pointer<Short4>(buffer)[index[1]]; |
| 1568 | c.z = Pointer<Short4>(buffer)[index[2]]; |
| 1569 | c.w = Pointer<Short4>(buffer)[index[3]]; |
| 1570 | transpose4x3(c.x, c.y, c.z, c.w); |
| 1571 | break; |
| 1572 | case 2: |
| 1573 | c.x = *Pointer<Short4>(buffer + 4 * index[0]); |
| 1574 | c.x = As<Short4>(UnpackLow(c.x, *Pointer<Short4>(buffer + 4 * index[1]))); |
| 1575 | c.z = *Pointer<Short4>(buffer + 4 * index[2]); |
| 1576 | c.z = As<Short4>(UnpackLow(c.z, *Pointer<Short4>(buffer + 4 * index[3]))); |
| 1577 | c.y = c.x; |
| 1578 | c.x = UnpackLow(As<Int2>(c.x), As<Int2>(c.z)); |
| 1579 | c.y = UnpackHigh(As<Int2>(c.y), As<Int2>(c.z)); |
| 1580 | break; |
| 1581 | case 1: |
| 1582 | c.x = Insert(c.x, Pointer<Short>(buffer)[index[0]], 0); |
| 1583 | c.x = Insert(c.x, Pointer<Short>(buffer)[index[1]], 1); |
| 1584 | c.x = Insert(c.x, Pointer<Short>(buffer)[index[2]], 2); |
| 1585 | c.x = Insert(c.x, Pointer<Short>(buffer)[index[3]], 3); |
| 1586 | break; |
| 1587 | default: |
| 1588 | ASSERT(false); |
| 1589 | } |
| 1590 | } |
| 1591 | else if(state.textureFormat == VK_FORMAT_A2B10G10R10_UNORM_PACK32) |
| 1592 | { |
| 1593 | Int4 cc; |
| 1594 | cc = Insert(cc, Pointer<Int>(buffer)[index[0]], 0); |
| 1595 | cc = Insert(cc, Pointer<Int>(buffer)[index[1]], 1); |
| 1596 | cc = Insert(cc, Pointer<Int>(buffer)[index[2]], 2); |
| 1597 | cc = Insert(cc, Pointer<Int>(buffer)[index[3]], 3); |
| 1598 | |
// Shift each 10-bit field into the top of its 16-bit component and replicate the field's 6 high bits into the bottom 6.
| 1600 | c.x = Short4(((cc << 6) & Int4(0xFFC0)) | ((cc >> 4) & Int4(0x3F))); |
| 1601 | c.y = Short4(((cc >> 4) & Int4(0xFFC0)) | ((cc >> 14) & Int4(0x3F))); |
| 1602 | c.z = Short4(((cc >> 14) & Int4(0xFFC0)) | ((cc >> 24) & Int4(0x3F))); |
| 1603 | c.w = Short4(((cc >> 16) & Int4(0xC000))); |
| 1604 | |
// Replicate the 2-bit alpha component across the full 16 bits.
| 1606 | c.w |= (c.w >> 8) & Short4(0xc0); |
| 1607 | c.w |= (c.w >> 4) & Short4(0x0c0c); |
| 1608 | c.w |= (c.w >> 2) & Short4(0x3333); |
| 1609 | } |
| 1610 | else if(state.textureFormat == VK_FORMAT_A2B10G10R10_UINT_PACK32) |
| 1611 | { |
| 1612 | Int4 cc; |
| 1613 | cc = Insert(cc, Pointer<Int>(buffer)[index[0]], 0); |
| 1614 | cc = Insert(cc, Pointer<Int>(buffer)[index[1]], 1); |
| 1615 | cc = Insert(cc, Pointer<Int>(buffer)[index[2]], 2); |
| 1616 | cc = Insert(cc, Pointer<Int>(buffer)[index[3]], 3); |
| 1617 | |
| 1618 | c.x = Short4(((cc) & Int4(0x3FF))); |
| 1619 | c.y = Short4(((cc >> 10) & Int4(0x3FF))); |
| 1620 | c.z = Short4(((cc >> 20) & Int4(0x3FF))); |
| 1621 | c.w = Short4(((cc >> 30) & Int4(0x3))); |
| 1622 | } |
| 1623 | else ASSERT(false); |
| 1624 | |
if(state.textureFormat.isSRGBformat())
| 1626 | { |
| 1627 | for(int i = 0; i < textureComponentCount(); i++) |
| 1628 | { |
| 1629 | if(isRGBComponent(i)) |
| 1630 | { |
| 1631 | sRGBtoLinear16_8_16(c[i]); |
| 1632 | } |
| 1633 | } |
| 1634 | } |
| 1635 | |
| 1636 | return c; |
| 1637 | } |
| 1638 | |
| 1639 | Vector4s SamplerCore::sampleTexel(Short4 &uuuu, Short4 &vvvv, Short4 &wwww, Vector4f &offset, Pointer<Byte> &mipmap, Pointer<Byte> buffer, SamplerFunction function) |
| 1640 | { |
| 1641 | Vector4s c; |
| 1642 | |
| 1643 | UInt index[4]; |
| 1644 | computeIndices(index, uuuu, vvvv, wwww, offset, mipmap, function); |
| 1645 | |
| 1646 | if(isYcbcrFormat()) |
| 1647 | { |
| 1648 | // Pointers to the planes of YCbCr images are stored in consecutive mipmap levels. |
| 1649 | Pointer<Byte> bufferY = buffer; // *Pointer<Pointer<Byte>>(mipmap + 0 * sizeof(Mipmap) + OFFSET(Mipmap, buffer)); |
| 1650 | Pointer<Byte> bufferU = *Pointer<Pointer<Byte>>(mipmap + 1 * sizeof(Mipmap) + OFFSET(Mipmap, buffer)); // U/V for 2-plane interleaved formats. |
| 1651 | Pointer<Byte> bufferV = *Pointer<Pointer<Byte>>(mipmap + 2 * sizeof(Mipmap) + OFFSET(Mipmap, buffer)); |
| 1652 | |
| 1653 | // Luminance |
| 1654 | Int c0 = Int(bufferY[index[0]]); |
| 1655 | Int c1 = Int(bufferY[index[1]]); |
| 1656 | Int c2 = Int(bufferY[index[2]]); |
| 1657 | Int c3 = Int(bufferY[index[3]]); |
| 1658 | c0 = c0 | (c1 << 8) | (c2 << 16) | (c3 << 24); |
| 1659 | UShort4 Y = As<UShort4>(Unpack(As<Byte4>(c0))); |
| 1660 | |
| 1661 | UShort4 Cb, Cr; |
| 1662 | |
| 1663 | // Chroma |
| 1664 | { |
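// Chroma is subsampled relative to luma, so recompute the indices using the chroma plane's
// descriptor (stored as the next Mipmap entry, as noted above).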
| 1665 | computeIndices(index, uuuu, vvvv, wwww, offset, mipmap + sizeof(Mipmap), function); |
| 1666 | UShort4 U, V; |
| 1667 | |
| 1668 | if(state.textureFormat == VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM) |
| 1669 | { |
| 1670 | c0 = Int(bufferU[index[0]]); |
| 1671 | c1 = Int(bufferU[index[1]]); |
| 1672 | c2 = Int(bufferU[index[2]]); |
| 1673 | c3 = Int(bufferU[index[3]]); |
| 1674 | c0 = c0 | (c1 << 8) | (c2 << 16) | (c3 << 24); |
| 1675 | U = As<UShort4>(Unpack(As<Byte4>(c0))); |
| 1676 | |
| 1677 | c0 = Int(bufferV[index[0]]); |
| 1678 | c1 = Int(bufferV[index[1]]); |
| 1679 | c2 = Int(bufferV[index[2]]); |
| 1680 | c3 = Int(bufferV[index[3]]); |
| 1681 | c0 = c0 | (c1 << 8) | (c2 << 16) | (c3 << 24); |
| 1682 | V = As<UShort4>(Unpack(As<Byte4>(c0))); |
| 1683 | } |
| 1684 | else if(state.textureFormat == VK_FORMAT_G8_B8R8_2PLANE_420_UNORM) |
| 1685 | { |
| 1686 | Short4 UV; |
| 1687 | UV = Insert(UV, Pointer<Short>(bufferU)[index[0]], 0); // TODO: Insert(UShort4, UShort) |
| 1688 | UV = Insert(UV, Pointer<Short>(bufferU)[index[1]], 1); |
| 1689 | UV = Insert(UV, Pointer<Short>(bufferU)[index[2]], 2); |
| 1690 | UV = Insert(UV, Pointer<Short>(bufferU)[index[3]], 3); |
| 1691 | U = (UV & Short4(0x00FFu)) | (UV << 8); |
| 1692 | V = (UV & Short4(0xFF00u)) | As<Short4>(As<UShort4>(UV) >> 8); |
| 1693 | } |
else UNSUPPORTED("state.textureFormat %d", (int)state.textureFormat);
| 1695 | |
| 1696 | if(!state.swappedChroma) |
| 1697 | { |
| 1698 | Cb = U; |
| 1699 | Cr = V; |
| 1700 | } |
| 1701 | else |
| 1702 | { |
| 1703 | Cb = V; |
| 1704 | Cr = U; |
| 1705 | } |
| 1706 | } |
| 1707 | |
| 1708 | if(state.ycbcrModel == VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY) |
| 1709 | { |
| 1710 | // YCbCr formats are treated as signed 15-bit. |
| 1711 | c.x = Cr >> 1; |
| 1712 | c.y = Y >> 1; |
| 1713 | c.z = Cb >> 1; |
| 1714 | } |
| 1715 | else |
| 1716 | { |
| 1717 | // Scaling and bias for studio-swing range: Y = [16 .. 235], U/V = [16 .. 240] |
| 1718 | // Scale down by 0x0101 to normalize the 8.8 samples, and up by 0x7FFF for signed 15-bit output. |
| 1719 | float yOffset = static_cast<float>(state.studioSwing ? 16 * 0x0101 : 0); |
| 1720 | float uvOffset = static_cast<float>(128 * 0x0101); |
| 1721 | float yFactor = static_cast<float>(0x7FFF) / static_cast<float>(state.studioSwing ? 219 * 0x0101 : 255 * 0x0101); |
| 1722 | float uvFactor = static_cast<float>(0x7FFF) / static_cast<float>(state.studioSwing ? 224 * 0x0101 : 255 * 0x0101); |
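// Example: with studio swing, Y = 235 (stored as 235 * 0x0101 in 8.8) maps to
// (235 - 16) * 0x0101 * yFactor = 0x7FFF exactly, and Y = 16 maps to 0.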
| 1723 | |
| 1724 | Float4 y = (Float4(Y) - Float4(yOffset)) * Float4(yFactor); |
| 1725 | Float4 u = (Float4(Cb) - Float4(uvOffset)) * Float4(uvFactor); |
| 1726 | Float4 v = (Float4(Cr) - Float4(uvOffset)) * Float4(uvFactor); |
| 1727 | |
| 1728 | if(state.ycbcrModel == VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY) |
| 1729 | { |
| 1730 | c.x = Short4(v); |
| 1731 | c.y = Short4(y); |
| 1732 | c.z = Short4(u); |
| 1733 | } |
| 1734 | else |
| 1735 | { |
| 1736 | // Generic YCbCr to RGB transformation: |
| 1737 | // R = Y + 2 * (1 - Kr) * Cr |
| 1738 | // G = Y - 2 * Kb * (1 - Kb) / Kg * Cb - 2 * Kr * (1 - Kr) / Kg * Cr |
| 1739 | // B = Y + 2 * (1 - Kb) * Cb |
| 1740 | |
| 1741 | float Kb = 0.114f; |
| 1742 | float Kr = 0.299f; |
| 1743 | |
| 1744 | switch(state.ycbcrModel) |
| 1745 | { |
| 1746 | case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709: |
| 1747 | Kb = 0.0722f; |
| 1748 | Kr = 0.2126f; |
| 1749 | break; |
| 1750 | case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601: |
| 1751 | Kb = 0.114f; |
| 1752 | Kr = 0.299f; |
| 1753 | break; |
| 1754 | case VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020: |
| 1755 | Kb = 0.0593f; |
| 1756 | Kr = 0.2627f; |
| 1757 | break; |
| 1758 | default: |
UNSUPPORTED("ycbcrModel %d", int(state.ycbcrModel));
| 1760 | } |
| 1761 | |
| 1762 | const float Kg = 1.0f - Kr - Kb; |
| 1763 | |
| 1764 | const float Rr = 2 * (1 - Kr); |
| 1765 | const float Gb = -2 * Kb * (1 - Kb) / Kg; |
| 1766 | const float Gr = -2 * Kr * (1 - Kr) / Kg; |
| 1767 | const float Bb = 2 * (1 - Kb); |
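// For BT.601 (Kb = 0.114, Kr = 0.299) these evaluate to the familiar coefficients
// Rr ~ 1.402, Gb ~ -0.344, Gr ~ -0.714, Bb ~ 1.772.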
| 1768 | |
| 1769 | Float4 r = y + Float4(Rr) * v; |
| 1770 | Float4 g = y + Float4(Gb) * u + Float4(Gr) * v; |
Float4 b = y + Float4(Bb) * u;
| 1772 | |
| 1773 | c.x = Short4(r); |
| 1774 | c.y = Short4(g); |
| 1775 | c.z = Short4(b); |
| 1776 | } |
| 1777 | } |
| 1778 | } |
| 1779 | else |
| 1780 | { |
| 1781 | return sampleTexel(index, buffer); |
| 1782 | } |
| 1783 | |
| 1784 | return c; |
| 1785 | } |
| 1786 | |
| 1787 | Vector4f SamplerCore::sampleTexel(Int4 &uuuu, Int4 &vvvv, Int4 &wwww, Float4 &z, Pointer<Byte> &mipmap, Pointer<Byte> buffer, SamplerFunction function) |
| 1788 | { |
| 1789 | Int4 valid; |
| 1790 | |
| 1791 | if(borderModeActive()) |
| 1792 | { |
// Valid texels have non-negative coordinates (out-of-range coordinates were set to -1 by the addressing step).
| 1794 | Int4 negative = Int4(0); |
| 1795 | if(state.addressingModeU == ADDRESSING_BORDER) negative |= uuuu; |
| 1796 | if(state.addressingModeV == ADDRESSING_BORDER) negative |= vvvv; |
| 1797 | if(state.addressingModeW == ADDRESSING_BORDER) negative |= wwww; |
| 1798 | valid = CmpNLT(negative, Int4(0)); |
| 1799 | } |
| 1800 | |
| 1801 | UInt index[4]; |
| 1802 | UInt4 t0, t1, t2, t3; |
| 1803 | computeIndices(index, uuuu, vvvv, wwww, valid, mipmap, function); |
| 1804 | |
| 1805 | Vector4f c; |
| 1806 | |
| 1807 | if(hasFloatTexture() || has32bitIntegerTextureComponents()) |
| 1808 | { |
switch(state.textureFormat)
| 1810 | { |
| 1811 | case VK_FORMAT_R16_SFLOAT: |
| 1812 | t0 = Int4(*Pointer<UShort4>(buffer + index[0] * 2)); |
| 1813 | t1 = Int4(*Pointer<UShort4>(buffer + index[1] * 2)); |
| 1814 | t2 = Int4(*Pointer<UShort4>(buffer + index[2] * 2)); |
| 1815 | t3 = Int4(*Pointer<UShort4>(buffer + index[3] * 2)); |
| 1816 | |
| 1817 | c.x.x = Extract(As<Float4>(halfToFloatBits(t0)), 0); |
| 1818 | c.x.y = Extract(As<Float4>(halfToFloatBits(t1)), 0); |
| 1819 | c.x.z = Extract(As<Float4>(halfToFloatBits(t2)), 0); |
| 1820 | c.x.w = Extract(As<Float4>(halfToFloatBits(t3)), 0); |
| 1821 | break; |
| 1822 | case VK_FORMAT_R16G16_SFLOAT: |
| 1823 | t0 = Int4(*Pointer<UShort4>(buffer + index[0] * 4)); |
| 1824 | t1 = Int4(*Pointer<UShort4>(buffer + index[1] * 4)); |
| 1825 | t2 = Int4(*Pointer<UShort4>(buffer + index[2] * 4)); |
| 1826 | t3 = Int4(*Pointer<UShort4>(buffer + index[3] * 4)); |
| 1827 | |
| 1828 | // FIXME: shuffles |
| 1829 | c.x = As<Float4>(halfToFloatBits(t0)); |
| 1830 | c.y = As<Float4>(halfToFloatBits(t1)); |
| 1831 | c.z = As<Float4>(halfToFloatBits(t2)); |
| 1832 | c.w = As<Float4>(halfToFloatBits(t3)); |
| 1833 | transpose4x4(c.x, c.y, c.z, c.w); |
| 1834 | break; |
| 1835 | case VK_FORMAT_R16G16B16A16_SFLOAT: |
| 1836 | t0 = Int4(*Pointer<UShort4>(buffer + index[0] * 8)); |
| 1837 | t1 = Int4(*Pointer<UShort4>(buffer + index[1] * 8)); |
| 1838 | t2 = Int4(*Pointer<UShort4>(buffer + index[2] * 8)); |
| 1839 | t3 = Int4(*Pointer<UShort4>(buffer + index[3] * 8)); |
| 1840 | |
| 1841 | c.x = As<Float4>(halfToFloatBits(t0)); |
| 1842 | c.y = As<Float4>(halfToFloatBits(t1)); |
| 1843 | c.z = As<Float4>(halfToFloatBits(t2)); |
| 1844 | c.w = As<Float4>(halfToFloatBits(t3)); |
| 1845 | transpose4x4(c.x, c.y, c.z, c.w); |
| 1846 | break; |
| 1847 | case VK_FORMAT_R32_SFLOAT: |
| 1848 | case VK_FORMAT_R32_SINT: |
| 1849 | case VK_FORMAT_R32_UINT: |
| 1850 | case VK_FORMAT_D32_SFLOAT: |
| 1851 | // FIXME: Optimal shuffling? |
| 1852 | c.x.x = *Pointer<Float>(buffer + index[0] * 4); |
| 1853 | c.x.y = *Pointer<Float>(buffer + index[1] * 4); |
| 1854 | c.x.z = *Pointer<Float>(buffer + index[2] * 4); |
| 1855 | c.x.w = *Pointer<Float>(buffer + index[3] * 4); |
| 1856 | break; |
| 1857 | case VK_FORMAT_R32G32_SFLOAT: |
| 1858 | case VK_FORMAT_R32G32_SINT: |
| 1859 | case VK_FORMAT_R32G32_UINT: |
| 1860 | // FIXME: Optimal shuffling? |
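// Each two-component texel is 8 bytes. Reading 16 bytes at (index * 8 - 8) lands texel 1 (and 3)
// in the .zw half of the register, so the xz/yw shuffles below deinterleave the four R and four G values.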
| 1861 | c.x.xy = *Pointer<Float4>(buffer + index[0] * 8); |
| 1862 | c.x.zw = *Pointer<Float4>(buffer + index[1] * 8 - 8); |
| 1863 | c.z.xy = *Pointer<Float4>(buffer + index[2] * 8); |
| 1864 | c.z.zw = *Pointer<Float4>(buffer + index[3] * 8 - 8); |
| 1865 | c.y = c.x; |
| 1866 | c.x = Float4(c.x.xz, c.z.xz); |
| 1867 | c.y = Float4(c.y.yw, c.z.yw); |
| 1868 | break; |
| 1869 | case VK_FORMAT_R32G32B32_SFLOAT: |
| 1870 | case VK_FORMAT_R32G32B32_SINT: |
| 1871 | case VK_FORMAT_R32G32B32_UINT: |
| 1872 | c.x = *Pointer<Float4>(buffer + index[0] * 16, 16); |
| 1873 | c.y = *Pointer<Float4>(buffer + index[1] * 16, 16); |
| 1874 | c.z = *Pointer<Float4>(buffer + index[2] * 16, 16); |
| 1875 | c.w = *Pointer<Float4>(buffer + index[3] * 16, 16); |
| 1876 | transpose4x3(c.x, c.y, c.z, c.w); |
| 1877 | break; |
| 1878 | case VK_FORMAT_R32G32B32A32_SFLOAT: |
| 1879 | case VK_FORMAT_R32G32B32A32_SINT: |
| 1880 | case VK_FORMAT_R32G32B32A32_UINT: |
| 1881 | c.x = *Pointer<Float4>(buffer + index[0] * 16, 16); |
| 1882 | c.y = *Pointer<Float4>(buffer + index[1] * 16, 16); |
| 1883 | c.z = *Pointer<Float4>(buffer + index[2] * 16, 16); |
| 1884 | c.w = *Pointer<Float4>(buffer + index[3] * 16, 16); |
| 1885 | transpose4x4(c.x, c.y, c.z, c.w); |
| 1886 | break; |
| 1887 | case VK_FORMAT_E5B9G9R9_UFLOAT_PACK32: |
| 1888 | { |
| 1889 | Float4 t; // TODO: add Insert(UInt4, RValue<UInt>) |
| 1890 | t.x = *Pointer<Float>(buffer + index[0] * 4); |
| 1891 | t.y = *Pointer<Float>(buffer + index[1] * 4); |
| 1892 | t.z = *Pointer<Float>(buffer + index[2] * 4); |
| 1893 | t.w = *Pointer<Float>(buffer + index[3] * 4); |
| 1894 | t0 = As<UInt4>(t); |
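// Shared-exponent (RGB9E5) decode: scale = 2^E / 2^24, i.e. 2^(E - 15 - 9) for exponent bias 15
// and 9 mantissa bits; each 9-bit mantissa is multiplied by this scale.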
| 1895 | c.w = Float4(UInt4(1) << ((t0 >> 27) & UInt4(0x1F))) * Float4(1.0f / (1 << 24)); |
| 1896 | c.x = Float4((t0) & UInt4(0x1FF)) * c.w; |
| 1897 | c.y = Float4((t0 >> 9) & UInt4(0x1FF)) * c.w; |
| 1898 | c.z = Float4((t0 >> 18) & UInt4(0x1FF)) * c.w; |
| 1899 | break; |
| 1900 | } |
| 1901 | case VK_FORMAT_B10G11R11_UFLOAT_PACK32: |
| 1902 | { |
| 1903 | Float4 t; // TODO: add Insert(UInt4, RValue<UInt>) |
| 1904 | t.x = *Pointer<Float>(buffer + index[0] * 4); |
| 1905 | t.y = *Pointer<Float>(buffer + index[1] * 4); |
| 1906 | t.z = *Pointer<Float>(buffer + index[2] * 4); |
| 1907 | t.w = *Pointer<Float>(buffer + index[3] * 4); |
| 1908 | t0 = As<UInt4>(t); |
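// The 11- and 10-bit floats use the same 5-bit exponent encoding as half floats, so shifting each
// field into half-float bit position (zero-filling the missing mantissa bits) lets halfToFloatBits
// finish the conversion.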
| 1909 | c.x = As<Float4>(halfToFloatBits((t0 << 4) & UInt4(0x7FF0))); |
| 1910 | c.y = As<Float4>(halfToFloatBits((t0 >> 7) & UInt4(0x7FF0))); |
| 1911 | c.z = As<Float4>(halfToFloatBits((t0 >> 17) & UInt4(0x7FE0))); |
| 1912 | break; |
| 1913 | } |
| 1914 | default: |
UNIMPLEMENTED("Format %d", VkFormat(state.textureFormat));
| 1916 | } |
| 1917 | } |
| 1918 | else |
| 1919 | { |
| 1920 | ASSERT(!isYcbcrFormat()); |
| 1921 | |
| 1922 | Vector4s cs = sampleTexel(index, buffer); |
| 1923 | |
| 1924 | bool isInteger = state.textureFormat.isNonNormalizedInteger(); |
| 1925 | int componentCount = textureComponentCount(); |
| 1926 | for(int n = 0; n < componentCount; n++) |
| 1927 | { |
| 1928 | if(hasUnsignedTextureComponent(n)) |
| 1929 | { |
| 1930 | if(isInteger) |
| 1931 | { |
| 1932 | c[n] = As<Float4>(Int4(As<UShort4>(cs[n]))); |
| 1933 | } |
| 1934 | else |
| 1935 | { |
| 1936 | c[n] = Float4(As<UShort4>(cs[n])); |
| 1937 | } |
| 1938 | } |
| 1939 | else |
| 1940 | { |
| 1941 | if(isInteger) |
| 1942 | { |
| 1943 | c[n] = As<Float4>(Int4(cs[n])); |
| 1944 | } |
| 1945 | else |
| 1946 | { |
| 1947 | c[n] = Float4(cs[n]); |
| 1948 | } |
| 1949 | } |
| 1950 | } |
| 1951 | } |
| 1952 | |
| 1953 | if(state.compareEnable) |
| 1954 | { |
| 1955 | Float4 ref = z; |
| 1956 | |
| 1957 | if(!hasFloatTexture()) |
| 1958 | { |
| 1959 | // D16_UNORM: clamp reference, normalize texel value |
| 1960 | ref = Min(Max(ref, Float4(0.0f)), Float4(1.0f)); |
| 1961 | c.x = c.x * Float4(1.0f / 0xFFFF); |
| 1962 | } |
| 1963 | |
| 1964 | Int4 boolean; |
| 1965 | |
| 1966 | switch(state.compareOp) |
| 1967 | { |
| 1968 | case VK_COMPARE_OP_LESS_OR_EQUAL: boolean = CmpLE(ref, c.x); break; |
| 1969 | case VK_COMPARE_OP_GREATER_OR_EQUAL: boolean = CmpNLT(ref, c.x); break; |
| 1970 | case VK_COMPARE_OP_LESS: boolean = CmpLT(ref, c.x); break; |
| 1971 | case VK_COMPARE_OP_GREATER: boolean = CmpNLE(ref, c.x); break; |
| 1972 | case VK_COMPARE_OP_EQUAL: boolean = CmpEQ(ref, c.x); break; |
| 1973 | case VK_COMPARE_OP_NOT_EQUAL: boolean = CmpNEQ(ref, c.x); break; |
| 1974 | case VK_COMPARE_OP_ALWAYS: boolean = Int4(-1); break; |
| 1975 | case VK_COMPARE_OP_NEVER: boolean = Int4(0); break; |
| 1976 | default: ASSERT(false); |
| 1977 | } |
| 1978 | |
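// Convert the comparison mask to 1.0 or 0.0 by masking the bit pattern of 1.0f.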
| 1979 | c.x = As<Float4>(boolean & As<Int4>(Float4(1.0f))); |
| 1980 | c.y = Float4(0.0f); |
| 1981 | c.z = Float4(0.0f); |
| 1982 | c.w = Float4(1.0f); |
| 1983 | } |
| 1984 | |
| 1985 | if(borderModeActive()) |
| 1986 | { |
| 1987 | c = replaceBorderTexel(c, valid); |
| 1988 | } |
| 1989 | |
| 1990 | return c; |
| 1991 | } |
| 1992 | |
| 1993 | Vector4f SamplerCore::replaceBorderTexel(const Vector4f &c, Int4 valid) |
| 1994 | { |
| 1995 | Int4 borderRGB; |
| 1996 | Int4 borderA; |
| 1997 | |
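// For normalized formats the texel values at this point are still in 16-bit fixed-point scale,
// so the border's 'one' must be 0x7FFF (signed) or 0xFFFF (unsigned) rather than 1.0f.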
| 1998 | bool scaled = !hasFloatTexture() && !hasUnnormalizedIntegerTexture() && !state.compareEnable; |
| 1999 | bool sign = !hasUnsignedTextureComponent(0); |
| 2000 | Int4 float_one = scaled ? As<Int4>(Float4(static_cast<float>(sign ? 0x7FFF : 0xFFFF))) : As<Int4>(Float4(1.0f)); |
| 2001 | |
| 2002 | switch(state.border) |
| 2003 | { |
| 2004 | case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK: |
| 2005 | case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK: |
| 2006 | borderRGB = Int4(0); |
| 2007 | borderA = Int4(0); |
| 2008 | break; |
| 2009 | case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK: |
| 2010 | borderRGB = Int4(0); |
| 2011 | borderA = float_one; |
| 2012 | break; |
| 2013 | case VK_BORDER_COLOR_INT_OPAQUE_BLACK: |
| 2014 | borderRGB = Int4(0); |
| 2015 | borderA = Int4(1); |
| 2016 | break; |
| 2017 | case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE: |
| 2018 | borderRGB = float_one; |
| 2019 | borderA = float_one; |
| 2020 | break; |
| 2021 | case VK_BORDER_COLOR_INT_OPAQUE_WHITE: |
| 2022 | borderRGB = Int4(1); |
| 2023 | borderA = Int4(1); |
| 2024 | break; |
| 2025 | default: |
UNIMPLEMENTED("sint/uint/sfloat border: %u", state.border);
| 2027 | } |
| 2028 | |
| 2029 | Vector4f out; |
| 2030 | out.x = As<Float4>((valid & As<Int4>(c.x)) | (~valid & borderRGB)); |
| 2031 | out.y = As<Float4>((valid & As<Int4>(c.y)) | (~valid & borderRGB)); |
| 2032 | out.z = As<Float4>((valid & As<Int4>(c.z)) | (~valid & borderRGB)); |
| 2033 | out.w = As<Float4>((valid & As<Int4>(c.w)) | (~valid & borderA)); |
| 2034 | |
| 2035 | return out; |
| 2036 | } |
| 2037 | |
| 2038 | void SamplerCore::selectMipmap(const Pointer<Byte> &texture, Pointer<Byte> &mipmap, Pointer<Byte> &buffer, const Float &lod, bool secondLOD) |
| 2039 | { |
| 2040 | Pointer<Byte> mipmap0 = texture + OFFSET(Texture, mipmap[0]); |
| 2041 | |
| 2042 | if(state.mipmapFilter == MIPMAP_NONE) |
| 2043 | { |
| 2044 | mipmap = mipmap0; |
| 2045 | } |
| 2046 | else |
| 2047 | { |
| 2048 | Int ilod; |
| 2049 | |
| 2050 | if(state.mipmapFilter == MIPMAP_POINT) |
| 2051 | { |
| 2052 | // TODO: Preferred formula is ceil(lod + 0.5) - 1 |
| 2053 | ilod = RoundInt(lod); |
| 2054 | } |
| 2055 | else // MIPMAP_LINEAR |
| 2056 | { |
| 2057 | ilod = Int(lod); |
| 2058 | } |
| 2059 | |
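// secondLOD selects the adjacent, coarser level, used as the second tap when filtering between mipmap levels.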
| 2060 | mipmap = mipmap0 + ilod * sizeof(Mipmap) + secondLOD * sizeof(Mipmap); |
| 2061 | } |
| 2062 | |
| 2063 | buffer = *Pointer<Pointer<Byte>>(mipmap + OFFSET(Mipmap, buffer)); |
| 2064 | } |
| 2065 | |
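// Returns an all-ones mask when a linear filter footprint applies and zero for point sampling.
// For the mixed min/mag filters the choice follows the sign of the LOD: lod > 0 selects minification.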
| 2066 | Int4 SamplerCore::computeFilterOffset(Float &lod) |
| 2067 | { |
| 2068 | if(state.textureFilter == FILTER_POINT) |
| 2069 | { |
| 2070 | return Int4(0); |
| 2071 | } |
| 2072 | else if(state.textureFilter == FILTER_MIN_LINEAR_MAG_POINT) |
| 2073 | { |
| 2074 | return CmpNLE(Float4(lod), Float4(0.0f)); |
| 2075 | } |
| 2076 | else if(state.textureFilter == FILTER_MIN_POINT_MAG_LINEAR) |
| 2077 | { |
| 2078 | return CmpLE(Float4(lod), Float4(0.0f)); |
| 2079 | } |
| 2080 | |
| 2081 | return Int4(~0); |
| 2082 | } |
| 2083 | |
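// Converts a normalized coordinate to an unsigned 16-bit fixed-point coordinate according to the
// addressing mode. ADDRESSING_LAYER instead returns the clamped integer array layer.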
| 2084 | Short4 SamplerCore::address(Float4 &uw, AddressingMode addressingMode, Pointer<Byte> &mipmap) |
| 2085 | { |
| 2086 | if(addressingMode == ADDRESSING_UNUSED) |
| 2087 | { |
| 2088 | return Short4(); |
| 2089 | } |
| 2090 | else if(addressingMode == ADDRESSING_LAYER) |
| 2091 | { |
| 2092 | return Short4(Min(Max(RoundInt(uw), Int4(0)), *Pointer<Int4>(mipmap + OFFSET(Mipmap, depth)) - Int4(1))); |
| 2093 | } |
| 2094 | else if(addressingMode == ADDRESSING_CLAMP || addressingMode == ADDRESSING_BORDER) |
| 2095 | { |
| 2096 | Float4 clamp = Min(Max(uw, Float4(0.0f)), Float4(65535.0f / 65536.0f)); |
| 2097 | |
| 2098 | return Short4(Int4(clamp * Float4(1 << 16))); |
| 2099 | } |
| 2100 | else if(addressingMode == ADDRESSING_MIRROR) |
| 2101 | { |
| 2102 | Int4 convert = Int4(uw * Float4(1 << 16)); |
| 2103 | Int4 mirror = (convert << 15) >> 31; |
| 2104 | |
| 2105 | convert ^= mirror; |
| 2106 | |
| 2107 | return Short4(convert); |
| 2108 | } |
| 2109 | else if(addressingMode == ADDRESSING_MIRRORONCE) |
| 2110 | { |
| 2111 | // Absolute value |
| 2112 | Int4 convert = Int4(Abs(uw * Float4(1 << 16))); |
| 2113 | |
| 2114 | // Clamp |
| 2115 | convert -= Int4(0x00008000, 0x00008000, 0x00008000, 0x00008000); |
| 2116 | convert = As<Int4>(PackSigned(convert, convert)); |
| 2117 | |
| 2118 | return As<Short4>(Int2(convert)) + Short4(0x8000u); |
| 2119 | } |
| 2120 | else // Wrap |
| 2121 | { |
| 2122 | return Short4(Int4(uw * Float4(1 << 16))); |
| 2123 | } |
| 2124 | } |
| 2125 | |
| 2126 | // TODO: Eliminate when the gather + mirror addressing case is handled by mirroring the footprint. |
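// Reflects negative values: -1 -> 0, -2 -> 1, etc.; non-negative values pass through unchanged.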
| 2127 | static Int4 mirror(Int4 n) |
| 2128 | { |
| 2129 | auto positive = CmpNLT(n, Int4(0)); |
| 2130 | return (positive & n) | (~positive & (-(Int4(1) + n))); |
| 2131 | } |
| 2132 | |
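// Modulo that always yields a non-negative result (floored modulo).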
| 2133 | static Int4 mod(Int4 n, Int4 d) |
| 2134 | { |
| 2135 | auto x = n % d; |
| 2136 | auto positive = CmpNLT(x, Int4(0)); |
| 2137 | return (positive & x) | (~positive & (x + d)); |
| 2138 | } |
| 2139 | |
| 2140 | void SamplerCore::address(Float4 &uvw, Int4 &xyz0, Int4 &xyz1, Float4 &f, Pointer<Byte> &mipmap, Float4 &texOffset, Int4 &filter, int whd, AddressingMode addressingMode, SamplerFunction function) |
| 2141 | { |
| 2142 | if(addressingMode == ADDRESSING_UNUSED) |
| 2143 | { |
| 2144 | return; |
| 2145 | } |
| 2146 | |
| 2147 | Int4 dim = *Pointer<Int4>(mipmap + whd, 16); |
| 2148 | Int4 maxXYZ = dim - Int4(1); |
| 2149 | |
| 2150 | if(function == Fetch) |
| 2151 | { |
| 2152 | xyz0 = Min(Max(((function.offset != 0) && (addressingMode != ADDRESSING_LAYER)) ? As<Int4>(uvw) + As<Int4>(texOffset) : As<Int4>(uvw), Int4(0)), maxXYZ); |
| 2153 | } |
| 2154 | else if(addressingMode == ADDRESSING_LAYER) // Note: Offset does not apply to array layers |
| 2155 | { |
| 2156 | xyz0 = Min(Max(RoundInt(uvw), Int4(0)), maxXYZ); |
| 2157 | } |
| 2158 | else if(addressingMode == ADDRESSING_CUBEFACE) |
| 2159 | { |
| 2160 | xyz0 = As<Int4>(uvw); |
| 2161 | } |
| 2162 | else |
| 2163 | { |
| 2164 | const int halfBits = 0x3EFFFFFF; // Value just under 0.5f |
| 2165 | const int oneBits = 0x3F7FFFFF; // Value just under 1.0f |
| 2166 | const int twoBits = 0x3FFFFFFF; // Value just under 2.0f |
| 2167 | |
| 2168 | bool pointFilter = state.textureFilter == FILTER_POINT || |
| 2169 | state.textureFilter == FILTER_MIN_POINT_MAG_LINEAR || |
| 2170 | state.textureFilter == FILTER_MIN_LINEAR_MAG_POINT; |
| 2171 | |
| 2172 | Float4 coord = uvw; |
| 2173 | |
| 2174 | if(state.unnormalizedCoordinates) |
| 2175 | { |
| 2176 | switch(addressingMode) |
| 2177 | { |
| 2178 | case ADDRESSING_CLAMP: |
| 2179 | coord = Min(Max(coord, Float4(0.0f)), Float4(dim) * As<Float4>(Int4(oneBits))); |
| 2180 | break; |
| 2181 | case ADDRESSING_BORDER: |
| 2182 | // Don't map to a valid range here. |
| 2183 | break; |
| 2184 | default: |
| 2185 | // If unnormalizedCoordinates is VK_TRUE, addressModeU and addressModeV must each be |
| 2186 | // either VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE or VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER |
UNREACHABLE("addressingMode %d", int(addressingMode));
| 2188 | break; |
| 2189 | } |
| 2190 | } |
| 2191 | else if(state.textureFilter == FILTER_GATHER && addressingMode == ADDRESSING_MIRROR) |
| 2192 | { |
// Gather requires the 'footprint' of the texels from which a component is taken to also be mirrored.
// Therefore we can't just compute one texel's location and find the others at +1 offsets from it.
// Here we handle that case separately by mirroring each texel coordinate individually.
| 2196 | // TODO: Mirror the footprint by adjusting the sign of the 0.5f and 1 offsets. |
| 2197 | |
| 2198 | coord = coord * Float4(dim); |
| 2199 | coord -= Float4(0.5f); |
| 2200 | Float4 floor = Floor(coord); |
| 2201 | xyz0 = Int4(floor); |
| 2202 | |
| 2203 | if(function.offset != 0) |
| 2204 | { |
| 2205 | xyz0 += As<Int4>(texOffset); |
| 2206 | } |
| 2207 | |
| 2208 | xyz1 = xyz0 + Int4(1); |
| 2209 | |
| 2210 | xyz0 = (maxXYZ) - mirror(mod(xyz0, Int4(2) * dim) - dim); |
| 2211 | xyz1 = (maxXYZ) - mirror(mod(xyz1, Int4(2) * dim) - dim); |
| 2212 | |
| 2213 | return; |
| 2214 | } |
| 2215 | else |
| 2216 | { |
| 2217 | if(function.offset == 0) |
| 2218 | { |
| 2219 | switch(addressingMode) |
| 2220 | { |
| 2221 | case ADDRESSING_CLAMP: |
| 2222 | case ADDRESSING_SEAMLESS: |
| 2223 | // Linear filtering of cube doesn't require clamping because the coordinates |
| 2224 | // are already in [0, 1] range and numerical imprecision is tolerated. |
| 2225 | if(addressingMode != ADDRESSING_SEAMLESS || pointFilter) |
| 2226 | { |
| 2227 | Float4 one = As<Float4>(Int4(oneBits)); |
| 2228 | coord = Min(Max(coord, Float4(0.0f)), one); |
| 2229 | } |
| 2230 | break; |
| 2231 | case ADDRESSING_MIRROR: |
| 2232 | { |
| 2233 | Float4 half = As<Float4>(Int4(halfBits)); |
| 2234 | Float4 one = As<Float4>(Int4(oneBits)); |
| 2235 | Float4 two = As<Float4>(Int4(twoBits)); |
| 2236 | coord = one - Abs(two * Frac(coord * half) - one); |
| 2237 | } |
| 2238 | break; |
| 2239 | case ADDRESSING_MIRRORONCE: |
| 2240 | { |
| 2241 | Float4 half = As<Float4>(Int4(halfBits)); |
| 2242 | Float4 one = As<Float4>(Int4(oneBits)); |
| 2243 | Float4 two = As<Float4>(Int4(twoBits)); |
| 2244 | coord = one - Abs(two * Frac(Min(Max(coord, -one), two) * half) - one); |
| 2245 | } |
| 2246 | break; |
| 2247 | case ADDRESSING_BORDER: |
| 2248 | // Don't map to a valid range here. |
| 2249 | break; |
| 2250 | default: // Wrap |
| 2251 | coord = Frac(coord); |
| 2252 | break; |
| 2253 | } |
| 2254 | } |
| 2255 | |
| 2256 | coord = coord * Float4(dim); |
| 2257 | } |
| 2258 | |
| 2259 | if(state.textureFilter == FILTER_POINT) |
| 2260 | { |
| 2261 | if(addressingMode == ADDRESSING_BORDER || function.offset != 0) |
| 2262 | { |
| 2263 | xyz0 = Int4(Floor(coord)); |
| 2264 | } |
| 2265 | else // Can't have negative coordinates, so floor() is redundant when casting to int. |
| 2266 | { |
| 2267 | xyz0 = Int4(coord); |
| 2268 | } |
| 2269 | } |
| 2270 | else |
| 2271 | { |
| 2272 | if(state.textureFilter == FILTER_MIN_POINT_MAG_LINEAR || |
| 2273 | state.textureFilter == FILTER_MIN_LINEAR_MAG_POINT) |
| 2274 | { |
| 2275 | coord -= As<Float4>(As<Int4>(Float4(0.5f)) & filter); |
| 2276 | } |
| 2277 | else |
| 2278 | { |
| 2279 | coord -= Float4(0.5f); |
| 2280 | } |
| 2281 | |
| 2282 | Float4 floor = Floor(coord); |
| 2283 | xyz0 = Int4(floor); |
| 2284 | f = coord - floor; |
| 2285 | } |
| 2286 | |
| 2287 | if(function.offset != 0) |
| 2288 | { |
| 2289 | xyz0 += As<Int4>(texOffset); |
| 2290 | } |
| 2291 | |
| 2292 | if(addressingMode == ADDRESSING_SEAMLESS) // Adjust for border. |
| 2293 | { |
| 2294 | xyz0 += Int4(1); |
| 2295 | } |
| 2296 | |
xyz1 = xyz0 - filter; // filter is all-ones (-1) when a linear footprint applies, so this increments by one; it is 0 for point taps, leaving xyz1 == xyz0.
| 2298 | |
| 2299 | if(addressingMode == ADDRESSING_BORDER) |
| 2300 | { |
| 2301 | // Replace the coordinates with -1 if they're out of range. |
| 2302 | Int4 border0 = CmpLT(xyz0, Int4(0)) | CmpNLT(xyz0, dim); |
| 2303 | Int4 border1 = CmpLT(xyz1, Int4(0)) | CmpNLT(xyz1, dim); |
| 2304 | xyz0 |= border0; |
| 2305 | xyz1 |= border1; |
| 2306 | } |
| 2307 | else if(function.offset != 0) |
| 2308 | { |
| 2309 | switch(addressingMode) |
| 2310 | { |
| 2311 | case ADDRESSING_SEAMLESS: |
UNREACHABLE("addressingMode %d", int(addressingMode)); // Cube sampling doesn't support offset.
| 2313 | case ADDRESSING_MIRROR: |
| 2314 | case ADDRESSING_MIRRORONCE: |
| 2315 | // TODO: Implement ADDRESSING_MIRROR and ADDRESSING_MIRRORONCE. |
| 2316 | // Fall through to Clamp. |
| 2317 | case ADDRESSING_CLAMP: |
| 2318 | xyz0 = Min(Max(xyz0, Int4(0)), maxXYZ); |
| 2319 | xyz1 = Min(Max(xyz1, Int4(0)), maxXYZ); |
| 2320 | break; |
| 2321 | default: // Wrap |
| 2322 | xyz0 = mod(xyz0, dim); |
| 2323 | xyz1 = mod(xyz1, dim); |
| 2324 | break; |
| 2325 | } |
| 2326 | } |
| 2327 | else if(state.textureFilter != FILTER_POINT) |
| 2328 | { |
| 2329 | switch(addressingMode) |
| 2330 | { |
| 2331 | case ADDRESSING_SEAMLESS: |
| 2332 | break; |
| 2333 | case ADDRESSING_MIRROR: |
| 2334 | case ADDRESSING_MIRRORONCE: |
| 2335 | case ADDRESSING_CLAMP: |
| 2336 | xyz0 = Max(xyz0, Int4(0)); |
| 2337 | xyz1 = Min(xyz1, maxXYZ); |
| 2338 | break; |
| 2339 | default: // Wrap |
| 2340 | { |
| 2341 | Int4 under = CmpLT(xyz0, Int4(0)); |
| 2342 | xyz0 = (under & maxXYZ) | (~under & xyz0); // xyz < 0 ? dim - 1 : xyz // TODO: IfThenElse() |
| 2343 | |
| 2344 | Int4 nover = CmpLT(xyz1, dim); |
| 2345 | xyz1 = nover & xyz1; // xyz >= dim ? 0 : xyz |
| 2346 | } |
| 2347 | break; |
| 2348 | } |
| 2349 | } |
| 2350 | } |
| 2351 | } |
| 2352 | |
| 2353 | void SamplerCore::convertSigned15(Float4 &cf, Short4 &cs) |
| 2354 | { |
| 2355 | cf = Float4(cs) * Float4(1.0f / 0x7FFF); |
| 2356 | } |
| 2357 | |
| 2358 | void SamplerCore::convertUnsigned16(Float4 &cf, Short4 &cs) |
| 2359 | { |
| 2360 | cf = Float4(As<UShort4>(cs)) * Float4(1.0f / 0xFFFF); |
| 2361 | } |
| 2362 | |
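// Converts 16-bit sRGB components to linear using the 8-bit lookup table in the constants block
// (the 16_8_16 suffix presumably denotes 16-bit input, 8-bit table index, 16-bit output).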
| 2363 | void SamplerCore::sRGBtoLinear16_8_16(Short4 &c) |
| 2364 | { |
| 2365 | c = As<UShort4>(c) >> 8; |
| 2366 | |
| 2367 | Pointer<Byte> LUT = Pointer<Byte>(constants + OFFSET(Constants,sRGBtoLinear8_16)); |
| 2368 | |
| 2369 | c = Insert(c, *Pointer<Short>(LUT + 2 * Int(Extract(c, 0))), 0); |
| 2370 | c = Insert(c, *Pointer<Short>(LUT + 2 * Int(Extract(c, 1))), 1); |
| 2371 | c = Insert(c, *Pointer<Short>(LUT + 2 * Int(Extract(c, 2))), 2); |
| 2372 | c = Insert(c, *Pointer<Short>(LUT + 2 * Int(Extract(c, 3))), 3); |
| 2373 | } |
| 2374 | |
| 2375 | bool SamplerCore::hasFloatTexture() const |
| 2376 | { |
| 2377 | return state.textureFormat.isFloatFormat(); |
| 2378 | } |
| 2379 | |
| 2380 | bool SamplerCore::hasUnnormalizedIntegerTexture() const |
| 2381 | { |
| 2382 | return state.textureFormat.isNonNormalizedInteger(); |
| 2383 | } |
| 2384 | |
| 2385 | bool SamplerCore::hasUnsignedTextureComponent(int component) const |
| 2386 | { |
| 2387 | return state.textureFormat.isUnsignedComponent(component); |
| 2388 | } |
| 2389 | |
| 2390 | int SamplerCore::textureComponentCount() const |
| 2391 | { |
| 2392 | return state.textureFormat.componentCount(); |
| 2393 | } |
| 2394 | |
| 2395 | bool SamplerCore::hasThirdCoordinate() const |
| 2396 | { |
| 2397 | return (state.textureType == VK_IMAGE_VIEW_TYPE_3D) || |
| 2398 | (state.textureType == VK_IMAGE_VIEW_TYPE_2D_ARRAY) || |
| 2399 | (state.textureType == VK_IMAGE_VIEW_TYPE_1D_ARRAY); // Treated as 2D texture with second coordinate 0. TODO(b/134669567) |
| 2400 | } |
| 2401 | |
| 2402 | bool SamplerCore::has16bitTextureFormat() const |
| 2403 | { |
| 2404 | return state.textureFormat.has16bitTextureFormat(); |
| 2405 | } |
| 2406 | |
| 2407 | bool SamplerCore::has8bitTextureComponents() const |
| 2408 | { |
| 2409 | return state.textureFormat.has8bitTextureComponents(); |
| 2410 | } |
| 2411 | |
| 2412 | bool SamplerCore::has16bitTextureComponents() const |
| 2413 | { |
| 2414 | return state.textureFormat.has16bitTextureComponents(); |
| 2415 | } |
| 2416 | |
| 2417 | bool SamplerCore::has32bitIntegerTextureComponents() const |
| 2418 | { |
| 2419 | return state.textureFormat.has32bitIntegerTextureComponents(); |
| 2420 | } |
| 2421 | |
| 2422 | bool SamplerCore::hasQuadLayout() const |
| 2423 | { |
| 2424 | return state.textureFormat.hasQuadLayout(); |
| 2425 | } |
| 2426 | |
| 2427 | bool SamplerCore::isYcbcrFormat() const |
| 2428 | { |
| 2429 | return state.textureFormat.isYcbcrFormat(); |
| 2430 | } |
| 2431 | |
| 2432 | bool SamplerCore::isRGBComponent(int component) const |
| 2433 | { |
| 2434 | return state.textureFormat.isRGBComponent(component); |
| 2435 | } |
| 2436 | |
| 2437 | bool SamplerCore::borderModeActive() const |
| 2438 | { |
| 2439 | return state.addressingModeU == ADDRESSING_BORDER || |
| 2440 | state.addressingModeV == ADDRESSING_BORDER || |
| 2441 | state.addressingModeW == ADDRESSING_BORDER; |
| 2442 | } |
| 2443 | |
| 2444 | VkComponentSwizzle SamplerCore::gatherSwizzle() const |
| 2445 | { |
| 2446 | switch(state.gatherComponent) |
| 2447 | { |
| 2448 | case 0: return state.swizzle.r; |
| 2449 | case 1: return state.swizzle.g; |
| 2450 | case 2: return state.swizzle.b; |
| 2451 | case 3: return state.swizzle.a; |
| 2452 | default: |
UNREACHABLE("Invalid component");
| 2454 | return VK_COMPONENT_SWIZZLE_R; |
| 2455 | } |
| 2456 | } |
| 2457 | } |
| 2458 | |