1 | // Copyright 2018 The SwiftShader Authors. All Rights Reserved. |
2 | // |
3 | // Licensed under the Apache License, Version 2.0 (the "License"); |
4 | // you may not use this file except in compliance with the License. |
5 | // You may obtain a copy of the License at |
6 | // |
7 | // http://www.apache.org/licenses/LICENSE-2.0 |
8 | // |
9 | // Unless required by applicable law or agreed to in writing, software |
10 | // distributed under the License is distributed on an "AS IS" BASIS, |
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
12 | // See the License for the specific language governing permissions and |
13 | // limitations under the License. |
14 | |
15 | #include "VkDeviceMemory.hpp" |
16 | #include "VkBuffer.hpp" |
17 | #include "VkDevice.hpp" |
18 | #include "VkImage.hpp" |
19 | #include "Device/Blitter.hpp" |
20 | #include "Device/ETC_Decoder.hpp" |
21 | #include <cstring> |
22 | |
23 | #ifdef __ANDROID__ |
24 | #include "System/GrallocAndroid.hpp" |
25 | #endif |
26 | |
namespace
{
	// Maps a Vulkan EAC/ETC2 compressed format to the input type that
	// ETC_Decoder expects when decompressing blocks of that format.
	// Unknown formats trigger UNIMPLEMENTED and fall back to ETC_RGBA.
	ETC_Decoder::InputType GetInputType(const vk::Format& format)
	{
		switch(format)
		{
		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
			return ETC_Decoder::ETC_R_UNSIGNED;
		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
			return ETC_Decoder::ETC_R_SIGNED;
		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
			return ETC_Decoder::ETC_RG_UNSIGNED;
		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
			return ETC_Decoder::ETC_RG_SIGNED;
		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
			return ETC_Decoder::ETC_RGB;
		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
			return ETC_Decoder::ETC_RGB_PUNCHTHROUGH_ALPHA;
		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
			return ETC_Decoder::ETC_RGBA;
		default:
			UNIMPLEMENTED("format: %d" , int(format));
			return ETC_Decoder::ETC_RGBA;
		}
	}
}
56 | |
57 | namespace vk |
58 | { |
59 | |
// Constructs the image state from pCreateInfo.
// For compressed formats, a companion Image describing the decompressed data
// is placement-new'd into 'mem' (sized by ComputeRequiredAllocationSize()).
// The pNext chain is scanned for external-memory handle type requirements.
Image::Image(const VkImageCreateInfo* pCreateInfo, void* mem, Device *device) :
	device(device),
	flags(pCreateInfo->flags),
	imageType(pCreateInfo->imageType),
	format(pCreateInfo->format),
	extent(pCreateInfo->extent),
	mipLevels(pCreateInfo->mipLevels),
	arrayLayers(pCreateInfo->arrayLayers),
	samples(pCreateInfo->samples),
	tiling(pCreateInfo->tiling),
	usage(pCreateInfo->usage)
{
	if(format.isCompressed())
	{
		// The decompressed image shares all creation parameters except the
		// format, which becomes the corresponding uncompressed format.
		VkImageCreateInfo compressedImageCreateInfo = *pCreateInfo;
		compressedImageCreateInfo.format = format.getDecompressedFormat();
		decompressedImage = new (mem) Image(&compressedImageCreateInfo, nullptr, device);
	}

	const auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext);
	for (; nextInfo != nullptr; nextInfo = nextInfo->pNext)
	{
		if (nextInfo->sType == VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_IMAGE_CREATE_INFO)
		{
			const auto* externalInfo = reinterpret_cast<const VkExternalMemoryImageCreateInfo*>(nextInfo);
			supportedExternalMemoryHandleTypes = externalInfo->handleTypes;
		}
	}
}
89 | |
90 | void Image::destroy(const VkAllocationCallbacks* pAllocator) |
91 | { |
92 | if(decompressedImage) |
93 | { |
94 | vk::deallocate(decompressedImage, pAllocator); |
95 | } |
96 | } |
97 | |
98 | size_t Image::ComputeRequiredAllocationSize(const VkImageCreateInfo* pCreateInfo) |
99 | { |
100 | return Format(pCreateInfo->format).isCompressed() ? sizeof(Image) : 0; |
101 | } |
102 | |
103 | const VkMemoryRequirements Image::getMemoryRequirements() const |
104 | { |
105 | VkMemoryRequirements memoryRequirements; |
106 | memoryRequirements.alignment = vk::REQUIRED_MEMORY_ALIGNMENT; |
107 | memoryRequirements.memoryTypeBits = vk::MEMORY_TYPE_GENERIC_BIT; |
108 | memoryRequirements.size = getStorageSize(format.getAspects()) + |
109 | (decompressedImage ? decompressedImage->getStorageSize(decompressedImage->format.getAspects()) : 0); |
110 | return memoryRequirements; |
111 | } |
112 | |
// An image may only bind to memory whose external handle type is compatible
// with the handle types declared at image creation time.
bool Image::canBindToMemory(DeviceMemory* pDeviceMemory) const
{
	return pDeviceMemory->checkExternalMemoryHandleType(supportedExternalMemoryHandleTypes);
}
117 | |
// Binds this image to pDeviceMemory at pMemoryOffset. The companion
// decompressed image, if any, is bound to the same memory immediately after
// this image's own storage.
void Image::bind(DeviceMemory* pDeviceMemory, VkDeviceSize pMemoryOffset)
{
	deviceMemory = pDeviceMemory;
	memoryOffset = pMemoryOffset;
	if(decompressedImage)
	{
		decompressedImage->deviceMemory = deviceMemory;
		// Decompressed data lives right after this image's storage.
		decompressedImage->memoryOffset = memoryOffset + getStorageSize(format.getAspects());
	}
}
128 | |
129 | #ifdef __ANDROID__ |
// Copies this image's level-0 color contents, row by row, into the gralloc
// native buffer backing it, so the data is visible to external consumers.
// Returns VK_ERROR_OUT_OF_DATE_KHR if the buffer cannot be locked/unlocked.
VkResult Image::prepareForExternalUseANDROID() const
{
	void* nativeBuffer = nullptr;
	VkExtent3D extent = getMipLevelExtent(VK_IMAGE_ASPECT_COLOR_BIT, 0);

	if(GrallocModule::getInstance()->lock(backingMemory.nativeHandle, GRALLOC_USAGE_SW_WRITE_OFTEN, 0, 0, extent.width, extent.height, &nativeBuffer) != 0)
	{
		return VK_ERROR_OUT_OF_DATE_KHR;
	}

	// NOTE(review): returning here leaves the buffer locked (no unlock call)
	// even though lock() reported success — confirm whether gralloc requires
	// an unlock in this path.
	if(!nativeBuffer)
	{
		return VK_ERROR_OUT_OF_DATE_KHR;
	}

	// Rows are copied one at a time because the gralloc buffer's stride may
	// be larger than the image's row pitch.
	int imageRowBytes = rowPitchBytes(VK_IMAGE_ASPECT_COLOR_BIT, 0);
	int bufferRowBytes = backingMemory.stride * getFormat().bytes();
	ASSERT(imageRowBytes <= bufferRowBytes);

	uint8_t* srcBuffer = static_cast<uint8_t*>(deviceMemory->getOffsetPointer(0));
	uint8_t* dstBuffer = static_cast<uint8_t*>(nativeBuffer);
	for(uint32_t i = 0; i < extent.height; i++)
	{
		memcpy(dstBuffer + (i * bufferRowBytes), srcBuffer + (i * imageRowBytes), imageRowBytes);
	}

	if(GrallocModule::getInstance()->unlock(backingMemory.nativeHandle) != 0)
	{
		return VK_ERROR_OUT_OF_DATE_KHR;
	}

	return VK_SUCCESS;
}
163 | |
// Returns the handle of the bound device memory when it is external memory;
// VK_NULL_HANDLE otherwise.
VkDeviceMemory Image::getExternalMemory() const
{
	return backingMemory.externalMemory ? *deviceMemory : VkDeviceMemory{ VK_NULL_HANDLE };
}
168 | #endif |
169 | |
170 | void Image::getSubresourceLayout(const VkImageSubresource* pSubresource, VkSubresourceLayout* pLayout) const |
171 | { |
172 | // By spec, aspectMask has a single bit set. |
173 | if (!((pSubresource->aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) || |
174 | (pSubresource->aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) || |
175 | (pSubresource->aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) || |
176 | (pSubresource->aspectMask == VK_IMAGE_ASPECT_PLANE_0_BIT) || |
177 | (pSubresource->aspectMask == VK_IMAGE_ASPECT_PLANE_1_BIT) || |
178 | (pSubresource->aspectMask == VK_IMAGE_ASPECT_PLANE_2_BIT))) |
179 | { |
180 | UNSUPPORTED("aspectMask %X" , pSubresource->aspectMask); |
181 | } |
182 | |
183 | auto aspect = static_cast<VkImageAspectFlagBits>(pSubresource->aspectMask); |
184 | pLayout->offset = getMemoryOffset(aspect, pSubresource->mipLevel, pSubresource->arrayLayer); |
185 | pLayout->size = getMultiSampledLevelSize(aspect, pSubresource->mipLevel); |
186 | pLayout->rowPitch = rowPitchBytes(aspect, pSubresource->mipLevel); |
187 | pLayout->depthPitch = slicePitchBytes(aspect, pSubresource->mipLevel); |
188 | pLayout->arrayPitch = getLayerSize(aspect); |
189 | } |
190 | |
// Copies the texels of pRegion from this image into dstImage. Source and
// destination must have the same bytes-per-texel; no format conversion is
// performed. Multisampled 2D color images and quad-layout formats are routed
// through the blitter instead of raw memcpy.
void Image::copyTo(Image* dstImage, const VkImageCopy& pRegion) const
{
	// Image copy does not perform any conversion, it simply copies memory from
	// an image to another image that has the same number of bytes per pixel.

	if (!((pRegion.srcSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
	      (pRegion.srcSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
	      (pRegion.srcSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ||
	      (pRegion.srcSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_0_BIT) ||
	      (pRegion.srcSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_1_BIT) ||
	      (pRegion.srcSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_2_BIT)))
	{
		UNSUPPORTED("srcSubresource.aspectMask %X" , pRegion.srcSubresource.aspectMask);
	}

	if (!((pRegion.dstSubresource.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) ||
	      (pRegion.dstSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT) ||
	      (pRegion.dstSubresource.aspectMask == VK_IMAGE_ASPECT_STENCIL_BIT) ||
	      (pRegion.dstSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_0_BIT) ||
	      (pRegion.dstSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_1_BIT) ||
	      (pRegion.dstSubresource.aspectMask == VK_IMAGE_ASPECT_PLANE_2_BIT)))
	{
		UNSUPPORTED("dstSubresource.aspectMask %X" , pRegion.dstSubresource.aspectMask);
	}

	VkImageAspectFlagBits srcAspect = static_cast<VkImageAspectFlagBits>(pRegion.srcSubresource.aspectMask);
	VkImageAspectFlagBits dstAspect = static_cast<VkImageAspectFlagBits>(pRegion.dstSubresource.aspectMask);

	Format srcFormat = getFormat(srcAspect);
	Format dstFormat = dstImage->getFormat(dstAspect);

	if(((samples > VK_SAMPLE_COUNT_1_BIT) && (imageType == VK_IMAGE_TYPE_2D) && !format.isNonNormalizedInteger()) ||
		srcFormat.hasQuadLayout() || dstFormat.hasQuadLayout())
	{
		// Requires multisampling resolve, or quadlayout awareness
		// Convert the VkImageCopy to a nearest-filter VkImageBlit of equal
		// extents and let the blitter handle it.
		VkImageBlit region;
		region.srcSubresource = pRegion.srcSubresource;
		region.srcOffsets[0] = pRegion.srcOffset;
		region.srcOffsets[1].x = region.srcOffsets[0].x + pRegion.extent.width;
		region.srcOffsets[1].y = region.srcOffsets[0].y + pRegion.extent.height;
		region.srcOffsets[1].z = region.srcOffsets[0].z + pRegion.extent.depth;

		region.dstSubresource = pRegion.dstSubresource;
		region.dstOffsets[0] = pRegion.dstOffset;
		region.dstOffsets[1].x = region.dstOffsets[0].x + pRegion.extent.width;
		region.dstOffsets[1].y = region.dstOffsets[0].y + pRegion.extent.height;
		region.dstOffsets[1].z = region.dstOffsets[0].z + pRegion.extent.depth;

		return device->getBlitter()->blit(this, dstImage, region, VK_FILTER_NEAREST);
	}

	int srcBytesPerBlock = srcFormat.bytesPerBlock();
	ASSERT(srcBytesPerBlock == dstFormat.bytesPerBlock());

	const uint8_t* srcMem = static_cast<const uint8_t*>(getTexelPointer(pRegion.srcOffset, pRegion.srcSubresource));
	uint8_t* dstMem = static_cast<uint8_t*>(dstImage->getTexelPointer(pRegion.dstOffset, pRegion.dstSubresource));

	int srcRowPitchBytes = rowPitchBytes(srcAspect, pRegion.srcSubresource.mipLevel);
	int srcSlicePitchBytes = slicePitchBytes(srcAspect, pRegion.srcSubresource.mipLevel);
	int dstRowPitchBytes = dstImage->rowPitchBytes(dstAspect, pRegion.dstSubresource.mipLevel);
	int dstSlicePitchBytes = dstImage->slicePitchBytes(dstAspect, pRegion.dstSubresource.mipLevel);

	VkExtent3D srcExtent = getMipLevelExtent(srcAspect, pRegion.srcSubresource.mipLevel);
	VkExtent3D dstExtent = dstImage->getMipLevelExtent(dstAspect, pRegion.dstSubresource.mipLevel);
	VkExtent3D copyExtent = imageExtentInBlocks(pRegion.extent, srcAspect);

	// Select the largest contiguous unit that one memcpy can cover: a full
	// multi-plane span, a full plane, a full line, or a single partial line.
	bool isSinglePlane = (copyExtent.depth == 1);
	bool isSingleLine = (copyExtent.height == 1) && isSinglePlane;
	// In order to copy multiple lines using a single memcpy call, we
	// have to make sure that we need to copy the entire line and that
	// both source and destination lines have the same length in bytes
	bool isEntireLine = (pRegion.extent.width == srcExtent.width) &&
	                    (pRegion.extent.width == dstExtent.width) &&
	// For non compressed formats, blockWidth is 1. For compressed
	// formats, rowPitchBytes returns the number of bytes for a row of
	// blocks, so we have to divide by the block height, which means:
	// srcRowPitchBytes / srcBlockWidth == dstRowPitchBytes / dstBlockWidth
	// And, to avoid potential non exact integer division, for example if a
	// block has 16 bytes and represents 5 lines, we change the equation to:
	// srcRowPitchBytes * dstBlockWidth == dstRowPitchBytes * srcBlockWidth
	                    ((srcRowPitchBytes * dstFormat.blockWidth()) ==
	                     (dstRowPitchBytes * srcFormat.blockWidth()));
	// In order to copy multiple planes using a single memcpy call, we
	// have to make sure that we need to copy the entire plane and that
	// both source and destination planes have the same length in bytes
	bool isEntirePlane = isEntireLine &&
	                     (copyExtent.height == srcExtent.height) &&
	                     (copyExtent.height == dstExtent.height) &&
	                     (srcSlicePitchBytes == dstSlicePitchBytes);

	if(isSingleLine) // Copy one line
	{
		size_t copySize = copyExtent.width * srcBytesPerBlock;
		ASSERT((srcMem + copySize) < end());
		ASSERT((dstMem + copySize) < dstImage->end());
		memcpy(dstMem, srcMem, copySize);
	}
	else if(isEntireLine && isSinglePlane) // Copy one plane
	{
		size_t copySize = copyExtent.height * srcRowPitchBytes;
		ASSERT((srcMem + copySize) < end());
		ASSERT((dstMem + copySize) < dstImage->end());
		memcpy(dstMem, srcMem, copySize);
	}
	else if(isEntirePlane) // Copy multiple planes
	{
		size_t copySize = copyExtent.depth * srcSlicePitchBytes;
		ASSERT((srcMem + copySize) < end());
		ASSERT((dstMem + copySize) < dstImage->end());
		memcpy(dstMem, srcMem, copySize);
	}
	else if(isEntireLine) // Copy plane by plane
	{
		size_t copySize = copyExtent.height * srcRowPitchBytes;

		for(uint32_t z = 0; z < copyExtent.depth; z++, dstMem += dstSlicePitchBytes, srcMem += srcSlicePitchBytes)
		{
			ASSERT((srcMem + copySize) < end());
			ASSERT((dstMem + copySize) < dstImage->end());
			memcpy(dstMem, srcMem, copySize);
		}
	}
	else // Copy line by line
	{
		size_t copySize = copyExtent.width * srcBytesPerBlock;

		for(uint32_t z = 0; z < copyExtent.depth; z++, dstMem += dstSlicePitchBytes, srcMem += srcSlicePitchBytes)
		{
			const uint8_t* srcSlice = srcMem;
			uint8_t* dstSlice = dstMem;
			for(uint32_t y = 0; y < copyExtent.height; y++, dstSlice += dstRowPitchBytes, srcSlice += srcRowPitchBytes)
			{
				ASSERT((srcSlice + copySize) < end());
				ASSERT((dstSlice + copySize) < dstImage->end());
				memcpy(dstSlice, srcSlice, copySize);
			}
		}
	}
}
330 | |
// Copies between 'buffer' and this image over 'region'. When bufferIsSource
// is true the buffer's texels are written into the image (which is then
// re-prepared for sampling); otherwise the image's texels are written into
// the buffer. Quad-layout formats are routed through the blitter.
void Image::copy(Buffer* buffer, const VkBufferImageCopy& region, bool bufferIsSource)
{
	switch(region.imageSubresource.aspectMask)
	{
	case VK_IMAGE_ASPECT_COLOR_BIT:
	case VK_IMAGE_ASPECT_DEPTH_BIT:
	case VK_IMAGE_ASPECT_STENCIL_BIT:
	case VK_IMAGE_ASPECT_PLANE_0_BIT:
	case VK_IMAGE_ASPECT_PLANE_1_BIT:
	case VK_IMAGE_ASPECT_PLANE_2_BIT:
		break;
	default:
		UNSUPPORTED("aspectMask %x" , int(region.imageSubresource.aspectMask));
		break;
	}

	auto aspect = static_cast<VkImageAspectFlagBits>(region.imageSubresource.aspectMask);
	Format copyFormat = getFormat(aspect);

	// Buffer addressing: zero bufferRowLength/bufferImageHeight mean tightly
	// packed; all extents below are in blocks for compressed formats.
	VkExtent3D imageExtent = imageExtentInBlocks(region.imageExtent, aspect);
	VkExtent2D bufferExtent = bufferExtentInBlocks({ imageExtent.width, imageExtent.height }, region);
	int bytesPerBlock = copyFormat.bytesPerBlock();
	int bufferRowPitchBytes = bufferExtent.width * bytesPerBlock;
	int bufferSlicePitchBytes = bufferExtent.height * bufferRowPitchBytes;

	uint8_t* bufferMemory = static_cast<uint8_t*>(buffer->getOffsetPointer(region.bufferOffset));

	if (copyFormat.hasQuadLayout())
	{
		if (bufferIsSource)
		{
			return device->getBlitter()->blitFromBuffer(this, region.imageSubresource, region.imageOffset,
														region.imageExtent, bufferMemory, bufferRowPitchBytes,
														bufferSlicePitchBytes);
		}
		else
		{
			return device->getBlitter()->blitToBuffer(this, region.imageSubresource, region.imageOffset,
													  region.imageExtent, bufferMemory, bufferRowPitchBytes,
													  bufferSlicePitchBytes);
		}
	}

	uint8_t* imageMemory = static_cast<uint8_t*>(getTexelPointer(region.imageOffset, region.imageSubresource));
	uint8_t* srcMemory = bufferIsSource ? bufferMemory : imageMemory;
	uint8_t* dstMemory = bufferIsSource ? imageMemory : bufferMemory;
	int imageRowPitchBytes = rowPitchBytes(aspect, region.imageSubresource.mipLevel);
	int imageSlicePitchBytes = slicePitchBytes(aspect, region.imageSubresource.mipLevel);

	int srcSlicePitchBytes = bufferIsSource ? bufferSlicePitchBytes : imageSlicePitchBytes;
	int dstSlicePitchBytes = bufferIsSource ? imageSlicePitchBytes : bufferSlicePitchBytes;
	int srcRowPitchBytes = bufferIsSource ? bufferRowPitchBytes : imageRowPitchBytes;
	int dstRowPitchBytes = bufferIsSource ? imageRowPitchBytes : bufferRowPitchBytes;

	// Select the largest contiguous unit that one memcpy can cover: a full
	// multi-plane span, a full plane, a full line, or a single partial line.
	VkExtent3D mipLevelExtent = getMipLevelExtent(aspect, region.imageSubresource.mipLevel);
	bool isSinglePlane = (imageExtent.depth == 1);
	bool isSingleLine = (imageExtent.height == 1) && isSinglePlane;
	bool isEntireLine = (imageExtent.width == mipLevelExtent.width) &&
						(imageRowPitchBytes == bufferRowPitchBytes);
	bool isEntirePlane = isEntireLine && (imageExtent.height == mipLevelExtent.height) &&
						 (imageSlicePitchBytes == bufferSlicePitchBytes);

	// copySize is the number of bytes per memcpy; bufferLayerSize is the
	// stride between consecutive array layers within the buffer.
	VkDeviceSize copySize = 0;
	VkDeviceSize bufferLayerSize = 0;
	if(isSingleLine)
	{
		copySize = imageExtent.width * bytesPerBlock;
		bufferLayerSize = copySize;
	}
	else if(isEntireLine && isSinglePlane)
	{
		copySize = imageExtent.height * imageRowPitchBytes;
		bufferLayerSize = copySize;
	}
	else if(isEntirePlane)
	{
		copySize = imageExtent.depth * imageSlicePitchBytes; // Copy multiple planes
		bufferLayerSize = copySize;
	}
	else if(isEntireLine) // Copy plane by plane
	{
		copySize = imageExtent.height * imageRowPitchBytes;
		bufferLayerSize = copySize * imageExtent.depth;
	}
	else // Copy line by line
	{
		copySize = imageExtent.width * bytesPerBlock;
		bufferLayerSize = copySize * imageExtent.depth * imageExtent.height;
	}

	VkDeviceSize imageLayerSize = getLayerSize(aspect);
	VkDeviceSize srcLayerSize = bufferIsSource ? bufferLayerSize : imageLayerSize;
	VkDeviceSize dstLayerSize = bufferIsSource ? imageLayerSize : bufferLayerSize;

	for(uint32_t i = 0; i < region.imageSubresource.layerCount; i++)
	{
		if(isSingleLine || (isEntireLine && isSinglePlane) || isEntirePlane)
		{
			ASSERT(((bufferIsSource ? dstMemory : srcMemory) + copySize) < end());
			ASSERT(((bufferIsSource ? srcMemory : dstMemory) + copySize) < buffer->end());
			memcpy(dstMemory, srcMemory, copySize);
		}
		else if(isEntireLine) // Copy plane by plane
		{
			uint8_t* srcPlaneMemory = srcMemory;
			uint8_t* dstPlaneMemory = dstMemory;
			for(uint32_t z = 0; z < imageExtent.depth; z++)
			{
				ASSERT(((bufferIsSource ? dstPlaneMemory : srcPlaneMemory) + copySize) < end());
				ASSERT(((bufferIsSource ? srcPlaneMemory : dstPlaneMemory) + copySize) < buffer->end());
				memcpy(dstPlaneMemory, srcPlaneMemory, copySize);
				srcPlaneMemory += srcSlicePitchBytes;
				dstPlaneMemory += dstSlicePitchBytes;
			}
		}
		else // Copy line by line
		{
			uint8_t* srcLayerMemory = srcMemory;
			uint8_t* dstLayerMemory = dstMemory;
			for(uint32_t z = 0; z < imageExtent.depth; z++)
			{
				uint8_t* srcPlaneMemory = srcLayerMemory;
				uint8_t* dstPlaneMemory = dstLayerMemory;
				for(uint32_t y = 0; y < imageExtent.height; y++)
				{
					ASSERT(((bufferIsSource ? dstPlaneMemory : srcPlaneMemory) + copySize) < end());
					ASSERT(((bufferIsSource ? srcPlaneMemory : dstPlaneMemory) + copySize) < buffer->end());
					memcpy(dstPlaneMemory, srcPlaneMemory, copySize);
					srcPlaneMemory += srcRowPitchBytes;
					dstPlaneMemory += dstRowPitchBytes;
				}
				srcLayerMemory += srcSlicePitchBytes;
				dstLayerMemory += dstSlicePitchBytes;
			}
		}

		srcMemory += srcLayerSize;
		dstMemory += dstLayerSize;
	}

	if(bufferIsSource)
	{
		// The image contents changed; redo any post-upload processing (e.g.
		// decompression for sampling) on the written subresource range.
		prepareForSampling({ region.imageSubresource.aspectMask, region.imageSubresource.mipLevel, 1,
							 region.imageSubresource.baseArrayLayer, region.imageSubresource.layerCount });
	}
}
477 | |
// Copies this image's texels into dstBuffer (image is the source).
void Image::copyTo(Buffer* dstBuffer, const VkBufferImageCopy& region)
{
	copy(dstBuffer, region, false);
}
482 | |
// Copies srcBuffer's texels into this image (buffer is the source).
void Image::copyFrom(Buffer* srcBuffer, const VkBufferImageCopy& region)
{
	copy(srcBuffer, region, true);
}
487 | |
488 | void* Image::getTexelPointer(const VkOffset3D& offset, const VkImageSubresourceLayers& subresource) const |
489 | { |
490 | VkImageAspectFlagBits aspect = static_cast<VkImageAspectFlagBits>(subresource.aspectMask); |
491 | return deviceMemory->getOffsetPointer(texelOffsetBytesInStorage(offset, subresource) + |
492 | getMemoryOffset(aspect, subresource.mipLevel, subresource.baseArrayLayer)); |
493 | } |
494 | |
495 | VkExtent3D Image::imageExtentInBlocks(const VkExtent3D& extent, VkImageAspectFlagBits aspect) const |
496 | { |
497 | VkExtent3D adjustedExtent = extent; |
498 | Format usedFormat = getFormat(aspect); |
499 | if(usedFormat.isCompressed()) |
500 | { |
501 | // When using a compressed format, we use the block as the base unit, instead of the texel |
502 | int blockWidth = usedFormat.blockWidth(); |
503 | int blockHeight = usedFormat.blockHeight(); |
504 | |
505 | // Mip level allocations will round up to the next block for compressed texture |
506 | adjustedExtent.width = ((adjustedExtent.width + blockWidth - 1) / blockWidth); |
507 | adjustedExtent.height = ((adjustedExtent.height + blockHeight - 1) / blockHeight); |
508 | } |
509 | return adjustedExtent; |
510 | } |
511 | |
512 | VkOffset3D Image::imageOffsetInBlocks(const VkOffset3D& offset, VkImageAspectFlagBits aspect) const |
513 | { |
514 | VkOffset3D adjustedOffset = offset; |
515 | Format usedFormat = getFormat(aspect); |
516 | if(usedFormat.isCompressed()) |
517 | { |
518 | // When using a compressed format, we use the block as the base unit, instead of the texel |
519 | int blockWidth = usedFormat.blockWidth(); |
520 | int blockHeight = usedFormat.blockHeight(); |
521 | |
522 | ASSERT(((offset.x % blockWidth) == 0) && ((offset.y % blockHeight) == 0)); // We can't offset within a block |
523 | |
524 | adjustedOffset.x /= blockWidth; |
525 | adjustedOffset.y /= blockHeight; |
526 | } |
527 | return adjustedOffset; |
528 | } |
529 | |
530 | VkExtent2D Image::bufferExtentInBlocks(const VkExtent2D& extent, const VkBufferImageCopy& region) const |
531 | { |
532 | VkExtent2D adjustedExtent = extent; |
533 | VkImageAspectFlagBits aspect = static_cast<VkImageAspectFlagBits>(region.imageSubresource.aspectMask); |
534 | Format usedFormat = getFormat(aspect); |
535 | if(region.bufferRowLength != 0) |
536 | { |
537 | adjustedExtent.width = region.bufferRowLength; |
538 | |
539 | if(usedFormat.isCompressed()) |
540 | { |
541 | int blockWidth = usedFormat.blockWidth(); |
542 | ASSERT((adjustedExtent.width % blockWidth) == 0); |
543 | adjustedExtent.width /= blockWidth; |
544 | } |
545 | } |
546 | if(region.bufferImageHeight != 0) |
547 | { |
548 | adjustedExtent.height = region.bufferImageHeight; |
549 | |
550 | if(usedFormat.isCompressed()) |
551 | { |
552 | int blockHeight = usedFormat.blockHeight(); |
553 | ASSERT((adjustedExtent.height % blockHeight) == 0); |
554 | adjustedExtent.height /= blockHeight; |
555 | } |
556 | } |
557 | return adjustedExtent; |
558 | } |
559 | |
560 | int Image::borderSize() const |
561 | { |
562 | // We won't add a border to compressed cube textures, we'll add it when we decompress the texture |
563 | return (isCube() && !format.isCompressed()) ? 1 : 0; |
564 | } |
565 | |
566 | VkDeviceSize Image::texelOffsetBytesInStorage(const VkOffset3D& offset, const VkImageSubresourceLayers& subresource) const |
567 | { |
568 | VkImageAspectFlagBits aspect = static_cast<VkImageAspectFlagBits>(subresource.aspectMask); |
569 | VkOffset3D adjustedOffset = imageOffsetInBlocks(offset, aspect); |
570 | int border = borderSize(); |
571 | return adjustedOffset.z * slicePitchBytes(aspect, subresource.mipLevel) + |
572 | (adjustedOffset.y + border) * rowPitchBytes(aspect, subresource.mipLevel) + |
573 | (adjustedOffset.x + border) * getFormat(aspect).bytesPerBlock(); |
574 | } |
575 | |
// Returns the extent of the given aspect at 'mipLevel', halved per level and
// clamped to a minimum of 1 per dimension. The subsampled chroma planes of
// 4:2:0 multi-planar formats are additionally halved in width and height.
VkExtent3D Image::getMipLevelExtent(VkImageAspectFlagBits aspect, uint32_t mipLevel) const
{
	VkExtent3D mipLevelExtent;
	mipLevelExtent.width = extent.width >> mipLevel;
	mipLevelExtent.height = extent.height >> mipLevel;
	mipLevelExtent.depth = extent.depth >> mipLevel;

	if(mipLevelExtent.width == 0) { mipLevelExtent.width = 1; }
	if(mipLevelExtent.height == 0) { mipLevelExtent.height = 1; }
	if(mipLevelExtent.depth == 0) { mipLevelExtent.depth = 1; }

	switch(aspect)
	{
	case VK_IMAGE_ASPECT_COLOR_BIT:
	case VK_IMAGE_ASPECT_DEPTH_BIT:
	case VK_IMAGE_ASPECT_STENCIL_BIT:
	case VK_IMAGE_ASPECT_PLANE_0_BIT: // Vulkan 1.1 Table 31. Plane Format Compatibility Table: plane 0 of all defined formats is full resolution.
		break;
	case VK_IMAGE_ASPECT_PLANE_1_BIT:
	case VK_IMAGE_ASPECT_PLANE_2_BIT:
		switch(format)
		{
		case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
		case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
			ASSERT(mipLevelExtent.width % 2 == 0 && mipLevelExtent.height % 2 == 0); // Vulkan 1.1: "Images in this format must be defined with a width and height that is a multiple of two."
			// Vulkan 1.1 Table 31. Plane Format Compatibility Table:
			// Half-resolution U and V planes.
			mipLevelExtent.width /= 2;
			mipLevelExtent.height /= 2;
			break;
		default:
			UNSUPPORTED("format %d" , int(format));
		}
		break;
	default:
		UNSUPPORTED("aspect %x" , int(aspect));
	}

	return mipLevelExtent;
}
616 | |
// Number of bytes between successive rows (rows of blocks for compressed
// formats) of the given aspect at 'mipLevel'.
int Image::rowPitchBytes(VkImageAspectFlagBits aspect, uint32_t mipLevel) const
{
	// Depth and Stencil pitch should be computed separately
	ASSERT((aspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) !=
	       (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT));

	return getFormat(aspect).pitchB(getMipLevelExtent(aspect, mipLevel).width, borderSize(), true);
}
625 | |
626 | int Image::slicePitchBytes(VkImageAspectFlagBits aspect, uint32_t mipLevel) const |
627 | { |
628 | // Depth and Stencil slice should be computed separately |
629 | ASSERT((aspect & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != |
630 | (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)); |
631 | |
632 | VkExtent3D mipLevelExtent = getMipLevelExtent(aspect, mipLevel); |
633 | Format usedFormat = getFormat(aspect); |
634 | if(usedFormat.isCompressed()) |
635 | { |
636 | sw::align(mipLevelExtent.width, usedFormat.blockWidth()); |
637 | sw::align(mipLevelExtent.height, usedFormat.blockHeight()); |
638 | } |
639 | |
640 | return usedFormat.sliceB(mipLevelExtent.width, mipLevelExtent.height, borderSize(), true); |
641 | } |
642 | |
// Returns the format describing one aspect of this image (delegates to
// Format::getAspectFormat, e.g. for depth/stencil or multi-planar images).
Format Image::getFormat(VkImageAspectFlagBits aspect) const
{
	return format.getAspectFormat(aspect);
}
647 | |
// True for 2D images created with the cube-compatible flag, i.e. images
// whose layers may be viewed as cube map faces.
bool Image::isCube() const
{
	return (flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) && (imageType == VK_IMAGE_TYPE_2D);
}
652 | |
// Upper bound pointer for the image's bound memory, used by the strict
// '(ptr + size) < end()' ASSERTs: the '+ 1' makes a range ending exactly at
// the committed size pass the strict comparison.
uint8_t* Image::end() const
{
	return reinterpret_cast<uint8_t*>(deviceMemory->getOffsetPointer(deviceMemory->getCommittedMemoryInBytes() + 1));
}
657 | |
// Byte offset within the bound memory where the given aspect's storage
// begins. Combined depth/stencil stores depth first, then stencil;
// multi-planar formats store planes 0, 1, 2 consecutively.
VkDeviceSize Image::getMemoryOffset(VkImageAspectFlagBits aspect) const
{
	switch(format)
	{
	case VK_FORMAT_D16_UNORM_S8_UINT:
	case VK_FORMAT_D24_UNORM_S8_UINT:
	case VK_FORMAT_D32_SFLOAT_S8_UINT:
		if(aspect == VK_IMAGE_ASPECT_STENCIL_BIT)
		{
			// Offset by depth buffer to get to stencil buffer
			return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_DEPTH_BIT);
		}
		break;

	case VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM:
		// Plane 2 sits after planes 0 and 1.
		if(aspect == VK_IMAGE_ASPECT_PLANE_2_BIT)
		{
			return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_PLANE_1_BIT)
					+ getStorageSize(VK_IMAGE_ASPECT_PLANE_0_BIT);
		}
		// Fall through to 2PLANE case:
	case VK_FORMAT_G8_B8R8_2PLANE_420_UNORM:
		if(aspect == VK_IMAGE_ASPECT_PLANE_1_BIT)
		{
			return memoryOffset + getStorageSize(VK_IMAGE_ASPECT_PLANE_0_BIT);
		}
		else
		{
			ASSERT(aspect == VK_IMAGE_ASPECT_PLANE_0_BIT);

			return memoryOffset;
		}
		break;

	default:
		break;
	}

	// Single-aspect formats (and the depth aspect) start at the image's base.
	return memoryOffset;
}
698 | |
699 | VkDeviceSize Image::getMemoryOffset(VkImageAspectFlagBits aspect, uint32_t mipLevel) const |
700 | { |
701 | VkDeviceSize offset = getMemoryOffset(aspect); |
702 | for(uint32_t i = 0; i < mipLevel; ++i) |
703 | { |
704 | offset += getMultiSampledLevelSize(aspect, i); |
705 | } |
706 | return offset; |
707 | } |
708 | |
// Byte offset of (aspect, mipLevel, layer). 'layer' strides by
// getLayerOffset(): a full array layer normally, or a depth slice pitch for
// 2D-array-compatible 3D images.
VkDeviceSize Image::getMemoryOffset(VkImageAspectFlagBits aspect, uint32_t mipLevel, uint32_t layer) const
{
	return layer * getLayerOffset(aspect, mipLevel) + getMemoryOffset(aspect, mipLevel);
}
713 | |
// Size in bytes of one sample of a mip level: depth slices times slice pitch.
VkDeviceSize Image::getMipLevelSize(VkImageAspectFlagBits aspect, uint32_t mipLevel) const
{
	return getMipLevelExtent(aspect, mipLevel).depth * slicePitchBytes(aspect, mipLevel);
}
718 | |
// Size in bytes of a mip level including all of its samples.
VkDeviceSize Image::getMultiSampledLevelSize(VkImageAspectFlagBits aspect, uint32_t mipLevel) const
{
	return getMipLevelSize(aspect, mipLevel) * samples;
}
723 | |
// True for 3D images created with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT,
// whose depth slices can be addressed like 2D array layers.
bool Image::is3DSlice() const
{
	return ((imageType == VK_IMAGE_TYPE_3D) && (flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT));
}
728 | |
// Stride in bytes between consecutive "layers" at the given mip level: the
// slice pitch when layers are depth slices of a 2D-array-compatible 3D
// image, otherwise the full array layer size.
VkDeviceSize Image::getLayerOffset(VkImageAspectFlagBits aspect, uint32_t mipLevel) const
{
	if(is3DSlice())
	{
		// When the VkImageSubresourceRange structure is used to select a subset of the slices of a 3D
		// image's mip level in order to create a 2D or 2D array image view of a 3D image created with
		// VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT, baseArrayLayer and layerCount specify the first
		// slice index and the number of slices to include in the created image view.
		ASSERT(samples == VK_SAMPLE_COUNT_1_BIT);

		// Offset to the proper slice of the 3D image's mip level
		return slicePitchBytes(aspect, mipLevel);
	}

	return getLayerSize(aspect);
}
745 | |
746 | VkDeviceSize Image::getLayerSize(VkImageAspectFlagBits aspect) const |
747 | { |
748 | VkDeviceSize layerSize = 0; |
749 | |
750 | for(uint32_t mipLevel = 0; mipLevel < mipLevels; ++mipLevel) |
751 | { |
752 | layerSize += getMultiSampledLevelSize(aspect, mipLevel); |
753 | } |
754 | |
755 | return layerSize; |
756 | } |
757 | |
758 | VkDeviceSize Image::getStorageSize(VkImageAspectFlags aspectMask) const |
759 | { |
760 | if((aspectMask & ~(VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT | |
761 | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT)) != 0) |
762 | { |
763 | UNSUPPORTED("aspectMask %x" , int(aspectMask)); |
764 | } |
765 | |
766 | VkDeviceSize storageSize = 0; |
767 | |
768 | if(aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_COLOR_BIT); |
769 | if(aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_DEPTH_BIT); |
770 | if(aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_STENCIL_BIT); |
771 | if(aspectMask & VK_IMAGE_ASPECT_PLANE_0_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_PLANE_0_BIT); |
772 | if(aspectMask & VK_IMAGE_ASPECT_PLANE_1_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_PLANE_1_BIT); |
773 | if(aspectMask & VK_IMAGE_ASPECT_PLANE_2_BIT) storageSize += getLayerSize(VK_IMAGE_ASPECT_PLANE_2_BIT); |
774 | |
775 | return arrayLayers * storageSize; |
776 | } |
777 | |
778 | const Image* Image::getSampledImage(const vk::Format& imageViewFormat) const |
779 | { |
780 | bool isImageViewCompressed = imageViewFormat.isCompressed(); |
781 | if(decompressedImage && !isImageViewCompressed) |
782 | { |
783 | ASSERT(flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT); |
784 | ASSERT(format.bytesPerBlock() == imageViewFormat.bytesPerBlock()); |
785 | } |
786 | // If the ImageView's format is compressed, then we do need to decompress the image so that |
787 | // it may be sampled properly by texture sampling functions, which don't support compressed |
788 | // textures. If the ImageView's format is NOT compressed, then we reinterpret cast the |
789 | // compressed image into the ImageView's format, so we must return the compressed image as is. |
790 | return (decompressedImage && isImageViewCompressed) ? decompressedImage : this; |
791 | } |
792 | |
793 | void Image::blit(Image* dstImage, const VkImageBlit& region, VkFilter filter) const |
794 | { |
795 | device->getBlitter()->blit(this, dstImage, region, filter); |
796 | } |
797 | |
798 | void Image::blitToBuffer(VkImageSubresourceLayers subresource, VkOffset3D offset, VkExtent3D extent, uint8_t* dst, int bufferRowPitch, int bufferSlicePitch) const |
799 | { |
800 | device->getBlitter()->blitToBuffer(this, subresource, offset, extent, dst, bufferRowPitch, bufferSlicePitch); |
801 | } |
802 | |
803 | void Image::resolve(Image* dstImage, const VkImageResolve& region) const |
804 | { |
805 | VkImageBlit blitRegion; |
806 | |
807 | blitRegion.srcOffsets[0] = blitRegion.srcOffsets[1] = region.srcOffset; |
808 | blitRegion.srcOffsets[1].x += region.extent.width; |
809 | blitRegion.srcOffsets[1].y += region.extent.height; |
810 | blitRegion.srcOffsets[1].z += region.extent.depth; |
811 | |
812 | blitRegion.dstOffsets[0] = blitRegion.dstOffsets[1] = region.dstOffset; |
813 | blitRegion.dstOffsets[1].x += region.extent.width; |
814 | blitRegion.dstOffsets[1].y += region.extent.height; |
815 | blitRegion.dstOffsets[1].z += region.extent.depth; |
816 | |
817 | blitRegion.srcSubresource = region.srcSubresource; |
818 | blitRegion.dstSubresource = region.dstSubresource; |
819 | |
820 | device->getBlitter()->blit(this, dstImage, blitRegion, VK_FILTER_NEAREST); |
821 | } |
822 | |
823 | VkFormat Image::getClearFormat() const |
824 | { |
825 | // Set the proper format for the clear value, as described here: |
826 | // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#clears-values |
827 | if(format.isSignedNonNormalizedInteger()) |
828 | { |
829 | return VK_FORMAT_R32G32B32A32_SINT; |
830 | } |
831 | else if(format.isUnsignedNonNormalizedInteger()) |
832 | { |
833 | return VK_FORMAT_R32G32B32A32_UINT; |
834 | } |
835 | |
836 | return VK_FORMAT_R32G32B32A32_SFLOAT; |
837 | } |
838 | |
839 | uint32_t Image::getLastLayerIndex(const VkImageSubresourceRange& subresourceRange) const |
840 | { |
841 | return ((subresourceRange.layerCount == VK_REMAINING_ARRAY_LAYERS) ? |
842 | arrayLayers : (subresourceRange.baseArrayLayer + subresourceRange.layerCount)) - 1; |
843 | } |
844 | |
845 | uint32_t Image::getLastMipLevel(const VkImageSubresourceRange& subresourceRange) const |
846 | { |
847 | return ((subresourceRange.levelCount == VK_REMAINING_MIP_LEVELS) ? |
848 | mipLevels : (subresourceRange.baseMipLevel + subresourceRange.levelCount)) - 1; |
849 | } |
850 | |
851 | void Image::clear(void* pixelData, VkFormat pixelFormat, const vk::Format& viewFormat, const VkImageSubresourceRange& subresourceRange, const VkRect2D& renderArea) |
852 | { |
853 | device->getBlitter()->clear(pixelData, pixelFormat, this, viewFormat, subresourceRange, &renderArea); |
854 | } |
855 | |
856 | void Image::clear(const VkClearColorValue& color, const VkImageSubresourceRange& subresourceRange) |
857 | { |
858 | if(!(subresourceRange.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT)) |
859 | { |
860 | UNIMPLEMENTED("aspectMask" ); |
861 | } |
862 | |
863 | device->getBlitter()->clear((void*)color.float32, getClearFormat(), this, format, subresourceRange); |
864 | } |
865 | |
866 | void Image::clear(const VkClearDepthStencilValue& color, const VkImageSubresourceRange& subresourceRange) |
867 | { |
868 | if((subresourceRange.aspectMask & ~(VK_IMAGE_ASPECT_DEPTH_BIT | |
869 | VK_IMAGE_ASPECT_STENCIL_BIT)) != 0) |
870 | { |
871 | UNIMPLEMENTED("aspectMask" ); |
872 | } |
873 | |
874 | if(subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) |
875 | { |
876 | VkImageSubresourceRange depthSubresourceRange = subresourceRange; |
877 | depthSubresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; |
878 | device->getBlitter()->clear((void*)(&color.depth), VK_FORMAT_D32_SFLOAT, this, format, depthSubresourceRange); |
879 | } |
880 | |
881 | if(subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) |
882 | { |
883 | VkImageSubresourceRange stencilSubresourceRange = subresourceRange; |
884 | stencilSubresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; |
885 | device->getBlitter()->clear((void*)(&color.stencil), VK_FORMAT_S8_UINT, this, format, stencilSubresourceRange); |
886 | } |
887 | } |
888 | |
889 | void Image::clear(const VkClearValue& clearValue, const vk::Format& viewFormat, const VkRect2D& renderArea, const VkImageSubresourceRange& subresourceRange) |
890 | { |
891 | if(!((subresourceRange.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) || |
892 | (subresourceRange.aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | |
893 | VK_IMAGE_ASPECT_STENCIL_BIT)))) |
894 | { |
895 | UNIMPLEMENTED("subresourceRange" ); |
896 | } |
897 | |
898 | if(subresourceRange.aspectMask == VK_IMAGE_ASPECT_COLOR_BIT) |
899 | { |
900 | clear((void*)(clearValue.color.float32), getClearFormat(), viewFormat, subresourceRange, renderArea); |
901 | } |
902 | else |
903 | { |
904 | if(subresourceRange.aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) |
905 | { |
906 | VkImageSubresourceRange depthSubresourceRange = subresourceRange; |
907 | depthSubresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; |
908 | clear((void*)(&clearValue.depthStencil.depth), VK_FORMAT_D32_SFLOAT, viewFormat, depthSubresourceRange, renderArea); |
909 | } |
910 | |
911 | if(subresourceRange.aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) |
912 | { |
913 | VkImageSubresourceRange stencilSubresourceRange = subresourceRange; |
914 | stencilSubresourceRange.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT; |
915 | clear((void*)(&clearValue.depthStencil.stencil), VK_FORMAT_S8_UINT, viewFormat, stencilSubresourceRange, renderArea); |
916 | } |
917 | } |
918 | } |
919 | |
// Performs any work required before the given subresources can be sampled:
// decompresses ETC2/EAC data into the decompressed side image (sampling cannot
// read compressed formats directly), then updates cube-map borders.
void Image::prepareForSampling(const VkImageSubresourceRange& subresourceRange)
{
	// Decompress first, so that border updates below operate on decoded texels.
	if(decompressedImage)
	{
		switch(format)
		{
		case VK_FORMAT_EAC_R11_UNORM_BLOCK:
		case VK_FORMAT_EAC_R11_SNORM_BLOCK:
		case VK_FORMAT_EAC_R11G11_UNORM_BLOCK:
		case VK_FORMAT_EAC_R11G11_SNORM_BLOCK:
		case VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK:
		case VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK:
		case VK_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK:
		case VK_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK:
		case VK_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK:
		case VK_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK:
			decodeETC2(subresourceRange);
			break;
		default:
			// Other formats need no decode step here.
			break;
		}
	}

	// Cube maps keep a border of texels around each face for seamless filtering;
	// refresh those borders for every selected mip level, six faces at a time.
	if(isCube() && (arrayLayers >= 6))
	{
		VkImageSubresourceLayers subresourceLayers =
		{
			subresourceRange.aspectMask,
			subresourceRange.baseMipLevel,
			subresourceRange.baseArrayLayer,
			6
		};
		uint32_t lastMipLevel = getLastMipLevel(subresourceRange);
		for(; subresourceLayers.mipLevel <= lastMipLevel; subresourceLayers.mipLevel++)
		{
			// Step through complete cubes (6 layers each), starting at layer 0 for each level.
			for(subresourceLayers.baseArrayLayer = 0;
				subresourceLayers.baseArrayLayer < arrayLayers;
				subresourceLayers.baseArrayLayer += 6)
			{
				// Borders must be updated on the image that will actually be sampled.
				device->getBlitter()->updateBorders(decompressedImage ? decompressedImage : this, subresourceLayers);
			}
		}
	}
}
964 | |
965 | void Image::decodeETC2(const VkImageSubresourceRange& subresourceRange) const |
966 | { |
967 | ASSERT(decompressedImage); |
968 | |
969 | ETC_Decoder::InputType inputType = GetInputType(format); |
970 | |
971 | uint32_t lastLayer = getLastLayerIndex(subresourceRange); |
972 | uint32_t lastMipLevel = getLastMipLevel(subresourceRange); |
973 | |
974 | int bytes = decompressedImage->format.bytes(); |
975 | bool fakeAlpha = (format == VK_FORMAT_ETC2_R8G8B8_UNORM_BLOCK) || (format == VK_FORMAT_ETC2_R8G8B8_SRGB_BLOCK); |
976 | size_t sizeToWrite = 0; |
977 | |
978 | VkImageSubresourceLayers subresourceLayers = { subresourceRange.aspectMask, subresourceRange.baseMipLevel, subresourceRange.baseArrayLayer, 1 }; |
979 | for(; subresourceLayers.baseArrayLayer <= lastLayer; subresourceLayers.baseArrayLayer++) |
980 | { |
981 | for(; subresourceLayers.mipLevel <= lastMipLevel; subresourceLayers.mipLevel++) |
982 | { |
983 | VkExtent3D mipLevelExtent = getMipLevelExtent(static_cast<VkImageAspectFlagBits>(subresourceLayers.aspectMask), subresourceLayers.mipLevel); |
984 | |
985 | int pitchB = decompressedImage->rowPitchBytes(VK_IMAGE_ASPECT_COLOR_BIT, subresourceLayers.mipLevel); |
986 | |
987 | if(fakeAlpha) |
988 | { |
989 | // To avoid overflow in case of cube textures, which are offset in memory to account for the border, |
990 | // compute the size from the first pixel to the last pixel, excluding any padding or border before |
991 | // the first pixel or after the last pixel. |
992 | sizeToWrite = ((mipLevelExtent.height - 1) * pitchB) + (mipLevelExtent.width * bytes); |
993 | } |
994 | |
995 | for(int32_t depth = 0; depth < static_cast<int32_t>(mipLevelExtent.depth); depth++) |
996 | { |
997 | uint8_t* source = static_cast<uint8_t*>(getTexelPointer({ 0, 0, depth }, subresourceLayers)); |
998 | uint8_t* dest = static_cast<uint8_t*>(decompressedImage->getTexelPointer({ 0, 0, depth }, subresourceLayers)); |
999 | |
1000 | if(fakeAlpha) |
1001 | { |
1002 | ASSERT((dest + sizeToWrite) < decompressedImage->end()); |
1003 | memset(dest, 0xFF, sizeToWrite); |
1004 | } |
1005 | |
1006 | ETC_Decoder::Decode(source, dest, mipLevelExtent.width, mipLevelExtent.height, |
1007 | mipLevelExtent.width, mipLevelExtent.height, pitchB, bytes, inputType); |
1008 | } |
1009 | } |
1010 | } |
1011 | } |
1012 | |
1013 | } // namespace vk |
1014 | |