| 1 | // Copyright 2018 The SwiftShader Authors. All Rights Reserved. |
| 2 | // |
| 3 | // Licensed under the Apache License, Version 2.0 (the "License"); |
| 4 | // you may not use this file except in compliance with the License. |
| 5 | // You may obtain a copy of the License at |
| 6 | // |
| 7 | // http://www.apache.org/licenses/LICENSE-2.0 |
| 8 | // |
| 9 | // Unless required by applicable law or agreed to in writing, software |
| 10 | // distributed under the License is distributed on an "AS IS" BASIS, |
| 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 12 | // See the License for the specific language governing permissions and |
| 13 | // limitations under the License. |
| 14 | |
| 15 | #include "VkBuffer.hpp" |
| 16 | #include "VkConfig.h" |
| 17 | #include "VkDeviceMemory.hpp" |
| 18 | |
| 19 | #include <cstring> |
| 20 | |
| 21 | namespace vk |
| 22 | { |
| 23 | |
| 24 | Buffer::Buffer(const VkBufferCreateInfo* pCreateInfo, void* mem) : |
| 25 | flags(pCreateInfo->flags), size(pCreateInfo->size), usage(pCreateInfo->usage), |
| 26 | sharingMode(pCreateInfo->sharingMode) |
| 27 | { |
| 28 | if(pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) |
| 29 | { |
| 30 | queueFamilyIndexCount = pCreateInfo->queueFamilyIndexCount; |
| 31 | queueFamilyIndices = reinterpret_cast<uint32_t*>(mem); |
| 32 | memcpy(queueFamilyIndices, pCreateInfo->pQueueFamilyIndices, sizeof(uint32_t) * queueFamilyIndexCount); |
| 33 | } |
| 34 | |
| 35 | const auto* nextInfo = reinterpret_cast<const VkBaseInStructure*>(pCreateInfo->pNext); |
| 36 | for (; nextInfo != nullptr; nextInfo = nextInfo->pNext) |
| 37 | { |
| 38 | if (nextInfo->sType == VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO) |
| 39 | { |
| 40 | const auto* externalInfo = reinterpret_cast<const VkExternalMemoryBufferCreateInfo*>(nextInfo); |
| 41 | supportedExternalMemoryHandleTypes = externalInfo->handleTypes; |
| 42 | } |
| 43 | } |
| 44 | } |
| 45 | |
// Releases the side allocation made for the queue family index list.
// Safe when no concurrent sharing was requested: queueFamilyIndices is
// then null and vk::deallocate is expected to tolerate that — TODO confirm.
void Buffer::destroy(const VkAllocationCallbacks* pAllocator)
{
	vk::deallocate(queueFamilyIndices, pAllocator);
}
| 50 | |
| 51 | size_t Buffer::ComputeRequiredAllocationSize(const VkBufferCreateInfo* pCreateInfo) |
| 52 | { |
| 53 | return (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT) ? sizeof(uint32_t) * pCreateInfo->queueFamilyIndexCount : 0; |
| 54 | } |
| 55 | |
| 56 | const VkMemoryRequirements Buffer::getMemoryRequirements() const |
| 57 | { |
| 58 | VkMemoryRequirements memoryRequirements = {}; |
| 59 | if(usage & (VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT)) |
| 60 | { |
| 61 | memoryRequirements.alignment = vk::MIN_TEXEL_BUFFER_OFFSET_ALIGNMENT; |
| 62 | } |
| 63 | else if(usage & VK_BUFFER_USAGE_STORAGE_BUFFER_BIT) |
| 64 | { |
| 65 | memoryRequirements.alignment = vk::MIN_STORAGE_BUFFER_OFFSET_ALIGNMENT; |
| 66 | } |
| 67 | else if(usage & VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) |
| 68 | { |
| 69 | memoryRequirements.alignment = vk::MIN_UNIFORM_BUFFER_OFFSET_ALIGNMENT; |
| 70 | } |
| 71 | else |
| 72 | { |
| 73 | memoryRequirements.alignment = REQUIRED_MEMORY_ALIGNMENT; |
| 74 | } |
| 75 | memoryRequirements.memoryTypeBits = vk::MEMORY_TYPE_GENERIC_BIT; |
| 76 | memoryRequirements.size = size; // TODO: also reserve space for a header containing |
| 77 | // the size of the buffer (for robust buffer access) |
| 78 | return memoryRequirements; |
| 79 | } |
| 80 | |
// Returns true if 'pDeviceMemory' is compatible with the external memory
// handle types this buffer was created to support.
bool Buffer::canBindToMemory(DeviceMemory* pDeviceMemory) const
{
	return pDeviceMemory->checkExternalMemoryHandleType(supportedExternalMemoryHandleTypes);
}
| 85 | |
// Binds this buffer to device memory by caching a raw pointer to its
// backing storage at the given byte offset. No ownership is taken; the
// memory object must outlive the buffer binding.
void Buffer::bind(DeviceMemory* pDeviceMemory, VkDeviceSize pMemoryOffset)
{
	memory = pDeviceMemory->getOffsetPointer(pMemoryOffset);
}
| 90 | |
| 91 | void Buffer::copyFrom(const void* srcMemory, VkDeviceSize pSize, VkDeviceSize pOffset) |
| 92 | { |
| 93 | ASSERT((pSize + pOffset) <= size); |
| 94 | |
| 95 | memcpy(getOffsetPointer(pOffset), srcMemory, pSize); |
| 96 | } |
| 97 | |
| 98 | void Buffer::copyTo(void* dstMemory, VkDeviceSize pSize, VkDeviceSize pOffset) const |
| 99 | { |
| 100 | ASSERT((pSize + pOffset) <= size); |
| 101 | |
| 102 | memcpy(dstMemory, getOffsetPointer(pOffset), pSize); |
| 103 | } |
| 104 | |
| 105 | void Buffer::copyTo(Buffer* dstBuffer, const VkBufferCopy& pRegion) const |
| 106 | { |
| 107 | copyTo(dstBuffer->getOffsetPointer(pRegion.dstOffset), pRegion.size, pRegion.srcOffset); |
| 108 | } |
| 109 | |
| 110 | void Buffer::fill(VkDeviceSize dstOffset, VkDeviceSize fillSize, uint32_t data) |
| 111 | { |
| 112 | size_t bytes = (fillSize == VK_WHOLE_SIZE) ? (size - dstOffset) : fillSize; |
| 113 | |
| 114 | ASSERT((bytes + dstOffset) <= size); |
| 115 | |
| 116 | uint32_t* memToWrite = static_cast<uint32_t*>(getOffsetPointer(dstOffset)); |
| 117 | |
| 118 | // Vulkan 1.1 spec: "If VK_WHOLE_SIZE is used and the remaining size of the buffer is |
| 119 | // not a multiple of 4, then the nearest smaller multiple is used." |
| 120 | for(; bytes >= 4; bytes -= 4, memToWrite++) |
| 121 | { |
| 122 | *memToWrite = data; |
| 123 | } |
| 124 | } |
| 125 | |
| 126 | void Buffer::update(VkDeviceSize dstOffset, VkDeviceSize dataSize, const void* pData) |
| 127 | { |
| 128 | ASSERT((dataSize + dstOffset) <= size); |
| 129 | |
| 130 | memcpy(getOffsetPointer(dstOffset), pData, dataSize); |
| 131 | } |
| 132 | |
| 133 | void* Buffer::getOffsetPointer(VkDeviceSize offset) const |
| 134 | { |
| 135 | return reinterpret_cast<uint8_t*>(memory) + offset; |
| 136 | } |
| 137 | |
| 138 | uint8_t* Buffer::end() const |
| 139 | { |
| 140 | return reinterpret_cast<uint8_t*>(getOffsetPointer(size + 1)); |
| 141 | } |
| 142 | |
| 143 | } // namespace vk |
| 144 | |