// Copyright 2018 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "VkCommandBuffer.hpp"
#include "VkFence.hpp"
#include "VkQueue.hpp"
#include "VkSemaphore.hpp"
#include "WSI/VkSwapchainKHR.hpp"
#include "Device/Renderer.hpp"

#include "marl/defer.h"
#include "marl/scheduler.h"
#include "marl/thread.h"
#include "marl/trace.h"

#include <cstring>

namespace
{

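// DeepCopySubmitInfo() clones the caller's VkSubmitInfo array, together with the
// wait semaphore, wait stage mask, signal semaphore and command buffer arrays that
// each entry points to, into one contiguous allocation. This lets vkQueueSubmit()
// return immediately while the worker thread keeps a valid copy, and lets the copy
// be released later with a single vk::deallocate() call.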
VkSubmitInfo* DeepCopySubmitInfo(uint32_t submitCount, const VkSubmitInfo* pSubmits)
{
	size_t submitSize = sizeof(VkSubmitInfo) * submitCount;
	size_t totalSize = submitSize;
	for(uint32_t i = 0; i < submitCount; i++)
	{
		totalSize += pSubmits[i].waitSemaphoreCount * sizeof(VkSemaphore);
		totalSize += pSubmits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags);
		totalSize += pSubmits[i].signalSemaphoreCount * sizeof(VkSemaphore);
		totalSize += pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer);
	}

	uint8_t* mem = static_cast<uint8_t*>(
		vk::allocate(totalSize, vk::REQUIRED_MEMORY_ALIGNMENT, vk::DEVICE_MEMORY, vk::Fence::GetAllocationScope()));

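	// Construct the VkSubmitInfo array at the front of the allocation, then copy the
	// caller's structs over it; VkSubmitInfo is trivially copyable, so memcpy is safe.
	// The embedded pointers still reference caller memory and are fixed up below to
	// point into the trailing storage of this same allocation.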
	auto submits = new (mem) VkSubmitInfo[submitCount];
	memcpy(mem, pSubmits, submitSize);
	mem += submitSize;

	for(uint32_t i = 0; i < submitCount; i++)
	{
		size_t size = pSubmits[i].waitSemaphoreCount * sizeof(VkSemaphore);
		submits[i].pWaitSemaphores = reinterpret_cast<const VkSemaphore*>(mem);
		memcpy(mem, pSubmits[i].pWaitSemaphores, size);
		mem += size;

		size = pSubmits[i].waitSemaphoreCount * sizeof(VkPipelineStageFlags);
		submits[i].pWaitDstStageMask = reinterpret_cast<const VkPipelineStageFlags*>(mem);
		memcpy(mem, pSubmits[i].pWaitDstStageMask, size);
		mem += size;

		size = pSubmits[i].signalSemaphoreCount * sizeof(VkSemaphore);
		submits[i].pSignalSemaphores = reinterpret_cast<const VkSemaphore*>(mem);
		memcpy(mem, pSubmits[i].pSignalSemaphores, size);
		mem += size;

		size = pSubmits[i].commandBufferCount * sizeof(VkCommandBuffer);
		submits[i].pCommandBuffers = reinterpret_cast<const VkCommandBuffer*>(mem);
		memcpy(mem, pSubmits[i].pCommandBuffers, size);
		mem += size;
	}

	return submits;
}

} // anonymous namespace

namespace vk
{

Queue::Queue(Device* device, marl::Scheduler* scheduler) : device(device)
{
	queueThread = std::thread(&Queue::taskLoop, this, scheduler);
}

Queue::~Queue()
{
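	// Unblock the worker thread with a KILL_THREAD task and wait for it to exit.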
	Task task;
	task.type = Task::KILL_THREAD;
	pending.put(task);

	queueThread.join();
	ASSERT_MSG(pending.count() == 0, "queue has work after worker thread shutdown");

	garbageCollect();
}

VkResult Queue::submit(uint32_t submitCount, const VkSubmitInfo* pSubmits, Fence* fence)
{
	garbageCollect();

	Task task;
	task.submitCount = submitCount;
	task.pSubmits = DeepCopySubmitInfo(submitCount, pSubmits);
	task.events = fence;

	if(task.events)
	{
		task.events->start();
	}

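	// The worker thread takes ownership of the deep-copied submit info; once
	// processed, it is queued on toDelete and freed by garbageCollect().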
	pending.put(task);

	return VK_SUCCESS;
}

void Queue::submitQueue(const Task& task)
{
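	// Create the renderer on first use rather than in the constructor, so a queue
	// that never receives work doesn't pay for one.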
	if(renderer == nullptr)
	{
		renderer.reset(new sw::Renderer(device));
	}

	for(uint32_t i = 0; i < task.submitCount; i++)
	{
		auto& submitInfo = task.pSubmits[i];
		for(uint32_t j = 0; j < submitInfo.waitSemaphoreCount; j++)
		{
			vk::Cast(submitInfo.pWaitSemaphores[j])->wait(submitInfo.pWaitDstStageMask[j]);
		}

		{
			CommandBuffer::ExecutionState executionState;
			executionState.renderer = renderer.get();
			executionState.events = task.events;
			for(uint32_t j = 0; j < submitInfo.commandBufferCount; j++)
			{
				vk::Cast(submitInfo.pCommandBuffers[j])->submit(executionState);
			}
		}

		for(uint32_t j = 0; j < submitInfo.signalSemaphoreCount; j++)
		{
			vk::Cast(submitInfo.pSignalSemaphores[j])->signal();
		}
	}

	if(task.pSubmits)
	{
		toDelete.put(task.pSubmits);
	}

	if(task.events)
	{
		// TODO: fix renderer signaling so that work submitted separately from (but before) a fence
		// is guaranteed complete by the time the fence signals.
		renderer->synchronize();
		task.events->finish();
	}
}

void Queue::taskLoop(marl::Scheduler* scheduler)
{
	marl::Thread::setName("Queue<%p>", this);
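	// Bind the marl scheduler to this thread for the lifetime of the loop, so the
	// work submitted below can use marl synchronization primitives cooperatively.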
	scheduler->bind();
	defer(scheduler->unbind());

	while(true)
	{
		Task task = pending.take();

		switch(task.type)
		{
		case Task::KILL_THREAD:
			ASSERT_MSG(pending.count() == 0, "queue has remaining work!");
			return;
		case Task::SUBMIT_QUEUE:
			submitQueue(task);
			break;
		default:
			UNIMPLEMENTED("task.type %d", static_cast<int>(task.type));
			break;
		}
	}
}

VkResult Queue::waitIdle()
{
	// Wait for task queue to flush.
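	// Task::type defaults to SUBMIT_QUEUE (see the Task declaration in VkQueue.hpp),
	// so this otherwise-empty task is processed in order behind all previously queued
	// work. submitQueue() synchronizes the renderer and then finishes the events,
	// signaling the WaitGroup.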
	sw::WaitGroup wg;
	wg.add();

	Task task;
	task.events = &wg;
	pending.put(task);

	wg.wait();

	garbageCollect();

	return VK_SUCCESS;
}

void Queue::garbageCollect()
{
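	// Drain toDelete, freeing the deep-copied submit info of completed tasks.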
	while(true)
	{
		auto v = toDelete.tryTake();
		if(!v.second) { break; }
		vk::deallocate(v.first, DEVICE_MEMORY);
	}
}

#ifndef __ANDROID__
VkResult Queue::present(const VkPresentInfoKHR* presentInfo)
{
	// This is a hack to deal with screen tearing for now.
	// Need to correctly implement threading using VkSemaphore
	// to get rid of it. b/132458423
	waitIdle();

	for(uint32_t i = 0; i < presentInfo->waitSemaphoreCount; i++)
	{
		vk::Cast(presentInfo->pWaitSemaphores[i])->wait();
	}

	for(uint32_t i = 0; i < presentInfo->swapchainCount; i++)
	{
		VkResult result = vk::Cast(presentInfo->pSwapchains[i])->present(presentInfo->pImageIndices[i]);
		if(result != VK_SUCCESS)
		{
			return result;
		}
	}

	return VK_SUCCESS;
}
#endif

} // namespace vk