// Copyright 2018 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "VkDevice.hpp"

#include "VkConfig.h"
#include "VkDebug.hpp"
#include "VkDescriptorSetLayout.hpp"
#include "VkFence.hpp"
#include "VkQueue.hpp"
#include "Device/Blitter.hpp"

#include <chrono>
#include <climits>
#include <new> // Must #include this to use "placement new"

namespace
{
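	// Returns the current time as a nanosecond-resolution time point. Used by
	// Device::waitForFences() below to compute an absolute wait deadline.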
	std::chrono::time_point<std::chrono::system_clock, std::chrono::nanoseconds> now()
	{
		return std::chrono::time_point_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now());
	}
}

namespace vk
{

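// SamplingRoutineCache maps sampling state keys to JIT-compiled sampling
// routines, so that a routine can be reused rather than recompiled when the
// same sampling state is encountered again.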
std::shared_ptr<rr::Routine> Device::SamplingRoutineCache::query(const vk::Device::SamplingRoutineCache::Key& key) const
{
	return cache.query(key);
}

void Device::SamplingRoutineCache::add(const vk::Device::SamplingRoutineCache::Key& key, const std::shared_ptr<rr::Routine>& routine)
{
	ASSERT(routine);
	cache.add(key, routine);
}

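// The "const cache" is a read-only snapshot of the routine cache which can be
// queried without holding the cache mutex; updateConstCache() refreshes the
// snapshot from the main cache (see getSamplingRoutineCacheMutex() below).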
rr::Routine* Device::SamplingRoutineCache::queryConst(const vk::Device::SamplingRoutineCache::Key& key) const
{
	return cache.queryConstCache(key).get();
}

void Device::SamplingRoutineCache::updateConstCache()
{
	cache.updateConstCache();
}

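// 'mem' is a single allocation sized by ComputeRequiredAllocationSize(): the
// Queue objects are constructed at its start, and the enabled extension names
// are stored immediately after them.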
Device::Device(const VkDeviceCreateInfo* pCreateInfo, void* mem, PhysicalDevice *physicalDevice, const VkPhysicalDeviceFeatures *enabledFeatures, const std::shared_ptr<marl::Scheduler>& scheduler)
	: physicalDevice(physicalDevice),
	  queues(reinterpret_cast<Queue*>(mem)),
	  enabledExtensionCount(pCreateInfo->enabledExtensionCount),
	  enabledFeatures(enabledFeatures ? *enabledFeatures : VkPhysicalDeviceFeatures{}), // "Setting pEnabledFeatures to NULL and not including a VkPhysicalDeviceFeatures2 in the pNext member of VkDeviceCreateInfo is equivalent to setting all members of the structure to VK_FALSE."
	  scheduler(scheduler)
{
	for(uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
	{
		const VkDeviceQueueCreateInfo& queueCreateInfo = pCreateInfo->pQueueCreateInfos[i];
		queueCount += queueCreateInfo.queueCount;
	}

	uint32_t queueID = 0;
	for(uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
	{
		const VkDeviceQueueCreateInfo& queueCreateInfo = pCreateInfo->pQueueCreateInfos[i];

		for(uint32_t j = 0; j < queueCreateInfo.queueCount; j++, queueID++)
		{
			new (&queues[queueID]) Queue(this, scheduler.get());
		}
	}

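	// Extension names are copied into fixed-size buffers placed directly after
	// the queues. strncpy() copies at most VK_MAX_EXTENSION_NAME_SIZE bytes;
	// Vulkan extension names are defined to fit within that size, null
	// terminator included.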
	extensions = reinterpret_cast<ExtensionName*>(static_cast<uint8_t*>(mem) + (sizeof(Queue) * queueCount));
	for(uint32_t i = 0; i < enabledExtensionCount; i++)
	{
		strncpy(extensions[i], pCreateInfo->ppEnabledExtensionNames[i], VK_MAX_EXTENSION_NAME_SIZE);
	}

	if(pCreateInfo->enabledLayerCount)
	{
		// "The ppEnabledLayerNames and enabledLayerCount members of VkDeviceCreateInfo are deprecated and their values must be ignored by implementations."
		UNIMPLEMENTED("enabledLayerCount"); // TODO(b/119321052): UNIMPLEMENTED() should be used only for features that must still be implemented. Use a more informational macro here.
	}

	// FIXME (b/119409619): use an allocator here so we can control all memory allocations
	blitter.reset(new sw::Blitter());
	samplingRoutineCache.reset(new SamplingRoutineCache());
}

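// Runs the Queue destructors in place, then releases the single allocation
// backing both the queues and the extension name array.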
void Device::destroy(const VkAllocationCallbacks* pAllocator)
{
	for(uint32_t i = 0; i < queueCount; i++)
	{
		queues[i].~Queue();
	}

	vk::deallocate(queues, pAllocator);
}

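// Computes the size of the single allocation passed to the constructor as
// 'mem': the Queue objects followed by the extension name array. This must
// match the layout assumed by the constructor.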
size_t Device::ComputeRequiredAllocationSize(const VkDeviceCreateInfo* pCreateInfo)
{
	uint32_t queueCount = 0;
	for(uint32_t i = 0; i < pCreateInfo->queueCreateInfoCount; i++)
	{
		queueCount += pCreateInfo->pQueueCreateInfos[i].queueCount;
	}

	return (sizeof(Queue) * queueCount) + (pCreateInfo->enabledExtensionCount * sizeof(ExtensionName));
}

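// A linear scan over the enabled extension list suffices here, since a device
// enables only a small number of extensions.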
bool Device::hasExtension(const char* extensionName) const
{
	for(uint32_t i = 0; i < enabledExtensionCount; i++)
	{
		if(strncmp(extensions[i], extensionName, VK_MAX_EXTENSION_NAME_SIZE) == 0)
		{
			return true;
		}
	}
	return false;
}

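// SwiftShader exposes a single queue family, so only queueFamilyIndex 0 is
// valid here.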
VkQueue Device::getQueue(uint32_t queueFamilyIndex, uint32_t queueIndex) const
{
	ASSERT(queueFamilyIndex == 0);

	return queues[queueIndex];
}

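// Implements vkWaitForFences(). 'timeout' is in nanoseconds. Any timeout too
// large to be represented as an absolute deadline (including the spec's
// UINT64_MAX "wait indefinitely" value) is treated as infinite.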
VkResult Device::waitForFences(uint32_t fenceCount, const VkFence* pFences, VkBool32 waitAll, uint64_t timeout)
{
	using time_point = std::chrono::time_point<std::chrono::system_clock, std::chrono::nanoseconds>;
	const time_point start = now();
	const uint64_t max_timeout = (LLONG_MAX - start.time_since_epoch().count());
	bool infiniteTimeout = (timeout > max_timeout);
	const time_point end_ns = start + std::chrono::nanoseconds(std::min(max_timeout, timeout));

	if(waitAll != VK_FALSE) // All fences must be signaled
	{
		for(uint32_t i = 0; i < fenceCount; i++)
		{
			if(timeout == 0)
			{
				if(Cast(pFences[i])->getStatus() != VK_SUCCESS) // At least one fence is not signaled
				{
					return VK_TIMEOUT;
				}
			}
			else if(infiniteTimeout)
			{
				if(Cast(pFences[i])->wait() != VK_SUCCESS) // At least one fence is not signaled
				{
					return VK_TIMEOUT;
				}
			}
			else
			{
				if(Cast(pFences[i])->wait(end_ns) != VK_SUCCESS) // At least one fence is not signaled
				{
					return VK_TIMEOUT;
				}
			}
		}

		return VK_SUCCESS;
	}
	else // At least one fence must be signaled
	{
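		// Gather each fence's event and wait until any one of them signals.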
		marl::containers::vector<marl::Event, 8> events;
		for(uint32_t i = 0; i < fenceCount; i++)
		{
			events.push_back(Cast(pFences[i])->getEvent());
		}

		auto any = marl::Event::any(events.begin(), events.end());

		if(timeout == 0)
		{
			return any.isSignalled() ? VK_SUCCESS : VK_TIMEOUT;
		}
		else if(infiniteTimeout)
		{
			any.wait();
			return VK_SUCCESS;
		}
		else
		{
			return any.wait_until(end_ns) ? VK_SUCCESS : VK_TIMEOUT;
		}
	}
}

VkResult Device::waitIdle()
{
	for(uint32_t i = 0; i < queueCount; i++)
	{
		queues[i].waitIdle();
	}

	return VK_SUCCESS;
}

void Device::getDescriptorSetLayoutSupport(const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
                                           VkDescriptorSetLayoutSupport* pSupport) const
{
	// Mark everything as unsupported
	pSupport->supported = VK_FALSE;
}

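// Per the Vulkan spec, the writes described by pDescriptorWrites are
// performed first, followed by the copies described by pDescriptorCopies.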
void Device::updateDescriptorSets(uint32_t descriptorWriteCount, const VkWriteDescriptorSet* pDescriptorWrites,
                                  uint32_t descriptorCopyCount, const VkCopyDescriptorSet* pDescriptorCopies)
{
	for(uint32_t i = 0; i < descriptorWriteCount; i++)
	{
		DescriptorSetLayout::WriteDescriptorSet(this, pDescriptorWrites[i]);
	}

	for(uint32_t i = 0; i < descriptorCopyCount; i++)
	{
		DescriptorSetLayout::CopyDescriptorSet(pDescriptorCopies[i]);
	}
}

Device::SamplingRoutineCache* Device::getSamplingRoutineCache() const
{
	return samplingRoutineCache.get();
}

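// Lock-free lookup in the const snapshot of the sampling routine cache. On a
// miss, callers are expected to take getSamplingRoutineCacheMutex() and query
// the main cache instead.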
rr::Routine* Device::findInConstCache(const SamplingRoutineCache::Key& key) const
{
	return samplingRoutineCache->queryConst(key);
}

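// Refreshes the const snapshot from the main cache, under the cache mutex.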
void Device::updateSamplingRoutineConstCache()
{
	std::unique_lock<std::mutex> lock(samplingRoutineCacheMutex);
	samplingRoutineCache->updateConstCache();
}

std::mutex& Device::getSamplingRoutineCacheMutex()
{
	return samplingRoutineCacheMutex;
}

} // namespace vk