// Copyright 2019 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "VkPipelineCache.hpp"
#include <cstring>

namespace vk
{

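// SpecializationInfo deep-copies the given VkSpecializationInfo, including its
// map entries and data blob, so the key remains valid after the caller's
// structure goes out of scope. A null pointer yields an empty (null) info.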
PipelineCache::SpirvShaderKey::SpecializationInfo::SpecializationInfo(const VkSpecializationInfo* specializationInfo)
{
    if(specializationInfo)
    {
        auto ptr = reinterpret_cast<VkSpecializationInfo*>(
            allocate(sizeof(VkSpecializationInfo), REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY));

        info = std::shared_ptr<VkSpecializationInfo>(ptr, Deleter());

        info->mapEntryCount = specializationInfo->mapEntryCount;
        if(specializationInfo->mapEntryCount > 0)
        {
            size_t entriesSize = specializationInfo->mapEntryCount * sizeof(VkSpecializationMapEntry);
            VkSpecializationMapEntry* mapEntries = reinterpret_cast<VkSpecializationMapEntry*>(
                allocate(entriesSize, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY));
            memcpy(mapEntries, specializationInfo->pMapEntries, entriesSize);
            info->pMapEntries = mapEntries;
        }
        else
        {
            info->pMapEntries = nullptr;  // Don't leave this uninitialized for the Deleter to free.
        }

        info->dataSize = specializationInfo->dataSize;
        if(specializationInfo->dataSize > 0)
        {
            void* data = allocate(specializationInfo->dataSize, REQUIRED_MEMORY_ALIGNMENT, DEVICE_MEMORY);
            memcpy(data, specializationInfo->pData, specializationInfo->dataSize);
            info->pData = data;
        }
        else
        {
            info->pData = nullptr;
        }
    }
}

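// The Deleter frees the deep copies made by the constructor above. It is
// assumed that deallocate() tolerates null pointers, since pMapEntries and
// pData may never have been allocated.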
void PipelineCache::SpirvShaderKey::SpecializationInfo::Deleter::operator()(VkSpecializationInfo* info) const
{
    if(info)
    {
        deallocate(const_cast<VkSpecializationMapEntry*>(info->pMapEntries), DEVICE_MEMORY);
        deallocate(const_cast<void*>(info->pData), DEVICE_MEMORY);
        deallocate(info, DEVICE_MEMORY);
    }
}

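// Orders two SpecializationInfo objects by comparing entry counts and data
// sizes first, then the raw bytes of their contents. Note that when both infos
// are non-null and byte-for-byte identical, control still falls through to the
// final pointer comparison, so two distinct deep copies with equal contents do
// not compare as equivalent keys.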
bool PipelineCache::SpirvShaderKey::SpecializationInfo::operator<(const SpecializationInfo& specializationInfo) const
{
    if(info && specializationInfo.info)
    {
        if(info->mapEntryCount != specializationInfo.info->mapEntryCount)
        {
            return info->mapEntryCount < specializationInfo.info->mapEntryCount;
        }

        if(info->dataSize != specializationInfo.info->dataSize)
        {
            return info->dataSize < specializationInfo.info->dataSize;
        }

        if(info->mapEntryCount > 0)
        {
            int cmp = memcmp(info->pMapEntries, specializationInfo.info->pMapEntries, info->mapEntryCount * sizeof(VkSpecializationMapEntry));
            if(cmp != 0)
            {
                return cmp < 0;
            }
        }

        if(info->dataSize > 0)
        {
            int cmp = memcmp(info->pData, specializationInfo.info->pData, info->dataSize);
            if(cmp != 0)
            {
                return cmp < 0;
            }
        }
    }

    return (info < specializationInfo.info);
}

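// SpirvShaderKey identifies a shader variant: the same SPIR-V module can
// produce different sw::SpirvShader objects depending on the pipeline stage,
// entry point, render pass, subpass, and specialization constants.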
PipelineCache::SpirvShaderKey::SpirvShaderKey(const VkShaderStageFlagBits pipelineStage,
                                              const std::string& entryPointName,
                                              const std::vector<uint32_t>& insns,
                                              const vk::RenderPass *renderPass,
                                              const uint32_t subpassIndex,
                                              const VkSpecializationInfo* specializationInfo) :
    pipelineStage(pipelineStage),
    entryPointName(entryPointName),
    insns(insns),
    renderPass(renderPass),
    subpassIndex(subpassIndex),
    specializationInfo(specializationInfo)
{
}

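// Lexicographic ordering over the key's fields, comparing cheap scalar fields
// and sizes before falling back to byte-wise comparison of the entry point
// name and the SPIR-V instruction stream.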
bool PipelineCache::SpirvShaderKey::operator<(const SpirvShaderKey &other) const
{
    if(pipelineStage != other.pipelineStage)
    {
        return pipelineStage < other.pipelineStage;
    }

    if(renderPass != other.renderPass)
    {
        return renderPass < other.renderPass;
    }

    if(subpassIndex != other.subpassIndex)
    {
        return subpassIndex < other.subpassIndex;
    }

    if(insns.size() != other.insns.size())
    {
        return insns.size() < other.insns.size();
    }

    if(entryPointName.size() != other.entryPointName.size())
    {
        return entryPointName.size() < other.entryPointName.size();
    }

    int cmp = memcmp(entryPointName.c_str(), other.entryPointName.c_str(), entryPointName.size());
    if(cmp != 0)
    {
        return cmp < 0;
    }

    cmp = memcmp(insns.data(), other.insns.data(), insns.size() * sizeof(uint32_t));
    if(cmp != 0)
    {
        return cmp < 0;
    }

    return (specializationInfo < other.specializationInfo);
}

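// The cache's backing store begins with the header layout mandated by the
// Vulkan spec for vkGetPipelineCacheData (header length, header version,
// vendor and device IDs, and the pipeline cache UUID), followed by any
// initial data supplied by the application.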
PipelineCache::PipelineCache(const VkPipelineCacheCreateInfo* pCreateInfo, void* mem) :
    dataSize(ComputeRequiredAllocationSize(pCreateInfo)), data(reinterpret_cast<uint8_t*>(mem))
{
    CacheHeader* header = reinterpret_cast<CacheHeader*>(mem);
    header->headerLength = sizeof(CacheHeader);
    header->headerVersion = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
    header->vendorID = VENDOR_ID;
    header->deviceID = DEVICE_ID;
    memcpy(header->pipelineCacheUUID, SWIFTSHADER_UUID, VK_UUID_SIZE);

    if(pCreateInfo->pInitialData && (pCreateInfo->initialDataSize > 0))
    {
        memcpy(data + sizeof(CacheHeader), pCreateInfo->pInitialData, pCreateInfo->initialDataSize);
    }
}

PipelineCache::~PipelineCache()
{
    spirvShaders.clear();
    computePrograms.clear();
}

void PipelineCache::destroy(const VkAllocationCallbacks* pAllocator)
{
    vk::deallocate(data, pAllocator);
}

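// The allocation holds the fixed-size cache header followed by the
// application's initial data, if any.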
size_t PipelineCache::ComputeRequiredAllocationSize(const VkPipelineCacheCreateInfo* pCreateInfo)
{
    return pCreateInfo->initialDataSize + sizeof(CacheHeader);
}

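// Implements vkGetPipelineCacheData: a null pData queries the required size.
// Note that this implementation only copies data out when *pDataSize matches
// the cache's size exactly; any other non-zero size is treated as incomplete
// rather than performing the partial copy the spec describes.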
VkResult PipelineCache::getData(size_t* pDataSize, void* pData)
{
    if(!pData)
    {
        *pDataSize = dataSize;
        return VK_SUCCESS;
    }

    if(*pDataSize != dataSize)
    {
        *pDataSize = 0;
        return VK_INCOMPLETE;
    }

    if(*pDataSize > 0)
    {
        memcpy(pData, data, *pDataSize);
    }

    return VK_SUCCESS;
}

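// Implements vkMergePipelineCaches by copying every source cache's entries
// into this cache. Only the destination's maps are locked; the source caches
// are read without taking their mutexes, which assumes they are not being
// modified concurrently.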
VkResult PipelineCache::merge(uint32_t srcCacheCount, const VkPipelineCache* pSrcCaches)
{
    for(uint32_t i = 0; i < srcCacheCount; i++)
    {
        PipelineCache* srcCache = Cast(pSrcCaches[i]);

        {
            std::unique_lock<std::mutex> lock(spirvShadersMutex);
            spirvShaders.insert(srcCache->spirvShaders.begin(), srcCache->spirvShaders.end());
        }

        {
            std::unique_lock<std::mutex> lock(computeProgramsMutex);
            computePrograms.insert(srcCache->computePrograms.begin(), srcCache->computePrograms.end());
        }
    }

    return VK_SUCCESS;
}

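// The lookup and insert accessors below do not lock; the caller is expected
// to hold spirvShadersMutex (or computeProgramsMutex, respectively) around
// the query and any subsequent insert, so that a lookup-miss-then-insert
// sequence is atomic.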
const std::shared_ptr<sw::SpirvShader>* PipelineCache::operator[](const PipelineCache::SpirvShaderKey& key) const
{
    auto it = spirvShaders.find(key);
    return (it != spirvShaders.end()) ? &(it->second) : nullptr;
}

void PipelineCache::insert(const PipelineCache::SpirvShaderKey& key, const std::shared_ptr<sw::SpirvShader> &shader)
{
    spirvShaders[key] = shader;
}

const std::shared_ptr<sw::ComputeProgram>* PipelineCache::operator[](const PipelineCache::ComputeProgramKey& key) const
{
    auto it = computePrograms.find(key);
    return (it != computePrograms.end()) ? &(it->second) : nullptr;
}

void PipelineCache::insert(const PipelineCache::ComputeProgramKey& key, const std::shared_ptr<sw::ComputeProgram> &computeProgram)
{
    computePrograms[key] = computeProgram;
}

} // namespace vk