/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrBufferAllocPool.h"

#include <algorithm>
#include <cstring>
#include <memory>

#include "include/gpu/GrDirectContext.h"
#include "include/gpu/GrTypes.h"
#include "include/private/SkMacros.h"
#include "src/core/SkSafeMath.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrCpuBuffer.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrGpuBuffer.h"
#include "src/gpu/GrResourceProvider.h"

sk_sp<GrBufferAllocPool::CpuBufferCache> GrBufferAllocPool::CpuBufferCache::Make(
        int maxBuffersToCache) {
    return sk_sp<CpuBufferCache>(new CpuBufferCache(maxBuffersToCache));
}

GrBufferAllocPool::CpuBufferCache::CpuBufferCache(int maxBuffersToCache)
        : fMaxBuffersToCache(maxBuffersToCache) {
    if (fMaxBuffersToCache) {
        fBuffers = std::make_unique<Buffer[]>(fMaxBuffersToCache);
    }
}

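// Hands out a CPU buffer of the requested size. Default-size requests are served from the cache
// when a cached buffer is no longer referenced outside the cache; all other requests (or a full
// cache) get a fresh, uncached buffer. The buffer is zeroed on first use if the caller requires
// initialized memory.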
sk_sp<GrCpuBuffer> GrBufferAllocPool::CpuBufferCache::makeBuffer(size_t size,
                                                                 bool mustBeInitialized) {
    SkASSERT(size > 0);
    Buffer* result = nullptr;
    if (size == kDefaultBufferSize) {
        int i = 0;
        for (; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
            SkASSERT(fBuffers[i].fBuffer->size() == kDefaultBufferSize);
            if (fBuffers[i].fBuffer->unique()) {
                result = &fBuffers[i];
            }
        }
        if (!result && i < fMaxBuffersToCache) {
            fBuffers[i].fBuffer = GrCpuBuffer::Make(size);
            result = &fBuffers[i];
        }
    }
    Buffer tempResult;
    if (!result) {
        tempResult.fBuffer = GrCpuBuffer::Make(size);
        result = &tempResult;
    }
    if (mustBeInitialized && !result->fCleared) {
        result->fCleared = true;
        memset(result->fBuffer->data(), 0, result->fBuffer->size());
    }
    return result->fBuffer;
}

void GrBufferAllocPool::CpuBufferCache::releaseAll() {
    for (int i = 0; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
        fBuffers[i].fBuffer.reset();
        fBuffers[i].fCleared = false;
    }
}

//////////////////////////////////////////////////////////////////////////////

#ifdef SK_DEBUG
#define VALIDATE validate
#else
static void VALIDATE(bool = false) {}
#endif

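// Traces the fraction of the block that was never written ("percent_unwritten") and then unmaps
// the block's underlying GrGpuBuffer.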
#define UNMAP_BUFFER(block)                                                          \
    do {                                                                             \
        TRACE_EVENT_INSTANT1("skia.gpu", "GrBufferAllocPool Unmapping Buffer",       \
                             TRACE_EVENT_SCOPE_THREAD, "percent_unwritten",          \
                             (float)((block).fBytesFree) / (block).fBuffer->size()); \
        SkASSERT(!block.fBuffer->isCpuBuffer());                                     \
        static_cast<GrGpuBuffer*>(block.fBuffer.get())->unmap();                     \
    } while (false)

constexpr size_t GrBufferAllocPool::kDefaultBufferSize;

GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrGpuBufferType bufferType,
                                     sk_sp<CpuBufferCache> cpuBufferCache)
        : fBlocks(8)
        , fCpuBufferCache(std::move(cpuBufferCache))
        , fGpu(gpu)
        , fBufferType(bufferType) {}

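// Unmaps the tail block if its GPU buffer is still mapped, then destroys every block.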
void GrBufferAllocPool::deleteBlocks() {
    if (fBlocks.count()) {
        GrBuffer* buffer = fBlocks.back().fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
            UNMAP_BUFFER(fBlocks.back());
        }
    }
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    SkASSERT(!fBufferPtr);
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    this->deleteBlocks();
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    this->deleteBlocks();
    this->resetCpuData(0);
    VALIDATE();
}

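// Finalizes the current block: unmaps a mapped GPU buffer, or pushes the bytes staged in the CPU
// buffer to the GPU buffer, and then clears fBufferPtr.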
void GrBufferAllocPool::unmap() {
    VALIDATE();

    if (fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        GrBuffer* buffer = block.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(block);
            } else {
                size_t flushSize = block.fBuffer->size() - block.fBytesFree;
                this->flushCpuData(fBlocks.back(), flushSize);
            }
        }
        fBufferPtr = nullptr;
    }
    VALIDATE();
}

#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    bool wasDestroyed = false;
    if (fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        if (!buffer->isCpuBuffer() && !static_cast<const GrGpuBuffer*>(buffer)->isMapped()) {
            SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
        }
    } else if (!fBlocks.empty()) {
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        const GrBuffer* buffer = fBlocks[i].fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
        GrBuffer* buffer = fBlocks[i].fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->wasDestroyed()) {
            wasDestroyed = true;
        } else {
            size_t bytes = fBlocks[i].fBuffer->size() - fBlocks[i].fBytesFree;
            bytesInUse += bytes;
            SkASSERT(bytes || unusedBlockAllowed);
        }
    }

    if (!wasDestroyed) {
        SkASSERT(bytesInUse == fBytesInUse);
        if (unusedBlockAllowed) {
            SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                     (!fBytesInUse && (fBlocks.count() < 2)));
        } else {
            SkASSERT((0 == fBytesInUse) == fBlocks.empty());
        }
    }
}
#endif

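// Number of padding bytes needed to round x up to the next multiple of alignment (0 if x is
// already aligned).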
static inline size_t align_up_pad(size_t x, size_t alignment) {
    return (alignment - x % alignment) % alignment;
}

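// x rounded down to a multiple of alignment.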
static inline size_t align_down(size_t x, uint32_t alignment) {
    return (x / alignment) * alignment;
}

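// Sub-allocates `size` bytes from the tail block, first padding the current write offset up to
// `alignment`. The backing buffer and the offset within it are returned via the out-params. A new
// block is created when the current one cannot satisfy the request.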
void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   sk_sp<const GrBuffer>* buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
        size_t pad = align_up_pad(usedBytes, alignment);
        SkSafeMath safeMath;
        size_t alignedSize = safeMath.add(pad, size);
        if (!safeMath.ok()) {
            return nullptr;
        }
        if (alignedSize <= back.fBytesFree) {
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= alignedSize;
            fBytesInUse += alignedSize;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request by doing a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!this->createBlock(size)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}

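// Like makeSpace(), but hands the caller all of the remaining (aligned) space in the block, which
// is at least `minSize` bytes; a new block of `fallbackSize` is created when the current one is
// too full.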
void* GrBufferAllocPool::makeSpaceAtLeast(size_t minSize,
                                          size_t fallbackSize,
                                          size_t alignment,
                                          sk_sp<const GrBuffer>* buffer,
                                          size_t* offset,
                                          size_t* actualSize) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);
    SkASSERT(actualSize);

    size_t usedBytes = (fBlocks.empty()) ? 0 : fBlocks.back().fBuffer->size() -
                                               fBlocks.back().fBytesFree;
    size_t pad = align_up_pad(usedBytes, alignment);
    if (fBlocks.empty() || (minSize + pad) > fBlocks.back().fBytesFree) {
        // We either don't have a block yet or the current block doesn't have enough free space.
        // Create a new one.
        if (!this->createBlock(fallbackSize)) {
            return nullptr;
        }
        usedBytes = 0;
        pad = 0;
    }
    SkASSERT(fBufferPtr);

    // Consume padding first, to make subsequent alignment math easier
    memset(static_cast<char*>(fBufferPtr) + usedBytes, 0, pad);
    usedBytes += pad;
    fBlocks.back().fBytesFree -= pad;
    fBytesInUse += pad;

    // Give caller all remaining space in this block (but aligned correctly)
    size_t size = align_down(fBlocks.back().fBytesFree, alignment);
    *offset = usedBytes;
    *buffer = fBlocks.back().fBuffer;
    *actualSize = size;
    fBlocks.back().fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return static_cast<char*>(fBufferPtr) + usedBytes;
}

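// Returns the most recently allocated `bytes` to the pool. Blocks that become completely unused
// are unmapped (if needed) and destroyed.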
void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    while (bytes) {
        // caller shouldn't try to put back more than they've taken
        SkASSERT(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->size() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // if we locked a vb to satisfy the make space and we're releasing
            // beyond it, then unmap it.
            GrBuffer* buffer = block.fBuffer.get();
            if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(block);
            }
            this->destroyBlock();
        } else {
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }

    VALIDATE();
}

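// Appends a new block of at least kDefaultBufferSize bytes. The previous block, if any, is
// finalized (unmapped or flushed) first. The new block's memory is then exposed through
// fBufferPtr: directly for a CPU buffer, via map() for a GPU buffer when mapping is supported and
// the size exceeds the caps' map threshold, and otherwise through the CPU staging buffer.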
bool GrBufferAllocPool::createBlock(size_t requestSize) {
    size_t size = std::max(requestSize, kDefaultBufferSize);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    block.fBuffer = this->getBuffer(size);
    if (!block.fBuffer) {
        fBlocks.pop_back();
        return false;
    }

    block.fBytesFree = block.fBuffer->size();
    if (fBufferPtr) {
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        GrBuffer* buffer = prev.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(prev);
            } else {
                this->flushCpuData(prev, prev.fBuffer->size() - prev.fBytesFree);
            }
        }
        fBufferPtr = nullptr;
    }

    SkASSERT(!fBufferPtr);

    // If the buffer is CPU-backed we "map" it because it is free to do so and saves a copy.
    // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
    // threshold.
    if (block.fBuffer->isCpuBuffer()) {
        fBufferPtr = static_cast<GrCpuBuffer*>(block.fBuffer.get())->data();
        SkASSERT(fBufferPtr);
    } else {
        if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
            size > fGpu->caps()->bufferMapThreshold()) {
            fBufferPtr = static_cast<GrGpuBuffer*>(block.fBuffer.get())->map();
        }
    }
    if (!fBufferPtr) {
        this->resetCpuData(block.fBytesFree);
        fBufferPtr = fCpuStagingBuffer->data();
    }

    VALIDATE(true);

    return true;
}

void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());
    SkASSERT(fBlocks.back().fBuffer->isCpuBuffer() ||
             !static_cast<GrGpuBuffer*>(fBlocks.back().fBuffer.get())->isMapped());
    fBlocks.pop_back();
    fBufferPtr = nullptr;
}

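// Ensures the CPU staging buffer can hold `newSize` bytes, or releases it when `newSize` is 0.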
void GrBufferAllocPool::resetCpuData(size_t newSize) {
    SkASSERT(newSize >= kDefaultBufferSize || !newSize);
    if (!newSize) {
        fCpuStagingBuffer.reset();
        return;
    }
    if (fCpuStagingBuffer && newSize <= fCpuStagingBuffer->size()) {
        return;
    }
    bool mustInitialize = fGpu->caps()->mustClearUploadedBufferData();
    fCpuStagingBuffer = fCpuBufferCache ? fCpuBufferCache->makeBuffer(newSize, mustInitialize)
                                        : GrCpuBuffer::Make(newSize);
}

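// Pushes `flushSize` bytes from the CPU staging buffer into the block's GPU buffer, using
// map()/memcpy when mapping is supported and worthwhile, and updateData() otherwise.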
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
    SkASSERT(block.fBuffer.get());
    SkASSERT(!block.fBuffer.get()->isCpuBuffer());
    GrGpuBuffer* buffer = static_cast<GrGpuBuffer*>(block.fBuffer.get());
    SkASSERT(!buffer->isMapped());
    SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
    SkASSERT(flushSize <= buffer->size());
    VALIDATE(true);

    if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
        flushSize > fGpu->caps()->bufferMapThreshold()) {
        void* data = buffer->map();
        if (data) {
            memcpy(data, fBufferPtr, flushSize);
            UNMAP_BUFFER(block);
            return;
        }
    }
    buffer->updateData(fBufferPtr, flushSize);
    VALIDATE(true);
}

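// Creates the backing buffer for a block: a CPU-backed buffer when the caps prefer client-side
// dynamic buffers (or client-side indirect buffers, for kDrawIndirect pools), and a dynamic GPU
// buffer from the resource provider otherwise.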
sk_sp<GrBuffer> GrBufferAllocPool::getBuffer(size_t size) {
    const GrCaps& caps = *fGpu->caps();
    auto resourceProvider = fGpu->getContext()->priv().resourceProvider();
    if (caps.preferClientSideDynamicBuffers() ||
        (fBufferType == GrGpuBufferType::kDrawIndirect && caps.useClientSideIndirectBuffers())) {
        // Create a CPU buffer.
        bool mustInitialize = caps.mustClearUploadedBufferData();
        return fCpuBufferCache ? fCpuBufferCache->makeBuffer(size, mustInitialize)
                               : GrCpuBuffer::Make(size);
    }
    return resourceProvider->createBuffer(size, fBufferType, kDynamic_GrAccessPattern);
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kVertex, std::move(cpuBufferCache)) {}

void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         sk_sp<const GrBuffer>* buffer,
                                         int* startVertex) {
    SkASSERT(vertexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startVertex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(vertexSize, vertexCount),
                                     vertexSize,
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}

void* GrVertexBufferAllocPool::makeSpaceAtLeast(size_t vertexSize, int minVertexCount,
                                                int fallbackVertexCount,
                                                sk_sp<const GrBuffer>* buffer, int* startVertex,
                                                int* actualVertexCount) {
    SkASSERT(minVertexCount >= 0);
    SkASSERT(fallbackVertexCount >= minVertexCount);
    SkASSERT(buffer);
    SkASSERT(startVertex);
    SkASSERT(actualVertexCount);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(vertexSize, minVertexCount),
                                            SkSafeMath::Mul(vertexSize, fallbackVertexCount),
                                            vertexSize,
                                            buffer,
                                            &offset,
                                            &actualSize);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);

    SkASSERT(0 == actualSize % vertexSize);
    SkASSERT(actualSize >= vertexSize * minVertexCount);
    *actualVertexCount = static_cast<int>(actualSize / vertexSize);

    return ptr;
}

////////////////////////////////////////////////////////////////////////////////

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kIndex, std::move(cpuBufferCache)) {}

void* GrIndexBufferAllocPool::makeSpace(int indexCount, sk_sp<const GrBuffer>* buffer,
                                        int* startIndex) {
    SkASSERT(indexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startIndex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(indexCount, sizeof(uint16_t)),
                                     sizeof(uint16_t),
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}

void* GrIndexBufferAllocPool::makeSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                               sk_sp<const GrBuffer>* buffer, int* startIndex,
                                               int* actualIndexCount) {
    SkASSERT(minIndexCount >= 0);
    SkASSERT(fallbackIndexCount >= minIndexCount);
    SkASSERT(buffer);
    SkASSERT(startIndex);
    SkASSERT(actualIndexCount);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(minIndexCount, sizeof(uint16_t)),
                                            SkSafeMath::Mul(fallbackIndexCount, sizeof(uint16_t)),
                                            sizeof(uint16_t),
                                            buffer,
                                            &offset,
                                            &actualSize);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));

    SkASSERT(0 == actualSize % sizeof(uint16_t));
    SkASSERT(actualSize >= minIndexCount * sizeof(uint16_t));
    *actualIndexCount = static_cast<int>(actualSize / sizeof(uint16_t));
    return ptr;
}