/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/gpu/GrContext.h"
#include "include/gpu/GrTypes.h"
#include "include/private/SkMacros.h"
#include "src/core/SkSafeMath.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrBufferAllocPool.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrCpuBuffer.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrGpuBuffer.h"
#include "src/gpu/GrResourceProvider.h"

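// CpuBufferCache recycles up to a fixed number of kDefaultBufferSize CPU-side buffers so that
// pools backed by CPU memory can reuse allocations instead of recreating them.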
sk_sp<GrBufferAllocPool::CpuBufferCache> GrBufferAllocPool::CpuBufferCache::Make(
        int maxBuffersToCache) {
    return sk_sp<CpuBufferCache>(new CpuBufferCache(maxBuffersToCache));
}

GrBufferAllocPool::CpuBufferCache::CpuBufferCache(int maxBuffersToCache)
        : fMaxBuffersToCache(maxBuffersToCache) {
    if (fMaxBuffersToCache) {
        fBuffers.reset(new Buffer[fMaxBuffersToCache]);
    }
}

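// Hands out a buffer of the requested size. Default-sized requests are satisfied from the cache
// when an unreferenced (or empty) slot is available; anything else gets a one-off allocation.
// When mustBeInitialized is true, the buffer is zeroed unless it has already been cleared.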
sk_sp<GrCpuBuffer> GrBufferAllocPool::CpuBufferCache::makeBuffer(size_t size,
                                                                 bool mustBeInitialized) {
    SkASSERT(size > 0);
    Buffer* result = nullptr;
    if (size == kDefaultBufferSize) {
        int i = 0;
        for (; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
            SkASSERT(fBuffers[i].fBuffer->size() == kDefaultBufferSize);
            if (fBuffers[i].fBuffer->unique()) {
                result = &fBuffers[i];
            }
        }
        if (!result && i < fMaxBuffersToCache) {
            fBuffers[i].fBuffer = GrCpuBuffer::Make(size);
            result = &fBuffers[i];
        }
    }
    Buffer tempResult;
    if (!result) {
        tempResult.fBuffer = GrCpuBuffer::Make(size);
        result = &tempResult;
    }
    if (mustBeInitialized && !result->fCleared) {
        result->fCleared = true;
        memset(result->fBuffer->data(), 0, result->fBuffer->size());
    }
    return result->fBuffer;
}

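// Drops every cached buffer and clears the per-slot "already zeroed" flags.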
void GrBufferAllocPool::CpuBufferCache::releaseAll() {
    for (int i = 0; i < fMaxBuffersToCache && fBuffers[i].fBuffer; ++i) {
        fBuffers[i].fBuffer.reset();
        fBuffers[i].fCleared = false;
    }
}

//////////////////////////////////////////////////////////////////////////////

#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

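// Unmaps a block's GPU buffer and emits a trace event recording what fraction of the buffer was
// left unwritten.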
#define UNMAP_BUFFER(block)                                                          \
    do {                                                                             \
        TRACE_EVENT_INSTANT1("skia.gpu", "GrBufferAllocPool Unmapping Buffer",       \
                             TRACE_EVENT_SCOPE_THREAD, "percent_unwritten",          \
                             (float)((block).fBytesFree) / (block).fBuffer->size()); \
        SkASSERT(!block.fBuffer->isCpuBuffer());                                     \
        static_cast<GrGpuBuffer*>(block.fBuffer.get())->unmap();                     \
    } while (false)

constexpr size_t GrBufferAllocPool::kDefaultBufferSize;

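// The pool starts out with no blocks; buffers are created lazily the first time space is
// requested.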
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrGpuBufferType bufferType,
                                     sk_sp<CpuBufferCache> cpuBufferCache)
        : fBlocks(8)
        , fCpuBufferCache(std::move(cpuBufferCache))
        , fGpu(gpu)
        , fBufferType(bufferType) {}

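// Unmaps the most recently created block if it is still mapped, then destroys all blocks.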
void GrBufferAllocPool::deleteBlocks() {
    if (fBlocks.count()) {
        GrBuffer* buffer = fBlocks.back().fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
            UNMAP_BUFFER(fBlocks.back());
        }
    }
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    SkASSERT(!fBufferPtr);
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    this->deleteBlocks();
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    this->deleteBlocks();
    this->resetCpuData(0);
    VALIDATE();
}

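// Finishes the current block before its contents are used: a mapped GPU buffer is unmapped, and
// data staged in CPU memory is flushed into the GPU buffer. CPU-backed blocks need no extra work.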
void GrBufferAllocPool::unmap() {
    VALIDATE();

    if (fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        GrBuffer* buffer = block.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(block);
            } else {
                size_t flushSize = block.fBuffer->size() - block.fBytesFree;
                this->flushCpuData(fBlocks.back(), flushSize);
            }
        }
        fBufferPtr = nullptr;
    }
    VALIDATE();
}

#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    bool wasDestroyed = false;
    if (fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        if (!buffer->isCpuBuffer() && !static_cast<const GrGpuBuffer*>(buffer)->isMapped()) {
            SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
        }
    } else if (!fBlocks.empty()) {
        const GrBuffer* buffer = fBlocks.back().fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        const GrBuffer* buffer = fBlocks[i].fBuffer.get();
        SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
    }
    for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
        GrBuffer* buffer = fBlocks[i].fBuffer.get();
        if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->wasDestroyed()) {
            wasDestroyed = true;
        } else {
            size_t bytes = fBlocks[i].fBuffer->size() - fBlocks[i].fBytesFree;
            bytesInUse += bytes;
            SkASSERT(bytes || unusedBlockAllowed);
        }
    }

    if (!wasDestroyed) {
        SkASSERT(bytesInUse == fBytesInUse);
        if (unusedBlockAllowed) {
            SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                     (!fBytesInUse && (fBlocks.count() < 2)));
        } else {
            SkASSERT((0 == fBytesInUse) == fBlocks.empty());
        }
    }
}
#endif

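// align_up_pad() returns the padding needed to round x up to a multiple of alignment;
// align_down() rounds x down to a multiple of alignment.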
static inline size_t align_up_pad(size_t x, size_t alignment) {
    return (alignment - x % alignment) % alignment;
}

static inline size_t align_down(size_t x, uint32_t alignment) {
    return (x / alignment) * alignment;
}

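// Sub-allocates "size" bytes at the requested alignment, carving them out of the current block
// when it has room and starting a new block otherwise. On success, *buffer and *offset identify
// where the data will live, and the returned pointer is where the caller writes it (either mapped
// GPU memory or the CPU staging copy).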
void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   sk_sp<const GrBuffer>* buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
        size_t pad = align_up_pad(usedBytes, alignment);
        SkSafeMath safeMath;
        size_t alignedSize = safeMath.add(pad, size);
        if (!safeMath.ok()) {
            return nullptr;
        }
        if (alignedSize <= back.fBytesFree) {
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= alignedSize;
            fBytesInUse += alignedSize;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request by doing a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!this->createBlock(size)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}

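// Like makeSpace(), but the caller will accept anywhere from minSize up to fallbackSize bytes;
// *actualSize reports how much space was actually handed out.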
void* GrBufferAllocPool::makeSpaceAtLeast(size_t minSize,
                                          size_t fallbackSize,
                                          size_t alignment,
                                          sk_sp<const GrBuffer>* buffer,
                                          size_t* offset,
                                          size_t* actualSize) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);
    SkASSERT(actualSize);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
        size_t pad = align_up_pad(usedBytes, alignment);
        if ((minSize + pad) <= back.fBytesFree) {
            // Consume padding first, to make subsequent alignment math easier
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            back.fBytesFree -= pad;
            fBytesInUse += pad;

            // Give caller all remaining space in this block up to fallbackSize (but aligned
            // correctly)
            size_t size;
            if (back.fBytesFree >= fallbackSize) {
                SkASSERT(align_down(fallbackSize, alignment) == fallbackSize);
                size = fallbackSize;
            } else {
                size = align_down(back.fBytesFree, alignment);
            }
            *offset = usedBytes;
            *buffer = back.fBuffer;
            *actualSize = size;
            back.fBytesFree -= size;
            fBytesInUse += size;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request by doing a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!this->createBlock(fallbackSize)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    *actualSize = fallbackSize;
    back.fBytesFree -= fallbackSize;
    fBytesInUse += fallbackSize;
    VALIDATE();
    return fBufferPtr;
}

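// Returns the most recently allocated "bytes" back to the pool, destroying any blocks that become
// completely unused along the way.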
void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    while (bytes) {
        // caller shouldn't try to put back more than they've taken
        SkASSERT(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->size() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // If we mapped a buffer to satisfy the makeSpace call and we're releasing
            // beyond it, then unmap it.
            GrBuffer* buffer = block.fBuffer.get();
            if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(block);
            }
            this->destroyBlock();
        } else {
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }

    VALIDATE();
}

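// Appends a new block of at least kDefaultBufferSize bytes. The previous block is unmapped or
// flushed first, and fBufferPtr is then pointed at writable memory for the new block: the mapped
// GPU buffer, the CPU buffer's storage, or the CPU staging buffer.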
bool GrBufferAllocPool::createBlock(size_t requestSize) {
    size_t size = std::max(requestSize, kDefaultBufferSize);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    block.fBuffer = this->getBuffer(size);
    if (!block.fBuffer) {
        fBlocks.pop_back();
        return false;
    }

    block.fBytesFree = block.fBuffer->size();
    if (fBufferPtr) {
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        GrBuffer* buffer = prev.fBuffer.get();
        if (!buffer->isCpuBuffer()) {
            if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
                UNMAP_BUFFER(prev);
            } else {
                this->flushCpuData(prev, prev.fBuffer->size() - prev.fBytesFree);
            }
        }
        fBufferPtr = nullptr;
    }

    SkASSERT(!fBufferPtr);

    // If the buffer is CPU-backed we "map" it because it is free to do so and saves a copy.
    // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
    // threshold.
    if (block.fBuffer->isCpuBuffer()) {
        fBufferPtr = static_cast<GrCpuBuffer*>(block.fBuffer.get())->data();
        SkASSERT(fBufferPtr);
    } else {
        if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
            size > fGpu->caps()->bufferMapThreshold()) {
            fBufferPtr = static_cast<GrGpuBuffer*>(block.fBuffer.get())->map();
        }
    }
    if (!fBufferPtr) {
        this->resetCpuData(block.fBytesFree);
        fBufferPtr = fCpuStagingBuffer->data();
    }

    VALIDATE(true);

    return true;
}

void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());
    SkASSERT(fBlocks.back().fBuffer->isCpuBuffer() ||
             !static_cast<GrGpuBuffer*>(fBlocks.back().fBuffer.get())->isMapped());
    fBlocks.pop_back();
    fBufferPtr = nullptr;
}

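// Ensures fCpuStagingBuffer can hold newSize bytes, or releases it when newSize is zero.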
void GrBufferAllocPool::resetCpuData(size_t newSize) {
    SkASSERT(newSize >= kDefaultBufferSize || !newSize);
    if (!newSize) {
        fCpuStagingBuffer.reset();
        return;
    }
    if (fCpuStagingBuffer && newSize <= fCpuStagingBuffer->size()) {
        return;
    }
    bool mustInitialize = fGpu->caps()->mustClearUploadedBufferData();
    fCpuStagingBuffer = fCpuBufferCache ? fCpuBufferCache->makeBuffer(newSize, mustInitialize)
                                        : GrCpuBuffer::Make(newSize);
}

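// Copies flushSize bytes from the CPU staging buffer into the block's GPU buffer, preferring a
// map/memcpy/unmap for transfers above the caps' map threshold and updateData() otherwise.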
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
    SkASSERT(block.fBuffer.get());
    SkASSERT(!block.fBuffer.get()->isCpuBuffer());
    GrGpuBuffer* buffer = static_cast<GrGpuBuffer*>(block.fBuffer.get());
    SkASSERT(!buffer->isMapped());
    SkASSERT(fCpuStagingBuffer && fCpuStagingBuffer->data() == fBufferPtr);
    SkASSERT(flushSize <= buffer->size());
    VALIDATE(true);

    if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
        flushSize > fGpu->caps()->bufferMapThreshold()) {
        void* data = buffer->map();
        if (data) {
            memcpy(data, fBufferPtr, flushSize);
            UNMAP_BUFFER(block);
            return;
        }
    }
    buffer->updateData(fBufferPtr, flushSize);
    VALIDATE(true);
}

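// Allocates the buffer that backs a new block: a real GPU buffer unless the caps prefer
// client-side dynamic buffers or the contents may feed an indirect-draw polyfill, in which case a
// (possibly cached) CPU buffer is used.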
sk_sp<GrBuffer> GrBufferAllocPool::getBuffer(size_t size) {
    auto resourceProvider = fGpu->getContext()->priv().resourceProvider();
    if (!fGpu->caps()->preferClientSideDynamicBuffers()) {
        // Indirect draw commands for a polyfill must reside in a CPU buffer.
        bool mayNeedIndirectDrawPolyfill = (fBufferType == GrGpuBufferType::kDrawIndirect) &&
                                           (!fGpu->caps()->nativeDrawIndirectSupport() ||
                                            fGpu->caps()->nativeDrawIndexedIndirectIsBroken());
        if (!mayNeedIndirectDrawPolyfill) {
            // We can create an actual GPU buffer.
            return resourceProvider->createBuffer(size, fBufferType, kDynamic_GrAccessPattern);
        }
    }
    // Create a CPU buffer.
    bool mustInitialize = fGpu->caps()->mustClearUploadedBufferData();
    return fCpuBufferCache ? fCpuBufferCache->makeBuffer(size, mustInitialize)
                           : GrCpuBuffer::Make(size);
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kVertex, std::move(cpuBufferCache)) {}

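// Reserves storage for vertexCount vertices of vertexSize bytes each; *startVertex is the index
// of the first new vertex within *buffer. A hypothetical caller (names illustrative only) might
// look like:
//
//   sk_sp<const GrBuffer> vertexBuffer;
//   int firstVertex;
//   auto* positions = static_cast<SkPoint*>(
//           vertexPool.makeSpace(sizeof(SkPoint), 4, &vertexBuffer, &firstVertex));
//   if (positions) {
//       // ... write 4 positions, then issue a draw that reads vertexBuffer at firstVertex.
//   }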
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         sk_sp<const GrBuffer>* buffer,
                                         int* startVertex) {
    SkASSERT(vertexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startVertex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(vertexSize, vertexCount),
                                     vertexSize,
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}

void* GrVertexBufferAllocPool::makeSpaceAtLeast(size_t vertexSize, int minVertexCount,
                                                int fallbackVertexCount,
                                                sk_sp<const GrBuffer>* buffer, int* startVertex,
                                                int* actualVertexCount) {
    SkASSERT(minVertexCount >= 0);
    SkASSERT(fallbackVertexCount >= minVertexCount);
    SkASSERT(buffer);
    SkASSERT(startVertex);
    SkASSERT(actualVertexCount);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(vertexSize, minVertexCount),
                                            SkSafeMath::Mul(vertexSize, fallbackVertexCount),
                                            vertexSize,
                                            buffer,
                                            &offset,
                                            &actualSize);

    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);

    SkASSERT(0 == actualSize % vertexSize);
    SkASSERT(actualSize >= vertexSize * minVertexCount);
    *actualVertexCount = static_cast<int>(actualSize / vertexSize);

    return ptr;
}

////////////////////////////////////////////////////////////////////////////////

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu, sk_sp<CpuBufferCache> cpuBufferCache)
        : GrBufferAllocPool(gpu, GrGpuBufferType::kIndex, std::move(cpuBufferCache)) {}

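// Index allocations are made in units of 16-bit indices; *startIndex is the position of the first
// new index within *buffer.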
void* GrIndexBufferAllocPool::makeSpace(int indexCount, sk_sp<const GrBuffer>* buffer,
                                        int* startIndex) {
    SkASSERT(indexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startIndex);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpace(SkSafeMath::Mul(indexCount, sizeof(uint16_t)),
                                     sizeof(uint16_t),
                                     buffer,
                                     &offset);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}

void* GrIndexBufferAllocPool::makeSpaceAtLeast(int minIndexCount, int fallbackIndexCount,
                                               sk_sp<const GrBuffer>* buffer, int* startIndex,
                                               int* actualIndexCount) {
    SkASSERT(minIndexCount >= 0);
    SkASSERT(fallbackIndexCount >= minIndexCount);
    SkASSERT(buffer);
    SkASSERT(startIndex);
    SkASSERT(actualIndexCount);

    size_t offset SK_INIT_TO_AVOID_WARNING;
    size_t actualSize SK_INIT_TO_AVOID_WARNING;
    void* ptr = INHERITED::makeSpaceAtLeast(SkSafeMath::Mul(minIndexCount, sizeof(uint16_t)),
                                            SkSafeMath::Mul(fallbackIndexCount, sizeof(uint16_t)),
                                            sizeof(uint16_t),
                                            buffer,
                                            &offset,
                                            &actualSize);

    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));

    SkASSERT(0 == actualSize % sizeof(uint16_t));
    SkASSERT(actualSize >= minIndexCount * sizeof(uint16_t));
    *actualIndexCount = static_cast<int>(actualSize / sizeof(uint16_t));
    return ptr;
}