1/*
2 * Copyright 2016 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "include/core/SkTraceMemoryDump.h"
9#include "src/gpu/GrGpuResourcePriv.h"
10#include "src/gpu/gl/GrGLBuffer.h"
11#include "src/gpu/gl/GrGLCaps.h"
12#include "src/gpu/gl/GrGLGpu.h"
13
// Routes a GL call through the owning GrGLGpu's interface.
#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
// Same as GL_CALL, but stores the call's return value into RET.
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), RET, X)

// Issues an allocating GL call (e.g. BufferData) and evaluates to the resulting
// GrGLenum error code. When the caps say error checks may be skipped, the call
// is issued unchecked and GR_GL_NO_ERROR is returned. Otherwise, pending errors
// are cleared first (with OOM tracking) so the error returned afterwards is
// attributable to this specific call.
#define GL_ALLOC_CALL(call)                                            \
    [&] {                                                              \
        if (this->glGpu()->glCaps().skipErrorChecks()) {               \
            GR_GL_CALL(this->glGpu()->glInterface(), call);            \
            return static_cast<GrGLenum>(GR_GL_NO_ERROR);              \
        } else {                                                       \
            this->glGpu()->clearErrorsAndCheckForOOM();                \
            GR_GL_CALL_NOERRCHECK(this->glGpu()->glInterface(), call); \
            return this->glGpu()->getErrorAndCheckForOOM();            \
        }                                                              \
    }()

// VALIDATE() runs the debug-only invariant checks in GrGLBuffer::validate();
// it compiles to nothing in release builds.
#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif
34
35sk_sp<GrGLBuffer> GrGLBuffer::Make(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
36 GrAccessPattern accessPattern, const void* data) {
37 if (gpu->glCaps().transferBufferType() == GrGLCaps::TransferBufferType::kNone &&
38 (GrGpuBufferType::kXferCpuToGpu == intendedType ||
39 GrGpuBufferType::kXferGpuToCpu == intendedType)) {
40 return nullptr;
41 }
42
43 sk_sp<GrGLBuffer> buffer(new GrGLBuffer(gpu, size, intendedType, accessPattern, data));
44 if (0 == buffer->bufferID()) {
45 return nullptr;
46 }
47 return buffer;
48}
49
// The <usage> hint used for kDynamic_GrAccessPattern buffers. GL_STREAM_DRAW
// triggers an optimization in Chromium's GPU process where a client's vertex
// buffer objects are implemented as client-side-arrays on tile-deferred
// architectures.
#define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW
53
54inline static GrGLenum gr_to_gl_access_pattern(GrGpuBufferType bufferType,
55 GrAccessPattern accessPattern,
56 const GrGLCaps& caps) {
57 auto drawUsage = [](GrAccessPattern pattern) {
58 switch (pattern) {
59 case kDynamic_GrAccessPattern:
60 // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
61 return DYNAMIC_DRAW_PARAM;
62 case kStatic_GrAccessPattern:
63 return GR_GL_STATIC_DRAW;
64 case kStream_GrAccessPattern:
65 return GR_GL_STREAM_DRAW;
66 }
67 SkUNREACHABLE;
68 };
69
70 auto readUsage = [](GrAccessPattern pattern) {
71 switch (pattern) {
72 case kDynamic_GrAccessPattern:
73 return GR_GL_DYNAMIC_READ;
74 case kStatic_GrAccessPattern:
75 return GR_GL_STATIC_READ;
76 case kStream_GrAccessPattern:
77 return GR_GL_STREAM_READ;
78 }
79 SkUNREACHABLE;
80 };
81
82 auto usageType = [&drawUsage, &readUsage, &caps](GrGpuBufferType type,
83 GrAccessPattern pattern) {
84 // GL_NV_pixel_buffer_object adds transfer buffers but not the related <usage> values.
85 if (caps.transferBufferType() == GrGLCaps::TransferBufferType::kNV_PBO) {
86 return drawUsage(pattern);
87 }
88 switch (type) {
89 case GrGpuBufferType::kVertex:
90 case GrGpuBufferType::kIndex:
91 case GrGpuBufferType::kDrawIndirect:
92 case GrGpuBufferType::kXferCpuToGpu:
93 return drawUsage(pattern);
94 case GrGpuBufferType::kXferGpuToCpu:
95 return readUsage(pattern);
96 }
97 SkUNREACHABLE;
98 };
99
100 return usageType(bufferType, accessPattern);
101}
102
// Generates the GL buffer object and uploads the initial contents (data may be
// null). On GL allocation failure the buffer object is deleted and fBufferID is
// left at 0 so Make() can report failure; the resource is still registered with
// the cache either way.
GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
                       GrAccessPattern accessPattern, const void* data)
        : INHERITED(gpu, size, intendedType, accessPattern)
        , fIntendedType(intendedType)
        , fBufferID(0)
        , fUsage(gr_to_gl_access_pattern(intendedType, accessPattern, gpu->glCaps()))
        , fGLSizeInBytes(0)
        , fHasAttachedToTexture(false) {
    GL_CALL(GenBuffers(1, &fBufferID));
    if (fBufferID) {
        GrGLenum target = gpu->bindBuffer(fIntendedType, this);
        // Checked allocation path: detect OOM from BufferData rather than
        // keeping a buffer whose storage never materialized.
        GrGLenum error = GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)size, data, fUsage));
        if (error != GR_GL_NO_ERROR) {
            GL_CALL(DeleteBuffers(1, &fBufferID));
            fBufferID = 0;
        } else {
            fGLSizeInBytes = size;
        }
    }
    VALIDATE();
    this->registerWithCache(SkBudgeted::kYes);
    // Don't let a failed allocation be recycled from the scratch cache.
    if (!fBufferID) {
        this->resourcePriv().removeScratchKey();
    }
}
128
129inline GrGLGpu* GrGLBuffer::glGpu() const {
130 SkASSERT(!this->wasDestroyed());
131 return static_cast<GrGLGpu*>(this->getGpu());
132}
133
134inline const GrGLCaps& GrGLBuffer::glCaps() const {
135 return this->glGpu()->glCaps();
136}
137
138void GrGLBuffer::onRelease() {
139 TRACE_EVENT0("skia.gpu", TRACE_FUNC);
140
141 if (!this->wasDestroyed()) {
142 VALIDATE();
143 // make sure we've not been abandoned or already released
144 if (fBufferID) {
145 GL_CALL(DeleteBuffers(1, &fBufferID));
146 fBufferID = 0;
147 fGLSizeInBytes = 0;
148 }
149 fMapPtr = nullptr;
150 VALIDATE();
151 }
152
153 INHERITED::onRelease();
154}
155
156void GrGLBuffer::onAbandon() {
157 fBufferID = 0;
158 fGLSizeInBytes = 0;
159 fMapPtr = nullptr;
160 VALIDATE();
161 INHERITED::onAbandon();
162}
163
// Obtains a CPU-visible pointer to the buffer via whichever mapping mechanism
// the GL supports, storing it in fMapPtr. On unsupported GLs, or if a
// reallocation fails, fMapPtr is left null (and fGLSizeInBytes unchanged).
void GrGLBuffer::onMap() {
    SkASSERT(fBufferID);
    SkASSERT(!this->wasDestroyed());
    VALIDATE();
    SkASSERT(!this->isMapped());

    // TODO: Make this a function parameter.
    // Only GPU->CPU transfer buffers are mapped for reading.
    bool readOnly = (GrGpuBufferType::kXferGpuToCpu == fIntendedType);

    // Handling dirty context is done in the bindBuffer call
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            // No mapping support; caller must fall back to updateData.
            return;
        case GrGLCaps::kMapBuffer_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            if (!readOnly) {
                // Let driver know it can discard the old data
                if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->size()) {
                    GrGLenum error =
                            GL_ALLOC_CALL(BufferData(target, this->size(), nullptr, fUsage));
                    if (error != GR_GL_NO_ERROR) {
                        // Reallocation failed (likely OOM); leave unmapped.
                        return;
                    }
                }
            }
            GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        }
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (fGLSizeInBytes != this->size()) {
                GrGLenum error = GL_ALLOC_CALL(BufferData(target, this->size(), nullptr, fUsage));
                if (error != GR_GL_NO_ERROR) {
                    return;
                }
            }
            GrGLbitfield access;
            if (readOnly) {
                access = GR_GL_MAP_READ_BIT;
            } else {
                access = GR_GL_MAP_WRITE_BIT;
                if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) {
                    // TODO: Make this a function parameter.
                    // For non-transfer buffers the old contents are never
                    // needed, so let the driver orphan them.
                    access |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
                }
            }
            GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(), access));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (fGLSizeInBytes != this->size()) {
                GrGLenum error = GL_ALLOC_CALL(BufferData(target, this->size(), nullptr, fUsage));
                if (error != GR_GL_NO_ERROR) {
                    return;
                }
            }
            GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(),
                                                  readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        }
    }
    // All successful paths leave the GL allocation at exactly size() bytes.
    fGLSizeInBytes = this->size();
    VALIDATE();
}
231
// Releases the CPU mapping established by onMap(); fMapPtr is always cleared.
void GrGLBuffer::onUnmap() {
    SkASSERT(fBufferID);
    VALIDATE();
    SkASSERT(this->isMapped());
    // NOTE(review): redundant with the assert above in debug builds, but in
    // release builds (where SkASSERT compiles out) this guards against
    // unmapping after the buffer ID was zeroed.
    if (0 == fBufferID) {
        fMapPtr = nullptr;
        return;
    }
    // bind buffer handles the dirty context
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            // onMap() never maps on this path, so we shouldn't be here.
            SkDEBUGFAIL("Shouldn't get here.");
            return;
        case GrGLCaps::kMapBuffer_MapBufferType: // fall through
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            GL_CALL(UnmapBuffer(target));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType:
            this->glGpu()->bindBuffer(fIntendedType, this); // TODO: Is this needed?
            GL_CALL(UnmapBufferSubData(fMapPtr));
            break;
    }
    fMapPtr = nullptr;
}
258
// Uploads srcSizeInBytes bytes from src into the buffer starting at offset 0.
// Returns false if the buffer was destroyed, the source is larger than the
// buffer, or the GL reported an error (e.g. OOM) during reallocation.
bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
    SkASSERT(fBufferID);
    if (this->wasDestroyed()) {
        return false;
    }

    SkASSERT(!this->isMapped());
    VALIDATE();
    if (srcSizeInBytes > this->size()) {
        return false;
    }
    SkASSERT(srcSizeInBytes <= this->size());
    // bindbuffer handles dirty context
    GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);

    if (this->glCaps().useBufferDataNullHint()) {
        if (this->size() == srcSizeInBytes) {
            // Full update: a single BufferData replaces the whole allocation.
            GrGLenum error =
                    GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)srcSizeInBytes, src, fUsage));
            if (error != GR_GL_NO_ERROR) {
                return false;
            }
        } else {
            // Before we call glBufferSubData we give the driver a hint using
            // glBufferData with nullptr. This makes the old buffer contents
            // inaccessible to future draws. The GPU may still be processing
            // draws that reference the old contents. With this hint it can
            // assign a different allocation for the new contents to avoid
            // flushing the gpu past draws consuming the old contents.
            // TODO I think we actually want to try calling bufferData here
            GrGLenum error =
                    GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)this->size(), nullptr, fUsage));
            if (error != GR_GL_NO_ERROR) {
                return false;
            }
            GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src));
        }
        fGLSizeInBytes = this->size();
    } else {
        // Note that we're cheating on the size here. Currently no methods
        // allow a partial update that preserves contents of non-updated
        // portions of the buffer (map() does a glBufferData(..size, nullptr..))
        GrGLenum error =
                GL_ALLOC_CALL(BufferData(target, (GrGLsizeiptr)srcSizeInBytes, src, fUsage));
        if (error != GR_GL_NO_ERROR) {
            return false;
        }
        fGLSizeInBytes = srcSizeInBytes;
    }
    VALIDATE();
    return true;
}
311
312void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
313 const SkString& dumpName) const {
314 SkString buffer_id;
315 buffer_id.appendU32(this->bufferID());
316 traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
317 buffer_id.c_str());
318}
319
#ifdef SK_DEBUG

// Debug-only invariants: a zeroed buffer ID implies no GL allocation, and any
// outstanding mapping must fit within the logical buffer size.
void GrGLBuffer::validate() const {
    SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes);
    SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->size());
}

#endif
328