/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7 | |
8 | #include "include/core/SkTraceMemoryDump.h" |
9 | #include "src/gpu/GrGpuResourcePriv.h" |
10 | #include "src/gpu/gl/GrGLBuffer.h" |
11 | #include "src/gpu/gl/GrGLCaps.h" |
12 | #include "src/gpu/gl/GrGLGpu.h" |
13 | |
14 | #define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X) |
15 | #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), RET, X) |
16 | |
17 | #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR |
18 | #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface) |
19 | #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call) |
20 | #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface) |
21 | #else |
22 | #define CLEAR_ERROR_BEFORE_ALLOC(iface) |
23 | #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call) |
24 | #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR |
25 | #endif |
26 | |
27 | #ifdef SK_DEBUG |
28 | #define VALIDATE() this->validate() |
29 | #else |
30 | #define VALIDATE() do {} while(false) |
31 | #endif |
32 | |
33 | sk_sp<GrGLBuffer> GrGLBuffer::Make(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType, |
34 | GrAccessPattern accessPattern, const void* data) { |
35 | if (gpu->glCaps().transferBufferType() == GrGLCaps::TransferBufferType::kNone && |
36 | (GrGpuBufferType::kXferCpuToGpu == intendedType || |
37 | GrGpuBufferType::kXferGpuToCpu == intendedType)) { |
38 | return nullptr; |
39 | } |
40 | |
41 | sk_sp<GrGLBuffer> buffer(new GrGLBuffer(gpu, size, intendedType, accessPattern, data)); |
42 | if (0 == buffer->bufferID()) { |
43 | return nullptr; |
44 | } |
45 | return buffer; |
46 | } |
47 | |
48 | // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer |
49 | // objects are implemented as client-side-arrays on tile-deferred architectures. |
50 | #define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW |
51 | |
52 | inline static GrGLenum gr_to_gl_access_pattern(GrGpuBufferType bufferType, |
53 | GrAccessPattern accessPattern, |
54 | const GrGLCaps& caps) { |
55 | auto drawUsage = [](GrAccessPattern pattern) { |
56 | switch (pattern) { |
57 | case kDynamic_GrAccessPattern: |
58 | // TODO: Do we really want to use STREAM_DRAW here on non-Chromium? |
59 | return DYNAMIC_DRAW_PARAM; |
60 | case kStatic_GrAccessPattern: |
61 | return GR_GL_STATIC_DRAW; |
62 | case kStream_GrAccessPattern: |
63 | return GR_GL_STREAM_DRAW; |
64 | } |
65 | SkUNREACHABLE; |
66 | }; |
67 | |
68 | auto readUsage = [](GrAccessPattern pattern) { |
69 | switch (pattern) { |
70 | case kDynamic_GrAccessPattern: |
71 | return GR_GL_DYNAMIC_READ; |
72 | case kStatic_GrAccessPattern: |
73 | return GR_GL_STATIC_READ; |
74 | case kStream_GrAccessPattern: |
75 | return GR_GL_STREAM_READ; |
76 | } |
77 | SkUNREACHABLE; |
78 | }; |
79 | |
80 | auto usageType = [&drawUsage, &readUsage, &caps](GrGpuBufferType type, |
81 | GrAccessPattern pattern) { |
82 | // GL_NV_pixel_buffer_object adds transfer buffers but not the related <usage> values. |
83 | if (caps.transferBufferType() == GrGLCaps::TransferBufferType::kNV_PBO) { |
84 | return drawUsage(pattern); |
85 | } |
86 | switch (type) { |
87 | case GrGpuBufferType::kVertex: |
88 | case GrGpuBufferType::kIndex: |
89 | case GrGpuBufferType::kDrawIndirect: |
90 | case GrGpuBufferType::kXferCpuToGpu: |
91 | return drawUsage(pattern); |
92 | case GrGpuBufferType::kXferGpuToCpu: |
93 | return readUsage(pattern); |
94 | } |
95 | SkUNREACHABLE; |
96 | }; |
97 | |
98 | return usageType(bufferType, accessPattern); |
99 | } |
100 | |
// Creates the GL buffer object, allocates its storage, and uploads the initial
// data (may be null). On allocation failure fBufferID is left 0; Make() checks
// for that and returns nullptr.
GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrGpuBufferType intendedType,
                       GrAccessPattern accessPattern, const void* data)
    : INHERITED(gpu, size, intendedType, accessPattern)
    , fIntendedType(intendedType)
    , fBufferID(0)
    , fUsage(gr_to_gl_access_pattern(intendedType, accessPattern, gpu->glCaps()))
    , fGLSizeInBytes(0)
    , fHasAttachedToTexture(false) {
    GL_CALL(GenBuffers(1, &fBufferID));
    if (fBufferID) {
        GrGLenum target = gpu->bindBuffer(fIntendedType, this);
        CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface());
        // make sure driver can allocate memory for this buffer
        GL_ALLOC_CALL(gpu->glInterface(), BufferData(target,
                                                     (GrGLsizeiptr) size,
                                                     data,
                                                     fUsage));
        if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) {
            // The driver refused the allocation: release the GL object so that
            // bufferID() reports 0 and construction is treated as failed.
            GL_CALL(DeleteBuffers(1, &fBufferID));
            fBufferID = 0;
        } else {
            fGLSizeInBytes = size;
        }
    }
    VALIDATE();
    this->registerWithCache(SkBudgeted::kYes);
    if (!fBufferID) {
        // A buffer with no GL object can never be recycled; drop its scratch key.
        this->resourcePriv().removeScratchKey();
    }
}
131 | |
132 | inline GrGLGpu* GrGLBuffer::glGpu() const { |
133 | SkASSERT(!this->wasDestroyed()); |
134 | return static_cast<GrGLGpu*>(this->getGpu()); |
135 | } |
136 | |
137 | inline const GrGLCaps& GrGLBuffer::glCaps() const { |
138 | return this->glGpu()->glCaps(); |
139 | } |
140 | |
141 | void GrGLBuffer::onRelease() { |
142 | TRACE_EVENT0("skia.gpu" , TRACE_FUNC); |
143 | |
144 | if (!this->wasDestroyed()) { |
145 | VALIDATE(); |
146 | // make sure we've not been abandoned or already released |
147 | if (fBufferID) { |
148 | GL_CALL(DeleteBuffers(1, &fBufferID)); |
149 | fBufferID = 0; |
150 | fGLSizeInBytes = 0; |
151 | } |
152 | fMapPtr = nullptr; |
153 | VALIDATE(); |
154 | } |
155 | |
156 | INHERITED::onRelease(); |
157 | } |
158 | |
159 | void GrGLBuffer::onAbandon() { |
160 | fBufferID = 0; |
161 | fGLSizeInBytes = 0; |
162 | fMapPtr = nullptr; |
163 | VALIDATE(); |
164 | INHERITED::onAbandon(); |
165 | } |
166 | |
// Maps the buffer into CPU-addressable memory via whichever mechanism the caps
// report, leaving the pointer in fMapPtr (null when mapping is unsupported or
// the driver fails the map). Write maps discard the previous contents.
void GrGLBuffer::onMap() {
    SkASSERT(fBufferID);
    SkASSERT(!this->wasDestroyed());
    VALIDATE();
    SkASSERT(!this->isMapped());

    // TODO: Make this a function parameter.
    bool readOnly = (GrGpuBufferType::kXferGpuToCpu == fIntendedType);

    // Handling dirty context is done in the bindBuffer call
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            // Mapping unsupported; fMapPtr stays null and callers must fall back.
            return;
        case GrGLCaps::kMapBuffer_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            if (!readOnly) {
                // Let driver know it can discard the old data
                if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->size()) {
                    GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
                }
            }
            GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        }
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (fGLSizeInBytes != this->size()) {
                GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
            }
            GrGLbitfield access;
            if (readOnly) {
                access = GR_GL_MAP_READ_BIT;
            } else {
                access = GR_GL_MAP_WRITE_BIT;
                // Transfer sources must keep prior contents; everything else may
                // be invalidated so the driver can avoid a stall.
                if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) {
                    // TODO: Make this a function parameter.
                    access |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
                }
            }
            GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(), access));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (fGLSizeInBytes != this->size()) {
                GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
            }
            GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(),
                                                  readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        }
    }
    // Any BufferData issued above (re)allocated GL storage at our logical size.
    fGLSizeInBytes = this->size();
    VALIDATE();
}
224 | |
// Releases a mapping established by onMap(). fMapPtr is cleared on every path.
void GrGLBuffer::onUnmap() {
    SkASSERT(fBufferID);
    VALIDATE();
    SkASSERT(this->isMapped());
    // Defensive release-mode guard; the assert above catches this in debug builds.
    if (0 == fBufferID) {
        fMapPtr = nullptr;
        return;
    }
    // bind buffer handles the dirty context
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            // onMap() never produces a mapping in this mode, so this is unreachable.
            SkDEBUGFAIL("Shouldn't get here.");
            return;
        case GrGLCaps::kMapBuffer_MapBufferType: // fall through
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
            GL_CALL(UnmapBuffer(target));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType:
            this->glGpu()->bindBuffer(fIntendedType, this); // TODO: Is this needed?
            GL_CALL(UnmapBufferSubData(fMapPtr));
            break;
    }
    fMapPtr = nullptr;
}
251 | |
// Replaces the first srcSizeInBytes bytes of the buffer with src. Returns false
// if the context was destroyed or the source is larger than the buffer.
bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
    SkASSERT(fBufferID);
    if (this->wasDestroyed()) {
        return false;
    }

    SkASSERT(!this->isMapped());
    VALIDATE();
    if (srcSizeInBytes > this->size()) {
        return false;
    }
    SkASSERT(srcSizeInBytes <= this->size());
    // bindbuffer handles dirty context
    GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);

    if (this->glCaps().useBufferDataNullHint()) {
        if (this->size() == srcSizeInBytes) {
            // Full-size update: a single BufferData both orphans and uploads.
            GL_CALL(BufferData(target, (GrGLsizeiptr) srcSizeInBytes, src, fUsage));
        } else {
            // Before we call glBufferSubData we give the driver a hint using
            // glBufferData with nullptr. This makes the old buffer contents
            // inaccessible to future draws. The GPU may still be processing
            // draws that reference the old contents. With this hint it can
            // assign a different allocation for the new contents to avoid
            // flushing the gpu past draws consuming the old contents.
            // TODO I think we actually want to try calling bufferData here
            GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
            GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src));
        }
        fGLSizeInBytes = this->size();
    } else {
        // Note that we're cheating on the size here. Currently no methods
        // allow a partial update that preserves contents of non-updated
        // portions of the buffer (map() does a glBufferData(..size, nullptr..))
        GL_CALL(BufferData(target, srcSizeInBytes, src, fUsage));
        fGLSizeInBytes = srcSizeInBytes;
    }
    VALIDATE();
    return true;
}
292 | |
293 | void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump, |
294 | const SkString& dumpName) const { |
295 | SkString buffer_id; |
296 | buffer_id.appendU32(this->bufferID()); |
297 | traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer" , |
298 | buffer_id.c_str()); |
299 | } |
300 | |
301 | #ifdef SK_DEBUG |
302 | |
303 | void GrGLBuffer::validate() const { |
304 | SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes); |
305 | SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->size()); |
306 | } |
307 | |
308 | #endif |
309 | |