1 | /* |
2 | * Copyright 2011 Google Inc. |
3 | * |
4 | * Use of this source code is governed by a BSD-style license that can be |
5 | * found in the LICENSE file. |
6 | */ |
7 | |
8 | #include "include/core/SkPixmap.h" |
9 | #include "include/core/SkStrokeRec.h" |
10 | #include "include/core/SkTypes.h" |
11 | #include "include/gpu/GrBackendSemaphore.h" |
12 | #include "include/gpu/GrBackendSurface.h" |
13 | #include "include/gpu/GrTypes.h" |
14 | #include "include/private/SkHalf.h" |
15 | #include "include/private/SkTemplates.h" |
16 | #include "include/private/SkTo.h" |
17 | #include "src/core/SkAutoMalloc.h" |
18 | #include "src/core/SkCompressedDataUtils.h" |
19 | #include "src/core/SkConvertPixels.h" |
20 | #include "src/core/SkMipMap.h" |
21 | #include "src/core/SkTraceEvent.h" |
22 | #include "src/gpu/GrContextPriv.h" |
23 | #include "src/gpu/GrCpuBuffer.h" |
24 | #include "src/gpu/GrDataUtils.h" |
25 | #include "src/gpu/GrFixedClip.h" |
26 | #include "src/gpu/GrGpuResourcePriv.h" |
27 | #include "src/gpu/GrPipeline.h" |
28 | #include "src/gpu/GrProgramInfo.h" |
29 | #include "src/gpu/GrRenderTargetPriv.h" |
30 | #include "src/gpu/GrShaderCaps.h" |
31 | #include "src/gpu/GrSurfaceProxyPriv.h" |
32 | #include "src/gpu/GrTexturePriv.h" |
33 | #include "src/gpu/gl/GrGLBuffer.h" |
34 | #include "src/gpu/gl/GrGLGpu.h" |
35 | #include "src/gpu/gl/GrGLOpsRenderPass.h" |
36 | #include "src/gpu/gl/GrGLSemaphore.h" |
37 | #include "src/gpu/gl/GrGLStencilAttachment.h" |
38 | #include "src/gpu/gl/GrGLTextureRenderTarget.h" |
39 | #include "src/gpu/gl/builders/GrGLShaderStringBuilder.h" |
40 | #include "src/sksl/SkSLCompiler.h" |
41 | |
42 | #include <cmath> |
43 | |
44 | #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X) |
45 | #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X) |
46 | |
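// Wraps a GL call that allocates GPU memory and evaluates to the GL error it generated. When the
// caps allow skipping error checks the call is issued normally and GR_GL_NO_ERROR is returned;
// otherwise any pending error is cleared first so the returned error (e.g. GR_GL_OUT_OF_MEMORY)
// can be attributed to this call.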
47 | #define GL_ALLOC_CALL(call) \ |
48 | [&] { \ |
49 | if (this->glCaps().skipErrorChecks()) { \ |
50 | GR_GL_CALL(this->glInterface(), call); \ |
51 | return static_cast<GrGLenum>(GR_GL_NO_ERROR); \ |
52 | } else { \ |
53 | GrGLClearErr(this->glInterface()); \ |
54 | GR_GL_CALL_NOERRCHECK(this->glInterface(), call); \ |
55 | return GR_GL_GET_ERROR(this->glInterface()); \ |
56 | } \ |
57 | }() |
58 | |
59 | //#define USE_NSIGHT |
60 | |
61 | /////////////////////////////////////////////////////////////////////////////// |
62 | |
63 | static const GrGLenum gXfermodeEquation2Blend[] = { |
64 | // Basic OpenGL blend equations. |
65 | GR_GL_FUNC_ADD, |
66 | GR_GL_FUNC_SUBTRACT, |
67 | GR_GL_FUNC_REVERSE_SUBTRACT, |
68 | |
69 | // GL_KHR_blend_equation_advanced. |
70 | GR_GL_SCREEN, |
71 | GR_GL_OVERLAY, |
72 | GR_GL_DARKEN, |
73 | GR_GL_LIGHTEN, |
74 | GR_GL_COLORDODGE, |
75 | GR_GL_COLORBURN, |
76 | GR_GL_HARDLIGHT, |
77 | GR_GL_SOFTLIGHT, |
78 | GR_GL_DIFFERENCE, |
79 | GR_GL_EXCLUSION, |
80 | GR_GL_MULTIPLY, |
81 | GR_GL_HSL_HUE, |
82 | GR_GL_HSL_SATURATION, |
83 | GR_GL_HSL_COLOR, |
84 | GR_GL_HSL_LUMINOSITY, |
85 | |
86 | // Illegal... needs to map to something. |
87 | GR_GL_FUNC_ADD, |
88 | }; |
89 | static_assert(0 == kAdd_GrBlendEquation); |
90 | static_assert(1 == kSubtract_GrBlendEquation); |
91 | static_assert(2 == kReverseSubtract_GrBlendEquation); |
92 | static_assert(3 == kScreen_GrBlendEquation); |
93 | static_assert(4 == kOverlay_GrBlendEquation); |
94 | static_assert(5 == kDarken_GrBlendEquation); |
95 | static_assert(6 == kLighten_GrBlendEquation); |
96 | static_assert(7 == kColorDodge_GrBlendEquation); |
97 | static_assert(8 == kColorBurn_GrBlendEquation); |
98 | static_assert(9 == kHardLight_GrBlendEquation); |
99 | static_assert(10 == kSoftLight_GrBlendEquation); |
100 | static_assert(11 == kDifference_GrBlendEquation); |
101 | static_assert(12 == kExclusion_GrBlendEquation); |
102 | static_assert(13 == kMultiply_GrBlendEquation); |
103 | static_assert(14 == kHSLHue_GrBlendEquation); |
104 | static_assert(15 == kHSLSaturation_GrBlendEquation); |
105 | static_assert(16 == kHSLColor_GrBlendEquation); |
106 | static_assert(17 == kHSLLuminosity_GrBlendEquation); |
107 | static_assert(SK_ARRAY_COUNT(gXfermodeEquation2Blend) == kGrBlendEquationCnt); |
108 | |
109 | static const GrGLenum gXfermodeCoeff2Blend[] = { |
110 | GR_GL_ZERO, |
111 | GR_GL_ONE, |
112 | GR_GL_SRC_COLOR, |
113 | GR_GL_ONE_MINUS_SRC_COLOR, |
114 | GR_GL_DST_COLOR, |
115 | GR_GL_ONE_MINUS_DST_COLOR, |
116 | GR_GL_SRC_ALPHA, |
117 | GR_GL_ONE_MINUS_SRC_ALPHA, |
118 | GR_GL_DST_ALPHA, |
119 | GR_GL_ONE_MINUS_DST_ALPHA, |
120 | GR_GL_CONSTANT_COLOR, |
121 | GR_GL_ONE_MINUS_CONSTANT_COLOR, |
122 | |
123 | // extended blend coeffs |
124 | GR_GL_SRC1_COLOR, |
125 | GR_GL_ONE_MINUS_SRC1_COLOR, |
126 | GR_GL_SRC1_ALPHA, |
127 | GR_GL_ONE_MINUS_SRC1_ALPHA, |
128 | |
129 | // Illegal... needs to map to something. |
130 | GR_GL_ZERO, |
131 | }; |
132 | |
133 | ////////////////////////////////////////////////////////////////////////////// |
134 | |
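// Maps a GL texture target to the compact index used by TextureUnitBindings below
// (TEXTURE_2D -> 0, TEXTURE_RECTANGLE -> 1, TEXTURE_EXTERNAL -> 2).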
135 | static int gl_target_to_binding_index(GrGLenum target) { |
136 | switch (target) { |
137 | case GR_GL_TEXTURE_2D: |
138 | return 0; |
139 | case GR_GL_TEXTURE_RECTANGLE: |
140 | return 1; |
141 | case GR_GL_TEXTURE_EXTERNAL: |
142 | return 2; |
143 | } |
144 | SK_ABORT("Unexpected GL texture target."); |
145 | } |
146 | |
147 | GrGpuResource::UniqueID GrGLGpu::TextureUnitBindings::boundID(GrGLenum target) const { |
148 | return fTargetBindings[gl_target_to_binding_index(target)].fBoundResourceID; |
149 | } |
150 | |
151 | bool GrGLGpu::TextureUnitBindings::hasBeenModified(GrGLenum target) const { |
152 | return fTargetBindings[gl_target_to_binding_index(target)].fHasBeenModified; |
153 | } |
154 | |
155 | void GrGLGpu::TextureUnitBindings::setBoundID(GrGLenum target, GrGpuResource::UniqueID resourceID) { |
156 | int targetIndex = gl_target_to_binding_index(target); |
157 | fTargetBindings[targetIndex].fBoundResourceID = resourceID; |
158 | fTargetBindings[targetIndex].fHasBeenModified = true; |
159 | } |
160 | |
161 | void GrGLGpu::TextureUnitBindings::invalidateForScratchUse(GrGLenum target) { |
162 | this->setBoundID(target, GrGpuResource::UniqueID()); |
163 | } |
164 | |
165 | void GrGLGpu::TextureUnitBindings::invalidateAllTargets(bool markUnmodified) { |
166 | for (auto& targetBinding : fTargetBindings) { |
167 | targetBinding.fBoundResourceID.makeInvalid(); |
168 | if (markUnmodified) { |
169 | targetBinding.fHasBeenModified = false; |
170 | } |
171 | } |
172 | } |
173 | |
174 | ////////////////////////////////////////////////////////////////////////////// |
175 | |
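// Translate GrSamplerState filter and wrap-mode enums to the GL values used for sampler and
// texture parameters.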
176 | static GrGLenum filter_to_gl_mag_filter(GrSamplerState::Filter filter) { |
177 | switch (filter) { |
178 | case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST; |
179 | case GrSamplerState::Filter::kBilerp: return GR_GL_LINEAR; |
180 | case GrSamplerState::Filter::kMipMap: return GR_GL_LINEAR; |
181 | } |
182 | SK_ABORT("Unknown filter"); |
183 | } |
184 | |
185 | static GrGLenum filter_to_gl_min_filter(GrSamplerState::Filter filter) { |
186 | switch (filter) { |
187 | case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST; |
188 | case GrSamplerState::Filter::kBilerp: return GR_GL_LINEAR; |
189 | case GrSamplerState::Filter::kMipMap: return GR_GL_LINEAR_MIPMAP_LINEAR; |
190 | } |
191 | SK_ABORT("Unknown filter"); |
192 | } |
193 | |
194 | static inline GrGLenum wrap_mode_to_gl_wrap(GrSamplerState::WrapMode wrapMode, |
195 | const GrCaps& caps) { |
196 | switch (wrapMode) { |
197 | case GrSamplerState::WrapMode::kClamp: return GR_GL_CLAMP_TO_EDGE; |
198 | case GrSamplerState::WrapMode::kRepeat: return GR_GL_REPEAT; |
199 | case GrSamplerState::WrapMode::kMirrorRepeat: return GR_GL_MIRRORED_REPEAT; |
200 | case GrSamplerState::WrapMode::kClampToBorder: |
201 | // May not be supported but should have been caught earlier |
202 | SkASSERT(caps.clampToBorderSupport()); |
203 | return GR_GL_CLAMP_TO_BORDER; |
204 | } |
205 | SK_ABORT("Unknown wrap mode"); |
206 | } |
207 | |
208 | /////////////////////////////////////////////////////////////////////////////// |
209 | |
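// Lazily creates a GL sampler object for each used combination of filter and wrap modes and
// tracks which sampler is bound to each texture unit so redundant BindSampler calls can be
// skipped.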
210 | class GrGLGpu::SamplerObjectCache { |
211 | public: |
212 | SamplerObjectCache(GrGLGpu* gpu) : fGpu(gpu) { |
213 | fNumTextureUnits = fGpu->glCaps().shaderCaps()->maxFragmentSamplers(); |
214 | fHWBoundSamplers.reset(new GrGLuint[fNumTextureUnits]); |
215 | std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0); |
216 | std::fill_n(fSamplers, kNumSamplers, 0); |
217 | } |
218 | |
219 | ~SamplerObjectCache() { |
220 | if (!fNumTextureUnits) { |
221 | // We've already been abandoned. |
222 | return; |
223 | } |
224 | for (GrGLuint sampler : fSamplers) { |
225 | // The spec states that "zero" values should be silently ignored; however, they still |
226 | // trigger GL errors on some NVIDIA platforms. |
227 | if (sampler) { |
228 | GR_GL_CALL(fGpu->glInterface(), DeleteSamplers(1, &sampler)); |
229 | } |
230 | } |
231 | } |
232 | |
233 | void bindSampler(int unitIdx, GrSamplerState state) { |
234 | int index = StateToIndex(state); |
235 | if (!fSamplers[index]) { |
236 | GrGLuint s; |
237 | GR_GL_CALL(fGpu->glInterface(), GenSamplers(1, &s)); |
238 | if (!s) { |
239 | return; |
240 | } |
241 | fSamplers[index] = s; |
242 | auto minFilter = filter_to_gl_min_filter(state.filter()); |
243 | auto magFilter = filter_to_gl_mag_filter(state.filter()); |
244 | auto wrapX = wrap_mode_to_gl_wrap(state.wrapModeX(), fGpu->glCaps()); |
245 | auto wrapY = wrap_mode_to_gl_wrap(state.wrapModeY(), fGpu->glCaps()); |
246 | GR_GL_CALL(fGpu->glInterface(), |
247 | SamplerParameteri(s, GR_GL_TEXTURE_MIN_FILTER, minFilter)); |
248 | GR_GL_CALL(fGpu->glInterface(), |
249 | SamplerParameteri(s, GR_GL_TEXTURE_MAG_FILTER, magFilter)); |
250 | GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_S, wrapX)); |
251 | GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_T, wrapY)); |
252 | } |
253 | if (fHWBoundSamplers[unitIdx] != fSamplers[index]) { |
254 | GR_GL_CALL(fGpu->glInterface(), BindSampler(unitIdx, fSamplers[index])); |
255 | fHWBoundSamplers[unitIdx] = fSamplers[index]; |
256 | } |
257 | } |
258 | |
259 | void invalidateBindings() { |
260 | // When we have sampler support we always use samplers. So setting these to zero will cause |
261 | // a rebind on next usage. |
262 | std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0); |
263 | } |
264 | |
265 | void abandon() { |
266 | fHWBoundSamplers.reset(); |
267 | fNumTextureUnits = 0; |
268 | } |
269 | |
270 | void release() { |
271 | if (!fNumTextureUnits) { |
272 | // We've already been abandoned. |
273 | return; |
274 | } |
275 | GR_GL_CALL(fGpu->glInterface(), DeleteSamplers(kNumSamplers, fSamplers)); |
276 | std::fill_n(fSamplers, kNumSamplers, 0); |
277 | // Deleting a bound sampler implicitly binds sampler 0. |
278 | std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0); |
279 | } |
280 | |
281 | private: |
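// Packs the filter (3 values) and the two wrap modes (4 values each) into a single array
// index: 16 * filter + 4 * wrapX + wrapY, giving 3 * 4 * 4 = 48 possible samplers.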
282 | static int StateToIndex(GrSamplerState state) { |
283 | int filter = static_cast<int>(state.filter()); |
284 | SkASSERT(filter >= 0 && filter < 3); |
285 | int wrapX = static_cast<int>(state.wrapModeX()); |
286 | SkASSERT(wrapX >= 0 && wrapX < 4); |
287 | int wrapY = static_cast<int>(state.wrapModeY()); |
288 | SkASSERT(wrapY >= 0 && wrapY < 4); |
289 | int idx = 16 * filter + 4 * wrapX + wrapY; |
290 | SkASSERT(idx < kNumSamplers); |
291 | return idx; |
292 | } |
293 | |
294 | GrGLGpu* fGpu; |
295 | static constexpr int kNumSamplers = 48; |
296 | std::unique_ptr<GrGLuint[]> fHWBoundSamplers; |
297 | GrGLuint fSamplers[kNumSamplers]; |
298 | int fNumTextureUnits; |
299 | }; |
300 | |
301 | /////////////////////////////////////////////////////////////////////////////// |
302 | |
303 | sk_sp<GrGpu> GrGLGpu::Make(sk_sp<const GrGLInterface> interface, const GrContextOptions& options, |
304 | GrContext* context) { |
305 | if (!interface) { |
306 | interface = GrGLMakeNativeInterface(); |
307 | // For clients that have written their own GrGLCreateNativeInterface and haven't yet updated |
308 | // to GrGLMakeNativeInterface. |
309 | if (!interface) { |
310 | interface = sk_ref_sp(GrGLCreateNativeInterface()); |
311 | } |
312 | if (!interface) { |
313 | return nullptr; |
314 | } |
315 | } |
316 | #ifdef USE_NSIGHT |
317 | const_cast<GrContextOptions&>(options).fSuppressPathRendering = true; |
318 | #endif |
319 | auto glContext = GrGLContext::Make(std::move(interface), options); |
320 | if (!glContext) { |
321 | return nullptr; |
322 | } |
323 | return sk_sp<GrGpu>(new GrGLGpu(std::move(glContext), context)); |
324 | } |
325 | |
326 | GrGLGpu::GrGLGpu(std::unique_ptr<GrGLContext> ctx, GrContext* context) |
327 | : GrGpu(context) |
328 | , fGLContext(std::move(ctx)) |
329 | , fProgramCache(new ProgramCache(this)) |
330 | , fHWProgramID(0) |
331 | , fTempSrcFBOID(0) |
332 | , fTempDstFBOID(0) |
333 | , fStencilClearFBOID(0) { |
334 | SkASSERT(fGLContext); |
335 | GrGLClearErr(this->glInterface()); |
336 | fCaps = sk_ref_sp(fGLContext->caps()); |
337 | |
338 | fHWTextureUnitBindings.reset(this->numTextureUnits()); |
339 | |
340 | this->hwBufferState(GrGpuBufferType::kVertex)->fGLTarget = GR_GL_ARRAY_BUFFER; |
341 | this->hwBufferState(GrGpuBufferType::kIndex)->fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER; |
342 | this->hwBufferState(GrGpuBufferType::kDrawIndirect)->fGLTarget = GR_GL_DRAW_INDIRECT_BUFFER; |
343 | if (GrGLCaps::TransferBufferType::kChromium == this->glCaps().transferBufferType()) { |
344 | this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget = |
345 | GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM; |
346 | this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget = |
347 | GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM; |
348 | } else { |
349 | this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER; |
350 | this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget = GR_GL_PIXEL_PACK_BUFFER; |
351 | } |
352 | for (int i = 0; i < kGrGpuBufferTypeCount; ++i) { |
353 | fHWBufferState[i].invalidate(); |
354 | } |
355 | static_assert(kGrGpuBufferTypeCount == SK_ARRAY_COUNT(fHWBufferState)); |
356 | |
357 | if (this->glCaps().shaderCaps()->pathRenderingSupport()) { |
358 | fPathRendering.reset(new GrGLPathRendering(this)); |
359 | } |
360 | |
361 | if (this->glCaps().samplerObjectSupport()) { |
362 | fSamplerObjectCache.reset(new SamplerObjectCache(this)); |
363 | } |
364 | } |
365 | |
366 | GrGLGpu::~GrGLGpu() { |
367 | // Ensure any GrGpuResource objects get deleted first, since they may require a working GrGLGpu |
368 | // to release the resources held by the objects themselves. |
369 | fPathRendering.reset(); |
370 | fCopyProgramArrayBuffer.reset(); |
371 | fMipmapProgramArrayBuffer.reset(); |
372 | |
373 | fHWProgram.reset(); |
374 | if (fHWProgramID) { |
375 | // detach the current program so there is no confusion on OpenGL's part |
376 | // that we want it to be deleted |
377 | GL_CALL(UseProgram(0)); |
378 | } |
379 | |
380 | if (fTempSrcFBOID) { |
381 | this->deleteFramebuffer(fTempSrcFBOID); |
382 | } |
383 | if (fTempDstFBOID) { |
384 | this->deleteFramebuffer(fTempDstFBOID); |
385 | } |
386 | if (fStencilClearFBOID) { |
387 | this->deleteFramebuffer(fStencilClearFBOID); |
388 | } |
389 | |
390 | for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) { |
391 | if (0 != fCopyPrograms[i].fProgram) { |
392 | GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram)); |
393 | } |
394 | } |
395 | |
396 | for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) { |
397 | if (0 != fMipmapPrograms[i].fProgram) { |
398 | GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram)); |
399 | } |
400 | } |
401 | |
402 | fSamplerObjectCache.reset(); |
403 | |
404 | while (!fFinishCallbacks.empty()) { |
405 | fFinishCallbacks.front().fCallback(fFinishCallbacks.front().fContext); |
406 | this->deleteSync(fFinishCallbacks.front().fSync); |
407 | fFinishCallbacks.pop_front(); |
408 | } |
409 | } |
410 | |
411 | void GrGLGpu::disconnect(DisconnectType type) { |
412 | INHERITED::disconnect(type); |
413 | if (DisconnectType::kCleanup == type) { |
414 | if (fHWProgramID) { |
415 | GL_CALL(UseProgram(0)); |
416 | } |
417 | if (fTempSrcFBOID) { |
418 | this->deleteFramebuffer(fTempSrcFBOID); |
419 | } |
420 | if (fTempDstFBOID) { |
421 | this->deleteFramebuffer(fTempDstFBOID); |
422 | } |
423 | if (fStencilClearFBOID) { |
424 | this->deleteFramebuffer(fStencilClearFBOID); |
425 | } |
426 | for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) { |
427 | if (fCopyPrograms[i].fProgram) { |
428 | GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram)); |
429 | } |
430 | } |
431 | for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) { |
432 | if (fMipmapPrograms[i].fProgram) { |
433 | GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram)); |
434 | } |
435 | } |
436 | |
437 | if (fSamplerObjectCache) { |
438 | fSamplerObjectCache->release(); |
439 | } |
440 | } else { |
441 | if (fProgramCache) { |
442 | fProgramCache->abandon(); |
443 | } |
444 | if (fSamplerObjectCache) { |
445 | fSamplerObjectCache->abandon(); |
446 | } |
447 | } |
448 | |
449 | fHWProgram.reset(); |
450 | fProgramCache.reset(); |
451 | |
452 | fHWProgramID = 0; |
453 | fTempSrcFBOID = 0; |
454 | fTempDstFBOID = 0; |
455 | fStencilClearFBOID = 0; |
456 | fCopyProgramArrayBuffer.reset(); |
457 | for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) { |
458 | fCopyPrograms[i].fProgram = 0; |
459 | } |
460 | fMipmapProgramArrayBuffer.reset(); |
461 | for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) { |
462 | fMipmapPrograms[i].fProgram = 0; |
463 | } |
464 | |
465 | if (this->glCaps().shaderCaps()->pathRenderingSupport()) { |
466 | this->glPathRendering()->disconnect(type); |
467 | } |
468 | |
469 | while (!fFinishCallbacks.empty()) { |
470 | fFinishCallbacks.front().fCallback(fFinishCallbacks.front().fContext); |
471 | if (DisconnectType::kCleanup == type) { |
472 | this->deleteSync(fFinishCallbacks.front().fSync); |
473 | } |
474 | fFinishCallbacks.pop_front(); |
475 | } |
476 | } |
477 | |
478 | /////////////////////////////////////////////////////////////////////////////// |
479 | |
480 | void GrGLGpu::onResetContext(uint32_t resetBits) { |
481 | if (resetBits & kMisc_GrGLBackendState) { |
482 | // We don't use the depth (z) buffer at all. |
483 | GL_CALL(Disable(GR_GL_DEPTH_TEST)); |
484 | GL_CALL(DepthMask(GR_GL_FALSE)); |
485 | |
486 | // We don't use face culling. |
487 | GL_CALL(Disable(GR_GL_CULL_FACE)); |
488 | // We do use separate stencil. Our algorithms don't care which face is front vs. back so |
489 | // just set this to the default for self-consistency. |
490 | GL_CALL(FrontFace(GR_GL_CCW)); |
491 | |
492 | this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->invalidate(); |
493 | this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->invalidate(); |
494 | |
495 | if (GR_IS_GR_GL(this->glStandard())) { |
496 | #ifndef USE_NSIGHT |
497 | // Desktop-only state that we never change |
498 | if (!this->glCaps().isCoreProfile()) { |
499 | GL_CALL(Disable(GR_GL_POINT_SMOOTH)); |
500 | GL_CALL(Disable(GR_GL_LINE_SMOOTH)); |
501 | GL_CALL(Disable(GR_GL_POLYGON_SMOOTH)); |
502 | GL_CALL(Disable(GR_GL_POLYGON_STIPPLE)); |
503 | GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP)); |
504 | GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP)); |
505 | } |
506 | // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a |
507 | // core profile. This seems like a bug since the core spec removes any mention of |
508 | // GL_ARB_imaging. |
509 | if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) { |
510 | GL_CALL(Disable(GR_GL_COLOR_TABLE)); |
511 | } |
512 | GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL)); |
513 | |
514 | fHWWireframeEnabled = kUnknown_TriState; |
515 | #endif |
516 | // ES doesn't support glPointSize, so we always set the point size in the vertex shader; |
517 | // on desktop GL we must enable that behavior here. |
518 | GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE)); |
519 | |
520 | } |
521 | |
522 | if (GR_IS_GR_GL_ES(this->glStandard()) && |
523 | this->glCaps().fbFetchRequiresEnablePerSample()) { |
524 | // The arm extension requires specifically enabling MSAA fetching per sample. |
525 | // On some devices this may have a perf hit. Also, multiple render targets are disabled. |
526 | GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE)); |
527 | } |
528 | fHWWriteToColor = kUnknown_TriState; |
529 | // we only ever use lines in hairline mode |
530 | GL_CALL(LineWidth(1)); |
531 | GL_CALL(Disable(GR_GL_DITHER)); |
532 | |
533 | fHWClearColor[0] = fHWClearColor[1] = fHWClearColor[2] = fHWClearColor[3] = SK_FloatNaN; |
534 | } |
535 | |
536 | if (resetBits & kMSAAEnable_GrGLBackendState) { |
537 | fMSAAEnabled = kUnknown_TriState; |
538 | |
539 | if (this->caps()->mixedSamplesSupport()) { |
540 | // The skia blend modes all use premultiplied alpha and therefore expect RGBA coverage |
541 | // modulation. This state has no effect when not rendering to a mixed sampled target. |
542 | GL_CALL(CoverageModulation(GR_GL_RGBA)); |
543 | } |
544 | |
545 | fHWConservativeRasterEnabled = kUnknown_TriState; |
546 | } |
547 | |
548 | fHWActiveTextureUnitIdx = -1; // invalid |
549 | fLastPrimitiveType = static_cast<GrPrimitiveType>(-1); |
550 | |
551 | if (resetBits & kTextureBinding_GrGLBackendState) { |
552 | for (int s = 0; s < this->numTextureUnits(); ++s) { |
553 | fHWTextureUnitBindings[s].invalidateAllTargets(false); |
554 | } |
555 | if (fSamplerObjectCache) { |
556 | fSamplerObjectCache->invalidateBindings(); |
557 | } |
558 | } |
559 | |
560 | if (resetBits & kBlend_GrGLBackendState) { |
561 | fHWBlendState.invalidate(); |
562 | } |
563 | |
564 | if (resetBits & kView_GrGLBackendState) { |
565 | fHWScissorSettings.invalidate(); |
566 | fHWWindowRectsState.invalidate(); |
567 | fHWViewport.invalidate(); |
568 | } |
569 | |
570 | if (resetBits & kStencil_GrGLBackendState) { |
571 | fHWStencilSettings.invalidate(); |
572 | fHWStencilTestEnabled = kUnknown_TriState; |
573 | } |
574 | |
575 | // Vertex |
576 | if (resetBits & kVertex_GrGLBackendState) { |
577 | fHWVertexArrayState.invalidate(); |
578 | this->hwBufferState(GrGpuBufferType::kVertex)->invalidate(); |
579 | this->hwBufferState(GrGpuBufferType::kIndex)->invalidate(); |
580 | this->hwBufferState(GrGpuBufferType::kDrawIndirect)->invalidate(); |
581 | fHWPatchVertexCount = 0; |
582 | } |
583 | |
584 | if (resetBits & kRenderTarget_GrGLBackendState) { |
585 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
586 | fHWSRGBFramebuffer = kUnknown_TriState; |
587 | fBoundDrawFramebuffer = 0; |
588 | } |
589 | |
590 | if (resetBits & kPathRendering_GrGLBackendState) { |
591 | if (this->caps()->shaderCaps()->pathRenderingSupport()) { |
592 | this->glPathRendering()->resetContext(); |
593 | } |
594 | } |
595 | |
596 | // Reset the pixel store state to the default values that the rest of the code assumes. |
597 | if (resetBits & kPixelStore_GrGLBackendState) { |
598 | if (this->caps()->writePixelsRowBytesSupport()) { |
599 | GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); |
600 | } |
601 | if (this->glCaps().readPixelsRowBytesSupport()) { |
602 | GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0)); |
603 | } |
604 | if (this->glCaps().packFlipYSupport()) { |
605 | GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE)); |
606 | } |
607 | } |
608 | |
609 | if (resetBits & kProgram_GrGLBackendState) { |
610 | fHWProgramID = 0; |
611 | fHWProgram.reset(); |
612 | } |
613 | ++fResetTimestampForTextureParameters; |
614 | } |
615 | |
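// Validates the GL info of a wrapped backend texture and fills out 'desc'. Fails for unknown
// formats, unsupported texture targets, and protected textures.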
616 | static bool check_backend_texture(const GrBackendTexture& backendTex, |
617 | const GrGLCaps& caps, |
618 | GrGLTexture::Desc* desc, |
619 | bool skipRectTexSupportCheck = false) { |
620 | GrGLTextureInfo info; |
621 | if (!backendTex.getGLTextureInfo(&info) || !info.fID || !info.fFormat) { |
622 | return false; |
623 | } |
624 | |
625 | desc->fSize = {backendTex.width(), backendTex.height()}; |
626 | desc->fTarget = info.fTarget; |
627 | desc->fID = info.fID; |
628 | desc->fFormat = GrGLFormatFromGLEnum(info.fFormat); |
629 | |
630 | if (desc->fFormat == GrGLFormat::kUnknown) { |
631 | return false; |
632 | } |
633 | if (GR_GL_TEXTURE_EXTERNAL == desc->fTarget) { |
634 | if (!caps.shaderCaps()->externalTextureSupport()) { |
635 | return false; |
636 | } |
637 | } else if (GR_GL_TEXTURE_RECTANGLE == desc->fTarget) { |
638 | if (!caps.rectangleTextureSupport() && !skipRectTexSupportCheck) { |
639 | return false; |
640 | } |
641 | } else if (GR_GL_TEXTURE_2D != desc->fTarget) { |
642 | return false; |
643 | } |
644 | if (backendTex.isProtected()) { |
645 | // Not supported in GL backend at this time. |
646 | return false; |
647 | } |
648 | |
649 | return true; |
650 | } |
651 | |
652 | sk_sp<GrTexture> GrGLGpu::onWrapBackendTexture(const GrBackendTexture& backendTex, |
653 | GrWrapOwnership ownership, |
654 | GrWrapCacheable cacheable, |
655 | GrIOType ioType) { |
656 | GrGLTexture::Desc desc; |
657 | if (!check_backend_texture(backendTex, this->glCaps(), &desc)) { |
658 | return nullptr; |
659 | } |
660 | |
661 | if (kBorrow_GrWrapOwnership == ownership) { |
662 | desc.fOwnership = GrBackendObjectOwnership::kBorrowed; |
663 | } else { |
664 | desc.fOwnership = GrBackendObjectOwnership::kOwned; |
665 | } |
666 | |
667 | GrMipMapsStatus mipMapsStatus = backendTex.hasMipMaps() ? GrMipMapsStatus::kValid |
668 | : GrMipMapsStatus::kNotAllocated; |
669 | |
670 | auto texture = GrGLTexture::MakeWrapped(this, mipMapsStatus, desc, |
671 | backendTex.getGLTextureParams(), cacheable, ioType); |
672 | // We don't know what parameters are already set on wrapped textures. |
673 | texture->textureParamsModified(); |
674 | return std::move(texture); |
675 | } |
676 | |
677 | static bool check_compressed_backend_texture(const GrBackendTexture& backendTex, |
678 | const GrGLCaps& caps, GrGLTexture::Desc* desc, |
679 | bool skipRectTexSupportCheck = false) { |
680 | GrGLTextureInfo info; |
681 | if (!backendTex.getGLTextureInfo(&info) || !info.fID || !info.fFormat) { |
682 | return false; |
683 | } |
684 | |
685 | desc->fSize = {backendTex.width(), backendTex.height()}; |
686 | desc->fTarget = info.fTarget; |
687 | desc->fID = info.fID; |
688 | desc->fFormat = GrGLFormatFromGLEnum(info.fFormat); |
689 | |
690 | if (desc->fFormat == GrGLFormat::kUnknown) { |
691 | return false; |
692 | } |
693 | |
694 | if (GR_GL_TEXTURE_2D != desc->fTarget) { |
695 | return false; |
696 | } |
697 | if (backendTex.isProtected()) { |
698 | // Not supported in GL backend at this time. |
699 | return false; |
700 | } |
701 | |
702 | return true; |
703 | } |
704 | |
705 | sk_sp<GrTexture> GrGLGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex, |
706 | GrWrapOwnership ownership, |
707 | GrWrapCacheable cacheable) { |
708 | GrGLTexture::Desc desc; |
709 | if (!check_compressed_backend_texture(backendTex, this->glCaps(), &desc)) { |
710 | return nullptr; |
711 | } |
712 | |
713 | if (kBorrow_GrWrapOwnership == ownership) { |
714 | desc.fOwnership = GrBackendObjectOwnership::kBorrowed; |
715 | } else { |
716 | desc.fOwnership = GrBackendObjectOwnership::kOwned; |
717 | } |
718 | |
719 | GrMipMapsStatus mipMapsStatus = backendTex.hasMipMaps() ? GrMipMapsStatus::kValid |
720 | : GrMipMapsStatus::kNotAllocated; |
721 | |
722 | auto texture = GrGLTexture::MakeWrapped(this, mipMapsStatus, desc, |
723 | backendTex.getGLTextureParams(), cacheable, |
724 | kRead_GrIOType); |
725 | // We don't know what parameters are already set on wrapped textures. |
726 | texture->textureParamsModified(); |
727 | return std::move(texture); |
728 | } |
729 | |
730 | sk_sp<GrTexture> GrGLGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex, |
731 | int sampleCnt, |
732 | GrWrapOwnership ownership, |
733 | GrWrapCacheable cacheable) { |
734 | const GrGLCaps& caps = this->glCaps(); |
735 | |
736 | GrGLTexture::Desc desc; |
737 | if (!check_backend_texture(backendTex, this->glCaps(), &desc)) { |
738 | return nullptr; |
739 | } |
740 | SkASSERT(caps.isFormatRenderable(desc.fFormat, sampleCnt)); |
741 | SkASSERT(caps.isFormatTexturable(desc.fFormat)); |
742 | |
743 | // We don't support rendering to a EXTERNAL texture. |
744 | if (GR_GL_TEXTURE_EXTERNAL == desc.fTarget) { |
745 | return nullptr; |
746 | } |
747 | |
748 | if (kBorrow_GrWrapOwnership == ownership) { |
749 | desc.fOwnership = GrBackendObjectOwnership::kBorrowed; |
750 | } else { |
751 | desc.fOwnership = GrBackendObjectOwnership::kOwned; |
752 | } |
753 | |
754 | |
755 | sampleCnt = caps.getRenderTargetSampleCount(sampleCnt, desc.fFormat); |
756 | SkASSERT(sampleCnt); |
757 | |
758 | GrGLRenderTarget::IDs rtIDs; |
759 | if (!this->createRenderTargetObjects(desc, sampleCnt, &rtIDs)) { |
760 | return nullptr; |
761 | } |
762 | |
763 | GrMipMapsStatus mipMapsStatus = backendTex.hasMipMaps() ? GrMipMapsStatus::kDirty |
764 | : GrMipMapsStatus::kNotAllocated; |
765 | |
766 | sk_sp<GrGLTextureRenderTarget> texRT(GrGLTextureRenderTarget::MakeWrapped( |
767 | this, sampleCnt, desc, backendTex.getGLTextureParams(), rtIDs, cacheable, |
768 | mipMapsStatus)); |
769 | texRT->baseLevelWasBoundToFBO(); |
770 | // We don't know what parameters are already set on wrapped textures. |
771 | texRT->textureParamsModified(); |
772 | return std::move(texRT); |
773 | } |
774 | |
775 | sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) { |
776 | GrGLFramebufferInfo info; |
777 | if (!backendRT.getGLFramebufferInfo(&info)) { |
778 | return nullptr; |
779 | } |
780 | |
781 | if (backendRT.isProtected()) { |
782 | // Not supported in GL at this time. |
783 | return nullptr; |
784 | } |
785 | |
786 | const auto format = backendRT.getBackendFormat().asGLFormat(); |
787 | if (!this->glCaps().isFormatRenderable(format, backendRT.sampleCnt())) { |
788 | return nullptr; |
789 | } |
790 | |
791 | GrGLRenderTarget::IDs rtIDs; |
792 | rtIDs.fRTFBOID = info.fFBOID; |
793 | rtIDs.fMSColorRenderbufferID = 0; |
794 | rtIDs.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID; |
795 | rtIDs.fRTFBOOwnership = GrBackendObjectOwnership::kBorrowed; |
796 | |
797 | int sampleCount = this->glCaps().getRenderTargetSampleCount(backendRT.sampleCnt(), format); |
798 | |
799 | return GrGLRenderTarget::MakeWrapped(this, backendRT.dimensions(), format, sampleCount, rtIDs, |
800 | backendRT.stencilBits()); |
801 | } |
802 | |
803 | sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTexture& tex, |
804 | int sampleCnt) { |
805 | GrGLTexture::Desc desc; |
806 | // We do not check whether texture rectangle is supported by Skia - if the caller provided us |
807 | // with a texture rectangle, we assume the necessary support exists. |
808 | if (!check_backend_texture(tex, this->glCaps(), &desc, true)) { |
809 | return nullptr; |
810 | } |
811 | |
812 | if (!this->glCaps().isFormatRenderable(desc.fFormat, sampleCnt)) { |
813 | return nullptr; |
814 | } |
815 | |
816 | const int sampleCount = this->glCaps().getRenderTargetSampleCount(sampleCnt, desc.fFormat); |
817 | GrGLRenderTarget::IDs rtIDs; |
818 | if (!this->createRenderTargetObjects(desc, sampleCount, &rtIDs)) { |
819 | return nullptr; |
820 | } |
821 | return GrGLRenderTarget::MakeWrapped(this, desc.fSize, desc.fFormat, sampleCount, rtIDs, 0); |
822 | } |
823 | |
824 | static bool check_write_and_transfer_input(GrGLTexture* glTex) { |
825 | if (!glTex) { |
826 | return false; |
827 | } |
828 | |
829 | // Write or transfer of pixels is not implemented for TEXTURE_EXTERNAL textures |
830 | if (GR_GL_TEXTURE_EXTERNAL == glTex->target()) { |
831 | return false; |
832 | } |
833 | |
834 | return true; |
835 | } |
836 | |
837 | bool GrGLGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height, |
838 | GrColorType surfaceColorType, GrColorType srcColorType, |
839 | const GrMipLevel texels[], int mipLevelCount, |
840 | bool prepForTexSampling) { |
841 | auto glTex = static_cast<GrGLTexture*>(surface->asTexture()); |
842 | |
843 | if (!check_write_and_transfer_input(glTex)) { |
844 | return false; |
845 | } |
846 | |
847 | this->bindTextureToScratchUnit(glTex->target(), glTex->textureID()); |
848 | |
849 | SkASSERT(!GrGLFormatIsCompressed(glTex->format())); |
850 | return this->uploadTexData(glTex->format(), surfaceColorType, glTex->width(), glTex->height(), |
851 | glTex->target(), left, top, width, height, srcColorType, texels, |
852 | mipLevelCount); |
853 | } |
854 | |
855 | bool GrGLGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height, |
856 | GrColorType textureColorType, GrColorType bufferColorType, |
857 | GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) { |
858 | GrGLTexture* glTex = static_cast<GrGLTexture*>(texture); |
859 | |
860 | // Can't transfer compressed data |
861 | SkASSERT(!GrGLFormatIsCompressed(glTex->format())); |
862 | |
863 | if (!check_write_and_transfer_input(glTex)) { |
864 | return false; |
865 | } |
866 | |
867 | static_assert(sizeof(int) == sizeof(int32_t), ""); |
868 | if (width <= 0 || height <= 0) { |
869 | return false; |
870 | } |
871 | |
872 | this->bindTextureToScratchUnit(glTex->target(), glTex->textureID()); |
873 | |
874 | SkASSERT(!transferBuffer->isMapped()); |
875 | SkASSERT(!transferBuffer->isCpuBuffer()); |
876 | const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer); |
877 | this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer); |
878 | |
879 | SkDEBUGCODE( |
880 | SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height); |
881 | SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height()); |
882 | SkASSERT(bounds.contains(subRect)); |
883 | ) |
884 | |
885 | size_t bpp = GrColorTypeBytesPerPixel(bufferColorType); |
886 | const size_t trimRowBytes = width * bpp; |
887 | const void* pixels = (void*)offset; |
888 | if (width < 0 || height < 0) { |
889 | return false; |
890 | } |
891 | |
892 | bool restoreGLRowLength = false; |
893 | if (trimRowBytes != rowBytes) { |
894 | // we should have checked for this support already |
895 | SkASSERT(this->glCaps().writePixelsRowBytesSupport()); |
896 | GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowBytes / bpp)); |
897 | restoreGLRowLength = true; |
898 | } |
899 | |
900 | GrGLFormat textureFormat = glTex->format(); |
901 | // External format and type come from the upload data. |
902 | GrGLenum externalFormat = 0; |
903 | GrGLenum externalType = 0; |
904 | this->glCaps().getTexSubImageExternalFormatAndType( |
905 | textureFormat, textureColorType, bufferColorType, &externalFormat, &externalType); |
906 | if (!externalFormat || !externalType) { |
907 | return false; |
908 | } |
909 | |
910 | GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1)); |
911 | GL_CALL(TexSubImage2D(glTex->target(), |
912 | 0, |
913 | left, top, |
914 | width, |
915 | height, |
916 | externalFormat, externalType, |
917 | pixels)); |
918 | |
919 | if (restoreGLRowLength) { |
920 | GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); |
921 | } |
922 | |
923 | return true; |
924 | } |
925 | |
926 | bool GrGLGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height, |
927 | GrColorType surfaceColorType, GrColorType dstColorType, |
928 | GrGpuBuffer* transferBuffer, size_t offset) { |
929 | auto* glBuffer = static_cast<GrGLBuffer*>(transferBuffer); |
930 | this->bindBuffer(GrGpuBufferType::kXferGpuToCpu, glBuffer); |
931 | auto offsetAsPtr = reinterpret_cast<void*>(offset); |
932 | return this->readOrTransferPixelsFrom(surface, left, top, width, height, surfaceColorType, |
933 | dstColorType, offsetAsPtr, width); |
934 | } |
935 | |
936 | void GrGLGpu::unbindCpuToGpuXferBuffer() { |
937 | auto* xferBufferState = this->hwBufferState(GrGpuBufferType::kXferCpuToGpu); |
938 | if (!xferBufferState->fBoundBufferUniqueID.isInvalid()) { |
939 | GL_CALL(BindBuffer(xferBufferState->fGLTarget, 0)); |
940 | xferBufferState->invalidate(); |
941 | } |
942 | } |
943 | |
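// Uploads one or more mip levels of uncompressed pixel data to the currently bound texture via
// TexSubImage2D, setting GR_GL_UNPACK_ROW_LENGTH when source rows are padded (if supported).
// A level with null fPixels is skipped and marks the mip maps as dirty.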
944 | bool GrGLGpu::uploadTexData(GrGLFormat textureFormat, GrColorType textureColorType, int texWidth, |
945 | int texHeight, GrGLenum target, int left, int top, int width, |
946 | int height, GrColorType srcColorType, const GrMipLevel texels[], |
947 | int mipLevelCount, GrMipMapsStatus* mipMapsStatus) { |
948 | // If we're uploading compressed data then we should be using uploadCompressedTexData |
949 | SkASSERT(!GrGLFormatIsCompressed(textureFormat)); |
950 | |
951 | SkASSERT(this->glCaps().isFormatTexturable(textureFormat)); |
952 | SkDEBUGCODE( |
953 | SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height); |
954 | SkIRect bounds = SkIRect::MakeWH(texWidth, texHeight); |
955 | SkASSERT(bounds.contains(subRect)); |
956 | ) |
957 | SkASSERT(1 == mipLevelCount || |
958 | (0 == left && 0 == top && width == texWidth && height == texHeight)); |
959 | |
960 | this->unbindCpuToGpuXferBuffer(); |
961 | |
962 | const GrGLInterface* interface = this->glInterface(); |
963 | const GrGLCaps& caps = this->glCaps(); |
964 | |
965 | size_t bpp = GrColorTypeBytesPerPixel(srcColorType); |
966 | |
967 | if (width == 0 || height == 0) { |
968 | return false; |
969 | } |
970 | |
971 | // External format and type come from the upload data. |
972 | GrGLenum externalFormat; |
973 | GrGLenum externalType; |
974 | this->glCaps().getTexSubImageExternalFormatAndType( |
975 | textureFormat, textureColorType, srcColorType, &externalFormat, &externalType); |
976 | if (!externalFormat || !externalType) { |
977 | return false; |
978 | } |
979 | |
980 | /* |
981 | * Check whether to allocate a temporary buffer for flipping y or |
982 | * because our srcData has extra bytes past each row. If so, we need |
983 | * to trim those off here, since GL ES may not let us specify |
984 | * GL_UNPACK_ROW_LENGTH. |
985 | */ |
986 | bool restoreGLRowLength = false; |
987 | |
988 | if (mipMapsStatus) { |
989 | *mipMapsStatus = (mipLevelCount > 1) ? |
990 | GrMipMapsStatus::kValid : GrMipMapsStatus::kNotAllocated; |
991 | } |
992 | |
993 | GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1)); |
994 | |
995 | for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; currentMipLevel++) { |
996 | if (!texels[currentMipLevel].fPixels) { |
997 | if (mipMapsStatus) { |
998 | *mipMapsStatus = GrMipMapsStatus::kDirty; |
999 | } |
1000 | continue; |
1001 | } |
1002 | int twoToTheMipLevel = 1 << currentMipLevel; |
1003 | const int currentWidth = std::max(1, width / twoToTheMipLevel); |
1004 | const int currentHeight = std::max(1, height / twoToTheMipLevel); |
1005 | const size_t trimRowBytes = currentWidth * bpp; |
1006 | const size_t rowBytes = texels[currentMipLevel].fRowBytes; |
1007 | |
1008 | if (caps.writePixelsRowBytesSupport() && (rowBytes != trimRowBytes || restoreGLRowLength)) { |
1009 | GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp); |
1010 | GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength)); |
1011 | restoreGLRowLength = true; |
1012 | } |
1013 | |
1014 | GL_CALL(TexSubImage2D(target, currentMipLevel, left, top, currentWidth, currentHeight, |
1015 | externalFormat, externalType, texels[currentMipLevel].fPixels)); |
1016 | } |
1017 | if (restoreGLRowLength) { |
1018 | SkASSERT(caps.writePixelsRowBytesSupport()); |
1019 | GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); |
1020 | } |
1021 | return true; |
1022 | } |
1023 | |
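// Allocates and uploads compressed texture data for all requested mip levels: TexStorage2D plus
// CompressedTexSubImage2D when texture storage is supported, CompressedTexImage2D per level
// otherwise. Returns false if GL reports an allocation error.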
1024 | bool GrGLGpu::uploadCompressedTexData(GrGLFormat format, |
1025 | SkISize dimensions, |
1026 | GrMipMapped mipMapped, |
1027 | GrGLenum target, |
1028 | const void* data, size_t dataSize) { |
1029 | SkASSERT(format != GrGLFormat::kUnknown); |
1030 | const GrGLCaps& caps = this->glCaps(); |
1031 | |
1032 | // We only need the internal format for compressed 2D textures. |
1033 | GrGLenum internalFormat = caps.getTexImageOrStorageInternalFormat(format); |
1034 | if (!internalFormat) { |
1035 | return false; |
1036 | } |
1037 | |
1038 | SkImage::CompressionType compressionType = GrGLFormatToCompressionType(format); |
1039 | SkASSERT(compressionType != SkImage::CompressionType::kNone); |
1040 | |
1041 | bool useTexStorage = caps.formatSupportsTexStorage(format); |
1042 | |
1043 | int numMipLevels = 1; |
1044 | if (mipMapped == GrMipMapped::kYes) { |
1045 | numMipLevels = SkMipMap::ComputeLevelCount(dimensions.width(), dimensions.height())+1; |
1046 | } |
1047 | |
1048 | // TODO: Make sure that the width and height that we pass to OpenGL |
1049 | // is a multiple of the block size. |
1050 | |
1051 | if (useTexStorage) { |
1052 | // We never resize or change formats of textures. |
1053 | GrGLenum error = GL_ALLOC_CALL(TexStorage2D(target, numMipLevels, internalFormat, |
1054 | dimensions.width(), dimensions.height())); |
1055 | if (error != GR_GL_NO_ERROR) { |
1056 | return false; |
1057 | } |
1058 | |
1059 | size_t offset = 0; |
1060 | for (int level = 0; level < numMipLevels; ++level) { |
1061 | |
1062 | size_t levelDataSize = SkCompressedDataSize(compressionType, dimensions, |
1063 | nullptr, false); |
1064 | |
1065 | error = GL_ALLOC_CALL(CompressedTexSubImage2D(target, |
1066 | level, |
1067 | 0, // left |
1068 | 0, // top |
1069 | dimensions.width(), |
1070 | dimensions.height(), |
1071 | internalFormat, |
1072 | SkToInt(levelDataSize), |
1073 | &((char*)data)[offset])); |
1074 | |
1075 | if (error != GR_GL_NO_ERROR) { |
1076 | return false; |
1077 | } |
1078 | |
1079 | offset += levelDataSize; |
1080 | dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)}; |
1081 | } |
1082 | } else { |
1083 | size_t offset = 0; |
1084 | |
1085 | for (int level = 0; level < numMipLevels; ++level) { |
1086 | size_t levelDataSize = SkCompressedDataSize(compressionType, dimensions, |
1087 | nullptr, false); |
1088 | |
1089 | const char* rawLevelData = &((char*)data)[offset]; |
1090 | GrGLenum error = GL_ALLOC_CALL(CompressedTexImage2D(target, |
1091 | level, |
1092 | internalFormat, |
1093 | dimensions.width(), |
1094 | dimensions.height(), |
1095 | 0, // border |
1096 | SkToInt(levelDataSize), |
1097 | rawLevelData)); |
1098 | |
1099 | if (error != GR_GL_NO_ERROR) { |
1100 | return false; |
1101 | } |
1102 | |
1103 | offset += levelDataSize; |
1104 | dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)}; |
1105 | } |
1106 | } |
1107 | return true; |
1108 | } |
1109 | |
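// Allocates storage for a multisampled renderbuffer using whichever MSAA FBO flavor the context
// supports; returns false if GL reports an error (e.g. out of memory).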
1110 | bool GrGLGpu::renderbufferStorageMSAA(const GrGLContext& ctx, int sampleCount, GrGLenum format, |
1111 | int width, int height) { |
1112 | SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType()); |
1113 | GrGLenum error; |
1114 | switch (ctx.caps()->msFBOType()) { |
1115 | case GrGLCaps::kStandard_MSFBOType: |
1116 | error = GL_ALLOC_CALL(RenderbufferStorageMultisample(GR_GL_RENDERBUFFER, sampleCount, |
1117 | format, width, height)); |
1118 | break; |
1119 | case GrGLCaps::kES_Apple_MSFBOType: |
1120 | error = GL_ALLOC_CALL(RenderbufferStorageMultisampleES2APPLE( |
1121 | GR_GL_RENDERBUFFER, sampleCount, format, width, height)); |
1122 | break; |
1123 | case GrGLCaps::kES_EXT_MsToTexture_MSFBOType: |
1124 | case GrGLCaps::kES_IMG_MsToTexture_MSFBOType: |
1125 | error = GL_ALLOC_CALL(RenderbufferStorageMultisampleES2EXT( |
1126 | GR_GL_RENDERBUFFER, sampleCount, format, width, height)); |
1127 | break; |
1128 | case GrGLCaps::kNone_MSFBOType: |
1129 | SkUNREACHABLE; |
1130 | break; |
1131 | } |
1132 | return error == GR_GL_NO_ERROR; |
1133 | } |
1134 | |
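// Creates the FBO(s) needed to render to the texture described by 'desc'. When MSAA render
// buffers are used, a second FBO with a multisampled color renderbuffer is created and later
// resolved into the texture FBO; otherwise the texture is attached directly to a single FBO.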
1135 | bool GrGLGpu::createRenderTargetObjects(const GrGLTexture::Desc& desc, |
1136 | int sampleCount, |
1137 | GrGLRenderTarget::IDs* rtIDs) { |
1138 | rtIDs->fMSColorRenderbufferID = 0; |
1139 | rtIDs->fRTFBOID = 0; |
1140 | rtIDs->fRTFBOOwnership = GrBackendObjectOwnership::kOwned; |
1141 | rtIDs->fTexFBOID = 0; |
1142 | |
1143 | GrGLenum colorRenderbufferFormat = 0; // suppress warning |
1144 | |
1145 | if (desc.fFormat == GrGLFormat::kUnknown) { |
1146 | goto FAILED; |
1147 | } |
1148 | |
1149 | if (sampleCount > 1 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) { |
1150 | goto FAILED; |
1151 | } |
1152 | |
1153 | GL_CALL(GenFramebuffers(1, &rtIDs->fTexFBOID)); |
1154 | if (!rtIDs->fTexFBOID) { |
1155 | goto FAILED; |
1156 | } |
1157 | |
1158 | // If we are using multisampling we will create two FBOs. We render to one and then resolve to |
1159 | // the texture bound to the other. The exception is the IMG multisample extension. With this |
1160 | // extension the texture is multisampled when rendered to and then auto-resolves when it is |
1161 | // rendered from. |
1162 | if (sampleCount > 1 && this->glCaps().usesMSAARenderBuffers()) { |
1163 | GL_CALL(GenFramebuffers(1, &rtIDs->fRTFBOID)); |
1164 | GL_CALL(GenRenderbuffers(1, &rtIDs->fMSColorRenderbufferID)); |
1165 | if (!rtIDs->fRTFBOID || !rtIDs->fMSColorRenderbufferID) { |
1166 | goto FAILED; |
1167 | } |
1168 | colorRenderbufferFormat = this->glCaps().getRenderbufferInternalFormat(desc.fFormat); |
1169 | } else { |
1170 | rtIDs->fRTFBOID = rtIDs->fTexFBOID; |
1171 | } |
1172 | |
1173 | // below here we may bind the FBO |
1174 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
1175 | if (rtIDs->fRTFBOID != rtIDs->fTexFBOID) { |
1176 | SkASSERT(sampleCount > 1); |
1177 | GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, rtIDs->fMSColorRenderbufferID)); |
1178 | if (!this->renderbufferStorageMSAA(*fGLContext, sampleCount, colorRenderbufferFormat, |
1179 | desc.fSize.width(), desc.fSize.height())) { |
1180 | goto FAILED; |
1181 | } |
1182 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, rtIDs->fRTFBOID); |
1183 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
1184 | GR_GL_COLOR_ATTACHMENT0, |
1185 | GR_GL_RENDERBUFFER, |
1186 | rtIDs->fMSColorRenderbufferID)); |
1187 | } |
1188 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, rtIDs->fTexFBOID); |
1189 | |
1190 | if (this->glCaps().usesImplicitMSAAResolve() && sampleCount > 1) { |
1191 | GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER, |
1192 | GR_GL_COLOR_ATTACHMENT0, |
1193 | desc.fTarget, |
1194 | desc.fID, |
1195 | 0, |
1196 | sampleCount)); |
1197 | } else { |
1198 | GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, |
1199 | GR_GL_COLOR_ATTACHMENT0, |
1200 | desc.fTarget, |
1201 | desc.fID, |
1202 | 0)); |
1203 | } |
1204 | |
1205 | return true; |
1206 | |
1207 | FAILED: |
1208 | if (rtIDs->fMSColorRenderbufferID) { |
1209 | GL_CALL(DeleteRenderbuffers(1, &rtIDs->fMSColorRenderbufferID)); |
1210 | } |
1211 | if (rtIDs->fRTFBOID != rtIDs->fTexFBOID) { |
1212 | this->deleteFramebuffer(rtIDs->fRTFBOID); |
1213 | } |
1214 | if (rtIDs->fTexFBOID) { |
1215 | this->deleteFramebuffer(rtIDs->fTexFBOID); |
1216 | } |
1217 | return false; |
1218 | } |
1219 | |
1220 | // It is useful to set a breakpoint here to know when createTexture fails. |
1221 | static sk_sp<GrTexture> return_null_texture() { |
1222 | // SkDEBUGFAIL("null texture"); |
1223 | return nullptr; |
1224 | } |
1225 | |
1226 | static GrGLTextureParameters::SamplerOverriddenState set_initial_texture_params( |
1227 | const GrGLInterface* interface, GrGLenum target) { |
1228 | // Some drivers like to know filter/wrap before seeing glTexImage2D. Some |
1229 | // drivers have a bug where an FBO won't be complete if it includes a |
1230 | // texture that is not mipmap complete (considering the filter in use). |
1231 | GrGLTextureParameters::SamplerOverriddenState state; |
1232 | state.fMinFilter = GR_GL_NEAREST; |
1233 | state.fMagFilter = GR_GL_NEAREST; |
1234 | state.fWrapS = GR_GL_CLAMP_TO_EDGE; |
1235 | state.fWrapT = GR_GL_CLAMP_TO_EDGE; |
1236 | GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, state.fMagFilter)); |
1237 | GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, state.fMinFilter)); |
1238 | GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_WRAP_S, state.fWrapS)); |
1239 | GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_WRAP_T, state.fWrapT)); |
1240 | return state; |
1241 | } |
1242 | |
1243 | sk_sp<GrTexture> GrGLGpu::onCreateTexture(SkISize dimensions, |
1244 | const GrBackendFormat& format, |
1245 | GrRenderable renderable, |
1246 | int renderTargetSampleCnt, |
1247 | SkBudgeted budgeted, |
1248 | GrProtected isProtected, |
1249 | int mipLevelCount, |
1250 | uint32_t levelClearMask) { |
1251 | // We don't support protected textures in GL. |
1252 | if (isProtected == GrProtected::kYes) { |
1253 | return nullptr; |
1254 | } |
1255 | SkASSERT(GrGLCaps::kNone_MSFBOType != this->glCaps().msFBOType() || renderTargetSampleCnt == 1); |
1256 | |
1257 | SkASSERT(mipLevelCount > 0); |
1258 | GrMipMapsStatus mipMapsStatus = |
1259 | mipLevelCount > 1 ? GrMipMapsStatus::kDirty : GrMipMapsStatus::kNotAllocated; |
1260 | GrGLTextureParameters::SamplerOverriddenState initialState; |
1261 | GrGLTexture::Desc texDesc; |
1262 | texDesc.fSize = dimensions; |
1263 | texDesc.fTarget = GR_GL_TEXTURE_2D; |
1264 | texDesc.fFormat = format.asGLFormat(); |
1265 | texDesc.fOwnership = GrBackendObjectOwnership::kOwned; |
1266 | SkASSERT(texDesc.fFormat != GrGLFormat::kUnknown); |
1267 | SkASSERT(!GrGLFormatIsCompressed(texDesc.fFormat)); |
1268 | |
1269 | texDesc.fID = this->createTexture2D(dimensions, texDesc.fFormat, renderable, &initialState, |
1270 | mipLevelCount); |
1271 | |
1272 | if (!texDesc.fID) { |
1273 | return return_null_texture(); |
1274 | } |
1275 | |
1276 | sk_sp<GrGLTexture> tex; |
1277 | if (renderable == GrRenderable::kYes) { |
1278 | // unbind the texture from the texture unit before binding it to the frame buffer |
1279 | GL_CALL(BindTexture(texDesc.fTarget, 0)); |
1280 | GrGLRenderTarget::IDs rtIDDesc; |
1281 | |
1282 | if (!this->createRenderTargetObjects(texDesc, renderTargetSampleCnt, &rtIDDesc)) { |
1283 | GL_CALL(DeleteTextures(1, &texDesc.fID)); |
1284 | return return_null_texture(); |
1285 | } |
1286 | tex = sk_make_sp<GrGLTextureRenderTarget>( |
1287 | this, budgeted, renderTargetSampleCnt, texDesc, rtIDDesc, mipMapsStatus); |
1288 | tex->baseLevelWasBoundToFBO(); |
1289 | } else { |
1290 | tex = sk_make_sp<GrGLTexture>(this, budgeted, texDesc, mipMapsStatus); |
1291 | } |
1292 | // The non-sampler params are still at their default values. |
1293 | tex->parameters()->set(&initialState, GrGLTextureParameters::NonsamplerState(), |
1294 | fResetTimestampForTextureParameters); |
1295 | if (levelClearMask) { |
1296 | GrGLenum externalFormat, externalType; |
1297 | GrColorType colorType; |
1298 | this->glCaps().getTexSubImageDefaultFormatTypeAndColorType(texDesc.fFormat, &externalFormat, |
1299 | &externalType, &colorType); |
1300 | if (this->glCaps().clearTextureSupport()) { |
1301 | for (int i = 0; i < mipLevelCount; ++i) { |
1302 | if (levelClearMask & (1U << i)) { |
1303 | GL_CALL(ClearTexImage(tex->textureID(), i, externalFormat, externalType, |
1304 | nullptr)); |
1305 | } |
1306 | } |
1307 | } else if (this->glCaps().canFormatBeFBOColorAttachment(format.asGLFormat()) && |
1308 | !this->glCaps().performColorClearsAsDraws()) { |
1309 | this->flushScissorTest(GrScissorTest::kDisabled); |
1310 | this->disableWindowRectangles(); |
1311 | this->flushColorWrite(true); |
1312 | this->flushClearColor(SK_PMColor4fTRANSPARENT); |
1313 | for (int i = 0; i < mipLevelCount; ++i) { |
1314 | if (levelClearMask & (1U << i)) { |
1315 | this->bindSurfaceFBOForPixelOps(tex.get(), i, GR_GL_FRAMEBUFFER, |
1316 | kDst_TempFBOTarget); |
1317 | GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT)); |
1318 | this->unbindSurfaceFBOForPixelOps(tex.get(), i, GR_GL_FRAMEBUFFER); |
1319 | } |
1320 | } |
1321 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
1322 | } else { |
1323 | std::unique_ptr<char[]> zeros; |
1324 | GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1)); |
1325 | for (int i = 0; i < mipLevelCount; ++i) { |
1326 | if (levelClearMask & (1U << i)) { |
1327 | int levelWidth = std::max(1, texDesc.fSize.width() >> i); |
1328 | int levelHeight = std::max(1, texDesc.fSize.height() >> i); |
1329 | // Levels only get smaller as we proceed. Once we allocate a zeroed buffer, use it for |
1330 | // all smaller levels that need clearing. |
1331 | if (!zeros) { |
1332 | size_t bpp = GrColorTypeBytesPerPixel(colorType); |
1333 | size_t size = levelWidth * levelHeight * bpp; |
1334 | zeros.reset(new char[size]()); |
1335 | } |
1336 | this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, tex->textureID()); |
1337 | GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D, i, 0, 0, levelWidth, levelHeight, |
1338 | externalFormat, externalType, zeros.get())); |
1339 | } |
1340 | } |
1341 | } |
1342 | } |
1343 | return std::move(tex); |
1344 | } |
1345 | |
1346 | sk_sp<GrTexture> GrGLGpu::onCreateCompressedTexture(SkISize dimensions, |
1347 | const GrBackendFormat& format, |
1348 | SkBudgeted budgeted, |
1349 | GrMipMapped mipMapped, |
1350 | GrProtected isProtected, |
1351 | const void* data, size_t dataSize) { |
1352 | // We don't support protected textures in GL. |
1353 | if (isProtected == GrProtected::kYes) { |
1354 | return nullptr; |
1355 | } |
1356 | GrGLTextureParameters::SamplerOverriddenState initialState; |
1357 | GrGLTexture::Desc desc; |
1358 | desc.fSize = dimensions; |
1359 | desc.fTarget = GR_GL_TEXTURE_2D; |
1360 | desc.fOwnership = GrBackendObjectOwnership::kOwned; |
1361 | desc.fFormat = format.asGLFormat(); |
1362 | desc.fID = this->createCompressedTexture2D(desc.fSize, desc.fFormat, |
1363 | mipMapped, &initialState, |
1364 | data, dataSize); |
1365 | if (!desc.fID) { |
1366 | return nullptr; |
1367 | } |
1368 | |
1369 | // Unbind this texture from the scratch texture unit. |
1370 | this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, 0); |
1371 | |
1372 | GrMipMapsStatus mipMapsStatus = mipMapped == GrMipMapped::kYes |
1373 | ? GrMipMapsStatus::kValid |
1374 | : GrMipMapsStatus::kNotAllocated; |
1375 | |
1376 | auto tex = sk_make_sp<GrGLTexture>(this, budgeted, desc, mipMapsStatus); |
1377 | // The non-sampler params are still at their default values. |
1378 | tex->parameters()->set(&initialState, GrGLTextureParameters::NonsamplerState(), |
1379 | fResetTimestampForTextureParameters); |
1380 | return std::move(tex); |
1381 | } |
1382 | |
1383 | GrBackendTexture GrGLGpu::onCreateCompressedBackendTexture(SkISize dimensions, |
1384 | const GrBackendFormat& format, |
1385 | GrMipMapped mipMapped, |
1386 | GrProtected isProtected, |
1387 | const BackendTextureData* data) { |
1388 | // We don't support protected textures in GL. |
1389 | if (isProtected == GrProtected::kYes) { |
1390 | return {}; |
1391 | } |
1392 | |
1393 | this->handleDirtyContext(); |
1394 | |
1395 | GrGLFormat glFormat = format.asGLFormat(); |
1396 | if (glFormat == GrGLFormat::kUnknown) { |
1397 | return {}; |
1398 | } |
1399 | |
1400 | const char* rawData = nullptr; |
1401 | size_t rawDataSize = 0; |
1402 | SkAutoMalloc am; |
1403 | |
1404 | SkASSERT(!data || data->type() != BackendTextureData::Type::kPixmaps); |
1405 | if (data && data->type() == BackendTextureData::Type::kCompressed) { |
1406 | rawData = (const char*) data->compressedData(); |
1407 | rawDataSize = data->compressedSize(); |
1408 | } else if (data && data->type() == BackendTextureData::Type::kColor) { |
1409 | SkImage::CompressionType compression = GrGLFormatToCompressionType(glFormat); |
1410 | SkASSERT(compression != SkImage::CompressionType::kNone); |
1411 | |
1412 | rawDataSize = SkCompressedDataSize(compression, dimensions, nullptr, |
1413 | mipMapped == GrMipMapped::kYes); |
1414 | |
1415 | am.reset(rawDataSize); |
1416 | |
1417 | GrFillInCompressedData(compression, dimensions, mipMapped, (char*)am.get(), data->color()); |
1418 | |
1419 | rawData = (const char*) am.get(); |
1420 | } |
1421 | |
1422 | GrGLTextureInfo info; |
1423 | GrGLTextureParameters::SamplerOverriddenState initialState; |
1424 | |
1425 | info.fTarget = GR_GL_TEXTURE_2D; |
1426 | info.fFormat = GrGLFormatToEnum(glFormat); |
1427 | info.fID = this->createCompressedTexture2D(dimensions, glFormat, |
1428 | mipMapped, &initialState, |
1429 | rawData, rawDataSize); |
1430 | if (!info.fID) { |
1431 | return {}; |
1432 | } |
1433 | |
1434 | // Unbind this texture from the scratch texture unit. |
1435 | this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, 0); |
1436 | |
1437 | auto parameters = sk_make_sp<GrGLTextureParameters>(); |
1438 | // The non-sampler params are still at their default values. |
1439 | parameters->set(&initialState, GrGLTextureParameters::NonsamplerState(), |
1440 | fResetTimestampForTextureParameters); |
1441 | |
1442 | return GrBackendTexture(dimensions.width(), dimensions.height(), mipMapped, info, |
1443 | std::move(parameters)); |
1444 | } |
1445 | |
1446 | namespace { |
1447 | |
1448 | const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount; |
1449 | |
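// Queries the currently bound renderbuffer for its stencil (and, for packed depth-stencil,
// total) bit counts when they are not already known.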
1450 | void inline get_stencil_rb_sizes(const GrGLInterface* gl, |
1451 | GrGLStencilAttachment::Format* format) { |
1452 | |
1453 | // we shouldn't ever know one size and not the other |
1454 | SkASSERT((kUnknownBitCount == format->fStencilBits) == |
1455 | (kUnknownBitCount == format->fTotalBits)); |
1456 | if (kUnknownBitCount == format->fStencilBits) { |
1457 | GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER, |
1458 | GR_GL_RENDERBUFFER_STENCIL_SIZE, |
1459 | (GrGLint*)&format->fStencilBits); |
1460 | if (format->fPacked) { |
1461 | GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER, |
1462 | GR_GL_RENDERBUFFER_DEPTH_SIZE, |
1463 | (GrGLint*)&format->fTotalBits); |
1464 | format->fTotalBits += format->fStencilBits; |
1465 | } else { |
1466 | format->fTotalBits = format->fStencilBits; |
1467 | } |
1468 | } |
1469 | } |
1470 | } |
1471 | |
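// Probes for a stencil renderbuffer format that can be combined with the given color format in a
// complete FBO. The result (an index into GrGLCaps::stencilFormats(), or -1 for "none") is cached
// on the caps, so the probe below runs at most once per color format.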
1472 | int GrGLGpu::getCompatibleStencilIndex(GrGLFormat format) { |
1473 | static const int kSize = 16; |
1474 | SkASSERT(this->glCaps().canFormatBeFBOColorAttachment(format)); |
1475 | |
1476 | if (!this->glCaps().hasStencilFormatBeenDeterminedForFormat(format)) { |
1477 | // Default to unsupported, set this if we find a stencil format that works. |
1478 | int firstWorkingStencilFormatIndex = -1; |
1479 | |
1480 | GrGLuint colorID = |
1481 | this->createTexture2D({kSize, kSize}, format, GrRenderable::kYes, nullptr, 1); |
1482 | if (!colorID) { |
1483 | return -1; |
1484 | } |
        // Unbind the texture from the texture unit before binding it to the framebuffer.
1486 | GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0)); |
1487 | |
1488 | // Create Framebuffer |
1489 | GrGLuint fb = 0; |
1490 | GL_CALL(GenFramebuffers(1, &fb)); |
1491 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, fb); |
1492 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
1493 | GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, |
1494 | GR_GL_COLOR_ATTACHMENT0, |
1495 | GR_GL_TEXTURE_2D, |
1496 | colorID, |
1497 | 0)); |
1498 | GrGLuint sbRBID = 0; |
1499 | GL_CALL(GenRenderbuffers(1, &sbRBID)); |
1500 | |
        // Look over the supported stencil formats until we find a compatible one.
1502 | int stencilFmtCnt = this->glCaps().stencilFormats().count(); |
1503 | if (sbRBID) { |
1504 | GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbRBID)); |
1505 | for (int i = 0; i < stencilFmtCnt && sbRBID; ++i) { |
1506 | const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[i]; |
1507 | GrGLenum error = GL_ALLOC_CALL(RenderbufferStorage( |
1508 | GR_GL_RENDERBUFFER, sFmt.fInternalFormat, kSize, kSize)); |
1509 | if (error == GR_GL_NO_ERROR) { |
1510 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
1511 | GR_GL_STENCIL_ATTACHMENT, |
1512 | GR_GL_RENDERBUFFER, sbRBID)); |
1513 | if (sFmt.fPacked) { |
1514 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
1515 | GR_GL_DEPTH_ATTACHMENT, |
1516 | GR_GL_RENDERBUFFER, sbRBID)); |
1517 | } else { |
1518 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
1519 | GR_GL_DEPTH_ATTACHMENT, |
1520 | GR_GL_RENDERBUFFER, 0)); |
1521 | } |
1522 | GrGLenum status; |
1523 | GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); |
1524 | if (status == GR_GL_FRAMEBUFFER_COMPLETE) { |
1525 | firstWorkingStencilFormatIndex = i; |
1526 | break; |
1527 | } |
1528 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
1529 | GR_GL_STENCIL_ATTACHMENT, |
1530 | GR_GL_RENDERBUFFER, 0)); |
1531 | if (sFmt.fPacked) { |
1532 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
1533 | GR_GL_DEPTH_ATTACHMENT, |
1534 | GR_GL_RENDERBUFFER, 0)); |
1535 | } |
1536 | } |
1537 | } |
1538 | GL_CALL(DeleteRenderbuffers(1, &sbRBID)); |
1539 | } |
1540 | GL_CALL(DeleteTextures(1, &colorID)); |
1541 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0); |
1542 | this->deleteFramebuffer(fb); |
1543 | fGLContext->caps()->setStencilFormatIndexForFormat(format, firstWorkingStencilFormatIndex); |
1544 | } |
1545 | return this->glCaps().getStencilFormatIndexForFormat(format); |
1546 | } |
1547 | |
1548 | GrGLuint GrGLGpu::createCompressedTexture2D( |
1549 | SkISize dimensions, |
1550 | GrGLFormat format, |
1551 | GrMipMapped mipMapped, |
1552 | GrGLTextureParameters::SamplerOverriddenState* initialState, |
1553 | const void* data, size_t dataSize) { |
1554 | if (format == GrGLFormat::kUnknown) { |
1555 | return 0; |
1556 | } |
1557 | GrGLuint id = 0; |
1558 | GL_CALL(GenTextures(1, &id)); |
1559 | if (!id) { |
1560 | return 0; |
1561 | } |
1562 | |
1563 | this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, id); |
1564 | |
1565 | *initialState = set_initial_texture_params(this->glInterface(), GR_GL_TEXTURE_2D); |
1566 | |
1567 | if (data) { |
1568 | if (!this->uploadCompressedTexData(format, dimensions, mipMapped, |
1569 | GR_GL_TEXTURE_2D, data, dataSize)) { |
1570 | GL_CALL(DeleteTextures(1, &id)); |
1571 | return 0; |
1572 | } |
1573 | } |
1574 | |
1575 | return id; |
1576 | } |
1577 | |
1578 | GrGLuint GrGLGpu::createTexture2D(SkISize dimensions, |
1579 | GrGLFormat format, |
1580 | GrRenderable renderable, |
1581 | GrGLTextureParameters::SamplerOverriddenState* initialState, |
1582 | int mipLevelCount) { |
1583 | SkASSERT(format != GrGLFormat::kUnknown); |
1584 | SkASSERT(!GrGLFormatIsCompressed(format)); |
1585 | |
1586 | GrGLuint id = 0; |
1587 | GL_CALL(GenTextures(1, &id)); |
1588 | |
1589 | if (!id) { |
1590 | return 0; |
1591 | } |
1592 | |
1593 | this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, id); |
1594 | |
1595 | if (GrRenderable::kYes == renderable && this->glCaps().textureUsageSupport()) { |
1596 | // provides a hint about how this texture will be used |
1597 | GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_USAGE, GR_GL_FRAMEBUFFER_ATTACHMENT)); |
1598 | } |
1599 | |
1600 | if (initialState) { |
1601 | *initialState = set_initial_texture_params(this->glInterface(), GR_GL_TEXTURE_2D); |
1602 | } else { |
1603 | set_initial_texture_params(this->glInterface(), GR_GL_TEXTURE_2D); |
1604 | } |
1605 | |
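    // Allocate the texture storage. Prefer immutable storage (TexStorage2D) where the format
    // supports it; otherwise fall back to one TexImage2D call per mip level with a null data
    // pointer. Both paths go through GL_ALLOC_CALL, so an allocation failure surfaces as a GL
    // error and we return 0 instead of handing back a broken texture.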
1606 | GrGLenum internalFormat = this->glCaps().getTexImageOrStorageInternalFormat(format); |
1607 | |
1608 | bool success = false; |
1609 | if (internalFormat) { |
1610 | if (this->glCaps().formatSupportsTexStorage(format)) { |
1611 | auto levelCount = std::max(mipLevelCount, 1); |
1612 | GrGLenum error = |
1613 | GL_ALLOC_CALL(TexStorage2D(GR_GL_TEXTURE_2D, levelCount, internalFormat, |
1614 | dimensions.width(), dimensions.height())); |
1615 | success = (error == GR_GL_NO_ERROR); |
1616 | } else { |
1617 | GrGLenum externalFormat, externalType; |
1618 | this->glCaps().getTexImageExternalFormatAndType(format, &externalFormat, &externalType); |
1619 | GrGLenum error = GR_GL_NO_ERROR; |
1620 | if (externalFormat && externalType) { |
1621 | for (int level = 0; level < mipLevelCount && error == GR_GL_NO_ERROR; level++) { |
1622 | const int twoToTheMipLevel = 1 << level; |
1623 | const int currentWidth = std::max(1, dimensions.width() / twoToTheMipLevel); |
1624 | const int currentHeight = std::max(1, dimensions.height() / twoToTheMipLevel); |
1625 | error = GL_ALLOC_CALL(TexImage2D(GR_GL_TEXTURE_2D, level, internalFormat, |
1626 | currentWidth, currentHeight, 0, externalFormat, |
1627 | externalType, nullptr)); |
1628 | } |
1629 | success = (error == GR_GL_NO_ERROR); |
1630 | } |
1631 | } |
1632 | } |
1633 | if (success) { |
1634 | return id; |
1635 | } |
1636 | GL_CALL(DeleteTextures(1, &id)); |
1637 | return 0; |
1638 | } |
1639 | |
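// Creates a stencil renderbuffer whose format is compatible with the render target's color format
// (as determined by getCompatibleStencilIndex above). Returns nullptr if no compatible stencil
// format exists or if the renderbuffer allocation fails.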
1640 | GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget( |
1641 | const GrRenderTarget* rt, int width, int height, int numStencilSamples) { |
1642 | SkASSERT(width >= rt->width()); |
1643 | SkASSERT(height >= rt->height()); |
1644 | |
1645 | GrGLStencilAttachment::IDDesc sbDesc; |
1646 | |
1647 | int sIdx = this->getCompatibleStencilIndex(rt->backendFormat().asGLFormat()); |
1648 | if (sIdx < 0) { |
1649 | return nullptr; |
1650 | } |
1651 | |
1652 | if (!sbDesc.fRenderbufferID) { |
1653 | GL_CALL(GenRenderbuffers(1, &sbDesc.fRenderbufferID)); |
1654 | } |
1655 | if (!sbDesc.fRenderbufferID) { |
1656 | return nullptr; |
1657 | } |
1658 | GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbDesc.fRenderbufferID)); |
1659 | const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[sIdx]; |
1660 | // we do this "if" so that we don't call the multisample |
1661 | // version on a GL that doesn't have an MSAA extension. |
1662 | if (numStencilSamples > 1) { |
1663 | if (!this->renderbufferStorageMSAA(*fGLContext, numStencilSamples, sFmt.fInternalFormat, |
1664 | width, height)) { |
1665 | GL_CALL(DeleteRenderbuffers(1, &sbDesc.fRenderbufferID)); |
1666 | return nullptr; |
1667 | } |
1668 | } else { |
1669 | GrGLenum error = GL_ALLOC_CALL( |
1670 | RenderbufferStorage(GR_GL_RENDERBUFFER, sFmt.fInternalFormat, width, height)); |
1671 | if (error != GR_GL_NO_ERROR) { |
1672 | GL_CALL(DeleteRenderbuffers(1, &sbDesc.fRenderbufferID)); |
1673 | return nullptr; |
1674 | } |
1675 | } |
1676 | fStats.incStencilAttachmentCreates(); |
1677 | // After sized formats we attempt an unsized format and take |
1678 | // whatever sizes GL gives us. In that case we query for the size. |
1679 | GrGLStencilAttachment::Format format = sFmt; |
1680 | get_stencil_rb_sizes(this->glInterface(), &format); |
1681 | GrGLStencilAttachment* stencil = new GrGLStencilAttachment(this, |
1682 | sbDesc, |
1683 | width, |
1684 | height, |
1685 | numStencilSamples, |
1686 | format); |
1687 | return stencil; |
1688 | } |
1689 | |
1690 | //////////////////////////////////////////////////////////////////////////////// |
1691 | |
1692 | sk_sp<GrGpuBuffer> GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType, |
1693 | GrAccessPattern accessPattern, const void* data) { |
1694 | return GrGLBuffer::Make(this, size, intendedType, accessPattern, data); |
1695 | } |
1696 | |
1697 | void GrGLGpu::flushScissorTest(GrScissorTest scissorTest) { |
1698 | if (GrScissorTest::kEnabled == scissorTest) { |
1699 | if (kYes_TriState != fHWScissorSettings.fEnabled) { |
1700 | GL_CALL(Enable(GR_GL_SCISSOR_TEST)); |
1701 | fHWScissorSettings.fEnabled = kYes_TriState; |
1702 | } |
1703 | } else { |
1704 | if (kNo_TriState != fHWScissorSettings.fEnabled) { |
1705 | GL_CALL(Disable(GR_GL_SCISSOR_TEST)); |
1706 | fHWScissorSettings.fEnabled = kNo_TriState; |
1707 | } |
1708 | } |
1709 | } |
1710 | |
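// Converts the scissor rect into GL window coordinates before issuing glScissor:
// GrNativeRect::MakeRelativeTo flips the rect vertically when the render target origin is
// top-left, since GL window space has its origin at the bottom-left.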
1711 | void GrGLGpu::flushScissorRect(const SkIRect& scissor, int rtWidth, int rtHeight, |
1712 | GrSurfaceOrigin rtOrigin) { |
1713 | SkASSERT(fHWScissorSettings.fEnabled == TriState::kYes_TriState); |
1714 | auto nativeScissor = GrNativeRect::MakeRelativeTo(rtOrigin, rtHeight, scissor); |
1715 | if (fHWScissorSettings.fRect != nativeScissor) { |
1716 | GL_CALL(Scissor(nativeScissor.fX, nativeScissor.fY, nativeScissor.fWidth, |
1717 | nativeScissor.fHeight)); |
1718 | fHWScissorSettings.fRect = nativeScissor; |
1719 | } |
1720 | } |
1721 | |
1722 | void GrGLGpu::flushWindowRectangles(const GrWindowRectsState& windowState, |
1723 | const GrGLRenderTarget* rt, GrSurfaceOrigin origin) { |
1724 | #ifndef USE_NSIGHT |
1725 | typedef GrWindowRectsState::Mode Mode; |
1726 | SkASSERT(!windowState.enabled() || rt->renderFBOID()); // Window rects can't be used on-screen. |
1727 | SkASSERT(windowState.numWindows() <= this->caps()->maxWindowRectangles()); |
1728 | |
1729 | if (!this->caps()->maxWindowRectangles() || |
1730 | fHWWindowRectsState.knownEqualTo(origin, rt->width(), rt->height(), windowState)) { |
1731 | return; |
1732 | } |
1733 | |
1734 | // This is purely a workaround for a spurious warning generated by gcc. Otherwise the above |
1735 | // assert would be sufficient. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=5912 |
1736 | int numWindows = std::min(windowState.numWindows(), int(GrWindowRectangles::kMaxWindows)); |
1737 | SkASSERT(windowState.numWindows() == numWindows); |
1738 | |
1739 | GrNativeRect glwindows[GrWindowRectangles::kMaxWindows]; |
1740 | const SkIRect* skwindows = windowState.windows().data(); |
1741 | for (int i = 0; i < numWindows; ++i) { |
1742 | glwindows[i].setRelativeTo(origin, rt->height(), skwindows[i]); |
1743 | } |
1744 | |
1745 | GrGLenum glmode = (Mode::kExclusive == windowState.mode()) ? GR_GL_EXCLUSIVE : GR_GL_INCLUSIVE; |
1746 | GL_CALL(WindowRectangles(glmode, numWindows, glwindows->asInts())); |
1747 | |
1748 | fHWWindowRectsState.set(origin, rt->width(), rt->height(), windowState); |
1749 | #endif |
1750 | } |
1751 | |
1752 | void GrGLGpu::disableWindowRectangles() { |
1753 | #ifndef USE_NSIGHT |
1754 | if (!this->caps()->maxWindowRectangles() || fHWWindowRectsState.knownDisabled()) { |
1755 | return; |
1756 | } |
1757 | GL_CALL(WindowRectangles(GR_GL_EXCLUSIVE, 0, nullptr)); |
1758 | fHWWindowRectsState.setDisabled(); |
1759 | #endif |
1760 | } |
1761 | |
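// Translates a GrProgramInfo into concrete GL state for the next draw: binds (or builds and
// caches) the GL program and uploads its uniforms, flushes blend, stencil, scissor,
// window-rectangle, MSAA, conservative-raster, and wireframe state, and finally binds the
// target's FBO.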
1762 | bool GrGLGpu::flushGLState(GrRenderTarget* renderTarget, |
1763 | const GrProgramInfo& programInfo) { |
1764 | this->handleDirtyContext(); |
1765 | |
1766 | sk_sp<GrGLProgram> program = fProgramCache->findOrCreateProgram(renderTarget, programInfo); |
1767 | if (!program) { |
        GrCapsDebugf(this->caps(), "Failed to create program!\n");
1769 | return false; |
1770 | } |
1771 | |
1772 | this->flushProgram(std::move(program)); |
1773 | |
1774 | if (GrPrimitiveType::kPatches == programInfo.primitiveType()) { |
1775 | this->flushPatchVertexCount(programInfo.tessellationPatchVertexCount()); |
1776 | } |
1777 | |
1778 | // Swizzle the blend to match what the shader will output. |
1779 | this->flushBlendAndColorWrite(programInfo.pipeline().getXferProcessor().getBlendInfo(), |
1780 | programInfo.pipeline().writeSwizzle()); |
1781 | |
1782 | fHWProgram->updateUniforms(renderTarget, programInfo); |
1783 | |
1784 | GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget); |
1785 | GrStencilSettings stencil; |
1786 | if (programInfo.pipeline().isStencilEnabled()) { |
1787 | SkASSERT(glRT->renderTargetPriv().getStencilAttachment()); |
1788 | stencil.reset(*programInfo.pipeline().getUserStencil(), |
1789 | programInfo.pipeline().hasStencilClip(), |
1790 | glRT->renderTargetPriv().numStencilBits()); |
1791 | } |
1792 | this->flushStencil(stencil, programInfo.origin()); |
1793 | this->flushScissorTest(GrScissorTest(programInfo.pipeline().isScissorTestEnabled())); |
1794 | this->flushWindowRectangles(programInfo.pipeline().getWindowRectsState(), |
1795 | glRT, programInfo.origin()); |
1796 | this->flushHWAAState(glRT, programInfo.pipeline().isHWAntialiasState()); |
1797 | this->flushConservativeRasterState(programInfo.pipeline().usesConservativeRaster()); |
1798 | this->flushWireframeState(programInfo.pipeline().isWireframe()); |
1799 | |
1800 | // This must come after textures are flushed because a texture may need |
1801 | // to be msaa-resolved (which will modify bound FBO state). |
1802 | this->flushRenderTarget(glRT); |
1803 | |
1804 | return true; |
1805 | } |
1806 | |
1807 | void GrGLGpu::flushProgram(sk_sp<GrGLProgram> program) { |
1808 | if (!program) { |
1809 | fHWProgram.reset(); |
1810 | fHWProgramID = 0; |
1811 | return; |
1812 | } |
1813 | SkASSERT((program == fHWProgram) == (fHWProgramID == program->programID())); |
1814 | if (program == fHWProgram) { |
1815 | return; |
1816 | } |
1817 | auto id = program->programID(); |
1818 | SkASSERT(id); |
1819 | GL_CALL(UseProgram(id)); |
1820 | fHWProgram = std::move(program); |
1821 | fHWProgramID = id; |
1822 | } |
1823 | |
1824 | void GrGLGpu::flushProgram(GrGLuint id) { |
1825 | SkASSERT(id); |
1826 | if (fHWProgramID == id) { |
1827 | SkASSERT(!fHWProgram); |
1828 | return; |
1829 | } |
1830 | fHWProgram.reset(); |
1831 | GL_CALL(UseProgram(id)); |
1832 | fHWProgramID = id; |
1833 | } |
1834 | |
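// Binds the given buffer to the GL target associated with its intended usage and returns that
// target so the caller can issue buffer commands against it. CPU-backed buffers are represented
// by binding buffer 0, which makes GL interpret attribute/index pointers as client-side memory.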
1835 | GrGLenum GrGLGpu::bindBuffer(GrGpuBufferType type, const GrBuffer* buffer) { |
1836 | this->handleDirtyContext(); |
1837 | |
1838 | // Index buffer state is tied to the vertex array. |
1839 | if (GrGpuBufferType::kIndex == type) { |
1840 | this->bindVertexArray(0); |
1841 | } |
1842 | |
1843 | auto* bufferState = this->hwBufferState(type); |
1844 | if (buffer->isCpuBuffer()) { |
1845 | if (!bufferState->fBufferZeroKnownBound) { |
1846 | GL_CALL(BindBuffer(bufferState->fGLTarget, 0)); |
1847 | bufferState->fBufferZeroKnownBound = true; |
1848 | bufferState->fBoundBufferUniqueID.makeInvalid(); |
1849 | } |
1850 | } else if (static_cast<const GrGpuBuffer*>(buffer)->uniqueID() != |
1851 | bufferState->fBoundBufferUniqueID) { |
1852 | const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer); |
1853 | GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID())); |
1854 | bufferState->fBufferZeroKnownBound = false; |
1855 | bufferState->fBoundBufferUniqueID = glBuffer->uniqueID(); |
1856 | } |
1857 | |
1858 | return bufferState->fGLTarget; |
1859 | } |
1860 | |
1861 | void GrGLGpu::clear(const GrFixedClip& clip, const SkPMColor4f& color, |
1862 | GrRenderTarget* target, GrSurfaceOrigin origin) { |
1863 | // parent class should never let us get here with no RT |
1864 | SkASSERT(target); |
1865 | SkASSERT(!this->caps()->performColorClearsAsDraws()); |
1866 | SkASSERT(!clip.scissorEnabled() || !this->caps()->performPartialClearsAsDraws()); |
1867 | |
1868 | this->handleDirtyContext(); |
1869 | |
1870 | GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); |
1871 | |
1872 | if (clip.scissorEnabled()) { |
1873 | this->flushRenderTarget(glRT, origin, clip.scissorRect()); |
1874 | } else { |
1875 | this->flushRenderTarget(glRT); |
1876 | } |
1877 | this->flushScissor(clip.scissorState(), glRT->width(), glRT->height(), origin); |
1878 | this->flushWindowRectangles(clip.windowRectsState(), glRT, origin); |
1879 | this->flushColorWrite(true); |
1880 | this->flushClearColor(color); |
1881 | GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT)); |
1882 | } |
1883 | |
1884 | void GrGLGpu::clearStencil(GrRenderTarget* target, int clearValue) { |
1885 | SkASSERT(!this->caps()->performStencilClearsAsDraws()); |
1886 | |
1887 | if (!target) { |
1888 | return; |
1889 | } |
1890 | |
1891 | // This should only be called internally when we know we have a stencil buffer. |
1892 | SkASSERT(target->renderTargetPriv().getStencilAttachment()); |
1893 | |
1894 | GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); |
1895 | this->flushRenderTargetNoColorWrites(glRT); |
1896 | |
1897 | this->flushScissorTest(GrScissorTest::kDisabled); |
1898 | this->disableWindowRectangles(); |
1899 | |
1900 | GL_CALL(StencilMask(0xffffffff)); |
1901 | GL_CALL(ClearStencil(clearValue)); |
1902 | GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT)); |
1903 | fHWStencilSettings.invalidate(); |
1904 | } |
1905 | |
1906 | static bool use_tiled_rendering(const GrGLCaps& glCaps, |
1907 | const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) { |
1908 | // Only use the tiled rendering extension if we can explicitly clear and discard the stencil. |
1909 | // Otherwise it's faster to just not use it. |
1910 | return glCaps.tiledRenderingSupport() && GrLoadOp::kClear == stencilLoadStore.fLoadOp && |
1911 | GrStoreOp::kDiscard == stencilLoadStore.fStoreOp; |
1912 | } |
1913 | |
1914 | void GrGLGpu::beginCommandBuffer(GrRenderTarget* rt, const SkIRect& bounds, GrSurfaceOrigin origin, |
1915 | const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore, |
1916 | const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) { |
1917 | SkASSERT(!fIsExecutingCommandBuffer_DebugOnly); |
1918 | |
1919 | this->handleDirtyContext(); |
1920 | |
1921 | auto glRT = static_cast<GrGLRenderTarget*>(rt); |
1922 | this->flushRenderTarget(glRT); |
1923 | SkDEBUGCODE(fIsExecutingCommandBuffer_DebugOnly = true); |
1924 | |
1925 | if (use_tiled_rendering(this->glCaps(), stencilLoadStore)) { |
1926 | auto nativeBounds = GrNativeRect::MakeRelativeTo(origin, glRT->height(), bounds); |
1927 | GrGLbitfield preserveMask = (GrLoadOp::kLoad == colorLoadStore.fLoadOp) |
1928 | ? GR_GL_COLOR_BUFFER_BIT0 : GR_GL_NONE; |
1929 | SkASSERT(GrLoadOp::kLoad != stencilLoadStore.fLoadOp); // Handled by use_tiled_rendering(). |
1930 | GL_CALL(StartTiling(nativeBounds.fX, nativeBounds.fY, nativeBounds.fWidth, |
1931 | nativeBounds.fHeight, preserveMask)); |
1932 | } |
1933 | |
1934 | GrGLbitfield clearMask = 0; |
1935 | if (GrLoadOp::kClear == colorLoadStore.fLoadOp) { |
1936 | SkASSERT(!this->caps()->performColorClearsAsDraws()); |
1937 | this->flushClearColor(colorLoadStore.fClearColor); |
1938 | this->flushColorWrite(true); |
1939 | clearMask |= GR_GL_COLOR_BUFFER_BIT; |
1940 | } |
1941 | if (GrLoadOp::kClear == stencilLoadStore.fLoadOp) { |
1942 | SkASSERT(!this->caps()->performStencilClearsAsDraws()); |
1943 | GL_CALL(StencilMask(0xffffffff)); |
1944 | GL_CALL(ClearStencil(0)); |
1945 | clearMask |= GR_GL_STENCIL_BUFFER_BIT; |
1946 | } |
1947 | if (clearMask) { |
1948 | this->flushScissorTest(GrScissorTest::kDisabled); |
1949 | this->disableWindowRectangles(); |
1950 | GL_CALL(Clear(clearMask)); |
1951 | } |
1952 | } |
1953 | |
1954 | void GrGLGpu::endCommandBuffer(GrRenderTarget* rt, |
1955 | const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore, |
1956 | const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) { |
1957 | SkASSERT(fIsExecutingCommandBuffer_DebugOnly); |
1958 | |
1959 | this->handleDirtyContext(); |
1960 | |
1961 | if (rt->uniqueID() != fHWBoundRenderTargetUniqueID) { |
1962 | // The framebuffer binding changed in the middle of a command buffer. We should have already |
1963 | // printed a warning during onFBOChanged. |
1964 | return; |
1965 | } |
1966 | |
1967 | if (GrGLCaps::kNone_InvalidateFBType != this->glCaps().invalidateFBType()) { |
1968 | auto glRT = static_cast<GrGLRenderTarget*>(rt); |
1969 | |
1970 | SkSTArray<2, GrGLenum> discardAttachments; |
1971 | if (GrStoreOp::kDiscard == colorLoadStore.fStoreOp) { |
1972 | discardAttachments.push_back( |
1973 | (0 == glRT->renderFBOID()) ? GR_GL_COLOR : GR_GL_COLOR_ATTACHMENT0); |
1974 | } |
1975 | if (GrStoreOp::kDiscard == stencilLoadStore.fStoreOp) { |
1976 | discardAttachments.push_back( |
1977 | (0 == glRT->renderFBOID()) ? GR_GL_STENCIL : GR_GL_STENCIL_ATTACHMENT); |
1978 | } |
1979 | |
1980 | if (!discardAttachments.empty()) { |
1981 | if (GrGLCaps::kInvalidate_InvalidateFBType == this->glCaps().invalidateFBType()) { |
1982 | GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, discardAttachments.count(), |
1983 | discardAttachments.begin())); |
1984 | } else { |
1985 | SkASSERT(GrGLCaps::kDiscard_InvalidateFBType == this->glCaps().invalidateFBType()); |
1986 | GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, discardAttachments.count(), |
1987 | discardAttachments.begin())); |
1988 | } |
1989 | } |
1990 | } |
1991 | |
1992 | if (use_tiled_rendering(this->glCaps(), stencilLoadStore)) { |
1993 | GrGLbitfield preserveMask = (GrStoreOp::kStore == colorLoadStore.fStoreOp) |
1994 | ? GR_GL_COLOR_BUFFER_BIT0 : GR_GL_NONE; |
1995 | // Handled by use_tiled_rendering(). |
1996 | SkASSERT(GrStoreOp::kStore != stencilLoadStore.fStoreOp); |
1997 | GL_CALL(EndTiling(preserveMask)); |
1998 | } |
1999 | |
2000 | SkDEBUGCODE(fIsExecutingCommandBuffer_DebugOnly = false); |
2001 | } |
2002 | |
2003 | void GrGLGpu::clearStencilClip(const GrFixedClip& clip, |
2004 | bool insideStencilMask, |
2005 | GrRenderTarget* target, GrSurfaceOrigin origin) { |
2006 | SkASSERT(target); |
2007 | SkASSERT(!this->caps()->performStencilClearsAsDraws()); |
2008 | this->handleDirtyContext(); |
2009 | |
2010 | GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment(); |
2011 | if (!sb) { |
        // We should only get here if we marked a proxy as requiring a stencil buffer. However,
        // the stencil buffer creation could later fail. Clipping will likely go awry now.
2014 | return; |
2015 | } |
2016 | |
2017 | GrGLint stencilBitCount = sb->bits(); |
2018 | #if 0 |
2019 | SkASSERT(stencilBitCount > 0); |
2020 | GrGLint clipStencilMask = (1 << (stencilBitCount - 1)); |
2021 | #else |
    // We could just clear the clip bit, but when we go through ANGLE a partial stencil mask
    // causes clears to be turned into draws. Our contract on GrOpsTask says that changing the
    // clip between stencil passes may or may not zero the client's clip bits, so we just clear
    // the whole thing.
2027 | static const GrGLint clipStencilMask = ~0; |
2028 | #endif |
2029 | GrGLint value; |
2030 | if (insideStencilMask) { |
2031 | value = (1 << (stencilBitCount - 1)); |
2032 | } else { |
2033 | value = 0; |
2034 | } |
2035 | GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); |
2036 | this->flushRenderTargetNoColorWrites(glRT); |
2037 | |
2038 | this->flushScissor(clip.scissorState(), glRT->width(), glRT->height(), origin); |
2039 | this->flushWindowRectangles(clip.windowRectsState(), glRT, origin); |
2040 | |
2041 | GL_CALL(StencilMask((uint32_t) clipStencilMask)); |
2042 | GL_CALL(ClearStencil(value)); |
2043 | GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT)); |
2044 | fHWStencilSettings.invalidate(); |
2045 | } |
2046 | |
2047 | bool GrGLGpu::readOrTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height, |
2048 | GrColorType surfaceColorType, GrColorType dstColorType, |
2049 | void* offsetOrPtr, int rowWidthInPixels) { |
2050 | SkASSERT(surface); |
2051 | |
2052 | auto format = surface->backendFormat().asGLFormat(); |
2053 | GrGLRenderTarget* renderTarget = static_cast<GrGLRenderTarget*>(surface->asRenderTarget()); |
2054 | if (!renderTarget && !this->glCaps().isFormatRenderable(format, 1)) { |
2055 | return false; |
2056 | } |
2057 | GrGLenum externalFormat = 0; |
2058 | GrGLenum externalType = 0; |
2059 | this->glCaps().getReadPixelsFormat(surface->backendFormat().asGLFormat(), |
2060 | surfaceColorType, |
2061 | dstColorType, |
2062 | &externalFormat, |
2063 | &externalType); |
2064 | if (!externalFormat || !externalType) { |
2065 | return false; |
2066 | } |
2067 | |
2068 | if (renderTarget) { |
2069 | if (renderTarget->numSamples() <= 1 || |
2070 | renderTarget->renderFBOID() == renderTarget->textureFBOID()) { // Also catches FBO 0. |
2071 | SkASSERT(!renderTarget->requiresManualMSAAResolve()); |
2072 | this->flushRenderTargetNoColorWrites(renderTarget); |
2073 | } else if (GrGLRenderTarget::kUnresolvableFBOID == renderTarget->textureFBOID()) { |
2074 | SkASSERT(!renderTarget->requiresManualMSAAResolve()); |
2075 | return false; |
2076 | } else { |
2077 | SkASSERT(renderTarget->requiresManualMSAAResolve()); |
2078 | // we don't track the state of the READ FBO ID. |
2079 | this->bindFramebuffer(GR_GL_READ_FRAMEBUFFER, renderTarget->textureFBOID()); |
2080 | } |
2081 | } else { |
2082 | // Use a temporary FBO. |
2083 | this->bindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget); |
2084 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
2085 | } |
2086 | |
2087 | // the read rect is viewport-relative |
2088 | GrNativeRect readRect = {left, top, width, height}; |
2089 | |
2090 | // determine if GL can read using the passed rowBytes or if we need a scratch buffer. |
2091 | if (rowWidthInPixels != width) { |
2092 | SkASSERT(this->glCaps().readPixelsRowBytesSupport()); |
2093 | GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, rowWidthInPixels)); |
2094 | } |
2095 | GL_CALL(PixelStorei(GR_GL_PACK_ALIGNMENT, 1)); |
2096 | |
2097 | bool reattachStencil = false; |
2098 | if (this->glCaps().detachStencilFromMSAABuffersBeforeReadPixels() && |
2099 | renderTarget && |
2100 | renderTarget->renderTargetPriv().getStencilAttachment() && |
2101 | renderTarget->numSamples() > 1) { |
2102 | // Fix Adreno devices that won't read from MSAA framebuffers with stencil attached |
2103 | reattachStencil = true; |
2104 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, |
2105 | GR_GL_RENDERBUFFER, 0)); |
2106 | } |
2107 | |
2108 | GL_CALL(ReadPixels(readRect.fX, readRect.fY, readRect.fWidth, readRect.fHeight, |
2109 | externalFormat, externalType, offsetOrPtr)); |
2110 | |
2111 | if (reattachStencil) { |
2112 | GrGLStencilAttachment* stencilAttachment = static_cast<GrGLStencilAttachment*>( |
2113 | renderTarget->renderTargetPriv().getStencilAttachment()); |
2114 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, |
2115 | GR_GL_RENDERBUFFER, stencilAttachment->renderbufferID())); |
2116 | } |
2117 | |
2118 | if (rowWidthInPixels != width) { |
2119 | SkASSERT(this->glCaps().readPixelsRowBytesSupport()); |
2120 | GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0)); |
2121 | } |
2122 | |
2123 | if (!renderTarget) { |
2124 | this->unbindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER); |
2125 | } |
2126 | return true; |
2127 | } |
2128 | |
2129 | bool GrGLGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height, |
2130 | GrColorType surfaceColorType, GrColorType dstColorType, void* buffer, |
2131 | size_t rowBytes) { |
2132 | SkASSERT(surface); |
2133 | |
2134 | size_t bytesPerPixel = GrColorTypeBytesPerPixel(dstColorType); |
2135 | |
2136 | // GL_PACK_ROW_LENGTH is in terms of pixels not bytes. |
2137 | int rowPixelWidth; |
2138 | |
2139 | if (rowBytes == SkToSizeT(width * bytesPerPixel)) { |
2140 | rowPixelWidth = width; |
2141 | } else { |
2142 | SkASSERT(!(rowBytes % bytesPerPixel)); |
2143 | rowPixelWidth = rowBytes / bytesPerPixel; |
2144 | } |
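    // For example, reading RGBA_8888 (4 bytes per pixel) with rowBytes == 1024 gives
    // rowPixelWidth == 256, which readOrTransferPixelsFrom passes to GL_PACK_ROW_LENGTH.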
2145 | return this->readOrTransferPixelsFrom(surface, left, top, width, height, surfaceColorType, |
2146 | dstColorType, buffer, rowPixelWidth); |
2147 | } |
2148 | |
2149 | GrOpsRenderPass* GrGLGpu::getOpsRenderPass( |
2150 | GrRenderTarget* rt, GrSurfaceOrigin origin, const SkIRect& bounds, |
2151 | const GrOpsRenderPass::LoadAndStoreInfo& colorInfo, |
2152 | const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo, |
2153 | const SkTArray<GrSurfaceProxy*, true>& sampledProxies) { |
2154 | if (!fCachedOpsRenderPass) { |
2155 | fCachedOpsRenderPass.reset(new GrGLOpsRenderPass(this)); |
2156 | } |
2157 | |
2158 | fCachedOpsRenderPass->set(rt, bounds, origin, colorInfo, stencilInfo); |
2159 | return fCachedOpsRenderPass.get(); |
2160 | } |
2161 | |
2162 | void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, GrSurfaceOrigin origin, |
2163 | const SkIRect& bounds) { |
2164 | this->flushRenderTargetNoColorWrites(target); |
2165 | this->didWriteToSurface(target, origin, &bounds); |
2166 | } |
2167 | |
2168 | void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target) { |
2169 | this->flushRenderTargetNoColorWrites(target); |
2170 | this->didWriteToSurface(target, kTopLeft_GrSurfaceOrigin, nullptr); |
2171 | } |
2172 | |
2173 | void GrGLGpu::flushRenderTargetNoColorWrites(GrGLRenderTarget* target) { |
2174 | SkASSERT(target); |
2175 | GrGpuResource::UniqueID rtID = target->uniqueID(); |
2176 | if (fHWBoundRenderTargetUniqueID != rtID) { |
2177 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, target->renderFBOID()); |
2178 | #ifdef SK_DEBUG |
2179 | // don't do this check in Chromium -- this is causing |
2180 | // lots of repeated command buffer flushes when the compositor is |
2181 | // rendering with Ganesh, which is really slow; even too slow for |
2182 | // Debug mode. |
2183 | if (!this->glCaps().skipErrorChecks()) { |
2184 | GrGLenum status; |
2185 | GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); |
2186 | if (status != GR_GL_FRAMEBUFFER_COMPLETE) { |
                SkDebugf("GrGLGpu::flushRenderTarget glCheckFramebufferStatus %x\n", status);
2188 | } |
2189 | } |
2190 | #endif |
2191 | fHWBoundRenderTargetUniqueID = rtID; |
2192 | this->flushViewport(target->width(), target->height()); |
2193 | } |
2194 | |
2195 | if (this->glCaps().srgbWriteControl()) { |
2196 | this->flushFramebufferSRGB(this->caps()->isFormatSRGB(target->backendFormat())); |
2197 | } |
2198 | |
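    // GL_IMPLEMENTATION_COLOR_READ_FORMAT/_TYPE describe the currently bound framebuffer, so
    // query them here, right after binding, and let the caps cache the answer for this format.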
2199 | if (this->glCaps().shouldQueryImplementationReadSupport(target->format())) { |
2200 | GrGLint format; |
2201 | GrGLint type; |
2202 | GR_GL_GetIntegerv(this->glInterface(), GR_GL_IMPLEMENTATION_COLOR_READ_FORMAT, &format); |
2203 | GR_GL_GetIntegerv(this->glInterface(), GR_GL_IMPLEMENTATION_COLOR_READ_TYPE, &type); |
2204 | this->glCaps().didQueryImplementationReadSupport(target->format(), format, type); |
2205 | } |
2206 | } |
2207 | |
2208 | void GrGLGpu::flushFramebufferSRGB(bool enable) { |
2209 | if (enable && kYes_TriState != fHWSRGBFramebuffer) { |
2210 | GL_CALL(Enable(GR_GL_FRAMEBUFFER_SRGB)); |
2211 | fHWSRGBFramebuffer = kYes_TriState; |
2212 | } else if (!enable && kNo_TriState != fHWSRGBFramebuffer) { |
2213 | GL_CALL(Disable(GR_GL_FRAMEBUFFER_SRGB)); |
2214 | fHWSRGBFramebuffer = kNo_TriState; |
2215 | } |
2216 | } |
2217 | |
2218 | void GrGLGpu::flushViewport(int width, int height) { |
2219 | GrNativeRect viewport = {0, 0, width, height}; |
2220 | if (fHWViewport != viewport) { |
2221 | GL_CALL(Viewport(viewport.fX, viewport.fY, viewport.fWidth, viewport.fHeight)); |
2222 | fHWViewport = viewport; |
2223 | } |
2224 | } |
2225 | |
2226 | GrGLenum GrGLGpu::prepareToDraw(GrPrimitiveType primitiveType) { |
2227 | fStats.incNumDraws(); |
2228 | |
2229 | if (this->glCaps().requiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines() && |
2230 | GrIsPrimTypeLines(primitiveType) && !GrIsPrimTypeLines(fLastPrimitiveType)) { |
2231 | GL_CALL(Enable(GR_GL_CULL_FACE)); |
2232 | GL_CALL(Disable(GR_GL_CULL_FACE)); |
2233 | } |
2234 | fLastPrimitiveType = primitiveType; |
2235 | |
2236 | switch (primitiveType) { |
2237 | case GrPrimitiveType::kTriangles: |
2238 | return GR_GL_TRIANGLES; |
2239 | case GrPrimitiveType::kTriangleStrip: |
2240 | return GR_GL_TRIANGLE_STRIP; |
2241 | case GrPrimitiveType::kPoints: |
2242 | return GR_GL_POINTS; |
2243 | case GrPrimitiveType::kLines: |
2244 | return GR_GL_LINES; |
2245 | case GrPrimitiveType::kLineStrip: |
2246 | return GR_GL_LINE_STRIP; |
2247 | case GrPrimitiveType::kPatches: |
2248 | return GR_GL_PATCHES; |
2249 | case GrPrimitiveType::kPath: |
            SK_ABORT("non-mesh-based GrPrimitiveType");
2251 | return 0; |
2252 | } |
    SK_ABORT("invalid GrPrimitiveType");
2254 | } |
2255 | |
2256 | void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect, |
2257 | ForExternalIO) { |
    // Some extensions automatically resolve the texture when it is read.
2259 | SkASSERT(this->glCaps().usesMSAARenderBuffers()); |
2260 | |
2261 | GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target); |
2262 | SkASSERT(rt->textureFBOID() != rt->renderFBOID()); |
2263 | SkASSERT(rt->textureFBOID() != 0 && rt->renderFBOID() != 0); |
2264 | this->bindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID()); |
2265 | this->bindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID()); |
2266 | |
2267 | // make sure we go through flushRenderTarget() since we've modified |
2268 | // the bound DRAW FBO ID. |
2269 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
2270 | if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) { |
2271 | // Apple's extension uses the scissor as the blit bounds. |
2272 | GrScissorState scissorState; |
2273 | scissorState.set(resolveRect); |
2274 | // Passing in kTopLeft_GrSurfaceOrigin will make sure no transformation of the rect |
2275 | // happens inside flushScissor since resolveRect is already in native device coordinates. |
2276 | this->flushScissor(scissorState, rt->width(), rt->height(), kTopLeft_GrSurfaceOrigin); |
2277 | this->disableWindowRectangles(); |
2278 | GL_CALL(ResolveMultisampleFramebuffer()); |
2279 | } else { |
2280 | int l, b, r, t; |
2281 | if (GrGLCaps::kResolveMustBeFull_BlitFrambufferFlag & |
2282 | this->glCaps().blitFramebufferSupportFlags()) { |
2283 | l = 0; |
2284 | b = 0; |
2285 | r = target->width(); |
2286 | t = target->height(); |
2287 | } else { |
2288 | l = resolveRect.x(); |
2289 | b = resolveRect.y(); |
2290 | r = resolveRect.x() + resolveRect.width(); |
2291 | t = resolveRect.y() + resolveRect.height(); |
2292 | } |
2293 | |
2294 | // BlitFrameBuffer respects the scissor, so disable it. |
2295 | this->flushScissorTest(GrScissorTest::kDisabled); |
2296 | this->disableWindowRectangles(); |
2297 | GL_CALL(BlitFramebuffer(l, b, r, t, l, b, r, t, GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); |
2298 | } |
2299 | } |
2300 | |
2301 | namespace { |
2302 | |
2303 | |
2304 | GrGLenum gr_to_gl_stencil_op(GrStencilOp op) { |
2305 | static const GrGLenum gTable[kGrStencilOpCount] = { |
2306 | GR_GL_KEEP, // kKeep |
2307 | GR_GL_ZERO, // kZero |
2308 | GR_GL_REPLACE, // kReplace |
2309 | GR_GL_INVERT, // kInvert |
2310 | GR_GL_INCR_WRAP, // kIncWrap |
2311 | GR_GL_DECR_WRAP, // kDecWrap |
2312 | GR_GL_INCR, // kIncClamp |
2313 | GR_GL_DECR, // kDecClamp |
2314 | }; |
2315 | static_assert(0 == (int)GrStencilOp::kKeep); |
2316 | static_assert(1 == (int)GrStencilOp::kZero); |
2317 | static_assert(2 == (int)GrStencilOp::kReplace); |
2318 | static_assert(3 == (int)GrStencilOp::kInvert); |
2319 | static_assert(4 == (int)GrStencilOp::kIncWrap); |
2320 | static_assert(5 == (int)GrStencilOp::kDecWrap); |
2321 | static_assert(6 == (int)GrStencilOp::kIncClamp); |
2322 | static_assert(7 == (int)GrStencilOp::kDecClamp); |
2323 | SkASSERT(op < (GrStencilOp)kGrStencilOpCount); |
2324 | return gTable[(int)op]; |
2325 | } |
2326 | |
2327 | void set_gl_stencil(const GrGLInterface* gl, |
2328 | const GrStencilSettings::Face& face, |
2329 | GrGLenum glFace) { |
2330 | GrGLenum glFunc = GrToGLStencilFunc(face.fTest); |
2331 | GrGLenum glFailOp = gr_to_gl_stencil_op(face.fFailOp); |
2332 | GrGLenum glPassOp = gr_to_gl_stencil_op(face.fPassOp); |
2333 | |
2334 | GrGLint ref = face.fRef; |
2335 | GrGLint mask = face.fTestMask; |
2336 | GrGLint writeMask = face.fWriteMask; |
2337 | |
2338 | if (GR_GL_FRONT_AND_BACK == glFace) { |
2339 | // we call the combined func just in case separate stencil is not |
2340 | // supported. |
2341 | GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask)); |
2342 | GR_GL_CALL(gl, StencilMask(writeMask)); |
2343 | GR_GL_CALL(gl, StencilOp(glFailOp, GR_GL_KEEP, glPassOp)); |
2344 | } else { |
2345 | GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask)); |
2346 | GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask)); |
2347 | GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, GR_GL_KEEP, glPassOp)); |
2348 | } |
2349 | } |
2350 | } |
2351 | |
2352 | void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings, GrSurfaceOrigin origin) { |
2353 | if (stencilSettings.isDisabled()) { |
2354 | this->disableStencil(); |
2355 | } else if (fHWStencilSettings != stencilSettings || |
2356 | (stencilSettings.isTwoSided() && fHWStencilOrigin != origin)) { |
2357 | if (kYes_TriState != fHWStencilTestEnabled) { |
2358 | GL_CALL(Enable(GR_GL_STENCIL_TEST)); |
2359 | |
2360 | fHWStencilTestEnabled = kYes_TriState; |
2361 | } |
2362 | if (!stencilSettings.isTwoSided()) { |
2363 | set_gl_stencil(this->glInterface(), stencilSettings.singleSidedFace(), |
2364 | GR_GL_FRONT_AND_BACK); |
2365 | } else { |
2366 | set_gl_stencil(this->glInterface(), stencilSettings.postOriginCWFace(origin), |
2367 | GR_GL_FRONT); |
2368 | set_gl_stencil(this->glInterface(), stencilSettings.postOriginCCWFace(origin), |
2369 | GR_GL_BACK); |
2370 | } |
2371 | fHWStencilSettings = stencilSettings; |
2372 | fHWStencilOrigin = origin; |
2373 | } |
2374 | } |
2375 | |
2376 | void GrGLGpu::disableStencil() { |
2377 | if (kNo_TriState != fHWStencilTestEnabled) { |
2378 | GL_CALL(Disable(GR_GL_STENCIL_TEST)); |
2379 | |
2380 | fHWStencilTestEnabled = kNo_TriState; |
2381 | fHWStencilSettings.invalidate(); |
2382 | } |
2383 | } |
2384 | |
2385 | void GrGLGpu::flushHWAAState(GrRenderTarget* rt, bool useHWAA) { |
2386 | // rt is only optional if useHWAA is false. |
2387 | SkASSERT(rt || !useHWAA); |
2388 | #ifdef SK_DEBUG |
2389 | if (useHWAA && rt->numSamples() <= 1) { |
2390 | SkASSERT(this->caps()->mixedSamplesSupport()); |
2391 | SkASSERT(0 != static_cast<GrGLRenderTarget*>(rt)->renderFBOID()); |
2392 | SkASSERT(rt->renderTargetPriv().getStencilAttachment()); |
2393 | } |
2394 | #endif |
2395 | |
2396 | if (this->caps()->multisampleDisableSupport()) { |
2397 | if (useHWAA) { |
2398 | if (kYes_TriState != fMSAAEnabled) { |
2399 | GL_CALL(Enable(GR_GL_MULTISAMPLE)); |
2400 | fMSAAEnabled = kYes_TriState; |
2401 | } |
2402 | } else { |
2403 | if (kNo_TriState != fMSAAEnabled) { |
2404 | GL_CALL(Disable(GR_GL_MULTISAMPLE)); |
2405 | fMSAAEnabled = kNo_TriState; |
2406 | } |
2407 | } |
2408 | } |
2409 | } |
2410 | |
2411 | void GrGLGpu::flushConservativeRasterState(bool enabled) { |
2412 | if (this->caps()->conservativeRasterSupport()) { |
2413 | if (enabled) { |
2414 | if (kYes_TriState != fHWConservativeRasterEnabled) { |
2415 | GL_CALL(Enable(GR_GL_CONSERVATIVE_RASTERIZATION)); |
2416 | fHWConservativeRasterEnabled = kYes_TriState; |
2417 | } |
2418 | } else { |
2419 | if (kNo_TriState != fHWConservativeRasterEnabled) { |
2420 | GL_CALL(Disable(GR_GL_CONSERVATIVE_RASTERIZATION)); |
2421 | fHWConservativeRasterEnabled = kNo_TriState; |
2422 | } |
2423 | } |
2424 | } |
2425 | } |
2426 | |
2427 | void GrGLGpu::flushWireframeState(bool enabled) { |
2428 | if (this->caps()->wireframeSupport()) { |
2429 | if (this->caps()->wireframeMode() || enabled) { |
2430 | if (kYes_TriState != fHWWireframeEnabled) { |
2431 | GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_LINE)); |
2432 | fHWWireframeEnabled = kYes_TriState; |
2433 | } |
2434 | } else { |
2435 | if (kNo_TriState != fHWWireframeEnabled) { |
2436 | GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_FILL)); |
2437 | fHWWireframeEnabled = kNo_TriState; |
2438 | } |
2439 | } |
2440 | } |
2441 | } |
2442 | |
2443 | void GrGLGpu::flushBlendAndColorWrite( |
2444 | const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle& swizzle) { |
2445 | if (this->glCaps().neverDisableColorWrites() && !blendInfo.fWriteColor) { |
2446 | // We need to work around a driver bug by using a blend state that preserves the dst color, |
2447 | // rather than disabling color writes. |
2448 | GrXferProcessor::BlendInfo preserveDstBlend; |
2449 | preserveDstBlend.fSrcBlend = kZero_GrBlendCoeff; |
2450 | preserveDstBlend.fDstBlend = kOne_GrBlendCoeff; |
2451 | this->flushBlendAndColorWrite(preserveDstBlend, swizzle); |
2452 | return; |
2453 | } |
2454 | |
2455 | GrBlendEquation equation = blendInfo.fEquation; |
2456 | GrBlendCoeff srcCoeff = blendInfo.fSrcBlend; |
2457 | GrBlendCoeff dstCoeff = blendInfo.fDstBlend; |
2458 | |
2459 | // Any optimization to disable blending should have already been applied and |
2460 | // tweaked the equation to "add" or "subtract", and the coeffs to (1, 0). |
2461 | bool blendOff = GrBlendShouldDisable(equation, srcCoeff, dstCoeff) || |
2462 | !blendInfo.fWriteColor; |
2463 | |
2464 | if (blendOff) { |
2465 | if (kNo_TriState != fHWBlendState.fEnabled) { |
2466 | GL_CALL(Disable(GR_GL_BLEND)); |
2467 | |
2468 | // Workaround for the ARM KHR_blend_equation_advanced blacklist issue |
2469 | // https://code.google.com/p/skia/issues/detail?id=3943 |
2470 | if (kARM_GrGLVendor == this->ctxInfo().vendor() && |
2471 | GrBlendEquationIsAdvanced(fHWBlendState.fEquation)) { |
2472 | SkASSERT(this->caps()->advancedBlendEquationSupport()); |
2473 | // Set to any basic blending equation. |
2474 | GrBlendEquation blend_equation = kAdd_GrBlendEquation; |
2475 | GL_CALL(BlendEquation(gXfermodeEquation2Blend[blend_equation])); |
2476 | fHWBlendState.fEquation = blend_equation; |
2477 | } |
2478 | |
2479 | fHWBlendState.fEnabled = kNo_TriState; |
2480 | } |
2481 | } else { |
2482 | if (kYes_TriState != fHWBlendState.fEnabled) { |
2483 | GL_CALL(Enable(GR_GL_BLEND)); |
2484 | |
2485 | fHWBlendState.fEnabled = kYes_TriState; |
2486 | } |
2487 | |
2488 | if (fHWBlendState.fEquation != equation) { |
2489 | GL_CALL(BlendEquation(gXfermodeEquation2Blend[equation])); |
2490 | fHWBlendState.fEquation = equation; |
2491 | } |
2492 | |
2493 | if (GrBlendEquationIsAdvanced(equation)) { |
2494 | SkASSERT(this->caps()->advancedBlendEquationSupport()); |
2495 | // Advanced equations have no other blend state. |
2496 | return; |
2497 | } |
2498 | |
2499 | if (fHWBlendState.fSrcCoeff != srcCoeff || fHWBlendState.fDstCoeff != dstCoeff) { |
2500 | GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff], |
2501 | gXfermodeCoeff2Blend[dstCoeff])); |
2502 | fHWBlendState.fSrcCoeff = srcCoeff; |
2503 | fHWBlendState.fDstCoeff = dstCoeff; |
2504 | } |
2505 | |
2506 | if ((GrBlendCoeffRefsConstant(srcCoeff) || GrBlendCoeffRefsConstant(dstCoeff))) { |
2507 | SkPMColor4f blendConst = swizzle.applyTo(blendInfo.fBlendConstant); |
2508 | if (!fHWBlendState.fConstColorValid || fHWBlendState.fConstColor != blendConst) { |
2509 | GL_CALL(BlendColor(blendConst.fR, blendConst.fG, blendConst.fB, blendConst.fA)); |
2510 | fHWBlendState.fConstColor = blendConst; |
2511 | fHWBlendState.fConstColorValid = true; |
2512 | } |
2513 | } |
2514 | } |
2515 | |
2516 | this->flushColorWrite(blendInfo.fWriteColor); |
2517 | } |
2518 | |
2519 | static void get_gl_swizzle_values(const GrSwizzle& swizzle, GrGLenum glValues[4]) { |
2520 | for (int i = 0; i < 4; ++i) { |
2521 | switch (swizzle[i]) { |
2522 | case 'r': glValues[i] = GR_GL_RED; break; |
2523 | case 'g': glValues[i] = GR_GL_GREEN; break; |
2524 | case 'b': glValues[i] = GR_GL_BLUE; break; |
2525 | case 'a': glValues[i] = GR_GL_ALPHA; break; |
2526 | case '0': glValues[i] = GR_GL_ZERO; break; |
2527 | case '1': glValues[i] = GR_GL_ONE; break; |
            default: SK_ABORT("Unsupported component");
2529 | } |
2530 | } |
2531 | } |
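// For example, a swizzle of "rgb1" maps to {GR_GL_RED, GR_GL_GREEN, GR_GL_BLUE, GR_GL_ONE},
// which bindTexture() below feeds to GL_TEXTURE_SWIZZLE_RGBA (or the per-channel ES variants).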
2532 | |
2533 | void GrGLGpu::bindTexture(int unitIdx, GrSamplerState samplerState, const GrSwizzle& swizzle, |
2534 | GrGLTexture* texture) { |
2535 | SkASSERT(texture); |
2536 | |
2537 | #ifdef SK_DEBUG |
2538 | if (!this->caps()->npotTextureTileSupport()) { |
2539 | if (samplerState.isRepeated()) { |
2540 | const int w = texture->width(); |
2541 | const int h = texture->height(); |
2542 | SkASSERT(SkIsPow2(w) && SkIsPow2(h)); |
2543 | } |
2544 | } |
2545 | #endif |
2546 | |
2547 | GrGpuResource::UniqueID textureID = texture->uniqueID(); |
2548 | GrGLenum target = texture->target(); |
2549 | if (fHWTextureUnitBindings[unitIdx].boundID(target) != textureID) { |
2550 | this->setTextureUnit(unitIdx); |
2551 | GL_CALL(BindTexture(target, texture->textureID())); |
2552 | fHWTextureUnitBindings[unitIdx].setBoundID(target, textureID); |
2553 | } |
2554 | |
2555 | if (samplerState.filter() == GrSamplerState::Filter::kMipMap) { |
2556 | if (!this->caps()->mipMapSupport() || |
2557 | texture->texturePriv().mipMapped() == GrMipMapped::kNo) { |
2558 | samplerState.setFilterMode(GrSamplerState::Filter::kBilerp); |
2559 | } |
2560 | } |
2561 | |
2562 | #ifdef SK_DEBUG |
2563 | // We were supposed to ensure MipMaps were up-to-date before getting here. |
2564 | if (samplerState.filter() == GrSamplerState::Filter::kMipMap) { |
2565 | SkASSERT(!texture->texturePriv().mipMapsAreDirty()); |
2566 | } |
2567 | #endif |
2568 | |
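    // Texture parameters are cached per texture along with a reset timestamp. If GL state has
    // been reset (e.g. because the context state was reset externally) since this texture's
    // parameters were last recorded, re-send every parameter instead of diffing against values
    // that may no longer be current.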
2569 | auto timestamp = texture->parameters()->resetTimestamp(); |
2570 | bool setAll = timestamp < fResetTimestampForTextureParameters; |
2571 | |
2572 | const GrGLTextureParameters::SamplerOverriddenState* samplerStateToRecord = nullptr; |
2573 | GrGLTextureParameters::SamplerOverriddenState newSamplerState; |
2574 | if (fSamplerObjectCache) { |
2575 | fSamplerObjectCache->bindSampler(unitIdx, samplerState); |
2576 | } else { |
2577 | const GrGLTextureParameters::SamplerOverriddenState& oldSamplerState = |
2578 | texture->parameters()->samplerOverriddenState(); |
2579 | samplerStateToRecord = &newSamplerState; |
2580 | |
2581 | newSamplerState.fMinFilter = filter_to_gl_min_filter(samplerState.filter()); |
2582 | newSamplerState.fMagFilter = filter_to_gl_mag_filter(samplerState.filter()); |
2583 | |
2584 | newSamplerState.fWrapS = wrap_mode_to_gl_wrap(samplerState.wrapModeX(), this->glCaps()); |
2585 | newSamplerState.fWrapT = wrap_mode_to_gl_wrap(samplerState.wrapModeY(), this->glCaps()); |
2586 | |
2587 | // These are the OpenGL default values. |
2588 | newSamplerState.fMinLOD = -1000.f; |
2589 | newSamplerState.fMaxLOD = 1000.f; |
2590 | |
2591 | if (setAll || newSamplerState.fMagFilter != oldSamplerState.fMagFilter) { |
2592 | this->setTextureUnit(unitIdx); |
2593 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, newSamplerState.fMagFilter)); |
2594 | } |
2595 | if (setAll || newSamplerState.fMinFilter != oldSamplerState.fMinFilter) { |
2596 | this->setTextureUnit(unitIdx); |
2597 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, newSamplerState.fMinFilter)); |
2598 | } |
2599 | if (this->glCaps().mipMapLevelAndLodControlSupport()) { |
2600 | if (setAll || newSamplerState.fMinLOD != oldSamplerState.fMinLOD) { |
2601 | this->setTextureUnit(unitIdx); |
2602 | GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MIN_LOD, newSamplerState.fMinLOD)); |
2603 | } |
2604 | if (setAll || newSamplerState.fMaxLOD != oldSamplerState.fMaxLOD) { |
2605 | this->setTextureUnit(unitIdx); |
2606 | GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MAX_LOD, newSamplerState.fMaxLOD)); |
2607 | } |
2608 | } |
2609 | if (setAll || newSamplerState.fWrapS != oldSamplerState.fWrapS) { |
2610 | this->setTextureUnit(unitIdx); |
2611 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_S, newSamplerState.fWrapS)); |
2612 | } |
2613 | if (setAll || newSamplerState.fWrapT != oldSamplerState.fWrapT) { |
2614 | this->setTextureUnit(unitIdx); |
2615 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_T, newSamplerState.fWrapT)); |
2616 | } |
2617 | if (this->glCaps().clampToBorderSupport()) { |
2618 | // Make sure the border color is transparent black (the default) |
2619 | if (setAll || oldSamplerState.fBorderColorInvalid) { |
2620 | this->setTextureUnit(unitIdx); |
2621 | static const GrGLfloat kTransparentBlack[4] = {0.f, 0.f, 0.f, 0.f}; |
2622 | GL_CALL(TexParameterfv(target, GR_GL_TEXTURE_BORDER_COLOR, kTransparentBlack)); |
2623 | } |
2624 | } |
2625 | } |
2626 | GrGLTextureParameters::NonsamplerState newNonsamplerState; |
2627 | newNonsamplerState.fBaseMipMapLevel = 0; |
2628 | newNonsamplerState.fMaxMipMapLevel = texture->texturePriv().maxMipMapLevel(); |
2629 | |
2630 | const GrGLTextureParameters::NonsamplerState& oldNonsamplerState = |
2631 | texture->parameters()->nonsamplerState(); |
2632 | if (!this->caps()->shaderCaps()->textureSwizzleAppliedInShader()) { |
2633 | newNonsamplerState.fSwizzleKey = swizzle.asKey(); |
2634 | if (setAll || swizzle.asKey() != oldNonsamplerState.fSwizzleKey) { |
2635 | GrGLenum glValues[4]; |
2636 | get_gl_swizzle_values(swizzle, glValues); |
2637 | this->setTextureUnit(unitIdx); |
2638 | if (GR_IS_GR_GL(this->glStandard())) { |
2639 | static_assert(sizeof(glValues[0]) == sizeof(GrGLint)); |
2640 | GL_CALL(TexParameteriv(target, GR_GL_TEXTURE_SWIZZLE_RGBA, |
2641 | reinterpret_cast<const GrGLint*>(glValues))); |
2642 | } else if (GR_IS_GR_GL_ES(this->glStandard())) { |
2643 | // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA. |
2644 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_R, glValues[0])); |
2645 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_G, glValues[1])); |
2646 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_B, glValues[2])); |
2647 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_A, glValues[3])); |
2648 | } |
2649 | } |
2650 | } |
2651 | // These are not supported in ES2 contexts |
2652 | if (this->glCaps().mipMapLevelAndLodControlSupport() && |
2653 | (texture->texturePriv().textureType() != GrTextureType::kExternal || |
2654 | !this->glCaps().dontSetBaseOrMaxLevelForExternalTextures())) { |
2655 | if (newNonsamplerState.fBaseMipMapLevel != oldNonsamplerState.fBaseMipMapLevel) { |
2656 | this->setTextureUnit(unitIdx); |
2657 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL, |
2658 | newNonsamplerState.fBaseMipMapLevel)); |
2659 | } |
2660 | if (newNonsamplerState.fMaxMipMapLevel != oldNonsamplerState.fMaxMipMapLevel) { |
2661 | this->setTextureUnit(unitIdx); |
2662 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL, |
2663 | newNonsamplerState.fMaxMipMapLevel)); |
2664 | } |
2665 | } |
2666 | texture->parameters()->set(samplerStateToRecord, newNonsamplerState, |
2667 | fResetTimestampForTextureParameters); |
2668 | } |
2669 | |
2670 | void GrGLGpu::onResetTextureBindings() { |
2671 | static constexpr GrGLenum kTargets[] = {GR_GL_TEXTURE_2D, GR_GL_TEXTURE_RECTANGLE, |
2672 | GR_GL_TEXTURE_EXTERNAL}; |
2673 | for (int i = 0; i < this->numTextureUnits(); ++i) { |
2674 | this->setTextureUnit(i); |
2675 | for (auto target : kTargets) { |
2676 | if (fHWTextureUnitBindings[i].hasBeenModified(target)) { |
2677 | GL_CALL(BindTexture(target, 0)); |
2678 | } |
2679 | } |
2680 | fHWTextureUnitBindings[i].invalidateAllTargets(true); |
2681 | } |
2682 | } |
2683 | |
2684 | void GrGLGpu::flushPatchVertexCount(uint8_t count) { |
2685 | SkASSERT(this->caps()->shaderCaps()->tessellationSupport()); |
2686 | if (fHWPatchVertexCount != count) { |
2687 | GL_CALL(PatchParameteri(GR_GL_PATCH_VERTICES, count)); |
2688 | fHWPatchVertexCount = count; |
2689 | } |
2690 | } |
2691 | |
2692 | void GrGLGpu::flushColorWrite(bool writeColor) { |
2693 | if (!writeColor) { |
2694 | if (kNo_TriState != fHWWriteToColor) { |
2695 | GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE, |
2696 | GR_GL_FALSE, GR_GL_FALSE)); |
2697 | fHWWriteToColor = kNo_TriState; |
2698 | } |
2699 | } else { |
2700 | if (kYes_TriState != fHWWriteToColor) { |
2701 | GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE)); |
2702 | fHWWriteToColor = kYes_TriState; |
2703 | } |
2704 | } |
2705 | } |
2706 | |
2707 | void GrGLGpu::flushClearColor(const SkPMColor4f& color) { |
2708 | GrGLfloat r = color.fR, g = color.fG, b = color.fB, a = color.fA; |
2709 | if (this->glCaps().clearToBoundaryValuesIsBroken() && |
2710 | (1 == r || 0 == r) && (1 == g || 0 == g) && (1 == b || 0 == b) && (1 == a || 0 == a)) { |
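        // nextafter(1, 2) is the smallest float greater than 1 and nextafter(0, -1) is the
        // largest float below 0; nudging alpha off the exact 0/1 boundary sidesteps the driver
        // bug that clearToBoundaryValuesIsBroken() flags.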
2711 | static const GrGLfloat safeAlpha1 = nextafter(1.f, 2.f); |
2712 | static const GrGLfloat safeAlpha0 = nextafter(0.f, -1.f); |
2713 | a = (1 == a) ? safeAlpha1 : safeAlpha0; |
2714 | } |
2715 | if (r != fHWClearColor[0] || g != fHWClearColor[1] || |
2716 | b != fHWClearColor[2] || a != fHWClearColor[3]) { |
2717 | GL_CALL(ClearColor(r, g, b, a)); |
2718 | fHWClearColor[0] = r; |
2719 | fHWClearColor[1] = g; |
2720 | fHWClearColor[2] = b; |
2721 | fHWClearColor[3] = a; |
2722 | } |
2723 | } |
2724 | |
2725 | void GrGLGpu::setTextureUnit(int unit) { |
2726 | SkASSERT(unit >= 0 && unit < this->numTextureUnits()); |
2727 | if (unit != fHWActiveTextureUnitIdx) { |
2728 | GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit)); |
2729 | fHWActiveTextureUnitIdx = unit; |
2730 | } |
2731 | } |
2732 | |
2733 | void GrGLGpu::bindTextureToScratchUnit(GrGLenum target, GrGLint textureID) { |
2734 | // Bind the last texture unit since it is the least likely to be used by GrGLProgram. |
2735 | int lastUnitIdx = this->numTextureUnits() - 1; |
2736 | if (lastUnitIdx != fHWActiveTextureUnitIdx) { |
2737 | GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx)); |
2738 | fHWActiveTextureUnitIdx = lastUnitIdx; |
2739 | } |
    // Clear out this field so that if a GrGLProgram does use this unit it will rebind the
    // correct texture.
2742 | fHWTextureUnitBindings[lastUnitIdx].invalidateForScratchUse(target); |
2743 | GL_CALL(BindTexture(target, textureID)); |
2744 | } |
2745 | |
2746 | // Determines whether glBlitFramebuffer could be used between src and dst by onCopySurface. |
2747 | static inline bool can_blit_framebuffer_for_copy_surface(const GrSurface* dst, |
2748 | const GrSurface* src, |
2749 | const SkIRect& srcRect, |
2750 | const SkIPoint& dstPoint, |
2751 | const GrGLCaps& caps) { |
2752 | int dstSampleCnt = 0; |
2753 | int srcSampleCnt = 0; |
2754 | if (const GrRenderTarget* rt = dst->asRenderTarget()) { |
2755 | dstSampleCnt = rt->numSamples(); |
2756 | } |
2757 | if (const GrRenderTarget* rt = src->asRenderTarget()) { |
2758 | srcSampleCnt = rt->numSamples(); |
2759 | } |
2760 | SkASSERT((dstSampleCnt > 0) == SkToBool(dst->asRenderTarget())); |
2761 | SkASSERT((srcSampleCnt > 0) == SkToBool(src->asRenderTarget())); |
2762 | |
2763 | GrGLFormat dstFormat = dst->backendFormat().asGLFormat(); |
2764 | GrGLFormat srcFormat = src->backendFormat().asGLFormat(); |
2765 | |
2766 | const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture()); |
2767 | const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture()); |
2768 | |
2769 | GrTextureType dstTexType; |
2770 | GrTextureType* dstTexTypePtr = nullptr; |
2771 | GrTextureType srcTexType; |
2772 | GrTextureType* srcTexTypePtr = nullptr; |
2773 | if (dstTex) { |
2774 | dstTexType = dstTex->texturePriv().textureType(); |
2775 | dstTexTypePtr = &dstTexType; |
2776 | } |
2777 | if (srcTex) { |
2778 | srcTexType = srcTex->texturePriv().textureType(); |
2779 | srcTexTypePtr = &srcTexType; |
2780 | } |
2781 | |
2782 | return caps.canCopyAsBlit(dstFormat, dstSampleCnt, dstTexTypePtr, |
2783 | srcFormat, srcSampleCnt, srcTexTypePtr, |
2784 | src->getBoundsRect(), true, srcRect, dstPoint); |
2785 | } |
2786 | |
2787 | static bool rt_has_msaa_render_buffer(const GrGLRenderTarget* rt, const GrGLCaps& glCaps) { |
2788 | // A RT has a separate MSAA renderbuffer if: |
2789 | // 1) It's multisampled |
2790 | // 2) We're using an extension with separate MSAA renderbuffers |
2791 | // 3) It's not FBO 0, which is special and always auto-resolves |
2792 | return rt->numSamples() > 1 && glCaps.usesMSAARenderBuffers() && rt->renderFBOID() != 0; |
2793 | } |
2794 | |
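// Determines whether glCopyTexSubImage2D could be used between src and dst by onCopySurface.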
2795 | static inline bool can_copy_texsubimage(const GrSurface* dst, const GrSurface* src, |
2796 | const GrGLCaps& caps) { |
2797 | |
2798 | const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget()); |
2799 | const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget()); |
2800 | const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture()); |
2801 | const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture()); |
2802 | |
2803 | bool dstHasMSAARenderBuffer = dstRT ? rt_has_msaa_render_buffer(dstRT, caps) : false; |
2804 | bool srcHasMSAARenderBuffer = srcRT ? rt_has_msaa_render_buffer(srcRT, caps) : false; |
2805 | |
2806 | GrGLFormat dstFormat = dst->backendFormat().asGLFormat(); |
2807 | GrGLFormat srcFormat = src->backendFormat().asGLFormat(); |
2808 | |
2809 | GrTextureType dstTexType; |
2810 | GrTextureType* dstTexTypePtr = nullptr; |
2811 | GrTextureType srcTexType; |
2812 | GrTextureType* srcTexTypePtr = nullptr; |
2813 | if (dstTex) { |
2814 | dstTexType = dstTex->texturePriv().textureType(); |
2815 | dstTexTypePtr = &dstTexType; |
2816 | } |
2817 | if (srcTex) { |
2818 | srcTexType = srcTex->texturePriv().textureType(); |
2819 | srcTexTypePtr = &srcTexType; |
2820 | } |
2821 | |
2822 | return caps.canCopyTexSubImage(dstFormat, dstHasMSAARenderBuffer, dstTexTypePtr, |
2823 | srcFormat, srcHasMSAARenderBuffer, srcTexTypePtr); |
2824 | } |
2825 | |
// Binds the surface to the given FBO target. If the surface is not a render target, or a non-base
// mip level is requested, the texture is attached to a lazily created temporary FBO instead.
2827 | void GrGLGpu::bindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget, |
2828 | TempFBOTarget tempFBOTarget) { |
2829 | GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget()); |
2830 | if (!rt || mipLevel > 0) { |
2831 | SkASSERT(surface->asTexture()); |
2832 | GrGLTexture* texture = static_cast<GrGLTexture*>(surface->asTexture()); |
2833 | GrGLuint texID = texture->textureID(); |
2834 | GrGLenum target = texture->target(); |
        GrGLuint* tempFBOID = kSrc_TempFBOTarget == tempFBOTarget ? &fTempSrcFBOID : &fTempDstFBOID;
2837 | |
2838 | if (0 == *tempFBOID) { |
2839 | GR_GL_CALL(this->glInterface(), GenFramebuffers(1, tempFBOID)); |
2840 | } |
2841 | |
2842 | this->bindFramebuffer(fboTarget, *tempFBOID); |
2843 | GR_GL_CALL( |
2844 | this->glInterface(), |
2845 | FramebufferTexture2D(fboTarget, GR_GL_COLOR_ATTACHMENT0, target, texID, mipLevel)); |
2846 | if (mipLevel == 0) { |
2847 | texture->baseLevelWasBoundToFBO(); |
2848 | } |
2849 | } else { |
2850 | this->bindFramebuffer(fboTarget, rt->renderFBOID()); |
2851 | } |
2852 | } |
2853 | |
2854 | void GrGLGpu::unbindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget) { |
    // bindSurfaceFBOForPixelOps temporarily binds textures that are not render targets (or are
    // bound at a non-base mip level) to a temporary FBO. Detach the texture from that FBO here.
2856 | if (mipLevel > 0 || !surface->asRenderTarget()) { |
2857 | SkASSERT(surface->asTexture()); |
2858 | GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target(); |
2859 | GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget, |
2860 | GR_GL_COLOR_ATTACHMENT0, |
2861 | textureTarget, |
2862 | 0, |
2863 | 0)); |
2864 | } |
2865 | } |
2866 | |
2867 | void GrGLGpu::onFBOChanged() { |
2868 | if (this->caps()->workarounds().flush_on_framebuffer_change || |
2869 | this->caps()->workarounds().restore_scissor_on_fbo_change) { |
2870 | GL_CALL(Flush()); |
2871 | } |
2872 | #ifdef SK_DEBUG |
2873 | if (fIsExecutingCommandBuffer_DebugOnly) { |
        SkDebugf("WARNING: GL FBO binding changed while executing a command buffer. "
                 "This will severely hurt performance.\n");
2876 | } |
2877 | #endif |
2878 | } |
2879 | |
2880 | void GrGLGpu::bindFramebuffer(GrGLenum target, GrGLuint fboid) { |
2881 | fStats.incRenderTargetBinds(); |
2882 | GL_CALL(BindFramebuffer(target, fboid)); |
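    // Shadow the most recently bound draw framebuffer; deleteFramebuffer() consults it when
    // applying driver workarounds.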
2883 | if (target == GR_GL_FRAMEBUFFER || target == GR_GL_DRAW_FRAMEBUFFER) { |
2884 | fBoundDrawFramebuffer = fboid; |
2885 | } |
2886 | |
2887 | if (this->caps()->workarounds().restore_scissor_on_fbo_change) { |
2888 | // The driver forgets the correct scissor when modifying the FBO binding. |
2889 | if (!fHWScissorSettings.fRect.isInvalid()) { |
2890 | const GrNativeRect& r = fHWScissorSettings.fRect; |
2891 | GL_CALL(Scissor(r.fX, r.fY, r.fWidth, r.fHeight)); |
2892 | } |
2893 | } |
2894 | |
2895 | this->onFBOChanged(); |
2896 | } |
2897 | |
2898 | void GrGLGpu::deleteFramebuffer(GrGLuint fboid) { |
2899 | // We're relying on the GL state shadowing being correct in the workaround code below so we |
2900 | // need to handle a dirty context. |
2901 | this->handleDirtyContext(); |
2902 | if (fboid == fBoundDrawFramebuffer && |
2903 | this->caps()->workarounds().unbind_attachments_on_bound_render_fbo_delete) { |
2904 | // This workaround only applies to deleting currently bound framebuffers |
        // on Adreno 420. Because this is a somewhat rare case, rather than
        // tracking all the attachments of every framebuffer, just always
        // unbind all attachments.
2908 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, |
2909 | GR_GL_RENDERBUFFER, 0)); |
2910 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, |
2911 | GR_GL_RENDERBUFFER, 0)); |
2912 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT, |
2913 | GR_GL_RENDERBUFFER, 0)); |
2914 | } |
2915 | |
2916 | GL_CALL(DeleteFramebuffers(1, &fboid)); |
2917 | |
2918 | // Deleting the currently bound framebuffer rebinds to 0. |
2919 | if (fboid == fBoundDrawFramebuffer) { |
2920 | this->onFBOChanged(); |
2921 | } |
2922 | } |
2923 | |
2924 | bool GrGLGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect, |
2925 | const SkIPoint& dstPoint) { |
2926 | // Don't prefer copying as a draw if the dst doesn't already have a FBO object. |
2927 | // This implicitly handles this->glCaps().useDrawInsteadOfAllRenderTargetWrites(). |
2928 | bool preferCopy = SkToBool(dst->asRenderTarget()); |
2929 | auto dstFormat = dst->backendFormat().asGLFormat(); |
2930 | if (preferCopy && this->glCaps().canCopyAsDraw(dstFormat, SkToBool(src->asTexture()))) { |
2931 | if (this->copySurfaceAsDraw(dst, src, srcRect, dstPoint)) { |
2932 | return true; |
2933 | } |
2934 | } |
2935 | |
2936 | if (can_copy_texsubimage(dst, src, this->glCaps())) { |
2937 | this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstPoint); |
2938 | return true; |
2939 | } |
2940 | |
2941 | if (can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstPoint, this->glCaps())) { |
2942 | return this->copySurfaceAsBlitFramebuffer(dst, src, srcRect, dstPoint); |
2943 | } |
2944 | |
2945 | if (!preferCopy && this->glCaps().canCopyAsDraw(dstFormat, SkToBool(src->asTexture()))) { |
2946 | if (this->copySurfaceAsDraw(dst, src, srcRect, dstPoint)) { |
2947 | return true; |
2948 | } |
2949 | } |
2950 | |
2951 | return false; |
2952 | } |
2953 | |
2954 | bool GrGLGpu::createCopyProgram(GrTexture* srcTex) { |
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
2956 | |
2957 | int progIdx = TextureToCopyProgramIdx(srcTex); |
2958 | const GrShaderCaps* shaderCaps = this->caps()->shaderCaps(); |
2959 | GrSLType samplerType = |
2960 | GrSLCombinedSamplerTypeForTextureType(srcTex->texturePriv().textureType()); |
2961 | |
2962 | if (!fCopyProgramArrayBuffer) { |
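        // A unit quad issued as a 4-vertex triangle strip; the positions double as the source
        // texture coordinates (both are transformed by uniforms at draw time).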
2963 | static const GrGLfloat vdata[] = { |
2964 | 0, 0, |
2965 | 0, 1, |
2966 | 1, 0, |
2967 | 1, 1 |
2968 | }; |
2969 | fCopyProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), GrGpuBufferType::kVertex, |
2970 | kStatic_GrAccessPattern, vdata); |
2971 | } |
2972 | if (!fCopyProgramArrayBuffer) { |
2973 | return false; |
2974 | } |
2975 | |
2976 | SkASSERT(!fCopyPrograms[progIdx].fProgram); |
2977 | GL_CALL_RET(fCopyPrograms[progIdx].fProgram, CreateProgram()); |
2978 | if (!fCopyPrograms[progIdx].fProgram) { |
2979 | return false; |
2980 | } |
2981 | |
    GrShaderVar aVertex("a_vertex", kHalf2_GrSLType, GrShaderVar::TypeModifier::In);
    GrShaderVar uTexCoordXform("u_texCoordXform", kHalf4_GrSLType,
                               GrShaderVar::TypeModifier::Uniform);
    GrShaderVar uPosXform("u_posXform", kHalf4_GrSLType, GrShaderVar::TypeModifier::Uniform);
    GrShaderVar uTexture("u_texture", samplerType, GrShaderVar::TypeModifier::Uniform);
    GrShaderVar vTexCoord("v_texCoord", kHalf2_GrSLType, GrShaderVar::TypeModifier::Out);
    GrShaderVar oFragColor("o_FragColor", kHalf4_GrSLType, GrShaderVar::TypeModifier::Out);
2989 | |
2990 | SkString vshaderTxt; |
2991 | if (shaderCaps->noperspectiveInterpolationSupport()) { |
2992 | if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) { |
            vshaderTxt.appendf("#extension %s : require\n", extension);
2994 | } |
        vTexCoord.addModifier("noperspective");
2996 | } |
2997 | |
2998 | aVertex.appendDecl(shaderCaps, &vshaderTxt); |
    vshaderTxt.append(";");
    uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
    vshaderTxt.append(";");
    uPosXform.appendDecl(shaderCaps, &vshaderTxt);
    vshaderTxt.append(";");
    vTexCoord.appendDecl(shaderCaps, &vshaderTxt);
    vshaderTxt.append(";");
3006 | |
3007 | vshaderTxt.append( |
3008 | "// Copy Program VS\n" |
3009 | "void main() {" |
3010 | " v_texCoord = half2(a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw);" |
3011 | " sk_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;" |
3012 | " sk_Position.zw = half2(0, 1);" |
3013 | "}" |
3014 | ); |
3015 | |
3016 | SkString fshaderTxt; |
3017 | if (shaderCaps->noperspectiveInterpolationSupport()) { |
3018 | if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) { |
            fshaderTxt.appendf("#extension %s : require\n", extension);
3020 | } |
3021 | } |
3022 | vTexCoord.setTypeModifier(GrShaderVar::TypeModifier::In); |
3023 | vTexCoord.appendDecl(shaderCaps, &fshaderTxt); |
    fshaderTxt.append(";");
    uTexture.appendDecl(shaderCaps, &fshaderTxt);
    fshaderTxt.append(";");
3027 | fshaderTxt.appendf( |
3028 | "// Copy Program FS\n" |
3029 | "void main() {" |
3030 | " sk_FragColor = sample(u_texture, v_texCoord);" |
3031 | "}" |
3032 | ); |
3033 | |
3034 | auto errorHandler = this->getContext()->priv().getShaderErrorHandler(); |
3035 | SkSL::String sksl(vshaderTxt.c_str(), vshaderTxt.size()); |
3036 | SkSL::Program::Settings settings; |
3037 | settings.fCaps = shaderCaps; |
3038 | SkSL::String glsl; |
3039 | std::unique_ptr<SkSL::Program> program = GrSkSLtoGLSL(*fGLContext, SkSL::Program::kVertex_Kind, |
3040 | sksl, settings, &glsl, errorHandler); |
3041 | GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram, |
3042 | GR_GL_VERTEX_SHADER, glsl, &fStats, errorHandler); |
3043 | SkASSERT(program->fInputs.isEmpty()); |
3044 | |
3045 | sksl.assign(fshaderTxt.c_str(), fshaderTxt.size()); |
3046 | program = GrSkSLtoGLSL(*fGLContext, SkSL::Program::kFragment_Kind, sksl, settings, &glsl, |
3047 | errorHandler); |
3048 | GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram, |
3049 | GR_GL_FRAGMENT_SHADER, glsl, &fStats, |
3050 | errorHandler); |
3051 | SkASSERT(program->fInputs.isEmpty()); |
3052 | |
3053 | GL_CALL(LinkProgram(fCopyPrograms[progIdx].fProgram)); |
3054 | |
3055 | GL_CALL_RET(fCopyPrograms[progIdx].fTextureUniform, |
                GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texture"));
    GL_CALL_RET(fCopyPrograms[progIdx].fPosXformUniform,
                GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_posXform"));
    GL_CALL_RET(fCopyPrograms[progIdx].fTexCoordXformUniform,
                GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texCoordXform"));

    GL_CALL(BindAttribLocation(fCopyPrograms[progIdx].fProgram, 0, "a_vertex"));
3063 | |
3064 | GL_CALL(DeleteShader(vshader)); |
3065 | GL_CALL(DeleteShader(fshader)); |
3066 | |
3067 | return true; |
3068 | } |
3069 | |
3070 | bool GrGLGpu::createMipmapProgram(int progIdx) { |
3071 | const bool oddWidth = SkToBool(progIdx & 0x2); |
3072 | const bool oddHeight = SkToBool(progIdx & 0x1); |
3073 | const int numTaps = (oddWidth ? 2 : 1) * (oddHeight ? 2 : 1); |
3074 | |
3075 | const GrShaderCaps* shaderCaps = this->caps()->shaderCaps(); |
3076 | |
3077 | SkASSERT(!fMipmapPrograms[progIdx].fProgram); |
3078 | GL_CALL_RET(fMipmapPrograms[progIdx].fProgram, CreateProgram()); |
3079 | if (!fMipmapPrograms[progIdx].fProgram) { |
3080 | return false; |
3081 | } |
3082 | |
    GrShaderVar aVertex("a_vertex", kHalf2_GrSLType, GrShaderVar::TypeModifier::In);
    GrShaderVar uTexCoordXform("u_texCoordXform", kHalf4_GrSLType,
                               GrShaderVar::TypeModifier::Uniform);
    GrShaderVar uTexture("u_texture", kTexture2DSampler_GrSLType,
                         GrShaderVar::TypeModifier::Uniform);
    // We need 1, 2, or 4 texture coordinates (depending on parity of each dimension):
    GrShaderVar vTexCoords[] = {
        GrShaderVar("v_texCoord0", kHalf2_GrSLType, GrShaderVar::TypeModifier::Out),
        GrShaderVar("v_texCoord1", kHalf2_GrSLType, GrShaderVar::TypeModifier::Out),
        GrShaderVar("v_texCoord2", kHalf2_GrSLType, GrShaderVar::TypeModifier::Out),
        GrShaderVar("v_texCoord3", kHalf2_GrSLType, GrShaderVar::TypeModifier::Out),
    };
    GrShaderVar oFragColor("o_FragColor", kHalf4_GrSLType, GrShaderVar::TypeModifier::Out);
3096 | |
3097 | SkString vshaderTxt; |
3098 | if (shaderCaps->noperspectiveInterpolationSupport()) { |
3099 | if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) { |
            vshaderTxt.appendf("#extension %s : require\n", extension);
3101 | } |
        vTexCoords[0].addModifier("noperspective");
        vTexCoords[1].addModifier("noperspective");
        vTexCoords[2].addModifier("noperspective");
        vTexCoords[3].addModifier("noperspective");
3106 | } |
3107 | |
3108 | aVertex.appendDecl(shaderCaps, &vshaderTxt); |
    vshaderTxt.append(";");
    uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
    vshaderTxt.append(";");
    for (int i = 0; i < numTaps; ++i) {
        vTexCoords[i].appendDecl(shaderCaps, &vshaderTxt);
        vshaderTxt.append(";");
3115 | } |
3116 | |
3117 | vshaderTxt.append( |
3118 | "// Mipmap Program VS\n" |
3119 | "void main() {" |
3120 | " sk_Position.xy = a_vertex * half2(2, 2) - half2(1, 1);" |
3121 | " sk_Position.zw = half2(0, 1);" |
3122 | ); |
3123 | |
3124 | // Insert texture coordinate computation: |
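    // For odd source dimensions, the extra taps below are offset by exactly one source texel
    // (u_texCoordXform.x = 1/srcWidth, .z = 1/srcHeight); the fragment shader averages the taps.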
3125 | if (oddWidth && oddHeight) { |
3126 | vshaderTxt.append( |
3127 | " v_texCoord0 = a_vertex.xy * u_texCoordXform.yw;" |
3128 | " v_texCoord1 = a_vertex.xy * u_texCoordXform.yw + half2(u_texCoordXform.x, 0);" |
3129 | " v_texCoord2 = a_vertex.xy * u_texCoordXform.yw + half2(0, u_texCoordXform.z);" |
3130 | " v_texCoord3 = a_vertex.xy * u_texCoordXform.yw + u_texCoordXform.xz;" |
3131 | ); |
3132 | } else if (oddWidth) { |
3133 | vshaderTxt.append( |
3134 | " v_texCoord0 = a_vertex.xy * half2(u_texCoordXform.y, 1);" |
3135 | " v_texCoord1 = a_vertex.xy * half2(u_texCoordXform.y, 1) + half2(u_texCoordXform.x, 0);" |
3136 | ); |
3137 | } else if (oddHeight) { |
3138 | vshaderTxt.append( |
3139 | " v_texCoord0 = a_vertex.xy * half2(1, u_texCoordXform.w);" |
3140 | " v_texCoord1 = a_vertex.xy * half2(1, u_texCoordXform.w) + half2(0, u_texCoordXform.z);" |
3141 | ); |
3142 | } else { |
3143 | vshaderTxt.append( |
3144 | " v_texCoord0 = a_vertex.xy;" |
3145 | ); |
3146 | } |
3147 | |
    vshaderTxt.append("}");
3149 | |
3150 | SkString fshaderTxt; |
3151 | if (shaderCaps->noperspectiveInterpolationSupport()) { |
3152 | if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) { |
            fshaderTxt.appendf("#extension %s : require\n", extension);
3154 | } |
3155 | } |
3156 | for (int i = 0; i < numTaps; ++i) { |
3157 | vTexCoords[i].setTypeModifier(GrShaderVar::TypeModifier::In); |
3158 | vTexCoords[i].appendDecl(shaderCaps, &fshaderTxt); |
        fshaderTxt.append(";");
    }
    uTexture.appendDecl(shaderCaps, &fshaderTxt);
    fshaderTxt.append(";");
3163 | fshaderTxt.append( |
3164 | "// Mipmap Program FS\n" |
3165 | "void main() {" |
3166 | ); |
3167 | |
3168 | if (oddWidth && oddHeight) { |
3169 | fshaderTxt.append( |
3170 | " sk_FragColor = (sample(u_texture, v_texCoord0) + " |
3171 | " sample(u_texture, v_texCoord1) + " |
3172 | " sample(u_texture, v_texCoord2) + " |
3173 | " sample(u_texture, v_texCoord3)) * 0.25;" |
3174 | ); |
3175 | } else if (oddWidth || oddHeight) { |
3176 | fshaderTxt.append( |
3177 | " sk_FragColor = (sample(u_texture, v_texCoord0) + " |
3178 | " sample(u_texture, v_texCoord1)) * 0.5;" |
3179 | ); |
3180 | } else { |
3181 | fshaderTxt.append( |
3182 | " sk_FragColor = sample(u_texture, v_texCoord0);" |
3183 | ); |
3184 | } |
3185 | |
    fshaderTxt.append("}");
3187 | |
3188 | auto errorHandler = this->getContext()->priv().getShaderErrorHandler(); |
3189 | SkSL::String sksl(vshaderTxt.c_str(), vshaderTxt.size()); |
3190 | SkSL::Program::Settings settings; |
3191 | settings.fCaps = shaderCaps; |
3192 | SkSL::String glsl; |
3193 | std::unique_ptr<SkSL::Program> program = GrSkSLtoGLSL(*fGLContext, SkSL::Program::kVertex_Kind, |
3194 | sksl, settings, &glsl, errorHandler); |
3195 | GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram, |
3196 | GR_GL_VERTEX_SHADER, glsl, &fStats, errorHandler); |
3197 | SkASSERT(program->fInputs.isEmpty()); |
3198 | |
3199 | sksl.assign(fshaderTxt.c_str(), fshaderTxt.size()); |
3200 | program = GrSkSLtoGLSL(*fGLContext, SkSL::Program::kFragment_Kind, sksl, settings, &glsl, |
3201 | errorHandler); |
3202 | GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram, |
3203 | GR_GL_FRAGMENT_SHADER, glsl, &fStats, |
3204 | errorHandler); |
3205 | SkASSERT(program->fInputs.isEmpty()); |
3206 | |
3207 | GL_CALL(LinkProgram(fMipmapPrograms[progIdx].fProgram)); |
3208 | |
3209 | GL_CALL_RET(fMipmapPrograms[progIdx].fTextureUniform, |
                GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texture"));
    GL_CALL_RET(fMipmapPrograms[progIdx].fTexCoordXformUniform,
                GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texCoordXform"));

    GL_CALL(BindAttribLocation(fMipmapPrograms[progIdx].fProgram, 0, "a_vertex"));
3215 | |
3216 | GL_CALL(DeleteShader(vshader)); |
3217 | GL_CALL(DeleteShader(fshader)); |
3218 | |
3219 | return true; |
3220 | } |
3221 | |
3222 | bool GrGLGpu::copySurfaceAsDraw(GrSurface* dst, GrSurface* src, const SkIRect& srcRect, |
3223 | const SkIPoint& dstPoint) { |
3224 | auto* srcTex = static_cast<GrGLTexture*>(src->asTexture()); |
    auto* dstTex = static_cast<GrGLTexture*>(dst->asTexture());
    auto* dstRT = static_cast<GrGLRenderTarget*>(dst->asRenderTarget());
3227 | if (!srcTex) { |
3228 | return false; |
3229 | } |
3230 | int progIdx = TextureToCopyProgramIdx(srcTex); |
3231 | if (!dstRT) { |
3232 | SkASSERT(dstTex); |
3233 | if (!this->glCaps().isFormatRenderable(dstTex->format(), 1)) { |
3234 | return false; |
3235 | } |
3236 | } |
3237 | if (!fCopyPrograms[progIdx].fProgram) { |
3238 | if (!this->createCopyProgram(srcTex)) { |
            SkDebugf("Failed to create copy program.\n");
3240 | return false; |
3241 | } |
3242 | } |
3243 | int w = srcRect.width(); |
3244 | int h = srcRect.height(); |
3245 | // We don't swizzle at all in our copies. |
3246 | this->bindTexture(0, GrSamplerState::Filter::kNearest, GrSwizzle::RGBA(), srcTex); |
3247 | this->bindSurfaceFBOForPixelOps(dst, 0, GR_GL_FRAMEBUFFER, kDst_TempFBOTarget); |
3248 | this->flushViewport(dst->width(), dst->height()); |
3249 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
3250 | SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, w, h); |
3251 | this->flushProgram(fCopyPrograms[progIdx].fProgram); |
3252 | fHWVertexArrayState.setVertexArrayID(this, 0); |
3253 | GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this); |
3254 | attribs->enableVertexArrays(this, 1); |
3255 | attribs->set(this, 0, fCopyProgramArrayBuffer.get(), kFloat2_GrVertexAttribType, |
3256 | kFloat2_GrSLType, 2 * sizeof(GrGLfloat), 0); |
3257 | // dst rect edges in NDC (-1 to 1) |
3258 | int dw = dst->width(); |
3259 | int dh = dst->height(); |
3260 | GrGLfloat dx0 = 2.f * dstPoint.fX / dw - 1.f; |
3261 | GrGLfloat dx1 = 2.f * (dstPoint.fX + w) / dw - 1.f; |
3262 | GrGLfloat dy0 = 2.f * dstPoint.fY / dh - 1.f; |
3263 | GrGLfloat dy1 = 2.f * (dstPoint.fY + h) / dh - 1.f; |
3264 | GrGLfloat sx0 = (GrGLfloat)srcRect.fLeft; |
3265 | GrGLfloat sx1 = (GrGLfloat)(srcRect.fLeft + w); |
3266 | GrGLfloat sy0 = (GrGLfloat)srcRect.fTop; |
3267 | GrGLfloat sy1 = (GrGLfloat)(srcRect.fTop + h); |
3268 | int sw = src->width(); |
3269 | int sh = src->height(); |
3270 | if (srcTex->texturePriv().textureType() != GrTextureType::kRectangle) { |
3271 | // src rect edges in normalized texture space (0 to 1) |
3272 | sx0 /= sw; |
3273 | sx1 /= sw; |
3274 | sy0 /= sh; |
3275 | sy1 /= sh; |
3276 | } |
3277 | GL_CALL(Uniform4f(fCopyPrograms[progIdx].fPosXformUniform, dx1 - dx0, dy1 - dy0, dx0, dy0)); |
3278 | GL_CALL(Uniform4f(fCopyPrograms[progIdx].fTexCoordXformUniform, |
3279 | sx1 - sx0, sy1 - sy0, sx0, sy0)); |
3280 | GL_CALL(Uniform1i(fCopyPrograms[progIdx].fTextureUniform, 0)); |
3281 | this->flushBlendAndColorWrite(GrXferProcessor::BlendInfo(), GrSwizzle::RGBA()); |
3282 | this->flushHWAAState(nullptr, false); |
3283 | this->flushConservativeRasterState(false); |
3284 | this->flushWireframeState(false); |
3285 | this->flushScissorTest(GrScissorTest::kDisabled); |
3286 | this->disableWindowRectangles(); |
3287 | this->disableStencil(); |
3288 | if (this->glCaps().srgbWriteControl()) { |
3289 | this->flushFramebufferSRGB(true); |
3290 | } |
3291 | GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4)); |
3292 | this->unbindSurfaceFBOForPixelOps(dst, 0, GR_GL_FRAMEBUFFER); |
3293 | // The rect is already in device space so we pass in kTopLeft so no flip is done. |
3294 | this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect); |
3295 | return true; |
3296 | } |
3297 | |
3298 | void GrGLGpu::copySurfaceAsCopyTexSubImage(GrSurface* dst, GrSurface* src, const SkIRect& srcRect, |
3299 | const SkIPoint& dstPoint) { |
3300 | SkASSERT(can_copy_texsubimage(dst, src, this->glCaps())); |
3301 | this->bindSurfaceFBOForPixelOps(src, 0, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget); |
3302 | GrGLTexture* dstTex = static_cast<GrGLTexture *>(dst->asTexture()); |
3303 | SkASSERT(dstTex); |
3304 | // We modified the bound FBO |
3305 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
3306 | |
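    // CopyTexSubImage2D reads from the framebuffer bound above (which wraps src) and writes into
    // the dst texture bound on the scratch unit.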
3307 | this->bindTextureToScratchUnit(dstTex->target(), dstTex->textureID()); |
3308 | GL_CALL(CopyTexSubImage2D(dstTex->target(), 0, |
3309 | dstPoint.fX, dstPoint.fY, |
3310 | srcRect.fLeft, srcRect.fTop, |
3311 | srcRect.width(), srcRect.height())); |
3312 | this->unbindSurfaceFBOForPixelOps(src, 0, GR_GL_FRAMEBUFFER); |
3313 | SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, |
3314 | srcRect.width(), srcRect.height()); |
3315 | // The rect is already in device space so we pass in kTopLeft so no flip is done. |
3316 | this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect); |
3317 | } |
3318 | |
3319 | bool GrGLGpu::copySurfaceAsBlitFramebuffer(GrSurface* dst, GrSurface* src, const SkIRect& srcRect, |
3320 | const SkIPoint& dstPoint) { |
3321 | SkASSERT(can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstPoint, this->glCaps())); |
3322 | SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, |
3323 | srcRect.width(), srcRect.height()); |
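    // glBlitFramebuffer results are undefined when the src and dst rectangles overlap while
    // reading from and drawing to the same framebuffer, so reject overlapping self-copies.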
3324 | if (dst == src) { |
3325 | if (SkIRect::Intersects(dstRect, srcRect)) { |
3326 | return false; |
3327 | } |
3328 | } |
3329 | |
3330 | this->bindSurfaceFBOForPixelOps(dst, 0, GR_GL_DRAW_FRAMEBUFFER, kDst_TempFBOTarget); |
3331 | this->bindSurfaceFBOForPixelOps(src, 0, GR_GL_READ_FRAMEBUFFER, kSrc_TempFBOTarget); |
3332 | // We modified the bound FBO |
3333 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
3334 | |
3335 | // BlitFrameBuffer respects the scissor, so disable it. |
3336 | this->flushScissorTest(GrScissorTest::kDisabled); |
3337 | this->disableWindowRectangles(); |
3338 | |
3339 | GL_CALL(BlitFramebuffer(srcRect.fLeft, |
3340 | srcRect.fTop, |
3341 | srcRect.fRight, |
3342 | srcRect.fBottom, |
3343 | dstRect.fLeft, |
3344 | dstRect.fTop, |
3345 | dstRect.fRight, |
3346 | dstRect.fBottom, |
3347 | GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); |
3348 | this->unbindSurfaceFBOForPixelOps(dst, 0, GR_GL_DRAW_FRAMEBUFFER); |
3349 | this->unbindSurfaceFBOForPixelOps(src, 0, GR_GL_READ_FRAMEBUFFER); |
3350 | |
3351 | // The rect is already in device space so we pass in kTopLeft so no flip is done. |
3352 | this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect); |
3353 | return true; |
3354 | } |
3355 | |
3356 | bool GrGLGpu::onRegenerateMipMapLevels(GrTexture* texture) { |
3357 | auto glTex = static_cast<GrGLTexture*>(texture); |
3358 | // Mipmaps are only supported on 2D textures: |
3359 | if (GR_GL_TEXTURE_2D != glTex->target()) { |
3360 | return false; |
3361 | } |
3362 | GrGLFormat format = glTex->format(); |
3363 | // Manual implementation of mipmap generation, to work around driver bugs w/sRGB. |
3364 | // Uses draw calls to do a series of downsample operations to successive mips. |
3365 | |
3366 | // The manual approach requires the ability to limit which level we're sampling and that the |
    // destination can be bound to an FBO:
3368 | if (!this->glCaps().doManualMipmapping() || !this->glCaps().isFormatRenderable(format, 1)) { |
3369 | GrGLenum target = glTex->target(); |
3370 | this->bindTextureToScratchUnit(target, glTex->textureID()); |
3371 | GL_CALL(GenerateMipmap(glTex->target())); |
3372 | return true; |
3373 | } |
3374 | |
3375 | int width = texture->width(); |
3376 | int height = texture->height(); |
3377 | int levelCount = SkMipMap::ComputeLevelCount(width, height) + 1; |
3378 | SkASSERT(levelCount == texture->texturePriv().maxMipMapLevel() + 1); |
3379 | |
3380 | // Create (if necessary), then bind temporary FBO: |
3381 | if (0 == fTempDstFBOID) { |
3382 | GL_CALL(GenFramebuffers(1, &fTempDstFBOID)); |
3383 | } |
3384 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, fTempDstFBOID); |
3385 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
3386 | |
3387 | // Bind the texture, to get things configured for filtering. |
3388 | // We'll be changing our base level further below: |
3389 | this->setTextureUnit(0); |
3390 | // The mipmap program does not do any swizzling. |
3391 | this->bindTexture(0, GrSamplerState::Filter::kBilerp, GrSwizzle::RGBA(), glTex); |
3392 | |
3393 | // Vertex data: |
3394 | if (!fMipmapProgramArrayBuffer) { |
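        // The same unit-quad triangle strip used by the copy program; it is reused for every
        // mip level blit.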
3395 | static const GrGLfloat vdata[] = { |
3396 | 0, 0, |
3397 | 0, 1, |
3398 | 1, 0, |
3399 | 1, 1 |
3400 | }; |
3401 | fMipmapProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), GrGpuBufferType::kVertex, |
3402 | kStatic_GrAccessPattern, vdata); |
3403 | } |
3404 | if (!fMipmapProgramArrayBuffer) { |
3405 | return false; |
3406 | } |
3407 | |
3408 | fHWVertexArrayState.setVertexArrayID(this, 0); |
3409 | |
3410 | GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this); |
3411 | attribs->enableVertexArrays(this, 1); |
3412 | attribs->set(this, 0, fMipmapProgramArrayBuffer.get(), kFloat2_GrVertexAttribType, |
3413 | kFloat2_GrSLType, 2 * sizeof(GrGLfloat), 0); |
3414 | |
3415 | // Set "simple" state once: |
3416 | this->flushBlendAndColorWrite(GrXferProcessor::BlendInfo(), GrSwizzle::RGBA()); |
3417 | this->flushHWAAState(nullptr, false); |
3418 | this->flushScissorTest(GrScissorTest::kDisabled); |
3419 | this->disableWindowRectangles(); |
3420 | this->disableStencil(); |
3421 | |
3422 | // Do all the blits: |
3423 | width = texture->width(); |
3424 | height = texture->height(); |
3425 | |
3426 | for (GrGLint level = 1; level < levelCount; ++level) { |
3427 | // Get and bind the program for this particular downsample (filter shape can vary): |
3428 | int progIdx = TextureSizeToMipmapProgramIdx(width, height); |
3429 | if (!fMipmapPrograms[progIdx].fProgram) { |
3430 | if (!this->createMipmapProgram(progIdx)) { |
                SkDebugf("Failed to create mipmap program.\n");
3432 | // Invalidate all params to cover base level change in a previous iteration. |
3433 | glTex->textureParamsModified(); |
3434 | return false; |
3435 | } |
3436 | } |
3437 | this->flushProgram(fMipmapPrograms[progIdx].fProgram); |
3438 | |
3439 | // Texcoord uniform is expected to contain (1/w, (w-1)/w, 1/h, (h-1)/h) |
3440 | const float invWidth = 1.0f / width; |
3441 | const float invHeight = 1.0f / height; |
3442 | GL_CALL(Uniform4f(fMipmapPrograms[progIdx].fTexCoordXformUniform, |
3443 | invWidth, (width - 1) * invWidth, invHeight, (height - 1) * invHeight)); |
3444 | GL_CALL(Uniform1i(fMipmapPrograms[progIdx].fTextureUniform, 0)); |
3445 | |
3446 | // Only sample from previous mip |
3447 | GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_BASE_LEVEL, level - 1)); |
3448 | |
3449 | GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, GR_GL_TEXTURE_2D, |
3450 | glTex->textureID(), level)); |
3451 | |
3452 | width = std::max(1, width / 2); |
3453 | height = std::max(1, height / 2); |
3454 | this->flushViewport(width, height); |
3455 | |
3456 | GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4)); |
3457 | } |
3458 | |
3459 | // Unbind: |
3460 | GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, |
3461 | GR_GL_TEXTURE_2D, 0, 0)); |
3462 | |
3463 | // We modified the base level param. |
3464 | GrGLTextureParameters::NonsamplerState nonsamplerState = glTex->parameters()->nonsamplerState(); |
3465 | // We drew the 2nd to last level into the last level. |
3466 | nonsamplerState.fBaseMipMapLevel = levelCount - 2; |
3467 | glTex->parameters()->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters); |
3468 | |
3469 | return true; |
3470 | } |
3471 | |
3472 | void GrGLGpu::querySampleLocations( |
3473 | GrRenderTarget* renderTarget, SkTArray<SkPoint>* sampleLocations) { |
3474 | this->flushRenderTargetNoColorWrites(static_cast<GrGLRenderTarget*>(renderTarget)); |
3475 | |
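    // The FBO may have been allocated with more samples than requested, so query the effective
    // count and return a location for every sample.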
3476 | int effectiveSampleCnt; |
3477 | GR_GL_GetIntegerv(this->glInterface(), GR_GL_SAMPLES, &effectiveSampleCnt); |
3478 | SkASSERT(effectiveSampleCnt >= renderTarget->numSamples()); |
3479 | |
3480 | sampleLocations->reset(effectiveSampleCnt); |
3481 | for (int i = 0; i < effectiveSampleCnt; ++i) { |
3482 | GL_CALL(GetMultisamplefv(GR_GL_SAMPLE_POSITION, i, &(*sampleLocations)[i].fX)); |
3483 | } |
3484 | } |
3485 | |
3486 | void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) { |
3487 | SkASSERT(type); |
3488 | switch (type) { |
3489 | case kTexture_GrXferBarrierType: { |
3490 | GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt); |
3491 | SkASSERT(glrt->textureFBOID() != 0 && glrt->renderFBOID() != 0); |
3492 | if (glrt->textureFBOID() != glrt->renderFBOID()) { |
3493 | // The render target uses separate storage so no need for glTextureBarrier. |
3494 | // FIXME: The render target will resolve automatically when its texture is bound, |
3495 | // but we could resolve only the bounds that will be read if we do it here instead. |
3496 | return; |
3497 | } |
3498 | SkASSERT(this->caps()->textureBarrierSupport()); |
3499 | GL_CALL(TextureBarrier()); |
3500 | return; |
3501 | } |
3502 | case kBlend_GrXferBarrierType: |
3503 | SkASSERT(GrCaps::kAdvanced_BlendEquationSupport == |
3504 | this->caps()->blendEquationSupport()); |
3505 | GL_CALL(BlendBarrier()); |
3506 | return; |
3507 | default: break; // placate compiler warnings that kNone not handled |
3508 | } |
3509 | } |
3510 | |
3511 | void GrGLGpu::insertManualFramebufferBarrier() { |
3512 | SkASSERT(this->caps()->requiresManualFBBarrierAfterTessellatedStencilDraw()); |
3513 | GL_CALL(MemoryBarrier(GR_GL_FRAMEBUFFER_BARRIER_BIT)); |
3514 | } |
3515 | |
3516 | GrBackendTexture GrGLGpu::onCreateBackendTexture(SkISize dimensions, |
3517 | const GrBackendFormat& format, |
3518 | GrRenderable renderable, |
3519 | GrMipMapped mipMapped, |
3520 | GrProtected isProtected, |
3521 | const BackendTextureData* data) { |
3522 | // We don't support protected textures in GL. |
3523 | if (isProtected == GrProtected::kYes) { |
3524 | return {}; |
3525 | } |
3526 | |
3527 | this->handleDirtyContext(); |
3528 | |
3529 | GrGLFormat glFormat = format.asGLFormat(); |
3530 | if (glFormat == GrGLFormat::kUnknown) { |
3531 | return {}; |
3532 | } |
3533 | |
3534 | int numMipLevels = 1; |
3535 | if (mipMapped == GrMipMapped::kYes) { |
3536 | numMipLevels = SkMipMap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1; |
3537 | } |
3538 | |
3539 | // Compressed formats go through onCreateCompressedBackendTexture |
3540 | SkASSERT(!GrGLFormatIsCompressed(glFormat)); |
3541 | |
3542 | GrGLTextureInfo info; |
3543 | GrGLTextureParameters::SamplerOverriddenState initialState; |
3544 | |
3549 | info.fTarget = GR_GL_TEXTURE_2D; |
3550 | info.fFormat = GrGLFormatToEnum(glFormat); |
3551 | info.fID = this->createTexture2D(dimensions, glFormat, renderable, &initialState, numMipLevels); |
3552 | if (!info.fID) { |
3553 | return {}; |
3554 | } |
3555 | |
3556 | SkASSERT(!data || data->type() != BackendTextureData::Type::kCompressed); |
3557 | if (data && data->type() == BackendTextureData::Type::kPixmaps) { |
3558 | SkTDArray<GrMipLevel> texels; |
3559 | GrColorType colorType = SkColorTypeToGrColorType(data->pixmap(0).colorType()); |
3560 | texels.append(numMipLevels); |
3561 | for (int i = 0; i < numMipLevels; ++i) { |
3562 | texels[i] = {data->pixmap(i).addr(), data->pixmap(i).rowBytes()}; |
3563 | } |
3564 | if (!this->uploadTexData(glFormat, colorType, dimensions.width(), dimensions.height(), |
3565 | GR_GL_TEXTURE_2D, 0, 0, dimensions.width(), dimensions.height(), |
3566 | colorType, texels.begin(), texels.count())) { |
3567 | GL_CALL(DeleteTextures(1, &info.fID)); |
3568 | return {}; |
3569 | } |
3570 | } else if (data && data->type() == BackendTextureData::Type::kColor) { |
3571 | // TODO: Unify this with the clear texture code in onCreateTexture(). |
3572 | GrColorType colorType; |
3573 | GrGLenum externalFormat, externalType; |
3574 | this->glCaps().getTexSubImageDefaultFormatTypeAndColorType(glFormat, &externalFormat, |
3575 | &externalType, &colorType); |
3576 | if (colorType == GrColorType::kUnknown) { |
3577 | GL_CALL(DeleteTextures(1, &info.fID)); |
3578 | return {}; |
3579 | } |
3580 | |
3581 | // Make one tight image at the base size and reuse it for smaller levels. |
3582 | GrImageInfo ii(colorType, kUnpremul_SkAlphaType, nullptr, dimensions); |
3583 | auto rb = ii.minRowBytes(); |
3584 | std::unique_ptr<char[]> pixelStorage(new char[rb * dimensions.height()]); |
3585 | if (!GrClearImage(ii, pixelStorage.get(), rb, data->color())) { |
3586 | GL_CALL(DeleteTextures(1, &info.fID)); |
3587 | return {}; |
3588 | } |
3589 | |
3590 | GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1)); |
3591 | SkISize levelDimensions = dimensions; |
3592 | for (int i = 0; i < numMipLevels; ++i) { |
3593 | GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D, i, 0, 0, levelDimensions.width(), |
3594 | levelDimensions.height(), externalFormat, externalType, |
3595 | pixelStorage.get())); |
3596 | levelDimensions = {std::max(1, levelDimensions.width() /2), |
3597 | std::max(1, levelDimensions.height()/2)}; |
3598 | } |
3599 | } |
3600 | // Unbind this texture from the scratch texture unit. |
3601 | this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, 0); |
3602 | |
3603 | auto parameters = sk_make_sp<GrGLTextureParameters>(); |
3604 | // The non-sampler params are still at their default values. |
3605 | parameters->set(&initialState, GrGLTextureParameters::NonsamplerState(), |
3606 | fResetTimestampForTextureParameters); |
3607 | |
3608 | return GrBackendTexture(dimensions.width(), dimensions.height(), mipMapped, info, |
3609 | std::move(parameters)); |
3610 | } |
3611 | |
3612 | void GrGLGpu::deleteBackendTexture(const GrBackendTexture& tex) { |
3613 | SkASSERT(GrBackendApi::kOpenGL == tex.backend()); |
3614 | |
3615 | GrGLTextureInfo info; |
3616 | if (tex.getGLTextureInfo(&info)) { |
3617 | GL_CALL(DeleteTextures(1, &info.fID)); |
3618 | } |
3619 | } |
3620 | |
3621 | #if GR_TEST_UTILS |
3622 | |
3623 | bool GrGLGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const { |
3624 | SkASSERT(GrBackendApi::kOpenGL == tex.backend()); |
3625 | |
3626 | GrGLTextureInfo info; |
3627 | if (!tex.getGLTextureInfo(&info)) { |
3628 | return false; |
3629 | } |
3630 | |
3631 | GrGLboolean result; |
3632 | GL_CALL_RET(result, IsTexture(info.fID)); |
3633 | |
3634 | return (GR_GL_TRUE == result); |
3635 | } |
3636 | |
3637 | GrBackendRenderTarget GrGLGpu::createTestingOnlyBackendRenderTarget(int w, int h, |
3638 | GrColorType colorType) { |
3639 | if (w > this->caps()->maxRenderTargetSize() || h > this->caps()->maxRenderTargetSize()) { |
3640 | return GrBackendRenderTarget(); // invalid |
3641 | } |
3642 | this->handleDirtyContext(); |
3643 | auto format = this->glCaps().getFormatFromColorType(colorType); |
3644 | if (!this->glCaps().isFormatRenderable(format, 1)) { |
3645 | return {}; |
3646 | } |
3647 | bool useTexture = format == GrGLFormat::kBGRA8; |
3648 | int sFormatIdx = this->getCompatibleStencilIndex(format); |
3649 | if (sFormatIdx < 0) { |
3650 | return {}; |
3651 | } |
3652 | GrGLuint colorID = 0; |
3653 | GrGLuint stencilID = 0; |
3654 | auto deleteIDs = [&] { |
3655 | if (colorID) { |
3656 | if (useTexture) { |
3657 | GL_CALL(DeleteTextures(1, &colorID)); |
3658 | } else { |
3659 | GL_CALL(DeleteRenderbuffers(1, &colorID)); |
3660 | } |
3661 | } |
3662 | if (stencilID) { |
3663 | GL_CALL(DeleteRenderbuffers(1, &stencilID)); |
3664 | } |
3665 | }; |
3666 | |
3667 | if (useTexture) { |
3668 | GL_CALL(GenTextures(1, &colorID)); |
3669 | } else { |
3670 | GL_CALL(GenRenderbuffers(1, &colorID)); |
3671 | } |
3672 | GL_CALL(GenRenderbuffers(1, &stencilID)); |
3673 | if (!stencilID || !colorID) { |
3674 | deleteIDs(); |
3675 | return {}; |
3676 | } |
3677 | |
3678 | GrGLFramebufferInfo info; |
3679 | info.fFBOID = 0; |
3680 | info.fFormat = GrGLFormatToEnum(format); |
3681 | GL_CALL(GenFramebuffers(1, &info.fFBOID)); |
3682 | if (!info.fFBOID) { |
3683 | deleteIDs(); |
3684 | return {}; |
3685 | } |
3686 | |
3687 | this->invalidateBoundRenderTarget(); |
3688 | |
3689 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID); |
3690 | if (useTexture) { |
3691 | GrGLTextureParameters::SamplerOverriddenState initialState; |
3692 | colorID = this->createTexture2D({w, h}, format, GrRenderable::kYes, &initialState, 1); |
3693 | if (!colorID) { |
3694 | deleteIDs(); |
3695 | return {}; |
3696 | } |
3697 | GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, GR_GL_TEXTURE_2D, |
3698 | colorID, 0)); |
3699 | } else { |
3700 | GrGLenum renderBufferFormat = this->glCaps().getRenderbufferInternalFormat(format); |
3701 | GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, colorID)); |
3702 | GL_CALL(RenderbufferStorage(GR_GL_RENDERBUFFER, renderBufferFormat, w, h)); |
3703 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, |
3704 | GR_GL_RENDERBUFFER, colorID)); |
3705 | } |
3706 | GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, stencilID)); |
3707 | auto stencilBufferFormat = this->glCaps().stencilFormats()[sFormatIdx].fInternalFormat; |
3708 | GL_CALL(RenderbufferStorage(GR_GL_RENDERBUFFER, stencilBufferFormat, w, h)); |
3709 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, GR_GL_RENDERBUFFER, |
3710 | stencilID)); |
3711 | if (this->glCaps().stencilFormats()[sFormatIdx].fPacked) { |
3712 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT, |
3713 | GR_GL_RENDERBUFFER, stencilID)); |
3714 | } |
3715 | |
    // We don't want to have to recover the renderbuffer/texture IDs later to delete them. OpenGL
    // has a rule that if a renderbuffer/texture is deleted while an FBO other than the current FBO
    // has it attached, then the deletion is delayed. So we unbind the FBO here and delete the
    // renderbuffers/texture.
3720 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0); |
3721 | deleteIDs(); |
3722 | |
3723 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID); |
3724 | GrGLenum status; |
3725 | GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); |
3726 | if (GR_GL_FRAMEBUFFER_COMPLETE != status) { |
3727 | this->deleteFramebuffer(info.fFBOID); |
3728 | return {}; |
3729 | } |
3730 | auto stencilBits = SkToInt(this->glCaps().stencilFormats()[sFormatIdx].fStencilBits); |
3731 | |
3732 | GrBackendRenderTarget beRT = GrBackendRenderTarget(w, h, 1, stencilBits, info); |
3733 | SkASSERT(this->caps()->areColorTypeAndFormatCompatible(colorType, beRT.getBackendFormat())); |
3734 | return beRT; |
3735 | } |
3736 | |
3737 | void GrGLGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& backendRT) { |
3738 | SkASSERT(GrBackendApi::kOpenGL == backendRT.backend()); |
3739 | GrGLFramebufferInfo info; |
3740 | if (backendRT.getGLFramebufferInfo(&info)) { |
3741 | if (info.fFBOID) { |
3742 | this->deleteFramebuffer(info.fFBOID); |
3743 | } |
3744 | } |
3745 | } |
3746 | |
3747 | void GrGLGpu::testingOnly_flushGpuAndSync() { |
3748 | GL_CALL(Finish()); |
3749 | } |
3750 | #endif |
3751 | |
3752 | /////////////////////////////////////////////////////////////////////////////// |
3753 | |
3754 | GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLGpu* gpu, |
3755 | const GrBuffer* ibuf) { |
3756 | SkASSERT(!ibuf || ibuf->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(ibuf)->isMapped()); |
3757 | GrGLAttribArrayState* attribState; |
3758 | |
3759 | if (gpu->glCaps().isCoreProfile()) { |
3760 | if (!fCoreProfileVertexArray) { |
3761 | GrGLuint arrayID; |
3762 | GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID)); |
3763 | int attrCount = gpu->glCaps().maxVertexAttributes(); |
3764 | fCoreProfileVertexArray = new GrGLVertexArray(arrayID, attrCount); |
3765 | } |
3766 | if (ibuf) { |
3767 | attribState = fCoreProfileVertexArray->bindWithIndexBuffer(gpu, ibuf); |
3768 | } else { |
3769 | attribState = fCoreProfileVertexArray->bind(gpu); |
3770 | } |
3771 | } else { |
3772 | if (ibuf) { |
3773 | // bindBuffer implicitly binds VAO 0 when binding an index buffer. |
3774 | gpu->bindBuffer(GrGpuBufferType::kIndex, ibuf); |
3775 | } else { |
3776 | this->setVertexArrayID(gpu, 0); |
3777 | } |
3778 | int attrCount = gpu->glCaps().maxVertexAttributes(); |
3779 | if (fDefaultVertexArrayAttribState.count() != attrCount) { |
3780 | fDefaultVertexArrayAttribState.resize(attrCount); |
3781 | } |
3782 | attribState = &fDefaultVertexArrayAttribState; |
3783 | } |
3784 | return attribState; |
3785 | } |
3786 | |
3787 | void GrGLGpu::addFinishedProc(GrGpuFinishedProc finishedProc, |
3788 | GrGpuFinishedContext finishedContext) { |
3789 | SkASSERT(finishedProc); |
3790 | FinishCallback callback; |
3791 | callback.fCallback = finishedProc; |
3792 | callback.fContext = finishedContext; |
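    // Without fence sync support there is nothing to poll; the callback will only fire after a
    // full glFinish in onSubmitToGpu().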
3793 | if (this->caps()->fenceSyncSupport()) { |
3794 | callback.fSync = (GrGLsync)this->insertFence(); |
3795 | } else { |
3796 | callback.fSync = 0; |
3797 | } |
3798 | fFinishCallbacks.push_back(callback); |
3799 | } |
3800 | |
3801 | bool GrGLGpu::onSubmitToGpu(bool syncCpu) { |
3802 | if (syncCpu || (!fFinishCallbacks.empty() && !this->caps()->fenceSyncSupport())) { |
3803 | GL_CALL(Finish()); |
3804 | // After a finish everything previously sent to GL is done. |
3805 | for (const auto& cb : fFinishCallbacks) { |
3806 | cb.fCallback(cb.fContext); |
3807 | if (cb.fSync) { |
3808 | this->deleteSync(cb.fSync); |
3809 | } else { |
3810 | SkASSERT(!this->caps()->fenceSyncSupport()); |
3811 | } |
3812 | } |
3813 | fFinishCallbacks.clear(); |
3814 | } else { |
3815 | GL_CALL(Flush()); |
3816 | // See if any previously inserted finish procs are good to go. |
3817 | this->checkFinishProcs(); |
3818 | } |
3819 | return true; |
3820 | } |
3821 | |
3822 | void GrGLGpu::submit(GrOpsRenderPass* renderPass) { |
3823 | // The GrGLOpsRenderPass doesn't buffer ops so there is nothing to do here |
3824 | SkASSERT(fCachedOpsRenderPass.get() == renderPass); |
3825 | fCachedOpsRenderPass->reset(); |
3826 | } |
3827 | |
3828 | GrFence SK_WARN_UNUSED_RESULT GrGLGpu::insertFence() { |
3829 | SkASSERT(this->caps()->fenceSyncSupport()); |
3830 | GrGLsync sync; |
3831 | if (this->glCaps().fenceType() == GrGLCaps::FenceType::kNVFence) { |
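        // GL_NV_fence uses GrGLuint fence names rather than sync objects, so pack the name into
        // the GrGLsync handle; waitSync() and deleteSync() unpack it again.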
3832 | static_assert(sizeof(GrGLsync) >= sizeof(GrGLuint)); |
3833 | GrGLuint fence = 0; |
3834 | GL_CALL(GenFences(1, &fence)); |
3835 | GL_CALL(SetFence(fence, GR_GL_ALL_COMPLETED)); |
3836 | sync = reinterpret_cast<GrGLsync>(static_cast<intptr_t>(fence)); |
3837 | } else { |
3838 | GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0)); |
3839 | } |
3840 | static_assert(sizeof(GrFence) >= sizeof(GrGLsync)); |
3841 | return (GrFence)sync; |
3842 | } |
3843 | |
3844 | bool GrGLGpu::waitSync(GrGLsync sync, uint64_t timeout, bool flush) { |
3845 | if (this->glCaps().fenceType() == GrGLCaps::FenceType::kNVFence) { |
3846 | GrGLuint nvFence = static_cast<GrGLuint>(reinterpret_cast<intptr_t>(sync)); |
3847 | if (!timeout) { |
3848 | if (flush) { |
                GL_CALL(Flush());
3850 | } |
3851 | GrGLboolean result; |
3852 | GL_CALL_RET(result, TestFence(nvFence)); |
3853 | return result == GR_GL_TRUE; |
3854 | } |
3855 | // Ignore non-zero timeouts. GL_NV_fence has no timeout functionality. |
3856 | // If this really becomes necessary we could poll TestFence(). |
3857 | // FinishFence always flushes so no need to check flush param. |
3858 | GL_CALL(FinishFence(nvFence)); |
3859 | return true; |
3860 | } else { |
3861 | GrGLbitfield flags = flush ? GR_GL_SYNC_FLUSH_COMMANDS_BIT : 0; |
3862 | GrGLenum result; |
3863 | GL_CALL_RET(result, ClientWaitSync(sync, flags, timeout)); |
3864 | return (GR_GL_CONDITION_SATISFIED == result || GR_GL_ALREADY_SIGNALED == result); |
3865 | } |
3866 | } |
3867 | |
3868 | bool GrGLGpu::waitFence(GrFence fence, uint64_t timeout) { |
3869 | return this->waitSync((GrGLsync)fence, timeout, /* flush = */ true); |
3870 | } |
3871 | |
3872 | void GrGLGpu::deleteFence(GrFence fence) const { |
3873 | this->deleteSync((GrGLsync)fence); |
3874 | } |
3875 | |
3876 | std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrGLGpu::makeSemaphore(bool isOwned) { |
3877 | SkASSERT(this->caps()->semaphoreSupport()); |
3878 | return GrGLSemaphore::Make(this, isOwned); |
3879 | } |
3880 | |
3881 | std::unique_ptr<GrSemaphore> GrGLGpu::wrapBackendSemaphore( |
3882 | const GrBackendSemaphore& semaphore, |
3883 | GrResourceProvider::SemaphoreWrapType wrapType, |
3884 | GrWrapOwnership ownership) { |
3885 | SkASSERT(this->caps()->semaphoreSupport()); |
3886 | return GrGLSemaphore::MakeWrapped(this, semaphore.glSync(), ownership); |
3887 | } |
3888 | |
3889 | void GrGLGpu::insertSemaphore(GrSemaphore* semaphore) { |
3890 | GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore); |
3891 | |
3892 | GrGLsync sync; |
3893 | GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0)); |
3894 | glSem->setSync(sync); |
3895 | } |
3896 | |
3897 | void GrGLGpu::waitSemaphore(GrSemaphore* semaphore) { |
3898 | GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore); |
3899 | |
3900 | GL_CALL(WaitSync(glSem->sync(), 0, GR_GL_TIMEOUT_IGNORED)); |
3901 | } |
3902 | |
3903 | void GrGLGpu::checkFinishProcs() { |
    // Bail after the first unfinished sync since we expect them to signal in the order inserted.
3905 | while (!fFinishCallbacks.empty() && this->waitSync(fFinishCallbacks.front().fSync, |
3906 | /* timeout = */ 0, /* flush = */ false)) { |
3907 | fFinishCallbacks.front().fCallback(fFinishCallbacks.front().fContext); |
3908 | this->deleteSync(fFinishCallbacks.front().fSync); |
3909 | fFinishCallbacks.pop_front(); |
3910 | } |
3911 | } |
3912 | |
3913 | void GrGLGpu::deleteSync(GrGLsync sync) const { |
3914 | if (this->glCaps().fenceType() == GrGLCaps::FenceType::kNVFence) { |
3915 | GrGLuint nvFence = SkToUInt(reinterpret_cast<intptr_t>(sync)); |
3916 | GL_CALL(DeleteFences(1, &nvFence)); |
3917 | } else { |
3918 | GL_CALL(DeleteSync(sync)); |
3919 | } |
3920 | } |
3921 | |
3922 | std::unique_ptr<GrSemaphore> GrGLGpu::prepareTextureForCrossContextUsage(GrTexture* texture) { |
3923 | // Set up a semaphore to be signaled once the data is ready, and flush GL |
3924 | std::unique_ptr<GrSemaphore> semaphore = this->makeSemaphore(true); |
3925 | SkASSERT(semaphore); |
3926 | this->insertSemaphore(semaphore.get()); |
3927 | // We must call flush here to make sure the GrGLSync object gets created and sent to the gpu. |
3928 | GL_CALL(Flush()); |
3929 | |
3930 | return semaphore; |
3931 | } |
3932 | |
3933 | int GrGLGpu::TextureToCopyProgramIdx(GrTexture* texture) { |
3934 | switch (GrSLCombinedSamplerTypeForTextureType(texture->texturePriv().textureType())) { |
3935 | case kTexture2DSampler_GrSLType: |
3936 | return 0; |
3937 | case kTexture2DRectSampler_GrSLType: |
3938 | return 1; |
3939 | case kTextureExternalSampler_GrSLType: |
3940 | return 2; |
3941 | default: |
            SK_ABORT("Unexpected sampler type");
3943 | } |
3944 | } |
3945 | |
3946 | #ifdef SK_ENABLE_DUMP_GPU |
3947 | #include "src/utils/SkJSONWriter.h" |
3948 | void GrGLGpu::onDumpJSON(SkJSONWriter* writer) const { |
3949 | // We are called by the base class, which has already called beginObject(). We choose to nest |
3950 | // all of our caps information in a named sub-object. |
    writer->beginObject("GL GPU");

    const GrGLubyte* str;
    GL_CALL_RET(str, GetString(GR_GL_VERSION));
    writer->appendString("GL_VERSION", (const char*)(str));
    GL_CALL_RET(str, GetString(GR_GL_RENDERER));
    writer->appendString("GL_RENDERER", (const char*)(str));
    GL_CALL_RET(str, GetString(GR_GL_VENDOR));
    writer->appendString("GL_VENDOR", (const char*)(str));
    GL_CALL_RET(str, GetString(GR_GL_SHADING_LANGUAGE_VERSION));
    writer->appendString("GL_SHADING_LANGUAGE_VERSION", (const char*)(str));

    writer->appendName("extensions");
3964 | glInterface()->fExtensions.dumpJSON(writer); |
3965 | |
3966 | writer->endObject(); |
3967 | } |
3968 | #endif |
3969 | |