1 | /* |
2 | * Copyright 2011 Google Inc. |
3 | * |
4 | * Use of this source code is governed by a BSD-style license that can be |
5 | * found in the LICENSE file. |
6 | */ |
7 | |
8 | #include "src/gpu/gl/GrGLGpu.h" |
9 | |
10 | #include "include/core/SkPixmap.h" |
11 | #include "include/core/SkTypes.h" |
12 | #include "include/gpu/GrBackendSemaphore.h" |
13 | #include "include/gpu/GrBackendSurface.h" |
14 | #include "include/gpu/GrDirectContext.h" |
15 | #include "include/gpu/GrTypes.h" |
16 | #include "include/private/SkHalf.h" |
17 | #include "include/private/SkTemplates.h" |
18 | #include "include/private/SkTo.h" |
19 | #include "src/core/SkAutoMalloc.h" |
20 | #include "src/core/SkCompressedDataUtils.h" |
21 | #include "src/core/SkMipmap.h" |
22 | #include "src/core/SkTraceEvent.h" |
23 | #include "src/gpu/GrBackendUtils.h" |
24 | #include "src/gpu/GrContextPriv.h" |
25 | #include "src/gpu/GrCpuBuffer.h" |
26 | #include "src/gpu/GrDataUtils.h" |
27 | #include "src/gpu/GrGpuResourcePriv.h" |
28 | #include "src/gpu/GrPipeline.h" |
29 | #include "src/gpu/GrProgramInfo.h" |
30 | #include "src/gpu/GrRenderTarget.h" |
31 | #include "src/gpu/GrShaderCaps.h" |
32 | #include "src/gpu/GrSurfaceProxyPriv.h" |
33 | #include "src/gpu/GrTexture.h" |
34 | #include "src/gpu/gl/GrGLBuffer.h" |
35 | #include "src/gpu/gl/GrGLOpsRenderPass.h" |
36 | #include "src/gpu/gl/GrGLSemaphore.h" |
37 | #include "src/gpu/gl/GrGLStencilAttachment.h" |
38 | #include "src/gpu/gl/GrGLTextureRenderTarget.h" |
39 | #include "src/gpu/gl/builders/GrGLShaderStringBuilder.h" |
40 | #include "src/sksl/SkSLCompiler.h" |
41 | |
42 | #include <cmath> |
43 | #include <memory> |
44 | |
45 | #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X) |
46 | #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X) |
47 | |
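// Issues a GL call that allocates GPU memory and returns the resulting GL error so callers can
// detect allocation failures. When the caps allow skipping error checks, the call is issued
// directly and GR_GL_NO_ERROR is returned.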
48 | #define GL_ALLOC_CALL(call) \ |
49 | [&] { \ |
50 | if (this->glCaps().skipErrorChecks()) { \ |
51 | GR_GL_CALL(this->glInterface(), call); \ |
52 | return static_cast<GrGLenum>(GR_GL_NO_ERROR); \ |
53 | } else { \ |
54 | this->clearErrorsAndCheckForOOM(); \ |
55 | GR_GL_CALL_NOERRCHECK(this->glInterface(), call); \ |
56 | return this->getErrorAndCheckForOOM(); \ |
57 | } \ |
58 | }() |
59 | |
60 | //#define USE_NSIGHT |
61 | |
62 | /////////////////////////////////////////////////////////////////////////////// |
63 | |
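// Maps GrBlendEquation values to the corresponding GL blend-equation enums. The static_asserts
// below verify that the array order matches the GrBlendEquation enum.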
64 | static const GrGLenum gXfermodeEquation2Blend[] = { |
65 | // Basic OpenGL blend equations. |
66 | GR_GL_FUNC_ADD, |
67 | GR_GL_FUNC_SUBTRACT, |
68 | GR_GL_FUNC_REVERSE_SUBTRACT, |
69 | |
70 | // GL_KHR_blend_equation_advanced. |
71 | GR_GL_SCREEN, |
72 | GR_GL_OVERLAY, |
73 | GR_GL_DARKEN, |
74 | GR_GL_LIGHTEN, |
75 | GR_GL_COLORDODGE, |
76 | GR_GL_COLORBURN, |
77 | GR_GL_HARDLIGHT, |
78 | GR_GL_SOFTLIGHT, |
79 | GR_GL_DIFFERENCE, |
80 | GR_GL_EXCLUSION, |
81 | GR_GL_MULTIPLY, |
82 | GR_GL_HSL_HUE, |
83 | GR_GL_HSL_SATURATION, |
84 | GR_GL_HSL_COLOR, |
85 | GR_GL_HSL_LUMINOSITY, |
86 | |
87 | // Illegal... needs to map to something. |
88 | GR_GL_FUNC_ADD, |
89 | }; |
90 | static_assert(0 == kAdd_GrBlendEquation); |
91 | static_assert(1 == kSubtract_GrBlendEquation); |
92 | static_assert(2 == kReverseSubtract_GrBlendEquation); |
93 | static_assert(3 == kScreen_GrBlendEquation); |
94 | static_assert(4 == kOverlay_GrBlendEquation); |
95 | static_assert(5 == kDarken_GrBlendEquation); |
96 | static_assert(6 == kLighten_GrBlendEquation); |
97 | static_assert(7 == kColorDodge_GrBlendEquation); |
98 | static_assert(8 == kColorBurn_GrBlendEquation); |
99 | static_assert(9 == kHardLight_GrBlendEquation); |
100 | static_assert(10 == kSoftLight_GrBlendEquation); |
101 | static_assert(11 == kDifference_GrBlendEquation); |
102 | static_assert(12 == kExclusion_GrBlendEquation); |
103 | static_assert(13 == kMultiply_GrBlendEquation); |
104 | static_assert(14 == kHSLHue_GrBlendEquation); |
105 | static_assert(15 == kHSLSaturation_GrBlendEquation); |
106 | static_assert(16 == kHSLColor_GrBlendEquation); |
107 | static_assert(17 == kHSLLuminosity_GrBlendEquation); |
108 | static_assert(SK_ARRAY_COUNT(gXfermodeEquation2Blend) == kGrBlendEquationCnt); |
109 | |
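// Maps GrBlendCoeff values to GL blend-function coefficients. The final entry stands in for the
// illegal coefficient.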
110 | static const GrGLenum gXfermodeCoeff2Blend[] = { |
111 | GR_GL_ZERO, |
112 | GR_GL_ONE, |
113 | GR_GL_SRC_COLOR, |
114 | GR_GL_ONE_MINUS_SRC_COLOR, |
115 | GR_GL_DST_COLOR, |
116 | GR_GL_ONE_MINUS_DST_COLOR, |
117 | GR_GL_SRC_ALPHA, |
118 | GR_GL_ONE_MINUS_SRC_ALPHA, |
119 | GR_GL_DST_ALPHA, |
120 | GR_GL_ONE_MINUS_DST_ALPHA, |
121 | GR_GL_CONSTANT_COLOR, |
122 | GR_GL_ONE_MINUS_CONSTANT_COLOR, |
123 | |
124 | // extended blend coeffs |
125 | GR_GL_SRC1_COLOR, |
126 | GR_GL_ONE_MINUS_SRC1_COLOR, |
127 | GR_GL_SRC1_ALPHA, |
128 | GR_GL_ONE_MINUS_SRC1_ALPHA, |
129 | |
130 | // Illegal... needs to map to something. |
131 | GR_GL_ZERO, |
132 | }; |
133 | |
134 | ////////////////////////////////////////////////////////////////////////////// |
135 | |
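// Maps a GL texture target to a dense index into TextureUnitBindings::fTargetBindings.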
136 | static int gl_target_to_binding_index(GrGLenum target) { |
137 | switch (target) { |
138 | case GR_GL_TEXTURE_2D: |
139 | return 0; |
140 | case GR_GL_TEXTURE_RECTANGLE: |
141 | return 1; |
142 | case GR_GL_TEXTURE_EXTERNAL: |
143 | return 2; |
144 | } |
145 | SK_ABORT("Unexpected GL texture target." ); |
146 | } |
147 | |
148 | GrGpuResource::UniqueID GrGLGpu::TextureUnitBindings::boundID(GrGLenum target) const { |
149 | return fTargetBindings[gl_target_to_binding_index(target)].fBoundResourceID; |
150 | } |
151 | |
152 | bool GrGLGpu::TextureUnitBindings::hasBeenModified(GrGLenum target) const { |
153 | return fTargetBindings[gl_target_to_binding_index(target)].fHasBeenModified; |
154 | } |
155 | |
156 | void GrGLGpu::TextureUnitBindings::setBoundID(GrGLenum target, GrGpuResource::UniqueID resourceID) { |
157 | int targetIndex = gl_target_to_binding_index(target); |
158 | fTargetBindings[targetIndex].fBoundResourceID = resourceID; |
159 | fTargetBindings[targetIndex].fHasBeenModified = true; |
160 | } |
161 | |
162 | void GrGLGpu::TextureUnitBindings::invalidateForScratchUse(GrGLenum target) { |
163 | this->setBoundID(target, GrGpuResource::UniqueID()); |
164 | } |
165 | |
166 | void GrGLGpu::TextureUnitBindings::invalidateAllTargets(bool markUnmodified) { |
167 | for (auto& targetBinding : fTargetBindings) { |
168 | targetBinding.fBoundResourceID.makeInvalid(); |
169 | if (markUnmodified) { |
170 | targetBinding.fHasBeenModified = false; |
171 | } |
172 | } |
173 | } |
174 | |
175 | ////////////////////////////////////////////////////////////////////////////// |
176 | |
177 | static GrGLenum filter_to_gl_mag_filter(GrSamplerState::Filter filter) { |
178 | switch (filter) { |
179 | case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST; |
180 | case GrSamplerState::Filter::kLinear: return GR_GL_LINEAR; |
181 | } |
182 | SkUNREACHABLE; |
183 | } |
184 | |
185 | static GrGLenum filter_to_gl_min_filter(GrSamplerState::Filter filter, |
186 | GrSamplerState::MipmapMode mm) { |
187 | switch (mm) { |
188 | case GrSamplerState::MipmapMode::kNone: |
189 | return filter_to_gl_mag_filter(filter); |
190 | case GrSamplerState::MipmapMode::kNearest: |
191 | switch (filter) { |
192 | case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST_MIPMAP_NEAREST; |
193 | case GrSamplerState::Filter::kLinear: return GR_GL_LINEAR_MIPMAP_NEAREST; |
194 | } |
195 | SkUNREACHABLE; |
196 | case GrSamplerState::MipmapMode::kLinear: |
197 | switch (filter) { |
198 | case GrSamplerState::Filter::kNearest: return GR_GL_NEAREST_MIPMAP_LINEAR; |
199 | case GrSamplerState::Filter::kLinear: return GR_GL_LINEAR_MIPMAP_LINEAR; |
200 | } |
201 | SkUNREACHABLE; |
202 | } |
203 | SkUNREACHABLE; |
204 | } |
205 | |
206 | static inline GrGLenum wrap_mode_to_gl_wrap(GrSamplerState::WrapMode wrapMode, |
207 | const GrCaps& caps) { |
208 | switch (wrapMode) { |
209 | case GrSamplerState::WrapMode::kClamp: return GR_GL_CLAMP_TO_EDGE; |
210 | case GrSamplerState::WrapMode::kRepeat: return GR_GL_REPEAT; |
211 | case GrSamplerState::WrapMode::kMirrorRepeat: return GR_GL_MIRRORED_REPEAT; |
212 | case GrSamplerState::WrapMode::kClampToBorder: |
213 | // May not be supported but should have been caught earlier |
214 | SkASSERT(caps.clampToBorderSupport()); |
215 | return GR_GL_CLAMP_TO_BORDER; |
216 | } |
217 | SkUNREACHABLE; |
218 | } |
219 | |
220 | /////////////////////////////////////////////////////////////////////////////// |
221 | |
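// Lazily creates one GL sampler object per unique GrSamplerState and tracks which sampler is
// bound to each texture unit so redundant BindSampler calls can be skipped.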
222 | class GrGLGpu::SamplerObjectCache { |
223 | public: |
224 | SamplerObjectCache(GrGLGpu* gpu) : fGpu(gpu) { |
225 | fNumTextureUnits = fGpu->glCaps().shaderCaps()->maxFragmentSamplers(); |
226 | fHWBoundSamplers.reset(new GrGLuint[fNumTextureUnits]); |
227 | std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0); |
228 | std::fill_n(fSamplers, kNumSamplers, 0); |
229 | } |
230 | |
231 | ~SamplerObjectCache() { |
232 | if (!fNumTextureUnits) { |
233 | // We've already been abandoned. |
234 | return; |
235 | } |
236 | for (GrGLuint sampler : fSamplers) { |
            // The spec states that "zero" values should be silently ignored; however, they still
            // trigger GL errors on some NVIDIA platforms.
239 | if (sampler) { |
240 | GR_GL_CALL(fGpu->glInterface(), DeleteSamplers(1, &sampler)); |
241 | } |
242 | } |
243 | } |
244 | |
245 | void bindSampler(int unitIdx, GrSamplerState state) { |
246 | int index = state.asIndex(); |
247 | if (!fSamplers[index]) { |
248 | GrGLuint s; |
249 | GR_GL_CALL(fGpu->glInterface(), GenSamplers(1, &s)); |
250 | if (!s) { |
251 | return; |
252 | } |
253 | fSamplers[index] = s; |
254 | GrGLenum minFilter = filter_to_gl_min_filter(state.filter(), state.mipmapMode()); |
255 | GrGLenum magFilter = filter_to_gl_mag_filter(state.filter()); |
256 | GrGLenum wrapX = wrap_mode_to_gl_wrap(state.wrapModeX(), fGpu->glCaps()); |
257 | GrGLenum wrapY = wrap_mode_to_gl_wrap(state.wrapModeY(), fGpu->glCaps()); |
258 | GR_GL_CALL(fGpu->glInterface(), |
259 | SamplerParameteri(s, GR_GL_TEXTURE_MIN_FILTER, minFilter)); |
260 | GR_GL_CALL(fGpu->glInterface(), |
261 | SamplerParameteri(s, GR_GL_TEXTURE_MAG_FILTER, magFilter)); |
262 | GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_S, wrapX)); |
263 | GR_GL_CALL(fGpu->glInterface(), SamplerParameteri(s, GR_GL_TEXTURE_WRAP_T, wrapY)); |
264 | } |
265 | if (fHWBoundSamplers[unitIdx] != fSamplers[index]) { |
266 | GR_GL_CALL(fGpu->glInterface(), BindSampler(unitIdx, fSamplers[index])); |
267 | fHWBoundSamplers[unitIdx] = fSamplers[index]; |
268 | } |
269 | } |
270 | |
271 | void invalidateBindings() { |
272 | // When we have sampler support we always use samplers. So setting these to zero will cause |
273 | // a rebind on next usage. |
274 | std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0); |
275 | } |
276 | |
277 | void abandon() { |
278 | fHWBoundSamplers.reset(); |
279 | fNumTextureUnits = 0; |
280 | } |
281 | |
282 | void release() { |
283 | if (!fNumTextureUnits) { |
284 | // We've already been abandoned. |
285 | return; |
286 | } |
287 | GR_GL_CALL(fGpu->glInterface(), DeleteSamplers(kNumSamplers, fSamplers)); |
288 | std::fill_n(fSamplers, kNumSamplers, 0); |
289 | // Deleting a bound sampler implicitly binds sampler 0. |
290 | std::fill_n(fHWBoundSamplers.get(), fNumTextureUnits, 0); |
291 | } |
292 | |
293 | private: |
294 | static constexpr int kNumSamplers = GrSamplerState::kNumUniqueSamplers; |
295 | GrGLGpu* fGpu; |
296 | std::unique_ptr<GrGLuint[]> fHWBoundSamplers; |
297 | GrGLuint fSamplers[kNumSamplers]; |
298 | int fNumTextureUnits; |
299 | }; |
300 | |
301 | /////////////////////////////////////////////////////////////////////////////// |
302 | |
303 | sk_sp<GrGpu> GrGLGpu::Make(sk_sp<const GrGLInterface> interface, const GrContextOptions& options, |
304 | GrDirectContext* direct) { |
305 | if (!interface) { |
306 | interface = GrGLMakeNativeInterface(); |
307 | // For clients that have written their own GrGLCreateNativeInterface and haven't yet updated |
308 | // to GrGLMakeNativeInterface. |
309 | if (!interface) { |
310 | interface = sk_ref_sp(GrGLCreateNativeInterface()); |
311 | } |
312 | if (!interface) { |
313 | return nullptr; |
314 | } |
315 | } |
316 | #ifdef USE_NSIGHT |
317 | const_cast<GrContextOptions&>(options).fSuppressPathRendering = true; |
318 | #endif |
319 | auto glContext = GrGLContext::Make(std::move(interface), options); |
320 | if (!glContext) { |
321 | return nullptr; |
322 | } |
323 | return sk_sp<GrGpu>(new GrGLGpu(std::move(glContext), direct)); |
324 | } |
325 | |
326 | GrGLGpu::GrGLGpu(std::unique_ptr<GrGLContext> ctx, GrDirectContext* direct) |
327 | : GrGpu(direct) |
328 | , fGLContext(std::move(ctx)) |
329 | , fProgramCache(new ProgramCache(this)) |
330 | , fHWProgramID(0) |
331 | , fTempSrcFBOID(0) |
332 | , fTempDstFBOID(0) |
333 | , fStencilClearFBOID(0) |
334 | , fFinishCallbacks(this) { |
335 | SkASSERT(fGLContext); |
    // Clear any pre-existing errors so we don't mistakenly attribute them to our own calls.
337 | this->clearErrorsAndCheckForOOM(); |
338 | // Toss out any pre-existing OOM that was hanging around before we got started. |
339 | this->checkAndResetOOMed(); |
340 | |
341 | fCaps = sk_ref_sp(fGLContext->caps()); |
342 | |
343 | fHWTextureUnitBindings.reset(this->numTextureUnits()); |
344 | |
345 | this->hwBufferState(GrGpuBufferType::kVertex)->fGLTarget = GR_GL_ARRAY_BUFFER; |
346 | this->hwBufferState(GrGpuBufferType::kIndex)->fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER; |
347 | this->hwBufferState(GrGpuBufferType::kDrawIndirect)->fGLTarget = GR_GL_DRAW_INDIRECT_BUFFER; |
348 | if (GrGLCaps::TransferBufferType::kChromium == this->glCaps().transferBufferType()) { |
349 | this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget = |
350 | GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM; |
351 | this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget = |
352 | GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM; |
353 | } else { |
354 | this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER; |
355 | this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->fGLTarget = GR_GL_PIXEL_PACK_BUFFER; |
356 | } |
357 | for (int i = 0; i < kGrGpuBufferTypeCount; ++i) { |
358 | fHWBufferState[i].invalidate(); |
359 | } |
360 | static_assert(kGrGpuBufferTypeCount == SK_ARRAY_COUNT(fHWBufferState)); |
361 | |
362 | if (this->glCaps().shaderCaps()->pathRenderingSupport()) { |
363 | fPathRendering = std::make_unique<GrGLPathRendering>(this); |
364 | } |
365 | |
366 | if (this->glCaps().samplerObjectSupport()) { |
367 | fSamplerObjectCache = std::make_unique<SamplerObjectCache>(this); |
368 | } |
369 | } |
370 | |
371 | GrGLGpu::~GrGLGpu() { |
372 | // Ensure any GrGpuResource objects get deleted first, since they may require a working GrGLGpu |
373 | // to release the resources held by the objects themselves. |
374 | fPathRendering.reset(); |
375 | fCopyProgramArrayBuffer.reset(); |
376 | fMipmapProgramArrayBuffer.reset(); |
377 | |
378 | fHWProgram.reset(); |
379 | if (fHWProgramID) { |
        // Unbind the current program so OpenGL doesn't keep it alive after we
        // request its deletion.
382 | GL_CALL(UseProgram(0)); |
383 | } |
384 | |
385 | if (fTempSrcFBOID) { |
386 | this->deleteFramebuffer(fTempSrcFBOID); |
387 | } |
388 | if (fTempDstFBOID) { |
389 | this->deleteFramebuffer(fTempDstFBOID); |
390 | } |
391 | if (fStencilClearFBOID) { |
392 | this->deleteFramebuffer(fStencilClearFBOID); |
393 | } |
394 | |
395 | for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) { |
396 | if (0 != fCopyPrograms[i].fProgram) { |
397 | GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram)); |
398 | } |
399 | } |
400 | |
401 | for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) { |
402 | if (0 != fMipmapPrograms[i].fProgram) { |
403 | GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram)); |
404 | } |
405 | } |
406 | |
407 | fSamplerObjectCache.reset(); |
408 | |
409 | fFinishCallbacks.callAll(true); |
410 | } |
411 | |
412 | void GrGLGpu::disconnect(DisconnectType type) { |
413 | INHERITED::disconnect(type); |
414 | if (DisconnectType::kCleanup == type) { |
415 | if (fHWProgramID) { |
416 | GL_CALL(UseProgram(0)); |
417 | } |
418 | if (fTempSrcFBOID) { |
419 | this->deleteFramebuffer(fTempSrcFBOID); |
420 | } |
421 | if (fTempDstFBOID) { |
422 | this->deleteFramebuffer(fTempDstFBOID); |
423 | } |
424 | if (fStencilClearFBOID) { |
425 | this->deleteFramebuffer(fStencilClearFBOID); |
426 | } |
427 | for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) { |
428 | if (fCopyPrograms[i].fProgram) { |
429 | GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram)); |
430 | } |
431 | } |
432 | for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) { |
433 | if (fMipmapPrograms[i].fProgram) { |
434 | GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram)); |
435 | } |
436 | } |
437 | |
438 | if (fSamplerObjectCache) { |
439 | fSamplerObjectCache->release(); |
440 | } |
441 | } else { |
442 | if (fProgramCache) { |
443 | fProgramCache->abandon(); |
444 | } |
445 | if (fSamplerObjectCache) { |
446 | fSamplerObjectCache->abandon(); |
447 | } |
448 | } |
449 | |
450 | fHWProgram.reset(); |
451 | fProgramCache.reset(); |
452 | |
453 | fHWProgramID = 0; |
454 | fTempSrcFBOID = 0; |
455 | fTempDstFBOID = 0; |
456 | fStencilClearFBOID = 0; |
457 | fCopyProgramArrayBuffer.reset(); |
458 | for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) { |
459 | fCopyPrograms[i].fProgram = 0; |
460 | } |
461 | fMipmapProgramArrayBuffer.reset(); |
462 | for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) { |
463 | fMipmapPrograms[i].fProgram = 0; |
464 | } |
465 | |
466 | if (this->glCaps().shaderCaps()->pathRenderingSupport()) { |
467 | this->glPathRendering()->disconnect(type); |
468 | } |
469 | fFinishCallbacks.callAll(DisconnectType::kCleanup == type); |
470 | } |
471 | |
472 | /////////////////////////////////////////////////////////////////////////////// |
473 | |
474 | void GrGLGpu::onResetContext(uint32_t resetBits) { |
475 | if (resetBits & kMisc_GrGLBackendState) { |
        // We don't use the depth buffer (z-buffer) at all.
477 | GL_CALL(Disable(GR_GL_DEPTH_TEST)); |
478 | GL_CALL(DepthMask(GR_GL_FALSE)); |
479 | |
480 | // We don't use face culling. |
481 | GL_CALL(Disable(GR_GL_CULL_FACE)); |
482 | // We do use separate stencil. Our algorithms don't care which face is front vs. back so |
483 | // just set this to the default for self-consistency. |
484 | GL_CALL(FrontFace(GR_GL_CCW)); |
485 | |
486 | this->hwBufferState(GrGpuBufferType::kXferCpuToGpu)->invalidate(); |
487 | this->hwBufferState(GrGpuBufferType::kXferGpuToCpu)->invalidate(); |
488 | |
489 | if (GR_IS_GR_GL(this->glStandard())) { |
490 | #ifndef USE_NSIGHT |
491 | // Desktop-only state that we never change |
492 | if (!this->glCaps().isCoreProfile()) { |
493 | GL_CALL(Disable(GR_GL_POINT_SMOOTH)); |
494 | GL_CALL(Disable(GR_GL_LINE_SMOOTH)); |
495 | GL_CALL(Disable(GR_GL_POLYGON_SMOOTH)); |
496 | GL_CALL(Disable(GR_GL_POLYGON_STIPPLE)); |
497 | GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP)); |
498 | GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP)); |
499 | } |
            // The Windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
501 | // core profile. This seems like a bug since the core spec removes any mention of |
502 | // GL_ARB_imaging. |
503 | if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) { |
504 | GL_CALL(Disable(GR_GL_COLOR_TABLE)); |
505 | } |
506 | GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL)); |
507 | |
508 | fHWWireframeEnabled = kUnknown_TriState; |
509 | #endif |
            // Since ES doesn't support glPointSize at all, we always set the point size in the
            // vertex shader; on desktop GL that requires enabling GL_VERTEX_PROGRAM_POINT_SIZE.
512 | GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE)); |
513 | |
514 | } |
515 | |
516 | if (GR_IS_GR_GL_ES(this->glStandard()) && |
517 | this->glCaps().fbFetchRequiresEnablePerSample()) { |
            // The ARM extension requires specifically enabling MSAA fetching per sample.
            // On some devices this may have a perf hit. Also, multiple render targets are
            // disabled when this is enabled.
520 | GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE)); |
521 | } |
522 | fHWWriteToColor = kUnknown_TriState; |
523 | // we only ever use lines in hairline mode |
524 | GL_CALL(LineWidth(1)); |
525 | GL_CALL(Disable(GR_GL_DITHER)); |
526 | |
527 | fHWClearColor[0] = fHWClearColor[1] = fHWClearColor[2] = fHWClearColor[3] = SK_FloatNaN; |
528 | } |
529 | |
530 | if (resetBits & kMSAAEnable_GrGLBackendState) { |
531 | fMSAAEnabled = kUnknown_TriState; |
532 | |
533 | if (this->caps()->mixedSamplesSupport()) { |
534 | // The skia blend modes all use premultiplied alpha and therefore expect RGBA coverage |
535 | // modulation. This state has no effect when not rendering to a mixed sampled target. |
536 | GL_CALL(CoverageModulation(GR_GL_RGBA)); |
537 | } |
538 | |
539 | fHWConservativeRasterEnabled = kUnknown_TriState; |
540 | } |
541 | |
542 | fHWActiveTextureUnitIdx = -1; // invalid |
543 | fLastPrimitiveType = static_cast<GrPrimitiveType>(-1); |
544 | |
545 | if (resetBits & kTextureBinding_GrGLBackendState) { |
546 | for (int s = 0; s < this->numTextureUnits(); ++s) { |
547 | fHWTextureUnitBindings[s].invalidateAllTargets(false); |
548 | } |
549 | if (fSamplerObjectCache) { |
550 | fSamplerObjectCache->invalidateBindings(); |
551 | } |
552 | } |
553 | |
554 | if (resetBits & kBlend_GrGLBackendState) { |
555 | fHWBlendState.invalidate(); |
556 | } |
557 | |
558 | if (resetBits & kView_GrGLBackendState) { |
559 | fHWScissorSettings.invalidate(); |
560 | fHWWindowRectsState.invalidate(); |
561 | fHWViewport.invalidate(); |
562 | } |
563 | |
564 | if (resetBits & kStencil_GrGLBackendState) { |
565 | fHWStencilSettings.invalidate(); |
566 | fHWStencilTestEnabled = kUnknown_TriState; |
567 | } |
568 | |
569 | // Vertex |
570 | if (resetBits & kVertex_GrGLBackendState) { |
571 | fHWVertexArrayState.invalidate(); |
572 | this->hwBufferState(GrGpuBufferType::kVertex)->invalidate(); |
573 | this->hwBufferState(GrGpuBufferType::kIndex)->invalidate(); |
574 | this->hwBufferState(GrGpuBufferType::kDrawIndirect)->invalidate(); |
575 | fHWPatchVertexCount = 0; |
576 | } |
577 | |
578 | if (resetBits & kRenderTarget_GrGLBackendState) { |
579 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
580 | fHWSRGBFramebuffer = kUnknown_TriState; |
581 | fBoundDrawFramebuffer = 0; |
582 | } |
583 | |
584 | if (resetBits & kPathRendering_GrGLBackendState) { |
585 | if (this->caps()->shaderCaps()->pathRenderingSupport()) { |
586 | this->glPathRendering()->resetContext(); |
587 | } |
588 | } |
589 | |
    // We assume the pixel store state below, so reset it to the default values we expect.
591 | if (resetBits & kPixelStore_GrGLBackendState) { |
592 | if (this->caps()->writePixelsRowBytesSupport()) { |
593 | GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); |
594 | } |
595 | if (this->glCaps().readPixelsRowBytesSupport()) { |
596 | GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0)); |
597 | } |
598 | if (this->glCaps().packFlipYSupport()) { |
599 | GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE)); |
600 | } |
601 | } |
602 | |
603 | if (resetBits & kProgram_GrGLBackendState) { |
604 | fHWProgramID = 0; |
605 | fHWProgram.reset(); |
606 | } |
607 | ++fResetTimestampForTextureParameters; |
608 | } |
609 | |
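// Validates a client-provided GrBackendTexture and extracts a GrGLTexture::Desc from its GL
// texture info. Rejects unknown formats, unsupported targets, and protected textures.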
610 | static bool check_backend_texture(const GrBackendTexture& backendTex, |
611 | const GrGLCaps& caps, |
612 | GrGLTexture::Desc* desc, |
613 | bool skipRectTexSupportCheck = false) { |
614 | GrGLTextureInfo info; |
615 | if (!backendTex.getGLTextureInfo(&info) || !info.fID || !info.fFormat) { |
616 | return false; |
617 | } |
618 | |
619 | desc->fSize = {backendTex.width(), backendTex.height()}; |
620 | desc->fTarget = info.fTarget; |
621 | desc->fID = info.fID; |
622 | desc->fFormat = GrGLFormatFromGLEnum(info.fFormat); |
623 | |
624 | if (desc->fFormat == GrGLFormat::kUnknown) { |
625 | return false; |
626 | } |
627 | if (GR_GL_TEXTURE_EXTERNAL == desc->fTarget) { |
628 | if (!caps.shaderCaps()->externalTextureSupport()) { |
629 | return false; |
630 | } |
631 | } else if (GR_GL_TEXTURE_RECTANGLE == desc->fTarget) { |
632 | if (!caps.rectangleTextureSupport() && !skipRectTexSupportCheck) { |
633 | return false; |
634 | } |
635 | } else if (GR_GL_TEXTURE_2D != desc->fTarget) { |
636 | return false; |
637 | } |
638 | if (backendTex.isProtected()) { |
639 | // Not supported in GL backend at this time. |
640 | return false; |
641 | } |
642 | |
643 | return true; |
644 | } |
645 | |
646 | sk_sp<GrTexture> GrGLGpu::onWrapBackendTexture(const GrBackendTexture& backendTex, |
647 | GrWrapOwnership ownership, |
648 | GrWrapCacheable cacheable, |
649 | GrIOType ioType) { |
650 | GrGLTexture::Desc desc; |
651 | if (!check_backend_texture(backendTex, this->glCaps(), &desc)) { |
652 | return nullptr; |
653 | } |
654 | |
655 | if (kBorrow_GrWrapOwnership == ownership) { |
656 | desc.fOwnership = GrBackendObjectOwnership::kBorrowed; |
657 | } else { |
658 | desc.fOwnership = GrBackendObjectOwnership::kOwned; |
659 | } |
660 | |
661 | GrMipmapStatus mipmapStatus = backendTex.hasMipmaps() ? GrMipmapStatus::kValid |
662 | : GrMipmapStatus::kNotAllocated; |
663 | |
664 | auto texture = GrGLTexture::MakeWrapped(this, mipmapStatus, desc, |
665 | backendTex.getGLTextureParams(), cacheable, ioType); |
666 | // We don't know what parameters are already set on wrapped textures. |
667 | texture->textureParamsModified(); |
668 | return std::move(texture); |
669 | } |
670 | |
671 | static bool check_compressed_backend_texture(const GrBackendTexture& backendTex, |
672 | const GrGLCaps& caps, GrGLTexture::Desc* desc, |
673 | bool skipRectTexSupportCheck = false) { |
674 | GrGLTextureInfo info; |
675 | if (!backendTex.getGLTextureInfo(&info) || !info.fID || !info.fFormat) { |
676 | return false; |
677 | } |
678 | |
679 | desc->fSize = {backendTex.width(), backendTex.height()}; |
680 | desc->fTarget = info.fTarget; |
681 | desc->fID = info.fID; |
682 | desc->fFormat = GrGLFormatFromGLEnum(info.fFormat); |
683 | |
684 | if (desc->fFormat == GrGLFormat::kUnknown) { |
685 | return false; |
686 | } |
687 | |
688 | if (GR_GL_TEXTURE_2D != desc->fTarget) { |
689 | return false; |
690 | } |
691 | if (backendTex.isProtected()) { |
692 | // Not supported in GL backend at this time. |
693 | return false; |
694 | } |
695 | |
696 | return true; |
697 | } |
698 | |
699 | sk_sp<GrTexture> GrGLGpu::onWrapCompressedBackendTexture(const GrBackendTexture& backendTex, |
700 | GrWrapOwnership ownership, |
701 | GrWrapCacheable cacheable) { |
702 | GrGLTexture::Desc desc; |
703 | if (!check_compressed_backend_texture(backendTex, this->glCaps(), &desc)) { |
704 | return nullptr; |
705 | } |
706 | |
707 | if (kBorrow_GrWrapOwnership == ownership) { |
708 | desc.fOwnership = GrBackendObjectOwnership::kBorrowed; |
709 | } else { |
710 | desc.fOwnership = GrBackendObjectOwnership::kOwned; |
711 | } |
712 | |
713 | GrMipmapStatus mipmapStatus = backendTex.hasMipmaps() ? GrMipmapStatus::kValid |
714 | : GrMipmapStatus::kNotAllocated; |
715 | |
716 | auto texture = GrGLTexture::MakeWrapped(this, mipmapStatus, desc, |
717 | backendTex.getGLTextureParams(), cacheable, |
718 | kRead_GrIOType); |
719 | // We don't know what parameters are already set on wrapped textures. |
720 | texture->textureParamsModified(); |
721 | return std::move(texture); |
722 | } |
723 | |
724 | sk_sp<GrTexture> GrGLGpu::onWrapRenderableBackendTexture(const GrBackendTexture& backendTex, |
725 | int sampleCnt, |
726 | GrWrapOwnership ownership, |
727 | GrWrapCacheable cacheable) { |
728 | const GrGLCaps& caps = this->glCaps(); |
729 | |
730 | GrGLTexture::Desc desc; |
731 | if (!check_backend_texture(backendTex, this->glCaps(), &desc)) { |
732 | return nullptr; |
733 | } |
734 | SkASSERT(caps.isFormatRenderable(desc.fFormat, sampleCnt)); |
735 | SkASSERT(caps.isFormatTexturable(desc.fFormat)); |
736 | |
737 | // We don't support rendering to a EXTERNAL texture. |
738 | if (GR_GL_TEXTURE_EXTERNAL == desc.fTarget) { |
739 | return nullptr; |
740 | } |
741 | |
742 | if (kBorrow_GrWrapOwnership == ownership) { |
743 | desc.fOwnership = GrBackendObjectOwnership::kBorrowed; |
744 | } else { |
745 | desc.fOwnership = GrBackendObjectOwnership::kOwned; |
746 | } |
747 | |
748 | |
749 | sampleCnt = caps.getRenderTargetSampleCount(sampleCnt, desc.fFormat); |
750 | SkASSERT(sampleCnt); |
751 | |
752 | GrGLRenderTarget::IDs rtIDs; |
753 | if (!this->createRenderTargetObjects(desc, sampleCnt, &rtIDs)) { |
754 | return nullptr; |
755 | } |
756 | |
757 | GrMipmapStatus mipmapStatus = backendTex.hasMipmaps() ? GrMipmapStatus::kDirty |
758 | : GrMipmapStatus::kNotAllocated; |
759 | |
760 | sk_sp<GrGLTextureRenderTarget> texRT(GrGLTextureRenderTarget::MakeWrapped( |
761 | this, sampleCnt, desc, backendTex.getGLTextureParams(), rtIDs, cacheable, |
762 | mipmapStatus)); |
763 | texRT->baseLevelWasBoundToFBO(); |
764 | // We don't know what parameters are already set on wrapped textures. |
765 | texRT->textureParamsModified(); |
766 | return std::move(texRT); |
767 | } |
768 | |
769 | sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) { |
770 | GrGLFramebufferInfo info; |
771 | if (!backendRT.getGLFramebufferInfo(&info)) { |
772 | return nullptr; |
773 | } |
774 | |
775 | if (backendRT.isProtected()) { |
776 | // Not supported in GL at this time. |
777 | return nullptr; |
778 | } |
779 | |
780 | const auto format = backendRT.getBackendFormat().asGLFormat(); |
781 | if (!this->glCaps().isFormatRenderable(format, backendRT.sampleCnt())) { |
782 | return nullptr; |
783 | } |
784 | |
785 | GrGLRenderTarget::IDs rtIDs; |
786 | rtIDs.fRTFBOID = info.fFBOID; |
787 | rtIDs.fMSColorRenderbufferID = 0; |
788 | rtIDs.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID; |
789 | rtIDs.fRTFBOOwnership = GrBackendObjectOwnership::kBorrowed; |
790 | |
791 | int sampleCount = this->glCaps().getRenderTargetSampleCount(backendRT.sampleCnt(), format); |
792 | |
793 | return GrGLRenderTarget::MakeWrapped(this, backendRT.dimensions(), format, sampleCount, rtIDs, |
794 | backendRT.stencilBits()); |
795 | } |
796 | |
797 | sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTexture& tex, |
798 | int sampleCnt) { |
799 | GrGLTexture::Desc desc; |
800 | // We do not check whether texture rectangle is supported by Skia - if the caller provided us |
    // with a texture rectangle, we assume the necessary support exists.
802 | if (!check_backend_texture(tex, this->glCaps(), &desc, true)) { |
803 | return nullptr; |
804 | } |
805 | |
806 | if (!this->glCaps().isFormatRenderable(desc.fFormat, sampleCnt)) { |
807 | return nullptr; |
808 | } |
809 | |
810 | const int sampleCount = this->glCaps().getRenderTargetSampleCount(sampleCnt, desc.fFormat); |
811 | GrGLRenderTarget::IDs rtIDs; |
812 | if (!this->createRenderTargetObjects(desc, sampleCount, &rtIDs)) { |
813 | return nullptr; |
814 | } |
815 | return GrGLRenderTarget::MakeWrapped(this, desc.fSize, desc.fFormat, sampleCount, rtIDs, 0); |
816 | } |
817 | |
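// Returns true if pixel writes/transfers are supported for the given texture; null textures and
// GR_GL_TEXTURE_EXTERNAL targets are rejected.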
818 | static bool check_write_and_transfer_input(GrGLTexture* glTex) { |
819 | if (!glTex) { |
820 | return false; |
821 | } |
822 | |
823 | // Write or transfer of pixels is not implemented for TEXTURE_EXTERNAL textures |
824 | if (GR_GL_TEXTURE_EXTERNAL == glTex->target()) { |
825 | return false; |
826 | } |
827 | |
828 | return true; |
829 | } |
830 | |
831 | bool GrGLGpu::onWritePixels(GrSurface* surface, int left, int top, int width, int height, |
832 | GrColorType surfaceColorType, GrColorType srcColorType, |
833 | const GrMipLevel texels[], int mipLevelCount, |
834 | bool prepForTexSampling) { |
835 | auto glTex = static_cast<GrGLTexture*>(surface->asTexture()); |
836 | |
837 | if (!check_write_and_transfer_input(glTex)) { |
838 | return false; |
839 | } |
840 | |
841 | this->bindTextureToScratchUnit(glTex->target(), glTex->textureID()); |
842 | |
843 | SkASSERT(!GrGLFormatIsCompressed(glTex->format())); |
844 | SkIRect dstRect = SkIRect::MakeXYWH(left, top, width, height); |
845 | return this->uploadColorTypeTexData(glTex->format(), surfaceColorType, glTex->dimensions(), |
846 | glTex->target(), dstRect, srcColorType, texels, |
847 | mipLevelCount); |
848 | } |
849 | |
850 | bool GrGLGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int width, int height, |
851 | GrColorType textureColorType, GrColorType bufferColorType, |
852 | GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) { |
853 | GrGLTexture* glTex = static_cast<GrGLTexture*>(texture); |
854 | |
855 | // Can't transfer compressed data |
856 | SkASSERT(!GrGLFormatIsCompressed(glTex->format())); |
857 | |
858 | if (!check_write_and_transfer_input(glTex)) { |
859 | return false; |
860 | } |
861 | |
    static_assert(sizeof(int) == sizeof(int32_t));
863 | if (width <= 0 || height <= 0) { |
864 | return false; |
865 | } |
866 | |
867 | this->bindTextureToScratchUnit(glTex->target(), glTex->textureID()); |
868 | |
869 | SkASSERT(!transferBuffer->isMapped()); |
870 | SkASSERT(!transferBuffer->isCpuBuffer()); |
871 | const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer); |
872 | this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer); |
873 | |
874 | SkDEBUGCODE( |
875 | SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height); |
876 | SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height()); |
877 | SkASSERT(bounds.contains(subRect)); |
878 | ) |
879 | |
880 | size_t bpp = GrColorTypeBytesPerPixel(bufferColorType); |
881 | const size_t trimRowBytes = width * bpp; |
882 | const void* pixels = (void*)offset; |
883 | if (width < 0 || height < 0) { |
884 | return false; |
885 | } |
886 | |
887 | bool restoreGLRowLength = false; |
888 | if (trimRowBytes != rowBytes) { |
889 | // we should have checked for this support already |
890 | SkASSERT(this->glCaps().writePixelsRowBytesSupport()); |
891 | GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowBytes / bpp)); |
892 | restoreGLRowLength = true; |
893 | } |
894 | |
895 | GrGLFormat textureFormat = glTex->format(); |
896 | // External format and type come from the upload data. |
897 | GrGLenum externalFormat = 0; |
898 | GrGLenum externalType = 0; |
899 | this->glCaps().getTexSubImageExternalFormatAndType( |
900 | textureFormat, textureColorType, bufferColorType, &externalFormat, &externalType); |
901 | if (!externalFormat || !externalType) { |
902 | return false; |
903 | } |
904 | |
905 | GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1)); |
906 | GL_CALL(TexSubImage2D(glTex->target(), |
907 | 0, |
908 | left, top, |
909 | width, |
910 | height, |
911 | externalFormat, externalType, |
912 | pixels)); |
913 | |
914 | if (restoreGLRowLength) { |
915 | GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); |
916 | } |
917 | |
918 | return true; |
919 | } |
920 | |
921 | bool GrGLGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height, |
922 | GrColorType surfaceColorType, GrColorType dstColorType, |
923 | GrGpuBuffer* transferBuffer, size_t offset) { |
924 | auto* glBuffer = static_cast<GrGLBuffer*>(transferBuffer); |
925 | this->bindBuffer(GrGpuBufferType::kXferGpuToCpu, glBuffer); |
926 | auto offsetAsPtr = reinterpret_cast<void*>(offset); |
927 | return this->readOrTransferPixelsFrom(surface, left, top, width, height, surfaceColorType, |
928 | dstColorType, offsetAsPtr, width); |
929 | } |
930 | |
931 | void GrGLGpu::unbindCpuToGpuXferBuffer() { |
932 | auto* xferBufferState = this->hwBufferState(GrGpuBufferType::kXferCpuToGpu); |
933 | if (!xferBufferState->fBoundBufferUniqueID.isInvalid()) { |
934 | GL_CALL(BindBuffer(xferBufferState->fGLTarget, 0)); |
935 | xferBufferState->invalidate(); |
936 | } |
937 | } |
938 | |
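// Translates the color types into a GL external format/type pair and forwards the upload to
// uploadTexData(). Compressed formats must use uploadCompressedTexData() instead.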
939 | bool GrGLGpu::uploadColorTypeTexData(GrGLFormat textureFormat, |
940 | GrColorType textureColorType, |
941 | SkISize texDims, |
942 | GrGLenum target, |
943 | SkIRect dstRect, |
944 | GrColorType srcColorType, |
945 | const GrMipLevel texels[], |
946 | int mipLevelCount) { |
947 | // If we're uploading compressed data then we should be using uploadCompressedTexData |
948 | SkASSERT(!GrGLFormatIsCompressed(textureFormat)); |
949 | |
950 | SkASSERT(this->glCaps().isFormatTexturable(textureFormat)); |
951 | |
952 | size_t bpp = GrColorTypeBytesPerPixel(srcColorType); |
953 | |
954 | // External format and type come from the upload data. |
955 | GrGLenum externalFormat; |
956 | GrGLenum externalType; |
957 | this->glCaps().getTexSubImageExternalFormatAndType( |
958 | textureFormat, textureColorType, srcColorType, &externalFormat, &externalType); |
959 | if (!externalFormat || !externalType) { |
960 | return false; |
961 | } |
962 | this->uploadTexData(texDims, target, dstRect, externalFormat, externalType, bpp, texels, |
963 | mipLevelCount); |
964 | return true; |
965 | } |
966 | |
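// Clears the mip levels selected by levelMask to a constant color by generating pixel data on the
// CPU and uploading it with the format's default external format and type.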
967 | bool GrGLGpu::uploadColorToTex(GrGLFormat textureFormat, |
968 | SkISize texDims, |
969 | GrGLenum target, |
970 | SkColor4f color, |
971 | uint32_t levelMask) { |
972 | GrColorType colorType; |
973 | GrGLenum externalFormat, externalType; |
974 | this->glCaps().getTexSubImageDefaultFormatTypeAndColorType(textureFormat, &externalFormat, |
975 | &externalType, &colorType); |
976 | if (colorType == GrColorType::kUnknown) { |
977 | return false; |
978 | } |
979 | |
980 | std::unique_ptr<char[]> pixelStorage; |
981 | size_t bpp = 0; |
982 | int numLevels = SkMipmap::ComputeLevelCount(texDims) + 1; |
983 | SkSTArray<16, GrMipLevel> levels; |
984 | levels.resize(numLevels); |
985 | SkISize levelDims = texDims; |
986 | for (int i = 0; i < numLevels; ++i, levelDims = {std::max(levelDims.width() >> 1, 1), |
987 | std::max(levelDims.height() >> 1, 1)}) { |
988 | if (levelMask & (1 << i)) { |
989 | if (!pixelStorage) { |
990 | // Make one tight image at the first size and reuse it for smaller levels. |
991 | GrImageInfo ii(colorType, kUnpremul_SkAlphaType, nullptr, levelDims); |
992 | size_t rb = ii.minRowBytes(); |
993 | pixelStorage.reset(new char[rb * levelDims.height()]); |
994 | if (!GrClearImage(ii, pixelStorage.get(), ii.minRowBytes(), color)) { |
995 | return false; |
996 | } |
997 | bpp = ii.bpp(); |
998 | } |
999 | levels[i] = {pixelStorage.get(), levelDims.width()*bpp}; |
1000 | } |
1001 | } |
1002 | this->uploadTexData(texDims, target, SkIRect::MakeSize(texDims), externalFormat, externalType, |
1003 | bpp, levels.begin(), levels.count()); |
1004 | return true; |
1005 | } |
1006 | |
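// Uploads the given mip levels to the currently bound texture via TexSubImage2D, setting
// GR_GL_UNPACK_ROW_LENGTH when a level's row bytes differ from the tight row size.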
1007 | void GrGLGpu::uploadTexData(SkISize texDims, |
1008 | GrGLenum target, |
1009 | SkIRect dstRect, |
1010 | GrGLenum externalFormat, |
1011 | GrGLenum externalType, |
1012 | size_t bpp, |
1013 | const GrMipLevel texels[], |
1014 | int mipLevelCount) { |
1015 | SkASSERT(!texDims.isEmpty()); |
1016 | SkASSERT(!dstRect.isEmpty()); |
1017 | SkASSERT(SkIRect::MakeSize(texDims).contains(dstRect)); |
1018 | SkASSERT(mipLevelCount > 0 && mipLevelCount <= SkMipmap::ComputeLevelCount(texDims) + 1); |
1019 | SkASSERT(mipLevelCount == 1 || dstRect == SkIRect::MakeSize(texDims)); |
1020 | |
1021 | const GrGLCaps& caps = this->glCaps(); |
1022 | |
1023 | bool restoreGLRowLength = false; |
1024 | |
1025 | this->unbindCpuToGpuXferBuffer(); |
1026 | GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1)); |
1027 | |
1028 | SkISize dims = dstRect.size(); |
1029 | for (int level = 0; level < mipLevelCount; ++level, dims = {std::max(dims.width() >> 1, 1), |
1030 | std::max(dims.height() >> 1, 1)}) { |
1031 | if (!texels[level].fPixels) { |
1032 | continue; |
1033 | } |
1034 | const size_t trimRowBytes = dims.width() * bpp; |
1035 | const size_t rowBytes = texels[level].fRowBytes; |
1036 | |
1037 | if (caps.writePixelsRowBytesSupport() && (rowBytes != trimRowBytes || restoreGLRowLength)) { |
1038 | GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp); |
1039 | GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength)); |
1040 | restoreGLRowLength = true; |
1041 | } else { |
1042 | SkASSERT(rowBytes == trimRowBytes); |
1043 | } |
1044 | |
1045 | GL_CALL(TexSubImage2D(target, level, dstRect.x(), dstRect.y(), dims.width(), dims.height(), |
1046 | externalFormat, externalType, texels[level].fPixels)); |
1047 | } |
1048 | if (restoreGLRowLength) { |
1049 | SkASSERT(caps.writePixelsRowBytesSupport()); |
1050 | GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0)); |
1051 | } |
1052 | } |
1053 | |
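// Uploads compressed data for all requested mip levels. Uses immutable storage (TexStorage2D)
// plus CompressedTexSubImage2D when supported, and CompressedTexImage2D otherwise.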
1054 | bool GrGLGpu::uploadCompressedTexData(SkImage::CompressionType compressionType, |
1055 | GrGLFormat format, |
1056 | SkISize dimensions, |
1057 | GrMipmapped mipMapped, |
1058 | GrGLenum target, |
1059 | const void* data, size_t dataSize) { |
1060 | SkASSERT(format != GrGLFormat::kUnknown); |
1061 | const GrGLCaps& caps = this->glCaps(); |
1062 | |
1063 | // We only need the internal format for compressed 2D textures. |
1064 | GrGLenum internalFormat = caps.getTexImageOrStorageInternalFormat(format); |
1065 | if (!internalFormat) { |
1066 | return false; |
1067 | } |
1068 | |
1069 | SkASSERT(compressionType != SkImage::CompressionType::kNone); |
1070 | |
1071 | bool useTexStorage = caps.formatSupportsTexStorage(format); |
1072 | |
1073 | int numMipLevels = 1; |
1074 | if (mipMapped == GrMipmapped::kYes) { |
1075 | numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height())+1; |
1076 | } |
1077 | |
    // TODO: Make sure that the width and height that we pass to OpenGL
    // are multiples of the block size.
1080 | |
1081 | if (useTexStorage) { |
1082 | // We never resize or change formats of textures. |
1083 | GrGLenum error = GL_ALLOC_CALL(TexStorage2D(target, numMipLevels, internalFormat, |
1084 | dimensions.width(), dimensions.height())); |
1085 | if (error != GR_GL_NO_ERROR) { |
1086 | return false; |
1087 | } |
1088 | |
1089 | size_t offset = 0; |
        for (int level = 0; level < numMipLevels; ++level) {
            size_t levelDataSize = SkCompressedDataSize(compressionType, dimensions,
1093 | nullptr, false); |
1094 | |
1095 | error = GL_ALLOC_CALL(CompressedTexSubImage2D(target, |
1096 | level, |
1097 | 0, // left |
1098 | 0, // top |
1099 | dimensions.width(), |
1100 | dimensions.height(), |
1101 | internalFormat, |
1102 | SkToInt(levelDataSize), |
1103 | &((char*)data)[offset])); |
1104 | |
1105 | if (error != GR_GL_NO_ERROR) { |
1106 | return false; |
1107 | } |
1108 | |
1109 | offset += levelDataSize; |
1110 | dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)}; |
1111 | } |
1112 | } else { |
1113 | size_t offset = 0; |
1114 | |
1115 | for (int level = 0; level < numMipLevels; ++level) { |
1116 | size_t levelDataSize = SkCompressedDataSize(compressionType, dimensions, |
1117 | nullptr, false); |
1118 | |
1119 | const char* rawLevelData = &((char*)data)[offset]; |
1120 | GrGLenum error = GL_ALLOC_CALL(CompressedTexImage2D(target, |
1121 | level, |
1122 | internalFormat, |
1123 | dimensions.width(), |
1124 | dimensions.height(), |
1125 | 0, // border |
1126 | SkToInt(levelDataSize), |
1127 | rawLevelData)); |
1128 | |
1129 | if (error != GR_GL_NO_ERROR) { |
1130 | return false; |
1131 | } |
1132 | |
1133 | offset += levelDataSize; |
1134 | dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)}; |
1135 | } |
1136 | } |
1137 | return true; |
1138 | } |
1139 | |
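// Allocates storage for a multisampled renderbuffer using the entry point that matches the caps'
// MSAA FBO type. Returns false if the allocation produced a GL error (e.g. out of memory).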
1140 | bool GrGLGpu::renderbufferStorageMSAA(const GrGLContext& ctx, int sampleCount, GrGLenum format, |
1141 | int width, int height) { |
1142 | SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType()); |
1143 | GrGLenum error; |
1144 | switch (ctx.caps()->msFBOType()) { |
1145 | case GrGLCaps::kStandard_MSFBOType: |
1146 | error = GL_ALLOC_CALL(RenderbufferStorageMultisample(GR_GL_RENDERBUFFER, sampleCount, |
1147 | format, width, height)); |
1148 | break; |
1149 | case GrGLCaps::kES_Apple_MSFBOType: |
1150 | error = GL_ALLOC_CALL(RenderbufferStorageMultisampleES2APPLE( |
1151 | GR_GL_RENDERBUFFER, sampleCount, format, width, height)); |
1152 | break; |
1153 | case GrGLCaps::kES_EXT_MsToTexture_MSFBOType: |
1154 | case GrGLCaps::kES_IMG_MsToTexture_MSFBOType: |
1155 | error = GL_ALLOC_CALL(RenderbufferStorageMultisampleES2EXT( |
1156 | GR_GL_RENDERBUFFER, sampleCount, format, width, height)); |
1157 | break; |
1158 | case GrGLCaps::kNone_MSFBOType: |
1159 | SkUNREACHABLE; |
1160 | break; |
1161 | } |
1162 | return error == GR_GL_NO_ERROR; |
1163 | } |
1164 | |
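// Creates the FBO(s), and MSAA color renderbuffer if needed, required to render to the given
// texture, and attaches the color attachment(s). On failure any partially created objects are
// deleted.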
1165 | bool GrGLGpu::createRenderTargetObjects(const GrGLTexture::Desc& desc, |
1166 | int sampleCount, |
1167 | GrGLRenderTarget::IDs* rtIDs) { |
1168 | rtIDs->fMSColorRenderbufferID = 0; |
1169 | rtIDs->fRTFBOID = 0; |
1170 | rtIDs->fRTFBOOwnership = GrBackendObjectOwnership::kOwned; |
1171 | rtIDs->fTexFBOID = 0; |
1172 | |
1173 | GrGLenum colorRenderbufferFormat = 0; // suppress warning |
1174 | |
1175 | if (desc.fFormat == GrGLFormat::kUnknown) { |
1176 | goto FAILED; |
1177 | } |
1178 | |
1179 | if (sampleCount > 1 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) { |
1180 | goto FAILED; |
1181 | } |
1182 | |
1183 | GL_CALL(GenFramebuffers(1, &rtIDs->fTexFBOID)); |
1184 | if (!rtIDs->fTexFBOID) { |
1185 | goto FAILED; |
1186 | } |
1187 | |
    // If we are using multisampling we will create two FBOs. We render to one and then resolve to
    // the texture bound to the other. The exception is the IMG multisample extension. With this
    // extension the texture is multisampled when rendered to and then auto-resolves when it is
    // rendered from.
1192 | if (sampleCount > 1 && this->glCaps().usesMSAARenderBuffers()) { |
1193 | GL_CALL(GenFramebuffers(1, &rtIDs->fRTFBOID)); |
1194 | GL_CALL(GenRenderbuffers(1, &rtIDs->fMSColorRenderbufferID)); |
1195 | if (!rtIDs->fRTFBOID || !rtIDs->fMSColorRenderbufferID) { |
1196 | goto FAILED; |
1197 | } |
1198 | colorRenderbufferFormat = this->glCaps().getRenderbufferInternalFormat(desc.fFormat); |
1199 | } else { |
1200 | rtIDs->fRTFBOID = rtIDs->fTexFBOID; |
1201 | } |
1202 | |
1203 | // below here we may bind the FBO |
1204 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
1205 | if (rtIDs->fRTFBOID != rtIDs->fTexFBOID) { |
1206 | SkASSERT(sampleCount > 1); |
1207 | GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, rtIDs->fMSColorRenderbufferID)); |
1208 | if (!this->renderbufferStorageMSAA(*fGLContext, sampleCount, colorRenderbufferFormat, |
1209 | desc.fSize.width(), desc.fSize.height())) { |
1210 | goto FAILED; |
1211 | } |
1212 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, rtIDs->fRTFBOID); |
1213 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
1214 | GR_GL_COLOR_ATTACHMENT0, |
1215 | GR_GL_RENDERBUFFER, |
1216 | rtIDs->fMSColorRenderbufferID)); |
1217 | } |
1218 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, rtIDs->fTexFBOID); |
1219 | |
1220 | if (this->glCaps().usesImplicitMSAAResolve() && sampleCount > 1) { |
1221 | GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER, |
1222 | GR_GL_COLOR_ATTACHMENT0, |
1223 | desc.fTarget, |
1224 | desc.fID, |
1225 | 0, |
1226 | sampleCount)); |
1227 | } else { |
1228 | GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, |
1229 | GR_GL_COLOR_ATTACHMENT0, |
1230 | desc.fTarget, |
1231 | desc.fID, |
1232 | 0)); |
1233 | } |
1234 | |
1235 | return true; |
1236 | |
1237 | FAILED: |
1238 | if (rtIDs->fMSColorRenderbufferID) { |
1239 | GL_CALL(DeleteRenderbuffers(1, &rtIDs->fMSColorRenderbufferID)); |
1240 | } |
1241 | if (rtIDs->fRTFBOID != rtIDs->fTexFBOID) { |
1242 | this->deleteFramebuffer(rtIDs->fRTFBOID); |
1243 | } |
1244 | if (rtIDs->fTexFBOID) { |
1245 | this->deleteFramebuffer(rtIDs->fTexFBOID); |
1246 | } |
1247 | return false; |
1248 | } |
1249 | |
// It can be useful to set a breakpoint here to know when createTexture fails.
1251 | static sk_sp<GrTexture> return_null_texture() { |
1252 | // SkDEBUGFAIL("null texture"); |
1253 | return nullptr; |
1254 | } |
1255 | |
1256 | static GrGLTextureParameters::SamplerOverriddenState set_initial_texture_params( |
1257 | const GrGLInterface* interface, GrGLenum target) { |
1258 | // Some drivers like to know filter/wrap before seeing glTexImage2D. Some |
1259 | // drivers have a bug where an FBO won't be complete if it includes a |
1260 | // texture that is not mipmap complete (considering the filter in use). |
1261 | GrGLTextureParameters::SamplerOverriddenState state; |
1262 | state.fMinFilter = GR_GL_NEAREST; |
1263 | state.fMagFilter = GR_GL_NEAREST; |
1264 | state.fWrapS = GR_GL_CLAMP_TO_EDGE; |
1265 | state.fWrapT = GR_GL_CLAMP_TO_EDGE; |
1266 | GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, state.fMagFilter)); |
1267 | GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, state.fMinFilter)); |
1268 | GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_WRAP_S, state.fWrapS)); |
1269 | GR_GL_CALL(interface, TexParameteri(target, GR_GL_TEXTURE_WRAP_T, state.fWrapT)); |
1270 | return state; |
1271 | } |
1272 | |
1273 | sk_sp<GrTexture> GrGLGpu::onCreateTexture(SkISize dimensions, |
1274 | const GrBackendFormat& format, |
1275 | GrRenderable renderable, |
1276 | int renderTargetSampleCnt, |
1277 | SkBudgeted budgeted, |
1278 | GrProtected isProtected, |
1279 | int mipLevelCount, |
1280 | uint32_t levelClearMask) { |
1281 | // We don't support protected textures in GL. |
1282 | if (isProtected == GrProtected::kYes) { |
1283 | return nullptr; |
1284 | } |
1285 | SkASSERT(GrGLCaps::kNone_MSFBOType != this->glCaps().msFBOType() || renderTargetSampleCnt == 1); |
1286 | |
1287 | SkASSERT(mipLevelCount > 0); |
1288 | GrMipmapStatus mipmapStatus = |
1289 | mipLevelCount > 1 ? GrMipmapStatus::kDirty : GrMipmapStatus::kNotAllocated; |
1290 | GrGLTextureParameters::SamplerOverriddenState initialState; |
1291 | GrGLTexture::Desc texDesc; |
1292 | texDesc.fSize = dimensions; |
1293 | switch (format.textureType()) { |
1294 | case GrTextureType::kExternal: |
1295 | case GrTextureType::kNone: |
1296 | return nullptr; |
1297 | case GrTextureType::k2D: |
1298 | texDesc.fTarget = GR_GL_TEXTURE_2D; |
1299 | break; |
1300 | case GrTextureType::kRectangle: |
1301 | if (mipLevelCount > 1 || !this->glCaps().rectangleTextureSupport()) { |
1302 | return nullptr; |
1303 | } |
1304 | texDesc.fTarget = GR_GL_TEXTURE_RECTANGLE; |
1305 | break; |
1306 | } |
1307 | texDesc.fFormat = format.asGLFormat(); |
1308 | texDesc.fOwnership = GrBackendObjectOwnership::kOwned; |
1309 | SkASSERT(texDesc.fFormat != GrGLFormat::kUnknown); |
1310 | SkASSERT(!GrGLFormatIsCompressed(texDesc.fFormat)); |
1311 | |
1312 | texDesc.fID = this->createTexture(dimensions, texDesc.fFormat, texDesc.fTarget, renderable, |
1313 | &initialState, mipLevelCount); |
1314 | |
1315 | if (!texDesc.fID) { |
1316 | return return_null_texture(); |
1317 | } |
1318 | |
1319 | sk_sp<GrGLTexture> tex; |
1320 | if (renderable == GrRenderable::kYes) { |
        // Unbind the texture from the texture unit before binding it to the framebuffer.
1322 | GL_CALL(BindTexture(texDesc.fTarget, 0)); |
1323 | GrGLRenderTarget::IDs rtIDDesc; |
1324 | |
1325 | if (!this->createRenderTargetObjects(texDesc, renderTargetSampleCnt, &rtIDDesc)) { |
1326 | GL_CALL(DeleteTextures(1, &texDesc.fID)); |
1327 | return return_null_texture(); |
1328 | } |
1329 | tex = sk_make_sp<GrGLTextureRenderTarget>( |
1330 | this, budgeted, renderTargetSampleCnt, texDesc, rtIDDesc, mipmapStatus); |
1331 | tex->baseLevelWasBoundToFBO(); |
1332 | } else { |
1333 | tex = sk_make_sp<GrGLTexture>(this, budgeted, texDesc, mipmapStatus); |
1334 | } |
1335 | // The non-sampler params are still at their default values. |
1336 | tex->parameters()->set(&initialState, GrGLTextureParameters::NonsamplerState(), |
1337 | fResetTimestampForTextureParameters); |
1338 | if (levelClearMask) { |
1339 | if (this->glCaps().clearTextureSupport()) { |
1340 | GrGLenum externalFormat, externalType; |
1341 | GrColorType colorType; |
1342 | this->glCaps().getTexSubImageDefaultFormatTypeAndColorType( |
1343 | texDesc.fFormat, &externalFormat, &externalType, &colorType); |
1344 | for (int i = 0; i < mipLevelCount; ++i) { |
1345 | if (levelClearMask & (1U << i)) { |
1346 | GL_CALL(ClearTexImage(tex->textureID(), i, externalFormat, externalType, |
1347 | nullptr)); |
1348 | } |
1349 | } |
1350 | } else if (this->glCaps().canFormatBeFBOColorAttachment(format.asGLFormat()) && |
1351 | !this->glCaps().performColorClearsAsDraws()) { |
1352 | this->flushScissorTest(GrScissorTest::kDisabled); |
1353 | this->disableWindowRectangles(); |
1354 | this->flushColorWrite(true); |
1355 | this->flushClearColor(SK_PMColor4fTRANSPARENT); |
1356 | for (int i = 0; i < mipLevelCount; ++i) { |
1357 | if (levelClearMask & (1U << i)) { |
1358 | this->bindSurfaceFBOForPixelOps(tex.get(), i, GR_GL_FRAMEBUFFER, |
1359 | kDst_TempFBOTarget); |
1360 | GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT)); |
1361 | this->unbindSurfaceFBOForPixelOps(tex.get(), i, GR_GL_FRAMEBUFFER); |
1362 | } |
1363 | } |
1364 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
1365 | } else { |
1366 | this->bindTextureToScratchUnit(texDesc.fTarget, tex->textureID()); |
1367 | static constexpr SkColor4f kZeroColor = {0, 0, 0, 0}; |
1368 | this->uploadColorToTex(texDesc.fFormat, texDesc.fSize, texDesc.fTarget, kZeroColor, |
1369 | levelClearMask); |
1370 | } |
1371 | } |
1372 | return std::move(tex); |
1373 | } |
1374 | |
1375 | sk_sp<GrTexture> GrGLGpu::onCreateCompressedTexture(SkISize dimensions, |
1376 | const GrBackendFormat& format, |
1377 | SkBudgeted budgeted, |
1378 | GrMipmapped mipMapped, |
1379 | GrProtected isProtected, |
1380 | const void* data, size_t dataSize) { |
1381 | // We don't support protected textures in GL. |
1382 | if (isProtected == GrProtected::kYes) { |
1383 | return nullptr; |
1384 | } |
1385 | SkImage::CompressionType compression = GrBackendFormatToCompressionType(format); |
1386 | |
1387 | GrGLTextureParameters::SamplerOverriddenState initialState; |
1388 | GrGLTexture::Desc desc; |
1389 | desc.fSize = dimensions; |
1390 | desc.fTarget = GR_GL_TEXTURE_2D; |
1391 | desc.fOwnership = GrBackendObjectOwnership::kOwned; |
1392 | desc.fFormat = format.asGLFormat(); |
1393 | desc.fID = this->createCompressedTexture2D(desc.fSize, compression, desc.fFormat, |
1394 | mipMapped, &initialState); |
1395 | if (!desc.fID) { |
1396 | return nullptr; |
1397 | } |
1398 | |
1399 | if (data) { |
1400 | if (!this->uploadCompressedTexData(compression, desc.fFormat, dimensions, mipMapped, |
1401 | GR_GL_TEXTURE_2D, data, dataSize)) { |
1402 | GL_CALL(DeleteTextures(1, &desc.fID)); |
1403 | return nullptr; |
1404 | } |
1405 | } |
1406 | |
1407 | // Unbind this texture from the scratch texture unit. |
1408 | this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, 0); |
1409 | |
1410 | GrMipmapStatus mipmapStatus = mipMapped == GrMipmapped::kYes |
1411 | ? GrMipmapStatus::kValid |
1412 | : GrMipmapStatus::kNotAllocated; |
1413 | |
1414 | auto tex = sk_make_sp<GrGLTexture>(this, budgeted, desc, mipmapStatus); |
1415 | // The non-sampler params are still at their default values. |
1416 | tex->parameters()->set(&initialState, GrGLTextureParameters::NonsamplerState(), |
1417 | fResetTimestampForTextureParameters); |
1418 | return std::move(tex); |
1419 | } |
1420 | |
1421 | GrBackendTexture GrGLGpu::onCreateCompressedBackendTexture( |
1422 | SkISize dimensions, const GrBackendFormat& format, GrMipmapped mipMapped, |
1423 | GrProtected isProtected) { |
1424 | // We don't support protected textures in GL. |
1425 | if (isProtected == GrProtected::kYes) { |
1426 | return {}; |
1427 | } |
1428 | |
1429 | this->handleDirtyContext(); |
1430 | |
1431 | GrGLFormat glFormat = format.asGLFormat(); |
1432 | if (glFormat == GrGLFormat::kUnknown) { |
1433 | return {}; |
1434 | } |
1435 | |
1436 | SkImage::CompressionType compression = GrBackendFormatToCompressionType(format); |
1437 | |
1438 | GrGLTextureInfo info; |
1439 | GrGLTextureParameters::SamplerOverriddenState initialState; |
1440 | |
1441 | info.fTarget = GR_GL_TEXTURE_2D; |
1442 | info.fFormat = GrGLFormatToEnum(glFormat); |
1443 | info.fID = this->createCompressedTexture2D(dimensions, compression, glFormat, |
1444 | mipMapped, &initialState); |
1445 | if (!info.fID) { |
1446 | return {}; |
1447 | } |
1448 | |
1449 | // Unbind this texture from the scratch texture unit. |
1450 | this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, 0); |
1451 | |
1452 | auto parameters = sk_make_sp<GrGLTextureParameters>(); |
1453 | // The non-sampler params are still at their default values. |
1454 | parameters->set(&initialState, GrGLTextureParameters::NonsamplerState(), |
1455 | fResetTimestampForTextureParameters); |
1456 | |
1457 | return GrBackendTexture(dimensions.width(), dimensions.height(), mipMapped, info, |
1458 | std::move(parameters)); |
1459 | } |
1460 | |
1461 | bool GrGLGpu::onUpdateCompressedBackendTexture(const GrBackendTexture& backendTexture, |
1462 | sk_sp<GrRefCntedCallback> finishedCallback, |
1463 | const BackendTextureData* data) { |
1464 | SkASSERT(data && data->type() != BackendTextureData::Type::kPixmaps); |
1465 | |
1466 | GrGLTextureInfo info; |
1467 | SkAssertResult(backendTexture.getGLTextureInfo(&info)); |
1468 | |
1469 | GrBackendFormat format = backendTexture.getBackendFormat(); |
1470 | GrGLFormat glFormat = format.asGLFormat(); |
1471 | if (glFormat == GrGLFormat::kUnknown) { |
1472 | return false; |
1473 | } |
1474 | SkImage::CompressionType compression = GrBackendFormatToCompressionType(format); |
1475 | |
1476 | GrMipmapped mipMapped = backendTexture.hasMipmaps() ? GrMipmapped::kYes : GrMipmapped::kNo; |
1477 | |
1478 | const char* rawData = nullptr; |
1479 | size_t rawDataSize = 0; |
1480 | SkAutoMalloc am; |
1481 | if (data->type() == BackendTextureData::Type::kCompressed) { |
1482 | rawData = (const char*)data->compressedData(); |
1483 | rawDataSize = data->compressedSize(); |
1484 | } else { |
1485 | SkASSERT(data->type() == BackendTextureData::Type::kColor); |
1486 | SkASSERT(compression != SkImage::CompressionType::kNone); |
1487 | |
1488 | rawDataSize = SkCompressedDataSize(compression, backendTexture.dimensions(), nullptr, |
1489 | backendTexture.hasMipmaps()); |
1490 | |
1491 | am.reset(rawDataSize); |
1492 | |
1493 | GrFillInCompressedData(compression, backendTexture.dimensions(), mipMapped, (char*)am.get(), |
1494 | data->color()); |
1495 | |
1496 | rawData = (const char*)am.get(); |
1497 | } |
1498 | |
1499 | this->bindTextureToScratchUnit(info.fTarget, info.fID); |
1500 | |
1501 | // If we have mips make sure the base level is set to 0 and the max level set to numMipLevels-1 |
1502 | // so that the uploads go to the right levels. |
    if (backendTexture.hasMipmaps() && this->glCaps().mipmapLevelAndLodControlSupport()) {
1504 | auto params = backendTexture.getGLTextureParams(); |
1505 | GrGLTextureParameters::NonsamplerState nonsamplerState = params->nonsamplerState(); |
1506 | if (params->nonsamplerState().fBaseMipMapLevel != 0) { |
1507 | GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_BASE_LEVEL, 0)); |
1508 | nonsamplerState.fBaseMipMapLevel = 0; |
1509 | } |
1510 | int numMipLevels = |
1511 | SkMipmap::ComputeLevelCount(backendTexture.width(), backendTexture.height()) + 1; |
1512 | if (params->nonsamplerState().fMaxMipmapLevel != (numMipLevels - 1)) { |
1513 | GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MAX_LEVEL, numMipLevels - 1)); |
            nonsamplerState.fMaxMipmapLevel = numMipLevels - 1;
1515 | } |
1516 | params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters); |
1517 | } |
1518 | |
1519 | bool result = this->uploadCompressedTexData( |
1520 | compression, glFormat, backendTexture.dimensions(), mipMapped, GR_GL_TEXTURE_2D, |
1521 | rawData, rawDataSize); |
1522 | |
1523 | // Unbind this texture from the scratch texture unit. |
1524 | this->bindTextureToScratchUnit(info.fTarget, 0); |
1525 | |
1526 | return result; |
1527 | } |
1528 | |
1529 | namespace { |
1530 | |
1531 | const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount; |
1532 | |
inline void get_stencil_rb_sizes(const GrGLInterface* gl,
1534 | GrGLStencilAttachment::Format* format) { |
1535 | |
1536 | // we shouldn't ever know one size and not the other |
1537 | SkASSERT((kUnknownBitCount == format->fStencilBits) == |
1538 | (kUnknownBitCount == format->fTotalBits)); |
1539 | if (kUnknownBitCount == format->fStencilBits) { |
1540 | GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER, |
1541 | GR_GL_RENDERBUFFER_STENCIL_SIZE, |
1542 | (GrGLint*)&format->fStencilBits); |
1543 | if (format->fPacked) { |
1544 | GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER, |
1545 | GR_GL_RENDERBUFFER_DEPTH_SIZE, |
1546 | (GrGLint*)&format->fTotalBits); |
1547 | format->fTotalBits += format->fStencilBits; |
1548 | } else { |
1549 | format->fTotalBits = format->fStencilBits; |
1550 | } |
1551 | } |
1552 | } |
1553 | } // namespace |
1554 | |
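// Returns an index into GrGLCaps::stencilFormats() identifying a stencil format that can be
// attached to an FBO whose color attachment uses 'format'. The first query for a given format
// probes the candidates against a small temporary FBO and caches the result (-1 if none work).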
1555 | int GrGLGpu::getCompatibleStencilIndex(GrGLFormat format) { |
1556 | static const int kSize = 16; |
1557 | SkASSERT(this->glCaps().canFormatBeFBOColorAttachment(format)); |
1558 | |
1559 | if (!this->glCaps().hasStencilFormatBeenDeterminedForFormat(format)) { |
1560 | // Default to unsupported, set this if we find a stencil format that works. |
1561 | int firstWorkingStencilFormatIndex = -1; |
1562 | |
1563 | GrGLuint colorID = this->createTexture({kSize, kSize}, format, GR_GL_TEXTURE_2D, |
1564 | GrRenderable::kYes, nullptr, 1); |
1565 | if (!colorID) { |
1566 | return -1; |
1567 | } |
1568 | // unbind the texture from the texture unit before binding it to the frame buffer |
1569 | GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0)); |
1570 | |
1571 | // Create Framebuffer |
1572 | GrGLuint fb = 0; |
1573 | GL_CALL(GenFramebuffers(1, &fb)); |
1574 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, fb); |
1575 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
1576 | GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, |
1577 | GR_GL_COLOR_ATTACHMENT0, |
1578 | GR_GL_TEXTURE_2D, |
1579 | colorID, |
1580 | 0)); |
1581 | GrGLuint sbRBID = 0; |
1582 | GL_CALL(GenRenderbuffers(1, &sbRBID)); |
1583 | |
        // Look over the stencil formats until we find one compatible with this color format.
1585 | int stencilFmtCnt = this->glCaps().stencilFormats().count(); |
1586 | if (sbRBID) { |
1587 | GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbRBID)); |
1588 | for (int i = 0; i < stencilFmtCnt && sbRBID; ++i) { |
1589 | const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[i]; |
1590 | GrGLenum error = GL_ALLOC_CALL(RenderbufferStorage( |
1591 | GR_GL_RENDERBUFFER, sFmt.fInternalFormat, kSize, kSize)); |
1592 | if (error == GR_GL_NO_ERROR) { |
1593 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
1594 | GR_GL_STENCIL_ATTACHMENT, |
1595 | GR_GL_RENDERBUFFER, sbRBID)); |
1596 | if (sFmt.fPacked) { |
1597 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
1598 | GR_GL_DEPTH_ATTACHMENT, |
1599 | GR_GL_RENDERBUFFER, sbRBID)); |
1600 | } else { |
1601 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
1602 | GR_GL_DEPTH_ATTACHMENT, |
1603 | GR_GL_RENDERBUFFER, 0)); |
1604 | } |
1605 | GrGLenum status; |
1606 | GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); |
1607 | if (status == GR_GL_FRAMEBUFFER_COMPLETE) { |
1608 | firstWorkingStencilFormatIndex = i; |
1609 | break; |
1610 | } |
1611 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
1612 | GR_GL_STENCIL_ATTACHMENT, |
1613 | GR_GL_RENDERBUFFER, 0)); |
1614 | if (sFmt.fPacked) { |
1615 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, |
1616 | GR_GL_DEPTH_ATTACHMENT, |
1617 | GR_GL_RENDERBUFFER, 0)); |
1618 | } |
1619 | } |
1620 | } |
1621 | GL_CALL(DeleteRenderbuffers(1, &sbRBID)); |
1622 | } |
1623 | GL_CALL(DeleteTextures(1, &colorID)); |
1624 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0); |
1625 | this->deleteFramebuffer(fb); |
1626 | fGLContext->caps()->setStencilFormatIndexForFormat(format, firstWorkingStencilFormatIndex); |
1627 | } |
1628 | return this->glCaps().getStencilFormatIndexForFormat(format); |
1629 | } |
1630 | |
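// Generates a texture object for compressed data and applies the initial sampler parameters.
// Storage is not allocated here; callers upload the compressed data afterwards.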
1631 | GrGLuint GrGLGpu::createCompressedTexture2D( |
1632 | SkISize dimensions, |
1633 | SkImage::CompressionType compression, |
1634 | GrGLFormat format, |
1635 | GrMipmapped mipMapped, |
1636 | GrGLTextureParameters::SamplerOverriddenState* initialState) { |
1637 | if (format == GrGLFormat::kUnknown) { |
1638 | return 0; |
1639 | } |
1640 | GrGLuint id = 0; |
1641 | GL_CALL(GenTextures(1, &id)); |
1642 | if (!id) { |
1643 | return 0; |
1644 | } |
1645 | |
1646 | this->bindTextureToScratchUnit(GR_GL_TEXTURE_2D, id); |
1647 | |
1648 | *initialState = set_initial_texture_params(this->glInterface(), GR_GL_TEXTURE_2D); |
1649 | |
1650 | return id; |
1651 | } |
1652 | |
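// Creates and allocates an uncompressed texture, preferring TexStorage2D when the format
// supports it and otherwise falling back to a TexImage2D call per mip level. Returns 0 on
// failure.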
1653 | GrGLuint GrGLGpu::createTexture(SkISize dimensions, |
1654 | GrGLFormat format, |
1655 | GrGLenum target, |
1656 | GrRenderable renderable, |
1657 | GrGLTextureParameters::SamplerOverriddenState* initialState, |
1658 | int mipLevelCount) { |
1659 | SkASSERT(format != GrGLFormat::kUnknown); |
1660 | SkASSERT(!GrGLFormatIsCompressed(format)); |
1661 | |
1662 | GrGLuint id = 0; |
1663 | GL_CALL(GenTextures(1, &id)); |
1664 | |
1665 | if (!id) { |
1666 | return 0; |
1667 | } |
1668 | |
1669 | this->bindTextureToScratchUnit(target, id); |
1670 | |
1671 | if (GrRenderable::kYes == renderable && this->glCaps().textureUsageSupport()) { |
1672 | // provides a hint about how this texture will be used |
1673 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_USAGE, GR_GL_FRAMEBUFFER_ATTACHMENT)); |
1674 | } |
1675 | |
1676 | if (initialState) { |
1677 | *initialState = set_initial_texture_params(this->glInterface(), target); |
1678 | } else { |
1679 | set_initial_texture_params(this->glInterface(), target); |
1680 | } |
1681 | |
1682 | GrGLenum internalFormat = this->glCaps().getTexImageOrStorageInternalFormat(format); |
1683 | |
1684 | bool success = false; |
1685 | if (internalFormat) { |
1686 | if (this->glCaps().formatSupportsTexStorage(format)) { |
1687 | auto levelCount = std::max(mipLevelCount, 1); |
1688 | GrGLenum error = GL_ALLOC_CALL(TexStorage2D(target, levelCount, internalFormat, |
1689 | dimensions.width(), dimensions.height())); |
1690 | success = (error == GR_GL_NO_ERROR); |
1691 | } else { |
1692 | GrGLenum externalFormat, externalType; |
1693 | this->glCaps().getTexImageExternalFormatAndType(format, &externalFormat, &externalType); |
1694 | GrGLenum error = GR_GL_NO_ERROR; |
1695 | if (externalFormat && externalType) { |
1696 | for (int level = 0; level < mipLevelCount && error == GR_GL_NO_ERROR; level++) { |
1697 | const int twoToTheMipLevel = 1 << level; |
1698 | const int currentWidth = std::max(1, dimensions.width() / twoToTheMipLevel); |
1699 | const int currentHeight = std::max(1, dimensions.height() / twoToTheMipLevel); |
1700 | error = GL_ALLOC_CALL(TexImage2D(target, level, internalFormat, currentWidth, |
1701 | currentHeight, 0, externalFormat, externalType, |
1702 | nullptr)); |
1703 | } |
1704 | success = (error == GR_GL_NO_ERROR); |
1705 | } |
1706 | } |
1707 | } |
1708 | if (success) { |
1709 | return id; |
1710 | } |
1711 | GL_CALL(DeleteTextures(1, &id)); |
1712 | return 0; |
1713 | } |
1714 | |
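// Creates a stencil renderbuffer sized for the given render target, choosing its internal format
// via the cached result of getCompatibleStencilIndex().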
1715 | GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget( |
1716 | const GrRenderTarget* rt, int width, int height, int numStencilSamples) { |
1717 | SkASSERT(width >= rt->width()); |
1718 | SkASSERT(height >= rt->height()); |
1719 | |
1720 | GrGLStencilAttachment::IDDesc sbDesc; |
1721 | |
1722 | int sIdx = this->getCompatibleStencilIndex(rt->backendFormat().asGLFormat()); |
1723 | if (sIdx < 0) { |
1724 | return nullptr; |
1725 | } |
1726 | |
1727 | if (!sbDesc.fRenderbufferID) { |
1728 | GL_CALL(GenRenderbuffers(1, &sbDesc.fRenderbufferID)); |
1729 | } |
1730 | if (!sbDesc.fRenderbufferID) { |
1731 | return nullptr; |
1732 | } |
1733 | GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbDesc.fRenderbufferID)); |
1734 | const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[sIdx]; |
1735 | // we do this "if" so that we don't call the multisample |
1736 | // version on a GL that doesn't have an MSAA extension. |
1737 | if (numStencilSamples > 1) { |
1738 | if (!this->renderbufferStorageMSAA(*fGLContext, numStencilSamples, sFmt.fInternalFormat, |
1739 | width, height)) { |
1740 | GL_CALL(DeleteRenderbuffers(1, &sbDesc.fRenderbufferID)); |
1741 | return nullptr; |
1742 | } |
1743 | } else { |
1744 | GrGLenum error = GL_ALLOC_CALL( |
1745 | RenderbufferStorage(GR_GL_RENDERBUFFER, sFmt.fInternalFormat, width, height)); |
1746 | if (error != GR_GL_NO_ERROR) { |
1747 | GL_CALL(DeleteRenderbuffers(1, &sbDesc.fRenderbufferID)); |
1748 | return nullptr; |
1749 | } |
1750 | } |
1751 | fStats.incStencilAttachmentCreates(); |
1752 | // After sized formats we attempt an unsized format and take |
1753 | // whatever sizes GL gives us. In that case we query for the size. |
1754 | GrGLStencilAttachment::Format format = sFmt; |
1755 | get_stencil_rb_sizes(this->glInterface(), &format); |
1756 | GrGLStencilAttachment* stencil = new GrGLStencilAttachment(this, |
1757 | sbDesc, |
1758 | width, |
1759 | height, |
1760 | numStencilSamples, |
1761 | format); |
1762 | return stencil; |
1763 | } |
1764 | |
1765 | //////////////////////////////////////////////////////////////////////////////// |
1766 | |
1767 | sk_sp<GrGpuBuffer> GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType, |
1768 | GrAccessPattern accessPattern, const void* data) { |
1769 | return GrGLBuffer::Make(this, size, intendedType, accessPattern, data); |
1770 | } |
1771 | |
1772 | void GrGLGpu::flushScissorTest(GrScissorTest scissorTest) { |
1773 | if (GrScissorTest::kEnabled == scissorTest) { |
1774 | if (kYes_TriState != fHWScissorSettings.fEnabled) { |
1775 | GL_CALL(Enable(GR_GL_SCISSOR_TEST)); |
1776 | fHWScissorSettings.fEnabled = kYes_TriState; |
1777 | } |
1778 | } else { |
1779 | if (kNo_TriState != fHWScissorSettings.fEnabled) { |
1780 | GL_CALL(Disable(GR_GL_SCISSOR_TEST)); |
1781 | fHWScissorSettings.fEnabled = kNo_TriState; |
1782 | } |
1783 | } |
1784 | } |
1785 | |
1786 | void GrGLGpu::flushScissorRect(const SkIRect& scissor, int rtWidth, int rtHeight, |
1787 | GrSurfaceOrigin rtOrigin) { |
1788 | SkASSERT(fHWScissorSettings.fEnabled == TriState::kYes_TriState); |
1789 | auto nativeScissor = GrNativeRect::MakeRelativeTo(rtOrigin, rtHeight, scissor); |
1790 | if (fHWScissorSettings.fRect != nativeScissor) { |
1791 | GL_CALL(Scissor(nativeScissor.fX, nativeScissor.fY, nativeScissor.fWidth, |
1792 | nativeScissor.fHeight)); |
1793 | fHWScissorSettings.fRect = nativeScissor; |
1794 | } |
1795 | } |
1796 | |
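// Applies the window-rectangles state for the given render target, skipping the GL call when the
// cached HW state already matches (or when the caps report no window rectangle support).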
1797 | void GrGLGpu::flushWindowRectangles(const GrWindowRectsState& windowState, |
1798 | const GrGLRenderTarget* rt, GrSurfaceOrigin origin) { |
1799 | #ifndef USE_NSIGHT |
1800 | typedef GrWindowRectsState::Mode Mode; |
1801 | SkASSERT(!windowState.enabled() || rt->renderFBOID()); // Window rects can't be used on-screen. |
1802 | SkASSERT(windowState.numWindows() <= this->caps()->maxWindowRectangles()); |
1803 | |
1804 | if (!this->caps()->maxWindowRectangles() || |
1805 | fHWWindowRectsState.knownEqualTo(origin, rt->width(), rt->height(), windowState)) { |
1806 | return; |
1807 | } |
1808 | |
1809 | // This is purely a workaround for a spurious warning generated by gcc. Otherwise the above |
1810 | // assert would be sufficient. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=5912 |
1811 | int numWindows = std::min(windowState.numWindows(), int(GrWindowRectangles::kMaxWindows)); |
1812 | SkASSERT(windowState.numWindows() == numWindows); |
1813 | |
1814 | GrNativeRect glwindows[GrWindowRectangles::kMaxWindows]; |
1815 | const SkIRect* skwindows = windowState.windows().data(); |
1816 | for (int i = 0; i < numWindows; ++i) { |
1817 | glwindows[i].setRelativeTo(origin, rt->height(), skwindows[i]); |
1818 | } |
1819 | |
1820 | GrGLenum glmode = (Mode::kExclusive == windowState.mode()) ? GR_GL_EXCLUSIVE : GR_GL_INCLUSIVE; |
1821 | GL_CALL(WindowRectangles(glmode, numWindows, glwindows->asInts())); |
1822 | |
1823 | fHWWindowRectsState.set(origin, rt->width(), rt->height(), windowState); |
1824 | #endif |
1825 | } |
1826 | |
1827 | void GrGLGpu::disableWindowRectangles() { |
1828 | #ifndef USE_NSIGHT |
1829 | if (!this->caps()->maxWindowRectangles() || fHWWindowRectsState.knownDisabled()) { |
1830 | return; |
1831 | } |
1832 | GL_CALL(WindowRectangles(GR_GL_EXCLUSIVE, 0, nullptr)); |
1833 | fHWWindowRectsState.setDisabled(); |
1834 | #endif |
1835 | } |
1836 | |
1837 | bool GrGLGpu::flushGLState(GrRenderTarget* renderTarget, const GrProgramInfo& programInfo) { |
1838 | this->handleDirtyContext(); |
1839 | |
1840 | sk_sp<GrGLProgram> program = fProgramCache->findOrCreateProgram(renderTarget, programInfo); |
1841 | if (!program) { |
        GrCapsDebugf(this->caps(), "Failed to create program!\n");
1843 | return false; |
1844 | } |
1845 | |
1846 | this->flushProgram(std::move(program)); |
1847 | |
1848 | if (GrPrimitiveType::kPatches == programInfo.primitiveType()) { |
1849 | this->flushPatchVertexCount(programInfo.tessellationPatchVertexCount()); |
1850 | } |
1851 | |
1852 | // Swizzle the blend to match what the shader will output. |
1853 | this->flushBlendAndColorWrite(programInfo.pipeline().getXferProcessor().getBlendInfo(), |
1854 | programInfo.pipeline().writeSwizzle()); |
1855 | |
1856 | fHWProgram->updateUniforms(renderTarget, programInfo); |
1857 | |
1858 | GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget); |
1859 | GrStencilSettings stencil; |
1860 | if (programInfo.pipeline().isStencilEnabled()) { |
1861 | SkASSERT(glRT->getStencilAttachment()); |
1862 | stencil.reset(*programInfo.pipeline().getUserStencil(), |
1863 | programInfo.pipeline().hasStencilClip(), |
1864 | glRT->numStencilBits()); |
1865 | } |
1866 | this->flushStencil(stencil, programInfo.origin()); |
1867 | this->flushScissorTest(GrScissorTest(programInfo.pipeline().isScissorTestEnabled())); |
1868 | this->flushWindowRectangles(programInfo.pipeline().getWindowRectsState(), |
1869 | glRT, programInfo.origin()); |
1870 | this->flushHWAAState(glRT, programInfo.pipeline().isHWAntialiasState()); |
1871 | this->flushConservativeRasterState(programInfo.pipeline().usesConservativeRaster()); |
1872 | this->flushWireframeState(programInfo.pipeline().isWireframe()); |
1873 | |
1874 | // This must come after textures are flushed because a texture may need |
1875 | // to be msaa-resolved (which will modify bound FBO state). |
1876 | this->flushRenderTarget(glRT); |
1877 | |
1878 | return true; |
1879 | } |
1880 | |
1881 | void GrGLGpu::flushProgram(sk_sp<GrGLProgram> program) { |
1882 | if (!program) { |
1883 | fHWProgram.reset(); |
1884 | fHWProgramID = 0; |
1885 | return; |
1886 | } |
1887 | SkASSERT((program == fHWProgram) == (fHWProgramID == program->programID())); |
1888 | if (program == fHWProgram) { |
1889 | return; |
1890 | } |
1891 | auto id = program->programID(); |
1892 | SkASSERT(id); |
1893 | GL_CALL(UseProgram(id)); |
1894 | fHWProgram = std::move(program); |
1895 | fHWProgramID = id; |
1896 | } |
1897 | |
1898 | void GrGLGpu::flushProgram(GrGLuint id) { |
1899 | SkASSERT(id); |
1900 | if (fHWProgramID == id) { |
1901 | SkASSERT(!fHWProgram); |
1902 | return; |
1903 | } |
1904 | fHWProgram.reset(); |
1905 | GL_CALL(UseProgram(id)); |
1906 | fHWProgramID = id; |
1907 | } |
1908 | |
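// Binds 'buffer' to the GL target associated with 'type' and returns that target. CPU-backed
// buffers map to binding zero, and redundant binds are skipped using the cached buffer state.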
1909 | GrGLenum GrGLGpu::bindBuffer(GrGpuBufferType type, const GrBuffer* buffer) { |
1910 | this->handleDirtyContext(); |
1911 | |
1912 | // Index buffer state is tied to the vertex array. |
1913 | if (GrGpuBufferType::kIndex == type) { |
1914 | this->bindVertexArray(0); |
1915 | } |
1916 | |
1917 | auto* bufferState = this->hwBufferState(type); |
1918 | if (buffer->isCpuBuffer()) { |
1919 | if (!bufferState->fBufferZeroKnownBound) { |
1920 | GL_CALL(BindBuffer(bufferState->fGLTarget, 0)); |
1921 | bufferState->fBufferZeroKnownBound = true; |
1922 | bufferState->fBoundBufferUniqueID.makeInvalid(); |
1923 | } |
1924 | } else if (static_cast<const GrGpuBuffer*>(buffer)->uniqueID() != |
1925 | bufferState->fBoundBufferUniqueID) { |
1926 | const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer); |
1927 | GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID())); |
1928 | bufferState->fBufferZeroKnownBound = false; |
1929 | bufferState->fBoundBufferUniqueID = glBuffer->uniqueID(); |
1930 | } |
1931 | |
1932 | return bufferState->fGLTarget; |
1933 | } |
1934 | |
1935 | void GrGLGpu::clear(const GrScissorState& scissor, const SkPMColor4f& color, |
1936 | GrRenderTarget* target, GrSurfaceOrigin origin) { |
1937 | // parent class should never let us get here with no RT |
1938 | SkASSERT(target); |
1939 | SkASSERT(!this->caps()->performColorClearsAsDraws()); |
1940 | SkASSERT(!scissor.enabled() || !this->caps()->performPartialClearsAsDraws()); |
1941 | |
1942 | this->handleDirtyContext(); |
1943 | |
1944 | GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); |
1945 | |
1946 | if (scissor.enabled()) { |
1947 | this->flushRenderTarget(glRT, origin, scissor.rect()); |
1948 | } else { |
1949 | this->flushRenderTarget(glRT); |
1950 | } |
1951 | this->flushScissor(scissor, glRT->width(), glRT->height(), origin); |
1952 | this->disableWindowRectangles(); |
1953 | this->flushColorWrite(true); |
1954 | this->flushClearColor(color); |
1955 | GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT)); |
1956 | } |
1957 | |
1958 | static bool use_tiled_rendering(const GrGLCaps& glCaps, |
1959 | const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) { |
1960 | // Only use the tiled rendering extension if we can explicitly clear and discard the stencil. |
1961 | // Otherwise it's faster to just not use it. |
1962 | return glCaps.tiledRenderingSupport() && GrLoadOp::kClear == stencilLoadStore.fLoadOp && |
1963 | GrStoreOp::kDiscard == stencilLoadStore.fStoreOp; |
1964 | } |
1965 | |
1966 | void GrGLGpu::beginCommandBuffer(GrRenderTarget* rt, const SkIRect& bounds, GrSurfaceOrigin origin, |
1967 | const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore, |
1968 | const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) { |
1969 | SkASSERT(!fIsExecutingCommandBuffer_DebugOnly); |
1970 | |
1971 | this->handleDirtyContext(); |
1972 | |
1973 | auto glRT = static_cast<GrGLRenderTarget*>(rt); |
1974 | this->flushRenderTarget(glRT); |
1975 | SkDEBUGCODE(fIsExecutingCommandBuffer_DebugOnly = true); |
1976 | |
1977 | if (use_tiled_rendering(this->glCaps(), stencilLoadStore)) { |
1978 | auto nativeBounds = GrNativeRect::MakeRelativeTo(origin, glRT->height(), bounds); |
1979 | GrGLbitfield preserveMask = (GrLoadOp::kLoad == colorLoadStore.fLoadOp) |
1980 | ? GR_GL_COLOR_BUFFER_BIT0 : GR_GL_NONE; |
1981 | SkASSERT(GrLoadOp::kLoad != stencilLoadStore.fLoadOp); // Handled by use_tiled_rendering(). |
1982 | GL_CALL(StartTiling(nativeBounds.fX, nativeBounds.fY, nativeBounds.fWidth, |
1983 | nativeBounds.fHeight, preserveMask)); |
1984 | } |
1985 | |
1986 | GrGLbitfield clearMask = 0; |
1987 | if (GrLoadOp::kClear == colorLoadStore.fLoadOp) { |
1988 | SkASSERT(!this->caps()->performColorClearsAsDraws()); |
1989 | this->flushClearColor(colorLoadStore.fClearColor); |
1990 | this->flushColorWrite(true); |
1991 | clearMask |= GR_GL_COLOR_BUFFER_BIT; |
1992 | } |
1993 | if (GrLoadOp::kClear == stencilLoadStore.fLoadOp) { |
1994 | SkASSERT(!this->caps()->performStencilClearsAsDraws()); |
1995 | GL_CALL(StencilMask(0xffffffff)); |
1996 | GL_CALL(ClearStencil(0)); |
1997 | clearMask |= GR_GL_STENCIL_BUFFER_BIT; |
1998 | } |
1999 | if (clearMask) { |
2000 | this->flushScissorTest(GrScissorTest::kDisabled); |
2001 | this->disableWindowRectangles(); |
2002 | GL_CALL(Clear(clearMask)); |
2003 | } |
2004 | } |
2005 | |
2006 | void GrGLGpu::endCommandBuffer(GrRenderTarget* rt, |
2007 | const GrOpsRenderPass::LoadAndStoreInfo& colorLoadStore, |
2008 | const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilLoadStore) { |
2009 | SkASSERT(fIsExecutingCommandBuffer_DebugOnly); |
2010 | |
2011 | this->handleDirtyContext(); |
2012 | |
2013 | if (rt->uniqueID() != fHWBoundRenderTargetUniqueID) { |
2014 | // The framebuffer binding changed in the middle of a command buffer. We should have already |
2015 | // printed a warning during onFBOChanged. |
2016 | return; |
2017 | } |
2018 | |
2019 | if (GrGLCaps::kNone_InvalidateFBType != this->glCaps().invalidateFBType()) { |
2020 | auto glRT = static_cast<GrGLRenderTarget*>(rt); |
2021 | |
2022 | SkSTArray<2, GrGLenum> discardAttachments; |
2023 | if (GrStoreOp::kDiscard == colorLoadStore.fStoreOp) { |
2024 | discardAttachments.push_back( |
2025 | (0 == glRT->renderFBOID()) ? GR_GL_COLOR : GR_GL_COLOR_ATTACHMENT0); |
2026 | } |
2027 | if (GrStoreOp::kDiscard == stencilLoadStore.fStoreOp) { |
2028 | discardAttachments.push_back( |
2029 | (0 == glRT->renderFBOID()) ? GR_GL_STENCIL : GR_GL_STENCIL_ATTACHMENT); |
2030 | } |
2031 | |
2032 | if (!discardAttachments.empty()) { |
2033 | if (GrGLCaps::kInvalidate_InvalidateFBType == this->glCaps().invalidateFBType()) { |
2034 | GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, discardAttachments.count(), |
2035 | discardAttachments.begin())); |
2036 | } else { |
2037 | SkASSERT(GrGLCaps::kDiscard_InvalidateFBType == this->glCaps().invalidateFBType()); |
2038 | GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, discardAttachments.count(), |
2039 | discardAttachments.begin())); |
2040 | } |
2041 | } |
2042 | } |
2043 | |
2044 | if (use_tiled_rendering(this->glCaps(), stencilLoadStore)) { |
2045 | GrGLbitfield preserveMask = (GrStoreOp::kStore == colorLoadStore.fStoreOp) |
2046 | ? GR_GL_COLOR_BUFFER_BIT0 : GR_GL_NONE; |
2047 | // Handled by use_tiled_rendering(). |
2048 | SkASSERT(GrStoreOp::kStore != stencilLoadStore.fStoreOp); |
2049 | GL_CALL(EndTiling(preserveMask)); |
2050 | } |
2051 | |
2052 | SkDEBUGCODE(fIsExecutingCommandBuffer_DebugOnly = false); |
2053 | } |
2054 | |
2055 | void GrGLGpu::clearStencilClip(const GrScissorState& scissor, bool insideStencilMask, |
2056 | GrRenderTarget* target, GrSurfaceOrigin origin) { |
2057 | SkASSERT(target); |
2058 | SkASSERT(!this->caps()->performStencilClearsAsDraws()); |
2059 | SkASSERT(!scissor.enabled() || !this->caps()->performPartialClearsAsDraws()); |
2060 | this->handleDirtyContext(); |
2061 | |
2062 | GrStencilAttachment* sb = target->getStencilAttachment(); |
2063 | if (!sb) { |
2064 | // We should only get here if we marked a proxy as requiring a SB. However, |
2065 | // the SB creation could later fail. Likely clipping is going to go awry now. |
2066 | return; |
2067 | } |
2068 | |
2069 | GrGLint stencilBitCount = sb->bits(); |
2070 | #if 0 |
2071 | SkASSERT(stencilBitCount > 0); |
2072 | GrGLint clipStencilMask = (1 << (stencilBitCount - 1)); |
2073 | #else |
2074 | // we could just clear the clip bit but when we go through |
2075 | // ANGLE a partial stencil mask will cause clears to be |
2076 | // turned into draws. Our contract on GrOpsTask says that |
2077 | // changing the clip between stencil passes may or may not |
2078 | // zero the client's clip bits. So we just clear the whole thing. |
2079 | static const GrGLint clipStencilMask = ~0; |
2080 | #endif |
2081 | GrGLint value; |
2082 | if (insideStencilMask) { |
2083 | value = (1 << (stencilBitCount - 1)); |
2084 | } else { |
2085 | value = 0; |
2086 | } |
2087 | GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target); |
2088 | this->flushRenderTargetNoColorWrites(glRT); |
2089 | |
2090 | this->flushScissor(scissor, glRT->width(), glRT->height(), origin); |
2091 | this->disableWindowRectangles(); |
2092 | |
2093 | GL_CALL(StencilMask((uint32_t) clipStencilMask)); |
2094 | GL_CALL(ClearStencil(value)); |
2095 | GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT)); |
2096 | fHWStencilSettings.invalidate(); |
2097 | } |
2098 | |
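// Shared implementation for reads and transfers that go through ReadPixels. 'offsetOrPtr' is
// handed directly to ReadPixels, so it is either a client pointer or a buffer offset when a
// pixel pack buffer is bound by the caller.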
2099 | bool GrGLGpu::readOrTransferPixelsFrom(GrSurface* surface, int left, int top, int width, int height, |
2100 | GrColorType surfaceColorType, GrColorType dstColorType, |
2101 | void* offsetOrPtr, int rowWidthInPixels) { |
2102 | SkASSERT(surface); |
2103 | |
2104 | auto format = surface->backendFormat().asGLFormat(); |
2105 | GrGLRenderTarget* renderTarget = static_cast<GrGLRenderTarget*>(surface->asRenderTarget()); |
2106 | if (!renderTarget && !this->glCaps().isFormatRenderable(format, 1)) { |
2107 | return false; |
2108 | } |
2109 | GrGLenum externalFormat = 0; |
2110 | GrGLenum externalType = 0; |
2111 | this->glCaps().getReadPixelsFormat(surface->backendFormat().asGLFormat(), |
2112 | surfaceColorType, |
2113 | dstColorType, |
2114 | &externalFormat, |
2115 | &externalType); |
2116 | if (!externalFormat || !externalType) { |
2117 | return false; |
2118 | } |
2119 | |
2120 | if (renderTarget) { |
2121 | if (renderTarget->numSamples() <= 1 || |
2122 | renderTarget->renderFBOID() == renderTarget->textureFBOID()) { // Also catches FBO 0. |
2123 | SkASSERT(!renderTarget->requiresManualMSAAResolve()); |
2124 | this->flushRenderTargetNoColorWrites(renderTarget); |
2125 | } else if (GrGLRenderTarget::kUnresolvableFBOID == renderTarget->textureFBOID()) { |
2126 | SkASSERT(!renderTarget->requiresManualMSAAResolve()); |
2127 | return false; |
2128 | } else { |
2129 | SkASSERT(renderTarget->requiresManualMSAAResolve()); |
2130 | // we don't track the state of the READ FBO ID. |
2131 | this->bindFramebuffer(GR_GL_READ_FRAMEBUFFER, renderTarget->textureFBOID()); |
2132 | } |
2133 | } else { |
2134 | // Use a temporary FBO. |
2135 | this->bindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget); |
2136 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
2137 | } |
2138 | |
2139 | // the read rect is viewport-relative |
2140 | GrNativeRect readRect = {left, top, width, height}; |
2141 | |
2142 | // determine if GL can read using the passed rowBytes or if we need a scratch buffer. |
2143 | if (rowWidthInPixels != width) { |
2144 | SkASSERT(this->glCaps().readPixelsRowBytesSupport()); |
2145 | GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, rowWidthInPixels)); |
2146 | } |
2147 | GL_CALL(PixelStorei(GR_GL_PACK_ALIGNMENT, 1)); |
2148 | |
2149 | bool reattachStencil = false; |
2150 | if (this->glCaps().detachStencilFromMSAABuffersBeforeReadPixels() && |
2151 | renderTarget && |
2152 | renderTarget->getStencilAttachment() && |
2153 | renderTarget->numSamples() > 1) { |
2154 | // Fix Adreno devices that won't read from MSAA framebuffers with stencil attached |
2155 | reattachStencil = true; |
2156 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, |
2157 | GR_GL_RENDERBUFFER, 0)); |
2158 | } |
2159 | |
2160 | GL_CALL(ReadPixels(readRect.fX, readRect.fY, readRect.fWidth, readRect.fHeight, |
2161 | externalFormat, externalType, offsetOrPtr)); |
2162 | |
2163 | if (reattachStencil) { |
2164 | auto* stencilAttachment = |
2165 | static_cast<GrGLStencilAttachment*>(renderTarget->getStencilAttachment()); |
2166 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, |
2167 | GR_GL_RENDERBUFFER, stencilAttachment->renderbufferID())); |
2168 | } |
2169 | |
2170 | if (rowWidthInPixels != width) { |
2171 | SkASSERT(this->glCaps().readPixelsRowBytesSupport()); |
2172 | GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0)); |
2173 | } |
2174 | |
2175 | if (!renderTarget) { |
2176 | this->unbindSurfaceFBOForPixelOps(surface, 0, GR_GL_FRAMEBUFFER); |
2177 | } |
2178 | return true; |
2179 | } |
2180 | |
2181 | bool GrGLGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int height, |
2182 | GrColorType surfaceColorType, GrColorType dstColorType, void* buffer, |
2183 | size_t rowBytes) { |
2184 | SkASSERT(surface); |
2185 | |
2186 | size_t bytesPerPixel = GrColorTypeBytesPerPixel(dstColorType); |
2187 | |
2188 | // GL_PACK_ROW_LENGTH is in terms of pixels not bytes. |
2189 | int rowPixelWidth; |
2190 | |
2191 | if (rowBytes == SkToSizeT(width * bytesPerPixel)) { |
2192 | rowPixelWidth = width; |
2193 | } else { |
2194 | SkASSERT(!(rowBytes % bytesPerPixel)); |
2195 | rowPixelWidth = rowBytes / bytesPerPixel; |
2196 | } |
2197 | return this->readOrTransferPixelsFrom(surface, left, top, width, height, surfaceColorType, |
2198 | dstColorType, buffer, rowPixelWidth); |
2199 | } |
2200 | |
2201 | GrOpsRenderPass* GrGLGpu::getOpsRenderPass( |
2202 | GrRenderTarget* rt, GrStencilAttachment*, |
2203 | GrSurfaceOrigin origin, const SkIRect& bounds, |
2204 | const GrOpsRenderPass::LoadAndStoreInfo& colorInfo, |
2205 | const GrOpsRenderPass::StencilLoadAndStoreInfo& stencilInfo, |
2206 | const SkTArray<GrSurfaceProxy*, true>& sampledProxies) { |
2207 | if (!fCachedOpsRenderPass) { |
2208 | fCachedOpsRenderPass = std::make_unique<GrGLOpsRenderPass>(this); |
2209 | } |
2210 | |
2211 | fCachedOpsRenderPass->set(rt, bounds, origin, colorInfo, stencilInfo); |
2212 | return fCachedOpsRenderPass.get(); |
2213 | } |
2214 | |
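// The flushRenderTarget() overloads bind the target's FBO (if it isn't already bound) and record
// that the surface was written; the overload taking bounds limits the recorded dirty region.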
2215 | void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, GrSurfaceOrigin origin, |
2216 | const SkIRect& bounds) { |
2217 | this->flushRenderTargetNoColorWrites(target); |
2218 | this->didWriteToSurface(target, origin, &bounds); |
2219 | } |
2220 | |
2221 | void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target) { |
2222 | this->flushRenderTargetNoColorWrites(target); |
2223 | this->didWriteToSurface(target, kTopLeft_GrSurfaceOrigin, nullptr); |
2224 | } |
2225 | |
2226 | void GrGLGpu::flushRenderTargetNoColorWrites(GrGLRenderTarget* target) { |
2227 | SkASSERT(target); |
2228 | GrGpuResource::UniqueID rtID = target->uniqueID(); |
2229 | if (fHWBoundRenderTargetUniqueID != rtID) { |
2230 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, target->renderFBOID()); |
2231 | #ifdef SK_DEBUG |
2232 | // don't do this check in Chromium -- this is causing |
2233 | // lots of repeated command buffer flushes when the compositor is |
2234 | // rendering with Ganesh, which is really slow; even too slow for |
2235 | // Debug mode. |
2236 | if (!this->glCaps().skipErrorChecks()) { |
2237 | GrGLenum status; |
2238 | GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); |
2239 | if (status != GR_GL_FRAMEBUFFER_COMPLETE) { |
                SkDebugf("GrGLGpu::flushRenderTarget glCheckFramebufferStatus %x\n", status);
2241 | } |
2242 | } |
2243 | #endif |
2244 | fHWBoundRenderTargetUniqueID = rtID; |
2245 | this->flushViewport(target->width(), target->height()); |
2246 | } |
2247 | |
2248 | if (this->glCaps().srgbWriteControl()) { |
2249 | this->flushFramebufferSRGB(this->caps()->isFormatSRGB(target->backendFormat())); |
2250 | } |
2251 | |
2252 | if (this->glCaps().shouldQueryImplementationReadSupport(target->format())) { |
2253 | GrGLint format; |
2254 | GrGLint type; |
2255 | GR_GL_GetIntegerv(this->glInterface(), GR_GL_IMPLEMENTATION_COLOR_READ_FORMAT, &format); |
2256 | GR_GL_GetIntegerv(this->glInterface(), GR_GL_IMPLEMENTATION_COLOR_READ_TYPE, &type); |
2257 | this->glCaps().didQueryImplementationReadSupport(target->format(), format, type); |
2258 | } |
2259 | } |
2260 | |
2261 | void GrGLGpu::flushFramebufferSRGB(bool enable) { |
2262 | if (enable && kYes_TriState != fHWSRGBFramebuffer) { |
2263 | GL_CALL(Enable(GR_GL_FRAMEBUFFER_SRGB)); |
2264 | fHWSRGBFramebuffer = kYes_TriState; |
2265 | } else if (!enable && kNo_TriState != fHWSRGBFramebuffer) { |
2266 | GL_CALL(Disable(GR_GL_FRAMEBUFFER_SRGB)); |
2267 | fHWSRGBFramebuffer = kNo_TriState; |
2268 | } |
2269 | } |
2270 | |
2271 | void GrGLGpu::flushViewport(int width, int height) { |
2272 | GrNativeRect viewport = {0, 0, width, height}; |
2273 | if (fHWViewport != viewport) { |
2274 | GL_CALL(Viewport(viewport.fX, viewport.fY, viewport.fWidth, viewport.fHeight)); |
2275 | fHWViewport = viewport; |
2276 | } |
2277 | } |
2278 | |
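// Maps the GrPrimitiveType to its GL enum and bumps the draw count. Also toggles GR_GL_CULL_FACE
// around line draws on devices that require it (see the corresponding workaround cap).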
2279 | GrGLenum GrGLGpu::prepareToDraw(GrPrimitiveType primitiveType) { |
2280 | fStats.incNumDraws(); |
2281 | |
2282 | if (this->glCaps().requiresCullFaceEnableDisableWhenDrawingLinesAfterNonLines() && |
2283 | GrIsPrimTypeLines(primitiveType) && !GrIsPrimTypeLines(fLastPrimitiveType)) { |
2284 | GL_CALL(Enable(GR_GL_CULL_FACE)); |
2285 | GL_CALL(Disable(GR_GL_CULL_FACE)); |
2286 | } |
2287 | fLastPrimitiveType = primitiveType; |
2288 | |
2289 | switch (primitiveType) { |
2290 | case GrPrimitiveType::kTriangles: |
2291 | return GR_GL_TRIANGLES; |
2292 | case GrPrimitiveType::kTriangleStrip: |
2293 | return GR_GL_TRIANGLE_STRIP; |
2294 | case GrPrimitiveType::kPoints: |
2295 | return GR_GL_POINTS; |
2296 | case GrPrimitiveType::kLines: |
2297 | return GR_GL_LINES; |
2298 | case GrPrimitiveType::kLineStrip: |
2299 | return GR_GL_LINE_STRIP; |
2300 | case GrPrimitiveType::kPatches: |
2301 | return GR_GL_PATCHES; |
2302 | case GrPrimitiveType::kPath: |
            SK_ABORT("non-mesh-based GrPrimitiveType");
            return 0;
    }
    SK_ABORT("invalid GrPrimitiveType");
2307 | } |
2308 | |
2309 | void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) { |
    // Some extensions automatically resolve the texture when it is read.
2311 | SkASSERT(this->glCaps().usesMSAARenderBuffers()); |
2312 | |
2313 | GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target); |
2314 | SkASSERT(rt->textureFBOID() != rt->renderFBOID()); |
2315 | SkASSERT(rt->textureFBOID() != 0 && rt->renderFBOID() != 0); |
2316 | this->bindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID()); |
2317 | this->bindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID()); |
2318 | |
2319 | // make sure we go through flushRenderTarget() since we've modified |
2320 | // the bound DRAW FBO ID. |
2321 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
2322 | if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) { |
2323 | // Apple's extension uses the scissor as the blit bounds. |
2324 | // Passing in kTopLeft_GrSurfaceOrigin will make sure no transformation of the rect |
2325 | // happens inside flushScissor since resolveRect is already in native device coordinates. |
2326 | GrScissorState scissor(rt->dimensions()); |
2327 | SkAssertResult(scissor.set(resolveRect)); |
2328 | this->flushScissor(scissor, rt->width(), rt->height(), kTopLeft_GrSurfaceOrigin); |
2329 | this->disableWindowRectangles(); |
2330 | GL_CALL(ResolveMultisampleFramebuffer()); |
2331 | } else { |
2332 | int l, b, r, t; |
2333 | if (GrGLCaps::kResolveMustBeFull_BlitFrambufferFlag & |
2334 | this->glCaps().blitFramebufferSupportFlags()) { |
2335 | l = 0; |
2336 | b = 0; |
2337 | r = target->width(); |
2338 | t = target->height(); |
2339 | } else { |
2340 | l = resolveRect.x(); |
2341 | b = resolveRect.y(); |
2342 | r = resolveRect.x() + resolveRect.width(); |
2343 | t = resolveRect.y() + resolveRect.height(); |
2344 | } |
2345 | |
2346 | // BlitFrameBuffer respects the scissor, so disable it. |
2347 | this->flushScissorTest(GrScissorTest::kDisabled); |
2348 | this->disableWindowRectangles(); |
2349 | GL_CALL(BlitFramebuffer(l, b, r, t, l, b, r, t, GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); |
2350 | } |
2351 | } |
2352 | |
2353 | namespace { |
2354 | |
2356 | GrGLenum gr_to_gl_stencil_op(GrStencilOp op) { |
2357 | static const GrGLenum gTable[kGrStencilOpCount] = { |
2358 | GR_GL_KEEP, // kKeep |
2359 | GR_GL_ZERO, // kZero |
2360 | GR_GL_REPLACE, // kReplace |
2361 | GR_GL_INVERT, // kInvert |
2362 | GR_GL_INCR_WRAP, // kIncWrap |
2363 | GR_GL_DECR_WRAP, // kDecWrap |
2364 | GR_GL_INCR, // kIncClamp |
2365 | GR_GL_DECR, // kDecClamp |
2366 | }; |
2367 | static_assert(0 == (int)GrStencilOp::kKeep); |
2368 | static_assert(1 == (int)GrStencilOp::kZero); |
2369 | static_assert(2 == (int)GrStencilOp::kReplace); |
2370 | static_assert(3 == (int)GrStencilOp::kInvert); |
2371 | static_assert(4 == (int)GrStencilOp::kIncWrap); |
2372 | static_assert(5 == (int)GrStencilOp::kDecWrap); |
2373 | static_assert(6 == (int)GrStencilOp::kIncClamp); |
2374 | static_assert(7 == (int)GrStencilOp::kDecClamp); |
2375 | SkASSERT(op < (GrStencilOp)kGrStencilOpCount); |
2376 | return gTable[(int)op]; |
2377 | } |
2378 | |
2379 | void set_gl_stencil(const GrGLInterface* gl, |
2380 | const GrStencilSettings::Face& face, |
2381 | GrGLenum glFace) { |
2382 | GrGLenum glFunc = GrToGLStencilFunc(face.fTest); |
2383 | GrGLenum glFailOp = gr_to_gl_stencil_op(face.fFailOp); |
2384 | GrGLenum glPassOp = gr_to_gl_stencil_op(face.fPassOp); |
2385 | |
2386 | GrGLint ref = face.fRef; |
2387 | GrGLint mask = face.fTestMask; |
2388 | GrGLint writeMask = face.fWriteMask; |
2389 | |
2390 | if (GR_GL_FRONT_AND_BACK == glFace) { |
2391 | // we call the combined func just in case separate stencil is not |
2392 | // supported. |
2393 | GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask)); |
2394 | GR_GL_CALL(gl, StencilMask(writeMask)); |
2395 | GR_GL_CALL(gl, StencilOp(glFailOp, GR_GL_KEEP, glPassOp)); |
2396 | } else { |
2397 | GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask)); |
2398 | GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask)); |
2399 | GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, GR_GL_KEEP, glPassOp)); |
2400 | } |
2401 | } |
2402 | } // namespace |
2403 | |
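// Flushes stencil settings, using the separate front/back entry points for two-sided stencil and
// the combined ones otherwise. Cached settings and origin are used to skip redundant GL calls.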
2404 | void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings, GrSurfaceOrigin origin) { |
2405 | if (stencilSettings.isDisabled()) { |
2406 | this->disableStencil(); |
2407 | } else if (fHWStencilSettings != stencilSettings || |
2408 | (stencilSettings.isTwoSided() && fHWStencilOrigin != origin)) { |
2409 | if (kYes_TriState != fHWStencilTestEnabled) { |
2410 | GL_CALL(Enable(GR_GL_STENCIL_TEST)); |
2411 | |
2412 | fHWStencilTestEnabled = kYes_TriState; |
2413 | } |
2414 | if (!stencilSettings.isTwoSided()) { |
2415 | set_gl_stencil(this->glInterface(), stencilSettings.singleSidedFace(), |
2416 | GR_GL_FRONT_AND_BACK); |
2417 | } else { |
2418 | set_gl_stencil(this->glInterface(), stencilSettings.postOriginCWFace(origin), |
2419 | GR_GL_FRONT); |
2420 | set_gl_stencil(this->glInterface(), stencilSettings.postOriginCCWFace(origin), |
2421 | GR_GL_BACK); |
2422 | } |
2423 | fHWStencilSettings = stencilSettings; |
2424 | fHWStencilOrigin = origin; |
2425 | } |
2426 | } |
2427 | |
2428 | void GrGLGpu::disableStencil() { |
2429 | if (kNo_TriState != fHWStencilTestEnabled) { |
2430 | GL_CALL(Disable(GR_GL_STENCIL_TEST)); |
2431 | |
2432 | fHWStencilTestEnabled = kNo_TriState; |
2433 | fHWStencilSettings.invalidate(); |
2434 | } |
2435 | } |
2436 | |
2437 | void GrGLGpu::flushHWAAState(GrRenderTarget* rt, bool useHWAA) { |
2438 | // rt is only optional if useHWAA is false. |
2439 | SkASSERT(rt || !useHWAA); |
2440 | #ifdef SK_DEBUG |
2441 | if (useHWAA && rt->numSamples() <= 1) { |
2442 | SkASSERT(this->caps()->mixedSamplesSupport()); |
2443 | SkASSERT(0 != static_cast<GrGLRenderTarget*>(rt)->renderFBOID()); |
2444 | SkASSERT(rt->getStencilAttachment()); |
2445 | } |
2446 | #endif |
2447 | |
2448 | if (this->caps()->multisampleDisableSupport()) { |
2449 | if (useHWAA) { |
2450 | if (kYes_TriState != fMSAAEnabled) { |
2451 | GL_CALL(Enable(GR_GL_MULTISAMPLE)); |
2452 | fMSAAEnabled = kYes_TriState; |
2453 | } |
2454 | } else { |
2455 | if (kNo_TriState != fMSAAEnabled) { |
2456 | GL_CALL(Disable(GR_GL_MULTISAMPLE)); |
2457 | fMSAAEnabled = kNo_TriState; |
2458 | } |
2459 | } |
2460 | } |
2461 | } |
2462 | |
2463 | void GrGLGpu::flushConservativeRasterState(bool enabled) { |
2464 | if (this->caps()->conservativeRasterSupport()) { |
2465 | if (enabled) { |
2466 | if (kYes_TriState != fHWConservativeRasterEnabled) { |
2467 | GL_CALL(Enable(GR_GL_CONSERVATIVE_RASTERIZATION)); |
2468 | fHWConservativeRasterEnabled = kYes_TriState; |
2469 | } |
2470 | } else { |
2471 | if (kNo_TriState != fHWConservativeRasterEnabled) { |
2472 | GL_CALL(Disable(GR_GL_CONSERVATIVE_RASTERIZATION)); |
2473 | fHWConservativeRasterEnabled = kNo_TriState; |
2474 | } |
2475 | } |
2476 | } |
2477 | } |
2478 | |
2479 | void GrGLGpu::flushWireframeState(bool enabled) { |
2480 | if (this->caps()->wireframeSupport()) { |
2481 | if (this->caps()->wireframeMode() || enabled) { |
2482 | if (kYes_TriState != fHWWireframeEnabled) { |
2483 | GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_LINE)); |
2484 | fHWWireframeEnabled = kYes_TriState; |
2485 | } |
2486 | } else { |
2487 | if (kNo_TriState != fHWWireframeEnabled) { |
2488 | GL_CALL(PolygonMode(GR_GL_FRONT_AND_BACK, GR_GL_FILL)); |
2489 | fHWWireframeEnabled = kNo_TriState; |
2490 | } |
2491 | } |
2492 | } |
2493 | } |
2494 | |
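// Flushes blend state and the color write mask. On drivers that cannot disable color writes, a
// disabled write is emulated with a (zero, one) blend so the destination color is preserved.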
2495 | void GrGLGpu::flushBlendAndColorWrite( |
2496 | const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle& swizzle) { |
2497 | if (this->glCaps().neverDisableColorWrites() && !blendInfo.fWriteColor) { |
2498 | // We need to work around a driver bug by using a blend state that preserves the dst color, |
2499 | // rather than disabling color writes. |
2500 | GrXferProcessor::BlendInfo preserveDstBlend; |
2501 | preserveDstBlend.fSrcBlend = kZero_GrBlendCoeff; |
2502 | preserveDstBlend.fDstBlend = kOne_GrBlendCoeff; |
2503 | this->flushBlendAndColorWrite(preserveDstBlend, swizzle); |
2504 | return; |
2505 | } |
2506 | |
2507 | GrBlendEquation equation = blendInfo.fEquation; |
2508 | GrBlendCoeff srcCoeff = blendInfo.fSrcBlend; |
2509 | GrBlendCoeff dstCoeff = blendInfo.fDstBlend; |
2510 | |
2511 | // Any optimization to disable blending should have already been applied and |
2512 | // tweaked the equation to "add" or "subtract", and the coeffs to (1, 0). |
2513 | bool blendOff = GrBlendShouldDisable(equation, srcCoeff, dstCoeff) || |
2514 | !blendInfo.fWriteColor; |
2515 | |
2516 | if (blendOff) { |
2517 | if (kNo_TriState != fHWBlendState.fEnabled) { |
2518 | GL_CALL(Disable(GR_GL_BLEND)); |
2519 | |
2520 | // Workaround for the ARM KHR_blend_equation_advanced disable flags issue |
2521 | // https://code.google.com/p/skia/issues/detail?id=3943 |
2522 | if (kARM_GrGLVendor == this->ctxInfo().vendor() && |
2523 | GrBlendEquationIsAdvanced(fHWBlendState.fEquation)) { |
2524 | SkASSERT(this->caps()->advancedBlendEquationSupport()); |
2525 | // Set to any basic blending equation. |
2526 | GrBlendEquation blend_equation = kAdd_GrBlendEquation; |
2527 | GL_CALL(BlendEquation(gXfermodeEquation2Blend[blend_equation])); |
2528 | fHWBlendState.fEquation = blend_equation; |
2529 | } |
2530 | |
2531 | fHWBlendState.fEnabled = kNo_TriState; |
2532 | } |
2533 | } else { |
2534 | if (kYes_TriState != fHWBlendState.fEnabled) { |
2535 | GL_CALL(Enable(GR_GL_BLEND)); |
2536 | |
2537 | fHWBlendState.fEnabled = kYes_TriState; |
2538 | } |
2539 | |
2540 | if (fHWBlendState.fEquation != equation) { |
2541 | GL_CALL(BlendEquation(gXfermodeEquation2Blend[equation])); |
2542 | fHWBlendState.fEquation = equation; |
2543 | } |
2544 | |
2545 | if (GrBlendEquationIsAdvanced(equation)) { |
2546 | SkASSERT(this->caps()->advancedBlendEquationSupport()); |
2547 | // Advanced equations have no other blend state. |
2548 | return; |
2549 | } |
2550 | |
2551 | if (fHWBlendState.fSrcCoeff != srcCoeff || fHWBlendState.fDstCoeff != dstCoeff) { |
2552 | GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff], |
2553 | gXfermodeCoeff2Blend[dstCoeff])); |
2554 | fHWBlendState.fSrcCoeff = srcCoeff; |
2555 | fHWBlendState.fDstCoeff = dstCoeff; |
2556 | } |
2557 | |
2558 | if ((GrBlendCoeffRefsConstant(srcCoeff) || GrBlendCoeffRefsConstant(dstCoeff))) { |
2559 | SkPMColor4f blendConst = swizzle.applyTo(blendInfo.fBlendConstant); |
2560 | if (!fHWBlendState.fConstColorValid || fHWBlendState.fConstColor != blendConst) { |
2561 | GL_CALL(BlendColor(blendConst.fR, blendConst.fG, blendConst.fB, blendConst.fA)); |
2562 | fHWBlendState.fConstColor = blendConst; |
2563 | fHWBlendState.fConstColorValid = true; |
2564 | } |
2565 | } |
2566 | } |
2567 | |
2568 | this->flushColorWrite(blendInfo.fWriteColor); |
2569 | } |
2570 | |
2571 | static void get_gl_swizzle_values(const GrSwizzle& swizzle, GrGLenum glValues[4]) { |
2572 | for (int i = 0; i < 4; ++i) { |
2573 | switch (swizzle[i]) { |
2574 | case 'r': glValues[i] = GR_GL_RED; break; |
2575 | case 'g': glValues[i] = GR_GL_GREEN; break; |
2576 | case 'b': glValues[i] = GR_GL_BLUE; break; |
2577 | case 'a': glValues[i] = GR_GL_ALPHA; break; |
2578 | case '0': glValues[i] = GR_GL_ZERO; break; |
2579 | case '1': glValues[i] = GR_GL_ONE; break; |
            default: SK_ABORT("Unsupported component");
2581 | } |
2582 | } |
2583 | } |
2584 | |
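// Binds 'texture' to the given texture unit and updates any sampler or non-sampler parameters
// that differ from the values last recorded on the texture (or all of them after a reset).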
2585 | void GrGLGpu::bindTexture(int unitIdx, GrSamplerState samplerState, const GrSwizzle& swizzle, |
2586 | GrGLTexture* texture) { |
2587 | SkASSERT(texture); |
2588 | |
2589 | #ifdef SK_DEBUG |
2590 | if (!this->caps()->npotTextureTileSupport()) { |
2591 | if (samplerState.isRepeated()) { |
2592 | const int w = texture->width(); |
2593 | const int h = texture->height(); |
2594 | SkASSERT(SkIsPow2(w) && SkIsPow2(h)); |
2595 | } |
2596 | } |
2597 | #endif |
2598 | |
2599 | GrGpuResource::UniqueID textureID = texture->uniqueID(); |
2600 | GrGLenum target = texture->target(); |
2601 | if (fHWTextureUnitBindings[unitIdx].boundID(target) != textureID) { |
2602 | this->setTextureUnit(unitIdx); |
2603 | GL_CALL(BindTexture(target, texture->textureID())); |
2604 | fHWTextureUnitBindings[unitIdx].setBoundID(target, textureID); |
2605 | } |
2606 | |
2607 | if (samplerState.mipmapped() == GrMipmapped::kYes) { |
2608 | if (!this->caps()->mipmapSupport() || texture->mipmapped() == GrMipmapped::kNo) { |
2609 | samplerState.setMipmapMode(GrSamplerState::MipmapMode::kNone); |
2610 | } else { |
2611 | SkASSERT(!texture->mipmapsAreDirty()); |
2612 | } |
2613 | } |
2614 | |
2615 | auto timestamp = texture->parameters()->resetTimestamp(); |
2616 | bool setAll = timestamp < fResetTimestampForTextureParameters; |
2617 | const GrGLTextureParameters::SamplerOverriddenState* samplerStateToRecord = nullptr; |
2618 | GrGLTextureParameters::SamplerOverriddenState newSamplerState; |
2619 | if (fSamplerObjectCache) { |
2620 | fSamplerObjectCache->bindSampler(unitIdx, samplerState); |
2621 | if (this->glCaps().mustSetAnyTexParameterToEnableMipmapping()) { |
2622 | if (samplerState.mipmapped() == GrMipmapped::kYes) { |
2623 | GrGLenum minFilter = filter_to_gl_min_filter(samplerState.filter(), |
2624 | samplerState.mipmapMode()); |
2625 | const GrGLTextureParameters::SamplerOverriddenState& oldSamplerState = |
2626 | texture->parameters()->samplerOverriddenState(); |
2627 | this->setTextureUnit(unitIdx); |
2628 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, minFilter)); |
2629 | newSamplerState = oldSamplerState; |
2630 | newSamplerState.fMinFilter = minFilter; |
2631 | samplerStateToRecord = &newSamplerState; |
2632 | } |
2633 | } |
2634 | } else { |
2635 | const GrGLTextureParameters::SamplerOverriddenState& oldSamplerState = |
2636 | texture->parameters()->samplerOverriddenState(); |
2637 | samplerStateToRecord = &newSamplerState; |
2638 | |
2639 | newSamplerState.fMinFilter = filter_to_gl_min_filter(samplerState.filter(), |
2640 | samplerState.mipmapMode()); |
2641 | newSamplerState.fMagFilter = filter_to_gl_mag_filter(samplerState.filter()); |
2642 | |
2643 | newSamplerState.fWrapS = wrap_mode_to_gl_wrap(samplerState.wrapModeX(), this->glCaps()); |
2644 | newSamplerState.fWrapT = wrap_mode_to_gl_wrap(samplerState.wrapModeY(), this->glCaps()); |
2645 | |
2646 | // These are the OpenGL default values. |
2647 | newSamplerState.fMinLOD = -1000.f; |
2648 | newSamplerState.fMaxLOD = 1000.f; |
2649 | |
2650 | if (setAll || newSamplerState.fMagFilter != oldSamplerState.fMagFilter) { |
2651 | this->setTextureUnit(unitIdx); |
2652 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, newSamplerState.fMagFilter)); |
2653 | } |
2654 | if (setAll || newSamplerState.fMinFilter != oldSamplerState.fMinFilter) { |
2655 | this->setTextureUnit(unitIdx); |
2656 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, newSamplerState.fMinFilter)); |
2657 | } |
2658 | if (this->glCaps().mipmapLevelAndLodControlSupport()) { |
2659 | if (setAll || newSamplerState.fMinLOD != oldSamplerState.fMinLOD) { |
2660 | this->setTextureUnit(unitIdx); |
2661 | GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MIN_LOD, newSamplerState.fMinLOD)); |
2662 | } |
2663 | if (setAll || newSamplerState.fMaxLOD != oldSamplerState.fMaxLOD) { |
2664 | this->setTextureUnit(unitIdx); |
2665 | GL_CALL(TexParameterf(target, GR_GL_TEXTURE_MAX_LOD, newSamplerState.fMaxLOD)); |
2666 | } |
2667 | } |
2668 | if (setAll || newSamplerState.fWrapS != oldSamplerState.fWrapS) { |
2669 | this->setTextureUnit(unitIdx); |
2670 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_S, newSamplerState.fWrapS)); |
2671 | } |
2672 | if (setAll || newSamplerState.fWrapT != oldSamplerState.fWrapT) { |
2673 | this->setTextureUnit(unitIdx); |
2674 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_T, newSamplerState.fWrapT)); |
2675 | } |
2676 | if (this->glCaps().clampToBorderSupport()) { |
2677 | // Make sure the border color is transparent black (the default) |
2678 | if (setAll || oldSamplerState.fBorderColorInvalid) { |
2679 | this->setTextureUnit(unitIdx); |
2680 | static const GrGLfloat kTransparentBlack[4] = {0.f, 0.f, 0.f, 0.f}; |
2681 | GL_CALL(TexParameterfv(target, GR_GL_TEXTURE_BORDER_COLOR, kTransparentBlack)); |
2682 | } |
2683 | } |
2684 | } |
2685 | GrGLTextureParameters::NonsamplerState newNonsamplerState; |
2686 | newNonsamplerState.fBaseMipMapLevel = 0; |
2687 | newNonsamplerState.fMaxMipmapLevel = texture->maxMipmapLevel(); |
2688 | |
2689 | const GrGLTextureParameters::NonsamplerState& oldNonsamplerState = |
2690 | texture->parameters()->nonsamplerState(); |
2691 | if (!this->caps()->shaderCaps()->textureSwizzleAppliedInShader()) { |
2692 | newNonsamplerState.fSwizzleKey = swizzle.asKey(); |
2693 | if (setAll || swizzle.asKey() != oldNonsamplerState.fSwizzleKey) { |
2694 | GrGLenum glValues[4]; |
2695 | get_gl_swizzle_values(swizzle, glValues); |
2696 | this->setTextureUnit(unitIdx); |
2697 | if (GR_IS_GR_GL(this->glStandard())) { |
2698 | static_assert(sizeof(glValues[0]) == sizeof(GrGLint)); |
2699 | GL_CALL(TexParameteriv(target, GR_GL_TEXTURE_SWIZZLE_RGBA, |
2700 | reinterpret_cast<const GrGLint*>(glValues))); |
2701 | } else if (GR_IS_GR_GL_ES(this->glStandard())) { |
2702 | // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA. |
2703 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_R, glValues[0])); |
2704 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_G, glValues[1])); |
2705 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_B, glValues[2])); |
2706 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_A, glValues[3])); |
2707 | } |
2708 | } |
2709 | } |
2710 | // These are not supported in ES2 contexts |
2711 | if (this->glCaps().mipmapLevelAndLodControlSupport() && |
2712 | (texture->textureType() != GrTextureType::kExternal || |
2713 | !this->glCaps().dontSetBaseOrMaxLevelForExternalTextures())) { |
2714 | if (newNonsamplerState.fBaseMipMapLevel != oldNonsamplerState.fBaseMipMapLevel) { |
2715 | this->setTextureUnit(unitIdx); |
2716 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL, |
2717 | newNonsamplerState.fBaseMipMapLevel)); |
2718 | } |
2719 | if (newNonsamplerState.fMaxMipmapLevel != oldNonsamplerState.fMaxMipmapLevel) { |
2720 | this->setTextureUnit(unitIdx); |
2721 | GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL, |
2722 | newNonsamplerState.fMaxMipmapLevel)); |
2723 | } |
2724 | } |
2725 | texture->parameters()->set(samplerStateToRecord, newNonsamplerState, |
2726 | fResetTimestampForTextureParameters); |
2727 | } |
2728 | |
2729 | void GrGLGpu::onResetTextureBindings() { |
2730 | static constexpr GrGLenum kTargets[] = {GR_GL_TEXTURE_2D, GR_GL_TEXTURE_RECTANGLE, |
2731 | GR_GL_TEXTURE_EXTERNAL}; |
2732 | for (int i = 0; i < this->numTextureUnits(); ++i) { |
2733 | this->setTextureUnit(i); |
2734 | for (auto target : kTargets) { |
2735 | if (fHWTextureUnitBindings[i].hasBeenModified(target)) { |
2736 | GL_CALL(BindTexture(target, 0)); |
2737 | } |
2738 | } |
2739 | fHWTextureUnitBindings[i].invalidateAllTargets(true); |
2740 | } |
2741 | } |
2742 | |
2743 | void GrGLGpu::flushPatchVertexCount(uint8_t count) { |
2744 | SkASSERT(this->caps()->shaderCaps()->tessellationSupport()); |
2745 | if (fHWPatchVertexCount != count) { |
2746 | GL_CALL(PatchParameteri(GR_GL_PATCH_VERTICES, count)); |
2747 | fHWPatchVertexCount = count; |
2748 | } |
2749 | } |
2750 | |
2751 | void GrGLGpu::flushColorWrite(bool writeColor) { |
2752 | if (!writeColor) { |
2753 | if (kNo_TriState != fHWWriteToColor) { |
2754 | GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE, |
2755 | GR_GL_FALSE, GR_GL_FALSE)); |
2756 | fHWWriteToColor = kNo_TriState; |
2757 | } |
2758 | } else { |
2759 | if (kYes_TriState != fHWWriteToColor) { |
2760 | GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE)); |
2761 | fHWWriteToColor = kYes_TriState; |
2762 | } |
2763 | } |
2764 | } |
2765 | |
2766 | void GrGLGpu::flushClearColor(const SkPMColor4f& color) { |
2767 | GrGLfloat r = color.fR, g = color.fG, b = color.fB, a = color.fA; |
2768 | if (this->glCaps().clearToBoundaryValuesIsBroken() && |
2769 | (1 == r || 0 == r) && (1 == g || 0 == g) && (1 == b || 0 == b) && (1 == a || 0 == a)) { |
2770 | static const GrGLfloat safeAlpha1 = nextafter(1.f, 2.f); |
2771 | static const GrGLfloat safeAlpha0 = nextafter(0.f, -1.f); |
2772 | a = (1 == a) ? safeAlpha1 : safeAlpha0; |
2773 | } |
2774 | if (r != fHWClearColor[0] || g != fHWClearColor[1] || |
2775 | b != fHWClearColor[2] || a != fHWClearColor[3]) { |
2776 | GL_CALL(ClearColor(r, g, b, a)); |
2777 | fHWClearColor[0] = r; |
2778 | fHWClearColor[1] = g; |
2779 | fHWClearColor[2] = b; |
2780 | fHWClearColor[3] = a; |
2781 | } |
2782 | } |
2783 | |
2784 | void GrGLGpu::setTextureUnit(int unit) { |
2785 | SkASSERT(unit >= 0 && unit < this->numTextureUnits()); |
2786 | if (unit != fHWActiveTextureUnitIdx) { |
2787 | GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit)); |
2788 | fHWActiveTextureUnitIdx = unit; |
2789 | } |
2790 | } |
2791 | |
2792 | void GrGLGpu::bindTextureToScratchUnit(GrGLenum target, GrGLint textureID) { |
2793 | // Bind the last texture unit since it is the least likely to be used by GrGLProgram. |
2794 | int lastUnitIdx = this->numTextureUnits() - 1; |
2795 | if (lastUnitIdx != fHWActiveTextureUnitIdx) { |
2796 | GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx)); |
2797 | fHWActiveTextureUnitIdx = lastUnitIdx; |
2798 | } |
    // Clear out this unit's cached binding so that if a GrGLProgram does use this unit it will
    // rebind the correct texture.
2801 | fHWTextureUnitBindings[lastUnitIdx].invalidateForScratchUse(target); |
2802 | GL_CALL(BindTexture(target, textureID)); |
2803 | } |
2804 | |
2805 | // Determines whether glBlitFramebuffer could be used between src and dst by onCopySurface. |
2806 | static inline bool can_blit_framebuffer_for_copy_surface(const GrSurface* dst, |
2807 | const GrSurface* src, |
2808 | const SkIRect& srcRect, |
2809 | const SkIPoint& dstPoint, |
2810 | const GrGLCaps& caps) { |
2811 | int dstSampleCnt = 0; |
2812 | int srcSampleCnt = 0; |
2813 | if (const GrRenderTarget* rt = dst->asRenderTarget()) { |
2814 | dstSampleCnt = rt->numSamples(); |
2815 | } |
2816 | if (const GrRenderTarget* rt = src->asRenderTarget()) { |
2817 | srcSampleCnt = rt->numSamples(); |
2818 | } |
2819 | SkASSERT((dstSampleCnt > 0) == SkToBool(dst->asRenderTarget())); |
2820 | SkASSERT((srcSampleCnt > 0) == SkToBool(src->asRenderTarget())); |
2821 | |
2822 | GrGLFormat dstFormat = dst->backendFormat().asGLFormat(); |
2823 | GrGLFormat srcFormat = src->backendFormat().asGLFormat(); |
2824 | |
2825 | const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture()); |
2826 | const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture()); |
2827 | |
2828 | GrTextureType dstTexType; |
2829 | GrTextureType* dstTexTypePtr = nullptr; |
2830 | GrTextureType srcTexType; |
2831 | GrTextureType* srcTexTypePtr = nullptr; |
2832 | if (dstTex) { |
2833 | dstTexType = dstTex->textureType(); |
2834 | dstTexTypePtr = &dstTexType; |
2835 | } |
2836 | if (srcTex) { |
2837 | srcTexType = srcTex->textureType(); |
2838 | srcTexTypePtr = &srcTexType; |
2839 | } |
2840 | |
2841 | return caps.canCopyAsBlit(dstFormat, dstSampleCnt, dstTexTypePtr, |
2842 | srcFormat, srcSampleCnt, srcTexTypePtr, |
2843 | src->getBoundsRect(), true, srcRect, dstPoint); |
2844 | } |
2845 | |
2846 | static bool rt_has_msaa_render_buffer(const GrGLRenderTarget* rt, const GrGLCaps& glCaps) { |
    // An RT has a separate MSAA renderbuffer if:
2848 | // 1) It's multisampled |
2849 | // 2) We're using an extension with separate MSAA renderbuffers |
2850 | // 3) It's not FBO 0, which is special and always auto-resolves |
2851 | return rt->numSamples() > 1 && glCaps.usesMSAARenderBuffers() && rt->renderFBOID() != 0; |
2852 | } |
2853 | |
2854 | static inline bool can_copy_texsubimage(const GrSurface* dst, const GrSurface* src, |
2855 | const GrGLCaps& caps) { |
2856 | |
2857 | const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget()); |
2858 | const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget()); |
2859 | const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture()); |
2860 | const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture()); |
2861 | |
2862 | bool dstHasMSAARenderBuffer = dstRT ? rt_has_msaa_render_buffer(dstRT, caps) : false; |
2863 | bool srcHasMSAARenderBuffer = srcRT ? rt_has_msaa_render_buffer(srcRT, caps) : false; |
2864 | |
2865 | GrGLFormat dstFormat = dst->backendFormat().asGLFormat(); |
2866 | GrGLFormat srcFormat = src->backendFormat().asGLFormat(); |
2867 | |
2868 | GrTextureType dstTexType; |
2869 | GrTextureType* dstTexTypePtr = nullptr; |
2870 | GrTextureType srcTexType; |
2871 | GrTextureType* srcTexTypePtr = nullptr; |
2872 | if (dstTex) { |
2873 | dstTexType = dstTex->textureType(); |
2874 | dstTexTypePtr = &dstTexType; |
2875 | } |
2876 | if (srcTex) { |
2877 | srcTexType = srcTex->textureType(); |
2878 | srcTexTypePtr = &srcTexType; |
2879 | } |
2880 | |
2881 | return caps.canCopyTexSubImage(dstFormat, dstHasMSAARenderBuffer, dstTexTypePtr, |
2882 | srcFormat, srcHasMSAARenderBuffer, srcTexTypePtr); |
2883 | } |
2884 | |
// Binds the surface's own FBO when possible. Otherwise attaches the surface's texture (at the
// requested mip level) to a lazily created temporary FBO and binds that.
2886 | void GrGLGpu::bindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget, |
2887 | TempFBOTarget tempFBOTarget) { |
2888 | GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget()); |
2889 | if (!rt || mipLevel > 0) { |
2890 | SkASSERT(surface->asTexture()); |
2891 | GrGLTexture* texture = static_cast<GrGLTexture*>(surface->asTexture()); |
2892 | GrGLuint texID = texture->textureID(); |
2893 | GrGLenum target = texture->target(); |
2894 | GrGLuint* tempFBOID; |
2895 | tempFBOID = kSrc_TempFBOTarget == tempFBOTarget ? &fTempSrcFBOID : &fTempDstFBOID; |
2896 | |
2897 | if (0 == *tempFBOID) { |
2898 | GR_GL_CALL(this->glInterface(), GenFramebuffers(1, tempFBOID)); |
2899 | } |
2900 | |
2901 | this->bindFramebuffer(fboTarget, *tempFBOID); |
2902 | GR_GL_CALL( |
2903 | this->glInterface(), |
2904 | FramebufferTexture2D(fboTarget, GR_GL_COLOR_ATTACHMENT0, target, texID, mipLevel)); |
2905 | if (mipLevel == 0) { |
2906 | texture->baseLevelWasBoundToFBO(); |
2907 | } |
2908 | } else { |
2909 | this->bindFramebuffer(fboTarget, rt->renderFBOID()); |
2910 | } |
2911 | } |
2912 | |
2913 | void GrGLGpu::unbindSurfaceFBOForPixelOps(GrSurface* surface, int mipLevel, GrGLenum fboTarget) { |
    // bindSurfaceFBOForPixelOps temporarily attaches textures that are not render targets (or
    // non-base mip levels) to a temporary FBO. Detach them from that FBO's color attachment here.
2915 | if (mipLevel > 0 || !surface->asRenderTarget()) { |
2916 | SkASSERT(surface->asTexture()); |
2917 | GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target(); |
2918 | GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget, |
2919 | GR_GL_COLOR_ATTACHMENT0, |
2920 | textureTarget, |
2921 | 0, |
2922 | 0)); |
2923 | } |
2924 | } |
2925 | |
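// Called whenever the FBO binding changes. Applies driver workarounds that require a forced
// flush and, in debug builds, warns if the binding changed while a command buffer is executing.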
2926 | void GrGLGpu::onFBOChanged() { |
2927 | if (this->caps()->workarounds().flush_on_framebuffer_change || |
2928 | this->caps()->workarounds().restore_scissor_on_fbo_change) { |
2929 | this->flush(FlushType::kForce); |
2930 | } |
2931 | #ifdef SK_DEBUG |
2932 | if (fIsExecutingCommandBuffer_DebugOnly) { |
        SkDebugf("WARNING: GL FBO binding changed while executing a command buffer. "
                 "This will severely hurt performance.\n");
2935 | } |
2936 | #endif |
2937 | } |
2938 | |
2939 | void GrGLGpu::bindFramebuffer(GrGLenum target, GrGLuint fboid) { |
2940 | fStats.incRenderTargetBinds(); |
2941 | GL_CALL(BindFramebuffer(target, fboid)); |
2942 | if (target == GR_GL_FRAMEBUFFER || target == GR_GL_DRAW_FRAMEBUFFER) { |
2943 | fBoundDrawFramebuffer = fboid; |
2944 | } |
2945 | |
2946 | if (this->caps()->workarounds().restore_scissor_on_fbo_change) { |
2947 | // The driver forgets the correct scissor when modifying the FBO binding. |
2948 | if (!fHWScissorSettings.fRect.isInvalid()) { |
2949 | const GrNativeRect& r = fHWScissorSettings.fRect; |
2950 | GL_CALL(Scissor(r.fX, r.fY, r.fWidth, r.fHeight)); |
2951 | } |
2952 | } |
2953 | |
2954 | this->onFBOChanged(); |
2955 | } |
2956 | |
2957 | void GrGLGpu::deleteFramebuffer(GrGLuint fboid) { |
2958 | // We're relying on the GL state shadowing being correct in the workaround code below so we |
2959 | // need to handle a dirty context. |
2960 | this->handleDirtyContext(); |
2961 | if (fboid == fBoundDrawFramebuffer && |
2962 | this->caps()->workarounds().unbind_attachments_on_bound_render_fbo_delete) { |
2963 | // This workaround only applies to deleting currently bound framebuffers |
2964 | // on Adreno 420. Because this is a somewhat rare case, instead of |
        // tracking all the attachments of every framebuffer we just always
2966 | // unbind all attachments. |
2967 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, |
2968 | GR_GL_RENDERBUFFER, 0)); |
2969 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, |
2970 | GR_GL_RENDERBUFFER, 0)); |
2971 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT, |
2972 | GR_GL_RENDERBUFFER, 0)); |
2973 | } |
2974 | |
2975 | GL_CALL(DeleteFramebuffers(1, &fboid)); |
2976 | |
2977 | // Deleting the currently bound framebuffer rebinds to 0. |
2978 | if (fboid == fBoundDrawFramebuffer) { |
2979 | this->onFBOChanged(); |
2980 | } |
2981 | } |
2982 | |
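// Copy strategy: prefer a draw when the dst already has an FBO, then try CopyTexSubImage2D,
// then BlitFramebuffer, and finally fall back to a draw even if dst needs a temporary FBO.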
2983 | bool GrGLGpu::onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect, |
2984 | const SkIPoint& dstPoint) { |
2985 | // Don't prefer copying as a draw if the dst doesn't already have a FBO object. |
2986 | // This implicitly handles this->glCaps().useDrawInsteadOfAllRenderTargetWrites(). |
2987 | bool preferCopy = SkToBool(dst->asRenderTarget()); |
2988 | auto dstFormat = dst->backendFormat().asGLFormat(); |
2989 | if (preferCopy && this->glCaps().canCopyAsDraw(dstFormat, SkToBool(src->asTexture()))) { |
2990 | if (this->copySurfaceAsDraw(dst, src, srcRect, dstPoint)) { |
2991 | return true; |
2992 | } |
2993 | } |
2994 | |
2995 | if (can_copy_texsubimage(dst, src, this->glCaps())) { |
2996 | this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstPoint); |
2997 | return true; |
2998 | } |
2999 | |
3000 | if (can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstPoint, this->glCaps())) { |
3001 | return this->copySurfaceAsBlitFramebuffer(dst, src, srcRect, dstPoint); |
3002 | } |
3003 | |
3004 | if (!preferCopy && this->glCaps().canCopyAsDraw(dstFormat, SkToBool(src->asTexture()))) { |
3005 | if (this->copySurfaceAsDraw(dst, src, srcRect, dstPoint)) { |
3006 | return true; |
3007 | } |
3008 | } |
3009 | |
3010 | return false; |
3011 | } |
3012 | |
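// Lazily builds the copy program for the sampler type required by srcTex (2D, rectangle, or
// external). The program draws a unit-quad triangle strip; u_posXform maps the quad into NDC
// and u_texCoordXform maps it into source texture coordinates.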
3013 | bool GrGLGpu::createCopyProgram(GrTexture* srcTex) { |
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
3015 | |
3016 | int progIdx = TextureToCopyProgramIdx(srcTex); |
3017 | const GrShaderCaps* shaderCaps = this->caps()->shaderCaps(); |
3018 | GrSLType samplerType = GrSLCombinedSamplerTypeForTextureType(srcTex->textureType()); |
3019 | |
3020 | if (!fCopyProgramArrayBuffer) { |
3021 | static const GrGLfloat vdata[] = { |
3022 | 0, 0, |
3023 | 0, 1, |
3024 | 1, 0, |
3025 | 1, 1 |
3026 | }; |
3027 | fCopyProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), GrGpuBufferType::kVertex, |
3028 | kStatic_GrAccessPattern, vdata); |
3029 | } |
3030 | if (!fCopyProgramArrayBuffer) { |
3031 | return false; |
3032 | } |
3033 | |
3034 | SkASSERT(!fCopyPrograms[progIdx].fProgram); |
3035 | GL_CALL_RET(fCopyPrograms[progIdx].fProgram, CreateProgram()); |
3036 | if (!fCopyPrograms[progIdx].fProgram) { |
3037 | return false; |
3038 | } |
3039 | |
    GrShaderVar aVertex("a_vertex", kHalf2_GrSLType, GrShaderVar::TypeModifier::In);
    GrShaderVar uTexCoordXform("u_texCoordXform", kHalf4_GrSLType,
                               GrShaderVar::TypeModifier::Uniform);
    GrShaderVar uPosXform("u_posXform", kHalf4_GrSLType, GrShaderVar::TypeModifier::Uniform);
    GrShaderVar uTexture("u_texture", samplerType, GrShaderVar::TypeModifier::Uniform);
    GrShaderVar vTexCoord("v_texCoord", kHalf2_GrSLType, GrShaderVar::TypeModifier::Out);
    GrShaderVar oFragColor("o_FragColor", kHalf4_GrSLType, GrShaderVar::TypeModifier::Out);
3047 | |
3048 | SkString vshaderTxt; |
3049 | if (shaderCaps->noperspectiveInterpolationSupport()) { |
3050 | if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) { |
            vshaderTxt.appendf("#extension %s : require\n", extension);
        }
        vTexCoord.addModifier("noperspective");
    }

    aVertex.appendDecl(shaderCaps, &vshaderTxt);
    vshaderTxt.append(";");
    uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
    vshaderTxt.append(";");
    uPosXform.appendDecl(shaderCaps, &vshaderTxt);
    vshaderTxt.append(";");
    vTexCoord.appendDecl(shaderCaps, &vshaderTxt);
    vshaderTxt.append(";");
3064 | |
3065 | vshaderTxt.append( |
3066 | "// Copy Program VS\n" |
3067 | "void main() {" |
3068 | " v_texCoord = half2(a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw);" |
3069 | " sk_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;" |
3070 | " sk_Position.zw = half2(0, 1);" |
3071 | "}" |
3072 | ); |
3073 | |
3074 | SkString fshaderTxt; |
3075 | if (shaderCaps->noperspectiveInterpolationSupport()) { |
3076 | if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) { |
            fshaderTxt.appendf("#extension %s : require\n", extension);
        }
    }
    vTexCoord.setTypeModifier(GrShaderVar::TypeModifier::In);
    vTexCoord.appendDecl(shaderCaps, &fshaderTxt);
    fshaderTxt.append(";");
    uTexture.appendDecl(shaderCaps, &fshaderTxt);
    fshaderTxt.append(";");
3085 | fshaderTxt.appendf( |
3086 | "// Copy Program FS\n" |
3087 | "void main() {" |
3088 | " sk_FragColor = sample(u_texture, v_texCoord);" |
3089 | "}" |
3090 | ); |
3091 | |
3092 | auto errorHandler = this->getContext()->priv().getShaderErrorHandler(); |
3093 | SkSL::String sksl(vshaderTxt.c_str(), vshaderTxt.size()); |
3094 | SkSL::Program::Settings settings; |
3095 | settings.fCaps = shaderCaps; |
3096 | SkSL::String glsl; |
3097 | std::unique_ptr<SkSL::Program> program = GrSkSLtoGLSL(*fGLContext, SkSL::Program::kVertex_Kind, |
3098 | sksl, settings, &glsl, errorHandler); |
3099 | GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram, |
3100 | GR_GL_VERTEX_SHADER, glsl, &fStats, errorHandler); |
3101 | SkASSERT(program->fInputs.isEmpty()); |
3102 | |
3103 | sksl.assign(fshaderTxt.c_str(), fshaderTxt.size()); |
3104 | program = GrSkSLtoGLSL(*fGLContext, SkSL::Program::kFragment_Kind, sksl, settings, &glsl, |
3105 | errorHandler); |
3106 | GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram, |
3107 | GR_GL_FRAGMENT_SHADER, glsl, &fStats, |
3108 | errorHandler); |
3109 | SkASSERT(program->fInputs.isEmpty()); |
3110 | |
3111 | GL_CALL(LinkProgram(fCopyPrograms[progIdx].fProgram)); |
3112 | |
    GL_CALL_RET(fCopyPrograms[progIdx].fTextureUniform,
                GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texture"));
    GL_CALL_RET(fCopyPrograms[progIdx].fPosXformUniform,
                GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_posXform"));
    GL_CALL_RET(fCopyPrograms[progIdx].fTexCoordXformUniform,
                GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texCoordXform"));

    GL_CALL(BindAttribLocation(fCopyPrograms[progIdx].fProgram, 0, "a_vertex"));
3121 | |
3122 | GL_CALL(DeleteShader(vshader)); |
3123 | GL_CALL(DeleteShader(fshader)); |
3124 | |
3125 | return true; |
3126 | } |
3127 | |
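// progIdx encodes the parity of the source level's dimensions: bit 1 is odd width and bit 0 is
// odd height. The downsample filter uses 1, 2, or 4 taps accordingly.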
3128 | bool GrGLGpu::createMipmapProgram(int progIdx) { |
3129 | const bool oddWidth = SkToBool(progIdx & 0x2); |
3130 | const bool oddHeight = SkToBool(progIdx & 0x1); |
3131 | const int numTaps = (oddWidth ? 2 : 1) * (oddHeight ? 2 : 1); |
3132 | |
3133 | const GrShaderCaps* shaderCaps = this->caps()->shaderCaps(); |
3134 | |
3135 | SkASSERT(!fMipmapPrograms[progIdx].fProgram); |
3136 | GL_CALL_RET(fMipmapPrograms[progIdx].fProgram, CreateProgram()); |
3137 | if (!fMipmapPrograms[progIdx].fProgram) { |
3138 | return false; |
3139 | } |
3140 | |
    GrShaderVar aVertex("a_vertex", kHalf2_GrSLType, GrShaderVar::TypeModifier::In);
    GrShaderVar uTexCoordXform("u_texCoordXform", kHalf4_GrSLType,
                               GrShaderVar::TypeModifier::Uniform);
    GrShaderVar uTexture("u_texture", kTexture2DSampler_GrSLType,
                         GrShaderVar::TypeModifier::Uniform);
    // We need 1, 2, or 4 texture coordinates (depending on parity of each dimension):
    GrShaderVar vTexCoords[] = {
        GrShaderVar("v_texCoord0", kHalf2_GrSLType, GrShaderVar::TypeModifier::Out),
        GrShaderVar("v_texCoord1", kHalf2_GrSLType, GrShaderVar::TypeModifier::Out),
        GrShaderVar("v_texCoord2", kHalf2_GrSLType, GrShaderVar::TypeModifier::Out),
        GrShaderVar("v_texCoord3", kHalf2_GrSLType, GrShaderVar::TypeModifier::Out),
    };
    GrShaderVar oFragColor("o_FragColor", kHalf4_GrSLType, GrShaderVar::TypeModifier::Out);
3154 | |
3155 | SkString vshaderTxt; |
3156 | if (shaderCaps->noperspectiveInterpolationSupport()) { |
3157 | if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) { |
            vshaderTxt.appendf("#extension %s : require\n", extension);
        }
        vTexCoords[0].addModifier("noperspective");
        vTexCoords[1].addModifier("noperspective");
        vTexCoords[2].addModifier("noperspective");
        vTexCoords[3].addModifier("noperspective");
    }

    aVertex.appendDecl(shaderCaps, &vshaderTxt);
    vshaderTxt.append(";");
    uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
    vshaderTxt.append(";");
    for (int i = 0; i < numTaps; ++i) {
        vTexCoords[i].appendDecl(shaderCaps, &vshaderTxt);
        vshaderTxt.append(";");
3173 | } |
3174 | |
3175 | vshaderTxt.append( |
3176 | "// Mipmap Program VS\n" |
3177 | "void main() {" |
3178 | " sk_Position.xy = a_vertex * half2(2, 2) - half2(1, 1);" |
3179 | " sk_Position.zw = half2(0, 1);" |
3180 | ); |
3181 | |
3182 | // Insert texture coordinate computation: |
3183 | if (oddWidth && oddHeight) { |
3184 | vshaderTxt.append( |
3185 | " v_texCoord0 = a_vertex.xy * u_texCoordXform.yw;" |
3186 | " v_texCoord1 = a_vertex.xy * u_texCoordXform.yw + half2(u_texCoordXform.x, 0);" |
3187 | " v_texCoord2 = a_vertex.xy * u_texCoordXform.yw + half2(0, u_texCoordXform.z);" |
3188 | " v_texCoord3 = a_vertex.xy * u_texCoordXform.yw + u_texCoordXform.xz;" |
3189 | ); |
3190 | } else if (oddWidth) { |
3191 | vshaderTxt.append( |
3192 | " v_texCoord0 = a_vertex.xy * half2(u_texCoordXform.y, 1);" |
3193 | " v_texCoord1 = a_vertex.xy * half2(u_texCoordXform.y, 1) + half2(u_texCoordXform.x, 0);" |
3194 | ); |
3195 | } else if (oddHeight) { |
3196 | vshaderTxt.append( |
3197 | " v_texCoord0 = a_vertex.xy * half2(1, u_texCoordXform.w);" |
3198 | " v_texCoord1 = a_vertex.xy * half2(1, u_texCoordXform.w) + half2(0, u_texCoordXform.z);" |
3199 | ); |
3200 | } else { |
3201 | vshaderTxt.append( |
3202 | " v_texCoord0 = a_vertex.xy;" |
3203 | ); |
3204 | } |
3205 | |
    vshaderTxt.append("}");

    SkString fshaderTxt;
    if (shaderCaps->noperspectiveInterpolationSupport()) {
        if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
            fshaderTxt.appendf("#extension %s : require\n", extension);
        }
    }
    for (int i = 0; i < numTaps; ++i) {
        vTexCoords[i].setTypeModifier(GrShaderVar::TypeModifier::In);
        vTexCoords[i].appendDecl(shaderCaps, &fshaderTxt);
        fshaderTxt.append(";");
    }
    uTexture.appendDecl(shaderCaps, &fshaderTxt);
    fshaderTxt.append(";");
3221 | fshaderTxt.append( |
3222 | "// Mipmap Program FS\n" |
3223 | "void main() {" |
3224 | ); |
3225 | |
3226 | if (oddWidth && oddHeight) { |
3227 | fshaderTxt.append( |
3228 | " sk_FragColor = (sample(u_texture, v_texCoord0) + " |
3229 | " sample(u_texture, v_texCoord1) + " |
3230 | " sample(u_texture, v_texCoord2) + " |
3231 | " sample(u_texture, v_texCoord3)) * 0.25;" |
3232 | ); |
3233 | } else if (oddWidth || oddHeight) { |
3234 | fshaderTxt.append( |
3235 | " sk_FragColor = (sample(u_texture, v_texCoord0) + " |
3236 | " sample(u_texture, v_texCoord1)) * 0.5;" |
3237 | ); |
3238 | } else { |
3239 | fshaderTxt.append( |
3240 | " sk_FragColor = sample(u_texture, v_texCoord0);" |
3241 | ); |
3242 | } |
3243 | |
    fshaderTxt.append("}");
3245 | |
3246 | auto errorHandler = this->getContext()->priv().getShaderErrorHandler(); |
3247 | SkSL::String sksl(vshaderTxt.c_str(), vshaderTxt.size()); |
3248 | SkSL::Program::Settings settings; |
3249 | settings.fCaps = shaderCaps; |
3250 | SkSL::String glsl; |
3251 | std::unique_ptr<SkSL::Program> program = GrSkSLtoGLSL(*fGLContext, SkSL::Program::kVertex_Kind, |
3252 | sksl, settings, &glsl, errorHandler); |
3253 | GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram, |
3254 | GR_GL_VERTEX_SHADER, glsl, &fStats, errorHandler); |
3255 | SkASSERT(program->fInputs.isEmpty()); |
3256 | |
3257 | sksl.assign(fshaderTxt.c_str(), fshaderTxt.size()); |
3258 | program = GrSkSLtoGLSL(*fGLContext, SkSL::Program::kFragment_Kind, sksl, settings, &glsl, |
3259 | errorHandler); |
3260 | GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram, |
3261 | GR_GL_FRAGMENT_SHADER, glsl, &fStats, |
3262 | errorHandler); |
3263 | SkASSERT(program->fInputs.isEmpty()); |
3264 | |
3265 | GL_CALL(LinkProgram(fMipmapPrograms[progIdx].fProgram)); |
3266 | |
    GL_CALL_RET(fMipmapPrograms[progIdx].fTextureUniform,
                GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texture"));
    GL_CALL_RET(fMipmapPrograms[progIdx].fTexCoordXformUniform,
                GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texCoordXform"));

    GL_CALL(BindAttribLocation(fMipmapPrograms[progIdx].fProgram, 0, "a_vertex"));
3273 | |
3274 | GL_CALL(DeleteShader(vshader)); |
3275 | GL_CALL(DeleteShader(fshader)); |
3276 | |
3277 | return true; |
3278 | } |
3279 | |
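// Copies by sampling src in a fragment shader and drawing a quad into dst's FBO (or a temporary
// FBO wrapping dst's texture when dst is not a render target).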
3280 | bool GrGLGpu::copySurfaceAsDraw(GrSurface* dst, GrSurface* src, const SkIRect& srcRect, |
3281 | const SkIPoint& dstPoint) { |
3282 | auto* srcTex = static_cast<GrGLTexture*>(src->asTexture()); |
    auto* dstTex = static_cast<GrGLTexture*>(dst->asTexture());
    auto* dstRT = static_cast<GrGLRenderTarget*>(dst->asRenderTarget());
3285 | if (!srcTex) { |
3286 | return false; |
3287 | } |
3288 | int progIdx = TextureToCopyProgramIdx(srcTex); |
3289 | if (!dstRT) { |
3290 | SkASSERT(dstTex); |
3291 | if (!this->glCaps().isFormatRenderable(dstTex->format(), 1)) { |
3292 | return false; |
3293 | } |
3294 | } |
3295 | if (!fCopyPrograms[progIdx].fProgram) { |
3296 | if (!this->createCopyProgram(srcTex)) { |
            SkDebugf("Failed to create copy program.\n");
3298 | return false; |
3299 | } |
3300 | } |
3301 | int w = srcRect.width(); |
3302 | int h = srcRect.height(); |
3303 | // We don't swizzle at all in our copies. |
3304 | this->bindTexture(0, GrSamplerState::Filter::kNearest, GrSwizzle::RGBA(), srcTex); |
3305 | this->bindSurfaceFBOForPixelOps(dst, 0, GR_GL_FRAMEBUFFER, kDst_TempFBOTarget); |
3306 | this->flushViewport(dst->width(), dst->height()); |
3307 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
3308 | SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, w, h); |
3309 | this->flushProgram(fCopyPrograms[progIdx].fProgram); |
3310 | fHWVertexArrayState.setVertexArrayID(this, 0); |
3311 | GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this); |
3312 | attribs->enableVertexArrays(this, 1); |
3313 | attribs->set(this, 0, fCopyProgramArrayBuffer.get(), kFloat2_GrVertexAttribType, |
3314 | kFloat2_GrSLType, 2 * sizeof(GrGLfloat), 0); |
3315 | // dst rect edges in NDC (-1 to 1) |
3316 | int dw = dst->width(); |
3317 | int dh = dst->height(); |
3318 | GrGLfloat dx0 = 2.f * dstPoint.fX / dw - 1.f; |
3319 | GrGLfloat dx1 = 2.f * (dstPoint.fX + w) / dw - 1.f; |
3320 | GrGLfloat dy0 = 2.f * dstPoint.fY / dh - 1.f; |
3321 | GrGLfloat dy1 = 2.f * (dstPoint.fY + h) / dh - 1.f; |
3322 | GrGLfloat sx0 = (GrGLfloat)srcRect.fLeft; |
3323 | GrGLfloat sx1 = (GrGLfloat)(srcRect.fLeft + w); |
3324 | GrGLfloat sy0 = (GrGLfloat)srcRect.fTop; |
3325 | GrGLfloat sy1 = (GrGLfloat)(srcRect.fTop + h); |
3326 | int sw = src->width(); |
3327 | int sh = src->height(); |
3328 | if (srcTex->textureType() != GrTextureType::kRectangle) { |
3329 | // src rect edges in normalized texture space (0 to 1) |
3330 | sx0 /= sw; |
3331 | sx1 /= sw; |
3332 | sy0 /= sh; |
3333 | sy1 /= sh; |
3334 | } |
3335 | GL_CALL(Uniform4f(fCopyPrograms[progIdx].fPosXformUniform, dx1 - dx0, dy1 - dy0, dx0, dy0)); |
3336 | GL_CALL(Uniform4f(fCopyPrograms[progIdx].fTexCoordXformUniform, |
3337 | sx1 - sx0, sy1 - sy0, sx0, sy0)); |
3338 | GL_CALL(Uniform1i(fCopyPrograms[progIdx].fTextureUniform, 0)); |
3339 | this->flushBlendAndColorWrite(GrXferProcessor::BlendInfo(), GrSwizzle::RGBA()); |
3340 | this->flushHWAAState(nullptr, false); |
3341 | this->flushConservativeRasterState(false); |
3342 | this->flushWireframeState(false); |
3343 | this->flushScissorTest(GrScissorTest::kDisabled); |
3344 | this->disableWindowRectangles(); |
3345 | this->disableStencil(); |
3346 | if (this->glCaps().srgbWriteControl()) { |
3347 | this->flushFramebufferSRGB(true); |
3348 | } |
3349 | GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4)); |
3350 | this->unbindSurfaceFBOForPixelOps(dst, 0, GR_GL_FRAMEBUFFER); |
3351 | // The rect is already in device space so we pass in kTopLeft so no flip is done. |
3352 | this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect); |
3353 | return true; |
3354 | } |
3355 | |
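// Copies by attaching src to a temporary FBO (or binding its own FBO) and reading from it with
// glCopyTexSubImage2D into dst's texture.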
3356 | void GrGLGpu::copySurfaceAsCopyTexSubImage(GrSurface* dst, GrSurface* src, const SkIRect& srcRect, |
3357 | const SkIPoint& dstPoint) { |
3358 | SkASSERT(can_copy_texsubimage(dst, src, this->glCaps())); |
3359 | this->bindSurfaceFBOForPixelOps(src, 0, GR_GL_FRAMEBUFFER, kSrc_TempFBOTarget); |
3360 | GrGLTexture* dstTex = static_cast<GrGLTexture *>(dst->asTexture()); |
3361 | SkASSERT(dstTex); |
3362 | // We modified the bound FBO |
3363 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
3364 | |
3365 | this->bindTextureToScratchUnit(dstTex->target(), dstTex->textureID()); |
3366 | GL_CALL(CopyTexSubImage2D(dstTex->target(), 0, |
3367 | dstPoint.fX, dstPoint.fY, |
3368 | srcRect.fLeft, srcRect.fTop, |
3369 | srcRect.width(), srcRect.height())); |
3370 | this->unbindSurfaceFBOForPixelOps(src, 0, GR_GL_FRAMEBUFFER); |
3371 | SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, |
3372 | srcRect.width(), srcRect.height()); |
3373 | // The rect is already in device space so we pass in kTopLeft so no flip is done. |
3374 | this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect); |
3375 | } |
3376 | |
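// Copies via glBlitFramebuffer with scissoring disabled. Overlapping self-copies are rejected.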
3377 | bool GrGLGpu::copySurfaceAsBlitFramebuffer(GrSurface* dst, GrSurface* src, const SkIRect& srcRect, |
3378 | const SkIPoint& dstPoint) { |
3379 | SkASSERT(can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstPoint, this->glCaps())); |
3380 | SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, |
3381 | srcRect.width(), srcRect.height()); |
3382 | if (dst == src) { |
3383 | if (SkIRect::Intersects(dstRect, srcRect)) { |
3384 | return false; |
3385 | } |
3386 | } |
3387 | |
3388 | this->bindSurfaceFBOForPixelOps(dst, 0, GR_GL_DRAW_FRAMEBUFFER, kDst_TempFBOTarget); |
3389 | this->bindSurfaceFBOForPixelOps(src, 0, GR_GL_READ_FRAMEBUFFER, kSrc_TempFBOTarget); |
3390 | // We modified the bound FBO |
3391 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
3392 | |
3393 | // BlitFrameBuffer respects the scissor, so disable it. |
3394 | this->flushScissorTest(GrScissorTest::kDisabled); |
3395 | this->disableWindowRectangles(); |
3396 | |
3397 | GL_CALL(BlitFramebuffer(srcRect.fLeft, |
3398 | srcRect.fTop, |
3399 | srcRect.fRight, |
3400 | srcRect.fBottom, |
3401 | dstRect.fLeft, |
3402 | dstRect.fTop, |
3403 | dstRect.fRight, |
3404 | dstRect.fBottom, |
3405 | GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST)); |
3406 | this->unbindSurfaceFBOForPixelOps(dst, 0, GR_GL_DRAW_FRAMEBUFFER); |
3407 | this->unbindSurfaceFBOForPixelOps(src, 0, GR_GL_READ_FRAMEBUFFER); |
3408 | |
3409 | // The rect is already in device space so we pass in kTopLeft so no flip is done. |
3410 | this->didWriteToSurface(dst, kTopLeft_GrSurfaceOrigin, &dstRect); |
3411 | return true; |
3412 | } |
3413 | |
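// Regenerates mip levels with glGenerateMipmap unless manual mipmapping is required (and the
// format is renderable), in which case each level is rendered from the previous one using the
// mipmap programs while GR_GL_TEXTURE_BASE_LEVEL restricts sampling to that previous level.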
3414 | bool GrGLGpu::onRegenerateMipMapLevels(GrTexture* texture) { |
3415 | auto glTex = static_cast<GrGLTexture*>(texture); |
3416 | // Mipmaps are only supported on 2D textures: |
3417 | if (GR_GL_TEXTURE_2D != glTex->target()) { |
3418 | return false; |
3419 | } |
3420 | GrGLFormat format = glTex->format(); |
3421 | // Manual implementation of mipmap generation, to work around driver bugs w/sRGB. |
3422 | // Uses draw calls to do a series of downsample operations to successive mips. |
3423 | |
3424 | // The manual approach requires the ability to limit which level we're sampling and that the |
3425 | // destination can be bound to a FBO: |
3426 | if (!this->glCaps().doManualMipmapping() || !this->glCaps().isFormatRenderable(format, 1)) { |
3427 | GrGLenum target = glTex->target(); |
3428 | this->bindTextureToScratchUnit(target, glTex->textureID()); |
3429 | GL_CALL(GenerateMipmap(glTex->target())); |
3430 | return true; |
3431 | } |
3432 | |
3433 | int width = texture->width(); |
3434 | int height = texture->height(); |
3435 | int levelCount = SkMipmap::ComputeLevelCount(width, height) + 1; |
3436 | SkASSERT(levelCount == texture->maxMipmapLevel() + 1); |
3437 | |
3438 | // Create (if necessary), then bind temporary FBO: |
3439 | if (0 == fTempDstFBOID) { |
3440 | GL_CALL(GenFramebuffers(1, &fTempDstFBOID)); |
3441 | } |
3442 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, fTempDstFBOID); |
3443 | fHWBoundRenderTargetUniqueID.makeInvalid(); |
3444 | |
3445 | // Bind the texture, to get things configured for filtering. |
3446 | // We'll be changing our base level further below: |
3447 | this->setTextureUnit(0); |
3448 | // The mipmap program does not do any swizzling. |
3449 | this->bindTexture(0, GrSamplerState::Filter::kLinear, GrSwizzle::RGBA(), glTex); |
3450 | |
3451 | // Vertex data: |
3452 | if (!fMipmapProgramArrayBuffer) { |
3453 | static const GrGLfloat vdata[] = { |
3454 | 0, 0, |
3455 | 0, 1, |
3456 | 1, 0, |
3457 | 1, 1 |
3458 | }; |
3459 | fMipmapProgramArrayBuffer = GrGLBuffer::Make(this, sizeof(vdata), GrGpuBufferType::kVertex, |
3460 | kStatic_GrAccessPattern, vdata); |
3461 | } |
3462 | if (!fMipmapProgramArrayBuffer) { |
3463 | return false; |
3464 | } |
3465 | |
3466 | fHWVertexArrayState.setVertexArrayID(this, 0); |
3467 | |
3468 | GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this); |
3469 | attribs->enableVertexArrays(this, 1); |
3470 | attribs->set(this, 0, fMipmapProgramArrayBuffer.get(), kFloat2_GrVertexAttribType, |
3471 | kFloat2_GrSLType, 2 * sizeof(GrGLfloat), 0); |
3472 | |
3473 | // Set "simple" state once: |
3474 | this->flushBlendAndColorWrite(GrXferProcessor::BlendInfo(), GrSwizzle::RGBA()); |
3475 | this->flushHWAAState(nullptr, false); |
3476 | this->flushScissorTest(GrScissorTest::kDisabled); |
3477 | this->disableWindowRectangles(); |
3478 | this->disableStencil(); |
3479 | |
3480 | // Do all the blits: |
3481 | width = texture->width(); |
3482 | height = texture->height(); |
3483 | |
3484 | for (GrGLint level = 1; level < levelCount; ++level) { |
3485 | // Get and bind the program for this particular downsample (filter shape can vary): |
3486 | int progIdx = TextureSizeToMipmapProgramIdx(width, height); |
3487 | if (!fMipmapPrograms[progIdx].fProgram) { |
3488 | if (!this->createMipmapProgram(progIdx)) { |
                SkDebugf("Failed to create mipmap program.\n");
3490 | // Invalidate all params to cover base level change in a previous iteration. |
3491 | glTex->textureParamsModified(); |
3492 | return false; |
3493 | } |
3494 | } |
3495 | this->flushProgram(fMipmapPrograms[progIdx].fProgram); |
3496 | |
3497 | // Texcoord uniform is expected to contain (1/w, (w-1)/w, 1/h, (h-1)/h) |
3498 | const float invWidth = 1.0f / width; |
3499 | const float invHeight = 1.0f / height; |
3500 | GL_CALL(Uniform4f(fMipmapPrograms[progIdx].fTexCoordXformUniform, |
3501 | invWidth, (width - 1) * invWidth, invHeight, (height - 1) * invHeight)); |
3502 | GL_CALL(Uniform1i(fMipmapPrograms[progIdx].fTextureUniform, 0)); |
3503 | |
3504 | // Only sample from previous mip |
3505 | GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_BASE_LEVEL, level - 1)); |
3506 | |
3507 | GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, GR_GL_TEXTURE_2D, |
3508 | glTex->textureID(), level)); |
3509 | |
3510 | width = std::max(1, width / 2); |
3511 | height = std::max(1, height / 2); |
3512 | this->flushViewport(width, height); |
3513 | |
3514 | GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4)); |
3515 | } |
3516 | |
3517 | // Unbind: |
3518 | GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, |
3519 | GR_GL_TEXTURE_2D, 0, 0)); |
3520 | |
3521 | // We modified the base level param. |
3522 | GrGLTextureParameters::NonsamplerState nonsamplerState = glTex->parameters()->nonsamplerState(); |
3523 | // We drew the 2nd to last level into the last level. |
3524 | nonsamplerState.fBaseMipMapLevel = levelCount - 2; |
3525 | glTex->parameters()->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters); |
3526 | |
3527 | return true; |
3528 | } |
3529 | |
3530 | void GrGLGpu::querySampleLocations( |
3531 | GrRenderTarget* renderTarget, SkTArray<SkPoint>* sampleLocations) { |
3532 | this->flushRenderTargetNoColorWrites(static_cast<GrGLRenderTarget*>(renderTarget)); |
3533 | |
3534 | int effectiveSampleCnt; |
3535 | GR_GL_GetIntegerv(this->glInterface(), GR_GL_SAMPLES, &effectiveSampleCnt); |
3536 | SkASSERT(effectiveSampleCnt >= renderTarget->numSamples()); |
3537 | |
3538 | sampleLocations->reset(effectiveSampleCnt); |
3539 | for (int i = 0; i < effectiveSampleCnt; ++i) { |
3540 | GL_CALL(GetMultisamplefv(GR_GL_SAMPLE_POSITION, i, &(*sampleLocations)[i].fX)); |
3541 | } |
3542 | } |
3543 | |
3544 | void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) { |
3545 | SkASSERT(type); |
3546 | switch (type) { |
3547 | case kTexture_GrXferBarrierType: { |
3548 | GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt); |
3549 | SkASSERT(glrt->textureFBOID() != 0 && glrt->renderFBOID() != 0); |
3550 | if (glrt->textureFBOID() != glrt->renderFBOID()) { |
3551 | // The render target uses separate storage so no need for glTextureBarrier. |
3552 | // FIXME: The render target will resolve automatically when its texture is bound, |
3553 | // but we could resolve only the bounds that will be read if we do it here instead. |
3554 | return; |
3555 | } |
3556 | SkASSERT(this->caps()->textureBarrierSupport()); |
3557 | GL_CALL(TextureBarrier()); |
3558 | return; |
3559 | } |
3560 | case kBlend_GrXferBarrierType: |
3561 | SkASSERT(GrCaps::kAdvanced_BlendEquationSupport == |
3562 | this->caps()->blendEquationSupport()); |
3563 | GL_CALL(BlendBarrier()); |
3564 | return; |
3565 | default: break; // placate compiler warnings that kNone not handled |
3566 | } |
3567 | } |
3568 | |
3569 | void GrGLGpu::insertManualFramebufferBarrier() { |
3570 | SkASSERT(this->caps()->requiresManualFBBarrierAfterTessellatedStencilDraw()); |
3571 | GL_CALL(MemoryBarrier(GR_GL_FRAMEBUFFER_BARRIER_BIT)); |
3572 | } |
3573 | |
3574 | GrBackendTexture GrGLGpu::onCreateBackendTexture(SkISize dimensions, |
3575 | const GrBackendFormat& format, |
3576 | GrRenderable renderable, |
3577 | GrMipmapped mipMapped, |
3578 | GrProtected isProtected) { |
3579 | // We don't support protected textures in GL. |
3580 | if (isProtected == GrProtected::kYes) { |
3581 | return {}; |
3582 | } |
3583 | |
3584 | this->handleDirtyContext(); |
3585 | |
3586 | GrGLFormat glFormat = format.asGLFormat(); |
3587 | if (glFormat == GrGLFormat::kUnknown) { |
3588 | return {}; |
3589 | } |
3590 | |
3591 | int numMipLevels = 1; |
3592 | if (mipMapped == GrMipmapped::kYes) { |
3593 | numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1; |
3594 | } |
3595 | |
3596 | // Compressed formats go through onCreateCompressedBackendTexture |
3597 | SkASSERT(!GrGLFormatIsCompressed(glFormat)); |
3598 | |
3599 | GrGLTextureInfo info; |
3600 | GrGLTextureParameters::SamplerOverriddenState initialState; |
3601 | |
3602 | if (glFormat == GrGLFormat::kUnknown) { |
3603 | return {}; |
3604 | } |
3605 | switch (format.textureType()) { |
3606 | case GrTextureType::kNone: |
3607 | case GrTextureType::kExternal: |
3608 | return {}; |
3609 | case GrTextureType::k2D: |
3610 | info.fTarget = GR_GL_TEXTURE_2D; |
3611 | break; |
3612 | case GrTextureType::kRectangle: |
3613 | if (!this->glCaps().rectangleTextureSupport() || mipMapped == GrMipmapped::kYes) { |
3614 | return {}; |
3615 | } |
3616 | info.fTarget = GR_GL_TEXTURE_RECTANGLE; |
3617 | break; |
3618 | } |
3619 | info.fFormat = GrGLFormatToEnum(glFormat); |
3620 | info.fID = this->createTexture(dimensions, glFormat, info.fTarget, renderable, &initialState, |
3621 | numMipLevels); |
3622 | if (!info.fID) { |
3623 | return {}; |
3624 | } |
3625 | |
3626 | // Unbind this texture from the scratch texture unit. |
3627 | this->bindTextureToScratchUnit(info.fTarget, 0); |
3628 | |
3629 | auto parameters = sk_make_sp<GrGLTextureParameters>(); |
3630 | // The non-sampler params are still at their default values. |
3631 | parameters->set(&initialState, GrGLTextureParameters::NonsamplerState(), |
3632 | fResetTimestampForTextureParameters); |
3633 | |
3634 | return GrBackendTexture(dimensions.width(), dimensions.height(), mipMapped, info, |
3635 | std::move(parameters)); |
3636 | } |
3637 | |
3638 | bool GrGLGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture, |
3639 | sk_sp<GrRefCntedCallback> finishedCallback, |
3640 | const BackendTextureData* data) { |
3641 | GrGLTextureInfo info; |
3642 | SkAssertResult(backendTexture.getGLTextureInfo(&info)); |
3643 | |
3644 | int numMipLevels = 1; |
3645 | if (backendTexture.hasMipmaps()) { |
3646 | numMipLevels = |
3647 | SkMipmap::ComputeLevelCount(backendTexture.width(), backendTexture.height()) + 1; |
3648 | } |
3649 | |
3650 | GrGLFormat glFormat = GrGLFormatFromGLEnum(info.fFormat); |
3651 | |
3652 | this->bindTextureToScratchUnit(info.fTarget, info.fID); |
3653 | |
3654 | // If we have mips make sure the base level is set to 0 and the max level set to numMipLevels-1 |
3655 | // so that the uploads go to the right levels. |
3656 | if (numMipLevels && this->glCaps().mipmapLevelAndLodControlSupport()) { |
3657 | auto params = backendTexture.getGLTextureParams(); |
3658 | GrGLTextureParameters::NonsamplerState nonsamplerState = params->nonsamplerState(); |
3659 | if (params->nonsamplerState().fBaseMipMapLevel != 0) { |
3660 | GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_BASE_LEVEL, 0)); |
3661 | nonsamplerState.fBaseMipMapLevel = 0; |
3662 | } |
3663 | if (params->nonsamplerState().fMaxMipmapLevel != (numMipLevels - 1)) { |
3664 | GL_CALL(TexParameteri(info.fTarget, GR_GL_TEXTURE_MAX_LEVEL, numMipLevels - 1)); |
            nonsamplerState.fMaxMipmapLevel = numMipLevels - 1;
3666 | } |
3667 | params->set(nullptr, nonsamplerState, fResetTimestampForTextureParameters); |
3668 | } |
3669 | |
3670 | SkASSERT(data->type() != BackendTextureData::Type::kCompressed); |
3671 | bool result = false; |
3672 | if (data->type() == BackendTextureData::Type::kPixmaps) { |
3673 | SkTDArray<GrMipLevel> texels; |
3674 | GrColorType colorType = SkColorTypeToGrColorType(data->pixmap(0).colorType()); |
3675 | texels.append(numMipLevels); |
3676 | for (int i = 0; i < numMipLevels; ++i) { |
3677 | texels[i] = {data->pixmap(i).addr(), data->pixmap(i).rowBytes()}; |
3678 | } |
3679 | SkIRect dstRect = SkIRect::MakeSize(backendTexture.dimensions()); |
3680 | result = this->uploadColorTypeTexData(glFormat, colorType, backendTexture.dimensions(), |
3681 | info.fTarget, dstRect, colorType, texels.begin(), |
3682 | texels.count()); |
3683 | } else if (data->type() == BackendTextureData::Type::kColor) { |
3684 | uint32_t levelMask = (1 << numMipLevels) - 1; |
3685 | result = this->uploadColorToTex(glFormat, backendTexture.dimensions(), info.fTarget, |
3686 | data->color(), levelMask); |
3687 | } |
3688 | |
3689 | // Unbind this texture from the scratch texture unit. |
3690 | this->bindTextureToScratchUnit(info.fTarget, 0); |
3691 | return result; |
3692 | } |
3693 | |
3694 | void GrGLGpu::deleteBackendTexture(const GrBackendTexture& tex) { |
3695 | SkASSERT(GrBackendApi::kOpenGL == tex.backend()); |
3696 | |
3697 | GrGLTextureInfo info; |
3698 | if (tex.getGLTextureInfo(&info)) { |
3699 | GL_CALL(DeleteTextures(1, &info.fID)); |
3700 | } |
3701 | } |
3702 | |
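// Precompiles (or finds in the cache) the program described by desc and programInfo. Returns
// true only when this call actually built the program rather than hitting the cache.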
3703 | bool GrGLGpu::compile(const GrProgramDesc& desc, const GrProgramInfo& programInfo) { |
3704 | SkASSERT(!(GrProcessor::CustomFeatures::kSampleLocations & programInfo.requestedFeatures())); |
3705 | |
3706 | Stats::ProgramCacheResult stat; |
3707 | |
3708 | sk_sp<GrGLProgram> tmp = fProgramCache->findOrCreateProgram(desc, programInfo, &stat); |
3709 | if (!tmp) { |
3710 | return false; |
3711 | } |
3712 | |
3713 | return stat != Stats::ProgramCacheResult::kHit; |
3714 | } |
3715 | |
3716 | #if GR_TEST_UTILS |
3717 | |
3718 | bool GrGLGpu::isTestingOnlyBackendTexture(const GrBackendTexture& tex) const { |
3719 | SkASSERT(GrBackendApi::kOpenGL == tex.backend()); |
3720 | |
3721 | GrGLTextureInfo info; |
3722 | if (!tex.getGLTextureInfo(&info)) { |
3723 | return false; |
3724 | } |
3725 | |
3726 | GrGLboolean result; |
3727 | GL_CALL_RET(result, IsTexture(info.fID)); |
3728 | |
3729 | return (GR_GL_TRUE == result); |
3730 | } |
3731 | |
3732 | GrBackendRenderTarget GrGLGpu::createTestingOnlyBackendRenderTarget(int w, int h, |
3733 | GrColorType colorType) { |
3734 | if (w > this->caps()->maxRenderTargetSize() || h > this->caps()->maxRenderTargetSize()) { |
3735 | return GrBackendRenderTarget(); // invalid |
3736 | } |
3737 | this->handleDirtyContext(); |
3738 | auto format = this->glCaps().getFormatFromColorType(colorType); |
3739 | if (!this->glCaps().isFormatRenderable(format, 1)) { |
3740 | return {}; |
3741 | } |
3742 | bool useTexture = format == GrGLFormat::kBGRA8; |
3743 | int sFormatIdx = this->getCompatibleStencilIndex(format); |
3744 | if (sFormatIdx < 0) { |
3745 | return {}; |
3746 | } |
3747 | GrGLuint colorID = 0; |
3748 | GrGLuint stencilID = 0; |
3749 | auto deleteIDs = [&] { |
3750 | if (colorID) { |
3751 | if (useTexture) { |
3752 | GL_CALL(DeleteTextures(1, &colorID)); |
3753 | } else { |
3754 | GL_CALL(DeleteRenderbuffers(1, &colorID)); |
3755 | } |
3756 | } |
3757 | if (stencilID) { |
3758 | GL_CALL(DeleteRenderbuffers(1, &stencilID)); |
3759 | } |
3760 | }; |
3761 | |
3762 | if (useTexture) { |
3763 | GL_CALL(GenTextures(1, &colorID)); |
3764 | } else { |
3765 | GL_CALL(GenRenderbuffers(1, &colorID)); |
3766 | } |
3767 | GL_CALL(GenRenderbuffers(1, &stencilID)); |
3768 | if (!stencilID || !colorID) { |
3769 | deleteIDs(); |
3770 | return {}; |
3771 | } |
3772 | |
3773 | GrGLFramebufferInfo info; |
3774 | info.fFBOID = 0; |
3775 | info.fFormat = GrGLFormatToEnum(format); |
3776 | GL_CALL(GenFramebuffers(1, &info.fFBOID)); |
3777 | if (!info.fFBOID) { |
3778 | deleteIDs(); |
3779 | return {}; |
3780 | } |
3781 | |
3782 | this->invalidateBoundRenderTarget(); |
3783 | |
3784 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID); |
3785 | if (useTexture) { |
3786 | GrGLTextureParameters::SamplerOverriddenState initialState; |
3787 | colorID = this->createTexture({w, h}, format, GR_GL_TEXTURE_2D, GrRenderable::kYes, |
3788 | &initialState, 1); |
3789 | if (!colorID) { |
3790 | deleteIDs(); |
3791 | return {}; |
3792 | } |
3793 | GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, GR_GL_TEXTURE_2D, |
3794 | colorID, 0)); |
3795 | } else { |
3796 | GrGLenum renderBufferFormat = this->glCaps().getRenderbufferInternalFormat(format); |
3797 | GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, colorID)); |
3798 | GL_CALL(RenderbufferStorage(GR_GL_RENDERBUFFER, renderBufferFormat, w, h)); |
3799 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0, |
3800 | GR_GL_RENDERBUFFER, colorID)); |
3801 | } |
3802 | GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, stencilID)); |
3803 | auto stencilBufferFormat = this->glCaps().stencilFormats()[sFormatIdx].fInternalFormat; |
3804 | GL_CALL(RenderbufferStorage(GR_GL_RENDERBUFFER, stencilBufferFormat, w, h)); |
3805 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_STENCIL_ATTACHMENT, GR_GL_RENDERBUFFER, |
3806 | stencilID)); |
3807 | if (this->glCaps().stencilFormats()[sFormatIdx].fPacked) { |
3808 | GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER, GR_GL_DEPTH_ATTACHMENT, |
3809 | GR_GL_RENDERBUFFER, stencilID)); |
3810 | } |
3811 | |
3812 | // We don't want to have to recover the renderbuffer/texture IDs later to delete them. OpenGL |
    // has this rule that if a renderbuffer/texture is deleted and an FBO other than the current FBO
3814 | // has the RB attached then deletion is delayed. So we unbind the FBO here and delete the |
3815 | // renderbuffers/texture. |
3816 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, 0); |
3817 | deleteIDs(); |
3818 | |
3819 | this->bindFramebuffer(GR_GL_FRAMEBUFFER, info.fFBOID); |
3820 | GrGLenum status; |
3821 | GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER)); |
3822 | if (GR_GL_FRAMEBUFFER_COMPLETE != status) { |
3823 | this->deleteFramebuffer(info.fFBOID); |
3824 | return {}; |
3825 | } |
3826 | auto stencilBits = SkToInt(this->glCaps().stencilFormats()[sFormatIdx].fStencilBits); |
3827 | |
3828 | GrBackendRenderTarget beRT = GrBackendRenderTarget(w, h, 1, stencilBits, info); |
3829 | SkASSERT(this->caps()->areColorTypeAndFormatCompatible(colorType, beRT.getBackendFormat())); |
3830 | return beRT; |
3831 | } |
3832 | |
3833 | void GrGLGpu::deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget& backendRT) { |
3834 | SkASSERT(GrBackendApi::kOpenGL == backendRT.backend()); |
3835 | GrGLFramebufferInfo info; |
3836 | if (backendRT.getGLFramebufferInfo(&info)) { |
3837 | if (info.fFBOID) { |
3838 | this->deleteFramebuffer(info.fFBOID); |
3839 | } |
3840 | } |
3841 | } |
3842 | |
3843 | void GrGLGpu::testingOnly_flushGpuAndSync() { |
3844 | GL_CALL(Finish()); |
3845 | } |
3846 | #endif |
3847 | |
3848 | /////////////////////////////////////////////////////////////////////////////// |
3849 | |
3850 | GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLGpu* gpu, |
3851 | const GrBuffer* ibuf) { |
3852 | SkASSERT(!ibuf || ibuf->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(ibuf)->isMapped()); |
3853 | GrGLAttribArrayState* attribState; |
3854 | |
3855 | if (gpu->glCaps().isCoreProfile()) { |
3856 | if (!fCoreProfileVertexArray) { |
3857 | GrGLuint arrayID; |
3858 | GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID)); |
3859 | int attrCount = gpu->glCaps().maxVertexAttributes(); |
3860 | fCoreProfileVertexArray = new GrGLVertexArray(arrayID, attrCount); |
3861 | } |
3862 | if (ibuf) { |
3863 | attribState = fCoreProfileVertexArray->bindWithIndexBuffer(gpu, ibuf); |
3864 | } else { |
3865 | attribState = fCoreProfileVertexArray->bind(gpu); |
3866 | } |
3867 | } else { |
3868 | if (ibuf) { |
3869 | // bindBuffer implicitly binds VAO 0 when binding an index buffer. |
3870 | gpu->bindBuffer(GrGpuBufferType::kIndex, ibuf); |
3871 | } else { |
3872 | this->setVertexArrayID(gpu, 0); |
3873 | } |
3874 | int attrCount = gpu->glCaps().maxVertexAttributes(); |
3875 | if (fDefaultVertexArrayAttribState.count() != attrCount) { |
3876 | fDefaultVertexArrayAttribState.resize(attrCount); |
3877 | } |
3878 | attribState = &fDefaultVertexArrayAttribState; |
3879 | } |
3880 | return attribState; |
3881 | } |
3882 | |
3883 | void GrGLGpu::addFinishedProc(GrGpuFinishedProc finishedProc, |
3884 | GrGpuFinishedContext finishedContext) { |
3885 | fFinishCallbacks.add(finishedProc, finishedContext); |
3886 | } |
3887 | |
3888 | void GrGLGpu::flush(FlushType flushType) { |
3889 | if (fNeedsGLFlush || flushType == FlushType::kForce) { |
3890 | GL_CALL(Flush()); |
3891 | fNeedsGLFlush = false; |
3892 | } |
3893 | } |
3894 | |
3895 | bool GrGLGpu::onSubmitToGpu(bool syncCpu) { |
3896 | if (syncCpu || (!fFinishCallbacks.empty() && !this->caps()->fenceSyncSupport())) { |
3897 | GL_CALL(Finish()); |
3898 | fFinishCallbacks.callAll(true); |
3899 | } else { |
3900 | this->flush(); |
3901 | // See if any previously inserted finish procs are good to go. |
3902 | fFinishCallbacks.check(); |
3903 | } |
3904 | if (!this->glCaps().skipErrorChecks()) { |
3905 | this->clearErrorsAndCheckForOOM(); |
3906 | } |
3907 | return true; |
3908 | } |
3909 | |
3910 | void GrGLGpu::submit(GrOpsRenderPass* renderPass) { |
3911 | // The GrGLOpsRenderPass doesn't buffer ops so there is nothing to do here |
3912 | SkASSERT(fCachedOpsRenderPass.get() == renderPass); |
3913 | fCachedOpsRenderPass->reset(); |
3914 | } |
3915 | |
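// Inserts a fence using GL_NV_fence or a standard sync object, depending on the fence type the
// caps report, and marks the context as needing a glFlush.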
3916 | GrFence SK_WARN_UNUSED_RESULT GrGLGpu::insertFence() { |
3917 | if (!this->caps()->fenceSyncSupport()) { |
3918 | return 0; |
3919 | } |
3920 | GrGLsync sync; |
3921 | if (this->glCaps().fenceType() == GrGLCaps::FenceType::kNVFence) { |
3922 | static_assert(sizeof(GrGLsync) >= sizeof(GrGLuint)); |
3923 | GrGLuint fence = 0; |
3924 | GL_CALL(GenFences(1, &fence)); |
3925 | GL_CALL(SetFence(fence, GR_GL_ALL_COMPLETED)); |
3926 | sync = reinterpret_cast<GrGLsync>(static_cast<intptr_t>(fence)); |
3927 | } else { |
3928 | GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0)); |
3929 | } |
3930 | this->setNeedsFlush(); |
3931 | static_assert(sizeof(GrFence) >= sizeof(GrGLsync)); |
3932 | return (GrFence)sync; |
3933 | } |
3934 | |
3935 | bool GrGLGpu::waitSync(GrGLsync sync, uint64_t timeout, bool flush) { |
3936 | if (this->glCaps().fenceType() == GrGLCaps::FenceType::kNVFence) { |
3937 | GrGLuint nvFence = static_cast<GrGLuint>(reinterpret_cast<intptr_t>(sync)); |
3938 | if (!timeout) { |
3939 | if (flush) { |
3940 | this->flush(FlushType::kForce); |
3941 | } |
3942 | GrGLboolean result; |
3943 | GL_CALL_RET(result, TestFence(nvFence)); |
3944 | return result == GR_GL_TRUE; |
3945 | } |
3946 | // Ignore non-zero timeouts. GL_NV_fence has no timeout functionality. |
3947 | // If this really becomes necessary we could poll TestFence(). |
3948 | // FinishFence always flushes so no need to check flush param. |
3949 | GL_CALL(FinishFence(nvFence)); |
3950 | return true; |
3951 | } else { |
3952 | GrGLbitfield flags = flush ? GR_GL_SYNC_FLUSH_COMMANDS_BIT : 0; |
3953 | GrGLenum result; |
3954 | GL_CALL_RET(result, ClientWaitSync(sync, flags, timeout)); |
3955 | return (GR_GL_CONDITION_SATISFIED == result || GR_GL_ALREADY_SIGNALED == result); |
3956 | } |
3957 | } |
3958 | |
3959 | bool GrGLGpu::waitFence(GrFence fence) { |
3960 | if (!this->caps()->fenceSyncSupport()) { |
3961 | return true; |
3962 | } |
3963 | return this->waitSync(reinterpret_cast<GrGLsync>(fence), 0, false); |
3964 | } |
3965 | |
3966 | void GrGLGpu::deleteFence(GrFence fence) const { |
3967 | if (this->caps()->fenceSyncSupport()) { |
3968 | this->deleteSync(reinterpret_cast<GrGLsync>(fence)); |
3969 | } |
3970 | } |
3971 | |
3972 | std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrGLGpu::makeSemaphore(bool isOwned) { |
3973 | SkASSERT(this->caps()->semaphoreSupport()); |
3974 | return GrGLSemaphore::Make(this, isOwned); |
3975 | } |
3976 | |
3977 | std::unique_ptr<GrSemaphore> GrGLGpu::wrapBackendSemaphore( |
3978 | const GrBackendSemaphore& semaphore, |
3979 | GrResourceProvider::SemaphoreWrapType wrapType, |
3980 | GrWrapOwnership ownership) { |
3981 | SkASSERT(this->caps()->semaphoreSupport()); |
3982 | return GrGLSemaphore::MakeWrapped(this, semaphore.glSync(), ownership); |
3983 | } |
3984 | |
3985 | void GrGLGpu::insertSemaphore(GrSemaphore* semaphore) { |
3986 | SkASSERT(semaphore); |
3987 | GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore); |
3988 | |
3989 | GrGLsync sync; |
3990 | GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0)); |
3991 | glSem->setSync(sync); |
3992 | this->setNeedsFlush(); |
3993 | } |
3994 | |
3995 | void GrGLGpu::waitSemaphore(GrSemaphore* semaphore) { |
3996 | SkASSERT(semaphore); |
3997 | GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore); |
3998 | |
3999 | GL_CALL(WaitSync(glSem->sync(), 0, GR_GL_TIMEOUT_IGNORED)); |
4000 | } |
4001 | |
4002 | void GrGLGpu::checkFinishProcs() { |
4003 | fFinishCallbacks.check(); |
4004 | } |
4005 | |
4006 | void GrGLGpu::clearErrorsAndCheckForOOM() { |
4007 | while (this->getErrorAndCheckForOOM() != GR_GL_NO_ERROR) {} |
4008 | } |
4009 | |
4010 | GrGLenum GrGLGpu::getErrorAndCheckForOOM() { |
4011 | #if GR_GL_CHECK_ERROR |
4012 | if (this->glInterface()->checkAndResetOOMed()) { |
4013 | this->setOOMed(); |
4014 | } |
4015 | #endif |
4016 | GrGLenum error = this->fGLContext->glInterface()->fFunctions.fGetError(); |
4017 | if (error == GR_GL_OUT_OF_MEMORY) { |
4018 | this->setOOMed(); |
4019 | } |
4020 | return error; |
4021 | } |
4022 | |
4023 | void GrGLGpu::deleteSync(GrGLsync sync) const { |
4024 | if (this->glCaps().fenceType() == GrGLCaps::FenceType::kNVFence) { |
4025 | GrGLuint nvFence = SkToUInt(reinterpret_cast<intptr_t>(sync)); |
4026 | GL_CALL(DeleteFences(1, &nvFence)); |
4027 | } else { |
4028 | GL_CALL(DeleteSync(sync)); |
4029 | } |
4030 | } |
4031 | |
4032 | std::unique_ptr<GrSemaphore> GrGLGpu::prepareTextureForCrossContextUsage(GrTexture* texture) { |
4033 | // Set up a semaphore to be signaled once the data is ready, and flush GL |
4034 | std::unique_ptr<GrSemaphore> semaphore = this->makeSemaphore(true); |
4035 | SkASSERT(semaphore); |
4036 | this->insertSemaphore(semaphore.get()); |
4037 | // We must call flush here to make sure the GrGLSync object gets created and sent to the gpu. |
4038 | this->flush(FlushType::kForce); |
4039 | |
4040 | return semaphore; |
4041 | } |
4042 | |
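// Maps the sampler type needed for the texture to the index of the matching copy program:
// 0 for 2D, 1 for rectangle, 2 for external.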
4043 | int GrGLGpu::TextureToCopyProgramIdx(GrTexture* texture) { |
4044 | switch (GrSLCombinedSamplerTypeForTextureType(texture->textureType())) { |
4045 | case kTexture2DSampler_GrSLType: |
4046 | return 0; |
4047 | case kTexture2DRectSampler_GrSLType: |
4048 | return 1; |
4049 | case kTextureExternalSampler_GrSLType: |
4050 | return 2; |
4051 | default: |
            SK_ABORT("Unexpected sampler type");
4053 | } |
4054 | } |
4055 | |
4056 | #ifdef SK_ENABLE_DUMP_GPU |
4057 | #include "src/utils/SkJSONWriter.h" |
4058 | void GrGLGpu::onDumpJSON(SkJSONWriter* writer) const { |
4059 | // We are called by the base class, which has already called beginObject(). We choose to nest |
4060 | // all of our caps information in a named sub-object. |
    writer->beginObject("GL GPU");

    const GrGLubyte* str;
    GL_CALL_RET(str, GetString(GR_GL_VERSION));
    writer->appendString("GL_VERSION", (const char*)(str));
    GL_CALL_RET(str, GetString(GR_GL_RENDERER));
    writer->appendString("GL_RENDERER", (const char*)(str));
    GL_CALL_RET(str, GetString(GR_GL_VENDOR));
    writer->appendString("GL_VENDOR", (const char*)(str));
    GL_CALL_RET(str, GetString(GR_GL_SHADING_LANGUAGE_VERSION));
    writer->appendString("GL_SHADING_LANGUAGE_VERSION", (const char*)(str));

    writer->appendName("extensions");
4074 | glInterface()->fExtensions.dumpJSON(writer); |
4075 | |
4076 | writer->endObject(); |
4077 | } |
4078 | #endif |
4079 | |