/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrGpu.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrDirectContext.h"
#include "src/core/SkCompressedDataUtils.h"
#include "src/core/SkMathPriv.h"
#include "src/core/SkMipmap.h"
#include "src/gpu/GrAuditTrail.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrDataUtils.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrNativeRect.h"
#include "src/gpu/GrPathRendering.h"
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/GrRingBuffer.h"
#include "src/gpu/GrSemaphore.h"
#include "src/gpu/GrStagingBufferManager.h"
#include "src/gpu/GrStencilAttachment.h"
#include "src/gpu/GrStencilSettings.h"
#include "src/gpu/GrTextureProxyPriv.h"
#include "src/gpu/GrTracing.h"
#include "src/utils/SkJSONWriter.h"

////////////////////////////////////////////////////////////////////////////////

GrGpu::GrGpu(GrDirectContext* direct) : fResetBits(kAll_GrBackendState), fContext(direct) {}

GrGpu::~GrGpu() {
    this->callSubmittedProcs(false);
}

void GrGpu::disconnect(DisconnectType type) {}

////////////////////////////////////////////////////////////////////////////////

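// Validates an array of mip levels passed to a texture create or write. Dimensions must halve
// (to a floor of 1) at each successive level, and either no levels, just the base level, or
// every level may supply pixel data. Row-byte requirements depend on whether the caps support
// arbitrary row bytes for writes.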
static bool validate_texel_levels(SkISize dimensions, GrColorType texelColorType,
                                  const GrMipLevel* texels, int mipLevelCount, const GrCaps* caps) {
    SkASSERT(mipLevelCount > 0);
    bool hasBasePixels = texels[0].fPixels;
    int levelsWithPixelsCnt = 0;
    auto bpp = GrColorTypeBytesPerPixel(texelColorType);
    int w = dimensions.fWidth;
    int h = dimensions.fHeight;
    for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; ++currentMipLevel) {
        if (texels[currentMipLevel].fPixels) {
            const size_t minRowBytes = w * bpp;
            if (caps->writePixelsRowBytesSupport()) {
                if (texels[currentMipLevel].fRowBytes < minRowBytes) {
                    return false;
                }
                if (texels[currentMipLevel].fRowBytes % bpp) {
                    return false;
                }
            } else {
                if (texels[currentMipLevel].fRowBytes != minRowBytes) {
                    return false;
                }
            }
            ++levelsWithPixelsCnt;
        }
        if (w == 1 && h == 1) {
            if (currentMipLevel != mipLevelCount - 1) {
                return false;
            }
        } else {
            w = std::max(w / 2, 1);
            h = std::max(h / 2, 1);
        }
    }
    // Either just a base layer or a full stack is required.
    if (mipLevelCount != 1 && (w != 1 || h != 1)) {
        return false;
    }
    // Can specify just the base, all levels, or no levels.
    if (!hasBasePixels) {
        return levelsWithPixelsCnt == 0;
    }
    return levelsWithPixelsCnt == 1 || levelsWithPixelsCnt == mipLevelCount;
}

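// Shared creation path for uncompressed textures. Performs the caps validation common to all
// uncompressed texture creates and then defers to the backend's onCreateTexture().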
sk_sp<GrTexture> GrGpu::createTextureCommon(SkISize dimensions,
                                            const GrBackendFormat& format,
                                            GrRenderable renderable,
                                            int renderTargetSampleCnt,
                                            SkBudgeted budgeted,
                                            GrProtected isProtected,
                                            int mipLevelCount,
                                            uint32_t levelClearMask) {
    if (this->caps()->isFormatCompressed(format)) {
        // Compressed textures must be created via GrGpu::createCompressedTexture.
        return nullptr;
    }

    GrMipmapped mipMapped = mipLevelCount > 1 ? GrMipmapped::kYes : GrMipmapped::kNo;
    if (!this->caps()->validateSurfaceParams(dimensions, format, renderable, renderTargetSampleCnt,
                                             mipMapped)) {
        return nullptr;
    }

    if (renderable == GrRenderable::kYes) {
        renderTargetSampleCnt =
                this->caps()->getRenderTargetSampleCount(renderTargetSampleCnt, format);
    }
    // Attempt to catch un- or wrongly initialized sample counts.
    SkASSERT(renderTargetSampleCnt > 0 && renderTargetSampleCnt <= 64);
    this->handleDirtyContext();
    auto tex = this->onCreateTexture(dimensions,
                                     format,
                                     renderable,
                                     renderTargetSampleCnt,
                                     budgeted,
                                     isProtected,
                                     mipLevelCount,
                                     levelClearMask);
    if (tex) {
        SkASSERT(tex->backendFormat() == format);
        SkASSERT(GrRenderable::kNo == renderable || tex->asRenderTarget());
        if (!this->caps()->reuseScratchTextures() && renderable == GrRenderable::kNo) {
            tex->resourcePriv().removeScratchKey();
        }
        fStats.incTextureCreates();
        if (renderTargetSampleCnt > 1 && !this->caps()->msaaResolvesAutomatically()) {
            SkASSERT(GrRenderable::kYes == renderable);
            tex->asRenderTarget()->setRequiresManualMSAAResolve();
        }
    }
    return tex;
}

sk_sp<GrTexture> GrGpu::createTexture(SkISize dimensions,
                                      const GrBackendFormat& format,
                                      GrRenderable renderable,
                                      int renderTargetSampleCnt,
                                      GrMipmapped mipMapped,
                                      SkBudgeted budgeted,
                                      GrProtected isProtected) {
    int mipLevelCount = 1;
    if (mipMapped == GrMipmapped::kYes) {
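        // A full mip chain has floor(log2(max(width, height))) + 1 levels; computed here from
        // the bit width of the largest dimension.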
        mipLevelCount =
                32 - SkCLZ(static_cast<uint32_t>(std::max(dimensions.fWidth, dimensions.fHeight)));
    }
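    // If the caps require new textures to be initialized, request a clear of every level.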
    uint32_t levelClearMask =
            this->caps()->shouldInitializeTextures() ? (1 << mipLevelCount) - 1 : 0;
    auto tex = this->createTextureCommon(dimensions, format, renderable, renderTargetSampleCnt,
                                         budgeted, isProtected, mipLevelCount, levelClearMask);
    if (tex && mipMapped == GrMipmapped::kYes && levelClearMask) {
        tex->markMipmapsClean();
    }
    return tex;
}

sk_sp<GrTexture> GrGpu::createTexture(SkISize dimensions,
                                      const GrBackendFormat& format,
                                      GrRenderable renderable,
                                      int renderTargetSampleCnt,
                                      SkBudgeted budgeted,
                                      GrProtected isProtected,
                                      GrColorType textureColorType,
                                      GrColorType srcColorType,
                                      const GrMipLevel texels[],
                                      int texelLevelCount) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    if (texelLevelCount) {
        if (!validate_texel_levels(dimensions, srcColorType, texels, texelLevelCount,
                                   this->caps())) {
            return nullptr;
        }
    }

    int mipLevelCount = std::max(1, texelLevelCount);
    uint32_t levelClearMask = 0;
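    // When the caps require initialized textures, mark any level that lacks pixel data so the
    // backend clears it.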
    if (this->caps()->shouldInitializeTextures()) {
        if (texelLevelCount) {
            for (int i = 0; i < mipLevelCount; ++i) {
                // Check each level's pixels, not just the base level's.
                if (!texels[i].fPixels) {
                    levelClearMask |= static_cast<uint32_t>(1 << i);
                }
            }
        } else {
            levelClearMask = static_cast<uint32_t>((1 << mipLevelCount) - 1);
        }
    }

    auto tex = this->createTextureCommon(dimensions, format, renderable, renderTargetSampleCnt,
                                         budgeted, isProtected, texelLevelCount, levelClearMask);
    if (tex) {
        bool markMipLevelsClean = false;
        // Currently, if level 0 does not have pixels then no other level may, as enforced by
        // validate_texel_levels.
        if (texelLevelCount && texels[0].fPixels) {
            if (!this->writePixels(tex.get(), 0, 0, dimensions.fWidth, dimensions.fHeight,
                                   textureColorType, srcColorType, texels, texelLevelCount)) {
                return nullptr;
            }
            // Currently, if level 1 of the mip map has pixel data then so must all other levels,
            // as enforced by validate_texel_levels.
            markMipLevelsClean = (texelLevelCount > 1 && !levelClearMask && texels[1].fPixels);
            fStats.incTextureUploads();
        } else if (levelClearMask && mipLevelCount > 1) {
            markMipLevelsClean = true;
        }
        if (markMipLevelsClean) {
            tex->markMipmapsClean();
        }
    }
    return tex;
}

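// Creates a texture from pre-compressed data, validating dimensions, format, and data size
// before deferring to the backend.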
sk_sp<GrTexture> GrGpu::createCompressedTexture(SkISize dimensions,
                                                const GrBackendFormat& format,
                                                SkBudgeted budgeted,
                                                GrMipmapped mipMapped,
                                                GrProtected isProtected,
                                                const void* data,
                                                size_t dataSize) {
    this->handleDirtyContext();
    if (dimensions.width() < 1 || dimensions.width() > this->caps()->maxTextureSize() ||
        dimensions.height() < 1 || dimensions.height() > this->caps()->maxTextureSize()) {
        return nullptr;
    }
    // Note: if we relax the requirement that data must be provided then we must check
    // caps()->shouldInitializeTextures() here.
    if (!data) {
        return nullptr;
    }
    if (!this->caps()->isFormatTexturable(format)) {
        return nullptr;
    }

    // TODO: expand CompressedDataIsCorrect to work here too
    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);

    if (dataSize < SkCompressedDataSize(compressionType, dimensions, nullptr,
                                        mipMapped == GrMipmapped::kYes)) {
        return nullptr;
    }
    return this->onCreateCompressedTexture(dimensions, format, budgeted, mipMapped, isProtected,
                                           data, dataSize);
}

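// The wrapBackendXxx entry points adopt client-created backend objects. Each validates against
// the caps and then defers to the backend-specific onWrapXxx implementation.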
sk_sp<GrTexture> GrGpu::wrapBackendTexture(const GrBackendTexture& backendTex,
                                           GrWrapOwnership ownership,
                                           GrWrapCacheable cacheable,
                                           GrIOType ioType) {
    SkASSERT(ioType != kWrite_GrIOType);
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();
    SkASSERT(caps);

    if (!caps->isFormatTexturable(backendTex.getBackendFormat())) {
        return nullptr;
    }
    if (backendTex.width() > caps->maxTextureSize() ||
        backendTex.height() > caps->maxTextureSize()) {
        return nullptr;
    }

    return this->onWrapBackendTexture(backendTex, ownership, cacheable, ioType);
}

sk_sp<GrTexture> GrGpu::wrapCompressedBackendTexture(const GrBackendTexture& backendTex,
                                                     GrWrapOwnership ownership,
                                                     GrWrapCacheable cacheable) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();
    SkASSERT(caps);

    if (!caps->isFormatTexturable(backendTex.getBackendFormat())) {
        return nullptr;
    }
    if (backendTex.width() > caps->maxTextureSize() ||
        backendTex.height() > caps->maxTextureSize()) {
        return nullptr;
    }

    return this->onWrapCompressedBackendTexture(backendTex, ownership, cacheable);
}

sk_sp<GrTexture> GrGpu::wrapRenderableBackendTexture(const GrBackendTexture& backendTex,
                                                     int sampleCnt,
                                                     GrWrapOwnership ownership,
                                                     GrWrapCacheable cacheable) {
    this->handleDirtyContext();
    if (sampleCnt < 1) {
        return nullptr;
    }

    const GrCaps* caps = this->caps();

    if (!caps->isFormatTexturable(backendTex.getBackendFormat()) ||
        !caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) {
        return nullptr;
    }

    if (backendTex.width() > caps->maxRenderTargetSize() ||
        backendTex.height() > caps->maxRenderTargetSize()) {
        return nullptr;
    }
    sk_sp<GrTexture> tex =
            this->onWrapRenderableBackendTexture(backendTex, sampleCnt, ownership, cacheable);
    SkASSERT(!tex || tex->asRenderTarget());
    if (tex && sampleCnt > 1 && !caps->msaaResolvesAutomatically()) {
        tex->asRenderTarget()->setRequiresManualMSAAResolve();
    }
    return tex;
}

sk_sp<GrRenderTarget> GrGpu::wrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();

    if (!caps->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) {
        return nullptr;
    }

    sk_sp<GrRenderTarget> rt = this->onWrapBackendRenderTarget(backendRT);
    // Guard against the backend failing to wrap the render target.
    if (rt && backendRT.isFramebufferOnly()) {
        rt->setFramebufferOnly();
    }
    return rt;
}

sk_sp<GrRenderTarget> GrGpu::wrapBackendTextureAsRenderTarget(const GrBackendTexture& backendTex,
                                                              int sampleCnt) {
    this->handleDirtyContext();

    const GrCaps* caps = this->caps();

    int maxSize = caps->maxTextureSize();
    if (backendTex.width() > maxSize || backendTex.height() > maxSize) {
        return nullptr;
    }

    if (!caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) {
        return nullptr;
    }

    auto rt = this->onWrapBackendTextureAsRenderTarget(backendTex, sampleCnt);
    if (rt && sampleCnt > 1 && !this->caps()->msaaResolvesAutomatically()) {
        rt->setRequiresManualMSAAResolve();
    }
    return rt;
}

sk_sp<GrRenderTarget> GrGpu::wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                 const GrVkDrawableInfo& vkInfo) {
    return this->onWrapVulkanSecondaryCBAsRenderTarget(imageInfo, vkInfo);
}

sk_sp<GrRenderTarget> GrGpu::onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo,
                                                                   const GrVkDrawableInfo& vkInfo) {
    // This is only supported on Vulkan, so we default to returning nullptr here.
    return nullptr;
}

sk_sp<GrGpuBuffer> GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType,
                                       GrAccessPattern accessPattern, const void* data) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    this->handleDirtyContext();
    sk_sp<GrGpuBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern, data);
    // onCreateBuffer may fail; only strip the scratch key from a live buffer.
    if (buffer && !this->caps()->reuseScratchBuffers()) {
        buffer->resourcePriv().removeScratchKey();
    }
    return buffer;
}

bool GrGpu::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                        const SkIPoint& dstPoint) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(dst && src);
    SkASSERT(!src->framebufferOnly());

    if (dst->readOnly()) {
        return false;
    }

    this->handleDirtyContext();

    return this->onCopySurface(dst, src, srcRect, dstPoint);
}

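// Reads back a rectangle of pixels from a surface. The read region must lie within the surface,
// and rowBytes must satisfy the backend's row-byte support (see the caps checks below).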
bool GrGpu::readPixels(GrSurface* surface, int left, int top, int width, int height,
                       GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
                       size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(!surface->framebufferOnly());
    SkASSERT(this->caps()->isFormatTexturable(surface->backendFormat()));

    auto subRect = SkIRect::MakeXYWH(left, top, width, height);
    auto bounds = SkIRect::MakeWH(surface->width(), surface->height());
    if (!bounds.contains(subRect)) {
        return false;
    }

    size_t minRowBytes = SkToSizeT(GrColorTypeBytesPerPixel(dstColorType) * width);
    if (!this->caps()->readPixelsRowBytesSupport()) {
        if (rowBytes != minRowBytes) {
            return false;
        }
    } else {
        if (rowBytes < minRowBytes) {
            return false;
        }
        if (rowBytes % GrColorTypeBytesPerPixel(dstColorType)) {
            return false;
        }
    }

    this->handleDirtyContext();

    return this->onReadPixels(surface, left, top, width, height, surfaceColorType, dstColorType,
                              buffer, rowBytes);
}

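// Uploads texel data to a surface. A single-level write may target a subrect; a mipped write
// must cover the entire surface.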
bool GrGpu::writePixels(GrSurface* surface, int left, int top, int width, int height,
                        GrColorType surfaceColorType, GrColorType srcColorType,
                        const GrMipLevel texels[], int mipLevelCount, bool prepForTexSampling) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    ATRACE_ANDROID_FRAMEWORK_ALWAYS("Upload %ix%i Texture", width, height);
    SkASSERT(surface);
    SkASSERT(!surface->framebufferOnly());

    if (surface->readOnly()) {
        return false;
    }

    if (mipLevelCount == 0) {
        return false;
    } else if (mipLevelCount == 1) {
        // We require that if we are not mipped, then the write region is contained in the
        // surface.
        auto subRect = SkIRect::MakeXYWH(left, top, width, height);
        auto bounds = SkIRect::MakeWH(surface->width(), surface->height());
        if (!bounds.contains(subRect)) {
            return false;
        }
    } else if (0 != left || 0 != top || width != surface->width() || height != surface->height()) {
        // We require that if the texels are mipped, then the write region is the entire surface.
        return false;
    }

    if (!validate_texel_levels({width, height}, srcColorType, texels, mipLevelCount,
                               this->caps())) {
        return false;
    }

    this->handleDirtyContext();
    if (this->onWritePixels(surface, left, top, width, height, surfaceColorType, srcColorType,
                            texels, mipLevelCount, prepForTexSampling)) {
        SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
        this->didWriteToSurface(surface, kTopLeft_GrSurfaceOrigin, &rect, mipLevelCount);
        fStats.incTextureUploads();
        return true;
    }
    return false;
}

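// Copies pixel data from a GPU transfer buffer into a texture. Row-byte validation mirrors
// writePixels(), but the source is a buffer rather than client memory.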
bool GrGpu::transferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
                             GrColorType textureColorType, GrColorType bufferColorType,
                             GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(transferBuffer);

    if (texture->readOnly()) {
        return false;
    }

    // We require that the write region is contained in the texture
    SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
    SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height());
    if (!bounds.contains(subRect)) {
        return false;
    }

    size_t bpp = GrColorTypeBytesPerPixel(bufferColorType);
    if (this->caps()->writePixelsRowBytesSupport()) {
        if (rowBytes < SkToSizeT(bpp * width)) {
            return false;
        }
        if (rowBytes % bpp) {
            return false;
        }
    } else {
        if (rowBytes != SkToSizeT(bpp * width)) {
            return false;
        }
    }

    this->handleDirtyContext();
    if (this->onTransferPixelsTo(texture, left, top, width, height, textureColorType,
                                 bufferColorType, transferBuffer, offset, rowBytes)) {
        SkIRect rect = SkIRect::MakeXYWH(left, top, width, height);
        this->didWriteToSurface(texture, kTopLeft_GrSurfaceOrigin, &rect);
        fStats.incTransfersToTexture();

        return true;
    }
    return false;
}

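// Copies pixel data from a surface into a GPU transfer buffer. The offset into the buffer must
// meet the backend's alignment requirement for transfer reads.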
bool GrGpu::transferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                               GrColorType surfaceColorType, GrColorType bufferColorType,
                               GrGpuBuffer* transferBuffer, size_t offset) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(surface);
    SkASSERT(transferBuffer);
    SkASSERT(this->caps()->isFormatTexturable(surface->backendFormat()));

#ifdef SK_DEBUG
    auto supportedRead = this->caps()->supportedReadPixelsColorType(
            surfaceColorType, surface->backendFormat(), bufferColorType);
    SkASSERT(supportedRead.fOffsetAlignmentForTransferBuffer);
    SkASSERT(offset % supportedRead.fOffsetAlignmentForTransferBuffer == 0);
#endif

    // We require that the read region is contained in the surface.
    SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height);
    SkIRect bounds = SkIRect::MakeWH(surface->width(), surface->height());
    if (!bounds.contains(subRect)) {
        return false;
    }

    this->handleDirtyContext();
    if (this->onTransferPixelsFrom(surface, left, top, width, height, surfaceColorType,
                                   bufferColorType, transferBuffer, offset)) {
        fStats.incTransfersFromSurface();
        return true;
    }
    return false;
}

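// Regenerates the upper mip levels of a texture from its base level, if they are marked dirty.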
bool GrGpu::regenerateMipMapLevels(GrTexture* texture) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    SkASSERT(texture);
    SkASSERT(this->caps()->mipmapSupport());
    SkASSERT(texture->mipmapped() == GrMipmapped::kYes);
    if (!texture->mipmapsAreDirty()) {
        // This can happen when the proxy expects mipmaps to be dirty, but they are not dirty on
        // the actual target. This may be caused by things that the drawingManager could not
        // predict, e.g., ops that don't draw anything, aborting a draw for exceptional
        // circumstances, etc.
        // NOTE: This goes away once we quit tracking mipmap state on the actual texture.
        return true;
    }
    if (texture->readOnly()) {
        return false;
    }
    if (this->onRegenerateMipMapLevels(texture)) {
        texture->markMipmapsClean();
        return true;
    }
    return false;
}

void GrGpu::resetTextureBindings() {
    this->handleDirtyContext();
    this->onResetTextureBindings();
}

void GrGpu::resolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) {
    SkASSERT(target);
    this->handleDirtyContext();
    this->onResolveRenderTarget(target, resolveRect);
}

void GrGpu::didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
                              uint32_t mipLevels) const {
    SkASSERT(surface);
    SkASSERT(!surface->readOnly());
    // Mark the mip chain as dirty if and only if the written bounds are non-empty (null bounds
    // are treated as a write to the entire surface).
    if (nullptr == bounds || !bounds->isEmpty()) {
        GrTexture* texture = surface->asTexture();
        if (texture && 1 == mipLevels) {
            texture->markMipmapsDirty();
        }
    }
}

int GrGpu::findOrAssignSamplePatternKey(GrRenderTarget* renderTarget) {
    SkASSERT(this->caps()->sampleLocationsSupport());
    SkASSERT(renderTarget->numSamples() > 1 ||
             (renderTarget->getStencilAttachment() &&
              renderTarget->getStencilAttachment()->numSamples() > 1));

    SkSTArray<16, SkPoint> sampleLocations;
    this->querySampleLocations(renderTarget, &sampleLocations);
    return fSamplePatternDictionary.findOrAssignSamplePatternKey(sampleLocations);
}

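// Processes a GrFlushInfo: wraps or creates the semaphores to signal, registers finished and
// submitted callbacks, and prepares the given proxies for backend access and state updates.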
void GrGpu::executeFlushInfo(GrSurfaceProxy* proxies[],
                             int numProxies,
                             SkSurface::BackendSurfaceAccess access,
                             const GrFlushInfo& info,
                             const GrBackendSurfaceMutableState* newState) {
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);

    GrResourceProvider* resourceProvider = fContext->priv().resourceProvider();

    std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores(
            new std::unique_ptr<GrSemaphore>[info.fNumSemaphores]);
    if (this->caps()->semaphoreSupport() && info.fNumSemaphores) {
        for (int i = 0; i < info.fNumSemaphores; ++i) {
            if (info.fSignalSemaphores[i].isInitialized()) {
                semaphores[i] = resourceProvider->wrapBackendSemaphore(
                        info.fSignalSemaphores[i],
                        GrResourceProvider::SemaphoreWrapType::kWillSignal,
                        kBorrow_GrWrapOwnership);
                // If we failed to wrap the semaphore it means the client didn't give us a valid
                // semaphore to begin with. Therefore, it is fine to not signal it.
                if (semaphores[i]) {
                    this->insertSemaphore(semaphores[i].get());
                }
            } else {
                semaphores[i] = resourceProvider->makeSemaphore(false);
                if (semaphores[i]) {
                    this->insertSemaphore(semaphores[i].get());
                    info.fSignalSemaphores[i] = semaphores[i]->backendSemaphore();
                }
            }
        }
    }

    if (info.fFinishedProc) {
        this->addFinishedProc(info.fFinishedProc, info.fFinishedContext);
    }

    if (info.fSubmittedProc) {
        fSubmittedProcs.emplace_back(info.fSubmittedProc, info.fSubmittedContext);
    }

    // We currently don't support passing in new surface state for multiple proxies here. The only
    // time we have multiple proxies is if we are flushing a YUV SkImage, which won't have state
    // updates anyway.
    SkASSERT(!newState || numProxies == 1);
    SkASSERT(!newState || access == SkSurface::BackendSurfaceAccess::kNoAccess);
    this->prepareSurfacesForBackendAccessAndStateUpdates(proxies, numProxies, access, newState);
}

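// Submits pending work to the GPU. Detaches any staging buffers and hands the uniform ring
// buffer off for submission before calling the backend, then notifies the submitted procs.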
bool GrGpu::submitToGpu(bool syncCpu) {
    this->stats()->incNumSubmitToGpus();

    if (auto manager = this->stagingBufferManager()) {
        manager->detachBuffers();
    }

    if (auto uniformsBuffer = this->uniformsRingBuffer()) {
        uniformsBuffer->startSubmit(this);
    }

    bool submitted = this->onSubmitToGpu(syncCpu);

    this->callSubmittedProcs(submitted);

    return submitted;
}

bool GrGpu::checkAndResetOOMed() {
    if (fOOMed) {
        fOOMed = false;
        return true;
    }
    return false;
}

void GrGpu::callSubmittedProcs(bool success) {
    for (int i = 0; i < fSubmittedProcs.count(); ++i) {
        fSubmittedProcs[i].fProc(fSubmittedProcs[i].fContext, success);
    }
    fSubmittedProcs.reset();
}

#ifdef SK_ENABLE_DUMP_GPU
void GrGpu::dumpJSON(SkJSONWriter* writer) const {
    writer->beginObject();

    // TODO: Is there anything useful in the base class to dump here?

    this->onDumpJSON(writer);

    writer->endObject();
}
#else
void GrGpu::dumpJSON(SkJSONWriter* writer) const { }
#endif

#if GR_TEST_UTILS

#if GR_GPU_STATS
static const char* cache_result_to_str(int i) {
    const char* kCacheResultStrings[GrGpu::Stats::kNumProgramCacheResults] = {
        "hits",
        "misses",
        "partials"
    };
    static_assert(0 == (int) GrGpu::Stats::ProgramCacheResult::kHit);
    static_assert(1 == (int) GrGpu::Stats::ProgramCacheResult::kMiss);
    static_assert(2 == (int) GrGpu::Stats::ProgramCacheResult::kPartial);
    static_assert(GrGpu::Stats::kNumProgramCacheResults == 3);
    return kCacheResultStrings[i];
}

void GrGpu::Stats::dump(SkString* out) {
    out->appendf("Render Target Binds: %d\n", fRenderTargetBinds);
    out->appendf("Shader Compilations: %d\n", fShaderCompilations);
    out->appendf("Textures Created: %d\n", fTextureCreates);
    out->appendf("Texture Uploads: %d\n", fTextureUploads);
    out->appendf("Transfers to Texture: %d\n", fTransfersToTexture);
    out->appendf("Transfers from Surface: %d\n", fTransfersFromSurface);
    out->appendf("Stencil Buffer Creates: %d\n", fStencilAttachmentCreates);
    out->appendf("Number of draws: %d\n", fNumDraws);
    out->appendf("Number of Scratch Textures reused: %d\n", fNumScratchTexturesReused);

    SkASSERT(fNumInlineCompilationFailures == 0);
    out->appendf("Number of Inline compile failures %d\n", fNumInlineCompilationFailures);
    for (int i = 0; i < Stats::kNumProgramCacheResults - 1; ++i) {
        out->appendf("Inline Program Cache %s %d\n", cache_result_to_str(i),
                     fInlineProgramCacheStats[i]);
    }

    SkASSERT(fNumPreCompilationFailures == 0);
    out->appendf("Number of precompile failures %d\n", fNumPreCompilationFailures);
    for (int i = 0; i < Stats::kNumProgramCacheResults - 1; ++i) {
        out->appendf("Precompile Program Cache %s %d\n", cache_result_to_str(i),
                     fPreProgramCacheStats[i]);
    }

    SkASSERT(fNumCompilationFailures == 0);
    out->appendf("Total number of compilation failures %d\n", fNumCompilationFailures);
    out->appendf("Total number of partial compilation successes %d\n",
                 fNumPartialCompilationSuccesses);
    out->appendf("Total number of compilation successes %d\n", fNumCompilationSuccesses);

    // Enable this block to output CSV-style stats for program pre-compilation.
#if 0
    SkASSERT(fNumInlineCompilationFailures == 0);
    SkASSERT(fNumPreCompilationFailures == 0);
    SkASSERT(fNumCompilationFailures == 0);
    SkASSERT(fNumPartialCompilationSuccesses == 0);

    SkDebugf("%d, %d, %d, %d, %d\n",
             fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
             fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
             fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kHit],
             fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss],
             fNumCompilationSuccesses);
#endif
}

void GrGpu::Stats::dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) {
    keys->push_back(SkString("render_target_binds")); values->push_back(fRenderTargetBinds);
    keys->push_back(SkString("shader_compilations")); values->push_back(fShaderCompilations);
}

#endif // GR_GPU_STATS
#endif // GR_TEST_UTILS

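// Validates client-provided BackendTextureData against the requested dimensions and mip state.
// MipMapsAreCorrect handles pixmap data; CompressedDataIsCorrect handles compressed data.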
bool GrGpu::MipMapsAreCorrect(SkISize dimensions,
                              GrMipmapped mipMapped,
                              const BackendTextureData* data) {
    int numMipLevels = 1;
    if (mipMapped == GrMipmapped::kYes) {
        numMipLevels = SkMipmap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1;
    }

    if (!data || data->type() == BackendTextureData::Type::kColor) {
        return true;
    }

    if (data->type() == BackendTextureData::Type::kCompressed) {
        return false;  // This should be going through CompressedDataIsCorrect
    }

    SkASSERT(data->type() == BackendTextureData::Type::kPixmaps);

    if (data->pixmap(0).dimensions() != dimensions) {
        return false;
    }

    SkColorType colorType = data->pixmap(0).colorType();
    for (int i = 1; i < numMipLevels; ++i) {
        dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)};
        if (dimensions != data->pixmap(i).dimensions()) {
            return false;
        }
        if (colorType != data->pixmap(i).colorType()) {
            return false;
        }
    }
    return true;
}

bool GrGpu::CompressedDataIsCorrect(SkISize dimensions, SkImage::CompressionType compressionType,
                                    GrMipmapped mipMapped, const BackendTextureData* data) {
    if (!data || data->type() == BackendTextureData::Type::kColor) {
        return true;
    }

    if (data->type() == BackendTextureData::Type::kPixmaps) {
        return false;
    }

    SkASSERT(data->type() == BackendTextureData::Type::kCompressed);

    size_t computedSize = SkCompressedDataSize(compressionType, dimensions,
                                               nullptr, mipMapped == GrMipmapped::kYes);

    return computedSize == data->compressedSize();
}

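// Client-facing entry points for creating and updating backend textures that live outside the
// resource cache. Each validates against the caps and then defers to the backend.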
GrBackendTexture GrGpu::createBackendTexture(SkISize dimensions,
                                             const GrBackendFormat& format,
                                             GrRenderable renderable,
                                             GrMipmapped mipMapped,
                                             GrProtected isProtected) {
    const GrCaps* caps = this->caps();

    if (!format.isValid()) {
        return {};
    }

    if (caps->isFormatCompressed(format)) {
        // Compressed formats must go through the createCompressedBackendTexture API
        return {};
    }

    if (dimensions.isEmpty() || dimensions.width() > caps->maxTextureSize() ||
        dimensions.height() > caps->maxTextureSize()) {
        return {};
    }

    if (mipMapped == GrMipmapped::kYes && !this->caps()->mipmapSupport()) {
        return {};
    }

    return this->onCreateBackendTexture(dimensions, format, renderable, mipMapped, isProtected);
}

bool GrGpu::updateBackendTexture(const GrBackendTexture& backendTexture,
                                 sk_sp<GrRefCntedCallback> finishedCallback,
                                 const BackendTextureData* data) {
    SkASSERT(data);
    const GrCaps* caps = this->caps();

    if (!backendTexture.isValid()) {
        return false;
    }

    if (data->type() == BackendTextureData::Type::kPixmaps) {
        auto ct = SkColorTypeToGrColorType(data->pixmap(0).colorType());
        if (!caps->areColorTypeAndFormatCompatible(ct, backendTexture.getBackendFormat())) {
            return false;
        }
    }

    if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
        return false;
    }

    GrMipmapped mipMapped = backendTexture.hasMipmaps() ? GrMipmapped::kYes : GrMipmapped::kNo;
    if (!MipMapsAreCorrect(backendTexture.dimensions(), mipMapped, data)) {
        return false;
    }

    return this->onUpdateBackendTexture(backendTexture, std::move(finishedCallback), data);
}

GrBackendTexture GrGpu::createCompressedBackendTexture(SkISize dimensions,
                                                       const GrBackendFormat& format,
                                                       GrMipmapped mipMapped,
                                                       GrProtected isProtected) {
    const GrCaps* caps = this->caps();

    if (!format.isValid()) {
        return {};
    }

    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkImage::CompressionType::kNone) {
        // Uncompressed formats must go through the createBackendTexture API
        return {};
    }

    if (dimensions.isEmpty() ||
        dimensions.width() > caps->maxTextureSize() ||
        dimensions.height() > caps->maxTextureSize()) {
        return {};
    }

    if (mipMapped == GrMipmapped::kYes && !this->caps()->mipmapSupport()) {
        return {};
    }

    return this->onCreateCompressedBackendTexture(dimensions, format, mipMapped, isProtected);
}

bool GrGpu::updateCompressedBackendTexture(const GrBackendTexture& backendTexture,
                                           sk_sp<GrRefCntedCallback> finishedCallback,
                                           const BackendTextureData* data) {
    SkASSERT(data);

    if (!backendTexture.isValid()) {
        return false;
    }

    GrBackendFormat format = backendTexture.getBackendFormat();

    SkImage::CompressionType compressionType = GrBackendFormatToCompressionType(format);
    if (compressionType == SkImage::CompressionType::kNone) {
        // Uncompressed formats must go through the createBackendTexture API
        return false;
    }

    if (backendTexture.hasMipmaps() && !this->caps()->mipmapSupport()) {
        return false;
    }

    GrMipmapped mipMapped = backendTexture.hasMipmaps() ? GrMipmapped::kYes : GrMipmapped::kNo;

    if (!CompressedDataIsCorrect(backendTexture.dimensions(), compressionType, mipMapped, data)) {
        return false;
    }

    return this->onUpdateCompressedBackendTexture(backendTexture, std::move(finishedCallback),
                                                  data);
}