1 | /* |
2 | * Copyright 2010 Google Inc. |
3 | * |
4 | * Use of this source code is governed by a BSD-style license that can be |
5 | * found in the LICENSE file. |
6 | */ |
7 | |
8 | |
9 | #include "src/gpu/GrGpu.h" |
10 | |
11 | #include "include/gpu/GrBackendSemaphore.h" |
12 | #include "include/gpu/GrBackendSurface.h" |
13 | #include "include/gpu/GrContext.h" |
14 | #include "src/core/SkCompressedDataUtils.h" |
15 | #include "src/core/SkMathPriv.h" |
16 | #include "src/core/SkMipMap.h" |
17 | #include "src/gpu/GrAuditTrail.h" |
18 | #include "src/gpu/GrCaps.h" |
19 | #include "src/gpu/GrContextPriv.h" |
20 | #include "src/gpu/GrDataUtils.h" |
21 | #include "src/gpu/GrGpuResourcePriv.h" |
22 | #include "src/gpu/GrNativeRect.h" |
23 | #include "src/gpu/GrPathRendering.h" |
24 | #include "src/gpu/GrPipeline.h" |
25 | #include "src/gpu/GrRenderTargetPriv.h" |
26 | #include "src/gpu/GrResourceCache.h" |
27 | #include "src/gpu/GrResourceProvider.h" |
28 | #include "src/gpu/GrSemaphore.h" |
29 | #include "src/gpu/GrStagingBuffer.h" |
30 | #include "src/gpu/GrStencilAttachment.h" |
31 | #include "src/gpu/GrStencilSettings.h" |
32 | #include "src/gpu/GrSurfacePriv.h" |
33 | #include "src/gpu/GrTexturePriv.h" |
34 | #include "src/gpu/GrTextureProxyPriv.h" |
35 | #include "src/gpu/GrTracing.h" |
36 | #include "src/utils/SkJSONWriter.h" |
37 | |
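// Newly created staging buffers are at least this large; findStagingBuffer() also
// rounds requests up to the next power of two.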
38 | static const size_t kMinStagingBufferSize = 32 * 1024; |
39 | |
40 | //////////////////////////////////////////////////////////////////////////////// |
41 | |
42 | GrGpu::GrGpu(GrContext* context) : fResetBits(kAll_GrBackendState), fContext(context) {} |
43 | |
44 | GrGpu::~GrGpu() { |
45 | SkASSERT(fBusyStagingBuffers.isEmpty()); |
46 | } |
47 | |
48 | void GrGpu::disconnect(DisconnectType) {} |
49 | |
50 | //////////////////////////////////////////////////////////////////////////////// |
51 | |
52 | bool GrGpu::IsACopyNeededForMips(const GrCaps* caps, const GrTextureProxy* texProxy, |
53 | GrSamplerState::Filter filter) { |
54 | SkASSERT(texProxy); |
55 | if (filter != GrSamplerState::Filter::kMipMap || texProxy->mipMapped() == GrMipMapped::kYes || |
56 | !caps->mipMapSupport()) { |
57 | return false; |
58 | } |
59 | return SkMipMap::ComputeLevelCount(texProxy->width(), texProxy->height()) > 0; |
60 | } |
61 | |
62 | static bool validate_texel_levels(SkISize dimensions, GrColorType texelColorType, |
63 | const GrMipLevel* texels, int mipLevelCount, const GrCaps* caps) { |
64 | SkASSERT(mipLevelCount > 0); |
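    // Levels may be provided in three configurations: no level has pixels, only the
    // base level has pixels, or every level has pixels. When a level is provided,
    // its row bytes must be tight (w * bpp) or, if the caps permit arbitrary row
    // bytes, any multiple of bpp that is at least tight. E.g., an 8x4 texture
    // supplied with a full chain must carry levels 8x4, 4x2, 2x1, and 1x1.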
65 | bool hasBasePixels = texels[0].fPixels; |
66 | int levelsWithPixelsCnt = 0; |
67 | auto bpp = GrColorTypeBytesPerPixel(texelColorType); |
68 | int w = dimensions.fWidth; |
69 | int h = dimensions.fHeight; |
70 | for (int currentMipLevel = 0; currentMipLevel < mipLevelCount; ++currentMipLevel) { |
71 | if (texels[currentMipLevel].fPixels) { |
72 | const size_t minRowBytes = w * bpp; |
73 | if (caps->writePixelsRowBytesSupport()) { |
74 | if (texels[currentMipLevel].fRowBytes < minRowBytes) { |
75 | return false; |
76 | } |
77 | if (texels[currentMipLevel].fRowBytes % bpp) { |
78 | return false; |
79 | } |
80 | } else { |
81 | if (texels[currentMipLevel].fRowBytes != minRowBytes) { |
82 | return false; |
83 | } |
84 | } |
85 | ++levelsWithPixelsCnt; |
86 | } |
87 | if (w == 1 && h == 1) { |
88 | if (currentMipLevel != mipLevelCount - 1) { |
89 | return false; |
90 | } |
91 | } else { |
92 | w = std::max(w / 2, 1); |
93 | h = std::max(h / 2, 1); |
94 | } |
95 | } |
    // If more than one level was specified, the levels must form a complete chain
    // ending at 1x1.
    if (mipLevelCount != 1 && (w != 1 || h != 1)) {
        return false;
    }
100 | // Can specify just the base, all levels, or no levels. |
101 | if (!hasBasePixels) { |
102 | return levelsWithPixelsCnt == 0; |
103 | } |
104 | return levelsWithPixelsCnt == 1 || levelsWithPixelsCnt == mipLevelCount; |
105 | } |
106 | |
107 | sk_sp<GrTexture> GrGpu::createTextureCommon(SkISize dimensions, |
108 | const GrBackendFormat& format, |
109 | GrRenderable renderable, |
110 | int renderTargetSampleCnt, |
111 | SkBudgeted budgeted, |
112 | GrProtected isProtected, |
113 | int mipLevelCount, |
114 | uint32_t levelClearMask) { |
115 | if (this->caps()->isFormatCompressed(format)) { |
        // Compressed textures must go through GrGpu::createCompressedTexture() instead.
117 | return nullptr; |
118 | } |
119 | |
120 | GrMipMapped mipMapped = mipLevelCount > 1 ? GrMipMapped::kYes : GrMipMapped::kNo; |
121 | if (!this->caps()->validateSurfaceParams(dimensions, format, renderable, renderTargetSampleCnt, |
122 | mipMapped)) { |
123 | return nullptr; |
124 | } |
125 | |
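    // Snap the requested sample count to a count the backend supports for this
    // format.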
126 | if (renderable == GrRenderable::kYes) { |
127 | renderTargetSampleCnt = |
128 | this->caps()->getRenderTargetSampleCount(renderTargetSampleCnt, format); |
129 | } |
130 | // Attempt to catch un- or wrongly initialized sample counts. |
131 | SkASSERT(renderTargetSampleCnt > 0 && renderTargetSampleCnt <= 64); |
132 | this->handleDirtyContext(); |
133 | auto tex = this->onCreateTexture(dimensions, |
134 | format, |
135 | renderable, |
136 | renderTargetSampleCnt, |
137 | budgeted, |
138 | isProtected, |
139 | mipLevelCount, |
140 | levelClearMask); |
141 | if (tex) { |
142 | SkASSERT(tex->backendFormat() == format); |
143 | SkASSERT(GrRenderable::kNo == renderable || tex->asRenderTarget()); |
144 | if (!this->caps()->reuseScratchTextures() && renderable == GrRenderable::kNo) { |
145 | tex->resourcePriv().removeScratchKey(); |
146 | } |
147 | fStats.incTextureCreates(); |
148 | if (renderTargetSampleCnt > 1 && !this->caps()->msaaResolvesAutomatically()) { |
149 | SkASSERT(GrRenderable::kYes == renderable); |
150 | tex->asRenderTarget()->setRequiresManualMSAAResolve(); |
151 | } |
152 | } |
153 | return tex; |
154 | } |
155 | |
156 | sk_sp<GrTexture> GrGpu::createTexture(SkISize dimensions, |
157 | const GrBackendFormat& format, |
158 | GrRenderable renderable, |
159 | int renderTargetSampleCnt, |
160 | GrMipMapped mipMapped, |
161 | SkBudgeted budgeted, |
162 | GrProtected isProtected) { |
163 | int mipLevelCount = 1; |
164 | if (mipMapped == GrMipMapped::kYes) { |
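        // A full mip chain has floor(log2(maxDimension)) + 1 levels, which is what
        // 32 - SkCLZ(maxDimension) computes. E.g., a 100x60 texture gets
        // 32 - SkCLZ(100) = 32 - 25 = 7 levels: 100, 50, 25, 12, 6, 3, 1.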
165 | mipLevelCount = |
166 | 32 - SkCLZ(static_cast<uint32_t>(std::max(dimensions.fWidth, dimensions.fHeight))); |
167 | } |
168 | uint32_t levelClearMask = |
169 | this->caps()->shouldInitializeTextures() ? (1 << mipLevelCount) - 1 : 0; |
170 | auto tex = this->createTextureCommon(dimensions, format, renderable, renderTargetSampleCnt, |
171 | budgeted, isProtected, mipLevelCount, levelClearMask); |
172 | if (tex && mipMapped == GrMipMapped::kYes && levelClearMask) { |
173 | tex->texturePriv().markMipMapsClean(); |
174 | } |
175 | return tex; |
176 | } |
177 | |
178 | sk_sp<GrTexture> GrGpu::createTexture(SkISize dimensions, |
179 | const GrBackendFormat& format, |
180 | GrRenderable renderable, |
181 | int renderTargetSampleCnt, |
182 | SkBudgeted budgeted, |
183 | GrProtected isProtected, |
184 | GrColorType textureColorType, |
185 | GrColorType srcColorType, |
186 | const GrMipLevel texels[], |
187 | int texelLevelCount) { |
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
189 | if (texelLevelCount) { |
190 | if (!validate_texel_levels(dimensions, srcColorType, texels, texelLevelCount, |
191 | this->caps())) { |
192 | return nullptr; |
193 | } |
194 | } |
195 | |
196 | int mipLevelCount = std::max(1, texelLevelCount); |
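    // Request a clear for any level that lacks pixel data; if no levels were
    // provided at all, request clears for the entire chain.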
197 | uint32_t levelClearMask = 0; |
198 | if (this->caps()->shouldInitializeTextures()) { |
199 | if (texelLevelCount) { |
200 | for (int i = 0; i < mipLevelCount; ++i) { |
                if (!texels[i].fPixels) {
202 | levelClearMask |= static_cast<uint32_t>(1 << i); |
203 | } |
204 | } |
205 | } else { |
206 | levelClearMask = static_cast<uint32_t>((1 << mipLevelCount) - 1); |
207 | } |
208 | } |
209 | |
210 | auto tex = this->createTextureCommon(dimensions, format, renderable, renderTargetSampleCnt, |
211 | budgeted, isProtected, texelLevelCount, levelClearMask); |
212 | if (tex) { |
213 | bool markMipLevelsClean = false; |
214 | // Currently if level 0 does not have pixels then no other level may, as enforced by |
215 | // validate_texel_levels. |
216 | if (texelLevelCount && texels[0].fPixels) { |
217 | if (!this->writePixels(tex.get(), 0, 0, dimensions.fWidth, dimensions.fHeight, |
218 | textureColorType, srcColorType, texels, texelLevelCount)) { |
219 | return nullptr; |
220 | } |
            // Currently, if level 1 of the mip map has pixel data, then so must all
            // the other levels, as enforced by validate_texel_levels.
223 | markMipLevelsClean = (texelLevelCount > 1 && !levelClearMask && texels[1].fPixels); |
224 | fStats.incTextureUploads(); |
225 | } else if (levelClearMask && mipLevelCount > 1) { |
226 | markMipLevelsClean = true; |
227 | } |
228 | if (markMipLevelsClean) { |
229 | tex->texturePriv().markMipMapsClean(); |
230 | } |
231 | } |
232 | return tex; |
233 | } |
234 | |
235 | sk_sp<GrTexture> GrGpu::createCompressedTexture(SkISize dimensions, |
236 | const GrBackendFormat& format, |
237 | SkBudgeted budgeted, |
238 | GrMipMapped mipMapped, |
239 | GrProtected isProtected, |
240 | const void* data, |
241 | size_t dataSize) { |
242 | this->handleDirtyContext(); |
243 | if (dimensions.width() < 1 || dimensions.width() > this->caps()->maxTextureSize() || |
244 | dimensions.height() < 1 || dimensions.height() > this->caps()->maxTextureSize()) { |
245 | return nullptr; |
246 | } |
    // Note: if we relax the requirement that data must be provided, then we must
    // check caps()->shouldInitializeTextures() here.
249 | if (!data) { |
250 | return nullptr; |
251 | } |
252 | if (!this->caps()->isFormatTexturable(format)) { |
253 | return nullptr; |
254 | } |
255 | |
256 | // TODO: expand CompressedDataIsCorrect to work here too |
257 | SkImage::CompressionType compressionType = this->caps()->compressionType(format); |
258 | |
259 | if (dataSize < SkCompressedDataSize(compressionType, dimensions, nullptr, |
260 | mipMapped == GrMipMapped::kYes)) { |
261 | return nullptr; |
262 | } |
263 | return this->onCreateCompressedTexture(dimensions, format, budgeted, mipMapped, isProtected, |
264 | data, dataSize); |
265 | } |
266 | |
267 | sk_sp<GrTexture> GrGpu::wrapBackendTexture(const GrBackendTexture& backendTex, |
268 | GrWrapOwnership ownership, |
269 | GrWrapCacheable cacheable, |
270 | GrIOType ioType) { |
271 | SkASSERT(ioType != kWrite_GrIOType); |
272 | this->handleDirtyContext(); |
273 | |
274 | const GrCaps* caps = this->caps(); |
275 | SkASSERT(caps); |
276 | |
277 | if (!caps->isFormatTexturable(backendTex.getBackendFormat())) { |
278 | return nullptr; |
279 | } |
280 | if (backendTex.width() > caps->maxTextureSize() || |
281 | backendTex.height() > caps->maxTextureSize()) { |
282 | return nullptr; |
283 | } |
284 | |
285 | return this->onWrapBackendTexture(backendTex, ownership, cacheable, ioType); |
286 | } |
287 | |
288 | sk_sp<GrTexture> GrGpu::wrapCompressedBackendTexture(const GrBackendTexture& backendTex, |
289 | GrWrapOwnership ownership, |
290 | GrWrapCacheable cacheable) { |
291 | this->handleDirtyContext(); |
292 | |
293 | const GrCaps* caps = this->caps(); |
294 | SkASSERT(caps); |
295 | |
296 | if (!caps->isFormatTexturable(backendTex.getBackendFormat())) { |
297 | return nullptr; |
298 | } |
299 | if (backendTex.width() > caps->maxTextureSize() || |
300 | backendTex.height() > caps->maxTextureSize()) { |
301 | return nullptr; |
302 | } |
303 | |
304 | return this->onWrapCompressedBackendTexture(backendTex, ownership, cacheable); |
305 | } |
306 | |
307 | sk_sp<GrTexture> GrGpu::wrapRenderableBackendTexture(const GrBackendTexture& backendTex, |
308 | int sampleCnt, |
309 | GrWrapOwnership ownership, |
310 | GrWrapCacheable cacheable) { |
311 | this->handleDirtyContext(); |
312 | if (sampleCnt < 1) { |
313 | return nullptr; |
314 | } |
315 | |
316 | const GrCaps* caps = this->caps(); |
317 | |
318 | if (!caps->isFormatTexturable(backendTex.getBackendFormat()) || |
319 | !caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) { |
320 | return nullptr; |
321 | } |
322 | |
323 | if (backendTex.width() > caps->maxRenderTargetSize() || |
324 | backendTex.height() > caps->maxRenderTargetSize()) { |
325 | return nullptr; |
326 | } |
327 | sk_sp<GrTexture> tex = |
328 | this->onWrapRenderableBackendTexture(backendTex, sampleCnt, ownership, cacheable); |
329 | SkASSERT(!tex || tex->asRenderTarget()); |
330 | if (tex && sampleCnt > 1 && !caps->msaaResolvesAutomatically()) { |
331 | tex->asRenderTarget()->setRequiresManualMSAAResolve(); |
332 | } |
333 | return tex; |
334 | } |
335 | |
336 | sk_sp<GrRenderTarget> GrGpu::wrapBackendRenderTarget(const GrBackendRenderTarget& backendRT) { |
337 | this->handleDirtyContext(); |
338 | |
339 | const GrCaps* caps = this->caps(); |
340 | |
341 | if (!caps->isFormatRenderable(backendRT.getBackendFormat(), backendRT.sampleCnt())) { |
342 | return nullptr; |
343 | } |
344 | |
345 | sk_sp<GrRenderTarget> rt = this->onWrapBackendRenderTarget(backendRT); |
346 | if (backendRT.isFramebufferOnly()) { |
347 | rt->setFramebufferOnly(); |
348 | } |
349 | return rt; |
350 | } |
351 | |
352 | sk_sp<GrRenderTarget> GrGpu::wrapBackendTextureAsRenderTarget(const GrBackendTexture& backendTex, |
353 | int sampleCnt) { |
354 | this->handleDirtyContext(); |
355 | |
356 | const GrCaps* caps = this->caps(); |
357 | |
358 | int maxSize = caps->maxTextureSize(); |
359 | if (backendTex.width() > maxSize || backendTex.height() > maxSize) { |
360 | return nullptr; |
361 | } |
362 | |
363 | if (!caps->isFormatRenderable(backendTex.getBackendFormat(), sampleCnt)) { |
364 | return nullptr; |
365 | } |
366 | |
367 | auto rt = this->onWrapBackendTextureAsRenderTarget(backendTex, sampleCnt); |
368 | if (rt && sampleCnt > 1 && !this->caps()->msaaResolvesAutomatically()) { |
369 | rt->setRequiresManualMSAAResolve(); |
370 | } |
371 | return rt; |
372 | } |
373 | |
374 | sk_sp<GrRenderTarget> GrGpu::wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo, |
375 | const GrVkDrawableInfo& vkInfo) { |
376 | return this->onWrapVulkanSecondaryCBAsRenderTarget(imageInfo, vkInfo); |
377 | } |
378 | |
379 | sk_sp<GrRenderTarget> GrGpu::onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo& imageInfo, |
380 | const GrVkDrawableInfo& vkInfo) { |
    // This is only supported on Vulkan, so we default to returning nullptr here.
382 | return nullptr; |
383 | } |
384 | |
385 | sk_sp<GrGpuBuffer> GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType, |
386 | GrAccessPattern accessPattern, const void* data) { |
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
388 | this->handleDirtyContext(); |
389 | sk_sp<GrGpuBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern, data); |
390 | if (!this->caps()->reuseScratchBuffers()) { |
391 | buffer->resourcePriv().removeScratchKey(); |
392 | } |
393 | return buffer; |
394 | } |
395 | |
396 | bool GrGpu::copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect, |
397 | const SkIPoint& dstPoint) { |
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
399 | SkASSERT(dst && src); |
400 | SkASSERT(!src->framebufferOnly()); |
401 | |
402 | if (dst->readOnly()) { |
403 | return false; |
404 | } |
405 | |
406 | this->handleDirtyContext(); |
407 | |
408 | return this->onCopySurface(dst, src, srcRect, dstPoint); |
409 | } |
410 | |
411 | bool GrGpu::readPixels(GrSurface* surface, int left, int top, int width, int height, |
412 | GrColorType surfaceColorType, GrColorType dstColorType, void* buffer, |
413 | size_t rowBytes) { |
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
415 | SkASSERT(surface); |
416 | SkASSERT(!surface->framebufferOnly()); |
417 | SkASSERT(this->caps()->isFormatTexturable(surface->backendFormat())); |
418 | |
419 | auto subRect = SkIRect::MakeXYWH(left, top, width, height); |
420 | auto bounds = SkIRect::MakeWH(surface->width(), surface->height()); |
421 | if (!bounds.contains(subRect)) { |
422 | return false; |
423 | } |
424 | |
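    // Without caps support for arbitrary row bytes, rowBytes must be exactly tight;
    // otherwise it must be at least tight and a whole multiple of the bytes per
    // pixel.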
425 | size_t minRowBytes = SkToSizeT(GrColorTypeBytesPerPixel(dstColorType) * width); |
426 | if (!this->caps()->readPixelsRowBytesSupport()) { |
427 | if (rowBytes != minRowBytes) { |
428 | return false; |
429 | } |
430 | } else { |
431 | if (rowBytes < minRowBytes) { |
432 | return false; |
433 | } |
434 | if (rowBytes % GrColorTypeBytesPerPixel(dstColorType)) { |
435 | return false; |
436 | } |
437 | } |
438 | |
439 | this->handleDirtyContext(); |
440 | |
441 | return this->onReadPixels(surface, left, top, width, height, surfaceColorType, dstColorType, |
442 | buffer, rowBytes); |
443 | } |
444 | |
445 | bool GrGpu::writePixels(GrSurface* surface, int left, int top, int width, int height, |
446 | GrColorType surfaceColorType, GrColorType srcColorType, |
447 | const GrMipLevel texels[], int mipLevelCount, bool prepForTexSampling) { |
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
    ATRACE_ANDROID_FRAMEWORK_ALWAYS("texture_upload");
450 | SkASSERT(surface); |
451 | SkASSERT(!surface->framebufferOnly()); |
452 | |
453 | if (surface->readOnly()) { |
454 | return false; |
455 | } |
456 | |
457 | if (mipLevelCount == 0) { |
458 | return false; |
459 | } else if (mipLevelCount == 1) { |
460 | // We require that if we are not mipped, then the write region is contained in the surface |
461 | auto subRect = SkIRect::MakeXYWH(left, top, width, height); |
462 | auto bounds = SkIRect::MakeWH(surface->width(), surface->height()); |
463 | if (!bounds.contains(subRect)) { |
464 | return false; |
465 | } |
466 | } else if (0 != left || 0 != top || width != surface->width() || height != surface->height()) { |
        // We require that if the texels are mipped, then the write region is the entire surface
468 | return false; |
469 | } |
470 | |
471 | if (!validate_texel_levels({width, height}, srcColorType, texels, mipLevelCount, |
472 | this->caps())) { |
473 | return false; |
474 | } |
475 | |
476 | this->handleDirtyContext(); |
477 | if (this->onWritePixels(surface, left, top, width, height, surfaceColorType, srcColorType, |
478 | texels, mipLevelCount, prepForTexSampling)) { |
479 | SkIRect rect = SkIRect::MakeXYWH(left, top, width, height); |
480 | this->didWriteToSurface(surface, kTopLeft_GrSurfaceOrigin, &rect, mipLevelCount); |
481 | fStats.incTextureUploads(); |
482 | return true; |
483 | } |
484 | return false; |
485 | } |
486 | |
487 | bool GrGpu::transferPixelsTo(GrTexture* texture, int left, int top, int width, int height, |
488 | GrColorType textureColorType, GrColorType bufferColorType, |
489 | GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) { |
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
491 | SkASSERT(texture); |
492 | SkASSERT(transferBuffer); |
493 | |
494 | if (texture->readOnly()) { |
495 | return false; |
496 | } |
497 | |
498 | // We require that the write region is contained in the texture |
499 | SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height); |
500 | SkIRect bounds = SkIRect::MakeWH(texture->width(), texture->height()); |
501 | if (!bounds.contains(subRect)) { |
502 | return false; |
503 | } |
504 | |
505 | size_t bpp = GrColorTypeBytesPerPixel(bufferColorType); |
506 | if (this->caps()->writePixelsRowBytesSupport()) { |
507 | if (rowBytes < SkToSizeT(bpp * width)) { |
508 | return false; |
509 | } |
510 | if (rowBytes % bpp) { |
511 | return false; |
512 | } |
513 | } else { |
514 | if (rowBytes != SkToSizeT(bpp * width)) { |
515 | return false; |
516 | } |
517 | } |
518 | |
519 | this->handleDirtyContext(); |
520 | if (this->onTransferPixelsTo(texture, left, top, width, height, textureColorType, |
521 | bufferColorType, transferBuffer, offset, rowBytes)) { |
522 | SkIRect rect = SkIRect::MakeXYWH(left, top, width, height); |
523 | this->didWriteToSurface(texture, kTopLeft_GrSurfaceOrigin, &rect); |
524 | fStats.incTransfersToTexture(); |
525 | |
526 | return true; |
527 | } |
528 | return false; |
529 | } |
530 | |
531 | bool GrGpu::transferPixelsFrom(GrSurface* surface, int left, int top, int width, int height, |
532 | GrColorType surfaceColorType, GrColorType bufferColorType, |
533 | GrGpuBuffer* transferBuffer, size_t offset) { |
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
535 | SkASSERT(surface); |
536 | SkASSERT(transferBuffer); |
537 | SkASSERT(this->caps()->isFormatTexturable(surface->backendFormat())); |
538 | |
539 | #ifdef SK_DEBUG |
540 | auto supportedRead = this->caps()->supportedReadPixelsColorType( |
541 | surfaceColorType, surface->backendFormat(), bufferColorType); |
542 | SkASSERT(supportedRead.fOffsetAlignmentForTransferBuffer); |
543 | SkASSERT(offset % supportedRead.fOffsetAlignmentForTransferBuffer == 0); |
544 | #endif |
545 | |
    // We require that the read region is contained in the surface
547 | SkIRect subRect = SkIRect::MakeXYWH(left, top, width, height); |
548 | SkIRect bounds = SkIRect::MakeWH(surface->width(), surface->height()); |
549 | if (!bounds.contains(subRect)) { |
550 | return false; |
551 | } |
552 | |
553 | this->handleDirtyContext(); |
554 | if (this->onTransferPixelsFrom(surface, left, top, width, height, surfaceColorType, |
555 | bufferColorType, transferBuffer, offset)) { |
556 | fStats.incTransfersFromSurface(); |
557 | return true; |
558 | } |
559 | return false; |
560 | } |
561 | |
562 | bool GrGpu::regenerateMipMapLevels(GrTexture* texture) { |
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
564 | SkASSERT(texture); |
565 | SkASSERT(this->caps()->mipMapSupport()); |
566 | SkASSERT(texture->texturePriv().mipMapped() == GrMipMapped::kYes); |
567 | if (!texture->texturePriv().mipMapsAreDirty()) { |
568 | // This can happen when the proxy expects mipmaps to be dirty, but they are not dirty on the |
569 | // actual target. This may be caused by things that the drawingManager could not predict, |
570 | // i.e., ops that don't draw anything, aborting a draw for exceptional circumstances, etc. |
571 | // NOTE: This goes away once we quit tracking mipmap state on the actual texture. |
572 | return true; |
573 | } |
574 | if (texture->readOnly()) { |
575 | return false; |
576 | } |
577 | if (this->onRegenerateMipMapLevels(texture)) { |
578 | texture->texturePriv().markMipMapsClean(); |
579 | return true; |
580 | } |
581 | return false; |
582 | } |
583 | |
584 | void GrGpu::resetTextureBindings() { |
585 | this->handleDirtyContext(); |
586 | this->onResetTextureBindings(); |
587 | } |
588 | |
589 | void GrGpu::resolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect, |
590 | ForExternalIO forExternalIO) { |
591 | SkASSERT(target); |
592 | this->handleDirtyContext(); |
593 | this->onResolveRenderTarget(target, resolveRect, forExternalIO); |
594 | } |
595 | |
596 | void GrGpu::didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds, |
597 | uint32_t mipLevels) const { |
598 | SkASSERT(surface); |
599 | SkASSERT(!surface->readOnly()); |
    // Mark any MIP chain as dirty if and only if the bounds are null (meaning the
    // whole surface) or non-empty.
601 | if (nullptr == bounds || !bounds->isEmpty()) { |
602 | GrTexture* texture = surface->asTexture(); |
603 | if (texture && 1 == mipLevels) { |
604 | texture->texturePriv().markMipMapsDirty(); |
605 | } |
606 | } |
607 | } |
608 | |
609 | int GrGpu::findOrAssignSamplePatternKey(GrRenderTarget* renderTarget) { |
610 | SkASSERT(this->caps()->sampleLocationsSupport()); |
611 | SkASSERT(renderTarget->numSamples() > 1 || |
612 | (renderTarget->renderTargetPriv().getStencilAttachment() && |
613 | renderTarget->renderTargetPriv().getStencilAttachment()->numSamples() > 1)); |
614 | |
615 | SkSTArray<16, SkPoint> sampleLocations; |
616 | this->querySampleLocations(renderTarget, &sampleLocations); |
617 | return fSamplePatternDictionary.findOrAssignSamplePatternKey(sampleLocations); |
618 | } |
619 | |
620 | #ifdef SK_DEBUG |
621 | bool GrGpu::inStagingBuffers(GrStagingBuffer* b) const { |
622 | for (const auto& i : fStagingBuffers) { |
623 | if (b == i.get()) { |
624 | return true; |
625 | } |
626 | } |
627 | return false; |
628 | } |
629 | |
630 | void GrGpu::validateStagingBuffers() const { |
631 | for (const auto& i : fStagingBuffers) { |
632 | GrStagingBuffer* buffer = i.get(); |
633 | SkASSERT(fAvailableStagingBuffers.isInList(buffer) || |
634 | fActiveStagingBuffers.isInList(buffer) || |
635 | fBusyStagingBuffers.isInList(buffer)); |
636 | } |
637 | for (auto b : fAvailableStagingBuffers) { |
638 | SkASSERT(this->inStagingBuffers(b)); |
639 | } |
640 | for (auto b : fActiveStagingBuffers) { |
641 | SkASSERT(this->inStagingBuffers(b)); |
642 | } |
643 | for (auto b : fBusyStagingBuffers) { |
644 | SkASSERT(this->inStagingBuffers(b)); |
645 | } |
646 | } |
647 | #endif |
648 | |
649 | void GrGpu::executeFlushInfo(GrSurfaceProxy* proxies[], |
650 | int numProxies, |
651 | SkSurface::BackendSurfaceAccess access, |
652 | const GrFlushInfo& info, |
653 | const GrPrepareForExternalIORequests& externalRequests) { |
    TRACE_EVENT0("skia.gpu", TRACE_FUNC);
655 | |
656 | GrResourceProvider* resourceProvider = fContext->priv().resourceProvider(); |
657 | |
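    // For each signal semaphore requested by the flush, either wrap the
    // client-provided backend semaphore or create a new one and export it back
    // through the GrFlushInfo.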
658 | std::unique_ptr<std::unique_ptr<GrSemaphore>[]> semaphores( |
659 | new std::unique_ptr<GrSemaphore>[info.fNumSemaphores]); |
660 | if (this->caps()->semaphoreSupport() && info.fNumSemaphores) { |
661 | for (int i = 0; i < info.fNumSemaphores; ++i) { |
662 | if (info.fSignalSemaphores[i].isInitialized()) { |
663 | semaphores[i] = resourceProvider->wrapBackendSemaphore( |
664 | info.fSignalSemaphores[i], |
665 | GrResourceProvider::SemaphoreWrapType::kWillSignal, |
666 | kBorrow_GrWrapOwnership); |
667 | this->insertSemaphore(semaphores[i].get()); |
668 | } else { |
669 | semaphores[i] = resourceProvider->makeSemaphore(false); |
670 | if (semaphores[i]) { |
671 | this->insertSemaphore(semaphores[i].get()); |
672 | info.fSignalSemaphores[i] = semaphores[i]->backendSemaphore(); |
673 | } |
674 | } |
675 | } |
676 | } |
677 | |
678 | if (info.fFinishedProc) { |
679 | this->addFinishedProc(info.fFinishedProc, info.fFinishedContext); |
680 | } |
681 | this->prepareSurfacesForBackendAccessAndExternalIO(proxies, numProxies, access, |
682 | externalRequests); |
683 | } |
684 | |
685 | bool GrGpu::submitToGpu(bool syncCpu) { |
686 | this->stats()->incNumSubmitToGpus(); |
687 | |
688 | #ifdef SK_DEBUG |
689 | this->validateStagingBuffers(); |
690 | #endif |
691 | this->unmapStagingBuffers(); |
692 | |
693 | bool submitted = this->onSubmitToGpu(syncCpu); |
694 | |
695 | // Move all active staging buffers to the busy list. |
696 | // TODO: this should probably be handled inside of the onSubmitToGpu by the backends. |
697 | while (GrStagingBuffer* buffer = fActiveStagingBuffers.head()) { |
698 | fActiveStagingBuffers.remove(buffer); |
699 | fBusyStagingBuffers.addToTail(buffer); |
700 | } |
701 | return submitted; |
702 | } |
703 | |
704 | #ifdef SK_ENABLE_DUMP_GPU |
705 | void GrGpu::dumpJSON(SkJSONWriter* writer) const { |
706 | writer->beginObject(); |
707 | |
708 | // TODO: Is there anything useful in the base class to dump here? |
709 | |
710 | this->onDumpJSON(writer); |
711 | |
712 | writer->endObject(); |
713 | } |
714 | #else |
715 | void GrGpu::dumpJSON(SkJSONWriter* writer) const { } |
716 | #endif |
717 | |
718 | #if GR_TEST_UTILS |
719 | |
720 | #if GR_GPU_STATS |
721 | static const char* cache_result_to_str(int i) { |
722 | const char* kCacheResultStrings[GrGpu::Stats::kNumProgramCacheResults] = { |
723 | "hits" , |
724 | "misses" , |
725 | "partials" |
726 | }; |
727 | static_assert(0 == (int) GrGpu::Stats::ProgramCacheResult::kHit); |
728 | static_assert(1 == (int) GrGpu::Stats::ProgramCacheResult::kMiss); |
729 | static_assert(2 == (int) GrGpu::Stats::ProgramCacheResult::kPartial); |
730 | static_assert(GrGpu::Stats::kNumProgramCacheResults == 3); |
731 | return kCacheResultStrings[i]; |
732 | } |
733 | |
734 | void GrGpu::Stats::dump(SkString* out) { |
735 | out->appendf("Render Target Binds: %d\n" , fRenderTargetBinds); |
736 | out->appendf("Shader Compilations: %d\n" , fShaderCompilations); |
737 | out->appendf("Textures Created: %d\n" , fTextureCreates); |
738 | out->appendf("Texture Uploads: %d\n" , fTextureUploads); |
739 | out->appendf("Transfers to Texture: %d\n" , fTransfersToTexture); |
740 | out->appendf("Transfers from Surface: %d\n" , fTransfersFromSurface); |
741 | out->appendf("Stencil Buffer Creates: %d\n" , fStencilAttachmentCreates); |
742 | out->appendf("Number of draws: %d\n" , fNumDraws); |
743 | out->appendf("Number of Scratch Textures reused %d\n" , fNumScratchTexturesReused); |
744 | |
745 | SkASSERT(fNumInlineCompilationFailures == 0); |
746 | out->appendf("Number of Inline compile failures %d\n" , fNumInlineCompilationFailures); |
747 | for (int i = 0; i < Stats::kNumProgramCacheResults-1; ++i) { |
748 | out->appendf("Inline Program Cache %s %d\n" , cache_result_to_str(i), |
749 | fInlineProgramCacheStats[i]); |
750 | } |
751 | |
752 | SkASSERT(fNumPreCompilationFailures == 0); |
753 | out->appendf("Number of precompile failures %d\n" , fNumPreCompilationFailures); |
754 | for (int i = 0; i < Stats::kNumProgramCacheResults-1; ++i) { |
755 | out->appendf("Precompile Program Cache %s %d\n" , cache_result_to_str(i), |
756 | fPreProgramCacheStats[i]); |
757 | } |
758 | |
759 | SkASSERT(fNumCompilationFailures == 0); |
760 | out->appendf("Total number of compilation failures %d\n" , fNumCompilationFailures); |
761 | out->appendf("Total number of partial compilation successes %d\n" , |
762 | fNumPartialCompilationSuccesses); |
763 | out->appendf("Total number of compilation successes %d\n" , fNumCompilationSuccesses); |
764 | |
765 | // enable this block to output CSV-style stats for program pre-compilation |
766 | #if 0 |
767 | SkASSERT(fNumInlineCompilationFailures == 0); |
768 | SkASSERT(fNumPreCompilationFailures == 0); |
769 | SkASSERT(fNumCompilationFailures == 0); |
770 | SkASSERT(fNumPartialCompilationSuccesses == 0); |
771 | |
    SkDebugf("%d, %d, %d, %d, %d\n",
773 | fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kHit], |
774 | fInlineProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss], |
775 | fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kHit], |
776 | fPreProgramCacheStats[(int) Stats::ProgramCacheResult::kMiss], |
777 | fNumCompilationSuccesses); |
778 | #endif |
779 | } |
780 | |
781 | void GrGpu::Stats::dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) { |
782 | keys->push_back(SkString("render_target_binds" )); values->push_back(fRenderTargetBinds); |
783 | keys->push_back(SkString("shader_compilations" )); values->push_back(fShaderCompilations); |
784 | } |
785 | |
786 | #endif // GR_GPU_STATS |
787 | #endif // GR_TEST_UTILS |
788 | |
789 | bool GrGpu::MipMapsAreCorrect(SkISize dimensions, |
790 | GrMipMapped mipMapped, |
791 | const BackendTextureData* data) { |
792 | int numMipLevels = 1; |
793 | if (mipMapped == GrMipMapped::kYes) { |
794 | numMipLevels = SkMipMap::ComputeLevelCount(dimensions.width(), dimensions.height()) + 1; |
795 | } |
796 | |
797 | if (!data || data->type() == BackendTextureData::Type::kColor) { |
798 | return true; |
799 | } |
800 | |
801 | if (data->type() == BackendTextureData::Type::kCompressed) { |
802 | return false; // This should be going through CompressedDataIsCorrect |
803 | } |
804 | |
805 | SkASSERT(data->type() == BackendTextureData::Type::kPixmaps); |
806 | |
807 | if (data->pixmap(0).dimensions() != dimensions) { |
808 | return false; |
809 | } |
810 | |
811 | SkColorType colorType = data->pixmap(0).colorType(); |
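    // Each successive level must halve the previous level's dimensions (rounded
    // down, to a minimum of 1) and keep the base level's color type.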
812 | for (int i = 1; i < numMipLevels; ++i) { |
813 | dimensions = {std::max(1, dimensions.width()/2), std::max(1, dimensions.height()/2)}; |
814 | if (dimensions != data->pixmap(i).dimensions()) { |
815 | return false; |
816 | } |
817 | if (colorType != data->pixmap(i).colorType()) { |
818 | return false; |
819 | } |
820 | } |
821 | return true; |
822 | } |
823 | |
824 | bool GrGpu::CompressedDataIsCorrect(SkISize dimensions, SkImage::CompressionType compressionType, |
825 | GrMipMapped mipMapped, const BackendTextureData* data) { |
826 | |
827 | if (!data || data->type() == BackendTextureData::Type::kColor) { |
828 | return true; |
829 | } |
830 | |
831 | if (data->type() == BackendTextureData::Type::kPixmaps) { |
832 | return false; |
833 | } |
834 | |
835 | SkASSERT(data->type() == BackendTextureData::Type::kCompressed); |
836 | |
837 | size_t computedSize = SkCompressedDataSize(compressionType, dimensions, |
838 | nullptr, mipMapped == GrMipMapped::kYes); |
839 | |
840 | return computedSize == data->compressedSize(); |
841 | } |
842 | |
843 | GrBackendTexture GrGpu::createBackendTexture(SkISize dimensions, |
844 | const GrBackendFormat& format, |
845 | GrRenderable renderable, |
846 | GrMipMapped mipMapped, |
847 | GrProtected isProtected, |
848 | const BackendTextureData* data) { |
849 | const GrCaps* caps = this->caps(); |
850 | |
851 | if (!format.isValid()) { |
852 | return {}; |
853 | } |
854 | |
855 | if (caps->isFormatCompressed(format)) { |
856 | // Compressed formats must go through the createCompressedBackendTexture API |
857 | return {}; |
858 | } |
859 | |
860 | if (data && data->type() == BackendTextureData::Type::kPixmaps) { |
861 | auto ct = SkColorTypeToGrColorType(data->pixmap(0).colorType()); |
862 | if (!caps->areColorTypeAndFormatCompatible(ct, format)) { |
863 | return {}; |
864 | } |
865 | } |
866 | |
867 | if (dimensions.isEmpty() || dimensions.width() > caps->maxTextureSize() || |
868 | dimensions.height() > caps->maxTextureSize()) { |
869 | return {}; |
870 | } |
871 | |
872 | if (mipMapped == GrMipMapped::kYes && !this->caps()->mipMapSupport()) { |
873 | return {}; |
874 | } |
875 | |
876 | if (!MipMapsAreCorrect(dimensions, mipMapped, data)) { |
877 | return {}; |
878 | } |
879 | |
880 | return this->onCreateBackendTexture(dimensions, format, renderable, mipMapped, |
881 | isProtected, data); |
882 | } |
883 | |
884 | GrBackendTexture GrGpu::createCompressedBackendTexture(SkISize dimensions, |
885 | const GrBackendFormat& format, |
886 | GrMipMapped mipMapped, |
887 | GrProtected isProtected, |
888 | const BackendTextureData* data) { |
889 | const GrCaps* caps = this->caps(); |
890 | |
891 | if (!format.isValid()) { |
892 | return {}; |
893 | } |
894 | |
895 | SkImage::CompressionType compressionType = caps->compressionType(format); |
896 | if (compressionType == SkImage::CompressionType::kNone) { |
897 | // Uncompressed formats must go through the createBackendTexture API |
898 | return {}; |
899 | } |
900 | |
901 | if (dimensions.isEmpty() || |
902 | dimensions.width() > caps->maxTextureSize() || |
903 | dimensions.height() > caps->maxTextureSize()) { |
904 | return {}; |
905 | } |
906 | |
907 | if (mipMapped == GrMipMapped::kYes && !this->caps()->mipMapSupport()) { |
908 | return {}; |
909 | } |
910 | |
911 | if (!CompressedDataIsCorrect(dimensions, compressionType, mipMapped, data)) { |
912 | return {}; |
913 | } |
914 | |
915 | return this->onCreateCompressedBackendTexture(dimensions, format, mipMapped, |
916 | isProtected, data); |
917 | } |
918 | |
919 | GrStagingBuffer* GrGpu::findStagingBuffer(size_t size) { |
920 | #ifdef SK_DEBUG |
921 | this->validateStagingBuffers(); |
922 | #endif |
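    // Prefer an active buffer with enough space remaining, then an idle available
    // buffer; failing both, create a new buffer sized to the next power of two and
    // no smaller than kMinStagingBufferSize.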
923 | for (auto b : fActiveStagingBuffers) { |
924 | if (b->remaining() >= size) { |
925 | return b; |
926 | } |
927 | } |
928 | for (auto b : fAvailableStagingBuffers) { |
929 | if (b->remaining() >= size) { |
930 | fAvailableStagingBuffers.remove(b); |
931 | fActiveStagingBuffers.addToTail(b); |
932 | return b; |
933 | } |
934 | } |
935 | size = SkNextPow2(size); |
936 | size = std::max(size, kMinStagingBufferSize); |
937 | std::unique_ptr<GrStagingBuffer> b = this->createStagingBuffer(size); |
938 | GrStagingBuffer* stagingBuffer = b.get(); |
939 | fStagingBuffers.push_back(std::move(b)); |
940 | fActiveStagingBuffers.addToTail(stagingBuffer); |
941 | return stagingBuffer; |
942 | } |
943 | |
944 | GrStagingBuffer::Slice GrGpu::allocateStagingBufferSlice(size_t size) { |
945 | #ifdef SK_DEBUG |
946 | this->validateStagingBuffers(); |
947 | #endif |
948 | GrStagingBuffer* stagingBuffer = this->findStagingBuffer(size); |
949 | return stagingBuffer->allocate(size); |
950 | } |
951 | |
952 | void GrGpu::unmapStagingBuffers() { |
953 | #ifdef SK_DEBUG |
954 | this->validateStagingBuffers(); |
955 | #endif |
956 | // Unmap all active buffers. |
957 | for (auto buffer : fActiveStagingBuffers) { |
958 | buffer->unmap(); |
959 | } |
960 | } |
961 | |
962 | void GrGpu::markStagingBufferAvailable(GrStagingBuffer* buffer) { |
963 | #ifdef SK_DEBUG |
964 | this->validateStagingBuffers(); |
965 | #endif |
966 | fBusyStagingBuffers.remove(buffer); |
967 | fAvailableStagingBuffers.addToTail(buffer); |
968 | } |
969 | |