/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrResourceProvider.h"

#include "include/gpu/GrBackendSemaphore.h"
#include "include/gpu/GrContext.h"
#include "include/private/GrResourceKey.h"
#include "include/private/GrSingleOwner.h"
#include "src/core/SkConvertPixels.h"
#include "src/core/SkMathPriv.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrContextPriv.h"
#include "src/gpu/GrDataUtils.h"
#include "src/gpu/GrGpu.h"
#include "src/gpu/GrGpuBuffer.h"
#include "src/gpu/GrImageInfo.h"
#include "src/gpu/GrPath.h"
#include "src/gpu/GrPathRendering.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrRenderTargetPriv.h"
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrSemaphore.h"
#include "src/gpu/GrStencilAttachment.h"
#include "src/gpu/GrTexturePriv.h"
#include "src/gpu/SkGr.h"

const int GrResourceProvider::kMinScratchTextureSize = 16;

#define ASSERT_SINGLE_OWNER \
    SkDEBUGCODE(GrSingleOwner::AutoEnforce debug_SingleOwner(fSingleOwner);)

GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSingleOwner* owner)
        : fCache(cache)
        , fGpu(gpu)
#ifdef SK_DEBUG
        , fSingleOwner(owner)
#endif
{
    fCaps = sk_ref_sp(fGpu->caps());
}

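// Creates a texture initialized with the provided mip level data. The request is first checked
// against the scratch resource cache for an exact match (in which case the pixels are written
// into the recycled texture); otherwise the levels are converted via prepareLevels() into a form
// the backend accepts and a new texture is created through the GrGpu.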
sk_sp<GrTexture> GrResourceProvider::createTexture(SkISize dimensions,
                                                   const GrBackendFormat& format,
                                                   GrColorType colorType,
                                                   GrRenderable renderable,
                                                   int renderTargetSampleCnt,
                                                   SkBudgeted budgeted,
                                                   GrProtected isProtected,
                                                   const GrMipLevel texels[],
                                                   int mipLevelCount) {
    ASSERT_SINGLE_OWNER

    SkASSERT(mipLevelCount > 0);

    if (this->isAbandoned()) {
        return nullptr;
    }

    GrMipMapped mipMapped = mipLevelCount > 1 ? GrMipMapped::kYes : GrMipMapped::kNo;
    if (!fCaps->validateSurfaceParams(dimensions, format, renderable, renderTargetSampleCnt,
                                      mipMapped)) {
        return nullptr;
    }
    // Current rule is that you can provide no level data, just the base, or all the levels.
    bool hasPixels = mipLevelCount && texels[0].fPixels;
    auto scratch = this->getExactScratch(dimensions, format, renderable, renderTargetSampleCnt,
                                         budgeted, mipMapped, isProtected);
    if (scratch) {
        if (!hasPixels) {
            return scratch;
        }
        return this->writePixels(std::move(scratch), colorType, dimensions, texels, mipLevelCount);
    }
    SkAutoSTMalloc<14, GrMipLevel> tmpTexels;
    SkAutoSTArray<14, std::unique_ptr<char[]>> tmpDatas;
    GrColorType tempColorType = GrColorType::kUnknown;
    if (hasPixels) {
        tempColorType = this->prepareLevels(format, colorType, dimensions, texels, mipLevelCount,
                                            &tmpTexels, &tmpDatas);
        if (tempColorType == GrColorType::kUnknown) {
            return nullptr;
        }
    }
    return fGpu->createTexture(dimensions, format, renderable, renderTargetSampleCnt, budgeted,
                               isProtected, colorType, tempColorType, tmpTexels.get(),
                               mipLevelCount);
}

sk_sp<GrTexture> GrResourceProvider::getExactScratch(SkISize dimensions,
                                                     const GrBackendFormat& format,
                                                     GrRenderable renderable,
                                                     int renderTargetSampleCnt,
                                                     SkBudgeted budgeted,
                                                     GrMipMapped mipMapped,
                                                     GrProtected isProtected) {
    sk_sp<GrTexture> tex(this->refScratchTexture(dimensions, format, renderable,
                                                 renderTargetSampleCnt, mipMapped, isProtected));
    if (tex && SkBudgeted::kNo == budgeted) {
        tex->resourcePriv().makeUnbudgeted();
    }

    return tex;
}

sk_sp<GrTexture> GrResourceProvider::createTexture(SkISize dimensions,
                                                   const GrBackendFormat& format,
                                                   GrColorType colorType,
                                                   GrRenderable renderable,
                                                   int renderTargetSampleCnt,
                                                   SkBudgeted budgeted,
                                                   SkBackingFit fit,
                                                   GrProtected isProtected,
                                                   const GrMipLevel& mipLevel) {
    ASSERT_SINGLE_OWNER

    if (!mipLevel.fPixels) {
        return nullptr;
    }

    if (SkBackingFit::kApprox == fit) {
        if (this->isAbandoned()) {
            return nullptr;
        }
        if (!fCaps->validateSurfaceParams(dimensions, format, renderable, renderTargetSampleCnt,
                                          GrMipMapped::kNo)) {
            return nullptr;
        }

        auto tex = this->createApproxTexture(dimensions, format, renderable, renderTargetSampleCnt,
                                             isProtected);
        if (!tex) {
            return nullptr;
        }
        return this->writePixels(std::move(tex), colorType, dimensions, &mipLevel, 1);
    } else {
        return this->createTexture(dimensions, format, colorType, renderable,
                                   renderTargetSampleCnt, budgeted, isProtected, &mipLevel, 1);
    }
}

sk_sp<GrTexture> GrResourceProvider::createCompressedTexture(SkISize dimensions,
                                                             const GrBackendFormat& format,
                                                             SkBudgeted budgeted,
                                                             GrMipMapped mipMapped,
                                                             GrProtected isProtected,
                                                             SkData* data) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned()) {
        return nullptr;
    }
    return fGpu->createCompressedTexture(dimensions, format, budgeted, mipMapped,
                                         isProtected, data->data(), data->size());
}

sk_sp<GrTexture> GrResourceProvider::createTexture(SkISize dimensions,
                                                   const GrBackendFormat& format,
                                                   GrRenderable renderable,
                                                   int renderTargetSampleCnt,
                                                   GrMipMapped mipMapped,
                                                   SkBudgeted budgeted,
                                                   GrProtected isProtected) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned()) {
        return nullptr;
    }

    if (!fCaps->validateSurfaceParams(dimensions, format, renderable, renderTargetSampleCnt,
                                      mipMapped)) {
        return nullptr;
    }

    // Currently we don't recycle compressed textures as scratch. Additionally all compressed
    // textures should be created through the createCompressedTexture function.
    SkASSERT(!this->caps()->isFormatCompressed(format));

    // TODO: Support GrMipMapped::kYes in scratch texture lookup here.
    sk_sp<GrTexture> tex =
            this->getExactScratch(dimensions, format, renderable, renderTargetSampleCnt, budgeted,
                                  mipMapped, isProtected);
    if (tex) {
        return tex;
    }

    return fGpu->createTexture(dimensions, format, renderable, renderTargetSampleCnt, mipMapped,
                               budgeted, isProtected);
}

// Map 'value' to a larger multiple of 2. Values <= 'kMagicTol' (that aren't already powers of 2)
// pop up to the next power of 2. Values above 'kMagicTol' round up only as far as the midpoint
// between the surrounding powers of 2, or to the next power of 2 if they exceed that midpoint.
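// A few values traced through adjust() (kMinScratchTextureSize == 16, kMagicTol == 1024):
//   adjust(10)   -> 16    (clamped to the minimum, already a power of 2)
//   adjust(700)  -> 1024  (<= kMagicTol: next power of 2)
//   adjust(1500) -> 1536  (> kMagicTol, at or below the 1024/2048 midpoint: midpoint)
//   adjust(1600) -> 2048  (> kMagicTol, above the midpoint: next power of 2)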
SkISize GrResourceProvider::MakeApprox(SkISize dimensions) {
    auto adjust = [](int value) {
        static const int kMagicTol = 1024;

        value = std::max(kMinScratchTextureSize, value);

        if (SkIsPow2(value)) {
            return value;
        }

        int ceilPow2 = SkNextPow2(value);
        if (value <= kMagicTol) {
            return ceilPow2;
        }

        int floorPow2 = ceilPow2 >> 1;
        int mid = floorPow2 + (floorPow2 >> 1);

        if (value <= mid) {
            return mid;
        }
        return ceilPow2;
    };

    return {adjust(dimensions.width()), adjust(dimensions.height())};
}

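// Like createTexture(), but rounds the dimensions up with MakeApprox() so that requests of
// similar sizes can share scratch textures. Approximate-fit textures are always budgeted and
// never mipmapped.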
sk_sp<GrTexture> GrResourceProvider::createApproxTexture(SkISize dimensions,
                                                         const GrBackendFormat& format,
                                                         GrRenderable renderable,
                                                         int renderTargetSampleCnt,
                                                         GrProtected isProtected) {
    ASSERT_SINGLE_OWNER

    if (this->isAbandoned()) {
        return nullptr;
    }

    // Currently we don't recycle compressed textures as scratch. Additionally all compressed
    // textures should be created through the createCompressedTexture function.
    SkASSERT(!this->caps()->isFormatCompressed(format));

    if (!fCaps->validateSurfaceParams(dimensions, format, renderable, renderTargetSampleCnt,
                                      GrMipMapped::kNo)) {
        return nullptr;
    }

    auto copyDimensions = MakeApprox(dimensions);

    if (auto tex = this->refScratchTexture(copyDimensions, format, renderable,
                                           renderTargetSampleCnt, GrMipMapped::kNo, isProtected)) {
        return tex;
    }

    return fGpu->createTexture(copyDimensions, format, renderable, renderTargetSampleCnt,
                               GrMipMapped::kNo, SkBudgeted::kYes, isProtected);
}

sk_sp<GrTexture> GrResourceProvider::refScratchTexture(SkISize dimensions,
                                                       const GrBackendFormat& format,
                                                       GrRenderable renderable,
                                                       int renderTargetSampleCnt,
                                                       GrMipMapped mipMapped,
                                                       GrProtected isProtected) {
    ASSERT_SINGLE_OWNER
    SkASSERT(!this->isAbandoned());
    SkASSERT(!this->caps()->isFormatCompressed(format));
    SkASSERT(fCaps->validateSurfaceParams(dimensions, format, renderable, renderTargetSampleCnt,
                                          GrMipMapped::kNo));

    // We could make initial clears work with scratch textures but it is a rare case so we just opt
    // to fall back to making a new texture.
    if (fGpu->caps()->reuseScratchTextures() || renderable == GrRenderable::kYes) {
        GrScratchKey key;
        GrTexturePriv::ComputeScratchKey(*this->caps(), format, dimensions, renderable,
                                         renderTargetSampleCnt, mipMapped, isProtected, &key);
        GrGpuResource* resource = fCache->findAndRefScratchResource(key);
        if (resource) {
            fGpu->stats()->incNumScratchTexturesReused();
            GrSurface* surface = static_cast<GrSurface*>(resource);
            return sk_sp<GrTexture>(surface->asTexture());
        }
    }

    return nullptr;
}

sk_sp<GrTexture> GrResourceProvider::wrapBackendTexture(const GrBackendTexture& tex,
                                                        GrWrapOwnership ownership,
                                                        GrWrapCacheable cacheable,
                                                        GrIOType ioType) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned()) {
        return nullptr;
    }
    return fGpu->wrapBackendTexture(tex, ownership, cacheable, ioType);
}

sk_sp<GrTexture> GrResourceProvider::wrapCompressedBackendTexture(const GrBackendTexture& tex,
                                                                  GrWrapOwnership ownership,
                                                                  GrWrapCacheable cacheable) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned()) {
        return nullptr;
    }

    return fGpu->wrapCompressedBackendTexture(tex, ownership, cacheable);
}

sk_sp<GrTexture> GrResourceProvider::wrapRenderableBackendTexture(const GrBackendTexture& tex,
                                                                  int sampleCnt,
                                                                  GrWrapOwnership ownership,
                                                                  GrWrapCacheable cacheable) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned()) {
        return nullptr;
    }
    return fGpu->wrapRenderableBackendTexture(tex, sampleCnt, ownership, cacheable);
}

sk_sp<GrRenderTarget> GrResourceProvider::wrapBackendRenderTarget(
        const GrBackendRenderTarget& backendRT) {
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr : fGpu->wrapBackendRenderTarget(backendRT);
}

sk_sp<GrRenderTarget> GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget(
        const SkImageInfo& imageInfo, const GrVkDrawableInfo& vkInfo) {
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr : fGpu->wrapVulkanSecondaryCBAsRenderTarget(imageInfo,
                                                                                     vkInfo);
}

void GrResourceProvider::assignUniqueKeyToResource(const GrUniqueKey& key,
                                                   GrGpuResource* resource) {
    ASSERT_SINGLE_OWNER
    if (this->isAbandoned() || !resource) {
        return;
    }
    resource->resourcePriv().setUniqueKey(key);
}

sk_sp<GrGpuResource> GrResourceProvider::findResourceByUniqueKey(const GrUniqueKey& key) {
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr
                               : sk_sp<GrGpuResource>(fCache->findAndRefUniqueResource(key));
}

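// Returns the static buffer cached under 'key', creating it with 'data' on first use. Static
// buffers are identified by a unique key rather than being binned as scratch resources.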
sk_sp<const GrGpuBuffer> GrResourceProvider::findOrMakeStaticBuffer(GrGpuBufferType intendedType,
                                                                    size_t size,
                                                                    const void* data,
                                                                    const GrUniqueKey& key) {
    if (auto buffer = this->findByUniqueKey<GrGpuBuffer>(key)) {
        return std::move(buffer);
    }
    if (auto buffer = this->createBuffer(size, intendedType, kStatic_GrAccessPattern, data)) {
        // We shouldn't bin and/or cache static buffers.
        SkASSERT(buffer->size() == size);
        SkASSERT(!buffer->resourcePriv().getScratchKey().isValid());
        buffer->resourcePriv().setUniqueKey(key);
        return sk_sp<const GrGpuBuffer>(buffer);
    }
    return nullptr;
}

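// Builds an index buffer by repeating 'pattern' ('patternSize' indices) 'reps' times, offsetting
// each repetition's indices by 'vertCount' so that every copy addresses its own block of vertices.
// The data is written through map()/unmap() when mapping succeeds and via updateData() otherwise.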
sk_sp<const GrGpuBuffer> GrResourceProvider::createPatternedIndexBuffer(const uint16_t* pattern,
                                                                        int patternSize,
                                                                        int reps,
                                                                        int vertCount,
                                                                        const GrUniqueKey* key) {
    size_t bufferSize = patternSize * reps * sizeof(uint16_t);

    sk_sp<GrGpuBuffer> buffer(
            this->createBuffer(bufferSize, GrGpuBufferType::kIndex, kStatic_GrAccessPattern));
    if (!buffer) {
        return nullptr;
    }
    uint16_t* data = (uint16_t*) buffer->map();
    SkAutoTArray<uint16_t> temp;
    if (!data) {
        temp.reset(reps * patternSize);
        data = temp.get();
    }
    for (int i = 0; i < reps; ++i) {
        int baseIdx = i * patternSize;
        uint16_t baseVert = (uint16_t)(i * vertCount);
        for (int j = 0; j < patternSize; ++j) {
            data[baseIdx + j] = baseVert + pattern[j];
        }
    }
    if (temp.get()) {
        if (!buffer->updateData(data, bufferSize)) {
            return nullptr;
        }
    } else {
        buffer->unmap();
    }
    if (key) {
        SkASSERT(key->isValid());
        this->assignUniqueKeyToResource(*key, buffer.get());
    }
    return std::move(buffer);
}

///////////////////////////////////////////////////////////////////////////////////////////////////
static constexpr int kMaxNumNonAAQuads = 1 << 12; // max possible: (1 << 14) - 1;
static const int kVertsPerNonAAQuad = 4;
static const int kIndicesPerNonAAQuad = 6;

sk_sp<const GrGpuBuffer> GrResourceProvider::createNonAAQuadIndexBuffer() {
    static_assert(kVertsPerNonAAQuad * kMaxNumNonAAQuads <= 65535); // indices fit in a uint16_t

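    // Each quad is two triangles, (0, 1, 2) and (2, 1, 3), which share the 1-2 edge.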
    static const uint16_t kNonAAQuadIndexPattern[] = {
        0, 1, 2, 2, 1, 3
    };

    static_assert(SK_ARRAY_COUNT(kNonAAQuadIndexPattern) == kIndicesPerNonAAQuad);

    return this->createPatternedIndexBuffer(kNonAAQuadIndexPattern, kIndicesPerNonAAQuad,
                                            kMaxNumNonAAQuads, kVertsPerNonAAQuad, nullptr);
}

int GrResourceProvider::MaxNumNonAAQuads() { return kMaxNumNonAAQuads; }
int GrResourceProvider::NumVertsPerNonAAQuad() { return kVertsPerNonAAQuad; }
int GrResourceProvider::NumIndicesPerNonAAQuad() { return kIndicesPerNonAAQuad; }

///////////////////////////////////////////////////////////////////////////////////////////////////
static constexpr int kMaxNumAAQuads = 1 << 9; // max possible: (1 << 13) - 1;
static const int kVertsPerAAQuad = 8;
static const int kIndicesPerAAQuad = 30;

sk_sp<const GrGpuBuffer> GrResourceProvider::createAAQuadIndexBuffer() {
    static_assert(kVertsPerAAQuad * kMaxNumAAQuads <= 65535); // indices fit in a uint16_t

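    // The 30 indices form 10 triangles per AA quad: two cover the quad spanned by vertices 0-3
    // and the remaining eight stitch vertices 0-3 to vertices 4-7, i.e. the ramp between the two
    // nested quads described by the 8 vertices.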
    // clang-format off
    static const uint16_t kAAQuadIndexPattern[] = {
        0, 1, 2, 1, 3, 2,
        0, 4, 1, 4, 5, 1,
        0, 6, 4, 0, 2, 6,
        2, 3, 6, 3, 7, 6,
        1, 5, 3, 3, 5, 7,
    };
    // clang-format on

    static_assert(SK_ARRAY_COUNT(kAAQuadIndexPattern) == kIndicesPerAAQuad);

    return this->createPatternedIndexBuffer(kAAQuadIndexPattern, kIndicesPerAAQuad,
                                            kMaxNumAAQuads, kVertsPerAAQuad, nullptr);
}

int GrResourceProvider::MaxNumAAQuads() { return kMaxNumAAQuads; }
int GrResourceProvider::NumVertsPerAAQuad() { return kVertsPerAAQuad; }
int GrResourceProvider::NumIndicesPerAAQuad() { return kIndicesPerAAQuad; }

///////////////////////////////////////////////////////////////////////////////////////////////////
sk_sp<GrPath> GrResourceProvider::createPath(const SkPath& path, const GrStyle& style) {
    if (this->isAbandoned()) {
        return nullptr;
    }

    SkASSERT(this->gpu()->pathRendering());
    return this->gpu()->pathRendering()->createPath(path, style);
}

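// Creates (or recycles) a GrGpuBuffer. Buffers with a non-dynamic access pattern are created
// directly on the GPU. Dynamic buffers are binned by rounding the requested size up to the next
// power of two (with a 4KB floor) and are first looked up in the scratch resource cache so they
// can be reused.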
sk_sp<GrGpuBuffer> GrResourceProvider::createBuffer(size_t size, GrGpuBufferType intendedType,
                                                    GrAccessPattern accessPattern,
                                                    const void* data) {
    if (this->isAbandoned()) {
        return nullptr;
    }
    if (kDynamic_GrAccessPattern != accessPattern) {
        return this->gpu()->createBuffer(size, intendedType, accessPattern, data);
    }
    // bin by pow2 with a reasonable min
    static const size_t MIN_SIZE = 1 << 12;
    size_t allocSize = std::max(MIN_SIZE, GrNextSizePow2(size));

    GrScratchKey key;
    GrGpuBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key);
    auto buffer =
            sk_sp<GrGpuBuffer>(static_cast<GrGpuBuffer*>(this->cache()->findAndRefScratchResource(
                    key)));
    if (!buffer) {
        buffer = this->gpu()->createBuffer(allocSize, intendedType, kDynamic_GrAccessPattern);
        if (!buffer) {
            return nullptr;
        }
    }
    if (data) {
        buffer->updateData(data, size);
    }
    return buffer;
}

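// Ensures 'rt' has a stencil attachment with 'numStencilSamples' samples. Stencil buffers are
// shared between render targets with the same dimensions and sample count via a unique key, so an
// existing attachment is reused when possible and a newly created one is keyed for later reuse.
// Returns true if the render target ends up with a suitable attachment.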
bool GrResourceProvider::attachStencilAttachment(GrRenderTarget* rt, int numStencilSamples) {
    SkASSERT(rt);
    GrStencilAttachment* stencil = rt->renderTargetPriv().getStencilAttachment();
    if (stencil && stencil->numSamples() == numStencilSamples) {
        return true;
    }

    if (!rt->wasDestroyed() && rt->canAttemptStencilAttachment()) {
        GrUniqueKey sbKey;

        int width = rt->width();
        int height = rt->height();
#if 0
        if (this->caps()->oversizedStencilSupport()) {
            width = SkNextPow2(width);
            height = SkNextPow2(height);
        }
#endif
        GrStencilAttachment::ComputeSharedStencilAttachmentKey(
                width, height, numStencilSamples, &sbKey);
        auto stencil = this->findByUniqueKey<GrStencilAttachment>(sbKey);
        if (!stencil) {
            // Need to try and create a new stencil
            stencil.reset(this->gpu()->createStencilAttachmentForRenderTarget(
                    rt, width, height, numStencilSamples));
            if (!stencil) {
                return false;
            }
            this->assignUniqueKeyToResource(sbKey, stencil.get());
        }
        rt->renderTargetPriv().attachStencilAttachment(std::move(stencil));
    }

    if (GrStencilAttachment* stencil = rt->renderTargetPriv().getStencilAttachment()) {
        return stencil->numSamples() == numStencilSamples;
    }
    return false;
}

sk_sp<GrRenderTarget> GrResourceProvider::wrapBackendTextureAsRenderTarget(
        const GrBackendTexture& tex, int sampleCnt) {
    if (this->isAbandoned()) {
        return nullptr;
    }
    return fGpu->wrapBackendTextureAsRenderTarget(tex, sampleCnt);
}

std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT GrResourceProvider::makeSemaphore(
        bool isOwned) {
    return this->isAbandoned() ? nullptr : fGpu->makeSemaphore(isOwned);
}

std::unique_ptr<GrSemaphore> GrResourceProvider::wrapBackendSemaphore(
        const GrBackendSemaphore& semaphore,
        SemaphoreWrapType wrapType,
        GrWrapOwnership ownership) {
    ASSERT_SINGLE_OWNER
    return this->isAbandoned() ? nullptr : fGpu->wrapBackendSemaphore(semaphore,
                                                                      wrapType,
                                                                      ownership);
}

// Ensures the row bytes are populated (not 0) and, when the color type or row bytes can't be
// consumed directly, copies the level into a temporary with tight row bytes in the allowed color
// type. Returns false if the input row bytes are invalid.
static bool prepare_level(const GrMipLevel& inLevel,
                          SkISize dimensions,
                          bool rowBytesSupport,
                          GrColorType origColorType,
                          GrColorType allowedColorType,
                          GrMipLevel* outLevel,
                          std::unique_ptr<char[]>* data) {
    if (!inLevel.fPixels) {
        outLevel->fPixels = nullptr;
        outLevel->fRowBytes = 0;
        return true;
    }
    size_t minRB = dimensions.fWidth * GrColorTypeBytesPerPixel(origColorType);
    size_t actualRB = inLevel.fRowBytes ? inLevel.fRowBytes : minRB;
    if (actualRB < minRB) {
        return false;
    }
    if (origColorType == allowedColorType && (actualRB == minRB || rowBytesSupport)) {
        outLevel->fRowBytes = actualRB;
        outLevel->fPixels = inLevel.fPixels;
        return true;
    }
    auto tempRB = dimensions.fWidth * GrColorTypeBytesPerPixel(allowedColorType);
    data->reset(new char[tempRB * dimensions.fHeight]);
    outLevel->fPixels = data->get();
    outLevel->fRowBytes = tempRB;
    GrImageInfo srcInfo(origColorType, kUnpremul_SkAlphaType, nullptr, dimensions);
    GrImageInfo dstInfo(allowedColorType, kUnpremul_SkAlphaType, nullptr, dimensions);
    return GrConvertPixels(dstInfo, data->get(), tempRB, srcInfo, inLevel.fPixels, actualRB);
}

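// Prepares mip level data for upload: chooses a write color type supported for 'format',
// validates each level's row bytes, and converts any level whose color type or row bytes the
// backend can't consume directly into temporary storage. Returns the color type to pass to the
// GPU, or GrColorType::kUnknown on failure.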
GrColorType GrResourceProvider::prepareLevels(const GrBackendFormat& format,
                                              GrColorType colorType,
                                              SkISize baseSize,
                                              const GrMipLevel texels[],
                                              int mipLevelCount,
                                              TempLevels* tempLevels,
                                              TempLevelDatas* tempLevelDatas) const {
    SkASSERT(mipLevelCount && texels && texels[0].fPixels);

    auto allowedColorType =
            this->caps()->supportedWritePixelsColorType(colorType, format, colorType).fColorType;
    if (allowedColorType == GrColorType::kUnknown) {
        return GrColorType::kUnknown;
    }
    bool rowBytesSupport = this->caps()->writePixelsRowBytesSupport();
    tempLevels->reset(mipLevelCount);
    tempLevelDatas->reset(mipLevelCount);
    auto size = baseSize;
    for (int i = 0; i < mipLevelCount; ++i) {
        if (!prepare_level(texels[i], size, rowBytesSupport, colorType, allowedColorType,
                           &(*tempLevels)[i], &(*tempLevelDatas)[i])) {
            return GrColorType::kUnknown;
        }
        size = {std::max(size.fWidth / 2, 1), std::max(size.fHeight / 2, 1)};
    }
    return allowedColorType;
}

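// Uploads the provided mip level data into an already-created texture (used when a scratch or
// approximate-fit texture is recycled instead of creating a new one). Levels are run through
// prepareLevels() so that the color type and row bytes match what the backend supports.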
sk_sp<GrTexture> GrResourceProvider::writePixels(sk_sp<GrTexture> texture,
                                                 GrColorType colorType,
                                                 SkISize baseSize,
                                                 const GrMipLevel texels[],
                                                 int mipLevelCount) const {
    SkASSERT(!this->isAbandoned());
    SkASSERT(texture);
    SkASSERT(colorType != GrColorType::kUnknown);
    SkASSERT(mipLevelCount && texels && texels[0].fPixels);

    SkAutoSTMalloc<14, GrMipLevel> tmpTexels;
    SkAutoSTArray<14, std::unique_ptr<char[]>> tmpDatas;
    auto tempColorType = this->prepareLevels(texture->backendFormat(), colorType, baseSize, texels,
                                             mipLevelCount, &tmpTexels, &tmpDatas);
    if (tempColorType == GrColorType::kUnknown) {
        return nullptr;
    }
    SkAssertResult(fGpu->writePixels(texture.get(), 0, 0, baseSize.fWidth, baseSize.fHeight,
                                     colorType, tempColorType, tmpTexels.get(), mipLevelCount));
    return texture;
}