/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/image/SkImage_Lazy.h"

#include "include/core/SkBitmap.h"
#include "include/core/SkData.h"
#include "include/core/SkImageGenerator.h"
#include "src/core/SkBitmapCache.h"
#include "src/core/SkCachedData.h"
#include "src/core/SkImagePriv.h"
#include "src/core/SkNextID.h"

#if SK_SUPPORT_GPU
#include "include/core/SkYUVAIndex.h"
#include "include/gpu/GrDirectContext.h"
#include "include/gpu/GrRecordingContext.h"
#include "include/private/GrResourceKey.h"
#include "src/core/SkResourceCache.h"
#include "src/core/SkYUVPlanesCache.h"
#include "src/gpu/GrBitmapTextureMaker.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrColorSpaceXform.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrImageTextureMaker.h"
#include "src/gpu/GrPaint.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrRenderTargetContext.h"
#include "src/gpu/GrSamplerState.h"
#include "src/gpu/SkGr.h"
#include "src/gpu/effects/GrYUVtoRGBEffect.h"
#endif

// Ref-counted tuple(SkImageGenerator, SkMutex) which allows sharing one generator among N images
class SharedGenerator final : public SkNVRefCnt<SharedGenerator> {
public:
    static sk_sp<SharedGenerator> Make(std::unique_ptr<SkImageGenerator> gen) {
        return gen ? sk_sp<SharedGenerator>(new SharedGenerator(std::move(gen))) : nullptr;
    }

    // Safe to call without holding the mutex: the info is a const field set in the generator's
    // constructor.
    const SkImageInfo& getInfo() { return fGenerator->getInfo(); }

private:
    explicit SharedGenerator(std::unique_ptr<SkImageGenerator> gen)
            : fGenerator(std::move(gen)) {
        SkASSERT(fGenerator);
    }

    friend class ScopedGenerator;
    friend class SkImage_Lazy;

    std::unique_ptr<SkImageGenerator> fGenerator;
    SkMutex fMutex;
};
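
// Illustrative sketch (comments only, not part of the build): a SharedGenerator is minted once
// per generator and can back several lazy images; every decode then serializes on its fMutex.
// "encoded" is a hypothetical sk_sp<SkData> holding compressed image data.
//
//     sk_sp<SharedGenerator> shared =
//             SharedGenerator::Make(SkImageGenerator::MakeFromEncoded(encoded));
//     // Each SkImage_Lazy built from a Validator over 'shared' refs the same generator, so
//     // concurrent getPixels()/getROPixels() calls on those images take turns on fMutex.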

///////////////////////////////////////////////////////////////////////////////

SkImage_Lazy::Validator::Validator(sk_sp<SharedGenerator> gen, const SkColorType* colorType,
                                   sk_sp<SkColorSpace> colorSpace)
        : fSharedGenerator(std::move(gen)) {
    if (!fSharedGenerator) {
        return;
    }

    // The following generator accessors are safe without acquiring the mutex (const getters).
    // TODO: refactor to use a ScopedGenerator instead, for clarity.
    fInfo = fSharedGenerator->fGenerator->getInfo();
    if (fInfo.isEmpty()) {
        fSharedGenerator.reset();
        return;
    }

    fUniqueID = fSharedGenerator->fGenerator->uniqueID();

    if (colorType && (*colorType == fInfo.colorType())) {
        colorType = nullptr;
    }

    if (colorType || colorSpace) {
        if (colorType) {
            fInfo = fInfo.makeColorType(*colorType);
        }
        if (colorSpace) {
            fInfo = fInfo.makeColorSpace(colorSpace);
        }
        fUniqueID = SkNextID::ImageID();
    }
}
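
// For example (an illustration of the re-keying above, not additional behavior): constructing a
// Validator with the generator's own color type and a null color space leaves fUniqueID equal to
// the generator's uniqueID(), so caches keyed on that ID still match; asking for, say,
// kRGBA_F16_SkColorType instead mints a fresh ID via SkNextID::ImageID(), so the derived image
// never aliases pixels cached under the original settings.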

///////////////////////////////////////////////////////////////////////////////

// Helper for exclusive access to a shared generator.
class SkImage_Lazy::ScopedGenerator {
public:
    ScopedGenerator(const sk_sp<SharedGenerator>& gen)
            : fSharedGenerator(gen)
            , fAutoAcquire(gen->fMutex) {}

    SkImageGenerator* operator->() const {
        fSharedGenerator->fMutex.assertHeld();
        return fSharedGenerator->fGenerator.get();
    }

    operator SkImageGenerator*() const {
        fSharedGenerator->fMutex.assertHeld();
        return fSharedGenerator->fGenerator.get();
    }

private:
    const sk_sp<SharedGenerator>& fSharedGenerator;
    SkAutoMutexExclusive fAutoAcquire;
};
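
// Usage sketch: a ScopedGenerator is created as a temporary (or a named local) so the shared
// mutex is held for exactly the duration of the generator call, e.g. as getROPixels() does
// below:
//
//     if (!ScopedGenerator(fSharedGenerator)->getPixels(pmap)) {
//         return false;
//     }
//
// The lock is released when the ScopedGenerator goes out of scope.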

///////////////////////////////////////////////////////////////////////////////

SkImage_Lazy::SkImage_Lazy(Validator* validator)
        : INHERITED(validator->fInfo, validator->fUniqueID)
        , fSharedGenerator(std::move(validator->fSharedGenerator)) {
    SkASSERT(fSharedGenerator);
}

//////////////////////////////////////////////////////////////////////////////////////////////////

bool SkImage_Lazy::getROPixels(SkBitmap* bitmap, SkImage::CachingHint chint) const {
    auto check_output_bitmap = [bitmap]() {
        SkASSERT(bitmap->isImmutable());
        SkASSERT(bitmap->getPixels());
        (void)bitmap;
    };

    auto desc = SkBitmapCacheDesc::Make(this);
    if (SkBitmapCache::Find(desc, bitmap)) {
        check_output_bitmap();
        return true;
    }

    if (SkImage::kAllow_CachingHint == chint) {
        SkPixmap pmap;
        SkBitmapCache::RecPtr cacheRec = SkBitmapCache::Alloc(desc, this->imageInfo(), &pmap);
        if (!cacheRec || !ScopedGenerator(fSharedGenerator)->getPixels(pmap)) {
            return false;
        }
        SkBitmapCache::Add(std::move(cacheRec), bitmap);
        this->notifyAddedToRasterCache();
    } else {
        if (!bitmap->tryAllocPixels(this->imageInfo()) ||
            !ScopedGenerator(fSharedGenerator)->getPixels(bitmap->pixmap())) {
            return false;
        }
        bitmap->setImmutable();
    }

    check_output_bitmap();
    return true;
}
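
// Caller-side sketch (hypothetical destination buffer): the CachingHint passed to
// SkImage::readPixels() flows into getROPixels() above and picks the branch taken --
// kAllow caches the decode in SkBitmapCache, kDisallow decodes into a private allocation.
//
//     SkImageInfo dstInfo = SkImageInfo::MakeN32Premul(image->width(), image->height());
//     std::vector<uint8_t> storage(dstInfo.computeMinByteSize());
//     image->readPixels(dstInfo, storage.data(), dstInfo.minRowBytes(), 0, 0,
//                       SkImage::kDisallow_CachingHint);  // decode once, skip the shared cache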

//////////////////////////////////////////////////////////////////////////////////////////////////

bool SkImage_Lazy::onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
                                int srcX, int srcY, CachingHint chint) const {
    SkBitmap bm;
    if (this->getROPixels(&bm, chint)) {
        return bm.readPixels(dstInfo, dstPixels, dstRB, srcX, srcY);
    }
    return false;
}

sk_sp<SkData> SkImage_Lazy::onRefEncoded() const {
    // Check that we aren't a subset or a colortype/colorspace modification of the original.
    if (fSharedGenerator->fGenerator->uniqueID() == this->uniqueID()) {
        ScopedGenerator generator(fSharedGenerator);
        return generator->refEncodedData();
    }
    return nullptr;
}

bool SkImage_Lazy::onIsValid(GrRecordingContext* context) const {
    ScopedGenerator generator(fSharedGenerator);
    return generator->isValid(context);
}

///////////////////////////////////////////////////////////////////////////////////////////////////

#if SK_SUPPORT_GPU
GrSurfaceProxyView SkImage_Lazy::refView(GrRecordingContext* context, GrMipmapped mipMapped) const {
    if (!context) {
        return {};
    }

    GrImageTextureMaker textureMaker(context, this, GrImageTexGenPolicy::kDraw);
    return textureMaker.view(mipMapped);
}
#endif

sk_sp<SkImage> SkImage_Lazy::onMakeSubset(const SkIRect& subset, GrDirectContext* direct) const {
    // TODO: can we do this more efficiently, by telling the generator we want to
    //       "realize" a subset?

    auto pixels = direct ? this->makeTextureImage(direct)
                         : this->makeRasterImage();
    return pixels ? pixels->makeSubset(subset, direct) : nullptr;
}

sk_sp<SkImage> SkImage_Lazy::onMakeColorTypeAndColorSpace(SkColorType targetCT,
                                                          sk_sp<SkColorSpace> targetCS,
                                                          GrDirectContext*) const {
    SkAutoMutexExclusive autoAcquire(fOnMakeColorTypeAndSpaceMutex);
    if (fOnMakeColorTypeAndSpaceResult &&
        targetCT == fOnMakeColorTypeAndSpaceResult->colorType() &&
        SkColorSpace::Equals(targetCS.get(), fOnMakeColorTypeAndSpaceResult->colorSpace())) {
        return fOnMakeColorTypeAndSpaceResult;
    }
    Validator validator(fSharedGenerator, &targetCT, targetCS);
    sk_sp<SkImage> result = validator ? sk_sp<SkImage>(new SkImage_Lazy(&validator)) : nullptr;
    if (result) {
        fOnMakeColorTypeAndSpaceResult = result;
    }
    return result;
}

sk_sp<SkImage> SkImage_Lazy::onReinterpretColorSpace(sk_sp<SkColorSpace> newCS) const {
    // TODO: The correct thing is to clone the generator, and modify its color space. That's hard,
    // because we don't have a clone method, and generator is public (and derived-from by clients).
    // So do the simple/inefficient thing here, and fall back to raster when this is called.

    // We allocate the bitmap with the new color space, then generate the image using the original.
    SkBitmap bitmap;
    if (bitmap.tryAllocPixels(this->imageInfo().makeColorSpace(std::move(newCS)))) {
        SkPixmap pixmap = bitmap.pixmap();
        pixmap.setColorSpace(this->refColorSpace());
        if (ScopedGenerator(fSharedGenerator)->getPixels(pixmap)) {
            bitmap.setImmutable();
            return SkImage::MakeFromBitmap(bitmap);
        }
    }
    return nullptr;
}
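
// Sketch of the observable effect (assuming "img" is a lazy image tagged sRGB): the public
// SkImage::reinterpretColorSpace() entry point lands here, and the returned raster image holds
// the same decoded bytes with only the color-space tag swapped.
//
//     sk_sp<SkImage> tagged = img->reinterpretColorSpace(
//             SkColorSpace::MakeRGB(SkNamedTransferFn::kRec2020, SkNamedGamut::kRec2020));
//     // The generator still decodes into its own color space (see pixmap.setColorSpace()
//     // above); no gamut conversion of the pixel values takes place.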

sk_sp<SkImage> SkImage::MakeFromGenerator(std::unique_ptr<SkImageGenerator> generator) {
    SkImage_Lazy::Validator
            validator(SharedGenerator::Make(std::move(generator)), nullptr, nullptr);

    return validator ? sk_make_sp<SkImage_Lazy>(&validator) : nullptr;
}
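
// Client-side sketch: the usual way a lazy image is created. "encoded" is a hypothetical
// sk_sp<SkData> containing e.g. PNG or JPEG bytes. (SkImage::MakeFromEncoded() builds on the
// same generator machinery.)
//
//     sk_sp<SkImage> lazy =
//             SkImage::MakeFromGenerator(SkImageGenerator::MakeFromEncoded(encoded));
//     // Nothing has been decoded yet; pixels are produced on demand through getROPixels() for
//     // raster use or lockTextureProxyView() for GPU use.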

#if SK_SUPPORT_GPU

GrSurfaceProxyView SkImage_Lazy::textureProxyViewFromPlanes(GrRecordingContext* ctx,
                                                            SkBudgeted budgeted) const {
    SkYUVASizeInfo yuvSizeInfo;
    SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount];
    SkYUVColorSpace yuvColorSpace;
    const void* planes[SkYUVASizeInfo::kMaxCount];

    sk_sp<SkCachedData> dataStorage =
            this->getPlanes(&yuvSizeInfo, yuvaIndices, &yuvColorSpace, planes);
    if (!dataStorage) {
        return {};
    }

    GrSurfaceProxyView yuvViews[SkYUVASizeInfo::kMaxCount];
    for (int i = 0; i < SkYUVASizeInfo::kMaxCount; ++i) {
        if (yuvSizeInfo.fSizes[i].isEmpty()) {
            SkASSERT(!yuvSizeInfo.fWidthBytes[i]);
            continue;
        }

        int componentWidth = yuvSizeInfo.fSizes[i].fWidth;
        int componentHeight = yuvSizeInfo.fSizes[i].fHeight;
        // If the sizes of the components are not all the same we choose to create exact-match
        // textures for the smaller ones rather than add a texture domain to the draw.
        // TODO: revisit this decision to improve texture reuse?
        SkBackingFit fit =
                (componentWidth != yuvSizeInfo.fSizes[0].fWidth) ||
                (componentHeight != yuvSizeInfo.fSizes[0].fHeight)
                        ? SkBackingFit::kExact : SkBackingFit::kApprox;

        SkImageInfo imageInfo = SkImageInfo::MakeA8(componentWidth, componentHeight);
        SkCachedData* dataStoragePtr = dataStorage.get();
        // We grab a ref to the cached YUV data. When the SkBitmap we create below goes away it
        // will call releaseProc, which unrefs the cached data.
        // DDL TODO: Currently we end up creating a lazy proxy that will hold onto a ref to the
        // SkImage in its lambda. This means that we'll keep the ref on the YUV data around for
        // the lifetime of the proxy and not just the upload. For non-DDL draws we should look
        // into releasing this SkImage after uploads (by deleting the lambda after instantiation).
        dataStoragePtr->ref();
        SkBitmap bitmap;
        auto releaseProc = [](void*, void* data) {
            SkCachedData* cachedData = static_cast<SkCachedData*>(data);
            SkASSERT(cachedData);
            cachedData->unref();
        };

        SkAssertResult(bitmap.installPixels(imageInfo, const_cast<void*>(planes[i]),
                                            yuvSizeInfo.fWidthBytes[i], releaseProc,
                                            dataStoragePtr));
        bitmap.setImmutable();

        GrBitmapTextureMaker maker(ctx, bitmap, fit);
        yuvViews[i] = maker.view(GrMipmapped::kNo);

        if (!yuvViews[i]) {
            return {};
        }

        SkASSERT(yuvViews[i].proxy()->dimensions() == yuvSizeInfo.fSizes[i]);
    }

    // TODO: investigate preallocating mip maps here
    GrColorType ct = SkColorTypeToGrColorType(this->colorType());
    auto renderTargetContext = GrRenderTargetContext::Make(
            ctx, ct, nullptr, SkBackingFit::kExact, this->dimensions(), 1, GrMipmapped::kNo,
            GrProtected::kNo, kTopLeft_GrSurfaceOrigin, budgeted);
    if (!renderTargetContext) {
        return {};
    }

    GrPaint paint;
    const auto& caps = *ctx->priv().caps();
    std::unique_ptr<GrFragmentProcessor> yuvToRgbProcessor = GrYUVtoRGBEffect::Make(
            yuvViews, yuvaIndices, yuvColorSpace, GrSamplerState::Filter::kNearest, caps);

    // The pixels after yuv->rgb will be in the generator's color space.
    // If onMakeColorTypeAndColorSpace has been called then this will not match this image's
    // color space. To correct this, apply a color space conversion from the generator's color
    // space to this image's color space.
    SkColorSpace* srcColorSpace;
    {
        ScopedGenerator generator(fSharedGenerator);
        srcColorSpace = generator->getInfo().colorSpace();
    }
    SkColorSpace* dstColorSpace = this->colorSpace();

    // If the caller expects the pixels in a different color space than the one from the image,
    // apply a color conversion to do this.
    std::unique_ptr<GrFragmentProcessor> colorConversionProcessor =
            GrColorSpaceXformEffect::Make(std::move(yuvToRgbProcessor),
                                          srcColorSpace, kOpaque_SkAlphaType,
                                          dstColorSpace, kOpaque_SkAlphaType);
    paint.setColorFragmentProcessor(std::move(colorConversionProcessor));

    paint.setPorterDuffXPFactory(SkBlendMode::kSrc);
    const SkRect r = SkRect::MakeIWH(yuvSizeInfo.fSizes[0].fWidth, yuvSizeInfo.fSizes[0].fHeight);

    SkMatrix m = SkEncodedOriginToMatrix(yuvSizeInfo.fOrigin, r.width(), r.height());
    renderTargetContext->drawRect(nullptr, std::move(paint), GrAA::kNo, m, r);

    SkASSERT(renderTargetContext->asTextureProxy());
    return renderTargetContext->readSurfaceView();
}

sk_sp<SkCachedData> SkImage_Lazy::getPlanes(
        SkYUVASizeInfo* yuvaSizeInfo,
        SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
        SkYUVColorSpace* yuvColorSpace,
        const void* outPlanes[SkYUVASizeInfo::kMaxCount]) const {
    ScopedGenerator generator(fSharedGenerator);

    sk_sp<SkCachedData> data;
    SkYUVPlanesCache::Info yuvInfo;
    data.reset(SkYUVPlanesCache::FindAndRef(generator->uniqueID(), &yuvInfo));

    void* planes[SkYUVASizeInfo::kMaxCount];

    if (data.get()) {
        planes[0] = (void*)data->data();  // we should always have at least one plane

        for (int i = 1; i < SkYUVASizeInfo::kMaxCount; ++i) {
            if (!yuvInfo.fSizeInfo.fWidthBytes[i]) {
                SkASSERT(!yuvInfo.fSizeInfo.fWidthBytes[i] &&
                         !yuvInfo.fSizeInfo.fSizes[i].fHeight);
                planes[i] = nullptr;
                continue;
            }

            planes[i] = (uint8_t*)planes[i - 1] + (yuvInfo.fSizeInfo.fWidthBytes[i - 1] *
                                                   yuvInfo.fSizeInfo.fSizes[i - 1].fHeight);
        }
    } else {
        // Fetch yuv plane sizes for memory allocation.
        if (!generator->queryYUVA8(&yuvInfo.fSizeInfo, yuvInfo.fYUVAIndices,
                                   &yuvInfo.fColorSpace)) {
            return nullptr;
        }

        // Allocate the memory for the YUVA planes.
        size_t totalSize(0);
        for (int i = 0; i < SkYUVASizeInfo::kMaxCount; i++) {
            SkASSERT((yuvInfo.fSizeInfo.fWidthBytes[i] && yuvInfo.fSizeInfo.fSizes[i].fHeight) ||
                     (!yuvInfo.fSizeInfo.fWidthBytes[i] && !yuvInfo.fSizeInfo.fSizes[i].fHeight));

            totalSize += yuvInfo.fSizeInfo.fWidthBytes[i] * yuvInfo.fSizeInfo.fSizes[i].fHeight;
        }

        data.reset(SkResourceCache::NewCachedData(totalSize));

        planes[0] = data->writable_data();

        for (int i = 1; i < SkYUVASizeInfo::kMaxCount; ++i) {
            if (!yuvInfo.fSizeInfo.fWidthBytes[i]) {
                SkASSERT(!yuvInfo.fSizeInfo.fWidthBytes[i] &&
                         !yuvInfo.fSizeInfo.fSizes[i].fHeight);
                planes[i] = nullptr;
                continue;
            }

            planes[i] = (uint8_t*)planes[i - 1] + (yuvInfo.fSizeInfo.fWidthBytes[i - 1] *
                                                   yuvInfo.fSizeInfo.fSizes[i - 1].fHeight);
        }

        // Get the YUV planes.
        if (!generator->getYUVA8Planes(yuvInfo.fSizeInfo, yuvInfo.fYUVAIndices, planes)) {
            return nullptr;
        }

        // Decoding is done; cache the resulting YUV planes.
        SkYUVPlanesCache::Add(this->uniqueID(), data.get(), &yuvInfo);
    }

    *yuvaSizeInfo = yuvInfo.fSizeInfo;
    memcpy(yuvaIndices, yuvInfo.fYUVAIndices, sizeof(yuvInfo.fYUVAIndices));
    *yuvColorSpace = yuvInfo.fColorSpace;
    outPlanes[0] = planes[0];
    outPlanes[1] = planes[1];
    outPlanes[2] = planes[2];
    outPlanes[3] = planes[3];
    return data;
}
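
// Worked layout example for the contiguous allocation above (hypothetical 640x480 4:2:0 source
// with no alpha plane): plane 0 (Y) spans fWidthBytes[0] * height[0] = 640 * 480 = 307200 bytes
// starting at data->data(); plane 1 (U) starts at byte 307200 and spans 320 * 240 = 76800 bytes;
// plane 2 (V) starts at byte 384000; plane 3 has zero fWidthBytes, so planes[3] stays nullptr
// and totalSize comes to 460800 bytes.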

/*
 * We have 4 ways to try to return a texture (tried in the following order):
 *
 * 1. Check the cache for a pre-existing one
 * 2. Ask the generator to natively create one
 * 3. Ask the generator to return YUV planes, which the GPU can convert
 * 4. Ask the generator to return RGB(A) data, which the GPU can convert
 */
GrSurfaceProxyView SkImage_Lazy::lockTextureProxyView(GrRecordingContext* ctx,
                                                      GrImageTexGenPolicy texGenPolicy,
                                                      GrMipmapped mipMapped) const {
    // Values representing the various texture lock paths we can take. Used for logging the path
    // taken to a histogram.
    enum LockTexturePath {
        kFailure_LockTexturePath,
        kPreExisting_LockTexturePath,
        kNative_LockTexturePath,
        kCompressed_LockTexturePath,  // Deprecated
        kYUV_LockTexturePath,
        kRGBA_LockTexturePath,
    };

    enum { kLockTexturePathCount = kRGBA_LockTexturePath + 1 };

    GrUniqueKey key;
    if (texGenPolicy == GrImageTexGenPolicy::kDraw) {
        GrMakeKeyFromImageID(&key, this->uniqueID(), SkIRect::MakeSize(this->dimensions()));
    }

    const GrCaps* caps = ctx->priv().caps();
    GrProxyProvider* proxyProvider = ctx->priv().proxyProvider();

    auto installKey = [&](const GrSurfaceProxyView& view) {
        SkASSERT(view && view.asTextureProxy());
        if (key.isValid()) {
            auto listener = GrMakeUniqueKeyInvalidationListener(&key, ctx->priv().contextID());
            this->addUniqueIDListener(std::move(listener));
            proxyProvider->assignUniqueKeyToProxy(key, view.asTextureProxy());
        }
    };

    auto ct = this->colorTypeOfLockTextureProxy(caps);

    // 1. Check the cache for a pre-existing one.
    if (key.isValid()) {
        auto proxy = proxyProvider->findOrCreateProxyByUniqueKey(key);
        if (proxy) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kPreExisting_LockTexturePath,
                                     kLockTexturePathCount);
            GrSwizzle swizzle = caps->getReadSwizzle(proxy->backendFormat(), ct);
            GrSurfaceProxyView view(std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle);
            if (mipMapped == GrMipmapped::kNo ||
                view.asTextureProxy()->mipmapped() == GrMipmapped::kYes) {
                return view;
            } else {
                // We need a mipped proxy, but we found a cached proxy that wasn't mipped. Thus
                // we generate a new mipped surface and copy the original proxy into the base
                // layer. We will then let the GPU generate the rest of the mips.
                auto mippedView = GrCopyBaseMipMapToView(ctx, view);
                if (!mippedView) {
                    // We failed to make a mipped proxy with the base copied into it. This could
                    // have been from failure to make the proxy or failure to do the copy. Thus
                    // we will fall back to just using the non-mipped proxy; see skbug.com/7094.
                    return view;
                }
                proxyProvider->removeUniqueKeyFromProxy(view.asTextureProxy());
                installKey(mippedView);
                return mippedView;
            }
        }
    }

    // 2. Ask the generator to natively create one.
    {
        ScopedGenerator generator(fSharedGenerator);
        if (auto view = generator->generateTexture(ctx, this->imageInfo(), {0,0}, mipMapped,
                                                   texGenPolicy)) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kNative_LockTexturePath,
                                     kLockTexturePathCount);
            installKey(view);
            return view;
        }
    }

    // 3. Ask the generator to return YUV planes, which the GPU can convert. If we will be
    //    mipping the texture we skip this step so that the CPU can generate non-planar mipmaps
    //    for us.
    if (mipMapped == GrMipmapped::kNo && !ctx->priv().options().fDisableGpuYUVConversion) {
        // TODO: Update to create the mipped surface in textureProxyViewFromPlanes and draw the
        // base layer directly into the mipped surface.
        SkBudgeted budgeted = texGenPolicy == GrImageTexGenPolicy::kNew_Uncached_Unbudgeted
                                      ? SkBudgeted::kNo
                                      : SkBudgeted::kYes;
        auto view = this->textureProxyViewFromPlanes(ctx, budgeted);
        if (view) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kYUV_LockTexturePath,
                                     kLockTexturePathCount);
            installKey(view);
            return view;
        }
    }

    // 4. Ask the generator to return a bitmap, which the GPU can convert.
    auto hint = texGenPolicy == GrImageTexGenPolicy::kDraw ? CachingHint::kAllow_CachingHint
                                                           : CachingHint::kDisallow_CachingHint;
    if (SkBitmap bitmap; this->getROPixels(&bitmap, hint)) {
        // We always pass uncached here because we will cache it external to the maker based on
        // *our* cache policy. We're just using the maker to generate the texture.
        auto makerPolicy = texGenPolicy == GrImageTexGenPolicy::kNew_Uncached_Unbudgeted
                                   ? GrImageTexGenPolicy::kNew_Uncached_Unbudgeted
                                   : GrImageTexGenPolicy::kNew_Uncached_Budgeted;
        GrBitmapTextureMaker bitmapMaker(ctx, bitmap, makerPolicy);
        auto view = bitmapMaker.view(mipMapped);
        if (view) {
            installKey(view);
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kRGBA_LockTexturePath,
                                     kLockTexturePathCount);
            return view;
        }
    }

    SK_HISTOGRAM_ENUMERATION("LockTexturePath", kFailure_LockTexturePath, kLockTexturePathCount);
    return {};
}

// Pick the GrColorType we will upload/draw in: the image's native color type if the backend has
// a default texture format for it, otherwise fall back to RGBA_8888.
GrColorType SkImage_Lazy::colorTypeOfLockTextureProxy(const GrCaps* caps) const {
    GrColorType ct = SkColorTypeToGrColorType(this->colorType());
    GrBackendFormat format = caps->getDefaultBackendFormat(ct, GrRenderable::kNo);
    if (!format.isValid()) {
        ct = GrColorType::kRGBA_8888;
    }
    return ct;
}

void SkImage_Lazy::addUniqueIDListener(sk_sp<SkIDChangeListener> listener) const {
    bool singleThreaded = this->unique();
    fUniqueIDListeners.add(std::move(listener), singleThreaded);
}
#endif