/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/image/SkImage_Lazy.h"

#include "include/core/SkBitmap.h"
#include "include/core/SkData.h"
#include "include/core/SkImageGenerator.h"
#include "src/core/SkBitmapCache.h"
#include "src/core/SkCachedData.h"
#include "src/core/SkImagePriv.h"
#include "src/core/SkNextID.h"

#if SK_SUPPORT_GPU
#include "include/private/GrRecordingContext.h"
#include "include/private/GrResourceKey.h"
#include "src/gpu/GrBitmapTextureMaker.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrImageTextureMaker.h"
#include "src/gpu/GrProxyProvider.h"
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrSamplerState.h"
#include "src/gpu/GrYUVProvider.h"
#include "src/gpu/SkGr.h"
#endif

// Ref-counted tuple (SkImageGenerator, SkMutex) which allows sharing one generator among N images
class SharedGenerator final : public SkNVRefCnt<SharedGenerator> {
public:
    static sk_sp<SharedGenerator> Make(std::unique_ptr<SkImageGenerator> gen) {
        return gen ? sk_sp<SharedGenerator>(new SharedGenerator(std::move(gen))) : nullptr;
    }

    // This is thread safe. It is a const field set in the constructor.
    const SkImageInfo& getInfo() { return fGenerator->getInfo(); }

private:
    explicit SharedGenerator(std::unique_ptr<SkImageGenerator> gen)
            : fGenerator(std::move(gen)) {
        SkASSERT(fGenerator);
    }

    friend class ScopedGenerator;
    friend class SkImage_Lazy;

    std::unique_ptr<SkImageGenerator> fGenerator;
    SkMutex fMutex;
};

///////////////////////////////////////////////////////////////////////////////

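// Validates the generator plus any subset and color type/space overrides, and computes the
// resulting SkImageInfo, origin, and unique ID. On any invalid input it resets fSharedGenerator,
// which makes the Validator evaluate to false.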
SkImage_Lazy::Validator::Validator(sk_sp<SharedGenerator> gen, const SkIRect* subset,
                                   const SkColorType* colorType, sk_sp<SkColorSpace> colorSpace)
        : fSharedGenerator(std::move(gen)) {
    if (!fSharedGenerator) {
        return;
    }

    // The following generator accessors are safe without acquiring the mutex (const getters).
    // TODO: refactor to use a ScopedGenerator instead, for clarity.
    const SkImageInfo& info = fSharedGenerator->fGenerator->getInfo();
    if (info.isEmpty()) {
        fSharedGenerator.reset();
        return;
    }

    fUniqueID = fSharedGenerator->fGenerator->uniqueID();
    const SkIRect bounds = SkIRect::MakeWH(info.width(), info.height());
    if (subset) {
        if (!bounds.contains(*subset)) {
            fSharedGenerator.reset();
            return;
        }
        if (*subset != bounds) {
            // We need a different uniqueID since we really are a subset of the raw generator.
            fUniqueID = SkNextID::ImageID();
        }
    } else {
        subset = &bounds;
    }

    fInfo = info.makeDimensions(subset->size());
    fOrigin = SkIPoint::Make(subset->x(), subset->y());
    if (colorType || colorSpace) {
        if (colorType) {
            fInfo = fInfo.makeColorType(*colorType);
        }
        if (colorSpace) {
            fInfo = fInfo.makeColorSpace(colorSpace);
        }
        fUniqueID = SkNextID::ImageID();
    }
}

///////////////////////////////////////////////////////////////////////////////

// Helper for exclusive access to a shared generator.
class SkImage_Lazy::ScopedGenerator {
public:
    ScopedGenerator(const sk_sp<SharedGenerator>& gen)
            : fSharedGenerator(gen)
            , fAutoAcquire(gen->fMutex) {}

    SkImageGenerator* operator->() const {
        fSharedGenerator->fMutex.assertHeld();
        return fSharedGenerator->fGenerator.get();
    }

    operator SkImageGenerator*() const {
        fSharedGenerator->fMutex.assertHeld();
        return fSharedGenerator->fGenerator.get();
    }

private:
    const sk_sp<SharedGenerator>& fSharedGenerator;
    SkAutoMutexExclusive fAutoAcquire;
};

///////////////////////////////////////////////////////////////////////////////

SkImage_Lazy::SkImage_Lazy(Validator* validator)
        : INHERITED(validator->fInfo, validator->fUniqueID)
        , fSharedGenerator(std::move(validator->fSharedGenerator))
        , fOrigin(validator->fOrigin) {
    SkASSERT(fSharedGenerator);
    fUniqueID = validator->fUniqueID;
}

//////////////////////////////////////////////////////////////////////////////////////////////////

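// Decodes pixels from the generator into 'pmap'. If (originX, originY) selects a subset of the
// generator's full bounds, the full image is decoded into a temporary bitmap first and the
// requested subset is then copied into 'pmap'.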
static bool generate_pixels(SkImageGenerator* gen, const SkPixmap& pmap, int originX, int originY) {
    const int genW = gen->getInfo().width();
    const int genH = gen->getInfo().height();
    const SkIRect srcR = SkIRect::MakeWH(genW, genH);
    const SkIRect dstR = SkIRect::MakeXYWH(originX, originY, pmap.width(), pmap.height());
    if (!srcR.contains(dstR)) {
        return false;
    }

    // If they are requesting a subset, we need a temporary allocation for the full image, and
    // then copy the subset into their allocation.
    SkBitmap full;
    SkPixmap fullPM;
    const SkPixmap* dstPM = &pmap;
    if (srcR != dstR) {
        if (!full.tryAllocPixels(pmap.info().makeWH(genW, genH))) {
            return false;
        }
        if (!full.peekPixels(&fullPM)) {
            return false;
        }
        dstPM = &fullPM;
    }

    if (!gen->getPixels(dstPM->info(), dstPM->writable_addr(), dstPM->rowBytes())) {
        return false;
    }

    if (srcR != dstR) {
        if (!full.readPixels(pmap, originX, originY)) {
            return false;
        }
    }
    return true;
}

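// Decodes the image into an immutable SkBitmap, consulting the shared SkBitmapCache first. With
// kAllow_CachingHint the decoded pixels are added to the cache; otherwise they go into a one-off
// allocation owned by 'bitmap'.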
bool SkImage_Lazy::getROPixels(SkBitmap* bitmap, SkImage::CachingHint chint) const {
    auto check_output_bitmap = [bitmap]() {
        SkASSERT(bitmap->isImmutable());
        SkASSERT(bitmap->getPixels());
        (void)bitmap;
    };

    auto desc = SkBitmapCacheDesc::Make(this);
    if (SkBitmapCache::Find(desc, bitmap)) {
        check_output_bitmap();
        return true;
    }

    if (SkImage::kAllow_CachingHint == chint) {
        SkPixmap pmap;
        SkBitmapCache::RecPtr cacheRec = SkBitmapCache::Alloc(desc, this->imageInfo(), &pmap);
        if (!cacheRec ||
            !generate_pixels(ScopedGenerator(fSharedGenerator), pmap,
                             fOrigin.x(), fOrigin.y())) {
            return false;
        }
        SkBitmapCache::Add(std::move(cacheRec), bitmap);
        this->notifyAddedToRasterCache();
    } else {
        if (!bitmap->tryAllocPixels(this->imageInfo()) ||
            !generate_pixels(ScopedGenerator(fSharedGenerator), bitmap->pixmap(), fOrigin.x(),
                             fOrigin.y())) {
            return false;
        }
        bitmap->setImmutable();
    }

    check_output_bitmap();
    return true;
}

//////////////////////////////////////////////////////////////////////////////////////////////////

bool SkImage_Lazy::onReadPixels(const SkImageInfo& dstInfo, void* dstPixels, size_t dstRB,
                                int srcX, int srcY, CachingHint chint) const {
    SkBitmap bm;
    if (this->getROPixels(&bm, chint)) {
        return bm.readPixels(dstInfo, dstPixels, dstRB, srcX, srcY);
    }
    return false;
}

sk_sp<SkData> SkImage_Lazy::onRefEncoded() const {
    ScopedGenerator generator(fSharedGenerator);
    return generator->refEncodedData();
}

bool SkImage_Lazy::onIsValid(GrContext* context) const {
    ScopedGenerator generator(fSharedGenerator);
    return generator->isValid(context);
}

///////////////////////////////////////////////////////////////////////////////////////////////////

#if SK_SUPPORT_GPU
GrSurfaceProxyView SkImage_Lazy::refView(GrRecordingContext* context, GrMipMapped mipMapped) const {
    if (!context) {
        return {};
    }

    GrImageTextureMaker textureMaker(context, this, GrImageTexGenPolicy::kDraw);
    return textureMaker.view(mipMapped);
}
#endif

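// Creates a new lazy image that shares this image's generator but covers only 'subset',
// expressed in this image's coordinate space.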
sk_sp<SkImage> SkImage_Lazy::onMakeSubset(GrRecordingContext* context,
                                          const SkIRect& subset) const {
    SkASSERT(this->bounds().contains(subset));
    SkASSERT(this->bounds() != subset);

    const SkIRect generatorSubset = subset.makeOffset(fOrigin);
    const SkColorType colorType = this->colorType();
    Validator validator(fSharedGenerator, &generatorSubset, &colorType, this->refColorSpace());
    return validator ? sk_sp<SkImage>(new SkImage_Lazy(&validator)) : nullptr;
}

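// Returns a lazy image that decodes to the requested color type and color space. The most recent
// result is cached (guarded by a mutex) so repeated conversions to the same target are cheap.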
sk_sp<SkImage> SkImage_Lazy::onMakeColorTypeAndColorSpace(GrRecordingContext*,
                                                          SkColorType targetCT,
                                                          sk_sp<SkColorSpace> targetCS) const {
    SkAutoMutexExclusive autoAcquire(fOnMakeColorTypeAndSpaceMutex);
    if (fOnMakeColorTypeAndSpaceResult &&
        targetCT == fOnMakeColorTypeAndSpaceResult->colorType() &&
        SkColorSpace::Equals(targetCS.get(), fOnMakeColorTypeAndSpaceResult->colorSpace())) {
        return fOnMakeColorTypeAndSpaceResult;
    }
    const SkIRect generatorSubset =
            SkIRect::MakeXYWH(fOrigin.x(), fOrigin.y(), this->width(), this->height());
    Validator validator(fSharedGenerator, &generatorSubset, &targetCT, targetCS);
    sk_sp<SkImage> result = validator ? sk_sp<SkImage>(new SkImage_Lazy(&validator)) : nullptr;
    if (result) {
        fOnMakeColorTypeAndSpaceResult = result;
    }
    return result;
}

sk_sp<SkImage> SkImage_Lazy::onReinterpretColorSpace(sk_sp<SkColorSpace> newCS) const {
    // TODO: The correct thing is to clone the generator, and modify its color space. That's hard,
    // because we don't have a clone method, and generator is public (and derived-from by clients).
    // So do the simple/inefficient thing here, and fall back to raster when this is called.

    // We allocate the bitmap with the new color space, then generate the image using the original.
    SkBitmap bitmap;
    if (bitmap.tryAllocPixels(this->imageInfo().makeColorSpace(std::move(newCS)))) {
        SkPixmap pixmap = bitmap.pixmap();
        pixmap.setColorSpace(this->refColorSpace());
        if (generate_pixels(ScopedGenerator(fSharedGenerator), pixmap, fOrigin.x(), fOrigin.y())) {
            bitmap.setImmutable();
            return SkImage::MakeFromBitmap(bitmap);
        }
    }
    return nullptr;
}

sk_sp<SkImage> SkImage::MakeFromGenerator(std::unique_ptr<SkImageGenerator> generator,
                                          const SkIRect* subset) {
    SkImage_Lazy::Validator
            validator(SharedGenerator::Make(std::move(generator)), subset, nullptr, nullptr);

    return validator ? sk_make_sp<SkImage_Lazy>(&validator) : nullptr;
}

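// Decodes the encoded bytes (optionally just a subset) straight into a raster-backed SkImage,
// without creating a lazy image or caching the result.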
sk_sp<SkImage> SkImage::DecodeToRaster(const void* encoded, size_t length, const SkIRect* subset) {
    // The generator will not outlive this function, so we can wrap the encoded data without
    // copying it.
    auto gen = SkImageGenerator::MakeFromEncoded(SkData::MakeWithoutCopy(encoded, length));
    if (!gen) {
        return nullptr;
    }
    SkImageInfo info = gen->getInfo();
    if (info.isEmpty()) {
        return nullptr;
    }

    SkIPoint origin = {0, 0};
    if (subset) {
        if (!SkIRect::MakeWH(info.width(), info.height()).contains(*subset)) {
            return nullptr;
        }
        info = info.makeDimensions(subset->size());
        origin = {subset->x(), subset->y()};
    }

    size_t rb = info.minRowBytes();
    if (rb == 0) {
        return nullptr; // the row-byte computation overflowed (image too large)
    }
    size_t size = info.computeByteSize(rb);
    if (size == SIZE_MAX) {
        return nullptr;
    }
    auto data = SkData::MakeUninitialized(size);

    SkPixmap pmap(info, data->writable_data(), rb);
    if (!generate_pixels(gen.get(), pmap, origin.x(), origin.y())) {
        return nullptr;
    }

    return SkImage::MakeRasterData(info, data, rb);
}

//////////////////////////////////////////////////////////////////////////////////////////////////

#if SK_SUPPORT_GPU

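// Adapts an SkImageGenerator to the GrYUVProvider interface so the GPU path can query and fetch
// YUVA planes directly from the generator.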
class Generator_GrYUVProvider : public GrYUVProvider {
public:
    Generator_GrYUVProvider(SkImageGenerator* gen) : fGen(gen) {}

private:
    uint32_t onGetID() const override { return fGen->uniqueID(); }
    bool onQueryYUVA8(SkYUVASizeInfo* sizeInfo,
                      SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
                      SkYUVColorSpace* colorSpace) const override {
        return fGen->queryYUVA8(sizeInfo, yuvaIndices, colorSpace);
    }
    bool onGetYUVA8Planes(const SkYUVASizeInfo& sizeInfo,
                          const SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
                          void* planes[]) override {
        return fGen->getYUVA8Planes(sizeInfo, yuvaIndices, planes);
    }

    SkImageGenerator* fGen;

    typedef GrYUVProvider INHERITED;
};

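// Fetches the YUVA planes from the generator (via Generator_GrYUVProvider). Returns null if the
// generator cannot supply planar data.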
sk_sp<SkCachedData> SkImage_Lazy::getPlanes(SkYUVASizeInfo* yuvaSizeInfo,
                                            SkYUVAIndex yuvaIndices[SkYUVAIndex::kIndexCount],
                                            SkYUVColorSpace* yuvColorSpace,
                                            const void* planes[SkYUVASizeInfo::kMaxCount]) {
    ScopedGenerator generator(fSharedGenerator);
    Generator_GrYUVProvider provider(generator);

    return provider.getPlanes(yuvaSizeInfo, yuvaIndices, yuvColorSpace, planes);
}

/*
 * We have 4 ways to try to return a texture (in order of preference):
 *
 * 1. Check the cache for a pre-existing one
 * 2. Ask the generator to natively create one
 * 3. Ask the generator to return YUV planes, which the GPU can convert
 * 4. Ask the generator to return RGB(A) data, which the GPU can convert
 */
GrSurfaceProxyView SkImage_Lazy::lockTextureProxyView(GrRecordingContext* ctx,
                                                      GrImageTexGenPolicy texGenPolicy,
                                                      GrMipMapped mipMapped) const {
    // Values representing the various texture lock paths we can take. Used for logging the path
    // taken to a histogram.
    enum LockTexturePath {
        kFailure_LockTexturePath,
        kPreExisting_LockTexturePath,
        kNative_LockTexturePath,
        kCompressed_LockTexturePath, // Deprecated
        kYUV_LockTexturePath,
        kRGBA_LockTexturePath,
    };

    enum { kLockTexturePathCount = kRGBA_LockTexturePath + 1 };

    GrUniqueKey key;
    if (texGenPolicy == GrImageTexGenPolicy::kDraw) {
        GrMakeKeyFromImageID(&key, this->uniqueID(), SkIRect::MakeSize(this->dimensions()));
    }

    const GrCaps* caps = ctx->priv().caps();
    GrProxyProvider* proxyProvider = ctx->priv().proxyProvider();

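    // Assigns the unique key to the proxy and registers a listener so the key is removed when
    // this image is deleted (i.e. when its unique ID is invalidated).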
    auto installKey = [&](const GrSurfaceProxyView& view) {
        SkASSERT(view && view.asTextureProxy());
        if (key.isValid()) {
            auto listener = GrMakeUniqueKeyInvalidationListener(&key, ctx->priv().contextID());
            this->addUniqueIDListener(std::move(listener));
            proxyProvider->assignUniqueKeyToProxy(key, view.asTextureProxy());
        }
    };

    auto ct = this->colorTypeOfLockTextureProxy(caps);

    // 1. Check the cache for a pre-existing one.
    if (key.isValid()) {
        auto proxy = proxyProvider->findOrCreateProxyByUniqueKey(key);
        if (proxy) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kPreExisting_LockTexturePath,
                                     kLockTexturePathCount);
            GrSwizzle swizzle = caps->getReadSwizzle(proxy->backendFormat(), ct);
            GrSurfaceProxyView view(std::move(proxy), kTopLeft_GrSurfaceOrigin, swizzle);
            if (mipMapped == GrMipMapped::kNo ||
                view.asTextureProxy()->mipMapped() == GrMipMapped::kYes) {
                return view;
            } else {
                // We need a mipped proxy, but we found a cached proxy that wasn't mipped. Thus we
                // generate a new mipped surface and copy the original proxy into the base layer.
                // We will then let the gpu generate the rest of the mips.
                auto mippedView = GrCopyBaseMipMapToView(ctx, view);
                if (!mippedView) {
                    // We failed to make a mipped proxy with the base copied into it. This could
                    // have been from failure to make the proxy or failure to do the copy. Thus we
                    // will fall back to just using the non-mipped proxy; see skbug.com/7094.
                    return view;
                }
                proxyProvider->removeUniqueKeyFromProxy(view.asTextureProxy());
                installKey(mippedView);
                return mippedView;
            }
        }
    }

    // 2. Ask the generator to natively create one.
    {
        ScopedGenerator generator(fSharedGenerator);
        if (auto view = generator->generateTexture(ctx, this->imageInfo(), fOrigin, mipMapped,
                                                   texGenPolicy)) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kNative_LockTexturePath,
                                     kLockTexturePathCount);
            installKey(view);
            return view;
        }
    }

    // 3. Ask the generator to return YUV planes, which the GPU can convert. If we will be mipping
    //    the texture we skip this step so the CPU generates non-planar MIP maps for us.
    if (mipMapped == GrMipMapped::kNo && !ctx->priv().options().fDisableGpuYUVConversion) {
        SkColorType colorType = this->colorType();

        ScopedGenerator generator(fSharedGenerator);
        Generator_GrYUVProvider provider(generator);

        // The pixels in the texture will be in the generator's color space.
        // If onMakeColorTypeAndColorSpace has been called then this will not match this image's
        // color space. To correct this, apply a color space conversion from the generator's color
        // space to this image's color space.
        SkColorSpace* generatorColorSpace = fSharedGenerator->fGenerator->getInfo().colorSpace();
        SkColorSpace* thisColorSpace = this->colorSpace();

        // TODO: Update to create the mipped surface in the YUV generator and draw the base
        // layer directly into the mipped surface.
        SkBudgeted budgeted = texGenPolicy == GrImageTexGenPolicy::kNew_Uncached_Unbudgeted
                                      ? SkBudgeted::kNo
                                      : SkBudgeted::kYes;
        auto view = provider.refAsTextureProxyView(ctx, this->imageInfo().dimensions(),
                                                   SkColorTypeToGrColorType(colorType),
                                                   generatorColorSpace, thisColorSpace, budgeted);
        if (view) {
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kYUV_LockTexturePath,
                                     kLockTexturePathCount);
            installKey(view);
            return view;
        }
    }

    // 4. Ask the generator to return a bitmap, which the GPU can convert.
    auto hint = texGenPolicy == GrImageTexGenPolicy::kDraw ? CachingHint::kAllow_CachingHint
                                                           : CachingHint::kDisallow_CachingHint;
    if (SkBitmap bitmap; this->getROPixels(&bitmap, hint)) {
        // We always pass uncached here because we will cache it external to the maker based on
        // *our* cache policy. We're just using the maker to generate the texture.
        auto makerPolicy = texGenPolicy == GrImageTexGenPolicy::kNew_Uncached_Unbudgeted
                                   ? GrImageTexGenPolicy::kNew_Uncached_Unbudgeted
                                   : GrImageTexGenPolicy::kNew_Uncached_Budgeted;
        GrBitmapTextureMaker bitmapMaker(ctx, bitmap, makerPolicy);
        auto view = bitmapMaker.view(mipMapped);
        if (view) {
            installKey(view);
            SK_HISTOGRAM_ENUMERATION("LockTexturePath", kRGBA_LockTexturePath,
                                     kLockTexturePathCount);
            return view;
        }
    }

    SK_HISTOGRAM_ENUMERATION("LockTexturePath", kFailure_LockTexturePath, kLockTexturePathCount);
    return {};
}

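// Chooses the GrColorType used when uploading this image as a texture. If the backend has no
// default format for the image's color type, fall back to RGBA_8888.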
GrColorType SkImage_Lazy::colorTypeOfLockTextureProxy(const GrCaps* caps) const {
    GrColorType ct = SkColorTypeToGrColorType(this->colorType());
    GrBackendFormat format = caps->getDefaultBackendFormat(ct, GrRenderable::kNo);
    if (!format.isValid()) {
        ct = GrColorType::kRGBA_8888;
    }
    return ct;
}

#if SK_SUPPORT_GPU
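// Registers a listener that fires when this image's unique ID is invalidated (e.g. the image is
// destroyed), letting GPU resources keyed on that ID be purged.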
void SkImage_Lazy::addUniqueIDListener(sk_sp<SkIDChangeListener> listener) const {
    bool singleThreaded = this->unique();
    fUniqueIDListeners.add(std::move(listener), singleThreaded);
}
#endif

///////////////////////////////////////////////////////////////////////////////////////////////////

sk_sp<SkImage> SkImage::DecodeToTexture(GrContext* ctx, const void* encoded, size_t length,
                                        const SkIRect* subset) {
    // img will not survive this function, so we don't need to copy/own the encoded data.
    auto img = MakeFromEncoded(SkData::MakeWithoutCopy(encoded, length), subset);
    if (!img) {
        return nullptr;
    }
    return img->makeTextureImage(ctx);
}

#endif