/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpu_DEFINED
#define GrGpu_DEFINED

#include "include/core/SkPath.h"
#include "include/core/SkSurface.h"
#include "include/gpu/GrTypes.h"
#include "include/private/SkTArray.h"
#include "src/core/SkTInternalLList.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrOpsRenderPass.h"
#include "src/gpu/GrSamplePatternDictionary.h"
#include "src/gpu/GrSwizzle.h"
#include "src/gpu/GrTextureProducer.h"
#include "src/gpu/GrXferProcessor.h"

class GrBackendRenderTarget;
class GrBackendSemaphore;
class GrDirectContext;
class GrGpuBuffer;
struct GrContextOptions;
class GrGLContext;
class GrPath;
class GrPathRenderer;
class GrPathRendererChain;
class GrPathRendering;
class GrPipeline;
class GrPrimitiveProcessor;
class GrRenderTarget;
class GrRingBuffer;
class GrSemaphore;
class GrStagingBufferManager;
class GrStencilAttachment;
class GrStencilSettings;
class GrSurface;
class GrTexture;
class SkJSONWriter;

class GrGpu : public SkRefCnt {
public:
    GrGpu(GrDirectContext* direct);
    ~GrGpu() override;

    GrDirectContext* getContext() { return fContext; }
    const GrDirectContext* getContext() const { return fContext; }
    /**
     * Gets the capabilities of the GPU backend.
     */
    const GrCaps* caps() const { return fCaps.get(); }
    sk_sp<const GrCaps> refCaps() const { return fCaps; }

    GrPathRendering* pathRendering() { return fPathRendering.get(); }

    virtual GrStagingBufferManager* stagingBufferManager() { return nullptr; }

    virtual GrRingBuffer* uniformsRingBuffer() { return nullptr; }

    enum class DisconnectType {
        // No cleanup should be attempted; immediately cease making backend API calls.
        kAbandon,
        // Free allocated resources (not known by GrResourceCache) before returning and
        // ensure no backend 3D API calls will be made after disconnect() returns.
        kCleanup,
    };

    // Called by the context when the underlying backend context is already or will be destroyed
    // before the GrDirectContext.
    virtual void disconnect(DisconnectType);

    // Called by GrDirectContext::isContextLost. Returns true if the backend Gpu object has gotten
    // into an unrecoverable, lost state.
    virtual bool isDeviceLost() const { return false; }

    /**
     * The GrGpu object normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the GrGpu that the state was modified and it shouldn't make assumptions
     * about the state.
     */
    void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }

    /**
     * Creates a texture object. If renderable is kYes then the returned texture can
     * be used as a render target by calling GrTexture::asRenderTarget(). Not all
     * pixel configs can be used as render targets. Support for configs as textures
     * or render targets can be checked using GrCaps.
     *
     * @param dimensions             dimensions of the texture to be created.
     * @param format                 the format for the texture (not currently used).
     * @param renderable             should the resulting texture be renderable.
     * @param renderTargetSampleCnt  the number of samples to use for rendering if renderable is
     *                               kYes. If renderable is kNo then this must be 1.
     * @param budgeted               does this texture count against the resource cache budget?
     * @param isProtected            should the texture be created as protected.
     * @param texels                 array of mipmap levels containing texel data to load.
     *                               If level i has pixels then it is assumed that its dimensions
     *                               are max(1, floor(dimensions.fWidth / 2^i)) by
     *                               max(1, floor(dimensions.fHeight / 2^i)).
     *                               If texels[i].fPixels == nullptr for all i < mipLevelCount or
     *                               mipLevelCount is 0 then the texture's contents are
     *                               uninitialized.
     *                               If a level has non-null pixels, its row bytes must be a
     *                               multiple of the config's bytes-per-pixel. The row bytes must
     *                               be tight to the level width if
     *                               !caps->writePixelsRowBytesSupport().
     *                               If mipLevelCount > 1 and texels[i].fPixels != nullptr for any
     *                               i > 0 then all levels must have non-null pixels. All levels
     *                               must have non-null pixels if
     *                               GrCaps::createTextureMustSpecifyAllLevels() is true.
     * @param textureColorType       the color type interpretation of the texture for the purpose
     *                               of uploading texel data.
     * @param srcColorType           the color type of the data in texels[].
     * @param texelLevelCount        the number of levels in 'texels'. May be 0, 1, or
     *                               1 + floor(log2(max(dimensions.fWidth, dimensions.fHeight))).
     *                               It must be the latter if
     *                               GrCaps::createTextureMustSpecifyAllLevels() is true.
     * @return The texture object if successful, otherwise nullptr.
     */
    sk_sp<GrTexture> createTexture(SkISize dimensions,
                                   const GrBackendFormat& format,
                                   GrRenderable renderable,
                                   int renderTargetSampleCnt,
                                   SkBudgeted budgeted,
                                   GrProtected isProtected,
                                   GrColorType textureColorType,
                                   GrColorType srcColorType,
                                   const GrMipLevel texels[],
                                   int texelLevelCount);
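
    // Illustrative usage sketch (not part of the API in this header): assumes the caller already
    // has a GrGpu* 'gpu', a GrBackendFormat 'format' valid for RGBA_8888, and a tightly packed
    // 256x256 RGBA pixel buffer 'pixels'.
    //
    //     GrMipLevel level = {pixels, 256 * 4};
    //     sk_sp<GrTexture> tex = gpu->createTexture({256, 256}, format,
    //                                               GrRenderable::kNo,
    //                                               /*renderTargetSampleCnt=*/1,
    //                                               SkBudgeted::kYes, GrProtected::kNo,
    //                                               GrColorType::kRGBA_8888,
    //                                               GrColorType::kRGBA_8888,
    //                                               &level, /*texelLevelCount=*/1);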

    /**
     * Simplified createTexture() interface for when there is no initial texel data to upload.
     */
    sk_sp<GrTexture> createTexture(SkISize dimensions,
                                   const GrBackendFormat& format,
                                   GrRenderable renderable,
                                   int renderTargetSampleCnt,
                                   GrMipmapped mipMapped,
                                   SkBudgeted budgeted,
                                   GrProtected isProtected);

    sk_sp<GrTexture> createCompressedTexture(SkISize dimensions,
                                             const GrBackendFormat& format,
                                             SkBudgeted budgeted,
                                             GrMipmapped mipMapped,
                                             GrProtected isProtected,
                                             const void* data, size_t dataSize);

    /**
     * Implements GrResourceProvider::wrapBackendTexture
     */
    sk_sp<GrTexture> wrapBackendTexture(const GrBackendTexture&,
                                        GrWrapOwnership,
                                        GrWrapCacheable,
                                        GrIOType);

    sk_sp<GrTexture> wrapCompressedBackendTexture(const GrBackendTexture&,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable);

    /**
     * Implements GrResourceProvider::wrapRenderableBackendTexture
     */
    sk_sp<GrTexture> wrapRenderableBackendTexture(const GrBackendTexture&,
                                                  int sampleCnt,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable);

    /**
     * Implements GrResourceProvider::wrapBackendRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTarget&);

    /**
     * Implements GrResourceProvider::wrapBackendTextureAsRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendTextureAsRenderTarget(const GrBackendTexture&, int sampleCnt);

    /**
     * Implements GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget
     */
    sk_sp<GrRenderTarget> wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                              const GrVkDrawableInfo&);

    /**
     * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
     *
     * @param size           size of the buffer to create.
     * @param intendedType   hint to the graphics subsystem about what the buffer will be used for.
     * @param accessPattern  hint to the graphics subsystem about how the data will be accessed.
     * @param data           optional data with which to initialize the buffer.
     *
     * @return the buffer if successful, otherwise nullptr.
     */
    sk_sp<GrGpuBuffer> createBuffer(size_t size, GrGpuBufferType intendedType,
                                    GrAccessPattern accessPattern, const void* data = nullptr);
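
    // Illustrative usage sketch (assumes 'gpu', 'vertexData', and 'vertexDataSize' are provided
    // by the caller): creates a static vertex buffer initialized with client data.
    //
    //     sk_sp<GrGpuBuffer> vertexBuffer = gpu->createBuffer(vertexDataSize,
    //                                                         GrGpuBufferType::kVertex,
    //                                                         kStatic_GrAccessPattern,
    //                                                         vertexData);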

    /**
     * Resolves MSAA. The resolveRect must already be in the native destination space.
     */
    void resolveRenderTarget(GrRenderTarget*, const SkIRect& resolveRect);

    /**
     * Uses the base level of the texture to recompute the contents of the other levels.
     */
    bool regenerateMipMapLevels(GrTexture*);

    /**
     * If the backend API has stateful texture bindings, this resets them back to defaults.
     */
    void resetTextureBindings();

    /**
     * Reads a rectangle of pixels from a render target. No sRGB/linear conversions are performed.
     *
     * @param surface           the surface to read from.
     * @param left              left edge of the rectangle to read (inclusive).
     * @param top               top edge of the rectangle to read (inclusive).
     * @param width             width of the rectangle to read in pixels.
     * @param height            height of the rectangle to read in pixels.
     * @param surfaceColorType  the color type for this use of the surface.
     * @param dstColorType      the color type of the destination buffer.
     * @param buffer            memory to read the rectangle into.
     * @param rowBytes          the number of bytes between consecutive rows. Must be a multiple
     *                          of dstColorType's bytes-per-pixel. Must be tight to width if
     *                          !caps->readPixelsRowBytesSupport().
     *
     * @return true if the read succeeded, false if not. The read can fail if the surface doesn't
     *         support reading, if the color type is not allowed for the format of the surface, or
     *         if the rectangle read is not contained in the surface.
     */
    bool readPixels(GrSurface* surface, int left, int top, int width, int height,
                    GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
                    size_t rowBytes);
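
    // Illustrative usage sketch (assumes 'gpu', 'surface', and its pixel dimensions 'w' x 'h' are
    // provided by the caller): reads back tightly packed RGBA_8888 pixels.
    //
    //     std::vector<uint32_t> dst(w * h);
    //     bool ok = gpu->readPixels(surface, 0, 0, w, h,
    //                               GrColorType::kRGBA_8888, GrColorType::kRGBA_8888,
    //                               dst.data(), /*rowBytes=*/w * sizeof(uint32_t));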

    /**
     * Updates the pixels in a rectangle of a surface. No sRGB/linear conversions are performed.
     *
     * @param surface             the surface to write to.
     * @param left                left edge of the rectangle to write (inclusive).
     * @param top                 top edge of the rectangle to write (inclusive).
     * @param width               width of the rectangle to write in pixels.
     * @param height              height of the rectangle to write in pixels.
     * @param surfaceColorType    the color type for this use of the surface.
     * @param srcColorType        the color type of the source buffer.
     * @param texels              array of mipmap levels containing texture data. Row bytes must
     *                            be a multiple of srcColorType's bytes-per-pixel. Must be tight
     *                            to level width if !caps->writePixelsRowBytesSupport().
     * @param mipLevelCount       number of levels in 'texels'.
     * @param prepForTexSampling  after doing the write, should the surface be prepared for
     *                            texture sampling. This is currently only used by Vulkan for
     *                            inline uploads to set the layout back to sampled after doing
     *                            the upload. Inline uploads currently can happen between draws
     *                            in a single op so it is not trivial to break up the GrOpsTask
     *                            into two tasks when we see an inline upload. However, once we
     *                            are able to support doing that we can remove this parameter.
     *
     * @return true if the write succeeded, false if not. The write can fail if the surface
     *         doesn't support writing (e.g. read only), if the color type is not allowed for the
     *         format of the surface, or if the rectangle written is not contained in the surface.
     */
    bool writePixels(GrSurface* surface, int left, int top, int width, int height,
                     GrColorType surfaceColorType, GrColorType srcColorType,
                     const GrMipLevel texels[], int mipLevelCount, bool prepForTexSampling = false);

    /**
     * Helper for the case of a single level.
     */
    bool writePixels(GrSurface* surface, int left, int top, int width, int height,
                     GrColorType surfaceColorType, GrColorType srcColorType, const void* buffer,
                     size_t rowBytes, bool prepForTexSampling = false) {
        GrMipLevel mipLevel = {buffer, rowBytes};
        return this->writePixels(surface, left, top, width, height, surfaceColorType, srcColorType,
                                 &mipLevel, 1, prepForTexSampling);
    }
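
    // Illustrative usage sketch (assumes 'gpu', 'surface', 'srcPixels', and a w x h update rect
    // at (x, y) are provided by the caller): uploads one tightly packed RGBA_8888 level.
    //
    //     bool ok = gpu->writePixels(surface, x, y, w, h,
    //                                GrColorType::kRGBA_8888, GrColorType::kRGBA_8888,
    //                                srcPixels, /*rowBytes=*/w * 4);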

    /**
     * Updates the pixels in a rectangle of a texture using a buffer. If the texture is MIP mapped,
     * the base level is written to.
     *
     * @param texture           the texture to write to.
     * @param left              left edge of the rectangle to write (inclusive).
     * @param top               top edge of the rectangle to write (inclusive).
     * @param width             width of the rectangle to write in pixels.
     * @param height            height of the rectangle to write in pixels.
     * @param textureColorType  the color type for this use of the surface.
     * @param bufferColorType   the color type of the transfer buffer's pixel data.
     * @param transferBuffer    GrBuffer to read pixels from (type must be "kXferCpuToGpu").
     * @param offset            offset from the start of the buffer.
     * @param rowBytes          number of bytes between consecutive rows in the buffer. Must be a
     *                          multiple of bufferColorType's bytes-per-pixel. Must be tight to
     *                          width if !caps->writePixelsRowBytesSupport().
     */
    bool transferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
                          GrColorType textureColorType, GrColorType bufferColorType,
                          GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes);
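
    // Illustrative usage sketch (assumes 'gpu', 'texture', 'srcPixels', and dimensions 'w' x 'h'
    // are provided by the caller): stages the pixels in a kXferCpuToGpu buffer and transfers them.
    //
    //     sk_sp<GrGpuBuffer> xferBuffer = gpu->createBuffer(w * 4 * h,
    //                                                       GrGpuBufferType::kXferCpuToGpu,
    //                                                       kDynamic_GrAccessPattern, srcPixels);
    //     bool ok = gpu->transferPixelsTo(texture, 0, 0, w, h,
    //                                     GrColorType::kRGBA_8888, GrColorType::kRGBA_8888,
    //                                     xferBuffer.get(), /*offset=*/0, /*rowBytes=*/w * 4);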

    /**
     * Reads the pixels from a rectangle of a surface into a buffer. Use
     * GrCaps::SupportedRead::fOffsetAlignmentForTransferBuffer to determine the requirements for
     * the buffer offset alignment. If the surface is a MIP mapped texture, the base level is read.
     *
     * If successful, the row bytes in the buffer are always:
     *   GrColorTypeBytesPerPixel(bufferColorType) * width
     *
     * Asserts that the caller has passed a properly aligned offset and that the buffer is
     * large enough to hold the result.
     *
     * @param surface           the surface to read from.
     * @param left              left edge of the rectangle to read (inclusive).
     * @param top               top edge of the rectangle to read (inclusive).
     * @param width             width of the rectangle to read in pixels.
     * @param height            height of the rectangle to read in pixels.
     * @param surfaceColorType  the color type for this use of the surface.
     * @param bufferColorType   the color type of the transfer buffer's pixel data.
     * @param transferBuffer    GrBuffer to write pixels to (type must be "kXferGpuToCpu").
     * @param offset            offset from the start of the buffer.
     */
    bool transferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                            GrColorType surfaceColorType, GrColorType bufferColorType,
                            GrGpuBuffer* transferBuffer, size_t offset);
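
    // Illustrative usage sketch (assumes 'gpu', 'surface', and dimensions 'w' x 'h' are provided
    // by the caller; an offset of 0 is used here, which trivially satisfies the alignment
    // requirement mentioned above):
    //
    //     size_t size = w * h * GrColorTypeBytesPerPixel(GrColorType::kRGBA_8888);
    //     sk_sp<GrGpuBuffer> xferBuffer = gpu->createBuffer(size,
    //                                                       GrGpuBufferType::kXferGpuToCpu,
    //                                                       kDynamic_GrAccessPattern);
    //     bool ok = gpu->transferPixelsFrom(surface, 0, 0, w, h,
    //                                       GrColorType::kRGBA_8888, GrColorType::kRGBA_8888,
    //                                       xferBuffer.get(), /*offset=*/0);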

    // Called to perform a surface-to-surface copy. Fallbacks to issuing a draw from the src to dst
    // take place at higher levels and this function implements faster copy paths. The rect
    // and point are pre-clipped. The src rect and implied dst rect are guaranteed to be within the
    // src/dst bounds and non-empty. They must also be in their exact device space coords, including
    // already being transformed for origin if need be. If canDiscardOutsideDstRect is set to true
    // then we don't need to preserve any data on the dst surface outside of the copy.
    bool copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                     const SkIPoint& dstPoint);

    // Queries the per-pixel HW sample locations for the given render target, and then finds or
    // assigns a key that uniquely identifies the sample pattern. The actual sample locations can
    // be retrieved with retrieveSampleLocations().
    int findOrAssignSamplePatternKey(GrRenderTarget*);

    // Retrieves the per-pixel HW sample locations for the given sample pattern key, and, as a
    // by-product, the actual number of samples in use. (This may differ from the number of samples
    // requested by the render target.) Sample locations are returned as 0..1 offsets relative to
    // the top-left corner of the pixel.
    const SkTArray<SkPoint>& retrieveSampleLocations(int samplePatternKey) const {
        return fSamplePatternDictionary.retrieveSampleLocations(samplePatternKey);
    }
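
    // Illustrative usage sketch (assumes 'gpu' and a multisampled 'renderTarget' are provided by
    // the caller): looks up the sample pattern key and then the actual sample offsets.
    //
    //     int key = gpu->findOrAssignSamplePatternKey(renderTarget);
    //     const SkTArray<SkPoint>& locations = gpu->retrieveSampleLocations(key);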

    // Returns a GrOpsRenderPass which GrOpsTasks send draw commands to instead of directly
    // to the Gpu object. The 'bounds' rect is the content rect of the renderTarget.
    // If a 'stencil' is provided it will be the one bound to 'renderTarget'. If one is not
    // provided but 'renderTarget' has a stencil buffer then that is a signal that the
    // render target's stencil buffer should be ignored.
    virtual GrOpsRenderPass* getOpsRenderPass(
            GrRenderTarget* renderTarget,
            GrStencilAttachment* stencil,
            GrSurfaceOrigin,
            const SkIRect& bounds,
            const GrOpsRenderPass::LoadAndStoreInfo&,
            const GrOpsRenderPass::StencilLoadAndStoreInfo&,
            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) = 0;

    // Called by GrDrawingManager when flushing.
    // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will also
    // insert any numSemaphores semaphores on the gpu and set the backendSemaphores to match the
    // inserted semaphores.
    void executeFlushInfo(GrSurfaceProxy*[], int numProxies,
                          SkSurface::BackendSurfaceAccess access,
                          const GrFlushInfo&,
                          const GrBackendSurfaceMutableState* newState);

    bool submitToGpu(bool syncCpu);

    virtual void submit(GrOpsRenderPass*) = 0;

    virtual GrFence SK_WARN_UNUSED_RESULT insertFence() = 0;
    virtual bool waitFence(GrFence) = 0;
    virtual void deleteFence(GrFence) const = 0;
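
    // Illustrative usage sketch (assumes 'gpu' is provided by the caller; the exact blocking
    // behavior of waitFence() is backend-specific): inserts a fence after submitted work, later
    // checks it, and then deletes it.
    //
    //     GrFence fence = gpu->insertFence();
    //     bool signaled = gpu->waitFence(fence);
    //     gpu->deleteFence(fence);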

    virtual std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(
            bool isOwned = true) = 0;
    virtual std::unique_ptr<GrSemaphore> wrapBackendSemaphore(
            const GrBackendSemaphore& semaphore,
            GrResourceProvider::SemaphoreWrapType wrapType,
            GrWrapOwnership ownership) = 0;
    virtual void insertSemaphore(GrSemaphore* semaphore) = 0;
    virtual void waitSemaphore(GrSemaphore* semaphore) = 0;

    virtual void addFinishedProc(GrGpuFinishedProc finishedProc,
                                 GrGpuFinishedContext finishedContext) = 0;
    virtual void checkFinishProcs() = 0;

    virtual void takeOwnershipOfBuffer(sk_sp<GrGpuBuffer>) {}

    /**
     * Checks if we detected an OOM from the underlying 3D API and if so returns true and resets
     * the internal OOM state to false. Otherwise, returns false.
     */
    bool checkAndResetOOMed();

    /**
     * Put this texture in a safe and known state for use across multiple contexts. Depending on
     * the backend, this may return a GrSemaphore. If so, other contexts should wait on that
     * semaphore before using this texture.
     */
    virtual std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) = 0;

    ///////////////////////////////////////////////////////////////////////////
    // Debugging and Stats

    class Stats {
    public:
        enum class ProgramCacheResult {
            kHit,       // the program was found in the cache
            kMiss,      // the program was not found in the cache (and was, thus, compiled)
            kPartial,   // a precompiled version was found in the persistent cache

            kLast = kPartial
        };

        static const int kNumProgramCacheResults = (int)ProgramCacheResult::kLast + 1;

#if GR_GPU_STATS
        Stats() = default;

        void reset() { *this = {}; }

        int renderTargetBinds() const { return fRenderTargetBinds; }
        void incRenderTargetBinds() { fRenderTargetBinds++; }

        int shaderCompilations() const { return fShaderCompilations; }
        void incShaderCompilations() { fShaderCompilations++; }

        int textureCreates() const { return fTextureCreates; }
        void incTextureCreates() { fTextureCreates++; }

        int textureUploads() const { return fTextureUploads; }
        void incTextureUploads() { fTextureUploads++; }

        int transfersToTexture() const { return fTransfersToTexture; }
        void incTransfersToTexture() { fTransfersToTexture++; }

        int transfersFromSurface() const { return fTransfersFromSurface; }
        void incTransfersFromSurface() { fTransfersFromSurface++; }

        int stencilAttachmentCreates() const { return fStencilAttachmentCreates; }
        void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }

        int numDraws() const { return fNumDraws; }
        void incNumDraws() { fNumDraws++; }

        int numFailedDraws() const { return fNumFailedDraws; }
        void incNumFailedDraws() { ++fNumFailedDraws; }

        int numSubmitToGpus() const { return fNumSubmitToGpus; }
        void incNumSubmitToGpus() { ++fNumSubmitToGpus; }

        int numScratchTexturesReused() const { return fNumScratchTexturesReused; }
        void incNumScratchTexturesReused() { ++fNumScratchTexturesReused; }

        int numInlineCompilationFailures() const { return fNumInlineCompilationFailures; }
        void incNumInlineCompilationFailures() { ++fNumInlineCompilationFailures; }

        int numInlineProgramCacheResult(ProgramCacheResult stat) const {
            return fInlineProgramCacheStats[(int) stat];
        }
        void incNumInlineProgramCacheResult(ProgramCacheResult stat) {
            ++fInlineProgramCacheStats[(int) stat];
        }

        int numPreCompilationFailures() const { return fNumPreCompilationFailures; }
        void incNumPreCompilationFailures() { ++fNumPreCompilationFailures; }

        int numPreProgramCacheResult(ProgramCacheResult stat) const {
            return fPreProgramCacheStats[(int) stat];
        }
        void incNumPreProgramCacheResult(ProgramCacheResult stat) {
            ++fPreProgramCacheStats[(int) stat];
        }

        int numCompilationFailures() const { return fNumCompilationFailures; }
        void incNumCompilationFailures() { ++fNumCompilationFailures; }

        int numPartialCompilationSuccesses() const { return fNumPartialCompilationSuccesses; }
        void incNumPartialCompilationSuccesses() { ++fNumPartialCompilationSuccesses; }

        int numCompilationSuccesses() const { return fNumCompilationSuccesses; }
        void incNumCompilationSuccesses() { ++fNumCompilationSuccesses; }

#if GR_TEST_UTILS
        void dump(SkString*);
        void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values);
#endif
    private:
        int fRenderTargetBinds = 0;
        int fShaderCompilations = 0;
        int fTextureCreates = 0;
        int fTextureUploads = 0;
        int fTransfersToTexture = 0;
        int fTransfersFromSurface = 0;
        int fStencilAttachmentCreates = 0;
        int fNumDraws = 0;
        int fNumFailedDraws = 0;
        int fNumSubmitToGpus = 0;
        int fNumScratchTexturesReused = 0;

        int fNumInlineCompilationFailures = 0;
        int fInlineProgramCacheStats[kNumProgramCacheResults] = { 0 };

        int fNumPreCompilationFailures = 0;
        int fPreProgramCacheStats[kNumProgramCacheResults] = { 0 };

        int fNumCompilationFailures = 0;
        int fNumPartialCompilationSuccesses = 0;
        int fNumCompilationSuccesses = 0;

#else

#if GR_TEST_UTILS
        void dump(SkString*) {}
        void dumpKeyValuePairs(SkTArray<SkString>*, SkTArray<double>*) {}
#endif
        void incRenderTargetBinds() {}
        void incShaderCompilations() {}
        void incTextureCreates() {}
        void incTextureUploads() {}
        void incTransfersToTexture() {}
        void incTransfersFromSurface() {}
        void incStencilAttachmentCreates() {}
        void incNumDraws() {}
        void incNumFailedDraws() {}
        void incNumSubmitToGpus() {}
        void incNumScratchTexturesReused() {}
        void incNumInlineCompilationFailures() {}
        void incNumInlineProgramCacheResult(ProgramCacheResult stat) {}
        void incNumPreCompilationFailures() {}
        void incNumPreProgramCacheResult(ProgramCacheResult stat) {}
        void incNumCompilationFailures() {}
        void incNumPartialCompilationSuccesses() {}
        void incNumCompilationSuccesses() {}
#endif
    };

    Stats* stats() { return &fStats; }
    void dumpJSON(SkJSONWriter*) const;
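
    // Illustrative usage sketch (assumes 'gpu' is provided by the caller and the build defines
    // GR_TEST_UTILS): dumps the accumulated stats to a string.
    //
    //     #if GR_TEST_UTILS
    //         SkString statsText;
    //         gpu->stats()->dump(&statsText);
    //     #endif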

    /** Used to initialize a backend texture with a constant color, pixmaps, or
     *  compressed data.
     */
    class BackendTextureData {
    public:
        enum class Type { kColor, kPixmaps, kCompressed };
        BackendTextureData() = default;
        BackendTextureData(const SkColor4f& color) : fType(Type::kColor), fColor(color) {}
        BackendTextureData(const SkPixmap pixmaps[]) : fType(Type::kPixmaps), fPixmaps(pixmaps) {
            SkASSERT(pixmaps);
        }
        BackendTextureData(const void* data, size_t size) : fType(Type::kCompressed) {
            SkASSERT(data);
            fCompressed.fData = data;
            fCompressed.fSize = size;
        }

        Type type() const { return fType; }
        SkColor4f color() const {
            SkASSERT(this->type() == Type::kColor);
            return fColor;
        }

        const SkPixmap& pixmap(int i) const {
            SkASSERT(this->type() == Type::kPixmaps);
            return fPixmaps[i];
        }
        const SkPixmap* pixmaps() const {
            SkASSERT(this->type() == Type::kPixmaps);
            return fPixmaps;
        }

        const void* compressedData() const {
            SkASSERT(this->type() == Type::kCompressed);
            return fCompressed.fData;
        }
        size_t compressedSize() const {
            SkASSERT(this->type() == Type::kCompressed);
            return fCompressed.fSize;
        }

    private:
        Type fType = Type::kColor;
        union {
            SkColor4f fColor = {0, 0, 0, 0};
            const SkPixmap* fPixmaps;
            struct {
                const void* fData;
                size_t fSize;
            } fCompressed;
        };
    };
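
    // Illustrative usage sketch: the three payload kinds map onto the three non-default
    // constructors ('levelPixmaps', 'compressedBytes', and 'compressedByteCount' are assumed to
    // come from the caller).
    //
    //     GrGpu::BackendTextureData clearToBlue(SkColor4f{0, 0, 1, 1});
    //     GrGpu::BackendTextureData fromPixmaps(levelPixmaps);  // one SkPixmap per MIP level
    //     GrGpu::BackendTextureData fromCompressed(compressedBytes, compressedByteCount);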

    /**
     * Creates a texture directly in the backend API without wrapping it in a GrTexture.
     * Must be matched with a call to deleteBackendTexture().
     *
     * If data is null the texture is uninitialized.
     *
     * If data represents a color then all texture levels are cleared to that color.
     *
     * If data represents pixmaps then it must have either one pixmap or, if mipmapping
     * is specified, a complete MIP hierarchy of pixmaps. Additionally, if provided, the mip
     * levels must be sized correctly according to the MIP sizes implied by dimensions. They
     * must all have the same color type and that color type must be compatible with the
     * texture format.
     */
    GrBackendTexture createBackendTexture(SkISize dimensions,
                                          const GrBackendFormat&,
                                          GrRenderable,
                                          GrMipmapped,
                                          GrProtected);

    bool updateBackendTexture(const GrBackendTexture&,
                              sk_sp<GrRefCntedCallback> finishedCallback,
                              const BackendTextureData*);
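
    // Illustrative usage sketch (assumes 'gpu' and a GrBackendFormat 'format' are provided by the
    // caller): creates a backend texture, clears it to opaque black, and later deletes it.
    //
    //     GrBackendTexture betex = gpu->createBackendTexture({64, 64}, format,
    //                                                        GrRenderable::kNo,
    //                                                        GrMipmapped::kNo, GrProtected::kNo);
    //     GrGpu::BackendTextureData blackData(SkColor4f{0, 0, 0, 1});
    //     gpu->updateBackendTexture(betex, /*finishedCallback=*/nullptr, &blackData);
    //     ...
    //     gpu->deleteBackendTexture(betex);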

    /**
     * Same as the createBackendTexture case except compressed backend textures can
     * never be renderable.
     */
    GrBackendTexture createCompressedBackendTexture(SkISize dimensions,
                                                    const GrBackendFormat&,
                                                    GrMipmapped,
                                                    GrProtected);

    bool updateCompressedBackendTexture(const GrBackendTexture&,
                                        sk_sp<GrRefCntedCallback> finishedCallback,
                                        const BackendTextureData*);

    virtual bool setBackendTextureState(const GrBackendTexture&,
                                        const GrBackendSurfaceMutableState&,
                                        sk_sp<GrRefCntedCallback> finishedCallback) {
        return false;
    }

    virtual bool setBackendRenderTargetState(const GrBackendRenderTarget&,
                                             const GrBackendSurfaceMutableState&,
                                             sk_sp<GrRefCntedCallback> finishedCallback) {
        return false;
    }

    /**
     * Frees a texture created by createBackendTexture(). If ownership of the backend
     * texture has been transferred to a context using adopt semantics this should not be called.
     */
    virtual void deleteBackendTexture(const GrBackendTexture&) = 0;

    /**
     * In this case we have a program descriptor and a program info but no render target.
     */
    virtual bool compile(const GrProgramDesc&, const GrProgramInfo&) = 0;

    virtual bool precompileShader(const SkData& key, const SkData& data) { return false; }

#if GR_TEST_UTILS
    /** Checks whether a handle represents an actual texture in the backend API that has not been
        freed. */
    virtual bool isTestingOnlyBackendTexture(const GrBackendTexture&) const = 0;

    virtual GrBackendRenderTarget createTestingOnlyBackendRenderTarget(int w, int h,
                                                                       GrColorType) = 0;

    virtual void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) = 0;

    // This is only to be used in GL-specific tests.
    virtual const GrGLContext* glContextForTesting() const { return nullptr; }

    // This is only to be used by testing code.
    virtual void resetShaderCacheForTesting() const {}

    /**
     * Flushes all work to the GPU and then waits on the CPU until all GPU work has completed.
     * This is for testing purposes only.
     */
    virtual void testingOnly_flushGpuAndSync() = 0;

    /**
     * Inserted as a pair around a block of code to do a GPU frame capture.
     * Currently only works with the Metal backend.
     */
    virtual void testingOnly_startCapture() {}
    virtual void testingOnly_endCapture() {}
#endif

    // The width and height may be larger than those of the render target (if the underlying API
    // allows it). Returns nullptr if a compatible stencil attachment could not be created;
    // otherwise the caller owns the ref on the GrStencilAttachment.
    virtual GrStencilAttachment* createStencilAttachmentForRenderTarget(
            const GrRenderTarget*, int width, int height, int numStencilSamples) = 0;

    void handleDirtyContext() {
        if (fResetBits) {
            this->resetContext();
        }
    }

    virtual void storeVkPipelineCacheData() {}

    // http://skbug.com/9739
    virtual void insertManualFramebufferBarrier() {
        SkASSERT(!this->caps()->requiresManualFBBarrierAfterTessellatedStencilDraw());
        SK_ABORT("Manual framebuffer barrier not supported.");
    }

    // Called before certain draws in order to guarantee coherent results from dst reads.
    virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;

protected:
    static bool MipMapsAreCorrect(SkISize dimensions, GrMipmapped, const BackendTextureData*);
    static bool CompressedDataIsCorrect(SkISize dimensions, SkImage::CompressionType,
                                        GrMipmapped, const BackendTextureData*);

    // Handles cases where a surface will be updated without a call to flushRenderTarget.
    void didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
                           uint32_t mipLevels = 1) const;

    void setOOMed() { fOOMed = true; }

    Stats fStats;
    std::unique_ptr<GrPathRendering> fPathRendering;
    // Subclass must initialize this in its constructor.
    sk_sp<const GrCaps> fCaps;

private:
    virtual GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                                    const GrBackendFormat&,
                                                    GrRenderable,
                                                    GrMipmapped,
                                                    GrProtected) = 0;

    virtual GrBackendTexture onCreateCompressedBackendTexture(
            SkISize dimensions, const GrBackendFormat&, GrMipmapped, GrProtected) = 0;

    virtual bool onUpdateBackendTexture(const GrBackendTexture&,
                                        sk_sp<GrRefCntedCallback> finishedCallback,
                                        const BackendTextureData*) = 0;

    virtual bool onUpdateCompressedBackendTexture(const GrBackendTexture&,
                                                  sk_sp<GrRefCntedCallback> finishedCallback,
                                                  const BackendTextureData*) = 0;

    // Called when the 3D context state is unknown. The subclass should emit any
    // assumed 3D context state and dirty any state cache.
    virtual void onResetContext(uint32_t resetBits) = 0;

    // Implementation of resetTextureBindings.
    virtual void onResetTextureBindings() {}

    // Queries the effective number of samples in use by the hardware for the given render target,
    // and queries the individual sample locations.
    virtual void querySampleLocations(GrRenderTarget*, SkTArray<SkPoint>*) = 0;

    // Overridden by the backend-specific derived class to create objects.
    // Texture size, renderability, format support, and sample count will have already been
    // validated in the base class before onCreateTexture is called.
    // If the ith bit is set in levelClearMask then the ith MIP level should be cleared.
    virtual sk_sp<GrTexture> onCreateTexture(SkISize dimensions,
                                             const GrBackendFormat&,
                                             GrRenderable,
                                             int renderTargetSampleCnt,
                                             SkBudgeted,
                                             GrProtected,
                                             int mipLevelCount,
                                             uint32_t levelClearMask) = 0;
    virtual sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                                       const GrBackendFormat&,
                                                       SkBudgeted,
                                                       GrMipmapped,
                                                       GrProtected,
                                                       const void* data, size_t dataSize) = 0;
    virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable,
                                                  GrIOType) = 0;

    virtual sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;

    virtual sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                            int sampleCnt,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
                                                                     int sampleCnt) = 0;
    virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                        const GrVkDrawableInfo&);

    virtual sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType,
                                              GrAccessPattern, const void* data) = 0;

    // Overridden by the backend-specific derived class to perform the surface read.
    virtual bool onReadPixels(GrSurface*, int left, int top, int width, int height,
                              GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
                              size_t rowBytes) = 0;

    // Overridden by the backend-specific derived class to perform the surface write.
    virtual bool onWritePixels(GrSurface*, int left, int top, int width, int height,
                               GrColorType surfaceColorType, GrColorType srcColorType,
                               const GrMipLevel texels[], int mipLevelCount,
                               bool prepForTexSampling) = 0;

    // Overridden by the backend-specific derived class to perform the texture transfer.
    virtual bool onTransferPixelsTo(GrTexture*, int left, int top, int width, int height,
                                    GrColorType textureColorType, GrColorType bufferColorType,
                                    GrGpuBuffer* transferBuffer, size_t offset,
                                    size_t rowBytes) = 0;
    // Overridden by the backend-specific derived class to perform the surface transfer.
    virtual bool onTransferPixelsFrom(GrSurface*, int left, int top, int width, int height,
                                      GrColorType surfaceColorType, GrColorType bufferColorType,
                                      GrGpuBuffer* transferBuffer, size_t offset) = 0;

    // Overridden by the backend-specific derived class to perform the resolve.
    virtual void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect) = 0;

    // Overridden by the backend-specific derived class to perform mip map level regeneration.
    virtual bool onRegenerateMipMapLevels(GrTexture*) = 0;

    // Overridden by the backend-specific derived class to perform the copy surface.
    virtual bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                               const SkIPoint& dstPoint) = 0;

    virtual void prepareSurfacesForBackendAccessAndStateUpdates(
            GrSurfaceProxy* proxies[],
            int numProxies,
            SkSurface::BackendSurfaceAccess access,
            const GrBackendSurfaceMutableState* newState) {}

    virtual bool onSubmitToGpu(bool syncCpu) = 0;

#ifdef SK_ENABLE_DUMP_GPU
    virtual void onDumpJSON(SkJSONWriter*) const {}
#endif

    sk_sp<GrTexture> createTextureCommon(SkISize,
                                         const GrBackendFormat&,
                                         GrRenderable,
                                         int renderTargetSampleCnt,
                                         SkBudgeted,
                                         GrProtected,
                                         int mipLevelCnt,
                                         uint32_t levelClearMask);

    void resetContext() {
        this->onResetContext(fResetBits);
        fResetBits = 0;
    }

    void callSubmittedProcs(bool success);

    uint32_t fResetBits;
    // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
    GrDirectContext* fContext;
    GrSamplePatternDictionary fSamplePatternDictionary;

    struct SubmittedProc {
        SubmittedProc(GrGpuSubmittedProc proc, GrGpuSubmittedContext context)
                : fProc(proc), fContext(context) {}

        GrGpuSubmittedProc fProc;
        GrGpuSubmittedContext fContext;
    };
    SkSTArray<4, SubmittedProc> fSubmittedProcs;

    bool fOOMed = false;

    friend class GrPathRendering;
    typedef SkRefCnt INHERITED;
};

#endif