/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpu_DEFINED
#define GrGpu_DEFINED

#include "include/core/SkPath.h"
#include "include/core/SkSurface.h"
#include "include/gpu/GrTypes.h"
#include "include/private/SkTArray.h"
#include "src/core/SkTInternalLList.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrOpsRenderPass.h"
#include "src/gpu/GrSamplePatternDictionary.h"
#include "src/gpu/GrStagingBuffer.h"
#include "src/gpu/GrSwizzle.h"
#include "src/gpu/GrTextureProducer.h"
#include "src/gpu/GrXferProcessor.h"
#include <map>

class GrBackendRenderTarget;
class GrBackendSemaphore;
class GrGpuBuffer;
class GrContext;
struct GrContextOptions;
class GrGLContext;
class GrPath;
class GrPathRenderer;
class GrPathRendererChain;
class GrPathRendering;
class GrPipeline;
class GrPrimitiveProcessor;
class GrRenderTarget;
class GrSemaphore;
class GrStencilAttachment;
class GrStencilSettings;
class GrSurface;
class GrTexture;
class SkJSONWriter;

class GrGpu : public SkRefCnt {
public:
    GrGpu(GrContext* context);
    ~GrGpu() override;

    GrContext* getContext() { return fContext; }
    const GrContext* getContext() const { return fContext; }

    /**
     * Gets the capabilities of this GPU backend.
     */
    const GrCaps* caps() const { return fCaps.get(); }
    sk_sp<const GrCaps> refCaps() const { return fCaps; }

    GrPathRendering* pathRendering() { return fPathRendering.get(); }

    enum class DisconnectType {
        // No cleanup should be attempted; immediately cease making backend API calls.
        kAbandon,
        // Free allocated resources (not known by GrResourceCache) before returning and
        // ensure no backend 3D API calls will be made after disconnect() returns.
        kCleanup,
    };

    // Called by GrContext when the underlying backend context is already or will be destroyed
    // before GrContext.
    virtual void disconnect(DisconnectType);

    // Called by GrContext::isContextLost. Returns true if the backend GPU object has gotten into
    // an unrecoverable, lost state.
    virtual bool isDeviceLost() const { return false; }

    /**
     * The GrGpu object normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the GrGpu that the state was modified and it shouldn't make assumptions
     * about the state.
     */
    void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }
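
    // A minimal usage sketch (illustrative only; 'gpu' is a hypothetical GrGpu* and the external
    // call stands in for any code that modifies backend 3D API state behind Skia's back):
    //
    //     callIntoExternalGraphicsCode();   // may change GL/Vulkan/Metal state directly
    //     gpu->markContextDirty();          // defaults to kAll_GrBackendState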

    /**
     * Creates a texture object. If renderable is kYes then the returned texture can
     * be used as a render target by calling GrTexture::asRenderTarget(). Not all
     * pixel configs can be used as render targets. Support for configs as textures
     * or render targets can be checked using GrCaps.
     *
     * @param dimensions             dimensions of the texture to be created.
     * @param format                 the format for the texture (not currently used).
     * @param renderable             should the resulting texture be renderable.
     * @param renderTargetSampleCnt  the number of samples to use for rendering if renderable is
     *                               kYes. If renderable is kNo then this must be 1.
     * @param budgeted               does this texture count against the resource cache budget?
     * @param isProtected            should the texture be created as protected.
     * @param textureColorType       the color type interpretation of the texture for the purpose
     *                               of uploading texel data.
     * @param srcColorType           the color type of data in texels[].
     * @param texels                 array of mipmap levels containing texel data to load.
     *                               If level i has pixels then it is assumed that its dimensions
     *                               are max(1, floor(dimensions.fWidth / 2^i)) by
     *                               max(1, floor(dimensions.fHeight / 2^i)).
     *                               If texels[i].fPixels == nullptr for all i < texelLevelCount,
     *                               or texelLevelCount is 0, then the texture's contents are
     *                               uninitialized.
     *                               If a level has non-null pixels, its row bytes must be a
     *                               multiple of srcColorType's bytes-per-pixel. The row bytes
     *                               must be tight to the level width if
     *                               !caps->writePixelsRowBytesSupport().
     *                               If texelLevelCount > 1 and texels[i].fPixels != nullptr for
     *                               any i > 0 then all levels must have non-null pixels. All
     *                               levels must have non-null pixels if
     *                               GrCaps::createTextureMustSpecifyAllLevels() is true.
     * @param texelLevelCount        the number of levels in 'texels'. May be 0, 1, or
     *                               floor(log2(max(dimensions.fWidth, dimensions.fHeight))) + 1.
     *                               It must be the latter if
     *                               GrCaps::createTextureMustSpecifyAllLevels() is true.
     * @return  The texture object if successful, otherwise nullptr.
     */
    sk_sp<GrTexture> createTexture(SkISize dimensions,
                                   const GrBackendFormat& format,
                                   GrRenderable renderable,
                                   int renderTargetSampleCnt,
                                   SkBudgeted budgeted,
                                   GrProtected isProtected,
                                   GrColorType textureColorType,
                                   GrColorType srcColorType,
                                   const GrMipLevel texels[],
                                   int texelLevelCount);
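
    // A minimal, illustrative sketch of a single-level upload (not part of the API; 'gpu',
    // 'format', and 'pixels' are hypothetical, and the data is assumed to be tightly packed
    // RGBA_8888):
    //
    //     SkISize dims = {256, 256};
    //     GrMipLevel level = {pixels, dims.width() * 4};   // fPixels, fRowBytes (4 bpp, tight)
    //     sk_sp<GrTexture> tex = gpu->createTexture(dims, format,
    //                                               GrRenderable::kNo,
    //                                               /*renderTargetSampleCnt=*/1,
    //                                               SkBudgeted::kYes, GrProtected::kNo,
    //                                               GrColorType::kRGBA_8888,  // textureColorType
    //                                               GrColorType::kRGBA_8888,  // srcColorType
    //                                               &level, /*texelLevelCount=*/1);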

    /**
     * Simplified createTexture() interface for when there is no initial texel data to upload.
     */
    sk_sp<GrTexture> createTexture(SkISize dimensions,
                                   const GrBackendFormat& format,
                                   GrRenderable renderable,
                                   int renderTargetSampleCnt,
                                   GrMipMapped mipMapped,
                                   SkBudgeted budgeted,
                                   GrProtected isProtected);

    sk_sp<GrTexture> createCompressedTexture(SkISize dimensions,
                                             const GrBackendFormat& format,
                                             SkBudgeted budgeted,
                                             GrMipMapped mipMapped,
                                             GrProtected isProtected,
                                             const void* data, size_t dataSize);

    /**
     * Implements GrResourceProvider::wrapBackendTexture
     */
    sk_sp<GrTexture> wrapBackendTexture(const GrBackendTexture&,
                                        GrWrapOwnership,
                                        GrWrapCacheable,
                                        GrIOType);

    sk_sp<GrTexture> wrapCompressedBackendTexture(const GrBackendTexture&,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable);

    /**
     * Implements GrResourceProvider::wrapRenderableBackendTexture
     */
    sk_sp<GrTexture> wrapRenderableBackendTexture(const GrBackendTexture&,
                                                  int sampleCnt,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable);

    /**
     * Implements GrResourceProvider::wrapBackendRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTarget&);

    /**
     * Implements GrResourceProvider::wrapBackendTextureAsRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendTextureAsRenderTarget(const GrBackendTexture&, int sampleCnt);

    /**
     * Implements GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget
     */
    sk_sp<GrRenderTarget> wrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                              const GrVkDrawableInfo&);

    /**
     * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
     *
     * @param size           size of buffer to create.
     * @param intendedType   hint to the graphics subsystem about what the buffer will be used for.
     * @param accessPattern  hint to the graphics subsystem about how the data will be accessed.
     * @param data           optional data with which to initialize the buffer.
     *
     * @return  the buffer if successful, otherwise nullptr.
     */
    sk_sp<GrGpuBuffer> createBuffer(size_t size, GrGpuBufferType intendedType,
                                    GrAccessPattern accessPattern, const void* data = nullptr);
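
    // Illustrative sketch only ('gpu' and 'verts' are hypothetical; creates a static vertex
    // buffer initialized from CPU data):
    //
    //     sk_sp<GrGpuBuffer> vbo = gpu->createBuffer(sizeof(verts),
    //                                                GrGpuBufferType::kVertex,
    //                                                kStatic_GrAccessPattern,
    //                                                verts);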

    enum class ForExternalIO : bool {
        kYes = true,
        kNo = false
    };

    /**
     * Resolves MSAA. The resolveRect must already be in the native destination space.
     */
    void resolveRenderTarget(GrRenderTarget*, const SkIRect& resolveRect, ForExternalIO);

    /**
     * Uses the base of the texture to recompute the contents of the other levels.
     */
    bool regenerateMipMapLevels(GrTexture*);

    /**
     * If the backend API has stateful texture bindings, this resets them back to defaults.
     */
    void resetTextureBindings();

    /**
     * Reads a rectangle of pixels from a render target. No sRGB/linear conversions are performed.
     *
     * @param surface           the surface to read from.
     * @param left              left edge of the rectangle to read (inclusive).
     * @param top               top edge of the rectangle to read (inclusive).
     * @param width             width of rectangle to read in pixels.
     * @param height            height of rectangle to read in pixels.
     * @param surfaceColorType  the color type for this use of the surface.
     * @param dstColorType      the color type of the destination buffer.
     * @param buffer            memory to read the rectangle into.
     * @param rowBytes          the number of bytes between consecutive rows. Must be a multiple
     *                          of dstColorType's bytes-per-pixel. Must be tight to width if
     *                          !caps->readPixelsRowBytesSupport().
     *
     * @return  true if the read succeeded, false if not. The read can fail because the surface
     *          doesn't support reading, the color type is not allowed for the format of the
     *          surface, or the rectangle read is not contained in the surface.
     */
    bool readPixels(GrSurface* surface, int left, int top, int width, int height,
                    GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
                    size_t rowBytes);
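
    // Illustrative sketch only ('gpu' and 'surface' are hypothetical; reads a 100x100 block of
    // RGBA_8888 pixels with tight row bytes):
    //
    //     std::vector<uint32_t> pixels(100 * 100);
    //     bool ok = gpu->readPixels(surface, /*left=*/0, /*top=*/0, /*width=*/100, /*height=*/100,
    //                               GrColorType::kRGBA_8888,   // surfaceColorType
    //                               GrColorType::kRGBA_8888,   // dstColorType
    //                               pixels.data(), /*rowBytes=*/100 * sizeof(uint32_t));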

    /**
     * Updates the pixels in a rectangle of a surface. No sRGB/linear conversions are performed.
     *
     * @param surface             the surface to write to.
     * @param left                left edge of the rectangle to write (inclusive).
     * @param top                 top edge of the rectangle to write (inclusive).
     * @param width               width of rectangle to write in pixels.
     * @param height              height of rectangle to write in pixels.
     * @param surfaceColorType    the color type for this use of the surface.
     * @param srcColorType        the color type of the source buffer.
     * @param texels              array of mipmap levels containing texture data. Row bytes must
     *                            be a multiple of srcColorType's bytes-per-pixel. Must be tight
     *                            to level width if !caps->writePixelsRowBytesSupport().
     * @param mipLevelCount       number of levels in 'texels'.
     * @param prepForTexSampling  after writing the pixels, should the surface be prepared for
     *                            texture sampling? This is currently only used by Vulkan for
     *                            inline uploads to set the layout back to sampled after doing
     *                            the upload. Inline uploads currently can happen between draws
     *                            in a single op, so it is not trivial to break up the GrOpsTask
     *                            into two tasks when we see an inline upload. However, once we
     *                            are able to support doing that we can remove this parameter.
     *
     * @return  true if the write succeeded, false if not. The write can fail because the surface
     *          doesn't support writing (e.g. is read only), the color type is not allowed for
     *          the format of the surface, or the rectangle written is not contained in the
     *          surface.
     */
    bool writePixels(GrSurface* surface, int left, int top, int width, int height,
                     GrColorType surfaceColorType, GrColorType srcColorType,
                     const GrMipLevel texels[], int mipLevelCount, bool prepForTexSampling = false);

    /**
     * Helper for the case of a single level.
     */
    bool writePixels(GrSurface* surface, int left, int top, int width, int height,
                     GrColorType surfaceColorType, GrColorType srcColorType, const void* buffer,
                     size_t rowBytes, bool prepForTexSampling = false) {
        GrMipLevel mipLevel = {buffer, rowBytes};
        return this->writePixels(surface, left, top, width, height, surfaceColorType, srcColorType,
                                 &mipLevel, 1, prepForTexSampling);
    }

    /**
     * Updates the pixels in a rectangle of a texture using a buffer. If the texture is MIP
     * mapped, the base level is written to.
     *
     * @param texture           the texture to write to.
     * @param left              left edge of the rectangle to write (inclusive).
     * @param top               top edge of the rectangle to write (inclusive).
     * @param width             width of rectangle to write in pixels.
     * @param height            height of rectangle to write in pixels.
     * @param textureColorType  the color type for this use of the texture.
     * @param bufferColorType   the color type of the transfer buffer's pixel data.
     * @param transferBuffer    GrBuffer to read pixels from (type must be "kXferCpuToGpu").
     * @param offset            offset from the start of the buffer.
     * @param rowBytes          number of bytes between consecutive rows in the buffer. Must be a
     *                          multiple of bufferColorType's bytes-per-pixel. Must be tight to
     *                          width if !caps->writePixelsRowBytesSupport().
     */
    bool transferPixelsTo(GrTexture* texture, int left, int top, int width, int height,
                          GrColorType textureColorType, GrColorType bufferColorType,
                          GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes);
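
    // Illustrative sketch only ('gpu', 'tex', and 'stagingData' are hypothetical; uploads a
    // 64x64 RGBA_8888 block through a CPU-to-GPU transfer buffer):
    //
    //     size_t rowBytes = 64 * 4;
    //     sk_sp<GrGpuBuffer> xfer = gpu->createBuffer(rowBytes * 64,
    //                                                 GrGpuBufferType::kXferCpuToGpu,
    //                                                 kDynamic_GrAccessPattern, stagingData);
    //     bool ok = gpu->transferPixelsTo(tex, 0, 0, 64, 64,
    //                                     GrColorType::kRGBA_8888, GrColorType::kRGBA_8888,
    //                                     xfer.get(), /*offset=*/0, rowBytes);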

    /**
     * Reads the pixels from a rectangle of a surface into a buffer. Use
     * GrCaps::SupportedRead::fOffsetAlignmentForTransferBuffer to determine the requirements for
     * the buffer offset alignment. If the surface is a MIP mapped texture, the base level is read.
     *
     * If successful, the row bytes in the buffer are always:
     *   GrColorTypeBytesPerPixel(bufferColorType) * width
     *
     * Asserts that the caller has passed a properly aligned offset and that the buffer is
     * large enough to hold the result.
     *
     * @param surface           the surface to read from.
     * @param left              left edge of the rectangle to read (inclusive).
     * @param top               top edge of the rectangle to read (inclusive).
     * @param width             width of rectangle to read in pixels.
     * @param height            height of rectangle to read in pixels.
     * @param surfaceColorType  the color type for this use of the surface.
     * @param bufferColorType   the color type of the transfer buffer's pixel data.
     * @param transferBuffer    GrBuffer to write pixels to (type must be "kXferGpuToCpu").
     * @param offset            offset from the start of the buffer.
     */
    bool transferPixelsFrom(GrSurface* surface, int left, int top, int width, int height,
                            GrColorType surfaceColorType, GrColorType bufferColorType,
                            GrGpuBuffer* transferBuffer, size_t offset);
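
    // Illustrative sketch only ('gpu' and 'surface' are hypothetical; the offset must satisfy
    // the alignment reported by GrCaps::SupportedRead::fOffsetAlignmentForTransferBuffer):
    //
    //     sk_sp<GrGpuBuffer> readback = gpu->createBuffer(64 * 64 * 4,
    //                                                     GrGpuBufferType::kXferGpuToCpu,
    //                                                     kDynamic_GrAccessPattern);
    //     bool ok = gpu->transferPixelsFrom(surface, 0, 0, 64, 64,
    //                                       GrColorType::kRGBA_8888, GrColorType::kRGBA_8888,
    //                                       readback.get(), /*offset=*/0);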

    // Called to perform a surface-to-surface copy. Fallbacks to issuing a draw from the src to
    // dst take place at higher levels and this function implements faster copy paths. The rect
    // and point are pre-clipped. The src rect and implied dst rect are guaranteed to be within
    // the src/dst bounds and non-empty. They must also be in their exact device space coords,
    // including already being transformed for origin if need be.
    bool copySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                     const SkIPoint& dstPoint);
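
    // Illustrative sketch only ('gpu', 'dst', and 'src' are hypothetical; copies a 32x32 block
    // from the origin of src to (8, 8) in dst):
    //
    //     bool ok = gpu->copySurface(dst, src,
    //                                SkIRect::MakeWH(32, 32),
    //                                SkIPoint::Make(8, 8));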

    // Queries the per-pixel HW sample locations for the given render target, and then finds or
    // assigns a key that uniquely identifies the sample pattern. The actual sample locations can
    // be retrieved with retrieveSampleLocations().
    int findOrAssignSamplePatternKey(GrRenderTarget*);

    // Retrieves the per-pixel HW sample locations for the given sample pattern key, and, as a
    // by-product, the actual number of samples in use. (This may differ from the number of
    // samples requested by the render target.) Sample locations are returned as 0..1 offsets
    // relative to the top-left corner of the pixel.
    const SkTArray<SkPoint>& retrieveSampleLocations(int samplePatternKey) const {
        return fSamplePatternDictionary.retrieveSampleLocations(samplePatternKey);
    }
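
    // Illustrative sketch only ('gpu' and 'rt' are hypothetical):
    //
    //     int key = gpu->findOrAssignSamplePatternKey(rt);
    //     const SkTArray<SkPoint>& locations = gpu->retrieveSampleLocations(key);
    //     // locations.count() is the effective HW sample count for 'rt'.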

    // Returns a GrOpsRenderPass which GrOpsTasks send draw commands to instead of directly
    // to the Gpu object. The 'bounds' rect is the content rect of the renderTarget.
    virtual GrOpsRenderPass* getOpsRenderPass(
            GrRenderTarget* renderTarget, GrSurfaceOrigin, const SkIRect& bounds,
            const GrOpsRenderPass::LoadAndStoreInfo&,
            const GrOpsRenderPass::StencilLoadAndStoreInfo&,
            const SkTArray<GrSurfaceProxy*, true>& sampledProxies) = 0;

    // Called by GrDrawingManager when flushing.
    // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits). This will
    // also insert any semaphores requested in the GrFlushInfo on the GPU and set the backend
    // semaphores to match the inserted semaphores.
    void executeFlushInfo(GrSurfaceProxy*[], int numProxies,
                          SkSurface::BackendSurfaceAccess access, const GrFlushInfo&,
                          const GrPrepareForExternalIORequests&);

    bool submitToGpu(bool syncCpu);

    virtual void submit(GrOpsRenderPass*) = 0;

    virtual GrFence SK_WARN_UNUSED_RESULT insertFence() = 0;
    virtual bool waitFence(GrFence, uint64_t timeout = 1000) = 0;
    virtual void deleteFence(GrFence) const = 0;

    virtual std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(
            bool isOwned = true) = 0;
    virtual std::unique_ptr<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
            GrResourceProvider::SemaphoreWrapType wrapType, GrWrapOwnership ownership) = 0;
    virtual void insertSemaphore(GrSemaphore* semaphore) = 0;
    virtual void waitSemaphore(GrSemaphore* semaphore) = 0;

    virtual void checkFinishProcs() = 0;

    /**
     * Put this texture in a safe and known state for use across multiple GrContexts. Depending on
     * the backend, this may return a GrSemaphore. If so, other contexts should wait on that
     * semaphore before using this texture.
     */
    virtual std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) = 0;

    ///////////////////////////////////////////////////////////////////////////
    // Debugging and Stats

    class Stats {
    public:
        enum class ProgramCacheResult {
            kHit,      // the program was found in the cache
            kMiss,     // the program was not found in the cache (and was, thus, compiled)
            kPartial,  // a precompiled version was found in the persistent cache

            kLast = kPartial
        };

        static const int kNumProgramCacheResults = (int)ProgramCacheResult::kLast + 1;

#if GR_GPU_STATS
        Stats() = default;

        void reset() { *this = {}; }

        int renderTargetBinds() const { return fRenderTargetBinds; }
        void incRenderTargetBinds() { fRenderTargetBinds++; }

        int shaderCompilations() const { return fShaderCompilations; }
        void incShaderCompilations() { fShaderCompilations++; }

        int textureCreates() const { return fTextureCreates; }
        void incTextureCreates() { fTextureCreates++; }

        int textureUploads() const { return fTextureUploads; }
        void incTextureUploads() { fTextureUploads++; }

        int transfersToTexture() const { return fTransfersToTexture; }
        void incTransfersToTexture() { fTransfersToTexture++; }

        int transfersFromSurface() const { return fTransfersFromSurface; }
        void incTransfersFromSurface() { fTransfersFromSurface++; }

        int stencilAttachmentCreates() const { return fStencilAttachmentCreates; }
        void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }

        int numDraws() const { return fNumDraws; }
        void incNumDraws() { fNumDraws++; }

        int numFailedDraws() const { return fNumFailedDraws; }
        void incNumFailedDraws() { ++fNumFailedDraws; }

        int numSubmitToGpus() const { return fNumSubmitToGpus; }
        void incNumSubmitToGpus() { ++fNumSubmitToGpus; }

        int numScratchTexturesReused() const { return fNumScratchTexturesReused; }
        void incNumScratchTexturesReused() { ++fNumScratchTexturesReused; }

        int numInlineCompilationFailures() const { return fNumInlineCompilationFailures; }
        void incNumInlineCompilationFailures() { ++fNumInlineCompilationFailures; }

        int numInlineProgramCacheResult(ProgramCacheResult stat) const {
            return fInlineProgramCacheStats[(int) stat];
        }
        void incNumInlineProgramCacheResult(ProgramCacheResult stat) {
            ++fInlineProgramCacheStats[(int) stat];
        }

        int numPreCompilationFailures() const { return fNumPreCompilationFailures; }
        void incNumPreCompilationFailures() { ++fNumPreCompilationFailures; }

        int numPreProgramCacheResult(ProgramCacheResult stat) const {
            return fPreProgramCacheStats[(int) stat];
        }
        void incNumPreProgramCacheResult(ProgramCacheResult stat) {
            ++fPreProgramCacheStats[(int) stat];
        }

        int numCompilationFailures() const { return fNumCompilationFailures; }
        void incNumCompilationFailures() { ++fNumCompilationFailures; }

        int numPartialCompilationSuccesses() const { return fNumPartialCompilationSuccesses; }
        void incNumPartialCompilationSuccesses() { ++fNumPartialCompilationSuccesses; }

        int numCompilationSuccesses() const { return fNumCompilationSuccesses; }
        void incNumCompilationSuccesses() { ++fNumCompilationSuccesses; }

#if GR_TEST_UTILS
        void dump(SkString*);
        void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values);
#endif
    private:
        int fRenderTargetBinds = 0;
        int fShaderCompilations = 0;
        int fTextureCreates = 0;
        int fTextureUploads = 0;
        int fTransfersToTexture = 0;
        int fTransfersFromSurface = 0;
        int fStencilAttachmentCreates = 0;
        int fNumDraws = 0;
        int fNumFailedDraws = 0;
        int fNumSubmitToGpus = 0;
        int fNumScratchTexturesReused = 0;

        int fNumInlineCompilationFailures = 0;
        int fInlineProgramCacheStats[kNumProgramCacheResults] = { 0 };

        int fNumPreCompilationFailures = 0;
        int fPreProgramCacheStats[kNumProgramCacheResults] = { 0 };

        int fNumCompilationFailures = 0;
        int fNumPartialCompilationSuccesses = 0;
        int fNumCompilationSuccesses = 0;

#else

#if GR_TEST_UTILS
        void dump(SkString*) {}
        void dumpKeyValuePairs(SkTArray<SkString>*, SkTArray<double>*) {}
#endif
        void incRenderTargetBinds() {}
        void incShaderCompilations() {}
        void incTextureCreates() {}
        void incTextureUploads() {}
        void incTransfersToTexture() {}
        void incTransfersFromSurface() {}
        void incStencilAttachmentCreates() {}
        void incNumDraws() {}
        void incNumFailedDraws() {}
        void incNumSubmitToGpus() {}
        void incNumScratchTexturesReused() {}
        void incNumInlineCompilationFailures() {}
        void incNumInlineProgramCacheResult(ProgramCacheResult stat) {}
        void incNumPreCompilationFailures() {}
        void incNumPreProgramCacheResult(ProgramCacheResult stat) {}
        void incNumCompilationFailures() {}
        void incNumPartialCompilationSuccesses() {}
        void incNumCompilationSuccesses() {}
#endif
    };

    Stats* stats() { return &fStats; }
    void dumpJSON(SkJSONWriter*) const;

    /** Used to initialize a backend texture with a constant color, pixmaps, or
     *  compressed data.
     */
    class BackendTextureData {
    public:
        enum class Type { kColor, kPixmaps, kCompressed };
        BackendTextureData() = default;
        BackendTextureData(const SkColor4f& color) : fType(Type::kColor), fColor(color) {}
        BackendTextureData(const SkPixmap pixmaps[]) : fType(Type::kPixmaps), fPixmaps(pixmaps) {
            SkASSERT(pixmaps);
        }
        BackendTextureData(const void* data, size_t size) : fType(Type::kCompressed) {
            SkASSERT(data);
            fCompressed.fData = data;
            fCompressed.fSize = size;
        }

        Type type() const { return fType; }
        SkColor4f color() const {
            SkASSERT(this->type() == Type::kColor);
            return fColor;
        }

        const SkPixmap& pixmap(int i) const {
            SkASSERT(this->type() == Type::kPixmaps);
            return fPixmaps[i];
        }
        const SkPixmap* pixmaps() const {
            SkASSERT(this->type() == Type::kPixmaps);
            return fPixmaps;
        }

        const void* compressedData() const {
            SkASSERT(this->type() == Type::kCompressed);
            return fCompressed.fData;
        }
        size_t compressedSize() const {
            SkASSERT(this->type() == Type::kCompressed);
            return fCompressed.fSize;
        }

    private:
        Type fType = Type::kColor;
        union {
            SkColor4f fColor = {0, 0, 0, 0};
            const SkPixmap* fPixmaps;
            struct {
                const void* fData;
                size_t fSize;
            } fCompressed;
        };
    };
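
    // Illustrative sketches of the three initialization modes ('pixmapLevels', 'etcData', and
    // 'etcSize' are hypothetical):
    //
    //     GrGpu::BackendTextureData asColor(SkColor4f{1, 0, 0, 1});   // solid red
    //     GrGpu::BackendTextureData asPixmaps(pixmapLevels);          // base level (+ mips)
    //     GrGpu::BackendTextureData asCompressed(etcData, etcSize);   // compressed payload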

    /**
     * Creates a texture directly in the backend API without wrapping it in a GrTexture.
     * Must be matched with a call to deleteBackendTexture().
     *
     * If data is null the texture is uninitialized.
     *
     * If data represents a color then all texture levels are cleared to that color.
     *
     * If data represents pixmaps then it must have either one pixmap or, if mipmapping
     * is specified, a complete MIP hierarchy of pixmaps. Additionally, the mip levels must be
     * sized correctly according to the MIP sizes implied by dimensions. They must all have the
     * same color type and that color type must be compatible with the texture format.
     */
    GrBackendTexture createBackendTexture(SkISize dimensions,
                                          const GrBackendFormat&,
                                          GrRenderable,
                                          GrMipMapped,
                                          GrProtected,
                                          const BackendTextureData*);
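
    // Illustrative sketch only ('gpu' and 'format' are hypothetical; creates a backend texture
    // cleared to opaque black and frees it again):
    //
    //     GrGpu::BackendTextureData data(SkColor4f{0, 0, 0, 1});
    //     GrBackendTexture beTex = gpu->createBackendTexture({256, 256}, format,
    //                                                        GrRenderable::kNo, GrMipMapped::kNo,
    //                                                        GrProtected::kNo, &data);
    //     ...
    //     gpu->deleteBackendTexture(beTex);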

    /**
     * Same as the createBackendTexture case except compressed backend textures can
     * never be renderable.
     */
    GrBackendTexture createCompressedBackendTexture(SkISize dimensions,
                                                    const GrBackendFormat&,
                                                    GrMipMapped,
                                                    GrProtected,
                                                    const BackendTextureData*);

    /**
     * Frees a texture created by createBackendTexture(). If ownership of the backend
     * texture has been transferred to a GrContext using adopt semantics this should not be called.
     */
    virtual void deleteBackendTexture(const GrBackendTexture&) = 0;

    /**
     * In this case we have a program descriptor and a program info but no render target.
     */
    virtual bool compile(const GrProgramDesc&, const GrProgramInfo&) = 0;

    virtual bool precompileShader(const SkData& key, const SkData& data) { return false; }

#if GR_TEST_UTILS
    /** Checks whether a handle represents an actual texture in the backend API that has not been
     *  freed.
     */
    virtual bool isTestingOnlyBackendTexture(const GrBackendTexture&) const = 0;

    virtual GrBackendRenderTarget createTestingOnlyBackendRenderTarget(int w, int h,
                                                                       GrColorType) = 0;

    virtual void deleteTestingOnlyBackendRenderTarget(const GrBackendRenderTarget&) = 0;

    // This is only to be used in GL-specific tests.
    virtual const GrGLContext* glContextForTesting() const { return nullptr; }

    // This is only to be used by testing code.
    virtual void resetShaderCacheForTesting() const {}

    /**
     * Flushes all work to the GPU and forces it to wait until all the work has completed.
     * This is for testing purposes only.
     */
    virtual void testingOnly_flushGpuAndSync() = 0;

    /**
     * Inserted as a pair around a block of code to do a GPU frame capture.
     * Currently only works with the Metal backend.
     */
    virtual void testingOnly_startCapture() {}
    virtual void testingOnly_endCapture() {}
#endif

    // width and height may be larger than the render target's (if the underlying API allows it).
    // Returns nullptr if a compatible stencil attachment could not be created; otherwise the
    // caller owns the ref on the GrStencilAttachment.
    virtual GrStencilAttachment* createStencilAttachmentForRenderTarget(
            const GrRenderTarget*, int width, int height, int numStencilSamples) = 0;

    // Determines whether a texture will need to be copied because the draw requires mips but the
    // texture doesn't have any. This call should only be made if IsACopyNeededForTextureParams
    // fails. If the previous call succeeds, then a copy should be done using those params and the
    // mip mapping requirements will be handled there.
    static bool IsACopyNeededForMips(const GrCaps* caps, const GrTextureProxy* texProxy,
                                     GrSamplerState::Filter filter);

    void handleDirtyContext() {
        if (fResetBits) {
            this->resetContext();
        }
    }

    virtual void storeVkPipelineCacheData() {}

    // http://skbug.com/9739
    virtual void insertManualFramebufferBarrier() {
        SkASSERT(!this->caps()->requiresManualFBBarrierAfterTessellatedStencilDraw());
        SK_ABORT("Manual framebuffer barrier not supported.");
    }

    // Called before certain draws in order to guarantee coherent results from dst reads.
    virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;

    GrStagingBuffer* findStagingBuffer(size_t size);
    GrStagingBuffer::Slice allocateStagingBufferSlice(size_t size);
    virtual std::unique_ptr<GrStagingBuffer> createStagingBuffer(size_t size) { return nullptr; }
    void unmapStagingBuffers();
    void markStagingBufferAvailable(GrStagingBuffer* buffer);

protected:
    static bool MipMapsAreCorrect(SkISize dimensions, GrMipMapped, const BackendTextureData*);
    static bool CompressedDataIsCorrect(SkISize dimensions, SkImage::CompressionType,
                                        GrMipMapped, const BackendTextureData*);

    // Handles cases where a surface will be updated without a call to flushRenderTarget.
    void didWriteToSurface(GrSurface* surface, GrSurfaceOrigin origin, const SkIRect* bounds,
                           uint32_t mipLevels = 1) const;

    typedef SkTInternalLList<GrStagingBuffer> StagingBufferList;

    Stats fStats;
    std::unique_ptr<GrPathRendering> fPathRendering;
    // Subclass must initialize this in its constructor.
    sk_sp<const GrCaps> fCaps;
    std::vector<std::unique_ptr<GrStagingBuffer>> fStagingBuffers;

    StagingBufferList fAvailableStagingBuffers;
    StagingBufferList fActiveStagingBuffers;
    StagingBufferList fBusyStagingBuffers;

private:
    virtual GrBackendTexture onCreateBackendTexture(SkISize dimensions,
                                                    const GrBackendFormat&,
                                                    GrRenderable,
                                                    GrMipMapped,
                                                    GrProtected,
                                                    const BackendTextureData*) = 0;

    virtual GrBackendTexture onCreateCompressedBackendTexture(SkISize dimensions,
                                                              const GrBackendFormat&,
                                                              GrMipMapped,
                                                              GrProtected,
                                                              const BackendTextureData*) = 0;

    // Called when the 3D context state is unknown. Subclass should emit any
    // assumed 3D context state and dirty any state cache.
    virtual void onResetContext(uint32_t resetBits) = 0;

    // Implementation of resetTextureBindings.
    virtual void onResetTextureBindings() {}

    // Queries the effective number of samples in use by the hardware for the given render target,
    // and queries the individual sample locations.
    virtual void querySampleLocations(GrRenderTarget*, SkTArray<SkPoint>*) = 0;

    // Overridden by backend-specific derived class to create objects.
    // Texture size, renderability, format support, and sample count will have already been
    // validated in the base class before onCreateTexture is called.
    // If the ith bit is set in levelClearMask then the ith MIP level should be cleared.
    virtual sk_sp<GrTexture> onCreateTexture(SkISize dimensions,
                                             const GrBackendFormat&,
                                             GrRenderable,
                                             int renderTargetSampleCnt,
                                             SkBudgeted,
                                             GrProtected,
                                             int mipLevelCount,
                                             uint32_t levelClearMask) = 0;
    virtual sk_sp<GrTexture> onCreateCompressedTexture(SkISize dimensions,
                                                       const GrBackendFormat&,
                                                       SkBudgeted,
                                                       GrMipMapped,
                                                       GrProtected,
                                                       const void* data, size_t dataSize) = 0;
    virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&,
                                                  GrWrapOwnership,
                                                  GrWrapCacheable,
                                                  GrIOType) = 0;

    virtual sk_sp<GrTexture> onWrapCompressedBackendTexture(const GrBackendTexture&,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;

    virtual sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                            int sampleCnt,
                                                            GrWrapOwnership,
                                                            GrWrapCacheable) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
                                                                     int sampleCnt) = 0;
    virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
                                                                        const GrVkDrawableInfo&);

    virtual sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType,
                                              GrAccessPattern, const void* data) = 0;

    // Overridden by backend-specific derived class to perform the surface read.
    virtual bool onReadPixels(GrSurface*, int left, int top, int width, int height,
                              GrColorType surfaceColorType, GrColorType dstColorType, void* buffer,
                              size_t rowBytes) = 0;

    // Overridden by backend-specific derived class to perform the surface write.
    virtual bool onWritePixels(GrSurface*, int left, int top, int width, int height,
                               GrColorType surfaceColorType, GrColorType srcColorType,
                               const GrMipLevel texels[], int mipLevelCount,
                               bool prepForTexSampling) = 0;

    // Overridden by backend-specific derived class to perform the texture transfer.
    virtual bool onTransferPixelsTo(GrTexture*, int left, int top, int width, int height,
                                    GrColorType textureColorType, GrColorType bufferColorType,
                                    GrGpuBuffer* transferBuffer, size_t offset,
                                    size_t rowBytes) = 0;
    // Overridden by backend-specific derived class to perform the surface transfer.
    virtual bool onTransferPixelsFrom(GrSurface*, int left, int top, int width, int height,
                                      GrColorType surfaceColorType, GrColorType bufferColorType,
                                      GrGpuBuffer* transferBuffer, size_t offset) = 0;

    // Overridden by backend-specific derived class to perform the resolve.
    virtual void onResolveRenderTarget(GrRenderTarget* target, const SkIRect& resolveRect,
                                       ForExternalIO) = 0;

    // Overridden by backend-specific derived class to perform mip map level regeneration.
    virtual bool onRegenerateMipMapLevels(GrTexture*) = 0;

    // Overridden by backend-specific derived class to perform the copy surface.
    virtual bool onCopySurface(GrSurface* dst, GrSurface* src, const SkIRect& srcRect,
                               const SkIPoint& dstPoint) = 0;

    virtual void addFinishedProc(GrGpuFinishedProc finishedProc,
                                 GrGpuFinishedContext finishedContext) = 0;

    virtual void prepareSurfacesForBackendAccessAndExternalIO(
            GrSurfaceProxy* proxies[], int numProxies, SkSurface::BackendSurfaceAccess access,
            const GrPrepareForExternalIORequests& externalRequests) {}

    virtual bool onSubmitToGpu(bool syncCpu) = 0;

#ifdef SK_ENABLE_DUMP_GPU
    virtual void onDumpJSON(SkJSONWriter*) const {}
#endif

    sk_sp<GrTexture> createTextureCommon(SkISize,
                                         const GrBackendFormat&,
                                         GrRenderable,
                                         int renderTargetSampleCnt,
                                         SkBudgeted,
                                         GrProtected,
                                         int mipLevelCnt,
                                         uint32_t levelClearMask);

    void resetContext() {
        this->onResetContext(fResetBits);
        fResetBits = 0;
    }

#ifdef SK_DEBUG
    bool inStagingBuffers(GrStagingBuffer* b) const;
    void validateStagingBuffers() const;
#endif

    uint32_t fResetBits;
    // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
    GrContext* fContext;
    GrSamplePatternDictionary fSamplePatternDictionary;

    friend class GrPathRendering;
    typedef SkRefCnt INHERITED;
};

#endif