1 | /* |
2 | * Copyright 2017 Google Inc. |
3 | * |
4 | * Use of this source code is governed by a BSD-style license that can be |
5 | * found in the LICENSE file. |
6 | */ |
7 | |
8 | #include <new> |
9 | |
10 | #include "include/core/SkPoint.h" |
11 | #include "include/core/SkPoint3.h" |
12 | #include "include/gpu/GrRecordingContext.h" |
13 | #include "include/private/SkFloatingPoint.h" |
14 | #include "include/private/SkTo.h" |
15 | #include "src/core/SkMathPriv.h" |
16 | #include "src/core/SkMatrixPriv.h" |
17 | #include "src/core/SkRectPriv.h" |
18 | #include "src/gpu/GrAppliedClip.h" |
19 | #include "src/gpu/GrCaps.h" |
20 | #include "src/gpu/GrDrawOpTest.h" |
21 | #include "src/gpu/GrGeometryProcessor.h" |
22 | #include "src/gpu/GrGpu.h" |
23 | #include "src/gpu/GrMemoryPool.h" |
24 | #include "src/gpu/GrOpFlushState.h" |
25 | #include "src/gpu/GrRecordingContextPriv.h" |
26 | #include "src/gpu/GrResourceProvider.h" |
27 | #include "src/gpu/GrResourceProviderPriv.h" |
28 | #include "src/gpu/GrShaderCaps.h" |
29 | #include "src/gpu/GrTexture.h" |
30 | #include "src/gpu/GrTextureProxy.h" |
31 | #include "src/gpu/SkGr.h" |
32 | #include "src/gpu/effects/GrBlendFragmentProcessor.h" |
33 | #include "src/gpu/effects/generated/GrClampFragmentProcessor.h" |
34 | #include "src/gpu/geometry/GrQuad.h" |
35 | #include "src/gpu/geometry/GrQuadBuffer.h" |
36 | #include "src/gpu/geometry/GrQuadUtils.h" |
37 | #include "src/gpu/glsl/GrGLSLVarying.h" |
38 | #include "src/gpu/ops/GrFillRectOp.h" |
39 | #include "src/gpu/ops/GrMeshDrawOp.h" |
40 | #include "src/gpu/ops/GrQuadPerEdgeAA.h" |
41 | #include "src/gpu/ops/GrSimpleMeshDrawOpHelper.h" |
42 | #include "src/gpu/ops/GrTextureOp.h" |
43 | |
44 | namespace { |
45 | |
46 | using Subset = GrQuadPerEdgeAA::Subset; |
47 | using VertexSpec = GrQuadPerEdgeAA::VertexSpec; |
48 | using ColorType = GrQuadPerEdgeAA::ColorType; |
49 | |
50 | // Extracts lengths of vertical and horizontal edges of axis-aligned quad. "width" is the edge |
51 | // between v0 and v2 (or v1 and v3), "height" is the edge between v0 and v1 (or v2 and v3). |
52 | static SkSize axis_aligned_quad_size(const GrQuad& quad) { |
53 | SkASSERT(quad.quadType() == GrQuad::Type::kAxisAligned); |
54 | // Simplification of regular edge length equation, since it's axis aligned and can avoid sqrt |
55 | float dw = sk_float_abs(quad.x(2) - quad.x(0)) + sk_float_abs(quad.y(2) - quad.y(0)); |
56 | float dh = sk_float_abs(quad.x(1) - quad.x(0)) + sk_float_abs(quad.y(1) - quad.y(0)); |
57 | return {dw, dh}; |
58 | } |
59 | |
// Determines whether the sampler's filter and mipmap settings can actually change
// the rendered result for the given src -> dst quad mapping. Returns
// {filter has effect, mipmapping has effect}. Non-axis-aligned quads are treated
// conservatively: both are reported as having an effect.
static std::tuple<bool /* filter */,
                  bool /* mipmap */>
filter_and_mm_have_effect(const GrQuad& srcQuad, const GrQuad& dstQuad) {
    // If not axis-aligned in src or dst, then always say it has an effect
    if (srcQuad.quadType() != GrQuad::Type::kAxisAligned ||
        dstQuad.quadType() != GrQuad::Type::kAxisAligned) {
        return {true, true};
    }

    SkRect srcRect;
    SkRect dstRect;
    if (srcQuad.asRect(&srcRect) && dstQuad.asRect(&dstRect)) {
        // Disable filtering when there is no scaling (width and height are the same), and the
        // top-left corners have the same fraction (so src and dst snap to the pixel grid
        // identically).
        SkASSERT(srcRect.isSorted());
        bool filter = srcRect.width() != dstRect.width() || srcRect.height() != dstRect.height() ||
                      SkScalarFraction(srcRect.fLeft) != SkScalarFraction(dstRect.fLeft) ||
                      SkScalarFraction(srcRect.fTop) != SkScalarFraction(dstRect.fTop);
        // Mipmaps only matter when minifying, i.e. the dst is smaller than the src.
        bool mm = srcRect.width() > dstRect.width() || srcRect.height() > dstRect.height();
        return {filter, mm};
    }
    // Extract edge lengths
    SkSize srcSize = axis_aligned_quad_size(srcQuad);
    SkSize dstSize = axis_aligned_quad_size(dstQuad);
    // Although the quads are axis-aligned, the local coordinate system is transformed such
    // that fractionally-aligned sample centers will not align with the device coordinate system
    // So disable filtering when edges are the same length and both srcQuad and dstQuad
    // 0th vertex is integer aligned.
    bool filter = srcSize != dstSize ||
                  !SkScalarIsInt(srcQuad.x(0)) ||
                  !SkScalarIsInt(srcQuad.y(0)) ||
                  !SkScalarIsInt(dstQuad.x(0)) ||
                  !SkScalarIsInt(dstQuad.y(0));
    bool mm = srcSize.fWidth > dstSize.fWidth || srcSize.fHeight > dstSize.fHeight;
    return {filter, mm};
}
97 | |
98 | // Describes function for normalizing src coords: [x * iw, y * ih + yOffset] can represent |
99 | // regular and rectangular textures, w/ or w/o origin correction. |
// Produced by proxy_normalization_params() and consumed by normalize_src_quad()
// and normalize_and_inset_subset() below.
struct NormalizationParams {
    float fIW;      // 1 / width of texture, or 1.0 for texture rectangles
    float fInvH;    // 1 / height of texture, or 1.0 for tex rects, X -1 if bottom-left origin
    float fYOffset; // 0 for top-left origin, height of [normalized] tex if bottom-left
};
105 | static NormalizationParams proxy_normalization_params(const GrSurfaceProxy* proxy, |
106 | GrSurfaceOrigin origin) { |
107 | // Whether or not the proxy is instantiated, this is the size its texture will be, so we can |
108 | // normalize the src coordinates up front. |
109 | SkISize dimensions = proxy->backingStoreDimensions(); |
110 | float iw, ih, h; |
111 | if (proxy->backendFormat().textureType() == GrTextureType::kRectangle) { |
112 | iw = ih = 1.f; |
113 | h = dimensions.height(); |
114 | } else { |
115 | iw = 1.f / dimensions.width(); |
116 | ih = 1.f / dimensions.height(); |
117 | h = 1.f; |
118 | } |
119 | |
120 | if (origin == kBottomLeft_GrSurfaceOrigin) { |
121 | return {iw, -ih, h}; |
122 | } else { |
123 | return {iw, ih, 0.0f}; |
124 | } |
125 | } |
126 | |
127 | // Normalize the subset. If 'subsetRect' is null, it is assumed no subset constraint is desired, |
128 | // so a sufficiently large rect is returned even if the quad ends up batched with an op that uses |
129 | // subsets overall. When there is a subset it will be inset based on the filter mode. Normalization |
130 | // and y-flipping are applied as indicated by NormalizationParams. |
static SkRect normalize_and_inset_subset(GrSamplerState::Filter filter,
                                         const NormalizationParams& params,
                                         const SkRect* subsetRect) {
    static constexpr SkRect kLargeRect = {-100000, -100000, 1000000, 1000000};
    if (!subsetRect) {
        // Either the quad has no subset constraint and is batched with a subset constrained op
        // (in which case we want a subset that doesn't restrict normalized tex coords), or the
        // entire op doesn't use the subset, in which case the returned value is ignored.
        return kLargeRect;
    }

    // Lanes are ordered (left, top, right, bottom). 'flipHi' negates the
    // right/bottom lanes so a single floor()/min() can act as floor-or-ceil and
    // min-or-max per lane as appropriate.
    auto ltrb = skvx::Vec<4, float>::Load(subsetRect);
    auto flipHi = skvx::Vec<4, float>({1.f, 1.f, -1.f, -1.f});
    if (filter == GrSamplerState::Filter::kNearest) {
        // Make sure our insetting puts us at pixel centers.
        ltrb = skvx::floor(ltrb*flipHi)*flipHi;
    }
    // Inset with pin to the rect center.
    ltrb += skvx::Vec<4, float>({.5f, .5f, -.5f, -.5f});
    auto mid = (skvx::shuffle<2, 3, 0, 1>(ltrb) + ltrb)*0.5f;
    ltrb = skvx::min(ltrb*flipHi, mid*flipHi)*flipHi;

    // Normalize and offset
    ltrb = ltrb * skvx::Vec<4, float>{params.fIW, params.fInvH, params.fIW, params.fInvH} +
            skvx::Vec<4, float>{0.f, params.fYOffset, 0.f, params.fYOffset};
    if (params.fInvH < 0.f) {
        // Flip top and bottom to keep the rect sorted when loaded back to SkRect.
        ltrb = skvx::shuffle<0, 3, 2, 1>(ltrb);
    }

    SkRect out;
    ltrb.store(&out);
    return out;
}
165 | |
166 | // Normalizes logical src coords and corrects for origin |
167 | static void normalize_src_quad(const NormalizationParams& params, |
168 | GrQuad* srcQuad) { |
169 | // The src quad should not have any perspective |
170 | SkASSERT(!srcQuad->hasPerspective()); |
171 | skvx::Vec<4, float> xs = srcQuad->x4f() * params.fIW; |
172 | skvx::Vec<4, float> ys = srcQuad->y4f() * params.fInvH + params.fYOffset; |
173 | xs.store(srcQuad->xs()); |
174 | ys.store(srcQuad->ys()); |
175 | } |
176 | |
177 | // Count the number of proxy runs in the entry set. This usually is already computed by |
178 | // SkGpuDevice, but when the BatchLengthLimiter chops the set up it must determine a new proxy count |
179 | // for each split. |
180 | static int proxy_run_count(const GrRenderTargetContext::TextureSetEntry set[], int count) { |
181 | int actualProxyRunCount = 0; |
182 | const GrSurfaceProxy* lastProxy = nullptr; |
183 | for (int i = 0; i < count; ++i) { |
184 | if (set[i].fProxyView.proxy() != lastProxy) { |
185 | actualProxyRunCount++; |
186 | lastProxy = set[i].fProxyView.proxy(); |
187 | } |
188 | } |
189 | return actualProxyRunCount; |
190 | } |
191 | |
192 | static bool safe_to_ignore_subset_rect(GrAAType aaType, GrSamplerState::Filter filter, |
193 | const DrawQuad& quad, const SkRect& subsetRect) { |
194 | // If both the device and local quad are both axis-aligned, and filtering is off, the local quad |
195 | // can push all the way up to the edges of the the subset rect and the sampler shouldn't |
196 | // overshoot. Unfortunately, antialiasing adds enough jitter that we can only rely on this in |
197 | // the non-antialiased case. |
198 | SkRect localBounds = quad.fLocal.bounds(); |
199 | if (aaType == GrAAType::kNone && |
200 | filter == GrSamplerState::Filter::kNearest && |
201 | quad.fDevice.quadType() == GrQuad::Type::kAxisAligned && |
202 | quad.fLocal.quadType() == GrQuad::Type::kAxisAligned && |
203 | subsetRect.contains(localBounds)) { |
204 | |
205 | return true; |
206 | } |
207 | |
208 | // If the subset rect is inset by at least 0.5 pixels into the local quad's bounds, the |
209 | // sampler shouldn't overshoot, even when antialiasing and filtering is taken into account. |
210 | if (subsetRect.makeInset(0.5f, 0.5f).contains(localBounds)) { |
211 | return true; |
212 | } |
213 | |
214 | // The subset rect cannot be ignored safely. |
215 | return false; |
216 | } |
217 | |
218 | /** |
219 | * Op that implements GrTextureOp::Make. It draws textured quads. Each quad can modulate against a |
220 | * the texture by color. The blend with the destination is always src-over. The edges are non-AA. |
221 | */ |
222 | class TextureOp final : public GrMeshDrawOp { |
223 | public: |
    // Factory for the single-quad form: allocates a TextureOp from the context's
    // op memory pool. 'subset' may be null when no strict src-rect constraint is
    // needed.
    static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
                                          GrSurfaceProxyView proxyView,
                                          sk_sp<GrColorSpaceXform> textureXform,
                                          GrSamplerState::Filter filter,
                                          GrSamplerState::MipmapMode mm,
                                          const SkPMColor4f& color,
                                          GrTextureOp::Saturate saturate,
                                          GrAAType aaType,
                                          DrawQuad* quad,
                                          const SkRect* subset) {
        GrOpMemoryPool* pool = context->priv().opMemoryPool();
        return pool->allocate<TextureOp>(std::move(proxyView), std::move(textureXform), filter, mm,
                                         color, saturate, aaType, quad, subset);
    }
238 | |
    // Factory for the entry-set form: allocates a variable-length TextureOp whose
    // trailing fViewCountPairs array holds one entry per proxy run, and
    // placement-news the op into the raw pool allocation.
    static std::unique_ptr<GrDrawOp> Make(GrRecordingContext* context,
                                          GrRenderTargetContext::TextureSetEntry set[],
                                          int cnt,
                                          int proxyRunCnt,
                                          GrSamplerState::Filter filter,
                                          GrSamplerState::MipmapMode mm,
                                          GrTextureOp::Saturate saturate,
                                          GrAAType aaType,
                                          SkCanvas::SrcRectConstraint constraint,
                                          const SkMatrix& viewMatrix,
                                          sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
        // Allocate size based on proxyRunCnt, since that determines number of ViewCountPairs.
        SkASSERT(proxyRunCnt <= cnt);

        // "- 1" because sizeof(TextureOp) already includes the first ViewCountPair.
        size_t size = sizeof(TextureOp) + sizeof(ViewCountPair) * (proxyRunCnt - 1);
        GrOpMemoryPool* pool = context->priv().opMemoryPool();
        void* mem = pool->allocate(size);
        return std::unique_ptr<GrDrawOp>(
                new (mem) TextureOp(set, cnt, proxyRunCnt, filter, mm, saturate, aaType, constraint,
                                    viewMatrix, std::move(textureColorSpaceXform)));
    }
260 | |
    ~TextureOp() override {
        // Index 0 is the declared member and is destroyed automatically; the
        // placement-new'd tail entries (see the entry-set constructor) must be
        // destroyed explicitly.
        for (unsigned p = 1; p < fMetadata.fProxyCount; ++p) {
            fViewCountPairs[p].~ViewCountPair();
        }
    }
266 | |
267 | const char* name() const override { return "TextureOp" ; } |
268 | |
    void visitProxies(const VisitProxyFunc& func) const override {
        // Report each proxy with whether this op will sample its mipmaps.
        bool mipped = (fMetadata.mipmapMode() != GrSamplerState::MipmapMode::kNone);
        for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
            func(fViewCountPairs[p].fProxy.get(), GrMipmapped(mipped));
        }
        // fDesc is only present after on(Pre)PrepareDraws has run.
        if (fDesc && fDesc->fProgramInfo) {
            fDesc->fProgramInfo->visitFPProxies(func);
        }
    }
278 | |
#ifdef SK_DEBUG
    // Debug-only sanity check that Metadata::fTotalQuadCount can represent the
    // resource provider's quad limits without overflow.
    static void ValidateResourceLimits() {
        // The op implementation has an upper bound on the number of quads that it can represent.
        // However, the resource manager imposes its own limit on the number of quads, which should
        // always be lower than the numerical limit this op can hold.
        using CountStorage = decltype(Metadata::fTotalQuadCount);
        CountStorage maxQuadCount = std::numeric_limits<CountStorage>::max();
        // GrResourceProvider::Max...() is typed as int, so don't compare across signed/unsigned.
        int resourceLimit = SkTo<int>(maxQuadCount);
        SkASSERT(GrResourceProvider::MaxNumAAQuads() <= resourceLimit &&
                 GrResourceProvider::MaxNumNonAAQuads() <= resourceLimit);
    }
#endif
292 | |
    // Resolves the op-wide color type as the widest MinColorType over every
    // quad's color (fMetadata.fColorType is deferred to here; see Metadata).
    GrProcessorSet::Analysis finalize(
            const GrCaps& caps, const GrAppliedClip*, bool hasMixedSampledCoverage,
            GrClampType clampType) override {
        SkASSERT(fMetadata.colorType() == ColorType::kNone);
        auto iter = fQuads.metadata();
        while(iter.next()) {
            auto colorType = GrQuadPerEdgeAA::MinColorType(iter->fColor);
            fMetadata.fColorType = std::max(fMetadata.fColorType, static_cast<uint16_t>(colorType));
        }
        return GrProcessorSet::EmptySetAnalysis();
    }
304 | |
    // HW antialiasing is only requested when the resolved AA type is MSAA.
    FixedFunctionFlags fixedFunctionFlags() const override {
        return fMetadata.aaType() == GrAAType::kMSAA ? FixedFunctionFlags::kUsesHWAA
                                                     : FixedFunctionFlags::kNone;
    }
309 | |
310 | DEFINE_OP_CLASS_ID |
311 | |
312 | private: |
313 | friend class ::GrOpMemoryPool; |
314 | |
    // Per-quad metadata stored alongside each quad in fQuads: the modulating
    // color, the (possibly no-op) subset rect, and the per-edge AA flags packed
    // into 4 bits.
    struct ColorSubsetAndAA {
        ColorSubsetAndAA(const SkPMColor4f& color, const SkRect& subsetRect, GrQuadAAFlags aaFlags)
                : fColor(color)
                , fSubsetRect(subsetRect)
                , fAAFlags(static_cast<uint16_t>(aaFlags)) {
            // Verify the flags survived the narrowing into the 4-bit field.
            SkASSERT(fAAFlags == static_cast<uint16_t>(aaFlags));
        }

        SkPMColor4f fColor;
        // If the op doesn't use subsets, this is ignored. If the op uses subsets and the specific
        // entry does not, this rect will equal kLargeRect, so it automatically has no effect.
        SkRect fSubsetRect;
        unsigned fAAFlags : 4;

        GrQuadAAFlags aaFlags() const { return static_cast<GrQuadAAFlags>(fAAFlags); }
    };
331 | |
    struct ViewCountPair {
        // Normally this would be a GrSurfaceProxyView, but GrTextureOp applies the GrOrigin right
        // away so it doesn't need to be stored, and all ViewCountPairs in an op have the same
        // swizzle so that is stored in the op metadata.
        sk_sp<GrSurfaceProxy> fProxy;
        // Number of consecutive quads in fQuads that sample from fProxy.
        int fQuadCnt;
    };
339 | |
340 | // TextureOp and ViewCountPair are 8 byte aligned. This is packed into 8 bytes to minimally |
341 | // increase the size of the op; increasing the op size can have a surprising impact on |
342 | // performance (since texture ops are one of the most commonly used in an app). |
    struct Metadata {
        // AAType must be filled after initialization; ColorType is determined in finalize()
        Metadata(const GrSwizzle& swizzle,
                 GrSamplerState::Filter filter,
                 GrSamplerState::MipmapMode mm,
                 GrQuadPerEdgeAA::Subset subset,
                 GrTextureOp::Saturate saturate)
                : fSwizzle(swizzle)
                , fProxyCount(1)
                , fTotalQuadCount(1)
                , fFilter(static_cast<uint16_t>(filter))
                , fMipmapMode(static_cast<uint16_t>(mm))
                , fAAType(static_cast<uint16_t>(GrAAType::kNone))
                , fColorType(static_cast<uint16_t>(ColorType::kNone))
                , fSubset(static_cast<uint16_t>(subset))
                , fSaturate(static_cast<uint16_t>(saturate)) {}

        GrSwizzle fSwizzle; // sizeof(GrSwizzle) == uint16_t
        uint16_t fProxyCount;
        // This will be >= fProxyCount, since a proxy may be drawn multiple times
        uint16_t fTotalQuadCount;

        // These must be based on uint16_t to help MSVC's pack bitfields optimally
        uint16_t fFilter : 2;     // GrSamplerState::Filter
        uint16_t fMipmapMode : 2; // GrSamplerState::MipmapMode
        uint16_t fAAType : 2;     // GrAAType
        uint16_t fColorType : 2;  // GrQuadPerEdgeAA::ColorType
        uint16_t fSubset : 1;     // bool
        uint16_t fSaturate : 1;   // bool
        uint16_t fUnused : 6;     // # of bits left before Metadata exceeds 8 bytes

        // Typed accessors that unpack the bitfields back into their enum types.
        GrSamplerState::Filter filter() const {
            return static_cast<GrSamplerState::Filter>(fFilter);
        }
        GrSamplerState::MipmapMode mipmapMode() const {
            return static_cast<GrSamplerState::MipmapMode>(fMipmapMode);
        }
        GrAAType aaType() const { return static_cast<GrAAType>(fAAType); }
        ColorType colorType() const { return static_cast<ColorType>(fColorType); }
        Subset subset() const { return static_cast<Subset>(fSubset); }
        GrTextureOp::Saturate saturate() const {
            return static_cast<GrTextureOp::Saturate>(fSaturate);
        }

        // Each 2-bit field must be able to hold its full enum range.
        static_assert(GrSamplerState::kFilterCount <= 4);
        static_assert(kGrAATypeCount <= 4);
        static_assert(GrQuadPerEdgeAA::kColorTypeCount <= 4);
    };
    static_assert(sizeof(Metadata) == 8);
392 | |
393 | // This descriptor is used to store the draw info we decide on during on(Pre)PrepareDraws. We |
394 | // store the data in a separate struct in order to minimize the size of the TextureOp. |
395 | // Historically, increasing the TextureOp's size has caused surprising perf regressions, but we |
396 | // may want to re-evaluate whether this is still necessary. |
397 | // |
398 | // In the onPrePrepareDraws case it is allocated in the creation-time opData arena, and |
399 | // allocatePrePreparedVertices is also called. |
400 | // |
401 | // In the onPrepareDraws case this descriptor is allocated in the flush-time arena (i.e., as |
402 | // part of the flushState). |
    struct Desc {
        VertexSpec fVertexSpec;
        int fNumProxies = 0;
        int fNumTotalQuads = 0;

        // This member variable is only used by 'onPrePrepareDraws'.
        char* fPrePreparedVertices = nullptr;

        GrProgramInfo* fProgramInfo = nullptr;

        sk_sp<const GrBuffer> fIndexBuffer;
        sk_sp<const GrBuffer> fVertexBuffer;
        int fBaseVertex;

        // How big should 'fVertices' be to hold all the vertex data?
        size_t totalSizeInBytes() const {
            return this->totalNumVertices() * fVertexSpec.vertexSize();
        }

        // Total vertex count across all quads in the (possibly chained) op.
        int totalNumVertices() const {
            return fNumTotalQuads * fVertexSpec.verticesPerQuad();
        }

        // Reserves record-time storage for the vertices (onPrePrepareDraws path only).
        void allocatePrePreparedVertices(SkArenaAlloc* arena) {
            fPrePreparedVertices = arena->makeArrayDefault<char>(this->totalSizeInBytes());
        }
    };
430 | // If subsetRect is not null it will be used to apply a strict src rect-style constraint. |
    // Single-quad constructor. Resolves the AA type, drops an unnecessary subset
    // constraint when safe, normalizes src coords, and appends the quad(s).
    TextureOp(GrSurfaceProxyView proxyView,
              sk_sp<GrColorSpaceXform> textureColorSpaceXform,
              GrSamplerState::Filter filter,
              GrSamplerState::MipmapMode mm,
              const SkPMColor4f& color,
              GrTextureOp::Saturate saturate,
              GrAAType aaType,
              DrawQuad* quad,
              const SkRect* subsetRect)
            : INHERITED(ClassID())
            , fQuads(1, true /* includes locals */)
            , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
            , fDesc(nullptr)
            , fMetadata(proxyView.swizzle(), filter, mm, Subset(!!subsetRect), saturate) {
        // Clean up disparities between the overall aa type and edge configuration and apply
        // optimizations based on the rect and matrix when appropriate
        GrQuadUtils::ResolveAAType(aaType, quad->fEdgeFlags, quad->fDevice,
                                   &aaType, &quad->fEdgeFlags);
        fMetadata.fAAType = static_cast<uint16_t>(aaType);

        // We expect our caller to have already caught this optimization.
        SkASSERT(!subsetRect ||
                 !subsetRect->contains(proxyView.proxy()->backingStoreBoundsRect()));

        // We may have had a strict constraint with nearest filter solely due to possible AA bloat.
        // Try to identify cases where the subsetting isn't actually necessary, and skip it.
        if (subsetRect) {
            if (safe_to_ignore_subset_rect(aaType, filter, *quad, *subsetRect)) {
                subsetRect = nullptr;
                fMetadata.fSubset = static_cast<uint16_t>(Subset::kNo);
            }
        }

        // Normalize src coordinates and the subset (if set)
        NormalizationParams params = proxy_normalization_params(proxyView.proxy(),
                                                                proxyView.origin());
        normalize_src_quad(params, &quad->fLocal);
        SkRect subset = normalize_and_inset_subset(filter, params, subsetRect);

        // Set bounds before clipping so we don't have to worry about unioning the bounds of
        // the two potential quads (GrQuad::bounds() is perspective-safe).
        this->setBounds(quad->fDevice.bounds(), HasAABloat(aaType == GrAAType::kCoverage),
                        IsHairline::kNo);

        // appendQuad may split the quad in two for perspective W-clipping.
        int quadCount = this->appendQuad(quad, color, subset);
        fViewCountPairs[0] = {proxyView.detachProxy(), quadCount};
    }
478 | |
    // Entry-set constructor. Walks 'set', coalescing consecutive entries that
    // share a proxy into ViewCountPair runs, resolving the net AA/filter/mipmap/
    // subset state as the maximum any entry requires, and appending every quad.
    TextureOp(GrRenderTargetContext::TextureSetEntry set[],
              int cnt,
              int proxyRunCnt,
              GrSamplerState::Filter filter,
              GrSamplerState::MipmapMode mm,
              GrTextureOp::Saturate saturate,
              GrAAType aaType,
              SkCanvas::SrcRectConstraint constraint,
              const SkMatrix& viewMatrix,
              sk_sp<GrColorSpaceXform> textureColorSpaceXform)
            : INHERITED(ClassID())
            , fQuads(cnt, true /* includes locals */)
            , fTextureColorSpaceXform(std::move(textureColorSpaceXform))
            , fDesc(nullptr)
            , fMetadata(set[0].fProxyView.swizzle(),
                        GrSamplerState::Filter::kNearest,
                        GrSamplerState::MipmapMode::kNone,
                        Subset::kNo,
                        saturate) {
        // Update counts to reflect the batch op
        fMetadata.fProxyCount = SkToUInt(proxyRunCnt);
        fMetadata.fTotalQuadCount = SkToUInt(cnt);

        SkRect bounds = SkRectPriv::MakeLargestInverted();

        GrAAType netAAType = GrAAType::kNone; // aa type maximally compatible with all dst rects
        Subset netSubset = Subset::kNo;
        GrSamplerState::Filter netFilter = GrSamplerState::Filter::kNearest;
        GrSamplerState::MipmapMode netMM = GrSamplerState::MipmapMode::kNone;

        const GrSurfaceProxy* curProxy = nullptr;

        // 'q' is the index in 'set' and fQuadBuffer; 'p' is the index in fViewCountPairs and only
        // increases when set[q]'s proxy changes.
        int p = 0;
        for (int q = 0; q < cnt; ++q) {
            // NOTE(review): asserts against set[0] rather than set[q] —
            // presumably all entries share mipmap status; confirm.
            SkASSERT(mm == GrSamplerState::MipmapMode::kNone ||
                     (set[0].fProxyView.proxy()->asTextureProxy()->mipmapped() ==
                      GrMipmapped::kYes));
            if (q == 0) {
                // We do not placement new the first ViewCountPair since that one is allocated and
                // initialized as part of the GrTextureOp creation.
                fViewCountPairs[0].fProxy = set[0].fProxyView.detachProxy();
                fViewCountPairs[0].fQuadCnt = 0;
                curProxy = fViewCountPairs[0].fProxy.get();
            } else if (set[q].fProxyView.proxy() != curProxy) {
                // We must placement new the ViewCountPairs here so that the sk_sps in the
                // GrSurfaceProxyView get initialized properly.
                new(&fViewCountPairs[++p])ViewCountPair({set[q].fProxyView.detachProxy(), 0});

                curProxy = fViewCountPairs[p].fProxy.get();
                SkASSERT(GrTextureProxy::ProxiesAreCompatibleAsDynamicState(
                        curProxy, fViewCountPairs[0].fProxy.get()));
                SkASSERT(fMetadata.fSwizzle == set[q].fProxyView.swizzle());
            } // else another quad referencing the same proxy

            // Combine the shared view matrix with any per-entry pre-view matrix.
            SkMatrix ctm = viewMatrix;
            if (set[q].fPreViewMatrix) {
                ctm.preConcat(*set[q].fPreViewMatrix);
            }

            // Use dstRect/srcRect unless dstClip is provided, in which case derive new source
            // coordinates by mapping dstClipQuad by the dstRect to srcRect transform.
            DrawQuad quad;
            if (set[q].fDstClipQuad) {
                quad.fDevice = GrQuad::MakeFromSkQuad(set[q].fDstClipQuad, ctm);

                SkPoint srcPts[4];
                GrMapRectPoints(set[q].fDstRect, set[q].fSrcRect, set[q].fDstClipQuad, srcPts, 4);
                quad.fLocal = GrQuad::MakeFromSkQuad(srcPts, SkMatrix::I());
            } else {
                quad.fDevice = GrQuad::MakeFromRect(set[q].fDstRect, ctm);
                quad.fLocal = GrQuad(set[q].fSrcRect);
            }

            if (netFilter != filter || netMM != mm) {
                // The only way netFilter != filter is if linear is requested and we haven't yet
                // found a quad that requires linear (so net is still nearest). Similar for mip
                // mapping.
                SkASSERT(filter == netFilter ||
                         (netFilter == GrSamplerState::Filter::kNearest && filter > netFilter));
                SkASSERT(mm == netMM ||
                         (netMM == GrSamplerState::MipmapMode::kNone && mm > netMM));
                auto [mustFilter, mustMM] = filter_and_mm_have_effect(quad.fLocal, quad.fDevice);
                if (mustFilter && filter != GrSamplerState::Filter::kNearest) {
                    netFilter = filter;
                }
                if (mustMM && mm != GrSamplerState::MipmapMode::kNone) {
                    netMM = mm;
                }
            }

            // Update overall bounds of the op as the union of all quads
            bounds.joinPossiblyEmptyRect(quad.fDevice.bounds());

            // Determine the AA type for the quad, then merge with net AA type
            GrAAType aaForQuad;
            GrQuadUtils::ResolveAAType(aaType, set[q].fAAFlags, quad.fDevice,
                                       &aaForQuad, &quad.fEdgeFlags);

            // Resolve sets aaForQuad to aaType or None, there is never a change between aa methods
            SkASSERT(aaForQuad == GrAAType::kNone || aaForQuad == aaType);
            if (netAAType == GrAAType::kNone && aaForQuad != GrAAType::kNone) {
                netAAType = aaType;
            }

            // Calculate metadata for the entry
            const SkRect* subsetForQuad = nullptr;
            if (constraint == SkCanvas::kStrict_SrcRectConstraint) {
                // Check (briefly) if the subset rect is actually needed for this set entry.
                SkRect* subsetRect = &set[q].fSrcRect;
                if (!subsetRect->contains(curProxy->backingStoreBoundsRect())) {
                    if (!safe_to_ignore_subset_rect(aaForQuad, filter, quad, *subsetRect)) {
                        netSubset = Subset::kYes;
                        subsetForQuad = subsetRect;
                    }
                }
            }

            // Normalize the src quads and apply origin
            NormalizationParams proxyParams = proxy_normalization_params(
                    curProxy, set[q].fProxyView.origin());
            normalize_src_quad(proxyParams, &quad.fLocal);

            // This subset may represent a no-op, otherwise it will have the origin and dimensions
            // of the texture applied to it. Insetting for bilinear filtering is deferred until
            // on[Pre]Prepare so that the overall filter can be lazily determined.
            SkRect subset = normalize_and_inset_subset(filter, proxyParams, subsetForQuad);

            // Always append a quad (or 2 if perspective clipped), it just may refer back to a prior
            // ViewCountPair (this frequently happens when Chrome draws 9-patches).
            fViewCountPairs[p].fQuadCnt += this->appendQuad(&quad, set[q].fColor, subset);
        }
        // The # of proxy switches should match what was provided (+1 because we incremented p
        // when a new proxy was encountered).
        SkASSERT((p + 1) == fMetadata.fProxyCount);
        SkASSERT(fQuads.count() == fMetadata.fTotalQuadCount);

        fMetadata.fAAType = static_cast<uint16_t>(netAAType);
        fMetadata.fFilter = static_cast<uint16_t>(netFilter);
        fMetadata.fSubset = static_cast<uint16_t>(netSubset);

        this->setBounds(bounds, HasAABloat(netAAType == GrAAType::kCoverage), IsHairline::kNo);
    }
623 | |
624 | int appendQuad(DrawQuad* quad, const SkPMColor4f& color, const SkRect& subset) { |
625 | DrawQuad ; |
626 | // Only clip when there's anti-aliasing. When non-aa, the GPU clips just fine and there's |
627 | // no inset/outset math that requires w > 0. |
628 | int quadCount = quad->fEdgeFlags != GrQuadAAFlags::kNone ? |
629 | GrQuadUtils::ClipToW0(quad, &extra) : 1; |
630 | if (quadCount == 0) { |
631 | // We can't discard the op at this point, but disable AA flags so it won't go through |
632 | // inset/outset processing |
633 | quad->fEdgeFlags = GrQuadAAFlags::kNone; |
634 | quadCount = 1; |
635 | } |
636 | fQuads.append(quad->fDevice, {color, subset, quad->fEdgeFlags}, &quad->fLocal); |
637 | if (quadCount > 1) { |
638 | fQuads.append(extra.fDevice, {color, subset, extra.fEdgeFlags}, &extra.fLocal); |
639 | fMetadata.fTotalQuadCount++; |
640 | } |
641 | return quadCount; |
642 | } |
643 | |
    GrProgramInfo* programInfo() override {
        // Although this Op implements its own onPrePrepareDraws it calls GrMeshDrawOps' version so
        // this entry point will be called. fDesc is null until on(Pre)PrepareDraws runs.
        return (fDesc) ? fDesc->fProgramInfo : nullptr;
    }
649 | |
    // Builds the GrProgramInfo for this draw: a textured GrQuadPerEdgeAA geometry
    // processor plus a src-over pipeline, stored on fDesc.
    void onCreateProgramInfo(const GrCaps* caps,
                             SkArenaAlloc* arena,
                             const GrSurfaceProxyView* writeView,
                             GrAppliedClip&& appliedClip,
                             const GrXferProcessor::DstProxyView& dstProxyView) override {
        SkASSERT(fDesc);

        GrGeometryProcessor* gp;

        {
            // All proxies in the op are compatible, so the first one's format is
            // representative (see the asserts in the entry-set constructor).
            const GrBackendFormat& backendFormat =
                    fViewCountPairs[0].fProxy->backendFormat();

            GrSamplerState samplerState = GrSamplerState(GrSamplerState::WrapMode::kClamp,
                                                         fMetadata.filter());

            gp = GrQuadPerEdgeAA::MakeTexturedProcessor(
                    arena, fDesc->fVertexSpec, *caps->shaderCaps(), backendFormat, samplerState,
                    fMetadata.fSwizzle, std::move(fTextureColorSpaceXform), fMetadata.saturate());

            SkASSERT(fDesc->fVertexSpec.vertexSize() == gp->vertexStride());
        }

        auto pipelineFlags = (GrAAType::kMSAA == fMetadata.aaType()) ?
                GrPipeline::InputFlags::kHWAntialias : GrPipeline::InputFlags::kNone;

        fDesc->fProgramInfo = GrSimpleMeshDrawOpHelper::CreateProgramInfo(
                caps, arena, writeView, std::move(appliedClip), dstProxyView, gp,
                GrProcessorSet::MakeEmptySet(), fDesc->fVertexSpec.primitiveType(),
                pipelineFlags);
    }
681 | |
    // Record-time (DDL) preparation: characterizes the draw, fills vertex data
    // into arena storage, then defers to the base class to create/register the
    // program info.
    void onPrePrepareDraws(GrRecordingContext* context,
                           const GrSurfaceProxyView* writeView,
                           GrAppliedClip* clip,
                           const GrXferProcessor::DstProxyView& dstProxyView) override {
        TRACE_EVENT0("skia.gpu" , TRACE_FUNC);

        SkDEBUGCODE(this->validate();)
        SkASSERT(!fDesc);

        SkArenaAlloc* arena = context->priv().recordTimeAllocator();

        // fDesc and its vertices live in the record-time arena on this path.
        fDesc = arena->make<Desc>();
        this->characterize(fDesc);
        fDesc->allocatePrePreparedVertices(arena);
        FillInVertices(*context->priv().caps(), this, fDesc, fDesc->fPrePreparedVertices);

        // This will call onCreateProgramInfo and register the created program with the DDL.
        this->INHERITED::onPrePrepareDraws(context, writeView, clip, dstProxyView);
    }
701 | |
    // Tessellates every quad of every op in the chain headed by 'texOp' into 'vertexData',
    // using the vertex layout described by desc->fVertexSpec. 'vertexData' must hold at least
    // desc->totalSizeInBytes() bytes. Debug builds cross-check the per-proxy quad counts
    // against the bytes actually emitted by the tessellator.
    // NOTE(review): 'caps' is not referenced in this body — confirm whether it is still needed.
    static void FillInVertices(const GrCaps& caps, TextureOp* texOp, Desc* desc, char* vertexData) {
        SkASSERT(vertexData);

        int totQuadsSeen = 0;
        SkDEBUGCODE(int totVerticesSeen = 0;)
        SkDEBUGCODE(const size_t vertexSize = desc->fVertexSpec.vertexSize());

        GrQuadPerEdgeAA::Tessellator tessellator(desc->fVertexSpec, vertexData);
        for (const auto& op : ChainRange<TextureOp>(texOp)) {
            auto iter = op.fQuads.iterator();
            for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) {
                const int quadCnt = op.fViewCountPairs[p].fQuadCnt;
                SkDEBUGCODE(int meshVertexCnt = quadCnt * desc->fVertexSpec.verticesPerQuad());

                // Emit one quad per iterator entry, up to this proxy's quad count.
                for (int i = 0; i < quadCnt && iter.next(); ++i) {
                    SkASSERT(iter.isLocalValid());
                    const ColorSubsetAndAA& info = iter.metadata();

                    tessellator.append(iter.deviceQuad(), iter.localQuad(), info.fColor,
                                       info.fSubsetRect, info.aaFlags());
                }

                // The tessellator's write cursor must advance exactly one mesh's worth of bytes.
                SkASSERT((totVerticesSeen + meshVertexCnt) * vertexSize
                         == (size_t)(tessellator.vertices() - vertexData));

                totQuadsSeen += quadCnt;
                SkDEBUGCODE(totVerticesSeen += meshVertexCnt);
                SkASSERT(totQuadsSeen * desc->fVertexSpec.verticesPerQuad() == totVerticesSeen);
            }

            // If quad counts per proxy were calculated correctly, the entire iterator
            // should have been consumed.
            SkASSERT(!iter.next());
        }

        SkASSERT(desc->totalSizeInBytes() == (size_t)(tessellator.vertices() - vertexData));
        SkASSERT(totQuadsSeen == desc->fNumTotalQuads);
        SkASSERT(totVerticesSeen == desc->totalNumVertices());
    }
741 | |
742 | #ifdef SK_DEBUG |
743 | static int validate_op(GrTextureType textureType, |
744 | GrAAType aaType, |
745 | GrSwizzle swizzle, |
746 | const TextureOp* op) { |
747 | SkASSERT(op->fMetadata.fSwizzle == swizzle); |
748 | |
749 | int quadCount = 0; |
750 | for (unsigned p = 0; p < op->fMetadata.fProxyCount; ++p) { |
751 | auto* proxy = op->fViewCountPairs[p].fProxy->asTextureProxy(); |
752 | quadCount += op->fViewCountPairs[p].fQuadCnt; |
753 | SkASSERT(proxy); |
754 | SkASSERT(proxy->textureType() == textureType); |
755 | } |
756 | |
757 | SkASSERT(aaType == op->fMetadata.aaType()); |
758 | return quadCount; |
759 | } |
760 | |
    void validate() const override {
        // NOTE: Since this is debug-only code, we use the virtual asTextureProxy()
        auto textureType = fViewCountPairs[0].fProxy->asTextureProxy()->textureType();
        GrAAType aaType = fMetadata.aaType();
        GrSwizzle swizzle = fMetadata.fSwizzle;

        // Every op in the chain must agree on texture type, AA type, and swizzle. Sum the
        // quad counts while walking both directions of the chain...
        int quadCount = validate_op(textureType, aaType, swizzle, this);

        for (const GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) {
            quadCount += validate_op(textureType, aaType, swizzle,
                                     static_cast<const TextureOp*>(tmp));
        }

        for (const GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) {
            quadCount += validate_op(textureType, aaType, swizzle,
                                     static_cast<const TextureOp*>(tmp));
        }

        // ...and check that it agrees with the chain-wide accounting.
        SkASSERT(quadCount == this->numChainedQuads());
    }
781 | |
782 | #endif |
783 | |
#if GR_TEST_UTILS
    // Test-only: number of quads in this op alone (not the whole chain).
    int numQuads() const final { return this->totNumQuads(); }
#endif
787 | |
788 | void characterize(Desc* desc) const { |
789 | SkDEBUGCODE(this->validate();) |
790 | |
791 | GrQuad::Type quadType = GrQuad::Type::kAxisAligned; |
792 | ColorType colorType = ColorType::kNone; |
793 | GrQuad::Type srcQuadType = GrQuad::Type::kAxisAligned; |
794 | Subset subset = Subset::kNo; |
795 | GrAAType overallAAType = fMetadata.aaType(); |
796 | |
797 | desc->fNumProxies = 0; |
798 | desc->fNumTotalQuads = 0; |
799 | int maxQuadsPerMesh = 0; |
800 | |
801 | for (const auto& op : ChainRange<TextureOp>(this)) { |
802 | if (op.fQuads.deviceQuadType() > quadType) { |
803 | quadType = op.fQuads.deviceQuadType(); |
804 | } |
805 | if (op.fQuads.localQuadType() > srcQuadType) { |
806 | srcQuadType = op.fQuads.localQuadType(); |
807 | } |
808 | if (op.fMetadata.subset() == Subset::kYes) { |
809 | subset = Subset::kYes; |
810 | } |
811 | colorType = std::max(colorType, op.fMetadata.colorType()); |
812 | desc->fNumProxies += op.fMetadata.fProxyCount; |
813 | |
814 | for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) { |
815 | maxQuadsPerMesh = std::max(op.fViewCountPairs[p].fQuadCnt, maxQuadsPerMesh); |
816 | } |
817 | desc->fNumTotalQuads += op.totNumQuads(); |
818 | |
819 | if (op.fMetadata.aaType() == GrAAType::kCoverage) { |
820 | overallAAType = GrAAType::kCoverage; |
821 | } |
822 | } |
823 | |
824 | SkASSERT(desc->fNumTotalQuads == this->numChainedQuads()); |
825 | |
826 | SkASSERT(!CombinedQuadCountWillOverflow(overallAAType, false, desc->fNumTotalQuads)); |
827 | |
828 | auto indexBufferOption = GrQuadPerEdgeAA::CalcIndexBufferOption(overallAAType, |
829 | maxQuadsPerMesh); |
830 | |
831 | desc->fVertexSpec = VertexSpec(quadType, colorType, srcQuadType, /* hasLocal */ true, |
832 | subset, overallAAType, /* alpha as coverage */ true, |
833 | indexBufferOption); |
834 | |
835 | SkASSERT(desc->fNumTotalQuads <= GrQuadPerEdgeAA::QuadLimit(indexBufferOption)); |
836 | } |
837 | |
838 | int totNumQuads() const { |
839 | #ifdef SK_DEBUG |
840 | int tmp = 0; |
841 | for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) { |
842 | tmp += fViewCountPairs[p].fQuadCnt; |
843 | } |
844 | SkASSERT(tmp == fMetadata.fTotalQuadCount); |
845 | #endif |
846 | |
847 | return fMetadata.fTotalQuadCount; |
848 | } |
849 | |
850 | int numChainedQuads() const { |
851 | int numChainedQuads = this->totNumQuads(); |
852 | |
853 | for (const GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) { |
854 | numChainedQuads += ((const TextureOp*)tmp)->totNumQuads(); |
855 | } |
856 | |
857 | for (const GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) { |
858 | numChainedQuads += ((const TextureOp*)tmp)->totNumQuads(); |
859 | } |
860 | |
861 | return numChainedQuads; |
862 | } |
863 | |
864 | // onPrePrepareDraws may or may not have been called at this point |
865 | void onPrepareDraws(Target* target) override { |
866 | TRACE_EVENT0("skia.gpu" , TRACE_FUNC); |
867 | |
868 | SkDEBUGCODE(this->validate();) |
869 | |
870 | SkASSERT(!fDesc || fDesc->fPrePreparedVertices); |
871 | |
872 | if (!fDesc) { |
873 | SkArenaAlloc* arena = target->allocator(); |
874 | fDesc = arena->make<Desc>(); |
875 | this->characterize(fDesc); |
876 | SkASSERT(!fDesc->fPrePreparedVertices); |
877 | } |
878 | |
879 | size_t vertexSize = fDesc->fVertexSpec.vertexSize(); |
880 | |
881 | void* vdata = target->makeVertexSpace(vertexSize, fDesc->totalNumVertices(), |
882 | &fDesc->fVertexBuffer, &fDesc->fBaseVertex); |
883 | if (!vdata) { |
884 | SkDebugf("Could not allocate vertices\n" ); |
885 | return; |
886 | } |
887 | |
888 | if (fDesc->fVertexSpec.needsIndexBuffer()) { |
889 | fDesc->fIndexBuffer = GrQuadPerEdgeAA::GetIndexBuffer( |
890 | target, fDesc->fVertexSpec.indexBufferOption()); |
891 | if (!fDesc->fIndexBuffer) { |
892 | SkDebugf("Could not allocate indices\n" ); |
893 | return; |
894 | } |
895 | } |
896 | |
897 | if (fDesc->fPrePreparedVertices) { |
898 | memcpy(vdata, fDesc->fPrePreparedVertices, fDesc->totalSizeInBytes()); |
899 | } else { |
900 | FillInVertices(target->caps(), this, fDesc, (char*) vdata); |
901 | } |
902 | } |
903 | |
904 | void onExecute(GrOpFlushState* flushState, const SkRect& chainBounds) override { |
905 | if (!fDesc->fVertexBuffer) { |
906 | return; |
907 | } |
908 | |
909 | if (fDesc->fVertexSpec.needsIndexBuffer() && !fDesc->fIndexBuffer) { |
910 | return; |
911 | } |
912 | |
913 | if (!fDesc->fProgramInfo) { |
914 | this->createProgramInfo(flushState); |
915 | SkASSERT(fDesc->fProgramInfo); |
916 | } |
917 | |
918 | flushState->bindPipelineAndScissorClip(*fDesc->fProgramInfo, chainBounds); |
919 | flushState->bindBuffers(std::move(fDesc->fIndexBuffer), nullptr, |
920 | std::move(fDesc->fVertexBuffer)); |
921 | |
922 | int totQuadsSeen = 0; |
923 | SkDEBUGCODE(int numDraws = 0;) |
924 | for (const auto& op : ChainRange<TextureOp>(this)) { |
925 | for (unsigned p = 0; p < op.fMetadata.fProxyCount; ++p) { |
926 | const int quadCnt = op.fViewCountPairs[p].fQuadCnt; |
927 | SkASSERT(numDraws < fDesc->fNumProxies); |
928 | flushState->bindTextures(fDesc->fProgramInfo->primProc(), |
929 | *op.fViewCountPairs[p].fProxy, |
930 | fDesc->fProgramInfo->pipeline()); |
931 | GrQuadPerEdgeAA::IssueDraw(flushState->caps(), flushState->opsRenderPass(), |
932 | fDesc->fVertexSpec, totQuadsSeen, quadCnt, |
933 | fDesc->totalNumVertices(), fDesc->fBaseVertex); |
934 | totQuadsSeen += quadCnt; |
935 | SkDEBUGCODE(++numDraws;) |
936 | } |
937 | } |
938 | |
939 | SkASSERT(totQuadsSeen == fDesc->fNumTotalQuads); |
940 | SkASSERT(numDraws == fDesc->fNumProxies); |
941 | } |
942 | |
943 | void propagateCoverageAAThroughoutChain() { |
944 | fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage); |
945 | |
946 | for (GrOp* tmp = this->prevInChain(); tmp; tmp = tmp->prevInChain()) { |
947 | TextureOp* tex = static_cast<TextureOp*>(tmp); |
948 | SkASSERT(tex->fMetadata.aaType() == GrAAType::kCoverage || |
949 | tex->fMetadata.aaType() == GrAAType::kNone); |
950 | tex->fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage); |
951 | } |
952 | |
953 | for (GrOp* tmp = this->nextInChain(); tmp; tmp = tmp->nextInChain()) { |
954 | TextureOp* tex = static_cast<TextureOp*>(tmp); |
955 | SkASSERT(tex->fMetadata.aaType() == GrAAType::kCoverage || |
956 | tex->fMetadata.aaType() == GrAAType::kNone); |
957 | tex->fMetadata.fAAType = static_cast<uint16_t>(GrAAType::kCoverage); |
958 | } |
959 | } |
960 | |
    // Decides whether 'this' and 't' can be merged into one op (kMerged), linked as a chain
    // that shares one program but issues separate draws (kMayChain), or neither
    // (kCannotCombine). The checks are ordered from cheap rejections to the proxy
    // comparison that gates chaining.
    CombineResult onCombineIfPossible(GrOp* t, GrRecordingContext::Arenas*,
                                      const GrCaps& caps) override {
        TRACE_EVENT0("skia.gpu" , TRACE_FUNC);
        auto* that = t->cast<TextureOp>();

        SkDEBUGCODE(this->validate();)
        SkDEBUGCODE(that->validate();)

        if (fDesc || that->fDesc) {
            // This should never happen (since only DDL recorded ops should be prePrepared)
            // but, in any case, we should never combine ops that have been prePrepared
            return CombineResult::kCannotCombine;
        }

        if (fMetadata.subset() != that->fMetadata.subset()) {
            // It is technically possible to combine operations across subset modes, but performance
            // testing suggests it's better to make more draw calls where some take advantage of
            // the more optimal shader path without coordinate clamping.
            return CombineResult::kCannotCombine;
        }
        if (!GrColorSpaceXform::Equals(fTextureColorSpaceXform.get(),
                                       that->fTextureColorSpaceXform.get())) {
            return CombineResult::kCannotCombine;
        }

        // Differing AA types can still merge if one can be upgraded to coverage AA.
        bool upgradeToCoverageAAOnMerge = false;
        if (fMetadata.aaType() != that->fMetadata.aaType()) {
            if (!CanUpgradeAAOnMerge(fMetadata.aaType(), that->fMetadata.aaType())) {
                return CombineResult::kCannotCombine;
            }
            upgradeToCoverageAAOnMerge = true;
        }

        // The merged chain must still fit within the index-buffer quad limit.
        if (CombinedQuadCountWillOverflow(fMetadata.aaType(), upgradeToCoverageAAOnMerge,
                                          this->numChainedQuads() + that->numChainedQuads())) {
            return CombineResult::kCannotCombine;
        }

        if (fMetadata.saturate() != that->fMetadata.saturate()) {
            return CombineResult::kCannotCombine;
        }
        if (fMetadata.filter() != that->fMetadata.filter()) {
            return CombineResult::kCannotCombine;
        }
        if (fMetadata.mipmapMode() != that->fMetadata.mipmapMode()) {
            return CombineResult::kCannotCombine;
        }
        if (fMetadata.fSwizzle != that->fMetadata.fSwizzle) {
            return CombineResult::kCannotCombine;
        }
        const auto* thisProxy = fViewCountPairs[0].fProxy.get();
        const auto* thatProxy = that->fViewCountPairs[0].fProxy.get();
        if (fMetadata.fProxyCount > 1 || that->fMetadata.fProxyCount > 1 ||
            thisProxy != thatProxy) {
            // We can't merge across different proxies. Check if 'this' can be chained with 'that'.
            if (GrTextureProxy::ProxiesAreCompatibleAsDynamicState(thisProxy, thatProxy) &&
                caps.dynamicStateArrayGeometryProcessorTextureSupport() &&
                fMetadata.aaType() == that->fMetadata.aaType()) {
                // We only allow chaining when the aaTypes match bc otherwise the AA type
                // reported by the chain can be inconsistent. That is, since chaining doesn't
                // propagate revised AA information throughout the chain, the head of the chain
                // could have an AA setting of kNone while the chain as a whole could have a
                // setting of kCoverage. This inconsistency would then interfere with the validity
                // of the CombinedQuadCountWillOverflow calls.
                // This problem doesn't occur w/ merging bc we do propagate the AA information
                // (in propagateCoverageAAThroughoutChain) below.
                return CombineResult::kMayChain;
            }
            return CombineResult::kCannotCombine;
        }

        // Merge: widen subset/color state and absorb 'that's quads into this op.
        fMetadata.fSubset |= that->fMetadata.fSubset;
        fMetadata.fColorType = std::max(fMetadata.fColorType, that->fMetadata.fColorType);

        // Concatenate quad lists together
        fQuads.concat(that->fQuads);
        fViewCountPairs[0].fQuadCnt += that->fQuads.count();
        fMetadata.fTotalQuadCount += that->fQuads.count();

        if (upgradeToCoverageAAOnMerge) {
            // This merger may be the start of a concatenation of two chains. When one
            // of the chains mutates its AA the other must follow suit or else the above AA
            // check may prevent later ops from chaining together. A specific example of this is
            // when chain2 is prepended onto chain1:
            //   chain1 (that): opA (non-AA/mergeable) opB (non-AA/non-mergeable)
            //   chain2 (this): opC (cov-AA/non-mergeable) opD (cov-AA/mergeable)
            // W/o this propagation, after opD & opA merge, opB and opC would say they couldn't
            // chain - which would stop the concatenation process.
            this->propagateCoverageAAThroughoutChain();
            that->propagateCoverageAAThroughoutChain();
        }

        SkDEBUGCODE(this->validate();)

        return CombineResult::kMerged;
    }
1057 | |
#if GR_TEST_UTILS
    // Test-only: dumps every quad of every proxy run in this op, including color, subset
    // rect, UV coordinates, and device coordinates.
    SkString onDumpInfo() const override {
        SkString str = SkStringPrintf("# draws: %d\n" , fQuads.count());
        auto iter = fQuads.iterator();
        for (unsigned p = 0; p < fMetadata.fProxyCount; ++p) {
            // uniqueID().asUInt() returns uint32_t, so format with %u (was %d, a
            // signed/unsigned printf mismatch).
            str.appendf("Proxy ID: %u, Filter: %d, MM: %d\n" ,
                        fViewCountPairs[p].fProxy->uniqueID().asUInt(),
                        static_cast<int>(fMetadata.fFilter),
                        static_cast<int>(fMetadata.fMipmapMode));
            int i = 0;
            while(i < fViewCountPairs[p].fQuadCnt && iter.next()) {
                const GrQuad* quad = iter.deviceQuad();
                // A quad without valid local coords prints a default GrQuad for its UVs.
                GrQuad uv = iter.isLocalValid() ? *(iter.localQuad()) : GrQuad();
                const ColorSubsetAndAA& info = iter.metadata();
                str.appendf(
                        "%d: Color: 0x%08x, Subset(%d): [L: %.2f, T: %.2f, R: %.2f, B: %.2f]\n"
                        "  UVs  [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n"
                        "  Quad [(%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f), (%.2f, %.2f)]\n" ,
                        i, info.fColor.toBytes_RGBA(), fMetadata.fSubset, info.fSubsetRect.fLeft,
                        info.fSubsetRect.fTop, info.fSubsetRect.fRight, info.fSubsetRect.fBottom,
                        quad->point(0).fX, quad->point(0).fY, quad->point(1).fX, quad->point(1).fY,
                        quad->point(2).fX, quad->point(2).fY, quad->point(3).fX, quad->point(3).fY,
                        uv.point(0).fX, uv.point(0).fY, uv.point(1).fX, uv.point(1).fY,
                        uv.point(2).fX, uv.point(2).fY, uv.point(3).fX, uv.point(3).fY);

                i++;
            }
        }
        return str;
    }
#endif
1089 | |
    // Per-quad geometry plus per-quad color/subset/AA metadata for this op.
    GrQuadBuffer<ColorSubsetAndAA> fQuads;
    sk_sp<GrColorSpaceXform> fTextureColorSpaceXform;
    // Most state of TextureOp is packed into these two fields to minimize the op's size.
    // Historically, increasing the size of TextureOp has caused surprising perf regressions, so
    // consider/measure changes with care.
    Desc* fDesc;        // arena-allocated; null until onPrePrepareDraws/onPrepareDraws runs
    Metadata fMetadata;

    // This field must go last. When allocating this op, we will allocate extra space to hold
    // additional ViewCountPairs immediately after the op's allocation so we can treat this
    // as an fProxyCnt-length array.
    ViewCountPair fViewCountPairs[1];

    typedef GrMeshDrawOp INHERITED;
1104 | }; |
1105 | |
1106 | } // anonymous namespace |
1107 | |
#if GR_TEST_UTILS
// Test-only: exposes the anonymous-namespace TextureOp's class ID to test code.
uint32_t GrTextureOp::ClassID() {
    return TextureOp::ClassID();
}
#endif
1113 | |
1114 | std::unique_ptr<GrDrawOp> GrTextureOp::Make(GrRecordingContext* context, |
1115 | GrSurfaceProxyView proxyView, |
1116 | SkAlphaType alphaType, |
1117 | sk_sp<GrColorSpaceXform> textureXform, |
1118 | GrSamplerState::Filter filter, |
1119 | GrSamplerState::MipmapMode mm, |
1120 | const SkPMColor4f& color, |
1121 | Saturate saturate, |
1122 | SkBlendMode blendMode, |
1123 | GrAAType aaType, |
1124 | DrawQuad* quad, |
1125 | const SkRect* subset) { |
1126 | // Apply optimizations that are valid whether or not using GrTextureOp or GrFillRectOp |
1127 | if (subset && subset->contains(proxyView.proxy()->backingStoreBoundsRect())) { |
1128 | // No need for a shader-based subset if hardware clamping achieves the same effect |
1129 | subset = nullptr; |
1130 | } |
1131 | |
1132 | if (filter != GrSamplerState::Filter::kNearest || mm != GrSamplerState::MipmapMode::kNone) { |
1133 | auto [mustFilter, mustMM] = filter_and_mm_have_effect(quad->fLocal, quad->fDevice); |
1134 | if (!mustFilter) { |
1135 | filter = GrSamplerState::Filter::kNearest; |
1136 | } |
1137 | if (!mustMM) { |
1138 | mm = GrSamplerState::MipmapMode::kNone; |
1139 | } |
1140 | } |
1141 | |
1142 | if (blendMode == SkBlendMode::kSrcOver) { |
1143 | return TextureOp::Make(context, std::move(proxyView), std::move(textureXform), filter, mm, |
1144 | color, saturate, aaType, std::move(quad), subset); |
1145 | } else { |
1146 | // Emulate complex blending using GrFillRectOp |
1147 | GrPaint paint; |
1148 | paint.setColor4f(color); |
1149 | paint.setXPFactory(SkBlendMode_AsXPFactory(blendMode)); |
1150 | |
1151 | std::unique_ptr<GrFragmentProcessor> fp; |
1152 | if (subset) { |
1153 | const auto& caps = *context->priv().caps(); |
1154 | SkRect localRect; |
1155 | if (quad->fLocal.asRect(&localRect)) { |
1156 | fp = GrTextureEffect::MakeSubset(std::move(proxyView), alphaType, SkMatrix::I(), |
1157 | filter, *subset, localRect, caps); |
1158 | } else { |
1159 | fp = GrTextureEffect::MakeSubset(std::move(proxyView), alphaType, SkMatrix::I(), |
1160 | filter, *subset, caps); |
1161 | } |
1162 | } else { |
1163 | fp = GrTextureEffect::Make(std::move(proxyView), alphaType, SkMatrix::I(), filter); |
1164 | } |
1165 | fp = GrColorSpaceXformEffect::Make(std::move(fp), std::move(textureXform)); |
1166 | fp = GrBlendFragmentProcessor::Make(std::move(fp), nullptr, SkBlendMode::kModulate); |
1167 | if (saturate == GrTextureOp::Saturate::kYes) { |
1168 | fp = GrClampFragmentProcessor::Make(std::move(fp), /*clampToPremul=*/false); |
1169 | } |
1170 | paint.setColorFragmentProcessor(std::move(fp)); |
1171 | return GrFillRectOp::Make(context, std::move(paint), aaType, quad); |
1172 | } |
1173 | } |
1174 | |
1175 | // A helper class that assists in breaking up bulk API quad draws into manageable chunks. |
1176 | class GrTextureOp::BatchSizeLimiter { |
1177 | public: |
1178 | BatchSizeLimiter(GrRenderTargetContext* rtc, |
1179 | const GrClip* clip, |
1180 | GrRecordingContext* context, |
1181 | int numEntries, |
1182 | GrSamplerState::Filter filter, |
1183 | GrSamplerState::MipmapMode mm, |
1184 | GrTextureOp::Saturate saturate, |
1185 | SkCanvas::SrcRectConstraint constraint, |
1186 | const SkMatrix& viewMatrix, |
1187 | sk_sp<GrColorSpaceXform> textureColorSpaceXform) |
1188 | : fRTC(rtc) |
1189 | , fClip(clip) |
1190 | , fContext(context) |
1191 | , fFilter(filter) |
1192 | , fMipmapMode(mm) |
1193 | , fSaturate(saturate) |
1194 | , fConstraint(constraint) |
1195 | , fViewMatrix(viewMatrix) |
1196 | , fTextureColorSpaceXform(textureColorSpaceXform) |
1197 | , fNumLeft(numEntries) {} |
1198 | |
1199 | void createOp(GrRenderTargetContext::TextureSetEntry set[], |
1200 | int clumpSize, |
1201 | GrAAType aaType) { |
1202 | int clumpProxyCount = proxy_run_count(&set[fNumClumped], clumpSize); |
1203 | std::unique_ptr<GrDrawOp> op = TextureOp::Make(fContext, |
1204 | &set[fNumClumped], |
1205 | clumpSize, |
1206 | clumpProxyCount, |
1207 | fFilter, |
1208 | fMipmapMode, |
1209 | fSaturate, |
1210 | aaType, |
1211 | fConstraint, |
1212 | fViewMatrix, |
1213 | fTextureColorSpaceXform); |
1214 | fRTC->addDrawOp(fClip, std::move(op)); |
1215 | |
1216 | fNumLeft -= clumpSize; |
1217 | fNumClumped += clumpSize; |
1218 | } |
1219 | |
1220 | int numLeft() const { return fNumLeft; } |
1221 | int baseIndex() const { return fNumClumped; } |
1222 | |
1223 | private: |
1224 | GrRenderTargetContext* fRTC; |
1225 | const GrClip* fClip; |
1226 | GrRecordingContext* fContext; |
1227 | GrSamplerState::Filter fFilter; |
1228 | GrSamplerState::MipmapMode fMipmapMode; |
1229 | GrTextureOp::Saturate fSaturate; |
1230 | SkCanvas::SrcRectConstraint fConstraint; |
1231 | const SkMatrix& fViewMatrix; |
1232 | sk_sp<GrColorSpaceXform> fTextureColorSpaceXform; |
1233 | |
1234 | int fNumLeft; |
1235 | int fNumClumped = 0; // also the offset for the start of the next clump |
1236 | }; |
1237 | |
// Greedily clump quad draws together until the index buffer limit is exceeded. Three paths:
// (1) non-src-over blending or no dynamic-state texture support -> one op per entry;
// (2) the whole set fits under both quad limits -> one big op;
// (3) otherwise walk the set, batching runs of AA / non-AA quads under their respective
//     limits via BatchSizeLimiter.
void GrTextureOp::AddTextureSetOps(GrRenderTargetContext* rtc,
                                   const GrClip* clip,
                                   GrRecordingContext* context,
                                   GrRenderTargetContext::TextureSetEntry set[],
                                   int cnt,
                                   int proxyRunCnt,
                                   GrSamplerState::Filter filter,
                                   GrSamplerState::MipmapMode mm,
                                   Saturate saturate,
                                   SkBlendMode blendMode,
                                   GrAAType aaType,
                                   SkCanvas::SrcRectConstraint constraint,
                                   const SkMatrix& viewMatrix,
                                   sk_sp<GrColorSpaceXform> textureColorSpaceXform) {
    // Ensure that the index buffer limits are lower than the proxy and quad count limits of
    // the op's metadata so we don't need to worry about overflow.
    SkDEBUGCODE(TextureOp::ValidateResourceLimits();)
    SkASSERT(proxy_run_count(set, cnt) == proxyRunCnt);

    // First check if we can support batches as a single op
    if (blendMode != SkBlendMode::kSrcOver ||
        !context->priv().caps()->dynamicStateArrayGeometryProcessorTextureSupport()) {
        // Append each entry as its own op; these may still be GrTextureOps if the blend mode is
        // src-over but the backend doesn't support dynamic state changes. Otherwise Make()
        // automatically creates the appropriate GrFillRectOp to emulate GrTextureOp.
        SkMatrix ctm;
        for (int i = 0; i < cnt; ++i) {
            // Each entry may carry its own pre-view matrix on top of the shared viewMatrix.
            ctm = viewMatrix;
            if (set[i].fPreViewMatrix) {
                ctm.preConcat(*set[i].fPreViewMatrix);
            }

            DrawQuad quad;
            quad.fEdgeFlags = set[i].fAAFlags;
            if (set[i].fDstClipQuad) {
                // The dst is an arbitrary clip quad; derive matching local coords by mapping
                // the clip points from dst-rect space into src-rect space.
                quad.fDevice = GrQuad::MakeFromSkQuad(set[i].fDstClipQuad, ctm);

                SkPoint srcPts[4];
                GrMapRectPoints(set[i].fDstRect, set[i].fSrcRect, set[i].fDstClipQuad, srcPts, 4);
                quad.fLocal = GrQuad::MakeFromSkQuad(srcPts, SkMatrix::I());
            } else {
                quad.fDevice = GrQuad::MakeFromRect(set[i].fDstRect, ctm);
                quad.fLocal = GrQuad(set[i].fSrcRect);
            }

            // Strict constraint means sampling must not bleed outside the src rect.
            const SkRect* subset = constraint == SkCanvas::kStrict_SrcRectConstraint
                    ? &set[i].fSrcRect : nullptr;

            auto op = Make(context, set[i].fProxyView, set[i].fSrcAlphaType, textureColorSpaceXform,
                           filter, mm, set[i].fColor, saturate, blendMode, aaType, &quad, subset);
            rtc->addDrawOp(clip, std::move(op));
        }
        return;
    }

    // Second check if we can always just make a single op and avoid the extra iteration
    // needed to clump things together.
    if (cnt <= std::min(GrResourceProvider::MaxNumNonAAQuads(),
                        GrResourceProvider::MaxNumAAQuads())) {
        auto op = TextureOp::Make(context, set, cnt, proxyRunCnt, filter, mm, saturate, aaType,
                                  constraint, viewMatrix, std::move(textureColorSpaceXform));
        rtc->addDrawOp(clip, std::move(op));
        return;
    }

    BatchSizeLimiter state(rtc, clip, context, cnt, filter, mm, saturate, constraint, viewMatrix,
                           std::move(textureColorSpaceXform));

    // kNone and kMSAA never get altered
    if (aaType == GrAAType::kNone || aaType == GrAAType::kMSAA) {
        // Clump these into series of MaxNumNonAAQuads-sized GrTextureOps
        while (state.numLeft() > 0) {
            int clumpSize = std::min(state.numLeft(), GrResourceProvider::MaxNumNonAAQuads());

            state.createOp(set, clumpSize, aaType);
        }
    } else {
        // kCoverage can be downgraded to kNone. Note that the following is conservative. kCoverage
        // can also get downgraded to kNone if all the quads are on integer coordinates and
        // axis-aligned.
        SkASSERT(aaType == GrAAType::kCoverage);

        while (state.numLeft() > 0) {
            // runningAA tracks whether the clump built so far needs coverage AA.
            GrAAType runningAA = GrAAType::kNone;
            bool clumped = false;

            for (int i = 0; i < state.numLeft(); ++i) {
                int absIndex = state.baseIndex() + i;

                if (set[absIndex].fAAFlags != GrQuadAAFlags::kNone ||
                    runningAA == GrAAType::kCoverage) {

                    if (i >= GrResourceProvider::MaxNumAAQuads()) {
                        // Here we either need to boost the AA type to kCoverage, but doing so with
                        // all the accumulated quads would overflow, or we have a set of AA quads
                        // that has just gotten too large. In either case, calve off the existing
                        // quads as their own TextureOp.
                        state.createOp(
                                set,
                                runningAA == GrAAType::kNone ? i : GrResourceProvider::MaxNumAAQuads(),
                                runningAA);  // maybe downgrading AA here
                        clumped = true;
                        break;
                    }

                    runningAA = GrAAType::kCoverage;
                } else if (runningAA == GrAAType::kNone) {

                    if (i >= GrResourceProvider::MaxNumNonAAQuads()) {
                        // Here we've found a consistent batch of non-AA quads that has gotten too
                        // large. Calve it off as its own GrTextureOp.
                        state.createOp(set, GrResourceProvider::MaxNumNonAAQuads(),
                                       GrAAType::kNone);  // definitely downgrading AA here
                        clumped = true;
                        break;
                    }
                }
            }

            if (!clumped) {
                // We ran through the above loop w/o hitting a limit. Spit out this last clump of
                // quads and call it a day.
                state.createOp(set, state.numLeft(), runningAA);  // maybe downgrading AA here
            }
        }
    }
}
1366 | |
1367 | #if GR_TEST_UTILS |
1368 | #include "include/gpu/GrRecordingContext.h" |
1369 | #include "src/gpu/GrProxyProvider.h" |
1370 | #include "src/gpu/GrRecordingContextPriv.h" |
1371 | |
// Fuzz-test factory: builds a TextureOp (via GrTextureOp::Make) with randomized proxy
// dimensions, origin, mipmapping, sampler state, quad geometry, AA flags, and subset.
GR_DRAW_OP_TEST_DEFINE(TextureOp) {
    SkISize dims;
    dims.fHeight = random->nextULessThan(90) + 10;
    dims.fWidth = random->nextULessThan(90) + 10;
    auto origin = random->nextBool() ? kTopLeft_GrSurfaceOrigin : kBottomLeft_GrSurfaceOrigin;
    GrMipmapped mipMapped = random->nextBool() ? GrMipmapped::kYes : GrMipmapped::kNo;
    SkBackingFit fit = SkBackingFit::kExact;
    if (mipMapped == GrMipmapped::kNo) {
        // Approx fit is only randomized for non-mipmapped proxies.
        fit = random->nextBool() ? SkBackingFit::kApprox : SkBackingFit::kExact;
    }
    const GrBackendFormat format =
            context->priv().caps()->getDefaultBackendFormat(GrColorType::kRGBA_8888,
                                                            GrRenderable::kNo);
    GrProxyProvider* proxyProvider = context->priv().proxyProvider();
    sk_sp<GrTextureProxy> proxy = proxyProvider->createProxy(
            format, dims, GrRenderable::kNo, 1, mipMapped, fit, SkBudgeted::kNo, GrProtected::kNo,
            GrInternalSurfaceFlags::kNone);

    SkRect rect = GrTest::TestRect(random);
    // Random src rect; edges may extend past the proxy bounds to exercise clamping.
    SkRect srcRect;
    srcRect.fLeft = random->nextRangeScalar(0.f, proxy->width() / 2.f);
    srcRect.fRight = random->nextRangeScalar(0.f, proxy->width()) + proxy->width() / 2.f;
    srcRect.fTop = random->nextRangeScalar(0.f, proxy->height() / 2.f);
    srcRect.fBottom = random->nextRangeScalar(0.f, proxy->height()) + proxy->height() / 2.f;
    SkMatrix viewMatrix = GrTest::TestMatrixPreservesRightAngles(random);
    SkPMColor4f color = SkPMColor4f::FromBytes_RGBA(SkColorToPremulGrColor(random->nextU()));
    GrSamplerState::Filter filter = (GrSamplerState::Filter)random->nextULessThan(
            static_cast<uint32_t>(GrSamplerState::Filter::kLast) + 1);
    GrSamplerState::MipmapMode mm = GrSamplerState::MipmapMode::kNone;
    if (mipMapped == GrMipmapped::kYes) {
        // Only pick a mipmap mode when the proxy actually has mip levels.
        mm = (GrSamplerState::MipmapMode)random->nextULessThan(
                static_cast<uint32_t>(GrSamplerState::MipmapMode::kLast) + 1);
    }

    auto texXform = GrTest::TestColorXform(random);
    GrAAType aaType = GrAAType::kNone;
    if (random->nextBool()) {
        aaType = (numSamples > 1) ? GrAAType::kMSAA : GrAAType::kCoverage;
    }
    // Randomize each edge's AA flag independently.
    GrQuadAAFlags aaFlags = GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kLeft : GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kTop : GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kRight : GrQuadAAFlags::kNone;
    aaFlags |= random->nextBool() ? GrQuadAAFlags::kBottom : GrQuadAAFlags::kNone;
    bool useSubset = random->nextBool();
    auto saturate = random->nextBool() ? GrTextureOp::Saturate::kYes : GrTextureOp::Saturate::kNo;
    GrSurfaceProxyView proxyView(
            std::move(proxy), origin,
            context->priv().caps()->getReadSwizzle(format, GrColorType::kRGBA_8888));
    auto alphaType = static_cast<SkAlphaType>(
            random->nextRangeU(kUnknown_SkAlphaType + 1, kLastEnum_SkAlphaType));

    DrawQuad quad = {GrQuad::MakeFromRect(rect, viewMatrix), GrQuad(srcRect), aaFlags};
    return GrTextureOp::Make(context, std::move(proxyView), alphaType, std::move(texXform), filter,
                             mm, color, saturate, SkBlendMode::kSrcOver, aaType, &quad,
                             useSubset ? &srcRect : nullptr);
}
1429 | |
1430 | #endif |
1431 | |