1 | // [Blend2D] |
2 | // 2D Vector Graphics Powered by a JIT Compiler. |
3 | // |
4 | // [License] |
5 | // Zlib - See LICENSE.md file in the package. |
6 | |
7 | #include "./blapi-build_p.h" |
8 | #include "./blcontext.h" |
9 | #include "./blmath_p.h" |
10 | #include "./blmatrix_p.h" |
11 | #include "./blpipedefs_p.h" |
12 | #include "./blsupport_p.h" |
13 | |
14 | // ============================================================================ |
15 | // [BLPipeFetchData - Init Pattern] |
16 | // ============================================================================ |
17 | |
18 | static BL_INLINE uint32_t blExtendXFromExtendMode(uint32_t extendMode) noexcept { |
19 | BL_ASSERT(extendMode < BL_EXTEND_MODE_COMPLEX_COUNT); |
20 | |
21 | constexpr uint32_t kTable = (BL_EXTEND_MODE_PAD << 0) | // [pad-x pad-y ] |
22 | (BL_EXTEND_MODE_REPEAT << 2) | // [repeat-x repeat-y ] |
23 | (BL_EXTEND_MODE_REFLECT << 4) | // [reflect-x reflect-y] |
24 | (BL_EXTEND_MODE_PAD << 6) | // [pad-x repeat-y ] |
25 | (BL_EXTEND_MODE_PAD << 8) | // [pad-x reflect-y] |
26 | (BL_EXTEND_MODE_REPEAT << 10) | // [repeat-x pad-y ] |
27 | (BL_EXTEND_MODE_REPEAT << 12) | // [repeat-x reflect-y] |
28 | (BL_EXTEND_MODE_REFLECT << 14) | // [reflect-x pad-y ] |
29 | (BL_EXTEND_MODE_REFLECT << 16) ; // [reflect-x repeat-y ] |
30 | return (kTable >> (extendMode * 2u)) & 0x3u; |
31 | } |
32 | |
33 | static BL_INLINE uint32_t blExtendYFromExtendMode(uint32_t extendMode) noexcept { |
34 | BL_ASSERT(extendMode < BL_EXTEND_MODE_COMPLEX_COUNT); |
35 | |
36 | constexpr uint32_t kTable = (BL_EXTEND_MODE_PAD << 0) | // [pad-x pad-y ] |
37 | (BL_EXTEND_MODE_REPEAT << 2) | // [repeat-x repeat-y ] |
38 | (BL_EXTEND_MODE_REFLECT << 4) | // [reflect-x reflect-y] |
39 | (BL_EXTEND_MODE_REPEAT << 6) | // [pad-x repeat-y ] |
40 | (BL_EXTEND_MODE_REFLECT << 8) | // [pad-x reflect-y] |
41 | (BL_EXTEND_MODE_PAD << 10) | // [repeat-x pad-y ] |
42 | (BL_EXTEND_MODE_REFLECT << 12) | // [repeat-x reflect-y] |
43 | (BL_EXTEND_MODE_PAD << 14) | // [reflect-x pad-y ] |
44 | (BL_EXTEND_MODE_REPEAT << 16) ; // [reflect-x repeat-y ] |
45 | return (kTable >> (extendMode * 2u)) & 0x3u; |
46 | } |
47 | |
// Initializes simple (axis-aligned) pattern fetch data from a translation
// [tx, ty] already negated by the caller.
//
// `fetchBase` is the PAD fetch type of the fetcher family selected by the
// caller (AA / Fx / Fy / FxFy); the returned fetch type is `fetchBase`
// offset by the effective X extend. `isFractional` tells whether the fetch
// uses sub-pixel weights, in which case Repeat vs Reflect fetchers are not
// specialized.
static BL_INLINE uint32_t blPipeFetchDataInitPatternTxTy(BLPipeFetchData* fetchData, uint32_t fetchBase, uint32_t extendMode, int tx, int ty, bool isFractional) noexcept {
  BLPipeFetchData::Pattern& d = fetchData->pattern;
  uint32_t extendX = blExtendXFromExtendMode(extendMode);
  uint32_t extendY = blExtendYFromExtendMode(extendMode);
  uint32_t ixIndex = 17; // Default index into `blModuloTable` (no clamping needed).

  // Repeat/reflect periods; zero when the axis uses PAD.
  int rx = 0;
  int ry = 0;

  // If the pattern width/height is 1 all extend modes produce the same output.
  // However, it's safer to just set it to PAD as FetchPatternPart requires
  // `width` to be equal or greater than 2 if the extend mode is REPEAT or
  // REFLECT.
  if (d.src.size.w <= 1) extendX = BL_EXTEND_MODE_PAD;
  if (d.src.size.h <= 1) extendY = BL_EXTEND_MODE_PAD;

  if (extendX >= BL_EXTEND_MODE_REPEAT) {
    bool isReflect = extendX == BL_EXTEND_MODE_REFLECT;

    // Reflection doubles the period: [0..2*w) instead of [0..w).
    rx = int(d.src.size.w) << uint32_t(isReflect);
    if (unsigned(tx) >= unsigned(rx))
      tx %= rx;
    if (tx < 0)
      tx += rx;

    // In extreme cases, when `rx` is very small, fetch4()/fetch8() functions
    // may overflow `x` if they increment more than they can fix by subtracting
    // `rw` in case of overflow (and overflow happens as it's used to start
    // over). To fix this and simplify the compiled code we simply precalculate
    // these constants so they are always safe.
    ixIndex = blMin<uint32_t>(uint32_t(rx), 17);

    // Don't specialize `Repeat vs Reflect` when we are not pixel aligned.
    if (isFractional)
      extendX = 1; // TODO: Naming...
  }

  if (extendY >= BL_EXTEND_MODE_REPEAT) {
    // Normalize `ty` into the repeated/reflected period [0..ry).
    ry = int(d.src.size.h) << uint32_t(extendY == BL_EXTEND_MODE_REFLECT);
    if (unsigned(ty) >= unsigned(ry))
      ty %= ry;
    if (ty < 0)
      ty += ry;
  }

  d.simple.tx = tx;
  d.simple.ty = ty;
  d.simple.rx = rx;
  d.simple.ry = ry;
  d.simple.ix = blModuloTable[ixIndex];

  return fetchBase + extendX;
}
101 | |
102 | uint32_t BLPipeFetchData::initPatternAxAy(uint32_t extendMode, int x, int y) noexcept { |
103 | return blPipeFetchDataInitPatternTxTy(this, BL_PIPE_FETCH_TYPE_PATTERN_AA_PAD, extendMode, -x, -y, false); |
104 | } |
105 | |
// Initializes pattern fetch data from a fixed-point translation [tx64, ty64]
// (8 fractional bits). The fractional part selects between the pixel-aligned
// (AA), horizontal-only (Fx), vertical-only (Fy), and full (FxFy) sub-pixel
// fetcher families.
uint32_t BLPipeFetchData::initPatternFxFy(uint32_t extendMode, uint32_t filter, int64_t tx64, int64_t ty64) noexcept {
  BLPipeFetchData::Pattern& d = this->pattern;

  uint32_t fetchBase = BL_PIPE_FETCH_TYPE_PATTERN_AA_PAD;
  // Fractional weights in the range [0..255].
  uint32_t wx = uint32_t(tx64 & 0xFF);
  uint32_t wy = uint32_t(ty64 & 0xFF);

  // Integer translation (negated, as fetchers subtract the origin).
  int tx = -int(((tx64)) >> 8);
  int ty = -int(((ty64)) >> 8);

  // If one or both `wx` or `wy` are non-zero it means that the translation
  // is fractional. In that case we must calculate weights of [x0 y0], [x1 y0],
  // [x0 y1], and [x1 y1] pixels.
  bool isFractional = (wx | wy) != 0;
  if (isFractional) {
    if (filter == BL_PATTERN_QUALITY_NEAREST) {
      // Nearest filter just rounds to the closest pixel; the fetch itself
      // stays pixel-aligned.
      tx -= (wx >= 128);
      ty -= (wy >= 128);
      isFractional = false;
    }
    else {
      // Bilinear weights of the four neighboring pixels. NOTE(review): the
      // `+ 255` bias is applied only to the x1 column - presumably to make
      // the weights of a row sum to 256; confirm against the pipeline.
      d.simple.wa = (( wy) * ( wx) ) >> 8; // [x0 y0]
      d.simple.wb = (( wy) * (256 - wx) + 255) >> 8; // [x1 y0]
      d.simple.wc = ((256 - wy) * ( wx) ) >> 8; // [x0 y1]
      d.simple.wd = ((256 - wy) * (256 - wx) + 255) >> 8; // [x1 y1]

      // The FxFy fetcher must work even when one or both `wx` or `wy` are
      // zero, so we always decrement `tx` and `ty` based on the fetch type.
      if (wy == 0) {
        tx--;
        fetchBase = BL_PIPE_FETCH_TYPE_PATTERN_FX_PAD;
      }
      else if (wx == 0) {
        ty--;
        fetchBase = BL_PIPE_FETCH_TYPE_PATTERN_FY_PAD;
      }
      else {
        tx--;
        ty--;
        fetchBase = BL_PIPE_FETCH_TYPE_PATTERN_FX_FY_PAD;
      }
    }
  }

  return blPipeFetchDataInitPatternTxTy(this, fetchBase, extendMode, tx, ty, isFractional);
}
152 | |
153 | uint32_t BLPipeFetchData::initPatternAffine(uint32_t extendMode, uint32_t filter, const BLMatrix2D& m, const BLMatrix2D& mInv) noexcept { |
154 | BL_UNUSED(m); |
155 | BLPipeFetchData::Pattern& d = this->pattern; |
156 | |
157 | // Inverted transformation matrix. |
158 | double xx = mInv.m00; |
159 | double xy = mInv.m01; |
160 | double yx = mInv.m10; |
161 | double yy = mInv.m11; |
162 | |
163 | if (isNearOne(xx) && isNearZero(xy) && isNearZero(yx) && isNearOne(yy)) { |
164 | return initPatternFxFy( |
165 | extendMode, |
166 | filter, |
167 | blFloorToInt64(-mInv.m20 * 256.0), |
168 | blFloorToInt64(-mInv.m21 * 256.0)); |
169 | } |
170 | |
171 | uint32_t fetchType = |
172 | filter == BL_PATTERN_QUALITY_NEAREST |
173 | ? BL_PIPE_FETCH_TYPE_PATTERN_AFFINE_NN_ANY |
174 | : BL_PIPE_FETCH_TYPE_PATTERN_AFFINE_BI_ANY; |
175 | |
176 | // Pattern bounds. |
177 | int tw = int(d.src.size.w); |
178 | int th = int(d.src.size.h); |
179 | |
180 | uint32_t opt = blMax(tw, th) < 32767 && |
181 | d.src.stride >= 0 && |
182 | d.src.stride <= intptr_t(blMaxValue<int16_t>()); |
183 | |
184 | // TODO: [PIPEGEN] Not implemented for bilinear yet. |
185 | if (filter == BL_PATTERN_QUALITY_BILINEAR) |
186 | opt = 0; |
187 | |
188 | fetchType += opt; |
189 | |
190 | // Pattern X/Y extends. |
191 | uint32_t extendX = blExtendXFromExtendMode(extendMode); |
192 | uint32_t extendY = blExtendYFromExtendMode(extendMode); |
193 | |
194 | // Translation. |
195 | double tx = mInv.m20; |
196 | double ty = mInv.m21; |
197 | |
198 | tx += 0.5 * (xx + yx); |
199 | ty += 0.5 * (xy + yy); |
200 | |
201 | // 32x32 fixed point scale as double, equals to `pow(2, 32)`. |
202 | double fpScale = 4294967296.0; |
203 | |
204 | // Overflow check of X/Y. When this check passes we decrement rx/ry from |
205 | // the overflown values. |
206 | int ox = blMaxValue<int32_t>(); |
207 | int oy = blMaxValue<int32_t>(); |
208 | |
209 | // Normalization of X/Y. These values are added to the current `px` and `py` |
210 | // when they overflow the repeat|reflect bounds. |
211 | int rx = 0; |
212 | int ry = 0; |
213 | |
214 | d.affine.minX = 0; |
215 | d.affine.minY = 0; |
216 | |
217 | d.affine.maxX = int32_t(tw - 1); |
218 | d.affine.maxY = int32_t(th - 1); |
219 | |
220 | d.affine.corX = int32_t(tw - 1); |
221 | d.affine.corY = int32_t(th - 1); |
222 | |
223 | if (extendX != BL_EXTEND_MODE_PAD) { |
224 | d.affine.minX = blMinValue<int32_t>(); |
225 | if (extendX == BL_EXTEND_MODE_REPEAT) |
226 | d.affine.corX = 0; |
227 | |
228 | ox = tw; |
229 | if (extendX == BL_EXTEND_MODE_REFLECT) |
230 | tw *= 2; |
231 | |
232 | if (xx < 0.0) { |
233 | xx = -xx; |
234 | yx = -yx; |
235 | tx = double(tw) - tx; |
236 | |
237 | if (extendX == BL_EXTEND_MODE_REPEAT) { |
238 | ox = 0; |
239 | d.affine.corY = d.affine.maxX; |
240 | } |
241 | } |
242 | ox--; |
243 | } |
244 | |
245 | if (extendY != BL_EXTEND_MODE_PAD) { |
246 | d.affine.minY = blMinValue<int32_t>(); |
247 | if (extendY == BL_EXTEND_MODE_REPEAT) |
248 | d.affine.corY = 0; |
249 | |
250 | oy = th; |
251 | if (extendY == BL_EXTEND_MODE_REFLECT) |
252 | th *= 2; |
253 | |
254 | if (xy < 0.0) { |
255 | xy = -xy; |
256 | yy = -yy; |
257 | ty = double(th) - ty; |
258 | |
259 | if (extendY == BL_EXTEND_MODE_REPEAT) { |
260 | oy = 0; |
261 | d.affine.corY = d.affine.maxY; |
262 | } |
263 | } |
264 | oy--; |
265 | } |
266 | |
267 | // Keep the center of the pixel at [0.5, 0.5] if the filter is NEAREST so |
268 | // it can properly round to the nearest pixel during the fetch phase. |
269 | // However, if the filter is not NEAREST the `tx` and `ty` have to be |
270 | // translated by -0.5 so the position starts at the beginning of the pixel. |
271 | if (filter != BL_PATTERN_QUALITY_NEAREST) { |
272 | tx -= 0.5; |
273 | ty -= 0.5; |
274 | } |
275 | |
276 | // Pattern boundaries converted to `double`. |
277 | double tw_d = double(tw); |
278 | double th_d = double(th); |
279 | |
280 | // Normalize the matrix in a way that it won't overflow the pattern more |
281 | // than once per a single iteration. Happens when scaling part is very |
282 | // small. Only useful for repeated / reflected cases. |
283 | if (extendX == BL_EXTEND_MODE_PAD) { |
284 | tw_d = 4294967296.0; |
285 | } |
286 | else { |
287 | tx = fmod(tx, tw_d); |
288 | rx = tw; |
289 | if (xx >= tw_d) xx = fmod(xx, tw_d); |
290 | } |
291 | |
292 | if (extendY == BL_EXTEND_MODE_PAD) { |
293 | th_d = 4294967296.0; |
294 | } |
295 | else { |
296 | ty = fmod(ty, th_d); |
297 | ry = th; |
298 | if (xy >= th_d) xy = fmod(xy, th_d); |
299 | } |
300 | |
301 | d.affine.xx.i64 = blFloorToInt64(xx * fpScale); |
302 | d.affine.xy.i64 = blFloorToInt64(xy * fpScale); |
303 | d.affine.yx.i64 = blFloorToInt64(yx * fpScale); |
304 | d.affine.yy.i64 = blFloorToInt64(yy * fpScale); |
305 | |
306 | d.affine.tx.i64 = blFloorToInt64(tx * fpScale); |
307 | d.affine.ty.i64 = blFloorToInt64(ty * fpScale); |
308 | d.affine.rx.i64 = blBitShl(int64_t(rx), 32); |
309 | d.affine.ry.i64 = blBitShl(int64_t(ry), 32); |
310 | |
311 | d.affine.ox.i32Hi = ox; |
312 | d.affine.ox.i32Lo = blMaxValue<int32_t>(); |
313 | d.affine.oy.i32Hi = oy; |
314 | d.affine.oy.i32Lo = blMaxValue<int32_t>(); |
315 | |
316 | d.affine.tw = tw_d; |
317 | d.affine.th = th_d; |
318 | |
319 | d.affine.xx2.u64 = d.affine.xx.u64 << 1u; |
320 | d.affine.xy2.u64 = d.affine.xy.u64 << 1u; |
321 | |
322 | if (extendX >= BL_EXTEND_MODE_REPEAT && d.affine.xx2.u32Hi >= uint32_t(tw)) d.affine.xx2.u32Hi %= uint32_t(tw); |
323 | if (extendY >= BL_EXTEND_MODE_REPEAT && d.affine.xy2.u32Hi >= uint32_t(th)) d.affine.xy2.u32Hi %= uint32_t(th); |
324 | |
325 | // TODO: Hardcoded for 32-bit PRGB/XRGB formats. |
326 | if (opt) { |
327 | d.affine.addrMul[0] = 4; |
328 | d.affine.addrMul[1] = int16_t(d.src.stride); |
329 | } |
330 | else { |
331 | d.affine.addrMul[0] = 0; |
332 | d.affine.addrMul[1] = 0; |
333 | } |
334 | |
335 | return fetchType; |
336 | } |
337 | |
338 | // ============================================================================ |
339 | // [BLPipeFetchData - Init Gradient] |
340 | // ============================================================================ |
341 | |
// Initializes fetch data of a linear gradient.
//
// The fetcher advances a 32.32 fixed-point position along the gradient line;
// `dt`/`dy` are the per-pixel X/Y advances into the LUT, `pt[0]`/`pt[1]`
// hold the positions of two consecutive pixels and `dt2` advances by two -
// presumably for pairwise (SIMD) fetching; confirm against the pipeline.
static BL_INLINE uint32_t blPipeFetchDataInitLinearGradient(BLPipeFetchData* fetchData, const BLLinearGradientValues& values, uint32_t extendMode, const BLMatrix2D& m, const BLMatrix2D& mInv) noexcept {
  BLPipeFetchData::Gradient& d = fetchData->gradient;

  BLPoint p0(values.x0, values.y0);
  BLPoint p1(values.x1, values.y1);

  uint32_t lutSize = d.lut.size;
  BL_ASSERT(lutSize > 0);

  bool isPad = extendMode == BL_EXTEND_MODE_PAD;
  bool isReflect = extendMode == BL_EXTEND_MODE_REFLECT;

  // Distance between [x0, y0] and [x1, y1], before transform (squared).
  double ax = p1.x - p0.x;
  double ay = p1.y - p0.y;
  double dist = ax * ax + ay * ay;

  // Invert origin and move it to the center of the pixel.
  BLPoint o = BLPoint(0.5, 0.5) - m.mapPoint(p0);

  // Project the per-pixel X/Y steps onto the gradient direction.
  double dt = ax * mInv.m00 + ay * mInv.m01;
  double dy = ax * mInv.m10 + ay * mInv.m11;

  // Scale converts the projected value into a 32.32 fixed-point LUT index.
  double scale = double(int64_t(uint64_t(lutSize) << 32)) / dist;
  double offset = o.x * dt + o.y * dy;

  dt *= scale;
  dy *= scale;
  offset *= scale;

  d.linear.dy.i64 = blFloorToInt64(dy);
  d.linear.dt.i64 = blFloorToInt64(dt);
  d.linear.dt2.u64 = d.linear.dt.u64 << 1;
  d.linear.pt[0].i64 = blFloorToInt64(offset);
  d.linear.pt[1].u64 = d.linear.pt[0].u64 + d.linear.dt.u64;

  // PAD disables wrapping (all-ones `rep` mask); REPEAT/REFLECT wrap within
  // `rorSize`, where reflection doubles the period so the LUT is mirrored.
  uint32_t rorSize = isReflect ? lutSize * 2u : lutSize;
  d.linear.rep.u32Hi = isPad ? uint32_t(0xFFFFFFFFu) : uint32_t(rorSize - 1u);
  d.linear.rep.u32Lo = 0xFFFFFFFFu;
  d.linear.msk.u = isPad ? (lutSize - 1u) * 0x00010001u : (lutSize * 2u - 1u) * 0x00010001u;

  return isPad ? BL_PIPE_FETCH_TYPE_GRADIENT_LINEAR_PAD : BL_PIPE_FETCH_TYPE_GRADIENT_LINEAR_ROR;
}
385 | |
386 | // The radial gradient uses the following equation: |
387 | // |
388 | // b = x * fx + y * fy |
389 | // d = x^2 * (r^2 - fy^2) + y^2 * (r^2 - fx^2) + x*y * (2*fx*fy) |
390 | // |
391 | // pos = ((b + sqrt(d))) * scale) |
392 | // |
393 | // Simplified to: |
394 | // |
395 | // C1 = r^2 - fy^2 |
396 | // C2 = r^2 - fx^2 |
397 | // C3 = 2 * fx * fy |
398 | // |
399 | // b = x*fx + y*fy |
400 | // d = x^2 * C1 + y^2 * C2 + x*y * C3 |
401 | // |
402 | // pos = ((b + sqrt(d))) * scale) |
403 | // |
404 | // Radial gradient function can be defined as follows: |
405 | // |
406 | // D = C1*(x^2) + C2*(y^2) + C3*(x*y) |
407 | // |
408 | // Which could be rewritten as: |
409 | // |
410 | // D = D1 + D2 + D3 |
411 | // |
412 | // Where: D1 = C1*(x^2) |
413 | // D2 = C2*(y^2) |
414 | // D3 = C3*(x*y) |
415 | // |
416 | // The variables `x` and `y` increase linearly, thus we can use multiple |
417 | // differentiation to get delta (d) and delta-of-delta (dd). |
418 | // |
419 | // Deltas for `C*(x^2)` at `t`: |
420 | // |
421 | // C*x*x: 1st delta `d` at step `t`: C*(t^2) + 2*C*x |
422 | // C*x*x: 2nd delta `dd` at step `t`: 2*C *t^2 |
423 | // |
424 | // ( Hint, use Mathematica DifferenceDelta[x*x*C, {x, 1, t}] ) |
425 | // |
426 | // Deltas for `C*(x*y)` at `t`: |
427 | // |
428 | // C*x*y: 1st delta `d` at step `tx/ty`: C*x*ty + C*y*tx + C*tx*ty |
429 | // C*x*y: 2nd delta `dd` at step `tx/ty`: 2*C * tx*ty |
// Initializes fetch data of a radial gradient (see the derivation in the
// comment block above for the meaning of C1/C2/C3, `b`, `d`, and the
// first/second order differences).
static BL_INLINE uint32_t blPipeFetchDataInitRadialGradient(BLPipeFetchData* fetchData, const BLRadialGradientValues& values, uint32_t extendMode, const BLMatrix2D& m, const BLMatrix2D& mInv) noexcept {
  BL_UNUSED(m);
  BLPipeFetchData::Gradient& d = fetchData->gradient;

  // Center point and focal point of the radial gradient.
  BLPoint c(values.x0, values.y0);
  BLPoint f(values.x1, values.y1);

  double r = values.r0;
  uint32_t lutSize = d.lut.size;

  BL_ASSERT(lutSize != 0);
  BL_ASSERT(extendMode < BL_EXTEND_MODE_SIMPLE_COUNT);

  // Make the focal point relative to the center, keeping the original.
  BLPoint fOrig = f;
  f -= c;

  double fxfx = f.x * f.x;
  double fyfy = f.y * f.y;

  double rr = r * r;
  double dd = rr - fxfx - fyfy;

  // If the focal point is near the border we move it slightly to prevent
  // division by zero. This idea comes from AntiGrain library.
  if (isNearZero(dd)) {
    if (!isNearZero(f.x)) f.x += (f.x < 0.0) ? 0.5 : -0.5;
    if (!isNearZero(f.y)) f.y += (f.y < 0.0) ? 0.5 : -0.5;

    fxfx = f.x * f.x;
    fyfy = f.y * f.y;
    dd = rr - fxfx - fyfy;
  }

  // `scale` maps the normalized gradient position into LUT indexes.
  double scale = double(int(lutSize)) / dd;
  double ax = rr - fyfy; // C1 in the derivation above.
  double ay = rr - fxfx; // C2 in the derivation above.

  d.radial.ax = ax;
  d.radial.ay = ay;
  d.radial.fx = f.x;
  d.radial.fy = f.y;

  // Inverted transformation matrix.
  double xx = mInv.m00;
  double xy = mInv.m01;
  double yx = mInv.m10;
  double yy = mInv.m11;

  d.radial.xx = xx;
  d.radial.xy = xy;
  d.radial.yx = yx;
  d.radial.yy = yy;
  // Origin relative to the focal point, moved to the center of the pixel.
  d.radial.ox = (mInv.m20 - fOrig.x) + 0.5 * (xx + yx);
  d.radial.oy = (mInv.m21 - fOrig.y) + 0.5 * (xy + yy);

  double ax_xx = ax * xx;
  double ay_xy = ay * xy;
  double fx_xx = f.x * xx;
  double fy_xy = f.y * xy;

  // First-order differences of `d` and `b` per horizontal pixel step.
  d.radial.dd = ax_xx * xx + ay_xy * xy + 2.0 * (fx_xx * fy_xy);
  d.radial.bd = fx_xx + fy_xy;

  // Per-row updates of the `d` difference (depend on the current x/y).
  d.radial.ddx = 2.0 * (ax_xx + fy_xy * f.x);
  d.radial.ddy = 2.0 * (ay_xy + fx_xx * f.y);

  // Second-order (constant) difference of `d`.
  d.radial.ddd = 2.0 * d.radial.dd;
  d.radial.scale = scale;
  // REFLECT doubles the index period so the LUT is mirrored.
  d.radial.maxi = (extendMode == BL_EXTEND_MODE_REFLECT) ? int(lutSize * 2 - 1) : int(lutSize - 1);

  return BL_PIPE_FETCH_TYPE_GRADIENT_RADIAL_PAD + extendMode;
}
501 | |
502 | static BL_INLINE uint32_t blPipeFetchDataInitConicalGradient(BLPipeFetchData* fetchData, const BLConicalGradientValues& values, uint32_t extendMode, const BLMatrix2D& m, const BLMatrix2D& mInv) noexcept { |
503 | BLPipeFetchData::Gradient& d = fetchData->gradient; |
504 | |
505 | BLPoint c(values.x0, values.y0); |
506 | double angle = values.angle; |
507 | |
508 | uint32_t lutSize = d.lut.size; |
509 | uint32_t tableId = blBitCtz(lutSize) - 8; |
510 | BL_ASSERT(tableId < BLCommonTable::kTableCount); |
511 | |
512 | // Invert the origin and move it to the center of the pixel. |
513 | c = BLPoint(0.5, 0.5) - m.mapPoint(c); |
514 | |
515 | d.conical.xx = mInv.m00; |
516 | d.conical.xy = mInv.m01; |
517 | d.conical.yx = mInv.m10; |
518 | d.conical.yy = mInv.m11; |
519 | d.conical.ox = mInv.m20 + c.x * mInv.m00 + c.y * mInv.m10; |
520 | d.conical.oy = mInv.m21 + c.x * mInv.m01 + c.y * mInv.m11; |
521 | d.conical.consts = &blCommonTable.xmm_f_con[tableId]; |
522 | |
523 | d.conical.maxi = int(lutSize - 1); |
524 | |
525 | return BL_PIPE_FETCH_TYPE_GRADIENT_CONICAL; |
526 | } |
527 | |
528 | uint32_t BLPipeFetchData::initGradient(uint32_t gradientType, const void* values, uint32_t extendMode, const BLGradientLUT* lut, const BLMatrix2D& m, const BLMatrix2D& mInv) noexcept { |
529 | // Initialize LUT. |
530 | this->gradient.lut.data = lut->data(); |
531 | this->gradient.lut.size = uint32_t(lut->size); |
532 | |
533 | // Initialize gradient by type. |
534 | switch (gradientType) { |
535 | case BL_GRADIENT_TYPE_LINEAR: return blPipeFetchDataInitLinearGradient(this, *static_cast<const BLLinearGradientValues*>(values), extendMode, m, mInv); |
536 | case BL_GRADIENT_TYPE_RADIAL: return blPipeFetchDataInitRadialGradient(this, *static_cast<const BLRadialGradientValues*>(values), extendMode, m, mInv); |
537 | case BL_GRADIENT_TYPE_CONICAL: return blPipeFetchDataInitConicalGradient(this, *static_cast<const BLConicalGradientValues*>(values), extendMode, m, mInv); |
538 | |
539 | default: |
540 | BL_NOT_REACHED(); |
541 | } |
542 | } |
543 | |