1 | /* |
2 | * Copyright © 2019 Adobe Inc. |
3 | * Copyright © 2019 Ebrahim Byagowi |
4 | * |
5 | * This is part of HarfBuzz, a text shaping library. |
6 | * |
7 | * Permission is hereby granted, without written agreement and without |
8 | * license or royalty fees, to use, copy, modify, and distribute this |
9 | * software and its documentation for any purpose, provided that the |
10 | * above copyright notice and the following two paragraphs appear in |
11 | * all copies of this software. |
12 | * |
13 | * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR |
14 | * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES |
15 | * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN |
16 | * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH |
17 | * DAMAGE. |
18 | * |
19 | * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, |
20 | * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND |
21 | * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS |
22 | * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO |
23 | * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. |
24 | * |
25 | * Adobe Author(s): Michiharu Ariza |
26 | */ |
27 | |
28 | #ifndef HB_OT_VAR_GVAR_TABLE_HH |
29 | #define HB_OT_VAR_GVAR_TABLE_HH |
30 | |
31 | #include "hb-open-type.hh" |
32 | #include "hb-ot-glyf-table.hh" |
33 | #include "hb-ot-var-fvar-table.hh" |
34 | |
35 | /* |
36 | * gvar -- Glyph Variation Table |
37 | * https://docs.microsoft.com/en-us/typography/opentype/spec/gvar |
38 | */ |
39 | #define HB_OT_TAG_gvar HB_TAG('g','v','a','r') |
40 | |
41 | namespace OT { |
42 | |
43 | struct contour_point_t |
44 | { |
45 | void init (float x_=0.f, float y_=0.f) { flag = 0; x = x_; y = y_; } |
46 | |
47 | void translate (const contour_point_t &p) { x += p.x; y += p.y; } |
48 | |
49 | uint8_t flag; |
50 | float x, y; |
51 | }; |
52 | |
53 | struct contour_point_vector_t : hb_vector_t<contour_point_t> |
54 | { |
55 | void extend (const hb_array_t<contour_point_t> &a) |
56 | { |
57 | unsigned int old_len = length; |
58 | resize (old_len + a.length); |
59 | for (unsigned int i = 0; i < a.length; i++) |
60 | (*this)[old_len + i] = a[i]; |
61 | } |
62 | |
63 | void transform (const float (&matrix)[4]) |
64 | { |
65 | for (unsigned int i = 0; i < length; i++) |
66 | { |
67 | contour_point_t &p = (*this)[i]; |
68 | float x_ = p.x * matrix[0] + p.y * matrix[2]; |
69 | p.y = p.x * matrix[1] + p.y * matrix[3]; |
70 | p.x = x_; |
71 | } |
72 | } |
73 | |
74 | void translate (const contour_point_t& delta) |
75 | { |
76 | for (unsigned int i = 0; i < length; i++) |
77 | (*this)[i].translate (delta); |
78 | } |
79 | }; |
80 | |
81 | struct Tuple : UnsizedArrayOf<F2DOT14> {}; |
82 | |
struct TupleIndex : HBUINT16
84 | { |
85 | enum Flags { |
86 | EmbeddedPeakTuple = 0x8000u, |
87 | IntermediateRegion = 0x4000u, |
88 | PrivatePointNumbers = 0x2000u, |
89 | TupleIndexMask = 0x0FFFu |
90 | }; |
91 | |
92 | DEFINE_SIZE_STATIC (2); |
93 | }; |
94 | |
struct TupleVarHeader
{
  unsigned int get_size (unsigned int axis_count) const
98 | { |
99 | return min_size + |
100 | (has_peak () ? get_peak_tuple ().get_size (axis_count) : 0) + |
101 | (has_intermediate () ? (get_start_tuple (axis_count).get_size (axis_count) + |
102 | get_end_tuple (axis_count).get_size (axis_count)) : 0); |
103 | } |
104 | |
  const TupleVarHeader &get_next (unsigned int axis_count) const
106 | { return StructAtOffset<TupleVarHeader> (this, get_size (axis_count)); } |
107 | |
  float calculate_scalar (const int *coords, unsigned int coord_count,
			  const hb_array_t<const F2DOT14> shared_tuples) const
110 | { |
111 | const F2DOT14 *peak_tuple; |
112 | |
113 | if (has_peak ()) |
114 | peak_tuple = &(get_peak_tuple ()[0]); |
115 | else |
116 | { |
117 | unsigned int index = get_index (); |
118 | if (unlikely (index * coord_count >= shared_tuples.length)) |
119 | return 0.f; |
120 | peak_tuple = &shared_tuples[coord_count * index]; |
121 | } |
122 | |
123 | const F2DOT14 *start_tuple = nullptr; |
124 | const F2DOT14 *end_tuple = nullptr; |
125 | if (has_intermediate ()) |
126 | { |
127 | start_tuple = get_start_tuple (coord_count); |
128 | end_tuple = get_end_tuple (coord_count); |
129 | } |
130 | |
131 | float scalar = 1.f; |
132 | for (unsigned int i = 0; i < coord_count; i++) |
133 | { |
134 | int v = coords[i]; |
135 | int peak = peak_tuple[i]; |
136 | if (!peak || v == peak) continue; |
137 | |
138 | if (has_intermediate ()) |
139 | { |
140 | int start = start_tuple[i]; |
141 | int end = end_tuple[i]; |
142 | if (unlikely (start > peak || peak > end || |
143 | (start < 0 && end > 0 && peak))) continue; |
144 | if (v < start || v > end) return 0.f; |
145 | if (v < peak) |
146 | { if (peak != start) scalar *= (float) (v - start) / (peak - start); } |
147 | else |
148 | { if (peak != end) scalar *= (float) (end - v) / (end - peak); } |
149 | } |
150 | else if (!v || v < hb_min (0, peak) || v > hb_max (0, peak)) return 0.f; |
151 | else |
152 | scalar *= (float) v / peak; |
153 | } |
154 | return scalar; |
155 | } |
156 | |
  unsigned int get_data_size () const { return varDataSize; }
158 | |
  bool has_peak () const { return (tupleIndex & TupleIndex::EmbeddedPeakTuple); }
  bool has_intermediate () const { return (tupleIndex & TupleIndex::IntermediateRegion); }
  bool has_private_points () const { return (tupleIndex & TupleIndex::PrivatePointNumbers); }
  unsigned int get_index () const { return (tupleIndex & TupleIndex::TupleIndexMask); }
163 | |
164 | protected: |
  const Tuple &get_peak_tuple () const
  { return StructAfter<Tuple> (tupleIndex); }
  const Tuple &get_start_tuple (unsigned int axis_count) const
  { return *(const Tuple *) &get_peak_tuple ()[has_peak () ? axis_count : 0]; }
  const Tuple &get_end_tuple (unsigned int axis_count) const
  { return *(const Tuple *) &get_peak_tuple ()[has_peak () ? (axis_count * 2) : axis_count]; }
171 | |
  HBUINT16 varDataSize;
  TupleIndex tupleIndex;
174 | /* UnsizedArrayOf<F2DOT14> peakTuple - optional */ |
175 | /* UnsizedArrayOf<F2DOT14> intermediateStartTuple - optional */ |
176 | /* UnsizedArrayOf<F2DOT14> intermediateEndTuple - optional */ |
177 | |
178 | public: |
179 | DEFINE_SIZE_MIN (4); |
180 | }; |
181 | |
182 | struct TupleVarCount : HBUINT16 |
183 | { |
184 | bool has_shared_point_numbers () const { return ((*this) & SharedPointNumbers); } |
185 | unsigned int get_count () const { return (*this) & CountMask; } |
186 | |
187 | protected: |
188 | enum Flags |
189 | { |
190 | SharedPointNumbers = 0x8000u, |
191 | CountMask = 0x0FFFu |
192 | }; |
193 | |
194 | public: |
195 | DEFINE_SIZE_STATIC (2); |
196 | }; |
197 | |
198 | struct GlyphVarData |
199 | { |
  const TupleVarHeader &get_tuple_var_header (void) const
201 | { return StructAfter<TupleVarHeader> (data); } |
202 | |
203 | struct tuple_iterator_t |
204 | { |
205 | void init (const GlyphVarData *var_data_, unsigned int length_, unsigned int axis_count_) |
206 | { |
207 | var_data = var_data_; |
208 | length = length_; |
209 | index = 0; |
210 | axis_count = axis_count_; |
211 | current_tuple = &var_data->get_tuple_var_header (); |
212 | data_offset = 0; |
213 | } |
214 | |
215 | bool get_shared_indices (hb_vector_t<unsigned int> &shared_indices /* OUT */) |
216 | { |
217 | if (var_data->has_shared_point_numbers ()) |
218 | { |
219 | hb_bytes_t bytes ((const char *) var_data, length); |
220 | const HBUINT8 *base = &(var_data+var_data->data); |
221 | const HBUINT8 *p = base; |
222 | if (!unpack_points (p, shared_indices, bytes)) return false; |
223 | data_offset = p - base; |
224 | } |
225 | return true; |
226 | } |
227 | |
228 | bool is_valid () const |
229 | { |
230 | return (index < var_data->tupleVarCount.get_count ()) && |
231 | in_range (current_tuple) && |
232 | current_tuple->get_size (axis_count); |
233 | } |
234 | |
235 | bool move_to_next () |
236 | { |
237 | data_offset += current_tuple->get_data_size (); |
      current_tuple = &current_tuple->get_next (axis_count);
239 | index++; |
240 | return is_valid (); |
241 | } |
242 | |
243 | bool in_range (const void *p, unsigned int l) const |
244 | { return (const char*) p >= (const char*) var_data && (const char*) p+l <= (const char*) var_data + length; } |
245 | |
246 | template <typename T> bool in_range (const T *p) const { return in_range (p, sizeof (*p)); } |
247 | |
248 | const HBUINT8 *get_serialized_data () const |
249 | { return &(var_data+var_data->data) + data_offset; } |
250 | |
251 | private: |
252 | const GlyphVarData *var_data; |
253 | unsigned int length; |
254 | unsigned int index; |
255 | unsigned int axis_count; |
256 | unsigned int data_offset; |
257 | |
258 | public: |
259 | const TupleVarHeader *current_tuple; |
260 | }; |
261 | |
262 | static bool get_tuple_iterator (const GlyphVarData *var_data, |
263 | unsigned int length, |
264 | unsigned int axis_count, |
265 | hb_vector_t<unsigned int> &shared_indices /* OUT */, |
266 | tuple_iterator_t *iterator /* OUT */) |
267 | { |
268 | iterator->init (var_data, length, axis_count); |
269 | if (!iterator->get_shared_indices (shared_indices)) |
270 | return false; |
271 | return iterator->is_valid (); |
272 | } |
273 | |
274 | bool has_shared_point_numbers () const { return tupleVarCount.has_shared_point_numbers (); } |
275 | |
276 | static bool unpack_points (const HBUINT8 *&p /* IN/OUT */, |
277 | hb_vector_t<unsigned int> &points /* OUT */, |
278 | const hb_bytes_t &bytes) |
279 | { |
280 | enum packed_point_flag_t |
281 | { |
282 | POINTS_ARE_WORDS = 0x80, |
283 | POINT_RUN_COUNT_MASK = 0x7F |
284 | }; |
285 | |
286 | if (unlikely (!bytes.in_range (p))) return false; |
287 | |
288 | uint16_t count = *p++; |
289 | if (count & POINTS_ARE_WORDS) |
290 | { |
291 | if (unlikely (!bytes.in_range (p))) return false; |
292 | count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++; |
293 | } |
294 | points.resize (count); |
295 | |
296 | unsigned int n = 0; |
297 | uint16_t i = 0; |
298 | while (i < count) |
299 | { |
300 | if (unlikely (!bytes.in_range (p))) return false; |
301 | uint16_t j; |
302 | uint8_t control = *p++; |
303 | uint16_t run_count = (control & POINT_RUN_COUNT_MASK) + 1; |
304 | if (control & POINTS_ARE_WORDS) |
305 | { |
306 | for (j = 0; j < run_count && i < count; j++, i++) |
307 | { |
308 | if (unlikely (!bytes.in_range ((const HBUINT16 *) p))) |
309 | return false; |
310 | n += *(const HBUINT16 *)p; |
311 | points[i] = n; |
312 | p += HBUINT16::static_size; |
313 | } |
314 | } |
315 | else |
316 | { |
317 | for (j = 0; j < run_count && i < count; j++, i++) |
318 | { |
319 | if (unlikely (!bytes.in_range (p))) return false; |
320 | n += *p++; |
321 | points[i] = n; |
322 | } |
323 | } |
324 | if (j < run_count) return false; |
325 | } |
326 | return true; |
327 | } |
328 | |
329 | static bool unpack_deltas (const HBUINT8 *&p /* IN/OUT */, |
330 | hb_vector_t<int> &deltas /* IN/OUT */, |
331 | const hb_bytes_t &bytes) |
332 | { |
333 | enum packed_delta_flag_t |
334 | { |
335 | DELTAS_ARE_ZERO = 0x80, |
336 | DELTAS_ARE_WORDS = 0x40, |
337 | DELTA_RUN_COUNT_MASK = 0x3F |
338 | }; |
339 | |
340 | unsigned int i = 0; |
341 | unsigned int count = deltas.length; |
342 | while (i < count) |
343 | { |
344 | if (unlikely (!bytes.in_range (p))) return false; |
345 | uint8_t control = *p++; |
346 | unsigned int run_count = (control & DELTA_RUN_COUNT_MASK) + 1; |
347 | unsigned int j; |
348 | if (control & DELTAS_ARE_ZERO) |
349 | for (j = 0; j < run_count && i < count; j++, i++) |
350 | deltas[i] = 0; |
351 | else if (control & DELTAS_ARE_WORDS) |
352 | for (j = 0; j < run_count && i < count; j++, i++) |
353 | { |
354 | if (unlikely (!bytes.in_range ((const HBUINT16 *) p))) |
355 | return false; |
356 | deltas[i] = *(const HBINT16 *) p; |
357 | p += HBUINT16::static_size; |
358 | } |
359 | else |
360 | for (j = 0; j < run_count && i < count; j++, i++) |
361 | { |
362 | if (unlikely (!bytes.in_range (p))) |
363 | return false; |
364 | deltas[i] = *(const HBINT8 *) p++; |
365 | } |
366 | if (j < run_count) |
367 | return false; |
368 | } |
369 | return true; |
370 | } |
371 | |
372 | protected: |
373 | TupleVarCount tupleVarCount; |
374 | OffsetTo<HBUINT8> data; |
375 | /* TupleVarHeader tupleVarHeaders[] */ |
376 | public: |
377 | DEFINE_SIZE_MIN (4); |
378 | }; |
379 | |
380 | struct gvar |
381 | { |
382 | static constexpr hb_tag_t tableTag = HB_OT_TAG_gvar; |
383 | |
384 | bool sanitize_shallow (hb_sanitize_context_t *c) const |
385 | { |
386 | TRACE_SANITIZE (this); |
387 | return_trace (c->check_struct (this) && (version.major == 1) && |
388 | (glyphCount == c->get_num_glyphs ()) && |
389 | c->check_array (&(this+sharedTuples), axisCount * sharedTupleCount) && |
390 | (is_long_offset () ? |
391 | c->check_array (get_long_offset_array (), glyphCount+1) : |
392 | c->check_array (get_short_offset_array (), glyphCount+1)) && |
393 | c->check_array (((const HBUINT8*)&(this+dataZ)) + get_offset (0), |
394 | get_offset (glyphCount) - get_offset (0))); |
395 | } |
396 | |
  /* GlyphVarData not sanitized here; must be checked while accessing each glyph's variation data */
398 | bool sanitize (hb_sanitize_context_t *c) const |
399 | { return sanitize_shallow (c); } |
400 | |
401 | bool subset (hb_subset_context_t *c) const |
402 | { |
403 | TRACE_SUBSET (this); |
404 | |
405 | gvar *out = c->serializer->allocate_min<gvar> (); |
406 | if (unlikely (!out)) return_trace (false); |
407 | |
408 | out->version.major = 1; |
409 | out->version.minor = 0; |
410 | out->axisCount = axisCount; |
411 | out->sharedTupleCount = sharedTupleCount; |
412 | |
413 | unsigned int num_glyphs = c->plan->num_output_glyphs (); |
414 | out->glyphCount = num_glyphs; |
415 | |
416 | unsigned int subset_data_size = 0; |
417 | for (hb_codepoint_t gid = 0; gid < num_glyphs; gid++) |
418 | { |
419 | hb_codepoint_t old_gid; |
420 | if (!c->plan->old_gid_for_new_gid (gid, &old_gid)) continue; |
421 | subset_data_size += get_glyph_var_data_length (old_gid); |
422 | } |
423 | |
424 | bool long_offset = subset_data_size & ~0xFFFFu; |
425 | out->flags = long_offset ? 1 : 0; |
426 | |
427 | HBUINT8 *subset_offsets = c->serializer->allocate_size<HBUINT8> ((long_offset ? 4 : 2) * (num_glyphs + 1)); |
428 | if (!subset_offsets) return_trace (false); |
429 | |
430 | /* shared tuples */ |
431 | if (!sharedTupleCount || !sharedTuples) |
432 | out->sharedTuples = 0; |
433 | else |
434 | { |
435 | unsigned int shared_tuple_size = F2DOT14::static_size * axisCount * sharedTupleCount; |
436 | F2DOT14 *tuples = c->serializer->allocate_size<F2DOT14> (shared_tuple_size); |
437 | if (!tuples) return_trace (false); |
438 | out->sharedTuples = (char *) tuples - (char *) out; |
439 | memcpy (tuples, &(this+sharedTuples), shared_tuple_size); |
440 | } |
441 | |
442 | char *subset_data = c->serializer->allocate_size<char> (subset_data_size); |
443 | if (!subset_data) return_trace (false); |
444 | out->dataZ = subset_data - (char *)out; |
445 | |
446 | unsigned int glyph_offset = 0; |
447 | for (hb_codepoint_t gid = 0; gid < num_glyphs; gid++) |
448 | { |
449 | hb_codepoint_t old_gid; |
450 | unsigned int length = c->plan->old_gid_for_new_gid (gid, &old_gid) ? get_glyph_var_data_length (old_gid) : 0; |
451 | |
452 | if (long_offset) |
453 | ((HBUINT32 *) subset_offsets)[gid] = glyph_offset; |
454 | else |
455 | ((HBUINT16 *) subset_offsets)[gid] = glyph_offset / 2; |
456 | |
457 | if (length > 0) memcpy (subset_data, get_glyph_var_data (old_gid), length); |
458 | subset_data += length; |
459 | glyph_offset += length; |
460 | } |
461 | if (long_offset) |
462 | ((HBUINT32 *) subset_offsets)[num_glyphs] = glyph_offset; |
463 | else |
464 | ((HBUINT16 *) subset_offsets)[num_glyphs] = glyph_offset / 2; |
465 | |
466 | return_trace (true); |
467 | } |
468 | |
469 | protected: |
470 | const GlyphVarData *get_glyph_var_data (hb_codepoint_t glyph) const |
471 | { |
472 | unsigned int start_offset = get_offset (glyph); |
473 | unsigned int end_offset = get_offset (glyph+1); |
474 | |
475 | if ((start_offset == end_offset) || |
476 | unlikely ((start_offset > get_offset (glyphCount)) || |
477 | (start_offset + GlyphVarData::min_size > end_offset))) |
478 | return &Null (GlyphVarData); |
479 | return &(((unsigned char *) this + start_offset) + dataZ); |
480 | } |
481 | |
482 | bool is_long_offset () const { return (flags & 1) != 0; } |
483 | |
484 | unsigned int get_offset (unsigned int i) const |
485 | { |
486 | if (is_long_offset ()) |
487 | return get_long_offset_array ()[i]; |
488 | else |
489 | return get_short_offset_array ()[i] * 2; |
490 | } |
491 | |
492 | unsigned int get_glyph_var_data_length (unsigned int glyph) const |
493 | { |
494 | unsigned int end_offset = get_offset (glyph + 1); |
495 | unsigned int start_offset = get_offset (glyph); |
496 | if (unlikely (start_offset > end_offset || end_offset > get_offset (glyphCount))) |
497 | return 0; |
498 | return end_offset - start_offset; |
499 | } |
500 | |
501 | const HBUINT32 * get_long_offset_array () const { return (const HBUINT32 *) &offsetZ; } |
502 | const HBUINT16 *get_short_offset_array () const { return (const HBUINT16 *) &offsetZ; } |
503 | |
504 | public: |
505 | struct accelerator_t |
506 | { |
507 | void init (hb_face_t *face) |
508 | { |
509 | gvar_table = hb_sanitize_context_t ().reference_table<gvar> (face); |
510 | hb_blob_ptr_t<fvar> fvar_table = hb_sanitize_context_t ().reference_table<fvar> (face); |
511 | unsigned int axis_count = fvar_table->get_axis_count (); |
512 | fvar_table.destroy (); |
513 | |
      if (unlikely ((gvar_table->glyphCount != face->get_num_glyphs ()) ||
		    (gvar_table->axisCount != axis_count)))
      { fini (); return; }
517 | |
518 | unsigned int num_shared_coord = gvar_table->sharedTupleCount * gvar_table->axisCount; |
519 | shared_tuples.resize (num_shared_coord); |
520 | for (unsigned int i = 0; i < num_shared_coord; i++) |
521 | shared_tuples[i] = (&(gvar_table + gvar_table->sharedTuples))[i]; |
522 | } |
523 | |
524 | void fini () |
525 | { |
526 | gvar_table.destroy (); |
527 | shared_tuples.fini (); |
528 | } |
529 | |
530 | private: |
531 | struct x_getter { static float get (const contour_point_t &p) { return p.x; } }; |
532 | struct y_getter { static float get (const contour_point_t &p) { return p.y; } }; |
533 | |
534 | template <typename T> |
535 | static float infer_delta (const hb_array_t<contour_point_t> points, |
536 | const hb_array_t<contour_point_t> deltas, |
537 | unsigned int target, unsigned int prev, unsigned int next) |
538 | { |
539 | float target_val = T::get (points[target]); |
540 | float prev_val = T::get (points[prev]); |
541 | float next_val = T::get (points[next]); |
542 | float prev_delta = T::get (deltas[prev]); |
543 | float next_delta = T::get (deltas[next]); |
544 | |
545 | if (prev_val == next_val) |
546 | return (prev_delta == next_delta) ? prev_delta : 0.f; |
547 | else if (target_val <= hb_min (prev_val, next_val)) |
548 | return (prev_val < next_val) ? prev_delta : next_delta; |
549 | else if (target_val >= hb_max (prev_val, next_val)) |
550 | return (prev_val > next_val) ? prev_delta : next_delta; |
551 | |
552 | /* linear interpolation */ |
553 | float r = (target_val - prev_val) / (next_val - prev_val); |
554 | return (1.f - r) * prev_delta + r * next_delta; |
555 | } |
556 | |
557 | static unsigned int next_index (unsigned int i, unsigned int start, unsigned int end) |
558 | { return (i >= end) ? start : (i + 1); } |
559 | |
560 | public: |
561 | bool apply_deltas_to_points (hb_codepoint_t glyph, |
562 | const int *coords, unsigned int coord_count, |
563 | const hb_array_t<contour_point_t> points, |
564 | const hb_array_t<unsigned int> end_points) const |
565 | { |
566 | if (unlikely (coord_count != gvar_table->axisCount)) return false; |
567 | |
568 | const GlyphVarData *var_data = gvar_table->get_glyph_var_data (glyph); |
569 | if (var_data == &Null (GlyphVarData)) return true; |
570 | hb_vector_t<unsigned int> shared_indices; |
571 | GlyphVarData::tuple_iterator_t iterator; |
572 | if (!GlyphVarData::get_tuple_iterator (var_data, |
573 | gvar_table->get_glyph_var_data_length (glyph), |
574 | gvar_table->axisCount, |
575 | shared_indices, |
576 | &iterator)) |
577 | return false; |
578 | |
579 | /* Save original points for inferred delta calculation */ |
580 | contour_point_vector_t orig_points; |
581 | orig_points.resize (points.length); |
582 | for (unsigned int i = 0; i < orig_points.length; i++) |
583 | orig_points[i] = points[i]; |
584 | |
585 | contour_point_vector_t deltas; /* flag is used to indicate referenced point */ |
586 | deltas.resize (points.length); |
587 | |
588 | do |
589 | { |
590 | float scalar = iterator.current_tuple->calculate_scalar (coords, coord_count, shared_tuples.as_array ()); |
591 | if (scalar == 0.f) continue; |
592 | const HBUINT8 *p = iterator.get_serialized_data (); |
593 | unsigned int length = iterator.current_tuple->get_data_size (); |
594 | if (unlikely (!iterator.in_range (p, length))) |
595 | return false; |
596 | |
597 | hb_bytes_t bytes ((const char *) p, length); |
598 | hb_vector_t<unsigned int> private_indices; |
599 | if (iterator.current_tuple->has_private_points () && |
600 | !GlyphVarData::unpack_points (p, private_indices, bytes)) |
601 | return false; |
602 | const hb_array_t<unsigned int> &indices = private_indices.length ? private_indices : shared_indices; |
603 | |
604 | bool apply_to_all = (indices.length == 0); |
605 | unsigned int num_deltas = apply_to_all ? points.length : indices.length; |
606 | hb_vector_t<int> x_deltas; |
607 | x_deltas.resize (num_deltas); |
608 | if (!GlyphVarData::unpack_deltas (p, x_deltas, bytes)) |
609 | return false; |
610 | hb_vector_t<int> y_deltas; |
611 | y_deltas.resize (num_deltas); |
612 | if (!GlyphVarData::unpack_deltas (p, y_deltas, bytes)) |
613 | return false; |
614 | |
615 | for (unsigned int i = 0; i < deltas.length; i++) |
616 | deltas[i].init (); |
617 | for (unsigned int i = 0; i < num_deltas; i++) |
618 | { |
619 | unsigned int pt_index = apply_to_all ? i : indices[i]; |
620 | deltas[pt_index].flag = 1; /* this point is referenced, i.e., explicit deltas specified */ |
621 | deltas[pt_index].x += x_deltas[i] * scalar; |
622 | deltas[pt_index].y += y_deltas[i] * scalar; |
623 | } |
624 | |
625 | /* infer deltas for unreferenced points */ |
626 | unsigned int start_point = 0; |
627 | for (unsigned int c = 0; c < end_points.length; c++) |
628 | { |
629 | unsigned int end_point = end_points[c]; |
630 | unsigned int i, j; |
631 | |
632 | /* Check the number of unreferenced points in a contour. If no unref points or no ref points, nothing to do. */ |
633 | unsigned int unref_count = 0; |
634 | for (i = start_point; i <= end_point; i++) |
635 | if (!deltas[i].flag) unref_count++; |
636 | if (unref_count == 0 || unref_count > end_point - start_point) |
637 | goto no_more_gaps; |
638 | |
639 | j = start_point; |
640 | for (;;) |
641 | { |
642 | /* Locate the next gap of unreferenced points between two referenced points prev and next. |
643 | * Note that a gap may wrap around at left (start_point) and/or at right (end_point). |
644 | */ |
645 | unsigned int prev, next; |
646 | for (;;) |
647 | { |
648 | i = j; |
649 | j = next_index (i, start_point, end_point); |
650 | if (deltas[i].flag && !deltas[j].flag) break; |
651 | } |
652 | prev = j = i; |
653 | for (;;) |
654 | { |
655 | i = j; |
656 | j = next_index (i, start_point, end_point); |
657 | if (!deltas[i].flag && deltas[j].flag) break; |
658 | } |
659 | next = j; |
660 | /* Infer deltas for all unref points in the gap between prev and next */ |
661 | i = prev; |
662 | for (;;) |
663 | { |
664 | i = next_index (i, start_point, end_point); |
665 | if (i == next) break; |
666 | deltas[i].x = infer_delta<x_getter> (orig_points.as_array (), deltas.as_array (), i, prev, next); |
667 | deltas[i].y = infer_delta<y_getter> (orig_points.as_array (), deltas.as_array (), i, prev, next); |
668 | if (--unref_count == 0) goto no_more_gaps; |
669 | } |
670 | } |
671 | no_more_gaps: |
672 | start_point = end_point + 1; |
673 | } |
674 | |
675 | /* apply specified / inferred deltas to points */ |
676 | for (unsigned int i = 0; i < points.length; i++) |
677 | { |
678 | points[i].x += (float) roundf (deltas[i].x); |
679 | points[i].y += (float) roundf (deltas[i].y); |
680 | } |
681 | } while (iterator.move_to_next ()); |
682 | |
683 | return true; |
684 | } |
685 | |
686 | unsigned int get_axis_count () const { return gvar_table->axisCount; } |
687 | |
688 | protected: |
689 | const GlyphVarData *get_glyph_var_data (hb_codepoint_t glyph) const |
690 | { return gvar_table->get_glyph_var_data (glyph); } |
691 | |
692 | private: |
693 | hb_blob_ptr_t<gvar> gvar_table; |
694 | hb_vector_t<F2DOT14> shared_tuples; |
695 | }; |
696 | |
697 | protected: |
  FixedVersion<> version;	/* Version of gvar table. Set to 0x00010000u. */
699 | HBUINT16 axisCount; |
700 | HBUINT16 sharedTupleCount; |
701 | LOffsetTo<F2DOT14> |
		sharedTuples;	/* LOffsetTo<UnsizedArrayOf<Tuple>> */
703 | HBUINT16 glyphCount; |
704 | HBUINT16 flags; |
705 | LOffsetTo<GlyphVarData> |
706 | dataZ; /* Array of GlyphVarData */ |
707 | UnsizedArrayOf<HBUINT8> |
708 | offsetZ; /* Array of 16-bit or 32-bit (glyphCount+1) offsets */ |
709 | public: |
710 | DEFINE_SIZE_MIN (20); |
711 | }; |
712 | |
713 | struct gvar_accelerator_t : gvar::accelerator_t {}; |
714 | |
715 | } /* namespace OT */ |
716 | |
717 | #endif /* HB_OT_VAR_GVAR_TABLE_HH */ |
718 | |