/*
 * Copyright © 2021 Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 */

#ifndef HB_OT_VAR_COMMON_HH
#define HB_OT_VAR_COMMON_HH

#include "hb-ot-layout-common.hh"


namespace OT {

template <typename MapCountT>
struct DeltaSetIndexMapFormat01
{
  friend struct DeltaSetIndexMap;

  unsigned get_size () const
  { return min_size + mapCount * get_width (); }

  private:
  DeltaSetIndexMapFormat01* copy (hb_serialize_context_t *c) const
  {
    TRACE_SERIALIZE (this);
    return_trace (c->embed (this));
  }

  template <typename T>
  bool serialize (hb_serialize_context_t *c, const T &plan)
  {
    unsigned int width = plan.get_width ();
    unsigned int inner_bit_count = plan.get_inner_bit_count ();
    const hb_array_t<const uint32_t> output_map = plan.get_output_map ();

    TRACE_SERIALIZE (this);
    if (unlikely (output_map.length && ((((inner_bit_count-1)&~0xF)!=0) || (((width-1)&~0x3)!=0))))
      return_trace (false);
    if (unlikely (!c->extend_min (this))) return_trace (false);

    entryFormat = ((width-1)<<4)|(inner_bit_count-1);
    mapCount = output_map.length;
    HBUINT8 *p = c->allocate_size<HBUINT8> (width * output_map.length);
    if (unlikely (!p)) return_trace (false);
    for (unsigned int i = 0; i < output_map.length; i++)
    {
      unsigned int v = output_map.arrayZ[i];
      if (v)
      {
        unsigned int outer = v >> 16;
        unsigned int inner = v & 0xFFFF;
        unsigned int u = (outer << inner_bit_count) | inner;
        for (unsigned int w = width; w > 0;)
        {
          p[--w] = u;
          u >>= 8;
        }
      }
      p += width;
    }
    return_trace (true);
  }

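  /* A worked example of the packed format handled by map () below:
   * with entryFormat = 0x13, get_width () = ((0x13 >> 4) & 3) + 1 = 2 bytes
   * and get_inner_bit_count () = (0x13 & 0xF) + 1 = 4 bits, so a stored
   * 2-byte entry 0x0123 splits into outer = 0x0123 >> 4 = 0x012 and
   * inner = 0x0123 & 0xF = 0x3, repacked as the 16.16 result 0x00120003. */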
  uint32_t map (unsigned int v) const /* Returns 16.16 outer.inner. */
  {
    /* If count is zero, pass value unchanged. This takes
     * care of direct mapping for advance map. */
    if (!mapCount)
      return v;

    if (v >= mapCount)
      v = mapCount - 1;

    unsigned int u = 0;
    { /* Fetch it. */
      unsigned int w = get_width ();
      const HBUINT8 *p = mapDataZ.arrayZ + w * v;
      for (; w; w--)
        u = (u << 8) + *p++;
    }

    { /* Repack it. */
      unsigned int n = get_inner_bit_count ();
      unsigned int outer = u >> n;
      unsigned int inner = u & ((1 << n) - 1);
      u = (outer<<16) | inner;
    }

    return u;
  }

  unsigned get_map_count () const       { return mapCount; }
  unsigned get_width () const           { return ((entryFormat >> 4) & 3) + 1; }
  unsigned get_inner_bit_count () const { return (entryFormat & 0xF) + 1; }


  bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (c->check_struct (this) &&
                  c->check_range (mapDataZ.arrayZ,
                                  mapCount,
                                  get_width ()));
  }

  protected:
  HBUINT8       format;         /* Format identifier--format = 0 */
  HBUINT8       entryFormat;    /* A packed field that describes the compressed
                                 * representation of delta-set indices. */
  MapCountT     mapCount;       /* The number of mapping entries. */
  UnsizedArrayOf<HBUINT8>
                mapDataZ;       /* The delta-set index mapping data. */

  public:
  DEFINE_SIZE_ARRAY (2+MapCountT::static_size, mapDataZ);
};

struct DeltaSetIndexMap
{
  template <typename T>
  bool serialize (hb_serialize_context_t *c, const T &plan)
  {
    TRACE_SERIALIZE (this);
    unsigned length = plan.get_output_map ().length;
    u.format = length <= 0xFFFF ? 0 : 1;
    switch (u.format) {
    case 0: return_trace (u.format0.serialize (c, plan));
    case 1: return_trace (u.format1.serialize (c, plan));
    default:return_trace (false);
    }
  }

  uint32_t map (unsigned v) const
  {
    switch (u.format) {
    case 0: return (u.format0.map (v));
    case 1: return (u.format1.map (v));
    default:return v;
    }
  }

  unsigned get_map_count () const
  {
    switch (u.format) {
    case 0: return u.format0.get_map_count ();
    case 1: return u.format1.get_map_count ();
    default:return 0;
    }
  }

  unsigned get_width () const
  {
    switch (u.format) {
    case 0: return u.format0.get_width ();
    case 1: return u.format1.get_width ();
    default:return 0;
    }
  }

  unsigned get_inner_bit_count () const
  {
    switch (u.format) {
    case 0: return u.format0.get_inner_bit_count ();
    case 1: return u.format1.get_inner_bit_count ();
    default:return 0;
    }
  }

  bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    if (!u.format.sanitize (c)) return_trace (false);
    switch (u.format) {
    case 0: return_trace (u.format0.sanitize (c));
    case 1: return_trace (u.format1.sanitize (c));
    default:return_trace (true);
    }
  }

  DeltaSetIndexMap* copy (hb_serialize_context_t *c) const
  {
    TRACE_SERIALIZE (this);
    switch (u.format) {
    case 0: return_trace (reinterpret_cast<DeltaSetIndexMap *> (u.format0.copy (c)));
    case 1: return_trace (reinterpret_cast<DeltaSetIndexMap *> (u.format1.copy (c)));
    default:return_trace (nullptr);
    }
  }

  protected:
  union {
  HBUINT8                            format;  /* Format identifier */
  DeltaSetIndexMapFormat01<HBUINT16> format0;
  DeltaSetIndexMapFormat01<HBUINT32> format1;
  } u;
  public:
  DEFINE_SIZE_UNION (1, format);
};


struct VarStoreInstancer
{
  VarStoreInstancer (const VariationStore *varStore,
                     const DeltaSetIndexMap *varIdxMap,
                     hb_array_t<int> coords) :
    varStore (varStore), varIdxMap (varIdxMap), coords (coords) {}

  operator bool () const { return varStore && bool (coords); }

  /* According to the spec, if the COLR table has a varStore but no
   * varIdxMap, an implicit identity mapping is used. */
  float operator() (uint32_t varIdx, unsigned short offset = 0) const
  { return varStore->get_delta (varIdxMap ? varIdxMap->map (VarIdx::add (varIdx, offset)) : varIdx + offset, coords); }

  const VariationStore *varStore;
  const DeltaSetIndexMap *varIdxMap;
  hb_array_t<int> coords;
};
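
/* A minimal usage sketch (hypothetical names): given a VariationStore and
 * its optional DeltaSetIndexMap, the instancer resolves one delta at a time:
 *
 *   VarStoreInstancer instancer (&var_store, &var_idx_map, coords);
 *   if (instancer)
 *     float delta = instancer (var_idx);
 *
 * where var_store, var_idx_map, coords and var_idx are assumed to come
 * from the caller. */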

/* https://docs.microsoft.com/en-us/typography/opentype/spec/otvarcommonformats#tuplevariationheader */
struct TupleVariationHeader
{
  friend struct tuple_delta_t;
  unsigned get_size (unsigned axis_count) const
  { return min_size + get_all_tuples (axis_count).get_size (); }

  unsigned get_data_size () const { return varDataSize; }

  const TupleVariationHeader &get_next (unsigned axis_count) const
  { return StructAtOffset<TupleVariationHeader> (this, get_size (axis_count)); }

  bool unpack_axis_tuples (unsigned axis_count,
                           const hb_array_t<const F2DOT14> shared_tuples,
                           const hb_map_t *axes_old_index_tag_map,
                           hb_hashmap_t<hb_tag_t, Triple>& axis_tuples /* OUT */) const
  {
    const F2DOT14 *peak_tuple = nullptr;
    if (has_peak ())
      peak_tuple = get_peak_tuple (axis_count).arrayZ;
    else
    {
      unsigned int index = get_index ();
      if (unlikely ((index + 1) * axis_count > shared_tuples.length))
        return false;
      peak_tuple = shared_tuples.sub_array (axis_count * index, axis_count).arrayZ;
    }

    const F2DOT14 *start_tuple = nullptr;
    const F2DOT14 *end_tuple = nullptr;
    bool has_interm = has_intermediate ();

    if (has_interm)
    {
      start_tuple = get_start_tuple (axis_count).arrayZ;
      end_tuple = get_end_tuple (axis_count).arrayZ;
    }

    for (unsigned i = 0; i < axis_count; i++)
    {
      float peak = peak_tuple[i].to_float ();
      if (peak == 0.f) continue;

      hb_tag_t *axis_tag;
      if (!axes_old_index_tag_map->has (i, &axis_tag))
        return false;

      float start, end;
      if (has_interm)
      {
        start = start_tuple[i].to_float ();
        end = end_tuple[i].to_float ();
      }
      else
      {
        start = hb_min (peak, 0.f);
        end = hb_max (peak, 0.f);
      }
      axis_tuples.set (*axis_tag, Triple (start, peak, end));
    }

    return true;
  }

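  /* A worked example of the scalar computed below (values shown in 2.14
   * fixed point, as the code sees them): for one axis with peak 0.5 (8192)
   * and no intermediate region, a design-space coordinate of 0.25 (4096)
   * lies between 0 and the peak, so the axis contributes 4096 / 8192 = 0.5.
   * With an intermediate (start, peak, end) region, coordinates below the
   * peak interpolate (v - start) / (peak - start) and those above
   * interpolate (end - v) / (end - peak). Axes whose peak is 0 do not
   * constrain the scalar. */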
  float calculate_scalar (hb_array_t<int> coords, unsigned int coord_count,
                          const hb_array_t<const F2DOT14> shared_tuples,
                          const hb_vector_t<hb_pair_t<int,int>> *shared_tuple_active_idx = nullptr) const
  {
    const F2DOT14 *peak_tuple;

    unsigned start_idx = 0;
    unsigned end_idx = coord_count;
    unsigned step = 1;

    if (has_peak ())
      peak_tuple = get_peak_tuple (coord_count).arrayZ;
    else
    {
      unsigned int index = get_index ();
      if (unlikely ((index + 1) * coord_count > shared_tuples.length))
        return 0.f;
      peak_tuple = shared_tuples.sub_array (coord_count * index, coord_count).arrayZ;

      if (shared_tuple_active_idx)
      {
        if (unlikely (index >= shared_tuple_active_idx->length))
          return 0.f;
        auto _ = (*shared_tuple_active_idx).arrayZ[index];
        if (_.second != -1)
        {
          start_idx = _.first;
          end_idx = _.second + 1;
          step = _.second - _.first;
        }
        else if (_.first != -1)
        {
          start_idx = _.first;
          end_idx = start_idx + 1;
        }
      }
    }

    const F2DOT14 *start_tuple = nullptr;
    const F2DOT14 *end_tuple = nullptr;
    bool has_interm = has_intermediate ();
    if (has_interm)
    {
      start_tuple = get_start_tuple (coord_count).arrayZ;
      end_tuple = get_end_tuple (coord_count).arrayZ;
    }

    float scalar = 1.f;
    for (unsigned int i = start_idx; i < end_idx; i += step)
    {
      int peak = peak_tuple[i].to_int ();
      if (!peak) continue;

      int v = coords[i];
      if (v == peak) continue;

      if (has_interm)
      {
        int start = start_tuple[i].to_int ();
        int end = end_tuple[i].to_int ();
        if (unlikely (start > peak || peak > end ||
                      (start < 0 && end > 0 && peak))) continue;
        if (v < start || v > end) return 0.f;
        if (v < peak)
        { if (peak != start) scalar *= (float) (v - start) / (peak - start); }
        else
        { if (peak != end) scalar *= (float) (end - v) / (end - peak); }
      }
      else if (!v || v < hb_min (0, peak) || v > hb_max (0, peak)) return 0.f;
      else
        scalar *= (float) v / peak;
    }
    return scalar;
  }

  bool           has_peak () const { return tupleIndex & TuppleIndex::EmbeddedPeakTuple; }
  bool   has_intermediate () const { return tupleIndex & TuppleIndex::IntermediateRegion; }
  bool has_private_points () const { return tupleIndex & TuppleIndex::PrivatePointNumbers; }
  unsigned      get_index () const { return tupleIndex & TuppleIndex::TupleIndexMask; }

  protected:
  struct TuppleIndex : HBUINT16
  {
    enum Flags {
      EmbeddedPeakTuple   = 0x8000u,
      IntermediateRegion  = 0x4000u,
      PrivatePointNumbers = 0x2000u,
      TupleIndexMask      = 0x0FFFu
    };

    TuppleIndex& operator = (uint16_t i) { HBUINT16::operator= (i); return *this; }
    DEFINE_SIZE_STATIC (2);
  };

  hb_array_t<const F2DOT14> get_all_tuples (unsigned axis_count) const
  { return StructAfter<UnsizedArrayOf<F2DOT14>> (tupleIndex).as_array ((has_peak () + has_intermediate () * 2) * axis_count); }
  hb_array_t<const F2DOT14> get_peak_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (0, axis_count); }
  hb_array_t<const F2DOT14> get_start_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count, axis_count); }
  hb_array_t<const F2DOT14> get_end_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count + axis_count, axis_count); }

  HBUINT16      varDataSize;    /* The size in bytes of the serialized
                                 * data for this tuple variation table. */
  TuppleIndex   tupleIndex;     /* A packed field. The high 4 bits are flags (see below).
                                   The low 12 bits are an index into a shared tuple
                                   records array. */
  /* UnsizedArrayOf<F2DOT14> peakTuple - optional */
                                /* Peak tuple record for this tuple variation table — optional,
                                 * determined by flags in the tupleIndex value.
                                 *
                                 * Note that this must always be included in the 'cvar' table. */
  /* UnsizedArrayOf<F2DOT14> intermediateStartTuple - optional */
                                /* Intermediate start tuple record for this tuple variation table — optional,
                                   determined by flags in the tupleIndex value. */
  /* UnsizedArrayOf<F2DOT14> intermediateEndTuple - optional */
                                /* Intermediate end tuple record for this tuple variation table — optional,
                                 * determined by flags in the tupleIndex value. */
  public:
  DEFINE_SIZE_MIN (4);
};

enum packed_delta_flag_t
{
  DELTAS_ARE_ZERO      = 0x80,
  DELTAS_ARE_WORDS     = 0x40,
  DELTA_RUN_COUNT_MASK = 0x3F
};
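
/* A worked example of the packed-delta encoding driven by these flags:
 * the delta sequence {0, 0, 0, 1, 2, -3, 0x1234} encodes as three runs:
 *
 *   0x82                   DELTAS_ARE_ZERO | (3 - 1): three zero deltas
 *   0x02 0x01 0x02 0xFD    (3 - 1): three byte-sized deltas 1, 2, -3
 *   0x40 0x12 0x34         DELTAS_ARE_WORDS | (1 - 1): one word-sized delta
 *
 * which is what encode_delta_run () below produces and unpack_deltas ()
 * consumes. */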

struct tuple_delta_t
{
  public:
  hb_hashmap_t<hb_tag_t, Triple> axis_tuples;

  /* indices_length = point_count; indices[i] == true means point i is referenced */
  hb_vector_t<bool> indices;

  hb_vector_t<float> deltas_x;
  /* empty for cvar tuples */
  hb_vector_t<float> deltas_y;

  /* Compiled data: header and deltas.
   * Compiled point data is saved in a hashmap within tuple_variations_t,
   * because some point sets might be reused by different tuple variations. */
  hb_vector_t<char> compiled_tuple_header;
  hb_vector_t<char> compiled_deltas;

  tuple_delta_t () = default;
  tuple_delta_t (const tuple_delta_t& o) = default;

  tuple_delta_t (tuple_delta_t&& o) : tuple_delta_t ()
  {
    axis_tuples = std::move (o.axis_tuples);
    indices = std::move (o.indices);
    deltas_x = std::move (o.deltas_x);
    deltas_y = std::move (o.deltas_y);
  }

  tuple_delta_t& operator = (tuple_delta_t&& o)
  {
    hb_swap (*this, o);
    return *this;
  }

  void remove_axis (hb_tag_t axis_tag)
  { axis_tuples.del (axis_tag); }

  bool set_tent (hb_tag_t axis_tag, Triple tent)
  { return axis_tuples.set (axis_tag, tent); }

  tuple_delta_t& operator += (const tuple_delta_t& o)
  {
    unsigned num = indices.length;
    for (unsigned i = 0; i < num; i++)
    {
      if (indices.arrayZ[i])
      {
        if (o.indices.arrayZ[i])
        {
          deltas_x[i] += o.deltas_x[i];
          if (deltas_y && o.deltas_y)
            deltas_y[i] += o.deltas_y[i];
        }
      }
      else
      {
        if (!o.indices.arrayZ[i]) continue;
        indices.arrayZ[i] = true;
        deltas_x[i] = o.deltas_x[i];
        if (deltas_y && o.deltas_y)
          deltas_y[i] = o.deltas_y[i];
      }
    }
    return *this;
  }

  tuple_delta_t& operator *= (float scalar)
  {
    if (scalar == 1.0f)
      return *this;

    unsigned num = indices.length;
    for (unsigned i = 0; i < num; i++)
    {
      if (!indices.arrayZ[i]) continue;

      deltas_x[i] *= scalar;
      if (deltas_y)
        deltas_y[i] *= scalar;
    }
    return *this;
  }

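  /* Re-limit this tuple's tent on one axis: if the tuple does not vary on
   * the axis it is returned unchanged; a tent that strictly straddles zero,
   * or whose (minimum, middle, maximum) is not ordered, drops the
   * variation; a tent peaking at zero is returned unchanged; otherwise
   * rebase_tent () remaps the tent into the new axis range, and each
   * (scale, new-tent) solution becomes a copy of this tuple with its
   * deltas scaled and its tent replaced (or the axis removed when the
   * new tent is empty). */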
  hb_vector_t<tuple_delta_t> change_tuple_var_axis_limit (hb_tag_t axis_tag, Triple axis_limit,
                                                          TripleDistances axis_triple_distances) const
  {
    hb_vector_t<tuple_delta_t> out;
    Triple *tent;
    if (!axis_tuples.has (axis_tag, &tent))
    {
      out.push (*this);
      return out;
    }

    if ((tent->minimum < 0.f && tent->maximum > 0.f) ||
        !(tent->minimum <= tent->middle && tent->middle <= tent->maximum))
      return out;

    if (tent->middle == 0.f)
    {
      out.push (*this);
      return out;
    }

    result_t solutions = rebase_tent (*tent, axis_limit, axis_triple_distances);
    for (auto t : solutions)
    {
      tuple_delta_t new_var = *this;
      if (t.second == Triple ())
        new_var.remove_axis (axis_tag);
      else
        new_var.set_tent (axis_tag, t.second);

      new_var *= t.first;
      out.push (std::move (new_var));
    }

    return out;
  }

  /* Deltas must be compiled before the tuple variation header, because
   * the header needs the size of the serialized delta data for this
   * tuple variation. */
  // TODO(qxliu): add option to use sharedTuples in gvar
  bool compile_tuple_var_header (const hb_map_t& axes_index_map,
                                 unsigned points_data_length,
                                 const hb_map_t& axes_old_index_tag_map)
  {
    if (!compiled_deltas) return false;

    unsigned cur_axis_count = axes_index_map.get_population ();
    /* allocate enough memory: 1 peak + 2 intermediate coords + fixed header size */
    unsigned alloc_len = 3 * cur_axis_count * (F2DOT14::static_size) + 4;
    if (unlikely (!compiled_tuple_header.resize (alloc_len))) return false;

    unsigned flag = 0;
    /* skip the first 4 header bytes: varDataSize + tupleIndex */
    F2DOT14* p = reinterpret_cast<F2DOT14 *> (compiled_tuple_header.begin () + 4);
    F2DOT14* end = reinterpret_cast<F2DOT14 *> (compiled_tuple_header.end ());
    hb_array_t<F2DOT14> coords (p, end - p);

    /* encode peak coords */
    unsigned peak_count = encode_peak_coords (coords, flag, axes_index_map, axes_old_index_tag_map);
    if (!peak_count) return false;

    /* encode intermediate coords; they're optional, so the returned count may be 0 */
    unsigned interim_count = encode_interm_coords (coords.sub_array (peak_count), flag, axes_index_map, axes_old_index_tag_map);

    // TODO(qxliu): add option to use shared_points in gvar
    flag |= TupleVariationHeader::TuppleIndex::PrivatePointNumbers;

    unsigned serialized_data_size = points_data_length + compiled_deltas.length;
    TupleVariationHeader *o = reinterpret_cast<TupleVariationHeader *> (compiled_tuple_header.begin ());
    o->varDataSize = serialized_data_size;
    o->tupleIndex = flag;

    unsigned total_header_len = 4 + (peak_count + interim_count) * (F2DOT14::static_size);
    return compiled_tuple_header.resize (total_header_len);
  }

  unsigned encode_peak_coords (hb_array_t<F2DOT14> peak_coords,
                               unsigned& flag,
                               const hb_map_t& axes_index_map,
                               const hb_map_t& axes_old_index_tag_map) const
  {
    unsigned orig_axis_count = axes_old_index_tag_map.get_population ();
    auto it = peak_coords.iter ();
    unsigned count = 0;
    for (unsigned i = 0; i < orig_axis_count; i++)
    {
      if (!axes_index_map.has (i)) /* axis pinned */
        continue;
      hb_tag_t axis_tag = axes_old_index_tag_map.get (i);
      Triple *coords;
      if (!axis_tuples.has (axis_tag, &coords))
        (*it).set_int (0);
      else
        (*it).set_float (coords->middle);
      it++;
      count++;
    }
    flag |= TupleVariationHeader::TuppleIndex::EmbeddedPeakTuple;
    return count;
  }

  /* encode intermediate coords; returns 0 when they need not be encoded */
  unsigned encode_interm_coords (hb_array_t<F2DOT14> coords,
                                 unsigned& flag,
                                 const hb_map_t& axes_index_map,
                                 const hb_map_t& axes_old_index_tag_map) const
  {
    unsigned orig_axis_count = axes_old_index_tag_map.get_population ();
    unsigned cur_axis_count = axes_index_map.get_population ();

    auto start_coords_iter = coords.sub_array (0, cur_axis_count).iter ();
    auto end_coords_iter = coords.sub_array (cur_axis_count).iter ();
    bool encode_needed = false;
    unsigned count = 0;
    for (unsigned i = 0; i < orig_axis_count; i++)
    {
      if (!axes_index_map.has (i)) /* axis pinned */
        continue;
      hb_tag_t axis_tag = axes_old_index_tag_map.get (i);
      Triple *coords;
      float min_val = 0.f, val = 0.f, max_val = 0.f;
      if (axis_tuples.has (axis_tag, &coords))
      {
        min_val = coords->minimum;
        val = coords->middle;
        max_val = coords->maximum;
      }

      (*start_coords_iter).set_float (min_val);
      (*end_coords_iter).set_float (max_val);

      start_coords_iter++;
      end_coords_iter++;
      count += 2;
      if (min_val != hb_min (val, 0.f) || max_val != hb_max (val, 0.f))
        encode_needed = true;
    }

    if (encode_needed)
    {
      flag |= TupleVariationHeader::TuppleIndex::IntermediateRegion;
      return count;
    }
    return 0;
  }

  bool compile_deltas ()
  {
    hb_vector_t<int> rounded_deltas;
    if (unlikely (!rounded_deltas.alloc (indices.length)))
      return false;

    for (unsigned i = 0; i < indices.length; i++)
    {
      if (!indices[i]) continue;
      int rounded_delta = (int) roundf (deltas_x[i]);
      rounded_deltas.push (rounded_delta);
    }

    if (!rounded_deltas) return false;
    /* allocate enough memory: at most 3 bytes per delta (1 run header + 2 data bytes) */
    unsigned alloc_len = 3 * rounded_deltas.length;
    if (deltas_y)
      alloc_len *= 2;

    if (unlikely (!compiled_deltas.resize (alloc_len))) return false;

    unsigned i = 0;
    unsigned encoded_len = encode_delta_run (i, compiled_deltas.as_array (), rounded_deltas);

    if (deltas_y)
    {
      /* reuse the rounded_deltas vector; check that deltas_y has the same number of deltas as deltas_x */
      unsigned j = 0;
      for (unsigned idx = 0; idx < indices.length; idx++)
      {
        if (!indices[idx]) continue;
        int rounded_delta = (int) roundf (deltas_y[idx]);

        if (j >= rounded_deltas.length) return false;

        rounded_deltas[j++] = rounded_delta;
      }

      if (j != rounded_deltas.length) return false;
      /* reset i because we reuse rounded_deltas for deltas_y */
      i = 0;
      encoded_len += encode_delta_run (i, compiled_deltas.as_array ().sub_array (encoded_len), rounded_deltas);
    }
    return compiled_deltas.resize (encoded_len);
  }

  unsigned encode_delta_run (unsigned& i,
                             hb_array_t<char> encoded_bytes,
                             const hb_vector_t<int>& deltas) const
  {
    unsigned num_deltas = deltas.length;
    unsigned encoded_len = 0;
    while (i < num_deltas)
    {
      int val = deltas[i];
      if (val == 0)
        encoded_len += encode_delta_run_as_zeroes (i, encoded_bytes.sub_array (encoded_len), deltas);
      else if (val >= -128 && val <= 127)
        encoded_len += encode_delta_run_as_bytes (i, encoded_bytes.sub_array (encoded_len), deltas);
      else
        encoded_len += encode_delta_run_as_words (i, encoded_bytes.sub_array (encoded_len), deltas);
    }
    return encoded_len;
  }

  unsigned encode_delta_run_as_zeroes (unsigned& i,
                                       hb_array_t<char> encoded_bytes,
                                       const hb_vector_t<int>& deltas) const
  {
    unsigned num_deltas = deltas.length;
    unsigned run_length = 0;
    auto it = encoded_bytes.iter ();
    unsigned encoded_len = 0;
    while (i < num_deltas && deltas[i] == 0)
    {
      i++;
      run_length++;
    }

    while (run_length >= 64)
    {
      *it++ = char (DELTAS_ARE_ZERO | 63);
      run_length -= 64;
      encoded_len++;
    }

    if (run_length)
    {
      *it++ = char (DELTAS_ARE_ZERO | (run_length - 1));
      encoded_len++;
    }
    return encoded_len;
  }

  unsigned encode_delta_run_as_bytes (unsigned &i,
                                      hb_array_t<char> encoded_bytes,
                                      const hb_vector_t<int>& deltas) const
  {
    unsigned start = i;
    unsigned num_deltas = deltas.length;
    while (i < num_deltas)
    {
      int val = deltas[i];
      if (val > 127 || val < -128)
        break;

      /* from fonttools: if there are two or more zeros in a sequence,
       * it is better to start a new run to save bytes. */
      if (val == 0 && i + 1 < num_deltas && deltas[i+1] == 0)
        break;

      i++;
    }
    unsigned run_length = i - start;

    unsigned encoded_len = 0;
    auto it = encoded_bytes.iter ();

    while (run_length >= 64)
    {
      *it++ = 63;
      encoded_len++;

      for (unsigned j = 0; j < 64; j++)
      {
        *it++ = static_cast<char> (deltas[start + j]);
        encoded_len++;
      }

      start += 64;
      run_length -= 64;
    }

    if (run_length)
    {
      *it++ = run_length - 1;
      encoded_len++;

      while (start < i)
      {
        *it++ = static_cast<char> (deltas[start++]);
        encoded_len++;
      }
    }

    return encoded_len;
  }

  unsigned encode_delta_run_as_words (unsigned &i,
                                      hb_array_t<char> encoded_bytes,
                                      const hb_vector_t<int>& deltas) const
  {
    unsigned start = i;
    unsigned num_deltas = deltas.length;
    while (i < num_deltas)
    {
      int val = deltas[i];

      /* start a new run for a single zero value */
      if (val == 0) break;

      /* from fonttools: continue a word-encoded run if there is only a
       * single value in the range [-128, 127], because that is more compact.
       * Only start a new run when there are two consecutive such values. */
      if (val >= -128 && val <= 127 &&
          i + 1 < num_deltas &&
          deltas[i+1] >= -128 && deltas[i+1] <= 127)
        break;

      i++;
    }

    unsigned run_length = i - start;
    auto it = encoded_bytes.iter ();
    unsigned encoded_len = 0;
    while (run_length >= 64)
    {
      *it++ = (DELTAS_ARE_WORDS | 63);
      encoded_len++;

      for (unsigned j = 0; j < 64; j++)
      {
        int16_t delta_val = deltas[start + j];
        *it++ = static_cast<char> (delta_val >> 8);
        *it++ = static_cast<char> (delta_val & 0xFF);

        encoded_len += 2;
      }

      start += 64;
      run_length -= 64;
    }

    if (run_length)
    {
      *it++ = (DELTAS_ARE_WORDS | (run_length - 1));
      encoded_len++;
      while (start < i)
      {
        int16_t delta_val = deltas[start++];
        *it++ = static_cast<char> (delta_val >> 8);
        *it++ = static_cast<char> (delta_val & 0xFF);

        encoded_len += 2;
      }
    }
    return encoded_len;
  }
};

struct TupleVariationData
{
  bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    // Check min_size only here; TupleVariationHeader and the variation
    // data are validated as they are accessed through the iterator.
    return_trace (c->check_struct (this));
  }

  unsigned get_size (unsigned axis_count) const
  {
    unsigned total_size = min_size;
    unsigned count = tupleVarCount.get_count ();
    const TupleVariationHeader *tuple_var_header = &(get_tuple_var_header ());
    for (unsigned i = 0; i < count; i++)
    {
      total_size += tuple_var_header->get_size (axis_count) + tuple_var_header->get_data_size ();
      tuple_var_header = &tuple_var_header->get_next (axis_count);
    }

    return total_size;
  }

  const TupleVariationHeader &get_tuple_var_header (void) const
  { return StructAfter<TupleVariationHeader> (data); }

  struct tuple_iterator_t;
  struct tuple_variations_t
  {
    hb_vector_t<tuple_delta_t> tuple_vars;

    private:
    /* referenced point set -> compiled point data map */
    hb_hashmap_t<const hb_vector_t<bool>*, hb_bytes_t> point_data_map;
    /* referenced point set -> count map, used in finding shared points */
    hb_hashmap_t<const hb_vector_t<bool>*, unsigned> point_set_count_map;

    public:
    ~tuple_variations_t () { fini (); }
    void fini ()
    {
      for (auto _ : point_data_map.values ())
        _.fini ();

      point_set_count_map.fini ();
      tuple_vars.fini ();
    }

    unsigned get_var_count () const
    { return tuple_vars.length; }

    bool create_from_tuple_var_data (tuple_iterator_t iterator,
                                     unsigned tuple_var_count,
                                     unsigned point_count,
                                     bool is_gvar,
                                     const hb_map_t *axes_old_index_tag_map,
                                     const hb_vector_t<unsigned> &shared_indices,
                                     const hb_array_t<const F2DOT14> shared_tuples)
    {
      do
      {
        const HBUINT8 *p = iterator.get_serialized_data ();
        unsigned int length = iterator.current_tuple->get_data_size ();
        if (unlikely (!iterator.var_data_bytes.check_range (p, length)))
        { fini (); return false; }

        hb_hashmap_t<hb_tag_t, Triple> axis_tuples;
        if (!iterator.current_tuple->unpack_axis_tuples (iterator.get_axis_count (), shared_tuples, axes_old_index_tag_map, axis_tuples)
            || axis_tuples.is_empty ())
        { fini (); return false; }

        hb_vector_t<unsigned> private_indices;
        bool has_private_points = iterator.current_tuple->has_private_points ();
        const HBUINT8 *end = p + length;
        if (has_private_points &&
            !TupleVariationData::unpack_points (p, private_indices, end))
        { fini (); return false; }

        const hb_vector_t<unsigned> &indices = has_private_points ? private_indices : shared_indices;
        bool apply_to_all = (indices.length == 0);
        unsigned num_deltas = apply_to_all ? point_count : indices.length;

        hb_vector_t<int> deltas_x;

        if (unlikely (!deltas_x.resize (num_deltas, false) ||
                      !TupleVariationData::unpack_deltas (p, deltas_x, end)))
        { fini (); return false; }

        hb_vector_t<int> deltas_y;
        if (is_gvar)
        {
          if (unlikely (!deltas_y.resize (num_deltas, false) ||
                        !TupleVariationData::unpack_deltas (p, deltas_y, end)))
          { fini (); return false; }
        }

        tuple_delta_t var;
        var.axis_tuples = std::move (axis_tuples);
        if (unlikely (!var.indices.resize (point_count) ||
                      !var.deltas_x.resize (point_count, false)))
        { fini (); return false; }

        if (is_gvar && unlikely (!var.deltas_y.resize (point_count, false)))
        { fini (); return false; }

        for (unsigned i = 0; i < num_deltas; i++)
        {
          unsigned idx = apply_to_all ? i : indices[i];
          if (idx >= point_count) continue;
          var.indices[idx] = true;
          var.deltas_x[idx] = static_cast<float> (deltas_x[i]);
          if (is_gvar)
            var.deltas_y[idx] = static_cast<float> (deltas_y[i]);
        }
        tuple_vars.push (std::move (var));
      } while (iterator.move_to_next ());
      return true;
    }

    void change_tuple_variations_axis_limits (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
                                              const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances)
    {
      for (auto _ : normalized_axes_location)
      {
        hb_tag_t axis_tag = _.first;
        Triple axis_limit = _.second;
        TripleDistances axis_triple_distances {1.f, 1.f};
        if (axes_triple_distances.has (axis_tag))
          axis_triple_distances = axes_triple_distances.get (axis_tag);

        hb_vector_t<tuple_delta_t> new_vars;
        for (const tuple_delta_t& var : tuple_vars)
        {
          hb_vector_t<tuple_delta_t> out = var.change_tuple_var_axis_limit (axis_tag, axis_limit, axis_triple_distances);
          if (!out) continue;
          unsigned new_len = new_vars.length + out.length;

          if (unlikely (!new_vars.alloc (new_len, false)))
          { fini (); return; }

          for (unsigned i = 0; i < out.length; i++)
            new_vars.push (std::move (out[i]));
        }
        tuple_vars.fini ();
        tuple_vars = std::move (new_vars);
      }
    }

    /* merge tuple variations with identical tents */
    void merge_tuple_variations ()
    {
      hb_vector_t<tuple_delta_t> new_vars;
      hb_hashmap_t<hb_hashmap_t<hb_tag_t, Triple>, unsigned> m;
      unsigned i = 0;
      for (const tuple_delta_t& var : tuple_vars)
      {
        /* if all axes are pinned, drop the tuple variation */
        if (var.axis_tuples.is_empty ()) continue;

        unsigned *idx;
        if (m.has (var.axis_tuples, &idx))
        {
          new_vars[*idx] += var;
        }
        else
        {
          new_vars.push (var);
          m.set (var.axis_tuples, i);
          i++;
        }
      }
      tuple_vars.fini ();
      tuple_vars = std::move (new_vars);
    }

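    /* A worked example of the packed point-number encoding built below:
     * for a glyph with 8 points of which points 2, 5 and 6 are referenced,
     * the output is
     *
     *   0x03              total number of referenced points
     *   0x02              run header: byte-encoded deltas, (3 - 1)
     *   0x02 0x03 0x01    point-number deltas 2-0, 5-2, 6-5
     *
     * while a set referencing all points is the single byte 0x00. */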
    hb_bytes_t compile_point_set (const hb_vector_t<bool> &point_indices)
    {
      unsigned num_points = 0;
      for (bool i : point_indices)
        if (i) num_points++;

      unsigned indices_length = point_indices.length;
      /* If the point set consists of all points in the glyph, it's encoded
       * with a single zero byte */
      if (num_points == indices_length)
      {
        char *p = (char *) hb_calloc (1, sizeof (char));
        if (unlikely (!p)) return hb_bytes_t ();

        return hb_bytes_t (p, 1);
      }

      /* allocate enough memory: 2 bytes for the count + 3 bytes for each point */
      unsigned num_bytes = 2 + 3 * num_points;
      char *p = (char *) hb_calloc (num_bytes, sizeof (char));
      if (unlikely (!p)) return hb_bytes_t ();

      unsigned pos = 0;
      /* binary data starts with the total number of referenced points */
      if (num_points < 0x80)
        p[pos++] = num_points;
      else
      {
        p[pos++] = ((num_points >> 8) | 0x80);
        p[pos++] = num_points & 0xFF;
      }

      const unsigned max_run_length = 0x7F;
      unsigned i = 0;
      unsigned last_value = 0;
      unsigned num_encoded = 0;
      while (i < indices_length && num_encoded < num_points)
      {
        unsigned run_length = 0;
        unsigned header_pos = pos;
        p[pos++] = 0;

        bool use_byte_encoding = false;
        bool new_run = true;
        while (i < indices_length && num_encoded < num_points &&
               run_length <= max_run_length)
        {
          // find the next referenced point index
          while (i < indices_length && !point_indices[i])
            i++;

          if (i >= indices_length) break;

          unsigned cur_value = i;
          unsigned delta = cur_value - last_value;

          if (new_run)
          {
            use_byte_encoding = (delta <= 0xFF);
            new_run = false;
          }

          if (use_byte_encoding && delta > 0xFF)
            break;

          if (use_byte_encoding)
            p[pos++] = delta;
          else
          {
            p[pos++] = delta >> 8;
            p[pos++] = delta & 0xFF;
          }
          i++;
          last_value = cur_value;
          run_length++;
          num_encoded++;
        }

        if (use_byte_encoding)
          p[header_pos] = run_length - 1;
        else
          p[header_pos] = (run_length - 1) | 0x80;
      }
      return hb_bytes_t (p, pos);
    }

    /* Compile all point sets and store the byte data in a
     * point_set -> hb_bytes_t hashmap; also update the point_set -> count
     * map, which is used in finding the shared point set. */
    bool compile_all_point_sets ()
    {
      for (const auto& tuple : tuple_vars)
      {
        const hb_vector_t<bool>* points_set = &(tuple.indices);
        if (point_data_map.has (points_set))
        {
          unsigned *count;
          if (unlikely (!point_set_count_map.has (points_set, &count) ||
                        !point_set_count_map.set (points_set, (*count) + 1)))
            return false;
          continue;
        }

        hb_bytes_t compiled_data = compile_point_set (*points_set);
        if (unlikely (compiled_data == hb_bytes_t ()))
          return false;

        if (!point_data_map.set (points_set, compiled_data) ||
            !point_set_count_map.set (points_set, 1))
          return false;
      }
      return true;
    }

    /* find the shared point set that saves the most bytes */
    hb_bytes_t find_shared_points ()
    {
      unsigned max_saved_bytes = 0;
      hb_bytes_t res{};

      for (const auto& _ : point_data_map.iter ())
      {
        const hb_vector_t<bool>* points_set = _.first;
        unsigned data_length = _.second.length;
        unsigned *count;
        if (unlikely (!point_set_count_map.has (points_set, &count) ||
                      *count <= 1))
          return hb_bytes_t ();

        unsigned saved_bytes = data_length * ((*count) - 1);
        if (saved_bytes > max_saved_bytes)
        {
          max_saved_bytes = saved_bytes;
          res = _.second;
        }
      }
      return res;
    }

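    /* Instancing pipeline: first re-limit every tuple's tents to the new
     * axis ranges (which may split one tuple variation into several),
     * then merge variations that end up with identical tents. */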
    void instantiate (const hb_hashmap_t<hb_tag_t, Triple>& normalized_axes_location,
                      const hb_hashmap_t<hb_tag_t, TripleDistances>& axes_triple_distances)
    {
      change_tuple_variations_axis_limits (normalized_axes_location, axes_triple_distances);
      merge_tuple_variations ();
    }

    bool compile_bytes (const hb_map_t& axes_index_map,
                        const hb_map_t& axes_old_index_tag_map)
    {
      // compile the point sets and store the data in a hashmap
      if (!compile_all_point_sets ())
        return false;
      // compile the deltas and the tuple var header for each tuple variation
      for (auto& tuple : tuple_vars)
      {
        const hb_vector_t<bool>* points_set = &(tuple.indices);
        hb_bytes_t *points_data;
        if (unlikely (!point_data_map.has (points_set, &points_data)))
          return false;

        if (!tuple.compile_deltas ())
          return false;

        if (!tuple.compile_tuple_var_header (axes_index_map, points_data->length, axes_old_index_tag_map))
          return false;
      }
      return true;
    }

    bool serialize_var_headers (hb_serialize_context_t *c, unsigned& total_header_len) const
    {
      TRACE_SERIALIZE (this);
      for (const auto& tuple : tuple_vars)
      {
        tuple.compiled_tuple_header.as_array ().copy (c);
        if (c->in_error ()) return_trace (false);
        total_header_len += tuple.compiled_tuple_header.length;
      }
      return_trace (true);
    }

    bool serialize_var_data (hb_serialize_context_t *c) const
    {
      TRACE_SERIALIZE (this);
      for (const auto& tuple : tuple_vars)
      {
        const hb_vector_t<bool>* points_set = &(tuple.indices);
        hb_bytes_t *point_data;
        if (!point_data_map.has (points_set, &point_data))
          return_trace (false);

        point_data->copy (c);
        tuple.compiled_deltas.as_array ().copy (c);
        if (c->in_error ()) return_trace (false);
      }
      return_trace (true);
    }
  };

  struct tuple_iterator_t
  {
    unsigned get_axis_count () const { return axis_count; }

    void init (hb_bytes_t var_data_bytes_, unsigned int axis_count_, const void *table_base_)
    {
      var_data_bytes = var_data_bytes_;
      var_data = var_data_bytes_.as<TupleVariationData> ();
      index = 0;
      axis_count = axis_count_;
      current_tuple = &var_data->get_tuple_var_header ();
      data_offset = 0;
      table_base = table_base_;
    }

    bool get_shared_indices (hb_vector_t<unsigned int> &shared_indices /* OUT */)
    {
      if (var_data->has_shared_point_numbers ())
      {
        const HBUINT8 *base = &(table_base+var_data->data);
        const HBUINT8 *p = base;
        if (!unpack_points (p, shared_indices, (const HBUINT8 *) (var_data_bytes.arrayZ + var_data_bytes.length))) return false;
        data_offset = p - base;
      }
      return true;
    }

    bool is_valid () const
    {
      return (index < var_data->tupleVarCount.get_count ()) &&
             var_data_bytes.check_range (current_tuple, TupleVariationHeader::min_size) &&
             var_data_bytes.check_range (current_tuple, hb_max (current_tuple->get_data_size (),
                                                                current_tuple->get_size (axis_count)));
    }

    bool move_to_next ()
    {
      data_offset += current_tuple->get_data_size ();
      current_tuple = &current_tuple->get_next (axis_count);
      index++;
      return is_valid ();
    }

    const HBUINT8 *get_serialized_data () const
    { return &(table_base+var_data->data) + data_offset; }

    private:
    const TupleVariationData *var_data;
    unsigned int index;
    unsigned int axis_count;
    unsigned int data_offset;
    const void *table_base;

    public:
    hb_bytes_t var_data_bytes;
    const TupleVariationHeader *current_tuple;
  };

  static bool get_tuple_iterator (hb_bytes_t var_data_bytes, unsigned axis_count,
                                  const void *table_base,
                                  hb_vector_t<unsigned int> &shared_indices /* OUT */,
                                  tuple_iterator_t *iterator /* OUT */)
  {
    iterator->init (var_data_bytes, axis_count, table_base);
    if (!iterator->get_shared_indices (shared_indices))
      return false;
    return iterator->is_valid ();
  }
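
  /* A minimal iteration sketch (hypothetical locals): walking every tuple
   * variation table in a glyph's serialized data looks like
   *
   *   hb_vector_t<unsigned> shared_indices;
   *   TupleVariationData::tuple_iterator_t iter;
   *   if (TupleVariationData::get_tuple_iterator (var_data_bytes, axis_count,
   *                                               table_base, shared_indices, &iter))
   *     do { process (iter.current_tuple); } while (iter.move_to_next ());
   *
   * where var_data_bytes, axis_count, table_base and process () are assumed
   * to come from the caller. */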

  bool has_shared_point_numbers () const { return tupleVarCount.has_shared_point_numbers (); }

  static bool unpack_points (const HBUINT8 *&p /* IN/OUT */,
                             hb_vector_t<unsigned int> &points /* OUT */,
                             const HBUINT8 *end)
  {
    enum packed_point_flag_t
    {
      POINTS_ARE_WORDS     = 0x80,
      POINT_RUN_COUNT_MASK = 0x7F
    };

    if (unlikely (p + 1 > end)) return false;

    unsigned count = *p++;
    if (count & POINTS_ARE_WORDS)
    {
      if (unlikely (p + 1 > end)) return false;
      count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++;
    }
    if (unlikely (!points.resize (count, false))) return false;

    unsigned n = 0;
    unsigned i = 0;
    while (i < count)
    {
      if (unlikely (p + 1 > end)) return false;
      unsigned control = *p++;
      unsigned run_count = (control & POINT_RUN_COUNT_MASK) + 1;
      unsigned stop = i + run_count;
      if (unlikely (stop > count)) return false;
      if (control & POINTS_ARE_WORDS)
      {
        if (unlikely (p + run_count * HBUINT16::static_size > end)) return false;
        for (; i < stop; i++)
        {
          n += *(const HBUINT16 *)p;
          points.arrayZ[i] = n;
          p += HBUINT16::static_size;
        }
      }
      else
      {
        if (unlikely (p + run_count > end)) return false;
        for (; i < stop; i++)
        {
          n += *p++;
          points.arrayZ[i] = n;
        }
      }
    }
    return true;
  }

  static bool unpack_deltas (const HBUINT8 *&p /* IN/OUT */,
                             hb_vector_t<int> &deltas /* IN/OUT */,
                             const HBUINT8 *end)
  {
    unsigned i = 0;
    unsigned count = deltas.length;
    while (i < count)
    {
      if (unlikely (p + 1 > end)) return false;
      unsigned control = *p++;
      unsigned run_count = (control & DELTA_RUN_COUNT_MASK) + 1;
      unsigned stop = i + run_count;
      if (unlikely (stop > count)) return false;
      if (control & DELTAS_ARE_ZERO)
      {
        for (; i < stop; i++)
          deltas.arrayZ[i] = 0;
      }
      else if (control & DELTAS_ARE_WORDS)
      {
        if (unlikely (p + run_count * HBUINT16::static_size > end)) return false;
        for (; i < stop; i++)
        {
          deltas.arrayZ[i] = * (const HBINT16 *) p;
          p += HBUINT16::static_size;
        }
      }
      else
      {
        if (unlikely (p + run_count > end)) return false;
        for (; i < stop; i++)
        {
          deltas.arrayZ[i] = * (const HBINT8 *) p++;
        }
      }
    }
    return true;
  }

  bool has_data () const { return tupleVarCount; }

  bool decompile_tuple_variations (unsigned point_count,
                                   bool is_gvar,
                                   tuple_iterator_t iterator,
                                   const hb_map_t *axes_old_index_tag_map,
                                   const hb_vector_t<unsigned> &shared_indices,
                                   const hb_array_t<const F2DOT14> shared_tuples,
                                   tuple_variations_t& tuple_variations /* OUT */) const
  {
    return tuple_variations.create_from_tuple_var_data (iterator, tupleVarCount,
                                                        point_count, is_gvar,
                                                        axes_old_index_tag_map,
                                                        shared_indices,
                                                        shared_tuples);
  }

  bool serialize (hb_serialize_context_t *c,
                  bool is_gvar,
                  tuple_variations_t& tuple_variations) const
  {
    TRACE_SERIALIZE (this);
    auto *out = c->start_embed (this);
    if (unlikely (!c->extend_min (out))) return_trace (false);

    if (!c->check_assign (out->tupleVarCount, tuple_variations.get_var_count (),
                          HB_SERIALIZE_ERROR_INT_OVERFLOW)) return_trace (false);

    unsigned total_header_len = 0;

    if (!tuple_variations.serialize_var_headers (c, total_header_len))
      return_trace (false);

    unsigned data_offset = min_size + total_header_len;
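    /* For 'cvar' (the non-gvar case) the offset is assumed to be measured
     * from the start of the cvar table, whose 4-byte version field precedes
     * tupleVariationCount; hence the extra 4 below. */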
    if (!is_gvar) data_offset += 4;
    if (!c->check_assign (out->data, data_offset, HB_SERIALIZE_ERROR_INT_OVERFLOW)) return_trace (false);

    return tuple_variations.serialize_var_data (c);
  }

  protected:
  struct TupleVarCount : HBUINT16
  {
    bool has_shared_point_numbers () const { return ((*this) & SharedPointNumbers); }
    unsigned int get_count () const { return (*this) & CountMask; }
    TupleVarCount& operator = (uint16_t i) { HBUINT16::operator= (i); return *this; }

    protected:
    enum Flags
    {
      SharedPointNumbers = 0x8000u,
      CountMask          = 0x0FFFu
    };
    public:
    DEFINE_SIZE_STATIC (2);
  };

  TupleVarCount tupleVarCount;  /* A packed field. The high 4 bits are flags, and the
                                 * low 12 bits are the number of tuple variation tables
                                 * for this glyph. The number of tuple variation tables
                                 * can be any number between 1 and 4095. */
  Offset16To<HBUINT8>
                data;           /* Offset from the start of the base table
                                 * to the serialized data. */
  /* TupleVariationHeader tupleVariationHeaders[] *//* Array of tuple variation headers. */
  public:
  DEFINE_SIZE_MIN (4);
};

} /* namespace OT */


#endif /* HB_OT_VAR_COMMON_HH */