/*
 * Copyright © 2019 Adobe Inc.
 * Copyright © 2019 Ebrahim Byagowi
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Adobe Author(s): Michiharu Ariza
 */

#ifndef HB_OT_VAR_GVAR_TABLE_HH
#define HB_OT_VAR_GVAR_TABLE_HH

#include "hb-open-type.hh"

/*
 * gvar -- Glyph Variation Table
 * https://docs.microsoft.com/en-us/typography/opentype/spec/gvar
 */
#define HB_OT_TAG_gvar HB_TAG('g','v','a','r')

namespace OT {

struct contour_point_t
{
  void init (float x_ = 0.f, float y_ = 0.f, bool is_end_point_ = false)
  { flag = 0; x = x_; y = y_; is_end_point = is_end_point_; }

  void translate (const contour_point_t &p) { x += p.x; y += p.y; }

  uint8_t flag;
  float x, y;
  bool is_end_point;
};

struct contour_point_vector_t : hb_vector_t<contour_point_t>
{
  void extend (const hb_array_t<contour_point_t> &a)
  {
    unsigned int old_len = length;
    resize (old_len + a.length);
    for (unsigned int i = 0; i < a.length; i++)
      (*this)[old_len + i] = a[i];
  }

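  /* Apply a 2x2 transform to every point; the matrix is laid out as
   * {m[0], m[1], m[2], m[3]} with new_x = x*m[0] + y*m[2] and
   * new_y = x*m[1] + y*m[3]. */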
  void transform (const float (&matrix)[4])
  {
    for (unsigned int i = 0; i < length; i++)
    {
      contour_point_t &p = (*this)[i];
      float x_ = p.x * matrix[0] + p.y * matrix[2];
      p.y = p.x * matrix[1] + p.y * matrix[3];
      p.x = x_;
    }
  }

  void translate (const contour_point_t& delta)
  {
    for (unsigned int i = 0; i < length; i++)
      (*this)[i].translate (delta);
  }
};

/* https://docs.microsoft.com/en-us/typography/opentype/spec/otvarcommonformats#tuplevariationheader */
struct TupleVariationHeader
{
  unsigned get_size (unsigned axis_count) const
  { return min_size + get_all_tuples (axis_count).get_size (); }

  unsigned get_data_size () const { return varDataSize; }

  const TupleVariationHeader &get_next (unsigned axis_count) const
  { return StructAtOffset<TupleVariationHeader> (this, get_size (axis_count)); }

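  /* Compute the scalar multiplier this tuple contributes for the given
   * normalized design-space coordinates, per the OpenType tuple variation
   * store algorithm: the peak tuple comes either from the header itself or
   * from the shared tuples array, intermediate start/end tuples (if present)
   * restrict the applicable region, and a result of 0.0 means this tuple
   * does not apply at all. */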
  float calculate_scalar (const int *coords, unsigned int coord_count,
                          const hb_array_t<const F2DOT14> shared_tuples) const
  {
    hb_array_t<const F2DOT14> peak_tuple;

    if (has_peak ())
      peak_tuple = get_peak_tuple (coord_count);
    else
    {
      unsigned int index = get_index ();
      if (unlikely (index * coord_count >= shared_tuples.length))
        return 0.f;
      peak_tuple = shared_tuples.sub_array (coord_count * index, coord_count);
    }

    hb_array_t<const F2DOT14> start_tuple;
    hb_array_t<const F2DOT14> end_tuple;
    if (has_intermediate ())
    {
      start_tuple = get_start_tuple (coord_count);
      end_tuple = get_end_tuple (coord_count);
    }

    float scalar = 1.f;
    for (unsigned int i = 0; i < coord_count; i++)
    {
      int v = coords[i];
      int peak = peak_tuple[i];
      if (!peak || v == peak) continue;

      if (has_intermediate ())
      {
        int start = start_tuple[i];
        int end = end_tuple[i];
        if (unlikely (start > peak || peak > end ||
                      (start < 0 && end > 0 && peak))) continue;
        if (v < start || v > end) return 0.f;
        if (v < peak)
        { if (peak != start) scalar *= (float) (v - start) / (peak - start); }
        else
        { if (peak != end) scalar *= (float) (end - v) / (end - peak); }
      }
      else if (!v || v < hb_min (0, peak) || v > hb_max (0, peak)) return 0.f;
      else
        scalar *= (float) v / peak;
    }
    return scalar;
  }

  bool has_peak () const           { return (tupleIndex & TuppleIndex::EmbeddedPeakTuple); }
  bool has_intermediate () const   { return (tupleIndex & TuppleIndex::IntermediateRegion); }
  bool has_private_points () const { return (tupleIndex & TuppleIndex::PrivatePointNumbers); }
  unsigned int get_index () const  { return (tupleIndex & TuppleIndex::TupleIndexMask); }

  protected:
  struct TuppleIndex : HBUINT16
  {
    enum Flags {
      EmbeddedPeakTuple   = 0x8000u,
      IntermediateRegion  = 0x4000u,
      PrivatePointNumbers = 0x2000u,
      TupleIndexMask      = 0x0FFFu
    };

    DEFINE_SIZE_STATIC (2);
  };

  hb_array_t<const F2DOT14> get_all_tuples (unsigned axis_count) const
  { return StructAfter<UnsizedArrayOf<F2DOT14>> (tupleIndex).as_array ((has_peak () + has_intermediate () * 2) * axis_count); }
  hb_array_t<const F2DOT14> get_peak_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (0, axis_count); }
  hb_array_t<const F2DOT14> get_start_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count, axis_count); }
  hb_array_t<const F2DOT14> get_end_tuple (unsigned axis_count) const
  { return get_all_tuples (axis_count).sub_array (has_peak () * axis_count + axis_count, axis_count); }

  HBUINT16    varDataSize;  /* The size in bytes of the serialized
                             * data for this tuple variation table. */
  TuppleIndex tupleIndex;   /* A packed field. The high 4 bits are flags (see below).
                               The low 12 bits are an index into a shared tuple
                               records array. */
  /* UnsizedArrayOf<F2DOT14> peakTuple - optional */
                            /* Peak tuple record for this tuple variation table — optional,
                             * determined by flags in the tupleIndex value.
                             *
                             * Note that this must always be included in the 'cvar' table. */
  /* UnsizedArrayOf<F2DOT14> intermediateStartTuple - optional */
                            /* Intermediate start tuple record for this tuple variation table — optional,
                               determined by flags in the tupleIndex value. */
  /* UnsizedArrayOf<F2DOT14> intermediateEndTuple - optional */
                            /* Intermediate end tuple record for this tuple variation table — optional,
                             * determined by flags in the tupleIndex value. */
  public:
  DEFINE_SIZE_MIN (4);
};

struct GlyphVariationData
{
  const TupleVariationHeader &get_tuple_var_header (void) const
  { return StructAfter<TupleVariationHeader> (data); }

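  /* Iterator over the TupleVariationHeaders of one GlyphVariationData table;
   * it also exposes the shared point numbers (if any) and the offset of each
   * tuple's serialized delta data. */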
  struct tuple_iterator_t
  {
    void init (hb_bytes_t var_data_bytes_, unsigned int axis_count_)
    {
      var_data_bytes = var_data_bytes_;
      var_data = var_data_bytes_.as<GlyphVariationData> ();
      index = 0;
      axis_count = axis_count_;
      current_tuple = &var_data->get_tuple_var_header ();
      data_offset = 0;
    }

    bool get_shared_indices (hb_vector_t<unsigned int> &shared_indices /* OUT */)
    {
      if (var_data->has_shared_point_numbers ())
      {
        const HBUINT8 *base = &(var_data+var_data->data);
        const HBUINT8 *p = base;
        if (!unpack_points (p, shared_indices, var_data_bytes)) return false;
        data_offset = p - base;
      }
      return true;
    }

    bool is_valid () const
    {
      return (index < var_data->tupleVarCount.get_count ()) &&
             var_data_bytes.check_range (current_tuple, TupleVariationHeader::min_size) &&
             var_data_bytes.check_range (current_tuple, hb_max (current_tuple->get_data_size (), current_tuple->get_size (axis_count))) &&
             current_tuple->get_size (axis_count);
    }

    bool move_to_next ()
    {
      data_offset += current_tuple->get_data_size ();
      current_tuple = &current_tuple->get_next (axis_count);
      index++;
      return is_valid ();
    }

    const HBUINT8 *get_serialized_data () const
    { return &(var_data+var_data->data) + data_offset; }

    private:
    const GlyphVariationData *var_data;
    unsigned int index;
    unsigned int axis_count;
    unsigned int data_offset;

    public:
    hb_bytes_t var_data_bytes;
    const TupleVariationHeader *current_tuple;
  };

  static bool get_tuple_iterator (hb_bytes_t var_data_bytes, unsigned axis_count,
                                  hb_vector_t<unsigned int> &shared_indices /* OUT */,
                                  tuple_iterator_t *iterator /* OUT */)
  {
    iterator->init (var_data_bytes, axis_count);
    if (!iterator->get_shared_indices (shared_indices))
      return false;
    return iterator->is_valid ();
  }

  bool has_shared_point_numbers () const { return tupleVarCount.has_shared_point_numbers (); }

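  /* Decode a packed point-number list (shared or private), advancing p past
   * the encoded data. The first byte(s) give the point count; each run has a
   * control byte whose high bit selects byte vs. word values, and each value
   * is a delta accumulated onto the previous point number. */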
  static bool unpack_points (const HBUINT8 *&p /* IN/OUT */,
                             hb_vector_t<unsigned int> &points /* OUT */,
                             const hb_bytes_t &bytes)
  {
    enum packed_point_flag_t
    {
      POINTS_ARE_WORDS     = 0x80,
      POINT_RUN_COUNT_MASK = 0x7F
    };

    if (unlikely (!bytes.check_range (p))) return false;

    uint16_t count = *p++;
    if (count & POINTS_ARE_WORDS)
    {
      if (unlikely (!bytes.check_range (p))) return false;
      count = ((count & POINT_RUN_COUNT_MASK) << 8) | *p++;
    }
    points.resize (count);

    unsigned int n = 0;
    uint16_t i = 0;
    while (i < count)
    {
      if (unlikely (!bytes.check_range (p))) return false;
      uint16_t j;
      uint8_t control = *p++;
      uint16_t run_count = (control & POINT_RUN_COUNT_MASK) + 1;
      if (control & POINTS_ARE_WORDS)
      {
        for (j = 0; j < run_count && i < count; j++, i++)
        {
          if (unlikely (!bytes.check_range ((const HBUINT16 *) p)))
            return false;
          n += *(const HBUINT16 *) p;
          points[i] = n;
          p += HBUINT16::static_size;
        }
      }
      else
      {
        for (j = 0; j < run_count && i < count; j++, i++)
        {
          if (unlikely (!bytes.check_range (p))) return false;
          n += *p++;
          points[i] = n;
        }
      }
      if (j < run_count) return false;
    }
    return true;
  }

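  /* Decode packed deltas into the pre-sized `deltas` vector, advancing p.
   * Each run is either all zeros, 16-bit words, or 8-bit bytes, selected by
   * the flags in the run's control byte. */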
  static bool unpack_deltas (const HBUINT8 *&p /* IN/OUT */,
                             hb_vector_t<int> &deltas /* IN/OUT */,
                             const hb_bytes_t &bytes)
  {
    enum packed_delta_flag_t
    {
      DELTAS_ARE_ZERO      = 0x80,
      DELTAS_ARE_WORDS     = 0x40,
      DELTA_RUN_COUNT_MASK = 0x3F
    };

    unsigned int i = 0;
    unsigned int count = deltas.length;
    while (i < count)
    {
      if (unlikely (!bytes.check_range (p))) return false;
      uint8_t control = *p++;
      unsigned int run_count = (control & DELTA_RUN_COUNT_MASK) + 1;
      unsigned int j;
      if (control & DELTAS_ARE_ZERO)
        for (j = 0; j < run_count && i < count; j++, i++)
          deltas[i] = 0;
      else if (control & DELTAS_ARE_WORDS)
        for (j = 0; j < run_count && i < count; j++, i++)
        {
          if (unlikely (!bytes.check_range ((const HBUINT16 *) p)))
            return false;
          deltas[i] = *(const HBINT16 *) p;
          p += HBUINT16::static_size;
        }
      else
        for (j = 0; j < run_count && i < count; j++, i++)
        {
          if (unlikely (!bytes.check_range (p)))
            return false;
          deltas[i] = *(const HBINT8 *) p++;
        }
      if (j < run_count)
        return false;
    }
    return true;
  }

  bool has_data () const { return tupleVarCount; }

  protected:
  struct TupleVarCount : HBUINT16
  {
    bool has_shared_point_numbers () const { return ((*this) & SharedPointNumbers); }
    unsigned int get_count () const { return (*this) & CountMask; }

    protected:
    enum Flags
    {
      SharedPointNumbers = 0x8000u,
      CountMask          = 0x0FFFu
    };
    public:
    DEFINE_SIZE_STATIC (2);
  };

  TupleVarCount tupleVarCount;  /* A packed field. The high 4 bits are flags, and the
                                 * low 12 bits are the number of tuple variation tables
                                 * for this glyph. The number of tuple variation tables
                                 * can be any number between 1 and 4095. */
  OffsetTo<HBUINT8>
                data;           /* Offset from the start of the GlyphVariationData table
                                 * to the serialized data. */
  /* TupleVariationHeader tupleVariationHeaders[] *//* Array of tuple variation headers. */
  public:
  DEFINE_SIZE_MIN (4);
};

struct gvar
{
  static constexpr hb_tag_t tableTag = HB_OT_TAG_gvar;

  bool sanitize_shallow (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (c->check_struct (this) && (version.major == 1) &&
                  (glyphCount == c->get_num_glyphs ()) &&
                  sharedTuples.sanitize (c, this, axisCount * sharedTupleCount) &&
                  (is_long_offset () ?
                     c->check_array (get_long_offset_array (), glyphCount+1) :
                     c->check_array (get_short_offset_array (), glyphCount+1)) &&
                  c->check_array (((const HBUINT8*)&(this+dataZ)) + get_offset (0),
                                  get_offset (glyphCount) - get_offset (0)));
  }

  /* GlyphVariationData not sanitized here; must be checked while accessing each glyph variation data */
  bool sanitize (hb_sanitize_context_t *c) const
  { return sanitize_shallow (c); }

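  /* Subset gvar: serialize a new table containing only the retained glyphs'
   * variation data, copying shared tuples verbatim and rebuilding the
   * per-glyph offset array (16-bit or 32-bit depending on total data size). */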
  bool subset (hb_subset_context_t *c) const
  {
    TRACE_SUBSET (this);

    gvar *out = c->serializer->allocate_min<gvar> ();
    if (unlikely (!out)) return_trace (false);

    out->version.major = 1;
    out->version.minor = 0;
    out->axisCount = axisCount;
    out->sharedTupleCount = sharedTupleCount;

    unsigned int num_glyphs = c->plan->num_output_glyphs ();
    out->glyphCount = num_glyphs;

    unsigned int subset_data_size = 0;
    for (hb_codepoint_t gid = 0; gid < num_glyphs; gid++)
    {
      hb_codepoint_t old_gid;
      if (!c->plan->old_gid_for_new_gid (gid, &old_gid)) continue;
      subset_data_size += get_glyph_var_data_bytes (c->source_blob, old_gid).length;
    }

    bool long_offset = subset_data_size & ~0xFFFFu;
    out->flags = long_offset ? 1 : 0;

    HBUINT8 *subset_offsets = c->serializer->allocate_size<HBUINT8> ((long_offset ? 4 : 2) * (num_glyphs + 1));
    if (!subset_offsets) return_trace (false);

    /* shared tuples */
    if (!sharedTupleCount || !sharedTuples)
      out->sharedTuples = 0;
    else
    {
      unsigned int shared_tuple_size = F2DOT14::static_size * axisCount * sharedTupleCount;
      F2DOT14 *tuples = c->serializer->allocate_size<F2DOT14> (shared_tuple_size);
      if (!tuples) return_trace (false);
      out->sharedTuples = (char *) tuples - (char *) out;
      memcpy (tuples, this+sharedTuples, shared_tuple_size);
    }

    char *subset_data = c->serializer->allocate_size<char> (subset_data_size);
    if (!subset_data) return_trace (false);
    out->dataZ = subset_data - (char *) out;

    unsigned int glyph_offset = 0;
    for (hb_codepoint_t gid = 0; gid < num_glyphs; gid++)
    {
      hb_codepoint_t old_gid;
      hb_bytes_t var_data_bytes = c->plan->old_gid_for_new_gid (gid, &old_gid)
                                  ? get_glyph_var_data_bytes (c->source_blob, old_gid)
                                  : hb_bytes_t ();

      if (long_offset)
        ((HBUINT32 *) subset_offsets)[gid] = glyph_offset;
      else
        ((HBUINT16 *) subset_offsets)[gid] = glyph_offset / 2;

      if (var_data_bytes.length > 0)
        memcpy (subset_data, var_data_bytes.arrayZ, var_data_bytes.length);
      subset_data += var_data_bytes.length;
      glyph_offset += var_data_bytes.length;
    }
    if (long_offset)
      ((HBUINT32 *) subset_offsets)[num_glyphs] = glyph_offset;
    else
      ((HBUINT16 *) subset_offsets)[num_glyphs] = glyph_offset / 2;

    return_trace (true);
  }

  protected:
  const hb_bytes_t get_glyph_var_data_bytes (hb_blob_t *blob, hb_codepoint_t glyph) const
  {
    unsigned start_offset = get_offset (glyph);
    unsigned length = get_offset (glyph+1) - start_offset;
    hb_bytes_t var_data = blob->as_bytes ().sub_array (((unsigned) dataZ) + start_offset, length);
    return likely (var_data.length >= GlyphVariationData::min_size) ? var_data : hb_bytes_t ();
  }

  bool is_long_offset () const { return (flags & 1) != 0; }

  unsigned int get_offset (unsigned int i) const
  {
    if (is_long_offset ())
      return get_long_offset_array ()[i];
    else
      return get_short_offset_array ()[i] * 2;
  }

  const HBUINT32 *get_long_offset_array () const { return (const HBUINT32 *) &offsetZ; }
  const HBUINT16 *get_short_offset_array () const { return (const HBUINT16 *) &offsetZ; }

  public:
  struct accelerator_t
  {
    void init (hb_face_t *face)
    { table = hb_sanitize_context_t ().reference_table<gvar> (face); }
    void fini () { table.destroy (); }

    private:
    struct x_getter { static float get (const contour_point_t &p) { return p.x; } };
    struct y_getter { static float get (const contour_point_t &p) { return p.y; } };

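    /* Infer an unreferenced point's delta from its nearest referenced
     * neighbours, following the spec's rules for interpolating untouched
     * points: take a neighbour's delta when the point lies outside the
     * neighbours' coordinate range, otherwise interpolate linearly. */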
    template <typename T>
    static float infer_delta (const hb_array_t<contour_point_t> points,
                              const hb_array_t<contour_point_t> deltas,
                              unsigned int target, unsigned int prev, unsigned int next)
    {
      float target_val = T::get (points[target]);
      float prev_val = T::get (points[prev]);
      float next_val = T::get (points[next]);
      float prev_delta = T::get (deltas[prev]);
      float next_delta = T::get (deltas[next]);

      if (prev_val == next_val)
        return (prev_delta == next_delta) ? prev_delta : 0.f;
      else if (target_val <= hb_min (prev_val, next_val))
        return (prev_val < next_val) ? prev_delta : next_delta;
      else if (target_val >= hb_max (prev_val, next_val))
        return (prev_val > next_val) ? prev_delta : next_delta;

      /* linear interpolation */
      float r = (target_val - prev_val) / (next_val - prev_val);
      return (1.f - r) * prev_delta + r * next_delta;
    }

    static unsigned int next_index (unsigned int i, unsigned int start, unsigned int end)
    { return (i >= end) ? start : (i + 1); }

    public:
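    /* Apply this glyph's gvar deltas to `points`, using the font's current
     * normalized variation coordinates: for every applicable tuple, unpack
     * the point numbers and x/y deltas, accumulate them scaled by the
     * tuple's scalar, infer deltas for unreferenced points contour by
     * contour, then add the rounded totals to the point coordinates. */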
    bool apply_deltas_to_points (hb_codepoint_t glyph, hb_font_t *font,
                                 const hb_array_t<contour_point_t> points) const
    {
      /* num_coords should exactly match gvar's axisCount due to how GlyphVariationData tuples are aligned */
      if (!font->num_coords || font->num_coords != table->axisCount) return true;

      hb_bytes_t var_data_bytes = table->get_glyph_var_data_bytes (table.get_blob (), glyph);
      if (!var_data_bytes.as<GlyphVariationData> ()->has_data ()) return true;
      hb_vector_t<unsigned int> shared_indices;
      GlyphVariationData::tuple_iterator_t iterator;
      if (!GlyphVariationData::get_tuple_iterator (var_data_bytes, table->axisCount,
                                                   shared_indices, &iterator))
        return true; /* so isn't applied at all */

      /* Save original points for inferred delta calculation */
      contour_point_vector_t orig_points;
      orig_points.resize (points.length);
      for (unsigned int i = 0; i < orig_points.length; i++)
        orig_points[i] = points[i];

      contour_point_vector_t deltas; /* flag is used to indicate referenced point */
      deltas.resize (points.length);

      hb_vector_t<unsigned> end_points;
      for (unsigned i = 0; i < points.length; ++i)
        if (points[i].is_end_point)
          end_points.push (i);

      int *coords = font->coords;
      unsigned num_coords = font->num_coords;
      hb_array_t<const F2DOT14> shared_tuples = (table+table->sharedTuples).as_array (table->sharedTupleCount * table->axisCount);
      do
      {
        float scalar = iterator.current_tuple->calculate_scalar (coords, num_coords, shared_tuples);
        if (scalar == 0.f) continue;
        const HBUINT8 *p = iterator.get_serialized_data ();
        unsigned int length = iterator.current_tuple->get_data_size ();
        if (unlikely (!iterator.var_data_bytes.check_range (p, length)))
          return false;

        hb_bytes_t bytes ((const char *) p, length);
        hb_vector_t<unsigned int> private_indices;
        if (iterator.current_tuple->has_private_points () &&
            !GlyphVariationData::unpack_points (p, private_indices, bytes))
          return false;
        const hb_array_t<unsigned int> &indices = private_indices.length ? private_indices : shared_indices;

        bool apply_to_all = (indices.length == 0);
        unsigned int num_deltas = apply_to_all ? points.length : indices.length;
        hb_vector_t<int> x_deltas;
        x_deltas.resize (num_deltas);
        if (!GlyphVariationData::unpack_deltas (p, x_deltas, bytes))
          return false;
        hb_vector_t<int> y_deltas;
        y_deltas.resize (num_deltas);
        if (!GlyphVariationData::unpack_deltas (p, y_deltas, bytes))
          return false;

        for (unsigned int i = 0; i < deltas.length; i++)
          deltas[i].init ();
        for (unsigned int i = 0; i < num_deltas; i++)
        {
          unsigned int pt_index = apply_to_all ? i : indices[i];
          deltas[pt_index].flag = 1; /* this point is referenced, i.e., explicit deltas specified */
          deltas[pt_index].x += x_deltas[i] * scalar;
          deltas[pt_index].y += y_deltas[i] * scalar;
        }

        /* infer deltas for unreferenced points */
        unsigned start_point = 0;
        for (unsigned c = 0; c < end_points.length; c++)
        {
          unsigned end_point = end_points[c];

          /* Check the number of unreferenced points in a contour. If no unref points or no ref points, nothing to do. */
          unsigned unref_count = 0;
          for (unsigned i = start_point; i <= end_point; i++)
            if (!deltas[i].flag) unref_count++;

          unsigned j = start_point;
          if (unref_count == 0 || unref_count > end_point - start_point)
            goto no_more_gaps;

          for (;;)
          {
            /* Locate the next gap of unreferenced points between two referenced points prev and next.
             * Note that a gap may wrap around at left (start_point) and/or at right (end_point).
             */
            unsigned int prev, next, i;
            for (;;)
            {
              i = j;
              j = next_index (i, start_point, end_point);
              if (deltas[i].flag && !deltas[j].flag) break;
            }
            prev = j = i;
            for (;;)
            {
              i = j;
              j = next_index (i, start_point, end_point);
              if (!deltas[i].flag && deltas[j].flag) break;
            }
            next = j;
            /* Infer deltas for all unref points in the gap between prev and next */
            i = prev;
            for (;;)
            {
              i = next_index (i, start_point, end_point);
              if (i == next) break;
              deltas[i].x = infer_delta<x_getter> (orig_points.as_array (), deltas.as_array (), i, prev, next);
              deltas[i].y = infer_delta<y_getter> (orig_points.as_array (), deltas.as_array (), i, prev, next);
              if (--unref_count == 0) goto no_more_gaps;
            }
          }
        no_more_gaps:
          start_point = end_point + 1;
        }

        /* apply specified / inferred deltas to points */
        for (unsigned int i = 0; i < points.length; i++)
        {
          points[i].x += (float) roundf (deltas[i].x);
          points[i].y += (float) roundf (deltas[i].y);
        }
      } while (iterator.move_to_next ());

      return true;
    }

    unsigned int get_axis_count () const { return table->axisCount; }

    private:
    hb_blob_ptr_t<gvar> table;
  };

  protected:
  FixedVersion<> version;       /* Version number of the glyph variations table.
                                 * Set to 0x00010000u. */
  HBUINT16 axisCount;           /* The number of variation axes for this font. This must be
                                 * the same number as axisCount in the 'fvar' table. */
  HBUINT16 sharedTupleCount;
                                /* The number of shared tuple records. Shared tuple records
                                 * can be referenced within glyph variation data tables for
                                 * multiple glyphs, as opposed to other tuple records stored
                                 * directly within a glyph variation data table. */
  LNNOffsetTo<UnsizedArrayOf<F2DOT14>>
           sharedTuples;        /* Offset from the start of this table to the shared tuple records.
                                 * Array of tuple records shared across all glyph variation data tables. */
  HBUINT16 glyphCount;          /* The number of glyphs in this font. This must match the number of
                                 * glyphs stored elsewhere in the font. */
  HBUINT16 flags;               /* Bit-field that gives the format of the offset array that follows.
                                 * If bit 0 is clear, the offsets are uint16; if bit 0 is set, the
                                 * offsets are uint32. */
  LOffsetTo<GlyphVariationData>
           dataZ;               /* Offset from the start of this table to the array of
                                 * GlyphVariationData tables. */
  UnsizedArrayOf<HBUINT8>
           offsetZ;             /* Offsets from the start of the GlyphVariationData array
                                 * to each GlyphVariationData table. */
  public:
  DEFINE_SIZE_MIN (20);
};

struct gvar_accelerator_t : gvar::accelerator_t {};

} /* namespace OT */

#endif /* HB_OT_VAR_GVAR_TABLE_HH */