/*
 * Copyright © 2022 Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Google Author(s): Garret Rieger
 */

#ifndef GRAPH_PAIRPOS_GRAPH_HH
#define GRAPH_PAIRPOS_GRAPH_HH

#include "split-helpers.hh"
#include "coverage-graph.hh"
#include "classdef-graph.hh"
#include "../OT/Layout/GPOS/PairPos.hh"
#include "../OT/Layout/GPOS/PosLookupSubTable.hh"

namespace graph {

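// Mutable view over a PairPosFormat1 subtable in the repacker graph. Adds
// support for computing split points and splitting the subtable so that each
// resulting piece stays under the 64KB size limit imposed by 16-bit offsets.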
struct PairPosFormat1 : public OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>
{
  bool sanitize (graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    unsigned min_size = OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size;
    if (vertex_len < min_size) return false;

    return vertex_len >=
        min_size + pairSet.get_size () - pairSet.len.get_size();
  }

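  // Computes the points at which this subtable must be split so that each
  // resulting subtable, including its PairSets and Coverage table, stays
  // under the 64KB size limit of 16-bit offsets, then performs the split.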
  hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
                                         unsigned parent_index,
                                         unsigned this_index)
  {
    hb_set_t visited;

    const unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
    const unsigned coverage_size = c.graph.vertices_[coverage_id].table_size ();
    const unsigned base_size = OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size;

    unsigned partial_coverage_size = 4;
    unsigned accumulated = base_size;
    hb_vector_t<unsigned> split_points;
    for (unsigned i = 0; i < pairSet.len; i++)
    {
      unsigned pair_set_index = pair_set_graph_index (c, this_index, i);
      unsigned accumulated_delta =
          c.graph.find_subgraph_size (pair_set_index, visited) +
          SmallTypes::size; // for PairSet offset.
      partial_coverage_size += OT::HBUINT16::static_size;

      accumulated += accumulated_delta;
      unsigned total = accumulated + hb_min (partial_coverage_size, coverage_size);

      if (total >= (1 << 16))
      {
        split_points.push (i);
        accumulated = base_size + accumulated_delta;
        partial_coverage_size = 6;
        visited.clear (); // node sharing isn't allowed between splits.
      }
    }

    split_context_t split_context {
      c,
      this,
      c.graph.duplicate_if_shared (parent_index, this_index),
    };

    return actuate_subtable_split<split_context_t> (split_context, split_points);
  }

  private:

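  // Adapter passed to the generic actuate_subtable_split helper; exposes the
  // operations (original_count, clone_range, shrink) needed to carry out a
  // split of this PairPosFormat1 subtable.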
  struct split_context_t {
    gsubgpos_graph_context_t& c;
    PairPosFormat1* thiz;
    unsigned this_index;

    unsigned original_count ()
    {
      return thiz->pairSet.len;
    }

    unsigned clone_range (unsigned start, unsigned end)
    {
      return thiz->clone_range (this->c, this->this_index, start, end);
    }

    bool shrink (unsigned count)
    {
      return thiz->shrink (this->c, this->this_index, count);
    }
  };

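  // Shrinks this subtable in place to only the first 'count' PairSets and
  // rebuilds its Coverage table to match.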
  bool shrink (gsubgpos_graph_context_t& c,
               unsigned this_index,
               unsigned count)
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr,
               " Shrinking PairPosFormat1 (%u) to [0, %u).",
               this_index,
               count);
    unsigned old_count = pairSet.len;
    if (count >= old_count)
      return true;

    pairSet.len = count;
    c.graph.vertices_[this_index].obj.tail -= (old_count - count) * SmallTypes::size;

    auto coverage = c.graph.as_mutable_table<Coverage> (this_index, &this->coverage);
    if (!coverage) return false;

    unsigned coverage_size = coverage.vertex->table_size ();
    auto new_coverage =
        + hb_zip (coverage.table->iter (), hb_range ())
        | hb_filter ([&] (hb_pair_t<unsigned, unsigned> p) {
          return p.second < count;
        })
        | hb_map_retains_sorting (hb_first)
        ;

    return Coverage::make_coverage (c, new_coverage, coverage.index, coverage_size);
  }

  // Create a new PairPos including PairSets from start (inclusive) to end (exclusive).
  // Returns the object id of the new object.
  unsigned clone_range (gsubgpos_graph_context_t& c,
                        unsigned this_index,
                        unsigned start, unsigned end) const
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr,
               " Cloning PairPosFormat1 (%u) range [%u, %u).", this_index, start, end);

    unsigned num_pair_sets = end - start;
    unsigned prime_size = OT::Layout::GPOS_impl::PairPosFormat1_3<SmallTypes>::min_size
                          + num_pair_sets * SmallTypes::size;

    unsigned pair_pos_prime_id = c.create_node (prime_size);
    if (pair_pos_prime_id == (unsigned) -1) return -1;

    PairPosFormat1* pair_pos_prime = (PairPosFormat1*) c.graph.object (pair_pos_prime_id).head;
    pair_pos_prime->format = this->format;
    pair_pos_prime->valueFormat[0] = this->valueFormat[0];
    pair_pos_prime->valueFormat[1] = this->valueFormat[1];
    pair_pos_prime->pairSet.len = num_pair_sets;

    for (unsigned i = start; i < end; i++)
    {
      c.graph.move_child<> (this_index,
                            &pairSet[i],
                            pair_pos_prime_id,
                            &pair_pos_prime->pairSet[i - start]);
    }

    unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
    if (!Coverage::clone_coverage (c,
                                   coverage_id,
                                   pair_pos_prime_id,
                                   2,
                                   start, end))
      return -1;

    return pair_pos_prime_id;
  }

  unsigned pair_set_graph_index (gsubgpos_graph_context_t& c, unsigned this_index, unsigned i) const
  {
    return c.graph.index_for_offset (this_index, &pairSet[i]);
  }
};

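// Mutable view over a PairPosFormat2 subtable in the repacker graph. Like
// PairPosFormat1 above, adds support for splitting the subtable, but the
// split is made along class1 record boundaries and must also account for the
// ClassDef tables and any device tables referenced by the value records.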
struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>
{
  bool sanitize (graph_t::vertex_t& vertex) const
  {
    size_t vertex_len = vertex.table_size ();
    unsigned min_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size;
    if (vertex_len < min_size) return false;

    const unsigned class1_count = class1Count;
    return vertex_len >=
        min_size + class1_count * get_class1_record_size ();
  }

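  // Computes the points at which this subtable must be split so that each
  // resulting subtable, together with its Coverage, ClassDef, and device
  // table dependencies, stays under the 64KB size limit of 16-bit offsets,
  // then performs the split.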
  hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
                                         unsigned parent_index,
                                         unsigned this_index)
  {
    const unsigned base_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size;
    const unsigned class_def_2_size = size_of (c, this_index, &classDef2);
    const Coverage* coverage = get_coverage (c, this_index);
    const ClassDef* class_def_1 = get_class_def_1 (c, this_index);
    auto gid_and_class =
        + coverage->iter ()
        | hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
          return hb_codepoint_pair_t (gid, class_def_1->get_class (gid));
        })
        ;
    class_def_size_estimator_t estimator (gid_and_class);

    const unsigned class1_count = class1Count;
    const unsigned class2_count = class2Count;
    const unsigned class1_record_size = get_class1_record_size ();

    const unsigned value_1_len = valueFormat1.get_len ();
    const unsigned value_2_len = valueFormat2.get_len ();
    const unsigned total_value_len = value_1_len + value_2_len;

    unsigned accumulated = base_size;
    unsigned coverage_size = 4;
    unsigned class_def_1_size = 4;
    unsigned max_coverage_size = coverage_size;
    unsigned max_class_def_1_size = class_def_1_size;

    hb_vector_t<unsigned> split_points;

    hb_hashmap_t<unsigned, unsigned> device_tables = get_all_device_tables (c, this_index);
    hb_vector_t<unsigned> format1_device_table_indices = valueFormat1.get_device_table_indices ();
    hb_vector_t<unsigned> format2_device_table_indices = valueFormat2.get_device_table_indices ();
    bool has_device_tables = bool(format1_device_table_indices) || bool(format2_device_table_indices);

    hb_set_t visited;
    for (unsigned i = 0; i < class1_count; i++)
    {
      unsigned accumulated_delta = class1_record_size;
      coverage_size += estimator.incremental_coverage_size (i);
      class_def_1_size += estimator.incremental_class_def_size (i);
      max_coverage_size = hb_max (max_coverage_size, coverage_size);
      max_class_def_1_size = hb_max (max_class_def_1_size, class_def_1_size);

      if (has_device_tables) {
        for (unsigned j = 0; j < class2_count; j++)
        {
          unsigned value1_index = total_value_len * (class2_count * i + j);
          unsigned value2_index = value1_index + value_1_len;
          accumulated_delta += size_of_value_record_children (c,
                                                              device_tables,
                                                              format1_device_table_indices,
                                                              value1_index,
                                                              visited);
          accumulated_delta += size_of_value_record_children (c,
                                                              device_tables,
                                                              format2_device_table_indices,
                                                              value2_index,
                                                              visited);
        }
      }

      accumulated += accumulated_delta;
      unsigned total = accumulated
                       + coverage_size + class_def_1_size + class_def_2_size
                       // The largest object will pack last and can exceed the size limit.
                       - hb_max (hb_max (coverage_size, class_def_1_size), class_def_2_size);
      if (total >= (1 << 16))
      {
        split_points.push (i);
        // split does not include i, so add the size for i when we reset the size counters.
        accumulated = base_size + accumulated_delta;
        coverage_size = 4 + estimator.incremental_coverage_size (i);
        class_def_1_size = 4 + estimator.incremental_class_def_size (i);
        visited.clear (); // node sharing isn't allowed between splits.
      }
    }

    split_context_t split_context {
      c,
      this,
      c.graph.duplicate_if_shared (parent_index, this_index),
      class1_record_size,
      total_value_len,
      value_1_len,
      value_2_len,
      max_coverage_size,
      max_class_def_1_size,
      device_tables,
      format1_device_table_indices,
      format2_device_table_indices
    };

    return actuate_subtable_split<split_context_t> (split_context, split_points);
  }
  private:

  struct split_context_t
  {
    gsubgpos_graph_context_t& c;
    PairPosFormat2* thiz;
    unsigned this_index;
    unsigned class1_record_size;
    unsigned value_record_len;
    unsigned value1_record_len;
    unsigned value2_record_len;
    unsigned max_coverage_size;
    unsigned max_class_def_size;

    const hb_hashmap_t<unsigned, unsigned>& device_tables;
    const hb_vector_t<unsigned>& format1_device_table_indices;
    const hb_vector_t<unsigned>& format2_device_table_indices;

    unsigned original_count ()
    {
      return thiz->class1Count;
    }

    unsigned clone_range (unsigned start, unsigned end)
    {
      return thiz->clone_range (*this, start, end);
    }

    bool shrink (unsigned count)
    {
      return thiz->shrink (*this, count);
    }
  };

  size_t get_class1_record_size () const
  {
    const size_t class2_count = class2Count;
    return
        class2_count * (valueFormat1.get_size () + valueFormat2.get_size ());
  }

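  // Create a new PairPosFormat2 covering class1 records from start (inclusive)
  // to end (exclusive), together with the Coverage, ClassDef, and device tables
  // needed by that range. Returns the object id of the new object.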
  unsigned clone_range (split_context_t& split_context,
                        unsigned start, unsigned end) const
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr,
               " Cloning PairPosFormat2 (%u) range [%u, %u).", split_context.this_index, start, end);

    graph_t& graph = split_context.c.graph;

    unsigned num_records = end - start;
    unsigned prime_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size
                          + num_records * split_context.class1_record_size;

    unsigned pair_pos_prime_id = split_context.c.create_node (prime_size);
    if (pair_pos_prime_id == (unsigned) -1) return -1;

    PairPosFormat2* pair_pos_prime =
        (PairPosFormat2*) graph.object (pair_pos_prime_id).head;
    pair_pos_prime->format = this->format;
    pair_pos_prime->valueFormat1 = this->valueFormat1;
    pair_pos_prime->valueFormat2 = this->valueFormat2;
    pair_pos_prime->class1Count = num_records;
    pair_pos_prime->class2Count = this->class2Count;
    clone_class1_records (split_context,
                          pair_pos_prime_id,
                          start,
                          end);

    unsigned coverage_id =
        graph.index_for_offset (split_context.this_index, &coverage);
    unsigned class_def_1_id =
        graph.index_for_offset (split_context.this_index, &classDef1);
    auto& coverage_v = graph.vertices_[coverage_id];
    auto& class_def_1_v = graph.vertices_[class_def_1_id];
    Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
    ClassDef* class_def_1_table = (ClassDef*) class_def_1_v.obj.head;
    if (!coverage_table
        || !coverage_table->sanitize (coverage_v)
        || !class_def_1_table
        || !class_def_1_table->sanitize (class_def_1_v))
      return -1;

    auto klass_map =
        + coverage_table->iter ()
        | hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
          return hb_codepoint_pair_t (gid, class_def_1_table->get_class (gid));
        })
        | hb_filter ([&] (hb_codepoint_t klass) {
          return klass >= start && klass < end;
        }, hb_second)
        | hb_map_retains_sorting ([&] (hb_codepoint_pair_t gid_and_class) {
          // Classes in the new subtable must be numbered from 0, so subtract start.
          return hb_codepoint_pair_t (gid_and_class.first, gid_and_class.second - start);
        })
        ;

    // coverage
    if (!Coverage::add_coverage (split_context.c,
                                 pair_pos_prime_id,
                                 2,
                                 + klass_map | hb_map_retains_sorting (hb_first),
                                 split_context.max_coverage_size))
      return -1;

    // classDef1
    if (!ClassDef::add_class_def (split_context.c,
                                  pair_pos_prime_id,
                                  8,
                                  + klass_map,
                                  split_context.max_class_def_size))
      return -1;

    // classDef2
    unsigned class_def_2_id =
        graph.index_for_offset (split_context.this_index, &classDef2);
    auto* class_def_link = graph.vertices_[pair_pos_prime_id].obj.real_links.push ();
    class_def_link->width = SmallTypes::size;
    class_def_link->objidx = class_def_2_id;
    class_def_link->position = 10;
    graph.vertices_[class_def_2_id].add_parent (pair_pos_prime_id);
    graph.duplicate (pair_pos_prime_id, class_def_2_id);

    return pair_pos_prime_id;
  }

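  // Copies the class1 records in [start, end) into the new subtable and moves
  // any device tables referenced by the copied value records over to it.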
  void clone_class1_records (split_context_t& split_context,
                             unsigned pair_pos_prime_id,
                             unsigned start, unsigned end) const
  {
    PairPosFormat2* pair_pos_prime =
        (PairPosFormat2*) split_context.c.graph.object (pair_pos_prime_id).head;

    char* start_addr = ((char*)&values[0]) + start * split_context.class1_record_size;
    unsigned num_records = end - start;
    hb_memcpy (&pair_pos_prime->values[0],
               start_addr,
               num_records * split_context.class1_record_size);

    if (!split_context.format1_device_table_indices
        && !split_context.format2_device_table_indices)
      // No device tables to move over.
      return;

    unsigned class2_count = class2Count;
    for (unsigned i = start; i < end; i++)
    {
      for (unsigned j = 0; j < class2_count; j++)
      {
        unsigned value1_index = split_context.value_record_len * (class2_count * i + j);
        unsigned value2_index = value1_index + split_context.value1_record_len;

        unsigned new_value1_index = split_context.value_record_len * (class2_count * (i - start) + j);
        unsigned new_value2_index = new_value1_index + split_context.value1_record_len;

        transfer_device_tables (split_context,
                                pair_pos_prime_id,
                                split_context.format1_device_table_indices,
                                value1_index,
                                new_value1_index);

        transfer_device_tables (split_context,
                                pair_pos_prime_id,
                                split_context.format2_device_table_indices,
                                value2_index,
                                new_value2_index);
      }
    }
  }

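  // Re-parents the device table children of one value record from this
  // subtable to the corresponding value record in the new subtable.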
  void transfer_device_tables (split_context_t& split_context,
                               unsigned pair_pos_prime_id,
                               const hb_vector_t<unsigned>& device_table_indices,
                               unsigned old_value_record_index,
                               unsigned new_value_record_index) const
  {
    PairPosFormat2* pair_pos_prime =
        (PairPosFormat2*) split_context.c.graph.object (pair_pos_prime_id).head;

    for (unsigned i : device_table_indices)
    {
      OT::Offset16* record = (OT::Offset16*) &values[old_value_record_index + i];
      unsigned record_position = ((char*) record) - ((char*) this);
      if (!split_context.device_tables.has (record_position)) continue;

      split_context.c.graph.move_child (
          split_context.this_index,
          record,
          pair_pos_prime_id,
          (OT::Offset16*) &pair_pos_prime->values[new_value_record_index + i]);
    }
  }

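  // Shrinks this subtable in place to only the first 'count' class1 records
  // and rebuilds its Coverage and ClassDef1 tables to cover only the glyphs
  // in those classes.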
  bool shrink (split_context_t& split_context,
               unsigned count)
  {
    DEBUG_MSG (SUBSET_REPACK, nullptr,
               " Shrinking PairPosFormat2 (%u) to [0, %u).",
               split_context.this_index,
               count);
    unsigned old_count = class1Count;
    if (count >= old_count)
      return true;

    graph_t& graph = split_context.c.graph;
    class1Count = count;
    graph.vertices_[split_context.this_index].obj.tail -=
        (old_count - count) * split_context.class1_record_size;

    auto coverage =
        graph.as_mutable_table<Coverage> (split_context.this_index, &this->coverage);
    if (!coverage) return false;

    auto class_def_1 =
        graph.as_mutable_table<ClassDef> (split_context.this_index, &classDef1);
    if (!class_def_1) return false;

    auto klass_map =
        + coverage.table->iter ()
        | hb_map_retains_sorting ([&] (hb_codepoint_t gid) {
          return hb_codepoint_pair_t (gid, class_def_1.table->get_class (gid));
        })
        | hb_filter ([&] (hb_codepoint_t klass) {
          return klass < count;
        }, hb_second)
        ;

    auto new_coverage = + klass_map | hb_map_retains_sorting (hb_first);
    if (!Coverage::make_coverage (split_context.c,
                                  + new_coverage,
                                  coverage.index,
                                  // Existing ranges may not be kept; worst case size is a format 1
                                  // coverage table.
                                  4 + new_coverage.len() * 2))
      return false;

    return ClassDef::make_class_def (split_context.c,
                                     + klass_map,
                                     class_def_1.index,
                                     class_def_1.vertex->table_size ());
  }

  hb_hashmap_t<unsigned, unsigned>
  get_all_device_tables (gsubgpos_graph_context_t& c,
                         unsigned this_index) const
  {
    const auto& v = c.graph.vertices_[this_index];
    return v.position_to_index_map ();
  }

  const Coverage* get_coverage (gsubgpos_graph_context_t& c,
                                unsigned this_index) const
  {
    unsigned coverage_id = c.graph.index_for_offset (this_index, &coverage);
    auto& coverage_v = c.graph.vertices_[coverage_id];

    Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
    if (!coverage_table || !coverage_table->sanitize (coverage_v))
      return &Null(Coverage);
    return coverage_table;
  }

  const ClassDef* get_class_def_1 (gsubgpos_graph_context_t& c,
                                   unsigned this_index) const
  {
    unsigned class_def_1_id = c.graph.index_for_offset (this_index, &classDef1);
    auto& class_def_1_v = c.graph.vertices_[class_def_1_id];

    ClassDef* class_def_1_table = (ClassDef*) class_def_1_v.obj.head;
    if (!class_def_1_table || !class_def_1_table->sanitize (class_def_1_v))
      return &Null(ClassDef);
    return class_def_1_table;
  }

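  // Sums the sizes of the device table subgraphs referenced by the given
  // value record, skipping subgraphs already counted via 'visited'.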
  unsigned size_of_value_record_children (gsubgpos_graph_context_t& c,
                                          const hb_hashmap_t<unsigned, unsigned>& device_tables,
                                          const hb_vector_t<unsigned> device_table_indices,
                                          unsigned value_record_index,
                                          hb_set_t& visited)
  {
    unsigned size = 0;
    for (unsigned i : device_table_indices)
    {
      OT::Layout::GPOS_impl::Value* record = &values[value_record_index + i];
      unsigned record_position = ((char*) record) - ((char*) this);
      unsigned* obj_idx;
      if (!device_tables.has (record_position, &obj_idx)) continue;
      size += c.graph.find_subgraph_size (*obj_idx, visited);
    }
    return size;
  }

  unsigned size_of (gsubgpos_graph_context_t& c,
                    unsigned this_index,
                    const void* offset) const
  {
    const unsigned id = c.graph.index_for_offset (this_index, offset);
    return c.graph.vertices_[id].table_size ();
  }
};

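// Format-dispatching wrapper over the PairPos lookup subtable: routes
// sanitize and split_subtables calls to the PairPosFormat1/2 views above.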
struct PairPos : public OT::Layout::GPOS_impl::PairPos
{
  hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
                                         unsigned parent_index,
                                         unsigned this_index)
  {
    switch (u.format) {
    case 1:
      return ((PairPosFormat1*)(&u.format1))->split_subtables (c, parent_index, this_index);
    case 2:
      return ((PairPosFormat2*)(&u.format2))->split_subtables (c, parent_index, this_index);
#ifndef HB_NO_BEYOND_64K
    case 3: HB_FALLTHROUGH;
    case 4: HB_FALLTHROUGH;
      // Don't split 24-bit PairPos subtables.
#endif
    default:
      return hb_vector_t<unsigned> ();
    }
  }

  bool sanitize (graph_t::vertex_t& vertex) const
  {
    int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
    if (vertex_len < u.format.get_size ()) return false;

    switch (u.format) {
    case 1:
      return ((PairPosFormat1*)(&u.format1))->sanitize (vertex);
    case 2:
      return ((PairPosFormat2*)(&u.format2))->sanitize (vertex);
#ifndef HB_NO_BEYOND_64K
    case 3: HB_FALLTHROUGH;
    case 4: HB_FALLTHROUGH;
#endif
    default:
      // Formats 3 and 4 are not handled here.
      return false;
    }
  }
};

}

#endif // GRAPH_PAIRPOS_GRAPH_HH