/*
 * Copyright © 2018 Ebrahim Byagowi
 * Copyright © 2020 Google, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Google Author(s): Calder Kitagawa
 */

#ifndef HB_OT_COLOR_COLR_TABLE_HH
#define HB_OT_COLOR_COLR_TABLE_HH

#include "hb-open-type.hh"

/*
 * COLR -- Color
 * https://docs.microsoft.com/en-us/typography/opentype/spec/colr
 */
#define HB_OT_TAG_COLR HB_TAG('C','O','L','R')


namespace OT {


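/* A single color layer: the glyph to draw and the CPAL palette entry to
 * color it with. */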
struct LayerRecord
{
  operator hb_ot_color_layer_t () const { return {glyphId, colorIdx}; }

  bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (c->check_struct (this));
  }

  public:
  HBGlyphID   glyphId;    /* Glyph ID of the layer glyph. */
  Index       colorIdx;   /* Index value to use with a
                           * selected color palette.
                           * An index value of 0xFFFF
                           * is a special case indicating
                           * that the text foreground
                           * color (defined by a
                           * higher-level client) should
                           * be used and shall not be
                           * treated as an actual index
                           * into the CPAL ColorRecord array. */
  public:
  DEFINE_SIZE_STATIC (4);
};

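/* Maps a base glyph to its run of consecutive layer records. */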
struct BaseGlyphRecord
{
  int cmp (hb_codepoint_t g) const
  { return g < glyphId ? -1 : g > glyphId ? 1 : 0; }

  bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (likely (c->check_struct (this)));
  }

  public:
  HBGlyphID   glyphId;        /* Glyph ID of the base (reference) glyph. */
  HBUINT16    firstLayerIdx;  /* Index (into the layer records array)
                               * of this glyph's first layer record.
                               * There will be numLayers consecutive
                               * entries for this base glyph. */
  HBUINT16    numLayers;      /* Number of color layers associated
                               * with this base glyph. */
  public:
  DEFINE_SIZE_STATIC (6);
};

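/* COLR table, version 0: an array of base glyph records, each pointing into
 * a shared array of layer records. */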
struct COLR
{
  static constexpr hb_tag_t tableTag = HB_OT_TAG_COLR;

  bool has_data () const { return numBaseGlyphs; }

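  /* Returns the total number of color layers for glyph.  When count is not
   * NULL, copies up to *count of them, starting at start_offset, into layers
   * and updates *count to the number copied. */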
  unsigned int get_glyph_layers (hb_codepoint_t glyph,
                                 unsigned int start_offset,
                                 unsigned int *count, /* IN/OUT. May be NULL. */
                                 hb_ot_color_layer_t *layers /* OUT. May be NULL. */) const
  {
    const BaseGlyphRecord &record = (this+baseGlyphsZ).bsearch (numBaseGlyphs, glyph);

    hb_array_t<const LayerRecord> all_layers = (this+layersZ).as_array (numLayers);
    hb_array_t<const LayerRecord> glyph_layers = all_layers.sub_array (record.firstLayerIdx,
                                                                       record.numLayers);
    if (count)
    {
      + glyph_layers.sub_array (start_offset, count)
      | hb_sink (hb_array (layers, *count))
      ;
    }
    return glyph_layers.length;
  }

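  /* Accelerator holding a sanitized reference to the face's COLR table. */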
  struct accelerator_t
  {
    accelerator_t () {}
    ~accelerator_t () { fini (); }

    void init (hb_face_t *face)
    { colr = hb_sanitize_context_t ().reference_table<COLR> (face); }

    void fini () { this->colr.destroy (); }

    bool is_valid () { return colr.get_blob ()->length; }

    void closure_glyphs (hb_codepoint_t glyph,
                         hb_set_t *related_ids /* OUT */) const
    { colr->closure_glyphs (glyph, related_ids); }

    private:
    hb_blob_ptr_t<COLR> colr;
  };

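  /* Adds the glyph IDs of all layers of the given base glyph to related_ids;
   * used when computing the glyph closure for subsetting. */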
  void closure_glyphs (hb_codepoint_t glyph,
                       hb_set_t *related_ids /* OUT */) const
  {
    const BaseGlyphRecord *record = get_base_glyph_record (glyph);
    if (!record) return;

    auto glyph_layers = (this+layersZ).as_array (numLayers).sub_array (record->firstLayerIdx,
                                                                       record->numLayers);
    if (!glyph_layers.length) return;
    related_ids->add_array (&glyph_layers[0].glyphId, glyph_layers.length, LayerRecord::min_size);
  }

  bool sanitize (hb_sanitize_context_t *c) const
  {
    TRACE_SANITIZE (this);
    return_trace (likely (c->check_struct (this) &&
                          (this+baseGlyphsZ).sanitize (c, numBaseGlyphs) &&
                          (this+layersZ).sanitize (c, numLayers)));
  }

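  /* Serializes a new COLR table from an iterator of base glyph records and an
   * iterator of per-glyph layer arrays; firstLayerIdx values are recomputed
   * as the layers are packed. */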
  template<typename BaseIterator, typename LayerIterator,
           hb_requires (hb_is_iterator (BaseIterator)),
           hb_requires (hb_is_iterator (LayerIterator))>
  bool serialize (hb_serialize_context_t *c,
                  unsigned version,
                  BaseIterator base_it,
                  LayerIterator layer_it)
  {
    TRACE_SERIALIZE (this);
    if (unlikely (base_it.len () != layer_it.len ()))
      return_trace (false);

    if (unlikely (!c->extend_min (this))) return_trace (false);
    this->version = version;
    numLayers = 0;
    numBaseGlyphs = base_it.len ();
    baseGlyphsZ = COLR::min_size;
    layersZ = COLR::min_size + numBaseGlyphs * BaseGlyphRecord::min_size;

    for (const hb_item_type<BaseIterator> _ : + base_it.iter ())
    {
      auto* record = c->embed (_);
      if (unlikely (!record)) return_trace (false);
      record->firstLayerIdx = numLayers;
      numLayers += record->numLayers;
    }

    for (const hb_item_type<LayerIterator>& _ : + layer_it.iter ())
      _.as_array ().copy (c);

    return_trace (true);
  }

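  /* Binary-searches the base glyph records for gid; returns nullptr for
   * .notdef or when no record matches. */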
  const BaseGlyphRecord* get_base_glyph_record (hb_codepoint_t gid) const
  {
    if ((unsigned int) gid == 0) // Ignore notdef.
      return nullptr;
    const BaseGlyphRecord* record = &(this+baseGlyphsZ).bsearch (numBaseGlyphs, (unsigned int) gid);
    if (record && (hb_codepoint_t) record->glyphId != gid)
      record = nullptr;
    return record;
  }

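  /* Subsets the table: keeps the base glyph records for glyphs retained in
   * the plan and remaps their layer glyph IDs to the new glyph IDs.  Fails if
   * a retained base glyph references a layer glyph that is not in the plan. */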
  bool subset (hb_subset_context_t *c) const
  {
    TRACE_SUBSET (this);

    const hb_map_t &reverse_glyph_map = *c->plan->reverse_glyph_map;

    auto base_it =
    + hb_range (c->plan->num_output_glyphs ())
    | hb_map_retains_sorting ([&](hb_codepoint_t new_gid)
      {
        hb_codepoint_t old_gid = reverse_glyph_map.get (new_gid);

        const BaseGlyphRecord* old_record = get_base_glyph_record (old_gid);
        if (unlikely (!old_record))
          return hb_pair_t<bool, BaseGlyphRecord> (false, Null (BaseGlyphRecord));

        BaseGlyphRecord new_record;
        new_record.glyphId = new_gid;
        new_record.numLayers = old_record->numLayers;
        return hb_pair_t<bool, BaseGlyphRecord> (true, new_record);
      })
    | hb_filter (hb_first)
    | hb_map_retains_sorting (hb_second)
    ;

    auto layer_it =
    + hb_range (c->plan->num_output_glyphs ())
    | hb_map (reverse_glyph_map)
    | hb_map_retains_sorting ([&](hb_codepoint_t old_gid)
      {
        const BaseGlyphRecord* old_record = get_base_glyph_record (old_gid);
        hb_vector_t<LayerRecord> out_layers;

        if (unlikely (!old_record ||
                      old_record->firstLayerIdx >= numLayers ||
                      old_record->firstLayerIdx + old_record->numLayers > numLayers))
          return hb_pair_t<bool, hb_vector_t<LayerRecord>> (false, out_layers);

        auto layers = (this+layersZ).as_array (numLayers).sub_array (old_record->firstLayerIdx,
                                                                     old_record->numLayers);
        out_layers.resize (layers.length);
        for (unsigned int i = 0; i < layers.length; i++) {
          out_layers[i] = layers[i];
          hb_codepoint_t new_gid = 0;
          if (unlikely (!c->plan->new_gid_for_old_gid (out_layers[i].glyphId, &new_gid)))
            return hb_pair_t<bool, hb_vector_t<LayerRecord>> (false, out_layers);
          out_layers[i].glyphId = new_gid;
        }

        return hb_pair_t<bool, hb_vector_t<LayerRecord>> (true, out_layers);
      })
    | hb_filter (hb_first)
    | hb_map_retains_sorting (hb_second)
    ;

    if (unlikely (!base_it || !layer_it || base_it.len () != layer_it.len ()))
      return_trace (false);

    COLR *colr_prime = c->serializer->start_embed<COLR> ();
    return_trace (colr_prime->serialize (c->serializer, version, base_it, layer_it));
  }

  protected:
  HBUINT16    version;        /* Table version number (starts at 0). */
  HBUINT16    numBaseGlyphs;  /* Number of Base Glyph Records. */
  LNNOffsetTo<SortedUnsizedArrayOf<BaseGlyphRecord>>
              baseGlyphsZ;    /* Offset to Base Glyph records. */
  LNNOffsetTo<UnsizedArrayOf<LayerRecord>>
              layersZ;        /* Offset to Layer Records. */
  HBUINT16    numLayers;      /* Number of Layer Records. */
  public:
  DEFINE_SIZE_STATIC (14);
};

} /* namespace OT */


#endif /* HB_OT_COLOR_COLR_TABLE_HH */