/*
 * Copyright © 2007,2008,2009,2010 Red Hat, Inc.
 * Copyright © 2012,2018 Google, Inc.
 * Copyright © 2019 Facebook, Inc.
 *
 * This is part of HarfBuzz, a text shaping library.
 *
 * Permission is hereby granted, without written agreement and without
 * license or royalty fees, to use, copy, modify, and distribute this
 * software and its documentation for any purpose, provided that the
 * above copyright notice and the following two paragraphs appear in
 * all copies of this software.
 *
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
 * FITNESS FOR A PARTICULAR PURPOSE.  THE SOFTWARE PROVIDED HEREUNDER IS
 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 *
 * Red Hat Author(s): Behdad Esfahbod
 * Google Author(s): Behdad Esfahbod
 * Facebook Author(s): Behdad Esfahbod
 */

#ifndef HB_SERIALIZE_HH
#define HB_SERIALIZE_HH

#include "hb.hh"
#include "hb-blob.hh"
#include "hb-map.hh"
#include "hb-pool.hh"


/*
 * Serialize
 */

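/*
 * The serializer fills its buffer from both ends: the object currently
 * under construction grows forward from `start` at `head`, while packed
 * (finished) objects are moved back into the region growing down from
 * `end` at `tail`.  Links recorded between objects are resolved into
 * real offsets by resolve_links () at end_serialize () time.
 */
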
struct hb_serialize_context_t
{
  typedef unsigned objidx_t;

  enum whence_t {
    Head,     /* Relative to the current object head (default). */
    Tail,     /* Relative to the current object tail, after it is packed. */
    Absolute  /* Absolute: from the start of the serialize buffer. */
  };

  struct object_t
  {
    void fini () { links.fini (); }

    bool operator == (const object_t &o) const
    {
      return (tail - head == o.tail - o.head)
          && (links.length == o.links.length)
          && 0 == hb_memcmp (head, o.head, tail - head)
          && links.as_bytes () == o.links.as_bytes ();
    }
    uint32_t hash () const
    {
      return hb_bytes_t (head, tail - head).hash () ^
             links.as_bytes ().hash ();
    }

    struct link_t
    {
      bool is_wide: 1;        /* 32-bit offset field if set; 16-bit otherwise. */
      bool is_signed: 1;      /* Offset field is signed. */
      unsigned whence: 2;     /* One of whence_t. */
      unsigned position: 28;  /* Position of the offset field within the object. */
      unsigned bias;          /* Subtracted from the resolved offset. */
      objidx_t objidx;        /* Index of the target object in `packed`. */
    };

    char *head;               /* Start of object data. */
    char *tail;               /* End of object data. */
    hb_vector_t<link_t> links;
    object_t *next;
  };

  struct snapshot_t
  {
    char *head;
    char *tail;
    object_t *current;  // Just for sanity checking.
    unsigned num_links;
  };

  snapshot_t snapshot ()
  { return snapshot_t { head, tail, current, current->links.length }; }
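
  /* Illustrative sketch: how a caller might attempt a serialization and
   * back it out on failure.  `c` is an hb_serialize_context_t *, and
   * serialize_something is a hypothetical helper standing in for real
   * serialization code:
   *
   *   auto snap = c->snapshot ();
   *   if (!serialize_something (c))
   *     c->revert (snap);  // Undo writes and links since the snapshot.
   */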

  hb_serialize_context_t (void *start_, unsigned int size) :
    start ((char *) start_),
    end (start + size),
    current (nullptr)
  { reset (); }
  ~hb_serialize_context_t () { fini (); }

  void fini ()
  {
    for (object_t *_ : ++hb_iter (packed)) _->fini ();
    packed.fini ();
    this->packed_map.fini ();

    while (current)
    {
      auto *_ = current;
      current = current->next;
      _->fini ();
    }
    object_pool.fini ();
  }

  bool in_error () const { return !this->successful; }

  void reset ()
  {
    this->successful = true;
    this->ran_out_of_room = false;
    this->head = this->start;
    this->tail = this->end;
    this->debug_depth = 0;

    fini ();
    this->packed.push (nullptr);
  }

  bool check_success (bool success)
  { return this->successful && (success || (err_other_error (), false)); }

  template <typename T1, typename T2>
  bool check_equal (T1 &&v1, T2 &&v2)
  { return check_success ((long long) v1 == (long long) v2); }

  template <typename T1, typename T2>
  bool check_assign (T1 &v1, T2 &&v2)
  { return check_equal (v1 = v2, v2); }

  template <typename T> bool propagate_error (T &&obj)
  { return check_success (!hb_deref (obj).in_error ()); }

  template <typename T1, typename... Ts> bool propagate_error (T1 &&o1, Ts&&... os)
  { return propagate_error (hb_forward<T1> (o1)) &&
           propagate_error (hb_forward<Ts> (os)...); }

  /* To be called around the main serialize operation. */
  template <typename Type>
  Type *start_serialize ()
  {
    DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, +1,
                     "start [%p..%p] (%lu bytes)",
                     this->start, this->end,
                     (unsigned long) (this->end - this->start));

    assert (!current);
    return push<Type> ();
  }
  void end_serialize ()
  {
    DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, -1,
                     "end [%p..%p] serialized %u bytes; %s",
                     this->start, this->end,
                     (unsigned) (this->head - this->start),
                     this->successful ? "successful" : "UNSUCCESSFUL");

    propagate_error (packed, packed_map);

    if (unlikely (!current)) return;
    assert (!current->next);

    /* Only "pack" if other objects exist... Otherwise, don't bother.
     * Saves a move. */
    if (packed.length <= 1)
      return;

    pop_pack (false);

    resolve_links ();
  }
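
  /* Illustrative sketch of a full run, assuming a hypothetical table type
   * `Lookup` with an OffsetTo<Subtable> member `subtableOffset`:
   *
   *   char buf[1024];
   *   hb_serialize_context_t c (buf, sizeof (buf));
   *   Lookup *lookup = c.start_serialize<Lookup> ();
   *   ...                               // Write the parent's fields.
   *   c.push<Subtable> ();              // Start a child object.
   *   ...                               // Write the child's data.
   *   objidx_t idx = c.pop_pack ();     // Pack it; may dedup with an equal object.
   *   c.add_link (lookup->subtableOffset, idx);
   *   c.end_serialize ();               // Resolves links into real offsets.
   *   hb_blob_t *blob = c.copy_blob (); // Extract the result.
   */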

  template <typename Type = void>
  Type *push ()
  {
    object_t *obj = object_pool.alloc ();
    if (unlikely (!obj))
      check_success (false);
    else
    {
      obj->head = head;
      obj->tail = tail;
      obj->next = current;
      current = obj;
    }
    return start_embed<Type> ();
  }
  void pop_discard ()
  {
    object_t *obj = current;
    if (unlikely (!obj)) return;
    current = current->next;
    revert (obj->head, obj->tail);
    obj->fini ();
    object_pool.free (obj);
  }

  /* Set `share` to false when an object is unlikely to be shareable with
   * others, so the lookup is not worth attempting, or when a contiguous
   * table is serialized as multiple consecutive objects in reverse order,
   * in which case it cannot be shared anyway. */
  objidx_t pop_pack (bool share=true)
  {
    object_t *obj = current;
    if (unlikely (!obj)) return 0;
    current = current->next;
    obj->tail = head;
    obj->next = nullptr;
    unsigned len = obj->tail - obj->head;
    head = obj->head; /* Rewind head. */

    if (!len)
    {
      assert (!obj->links.length);
      return 0;
    }

    objidx_t objidx;
    if (share)
    {
      objidx = packed_map.get (obj);
      if (objidx)
      {
        obj->fini ();
        return objidx;
      }
    }

    tail -= len;
    memmove (tail, obj->head, len);

    obj->head = tail;
    obj->tail = tail + len;

    packed.push (obj);

    if (unlikely (packed.in_error ()))
      return 0;

    objidx = packed.length - 1;

    if (share) packed_map.set (obj, objidx);

    return objidx;
  }

  void revert (snapshot_t snap)
  {
    assert (snap.current == current);
    current->links.shrink (snap.num_links);
    revert (snap.head, snap.tail);
  }
  void revert (char *snap_head,
               char *snap_tail)
  {
    assert (snap_head <= head);
    assert (tail <= snap_tail);
    head = snap_head;
    tail = snap_tail;
    discard_stale_objects ();
  }

  void discard_stale_objects ()
  {
    while (packed.length > 1 &&
           packed.tail ()->head < tail)
    {
      packed_map.del (packed.tail ());
      assert (!packed.tail ()->next);
      packed.tail ()->fini ();
      packed.pop ();
    }
    if (packed.length > 1)
      assert (packed.tail ()->head == tail);
  }

  template <typename T>
  void add_link (T &ofs, objidx_t objidx,
                 whence_t whence = Head,
                 unsigned bias = 0)
  {
    static_assert (sizeof (T) == 2 || sizeof (T) == 4, "");

    if (!objidx)
      return;

    assert (current);
    assert (current->head <= (const char *) &ofs);

    auto& link = *current->links.push ();

    link.is_wide = sizeof (T) == 4;
    link.is_signed = hb_is_signed (hb_unwrap_type (T));
    link.whence = (unsigned) whence;
    link.position = (const char *) &ofs - current->head;
    link.bias = bias;
    link.objidx = objidx;
  }

  unsigned to_bias (const void *base) const
  {
    if (!base) return 0;
    assert (current);
    assert (current->head <= (const char *) base);
    return (const char *) base - current->head;
  }
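
  /* Illustrative sketch: linking a child whose offset is stored relative to
   * a base that does not coincide with the current object's head.  `base`
   * and `table->childOffset` are hypothetical; to_bias () converts the base
   * pointer into the bias that resolve_links () will subtract:
   *
   *   objidx_t idx = c.pop_pack ();
   *   c.add_link (table->childOffset, idx, Head, c.to_bias (base));
   */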

  void resolve_links ()
  {
    if (unlikely (in_error ())) return;

    assert (!current);
    assert (packed.length > 1);

    for (const object_t* parent : ++hb_iter (packed))
      for (const object_t::link_t &link : parent->links)
      {
        const object_t* child = packed[link.objidx];
        if (unlikely (!child)) { err_other_error (); return; }
        unsigned offset = 0;
        switch ((whence_t) link.whence) {
          case Head:     offset = child->head - parent->head; break;
          case Tail:     offset = child->head - parent->tail; break;
          case Absolute: offset = (head - start) + (child->head - tail); break;
        }

        assert (offset >= link.bias);
        offset -= link.bias;
        if (link.is_signed)
        {
          if (link.is_wide)
            assign_offset<int32_t> (parent, link, offset);
          else
            assign_offset<int16_t> (parent, link, offset);
        }
        else
        {
          if (link.is_wide)
            assign_offset<uint32_t> (parent, link, offset);
          else
            assign_offset<uint16_t> (parent, link, offset);
        }
      }
  }

  /* Length of the data written to the current object so far. */
  unsigned int length () const { return this->head - current->head; }

  void align (unsigned int alignment)
  {
    unsigned int l = length () % alignment;
    if (l)
      allocate_size<void> (alignment - l);
  }
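
  /* For example, align (4) zero-pads the current object up to a 4-byte
   * boundary, since allocate_size () zero-fills the bytes it returns. */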

  template <typename Type = void>
  Type *start_embed (const Type *obj HB_UNUSED = nullptr) const
  { return reinterpret_cast<Type *> (this->head); }
  template <typename Type>
  Type *start_embed (const Type &obj) const
  { return start_embed (hb_addressof (obj)); }

  /* The following two functions exist so that one can set a breakpoint
   * on them. */
  void err_ran_out_of_room () { this->ran_out_of_room = true; }
  void err_other_error () { this->successful = false; }

  template <typename Type>
  Type *allocate_size (unsigned int size)
  {
    if (unlikely (!this->successful)) return nullptr;

    if (this->tail - this->head < ptrdiff_t (size))
    {
      err_ran_out_of_room ();
      this->successful = false;
      return nullptr;
    }
    memset (this->head, 0, size);
    char *ret = this->head;
    this->head += size;
    return reinterpret_cast<Type *> (ret);
  }

  template <typename Type>
  Type *allocate_min ()
  { return this->allocate_size<Type> (Type::min_size); }

  template <typename Type>
  Type *embed (const Type *obj)
  {
    unsigned int size = obj->get_size ();
    Type *ret = this->allocate_size<Type> (size);
    if (unlikely (!ret)) return nullptr;
    memcpy (ret, obj, size);
    return ret;
  }
  template <typename Type>
  Type *embed (const Type &obj)
  { return embed (hb_addressof (obj)); }
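
  /* Illustrative sketch: embed () copies an existing object bit-for-bit
   * into the buffer.  `src_table`, a pointer into a source font table, is
   * hypothetical:
   *
   *   if (unlikely (!c->embed (src_table))) return false;
   */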

  template <typename Type, typename ...Ts> auto
  _copy (const Type &src, hb_priority<1>, Ts&&... ds) HB_RETURN
  (Type *, src.copy (this, hb_forward<Ts> (ds)...))

  template <typename Type> auto
  _copy (const Type &src, hb_priority<0>) -> decltype (&(hb_declval<Type> () = src))
  {
    Type *ret = this->allocate_size<Type> (sizeof (Type));
    if (unlikely (!ret)) return nullptr;
    *ret = src;
    return ret;
  }

  /* Like embed, but active: calls the object's operator= () or copy ()
   * to transfer data instead of memcpy (). */
  template <typename Type, typename ...Ts>
  Type *copy (const Type &src, Ts&&... ds)
  { return _copy (src, hb_prioritize, hb_forward<Ts> (ds)...); }
  template <typename Type, typename ...Ts>
  Type *copy (const Type *src, Ts&&... ds)
  { return copy (*src, hb_forward<Ts> (ds)...); }
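
  /* Illustrative note: overload resolution prefers the hb_priority<1>
   * _copy () overload, so a type with a copy (c, ...) method gets it
   * invoked with any extra arguments; otherwise the assignment-based
   * fallback is used.  E.g., for a plain `HBUINT16 v`:
   *
   *   HBUINT16 *out = c.copy (v);  // Falls back to operator= ().
   */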

  template<typename Iterator,
           hb_requires (hb_is_iterator (Iterator)),
           typename ...Ts>
  void copy_all (Iterator it, Ts&&... ds)
  { for (decltype (*it) _ : it) copy (_, hb_forward<Ts> (ds)...); }

  template <typename Type>
  hb_serialize_context_t& operator << (const Type &obj) & { embed (obj); return *this; }

  template <typename Type>
  Type *extend_size (Type *obj, unsigned int size)
  {
    assert (this->start <= (char *) obj);
    assert ((char *) obj <= this->head);
    assert ((char *) obj + size >= this->head);
    if (unlikely (!this->allocate_size<Type> (((char *) obj) + size - this->head))) return nullptr;
    return reinterpret_cast<Type *> (obj);
  }
  template <typename Type>
  Type *extend_size (Type &obj, unsigned int size)
  { return extend_size (hb_addressof (obj), size); }

  template <typename Type>
  Type *extend_min (Type *obj) { return extend_size (obj, obj->min_size); }
  template <typename Type>
  Type *extend_min (Type &obj) { return extend_min (hb_addressof (obj)); }

  template <typename Type, typename ...Ts>
  Type *extend (Type *obj, Ts&&... ds)
  { return extend_size (obj, obj->get_size (hb_forward<Ts> (ds)...)); }
  template <typename Type, typename ...Ts>
  Type *extend (Type &obj, Ts&&... ds)
  { return extend (hb_addressof (obj), hb_forward<Ts> (ds)...); }
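
  /* Illustrative sketch of the common serialize pattern: reserve a struct's
   * minimum size up front, then grow it as variable-length parts are
   * written.  `Table` is a hypothetical type exposing min_size:
   *
   *   auto *out = c->start_embed<Table> ();
   *   if (unlikely (!c->extend_min (out))) return false;
   *   ...  // Fill out->fixed fields; serialize variable-length parts after.
   */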

  /* Output routines. */
  hb_bytes_t copy_bytes () const
  {
    assert (this->successful);
    /* Copy both the head-side and the tail-side data... */
    unsigned int len = (this->head - this->start)
                     + (this->end - this->tail);

    char *p = (char *) malloc (len);
    if (unlikely (!p)) return hb_bytes_t ();

    memcpy (p, this->start, this->head - this->start);
    memcpy (p + (this->head - this->start), this->tail, this->end - this->tail);
    return hb_bytes_t (p, len);
  }
  template <typename Type>
  Type *copy () const
  { return reinterpret_cast<Type *> ((char *) copy_bytes ().arrayZ); }
  hb_blob_t *copy_blob () const
  {
    hb_bytes_t b = copy_bytes ();
    return hb_blob_create (b.arrayZ, b.length,
                           HB_MEMORY_MODE_WRITABLE,
                           (char *) b.arrayZ, free);
  }
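
  /* Illustrative note: copy_blob () concatenates the head-side and
   * tail-side regions into one allocation, yielding the finished table:
   *
   *   hb_blob_t *blob = c.copy_blob ();  // Caller owns the blob reference.
   */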

  private:
  template <typename T>
  void assign_offset (const object_t* parent, const object_t::link_t &link, unsigned offset)
  {
    auto &off = * ((BEInt<T, sizeof (T)> *) (parent->head + link.position));
    assert (0 == off);
    check_assign (off, offset);
  }

  public: /* TODO Make private. */
  char *start, *head, *tail, *end;
  unsigned int debug_depth;
  bool successful;
  bool ran_out_of_room;

  private:

  /* Object memory pool. */
  hb_pool_t<object_t> object_pool;

  /* Stack of objects currently under construction. */
  object_t *current;

  /* Stack of packed objects.  Object 0 is always the nil object. */
  hb_vector_t<object_t *> packed;

  /* Map view of packed objects. */
  hb_hashmap_t<const object_t *, objidx_t, nullptr, 0> packed_map;
};


#endif /* HB_SERIALIZE_HH */