1 | /* |
2 | * Copyright © 2007,2008,2009,2010 Red Hat, Inc. |
3 | * Copyright © 2012,2018 Google, Inc. |
4 | * Copyright © 2019 Facebook, Inc. |
5 | * |
6 | * This is part of HarfBuzz, a text shaping library. |
7 | * |
8 | * Permission is hereby granted, without written agreement and without |
9 | * license or royalty fees, to use, copy, modify, and distribute this |
10 | * software and its documentation for any purpose, provided that the |
11 | * above copyright notice and the following two paragraphs appear in |
12 | * all copies of this software. |
13 | * |
14 | * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR |
15 | * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES |
16 | * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN |
17 | * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH |
18 | * DAMAGE. |
19 | * |
20 | * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, |
21 | * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND |
22 | * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS |
23 | * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO |
24 | * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. |
25 | * |
26 | * Red Hat Author(s): Behdad Esfahbod |
27 | * Google Author(s): Behdad Esfahbod |
28 | * Facebook Author(s): Behdad Esfahbod |
29 | */ |
30 | |
31 | #ifndef HB_SERIALIZE_HH |
32 | #define HB_SERIALIZE_HH |
33 | |
34 | #include "hb.hh" |
35 | #include "hb-blob.hh" |
36 | #include "hb-map.hh" |
37 | #include "hb-pool.hh" |
38 | |
39 | #ifdef HB_EXPERIMENTAL_API |
40 | #include "hb-subset-repacker.h" |
41 | #endif |
42 | |
43 | /* |
44 | * Serialize |
45 | */ |
46 | |
47 | enum hb_serialize_error_t { |
48 | HB_SERIALIZE_ERROR_NONE = 0x00000000u, |
49 | HB_SERIALIZE_ERROR_OTHER = 0x00000001u, |
50 | HB_SERIALIZE_ERROR_OFFSET_OVERFLOW = 0x00000002u, |
51 | HB_SERIALIZE_ERROR_OUT_OF_ROOM = 0x00000004u, |
52 | HB_SERIALIZE_ERROR_INT_OVERFLOW = 0x00000008u, |
53 | HB_SERIALIZE_ERROR_ARRAY_OVERFLOW = 0x00000010u |
54 | }; |
55 | HB_MARK_AS_FLAG_T (hb_serialize_error_t); |
56 | |
57 | struct hb_serialize_context_t |
58 | { |
59 | typedef unsigned objidx_t; |
60 | |
61 | enum whence_t { |
    Head,     /* Relative to the current object's head (default). */
    Tail,     /* Relative to the current object's tail, after it is packed. */
    Absolute  /* Absolute: from the start of the serialize buffer. */
65 | }; |
66 | |
67 | |
68 | |
69 | struct object_t |
70 | { |
71 | void fini () { |
72 | real_links.fini (); |
73 | virtual_links.fini (); |
74 | } |
75 | |
76 | object_t () = default; |
77 | |
78 | #ifdef HB_EXPERIMENTAL_API |
79 | object_t (const hb_object_t &o) |
80 | { |
81 | head = o.head; |
82 | tail = o.tail; |
83 | next = nullptr; |
84 | real_links.alloc (o.num_real_links, true); |
85 | for (unsigned i = 0 ; i < o.num_real_links; i++) |
86 | real_links.push (o.real_links[i]); |
87 | |
88 | virtual_links.alloc (o.num_virtual_links, true); |
89 | for (unsigned i = 0; i < o.num_virtual_links; i++) |
90 | virtual_links.push (o.virtual_links[i]); |
91 | } |
92 | #endif |
93 | |
94 | friend void swap (object_t& a, object_t& b) |
95 | { |
96 | hb_swap (a.head, b.head); |
97 | hb_swap (a.tail, b.tail); |
98 | hb_swap (a.next, b.next); |
99 | hb_swap (a.real_links, b.real_links); |
100 | hb_swap (a.virtual_links, b.virtual_links); |
101 | } |
102 | |
103 | bool operator == (const object_t &o) const |
104 | { |
105 | // Virtual links aren't considered for equality since they don't affect the functionality |
106 | // of the object. |
107 | return (tail - head == o.tail - o.head) |
108 | && (real_links.length == o.real_links.length) |
109 | && 0 == hb_memcmp (head, o.head, tail - head) |
110 | && real_links.as_bytes () == o.real_links.as_bytes (); |
111 | } |
112 | uint32_t hash () const |
113 | { |
      // Virtual links aren't considered for the hash, since they don't affect
      // the functionality of the object.
116 | return hb_bytes_t (head, hb_min (128, tail - head)).hash () ^ |
117 | real_links.as_bytes ().hash (); |
118 | } |
119 | |
120 | struct link_t |
121 | { |
122 | unsigned width: 3; |
123 | unsigned is_signed: 1; |
124 | unsigned whence: 2; |
125 | unsigned bias : 26; |
126 | unsigned position; |
127 | objidx_t objidx; |
128 | |
129 | link_t () = default; |
130 | |
131 | #ifdef HB_EXPERIMENTAL_API |
132 | link_t (const hb_link_t &o) |
133 | { |
134 | width = o.width; |
135 | is_signed = 0; |
136 | whence = 0; |
137 | position = o.position; |
138 | bias = 0; |
139 | objidx = o.objidx; |
140 | } |
141 | #endif |
142 | |
143 | HB_INTERNAL static int cmp (const void* a, const void* b) |
144 | { |
145 | int cmp = ((const link_t*)a)->position - ((const link_t*)b)->position; |
146 | if (cmp) return cmp; |
147 | |
148 | return ((const link_t*)a)->objidx - ((const link_t*)b)->objidx; |
149 | } |
150 | }; |
151 | |
152 | char *head; |
153 | char *tail; |
154 | hb_vector_t<link_t> real_links; |
155 | hb_vector_t<link_t> virtual_links; |
156 | object_t *next; |
157 | |
158 | auto all_links () const HB_AUTO_RETURN |
159 | (( hb_concat (this->real_links, this->virtual_links) )); |
160 | auto all_links_writer () HB_AUTO_RETURN |
161 | (( hb_concat (this->real_links.writer (), this->virtual_links.writer ()) )); |
162 | }; |
163 | |
164 | struct snapshot_t |
165 | { |
166 | char *head; |
167 | char *tail; |
168 | object_t *current; // Just for sanity check |
169 | unsigned num_real_links; |
170 | unsigned num_virtual_links; |
171 | hb_serialize_error_t errors; |
172 | }; |
173 | |
174 | snapshot_t snapshot () |
175 | { |
176 | return snapshot_t { |
177 | head, tail, current, |
178 | current ? current->real_links.length : 0, |
179 | current ? current->virtual_links.length : 0, |
180 | errors |
181 | }; |
182 | } |
183 | |
184 | hb_serialize_context_t (void *start_, unsigned int size) : |
185 | start ((char *) start_), |
186 | end (start + size), |
187 | current (nullptr) |
188 | { reset (); } |
189 | ~hb_serialize_context_t () { fini (); } |
190 | |
191 | void fini () |
192 | { |
193 | for (object_t *_ : ++hb_iter (packed)) _->fini (); |
194 | packed.fini (); |
195 | this->packed_map.fini (); |
196 | |
197 | while (current) |
198 | { |
199 | auto *_ = current; |
200 | current = current->next; |
201 | _->fini (); |
202 | } |
203 | } |
204 | |
205 | bool in_error () const { return bool (errors); } |
206 | |
207 | bool successful () const { return !bool (errors); } |
208 | |
209 | HB_NODISCARD bool ran_out_of_room () const { return errors & HB_SERIALIZE_ERROR_OUT_OF_ROOM; } |
210 | HB_NODISCARD bool offset_overflow () const { return errors & HB_SERIALIZE_ERROR_OFFSET_OVERFLOW; } |
211 | HB_NODISCARD bool only_offset_overflow () const { return errors == HB_SERIALIZE_ERROR_OFFSET_OVERFLOW; } |
212 | HB_NODISCARD bool only_overflow () const |
213 | { |
214 | return errors == HB_SERIALIZE_ERROR_OFFSET_OVERFLOW |
215 | || errors == HB_SERIALIZE_ERROR_INT_OVERFLOW |
216 | || errors == HB_SERIALIZE_ERROR_ARRAY_OVERFLOW; |
217 | } |
218 | |
219 | void reset (void *start_, unsigned int size) |
220 | { |
221 | start = (char*) start_; |
222 | end = start + size; |
223 | reset (); |
224 | current = nullptr; |
225 | } |
226 | |
227 | void reset () |
228 | { |
229 | this->errors = HB_SERIALIZE_ERROR_NONE; |
230 | this->head = this->start; |
231 | this->tail = this->end; |
232 | this->zerocopy = nullptr; |
233 | this->debug_depth = 0; |
234 | |
235 | fini (); |
236 | this->packed.push (nullptr); |
237 | this->packed_map.init (); |
238 | } |
239 | |
240 | bool check_success (bool success, |
241 | hb_serialize_error_t err_type = HB_SERIALIZE_ERROR_OTHER) |
242 | { |
243 | return successful () |
244 | && (success || err (err_type)); |
245 | } |
246 | |
247 | template <typename T1, typename T2> |
248 | bool check_equal (T1 &&v1, T2 &&v2, hb_serialize_error_t err_type) |
249 | { |
250 | if ((long long) v1 != (long long) v2) |
251 | { |
252 | return err (err_type); |
253 | } |
254 | return true; |
255 | } |
256 | |
257 | template <typename T1, typename T2> |
258 | bool check_assign (T1 &v1, T2 &&v2, hb_serialize_error_t err_type) |
259 | { return check_equal (v1 = v2, v2, err_type); } |
260 | |
261 | template <typename T> bool propagate_error (T &&obj) |
262 | { return check_success (!hb_deref (obj).in_error ()); } |
263 | |
264 | template <typename T1, typename... Ts> bool propagate_error (T1 &&o1, Ts&&... os) |
265 | { return propagate_error (std::forward<T1> (o1)) && |
266 | propagate_error (std::forward<Ts> (os)...); } |
267 | |
268 | /* To be called around main operation. */ |
269 | template <typename Type=char> |
270 | __attribute__((returns_nonnull)) |
271 | Type *start_serialize () |
272 | { |
273 | DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, +1, |
274 | "start [%p..%p] (%lu bytes)" , |
275 | this->start, this->end, |
276 | (unsigned long) (this->end - this->start)); |
277 | |
278 | assert (!current); |
279 | return push<Type> (); |
280 | } |
281 | void end_serialize () |
282 | { |
283 | DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, -1, |
284 | "end [%p..%p] serialized %u bytes; %s" , |
285 | this->start, this->end, |
286 | (unsigned) (this->head - this->start), |
287 | successful () ? "successful" : "UNSUCCESSFUL" ); |
288 | |
289 | propagate_error (packed, packed_map); |
290 | |
291 | if (unlikely (!current)) return; |
292 | if (unlikely (in_error())) |
293 | { |
294 | // Offset overflows that occur before link resolution cannot be handled |
295 | // by repacking, so set a more general error. |
296 | if (offset_overflow ()) err (HB_SERIALIZE_ERROR_OTHER); |
297 | return; |
298 | } |
299 | |
300 | assert (!current->next); |
301 | |
302 | /* Only "pack" if there exist other objects... Otherwise, don't bother. |
303 | * Saves a move. */ |
304 | if (packed.length <= 1) |
305 | return; |
306 | |
307 | pop_pack (false); |
308 | |
309 | resolve_links (); |
310 | } |
311 | |
312 | template <typename Type = void> |
313 | __attribute__((returns_nonnull)) |
314 | Type *push () |
315 | { |
316 | if (unlikely (in_error ())) return start_embed<Type> (); |
317 | |
318 | object_t *obj = object_pool.alloc (); |
319 | if (unlikely (!obj)) |
320 | check_success (false); |
321 | else |
322 | { |
323 | obj->head = head; |
324 | obj->tail = tail; |
325 | obj->next = current; |
326 | current = obj; |
327 | } |
328 | return start_embed<Type> (); |
329 | } |
330 | void pop_discard () |
331 | { |
332 | object_t *obj = current; |
333 | if (unlikely (!obj)) return; |
334 | // Allow cleanup when we've error'd out on int overflows which don't compromise |
335 | // the serializer state. |
336 | if (unlikely (in_error() && !only_overflow ())) return; |
337 | |
338 | current = current->next; |
339 | revert (zerocopy ? zerocopy : obj->head, obj->tail); |
340 | zerocopy = nullptr; |
341 | obj->fini (); |
342 | object_pool.release (obj); |
343 | } |
344 | |
  /* Set share to false when an object is unlikely to be shareable with others,
   * so a sharing attempt isn't worth it, or when a contiguous table is
   * serialized as multiple consecutive objects in reverse order and therefore
   * can't be shared.
   */
349 | objidx_t pop_pack (bool share=true) |
350 | { |
351 | object_t *obj = current; |
352 | if (unlikely (!obj)) return 0; |
353 | // Allow cleanup when we've error'd out on int overflows which don't compromise |
354 | // the serializer state. |
355 | if (unlikely (in_error() && !only_overflow ())) return 0; |
356 | |
357 | current = current->next; |
358 | obj->tail = head; |
359 | obj->next = nullptr; |
360 | assert (obj->head <= obj->tail); |
361 | unsigned len = obj->tail - obj->head; |
362 | head = zerocopy ? zerocopy : obj->head; /* Rewind head. */ |
363 | bool was_zerocopy = zerocopy; |
364 | zerocopy = nullptr; |
365 | |
366 | if (!len) |
367 | { |
368 | assert (!obj->real_links.length); |
369 | assert (!obj->virtual_links.length); |
370 | return 0; |
371 | } |
372 | |
373 | objidx_t objidx; |
374 | uint32_t hash = 0; |
375 | if (share) |
376 | { |
377 | hash = hb_hash (obj); |
378 | objidx = packed_map.get_with_hash (obj, hash); |
379 | if (objidx) |
380 | { |
381 | merge_virtual_links (obj, objidx); |
382 | obj->fini (); |
383 | return objidx; |
384 | } |
385 | } |
386 | |
387 | tail -= len; |
388 | if (was_zerocopy) |
389 | assert (tail == obj->head); |
390 | else |
391 | memmove (tail, obj->head, len); |
392 | |
393 | obj->head = tail; |
394 | obj->tail = tail + len; |
395 | |
396 | packed.push (obj); |
397 | |
398 | if (unlikely (!propagate_error (packed))) |
399 | { |
      /* Obj wasn't successfully added to packed, so clean it up; otherwise its
       * links will be leaked. Once constructors/destructors are used properly,
       * this can be removed. */
403 | obj->fini (); |
404 | return 0; |
405 | } |
406 | |
407 | objidx = packed.length - 1; |
408 | |
409 | if (share) packed_map.set_with_hash (obj, hash, objidx); |
410 | propagate_error (packed_map); |
411 | |
412 | return objidx; |
413 | } |
414 | |
415 | void revert (snapshot_t snap) |
416 | { |
417 | // Overflows that happened after the snapshot will be erased by the revert. |
418 | if (unlikely (in_error () && !only_overflow ())) return; |
419 | assert (snap.current == current); |
420 | if (current) |
421 | { |
422 | current->real_links.shrink (snap.num_real_links); |
423 | current->virtual_links.shrink (snap.num_virtual_links); |
424 | } |
425 | errors = snap.errors; |
426 | revert (snap.head, snap.tail); |
427 | } |
428 | |
429 | void revert (char *snap_head, |
430 | char *snap_tail) |
431 | { |
432 | if (unlikely (in_error ())) return; |
433 | assert (snap_head <= head); |
434 | assert (tail <= snap_tail); |
435 | head = snap_head; |
436 | tail = snap_tail; |
437 | discard_stale_objects (); |
438 | } |
439 | |
440 | void discard_stale_objects () |
441 | { |
442 | if (unlikely (in_error ())) return; |
443 | while (packed.length > 1 && |
444 | packed.tail ()->head < tail) |
445 | { |
446 | packed_map.del (packed.tail ()); |
447 | assert (!packed.tail ()->next); |
448 | packed.tail ()->fini (); |
449 | packed.pop (); |
450 | } |
451 | if (packed.length > 1) |
452 | assert (packed.tail ()->head == tail); |
453 | } |
454 | |
  // Adds a virtual link from the current object to objidx. Virtual links are not
  // associated with an actual offset field; they are solely used to enforce
  // ordering constraints between objects. Adding a virtual link from object a to
  // object b ensures that object b is always packed after object a in the final
  // serialized order.
  //
  // This is useful when the final serialization needs a specific ordering, such
  // as when platform bugs require certain orderings, or to provide guidance to
  // the repacker for better offset-overflow resolution.
463 | void add_virtual_link (objidx_t objidx) |
464 | { |
465 | if (unlikely (in_error ())) return; |
466 | |
467 | if (!objidx) |
468 | return; |
469 | |
470 | assert (current); |
471 | |
472 | auto& link = *current->virtual_links.push (); |
473 | if (current->virtual_links.in_error ()) |
474 | err (HB_SERIALIZE_ERROR_OTHER); |
475 | |
476 | link.width = 0; |
477 | link.objidx = objidx; |
478 | link.is_signed = 0; |
479 | link.whence = 0; |
480 | link.position = 0; |
481 | link.bias = 0; |
482 | } |
483 | |
484 | template <typename T> |
485 | void add_link (T &ofs, objidx_t objidx, |
486 | whence_t whence = Head, |
487 | unsigned bias = 0) |
488 | { |
489 | if (unlikely (in_error ())) return; |
490 | |
491 | if (!objidx) |
492 | return; |
493 | |
494 | assert (current); |
495 | assert (current->head <= (const char *) &ofs); |
496 | |
497 | auto& link = *current->real_links.push (); |
498 | if (current->real_links.in_error ()) |
499 | err (HB_SERIALIZE_ERROR_OTHER); |
500 | |
501 | link.width = sizeof (T); |
502 | link.objidx = objidx; |
503 | if (unlikely (!sizeof (T))) |
504 | { |
505 | // This link is not associated with an actual offset and exists merely to enforce |
506 | // an ordering constraint. |
507 | link.is_signed = 0; |
508 | link.whence = 0; |
509 | link.position = 0; |
510 | link.bias = 0; |
511 | return; |
512 | } |
513 | |
514 | link.is_signed = std::is_signed<hb_unwrap_type (T)>::value; |
515 | link.whence = (unsigned) whence; |
516 | link.position = (const char *) &ofs - current->head; |
517 | link.bias = bias; |
518 | } |
519 | |
520 | unsigned to_bias (const void *base) const |
521 | { |
522 | if (unlikely (in_error ())) return 0; |
523 | if (!base) return 0; |
524 | assert (current); |
525 | assert (current->head <= (const char *) base); |
526 | return (const char *) base - current->head; |
527 | } |
528 | |
529 | void resolve_links () |
530 | { |
531 | if (unlikely (in_error ())) return; |
532 | |
533 | assert (!current); |
534 | assert (packed.length > 1); |
535 | |
536 | for (const object_t* parent : ++hb_iter (packed)) |
537 | for (const object_t::link_t &link : parent->real_links) |
538 | { |
539 | const object_t* child = packed[link.objidx]; |
540 | if (unlikely (!child)) { err (HB_SERIALIZE_ERROR_OTHER); return; } |
541 | unsigned offset = 0; |
542 | switch ((whence_t) link.whence) { |
543 | case Head: offset = child->head - parent->head; break; |
544 | case Tail: offset = child->head - parent->tail; break; |
545 | case Absolute: offset = (head - start) + (child->head - tail); break; |
546 | } |
547 | |
548 | assert (offset >= link.bias); |
549 | offset -= link.bias; |
550 | if (link.is_signed) |
551 | { |
552 | assert (link.width == 2 || link.width == 4); |
553 | if (link.width == 4) |
554 | assign_offset<int32_t> (parent, link, offset); |
555 | else |
556 | assign_offset<int16_t> (parent, link, offset); |
557 | } |
558 | else |
559 | { |
560 | assert (link.width == 2 || link.width == 3 || link.width == 4); |
561 | if (link.width == 4) |
562 | assign_offset<uint32_t> (parent, link, offset); |
563 | else if (link.width == 3) |
564 | assign_offset<uint32_t, 3> (parent, link, offset); |
565 | else |
566 | assign_offset<uint16_t> (parent, link, offset); |
567 | } |
568 | } |
569 | } |
570 | |
571 | unsigned int length () const |
572 | { |
573 | if (unlikely (!current)) return 0; |
574 | return this->head - current->head; |
575 | } |
576 | |
577 | void align (unsigned int alignment) |
578 | { |
579 | unsigned int l = length () % alignment; |
580 | if (l) |
581 | (void) allocate_size<void> (alignment - l); |
582 | } |
583 | |
584 | template <typename Type = void> |
585 | __attribute__((returns_nonnull)) |
586 | Type *start_embed (const Type *obj HB_UNUSED = nullptr) const |
587 | { return reinterpret_cast<Type *> (this->head); } |
588 | template <typename Type> |
589 | __attribute__((returns_nonnull)) |
590 | Type *start_embed (const Type &obj) const |
591 | { return start_embed (std::addressof (obj)); } |
592 | |
593 | bool err (hb_serialize_error_t err_type) |
594 | { |
595 | return !bool ((errors = (errors | err_type))); |
596 | } |
597 | |
598 | bool start_zerocopy (size_t size) |
599 | { |
600 | if (unlikely (in_error ())) return false; |
601 | |
602 | if (unlikely (size > INT_MAX || this->tail - this->head < ptrdiff_t (size))) |
603 | { |
604 | err (HB_SERIALIZE_ERROR_OUT_OF_ROOM); |
605 | return false; |
606 | } |
607 | |
608 | assert (!this->zerocopy); |
609 | this->zerocopy = this->head; |
610 | |
611 | assert (this->current->head == this->head); |
612 | this->current->head = this->current->tail = this->head = this->tail - size; |
613 | return true; |
614 | } |
615 | |
616 | template <typename Type> |
617 | HB_NODISCARD |
618 | Type *allocate_size (size_t size, bool clear = true) |
619 | { |
620 | if (unlikely (in_error ())) return nullptr; |
621 | |
622 | if (unlikely (size > INT_MAX || this->tail - this->head < ptrdiff_t (size))) |
623 | { |
624 | err (HB_SERIALIZE_ERROR_OUT_OF_ROOM); |
625 | return nullptr; |
626 | } |
627 | if (clear) |
628 | hb_memset (this->head, 0, size); |
629 | char *ret = this->head; |
630 | this->head += size; |
631 | return reinterpret_cast<Type *> (ret); |
632 | } |
633 | |
634 | template <typename Type> |
635 | Type *allocate_min () |
636 | { return this->allocate_size<Type> (Type::min_size); } |
637 | |
638 | template <typename Type> |
639 | HB_NODISCARD |
640 | Type *embed (const Type *obj) |
641 | { |
642 | unsigned int size = obj->get_size (); |
643 | Type *ret = this->allocate_size<Type> (size, false); |
644 | if (unlikely (!ret)) return nullptr; |
645 | hb_memcpy (ret, obj, size); |
646 | return ret; |
647 | } |
648 | template <typename Type> |
649 | HB_NODISCARD |
650 | Type *embed (const Type &obj) |
651 | { return embed (std::addressof (obj)); } |
652 | char *embed (const char *obj, unsigned size) |
653 | { |
654 | char *ret = this->allocate_size<char> (size, false); |
655 | if (unlikely (!ret)) return nullptr; |
656 | hb_memcpy (ret, obj, size); |
657 | return ret; |
658 | } |
659 | |
660 | template <typename Type, typename ...Ts> auto |
661 | _copy (const Type &src, hb_priority<1>, Ts&&... ds) HB_RETURN |
662 | (Type *, src.copy (this, std::forward<Ts> (ds)...)) |
663 | |
664 | template <typename Type> auto |
665 | _copy (const Type &src, hb_priority<0>) -> decltype (&(hb_declval<Type> () = src)) |
666 | { |
667 | Type *ret = this->allocate_size<Type> (sizeof (Type)); |
668 | if (unlikely (!ret)) return nullptr; |
669 | *ret = src; |
670 | return ret; |
671 | } |
672 | |
673 | /* Like embed, but active: calls obj.operator=() or obj.copy() to transfer data |
674 | * instead of hb_memcpy(). */ |
675 | template <typename Type, typename ...Ts> |
676 | Type *copy (const Type &src, Ts&&... ds) |
677 | { return _copy (src, hb_prioritize, std::forward<Ts> (ds)...); } |
678 | template <typename Type, typename ...Ts> |
679 | Type *copy (const Type *src, Ts&&... ds) |
680 | { return copy (*src, std::forward<Ts> (ds)...); } |
681 | |
682 | template<typename Iterator, |
683 | hb_requires (hb_is_iterator (Iterator)), |
684 | typename ...Ts> |
685 | void copy_all (Iterator it, Ts&&... ds) |
686 | { for (decltype (*it) _ : it) copy (_, std::forward<Ts> (ds)...); } |
687 | |
688 | template <typename Type> |
689 | hb_serialize_context_t& operator << (const Type &obj) & { embed (obj); return *this; } |
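
  /* Small sketch of the embed/copy helpers (placeholder operands):
   *
   *   *c << value;               // embed () a fixed-size value
   *   c->copy (table, args...);  // calls table.copy (c, args...) if available,
   *                              // otherwise assigns into newly allocated space
   */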
690 | |
691 | template <typename Type> |
692 | Type *extend_size (Type *obj, size_t size, bool clear = true) |
693 | { |
694 | if (unlikely (in_error ())) return nullptr; |
695 | |
696 | assert (this->start <= (char *) obj); |
697 | assert ((char *) obj <= this->head); |
698 | assert ((size_t) (this->head - (char *) obj) <= size); |
699 | if (unlikely (((char *) obj + size < (char *) obj) || |
700 | !this->allocate_size<Type> (((char *) obj) + size - this->head, clear))) return nullptr; |
701 | return reinterpret_cast<Type *> (obj); |
702 | } |
703 | template <typename Type> |
704 | Type *extend_size (Type &obj, size_t size, bool clear = true) |
705 | { return extend_size (std::addressof (obj), size, clear); } |
706 | |
707 | template <typename Type> |
708 | Type *extend_min (Type *obj) { return extend_size (obj, obj->min_size); } |
709 | template <typename Type> |
710 | Type *extend_min (Type &obj) { return extend_min (std::addressof (obj)); } |
711 | |
712 | template <typename Type, typename ...Ts> |
713 | Type *extend (Type *obj, Ts&&... ds) |
714 | { return extend_size (obj, obj->get_size (std::forward<Ts> (ds)...)); } |
715 | template <typename Type, typename ...Ts> |
716 | Type *extend (Type &obj, Ts&&... ds) |
717 | { return extend (std::addressof (obj), std::forward<Ts> (ds)...); } |
718 | |
719 | /* Output routines. */ |
720 | hb_bytes_t copy_bytes () const |
721 | { |
722 | assert (successful ()); |
723 | /* Copy both items from head side and tail side... */ |
724 | unsigned int len = (this->head - this->start) |
725 | + (this->end - this->tail); |
726 | |
    // If len is zero, don't hb_malloc, as the memory won't get properly
    // cleaned up later.
729 | if (!len) return hb_bytes_t (); |
730 | |
731 | char *p = (char *) hb_malloc (len); |
732 | if (unlikely (!p)) return hb_bytes_t (); |
733 | |
734 | hb_memcpy (p, this->start, this->head - this->start); |
735 | hb_memcpy (p + (this->head - this->start), this->tail, this->end - this->tail); |
736 | return hb_bytes_t (p, len); |
737 | } |
738 | template <typename Type> |
739 | Type *copy () const |
740 | { return reinterpret_cast<Type *> ((char *) copy_bytes ().arrayZ); } |
741 | hb_blob_t *copy_blob () const |
742 | { |
743 | hb_bytes_t b = copy_bytes (); |
744 | return hb_blob_create (b.arrayZ, b.length, |
745 | HB_MEMORY_MODE_WRITABLE, |
746 | (char *) b.arrayZ, hb_free); |
747 | } |
748 | |
749 | const hb_vector_t<object_t *>& object_graph() const |
750 | { return packed; } |
751 | |
752 | private: |
753 | template <typename T, unsigned Size = sizeof (T)> |
754 | void assign_offset (const object_t* parent, const object_t::link_t &link, unsigned offset) |
755 | { |
756 | auto &off = * ((BEInt<T, Size> *) (parent->head + link.position)); |
757 | assert (0 == off); |
758 | check_assign (off, offset, HB_SERIALIZE_ERROR_OFFSET_OVERFLOW); |
759 | } |
760 | |
761 | public: |
762 | char *start, *head, *tail, *end, *zerocopy; |
763 | unsigned int debug_depth; |
764 | hb_serialize_error_t errors; |
765 | |
766 | private: |
767 | |
768 | void merge_virtual_links (const object_t* from, objidx_t to_idx) { |
769 | object_t* to = packed[to_idx]; |
770 | for (const auto& l : from->virtual_links) { |
771 | to->virtual_links.push (l); |
772 | } |
773 | } |
774 | |
775 | /* Object memory pool. */ |
776 | hb_pool_t<object_t> object_pool; |
777 | |
778 | /* Stack of currently under construction objects. */ |
779 | object_t *current; |
780 | |
781 | /* Stack of packed objects. Object 0 is always nil object. */ |
782 | hb_vector_t<object_t *> packed; |
783 | |
784 | /* Map view of packed objects. */ |
785 | hb_hashmap_t<const object_t *, objidx_t> packed_map; |
786 | }; |
787 | |
788 | #endif /* HB_SERIALIZE_HH */ |
789 | |