1/*
2 * Copyright © 2007,2008,2009,2010 Red Hat, Inc.
3 * Copyright © 2012,2018 Google, Inc.
4 *
5 * This is part of HarfBuzz, a text shaping library.
6 *
7 * Permission is hereby granted, without written agreement and without
8 * license or royalty fees, to use, copy, modify, and distribute this
9 * software and its documentation for any purpose, provided that the
10 * above copyright notice and the following two paragraphs appear in
11 * all copies of this software.
12 *
13 * IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
14 * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
15 * ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
16 * IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
17 * DAMAGE.
18 *
19 * THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
20 * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
21 * FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
22 * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
23 * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
24 *
25 * Red Hat Author(s): Behdad Esfahbod
26 * Google Author(s): Behdad Esfahbod
27 */
28
29#ifndef HB_MACHINERY_HH
30#define HB_MACHINERY_HH
31
32#include "hb.hh"
33#include "hb-blob.hh"
34
35#include "hb-array.hh"
36#include "hb-vector.hh"
37
38
39/*
40 * Casts
41 */
42
43/* Cast to struct T, reference to reference */
44template<typename Type, typename TObject>
45static inline const Type& CastR(const TObject &X)
46{ return reinterpret_cast<const Type&> (X); }
47template<typename Type, typename TObject>
48static inline Type& CastR(TObject &X)
49{ return reinterpret_cast<Type&> (X); }
50
/* Cast to struct T, pointer to pointer.
 * Reinterprets a pointer of arbitrary pointee type as a Type pointer;
 * const and non-const flavors. */
template<typename Type, typename TFrom>
static inline const Type* CastP (const TFrom *from)
{
  return reinterpret_cast<const Type *> (from);
}
template<typename Type, typename TFrom>
static inline Type* CastP (TFrom *from)
{
  return reinterpret_cast<Type *> (from);
}
58
/* StructAtOffset<T>(P,Ofs) returns the struct T& that is placed at memory
 * location pointed to by P plus Ofs bytes.  Const and non-const flavors. */
template<typename Type>
static inline const Type& StructAtOffset(const void *P, unsigned int offset)
{
  const char *base = (const char *) P;
  return * reinterpret_cast<const Type *> (base + offset);
}
template<typename Type>
static inline Type& StructAtOffset(void *P, unsigned int offset)
{
  char *base = (char *) P;
  return * reinterpret_cast<Type *> (base + offset);
}
/* Like StructAtOffset, for Type instances that may be unaligned; the
 * pragmas silence -Wcast-align at the cast site.
 *
 * Bug fix: the original cast away const-ness ((char *) P and a
 * reinterpret_cast to non-const Type*) even though this overload takes
 * and returns const; use const-preserving casts instead. */
template<typename Type>
static inline const Type& StructAtOffsetUnaligned(const void *P, unsigned int offset)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-align"
  return * reinterpret_cast<const Type*> ((const char *) P + offset);
#pragma GCC diagnostic pop
}
/* Non-const flavor: mutable reference to a possibly-unaligned Type at
 * P plus offset bytes. */
template<typename Type>
static inline Type& StructAtOffsetUnaligned(void *P, unsigned int offset)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-align"
  char *base = (char *) P;
  return * reinterpret_cast<Type *> (base + offset);
#pragma GCC diagnostic pop
}
83
/* StructAfter<T>(X) returns the struct T& that is placed after X.
 * Works with X of variable size also. X must implement get_size().
 * Note: no alignment adjustment is done; T is assumed to start exactly
 * get_size() bytes past &X. */
template<typename Type, typename TObject>
static inline const Type& StructAfter(const TObject &X)
{ return StructAtOffset<Type>(&X, X.get_size()); }
template<typename Type, typename TObject>
static inline Type& StructAfter(TObject &X)
{ return StructAtOffset<Type>(&X, X.get_size()); }
92
93
94/*
95 * Size checking
96 */
97
/* Check _assertion in a method environment.
 *
 * Expands to a dummy const method whose body static_asserts the
 * condition; the method is never called.  The two-level expansion bakes
 * __LINE__ into the method name so several assertions can coexist in
 * the same type without name clashes. */
#define _DEFINE_INSTANCE_ASSERTION1(_line, _assertion) \
  void _instance_assertion_on_line_##_line () const \
  { static_assert ((_assertion), ""); }
# define _DEFINE_INSTANCE_ASSERTION0(_line, _assertion) _DEFINE_INSTANCE_ASSERTION1 (_line, _assertion)
# define DEFINE_INSTANCE_ASSERTION(_assertion) _DEFINE_INSTANCE_ASSERTION0 (__LINE__, _assertion)

/* Check that _code compiles in a method environment.
 * Same __LINE__ trick as above; the generated method is never called,
 * it merely forces _code through the compiler. */
#define _DEFINE_COMPILES_ASSERTION1(_line, _code) \
  void _compiles_assertion_on_line_##_line () const \
  { _code; }
# define _DEFINE_COMPILES_ASSERTION0(_line, _code) _DEFINE_COMPILES_ASSERTION1 (_line, _code)
# define DEFINE_COMPILES_ASSERTION(_code) _DEFINE_COMPILES_ASSERTION0 (__LINE__, _code)
111
112
/* For fixed-size structs: asserts sizeof matches and defines
 * get_size()/null_size/min_size/static_size, all equal to `size`. */
#define DEFINE_SIZE_STATIC(size) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) == (size)) \
  unsigned int get_size () const { return (size); } \
  static constexpr unsigned null_size = (size); \
  static constexpr unsigned min_size = (size); \
  static constexpr unsigned static_size = (size)

/* For union types: checks the size of the named member of `this->u`
 * (and that the member itself defines static_size). */
#define DEFINE_SIZE_UNION(size, _member) \
  DEFINE_COMPILES_ASSERTION ((void) this->u._member.static_size) \
  DEFINE_INSTANCE_ASSERTION (sizeof(this->u._member) == (size)) \
  static constexpr unsigned null_size = (size); \
  static constexpr unsigned min_size = (size)

/* For structs whose full size is at least `size` bytes. */
#define DEFINE_SIZE_MIN(size) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) >= (size)) \
  static constexpr unsigned null_size = (size); \
  static constexpr unsigned min_size = (size)

/* Like DEFINE_SIZE_MIN but without a null_size (no usable null object). */
#define DEFINE_SIZE_UNBOUNDED(size) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) >= (size)) \
  static constexpr unsigned min_size = (size)

/* For structs ending in a variable-length array member: `size` is the
 * header size; VAR is the (project-defined) declared array length. */
#define DEFINE_SIZE_ARRAY(size, array) \
  DEFINE_COMPILES_ASSERTION ((void) (array)[0].static_size) \
  DEFINE_INSTANCE_ASSERTION (sizeof (*this) == (size) + VAR * sizeof ((array)[0])) \
  static constexpr unsigned null_size = (size); \
  static constexpr unsigned min_size = (size)

/* Like DEFINE_SIZE_ARRAY, but also defines get_size() in terms of the
 * (self-sizing) array member's own get_size(). */
#define DEFINE_SIZE_ARRAY_SIZED(size, array) \
  unsigned int get_size () const { return (size - (array).min_size + (array).get_size ()); } \
  DEFINE_SIZE_ARRAY(size, array)
144
145
146/*
147 * Dispatch
148 */
149
/* CRTP base for dispatch contexts (e.g. the sanitize context below).
 * Context is the deriving class, Return its per-dispatch result type,
 * and MaxDebugDepth bounds debug-trace nesting.  Subclasses shadow the
 * defaults below as needed. */
template <typename Context, typename Return, unsigned int MaxDebugDepth>
struct hb_dispatch_context_t
{
  static constexpr unsigned max_debug_depth = MaxDebugDepth;
  typedef Return return_t;
  /* Default: dispatching is always permitted. */
  template <typename T, typename F>
  bool may_dispatch (const T *obj HB_UNUSED, const F *format HB_UNUSED) { return true; }
  /* Value returned when dispatch is skipped; defers to the subclass. */
  static return_t no_dispatch_return_value () { return Context::default_return_value (); }
  /* Default: never stop iterating sublookups early. */
  static bool stop_sublookup_iteration (const return_t r HB_UNUSED) { return false; }
};
160
161
162/*
163 * Sanitize
164 *
165 *
166 * === Introduction ===
167 *
168 * The sanitize machinery is at the core of our zero-cost font loading. We
169 * mmap() font file into memory and create a blob out of it. Font subtables
170 * are returned as a readonly sub-blob of the main font blob. These table
171 * blobs are then sanitized before use, to ensure invalid memory access does
 * not happen. The toplevel sanitize API is used like this, e.g. to load the 'head'
173 * table:
174 *
175 * hb_blob_t *head_blob = hb_sanitize_context_t ().reference_table<OT::head> (face);
176 *
177 * The blob then can be converted to a head table struct with:
178 *
179 * const head *head_table = head_blob->as<head> ();
180 *
181 * What the reference_table does is, to call hb_face_reference_table() to load
182 * the table blob, sanitize it and return either the sanitized blob, or empty
183 * blob if sanitization failed. The blob->as() function returns the null
184 * object of its template type argument if the blob is empty. Otherwise, it
185 * just casts the blob contents to the desired type.
186 *
187 * Sanitizing a blob of data with a type T works as follows (with minor
188 * simplification):
189 *
190 * - Cast blob content to T*, call sanitize() method of it,
191 * - If sanitize succeeded, return blob.
192 * - Otherwise, if blob is not writable, try making it writable,
193 * or copy if cannot be made writable in-place,
194 * - Call sanitize() again. Return blob if sanitize succeeded.
195 * - Return empty blob otherwise.
196 *
197 *
198 * === The sanitize() contract ===
199 *
200 * The sanitize() method of each object type shall return true if it's safe to
201 * call other methods of the object, and false otherwise.
202 *
203 * Note that what sanitize() checks for might align with what the specification
204 * describes as valid table data, but does not have to be. In particular, we
205 * do NOT want to be pedantic and concern ourselves with validity checks that
206 * are irrelevant to our use of the table. On the contrary, we want to be
207 * lenient with error handling and accept invalid data to the extent that it
208 * does not impose extra burden on us.
209 *
210 * Based on the sanitize contract, one can see that what we check for depends
211 * on how we use the data in other table methods. Ie. if other table methods
212 * assume that offsets do NOT point out of the table data block, then that's
213 * something sanitize() must check for (GSUB/GPOS/GDEF/etc work this way). On
214 * the other hand, if other methods do such checks themselves, then sanitize()
 * does not have to bother with them (glyf/loca work this way). The choice
216 * depends on the table structure and sanitize() performance. For example, to
217 * check glyf/loca offsets in sanitize() would cost O(num-glyphs). We try hard
218 * to avoid such costs during font loading. By postponing such checks to the
219 * actual glyph loading, we reduce the sanitize cost to O(1) and total runtime
220 * cost to O(used-glyphs). As such, this is preferred.
221 *
222 * The same argument can be made re GSUB/GPOS/GDEF, but there, the table
223 * structure is so complicated that by checking all offsets at sanitize() time,
224 * we make the code much simpler in other methods, as offsets and referenced
225 * objects do not need to be validated at each use site.
226 */
227
/* This limits sanitizing time on really broken fonts. */
/* Maximum number of in-place edit requests honored per sanitize pass. */
#ifndef HB_SANITIZE_MAX_EDITS
#define HB_SANITIZE_MAX_EDITS 32
#endif
/* Op budget multiplier: budget = blob-size * factor (see
 * hb_sanitize_context_t::start_processing()). */
#ifndef HB_SANITIZE_MAX_OPS_FACTOR
#define HB_SANITIZE_MAX_OPS_FACTOR 8
#endif
/* Floor on the op budget, so tiny blobs still get a usable budget. */
#ifndef HB_SANITIZE_MAX_OPS_MIN
#define HB_SANITIZE_MAX_OPS_MIN 16384
#endif
/* Upper bound on the budget (fits a signed 32-bit int); presumably used
 * by callers of set_max_ops() elsewhere — not referenced in this file. */
#ifndef HB_SANITIZE_MAX_OPS_MAX
#define HB_SANITIZE_MAX_OPS_MAX 0x3FFFFFFF
#endif
241
242struct hb_sanitize_context_t :
243 hb_dispatch_context_t<hb_sanitize_context_t, bool, HB_DEBUG_SANITIZE>
244{
245 hb_sanitize_context_t () :
246 debug_depth (0),
247 start (nullptr), end (nullptr),
248 max_ops (0),
249 writable (false), edit_count (0),
250 blob (nullptr),
251 num_glyphs (65536),
252 num_glyphs_set (false) {}
253
254 const char *get_name () { return "SANITIZE"; }
255 template <typename T, typename F>
256 bool may_dispatch (const T *obj HB_UNUSED, const F *format)
257 { return format->sanitize (this); }
258 template <typename T>
259 return_t dispatch (const T &obj) { return obj.sanitize (this); }
260 static return_t default_return_value () { return true; }
261 static return_t no_dispatch_return_value () { return false; }
262 bool stop_sublookup_iteration (const return_t r) const { return !r; }
263
264 void init (hb_blob_t *b)
265 {
266 this->blob = hb_blob_reference (b);
267 this->writable = false;
268 }
269
270 void set_num_glyphs (unsigned int num_glyphs_)
271 {
272 num_glyphs = num_glyphs_;
273 num_glyphs_set = true;
274 }
275 unsigned int get_num_glyphs () { return num_glyphs; }
276
277 void set_max_ops (int max_ops_) { max_ops = max_ops_; }
278
279 template <typename T>
280 void set_object (const T *obj)
281 {
282 reset_object ();
283
284 if (!obj) return;
285
286 const char *obj_start = (const char *) obj;
287 if (unlikely (obj_start < this->start || this->end <= obj_start))
288 this->start = this->end = nullptr;
289 else
290 {
291 this->start = obj_start;
292 this->end = obj_start + MIN<uintptr_t> (this->end - obj_start, obj->get_size ());
293 }
294 }
295
296 void reset_object ()
297 {
298 this->start = this->blob->data;
299 this->end = this->start + this->blob->length;
300 assert (this->start <= this->end); /* Must not overflow. */
301 }
302
303 void start_processing ()
304 {
305 reset_object ();
306 this->max_ops = MAX ((unsigned int) (this->end - this->start) * HB_SANITIZE_MAX_OPS_FACTOR,
307 (unsigned) HB_SANITIZE_MAX_OPS_MIN);
308 this->edit_count = 0;
309 this->debug_depth = 0;
310
311 DEBUG_MSG_LEVEL (SANITIZE, start, 0, +1,
312 "start [%p..%p] (%lu bytes)",
313 this->start, this->end,
314 (unsigned long) (this->end - this->start));
315 }
316
317 void end_processing ()
318 {
319 DEBUG_MSG_LEVEL (SANITIZE, this->start, 0, -1,
320 "end [%p..%p] %u edit requests",
321 this->start, this->end, this->edit_count);
322
323 hb_blob_destroy (this->blob);
324 this->blob = nullptr;
325 this->start = this->end = nullptr;
326 }
327
328 bool check_range (const void *base,
329 unsigned int len) const
330 {
331 const char *p = (const char *) base;
332 bool ok = this->start <= p &&
333 p <= this->end &&
334 (unsigned int) (this->end - p) >= len &&
335 this->max_ops-- > 0;
336
337 DEBUG_MSG_LEVEL (SANITIZE, p, this->debug_depth+1, 0,
338 "check_range [%p..%p] (%d bytes) in [%p..%p] -> %s",
339 p, p + len, len,
340 this->start, this->end,
341 ok ? "OK" : "OUT-OF-RANGE");
342
343 return likely (ok);
344 }
345
346 template <typename T>
347 bool check_range (const T *base,
348 unsigned int a,
349 unsigned int b) const
350 {
351 return !hb_unsigned_mul_overflows (a, b) &&
352 this->check_range (base, a * b);
353 }
354
355 template <typename T>
356 bool check_range (const T *base,
357 unsigned int a,
358 unsigned int b,
359 unsigned int c) const
360 {
361 return !hb_unsigned_mul_overflows (a, b) &&
362 this->check_range (base, a * b, c);
363 }
364
365 template <typename T>
366 bool check_array (const T *base, unsigned int len) const
367 {
368 return this->check_range (base, len, hb_static_size (T));
369 }
370
371 template <typename T>
372 bool check_array (const T *base,
373 unsigned int a,
374 unsigned int b) const
375 {
376 return this->check_range (base, a, b, hb_static_size (T));
377 }
378
379 template <typename Type>
380 bool check_struct (const Type *obj) const
381 { return likely (this->check_range (obj, obj->min_size)); }
382
383 bool may_edit (const void *base, unsigned int len)
384 {
385 if (this->edit_count >= HB_SANITIZE_MAX_EDITS)
386 return false;
387
388 const char *p = (const char *) base;
389 this->edit_count++;
390
391 DEBUG_MSG_LEVEL (SANITIZE, p, this->debug_depth+1, 0,
392 "may_edit(%u) [%p..%p] (%d bytes) in [%p..%p] -> %s",
393 this->edit_count,
394 p, p + len, len,
395 this->start, this->end,
396 this->writable ? "GRANTED" : "DENIED");
397
398 return this->writable;
399 }
400
401 template <typename Type, typename ValueType>
402 bool try_set (const Type *obj, const ValueType &v)
403 {
404 if (this->may_edit (obj, hb_static_size (Type)))
405 {
406 hb_assign (* const_cast<Type *> (obj), v);
407 return true;
408 }
409 return false;
410 }
411
412 template <typename Type>
413 hb_blob_t *sanitize_blob (hb_blob_t *blob)
414 {
415 bool sane;
416
417 init (blob);
418
419 retry:
420 DEBUG_MSG_FUNC (SANITIZE, start, "start");
421
422 start_processing ();
423
424 if (unlikely (!start))
425 {
426 end_processing ();
427 return blob;
428 }
429
430 Type *t = CastP<Type> (const_cast<char *> (start));
431
432 sane = t->sanitize (this);
433 if (sane)
434 {
435 if (edit_count)
436 {
437 DEBUG_MSG_FUNC (SANITIZE, start, "passed first round with %d edits; going for second round", edit_count);
438
439 /* sanitize again to ensure no toe-stepping */
440 edit_count = 0;
441 sane = t->sanitize (this);
442 if (edit_count) {
443 DEBUG_MSG_FUNC (SANITIZE, start, "requested %d edits in second round; FAILLING", edit_count);
444 sane = false;
445 }
446 }
447 }
448 else
449 {
450 if (edit_count && !writable) {
451 start = hb_blob_get_data_writable (blob, nullptr);
452 end = start + blob->length;
453
454 if (start)
455 {
456 writable = true;
457 /* ok, we made it writable by relocating. try again */
458 DEBUG_MSG_FUNC (SANITIZE, start, "retry");
459 goto retry;
460 }
461 }
462 }
463
464 end_processing ();
465
466 DEBUG_MSG_FUNC (SANITIZE, start, sane ? "PASSED" : "FAILED");
467 if (sane)
468 {
469 hb_blob_make_immutable (blob);
470 return blob;
471 }
472 else
473 {
474 hb_blob_destroy (blob);
475 return hb_blob_get_empty ();
476 }
477 }
478
479 template <typename Type>
480 hb_blob_t *reference_table (const hb_face_t *face, hb_tag_t tableTag = Type::tableTag)
481 {
482 if (!num_glyphs_set)
483 set_num_glyphs (hb_face_get_glyph_count (face));
484 return sanitize_blob<Type> (hb_face_reference_table (face, tableTag));
485 }
486
487 mutable unsigned int debug_depth;
488 const char *start, *end;
489 mutable int max_ops;
490 private:
491 bool writable;
492 unsigned int edit_count;
493 hb_blob_t *blob;
494 unsigned int num_glyphs;
495 bool num_glyphs_set;
496};
497
/* RAII helper: narrows the sanitize context's checked range to obj for
 * the lifetime of this object, restoring the full blob range on scope
 * exit via reset_object().
 *
 * NOTE(review): hb_sanitize_context_t::set_object() takes a pointer
 * parameter, so passing `obj` (not `&obj`) only deduces when T is itself
 * a pointer type — confirm at use sites that callers pass pointers. */
struct hb_sanitize_with_object_t
{
  template <typename T>
  hb_sanitize_with_object_t (hb_sanitize_context_t *c,
			     const T& obj) : c (c)
  { c->set_object (obj); }
  ~hb_sanitize_with_object_t ()
  { c->reset_object (); }

  private:
  hb_sanitize_context_t *c;
};
510
511
512/*
513 * Serialize
514 */
515
/* The serialize context: writes objects into a caller-provided
 * [start, end) buffer, advancing a write head.  Any overflow puts the
 * context into a sticky error state (successful = false); all further
 * allocations then fail, so callers may check in_error() once at the
 * end instead of after every step. */
struct hb_serialize_context_t
{
  hb_serialize_context_t (void *start_, unsigned int size)
  {
    this->start = (char *) start_;
    this->end = this->start + size;
    reset ();
  }

  bool in_error () const { return !this->successful; }

  /* Rewind the head and clear the error state for a fresh serialize. */
  void reset ()
  {
    this->successful = true;
    this->head = this->start;
    this->debug_depth = 0;
  }

  /* propagate_error(): fold the error state of other values/contexts
   * into this one.  Overloads cover a bare bool, objects and pointers
   * exposing in_error(), and 2-/3-way combinations of those. */
  bool propagate_error (bool e)
  { return this->successful = this->successful && e; }
  template <typename T> bool propagate_error (const T &obj)
  { return this->successful = this->successful && !obj.in_error (); }
  template <typename T> bool propagate_error (const T *obj)
  { return this->successful = this->successful && !obj->in_error (); }
  template <typename T1, typename T2> bool propagate_error (T1 &o1, T2 &o2)
  { return propagate_error (o1) && propagate_error (o2); }
  template <typename T1, typename T2> bool propagate_error (T1 *o1, T2 *o2)
  { return propagate_error (o1) && propagate_error (o2); }
  template <typename T1, typename T2, typename T3>
  bool propagate_error (T1 &o1, T2 &o2, T3 &o3)
  { return propagate_error (o1) && propagate_error (o2, o3); }
  template <typename T1, typename T2, typename T3>
  bool propagate_error (T1 *o1, T2 *o2, T3 *o3)
  { return propagate_error (o1) && propagate_error (o2, o3); }

  /* To be called around main operation. */
  template <typename Type>
  Type *start_serialize ()
  {
    DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, +1,
		     "start [%p..%p] (%lu bytes)",
		     this->start, this->end,
		     (unsigned long) (this->end - this->start));

    return start_embed<Type> ();
  }
  void end_serialize ()
  {
    DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, -1,
		     "end [%p..%p] serialized %d bytes; %s",
		     this->start, this->end,
		     (int) (this->head - this->start),
		     this->successful ? "successful" : "UNSUCCESSFUL");
  }

  /* Number of bytes serialized so far. */
  unsigned int length () const { return this->head - this->start; }

  /* Pad (with zero bytes, courtesy of allocate_size's memset) up to the
   * next multiple of alignment. */
  void align (unsigned int alignment)
  {
    unsigned int l = length () % alignment;
    if (l)
      allocate_size<void> (alignment - l);
  }

  /* View the current head as a Type*, without allocating.  The unused
   * argument exists only for template-argument deduction. */
  template <typename Type>
  Type *start_embed (const Type *_ HB_UNUSED = nullptr) const
  {
    Type *ret = reinterpret_cast<Type *> (this->head);
    return ret;
  }

  /* Claim `size` zero-initialized bytes at head; returns nullptr (and
   * enters the error state) on overflow or if already in error. */
  template <typename Type>
  Type *allocate_size (unsigned int size)
  {
    if (unlikely (!this->successful || this->end - this->head < ptrdiff_t (size))) {
      this->successful = false;
      return nullptr;
    }
    memset (this->head, 0, size);
    char *ret = this->head;
    this->head += size;
    return reinterpret_cast<Type *> (ret);
  }

  template <typename Type>
  Type *allocate_min ()
  {
    return this->allocate_size<Type> (Type::min_size);
  }

  /* Copy an existing object, using its dynamic get_size(), into the
   * buffer. */
  template <typename Type>
  Type *embed (const Type &obj)
  {
    unsigned int size = obj.get_size ();
    Type *ret = this->allocate_size<Type> (size);
    if (unlikely (!ret)) return nullptr;
    memcpy (ret, &obj, size);
    return ret;
  }
  /* Stream-style embed; errors are recorded in `successful`, not
   * returned. */
  template <typename Type>
  hb_serialize_context_t &operator << (const Type &obj) { embed (obj); return *this; }

  /* Grow an in-buffer object obj to span `size` bytes total.  The
   * asserts require obj to lie within [start, head] and obj+size to
   * reach at least head — i.e. obj is the object currently being built
   * at the tail of the buffer. */
  template <typename Type>
  Type *extend_size (Type &obj, unsigned int size)
  {
    assert (this->start <= (char *) &obj);
    assert ((char *) &obj <= this->head);
    assert ((char *) &obj + size >= this->head);
    if (unlikely (!this->allocate_size<Type> (((char *) &obj) + size - this->head))) return nullptr;
    return reinterpret_cast<Type *> (&obj);
  }

  template <typename Type>
  Type *extend_min (Type &obj) { return extend_size (obj, obj.min_size); }

  template <typename Type>
  Type *extend (Type &obj) { return extend_size (obj, obj.get_size ()); }

  /* Output routines: snapshot the serialized bytes.  Only valid after a
   * successful serialize (asserted). */
  /* malloc'ed copy of the serialized bytes; nullptr if malloc failed.
   * Caller owns (and frees) the memory. */
  template <typename Type>
  Type *copy () const
  {
    assert (this->successful);
    unsigned int len = this->head - this->start;
    void *p = malloc (len);
    if (p)
      memcpy (p, this->start, len);
    return reinterpret_cast<Type *> (p);
  }
  /* Same, as an hb_bytes_t; empty on allocation failure. */
  hb_bytes_t copy_bytes () const
  {
    assert (this->successful);
    unsigned int len = this->head - this->start;
    void *p = malloc (len);
    if (p)
      memcpy (p, this->start, len);
    else
      return hb_bytes_t ();
    return hb_bytes_t ((char *) p, len);
  }
  /* Same, as a blob that duplicates the bytes. */
  hb_blob_t *copy_blob () const
  {
    assert (this->successful);
    return hb_blob_create (this->start,
			   this->head - this->start,
			   HB_MEMORY_MODE_DUPLICATE,
			   nullptr, nullptr);
  }

  public:
  unsigned int debug_depth;
  char *start, *end, *head;	/* head: next write position. */
  bool successful;
};
670
671
672
673/*
674 * Big-endian integers.
675 */
676
/* Big-endian integer: Bytes bytes of storage, convertible to and from a
 * native Type.  Specialized per width below. */
template <typename Type, int Bytes> struct BEInt;

/* One byte: byte order is trivially irrelevant. */
template <typename Type>
struct BEInt<Type, 1>
{
  public:
  void set (Type V) { v = (uint8_t) V; }
  operator Type () const { return (Type) v; }
  private: uint8_t v;
};
/* Two bytes, big-endian. */
template <typename Type>
struct BEInt<Type, 2>
{
  public:
  void set (Type V)
  {
    v[0] = (V >>  8) & 0xFF;
    v[1] = (V      ) & 0xFF;
  }
  operator Type () const
  {
    /* Fast path: reinterpret the two bytes as a (possibly unaligned)
     * native uint16_t and byte-swap if needed.  Guarded to compilers
     * and platforms where the packed-struct trick and __builtin_bswap16
     * are known to work. */
#if ((defined(__GNUC__) && __GNUC__ >= 5) || defined(__clang__)) && \
    defined(__BYTE_ORDER) && \
    (__BYTE_ORDER == __LITTLE_ENDIAN || __BYTE_ORDER == __BIG_ENDIAN)
    /* Spoon-feed the compiler a big-endian integer with alignment 1.
     * https://github.com/harfbuzz/harfbuzz/pull/1398 */
    struct __attribute__((packed)) packed_uint16_t { uint16_t v; };
#if __BYTE_ORDER == __LITTLE_ENDIAN
    return __builtin_bswap16 (((packed_uint16_t *) this)->v);
#else /* __BYTE_ORDER == __BIG_ENDIAN */
    return ((packed_uint16_t *) this)->v;
#endif
#endif
    /* Portable fallback; unreachable when the fast path above is
     * compiled in (both branches return). */
    return (v[0] <<  8)
	 + (v[1]      );
  }
  private: uint8_t v[2];
};
715template <typename Type>
716struct BEInt<Type, 3>
717{
718 public:
719 void set (Type V)
720 {
721 v[0] = (V >> 16) & 0xFF;
722 v[1] = (V >> 8) & 0xFF;
723 v[2] = (V ) & 0xFF;
724 }
725 operator Type () const
726 {
727 return (v[0] << 16)
728 + (v[1] << 8)
729 + (v[2] );
730 }
731 private: uint8_t v[3];
732};
733template <typename Type>
734struct BEInt<Type, 4>
735{
736 public:
737 typedef Type type;
738 void set (Type V)
739 {
740 v[0] = (V >> 24) & 0xFF;
741 v[1] = (V >> 16) & 0xFF;
742 v[2] = (V >> 8) & 0xFF;
743 v[3] = (V ) & 0xFF;
744 }
745 operator Type () const
746 {
747 return (v[0] << 24)
748 + (v[1] << 16)
749 + (v[2] << 8)
750 + (v[3] );
751 }
752 private: uint8_t v[4];
753};
754
755
756/*
757 * Lazy loaders.
758 */
759
/* Mixin that retrieves a Data* stored WheresData pointer-slots *before*
 * this object within the containing struct.  Relies on the containing
 * type's layout placing a Data* member exactly WheresData pointers ahead
 * of the wrapper.  NOTE(review): this is a layout contract the compiler
 * cannot check — verify at each embedding site. */
template <typename Data, unsigned int WheresData>
struct hb_data_wrapper_t
{
  static_assert (WheresData > 0, "");

  Data * get_data () const
  { return *(((Data **) (void *) this) - WheresData); }

  /* Inert (nothing to lazily create) when there is no data. */
  bool is_inert () const { return !get_data (); }

  template <typename Stored, typename Subclass>
  Stored * call_create () const { return Subclass::create (get_data ()); }
};
/* Data-less specialization: never inert; create() takes no argument. */
template <>
struct hb_data_wrapper_t<void, 0>
{
  bool is_inert () const { return false; }

  template <typename Stored, typename Funcs>
  Stored * call_create () const { return Funcs::create (); }
};
781
/* Type selector: hb_non_void_t<T1,T2>::value is T1, falling back to T2
 * when T1 is void. */
template <typename T1, typename T2>
struct hb_non_void_t { using value = T1; };
template <typename T2>
struct hb_non_void_t<void, T2> { using value = T2; };
784
/* Generic lazily-created, atomically-cached object loader.
 *
 * The cached `instance` pointer has three states: nullptr (not created
 * yet), Funcs::get_null () (inert, or creation failed), or a live
 * object.  Funcs — the Subclass if one is given, else this class —
 * supplies create/destroy/get_null/convert. */
template <typename Returned,
	  typename Subclass = void,
	  typename Data = void,
	  unsigned int WheresData = 0,
	  typename Stored = Returned>
struct hb_lazy_loader_t : hb_data_wrapper_t<Data, WheresData>
{
  /* Funcs = Subclass, or this very instantiation when Subclass is void. */
  typedef typename hb_non_void_t<Subclass,
				 hb_lazy_loader_t<Returned,Subclass,Data,WheresData,Stored>
				>::value Funcs;

  void init0 () {} /* Init, when memory is already set to 0. No-op for us. */
  void init () { instance.set_relaxed (nullptr); }
  void fini () { do_destroy (instance.get ()); }

  /* Atomically detach and destroy the cached instance; retries if
   * another thread swaps the pointer between the read and the exchange. */
  void free_instance ()
  {
  retry:
    Stored *p = instance.get ();
    if (unlikely (p && !cmpexch (p, nullptr)))
      goto retry;
    do_destroy (p);
  }

  /* Destroy p unless it is the shared null object. */
  static void do_destroy (Stored *p)
  {
    if (p && p != const_cast<Stored *> (Funcs::get_null ()))
      Funcs::destroy (p);
  }

  const Returned * operator -> () const { return get (); }
  const Returned & operator * () const  { return *get (); }
  /* True iff a real (non-null-object) instance exists or was created. */
  explicit_operator bool () const
  { return get_stored () != Funcs::get_null (); }
  template <typename C> operator const C * () const { return get (); }

  /* Lazily create-and-cache the stored object.  If two threads race,
   * the loser destroys its freshly-created copy and retries, so exactly
   * one instance wins the compare-and-exchange. */
  Stored * get_stored () const
  {
  retry:
    Stored *p = this->instance.get ();
    if (unlikely (!p))
    {
      if (unlikely (this->is_inert ()))
	return const_cast<Stored *> (Funcs::get_null ());

      p = this->template call_create<Stored, Funcs> ();
      if (unlikely (!p))
	p = const_cast<Stored *> (Funcs::get_null ());

      if (unlikely (!cmpexch (nullptr, p)))
      {
	do_destroy (p);
	goto retry;
      }
    }
    return p;
  }
  /* Like get_stored(), but never creates: returns whatever is cached
   * (possibly nullptr). */
  Stored * get_stored_relaxed () const
  {
    return this->instance.get_relaxed ();
  }

  bool cmpexch (Stored *current, Stored *value) const
  {
    /* This *must* be called when there are no other threads accessing. */
    return this->instance.cmpexch (current, value);
  }

  const Returned * get () const { return Funcs::convert (get_stored ()); }
  const Returned * get_relaxed () const { return Funcs::convert (get_stored_relaxed ()); }
  Returned * get_unconst () const { return const_cast<Returned *> (Funcs::convert (get_stored ())); }

  /* To be possibly overloaded by subclasses. */
  static Returned* convert (Stored *p) { return p; }

  /* By default null/init/fini the object. */
  static const Stored* get_null () { return &Null(Stored); }
  static Stored *create (Data *data)
  {
    Stored *p = (Stored *) calloc (1, sizeof (Stored));
    if (likely (p))
      p->init (data);
    return p;
  }
  static Stored *create ()
  {
    Stored *p = (Stored *) calloc (1, sizeof (Stored));
    if (likely (p))
      p->init ();
    return p;
  }
  static void destroy (Stored *p)
  {
    p->fini ();
    free (p);
  }

//  private:
  /* Must only have one pointer. */
  hb_atomic_ptr_t<Stored *> instance;
};
886
/* Specializations. */

/* Lazy loader whose Data is the containing hb_face_t; T is created via
 * the base class's calloc + init(face) and destroyed via fini(). */
template <typename T, unsigned int WheresFace>
struct hb_face_lazy_loader_t : hb_lazy_loader_t<T,
						hb_face_lazy_loader_t<T, WheresFace>,
						hb_face_t, WheresFace> {};
893
/* Lazily loads and sanitizes font table T from the containing face.
 * Stores the sanitized hb_blob_t; convert() exposes it as a T*. */
template <typename T, unsigned int WheresFace>
struct hb_table_lazy_loader_t : hb_lazy_loader_t<T,
						 hb_table_lazy_loader_t<T, WheresFace>,
						 hb_face_t, WheresFace,
						 hb_blob_t>
{
  /* Create: reference the table through a fresh sanitize context. */
  static hb_blob_t *create (hb_face_t *face)
  { return hb_sanitize_context_t ().reference_table<T> (face); }
  static void destroy (hb_blob_t *p) { hb_blob_destroy (p); }

  /* Null object is the shared empty blob. */
  static const hb_blob_t *get_null ()
  { return hb_blob_get_empty (); }

  static const T* convert (const hb_blob_t *blob)
  { return blob->as<T> (); }

  hb_blob_t* get_blob () const { return this->get_stored (); }
};
912
/* Lazy loader for hb_font_funcs_t; destruction goes through the
 * refcounted public API, and the null object is the shared empty set. */
template <typename Subclass>
struct hb_font_funcs_lazy_loader_t : hb_lazy_loader_t<hb_font_funcs_t, Subclass>
{
  static void destroy (hb_font_funcs_t *p)
  { hb_font_funcs_destroy (p); }
  static const hb_font_funcs_t *get_null ()
  { return hb_font_funcs_get_empty (); }
};
/* Lazy loader for hb_unicode_funcs_t; mirrors the font-funcs loader
 * above. */
template <typename Subclass>
struct hb_unicode_funcs_lazy_loader_t : hb_lazy_loader_t<hb_unicode_funcs_t, Subclass>
{
  static void destroy (hb_unicode_funcs_t *p)
  { hb_unicode_funcs_destroy (p); }
  static const hb_unicode_funcs_t *get_null ()
  { return hb_unicode_funcs_get_empty (); }
};
929
930
931#endif /* HB_MACHINERY_HH */
932