// SPDX-License-Identifier: MIT OR MPL-2.0 OR LGPL-2.1-or-later OR GPL-2.0-or-later
// Copyright 2010, SIL International, All rights reserved.

#include "inc/Main.h"
#include "inc/debug.h"
#include "inc/Endian.h"
#include "inc/Pass.h"
#include <cstring>
#include <cstdlib>
#include <cassert>
#include <cmath>
#include "inc/Segment.h"
#include "inc/Code.h"
#include "inc/Rule.h"
#include "inc/Error.h"
#include "inc/Collider.h"

using namespace graphite2;
using vm::Machine;
typedef Machine::Code  Code;

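// Kern-collision handling modes encoded in bits 3-4 of the pass flags byte
// (decoded in Pass::readPass below and consulted in Pass::resolveKern).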
enum KernCollision
{
    None        = 0,
    CrossSpace  = 1,
    InWord      = 2,
    reserved    = 3
};

Pass::Pass()
: m_silf(0),
  m_cols(0),
  m_rules(0),
  m_ruleMap(0),
  m_startStates(0),
  m_transitions(0),
  m_states(0),
  m_codes(0),
  m_progs(0),
  m_numCollRuns(0),
  m_kernColls(0),
  m_iMaxLoop(0),
  m_numGlyphs(0),
  m_numRules(0),
  m_numStates(0),
  m_numTransition(0),
  m_numSuccess(0),
  m_successStart(0),
  m_numColumns(0),
  m_minPreCtxt(0),
  m_maxPreCtxt(0),
  m_colThreshold(0),
  m_isReverseDir(false)
{
}

Pass::~Pass()
{
    free(m_cols);
    free(m_startStates);
    free(m_transitions);
    free(m_states);
    free(m_ruleMap);

    if (m_rules) delete [] m_rules;
    if (m_codes) delete [] m_codes;
    free(m_progs);
}

bool Pass::readPass(const byte * const pass_start, size_t pass_length, size_t subtable_base,
        GR_MAYBE_UNUSED Face & face, passtype pt, GR_MAYBE_UNUSED uint32 version, Error &e)
{
    const byte * p = pass_start,
               * const pass_end = p + pass_length;
    size_t numRanges;

    if (e.test(pass_length < 40, E_BADPASSLENGTH)) return face.error(e);
    // Read in basic values
    const byte flags = be::read<byte>(p);
    if (e.test((flags & 0x1f) &&
            (pt < PASS_TYPE_POSITIONING || !m_silf->aCollision() || !face.glyphs().hasBoxes() || !(m_silf->flags() & 0x20)),
            E_BADCOLLISIONPASS))
        return face.error(e);
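    // Flags byte layout (from the least significant bit):
    //   bits 0-2  number of collision-fixing runs
    //   bits 3-4  kern-collision mode (see KernCollision above)
    //   bit  5    this pass processes the slot stream in reverse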
    m_numCollRuns = flags & 0x7;
    m_kernColls = (flags >> 3) & 0x3;
    m_isReverseDir = (flags >> 5) & 0x1;
    m_iMaxLoop = be::read<byte>(p);
    if (m_iMaxLoop < 1) m_iMaxLoop = 1;
    be::skip<byte>(p,2); // skip maxContext & maxBackup
    m_numRules = be::read<uint16>(p);
    if (e.test(!m_numRules && m_numCollRuns == 0, E_BADEMPTYPASS)) return face.error(e);
    be::skip<uint16>(p);   // fsmOffset - not sure why we would want this
    const byte * const pcCode = pass_start + be::read<uint32>(p) - subtable_base,
               * const rcCode = pass_start + be::read<uint32>(p) - subtable_base,
               * const aCode  = pass_start + be::read<uint32>(p) - subtable_base;
    be::skip<uint32>(p);
    m_numStates = be::read<uint16>(p);
    m_numTransition = be::read<uint16>(p);
    m_numSuccess = be::read<uint16>(p);
    m_numColumns = be::read<uint16>(p);
    numRanges = be::read<uint16>(p);
    be::skip<uint16>(p, 3); // skip searchRange, entrySelector & rangeShift.
    assert(p - pass_start == 40);
    // Perform some sanity checks.
    if ( e.test(m_numTransition > m_numStates, E_BADNUMTRANS)
            || e.test(m_numSuccess > m_numStates, E_BADNUMSUCCESS)
            || e.test(m_numSuccess + m_numTransition < m_numStates, E_BADNUMSTATES)
            || e.test(m_numRules && numRanges == 0, E_NORANGES)
            || e.test(m_numColumns > 0x7FFF, E_BADNUMCOLUMNS))
        return face.error(e);

    m_successStart = m_numStates - m_numSuccess;
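    // The ranges table maps glyph ids onto FSM column indices; each range is
    // three uint16s (first glyph, last glyph, column), so the last glyph id of
    // the final range bounds how many glyphs need a column entry.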
    // Check we can peek the final range's last-glyph field (a uint16) without
    // running past the end of the pass.
    if (e.test(p + numRanges * 6 - 2 > pass_end, E_BADPASSLENGTH)) return face.error(e);
    m_numGlyphs = be::peek<uint16>(p + numRanges * 6 - 4) + 1;
    // Calculate the start of various arrays.
    const byte * const ranges = p;
    be::skip<uint16>(p, numRanges*3);
    const byte * const o_rule_map = p;
    be::skip<uint16>(p, m_numSuccess + 1);

    // More sanity checks
    if (e.test(reinterpret_cast<const byte *>(o_rule_map + m_numSuccess*sizeof(uint16)) > pass_end
            || p > pass_end, E_BADRULEMAPLEN))
        return face.error(e);
    const size_t numEntries = be::peek<uint16>(o_rule_map + m_numSuccess*sizeof(uint16));
    const byte * const rule_map = p;
    be::skip<uint16>(p, numEntries);

    if (e.test(p + 2*sizeof(uint8) > pass_end, E_BADPASSLENGTH)) return face.error(e);
    m_minPreCtxt = be::read<uint8>(p);
    m_maxPreCtxt = be::read<uint8>(p);
    if (e.test(m_minPreCtxt > m_maxPreCtxt, E_BADCTXTLENBOUNDS)) return face.error(e);
    const byte * const start_states = p;
    be::skip<int16>(p, m_maxPreCtxt - m_minPreCtxt + 1);
    const uint16 * const sort_keys = reinterpret_cast<const uint16 *>(p);
    be::skip<uint16>(p, m_numRules);
    const byte * const precontext = p;
    be::skip<byte>(p, m_numRules);

    if (e.test(p + sizeof(uint16) + sizeof(uint8) > pass_end, E_BADCTXTLENS)) return face.error(e);
    m_colThreshold = be::read<uint8>(p);
    if (m_colThreshold == 0) m_colThreshold = 10; // A default
    const size_t pass_constraint_len = be::read<uint16>(p);

    const uint16 * const o_constraint = reinterpret_cast<const uint16 *>(p);
    be::skip<uint16>(p, m_numRules + 1);
    const uint16 * const o_actions = reinterpret_cast<const uint16 *>(p);
    be::skip<uint16>(p, m_numRules + 1);
    const byte * const states = p;
    if (e.test(2u*m_numTransition*m_numColumns >= (unsigned)(pass_end - p), E_BADPASSLENGTH)
            || e.test(p >= pass_end, E_BADPASSLENGTH))
        return face.error(e);
    be::skip<int16>(p, m_numTransition*m_numColumns);
    be::skip<uint8>(p);
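    // Each code block must begin exactly where the offsets read from the header
    // said it would; any mismatch means the pass table is corrupt.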
    if (e.test(p != pcCode, E_BADPASSCCODEPTR)) return face.error(e);
    be::skip<byte>(p, pass_constraint_len);
    if (e.test(p != rcCode, E_BADRULECCODEPTR)
            || e.test(size_t(rcCode - pcCode) != pass_constraint_len, E_BADCCODELEN)) return face.error(e);
    be::skip<byte>(p, be::peek<uint16>(o_constraint + m_numRules));
    if (e.test(p != aCode, E_BADACTIONCODEPTR)) return face.error(e);
    be::skip<byte>(p, be::peek<uint16>(o_actions + m_numRules));

    // We should be at the end or within the pass
    if (e.test(p > pass_end, E_BADPASSLENGTH)) return face.error(e);

    // Load the pass constraint if there is one.
    if (pass_constraint_len)
    {
        face.error_context(face.error_context() + 1);
        m_cPConstraint = vm::Machine::Code(true, pcCode, pcCode + pass_constraint_len,
                precontext[0], be::peek<uint16>(sort_keys), *m_silf, face, PASS_TYPE_UNKNOWN);
        if (e.test(!m_cPConstraint, E_OUTOFMEM)
                || e.test(m_cPConstraint.status() != Code::loaded, int(m_cPConstraint.status()) + E_CODEFAILURE))
            return face.error(e);
        face.error_context(face.error_context() - 1);
    }
    if (m_numRules)
    {
        if (!readRanges(ranges, numRanges, e)) return face.error(e);
        if (!readRules(rule_map, numEntries, precontext, sort_keys,
                o_constraint, rcCode, o_actions, aCode, face, pt, e)) return false;
    }
#ifdef GRAPHITE2_TELEMETRY
    telemetry::category _states_cat(face.tele.states);
#endif
    return m_numRules ? readStates(start_states, states, o_rule_map, face, e) : true;
}


bool Pass::readRules(const byte * rule_map, const size_t num_entries,
                     const byte *precontext, const uint16 * sort_key,
                     const uint16 * o_constraint, const byte *rc_data,
                     const uint16 * o_action, const byte * ac_data,
                     Face & face, passtype pt, Error &e)
{
    const byte * const ac_data_end = ac_data + be::peek<uint16>(o_action + m_numRules);
    const byte * const rc_data_end = rc_data + be::peek<uint16>(o_constraint + m_numRules);

    precontext += m_numRules;
    sort_key += m_numRules;
    o_constraint += m_numRules;
    o_action += m_numRules;

    // Load rules.
    const byte * ac_begin = 0, * rc_begin = 0,
               * ac_end = ac_data + be::peek<uint16>(o_action),
               * rc_end = rc_data + be::peek<uint16>(o_constraint);

    // Allocate pools
    m_rules = new Rule [m_numRules];
    m_codes = new Code [m_numRules*2];
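    // Pre-size one byte pool large enough for every decoded rule program: the
    // estimate is fed the combined action+constraint byte count, the number of
    // programs (two per rule) and the sum of the rules' sort keys.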
    int totalSlots = 0;
    const uint16 *tsort = sort_key;
    for (int i = 0; i < m_numRules; ++i)
        totalSlots += be::peek<uint16>(--tsort);
    const size_t prog_pool_sz = vm::Machine::Code::estimateCodeDataOut(ac_end - ac_data + rc_end - rc_data, 2 * m_numRules, totalSlots);
    m_progs = gralloc<byte>(prog_pool_sz);
    byte * prog_pool_free = m_progs,
         * prog_pool_end  = m_progs + prog_pool_sz;
    if (e.test(!(m_rules && m_codes && m_progs), E_OUTOFMEM)) return face.error(e);

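    // Walk the rules from last to first so that each rule's action/constraint
    // span is delimited by the start offset of the rule that follows it
    // (ac_end/rc_end are carried over at the bottom of the loop).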
    Rule * r = m_rules + m_numRules - 1;
    for (size_t n = m_numRules; r >= m_rules; --n, --r, ac_end = ac_begin, rc_end = rc_begin)
    {
        face.error_context((face.error_context() & 0xFFFF00) + EC_ARULE + int((n - 1) << 24));
        r->preContext = *--precontext;
        r->sort       = be::peek<uint16>(--sort_key);
#ifndef NDEBUG
        r->rule_idx   = uint16(n - 1);
#endif
        if (r->sort > 63 || r->preContext >= r->sort || r->preContext > m_maxPreCtxt || r->preContext < m_minPreCtxt)
            return false;
        ac_begin = ac_data + be::peek<uint16>(--o_action);
        --o_constraint;
        rc_begin = be::peek<uint16>(o_constraint) ? rc_data + be::peek<uint16>(o_constraint) : rc_end;

        if (ac_begin > ac_end || ac_begin > ac_data_end || ac_end > ac_data_end
                || rc_begin > rc_end || rc_begin > rc_data_end || rc_end > rc_data_end
                || vm::Machine::Code::estimateCodeDataOut(ac_end - ac_begin + rc_end - rc_begin, 2, r->sort) > size_t(prog_pool_end - prog_pool_free))
            return false;
        r->action     = new (m_codes+n*2-2) vm::Machine::Code(false, ac_begin, ac_end, r->preContext, r->sort, *m_silf, face, pt, &prog_pool_free);
        r->constraint = new (m_codes+n*2-1) vm::Machine::Code(true,  rc_begin, rc_end, r->preContext, r->sort, *m_silf, face, pt, &prog_pool_free);

        if (e.test(!r->action || !r->constraint, E_OUTOFMEM)
                || e.test(r->action->status() != Code::loaded, int(r->action->status()) + E_CODEFAILURE)
                || e.test(r->constraint->status() != Code::loaded, int(r->constraint->status()) + E_CODEFAILURE)
                || e.test(!r->constraint->immutable(), E_MUTABLECCODE))
            return face.error(e);
    }

    byte * const moved_progs = prog_pool_free > m_progs ? static_cast<byte *>(realloc(m_progs, prog_pool_free - m_progs)) : 0;
    if (e.test(!moved_progs, E_OUTOFMEM))
    {
        free(m_progs);
        m_progs = 0;
        return face.error(e);
    }

    if (moved_progs != m_progs)
    {
        for (Code * c = m_codes, * const ce = c + m_numRules*2; c != ce; ++c)
        {
            c->externalProgramMoved(moved_progs - m_progs);
        }
        m_progs = moved_progs;
    }

    // Load the rule entries map
    face.error_context((face.error_context() & 0xFFFF00) + EC_APASS);
    //TODO: Coverity: 1315804: FORWARD_NULL
    RuleEntry * re = m_ruleMap = gralloc<RuleEntry>(num_entries);
    if (e.test(!re, E_OUTOFMEM)) return face.error(e);
    for (size_t n = num_entries; n; --n, ++re)
    {
        const ptrdiff_t rn = be::read<uint16>(rule_map);
        if (e.test(rn >= m_numRules, E_BADRULENUM)) return face.error(e);
        re->rule = m_rules + rn;
    }

    return true;
}

static int cmpRuleEntry(const void *a, const void *b) { return (*(RuleEntry *)a < *(RuleEntry *)b ? -1 :
        (*(RuleEntry *)b < *(RuleEntry *)a ? 1 : 0)); }

bool Pass::readStates(const byte * starts, const byte *states, const byte * o_rule_map, GR_MAYBE_UNUSED Face & face, Error &e)
{
#ifdef GRAPHITE2_TELEMETRY
    telemetry::category _states_cat(face.tele.starts);
#endif
    m_startStates = gralloc<uint16>(m_maxPreCtxt - m_minPreCtxt + 1);
#ifdef GRAPHITE2_TELEMETRY
    telemetry::set_category(face.tele.states);
#endif
    m_states = gralloc<State>(m_numStates);
#ifdef GRAPHITE2_TELEMETRY
    telemetry::set_category(face.tele.transitions);
#endif
    m_transitions = gralloc<uint16>(m_numTransition * m_numColumns);

    if (e.test(!m_startStates || !m_states || !m_transitions, E_OUTOFMEM)) return face.error(e);
    // load start states
    for (uint16 * s = m_startStates,
                * const s_end = s + m_maxPreCtxt - m_minPreCtxt + 1; s != s_end; ++s)
    {
        *s = be::read<uint16>(starts);
        if (e.test(*s >= m_numStates, E_BADSTATE))
        {
            face.error_context((face.error_context() & 0xFFFF00) + EC_ASTARTS + int((s - m_startStates) << 24));
            return face.error(e); // true;
        }
    }

    // load state transition table.
    for (uint16 * t = m_transitions,
                * const t_end = t + m_numTransition*m_numColumns; t != t_end; ++t)
    {
        *t = be::read<uint16>(states);
        if (e.test(*t >= m_numStates, E_BADSTATE))
        {
            face.error_context((face.error_context() & 0xFFFF00) + EC_ATRANS + int(((t - m_transitions) / m_numColumns) << 8));
            return face.error(e);
        }
    }

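    // Accepting states occupy the last m_numSuccess state numbers; each owns a
    // slice of the rule map, which is sorted below so that candidate rules are
    // tried in cmpRuleEntry order.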
    State * s = m_states,
          * const success_begin = m_states + m_numStates - m_numSuccess;
    const RuleEntry * rule_map_end = m_ruleMap + be::peek<uint16>(o_rule_map + m_numSuccess*sizeof(uint16));
    for (size_t n = m_numStates; n; --n, ++s)
    {
        RuleEntry * const begin = s < success_begin ? 0 : m_ruleMap + be::read<uint16>(o_rule_map),
                  * const end   = s < success_begin ? 0 : m_ruleMap + be::peek<uint16>(o_rule_map);

        if (e.test(begin >= rule_map_end || end > rule_map_end || begin > end, E_BADRULEMAPPING))
        {
            face.error_context((face.error_context() & 0xFFFF00) + EC_ARULEMAP + int(n << 24));
            return face.error(e);
        }
        s->rules = begin;
        s->rules_end = (end - begin <= FiniteStateMachine::MAX_RULES)? end :
                begin + FiniteStateMachine::MAX_RULES;
        if (begin)  // keep UBSan happy: qsort must not be called with a null pointer
            qsort(begin, end - begin, sizeof(RuleEntry), &cmpRuleEntry);
    }

    return true;
}

bool Pass::readRanges(const byte * ranges, size_t num_ranges, Error &e)
{
    m_cols = gralloc<uint16>(m_numGlyphs);
    if (e.test(!m_cols, E_OUTOFMEM)) return false;
    memset(m_cols, 0xFF, m_numGlyphs * sizeof(uint16));
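    // Each range assigns a contiguous run of glyph ids to one FSM column; glyphs
    // left at 0xFFFF have no column and will stop the FSM when encountered
    // (see runFSM).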
    for (size_t n = num_ranges; n; --n)
    {
        uint16 * ci     = m_cols + be::read<uint16>(ranges),
               * ci_end = m_cols + be::read<uint16>(ranges) + 1,
                 col    = be::read<uint16>(ranges);

        if (e.test(ci >= ci_end || ci_end > m_cols+m_numGlyphs || col >= m_numColumns, E_BADRANGE))
            return false;

        // A glyph may belong to only one column
        while (ci != ci_end && *ci == 0xffff)
            *ci++ = col;

        if (e.test(ci != ci_end, E_BADRANGE))
            return false;
    }
    return true;
}


bool Pass::runGraphite(vm::Machine & m, FiniteStateMachine & fsm, bool reverse) const
{
    Slot *s = m.slotMap().segment.first();
    if (!s || !testPassConstraint(m)) return true;
    if (reverse)
    {
        m.slotMap().segment.reverseSlots();
        s = m.slotMap().segment.first();
    }
    if (m_numRules)
    {
        Slot *currHigh = s->next();

#if !defined GRAPHITE2_NTRACING
        if (fsm.dbgout) *fsm.dbgout << "rules" << json::array;
        json::closer rules_array_closer(fsm.dbgout);
#endif

        m.slotMap().highwater(currHigh);
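        // Walk the segment firing rules; the highwater mark and m_iMaxLoop
        // together stop a rule set that keeps matching at the same position from
        // looping forever (see adjustSlot for how the mark is maintained).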
        int lc = m_iMaxLoop;
        do
        {
            findNDoRule(s, m, fsm);
            if (m.status() != Machine::finished) return false;
            if (s && (s == m.slotMap().highwater() || m.slotMap().highpassed() || --lc == 0)) {
                if (!lc)
                    s = m.slotMap().highwater();
                lc = m_iMaxLoop;
                if (s)
                    m.slotMap().highwater(s->next());
            }
        } while (s);
    }
    //TODO: Use enums for flags
    const bool collisions = m_numCollRuns || m_kernColls;

    if (!collisions || !m.slotMap().segment.hasCollisionInfo())
        return true;

    if (m_numCollRuns)
    {
        if (!(m.slotMap().segment.flags() & Segment::SEG_INITCOLLISIONS))
        {
            m.slotMap().segment.positionSlots(0, 0, 0, m.slotMap().dir(), true);
            // m.slotMap().segment.flags(m.slotMap().segment.flags() | Segment::SEG_INITCOLLISIONS);
        }
        if (!collisionShift(&m.slotMap().segment, m.slotMap().dir(), fsm.dbgout))
            return false;
    }
    if ((m_kernColls) && !collisionKern(&m.slotMap().segment, m.slotMap().dir(), fsm.dbgout))
        return false;
    if (collisions && !collisionFinish(&m.slotMap().segment, fsm.dbgout))
        return false;
    return true;
}

bool Pass::runFSM(FiniteStateMachine& fsm, Slot * slot) const
{
    fsm.reset(slot, m_maxPreCtxt);
    if (fsm.slots.context() < m_minPreCtxt)
        return false;

    uint16 state = m_startStates[m_maxPreCtxt - fsm.slots.context()];
    uint8  free_slots = SlotMap::MAX_SLOTS;
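    // Feed slots to the FSM: each glyph id is mapped to a column via m_cols, the
    // transition table is walked one slot at a time, and every accepting state
    // reached contributes its rule list to the candidate set.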
    do
    {
        fsm.slots.pushSlot(slot);
        if (slot->gid() >= m_numGlyphs
                || m_cols[slot->gid()] == 0xffffU
                || --free_slots == 0
                || state >= m_numTransition)
            return free_slots != 0;

        const uint16 * transitions = m_transitions + state*m_numColumns;
        state = transitions[m_cols[slot->gid()]];
        if (state >= m_successStart)
            fsm.rules.accumulate_rules(m_states[state]);

        slot = slot->next();
    } while (state != 0 && slot);

    fsm.slots.pushSlot(slot);
    return true;
}

#if !defined GRAPHITE2_NTRACING

inline
Slot * input_slot(const SlotMap & slots, const int n)
{
    Slot * s = slots[slots.context() + n];
    if (!s->isCopied())  return s;

    return s->prev() ? s->prev()->next() : (s->next() ? s->next()->prev() : slots.segment.last());
}

inline
Slot * output_slot(const SlotMap & slots, const int n)
{
    Slot * s = slots[slots.context() + n - 1];
    return s ? s->next() : slots.segment.first();
}

#endif //!defined GRAPHITE2_NTRACING

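// Run the FSM starting at slot, test the candidate rules in order, and fire the
// first one whose constraint passes; on return, slot has been advanced to the
// next position to process.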
void Pass::findNDoRule(Slot * & slot, Machine &m, FiniteStateMachine & fsm) const
{
    assert(slot);

    if (runFSM(fsm, slot))
    {
        // Search for the first rule which passes the constraint
        const RuleEntry *        r = fsm.rules.begin(),
                        * const re = fsm.rules.end();
        while (r != re && !testConstraint(*r->rule, m))
        {
            ++r;
            if (m.status() != Machine::finished)
                return;
        }

#if !defined GRAPHITE2_NTRACING
        if (fsm.dbgout)
        {
            if (fsm.rules.size() != 0)
            {
                *fsm.dbgout << json::item << json::object;
                dumpRuleEventConsidered(fsm, *r);
                if (r != re)
                {
                    const int adv = doAction(r->rule->action, slot, m);
                    dumpRuleEventOutput(fsm, *r->rule, slot);
                    if (r->rule->action->deletes()) fsm.slots.collectGarbage(slot);
                    adjustSlot(adv, slot, fsm.slots);
                    *fsm.dbgout << "cursor" << objectid(dslot(&fsm.slots.segment, slot))
                            << json::close; // close RuleEvent object

                    return;
                }
                else
                {
                    *fsm.dbgout << json::close  // close "considered" array
                            << "output" << json::null
                            << "cursor" << objectid(dslot(&fsm.slots.segment, slot->next()))
                            << json::close;
                }
            }
        }
        else
#endif
        {
            if (r != re)
            {
                const int adv = doAction(r->rule->action, slot, m);
                if (m.status() != Machine::finished) return;
                if (r->rule->action->deletes()) fsm.slots.collectGarbage(slot);
                adjustSlot(adv, slot, fsm.slots);
                return;
            }
        }
    }

    slot = slot->next();
    return;
}

#if !defined GRAPHITE2_NTRACING

void Pass::dumpRuleEventConsidered(const FiniteStateMachine & fsm, const RuleEntry & re) const
{
    *fsm.dbgout << "considered" << json::array;
    for (const RuleEntry *r = fsm.rules.begin(); r != &re; ++r)
    {
        if (r->rule->preContext > fsm.slots.context())
            continue;
        *fsm.dbgout << json::flat << json::object
                << "id" << r->rule - m_rules
                << "failed" << true
                << "input" << json::flat << json::object
                    << "start" << objectid(dslot(&fsm.slots.segment, input_slot(fsm.slots, -r->rule->preContext)))
                    << "length" << r->rule->sort
                << json::close  // close "input"
            << json::close;     // close Rule object
    }
}


void Pass::dumpRuleEventOutput(const FiniteStateMachine & fsm, const Rule & r, Slot * const last_slot) const
{
    *fsm.dbgout << json::item << json::flat << json::object
            << "id" << &r - m_rules
            << "failed" << false
            << "input" << json::flat << json::object
                << "start" << objectid(dslot(&fsm.slots.segment, input_slot(fsm.slots, 0)))
                << "length" << r.sort - r.preContext
            << json::close  // close "input"
        << json::close      // close Rule object
        << json::close      // close considered array
        << "output" << json::object
            << "range" << json::flat << json::object
                << "start" << objectid(dslot(&fsm.slots.segment, input_slot(fsm.slots, 0)))
                << "end" << objectid(dslot(&fsm.slots.segment, last_slot))
            << json::close  // close "range"
            << "slots" << json::array;
    const Position rsb_prepos = last_slot ? last_slot->origin() : fsm.slots.segment.advance();
    fsm.slots.segment.positionSlots(0, 0, 0, fsm.slots.segment.currdir());

    for (Slot * slot = output_slot(fsm.slots, 0); slot != last_slot; slot = slot->next())
        *fsm.dbgout << dslot(&fsm.slots.segment, slot);
    *fsm.dbgout << json::close  // close "slots"
            << "postshift" << (last_slot ? last_slot->origin() : fsm.slots.segment.advance()) - rsb_prepos
        << json::close;         // close "output" object

}

#endif


inline
bool Pass::testPassConstraint(Machine & m) const
{
    if (!m_cPConstraint) return true;

    assert(m_cPConstraint.constraint());

    m.slotMap().reset(*m.slotMap().segment.first(), 0);
    m.slotMap().pushSlot(m.slotMap().segment.first());
    vm::slotref * map = m.slotMap().begin();
    const uint32 ret = m_cPConstraint.run(m, map);

#if !defined GRAPHITE2_NTRACING
    json * const dbgout = m.slotMap().segment.getFace()->logger();
    if (dbgout)
        *dbgout << "constraint" << (ret && m.status() == Machine::finished);
#endif

    return ret && m.status() == Machine::finished;
}

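// Run a rule's constraint program once for each slot of the rule's input
// sequence; every invocation must succeed for the rule to fire.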
bool Pass::testConstraint(const Rule & r, Machine & m) const
{
    const uint16 curr_context = m.slotMap().context();
    if (unsigned(r.sort + curr_context - r.preContext) > m.slotMap().size()
            || curr_context - r.preContext < 0) return false;

    vm::slotref * map = m.slotMap().begin() + curr_context - r.preContext;
    if (map[r.sort - 1] == 0)
        return false;

    if (!*r.constraint) return true;
    assert(r.constraint->constraint());
    for (int n = r.sort; n && map; --n, ++map)
    {
        if (!*map) continue;
        const int32 ret = r.constraint->run(m, map);
        if (!ret || m.status() != Machine::finished)
            return false;
    }

    return true;
}


void SlotMap::collectGarbage(Slot * &aSlot)
{
    for (Slot **s = begin(), *const *const se = end() - 1; s != se; ++s) {
        Slot *& slot = *s;
        if (slot && (slot->isDeleted() || slot->isCopied()))
        {
            if (slot == aSlot)
                aSlot = slot->prev() ? slot->prev() : slot->next();
            segment.freeSlot(slot);
        }
    }
}


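// Execute a rule's action program. The return value is the program's slot
// advance, which adjustSlot() applies relative to whatever slot the action
// left in *map.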
int Pass::doAction(const Code *codeptr, Slot * & slot_out, vm::Machine & m) const
{
    assert(codeptr);
    if (!*codeptr) return 0;
    SlotMap & smap = m.slotMap();
    vm::slotref * map = &smap[smap.context()];
    smap.highpassed(false);

    int32 ret = codeptr->run(m, map);

    if (m.status() != Machine::finished)
    {
        slot_out = NULL;
        smap.highwater(0);
        return 0;
    }

    slot_out = *map;
    return ret;
}


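// Move slot_out by delta positions along the slot chain, keeping the SlotMap's
// highwater/highpassed bookkeeping consistent as the cursor crosses the
// high-water mark.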
void Pass::adjustSlot(int delta, Slot * & slot_out, SlotMap & smap) const
{
    if (!slot_out)
    {
        if (smap.highpassed() || slot_out == smap.highwater())
        {
            slot_out = smap.segment.last();
            ++delta;
            if (!smap.highwater() || smap.highwater() == slot_out)
                smap.highpassed(false);
        }
        else
        {
            slot_out = smap.segment.first();
            --delta;
        }
    }
    if (delta < 0)
    {
        while (++delta <= 0 && slot_out)
        {
            slot_out = slot_out->prev();
            if (smap.highpassed() && smap.highwater() == slot_out)
                smap.highpassed(false);
        }
    }
    else if (delta > 0)
    {
        while (--delta >= 0 && slot_out)
        {
            if (slot_out == smap.highwater() && slot_out)
                smap.highpassed(true);
            slot_out = slot_out->next();
        }
    }
}

bool Pass::collisionShift(Segment *seg, int dir, json * const dbgout) const
{
    ShiftCollider shiftcoll(dbgout);
    // bool isfirst = true;
    bool hasCollisions = false;
    Slot *start = seg->first();      // turn on collision fixing for the first slot
    Slot *end = NULL;
    bool moved = false;

#if !defined GRAPHITE2_NTRACING
    if (dbgout)
        *dbgout << "collisions" << json::array
            << json::flat << json::object << "num-loops" << m_numCollRuns << json::close;
#endif

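    // Process the segment one collision group at a time: each iteration covers
    // the slots from a COLL_START slot up to the next COLL_END, then moves on to
    // the following group.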
    while (start)
    {
#if !defined GRAPHITE2_NTRACING
        if (dbgout)  *dbgout << json::object << "phase" << "1" << "moves" << json::array;
#endif
        hasCollisions = false;
        end = NULL;
        // phase 1 : position shiftable glyphs, ignoring kernable glyphs
        for (Slot *s = start; s; s = s->next())
        {
            const SlotCollision * c = seg->collisionInfo(s);
            if (start && (c->flags() & (SlotCollision::COLL_FIX | SlotCollision::COLL_KERN)) == SlotCollision::COLL_FIX
                    && !resolveCollisions(seg, s, start, shiftcoll, false, dir, moved, hasCollisions, dbgout))
                return false;
            if (s != start && (c->flags() & SlotCollision::COLL_END))
            {
                end = s->next();
                break;
            }
        }

#if !defined GRAPHITE2_NTRACING
        if (dbgout)
            *dbgout << json::close << json::close; // phase-1
#endif

        // phase 2 : loop until happy.
        for (int i = 0; i < m_numCollRuns - 1; ++i)
        {
            if (hasCollisions || moved)
            {

#if !defined GRAPHITE2_NTRACING
                if (dbgout)
                    *dbgout << json::object << "phase" << "2a" << "loop" << i << "moves" << json::array;
#endif
                // phase 2a : if any shiftable glyphs are in collision, iterate backwards,
                // fixing them and ignoring other non-collided glyphs. Note that this handles ONLY
                // glyphs that are actually in collision from phases 1 or 2b, and working backwards
                // has the intended effect of breaking logjams.
                if (hasCollisions)
                {
                    hasCollisions = false;
                    #if 0
                    moved = true;
                    for (Slot *s = start; s != end; s = s->next())
                    {
                        SlotCollision * c = seg->collisionInfo(s);
                        c->setShift(Position(0, 0));
                    }
                    #endif
                    Slot *lend = end ? end->prev() : seg->last();
                    Slot *lstart = start->prev();
                    for (Slot *s = lend; s != lstart; s = s->prev())
                    {
                        SlotCollision * c = seg->collisionInfo(s);
                        if (start && (c->flags() & (SlotCollision::COLL_FIX | SlotCollision::COLL_KERN | SlotCollision::COLL_ISCOL))
                                == (SlotCollision::COLL_FIX | SlotCollision::COLL_ISCOL))  // ONLY if this glyph is still colliding
                        {
                            if (!resolveCollisions(seg, s, lend, shiftcoll, true, dir, moved, hasCollisions, dbgout))
                                return false;
                            c->setFlags(c->flags() | SlotCollision::COLL_TEMPLOCK);
                        }
                    }
                }

#if !defined GRAPHITE2_NTRACING
                if (dbgout)
                    *dbgout << json::close << json::close // phase 2a
                        << json::object << "phase" << "2b" << "loop" << i << "moves" << json::array;
#endif

                // phase 2b : redo basic diacritic positioning pass for ALL glyphs. Each successive loop adjusts
                // glyphs from their current adjusted position, which has the effect of gradually minimizing the
                // resulting adjustment; i.e., the final result will be gradually closer to the original location.
                // Also it allows more flexibility in the final adjustment, since it is moving along the
                // possible 8 vectors from successively different starting locations.
                if (moved)
                {
                    moved = false;
                    for (Slot *s = start; s != end; s = s->next())
                    {
                        SlotCollision * c = seg->collisionInfo(s);
                        if (start && (c->flags() & (SlotCollision::COLL_FIX | SlotCollision::COLL_TEMPLOCK
                                        | SlotCollision::COLL_KERN)) == SlotCollision::COLL_FIX
                                && !resolveCollisions(seg, s, start, shiftcoll, false, dir, moved, hasCollisions, dbgout))
                            return false;
                        else if (c->flags() & SlotCollision::COLL_TEMPLOCK)
                            c->setFlags(c->flags() & ~SlotCollision::COLL_TEMPLOCK);
                    }
                }
                // if (!hasCollisions) // no, don't leave yet because phase 2b will continue to improve things
                //     break;
#if !defined GRAPHITE2_NTRACING
                if (dbgout)
                    *dbgout << json::close << json::close; // phase 2
#endif
            }
        }
        if (!end)
            break;
        start = NULL;
        for (Slot *s = end->prev(); s; s = s->next())
        {
            if (seg->collisionInfo(s)->flags() & SlotCollision::COLL_START)
            {
                start = s;
                break;
            }
        }
    }
    return true;
}

bool Pass::collisionKern(Segment *seg, int dir, json * const dbgout) const
{
    Slot *start = seg->first();
    float ymin = 1e38f;
    float ymax = -1e38f;
    const GlyphCache &gc = seg->getFace()->glyphs();

    // phase 3 : handle kerning of clusters
#if !defined GRAPHITE2_NTRACING
    if (dbgout)
        *dbgout << json::object << "phase" << "3" << "moves" << json::array;
#endif

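    // Track the vertical extent (ymin/ymax) of the non-space glyphs seen so far;
    // resolveKern passes this band to the kern collider when it initialises a slot.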
    for (Slot *s = seg->first(); s; s = s->next())
    {
        if (!gc.check(s->gid()))
            return false;
        const SlotCollision * c = seg->collisionInfo(s);
        const Rect &bbox = seg->theGlyphBBoxTemporary(s->gid());
        float y = s->origin().y + c->shift().y;
        if (!(c->flags() & SlotCollision::COLL_ISSPACE))
        {
            ymax = max(y + bbox.tr.y, ymax);
            ymin = min(y + bbox.bl.y, ymin);
        }
        if (start && (c->flags() & (SlotCollision::COLL_KERN | SlotCollision::COLL_FIX))
                        == (SlotCollision::COLL_KERN | SlotCollision::COLL_FIX))
            resolveKern(seg, s, start, dir, ymin, ymax, dbgout);
        if (c->flags() & SlotCollision::COLL_END)
            start = NULL;
        if (c->flags() & SlotCollision::COLL_START)
            start = s;
    }

#if !defined GRAPHITE2_NTRACING
    if (dbgout)
        *dbgout << json::close << json::close; // phase 3
#endif
    return true;
}

bool Pass::collisionFinish(Segment *seg, GR_MAYBE_UNUSED json * const dbgout) const
{
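    // Fold each slot's accumulated collision shift into its permanent offset so
    // that later processing sees a clean (zero) shift.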
    for (Slot *s = seg->first(); s; s = s->next())
    {
        SlotCollision *c = seg->collisionInfo(s);
        if (c->shift().x != 0 || c->shift().y != 0)
        {
            const Position newOffset = c->shift();
            const Position nullPosition(0, 0);
            c->setOffset(newOffset + c->offset());
            c->setShift(nullPosition);
        }
    }
    // seg->positionSlots();

#if !defined GRAPHITE2_NTRACING
    if (dbgout)
        *dbgout << json::close;
#endif
    return true;
}

// Can slot s be kerned, or is it attached to something that can be kerned?
static bool inKernCluster(Segment *seg, Slot *s)
{
    SlotCollision *c = seg->collisionInfo(s);
    if (c->flags() & SlotCollision::COLL_KERN /** && c->flags() & SlotCollision::COLL_FIX **/ )
        return true;
    while (s->attachedTo())
    {
        s = s->attachedTo();
        c = seg->collisionInfo(s);
        if (c->flags() & SlotCollision::COLL_KERN /** && c->flags() & SlotCollision::COLL_FIX **/ )
            return true;
    }
    return false;
}

// Fix collisions for the given slot.
// Return true if everything was fixed, false if there are still collisions remaining.
// isRev means we are processing backwards.
bool Pass::resolveCollisions(Segment *seg, Slot *slotFix, Slot *start,
        ShiftCollider &coll, GR_MAYBE_UNUSED bool isRev, int dir, bool &moved, bool &hasCol,
        json * const dbgout) const
{
    Slot * nbor;  // neighboring slot
    SlotCollision *cFix = seg->collisionInfo(slotFix);
    if (!coll.initSlot(seg, slotFix, cFix->limit(), cFix->margin(), cFix->marginWt(),
            cFix->shift(), cFix->offset(), dir, dbgout))
        return false;
    bool collides = false;
    // When we're processing forward, ignore kernable glyphs that precede the target glyph.
    // When processing backward, don't ignore these until we pass slotFix.
    bool ignoreForKern = !isRev;
    bool rtl = dir & 1;
    Slot *base = slotFix;
    while (base->attachedTo())
        base = base->attachedTo();
    Position zero(0., 0.);

    // Look for collisions with the neighboring glyphs.
    for (nbor = start; nbor; nbor = isRev ? nbor->prev() : nbor->next())
    {
        SlotCollision *cNbor = seg->collisionInfo(nbor);
        bool sameCluster = nbor->isChildOf(base);
        if (nbor != slotFix                                 // don't process if this is the slot of interest
                && !(cNbor->ignore())                       // don't process if ignoring
                && (nbor == base || sameCluster             // process if in the same cluster as slotFix
                    || !inKernCluster(seg, nbor))           // or this cluster is not to be kerned
                //  || (rtl ^ ignoreForKern))               // or it comes before(ltr) or after(rtl)
                && (!isRev                                  // if processing forwards then good to merge otherwise only:
                    || !(cNbor->flags() & SlotCollision::COLL_FIX)                      // merge in immovable stuff
                    || ((cNbor->flags() & SlotCollision::COLL_KERN) && !sameCluster)    // ignore other kernable clusters
                    || (cNbor->flags() & SlotCollision::COLL_ISCOL))                    // test against other collided glyphs
                && !coll.mergeSlot(seg, nbor, cNbor, cNbor->shift(), !ignoreForKern, sameCluster, collides, false, dbgout))
            return false;
        else if (nbor == slotFix)
            // Switching sides of this glyph - if we were ignoring kernable stuff before, don't anymore.
            ignoreForKern = !ignoreForKern;

        if (nbor != start && (cNbor->flags() & (isRev ? SlotCollision::COLL_START : SlotCollision::COLL_END)))
            break;
    }
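    // Only pay for a full resolve if something actually collided or the slot
    // already carries a shift that may need revisiting.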
    bool isCol = false;
    if (collides || cFix->shift().x != 0.f || cFix->shift().y != 0.f)
    {
        Position shift = coll.resolve(seg, isCol, dbgout);
        // isCol has been set to true if a collision remains.
        if (std::fabs(shift.x) < 1e38f && std::fabs(shift.y) < 1e38f)
        {
            if (sqr(shift.x-cFix->shift().x) + sqr(shift.y-cFix->shift().y) >= m_colThreshold * m_colThreshold)
                moved = true;
            cFix->setShift(shift);
            if (slotFix->firstChild())
            {
                Rect bbox;
                Position here = slotFix->origin() + shift;
                float clusterMin = here.x;
                slotFix->firstChild()->finalise(seg, NULL, here, bbox, 0, clusterMin, rtl, false);
            }
        }
    }
    else
    {
        // This glyph is not colliding with anything.
#if !defined GRAPHITE2_NTRACING
        if (dbgout)
        {
            *dbgout << json::object
                << "missed" << objectid(dslot(seg, slotFix));
            coll.outputJsonDbg(dbgout, seg, -1);
            *dbgout << json::close;
        }
#endif
    }

    // Set the is-collision flag bit.
    if (isCol)
    { cFix->setFlags(cFix->flags() | SlotCollision::COLL_ISCOL | SlotCollision::COLL_KNOWN); }
    else
    { cFix->setFlags((cFix->flags() & ~SlotCollision::COLL_ISCOL) | SlotCollision::COLL_KNOWN); }
    hasCol |= isCol;
    return true;
}

float Pass::resolveKern(Segment *seg, Slot *slotFix, GR_MAYBE_UNUSED Slot *start, int dir,
        float &ymin, float &ymax, json *const dbgout) const
{
    Slot *nbor; // neighboring slot
    float currSpace = 0.;
    bool collides = false;
    unsigned int space_count = 0;
    Slot *base = slotFix;
    while (base->attachedTo())
        base = base->attachedTo();
    SlotCollision *cFix = seg->collisionInfo(base);
    const GlyphCache &gc = seg->getFace()->glyphs();
    const Rect &bbb = seg->theGlyphBBoxTemporary(slotFix->gid());
    const float by = slotFix->origin().y + cFix->shift().y;

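    // Kerning is resolved at the cluster base: if this slot is attached to
    // something else, just make sure the base is marked kernable and bail out.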
    if (base != slotFix)
    {
        cFix->setFlags(cFix->flags() | SlotCollision::COLL_KERN | SlotCollision::COLL_FIX);
        return 0;
    }
    bool seenEnd = (cFix->flags() & SlotCollision::COLL_END) != 0;
    bool isInit = false;
    KernCollider coll(dbgout);

    ymax = max(by + bbb.tr.y, ymax);
    ymin = min(by + bbb.bl.y, ymin);
    for (nbor = slotFix->next(); nbor; nbor = nbor->next())
    {
        if (!gc.check(nbor->gid()))
            return 0.;
        const Rect &bb = seg->theGlyphBBoxTemporary(nbor->gid());
        SlotCollision *cNbor = seg->collisionInfo(nbor);
        const float nby = nbor->origin().y + cNbor->shift().y;
        if (nbor->isChildOf(base))
        {
            ymax = max(nby + bb.tr.y, ymax);
            ymin = min(nby + bb.bl.y, ymin);
            continue;
        }
        if ((bb.bl.y == 0.f && bb.tr.y == 0.f) || (cNbor->flags() & SlotCollision::COLL_ISSPACE))
        {
            if (m_kernColls == InWord)
                break;
            // Add space for a space glyph.
            currSpace += nbor->advance();
            ++space_count;
        }
        else
        {
            space_count = 0;
            if (nbor != slotFix && !cNbor->ignore())
            {
                seenEnd = true;
                if (!isInit)
                {
                    if (!coll.initSlot(seg, slotFix, cFix->limit(), cFix->margin(),
                            cFix->shift(), cFix->offset(), dir, ymin, ymax, dbgout))
                        return 0.;
                    isInit = true;
                }
                collides |= coll.mergeSlot(seg, nbor, cNbor->shift(), currSpace, dir, dbgout);
            }
        }
        if (cNbor->flags() & SlotCollision::COLL_END)
        {
            if (seenEnd && space_count < 2)
                break;
            else
                seenEnd = true;
        }
    }
    if (collides)
    {
        Position mv = coll.resolve(seg, slotFix, dir, dbgout);
        coll.shift(mv, dir);
        Position delta = slotFix->advancePos() + mv - cFix->shift();
        slotFix->advance(delta);
        cFix->setShift(mv);
        return mv.x;
    }
    return 0.;
}