1 | /* |
2 | * Copyright (c) 2015-2016, Intel Corporation |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions are met: |
6 | * |
7 | * * Redistributions of source code must retain the above copyright notice, |
8 | * this list of conditions and the following disclaimer. |
9 | * * Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * * Neither the name of Intel Corporation nor the names of its contributors |
13 | * may be used to endorse or promote products derived from this software |
14 | * without specific prior written permission. |
15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
17 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
19 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
20 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
26 | * POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
28 | |
29 | /** \file |
30 | * \brief NFA graph merging ("uncalc") |
31 | * |
 * This file contains our collection of NFA graph merging strategies.
33 | * |
34 | * NFAGraph merging is generally guided by the length of the common prefix |
35 | * between NFAGraph pairs. |
36 | */ |
37 | #include "grey.h" |
38 | #include "ng_holder.h" |
39 | #include "ng_limex.h" |
40 | #include "ng_redundancy.h" |
41 | #include "ng_region.h" |
42 | #include "ng_uncalc_components.h" |
43 | #include "ng_util.h" |
44 | #include "ue2common.h" |
45 | #include "util/compile_context.h" |
46 | #include "util/container.h" |
47 | #include "util/graph_range.h" |
48 | #include "util/ue2string.h" |
49 | |
#include <algorithm>
#include <deque>
#include <map>
#include <queue>
#include <set>
#include <unordered_map>
#include <vector>
56 | |
57 | #include <boost/range/adaptor/map.hpp> |
58 | |
59 | using namespace std; |
60 | using boost::adaptors::map_values; |
61 | |
62 | namespace ue2 { |
63 | |
static const u32 FAST_STATE_LIMIT = 256; /**< largest desirable size (in
                                          * states) for a merged NFA */
65 | |
66 | /** Sentinel value meaning no component has yet been selected. */ |
67 | static const u32 NO_COMPONENT = ~0U; |
68 | |
/** Sentinel rank for a vertex that does not appear in the topological
 * ordering. */
static const u32 UNUSED_STATE = ~0U;
70 | |
71 | namespace { |
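/**
 * Helper that numbers the vertices of a holder by their position in a
 * topological ordering; vertices missing from the ordering (if any) are
 * assigned the sentinel rank UNUSED_STATE.
 */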
72 | struct ranking_info { |
73 | explicit ranking_info(const NGHolder &h) : to_vertex(getTopoOrdering(h)) { |
74 | u32 rank = 0; |
75 | |
76 | reverse(to_vertex.begin(), to_vertex.end()); |
77 | |
78 | for (NFAVertex v : to_vertex) { |
79 | to_rank[v] = rank++; |
80 | } |
81 | |
82 | for (NFAVertex v : vertices_range(h)) { |
83 | if (!contains(to_rank, v)) { |
84 | to_rank[v] = UNUSED_STATE; |
85 | } |
86 | } |
87 | } |
88 | |
89 | NFAVertex at(u32 ranking) const { return to_vertex.at(ranking); } |
90 | u32 get(NFAVertex v) const { return to_rank.at(v); } |
91 | u32 size() const { return (u32)to_vertex.size(); } |
92 | u32 add_to_tail(NFAVertex v) { |
93 | u32 rank = size(); |
94 | to_rank[v] = rank; |
95 | to_vertex.push_back(v); |
96 | return rank; |
97 | } |
98 | |
99 | private: |
100 | vector<NFAVertex> to_vertex; |
101 | unordered_map<NFAVertex, u32> to_rank; |
102 | }; |
103 | } |
104 | |
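/**
 * True if vertices \p va and \p vb can occupy the same position in a common
 * prefix: they must have the same reachability, the same special (start)
 * identity, and matching edges to accept and acceptEod.
 */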
105 | static never_inline |
106 | bool cplVerticesMatch(const NGHolder &ga, NFAVertex va, |
107 | const NGHolder &gb, NFAVertex vb) { |
108 | // Must have the same reachability. |
109 | if (ga[va].char_reach != gb[vb].char_reach) { |
110 | return false; |
111 | } |
112 | |
113 | // If they're start vertices, they must be the same one. |
114 | if (is_any_start(va, ga) || is_any_start(vb, gb)) { |
115 | if (ga[va].index != gb[vb].index) { |
116 | return false; |
117 | } |
118 | } |
119 | |
120 | bool va_accept = edge(va, ga.accept, ga).second; |
121 | bool vb_accept = edge(vb, gb.accept, gb).second; |
122 | bool va_acceptEod = edge(va, ga.acceptEod, ga).second; |
123 | bool vb_acceptEod = edge(vb, gb.acceptEod, gb).second; |
124 | |
125 | // Must have the same accept/acceptEod edges. |
126 | if (va_accept != vb_accept || va_acceptEod != vb_acceptEod) { |
127 | return false; |
128 | } |
129 | |
130 | return true; |
131 | } |
132 | |
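/**
 * Upper bound on the common prefix length, based purely on local vertex
 * properties: the number of leading positions at which the two rankings have
 * matching vertices (see cplVerticesMatch).
 */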
133 | static never_inline |
134 | u32 cplCommonReachAndSimple(const NGHolder &ga, const ranking_info &a_ranking, |
135 | const NGHolder &gb, const ranking_info &b_ranking) { |
136 | u32 ml = min(a_ranking.size(), b_ranking.size()); |
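    /* Clamp to the range of a u16, as callers (e.g. buildNfaMergeQueue) cache
     * cpl values in u16s. */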
137 | if (ml > 65535) { |
138 | ml = 65535; |
139 | } |
140 | |
    // Count the number of leading vertices which share reachability,
    // accept-edge and "startedness" properties.
143 | u32 max = 0; |
144 | for (; max < ml; max++) { |
145 | if (!cplVerticesMatch(ga, a_ranking.at(max), gb, b_ranking.at(max))) { |
146 | break; |
147 | } |
148 | } |
149 | |
150 | return max; |
151 | } |
152 | |
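/**
 * Length of the common prefix of graphs \p ga and \p gb: the largest number
 * of leading ranked vertices that match pairwise, have no in-edges from
 * outside the prefix region, and have identical out-edges (including tops)
 * within the region. We start from the local upper bound and shrink it until
 * all structural checks pass.
 */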
153 | static |
154 | u32 commonPrefixLength(const NGHolder &ga, const ranking_info &a_ranking, |
155 | const NGHolder &gb, const ranking_info &b_ranking) { |
156 | /* upper bound on the common region based on local properties */ |
157 | u32 max = cplCommonReachAndSimple(ga, a_ranking, gb, b_ranking); |
158 | DEBUG_PRINTF("cpl upper bound %u\n" , max); |
159 | |
160 | while (max > 0) { |
161 | /* shrink max region based on in-edges from outside the region */ |
162 | for (size_t j = max; j > 0; j--) { |
163 | NFAVertex a_v = a_ranking.at(j - 1); |
164 | NFAVertex b_v = b_ranking.at(j - 1); |
165 | for (auto u : inv_adjacent_vertices_range(a_v, ga)) { |
166 | u32 state_id = a_ranking.get(u); |
167 | if (state_id != UNUSED_STATE && state_id >= max) { |
168 | max = j - 1; |
169 | DEBUG_PRINTF("lowering max to %u\n" , max); |
170 | goto next_vertex; |
171 | } |
172 | } |
173 | |
174 | for (auto u : inv_adjacent_vertices_range(b_v, gb)) { |
175 | u32 state_id = b_ranking.get(u); |
176 | if (state_id != UNUSED_STATE && state_id >= max) { |
177 | max = j - 1; |
178 | DEBUG_PRINTF("lowering max to %u\n" , max); |
179 | goto next_vertex; |
180 | } |
181 | } |
182 | |
183 | next_vertex:; |
184 | } |
185 | |
186 | /* Ensure that every pair of vertices has same out-edges to vertices in |
187 | the region. */ |
188 | for (size_t i = 0; i < max; i++) { |
189 | size_t a_count = 0; |
190 | size_t b_count = 0; |
191 | |
192 | for (NFAEdge a_edge : out_edges_range(a_ranking.at(i), ga)) { |
193 | u32 sid = a_ranking.get(target(a_edge, ga)); |
194 | if (sid == UNUSED_STATE || sid >= max) { |
195 | continue; |
196 | } |
197 | |
198 | a_count++; |
199 | |
200 | NFAEdge b_edge = edge(b_ranking.at(i), b_ranking.at(sid), gb); |
201 | |
202 | if (!b_edge) { |
203 | max = i; |
204 | DEBUG_PRINTF("lowering max to %u due to edge %zu->%u\n" , |
205 | max, i, sid); |
206 | goto try_smaller; |
207 | } |
208 | |
209 | if (ga[a_edge].tops != gb[b_edge].tops) { |
210 | max = i; |
211 | DEBUG_PRINTF("tops don't match on edge %zu->%u\n" , i, sid); |
212 | goto try_smaller; |
213 | } |
214 | } |
215 | |
216 | for (NFAVertex b_v : adjacent_vertices_range(b_ranking.at(i), gb)) { |
217 | u32 sid = b_ranking.get(b_v); |
218 | if (sid == UNUSED_STATE || sid >= max) { |
219 | continue; |
220 | } |
221 | |
222 | b_count++; |
223 | } |
224 | |
225 | if (a_count != b_count) { |
226 | max = i; |
227 | DEBUG_PRINTF("lowering max to %u due to a,b count (a_count=%zu," |
228 | " b_count=%zu)\n" , max, a_count, b_count); |
229 | goto try_smaller; |
230 | } |
231 | } |
232 | |
233 | DEBUG_PRINTF("survived checks, returning cpl %u\n" , max); |
234 | return max; |
235 | try_smaller:; |
236 | } |
237 | |
238 | DEBUG_PRINTF("failed to find any common region\n" ); |
239 | return 0; |
240 | } |
241 | |
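/** \brief Length (in vertices) of the common prefix of \p ga and \p gb. */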
242 | u32 commonPrefixLength(const NGHolder &ga, const NGHolder &gb) { |
243 | return commonPrefixLength(ga, ranking_info(ga), gb, ranking_info(gb)); |
244 | } |
245 | |
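/**
 * Merges graph \p vic into \p dest: the first \p common_len ranked vertices
 * of \p vic are fused with their counterparts in \p dest (unioning their
 * reports), while the remainder of \p vic is copied in along with its edges.
 */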
246 | static never_inline |
247 | void mergeNfaComponent(NGHolder &dest, const NGHolder &vic, size_t common_len) { |
248 | assert(&dest != &vic); |
249 | |
250 | auto dest_info = ranking_info(dest); |
251 | auto vic_info = ranking_info(vic); |
252 | |
253 | map<NFAVertex, NFAVertex> vmap; // vic -> dest |
254 | |
255 | vmap[vic.start] = dest.start; |
256 | vmap[vic.startDs] = dest.startDs; |
257 | vmap[vic.accept] = dest.accept; |
258 | vmap[vic.acceptEod] = dest.acceptEod; |
259 | vmap[NGHolder::null_vertex()] = NGHolder::null_vertex(); |
260 | |
261 | // For vertices in the common len, add to vmap and merge in the reports, if |
262 | // any. |
263 | for (u32 i = 0; i < common_len; i++) { |
264 | NFAVertex v_old = vic_info.at(i); |
265 | NFAVertex v = dest_info.at(i); |
266 | vmap[v_old] = v; |
267 | |
268 | const auto &reports = vic[v_old].reports; |
269 | dest[v].reports.insert(reports.begin(), reports.end()); |
270 | } |
271 | |
272 | // Add in vertices beyond the common len |
273 | for (u32 i = common_len; i < vic_info.size(); i++) { |
274 | NFAVertex v_old = vic_info.at(i); |
275 | |
276 | if (is_special(v_old, vic)) { |
277 | // Dest already has start vertices, just merge the reports. |
278 | u32 idx = vic[v_old].index; |
279 | NFAVertex v = dest.getSpecialVertex(idx); |
280 | const auto &reports = vic[v_old].reports; |
281 | dest[v].reports.insert(reports.begin(), reports.end()); |
282 | continue; |
283 | } |
284 | |
285 | NFAVertex v = add_vertex(vic[v_old], dest); |
286 | dest_info.add_to_tail(v); |
287 | vmap[v_old] = v; |
288 | } |
289 | |
290 | /* add edges */ |
291 | DEBUG_PRINTF("common_len=%zu\n" , common_len); |
292 | for (const auto &e : edges_range(vic)) { |
293 | NFAVertex u_old = source(e, vic); |
294 | NFAVertex v_old = target(e, vic); |
295 | NFAVertex u = vmap[u_old]; |
296 | NFAVertex v = vmap[v_old]; |
297 | bool uspecial = is_special(u, dest); |
298 | bool vspecial = is_special(v, dest); |
299 | |
300 | // Skip stylised edges that are already present. |
301 | if (uspecial && vspecial && edge(u, v, dest).second) { |
302 | continue; |
303 | } |
304 | |
305 | // We're in the common region if v's state ID is low enough, unless v |
306 | // is a special (an accept), in which case we use u's state ID. |
307 | bool in_common_region = dest_info.get(v) < common_len; |
308 | if (vspecial && dest_info.get(u) < common_len) { |
309 | in_common_region = true; |
310 | } |
311 | |
312 | DEBUG_PRINTF("adding idx=%zu (state %u) -> idx=%zu (state %u)%s\n" , |
313 | dest[u].index, dest_info.get(u), |
314 | dest[v].index, dest_info.get(v), |
315 | in_common_region ? " [common]" : "" ); |
316 | |
317 | if (in_common_region) { |
318 | if (!is_special(v, dest)) { |
319 | DEBUG_PRINTF("skipping common edge\n" ); |
320 | assert(edge(u, v, dest).second); |
321 | // Should never merge edges with different top values. |
322 | assert(vic[e].tops == dest[edge(u, v, dest)].tops); |
323 | continue; |
324 | } else { |
325 | assert(is_any_accept(v, dest)); |
326 | // If the edge exists in both graphs, skip it. |
327 | if (edge(u, v, dest).second) { |
328 | DEBUG_PRINTF("skipping common edge to accept\n" ); |
329 | continue; |
330 | } |
331 | } |
332 | } |
333 | |
334 | assert(!edge(u, v, dest).second); |
335 | add_edge(u, v, vic[e], dest); |
336 | } |
337 | |
338 | renumber_edges(dest); |
339 | renumber_vertices(dest); |
340 | } |
341 | |
342 | namespace { |
343 | struct NfaMergeCandidateH { |
344 | NfaMergeCandidateH(size_t cpl_in, NGHolder *first_in, NGHolder *second_in, |
345 | u32 tb_in) |
346 | : cpl(cpl_in), first(first_in), second(second_in), tie_breaker(tb_in) {} |
347 | |
348 | size_t cpl; //!< common prefix length |
349 | NGHolder *first; //!< first component to merge |
350 | NGHolder *second; //!< second component to merge |
351 | u32 tie_breaker; //!< for determinism |
352 | |
353 | bool operator<(const NfaMergeCandidateH &other) const { |
354 | if (cpl != other.cpl) { |
355 | return cpl < other.cpl; |
356 | } else { |
357 | return tie_breaker < other.tie_breaker; |
358 | } |
359 | } |
360 | }; |
361 | |
362 | } // end namespace |
363 | |
/** Returns true if graphs \p ha and \p hb can (and should) be merged. */
365 | static |
366 | bool shouldMerge(const NGHolder &ha, const NGHolder &hb, size_t cpl, |
367 | const ReportManager *rm, const CompileContext &cc) { |
368 | size_t combinedStateCount = num_vertices(ha) + num_vertices(hb) - cpl; |
369 | |
370 | combinedStateCount -= 2 * 2; /* discount accepts from both */ |
371 | |
372 | if (is_triggered(ha)) { |
373 | /* allow for a state for each top, ignore existing starts */ |
374 | combinedStateCount -= 2; /* for start, startDs */ |
375 | auto tops = getTops(ha); |
376 | insert(&tops, getTops(hb)); |
377 | combinedStateCount += tops.size(); |
378 | } |
379 | |
380 | if (combinedStateCount > FAST_STATE_LIMIT) { |
381 | // More complex implementability check. |
382 | NGHolder h_temp; |
383 | cloneHolder(h_temp, ha); |
384 | assert(h_temp.kind == hb.kind); |
385 | mergeNfaComponent(h_temp, hb, cpl); |
386 | reduceImplementableGraph(h_temp, SOM_NONE, rm, cc); |
387 | u32 numStates = isImplementableNFA(h_temp, rm, cc); |
388 | DEBUG_PRINTF("isImplementableNFA returned %u states\n" , numStates); |
389 | if (!numStates) { |
390 | DEBUG_PRINTF("not implementable\n" ); |
391 | return false; |
392 | } else if (numStates > FAST_STATE_LIMIT) { |
393 | DEBUG_PRINTF("too many states to merge\n" ); |
394 | return false; |
395 | } |
396 | } |
397 | |
398 | return true; |
399 | } |
400 | |
/** Returns true if the graphs have start vertices that are compatible for
 * merging. Rose may generate all sorts of wacky vacuous cases, and the merge
 * code isn't currently up to handling them. */
404 | static |
405 | bool compatibleStarts(const NGHolder &ga, const NGHolder &gb) { |
406 | // Start and startDs must have the same self-loops. |
407 | return (edge(ga.startDs, ga.startDs, ga).second == |
408 | edge(gb.startDs, gb.startDs, gb).second) && |
409 | (edge(ga.start, ga.start, ga).second == |
410 | edge(gb.start, gb.start, gb).second); |
411 | } |
412 | |
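/**
 * Builds a priority queue of candidate merges for the cluster, ordered by
 * common prefix length with a deterministic tie-breaker.
 */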
413 | static never_inline |
414 | void buildNfaMergeQueue(const vector<NGHolder *> &cluster, |
415 | priority_queue<NfaMergeCandidateH> *pq) { |
416 | const size_t cs = cluster.size(); |
417 | assert(cs < NO_COMPONENT); |
418 | |
419 | // First, make sure all holders have numbered states and collect their |
420 | // counts. |
421 | vector<ranking_info> states_map; |
422 | states_map.reserve(cs); |
423 | for (size_t i = 0; i < cs; i++) { |
424 | assert(cluster[i]); |
425 | assert(states_map.size() == i); |
426 | const NGHolder &g = *(cluster[i]); |
427 | states_map.emplace_back(g); |
428 | } |
429 | |
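    /* seen_cpl caches computed cpl values (a cs x cs matrix); best_comp[i]
     * records the partner with the best cpl seen so far for component i. */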
430 | vector<u16> seen_cpl(cs * cs, 0); |
431 | vector<u32> best_comp(cs, NO_COMPONENT); |
432 | |
    /* Computing commonPrefixLength for all O(cs^2) pairs is expensive, so we
     * reuse cached values where possible. This relies on a prefix property of
     * cpls: if cpl(x, z) != cpl(y, z), then cpl(x, y) is the smaller of the
     * two. When a component's best partner so far has differing cached cpls
     * against ci and cj, the smaller cached value can be reused directly. */
434 | for (u32 ci = 0; ci < cs; ci++) { |
435 | for (u32 cj = ci + 1; cj < cs; cj++) { |
436 | u16 cpl = 0; |
437 | bool calc = false; |
438 | |
439 | if (best_comp[ci] != NO_COMPONENT) { |
440 | u32 bc = best_comp[ci]; |
441 | if (seen_cpl[bc + cs * cj] < seen_cpl[bc + cs * ci]) { |
442 | cpl = seen_cpl[bc + cs * cj]; |
443 | DEBUG_PRINTF("using cached cpl from %u %u\n" , bc, cpl); |
444 | calc = true; |
445 | } |
446 | } |
447 | |
448 | if (!calc && best_comp[cj] != NO_COMPONENT) { |
449 | u32 bc = best_comp[cj]; |
450 | if (seen_cpl[bc + cs * ci] < seen_cpl[bc + cs * cj]) { |
451 | cpl = seen_cpl[bc + cs * ci]; |
452 | DEBUG_PRINTF("using cached cpl from %u %u\n" , bc, cpl); |
453 | calc = true; |
454 | } |
455 | } |
456 | |
457 | NGHolder &g_i = *(cluster[ci]); |
458 | NGHolder &g_j = *(cluster[cj]); |
459 | |
460 | if (!compatibleStarts(g_i, g_j)) { |
461 | continue; |
462 | } |
463 | |
464 | if (!calc) { |
465 | cpl = commonPrefixLength(g_i, states_map[ci], |
466 | g_j, states_map[cj]); |
467 | } |
468 | |
469 | seen_cpl[ci + cs * cj] = cpl; |
470 | seen_cpl[cj + cs * ci] = cpl; |
471 | |
472 | if (best_comp[cj] == NO_COMPONENT |
473 | || seen_cpl[best_comp[cj] + cs * cj] < cpl) { |
474 | best_comp[cj] = ci; |
475 | } |
476 | |
477 | DEBUG_PRINTF("cpl %u %u = %u\n" , ci, cj, cpl); |
478 | |
479 | pq->push(NfaMergeCandidateH(cpl, cluster[ci], cluster[cj], |
480 | ci * cs + cj)); |
481 | } |
482 | } |
483 | } |
484 | |
485 | /** |
486 | * True if the graphs have mergeable starts. |
487 | * |
488 | * Nowadays, this means that any vacuous edges must have the same tops. In |
489 | * addition, mixed-accept cases need to have matching reports. |
490 | */ |
491 | static |
492 | bool mergeableStarts(const NGHolder &h1, const NGHolder &h2) { |
493 | if (!isVacuous(h1) || !isVacuous(h2)) { |
494 | return true; |
495 | } |
496 | |
    // Vacuous edges from startDs should not occur: we have better ways to
    // implement true dot-star relationships. Just in case they do, refuse to
    // merge.
500 | if (is_match_vertex(h1.startDs, h1) || is_match_vertex(h2.startDs, h2)) { |
501 | assert(0); |
502 | return false; |
503 | } |
504 | |
505 | /* TODO: relax top checks if reports match */ |
506 | |
507 | // If both graphs have edge (start, accept), the tops must match. |
508 | NFAEdge e1_accept = edge(h1.start, h1.accept, h1); |
509 | NFAEdge e2_accept = edge(h2.start, h2.accept, h2); |
510 | if (e1_accept && e2_accept && h1[e1_accept].tops != h2[e2_accept].tops) { |
511 | return false; |
512 | } |
513 | |
514 | // If both graphs have edge (start, acceptEod), the tops must match. |
515 | NFAEdge e1_eod = edge(h1.start, h1.acceptEod, h1); |
516 | NFAEdge e2_eod = edge(h2.start, h2.acceptEod, h2); |
517 | if (e1_eod && e2_eod && h1[e1_eod].tops != h2[e2_eod].tops) { |
518 | return false; |
519 | } |
520 | |
521 | // If one graph has an edge to accept and the other has an edge to |
522 | // acceptEod, the reports must match for the merge to be safe. |
523 | if ((e1_accept && e2_eod) || (e2_accept && e1_eod)) { |
524 | if (h1[h1.start].reports != h2[h2.start].reports) { |
525 | return false; |
526 | } |
527 | } |
528 | |
529 | return true; |
530 | } |
531 | |
532 | /** Merge graph \p ga into graph \p gb. Returns false on failure. */ |
533 | bool mergeNfaPair(const NGHolder &ga, NGHolder &gb, const ReportManager *rm, |
534 | const CompileContext &cc) { |
535 | assert(ga.kind == gb.kind); |
536 | |
537 | // Vacuous NFAs require special checks on their starts to ensure that tops |
538 | // match, and that reports match for mixed-accept cases. |
539 | if (!mergeableStarts(ga, gb)) { |
540 | DEBUG_PRINTF("starts aren't mergeable\n" ); |
541 | return false; |
542 | } |
543 | |
544 | u32 cpl = commonPrefixLength(ga, gb); |
545 | if (!shouldMerge(gb, ga, cpl, rm, cc)) { |
546 | return false; |
547 | } |
548 | |
549 | mergeNfaComponent(gb, ga, cpl); |
550 | reduceImplementableGraph(gb, SOM_NONE, rm, cc); |
551 | return true; |
552 | } |
553 | |
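/**
 * Greedily merges the NGHolders in \p cluster, taking the candidate pair with
 * the longest common prefix first. Returns a map from each merged (victim)
 * holder to the holder that ultimately absorbed it; holders that appear as
 * keys are dead and should not be used further.
 *
 * Illustrative sketch of caller-side usage (releaseHolder is a hypothetical
 * cleanup helper, not part of this API):
 *
 *     auto merged = mergeNfaCluster(cluster, &rm, cc);
 *     for (const auto &m : merged) {
 *         releaseHolder(m.first); // m.first was merged into m.second
 *     }
 */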
554 | map<NGHolder *, NGHolder *> mergeNfaCluster(const vector<NGHolder *> &cluster, |
555 | const ReportManager *rm, |
556 | const CompileContext &cc) { |
557 | map<NGHolder *, NGHolder *> merged; |
558 | |
559 | if (cluster.size() < 2) { |
560 | return merged; |
561 | } |
562 | |
563 | DEBUG_PRINTF("new cluster, size %zu\n" , cluster.size()); |
564 | |
565 | priority_queue<NfaMergeCandidateH> pq; |
566 | buildNfaMergeQueue(cluster, &pq); |
567 | |
568 | while (!pq.empty()) { |
569 | NGHolder &pholder = *pq.top().first; |
570 | NGHolder &vholder = *pq.top().second; |
571 | pq.pop(); |
572 | |
573 | if (contains(merged, &pholder) || contains(merged, &vholder)) { |
574 | DEBUG_PRINTF("dead\n" ); |
575 | continue; |
576 | } |
577 | |
578 | if (!mergeNfaPair(vholder, pholder, rm, cc)) { |
579 | DEBUG_PRINTF("merge failed\n" ); |
580 | continue; |
581 | } |
582 | |
583 | merged.emplace(&vholder, &pholder); |
584 | |
        // Seek closure: anything previously merged into vholder is now part
        // of pholder, so redirect its mapping to the surviving holder.
586 | for (auto &m : merged) { |
587 | if (m.second == &vholder) { |
588 | m.second = &pholder; |
589 | } |
590 | } |
591 | } |
592 | |
593 | return merged; |
594 | } |
595 | |
596 | } // namespace ue2 |
597 | |