/*
 * Copyright (c) 2016, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/** \file
    \brief Tamarama: container engine for exclusive engines, runtime code.
*/
#include "config.h"

#include "tamarama.h"

#include "tamarama_internal.h"
#include "nfa_api.h"
#include "nfa_api_queue.h"
#include "nfa_api_util.h"
#include "nfa_internal.h"
#include "scratch.h"
#include "util/partial_store.h"

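// A Tamarama multiplexes a set of mutually exclusive subengines behind a
// single NFA interface. As implied by the accessors below, the bytecode
// layout is: the Tamarama header, an array of numSubEngines base top values,
// an array of numSubEngines u32 subengine offsets, and then the subengines
// themselves.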
static really_inline
u32 getSubOffset(const struct Tamarama *t, u32 num) {
    DEBUG_PRINTF("subengine:%u\n", num);
    assert(num < t->numSubEngines);
    const u32 *sub =
        (const u32 *)((const char *)t + sizeof(struct Tamarama) +
                      t->numSubEngines * sizeof(u32));
    assert(ISALIGNED(sub));
    return sub[num];
}

static
const struct NFA *getSubEngine(const struct Tamarama *t,
                               const u32 activeIdx) {
    const u32 offset = getSubOffset(t, activeIdx);
    DEBUG_PRINTF("activeIdx:%u offsets:%u\n", activeIdx, offset);
    const char *base = (const char *)t;
    return (const struct NFA *)(base + offset);
}

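// The first activeIdxSize bytes of stream state hold the index of the
// currently active subengine; numSubEngines serves as a sentinel meaning
// that no subengine has been triggered yet.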
static
void storeActiveIdx(const struct Tamarama *t, char *state,
                    const u32 idx) {
    assert(idx <= t->numSubEngines);
    partial_store_u32(state, idx, t->activeIdxSize);
}

static
u32 loadActiveIdx(const char *state,
                  const u32 activeIdxSize) {
    return partial_load_u32(state, activeIdxSize);
}

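// Mirror the main queue's properties into a subqueue; the subengine's
// stream state begins immediately after the stored active index.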
static really_inline
void copyQueueProperties(const struct mq *q1, struct mq *q2,
                         const u32 activeIdxSize) {
    q2->state = q1->state;
    q2->streamState = q1->streamState + activeIdxSize;
    q2->offset = q1->offset;
    q2->buffer = q1->buffer;
    q2->length = q1->length;
    q2->history = q1->history;
    q2->hlength = q1->hlength;
    q2->cb = q1->cb;
    q2->context = q1->context;
    q2->scratch = q1->scratch;
    q2->report_current = q1->report_current;
}

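// Copy top events belonging to the subengine at activeIdx from the main
// queue into the subqueue, translating each top into the subengine's own
// event space. Each subengine owns the contiguous top range
// [baseTop[activeIdx], baseTop[activeIdx + 1]); copying stops at the first
// event outside that range, which is replaced with an MQE_END.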
static
void copyQueueItems(const struct Tamarama *t, const struct NFA *sub,
                    struct mq *q1, struct mq *q2, const u32 activeIdx) {
    const u32 *baseTop = (const u32 *)((const char *)t +
                                       sizeof(struct Tamarama));

    u32 lower = baseTop[activeIdx];
    u32 upper = activeIdx == t->numSubEngines - 1 ?
                    ~0U : baseTop[activeIdx + 1];
    u32 event_base = isMultiTopType(sub->type) ? MQE_TOP_FIRST : MQE_TOP;
    while (q1->cur < q1->end) {
        u32 type = q1->items[q1->cur].type;
        s64a loc = q1->items[q1->cur].location;
        DEBUG_PRINTF("type:%u lower:%u upper:%u\n", type, lower, upper);
        if (type >= lower && type < upper) {
            u32 event = event_base;
            if (event == MQE_TOP_FIRST) {
                event += type - lower;
            }
            pushQueue(q2, event, loc);
        } else {
            pushQueueNoMerge(q2, MQE_END, loc);
            break;
        }
        q1->cur++;
    }
}

static
void copyQueue(const struct Tamarama *t, const struct NFA *sub,
               struct mq *q1, struct mq *q2, const u32 activeIdx) {
    copyQueueProperties(q1, q2, t->activeIdxSize);

    // copy MQE_START item
    u32 cur = q1->cur++;
    q2->cur = cur;
    q2->items[cur] = q1->items[cur];
    q2->end = cur + 1;

    copyQueueItems(t, sub, q1, q2, activeIdx);
    // restore cur index of the main queue
    q1->cur = cur;
}

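// Find the subengine whose top range contains the given top value; returns
// numSubEngines if no range matches.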
static
u32 findEngineForTop(const u32 *baseTop, const u32 cur,
                     const u32 numSubEngines) {
    u32 i;
    for (i = 0; i < numSubEngines; ++i) {
        DEBUG_PRINTF("cur:%u base:%u\n", cur, baseTop[i]);
        if (cur >= baseTop[i] &&
            (i == numSubEngines - 1 || cur < baseTop[i + 1])) {
            break;
        }
    }
    return i;
}

static
void initSubQueue(const struct Tamarama *t, struct mq *q1,
                  struct mq *q2, const u32 lastActiveIdx,
                  const u32 activeIdx) {
    // Push events to the new queue
    const struct NFA *sub = getSubEngine(t, activeIdx);
    assert(!isContainerType(sub->type));
    q2->nfa = sub;

    // Reinitialize state if the last active subengine is different
    // from the current one
    if (lastActiveIdx == t->numSubEngines ||
        lastActiveIdx != activeIdx) {
        nfaQueueInitState(q2->nfa, q2);
    }

    copyQueueItems(t, sub, q1, q2, activeIdx);
    if (q1->items[q1->cur].type == MQE_END) {
        q1->cur++;
    }
    DEBUG_PRINTF("update lastIdx:%u\n", activeIdx);
    storeActiveIdx(t, q1->streamState, activeIdx);
}

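// Carve the next run of events off the main queue into the subqueue and
// select the subengine that should consume it. If the active subengine has
// changed, the previously active one is first flushed with an MQE_END so it
// can be run to completion before the new one takes over.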
static
void updateQueues(const struct Tamarama *t, struct mq *q1, struct mq *q2) {
    q2->cur = q2->end = 0;
    copyQueueProperties(q1, q2, t->activeIdxSize);

    const u32 numSubEngines = t->numSubEngines;
    u32 lastActiveIdx = loadActiveIdx(q1->streamState,
                                      t->activeIdxSize);
#ifdef DEBUG
    DEBUG_PRINTF("external queue\n");
    debugQueue(q1);
#endif

    // Push MQE_START event to the subqueue
    s64a loc = q1->items[q1->cur].location;
    pushQueueAt(q2, 0, MQE_START, loc);
    char hasStart = 0;
    if (q1->items[q1->cur].type == MQE_START) {
        hasStart = 1;
        q1->cur++;
    }

    u32 activeIdx = lastActiveIdx;
    // If we have top events in the main queue, update the current active id
    if (q1->cur < q1->end - 1) {
        const u32 *baseTop = (const u32 *)((const char *)t +
                                           sizeof(struct Tamarama));
        u32 curTop = q1->items[q1->cur].type;
        activeIdx = findEngineForTop(baseTop, curTop, numSubEngines);
    }

    assert(activeIdx < numSubEngines);
    DEBUG_PRINTF("last id:%u, current id:%u, num of subengines:%u\n",
                 lastActiveIdx, activeIdx, numSubEngines);
    // Handle an unfinished, still-alive last subengine
    if (lastActiveIdx != activeIdx &&
        lastActiveIdx != numSubEngines && hasStart) {
        loc = q1->items[q1->cur].location;
        pushQueueNoMerge(q2, MQE_END, loc);
        q2->nfa = getSubEngine(t, lastActiveIdx);
        return;
    }

    initSubQueue(t, q1, q2, lastActiveIdx, activeIdx);
    DEBUG_PRINTF("finish queues\n");
}

// After running a subengine on the subqueue items, copy any remaining
// subqueue items back to the Tamarama main queue.
static
void copyBack(const struct Tamarama *t, struct mq *q, struct mq *q1) {
    DEBUG_PRINTF("copy back %u, %u\n", q1->cur, q1->end);
    q->report_current = q1->report_current;
    if (q->cur >= q->end && q1->cur >= q1->end) {
        return;
    }

    const u32 *baseTop = (const u32 *)((const char *)t +
                                       sizeof(struct Tamarama));
    const u32 lastIdx = loadActiveIdx(q->streamState,
                                      t->activeIdxSize);
    u32 base = 0, event_base = 0;
    if (lastIdx != t->numSubEngines) {
        base = baseTop[lastIdx];
        const struct NFA *sub = getSubEngine(t, lastIdx);
        event_base = isMultiTopType(sub->type) ? MQE_TOP_FIRST : MQE_TOP;
    }

    u32 numItems = q1->end > q1->cur + 1 ? q1->end - q1->cur - 1 : 1;
    // Also need to copy MQE_END if the main queue is empty
    if (q->cur == q->end) {
        assert(q->cur > 1 && q1->items[q1->end - 1].type == MQE_END);
        q->items[--q->cur] = q1->items[q1->end - 1];
    }
    u32 cur = q->cur - numItems;
    q->items[cur] = q1->items[q1->cur++];
    q->items[cur].type = MQE_START;
    q->cur = cur++;
    for (u32 i = 0; i < numItems - 1; ++i) {
        assert(q1->cur < q1->end);
        u32 type = q1->items[q1->cur].type;
        if (type > MQE_END) {
            q1->items[q1->cur].type = type - event_base + base;
        }
        q->items[cur++] = q1->items[q1->cur++];
    }

#ifdef DEBUG
    DEBUG_PRINTF("external queue\n");
    debugQueue(q);
#endif
}
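
// The entry points below share a common pattern: load the active subengine
// index from stream state, return early if no subengine is active, and
// otherwise delegate to the corresponding nfa API call on that subengine.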

char nfaExecTamarama_testEOD(const struct NFA *n, const char *state,
                             const char *streamState, u64a offset,
                             NfaCallback callback, void *context) {
    const struct Tamarama *t = getImplNfa(n);
    u32 activeIdx = loadActiveIdx(streamState, t->activeIdxSize);
    if (activeIdx == t->numSubEngines) {
        return MO_CONTINUE_MATCHING;
    }

    const struct NFA *sub = getSubEngine(t, activeIdx);
    if (nfaAcceptsEod(sub)) {
        assert(!isContainerType(sub->type));
        const char *subStreamState = streamState + t->activeIdxSize;
        return nfaCheckFinalState(sub, state, subStreamState, offset, callback,
                                  context);
    }

    return MO_CONTINUE_MATCHING;
}

char nfaExecTamarama_QR(const struct NFA *n, struct mq *q, ReportID report) {
    DEBUG_PRINTF("exec rose\n");
    struct mq q1;
    q1.cur = q1.end = 0;
    char rv = 0;
    const struct Tamarama *t = getImplNfa(n);
    while (q->cur < q->end) {
        updateQueues(t, q, &q1);
    }

    if (q1.cur < q1.end) {
        rv = nfaQueueExecRose(q1.nfa, &q1, report);
    }

    DEBUG_PRINTF("exec rose rv:%u\n", rv);
    return rv;
}

char nfaExecTamarama_reportCurrent(const struct NFA *n, struct mq *q) {
    const struct Tamarama *t = getImplNfa(n);
    u32 activeIdx = loadActiveIdx(q->streamState, t->activeIdxSize);
    if (activeIdx == t->numSubEngines) {
        return 1;
    }

    const struct NFA *sub = getSubEngine(t, activeIdx);
    struct mq q1;
    copyQueue(t, sub, q, &q1, activeIdx);
    return nfaReportCurrentMatches(sub, &q1);
}

char nfaExecTamarama_inAccept(const struct NFA *n, ReportID report,
                              struct mq *q) {
    const struct Tamarama *t = getImplNfa(n);
    u32 activeIdx = loadActiveIdx(q->streamState, t->activeIdxSize);
    if (activeIdx == t->numSubEngines) {
        return 0;
    }
    const struct NFA *sub = getSubEngine(t, activeIdx);

    struct mq q1;
    copyQueue(t, sub, q, &q1, activeIdx);
    return nfaInAcceptState(sub, report, &q1);
}

char nfaExecTamarama_inAnyAccept(const struct NFA *n, struct mq *q) {
    const struct Tamarama *t = getImplNfa(n);
    u32 activeIdx = loadActiveIdx(q->streamState, t->activeIdxSize);
    if (activeIdx == t->numSubEngines) {
        return 0;
    }
    const struct NFA *sub = getSubEngine(t, activeIdx);

    struct mq q1;
    copyQueue(t, sub, q, &q1, activeIdx);
    return nfaInAnyAcceptState(sub, &q1);
}

char nfaExecTamarama_queueInitState(const struct NFA *n, struct mq *q) {
    DEBUG_PRINTF("init state\n");
    const struct Tamarama *t = getImplNfa(n);
    char *ptr = q->streamState;
    // Use numSubEngines as a sentinel value and initialize the state to
    // an invalid engine as nothing has been triggered yet
    storeActiveIdx(t, ptr, t->numSubEngines);
    return 0;
}

char nfaExecTamarama_queueCompressState(const struct NFA *n, const struct mq *q,
                                        s64a loc) {
    const struct Tamarama *t = getImplNfa(n);
    u32 activeIdx = loadActiveIdx(q->streamState, t->activeIdxSize);
    if (activeIdx == t->numSubEngines) {
        return 0;
    }

    const struct NFA *sub = getSubEngine(t, activeIdx);

    struct mq q1;
    copyQueueProperties(q, &q1, t->activeIdxSize);
    return nfaQueueCompressState(sub, &q1, loc);
}

char nfaExecTamarama_expandState(const struct NFA *n, void *dest,
                                 const void *src, u64a offset, u8 key) {
    const struct Tamarama *t = getImplNfa(n);
    u32 activeIdx = loadActiveIdx(src, t->activeIdxSize);
    if (activeIdx == t->numSubEngines) {
        return 0;
    }

    const struct NFA *sub = getSubEngine(t, activeIdx);

    const char *subStreamState = (const char *)src + t->activeIdxSize;
    return nfaExpandState(sub, dest, subStreamState, offset, key);
}

enum nfa_zombie_status nfaExecTamarama_zombie_status(const struct NFA *n,
                                                     struct mq *q, s64a loc) {
    const struct Tamarama *t = getImplNfa(n);
    u32 activeIdx = loadActiveIdx(q->streamState, t->activeIdxSize);
    if (activeIdx == t->numSubEngines) {
        return NFA_ZOMBIE_NO;
    }
    const struct NFA *sub = getSubEngine(t, activeIdx);

    struct mq q1;
    copyQueue(t, sub, q, &q1, activeIdx);
    return nfaGetZombieStatus(sub, &q1, loc);
}

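// Queue execution: repeatedly peel off the events destined for a single
// subengine, run that subengine up to 'end', and finally copy any
// unconsumed events back to the main queue.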
char nfaExecTamarama_Q(const struct NFA *n, struct mq *q, s64a end) {
    DEBUG_PRINTF("exec\n");
    struct mq q1;
    char rv = MO_ALIVE;
    char copy = 0;
    const struct Tamarama *t = getImplNfa(n);
    while (q->cur < q->end && q_cur_loc(q) <= end) {
        updateQueues(t, q, &q1);
        rv = nfaQueueExec_raw(q1.nfa, &q1, end);
        q->report_current = q1.report_current;
        copy = 1;
        if (can_stop_matching(q->scratch)) {
            break;
        }
    }
    if (copy) {
        copyBack(t, q, &q1);
    }
    return rv;
}
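
// As above, but stop as soon as a subengine reports MO_MATCHES_PENDING so
// that the caller can collect those matches before resuming.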
char nfaExecTamarama_Q2(const struct NFA *n, struct mq *q, s64a end) {
    DEBUG_PRINTF("exec to match\n");
    struct mq q1;
    char rv = 0;
    char copy = 0;
    const struct Tamarama *t = getImplNfa(n);
    while (q->cur < q->end && q_cur_loc(q) <= end &&
           rv != MO_MATCHES_PENDING) {
        updateQueues(t, q, &q1);
        rv = nfaQueueExec2_raw(q1.nfa, &q1, end);
        q->report_current = q1.report_current;
        copy = 1;
        if (can_stop_matching(q->scratch)) {
            break;
        }
    }
    if (copy) {
        copyBack(t, q, &q1);
    }
    return rv;
}