1 | /* |
2 | * Copyright (c) 2015-2016, Intel Corporation |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions are met: |
6 | * |
7 | * * Redistributions of source code must retain the above copyright notice, |
8 | * this list of conditions and the following disclaimer. |
9 | * * Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * * Neither the name of Intel Corporation nor the names of its contributors |
13 | * may be used to endorse or promote products derived from this software |
14 | * without specific prior written permission. |
15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
17 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
19 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
20 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
26 | * POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
28 | |
29 | /** \file |
30 | \brief Dispatches NFA engine API calls to the appropriate engines |
31 | */ |
32 | #include "nfa_api.h" |
33 | |
34 | #include "nfa_api_queue.h" |
35 | #include "nfa_internal.h" |
36 | #include "ue2common.h" |
37 | |
38 | // Engine implementations. |
39 | #include "castle.h" |
40 | #include "gough.h" |
41 | #include "lbr.h" |
42 | #include "limex.h" |
43 | #include "mcclellan.h" |
44 | #include "mcsheng.h" |
45 | #include "mpv.h" |
46 | #include "sheng.h" |
47 | #include "tamarama.h" |
48 | |
/* Expands to one switch case that tail-calls the engine-specific
 * implementation of an NFA API function.
 *
 *   dc_ltype:     NFA type enum value for this case label (e.g. LIMEX_NFA_32).
 *   dc_ftype:     token pasted into the engine function name, producing
 *                 nfaExec<dc_ftype><dc_func_call> (e.g. LimEx32).
 *   dc_func_call: API suffix plus argument list, e.g. _Q(nfa, q, end).
 *
 * The trailing break is unreachable after the return; it is kept as
 * defensive switch hygiene. */
#define DISPATCH_CASE(dc_ltype, dc_ftype, dc_func_call)                     \
    case dc_ltype:                                                          \
        return nfaExec##dc_ftype##dc_func_call;                             \
        break
53 | |
54 | // general framework calls |
55 | |
/* Switches on nfa->type and forwards the call (dbnt_func is the API suffix
 * plus argument list) to the matching engine implementation via
 * DISPATCH_CASE. Every known engine type returns from inside the switch;
 * an unrecognized type trips assert(0) in debug builds and falls through to
 * the statement following the macro in release builds. */
#define DISPATCH_BY_NFA_TYPE(dbnt_func)                                     \
    switch (nfa->type) {                                                    \
        DISPATCH_CASE(LIMEX_NFA_32, LimEx32, dbnt_func);                    \
        DISPATCH_CASE(LIMEX_NFA_64, LimEx64, dbnt_func);                    \
        DISPATCH_CASE(LIMEX_NFA_128, LimEx128, dbnt_func);                  \
        DISPATCH_CASE(LIMEX_NFA_256, LimEx256, dbnt_func);                  \
        DISPATCH_CASE(LIMEX_NFA_384, LimEx384, dbnt_func);                  \
        DISPATCH_CASE(LIMEX_NFA_512, LimEx512, dbnt_func);                  \
        DISPATCH_CASE(MCCLELLAN_NFA_8, McClellan8, dbnt_func);              \
        DISPATCH_CASE(MCCLELLAN_NFA_16, McClellan16, dbnt_func);            \
        DISPATCH_CASE(GOUGH_NFA_8, Gough8, dbnt_func);                      \
        DISPATCH_CASE(GOUGH_NFA_16, Gough16, dbnt_func);                    \
        DISPATCH_CASE(MPV_NFA, Mpv, dbnt_func);                             \
        DISPATCH_CASE(LBR_NFA_DOT, LbrDot, dbnt_func);                      \
        DISPATCH_CASE(LBR_NFA_VERM, LbrVerm, dbnt_func);                    \
        DISPATCH_CASE(LBR_NFA_NVERM, LbrNVerm, dbnt_func);                  \
        DISPATCH_CASE(LBR_NFA_SHUF, LbrShuf, dbnt_func);                    \
        DISPATCH_CASE(LBR_NFA_TRUF, LbrTruf, dbnt_func);                    \
        DISPATCH_CASE(CASTLE_NFA, Castle, dbnt_func);                       \
        DISPATCH_CASE(SHENG_NFA, Sheng, dbnt_func);                         \
        DISPATCH_CASE(TAMARAMA_NFA, Tamarama, dbnt_func);                   \
        DISPATCH_CASE(MCSHENG_NFA_8, McSheng8, dbnt_func);                  \
        DISPATCH_CASE(MCSHENG_NFA_16, McSheng16, dbnt_func);                \
    default:                                                                \
        assert(0);                                                          \
    }
82 | |
/* Dispatches the end-of-data accept check (_testEOD) to the engine for
 * nfa->type. The trailing return 0 is only reached if the type is
 * unrecognized (assert(0) fires in debug builds). */
char nfaCheckFinalState(const struct NFA *nfa, const char *state,
                        const char *streamState, u64a offset,
                        NfaCallback callback, void *context) {
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));

    // Caller should avoid calling us if we can never produce matches.
    assert(nfaAcceptsEod(nfa));

    DISPATCH_BY_NFA_TYPE(_testEOD(nfa, state, streamState, offset, callback,
                                  context));
    return 0;
}
95 | |
/* Dispatches queue state initialization (_queueInitState) to the engine for
 * nfa->type. Returns 0 only on an unrecognized type. */
char nfaQueueInitState(const struct NFA *nfa, struct mq *q) {
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));

    DISPATCH_BY_NFA_TYPE(_queueInitState(nfa, q));
    return 0;
}
102 | |
/* Internal dispatch for the "run queue to end" entry point (_Q); shared by
 * nfaQueueExec and nfaQueueExec_raw. */
static really_inline
char nfaQueueExec_i(const struct NFA *nfa, struct mq *q, s64a end) {
    DISPATCH_BY_NFA_TYPE(_Q(nfa, q, end));
    return 0;
}
108 | |
/* Internal dispatch for the "run queue, stop at match" entry point (_Q2);
 * shared by nfaQueueExecToMatch and nfaQueueExec2_raw. */
static really_inline
char nfaQueueExec2_i(const struct NFA *nfa, struct mq *q, s64a end) {
    DISPATCH_BY_NFA_TYPE(_Q2(nfa, q, end));
    return 0;
}
114 | |
/* Raw variant of nfaQueueExec: runs the engine directly, skipping the
 * maxOffset/maxBiAnchoredWidth pre-checks and queue trimming. */
char nfaQueueExec_raw(const struct NFA *nfa, struct mq *q, s64a end) {
    return nfaQueueExec_i(nfa, q, end);
}
118 | |
/* Raw variant of nfaQueueExecToMatch: runs the engine directly, skipping the
 * maxOffset/maxBiAnchoredWidth pre-checks and queue trimming. */
char nfaQueueExec2_raw(const struct NFA *nfa, struct mq *q, s64a end) {
    return nfaQueueExec2_i(nfa, q, end);
}
122 | |
/* Internal dispatch for the Rose-specific queue execution entry point (_QR),
 * which tracks a single ReportID; used by nfaQueueExecRose. */
static really_inline
char nfaQueueExecRose_i(const struct NFA *nfa, struct mq *q, ReportID report) {
    DISPATCH_BY_NFA_TYPE(_QR(nfa, q, report));
    return 0;
}
128 | |
/** Returns 0 if this NFA cannot possibly match (due to width constraints etc)
 * and the caller should return 0. May also edit the queue: events located
 * past the NFA's maxOffset are trimmed back, and *q_trimmed is set to 1 when
 * any trimming occurs. */
static really_inline
char nfaQueueCanMatch(const struct NFA *nfa, struct mq *q, s64a end,
                      char *q_trimmed) {
    assert(q_trimmed);
    assert(q->end - q->cur >= 2);
    assert(end >= 0);

    DEBUG_PRINTF("q->offset=%llu, end=%lld\n" , q->offset, end);
    DEBUG_PRINTF("maxBiAnchoredWidth=%u, maxOffset=%u\n" ,
                 nfa->maxBiAnchoredWidth, nfa->maxOffset);

    /* If the stream has already grown beyond the bi-anchored width bound,
     * no match is possible. */
    if (nfa->maxBiAnchoredWidth &&
        (end + q->offset > nfa->maxBiAnchoredWidth)) {
        DEBUG_PRINTF("stream too long: o %llu l %zu max: %hhu\n" , q->offset,
                     q->length, nfa->maxBiAnchoredWidth);
        return 0;
    }

    if (nfa->maxOffset) {
        if (q->offset >= nfa->maxOffset) {
            DEBUG_PRINTF("stream is past maxOffset\n" );
            return 0;
        }

        if (q->offset + end > nfa->maxOffset) {
            /* maxEnd is the last queue-relative location at which a match
             * could still fall within maxOffset. */
            s64a maxEnd = nfa->maxOffset - q->offset;
            DEBUG_PRINTF("me %lld off %llu len = %lld\n" , maxEnd,
                         q->offset, end);
            /* Walk back from the tail: rewrite each event past maxEnd into
             * an MQE_END at maxEnd, then drop it (q->end--) if the event
             * before it is also past maxEnd. */
            while (q->end > q->cur
                   && q->items[q->end - 1].location > maxEnd) {
                *q_trimmed = 1;
                DEBUG_PRINTF("killing item %u %lld %u\n" , q->end,
                             q->items[q->end - 1].location,
                             q->items[q->end - 1].type);
                q->items[q->end - 1].location = maxEnd;
                q->items[q->end - 1].type = MQE_END;
                if (q->end - q->cur < 2
                    ||q->items[q->end - 2].location <= maxEnd) {
                    break;
                }
                q->end--;
            }

            if (q->end - q->cur < 2) { /* nothing left on q */
                DEBUG_PRINTF("queue empty\n" );
                return 0;
            }
        }

#ifdef DEBUG
        if (*q_trimmed) {
            debugQueue(q);
        }
#endif
    }

    return 1;
}
189 | |
190 | char nfaQueueExec(const struct NFA *nfa, struct mq *q, s64a end) { |
191 | DEBUG_PRINTF("nfa=%p end=%lld\n" , nfa, end); |
192 | #ifdef DEBUG |
193 | debugQueue(q); |
194 | #endif |
195 | |
196 | assert(q && q->context && q->state); |
197 | assert(end >= 0); |
198 | assert(q->cur < q->end); |
199 | assert(q->end <= MAX_MQE_LEN); |
200 | assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa))); |
201 | assert(end < q->items[q->end - 1].location |
202 | || q->items[q->end - 1].type == MQE_END); |
203 | |
204 | if (q->items[q->cur].location > end) { |
205 | return 1; |
206 | } |
207 | |
208 | char q_trimmed = 0; |
209 | |
210 | assert(end <= (s64a)q->length || !q->hlength); |
211 | /* due to reverse accel in block mode some queues may work on a truncated |
212 | * buffer */ |
213 | if (end > (s64a)q->length) { |
214 | end = q->length; |
215 | q_trimmed = 1; |
216 | } |
217 | |
218 | if (!nfaQueueCanMatch(nfa, q, end, &q_trimmed)) { |
219 | if (q->report_current) { |
220 | nfaReportCurrentMatches(nfa, q); |
221 | q->report_current = 0; |
222 | } |
223 | |
224 | return 0; |
225 | } |
226 | |
227 | char rv = nfaQueueExec_i(nfa, q, end); |
228 | |
229 | #ifdef DEBUG |
230 | debugQueue(q); |
231 | #endif |
232 | |
233 | assert(!q->report_current); |
234 | DEBUG_PRINTF("returned rv=%d, q_trimmed=%d\n" , rv, q_trimmed); |
235 | return rv && !q_trimmed; |
236 | } |
237 | |
238 | char nfaQueueExecToMatch(const struct NFA *nfa, struct mq *q, s64a end) { |
239 | DEBUG_PRINTF("nfa=%p end=%lld\n" , nfa, end); |
240 | #ifdef DEBUG |
241 | debugQueue(q); |
242 | #endif |
243 | |
244 | assert(q); |
245 | assert(end >= 0); |
246 | assert(q->state); |
247 | assert(q->cur < q->end); |
248 | assert(q->end <= MAX_MQE_LEN); |
249 | assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa))); |
250 | assert(end < q->items[q->end - 1].location |
251 | || q->items[q->end - 1].type == MQE_END); |
252 | |
253 | char q_trimmed_ra = 0; |
254 | assert(end <= (s64a)q->length || !q->hlength); |
255 | /* due to reverse accel in block mode some queues may work on a truncated |
256 | * buffer */ |
257 | if (q->items[q->cur].location > end) { |
258 | return 1; |
259 | } |
260 | |
261 | if (end > (s64a)q->length) { |
262 | end = q->length; |
263 | q_trimmed_ra = 1; |
264 | } |
265 | |
266 | char q_trimmed = 0; |
267 | if (!nfaQueueCanMatch(nfa, q, end, &q_trimmed)) { |
268 | if (q->report_current) { |
269 | nfaReportCurrentMatches(nfa, q); |
270 | q->report_current = 0; |
271 | } |
272 | |
273 | return 0; |
274 | } |
275 | |
276 | char rv = nfaQueueExec2_i(nfa, q, end); |
277 | assert(!q->report_current); |
278 | DEBUG_PRINTF("returned rv=%d, q_trimmed=%d\n" , rv, q_trimmed); |
279 | if (rv == MO_MATCHES_PENDING) { |
280 | if (q_trimmed) { |
281 | // We need to "fix" the queue so that subsequent operations must |
282 | // trim it as well. |
283 | assert(q->end > 0); |
284 | assert(nfa->maxOffset); |
285 | q->items[q->end - 1].location = nfa->maxOffset + 1; |
286 | } |
287 | return rv; |
288 | } |
289 | return rv && !q_trimmed && !q_trimmed_ra; |
290 | } |
291 | |
/* Dispatches _reportCurrent to the engine for nfa->type, firing callbacks
 * for matches at the queue's current location. */
char nfaReportCurrentMatches(const struct NFA *nfa, struct mq *q) {
    DISPATCH_BY_NFA_TYPE(_reportCurrent(nfa, q));
    return 0;
}
296 | |
/* Dispatches _inAccept to the engine for nfa->type: asks whether the engine
 * is in an accept state associated with the given report. */
char nfaInAcceptState(const struct NFA *nfa, ReportID report, struct mq *q) {
    DISPATCH_BY_NFA_TYPE(_inAccept(nfa, report, q));
    return 0;
}
301 | |
/* Dispatches _inAnyAccept to the engine for nfa->type: asks whether the
 * engine is in any accept state at all. */
char nfaInAnyAcceptState(const struct NFA *nfa, struct mq *q) {
    DISPATCH_BY_NFA_TYPE(_inAnyAccept(nfa, q));
    return 0;
}
306 | |
/* Rose-specific queue execution: runs the engine via the _QR dispatch for a
 * single ReportID. Unlike nfaQueueExec, no maxOffset trimming is applied,
 * and the queue must have no context and no pending current-location
 * reports (see asserts). */
char nfaQueueExecRose(const struct NFA *nfa, struct mq *q, ReportID r) {
    DEBUG_PRINTF("nfa=%p\n" , nfa);
#ifdef DEBUG
    debugQueue(q);
#endif

    assert(q && !q->context && q->state);
    assert(q->cur <= q->end);
    assert(q->end <= MAX_MQE_LEN);
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));
    assert(!q->report_current);

    return nfaQueueExecRose_i(nfa, q, r);
}
321 | |
/* Dispatches a reverse block-mode scan (_B_Reverse) over buf/hbuf to the
 * engine for nfa->type, reporting matches through callback/context. */
char nfaBlockExecReverse(const struct NFA *nfa, u64a offset, const u8 *buf,
                         size_t buflen, const u8 *hbuf, size_t hlen,
                         NfaCallback callback, void *context) {
    assert(nfa);
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));

    DISPATCH_BY_NFA_TYPE(_B_Reverse(nfa, offset, buf, buflen, hbuf, hlen,
                                    callback, context));
    return 0;
}
332 | |
/* Dispatches _queueCompressState to the engine for nfa->type: compresses the
 * queue's full state into stream state at location loc. */
char nfaQueueCompressState(const struct NFA *nfa, const struct mq *q,
                           s64a loc) {
    assert(nfa && q);
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));

    DISPATCH_BY_NFA_TYPE(_queueCompressState(nfa, q, loc));
    return 0;
}
341 | |
/* Dispatches _expandState to the engine for nfa->type: expands compressed
 * stream state from src into full state at dest. */
char nfaExpandState(const struct NFA *nfa, void *dest, const void *src,
                    u64a offset, u8 key) {
    assert(nfa && dest && src);
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));

    DISPATCH_BY_NFA_TYPE(_expandState(nfa, dest, src, offset, key));
    return 0;
}
350 | |
/* Dispatches _initCompressedState to the engine for nfa->type: writes a
 * fresh compressed start state into state. */
char nfaInitCompressedState(const struct NFA *nfa, u64a offset, void *state,
                            u8 key) {
    assert(nfa && state);
    assert(ISALIGNED_CL(nfa) && ISALIGNED_CL(getImplNfa(nfa)));

    DISPATCH_BY_NFA_TYPE(_initCompressedState(nfa, offset, state, key));
    return 0;
}
359 | |
/* Dispatches _zombie_status to the engine for nfa->type. Returns
 * NFA_ZOMBIE_NO only if the type is unrecognized (debug builds assert). */
enum nfa_zombie_status nfaGetZombieStatus(const struct NFA *nfa, struct mq *q,
                                          s64a loc) {
    DISPATCH_BY_NFA_TYPE(_zombie_status(nfa, q, loc));
    return NFA_ZOMBIE_NO;
}
365 | |