1 | /* |
2 | * Copyright (c) 2015-2018, Intel Corporation |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions are met: |
6 | * |
7 | * * Redistributions of source code must retain the above copyright notice, |
8 | * this list of conditions and the following disclaimer. |
9 | * * Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * * Neither the name of Intel Corporation nor the names of its contributors |
13 | * may be used to endorse or promote products derived from this software |
14 | * without specific prior written permission. |
15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
17 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
19 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
20 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
21 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
22 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
23 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
24 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
25 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
26 | * POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
28 | |
29 | /** \file |
30 | * \brief Runtime functions shared between various Rose runtime code. |
31 | */ |
32 | |
33 | #ifndef ROSE_RUNTIME_H |
34 | #define ROSE_RUNTIME_H |
35 | |
36 | #include "rose_internal.h" |
37 | #include "scratch.h" |
38 | #include "util/partial_store.h" |
39 | |
40 | /* |
41 | * ROSE STATE LAYOUT: |
42 | * |
43 | * - runtime status byte (halt status, delay rebuild dirty, etc) |
44 | * - rose state multibit |
45 | * - active leaf array (multibit) |
46 | * - active leftfix array (multibit) |
47 | * - leftfix lag table |
48 | * - anchored matcher state |
49 | * - literal groups |
50 | * - history buffer |
51 | * - exhausted bitvector |
52 | * - som slots, som multibit arrays |
53 | * - nfa stream state (for each nfa) |
54 | */ |
55 | |
56 | #define rose_inline really_inline |
57 | |
58 | /* Maximum offset that we will eagerly run prefixes to. Beyond this point, eager |
59 | * prefixes are always run in exactly the same way as normal prefixes. */ |
60 | #define EAGER_STOP_OFFSET 64 |
61 | |
62 | |
63 | static really_inline |
64 | const void *getByOffset(const struct RoseEngine *t, u32 offset) { |
65 | assert(offset < t->size); |
66 | return (const u8 *)t + offset; |
67 | } |
68 | |
69 | static really_inline |
70 | void *getRoleState(char *state) { |
71 | return state + ROSE_STATE_OFFSET_ROLE_MMBIT; |
72 | } |
73 | |
74 | /** \brief Fetch the active array for suffix nfas. */ |
75 | static really_inline |
76 | u8 *getActiveLeafArray(const struct RoseEngine *t, char *state) { |
77 | return (u8 *)(state + t->stateOffsets.activeLeafArray); |
78 | } |
79 | |
80 | /** \brief Fetch the active array for rose nfas. */ |
81 | static really_inline |
82 | u8 *getActiveLeftArray(const struct RoseEngine *t, char *state) { |
83 | return (u8 *)(state + t->stateOffsets.activeLeftArray); |
84 | } |
85 | |
86 | static really_inline |
87 | rose_group loadGroups(const struct RoseEngine *t, const char *state) { |
88 | return partial_load_u64a(state + t->stateOffsets.groups, |
89 | t->stateOffsets.groups_size); |
90 | |
91 | } |
92 | |
93 | static really_inline |
94 | void storeGroups(const struct RoseEngine *t, char *state, rose_group groups) { |
95 | partial_store_u64a(state + t->stateOffsets.groups, groups, |
96 | t->stateOffsets.groups_size); |
97 | } |
98 | |
99 | static really_inline |
100 | u8 *getLongLitState(const struct RoseEngine *t, char *state) { |
101 | return (u8 *)(state + t->stateOffsets.longLitState); |
102 | } |
103 | |
104 | static really_inline |
105 | u8 *getLeftfixLagTable(const struct RoseEngine *t, char *state) { |
106 | return (u8 *)(state + t->stateOffsets.leftfixLagTable); |
107 | } |
108 | |
109 | static really_inline |
110 | const u8 *getLeftfixLagTableConst(const struct RoseEngine *t, |
111 | const char *state) { |
112 | return (const u8 *)(state + t->stateOffsets.leftfixLagTable); |
113 | } |
114 | |
115 | static really_inline |
116 | u32 has_chained_nfas(const struct RoseEngine *t) { |
117 | return t->outfixBeginQueue; |
118 | } |
119 | |
120 | static really_inline |
121 | void updateLastMatchOffset(struct RoseContext *tctxt, u64a offset) { |
122 | DEBUG_PRINTF("match @%llu, last match @%llu\n" , offset, |
123 | tctxt->lastMatchOffset); |
124 | |
125 | assert(offset >= tctxt->minMatchOffset); |
126 | assert(offset >= tctxt->lastMatchOffset); |
127 | tctxt->lastMatchOffset = offset; |
128 | } |
129 | |
130 | static really_inline |
131 | void updateLastCombMatchOffset(struct RoseContext *tctxt, u64a offset) { |
132 | DEBUG_PRINTF("match @%llu, last match @%llu\n" , offset, |
133 | tctxt->lastCombMatchOffset); |
134 | |
135 | assert(offset >= tctxt->lastCombMatchOffset); |
136 | tctxt->lastCombMatchOffset = offset; |
137 | } |
138 | |
139 | static really_inline |
140 | void updateMinMatchOffset(struct RoseContext *tctxt, u64a offset) { |
141 | DEBUG_PRINTF("min match now @%llu, was @%llu\n" , offset, |
142 | tctxt->minMatchOffset); |
143 | |
144 | assert(offset >= tctxt->minMatchOffset); |
145 | assert(offset >= tctxt->minNonMpvMatchOffset); |
146 | tctxt->minMatchOffset = offset; |
147 | tctxt->minNonMpvMatchOffset = offset; |
148 | } |
149 | |
150 | static really_inline |
151 | void updateMinMatchOffsetFromMpv(struct RoseContext *tctxt, u64a offset) { |
152 | DEBUG_PRINTF("min match now @%llu, was @%llu\n" , offset, |
153 | tctxt->minMatchOffset); |
154 | |
155 | assert(offset >= tctxt->minMatchOffset); |
156 | assert(tctxt->minNonMpvMatchOffset >= tctxt->minMatchOffset); |
157 | tctxt->minMatchOffset = offset; |
158 | tctxt->minNonMpvMatchOffset = MAX(tctxt->minNonMpvMatchOffset, offset); |
159 | } |
#endif /* ROSE_RUNTIME_H */
161 | |