/*-
 * Copyright 1997-1999, 2001, John-Mark Gurney.
 * 2008-2009, Attractive Chaos <attractor@live.co.uk>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef NVIM_LIB_KBTREE_H
#define NVIM_LIB_KBTREE_H

#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>  // for the `bool is_internal` node field below
#include <assert.h>

#include "nvim/memory.h"

#define KB_MAX_DEPTH 64

#define __KB_KEY(type, x) (x->key)
#define __KB_PTR(btr, x) (x->ptr)
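
/* Node layout: each node stores up to 2*T-1 keys inline, followed by a
 * flexible array of child pointers that only exists for internal nodes.
 * Leaves are allocated with plain sizeof(kbnode); internal nodes are
 * allocated with ILEN, which adds room for 2*T child pointers.  The
 * `type`/`btr` arguments of __KB_KEY/__KB_PTR are unused in this
 * implementation and kept only for interface compatibility. */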

#define __KB_TREE_T(name, key_t, T) \
  typedef struct kbnode_##name##_s kbnode_##name##_t; \
  struct kbnode_##name##_s { \
    int32_t n; \
    bool is_internal; \
    key_t key[2*T-1]; \
    kbnode_##name##_t *ptr[]; \
  }; \
  \
  typedef struct { \
    kbnode_##name##_t *root; \
    int n_keys, n_nodes; \
  } kbtree_##name##_t; \
  \
  typedef struct { \
    kbnode_##name##_t *x; \
    int i; \
  } kbpos_##name##_t; \
  typedef struct { \
    kbpos_##name##_t stack[KB_MAX_DEPTH], *p; \
  } kbitr_##name##_t;

#define __kb_destroy(kbnode_t, b) do { \
    int i; \
    unsigned int max = 8; \
    kbnode_t *x, **top, **stack = 0; \
    if (b->root) { \
      top = stack = (kbnode_t **)xcalloc(max, sizeof(kbnode_t *)); \
      *top++ = (b)->root; \
      while (top != stack) { \
        x = *--top; \
        if (x->is_internal == 0) { XFREE_CLEAR(x); continue; } \
        for (i = 0; i <= x->n; ++i) \
          if (__KB_PTR(b, x)[i]) { \
            if (top - stack == (int)max) { \
              max <<= 1; \
              stack = (kbnode_t **)xrealloc(stack, max * sizeof(kbnode_t *)); \
              top = stack + (max >> 1); \
            } \
            *top++ = __KB_PTR(b, x)[i]; \
          } \
        XFREE_CLEAR(x); \
      } \
    } \
    XFREE_CLEAR(stack); \
  } while (0)
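
/* Example (a minimal sketch; the instantiation name `u32` used here and in
 * the examples below is illustrative, not part of this header):
 *
 *   kbtree_t(u32) tree;
 *   kb_init(&tree);
 *   kb_put(u32, &tree, 42);
 *   kb_destroy(u32, &tree);  // frees every node iteratively, no recursion
 *
 * kb_destroy frees the nodes but not the kbtree_t itself, and it leaves
 * root/n_keys stale; call kb_init again before reusing the tree. */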

#define __KB_GET_AUX1(name, key_t, kbnode_t, __cmp) \
  static inline int __kb_getp_aux_##name(const kbnode_t * __restrict x, key_t * __restrict k, int *r) \
  { \
    int tr, *rr, begin = 0, end = x->n; \
    if (x->n == 0) return -1; \
    rr = r? r : &tr; \
    while (begin < end) { \
      int mid = (begin + end) >> 1; \
      if (__cmp(__KB_KEY(key_t, x)[mid], *k) < 0) begin = mid + 1; \
      else end = mid; \
    } \
    if (begin == x->n) { *rr = 1; return x->n - 1; } \
    if ((*rr = __cmp(*k, __KB_KEY(key_t, x)[begin])) < 0) --begin; \
    return begin; \
  }
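
/* __kb_getp_aux_##name binary-searches a single node: it returns the index
 * of the largest key that is less than or equal to *k (-1 if every key in
 * the node is greater), and stores the comparison result for that slot in
 * *r, so *r == 0 means an exact match.  When the key is not in the node,
 * child i+1 is the subtree to descend into. */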

#define __KB_GET(name, key_t, kbnode_t) \
  static key_t *kb_getp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
  { \
    if (!b->root) { \
      return 0; \
    } \
    int i, r = 0; \
    kbnode_t *x = b->root; \
    while (x) { \
      i = __kb_getp_aux_##name(x, k, &r); \
      if (i >= 0 && r == 0) return &__KB_KEY(key_t, x)[i]; \
      if (x->is_internal == 0) return 0; \
      x = __KB_PTR(b, x)[i + 1]; \
    } \
    return 0; \
  } \
  static inline key_t *kb_get_##name(kbtree_##name##_t *b, key_t k) \
  { \
    return kb_getp_##name(b, &k); \
  }
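
/* Example (a minimal sketch, assuming the illustrative `u32` instantiation
 * over uint32_t keys):
 *
 *   uint32_t *p = kb_get(u32, &tree, 42);
 *   if (p) {
 *     // *p is the stored key that compares equal to 42
 *   }
 *
 * kb_getp takes the key by pointer, which is the cheaper form when key_t is
 * a large struct.  The returned pointer is invalidated by kb_put/kb_del. */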

#define __KB_INTERVAL(name, key_t, kbnode_t) \
  static inline void kb_intervalp_##name(kbtree_##name##_t *b, key_t * __restrict k, key_t **lower, key_t **upper) \
  { \
    if (!b->root) { \
      return; \
    } \
    int i, r = 0; \
    kbnode_t *x = b->root; \
    *lower = *upper = 0; \
    while (x) { \
      i = __kb_getp_aux_##name(x, k, &r); \
      if (i >= 0 && r == 0) { \
        *lower = *upper = &__KB_KEY(key_t, x)[i]; \
        return; \
      } \
      if (i >= 0) *lower = &__KB_KEY(key_t, x)[i]; \
      if (i < x->n - 1) *upper = &__KB_KEY(key_t, x)[i + 1]; \
      if (x->is_internal == 0) return; \
      x = __KB_PTR(b, x)[i + 1]; \
    } \
  } \
  static inline void kb_interval_##name(kbtree_##name##_t *b, key_t k, key_t **lower, key_t **upper) \
  { \
    kb_intervalp_##name(b, &k, lower, upper); \
  }
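
/* Example (a minimal sketch; `u32` as above):
 *
 *   uint32_t *lo = NULL, *up = NULL;  // pre-initialize: on an empty tree the
 *   kb_interval(u32, &tree, 42, &lo, &up);  // function returns without
 *                                           // touching lo/up
 *
 * On an exact match both point at the stored key; otherwise `lo` is the
 * largest key smaller than the query (or NULL if none) and `up` the
 * smallest key larger than it (or NULL if none). */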

#define __KB_PUT(name, key_t, kbnode_t, __cmp, T, ILEN) \
  /* x must be an internal node */ \
  static inline void __kb_split_##name(kbtree_##name##_t *b, kbnode_t *x, int i, kbnode_t *y) \
  { \
    kbnode_t *z; \
    z = (kbnode_t *)xcalloc(1, y->is_internal? ILEN : sizeof(kbnode_##name##_t)); \
    ++b->n_nodes; \
    z->is_internal = y->is_internal; \
    z->n = T - 1; \
    memcpy(__KB_KEY(key_t, z), &__KB_KEY(key_t, y)[T], sizeof(key_t) * (T - 1)); \
    if (y->is_internal) memcpy(__KB_PTR(b, z), &__KB_PTR(b, y)[T], sizeof(void *) * T); \
    y->n = T - 1; \
    memmove(&__KB_PTR(b, x)[i + 2], &__KB_PTR(b, x)[i + 1], sizeof(void *) * (unsigned int)(x->n - i)); \
    __KB_PTR(b, x)[i + 1] = z; \
    memmove(&__KB_KEY(key_t, x)[i + 1], &__KB_KEY(key_t, x)[i], sizeof(key_t) * (unsigned int)(x->n - i)); \
    __KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[T - 1]; \
    ++x->n; \
  } \
  static inline key_t *__kb_putp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, key_t * __restrict k) \
  { \
    int i = x->n - 1; \
    key_t *ret; \
    if (x->is_internal == 0) { \
      i = __kb_getp_aux_##name(x, k, 0); \
      if (i != x->n - 1) \
        memmove(&__KB_KEY(key_t, x)[i + 2], &__KB_KEY(key_t, x)[i + 1], (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
      ret = &__KB_KEY(key_t, x)[i + 1]; \
      *ret = *k; \
      ++x->n; \
    } else { \
      i = __kb_getp_aux_##name(x, k, 0) + 1; \
      if (__KB_PTR(b, x)[i]->n == 2 * T - 1) { \
        __kb_split_##name(b, x, i, __KB_PTR(b, x)[i]); \
        if (__cmp(*k, __KB_KEY(key_t, x)[i]) > 0) ++i; \
      } \
      ret = __kb_putp_aux_##name(b, __KB_PTR(b, x)[i], k); \
    } \
    return ret; \
  } \
  static inline key_t *kb_putp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
  { \
    if (!b->root) { \
      b->root = (kbnode_t *)xcalloc(1, ILEN); \
      ++b->n_nodes; \
    } \
    kbnode_t *r, *s; \
    ++b->n_keys; \
    r = b->root; \
    if (r->n == 2 * T - 1) { \
      ++b->n_nodes; \
      s = (kbnode_t *)xcalloc(1, ILEN); \
      b->root = s; s->is_internal = 1; s->n = 0; \
      __KB_PTR(b, s)[0] = r; \
      __kb_split_##name(b, s, 0, r); \
      r = s; \
    } \
    return __kb_putp_aux_##name(b, r, k); \
  } \
  static inline void kb_put_##name(kbtree_##name##_t *b, key_t k) \
  { \
    kb_putp_##name(b, &k); \
  }
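
/* Example (a minimal sketch; `u32` as above):
 *
 *   kb_put(u32, &tree, 42);                     // by value
 *   uint32_t k = 7;
 *   uint32_t *slot = kb_putp(u32, &tree, &k);   // by pointer; returns the slot
 *
 * Insertion is the classic single-pass scheme: full children are split on
 * the way down, so no path is ever revisited.  Note that kb_put never checks
 * for an existing equal key; inserting the same key twice stores two
 * copies. */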

#define __KB_DEL(name, key_t, kbnode_t, T) \
  static inline key_t __kb_delp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, key_t * __restrict k, int s) \
  { \
    int yn, zn, i, r = 0; \
    kbnode_t *xp, *y, *z; \
    key_t kp; \
    if (x == 0) return *k; \
    if (s) { /* s can only be 0, 1 or 2 */ \
      r = x->is_internal == 0? 0 : s == 1? 1 : -1; \
      i = s == 1? x->n - 1 : -1; \
    } else i = __kb_getp_aux_##name(x, k, &r); \
    if (x->is_internal == 0) { \
      if (s == 2) ++i; \
      kp = __KB_KEY(key_t, x)[i]; \
      memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, x)[i + 1], (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
      --x->n; \
      return kp; \
    } \
    if (r == 0) { \
      if ((yn = __KB_PTR(b, x)[i]->n) >= T) { \
        xp = __KB_PTR(b, x)[i]; \
        kp = __KB_KEY(key_t, x)[i]; \
        __KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 1); \
        return kp; \
      } else if ((zn = __KB_PTR(b, x)[i + 1]->n) >= T) { \
        xp = __KB_PTR(b, x)[i + 1]; \
        kp = __KB_KEY(key_t, x)[i]; \
        __KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 2); \
        return kp; \
      } else if (yn == T - 1 && zn == T - 1) { \
        y = __KB_PTR(b, x)[i]; z = __KB_PTR(b, x)[i + 1]; \
        __KB_KEY(key_t, y)[y->n++] = *k; \
        memmove(&__KB_KEY(key_t, y)[y->n], __KB_KEY(key_t, z), (unsigned int)z->n * sizeof(key_t)); \
        if (y->is_internal) memmove(&__KB_PTR(b, y)[y->n], __KB_PTR(b, z), (unsigned int)(z->n + 1) * sizeof(void *)); \
        y->n += z->n; \
        memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, x)[i + 1], (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
        memmove(&__KB_PTR(b, x)[i + 1], &__KB_PTR(b, x)[i + 2], (unsigned int)(x->n - i - 1) * sizeof(void *)); \
        --x->n; \
        XFREE_CLEAR(z); \
        return __kb_delp_aux_##name(b, y, k, s); \
      } \
    } \
    ++i; \
    if ((xp = __KB_PTR(b, x)[i])->n == T - 1) { \
      if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n >= T) { \
        memmove(&__KB_KEY(key_t, xp)[1], __KB_KEY(key_t, xp), (unsigned int)xp->n * sizeof(key_t)); \
        if (xp->is_internal) memmove(&__KB_PTR(b, xp)[1], __KB_PTR(b, xp), (unsigned int)(xp->n + 1) * sizeof(void *)); \
        __KB_KEY(key_t, xp)[0] = __KB_KEY(key_t, x)[i - 1]; \
        __KB_KEY(key_t, x)[i - 1] = __KB_KEY(key_t, y)[y->n - 1]; \
        if (xp->is_internal) __KB_PTR(b, xp)[0] = __KB_PTR(b, y)[y->n]; \
        --y->n; ++xp->n; \
      } else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n >= T) { \
        __KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
        __KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[0]; \
        if (xp->is_internal) __KB_PTR(b, xp)[xp->n] = __KB_PTR(b, y)[0]; \
        --y->n; \
        memmove(__KB_KEY(key_t, y), &__KB_KEY(key_t, y)[1], (unsigned int)y->n * sizeof(key_t)); \
        if (y->is_internal) memmove(__KB_PTR(b, y), &__KB_PTR(b, y)[1], (unsigned int)(y->n + 1) * sizeof(void *)); \
      } else if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n == T - 1) { \
        __KB_KEY(key_t, y)[y->n++] = __KB_KEY(key_t, x)[i - 1]; \
        memmove(&__KB_KEY(key_t, y)[y->n], __KB_KEY(key_t, xp), (unsigned int)xp->n * sizeof(key_t)); \
        if (y->is_internal) memmove(&__KB_PTR(b, y)[y->n], __KB_PTR(b, xp), (unsigned int)(xp->n + 1) * sizeof(void *)); \
        y->n += xp->n; \
        memmove(&__KB_KEY(key_t, x)[i - 1], &__KB_KEY(key_t, x)[i], (unsigned int)(x->n - i) * sizeof(key_t)); \
        memmove(&__KB_PTR(b, x)[i], &__KB_PTR(b, x)[i + 1], (unsigned int)(x->n - i) * sizeof(void *)); \
        --x->n; \
        XFREE_CLEAR(xp); \
        xp = y; \
      } else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n == T - 1) { \
        __KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
        memmove(&__KB_KEY(key_t, xp)[xp->n], __KB_KEY(key_t, y), (unsigned int)y->n * sizeof(key_t)); \
        if (xp->is_internal) memmove(&__KB_PTR(b, xp)[xp->n], __KB_PTR(b, y), (unsigned int)(y->n + 1) * sizeof(void *)); \
        xp->n += y->n; \
        memmove(&__KB_KEY(key_t, x)[i], &__KB_KEY(key_t, x)[i + 1], (unsigned int)(x->n - i - 1) * sizeof(key_t)); \
        memmove(&__KB_PTR(b, x)[i + 1], &__KB_PTR(b, x)[i + 2], (unsigned int)(x->n - i - 1) * sizeof(void *)); \
        --x->n; \
        XFREE_CLEAR(y); \
      } \
    } \
    return __kb_delp_aux_##name(b, xp, k, s); \
  } \
  static inline key_t kb_delp_##name(kbtree_##name##_t *b, key_t * __restrict k) \
  { \
    kbnode_t *x; \
    key_t ret; \
    ret = __kb_delp_aux_##name(b, b->root, k, 0); \
    --b->n_keys; \
    if (b->root->n == 0 && b->root->is_internal) { \
      --b->n_nodes; \
      x = b->root; \
      b->root = __KB_PTR(b, x)[0]; \
      XFREE_CLEAR(x); \
    } \
    return ret; \
  } \
  static inline key_t kb_del_##name(kbtree_##name##_t *b, key_t k) \
  { \
    return kb_delp_##name(b, &k); \
  }
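
/* Example (a minimal sketch; `u32` as above):
 *
 *   uint32_t removed = kb_del(u32, &tree, 42);  // returns the removed key
 *
 * Caveats visible in the code above: kb_delp assumes the key is present --
 * it decrements n_keys unconditionally and dereferences b->root -- so check
 * with kb_get first (or keep other bookkeeping) before deleting. */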

#define __KB_ITR(name, key_t, kbnode_t) \
  static inline void kb_itr_first_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    itr->p = NULL; \
    if (b->n_keys == 0) return; \
    itr->p = itr->stack; \
    itr->p->x = b->root; itr->p->i = 0; \
    while (itr->p->x->is_internal && __KB_PTR(b, itr->p->x)[0] != 0) { \
      kbnode_t *x = itr->p->x; \
      ++itr->p; \
      itr->p->x = __KB_PTR(b, x)[0]; itr->p->i = 0; \
    } \
  } \
  static inline int kb_itr_next_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    if (itr->p == NULL) return 0; \
    for (;;) { \
      ++itr->p->i; \
      assert(itr->p->i <= 21);  /* loose sanity bound on the per-node index */ \
      while (itr->p->x && itr->p->i <= itr->p->x->n) { \
        itr->p[1].i = 0; \
        itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[itr->p->i] : 0; \
        ++itr->p; \
      } \
      if (itr->p == itr->stack) { \
        itr->p = NULL; \
        return 0; \
      } \
      --itr->p; \
      if (itr->p->x && itr->p->i < itr->p->x->n) return 1; \
    } \
  } \
  static inline int kb_itr_prev_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    if (itr->p == NULL) return 0; \
    for (;;) { \
      while (itr->p->x && itr->p->i >= 0) { \
        itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[itr->p->i] : 0; \
        itr->p[1].i = itr->p[1].x ? itr->p[1].x->n : -1; \
        ++itr->p; \
      } \
      if (itr->p == itr->stack) { \
        itr->p = NULL; \
        return 0; \
      } \
      --itr->p; \
      --itr->p->i; \
      if (itr->p->x && itr->p->i >= 0) return 1; \
    } \
  } \
  static inline int kb_itr_getp_##name(kbtree_##name##_t *b, key_t * __restrict k, kbitr_##name##_t *itr) \
  { \
    if (b->n_keys == 0) { \
      itr->p = NULL; \
      return 0; \
    } \
    int i, r = 0; \
    itr->p = itr->stack; \
    itr->p->x = b->root; \
    while (itr->p->x) { \
      i = __kb_getp_aux_##name(itr->p->x, k, &r); \
      itr->p->i = i; \
      if (i >= 0 && r == 0) return 1; \
      ++itr->p->i; \
      assert(itr->p->i <= 21);  /* loose sanity bound on the per-node index */ \
      itr->p[1].x = itr->p->x->is_internal? __KB_PTR(b, itr->p->x)[i + 1] : 0; \
      ++itr->p; \
    } \
    itr->p->i = 0; \
    return 0; \
  } \
  static inline int kb_itr_get_##name(kbtree_##name##_t *b, key_t k, kbitr_##name##_t *itr) \
  { \
    return kb_itr_getp_##name(b, &k, itr); \
  } \
  static inline void kb_del_itr_##name(kbtree_##name##_t *b, kbitr_##name##_t *itr) \
  { \
    key_t k = kb_itr_key(itr); \
    kb_delp_##name(b, &k); \
    kb_itr_getp_##name(b, &k, itr); \
  }
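
/* Example (a minimal sketch; `u32` as above) -- in-order traversal with the
 * stack-based iterator:
 *
 *   kbitr_t(u32) itr;
 *   kb_itr_first(u32, &tree, &itr);
 *   for (; kb_itr_valid(&itr); kb_itr_next(u32, &tree, &itr)) {
 *     uint32_t k = kb_itr_key(&itr);
 *     // visit k in ascending order
 *   }
 *
 * The iterator keeps raw node pointers in its stack, so it is invalidated
 * by kb_put/kb_del; kb_del_itr re-seeks after deleting the current key for
 * exactly this reason. */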

#define KBTREE_INIT(name, key_t, __cmp, T) \
  KBTREE_INIT_IMPL(name, key_t, kbnode_##name##_t, __cmp, T, (sizeof(kbnode_##name##_t) + (2*T) * sizeof(void *)))

#define KBTREE_INIT_IMPL(name, key_t, kbnode_t, __cmp, T, ILEN) \
  __KB_TREE_T(name, key_t, T) \
  __KB_GET_AUX1(name, key_t, kbnode_t, __cmp) \
  __KB_GET(name, key_t, kbnode_t) \
  __KB_INTERVAL(name, key_t, kbnode_t) \
  __KB_PUT(name, key_t, kbnode_t, __cmp, T, ILEN) \
  __KB_DEL(name, key_t, kbnode_t, T) \
  __KB_ITR(name, key_t, kbnode_t)

#define KB_DEFAULT_SIZE 512

#define kbtree_t(name) kbtree_##name##_t
#define kbitr_t(name) kbitr_##name##_t
#define kb_init(b) ((b)->n_keys = (b)->n_nodes = 0, (b)->root = 0)
#define kb_destroy(name, b) __kb_destroy(kbnode_##name##_t, b)
#define kb_get(name, b, k) kb_get_##name(b, k)
#define kb_put(name, b, k) kb_put_##name(b, k)
#define kb_del(name, b, k) kb_del_##name(b, k)
#define kb_interval(name, b, k, l, u) kb_interval_##name(b, k, l, u)
#define kb_getp(name, b, k) kb_getp_##name(b, k)
#define kb_putp(name, b, k) kb_putp_##name(b, k)
#define kb_delp(name, b, k) kb_delp_##name(b, k)
#define kb_intervalp(name, b, k, l, u) kb_intervalp_##name(b, k, l, u)

#define kb_itr_first(name, b, i) kb_itr_first_##name(b, i)
#define kb_itr_get(name, b, k, i) kb_itr_get_##name(b, k, i)
#define kb_itr_getp(name, b, k, i) kb_itr_getp_##name(b, k, i)
#define kb_itr_next(name, b, i) kb_itr_next_##name(b, i)
#define kb_itr_prev(name, b, i) kb_itr_prev_##name(b, i)
#define kb_del_itr(name, b, i) kb_del_itr_##name(b, i)
#define kb_itr_key(itr) __KB_KEY(dummy, (itr)->p->x)[(itr)->p->i]
// Exhausted iterators are marked with p == NULL (see kb_itr_first/next/prev),
// so test for that directly instead of relationally comparing a null pointer
// against the stack array, which is undefined behaviour.
#define kb_itr_valid(itr) ((itr)->p != NULL)

#define kb_size(b) ((b)->n_keys)

#define kb_generic_cmp(a, b) (((b) < (a)) - ((a) < (b)))
#define kb_str_cmp(a, b) strcmp(a, b)
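
/* kb_generic_cmp is a three-way comparison built from two `<` tests, so it
 * yields -1, 0 or 1 (e.g. kb_generic_cmp(3, 7) == -1) and never overflows
 * the way `(a) - (b)` can for signed or wide integer types. */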

#endif // NVIM_LIB_KBTREE_H