1 | /* -*- c-basic-offset: 2 -*- */ |
2 | /* |
3 | Copyright(C) 2009-2016 Brazil |
4 | |
5 | This library is free software; you can redistribute it and/or |
6 | modify it under the terms of the GNU Lesser General Public |
7 | License version 2.1 as published by the Free Software Foundation. |
8 | |
9 | This library is distributed in the hope that it will be useful, |
10 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
12 | Lesser General Public License for more details. |
13 | |
14 | You should have received a copy of the GNU Lesser General Public |
15 | License along with this library; if not, write to the Free Software |
16 | Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA |
17 | */ |
18 | #include "grn_hash.h" |
19 | #include "grn_output.h" |
20 | #include <string.h> |
21 | #include <limits.h> |
22 | |
23 | #include "grn_store.h" |
24 | #include "grn_normalizer.h" |
25 | |
26 | /* grn_tiny_array */ |
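/*
 * A grn_tiny_array stores its elements in lazily allocated blocks. The block
 * that owns an ID is chosen from the position of the most significant bit of
 * that ID (see grn_tiny_array_get_block_id() below), so block capacity grows
 * geometrically and existing elements are never relocated. The layout macros
 * (GRN_TINY_ARRAY_GET_OFFSET(), GRN_TINY_ARRAY_GET_BLOCK_SIZE(), ...) are
 * defined in grn_hash.h.
 */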
27 | |
28 | /* Requirements: id != GRN_ID_NIL. */ |
29 | inline static int |
30 | grn_tiny_array_get_block_id(grn_id id) |
31 | { |
32 | int most_significant_one_bit_offset; |
33 | GRN_BIT_SCAN_REV(id, most_significant_one_bit_offset); |
34 | return most_significant_one_bit_offset >> GRN_TINY_ARRAY_FACTOR; |
35 | } |
36 | |
37 | /* Requirements: id != GRN_ID_NIL. */ |
38 | inline static void * |
39 | grn_tiny_array_get(grn_tiny_array *array, grn_id id) { |
40 | const int block_id = grn_tiny_array_get_block_id(id); |
41 | uint8_t * const block = (uint8_t *)array->blocks[block_id]; |
42 | if (block) { |
43 | const size_t offset = GRN_TINY_ARRAY_GET_OFFSET(block_id); |
44 | return block + (id - offset) * array->element_size; |
45 | } |
46 | return NULL; |
47 | } |
48 | |
49 | /* Requirements: id != GRN_ID_NIL. */ |
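/* Note: The owning block is allocated on demand; NULL means allocation failed. */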
50 | inline static void * |
51 | grn_tiny_array_put(grn_tiny_array *array, grn_id id) { |
52 | const int block_id = grn_tiny_array_get_block_id(id); |
53 | void ** const block = &array->blocks[block_id]; |
54 | const size_t offset = GRN_TINY_ARRAY_GET_OFFSET(block_id); |
55 | if (!*block) { |
56 | grn_ctx * const ctx = array->ctx; |
57 | if (array->flags & GRN_TINY_ARRAY_THREADSAFE) { |
58 | CRITICAL_SECTION_ENTER(array->lock); |
59 | } |
60 | if (!*block) { |
61 | const size_t block_size = |
62 | GRN_TINY_ARRAY_GET_BLOCK_SIZE(block_id) * array->element_size; |
63 | if (array->flags & GRN_TINY_ARRAY_USE_MALLOC) { |
64 | if (array->flags & GRN_TINY_ARRAY_CLEAR) { |
65 | *block = GRN_CALLOC(block_size); |
66 | } else { |
67 | *block = GRN_MALLOC(block_size); |
68 | } |
69 | } else { |
70 | *block = GRN_CTX_ALLOC(ctx, block_size); |
71 | } |
72 | } |
73 | if (array->flags & GRN_TINY_ARRAY_THREADSAFE) { |
74 | CRITICAL_SECTION_LEAVE(array->lock); |
75 | } |
76 | if (!*block) { |
77 | return NULL; |
78 | } |
79 | } |
80 | if (id > array->max) { |
81 | array->max = id; |
82 | } |
83 | return (uint8_t *)*block + (id - offset) * array->element_size; |
84 | } |
85 | |
86 | inline static void * |
87 | grn_tiny_array_at_inline(grn_tiny_array *array, grn_id id) |
88 | { |
89 | return id ? grn_tiny_array_put(array, id) : NULL; |
90 | } |
91 | |
92 | inline static void * |
93 | grn_tiny_array_next(grn_tiny_array *array) |
94 | { |
95 | return grn_tiny_array_put(array, array->max + 1); |
96 | } |
97 | |
98 | void |
99 | grn_tiny_array_init(grn_ctx *ctx, grn_tiny_array *array, |
100 | uint16_t element_size, uint16_t flags) |
101 | { |
102 | array->ctx = ctx; |
103 | array->max = 0; |
104 | array->element_size = element_size; |
105 | array->flags = flags; |
106 | memset(array->blocks, 0, sizeof(array->blocks)); |
107 | if (flags & GRN_TINY_ARRAY_THREADSAFE) { |
108 | CRITICAL_SECTION_INIT(array->lock); |
109 | } |
110 | } |
111 | |
112 | void |
113 | grn_tiny_array_fin(grn_tiny_array *array) |
114 | { |
115 | int block_id; |
116 | grn_ctx * const ctx = array->ctx; |
117 | for (block_id = 0; block_id < GRN_TINY_ARRAY_NUM_BLOCKS; block_id++) { |
118 | if (array->blocks[block_id]) { |
119 | if (array->flags & GRN_TINY_ARRAY_USE_MALLOC) { |
120 | GRN_FREE(array->blocks[block_id]); |
121 | } else { |
122 | GRN_CTX_FREE(ctx, array->blocks[block_id]); |
123 | } |
124 | array->blocks[block_id] = NULL; |
125 | } |
126 | } |
127 | } |
128 | |
129 | void * |
130 | grn_tiny_array_at(grn_tiny_array *array, grn_id id) |
131 | { |
132 | return grn_tiny_array_at_inline(array, id); |
133 | } |
134 | |
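/*
 * grn_tiny_array_id() maps an element address back to its ID by scanning the
 * blocks in order. It returns GRN_ID_NIL if the address does not belong to
 * the array.
 */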
135 | grn_id |
136 | grn_tiny_array_id(grn_tiny_array *array, const void *element_address) |
137 | { |
138 | const uint8_t * const ptr = (const uint8_t *)element_address; |
139 | uint32_t block_id, offset = 1; |
140 | for (block_id = 0; block_id < GRN_TINY_ARRAY_NUM_BLOCKS; block_id++) { |
141 | const uint32_t block_size = GRN_TINY_ARRAY_GET_BLOCK_SIZE(block_id); |
142 | const uint8_t * const block = (const uint8_t *)array->blocks[block_id]; |
143 | if (block) { |
144 | if (block <= ptr && ptr < (block + block_size * array->element_size)) { |
145 | return offset + ((ptr - block) / array->element_size); |
146 | } |
147 | } |
148 | offset += block_size; |
149 | } |
150 | return GRN_ID_NIL; |
151 | } |
152 | |
153 | /* grn_tiny_bitmap */ |
154 | |
155 | static void |
156 | grn_tiny_bitmap_init(grn_ctx *ctx, grn_tiny_bitmap *bitmap) |
157 | { |
158 | bitmap->ctx = ctx; |
159 | memset(bitmap->blocks, 0, sizeof(bitmap->blocks)); |
160 | } |
161 | |
162 | static void |
163 | grn_tiny_bitmap_fin(grn_tiny_bitmap *bitmap) |
164 | { |
165 | int block_id; |
166 | grn_ctx * const ctx = bitmap->ctx; |
167 | for (block_id = 0; block_id < GRN_TINY_ARRAY_NUM_BLOCKS; block_id++) { |
168 | if (bitmap->blocks[block_id]) { |
169 | GRN_CTX_FREE(ctx, bitmap->blocks[block_id]); |
170 | bitmap->blocks[block_id] = NULL; |
171 | } |
172 | } |
173 | } |
174 | |
175 | /* Requirements: bit_id != GRN_ID_NIL. */ |
176 | inline static uint8_t * |
177 | grn_tiny_bitmap_get_byte(grn_tiny_bitmap *bitmap, grn_id bit_id) { |
178 | const uint32_t byte_id = (bit_id >> 3) + 1; |
179 | const int block_id = grn_tiny_array_get_block_id(byte_id); |
180 | uint8_t * const block = (uint8_t *)bitmap->blocks[block_id]; |
181 | if (block) { |
182 | const size_t offset = GRN_TINY_ARRAY_GET_OFFSET(block_id); |
183 | return block + byte_id - offset; |
184 | } |
185 | return NULL; |
186 | } |
187 | |
188 | /* Requirements: bit_id != GRN_ID_NIL. */ |
189 | inline static uint8_t * |
190 | grn_tiny_bitmap_put_byte(grn_tiny_bitmap *bitmap, grn_id bit_id) { |
191 | const uint32_t byte_id = (bit_id >> 3) + 1; |
192 | const int block_id = grn_tiny_array_get_block_id(byte_id); |
193 | void ** const block = &bitmap->blocks[block_id]; |
194 | const size_t offset = GRN_TINY_ARRAY_GET_OFFSET(block_id); |
195 | if (!*block) { |
196 | grn_ctx * const ctx = bitmap->ctx; |
197 | *block = GRN_CTX_ALLOC(ctx, GRN_TINY_ARRAY_GET_BLOCK_SIZE(block_id)); |
198 | if (!*block) { |
199 | return NULL; |
200 | } |
201 | } |
202 | return (uint8_t *)*block + byte_id - offset; |
203 | } |
204 | |
205 | /* Requirements: bit_id != GRN_ID_NIL. */ |
206 | /* Return value: 1/0 on success, -1 on failure. */ |
207 | inline static int |
208 | grn_tiny_bitmap_get(grn_tiny_bitmap *bitmap, grn_id bit_id) |
209 | { |
210 | uint8_t * const ptr = grn_tiny_bitmap_get_byte(bitmap, bit_id); |
211 | return ptr ? ((*ptr >> (bit_id & 7)) & 1) : -1; |
212 | } |
213 | |
214 | /* Requirements: bit_id != GRN_ID_NIL. */ |
215 | /* Return value: 1/0 on success, -1 on failure. */ |
216 | /* Note: A bitmap is extended if needed. */ |
217 | inline static int |
218 | grn_tiny_bitmap_put(grn_tiny_bitmap *bitmap, grn_id bit_id) |
219 | { |
220 | uint8_t * const ptr = grn_tiny_bitmap_put_byte(bitmap, bit_id); |
221 | return ptr ? ((*ptr >> (bit_id & 7)) & 1) : -1; |
222 | } |
223 | |
224 | /* Requirements: bit_id != GRN_ID_NIL. */ |
225 | inline static uint8_t * |
226 | grn_tiny_bitmap_get_and_set(grn_tiny_bitmap *bitmap, grn_id bit_id, |
227 | grn_bool bit) |
228 | { |
229 | uint8_t * const ptr = grn_tiny_bitmap_get_byte(bitmap, bit_id); |
230 | if (ptr) { |
231 | /* This branch will be removed because the given `bit' is constant. */ |
232 | if (bit) { |
233 | *ptr |= 1 << (bit_id & 7); |
234 | } else { |
235 | *ptr &= ~(1 << (bit_id & 7)); |
236 | } |
237 | } |
238 | return ptr; |
239 | } |
240 | |
241 | /* Requirements: bit_id != GRN_ID_NIL. */ |
242 | /* Note: A bitmap is extended if needed. */ |
243 | inline static uint8_t * |
244 | grn_tiny_bitmap_put_and_set(grn_tiny_bitmap *bitmap, grn_id bit_id, |
245 | grn_bool bit) |
246 | { |
247 | uint8_t * const ptr = grn_tiny_bitmap_put_byte(bitmap, bit_id); |
248 | if (ptr) { |
249 | /* This branch will be removed because the given `bit' is constant. */ |
250 | if (bit) { |
251 | *ptr |= 1 << (bit_id & 7); |
252 | } else { |
253 | *ptr &= ~(1 << (bit_id & 7)); |
254 | } |
255 | } |
256 | return ptr; |
257 | } |
258 | |
259 | /* grn_io_array */ |
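/*
 * Helpers for grn_io-based arrays. Elements are accessed through
 * GRN_IO_ARRAY_AT(). The bit helpers below address bit `offset & 7' of byte
 * `(offset >> 3) + 1': bytes are indexed from 1, consistent with entry IDs
 * starting at 1.
 */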
260 | |
261 | #define GRN_ARRAY_MAX (GRN_ID_MAX - 8) |
262 | |
263 | inline static void * |
264 | grn_io_array_at_inline(grn_ctx *ctx, grn_io *io, uint32_t segment_id, |
265 | uint64_t offset, int flags) |
266 | { |
267 | void *ptr; |
268 | GRN_IO_ARRAY_AT(io, segment_id, offset, &flags, ptr); |
269 | return ptr; |
270 | } |
271 | |
272 | /* |
273 | * grn_io_array_bit_at() returns 1/0 on success, -1 on failure. |
274 | */ |
275 | inline static int |
276 | grn_io_array_bit_at(grn_ctx *ctx, grn_io *io, |
277 | uint32_t segment_id, uint32_t offset) |
278 | { |
279 | uint8_t * const ptr = (uint8_t *)grn_io_array_at_inline( |
280 | ctx, io, segment_id, (offset >> 3) + 1, 0); |
281 | return ptr ? ((*ptr >> (offset & 7)) & 1) : -1; |
282 | } |
283 | |
284 | /* |
285 | * The following functions, grn_io_array_bit_*(), return a non-NULL pointer on |
286 | * success, a NULL pointer on failure. |
287 | */ |
288 | inline static void * |
289 | grn_io_array_bit_on(grn_ctx *ctx, grn_io *io, |
290 | uint32_t segment_id, uint32_t offset) |
291 | { |
292 | uint8_t * const ptr = (uint8_t *)grn_io_array_at_inline( |
293 | ctx, io, segment_id, (offset >> 3) + 1, GRN_TABLE_ADD); |
294 | if (ptr) { |
295 | *ptr |= 1 << (offset & 7); |
296 | } |
297 | return ptr; |
298 | } |
299 | |
300 | inline static void * |
301 | grn_io_array_bit_off(grn_ctx *ctx, grn_io *io, |
302 | uint32_t segment_id, uint32_t offset) |
303 | { |
304 | uint8_t * const ptr = (uint8_t *)grn_io_array_at_inline( |
305 | ctx, io, segment_id, (offset >> 3) + 1, GRN_TABLE_ADD); |
306 | if (ptr) { |
307 | *ptr &= ~(1 << (offset & 7)); |
308 | } |
309 | return ptr; |
310 | } |
311 | |
312 | inline static void * |
313 | grn_io_array_bit_flip(grn_ctx *ctx, grn_io *io, |
314 | uint32_t segment_id, uint32_t offset) |
315 | { |
316 | uint8_t * const ptr = (uint8_t *)grn_io_array_at_inline( |
317 | ctx, io, segment_id, (offset >> 3) + 1, GRN_TABLE_ADD); |
318 | if (ptr) { |
319 | *ptr ^= 1 << (offset & 7); |
320 | } |
321 | return ptr; |
322 | } |
323 | |
324 | /* grn_table_queue */ |
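/*
 * A grn_table_queue is a bounded FIFO of array IDs. `head' and `tail' count
 * up to 2 * cap and then wrap back to 1, which keeps a full queue
 * (size == cap) distinguishable from an empty one; grn_table_queue_head()
 * and grn_table_queue_tail() fold the counters back into the ID range
 * [1, cap].
 */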
325 | |
326 | static void |
327 | grn_table_queue_lock_init(grn_ctx *ctx, grn_table_queue *queue) |
328 | { |
329 | MUTEX_INIT_SHARED(queue->mutex); |
330 | COND_INIT_SHARED(queue->cond); |
331 | } |
332 | |
333 | static void |
334 | grn_table_queue_init(grn_ctx *ctx, grn_table_queue *queue) |
335 | { |
336 | queue->head = 0; |
337 | queue->tail = 0; |
338 | queue->cap = GRN_ARRAY_MAX; |
339 | queue->unblock_requested = GRN_FALSE; |
340 | grn_table_queue_lock_init(ctx, queue); |
341 | } |
342 | |
343 | uint32_t |
344 | grn_table_queue_size(grn_table_queue *queue) |
345 | { |
346 | return (queue->head < queue->tail) |
347 | ? 2 * queue->cap + queue->head - queue->tail |
348 | : queue->head - queue->tail; |
349 | } |
350 | |
351 | void |
352 | grn_table_queue_head_increment(grn_table_queue *queue) |
353 | { |
354 | if (queue->head == 2 * queue->cap) { |
355 | queue->head = 1; |
356 | } else { |
357 | queue->head++; |
358 | } |
359 | } |
360 | |
361 | void |
362 | grn_table_queue_tail_increment(grn_table_queue *queue) |
363 | { |
364 | if (queue->tail == 2 * queue->cap) { |
365 | queue->tail = 1; |
366 | } else { |
367 | queue->tail++; |
368 | } |
369 | } |
370 | |
371 | grn_id |
372 | grn_table_queue_head(grn_table_queue *queue) |
373 | { |
374 | return queue->head > queue->cap |
375 | ? queue->head - queue->cap |
376 | : queue->head; |
377 | } |
378 | |
379 | grn_id |
380 | grn_table_queue_tail(grn_table_queue *queue) |
381 | { |
382 | return queue->tail > queue->cap |
383 | ? queue->tail - queue->cap |
384 | : queue->tail; |
385 | } |
386 | |
387 | /* grn_array */ |
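/*
 * A grn_array has two backends: a "tiny" array that lives in process memory
 * (GRN_ARRAY_TINY, backed by grn_tiny_array/grn_tiny_bitmap) and a grn_io
 * based array that is backed by a file and may be shared with other
 * processes. grn_array_is_io_array() tells the two apart.
 */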
388 | |
389 | #define GRN_ARRAY_SEGMENT_SIZE 0x400000 |
390 | |
391 | /* Header of grn_io-based grn_array. */ |
struct grn_array_header {
  uint32_t flags;
  uint32_t curr_rec;
  uint32_t lock;
  uint32_t value_size;
  uint32_t n_entries;
  grn_id garbages;
  uint32_t n_garbages;
  uint32_t truncated;
  uint32_t reserved[8];
  grn_table_queue queue;
};
404 | |
405 | /* |
406 | * A grn_io-based grn_array consists of the following 2 segments. |
407 | * GRN_ARRAY_VALUE_SEGMENT: stores values. |
408 | * GRN_ARRAY_BITMAP_SEGMENT: stores whether entries are valid or not. |
409 | */ |
410 | enum { |
411 | GRN_ARRAY_VALUE_SEGMENT = 0, |
412 | GRN_ARRAY_BITMAP_SEGMENT = 1 |
413 | }; |
414 | |
415 | inline static grn_bool |
416 | grn_array_is_io_array(grn_array *array) |
417 | { |
418 | return array->io != NULL; |
419 | } |
420 | |
421 | inline static void * |
422 | grn_array_io_entry_at(grn_ctx *ctx, grn_array *array, grn_id id, int flags) |
423 | { |
424 | return grn_io_array_at_inline(ctx, array->io, GRN_ARRAY_VALUE_SEGMENT, id, flags); |
425 | } |
426 | |
427 | inline static void * |
428 | grn_array_entry_at(grn_ctx *ctx, grn_array *array, grn_id id, int flags) |
429 | { |
430 | if (grn_array_is_io_array(array)) { |
431 | return grn_array_io_entry_at(ctx, array, id, flags); |
432 | } else { |
433 | return grn_tiny_array_at_inline(&array->array, id); |
434 | } |
435 | } |
436 | |
437 | /* grn_array_bitmap_at() returns 1/0 on success, -1 on failure. */ |
438 | inline static int |
439 | grn_array_bitmap_at(grn_ctx *ctx, grn_array *array, grn_id id) |
440 | { |
441 | if (grn_array_is_io_array(array)) { |
442 | return grn_io_array_bit_at(ctx, array->io, GRN_ARRAY_BITMAP_SEGMENT, id); |
443 | } else { |
444 | return grn_tiny_bitmap_put(&array->bitmap, id); |
445 | } |
446 | } |
447 | |
448 | static grn_rc |
449 | grn_array_init_tiny_array(grn_ctx *ctx, grn_array *array, const char *path, |
450 | uint32_t value_size, uint32_t flags) |
451 | { |
452 | if (path) { |
    ERR(GRN_INVALID_ARGUMENT, "failed to create tiny array");
454 | return ctx->rc; |
455 | } |
456 | array->obj.header.flags = flags; |
457 | array->ctx = ctx; |
458 | array->value_size = value_size; |
459 | array->n_keys = 0; |
460 | array->keys = NULL; |
461 | array->n_garbages = &array->n_garbages_buf; |
462 | array->n_entries = &array->n_entries_buf; |
463 | array->n_garbages_buf = 0; |
464 | array->n_entries_buf = 0; |
465 | array->io = NULL; |
466 | array->header = NULL; |
467 | array->garbages = GRN_ID_NIL; |
468 | grn_tiny_array_init(ctx, &array->array, value_size, GRN_TINY_ARRAY_CLEAR); |
469 | grn_tiny_bitmap_init(ctx, &array->bitmap); |
470 | return GRN_SUCCESS; |
471 | } |
472 | |
473 | static grn_io * |
474 | grn_array_create_io_array(grn_ctx *ctx, const char *path, uint32_t value_size) |
475 | { |
476 | uint32_t w_of_element = 0; |
477 | grn_io_array_spec array_spec[2]; |
478 | |
479 | while ((1U << w_of_element) < value_size) { |
480 | w_of_element++; |
481 | } |
482 | |
483 | array_spec[GRN_ARRAY_VALUE_SEGMENT].w_of_element = w_of_element; |
484 | array_spec[GRN_ARRAY_VALUE_SEGMENT].max_n_segments = |
485 | 1U << (30 - (22 - w_of_element)); |
486 | array_spec[GRN_ARRAY_BITMAP_SEGMENT].w_of_element = 0; |
487 | array_spec[GRN_ARRAY_BITMAP_SEGMENT].max_n_segments = 1U << (30 - (22 + 3)); |
488 | return grn_io_create_with_array(ctx, path, sizeof(struct grn_array_header), |
489 | GRN_ARRAY_SEGMENT_SIZE, grn_io_auto, |
490 | 2, array_spec); |
491 | } |
492 | |
493 | static grn_rc |
494 | grn_array_init_io_array(grn_ctx *ctx, grn_array *array, const char *path, |
495 | uint32_t value_size, uint32_t flags) |
496 | { |
497 | grn_io *io; |
  struct grn_array_header *header;
499 | |
500 | io = grn_array_create_io_array(ctx, path, value_size); |
501 | if (!io) { |
502 | return ctx->rc; |
503 | } |
504 | grn_io_set_type(io, GRN_TABLE_NO_KEY); |
505 | |
506 | header = grn_io_header(io); |
507 | header->flags = flags; |
508 | header->curr_rec = 0; |
509 | header->lock = 0; |
510 | header->value_size = value_size; |
511 | header->n_entries = 0; |
512 | header->n_garbages = 0; |
513 | header->garbages = GRN_ID_NIL; |
514 | header->truncated = GRN_FALSE; |
515 | grn_table_queue_init(ctx, &header->queue); |
516 | array->obj.header.flags = flags; |
517 | array->ctx = ctx; |
518 | array->value_size = value_size; |
519 | array->n_keys = 0; |
520 | array->keys = NULL; |
521 | array->n_garbages = &header->n_garbages; |
522 | array->n_entries = &header->n_entries; |
523 | array->io = io; |
524 | array->header = header; |
525 | array->lock = &header->lock; |
526 | return GRN_SUCCESS; |
527 | } |
528 | |
529 | void |
530 | grn_array_queue_lock_clear(grn_ctx *ctx, grn_array *array) |
531 | { |
  struct grn_array_header *header;
533 | header = grn_io_header(array->io); |
534 | grn_table_queue_lock_init(ctx, &header->queue); |
535 | } |
536 | |
537 | grn_table_queue * |
538 | grn_array_queue(grn_ctx *ctx, grn_array *array) |
539 | { |
540 | if (grn_array_is_io_array(array)) { |
    struct grn_array_header *header;
542 | header = grn_io_header(array->io); |
543 | return &header->queue; |
544 | } else { |
545 | return NULL; |
546 | } |
547 | } |
548 | |
549 | static grn_rc |
550 | grn_array_init(grn_ctx *ctx, grn_array *array, |
551 | const char *path, uint32_t value_size, uint32_t flags) |
552 | { |
553 | if (flags & GRN_ARRAY_TINY) { |
554 | return grn_array_init_tiny_array(ctx, array, path, value_size, flags); |
555 | } else { |
556 | return grn_array_init_io_array(ctx, array, path, value_size, flags); |
557 | } |
558 | } |
559 | |
560 | grn_array * |
561 | grn_array_create(grn_ctx *ctx, const char *path, uint32_t value_size, uint32_t flags) |
562 | { |
563 | if (ctx) { |
564 | grn_array * const array = (grn_array *)GRN_CALLOC(sizeof(grn_array)); |
565 | if (array) { |
566 | GRN_DB_OBJ_SET_TYPE(array, GRN_TABLE_NO_KEY); |
567 | if (!grn_array_init(ctx, array, path, value_size, flags)) { |
568 | return array; |
569 | } |
570 | GRN_FREE(array); |
571 | } |
572 | } |
573 | return NULL; |
574 | } |
575 | |
576 | grn_array * |
577 | grn_array_open(grn_ctx *ctx, const char *path) |
578 | { |
579 | if (ctx) { |
580 | grn_io * const io = grn_io_open(ctx, path, grn_io_auto); |
581 | if (io) { |
      struct grn_array_header * const header = grn_io_header(io);
583 | uint32_t io_type = grn_io_get_type(io); |
584 | if (io_type == GRN_TABLE_NO_KEY) { |
585 | grn_array * const array = (grn_array *)GRN_MALLOC(sizeof(grn_array)); |
586 | if (array) { |
587 | if (!(header->flags & GRN_ARRAY_TINY)) { |
588 | GRN_DB_OBJ_SET_TYPE(array, GRN_TABLE_NO_KEY); |
589 | array->obj.header.flags = header->flags; |
590 | array->ctx = ctx; |
591 | array->value_size = header->value_size; |
592 | array->n_keys = 0; |
593 | array->keys = NULL; |
594 | array->n_garbages = &header->n_garbages; |
595 | array->n_entries = &header->n_entries; |
596 | array->io = io; |
597 | array->header = header; |
598 | array->lock = &header->lock; |
599 | return array; |
600 | } else { |
            GRN_LOG(ctx, GRN_LOG_NOTICE, "invalid array flags. (%x)", header->flags);
602 | } |
603 | GRN_FREE(array); |
604 | } |
605 | } else { |
        ERR(GRN_INVALID_FORMAT,
            "[table][array] file type must be %#04x: <%#04x>",
            GRN_TABLE_NO_KEY, io_type);
609 | } |
610 | grn_io_close(ctx, io); |
611 | } |
612 | } |
613 | return NULL; |
614 | } |
615 | |
616 | /* |
617 | * grn_array_error_if_truncated() logs an error and returns its error code if |
618 | * an array is truncated by another process. |
619 | * Otherwise, this function returns GRN_SUCCESS. |
620 | * Note that `ctx` and `array` must be valid. |
621 | * |
622 | * FIXME: An array should be reopened if possible. |
623 | */ |
624 | static grn_rc |
625 | grn_array_error_if_truncated(grn_ctx *ctx, grn_array *array) |
626 | { |
627 | if (array->header && array->header->truncated) { |
    ERR(GRN_FILE_CORRUPT,
        "array is truncated, please unmap or reopen the database");
630 | return GRN_FILE_CORRUPT; |
631 | } |
632 | return GRN_SUCCESS; |
633 | } |
634 | |
635 | grn_rc |
636 | grn_array_close(grn_ctx *ctx, grn_array *array) |
637 | { |
638 | grn_rc rc = GRN_SUCCESS; |
639 | if (!ctx || !array) { return GRN_INVALID_ARGUMENT; } |
640 | if (array->keys) { GRN_FREE(array->keys); } |
641 | if (grn_array_is_io_array(array)) { |
642 | rc = grn_io_close(ctx, array->io); |
643 | } else { |
644 | GRN_ASSERT(ctx == array->ctx); |
645 | grn_tiny_array_fin(&array->array); |
646 | grn_tiny_bitmap_fin(&array->bitmap); |
647 | } |
648 | GRN_FREE(array); |
649 | return rc; |
650 | } |
651 | |
652 | grn_rc |
653 | grn_array_remove(grn_ctx *ctx, const char *path) |
654 | { |
655 | if (!ctx || !path) { return GRN_INVALID_ARGUMENT; } |
656 | return grn_io_remove(ctx, path); |
657 | } |
658 | |
659 | uint32_t |
660 | grn_array_size(grn_ctx *ctx, grn_array *array) |
661 | { |
662 | if (grn_array_error_if_truncated(ctx, array) != GRN_SUCCESS) { |
663 | return 0; |
664 | } |
665 | return *array->n_entries; |
666 | } |
667 | |
668 | uint32_t |
669 | grn_array_get_flags(grn_ctx *ctx, grn_array *array) |
670 | { |
671 | return array->header->flags; |
672 | } |
673 | |
674 | grn_rc |
675 | grn_array_truncate(grn_ctx *ctx, grn_array *array) |
676 | { |
677 | grn_rc rc; |
678 | char *path = NULL; |
679 | uint32_t value_size, flags; |
680 | |
681 | if (!ctx || !array) { return GRN_INVALID_ARGUMENT; } |
682 | rc = grn_array_error_if_truncated(ctx, array); |
683 | if (rc != GRN_SUCCESS) { |
684 | return rc; |
685 | } |
686 | if (grn_array_is_io_array(array)) { |
687 | const char * const io_path = grn_io_path(array->io); |
688 | if (io_path && *io_path) { |
689 | path = GRN_STRDUP(io_path); |
690 | if (!path) { |
        ERR(GRN_NO_MEMORY_AVAILABLE, "cannot duplicate path: <%s>", io_path);
692 | return GRN_NO_MEMORY_AVAILABLE; |
693 | } |
694 | } |
695 | } |
696 | value_size = array->value_size; |
697 | flags = array->obj.header.flags; |
698 | |
699 | if (grn_array_is_io_array(array)) { |
700 | if (path) { |
701 | /* Only an I/O array with a valid path uses the `truncated` flag. */ |
702 | array->header->truncated = GRN_TRUE; |
703 | } |
704 | rc = grn_io_close(ctx, array->io); |
705 | if (!rc) { |
706 | array->io = NULL; |
707 | if (path) { |
708 | rc = grn_io_remove(ctx, path); |
709 | } |
710 | } |
711 | } |
712 | if (!rc) { |
713 | rc = grn_array_init(ctx, array, path, value_size, flags); |
714 | } |
715 | if (path) { GRN_FREE(path); } |
716 | return rc; |
717 | } |
718 | |
719 | inline static grn_id |
720 | grn_array_get_max_id(grn_array *array) |
721 | { |
722 | return grn_array_is_io_array(array) ? array->header->curr_rec : array->array.max; |
723 | } |
724 | |
725 | inline static void * |
726 | grn_array_get_value_inline(grn_ctx *ctx, grn_array *array, grn_id id) |
727 | { |
728 | if (!ctx || !array) { |
729 | return NULL; |
730 | } |
731 | if (grn_array_error_if_truncated(ctx, array) != GRN_SUCCESS) { |
732 | return NULL; |
733 | } |
734 | if (*array->n_garbages) { |
735 | /* |
736 | * grn_array_bitmap_at() is a time-consuming function, so it is called only |
737 | * when there are garbages in the array. |
738 | */ |
739 | if (grn_array_bitmap_at(ctx, array, id) != 1) { |
740 | return NULL; |
741 | } |
742 | } else if (id == 0 || id > grn_array_get_max_id(array)) { |
743 | return NULL; |
744 | } |
745 | return grn_array_entry_at(ctx, array, id, 0); |
746 | } |
747 | |
748 | int |
749 | grn_array_get_value(grn_ctx *ctx, grn_array *array, grn_id id, void *valuebuf) |
750 | { |
751 | void * const value = grn_array_get_value_inline(ctx, array, id); |
752 | if (value) { |
753 | if (valuebuf) { |
754 | grn_memcpy(valuebuf, value, array->value_size); |
755 | } |
756 | return array->value_size; |
757 | } |
758 | return 0; |
759 | } |
760 | |
761 | void * |
762 | _grn_array_get_value(grn_ctx *ctx, grn_array *array, grn_id id) |
763 | { |
764 | return grn_array_get_value_inline(ctx, array, id); |
765 | } |
766 | |
767 | inline static grn_rc |
768 | grn_array_set_value_inline(grn_ctx *ctx, grn_array *array, grn_id id, |
769 | const void *value, int flags) |
770 | { |
771 | void *entry; |
772 | entry = grn_array_entry_at(ctx, array, id, 0); |
773 | if (!entry) { |
774 | return GRN_NO_MEMORY_AVAILABLE; |
775 | } |
776 | |
777 | switch ((flags & GRN_OBJ_SET_MASK)) { |
778 | case GRN_OBJ_SET : |
779 | grn_memcpy(entry, value, array->value_size); |
780 | return GRN_SUCCESS; |
781 | case GRN_OBJ_INCR : |
782 | switch (array->value_size) { |
783 | case sizeof(int32_t) : |
784 | *((int32_t *)entry) += *((int32_t *)value); |
785 | return GRN_SUCCESS; |
786 | case sizeof(int64_t) : |
787 | *((int64_t *)entry) += *((int64_t *)value); |
788 | return GRN_SUCCESS; |
789 | default : |
790 | return GRN_INVALID_ARGUMENT; |
791 | } |
792 | break; |
793 | case GRN_OBJ_DECR : |
794 | switch (array->value_size) { |
795 | case sizeof(int32_t) : |
796 | *((int32_t *)entry) -= *((int32_t *)value); |
797 | return GRN_SUCCESS; |
798 | case sizeof(int64_t) : |
799 | *((int64_t *)entry) -= *((int64_t *)value); |
800 | return GRN_SUCCESS; |
801 | default : |
802 | return GRN_INVALID_ARGUMENT; |
803 | } |
804 | break; |
805 | default : |
806 | /* todo : support other types. */ |
807 | return GRN_INVALID_ARGUMENT; |
808 | } |
809 | } |
810 | |
811 | grn_rc |
812 | grn_array_set_value(grn_ctx *ctx, grn_array *array, grn_id id, |
813 | const void *value, int flags) |
814 | { |
815 | grn_rc rc; |
816 | |
817 | if (!ctx || !array || !value) { |
818 | return GRN_INVALID_ARGUMENT; |
819 | } |
820 | |
821 | rc = grn_array_error_if_truncated(ctx, array); |
822 | if (rc != GRN_SUCCESS) { |
823 | return rc; |
824 | } |
825 | if (*array->n_garbages) { |
826 | /* |
827 | * grn_array_bitmap_at() is a time-consuming function, so it is called only |
828 | * when there are garbages in the array. |
829 | */ |
830 | if (grn_array_bitmap_at(ctx, array, id) != 1) { |
831 | return GRN_INVALID_ARGUMENT; |
832 | } |
833 | } else if (id == 0 || id > grn_array_get_max_id(array)) { |
834 | return GRN_INVALID_ARGUMENT; |
835 | } |
836 | return grn_array_set_value_inline(ctx, array, id, value, flags); |
837 | } |
838 | |
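/*
 * grn_array_delete_by_id() does not release the entry's storage. If the
 * value is at least sizeof(grn_id) bytes, the entry is pushed onto the
 * garbage list by storing the previous list head in its first bytes, and its
 * bitmap bit is cleared so that the ID can be reused by grn_array_add().
 */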
839 | grn_rc |
840 | grn_array_delete_by_id(grn_ctx *ctx, grn_array *array, grn_id id, |
841 | grn_table_delete_optarg *optarg) |
842 | { |
843 | grn_rc rc; |
844 | if (!ctx || !array) { |
845 | return GRN_INVALID_ARGUMENT; |
846 | } |
847 | rc = grn_array_error_if_truncated(ctx, array); |
848 | if (rc != GRN_SUCCESS) { |
849 | return rc; |
850 | } |
851 | if (grn_array_bitmap_at(ctx, array, id) != 1) { |
852 | return GRN_INVALID_ARGUMENT; |
853 | } |
854 | |
855 | { |
856 | rc = GRN_SUCCESS; |
857 | /* lock */ |
858 | if (grn_array_is_io_array(array)) { |
859 | if (array->value_size >= sizeof(grn_id)) { |
        struct grn_array_header * const header = array->header;
861 | void * const entry = grn_array_io_entry_at(ctx, array, id, 0); |
862 | if (!entry) { |
863 | rc = GRN_INVALID_ARGUMENT; |
864 | } else { |
865 | *((grn_id *)entry) = header->garbages; |
866 | header->garbages = id; |
867 | } |
868 | } |
869 | if (!rc) { |
870 | (*array->n_entries)--; |
871 | (*array->n_garbages)++; |
872 | /* |
873 | * The following grn_io_array_bit_off() fails iff a problem has |
874 | * occurred after the above grn_array_bitmap_at(). That is to say, |
875 | * an unexpected case. |
876 | */ |
877 | grn_io_array_bit_off(ctx, array->io, GRN_ARRAY_BITMAP_SEGMENT, id); |
878 | } |
879 | } else { |
880 | if (array->value_size >= sizeof(grn_id)) { |
881 | void * const entry = grn_tiny_array_get(&array->array, id); |
882 | if (!entry) { |
883 | rc = GRN_INVALID_ARGUMENT; |
884 | } else { |
885 | *((grn_id *)entry) = array->garbages; |
886 | array->garbages = id; |
887 | } |
888 | } |
889 | if (!rc) { |
890 | (*array->n_entries)--; |
891 | (*array->n_garbages)++; |
892 | /* |
893 | * The following grn_io_array_bit_off() fails iff a problem has |
894 | * occurred after the above grn_array_bitmap_at(). That is to say, |
895 | * an unexpected case. |
896 | */ |
897 | grn_tiny_bitmap_get_and_set(&array->bitmap, id, 0); |
898 | } |
899 | } |
900 | /* unlock */ |
901 | return rc; |
902 | } |
903 | } |
904 | |
905 | grn_id |
906 | grn_array_at(grn_ctx *ctx, grn_array *array, grn_id id) |
907 | { |
908 | if (grn_array_error_if_truncated(ctx, array) != GRN_SUCCESS) { |
909 | return GRN_ID_NIL; |
910 | } |
911 | if (*array->n_garbages) { |
912 | /* |
913 | * grn_array_bitmap_at() is a time-consuming function, so it is called only |
914 | * when there are garbages in the array. |
915 | */ |
916 | if (grn_array_bitmap_at(ctx, array, id) != 1) { |
917 | return GRN_ID_NIL; |
918 | } |
919 | } else if (id > grn_array_get_max_id(array)) { |
920 | return GRN_ID_NIL; |
921 | } |
922 | return id; |
923 | } |
924 | |
925 | grn_rc |
926 | grn_array_copy_sort_key(grn_ctx *ctx, grn_array *array, |
927 | grn_table_sort_key *keys, int n_keys) |
928 | { |
929 | array->keys = (grn_table_sort_key *)GRN_MALLOCN(grn_table_sort_key, n_keys); |
930 | if (!array->keys) { |
931 | return ctx->rc; |
932 | } |
933 | grn_memcpy(array->keys, keys, sizeof(grn_table_sort_key) * n_keys); |
934 | array->n_keys = n_keys; |
935 | return GRN_SUCCESS; |
936 | } |
937 | |
938 | void |
939 | grn_array_cursor_close(grn_ctx *ctx, grn_array_cursor *cursor) |
940 | { |
941 | GRN_ASSERT(cursor->ctx == ctx); |
942 | GRN_FREE(cursor); |
943 | } |
944 | |
945 | grn_array_cursor * |
946 | grn_array_cursor_open(grn_ctx *ctx, grn_array *array, grn_id min, grn_id max, |
947 | int offset, int limit, int flags) |
948 | { |
949 | grn_array_cursor *cursor; |
950 | if (!array || !ctx) { return NULL; } |
951 | if (grn_array_error_if_truncated(ctx, array) != GRN_SUCCESS) { |
952 | return NULL; |
953 | } |
954 | |
955 | cursor = (grn_array_cursor *)GRN_MALLOCN(grn_array_cursor, 1); |
956 | if (!cursor) { return NULL; } |
957 | |
958 | GRN_DB_OBJ_SET_TYPE(cursor, GRN_CURSOR_TABLE_NO_KEY); |
959 | cursor->array = array; |
960 | cursor->ctx = ctx; |
961 | cursor->obj.header.flags = flags; |
962 | cursor->obj.header.domain = GRN_ID_NIL; |
963 | |
964 | if (flags & GRN_CURSOR_DESCENDING) { |
965 | cursor->dir = -1; |
966 | if (max) { |
967 | cursor->curr_rec = max; |
968 | if (!(flags & GRN_CURSOR_LT)) { cursor->curr_rec++; } |
969 | } else { |
970 | cursor->curr_rec = grn_array_get_max_id(array) + 1; |
971 | } |
972 | if (min) { |
973 | cursor->tail = min; |
974 | if ((flags & GRN_CURSOR_GT)) { cursor->tail++; } |
975 | } else { |
976 | cursor->tail = GRN_ID_NIL + 1; |
977 | } |
978 | if (cursor->curr_rec < cursor->tail) { cursor->tail = cursor->curr_rec; } |
979 | } else { |
980 | cursor->dir = 1; |
981 | if (min) { |
982 | cursor->curr_rec = min; |
983 | if (!(flags & GRN_CURSOR_GT)) { cursor->curr_rec--; } |
984 | } else { |
985 | cursor->curr_rec = GRN_ID_NIL; |
986 | } |
987 | if (max) { |
988 | cursor->tail = max; |
989 | if ((flags & GRN_CURSOR_LT)) { cursor->tail--; } |
990 | } else { |
991 | cursor->tail = grn_array_get_max_id(array); |
992 | } |
993 | if (cursor->tail < cursor->curr_rec) { cursor->tail = cursor->curr_rec; } |
994 | } |
995 | |
996 | if (*array->n_garbages) { |
997 | while (offset && cursor->curr_rec != cursor->tail) { |
998 | cursor->curr_rec += cursor->dir; |
999 | if (grn_array_bitmap_at(ctx, cursor->array, cursor->curr_rec) == 1) { |
1000 | offset--; |
1001 | } |
1002 | } |
1003 | } else { |
1004 | cursor->curr_rec += cursor->dir * offset; |
1005 | } |
1006 | cursor->rest = (limit < 0) ? GRN_ARRAY_MAX : limit; |
1007 | return cursor; |
1008 | } |
1009 | |
1010 | grn_id |
1011 | grn_array_cursor_next(grn_ctx *ctx, grn_array_cursor *cursor) |
1012 | { |
1013 | if (cursor && cursor->rest) { |
1014 | while (cursor->curr_rec != cursor->tail) { |
1015 | cursor->curr_rec += cursor->dir; |
1016 | if (*cursor->array->n_garbages) { |
1017 | if (grn_array_bitmap_at(ctx, cursor->array, cursor->curr_rec) != 1) { |
1018 | continue; |
1019 | } |
1020 | } |
1021 | cursor->rest--; |
1022 | return cursor->curr_rec; |
1023 | } |
1024 | } |
1025 | return GRN_ID_NIL; |
1026 | } |
1027 | |
1028 | grn_id |
1029 | grn_array_next(grn_ctx *ctx, grn_array *array, grn_id id) |
1030 | { |
1031 | grn_id max_id; |
1032 | if (grn_array_error_if_truncated(ctx, array) != GRN_SUCCESS) { |
1033 | return GRN_ID_NIL; |
1034 | } |
1035 | max_id = grn_array_get_max_id(array); |
1036 | while (++id <= max_id) { |
1037 | if (!*array->n_garbages || |
1038 | grn_array_bitmap_at(ctx, array, id) == 1) { |
1039 | return id; |
1040 | } |
1041 | } |
1042 | return GRN_ID_NIL; |
1043 | } |
1044 | |
1045 | int |
1046 | grn_array_cursor_get_value(grn_ctx *ctx, grn_array_cursor *cursor, void **value) |
1047 | { |
1048 | if (cursor && value) { |
1049 | void * const entry = grn_array_entry_at(ctx, cursor->array, cursor->curr_rec, 0); |
1050 | if (entry) { |
1051 | *value = entry; |
1052 | return cursor->array->value_size; |
1053 | } |
1054 | } |
1055 | return 0; |
1056 | } |
1057 | |
1058 | grn_rc |
1059 | grn_array_cursor_set_value(grn_ctx *ctx, grn_array_cursor *cursor, |
1060 | const void *value, int flags) |
1061 | { |
1062 | return grn_array_set_value_inline(ctx, cursor->array, cursor->curr_rec, |
1063 | value, flags); |
1064 | } |
1065 | |
1066 | grn_rc |
1067 | grn_array_cursor_delete(grn_ctx *ctx, grn_array_cursor *cursor, |
1068 | grn_table_delete_optarg *optarg) |
1069 | { |
1070 | return grn_array_delete_by_id(ctx, cursor->array, cursor->curr_rec, optarg); |
1071 | } |
1072 | |
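/*
 * grn_array_add_to_tiny_array() and grn_array_add_to_io_array() first try to
 * reuse an ID from the garbage list; only when the list is empty do they
 * extend the array by one entry. In the extend path the bitmap bit is set
 * before the entry is allocated and rolled back if the allocation fails.
 */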
1073 | inline static grn_id |
1074 | grn_array_add_to_tiny_array(grn_ctx *ctx, grn_array *array, void **value) |
1075 | { |
1076 | grn_id id = array->garbages; |
1077 | void *entry; |
1078 | if (id) { |
1079 | /* These operations fail iff the array is broken. */ |
1080 | entry = grn_tiny_array_get(&array->array, id); |
1081 | if (!entry) { |
1082 | return GRN_ID_NIL; |
1083 | } |
1084 | array->garbages = *(grn_id *)entry; |
1085 | memset(entry, 0, array->value_size); |
1086 | (*array->n_garbages)--; |
1087 | if (!grn_tiny_bitmap_get_and_set(&array->bitmap, id, 1)) { |
1088 | /* Actually, it is difficult to recover from this error. */ |
1089 | *(grn_id *)entry = array->garbages; |
1090 | array->garbages = id; |
1091 | (*array->n_garbages)++; |
1092 | return GRN_ID_NIL; |
1093 | } |
1094 | } else { |
1095 | id = array->array.max + 1; |
1096 | if (!grn_tiny_bitmap_put_and_set(&array->bitmap, id, 1)) { |
1097 | return GRN_ID_NIL; |
1098 | } |
1099 | entry = grn_tiny_array_put(&array->array, id); |
1100 | if (!entry) { |
1101 | grn_tiny_bitmap_get_and_set(&array->bitmap, id, 0); |
1102 | return GRN_ID_NIL; |
1103 | } |
1104 | array->array.max = id; |
1105 | } |
1106 | (*array->n_entries)++; |
1107 | if (value) { *value = entry; } |
1108 | return id; |
1109 | } |
1110 | |
1111 | inline static grn_id |
1112 | grn_array_add_to_io_array(grn_ctx *ctx, grn_array *array, void **value) |
1113 | { |
1114 | grn_id id; |
1115 | void *entry; |
  struct grn_array_header *header;
1117 | if (grn_array_error_if_truncated(ctx, array) != GRN_SUCCESS) { |
1118 | return GRN_ID_NIL; |
1119 | } |
1120 | header = array->header; |
1121 | id = header->garbages; |
1122 | if (id) { |
1123 | /* These operations fail iff the array is broken. */ |
1124 | entry = grn_array_io_entry_at(ctx, array, id, GRN_TABLE_ADD); |
1125 | if (!entry) { |
1126 | return GRN_ID_NIL; |
1127 | } |
1128 | header->garbages = *(grn_id *)entry; |
1129 | memset(entry, 0, header->value_size); |
1130 | (*array->n_garbages)--; |
1131 | if (!grn_io_array_bit_on(ctx, array->io, GRN_ARRAY_BITMAP_SEGMENT, id)) { |
1132 | /* Actually, it is difficult to recover from this error. */ |
1133 | *(grn_id *)entry = array->garbages; |
1134 | array->garbages = id; |
1135 | (*array->n_garbages)++; |
1136 | return GRN_ID_NIL; |
1137 | } |
1138 | } else { |
1139 | if (header->curr_rec >= GRN_ARRAY_MAX) { return GRN_ID_NIL; } |
1140 | id = header->curr_rec + 1; |
1141 | if (!grn_io_array_bit_on(ctx, array->io, GRN_ARRAY_BITMAP_SEGMENT, id)) { |
1142 | return GRN_ID_NIL; |
1143 | } |
1144 | entry = grn_array_io_entry_at(ctx, array, id, GRN_TABLE_ADD); |
1145 | if (!entry) { |
1146 | grn_io_array_bit_off(ctx, array->io, GRN_ARRAY_BITMAP_SEGMENT, id); |
1147 | return GRN_ID_NIL; |
1148 | } |
1149 | header->curr_rec = id; |
1150 | } |
1151 | (*array->n_entries)++; |
1152 | if (value) { *value = entry; } |
1153 | return id; |
1154 | } |
1155 | |
1156 | void |
1157 | grn_array_clear_curr_rec(grn_ctx *ctx, grn_array *array) |
1158 | { |
  struct grn_array_header * const header = array->header;
1160 | header->curr_rec = GRN_ID_NIL; |
1161 | } |
1162 | |
1163 | grn_id |
1164 | grn_array_add(grn_ctx *ctx, grn_array *array, void **value) |
1165 | { |
1166 | if (ctx && array) { |
1167 | if (grn_array_is_io_array(array)) { |
1168 | return grn_array_add_to_io_array(ctx, array, value); |
1169 | } else { |
1170 | return grn_array_add_to_tiny_array(ctx, array, value); |
1171 | } |
1172 | } |
1173 | return GRN_ID_NIL; |
1174 | } |
1175 | |
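/*
 * grn_array_push()/grn_array_pull() use the queue in the grn_io-based
 * array's header as a bounded FIFO of entry IDs: push adds an entry and
 * advances `head' (dropping the oldest ID when the queue is full), while
 * pull waits on the condition variable until an ID is available, unless
 * `blockp' is false or grn_array_unblock() was called. Tiny arrays have no
 * queue, so both functions fail for them.
 */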
1176 | grn_id |
1177 | grn_array_push(grn_ctx *ctx, grn_array *array, |
1178 | void (*func)(grn_ctx *, grn_array *, grn_id, void *), |
1179 | void *func_arg) |
1180 | { |
1181 | grn_id id = GRN_ID_NIL; |
1182 | grn_table_queue *queue = grn_array_queue(ctx, array); |
1183 | if (queue) { |
1184 | MUTEX_LOCK(queue->mutex); |
1185 | if (grn_table_queue_head(queue) == queue->cap) { |
1186 | grn_array_clear_curr_rec(ctx, array); |
1187 | } |
1188 | id = grn_array_add(ctx, array, NULL); |
1189 | if (func) { |
1190 | func(ctx, array, id, func_arg); |
1191 | } |
1192 | if (grn_table_queue_size(queue) == queue->cap) { |
1193 | grn_table_queue_tail_increment(queue); |
1194 | } |
1195 | grn_table_queue_head_increment(queue); |
1196 | COND_SIGNAL(queue->cond); |
1197 | MUTEX_UNLOCK(queue->mutex); |
1198 | } else { |
    ERR(GRN_OPERATION_NOT_SUPPORTED, "only persistent arrays support push");
1200 | } |
1201 | return id; |
1202 | } |
1203 | |
1204 | grn_id |
1205 | grn_array_pull(grn_ctx *ctx, grn_array *array, grn_bool blockp, |
1206 | void (*func)(grn_ctx *, grn_array *, grn_id, void *), |
1207 | void *func_arg) |
1208 | { |
1209 | grn_id id = GRN_ID_NIL; |
1210 | grn_table_queue *queue = grn_array_queue(ctx, array); |
1211 | if (queue) { |
1212 | MUTEX_LOCK(queue->mutex); |
1213 | queue->unblock_requested = GRN_FALSE; |
1214 | while (grn_table_queue_size(queue) == 0) { |
1215 | if (!blockp || queue->unblock_requested) { |
1216 | MUTEX_UNLOCK(queue->mutex); |
1217 | GRN_OUTPUT_BOOL(0); |
1218 | return id; |
1219 | } |
1220 | COND_WAIT(queue->cond, queue->mutex); |
1221 | } |
1222 | grn_table_queue_tail_increment(queue); |
1223 | id = grn_table_queue_tail(queue); |
1224 | if (func) { |
1225 | func(ctx, array, id, func_arg); |
1226 | } |
1227 | MUTEX_UNLOCK(queue->mutex); |
1228 | } else { |
    ERR(GRN_OPERATION_NOT_SUPPORTED, "only persistent arrays support pull");
1230 | } |
1231 | return id; |
1232 | } |
1233 | |
1234 | void |
1235 | grn_array_unblock(grn_ctx *ctx, grn_array *array) |
1236 | { |
1237 | grn_table_queue *queue = grn_array_queue(ctx, array); |
1238 | if (!queue) { |
1239 | return; |
1240 | } |
1241 | |
1242 | queue->unblock_requested = GRN_TRUE; |
1243 | COND_BROADCAST(queue->cond); |
1244 | } |
1245 | |
1246 | /* grn_hash : hash table */ |
1247 | |
1248 | #define GRN_HASH_MAX_SEGMENT 0x400 |
#define GRN_HASH_HEADER_SIZE_NORMAL 0x9000
#define GRN_HASH_HEADER_SIZE_LARGE\
1251 | (GRN_HASH_HEADER_SIZE_NORMAL +\ |
1252 | (sizeof(grn_id) *\ |
1253 | (GRN_HASH_MAX_KEY_SIZE_LARGE - GRN_HASH_MAX_KEY_SIZE_NORMAL))) |
1254 | #define GRN_HASH_SEGMENT_SIZE 0x400000 |
1255 | #define GRN_HASH_KEY_MAX_N_SEGMENTS_NORMAL 0x400 |
1256 | #define GRN_HASH_KEY_MAX_N_SEGMENTS_LARGE 0x40000 |
1257 | #define W_OF_KEY_IN_A_SEGMENT 22 |
1258 | #define GRN_HASH_KEY_MAX_TOTAL_SIZE_NORMAL\ |
1259 | (((uint64_t)(1) << W_OF_KEY_IN_A_SEGMENT) *\ |
1260 | GRN_HASH_KEY_MAX_N_SEGMENTS_NORMAL - 1) |
1261 | #define GRN_HASH_KEY_MAX_TOTAL_SIZE_LARGE\ |
1262 | (((uint64_t)(1) << W_OF_KEY_IN_A_SEGMENT) *\ |
1263 | GRN_HASH_KEY_MAX_N_SEGMENTS_LARGE - 1) |
1264 | #define IDX_MASK_IN_A_SEGMENT 0xfffff |
1265 | |
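/*
 * Entry layouts:
 * - grn_plain_hash_entry: fixed 4-byte keys; the key doubles as the hash
 *   value, so only the value is stored.
 * - grn_rich_hash_entry: fixed-size keys longer than 4 bytes, stored inline
 *   together with the value after the hash value.
 * - grn_io_hash_entry_normal/_large: variable-size keys in a grn_io-based
 *   hash; short keys are kept in key.buf (HASH_IMMEDIATE), long keys live in
 *   the key segments and are referenced by key.offset.
 * - grn_tiny_hash_entry: variable-size keys in a tiny hash; long keys are
 *   allocated with GRN_CTX_ALLOC() and referenced by key.ptr.
 */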
1266 | typedef struct { |
1267 | uint8_t key[4]; |
1268 | uint8_t value[1]; |
1269 | } grn_plain_hash_entry; |
1270 | |
1271 | typedef struct { |
1272 | uint32_t hash_value; |
1273 | uint8_t key_and_value[1]; |
1274 | } grn_rich_hash_entry; |
1275 | |
1276 | typedef struct { |
1277 | uint32_t hash_value; |
1278 | uint16_t flag; |
1279 | uint16_t key_size; |
1280 | union { |
1281 | uint8_t buf[sizeof(uint32_t)]; |
1282 | uint32_t offset; |
1283 | } key; |
1284 | uint8_t value[1]; |
1285 | } grn_io_hash_entry_normal; |
1286 | |
1287 | typedef struct { |
1288 | uint32_t hash_value; |
1289 | uint16_t flag; |
1290 | uint16_t key_size; |
1291 | union { |
1292 | uint8_t buf[sizeof(uint64_t)]; |
1293 | uint64_t offset; |
1294 | } key; |
1295 | uint8_t value[1]; |
1296 | } grn_io_hash_entry_large; |
1297 | |
1298 | typedef struct { |
1299 | uint32_t hash_value; |
1300 | uint16_t flag; |
1301 | uint16_t key_size; |
1302 | union { |
1303 | uint8_t buf[sizeof(void *)]; |
1304 | void *ptr; |
1305 | } key; |
1306 | uint8_t value[1]; |
1307 | } grn_tiny_hash_entry; |
1308 | |
1309 | /* |
1310 | * hash_value is valid even if the entry is grn_plain_hash_entry. In this case, |
1311 | * its hash_value equals its key. |
1312 | * flag, key_size and key.buf are valid if the entry has a variable length key. |
1313 | */ |
1314 | typedef struct { |
1315 | uint32_t hash_value; |
1316 | uint16_t flag; |
1317 | uint16_t key_size; |
1318 | } ; |
1319 | |
1320 | typedef union { |
1321 | uint32_t hash_value; |
  grn_hash_entry_header header;
1323 | grn_plain_hash_entry plain_entry; |
1324 | grn_rich_hash_entry rich_entry; |
1325 | grn_io_hash_entry_normal io_entry_normal; |
1326 | grn_io_hash_entry_large io_entry_large; |
1327 | grn_tiny_hash_entry tiny_entry; |
1328 | } grn_hash_entry; |
1329 | |
1330 | typedef struct { |
1331 | uint32_t key; |
1332 | uint8_t dummy[1]; |
1333 | } entry; |
1334 | |
1335 | typedef struct { |
1336 | uint32_t key; |
1337 | uint16_t flag; |
1338 | uint16_t size; |
1339 | uint32_t str; |
1340 | uint8_t dummy[1]; |
1341 | } entry_str; |
1342 | |
1343 | typedef struct { |
1344 | uint32_t key; |
1345 | uint16_t flag; |
1346 | uint16_t size; |
1347 | char *str; |
1348 | uint8_t dummy[1]; |
1349 | } entry_astr; |
1350 | |
1351 | enum { |
1352 | GRN_HASH_KEY_SEGMENT = 0, |
1353 | GRN_HASH_ENTRY_SEGMENT = 1, |
1354 | GRN_HASH_INDEX_SEGMENT = 2, |
1355 | GRN_HASH_BITMAP_SEGMENT = 3 |
1356 | }; |
1357 | |
1358 | inline static int |
1359 | grn_hash_name(grn_ctx *ctx, grn_hash *hash, char *buffer, int buffer_size) |
1360 | { |
1361 | int name_size; |
1362 | |
1363 | if (DB_OBJ(hash)->id == GRN_ID_NIL) { |
    grn_strcpy(buffer, buffer_size, "(anonymous)");
1365 | name_size = strlen(buffer); |
1366 | } else { |
1367 | name_size = grn_obj_name(ctx, (grn_obj *)hash, buffer, buffer_size); |
1368 | } |
1369 | |
1370 | return name_size; |
1371 | } |
1372 | |
1373 | inline static grn_bool |
1374 | grn_hash_is_io_hash(grn_hash *hash) |
1375 | { |
1376 | return hash->io != NULL; |
1377 | } |
1378 | |
1379 | inline static void * |
1380 | grn_io_hash_entry_at(grn_ctx *ctx, grn_hash *hash, grn_id id, int flags) |
1381 | { |
1382 | return grn_io_array_at_inline(ctx, hash->io, GRN_HASH_ENTRY_SEGMENT, id, flags); |
1383 | } |
1384 | |
1385 | /* todo : error handling */ |
1386 | inline static void * |
1387 | grn_hash_entry_at(grn_ctx *ctx, grn_hash *hash, grn_id id, int flags) |
1388 | { |
1389 | if (grn_hash_is_io_hash(hash)) { |
1390 | return grn_io_hash_entry_at(ctx, hash, id, flags); |
1391 | } else { |
1392 | return grn_tiny_array_at_inline(&hash->a, id); |
1393 | } |
1394 | } |
1395 | |
1396 | inline static grn_bool |
1397 | grn_hash_bitmap_at(grn_ctx *ctx, grn_hash *hash, grn_id id) |
1398 | { |
1399 | if (grn_hash_is_io_hash(hash)) { |
1400 | return grn_io_array_bit_at(ctx, hash->io, GRN_HASH_BITMAP_SEGMENT, id) == 1; |
1401 | } else { |
1402 | return grn_tiny_bitmap_put(&hash->bitmap, id) == 1; |
1403 | } |
1404 | } |
1405 | |
1406 | inline static grn_id * |
1407 | grn_io_hash_idx_at(grn_ctx *ctx, grn_hash *hash, grn_id id) |
1408 | { |
1409 | return grn_io_array_at_inline(ctx, hash->io, GRN_HASH_INDEX_SEGMENT, |
1410 | id, GRN_TABLE_ADD); |
1411 | } |
1412 | |
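/*
 * grn_hash_idx_at() returns the index (bucket) slot for a hash value. The
 * slot position is `id & *hash->max_offset', shifted by the header's
 * idx_offset for grn_io-based hashes.
 */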
1413 | inline static grn_id * |
1414 | grn_hash_idx_at(grn_ctx *ctx, grn_hash *hash, grn_id id) |
1415 | { |
1416 | if (grn_hash_is_io_hash(hash)) { |
1417 | id = (id & *hash->max_offset) + hash->header.common->idx_offset; |
1418 | return grn_io_hash_idx_at(ctx, hash, id); |
1419 | } else { |
1420 | return hash->index + (id & *hash->max_offset); |
1421 | } |
1422 | } |
1423 | |
1424 | inline static void * |
1425 | grn_io_hash_key_at(grn_ctx *ctx, grn_hash *hash, uint64_t pos) |
1426 | { |
1427 | return grn_io_array_at_inline(ctx, hash->io, GRN_HASH_KEY_SEGMENT, |
1428 | pos, GRN_TABLE_ADD); |
1429 | } |
1430 | |
1431 | #define HASH_IMMEDIATE 1 |
1432 | |
1433 | #define MAX_INDEX_SIZE ((GRN_HASH_MAX_SEGMENT * (IDX_MASK_IN_A_SEGMENT + 1)) >> 1) |
1434 | |
1435 | inline static uint16_t |
1436 | grn_hash_entry_get_key_size(grn_hash *hash, grn_hash_entry *entry) |
1437 | { |
1438 | if (hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE) { |
1439 | return entry->header.key_size; |
1440 | } else { |
1441 | return hash->key_size; |
1442 | } |
1443 | } |
1444 | |
1445 | inline static char * |
1446 | grn_hash_entry_get_key(grn_ctx *ctx, grn_hash *hash, grn_hash_entry *entry) |
1447 | { |
1448 | if (hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE) { |
1449 | if (grn_hash_is_io_hash(hash)) { |
1450 | if (grn_hash_is_large_total_key_size(ctx, hash)) { |
1451 | if (entry->io_entry_large.flag & HASH_IMMEDIATE) { |
1452 | return (char *)entry->io_entry_large.key.buf; |
1453 | } else { |
1454 | return (char *)grn_io_hash_key_at(ctx, hash, |
1455 | entry->io_entry_large.key.offset); |
1456 | } |
1457 | } else { |
1458 | if (entry->io_entry_normal.flag & HASH_IMMEDIATE) { |
1459 | return (char *)entry->io_entry_normal.key.buf; |
1460 | } else { |
1461 | return (char *)grn_io_hash_key_at(ctx, hash, |
1462 | entry->io_entry_normal.key.offset); |
1463 | } |
1464 | } |
1465 | } else { |
1466 | if (entry->tiny_entry.flag & HASH_IMMEDIATE) { |
1467 | return (char *)entry->tiny_entry.key.buf; |
1468 | } else { |
1469 | return entry->tiny_entry.key.ptr; |
1470 | } |
1471 | } |
1472 | } else { |
1473 | if (hash->key_size == sizeof(uint32_t)) { |
1474 | return (char *)entry->plain_entry.key; |
1475 | } else { |
1476 | return (char *)entry->rich_entry.key_and_value; |
1477 | } |
1478 | } |
1479 | } |
1480 | |
1481 | inline static void * |
1482 | grn_hash_entry_get_value(grn_ctx *ctx, grn_hash *hash, grn_hash_entry *entry) |
1483 | { |
1484 | if (hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE) { |
1485 | if (grn_hash_is_io_hash(hash)) { |
1486 | if (grn_hash_is_large_total_key_size(ctx, hash)) { |
1487 | return entry->io_entry_large.value; |
1488 | } else { |
1489 | return entry->io_entry_normal.value; |
1490 | } |
1491 | } else { |
1492 | return entry->tiny_entry.value; |
1493 | } |
1494 | } else { |
1495 | if (hash->key_size == sizeof(uint32_t)) { |
1496 | return entry->plain_entry.value; |
1497 | } else { |
1498 | return entry->rich_entry.key_and_value + hash->key_size; |
1499 | } |
1500 | } |
1501 | } |
1502 | |
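/*
 * grn_io_hash_entry_put_key() appends a long key to the key segments and
 * records its offset in the entry. `curr_key_normal'/`curr_key_large' track
 * the next free position; a key never spans a segment boundary, so the
 * position is advanced to the next segment when the key would cross one.
 */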
1503 | inline static grn_rc |
1504 | grn_io_hash_entry_put_key(grn_ctx *ctx, grn_hash *hash, |
1505 | grn_hash_entry *entry, |
1506 | const void *key, unsigned int key_size) |
1507 | { |
1508 | grn_bool is_large_mode; |
1509 | grn_bool key_exist; |
1510 | uint64_t key_offset; |
1511 | grn_io_hash_entry_normal *io_entry_normal = &(entry->io_entry_normal); |
1512 | grn_io_hash_entry_large *io_entry_large = &(entry->io_entry_large); |
1513 | |
1514 | is_large_mode = grn_hash_is_large_total_key_size(ctx, hash); |
1515 | |
1516 | if (is_large_mode) { |
1517 | key_exist = (io_entry_large->key_size > 0); |
1518 | } else { |
1519 | key_exist = (io_entry_normal->key_size > 0); |
1520 | } |
1521 | |
1522 | if (key_exist > 0) { |
1523 | if (is_large_mode) { |
1524 | key_offset = io_entry_large->key.offset; |
1525 | } else { |
1526 | key_offset = io_entry_normal->key.offset; |
1527 | } |
1528 | } else { |
1529 | uint64_t segment_id; |
    grn_hash_header_common *header;
1531 | uint64_t curr_key; |
1532 | uint64_t max_total_size; |
1533 | |
1534 | header = hash->header.common; |
1535 | if (key_size >= GRN_HASH_SEGMENT_SIZE) { |
1536 | char name[GRN_TABLE_MAX_KEY_SIZE]; |
1537 | int name_size; |
1538 | name_size = grn_hash_name(ctx, hash, name, GRN_TABLE_MAX_KEY_SIZE); |
      ERR(GRN_INVALID_ARGUMENT,
          "[hash][key][put] too long key: <%.*s>: max=%u: key size=%u",
1541 | name_size, name, |
1542 | GRN_HASH_SEGMENT_SIZE, |
1543 | key_size); |
1544 | return ctx->rc; |
1545 | } |
1546 | |
1547 | if (is_large_mode) { |
1548 | curr_key = header->curr_key_large; |
1549 | max_total_size = GRN_HASH_KEY_MAX_TOTAL_SIZE_LARGE; |
1550 | } else { |
1551 | curr_key = header->curr_key_normal; |
1552 | max_total_size = GRN_HASH_KEY_MAX_TOTAL_SIZE_NORMAL; |
1553 | } |
1554 | |
1555 | if (key_size > (max_total_size - curr_key)) { |
1556 | char name[GRN_TABLE_MAX_KEY_SIZE]; |
1557 | int name_size; |
1558 | name_size = grn_hash_name(ctx, hash, name, GRN_TABLE_MAX_KEY_SIZE); |
      ERR(GRN_NOT_ENOUGH_SPACE,
          "[hash][key][put] total key size is over: <%.*s>: "
          "max=%" GRN_FMT_INT64U ": "
          "current=%" GRN_FMT_INT64U ": "
          "new key size=%u",
1564 | name_size, name, |
1565 | max_total_size, |
1566 | curr_key, |
1567 | key_size); |
1568 | return ctx->rc; |
1569 | } |
1570 | key_offset = curr_key; |
1571 | segment_id = (key_offset + key_size) >> W_OF_KEY_IN_A_SEGMENT; |
1572 | if ((key_offset >> W_OF_KEY_IN_A_SEGMENT) != segment_id) { |
1573 | key_offset = segment_id << W_OF_KEY_IN_A_SEGMENT; |
1574 | if (is_large_mode) { |
1575 | header->curr_key_large = key_offset; |
1576 | } else { |
1577 | header->curr_key_normal = key_offset; |
1578 | } |
1579 | } |
1580 | if (is_large_mode) { |
1581 | header->curr_key_large += key_size; |
1582 | io_entry_large->key.offset = key_offset; |
1583 | } else { |
1584 | header->curr_key_normal += key_size; |
1585 | io_entry_normal->key.offset = key_offset; |
1586 | } |
1587 | } |
1588 | |
1589 | { |
1590 | void * const key_ptr = grn_io_hash_key_at(ctx, hash, key_offset); |
1591 | if (!key_ptr) { |
1592 | char name[GRN_TABLE_MAX_KEY_SIZE]; |
1593 | int name_size; |
1594 | name_size = grn_hash_name(ctx, hash, name, GRN_TABLE_MAX_KEY_SIZE); |
      ERR(GRN_NO_MEMORY_AVAILABLE,
          "[hash][key][put] failed to allocate for new key: <%.*s>: "
          "new offset:%" GRN_FMT_INT64U " "
          "key size:%u",
1599 | name_size, name, |
1600 | key_offset, |
1601 | key_size); |
1602 | return ctx->rc; |
1603 | } |
1604 | grn_memcpy(key_ptr, key, key_size); |
1605 | } |
1606 | return GRN_SUCCESS; |
1607 | } |
1608 | |
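/*
 * grn_hash_entry_put_key() stores a key into an entry. A variable-size key
 * that fits in the entry's inline buffer is copied there and flagged
 * HASH_IMMEDIATE; a longer key goes to the key segments (grn_io-based hash)
 * or to heap memory (tiny hash). A fixed-size 4-byte key is represented by
 * the hash value itself.
 */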
1609 | inline static grn_rc |
1610 | grn_hash_entry_put_key(grn_ctx *ctx, grn_hash *hash, |
1611 | grn_hash_entry *entry, uint32_t hash_value, |
1612 | const void *key, unsigned int key_size) |
1613 | { |
1614 | if (hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE) { |
1615 | if (grn_hash_is_io_hash(hash)) { |
1616 | grn_bool is_large_mode; |
1617 | uint8_t *buffer; |
1618 | size_t buffer_size; |
1619 | uint16_t flag; |
1620 | |
1621 | is_large_mode = grn_hash_is_large_total_key_size(ctx, hash); |
1622 | if (is_large_mode) { |
1623 | buffer = entry->io_entry_large.key.buf; |
1624 | buffer_size = sizeof(entry->io_entry_large.key.buf); |
1625 | } else { |
1626 | buffer = entry->io_entry_normal.key.buf; |
1627 | buffer_size = sizeof(entry->io_entry_normal.key.buf); |
1628 | } |
1629 | |
1630 | if (key_size <= buffer_size) { |
1631 | grn_memcpy(buffer, key, key_size); |
1632 | flag = HASH_IMMEDIATE; |
1633 | } else { |
1634 | const grn_rc rc = |
1635 | grn_io_hash_entry_put_key(ctx, hash, entry, key, key_size); |
1636 | if (rc) { |
1637 | return rc; |
1638 | } |
1639 | flag = 0; |
1640 | } |
1641 | |
1642 | if (is_large_mode) { |
1643 | entry->io_entry_large.flag = flag; |
1644 | entry->io_entry_large.hash_value = hash_value; |
1645 | entry->io_entry_large.key_size = key_size; |
1646 | } else { |
1647 | entry->io_entry_normal.flag = flag; |
1648 | entry->io_entry_normal.hash_value = hash_value; |
1649 | entry->io_entry_normal.key_size = key_size; |
1650 | } |
1651 | } else { |
1652 | if (key_size <= sizeof(entry->tiny_entry.key.buf)) { |
1653 | grn_memcpy(entry->tiny_entry.key.buf, key, key_size); |
1654 | entry->tiny_entry.flag = HASH_IMMEDIATE; |
1655 | } else { |
1656 | grn_ctx * const ctx = hash->ctx; |
1657 | entry->tiny_entry.key.ptr = GRN_CTX_ALLOC(ctx, key_size); |
1658 | if (!entry->tiny_entry.key.ptr) { |
1659 | return GRN_NO_MEMORY_AVAILABLE; |
1660 | } |
1661 | grn_memcpy(entry->tiny_entry.key.ptr, key, key_size); |
1662 | entry->tiny_entry.flag = 0; |
1663 | } |
1664 | entry->tiny_entry.hash_value = hash_value; |
1665 | entry->tiny_entry.key_size = key_size; |
1666 | } |
1667 | } else { |
1668 | if (hash->key_size == sizeof(uint32_t)) { |
1669 | *(uint32_t *)entry->plain_entry.key = hash_value; |
1670 | } else { |
1671 | entry->rich_entry.hash_value = hash_value; |
1672 | grn_memcpy(entry->rich_entry.key_and_value, key, key_size); |
1673 | } |
1674 | } |
1675 | return GRN_SUCCESS; |
1676 | } |
1677 | |
1678 | /* |
1679 | * grn_hash_entry_compare_key() returns GRN_TRUE if the entry key equals the |
1680 | * specified key, or GRN_FALSE otherwise. |
1681 | */ |
1682 | inline static grn_bool |
1683 | grn_hash_entry_compare_key(grn_ctx *ctx, grn_hash *hash, |
1684 | grn_hash_entry *entry, uint32_t hash_value, |
1685 | const void *key, unsigned int key_size) |
1686 | { |
1687 | if (hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE) { |
1688 | if (entry->hash_value != hash_value || |
1689 | entry->header.key_size != key_size) { |
1690 | return GRN_FALSE; |
1691 | } |
1692 | if (grn_hash_is_io_hash(hash)) { |
1693 | if (grn_hash_is_large_total_key_size(ctx, hash)) { |
1694 | if (entry->io_entry_large.flag & HASH_IMMEDIATE) { |
1695 | return !memcmp(key, entry->io_entry_large.key.buf, key_size); |
1696 | } else { |
1697 | const void * const entry_key_ptr = |
1698 | grn_io_hash_key_at(ctx, hash, entry->io_entry_large.key.offset); |
1699 | return !memcmp(key, entry_key_ptr, key_size); |
1700 | } |
1701 | } else { |
1702 | if (entry->io_entry_normal.flag & HASH_IMMEDIATE) { |
1703 | return !memcmp(key, entry->io_entry_normal.key.buf, key_size); |
1704 | } else { |
1705 | const void * const entry_key_ptr = |
1706 | grn_io_hash_key_at(ctx, hash, entry->io_entry_normal.key.offset); |
1707 | return !memcmp(key, entry_key_ptr, key_size); |
1708 | } |
1709 | } |
1710 | } else { |
1711 | if (entry->tiny_entry.flag & HASH_IMMEDIATE) { |
1712 | return !memcmp(key, entry->tiny_entry.key.buf, key_size); |
1713 | } else { |
1714 | return !memcmp(key, entry->tiny_entry.key.ptr, key_size); |
1715 | } |
1716 | } |
1717 | } else { |
1718 | if (entry->hash_value != hash_value) { |
1719 | return GRN_FALSE; |
1720 | } |
1721 | if (key_size == sizeof(uint32_t)) { |
1722 | return GRN_TRUE; |
1723 | } else { |
1724 | return !memcmp(key, entry->rich_entry.key_and_value, key_size); |
1725 | } |
1726 | } |
1727 | } |
1728 | |
1729 | inline static char * |
1730 | get_key(grn_ctx *ctx, grn_hash *hash, entry_str *n) |
1731 | { |
1732 | return grn_hash_entry_get_key(ctx, hash, (grn_hash_entry *)n); |
1733 | } |
1734 | |
1735 | inline static void * |
1736 | get_value(grn_ctx *ctx, grn_hash *hash, entry_str *n) |
1737 | { |
1738 | return grn_hash_entry_get_value(ctx, hash, (grn_hash_entry *)n); |
1739 | } |
1740 | |
1741 | inline static grn_rc |
1742 | put_key(grn_ctx *ctx, grn_hash *hash, entry_str *n, uint32_t h, |
1743 | const char *key, unsigned int len) |
1744 | { |
1745 | return grn_hash_entry_put_key(ctx, hash, (grn_hash_entry *)n, h, key, len); |
1746 | } |
1747 | |
1748 | inline static int |
1749 | match_key(grn_ctx *ctx, grn_hash *hash, entry_str *ee, uint32_t h, |
1750 | const char *key, unsigned int len) |
1751 | { |
1752 | return grn_hash_entry_compare_key(ctx, hash, (grn_hash_entry *)ee, |
1753 | h, key, len); |
1754 | } |
1755 | |
1756 | #define GARBAGE (0xffffffff) |
1757 | |
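/*
 * grn_io_hash_calculate_entry_size() computes the entry size as the offset
 * of the `value' (or `key_and_value') member in the matching entry struct
 * plus the value size (plus the key size for rich entries).
 */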
1758 | inline static uint32_t |
1759 | grn_io_hash_calculate_entry_size(uint32_t key_size, uint32_t value_size, |
1760 | uint32_t flags) |
1761 | { |
1762 | if (flags & GRN_OBJ_KEY_VAR_SIZE) { |
1763 | if (flags & GRN_OBJ_KEY_LARGE) { |
1764 | return (uintptr_t)((grn_io_hash_entry_large *)0)->value + value_size; |
1765 | } else { |
1766 | return (uintptr_t)((grn_io_hash_entry_normal *)0)->value + value_size; |
1767 | } |
1768 | } else { |
1769 | if (key_size == sizeof(uint32_t)) { |
1770 | return (uintptr_t)((grn_plain_hash_entry *)0)->value + value_size; |
1771 | } else { |
1772 | return (uintptr_t)((grn_rich_hash_entry *)0)->key_and_value |
1773 | + key_size + value_size; |
1774 | } |
1775 | } |
1776 | } |
1777 | |
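/*
 * An I/O hash is backed by four grn_io arrays: out-of-line key data, the
 * entries themselves, the id index used for open addressing, and a bitmap of
 * live ids. The segment counts below scale with the element width so that
 * every array can cover the table's whole id range; the key array gets more
 * segments when GRN_OBJ_KEY_LARGE is set.
 */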
1778 | static grn_io * |
1779 | grn_io_hash_create_io(grn_ctx *ctx, const char *path, |
                      uint32_t header_size, uint32_t entry_size,
1781 | uint32_t flags) |
1782 | { |
1783 | uint32_t w_of_element = 0; |
1784 | grn_io_array_spec array_spec[4]; |
1785 | |
1786 | while ((1U << w_of_element) < entry_size) { |
1787 | w_of_element++; |
1788 | } |
1789 | |
1790 | array_spec[GRN_HASH_KEY_SEGMENT].w_of_element = 0; |
1791 | if (flags & GRN_OBJ_KEY_LARGE) { |
1792 | array_spec[GRN_HASH_KEY_SEGMENT].max_n_segments = |
1793 | GRN_HASH_KEY_MAX_N_SEGMENTS_LARGE; |
1794 | } else { |
1795 | array_spec[GRN_HASH_KEY_SEGMENT].max_n_segments = |
1796 | GRN_HASH_KEY_MAX_N_SEGMENTS_NORMAL; |
1797 | } |
1798 | array_spec[GRN_HASH_ENTRY_SEGMENT].w_of_element = w_of_element; |
1799 | array_spec[GRN_HASH_ENTRY_SEGMENT].max_n_segments = |
1800 | 1U << (30 - (22 - w_of_element)); |
1801 | array_spec[GRN_HASH_INDEX_SEGMENT].w_of_element = 2; |
1802 | array_spec[GRN_HASH_INDEX_SEGMENT].max_n_segments = 1U << (30 - (22 - 2)); |
1803 | array_spec[GRN_HASH_BITMAP_SEGMENT].w_of_element = 0; |
1804 | array_spec[GRN_HASH_BITMAP_SEGMENT].max_n_segments = 1U << (30 - (22 + 3)); |
1805 | return grn_io_create_with_array(ctx, path, header_size, |
1806 | GRN_HASH_SEGMENT_SIZE, |
1807 | grn_io_auto, 4, array_spec); |
1808 | } |
1809 | |
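/*
 * The on-disk header layout depends on the maximum key size: tables up to
 * GRN_HASH_MAX_KEY_SIZE_NORMAL use the normal header, larger ones the large
 * header. The initial index size is the smallest power of two that is at
 * least twice init_size (and no smaller than one index segment), and
 * max_offset keeps that size minus one so it can be used as a probe mask.
 */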
1810 | static grn_rc |
1811 | grn_io_hash_init(grn_ctx *ctx, grn_hash *hash, const char *path, |
1812 | uint32_t key_size, uint32_t value_size, uint32_t flags, |
1813 | grn_encoding encoding, uint32_t init_size) |
1814 | { |
1815 | grn_io *io; |
  grn_hash_header_common *header;
  uint32_t header_size, entry_size, max_offset;
1818 | |
1819 | if (key_size <= GRN_HASH_MAX_KEY_SIZE_NORMAL) { |
1820 | header_size = GRN_HASH_HEADER_SIZE_NORMAL; |
1821 | } else { |
1822 | header_size = GRN_HASH_HEADER_SIZE_LARGE; |
1823 | } |
1824 | entry_size = grn_io_hash_calculate_entry_size(key_size, value_size, flags); |
1825 | |
1826 | io = grn_io_hash_create_io(ctx, path, header_size, entry_size, flags); |
1827 | if (!io) { |
1828 | return GRN_NO_MEMORY_AVAILABLE; |
1829 | } |
1830 | grn_io_set_type(io, GRN_TABLE_HASH_KEY); |
1831 | |
1832 | max_offset = IDX_MASK_IN_A_SEGMENT + 1; |
1833 | while (max_offset < init_size * 2) { |
1834 | max_offset *= 2; |
1835 | } |
1836 | max_offset--; |
1837 | |
1838 | if (encoding == GRN_ENC_DEFAULT) { |
1839 | encoding = ctx->encoding; |
1840 | } |
1841 | |
1842 | hash->key_size = key_size; |
1843 | |
1844 | header = grn_io_header(io); |
1845 | header->flags = flags; |
1846 | header->encoding = encoding; |
1847 | header->key_size = key_size; |
1848 | header->curr_rec = 0; |
1849 | header->curr_key_normal = 0; |
1850 | header->curr_key_large = 0; |
1851 | header->lock = 0; |
1852 | header->idx_offset = 0; |
1853 | header->value_size = value_size; |
1854 | header->entry_size = entry_size; |
1855 | header->max_offset = max_offset; |
1856 | header->n_entries = 0; |
1857 | header->n_garbages = 0; |
1858 | header->tokenizer = GRN_ID_NIL; |
1859 | if (header->flags & GRN_OBJ_KEY_NORMALIZE) { |
1860 | header->flags &= ~GRN_OBJ_KEY_NORMALIZE; |
1861 | hash->normalizer = grn_ctx_get(ctx, GRN_NORMALIZER_AUTO_NAME, -1); |
1862 | header->normalizer = grn_obj_id(ctx, hash->normalizer); |
1863 | } else { |
1864 | hash->normalizer = NULL; |
1865 | header->normalizer = GRN_ID_NIL; |
1866 | } |
1867 | header->truncated = GRN_FALSE; |
1868 | GRN_PTR_INIT(&(hash->token_filters), GRN_OBJ_VECTOR, GRN_ID_NIL); |
1869 | { |
1870 | grn_table_queue *queue; |
1871 | if (GRN_HASH_IS_LARGE_KEY(hash)) { |
1872 | queue = &(((grn_hash_header_large *)(header))->queue); |
1873 | } else { |
1874 | queue = &(((grn_hash_header_normal *)(header))->queue); |
1875 | } |
1876 | grn_table_queue_init(ctx, queue); |
1877 | } |
1878 | |
1879 | hash->obj.header.flags = (header->flags & GRN_OBJ_FLAGS_MASK); |
1880 | hash->ctx = ctx; |
1881 | hash->encoding = encoding; |
1882 | hash->value_size = value_size; |
1883 | hash->entry_size = entry_size; |
1884 | hash->n_garbages = &header->n_garbages; |
1885 | hash->n_entries = &header->n_entries; |
1886 | hash->max_offset = &header->max_offset; |
1887 | hash->io = io; |
1888 | hash->header.common = header; |
1889 | hash->lock = &header->lock; |
1890 | hash->tokenizer = NULL; |
1891 | return GRN_SUCCESS; |
1892 | } |
1893 | |
1894 | #define INITIAL_INDEX_SIZE 256U |
1895 | |
1896 | static uint32_t |
1897 | grn_tiny_hash_calculate_entry_size(uint32_t key_size, uint32_t value_size, |
1898 | uint32_t flags) |
1899 | { |
1900 | uint32_t entry_size; |
1901 | if (flags & GRN_OBJ_KEY_VAR_SIZE) { |
1902 | entry_size = (uintptr_t)((grn_tiny_hash_entry *)0)->value + value_size; |
1903 | } else { |
1904 | if (key_size == sizeof(uint32_t)) { |
1905 | entry_size = (uintptr_t)((grn_plain_hash_entry *)0)->value + value_size; |
1906 | } else { |
1907 | entry_size = (uintptr_t)((grn_rich_hash_entry *)0)->key_and_value |
1908 | + key_size + value_size; |
1909 | } |
1910 | } |
1911 | if (entry_size != sizeof(uint32_t)) { |
1912 | entry_size += sizeof(uintptr_t) - 1; |
1913 | entry_size &= ~(sizeof(uintptr_t) - 1); |
1914 | } |
1915 | return entry_size; |
1916 | } |
1917 | |
1918 | static grn_rc |
1919 | grn_tiny_hash_init(grn_ctx *ctx, grn_hash *hash, const char *path, |
1920 | uint32_t key_size, uint32_t value_size, uint32_t flags, |
1921 | grn_encoding encoding) |
1922 | { |
1923 | uint32_t entry_size; |
1924 | |
1925 | if (path) { |
1926 | return GRN_INVALID_ARGUMENT; |
1927 | } |
1928 | hash->index = GRN_CTX_ALLOC(ctx, INITIAL_INDEX_SIZE * sizeof(grn_id)); |
1929 | if (!hash->index) { |
1930 | return GRN_NO_MEMORY_AVAILABLE; |
1931 | } |
1932 | |
1933 | entry_size = grn_tiny_hash_calculate_entry_size(key_size, value_size, flags); |
1934 | hash->obj.header.flags = flags; |
1935 | hash->ctx = ctx; |
1936 | hash->key_size = key_size; |
1937 | hash->encoding = encoding; |
1938 | hash->value_size = value_size; |
1939 | hash->entry_size = entry_size; |
1940 | hash->n_garbages = &hash->n_garbages_; |
1941 | hash->n_entries = &hash->n_entries_; |
1942 | hash->max_offset = &hash->max_offset_; |
1943 | hash->max_offset_ = INITIAL_INDEX_SIZE - 1; |
1944 | hash->io = NULL; |
1945 | hash->header.common = NULL; |
1946 | hash->n_garbages_ = 0; |
1947 | hash->n_entries_ = 0; |
1948 | hash->garbages = GRN_ID_NIL; |
1949 | hash->tokenizer = NULL; |
1950 | hash->normalizer = NULL; |
1951 | GRN_PTR_INIT(&(hash->token_filters), GRN_OBJ_VECTOR, GRN_ID_NIL); |
1952 | grn_tiny_array_init(ctx, &hash->a, entry_size, GRN_TINY_ARRAY_CLEAR); |
1953 | grn_tiny_bitmap_init(ctx, &hash->bitmap); |
1954 | return GRN_SUCCESS; |
1955 | } |
1956 | |
1957 | static grn_rc |
1958 | grn_hash_init(grn_ctx *ctx, grn_hash *hash, const char *path, |
1959 | uint32_t key_size, uint32_t value_size, uint32_t flags) |
1960 | { |
1961 | if (flags & GRN_HASH_TINY) { |
1962 | return grn_tiny_hash_init(ctx, hash, path, key_size, value_size, |
1963 | flags, ctx->encoding); |
1964 | } else { |
1965 | return grn_io_hash_init(ctx, hash, path, key_size, value_size, |
1966 | flags, ctx->encoding, 0); |
1967 | } |
1968 | } |
1969 | |
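/*
 * Typical usage, as a minimal sketch (error handling omitted; assumes a
 * fixed 4-byte key and an 8-byte value):
 *
 *   grn_hash *h = grn_hash_create(ctx, NULL, sizeof(grn_id), sizeof(uint64_t),
 *                                 GRN_HASH_TINY);
 *   uint32_t key = 29;
 *   void *value = NULL;
 *   int added = 0;
 *   grn_id id = grn_hash_add(ctx, h, &key, sizeof(key), &value, &added);
 *   grn_hash_close(ctx, h);
 *
 * Passing a NULL path keeps the table anonymous; GRN_HASH_TINY selects the
 * purely in-memory implementation, so a path must not be given in that case.
 */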
1970 | grn_hash * |
1971 | grn_hash_create(grn_ctx *ctx, const char *path, uint32_t key_size, uint32_t value_size, |
1972 | uint32_t flags) |
1973 | { |
1974 | grn_hash *hash; |
1975 | if (!ctx) { |
1976 | return NULL; |
1977 | } |
1978 | if (key_size > GRN_HASH_MAX_KEY_SIZE_LARGE) { |
1979 | return NULL; |
1980 | } |
1981 | hash = (grn_hash *)GRN_CALLOC(sizeof(grn_hash)); |
1982 | if (!hash) { |
1983 | return NULL; |
1984 | } |
1985 | GRN_DB_OBJ_SET_TYPE(hash, GRN_TABLE_HASH_KEY); |
1986 | if (grn_hash_init(ctx, hash, path, key_size, value_size, flags)) { |
1987 | GRN_FREE(hash); |
1988 | return NULL; |
1989 | } |
1990 | return hash; |
1991 | } |
1992 | |
1993 | grn_hash * |
1994 | grn_hash_open(grn_ctx *ctx, const char *path) |
1995 | { |
1996 | if (ctx) { |
1997 | grn_io * const io = grn_io_open(ctx, path, grn_io_auto); |
1998 | if (io) { |
      grn_hash_header_common * const header = grn_io_header(io);
2000 | uint32_t io_type = grn_io_get_type(io); |
2001 | if (io_type == GRN_TABLE_HASH_KEY) { |
2002 | grn_hash * const hash = (grn_hash *)GRN_MALLOC(sizeof(grn_hash)); |
2003 | if (hash) { |
2004 | if (!(header->flags & GRN_HASH_TINY)) { |
2005 | GRN_DB_OBJ_SET_TYPE(hash, GRN_TABLE_HASH_KEY); |
2006 | hash->ctx = ctx; |
2007 | hash->key_size = header->key_size; |
2008 | hash->encoding = header->encoding; |
2009 | hash->value_size = header->value_size; |
2010 | hash->entry_size = header->entry_size; |
2011 | hash->n_garbages = &header->n_garbages; |
2012 | hash->n_entries = &header->n_entries; |
2013 | hash->max_offset = &header->max_offset; |
2014 | hash->io = io; |
2015 | hash->header.common = header; |
2016 | hash->lock = &header->lock; |
2017 | hash->tokenizer = grn_ctx_at(ctx, header->tokenizer); |
2018 | if (header->flags & GRN_OBJ_KEY_NORMALIZE) { |
2019 | header->flags &= ~GRN_OBJ_KEY_NORMALIZE; |
2020 | hash->normalizer = grn_ctx_get(ctx, GRN_NORMALIZER_AUTO_NAME, -1); |
2021 | header->normalizer = grn_obj_id(ctx, hash->normalizer); |
2022 | } else { |
2023 | hash->normalizer = grn_ctx_at(ctx, header->normalizer); |
2024 | } |
2025 | GRN_PTR_INIT(&(hash->token_filters), GRN_OBJ_VECTOR, GRN_ID_NIL); |
2026 | hash->obj.header.flags = header->flags; |
2027 | return hash; |
2028 | } else { |
2029 | GRN_LOG(ctx, GRN_LOG_NOTICE, |
2030 | "invalid hash flag. (%x)" , header->flags); |
2031 | } |
2032 | GRN_FREE(hash); |
2033 | } |
2034 | } else { |
2035 | ERR(GRN_INVALID_FORMAT, |
2036 | "[table][hash] file type must be %#04x: <%#04x>" , |
2037 | GRN_TABLE_HASH_KEY, io_type); |
2038 | } |
2039 | grn_io_close(ctx, io); |
2040 | } |
2041 | } |
2042 | return NULL; |
2043 | } |
2044 | |
2045 | /* |
2046 | * grn_hash_error_if_truncated() logs an error and returns its error code if |
2047 | * a hash is truncated by another process. |
2048 | * Otherwise, this function returns GRN_SUCCESS. |
2049 | * Note that `ctx` and `hash` must be valid. |
2050 | * |
2051 | * FIXME: A hash should be reopened if possible. |
2052 | */ |
2053 | static grn_rc |
2054 | grn_hash_error_if_truncated(grn_ctx *ctx, grn_hash *hash) |
2055 | { |
2056 | if (hash->header.common && hash->header.common->truncated) { |
2057 | ERR(GRN_FILE_CORRUPT, |
2058 | "hash is truncated, please unmap or reopen the database" ); |
2059 | return GRN_FILE_CORRUPT; |
2060 | } |
2061 | return GRN_SUCCESS; |
2062 | } |
2063 | |
2064 | static grn_rc |
2065 | grn_tiny_hash_fin(grn_ctx *ctx, grn_hash *hash) |
2066 | { |
2067 | if (!hash->index) { |
2068 | return GRN_INVALID_ARGUMENT; |
2069 | } |
2070 | |
2071 | GRN_OBJ_FIN(ctx, &(hash->token_filters)); |
2072 | |
2073 | if (hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE) { |
2074 | uint32_t num_remaining_entries = *hash->n_entries; |
2075 | grn_id *hash_ptr; |
2076 | for (hash_ptr = hash->index; num_remaining_entries; hash_ptr++) { |
2077 | const grn_id id = *hash_ptr; |
2078 | if (id && id != GARBAGE) { |
2079 | grn_tiny_hash_entry * const entry = |
2080 | (grn_tiny_hash_entry *)grn_tiny_array_get(&hash->a, id); |
2081 | GRN_ASSERT(entry); |
2082 | num_remaining_entries--; |
2083 | if (entry && !(entry->flag & HASH_IMMEDIATE)) { |
2084 | GRN_CTX_FREE(ctx, entry->key.ptr); |
2085 | } |
2086 | } |
2087 | } |
2088 | } |
2089 | grn_tiny_array_fin(&hash->a); |
2090 | grn_tiny_bitmap_fin(&hash->bitmap); |
2091 | GRN_CTX_FREE(ctx, hash->index); |
2092 | return GRN_SUCCESS; |
2093 | } |
2094 | |
2095 | grn_rc |
2096 | grn_hash_close(grn_ctx *ctx, grn_hash *hash) |
2097 | { |
2098 | grn_rc rc; |
2099 | if (!ctx || !hash) { return GRN_INVALID_ARGUMENT; } |
2100 | if (grn_hash_is_io_hash(hash)) { |
2101 | rc = grn_io_close(ctx, hash->io); |
2102 | GRN_OBJ_FIN(ctx, &(hash->token_filters)); |
2103 | } else { |
2104 | GRN_ASSERT(ctx == hash->ctx); |
2105 | rc = grn_tiny_hash_fin(ctx, hash); |
2106 | } |
2107 | GRN_FREE(hash); |
2108 | return rc; |
2109 | } |
2110 | |
2111 | grn_rc |
2112 | grn_hash_remove(grn_ctx *ctx, const char *path) |
2113 | { |
2114 | if (!ctx || !path) { return GRN_INVALID_ARGUMENT; } |
2115 | return grn_io_remove(ctx, path); |
2116 | } |
2117 | |
2118 | grn_rc |
2119 | grn_hash_truncate(grn_ctx *ctx, grn_hash *hash) |
2120 | { |
2121 | grn_rc rc; |
2122 | char *path = NULL; |
2123 | uint32_t key_size, value_size, flags; |
2124 | |
2125 | if (!ctx || !hash) { |
2126 | return GRN_INVALID_ARGUMENT; |
2127 | } |
2128 | rc = grn_hash_error_if_truncated(ctx, hash); |
2129 | if (rc != GRN_SUCCESS) { |
2130 | return rc; |
2131 | } |
2132 | |
2133 | if (grn_hash_is_io_hash(hash)) { |
2134 | const char * const io_path = grn_io_path(hash->io); |
2135 | if (io_path && *io_path) { |
2136 | path = GRN_STRDUP(io_path); |
2137 | if (!path) { |
2138 | ERR(GRN_NO_MEMORY_AVAILABLE, "cannot duplicate path: <%s>" , io_path); |
2139 | return GRN_NO_MEMORY_AVAILABLE; |
2140 | } |
2141 | } |
2142 | } |
2143 | key_size = hash->key_size; |
2144 | value_size = hash->value_size; |
2145 | flags = hash->obj.header.flags; |
2146 | |
2147 | if (grn_hash_is_io_hash(hash)) { |
2148 | if (path) { |
2149 | /* Only an I/O hash with a valid path uses the `truncated` flag. */ |
2150 | hash->header.common->truncated = GRN_TRUE; |
2151 | } |
2152 | rc = grn_io_close(ctx, hash->io); |
2153 | if (!rc) { |
2154 | hash->io = NULL; |
2155 | if (path) { |
2156 | rc = grn_io_remove(ctx, path); |
2157 | } |
2158 | } |
2159 | GRN_OBJ_FIN(ctx, &(hash->token_filters)); |
2160 | } |
2161 | if (!rc) { |
2162 | rc = grn_hash_init(ctx, hash, path, key_size, value_size, flags); |
2163 | } |
2164 | if (path) { |
2165 | GRN_FREE(path); |
2166 | } |
2167 | return rc; |
2168 | } |
2169 | |
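/*
 * Variable-size and non-4-byte keys are hashed with a simple multiplicative
 * rolling hash (multiply by 1021, add the next byte). Fixed 4-byte keys skip
 * this and use the key value itself as the hash value (see grn_hash_add() and
 * grn_hash_get()).
 */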
2170 | inline static uint32_t |
2171 | grn_hash_calculate_hash_value(const void *ptr, uint32_t size) |
2172 | { |
2173 | uint32_t i; |
2174 | uint32_t hash_value = 0; |
2175 | for (i = 0; i < size; i++) { |
2176 | hash_value = (hash_value * 1021) + ((const uint8_t *)ptr)[i]; |
2177 | } |
2178 | return hash_value; |
2179 | } |
2180 | |
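/*
 * Collisions are resolved by double hashing: the probe step is derived from
 * the hash value and ORed with 0x1010101, which makes it odd and non-zero,
 * so the probe sequence visits every slot of the power-of-two sized index.
 */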
2181 | inline static uint32_t |
2182 | grn_hash_calculate_step(uint32_t hash_value) |
2183 | { |
2184 | return (hash_value >> 2) | 0x1010101; |
2185 | } |
2186 | |
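/*
 * grn_hash_reset() rebuilds the id index with room for at least twice the
 * expected number of entries. Live entries are reinserted and GARBAGE slots
 * are discarded, so n_garbages drops back to zero. An I/O hash keeps two
 * index generations in the same segment array and flips between them via
 * header->idx_offset; a tiny hash allocates a fresh in-memory index and
 * frees the old one.
 */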
2187 | static grn_rc |
2188 | grn_hash_reset(grn_ctx *ctx, grn_hash *hash, uint32_t expected_n_entries) |
2189 | { |
2190 | grn_id *new_index = NULL; |
2191 | uint32_t new_index_size = INITIAL_INDEX_SIZE; |
2192 | grn_id *src_ptr = NULL, *dest_ptr = NULL; |
2193 | uint32_t src_offset = 0, dest_offset = 0; |
2194 | const uint32_t n_entries = *hash->n_entries; |
2195 | const uint32_t max_offset = *hash->max_offset; |
2196 | |
2197 | if (!expected_n_entries) { |
2198 | expected_n_entries = n_entries * 2; |
2199 | } |
2200 | if (expected_n_entries > INT_MAX) { |
2201 | return GRN_NO_MEMORY_AVAILABLE; |
2202 | } |
2203 | while (new_index_size <= expected_n_entries) { |
2204 | new_index_size *= 2; |
2205 | } |
2206 | |
2207 | if (grn_hash_is_io_hash(hash)) { |
2208 | uint32_t i; |
2209 | src_offset = hash->header.common->idx_offset; |
2210 | dest_offset = MAX_INDEX_SIZE - src_offset; |
2211 | for (i = 0; i < new_index_size; i += (IDX_MASK_IN_A_SEGMENT + 1)) { |
2212 | /* |
2213 | * The following grn_io_hash_idx_at() allocates memory for a new segment |
2214 | * and returns a pointer to the new segment. It's actually bad manners |
2215 | * but faster than calling grn_io_hash_idx_at() for each element. |
2216 | */ |
2217 | dest_ptr = grn_io_hash_idx_at(ctx, hash, i + dest_offset); |
2218 | if (!dest_ptr) { |
2219 | return GRN_NO_MEMORY_AVAILABLE; |
2220 | } |
2221 | memset(dest_ptr, 0, GRN_HASH_SEGMENT_SIZE); |
2222 | } |
2223 | } else { |
2224 | GRN_ASSERT(ctx == hash->ctx); |
2225 | new_index = GRN_CTX_ALLOC(ctx, new_index_size * sizeof(grn_id)); |
2226 | if (!new_index) { |
2227 | return GRN_NO_MEMORY_AVAILABLE; |
2228 | } |
2229 | src_ptr = hash->index; |
2230 | } |
2231 | |
2232 | { |
2233 | uint32_t src_pos, count; |
2234 | const uint32_t new_max_offset = new_index_size - 1; |
2235 | for (count = 0, src_pos = 0; count < n_entries && src_pos <= max_offset; |
2236 | src_pos++, src_ptr++) { |
2237 | uint32_t i, step; |
2238 | grn_id entry_id; |
2239 | grn_hash_entry *entry; |
2240 | if (grn_hash_is_io_hash(hash) && !(src_pos & IDX_MASK_IN_A_SEGMENT)) { |
2241 | src_ptr = grn_io_hash_idx_at(ctx, hash, src_pos + src_offset); |
2242 | if (!src_ptr) { |
2243 | return GRN_NO_MEMORY_AVAILABLE; |
2244 | } |
2245 | } |
2246 | entry_id = *src_ptr; |
2247 | if (!entry_id || (entry_id == GARBAGE)) { |
2248 | continue; |
2249 | } |
2250 | entry = grn_hash_entry_at(ctx, hash, entry_id, GRN_TABLE_ADD); |
2251 | if (!entry) { |
2252 | return GRN_NO_MEMORY_AVAILABLE; |
2253 | } |
2254 | step = grn_hash_calculate_step(entry->hash_value); |
2255 | for (i = entry->hash_value; ; i += step) { |
2256 | i &= new_max_offset; |
2257 | if (grn_hash_is_io_hash(hash)) { |
2258 | dest_ptr = grn_io_hash_idx_at(ctx, hash, i + dest_offset); |
2259 | if (!dest_ptr) { |
2260 | return GRN_NO_MEMORY_AVAILABLE; |
2261 | } |
2262 | } else { |
2263 | dest_ptr = new_index + i; |
2264 | } |
2265 | if (!*dest_ptr) { |
2266 | break; |
2267 | } |
2268 | } |
2269 | *dest_ptr = entry_id; |
2270 | count++; |
2271 | } |
2272 | *hash->max_offset = new_max_offset; |
2273 | *hash->n_garbages = 0; |
2274 | } |
2275 | |
2276 | if (grn_hash_is_io_hash(hash)) { |
2277 | hash->header.common->idx_offset = dest_offset; |
2278 | } else { |
2279 | grn_id * const old_index = hash->index; |
2280 | hash->index = new_index; |
2281 | GRN_CTX_FREE(ctx, old_index); |
2282 | } |
2283 | |
2284 | return GRN_SUCCESS; |
2285 | } |
2286 | |
2287 | grn_rc |
2288 | grn_hash_lock(grn_ctx *ctx, grn_hash *hash, int timeout) |
2289 | { |
2290 | static int _ncalls = 0, _ncolls = 0; |
2291 | uint32_t count; |
2292 | _ncalls++; |
2293 | for (count = 0;; count++) { |
2294 | uint32_t lock; |
2295 | GRN_ATOMIC_ADD_EX(hash->lock, 1, lock); |
2296 | if (lock) { |
2297 | GRN_ATOMIC_ADD_EX(hash->lock, -1, lock); |
2298 | if (!timeout || (timeout > 0 && timeout == count)) { break; } |
2299 | if (!(++_ncolls % 1000000) && (_ncolls > _ncalls)) { |
2300 | if (_ncolls < 0 || _ncalls < 0) { |
2301 | _ncolls = 0; _ncalls = 0; |
2302 | } else { |
2303 | GRN_LOG(ctx, GRN_LOG_NOTICE, "hash(%p) collisions(%d/%d)" , hash, _ncolls, _ncalls); |
2304 | } |
2305 | } |
2306 | grn_nanosleep(GRN_LOCK_WAIT_TIME_NANOSECOND); |
2307 | continue; |
2308 | } |
2309 | return GRN_SUCCESS; |
2310 | } |
2311 | ERR(GRN_RESOURCE_DEADLOCK_AVOIDED, "grn_hash_lock" ); |
2312 | return ctx->rc; |
2313 | } |
2314 | |
2315 | grn_rc |
2316 | grn_hash_unlock(grn_ctx *ctx, grn_hash *hash) |
2317 | { |
2318 | uint32_t lock; |
2319 | GRN_ATOMIC_ADD_EX(hash->lock, -1, lock); |
2320 | return GRN_SUCCESS; |
2321 | } |
2322 | |
2323 | grn_rc |
2324 | grn_hash_clear_lock(grn_ctx *ctx, grn_hash *hash) |
2325 | { |
2326 | *hash->lock = 0; |
2327 | return GRN_SUCCESS; |
2328 | } |
2329 | |
2330 | uint32_t |
2331 | grn_hash_size(grn_ctx *ctx, grn_hash *hash) |
2332 | { |
2333 | if (grn_hash_error_if_truncated(ctx, hash) != GRN_SUCCESS) { |
2334 | return 0; |
2335 | } |
2336 | return *hash->n_entries; |
2337 | } |
2338 | |
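/*
 * grn_io_hash_add() first tries to reuse an entry from the garbage chain for
 * this key size (each garbage entry stores the id of the next one at its
 * start); only when the chain is empty is a fresh id allocated after
 * curr_rec. grn_tiny_hash_add() below works the same way with a single
 * garbage chain.
 */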
2339 | inline static grn_id |
2340 | grn_io_hash_add(grn_ctx *ctx, grn_hash *hash, uint32_t hash_value, |
2341 | const void *key, unsigned int key_size, void **value) |
2342 | { |
2343 | grn_id entry_id; |
2344 | grn_hash_entry *entry; |
  grn_hash_header_common * const header = hash->header.common;
2346 | grn_id *garbages; |
2347 | |
2348 | if (GRN_HASH_IS_LARGE_KEY(hash)) { |
2349 | garbages = hash->header.large->garbages; |
2350 | } else { |
2351 | garbages = hash->header.normal->garbages; |
2352 | } |
2353 | |
2354 | entry_id = garbages[key_size - 1]; |
2355 | if (entry_id) { |
2356 | entry = grn_io_hash_entry_at(ctx, hash, entry_id, GRN_TABLE_ADD); |
2357 | if (!entry) { |
2358 | return GRN_ID_NIL; |
2359 | } |
2360 | garbages[key_size - 1] = *(grn_id *)entry; |
2361 | if (hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE) { |
2362 | /* keep entry->io_entry's hash_value, flag, key_size and key. */ |
2363 | if (grn_hash_is_large_total_key_size(ctx, hash)) { |
2364 | memset(entry->io_entry_large.value, 0, header->value_size); |
2365 | } else { |
2366 | memset(entry->io_entry_normal.value, 0, header->value_size); |
2367 | } |
2368 | } else { |
2369 | memset(entry, 0, header->entry_size); |
2370 | } |
2371 | } else { |
2372 | entry_id = header->curr_rec + 1; |
2373 | entry = grn_hash_entry_at(ctx, hash, entry_id, GRN_TABLE_ADD); |
2374 | if (!entry) { |
2375 | return GRN_ID_NIL; |
2376 | } |
2377 | header->curr_rec = entry_id; |
2378 | } |
2379 | |
2380 | if (!grn_io_array_bit_on(ctx, hash->io, GRN_HASH_BITMAP_SEGMENT, entry_id)) { |
2381 | /* TODO: error handling. */ |
2382 | } |
2383 | |
2384 | if (grn_hash_entry_put_key(ctx, hash, entry, hash_value, key, key_size)) { |
2385 | grn_hash_delete_by_id(ctx, hash, entry_id, NULL); |
2386 | return GRN_ID_NIL; |
2387 | } |
2388 | |
2389 | if (value) { |
2390 | *value = grn_hash_entry_get_value(ctx, hash, entry); |
2391 | } |
2392 | return entry_id; |
2393 | } |
2394 | |
2395 | inline static grn_id |
2396 | grn_tiny_hash_add(grn_ctx *ctx, grn_hash *hash, uint32_t hash_value, |
2397 | const void *key, unsigned int key_size, void **value) |
2398 | { |
2399 | grn_id entry_id; |
2400 | grn_hash_entry *entry; |
2401 | if (hash->garbages) { |
2402 | entry_id = hash->garbages; |
2403 | entry = (grn_hash_entry *)grn_tiny_array_get(&hash->a, entry_id); |
2404 | hash->garbages = *(grn_id *)entry; |
2405 | memset(entry, 0, hash->entry_size); |
2406 | } else { |
2407 | entry_id = hash->a.max + 1; |
2408 | entry = (grn_hash_entry *)grn_tiny_array_put(&hash->a, entry_id); |
2409 | if (!entry) { |
2410 | return GRN_ID_NIL; |
2411 | } |
2412 | } |
2413 | |
2414 | if (!grn_tiny_bitmap_put_and_set(&hash->bitmap, entry_id, 1)) { |
2415 | /* TODO: error handling. */ |
2416 | } |
2417 | |
2418 | if (grn_hash_entry_put_key(ctx, hash, entry, hash_value, key, key_size)) { |
2419 | /* TODO: error handling. */ |
2420 | } |
2421 | |
2422 | if (value) { |
2423 | *value = grn_hash_entry_get_value(ctx, hash, entry); |
2424 | } |
2425 | return entry_id; |
2426 | } |
2427 | |
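/*
 * grn_hash_add() probes the index by double hashing. The index is rebuilt via
 * grn_hash_reset() once the number of used slots (entries plus garbage)
 * exceeds half of max_offset, and an error is reported when max_offset
 * already exceeds 2^29. If the key is found, its id is returned with *added
 * set to 0; otherwise the new id goes into the first GARBAGE slot seen during
 * probing, if any, or into the empty slot that ended the probe.
 */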
2428 | grn_id |
2429 | grn_hash_add(grn_ctx *ctx, grn_hash *hash, const void *key, |
2430 | unsigned int key_size, void **value, int *added) |
2431 | { |
2432 | uint32_t hash_value; |
2433 | if (grn_hash_error_if_truncated(ctx, hash) != GRN_SUCCESS) { |
2434 | return GRN_ID_NIL; |
2435 | } |
2436 | if (!key || !key_size) { |
2437 | return GRN_ID_NIL; |
2438 | } |
2439 | if (hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE) { |
2440 | if (key_size > hash->key_size) { |
2441 | ERR(GRN_INVALID_ARGUMENT, "too long key" ); |
2442 | return GRN_ID_NIL; |
2443 | } |
2444 | hash_value = grn_hash_calculate_hash_value(key, key_size); |
2445 | } else { |
2446 | if (key_size != hash->key_size) { |
2447 | ERR(GRN_INVALID_ARGUMENT, "key size unmatch" ); |
2448 | return GRN_ID_NIL; |
2449 | } |
2450 | if (key_size == sizeof(uint32_t)) { |
2451 | hash_value = *((uint32_t *)key); |
2452 | } else { |
2453 | hash_value = grn_hash_calculate_hash_value(key, key_size); |
2454 | } |
2455 | } |
2456 | |
2457 | { |
2458 | uint32_t i; |
2459 | const uint32_t step = grn_hash_calculate_step(hash_value); |
2460 | grn_id id, *index, *garbage_index = NULL; |
2461 | grn_hash_entry *entry; |
2462 | |
2463 | /* lock */ |
2464 | if ((*hash->n_entries + *hash->n_garbages) * 2 > *hash->max_offset) { |
2465 | if (*hash->max_offset > (1 << 29)) { |
2466 | ERR(GRN_TOO_LARGE_OFFSET, "hash table size limit" ); |
2467 | return GRN_ID_NIL; |
2468 | } |
2469 | grn_hash_reset(ctx, hash, 0); |
2470 | } |
2471 | |
2472 | for (i = hash_value; ; i += step) { |
2473 | index = grn_hash_idx_at(ctx, hash, i); |
2474 | if (!index) { |
2475 | return GRN_ID_NIL; |
2476 | } |
2477 | id = *index; |
2478 | if (!id) { |
2479 | break; |
2480 | } |
2481 | if (id == GARBAGE) { |
2482 | if (!garbage_index) { |
2483 | garbage_index = index; |
2484 | } |
2485 | continue; |
2486 | } |
2487 | |
2488 | entry = grn_hash_entry_at(ctx, hash, id, GRN_TABLE_ADD); |
2489 | if (!entry) { |
2490 | return GRN_ID_NIL; |
2491 | } |
2492 | if (grn_hash_entry_compare_key(ctx, hash, entry, hash_value, |
2493 | key, key_size)) { |
2494 | if (value) { |
2495 | *value = grn_hash_entry_get_value(ctx, hash, entry); |
2496 | } |
2497 | if (added) { |
2498 | *added = 0; |
2499 | } |
2500 | return id; |
2501 | } |
2502 | } |
2503 | |
2504 | if (grn_hash_is_io_hash(hash)) { |
2505 | id = grn_io_hash_add(ctx, hash, hash_value, key, key_size, value); |
2506 | } else { |
2507 | id = grn_tiny_hash_add(ctx, hash, hash_value, key, key_size, value); |
2508 | } |
2509 | if (!id) { |
2510 | return GRN_ID_NIL; |
2511 | } |
2512 | if (garbage_index) { |
2513 | (*hash->n_garbages)--; |
2514 | index = garbage_index; |
2515 | } |
2516 | *index = id; |
2517 | (*hash->n_entries)++; |
2518 | /* unlock */ |
2519 | |
2520 | if (added) { |
2521 | *added = 1; |
2522 | } |
2523 | return id; |
2524 | } |
2525 | } |
2526 | |
2527 | grn_id |
2528 | grn_hash_get(grn_ctx *ctx, grn_hash *hash, const void *key, |
2529 | unsigned int key_size, void **value) |
2530 | { |
2531 | uint32_t hash_value; |
2532 | if (grn_hash_error_if_truncated(ctx, hash) != GRN_SUCCESS) { |
2533 | return GRN_ID_NIL; |
2534 | } |
2535 | if (hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE) { |
2536 | if (key_size > hash->key_size) { |
2537 | return GRN_ID_NIL; |
2538 | } |
2539 | hash_value = grn_hash_calculate_hash_value(key, key_size); |
2540 | } else { |
2541 | if (key_size != hash->key_size) { |
2542 | return GRN_ID_NIL; |
2543 | } |
2544 | if (key_size == sizeof(uint32_t)) { |
2545 | hash_value = *((uint32_t *)key); |
2546 | } else { |
2547 | hash_value = grn_hash_calculate_hash_value(key, key_size); |
2548 | } |
2549 | } |
2550 | |
2551 | { |
2552 | uint32_t i; |
2553 | const uint32_t step = grn_hash_calculate_step(hash_value); |
2554 | for (i = hash_value; ; i += step) { |
2555 | grn_id id; |
2556 | grn_id * const index = grn_hash_idx_at(ctx, hash, i); |
2557 | if (!index) { |
2558 | return GRN_ID_NIL; |
2559 | } |
2560 | id = *index; |
2561 | if (!id) { |
2562 | return GRN_ID_NIL; |
2563 | } |
2564 | if (id != GARBAGE) { |
2565 | grn_hash_entry * const entry = grn_hash_entry_at(ctx, hash, id, 0); |
2566 | if (entry) { |
2567 | if (grn_hash_entry_compare_key(ctx, hash, entry, hash_value, |
2568 | key, key_size)) { |
2569 | if (value) { |
2570 | *value = grn_hash_entry_get_value(ctx, hash, entry); |
2571 | } |
2572 | return id; |
2573 | } |
2574 | } |
2575 | } |
2576 | } |
2577 | } |
2578 | } |
2579 | |
2580 | inline static grn_hash_entry * |
2581 | grn_hash_get_entry(grn_ctx *ctx, grn_hash *hash, grn_id id) |
2582 | { |
2583 | if (!grn_hash_bitmap_at(ctx, hash, id)) { |
2584 | return NULL; |
2585 | } |
2586 | return grn_hash_entry_at(ctx, hash, id, 0); |
2587 | } |
2588 | |
2589 | const char * |
2590 | _grn_hash_key(grn_ctx *ctx, grn_hash *hash, grn_id id, uint32_t *key_size) |
2591 | { |
2592 | grn_hash_entry * const entry = grn_hash_get_entry(ctx, hash, id); |
2593 | if (!entry) { |
2594 | *key_size = 0; |
2595 | return NULL; |
2596 | } |
2597 | *key_size = grn_hash_entry_get_key_size(hash, entry); |
2598 | return grn_hash_entry_get_key(ctx, hash, entry); |
2599 | } |
2600 | |
2601 | int |
2602 | grn_hash_get_key(grn_ctx *ctx, grn_hash *hash, grn_id id, void *keybuf, int bufsize) |
2603 | { |
2604 | int key_size; |
2605 | grn_hash_entry *entry; |
2606 | if (grn_hash_error_if_truncated(ctx, hash) != GRN_SUCCESS) { |
2607 | return 0; |
2608 | } |
2609 | entry = grn_hash_get_entry(ctx, hash, id); |
2610 | if (!entry) { |
2611 | return 0; |
2612 | } |
2613 | key_size = grn_hash_entry_get_key_size(hash, entry); |
2614 | if (bufsize >= key_size) { |
2615 | grn_memcpy(keybuf, grn_hash_entry_get_key(ctx, hash, entry), key_size); |
2616 | } |
2617 | return key_size; |
2618 | } |
2619 | |
2620 | int |
2621 | grn_hash_get_key2(grn_ctx *ctx, grn_hash *hash, grn_id id, grn_obj *bulk) |
2622 | { |
2623 | int key_size; |
2624 | char *key; |
2625 | grn_hash_entry *entry; |
2626 | if (grn_hash_error_if_truncated(ctx, hash) != GRN_SUCCESS) { |
2627 | return 0; |
2628 | } |
2629 | entry = grn_hash_get_entry(ctx, hash, id); |
2630 | if (!entry) { |
2631 | return 0; |
2632 | } |
2633 | key_size = grn_hash_entry_get_key_size(hash, entry); |
2634 | key = grn_hash_entry_get_key(ctx, hash, entry); |
2635 | if (bulk->header.impl_flags & GRN_OBJ_REFER) { |
2636 | bulk->u.b.head = key; |
2637 | bulk->u.b.curr = key + key_size; |
2638 | } else { |
2639 | grn_bulk_write(ctx, bulk, key, key_size); |
2640 | } |
2641 | return key_size; |
2642 | } |
2643 | |
2644 | int |
2645 | grn_hash_get_value(grn_ctx *ctx, grn_hash *hash, grn_id id, void *valuebuf) |
2646 | { |
2647 | void *value; |
2648 | grn_hash_entry *entry; |
2649 | if (grn_hash_error_if_truncated(ctx, hash) != GRN_SUCCESS) { |
2650 | return 0; |
2651 | } |
2652 | entry = grn_hash_get_entry(ctx, hash, id); |
2653 | if (!entry) { |
2654 | return 0; |
2655 | } |
2656 | value = grn_hash_entry_get_value(ctx, hash, entry); |
2657 | if (!value) { |
2658 | return 0; |
2659 | } |
2660 | if (valuebuf) { |
2661 | grn_memcpy(valuebuf, value, hash->value_size); |
2662 | } |
2663 | return hash->value_size; |
2664 | } |
2665 | |
2666 | const char * |
2667 | grn_hash_get_value_(grn_ctx *ctx, grn_hash *hash, grn_id id, uint32_t *size) |
2668 | { |
2669 | const void *value; |
2670 | grn_hash_entry *entry; |
2671 | if (grn_hash_error_if_truncated(ctx, hash) != GRN_SUCCESS) { |
2672 | return NULL; |
2673 | } |
2674 | entry = grn_hash_get_entry(ctx, hash, id); |
2675 | if (!entry) { |
2676 | return NULL; |
2677 | } |
2678 | value = grn_hash_entry_get_value(ctx, hash, entry); |
2679 | if (!value) { |
2680 | return NULL; |
2681 | } |
2682 | if (size) { |
2683 | *size = hash->value_size; |
2684 | } |
2685 | return (const char *)value; |
2686 | } |
2687 | |
2688 | int |
2689 | grn_hash_get_key_value(grn_ctx *ctx, grn_hash *hash, grn_id id, |
2690 | void *keybuf, int bufsize, void *valuebuf) |
2691 | { |
2692 | void *value; |
2693 | int key_size; |
2694 | grn_hash_entry *entry; |
2695 | if (grn_hash_error_if_truncated(ctx, hash) != GRN_SUCCESS) { |
2696 | return 0; |
2697 | } |
2698 | entry = grn_hash_get_entry(ctx, hash, id); |
2699 | if (!entry) { |
2700 | return 0; |
2701 | } |
2702 | key_size = grn_hash_entry_get_key_size(hash, entry); |
2703 | if (bufsize >= key_size) { |
2704 | grn_memcpy(keybuf, grn_hash_entry_get_key(ctx, hash, entry), key_size); |
2705 | } |
2706 | value = grn_hash_entry_get_value(ctx, hash, entry); |
2707 | if (!value) { |
2708 | return 0; |
2709 | } |
2710 | if (valuebuf) { |
2711 | grn_memcpy(valuebuf, value, hash->value_size); |
2712 | } |
2713 | return key_size; |
2714 | } |
2715 | |
2716 | int |
2717 | _grn_hash_get_key_value(grn_ctx *ctx, grn_hash *hash, grn_id id, |
2718 | void **key, void **value) |
2719 | { |
2720 | int key_size; |
2721 | grn_hash_entry *entry; |
2722 | if (grn_hash_error_if_truncated(ctx, hash) != GRN_SUCCESS) { |
2723 | return 0; |
2724 | } |
2725 | entry = grn_hash_get_entry(ctx, hash, id); |
2726 | if (!entry) { |
2727 | return 0; |
2728 | } |
2729 | key_size = grn_hash_entry_get_key_size(hash, entry); |
2730 | *key = grn_hash_entry_get_key(ctx, hash, entry); |
2731 | *value = grn_hash_entry_get_value(ctx, hash, entry); |
2732 | return *value ? key_size : 0; |
2733 | } |
2734 | |
2735 | grn_rc |
2736 | grn_hash_set_value(grn_ctx *ctx, grn_hash *hash, grn_id id, |
2737 | const void *value, int flags) |
2738 | { |
2739 | void *entry_value; |
2740 | grn_hash_entry *entry; |
2741 | if (grn_hash_error_if_truncated(ctx, hash) != GRN_SUCCESS) { |
    return ctx->rc;
2743 | } |
2744 | if (!value) { |
2745 | return GRN_INVALID_ARGUMENT; |
2746 | } |
2747 | entry = grn_hash_get_entry(ctx, hash, id); |
2748 | if (!entry) { |
2749 | return GRN_NO_MEMORY_AVAILABLE; |
2750 | } |
2751 | entry_value = grn_hash_entry_get_value(ctx, hash, entry); |
2752 | if (!entry_value) { |
2753 | return GRN_NO_MEMORY_AVAILABLE; |
2754 | } |
2755 | |
2756 | switch (flags & GRN_OBJ_SET_MASK) { |
2757 | case GRN_OBJ_SET : |
2758 | grn_memcpy(entry_value, value, hash->value_size); |
2759 | return GRN_SUCCESS; |
2760 | case GRN_OBJ_INCR : |
2761 | switch (hash->value_size) { |
2762 | case sizeof(int32_t) : |
2763 | *((int32_t *)entry_value) += *((int32_t *)value); |
2764 | return GRN_SUCCESS; |
2765 | case sizeof(int64_t) : |
2766 | *((int64_t *)entry_value) += *((int64_t *)value); |
2767 | return GRN_SUCCESS; |
2768 | default : |
2769 | return GRN_INVALID_ARGUMENT; |
2770 | } |
2771 | break; |
2772 | case GRN_OBJ_DECR : |
2773 | switch (hash->value_size) { |
2774 | case sizeof(int32_t) : |
2775 | *((int32_t *)entry_value) -= *((int32_t *)value); |
2776 | return GRN_SUCCESS; |
2777 | case sizeof(int64_t) : |
2778 | *((int64_t *)entry_value) -= *((int64_t *)value); |
2779 | return GRN_SUCCESS; |
2780 | default : |
2781 | return GRN_INVALID_ARGUMENT; |
2782 | } |
2783 | break; |
2784 | default : |
2785 | ERR(GRN_INVALID_ARGUMENT, "flags = %d" , flags); |
2786 | return ctx->rc; |
2787 | } |
2788 | } |
2789 | |
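/*
 * DELETE_IT unlinks an entry: its index slot is overwritten with GARBAGE, the
 * entry is pushed onto the garbage chain (per key size for an I/O hash, a
 * single chain for a tiny hash), its bitmap bit is cleared, and a tiny hash
 * additionally frees an out-of-line variable-size key.
 */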
2790 | #define DELETE_IT do {\ |
2791 | *ep = GARBAGE;\ |
2792 | if (grn_hash_is_io_hash(hash)) {\ |
2793 | uint32_t size = key_size - 1;\ |
2794 | grn_id *garbages;\ |
2795 | if (GRN_HASH_IS_LARGE_KEY(hash)) {\ |
2796 | garbages = hash->header.large->garbages;\ |
2797 | } else {\ |
2798 | garbages = hash->header.normal->garbages;\ |
2799 | }\ |
2800 | ee->key = garbages[size];\ |
2801 | garbages[size] = e;\ |
2802 | grn_io_array_bit_off(ctx, hash->io, GRN_HASH_BITMAP_SEGMENT, e);\ |
2803 | } else {\ |
2804 | ee->key = hash->garbages;\ |
2805 | hash->garbages = e;\ |
2806 | if ((hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE) && !(ee->flag & HASH_IMMEDIATE)) {\ |
2807 | grn_ctx *ctx = hash->ctx;\ |
2808 | GRN_CTX_FREE(ctx, ((entry_astr *)ee)->str);\ |
2809 | }\ |
2810 | grn_tiny_bitmap_get_and_set(&hash->bitmap, e, 0);\ |
2811 | }\ |
2812 | (*hash->n_entries)--;\ |
2813 | (*hash->n_garbages)++;\ |
2814 | rc = GRN_SUCCESS;\ |
2815 | } while (0) |
2816 | |
2817 | grn_rc |
2818 | grn_hash_delete_by_id(grn_ctx *ctx, grn_hash *hash, grn_id id, |
2819 | grn_table_delete_optarg *optarg) |
2820 | { |
2821 | entry_str *ee; |
2822 | grn_rc rc; |
2823 | if (!hash || !id) { return GRN_INVALID_ARGUMENT; } |
2824 | rc = grn_hash_error_if_truncated(ctx, hash); |
2825 | if (rc != GRN_SUCCESS) { |
2826 | return rc; |
2827 | } |
2828 | rc = GRN_INVALID_ARGUMENT; |
2829 | /* lock */ |
2830 | ee = grn_hash_entry_at(ctx, hash, id, 0); |
2831 | if (ee) { |
2832 | grn_id e, *ep; |
2833 | uint32_t i, key_size, h = ee->key, s = grn_hash_calculate_step(h); |
2834 | key_size = (hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE) ? ee->size : hash->key_size; |
2835 | for (i = h; ; i += s) { |
2836 | if (!(ep = grn_hash_idx_at(ctx, hash, i))) { return GRN_NO_MEMORY_AVAILABLE; } |
2837 | if (!(e = *ep)) { break; } |
2838 | if (e == id) { |
2839 | DELETE_IT; |
2840 | break; |
2841 | } |
2842 | } |
2843 | } |
2844 | /* unlock */ |
2845 | return rc; |
2846 | } |
2847 | |
2848 | grn_rc |
2849 | grn_hash_delete(grn_ctx *ctx, grn_hash *hash, const void *key, uint32_t key_size, |
2850 | grn_table_delete_optarg *optarg) |
2851 | { |
2852 | uint32_t h, i, m, s; |
2853 | grn_rc rc = grn_hash_error_if_truncated(ctx, hash); |
2854 | if (rc != GRN_SUCCESS) { |
2855 | return rc; |
2856 | } |
2857 | rc = GRN_INVALID_ARGUMENT; |
2858 | if (hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE) { |
2859 | if (key_size > hash->key_size) { return GRN_INVALID_ARGUMENT; } |
2860 | h = grn_hash_calculate_hash_value(key, key_size); |
2861 | } else { |
2862 | if (key_size != hash->key_size) { return GRN_INVALID_ARGUMENT; } |
2863 | if (key_size == sizeof(uint32_t)) { |
2864 | h = *((uint32_t *)key); |
2865 | } else { |
2866 | h = grn_hash_calculate_hash_value(key, key_size); |
2867 | } |
2868 | } |
2869 | s = grn_hash_calculate_step(h); |
2870 | { |
2871 | grn_id e, *ep; |
2872 | /* lock */ |
2873 | m = *hash->max_offset; |
2874 | for (i = h; ; i += s) { |
2875 | if (!(ep = grn_hash_idx_at(ctx, hash, i))) { return GRN_NO_MEMORY_AVAILABLE; } |
2876 | if (!(e = *ep)) { break; } |
2877 | if (e == GARBAGE) { continue; } |
2878 | { |
2879 | entry_str * const ee = grn_hash_entry_at(ctx, hash, e, 0); |
2880 | if (ee && match_key(ctx, hash, ee, h, key, key_size)) { |
2881 | DELETE_IT; |
2882 | break; |
2883 | } |
2884 | } |
2885 | } |
2886 | /* unlock */ |
2887 | return rc; |
2888 | } |
2889 | } |
2890 | |
/* Only valid for hash tables with GRN_OBJ_KEY_VAR_SIZE && GRN_HASH_TINY. */
2892 | const char * |
2893 | _grn_hash_strkey_by_val(void *v, uint16_t *size) |
2894 | { |
2895 | entry_astr *n = (entry_astr *)((uintptr_t)v - |
2896 | (uintptr_t)&((entry_astr *)0)->dummy); |
2897 | *size = n->size; |
2898 | return (n->flag & HASH_IMMEDIATE) ? (char *)&n->str : n->str; |
2899 | } |
2900 | |
2901 | void |
2902 | grn_hash_cursor_close(grn_ctx *ctx, grn_hash_cursor *c) |
2903 | { |
2904 | GRN_ASSERT(c->ctx == ctx); |
2905 | GRN_FREE(c); |
2906 | } |
2907 | |
2908 | #define HASH_CURR_MAX(hash) \ |
2909 | ((grn_hash_is_io_hash(hash)) ? (hash)->header.common->curr_rec : (hash)->a.max) |
2910 | |
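/*
 * A cursor walks ids from curr_rec towards tail in steps of dir. When some
 * ids have been deleted (n_entries differs from the largest used id), the
 * bitmap is consulted so that only live entries are returned; otherwise the
 * offset can be applied arithmetically.
 */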
2911 | grn_hash_cursor * |
2912 | grn_hash_cursor_open(grn_ctx *ctx, grn_hash *hash, |
2913 | const void *min, uint32_t min_size, |
2914 | const void *max, uint32_t max_size, |
2915 | int offset, int limit, int flags) |
2916 | { |
2917 | grn_hash_cursor *c; |
2918 | if (!hash || !ctx) { return NULL; } |
2919 | if (grn_hash_error_if_truncated(ctx, hash) != GRN_SUCCESS) { |
2920 | return NULL; |
2921 | } |
2922 | if (!(c = GRN_MALLOCN(grn_hash_cursor, 1))) { return NULL; } |
2923 | GRN_DB_OBJ_SET_TYPE(c, GRN_CURSOR_TABLE_HASH_KEY); |
2924 | c->hash = hash; |
2925 | c->ctx = ctx; |
2926 | c->obj.header.flags = flags; |
2927 | c->obj.header.domain = GRN_ID_NIL; |
2928 | if (flags & GRN_CURSOR_DESCENDING) { |
2929 | c->dir = -1; |
2930 | if (max) { |
2931 | if (!(c->curr_rec = grn_hash_get(ctx, hash, max, max_size, NULL))) { |
2932 | c->tail = GRN_ID_NIL; |
2933 | goto exit; |
2934 | } |
2935 | if (!(flags & GRN_CURSOR_LT)) { c->curr_rec++; } |
2936 | } else { |
2937 | c->curr_rec = HASH_CURR_MAX(hash) + 1; |
2938 | } |
2939 | if (min) { |
2940 | if (!(c->tail = grn_hash_get(ctx, hash, min, min_size, NULL))) { |
2941 | c->curr_rec = GRN_ID_NIL; |
2942 | goto exit; |
2943 | } |
2944 | if ((flags & GRN_CURSOR_GT)) { c->tail++; } |
2945 | } else { |
2946 | c->tail = GRN_ID_NIL + 1; |
2947 | } |
2948 | if (c->curr_rec < c->tail) { c->tail = c->curr_rec; } |
2949 | } else { |
2950 | c->dir = 1; |
2951 | if (min) { |
2952 | if (!(c->curr_rec = grn_hash_get(ctx, hash, min, min_size, NULL))) { |
2953 | c->tail = GRN_ID_NIL; |
2954 | goto exit; |
2955 | } |
2956 | if (!(flags & GRN_CURSOR_GT)) { c->curr_rec--; } |
2957 | } else { |
2958 | c->curr_rec = GRN_ID_NIL; |
2959 | } |
2960 | if (max) { |
2961 | if (!(c->tail = grn_hash_get(ctx, hash, max, max_size, NULL))) { |
2962 | c->curr_rec = GRN_ID_NIL; |
2963 | goto exit; |
2964 | } |
2965 | if ((flags & GRN_CURSOR_LT)) { c->tail--; } |
2966 | } else { |
2967 | c->tail = HASH_CURR_MAX(hash); |
2968 | } |
2969 | if (c->tail < c->curr_rec) { c->tail = c->curr_rec; } |
2970 | } |
2971 | if (*hash->n_entries != HASH_CURR_MAX(hash)) { |
2972 | while (offset && c->curr_rec != c->tail) { |
2973 | c->curr_rec += c->dir; |
2974 | if (grn_hash_bitmap_at(ctx, c->hash, c->curr_rec)) { offset--; } |
2975 | } |
2976 | } else { |
2977 | c->curr_rec += c->dir * offset; |
2978 | } |
2979 | exit : |
2980 | c->rest = (limit < 0) ? GRN_ARRAY_MAX : limit; |
2981 | return c; |
2982 | } |
2983 | |
2984 | grn_id |
2985 | grn_hash_cursor_next(grn_ctx *ctx, grn_hash_cursor *c) |
2986 | { |
2987 | if (c && c->rest) { |
2988 | while (c->curr_rec != c->tail) { |
2989 | c->curr_rec += c->dir; |
2990 | if (*c->hash->n_entries != HASH_CURR_MAX(c->hash)) { |
2991 | if (!grn_hash_bitmap_at(ctx, c->hash, c->curr_rec)) { continue; } |
2992 | } |
2993 | c->rest--; |
2994 | return c->curr_rec; |
2995 | } |
2996 | } |
2997 | return GRN_ID_NIL; |
2998 | } |
2999 | |
3000 | grn_id |
3001 | grn_hash_next(grn_ctx *ctx, grn_hash *hash, grn_id id) |
3002 | { |
3003 | grn_id max = HASH_CURR_MAX(hash); |
3004 | while (++id <= max) { |
3005 | if (grn_hash_bitmap_at(ctx, hash, id)) { return id; } |
3006 | } |
3007 | return GRN_ID_NIL; |
3008 | } |
3009 | |
3010 | grn_id |
3011 | grn_hash_at(grn_ctx *ctx, grn_hash *hash, grn_id id) |
3012 | { |
3013 | return grn_hash_bitmap_at(ctx, hash, id) ? id : GRN_ID_NIL; |
3014 | } |
3015 | |
3016 | int |
3017 | grn_hash_cursor_get_key(grn_ctx *ctx, grn_hash_cursor *c, void **key) |
3018 | { |
3019 | int key_size; |
3020 | entry_str *ee; |
3021 | if (!c) { return 0; } |
3022 | ee = grn_hash_entry_at(ctx, c->hash, c->curr_rec, 0); |
3023 | if (!ee) { return 0; } |
3024 | key_size = (c->hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE) ? ee->size : c->hash->key_size; |
3025 | *key = get_key(ctx, c->hash, ee); |
3026 | return key_size; |
3027 | } |
3028 | |
3029 | int |
3030 | grn_hash_cursor_get_value(grn_ctx *ctx, grn_hash_cursor *c, void **value) |
3031 | { |
3032 | void *v; |
3033 | entry_str *ee; |
3034 | if (!c) { return 0; } |
3035 | ee = grn_hash_entry_at(ctx, c->hash, c->curr_rec, 0); |
3036 | if (ee && (v = get_value(ctx, c->hash, ee))) { |
3037 | *value = v; |
3038 | return c->hash->value_size; |
3039 | } |
3040 | return 0; |
3041 | } |
3042 | |
3043 | int |
3044 | grn_hash_cursor_get_key_value(grn_ctx *ctx, grn_hash_cursor *c, |
3045 | void **key, uint32_t *key_size, void **value) |
3046 | { |
3047 | entry_str *ee; |
3048 | if (!c) { return GRN_INVALID_ARGUMENT; } |
3049 | ee = grn_hash_entry_at(ctx, c->hash, c->curr_rec, 0); |
3050 | if (!ee) { return GRN_INVALID_ARGUMENT; } |
3051 | if (key_size) { |
3052 | *key_size = (c->hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE) ? ee->size : c->hash->key_size; |
3053 | } |
3054 | if (key) { *key = get_key(ctx, c->hash, ee); } |
3055 | if (value) { *value = get_value(ctx, c->hash, ee); } |
3056 | return c->hash->value_size; |
3057 | } |
3058 | |
3059 | grn_rc |
3060 | grn_hash_cursor_set_value(grn_ctx *ctx, grn_hash_cursor *c, |
3061 | const void *value, int flags) |
3062 | { |
3063 | if (!c) { return GRN_INVALID_ARGUMENT; } |
3064 | return grn_hash_set_value(ctx, c->hash, c->curr_rec, value, flags); |
3065 | } |
3066 | |
3067 | grn_rc |
3068 | grn_hash_cursor_delete(grn_ctx *ctx, grn_hash_cursor *c, |
3069 | grn_table_delete_optarg *optarg) |
3070 | { |
3071 | if (!c) { return GRN_INVALID_ARGUMENT; } |
3072 | return grn_hash_delete_by_id(ctx, c->hash, c->curr_rec, optarg); |
3073 | } |
3074 | |
3075 | /* sort */ |
3076 | |
3077 | #define PREPARE_VAL(e,ep,es) do {\ |
3078 | if ((arg->flags & GRN_TABLE_SORT_BY_VALUE)) {\ |
3079 | ep = ((const uint8_t *)(get_value(ctx, hash, (entry_str *)(e))));\ |
3080 | es = hash->value_size;\ |
3081 | } else {\ |
3082 | ep = ((const uint8_t *)(get_key(ctx, hash, (entry_str *)(e))));\ |
3083 | es = ((hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE)\ |
3084 | ? ((entry_str *)(e))->size : hash->key_size); \ |
3085 | }\ |
3086 | ep += arg->offset;\ |
3087 | es -= arg->offset;\ |
3088 | } while (0) |
3089 | |
3090 | #define COMPARE_VAL_(ap,as,bp,bs)\ |
3091 | (arg->compar\ |
3092 | ? arg->compar(ctx,\ |
3093 | (grn_obj *)hash, (void *)(ap), as,\ |
3094 | (grn_obj *)hash, (void *)(bp), bs, arg->compar_arg)\ |
3095 | : ((arg->flags & GRN_TABLE_SORT_AS_NUMBER)\ |
3096 | ? ((arg->flags & GRN_TABLE_SORT_AS_UNSIGNED)\ |
3097 | ? ((arg->flags & GRN_TABLE_SORT_AS_INT64)\ |
3098 | ? *((uint64_t *)(ap)) > *((uint64_t *)(bp))\ |
3099 | : *((uint32_t *)(ap)) > *((uint32_t *)(bp)))\ |
3100 | : ((arg->flags & GRN_TABLE_SORT_AS_INT64)\ |
3101 | ? *((int64_t *)(ap)) > *((int64_t *)(bp))\ |
3102 | : *((int32_t *)(ap)) > *((int32_t *)(bp))))\ |
3103 | : grn_str_greater(ap, as, bp, bs))) |
3104 | |
3105 | #define COMPARE_VAL(ap,as,bp,bs)\ |
3106 | ((dir) ? COMPARE_VAL_((bp),(bs),(ap),(as)) : COMPARE_VAL_((ap),(as),(bp),(bs))) |
3107 | |
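/*
 * Sorting operates on an array of entry pointers. pack() chooses a pivot
 * entry from around the middle of the id space and partitions the remaining
 * live entries into the head and tail of the result array; part() is a
 * median-of-three quicksort partition step, and _sort() only recurses far
 * enough to fill the first `limit` slots.
 */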
3108 | inline static entry ** |
3109 | pack(grn_ctx *ctx, grn_hash *hash, entry **res, grn_table_sort_optarg *arg, int dir) |
3110 | { |
3111 | uint32_t n; |
3112 | uint32_t cs, es; |
3113 | const uint8_t *cp, *ep; |
3114 | entry **head, **tail, *e, *c; |
3115 | grn_id id, m = HASH_CURR_MAX(hash); |
3116 | for (id = m >> 1;;id = (id == m) ? 1 : id + 1) { |
3117 | if (grn_hash_bitmap_at(ctx, hash, id)) { break; } |
3118 | } |
3119 | c = grn_hash_entry_at(ctx, hash, id, 0); |
3120 | if (!c) { return NULL; } |
3121 | PREPARE_VAL(c, cp, cs); |
3122 | head = res; |
3123 | n = *hash->n_entries - 1; |
3124 | tail = res + n; |
3125 | while (n--) { |
3126 | do { |
3127 | id = (id == m) ? 1 : id + 1; |
3128 | } while (!grn_hash_bitmap_at(ctx, hash, id)); |
3129 | e = grn_hash_entry_at(ctx, hash, id, 0); |
3130 | if (!e) { return NULL; } |
3131 | PREPARE_VAL(e, ep, es); |
3132 | if (COMPARE_VAL(cp, cs, ep, es)) { |
3133 | *head++ = e; |
3134 | } else { |
3135 | *tail-- = e; |
3136 | } |
3137 | } |
3138 | *head = c; |
3139 | return *hash->n_entries > 2 ? head : NULL; |
3140 | } |
3141 | |
3142 | inline static void |
3143 | swap(entry **a, entry **b) |
3144 | { |
3145 | entry *c_ = *a; |
3146 | *a = *b; |
3147 | *b = c_; |
3148 | } |
3149 | |
3150 | #define SWAP(a,ap,as,b,bp,bs) do {\ |
3151 | const uint8_t *cp_ = ap;\ |
3152 | uint32_t cs_ = as;\ |
3153 | ap = bp; bp = cp_;\ |
3154 | as = bs; bs = cs_;\ |
3155 | swap(a,b);\ |
3156 | } while (0) |
3157 | |
3158 | inline static entry ** |
3159 | part(grn_ctx *ctx, entry **b, entry **e, grn_table_sort_optarg *arg, grn_hash *hash, int dir) |
3160 | { |
3161 | entry **c; |
3162 | const uint8_t *bp, *cp, *ep; |
3163 | uint32_t bs, cs, es; |
3164 | intptr_t d = e - b; |
3165 | PREPARE_VAL(*b, bp, bs); |
3166 | PREPARE_VAL(*e, ep, es); |
3167 | if (COMPARE_VAL(bp, bs, ep, es)) { |
3168 | SWAP(b, bp, bs, e, ep, es); |
3169 | } |
3170 | if (d < 2) { return NULL; } |
3171 | c = b + (d >> 1); |
3172 | PREPARE_VAL(*c, cp, cs); |
3173 | if (COMPARE_VAL(bp, bs, cp, cs)) { |
3174 | SWAP(b, bp, bs, c, cp, cs); |
3175 | } else { |
3176 | if (COMPARE_VAL(cp, cs, ep, es)) { |
3177 | SWAP(c, cp, cs, e, ep, es); |
3178 | } |
3179 | } |
3180 | if (d < 3) { return NULL; } |
3181 | b++; |
3182 | swap(b, c); |
3183 | c = b; |
3184 | PREPARE_VAL(*c, cp, cs); |
3185 | for (;;) { |
3186 | do { |
3187 | b++; |
3188 | PREPARE_VAL(*b, bp, bs); |
3189 | } while (COMPARE_VAL(cp, cs, bp, bs)); |
3190 | do { |
3191 | e--; |
3192 | PREPARE_VAL(*e, ep, es); |
3193 | } while (COMPARE_VAL(ep, es, cp, cs)); |
3194 | if (b >= e) { break; } |
3195 | SWAP(b, bp, bs, e, ep, es); |
3196 | } |
3197 | SWAP(c, cp, cs, e, ep, es); |
3198 | return e; |
3199 | } |
3200 | |
3201 | static void |
3202 | _sort(grn_ctx *ctx, entry **head, entry **tail, int limit, |
3203 | grn_table_sort_optarg *arg, grn_hash *hash, int dir) |
3204 | { |
3205 | entry **c; |
3206 | if (head < tail && (c = part(ctx, head, tail, arg, hash, dir))) { |
3207 | intptr_t rest = limit - 1 - (c - head); |
3208 | _sort(ctx, head, c - 1, limit, arg, hash, dir); |
3209 | if (rest > 0) { _sort(ctx, c + 1, tail, (int)rest, arg, hash, dir); } |
3210 | } |
3211 | } |
3212 | |
3213 | static void |
3214 | sort(grn_ctx *ctx, |
3215 | grn_hash *hash, entry **res, int limit, grn_table_sort_optarg *arg, int dir) |
3216 | { |
3217 | entry **c = pack(ctx, hash, res, arg, dir); |
3218 | if (c) { |
3219 | intptr_t rest = limit - 1 - (c - res); |
3220 | _sort(ctx, res, c - 1, limit, arg, hash, dir); |
3221 | if (rest > 0 ) { |
3222 | _sort(ctx, c + 1, res + *hash->n_entries - 1, (int)rest, arg, hash, dir); |
3223 | } |
3224 | } |
3225 | } |
3226 | |
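/*
 * When the sort key is a fixed 32-bit value (or the id itself), each live
 * entry is reduced to a val32 pair of id and key, so comparisons and the
 * final output never have to touch the entries again.
 */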
3227 | typedef struct { |
3228 | grn_id id; |
3229 | int32_t v; |
3230 | } val32; |
3231 | |
3232 | #define PREPARE_VAL32(id,e,ep) do {\ |
3233 | (ep)->id = id;\ |
3234 | (ep)->v = (arg->flags & GRN_TABLE_SORT_BY_ID)\ |
3235 | ? (int32_t) id\ |
3236 | : (*((int32_t *)((byte *)((arg->flags & GRN_TABLE_SORT_BY_VALUE)\ |
3237 | ? get_value(ctx, hash, (e))\ |
3238 | : get_key(ctx, hash, (e))) + arg->offset)));\ |
3239 | } while (0) |
3240 | |
3241 | #define COMPARE_VAL32_(ap,bp) \ |
3242 | (arg->compar\ |
3243 | ? arg->compar(ctx,\ |
3244 | (grn_obj *)hash, (void *)&(ap)->v, sizeof(uint32_t),\ |
3245 | (grn_obj *)hash, (void *)&(bp)->v, sizeof(uint32_t),\ |
3246 | arg->compar_arg)\ |
3247 | : ((arg->flags & GRN_TABLE_SORT_AS_NUMBER)\ |
3248 | ? ((arg->flags & GRN_TABLE_SORT_AS_UNSIGNED)\ |
3249 | ? *((uint32_t *)&(ap)->v) > *((uint32_t *)&(bp)->v)\ |
3250 | : *((int32_t *)&(ap)->v) > *((int32_t *)&(bp)->v))\ |
3251 | : memcmp(&(ap)->v, &(bp)->v, sizeof(uint32_t)) > 0)) |
3252 | |
3253 | #define COMPARE_VAL32(ap,bp)\ |
3254 | ((dir) ? COMPARE_VAL32_((bp),(ap)) : COMPARE_VAL32_((ap),(bp))) |
3255 | |
3256 | inline static val32 * |
3257 | pack_val32(grn_ctx *ctx, grn_hash *hash, val32 *res, grn_table_sort_optarg *arg, int dir) |
3258 | { |
3259 | uint32_t n; |
3260 | entry_str *e, *c; |
3261 | val32 *head, *tail, cr, er; |
3262 | grn_id id, m = HASH_CURR_MAX(hash); |
3263 | for (id = m >> 1;;id = (id == m) ? 1 : id + 1) { |
3264 | if (grn_hash_bitmap_at(ctx, hash, id)) { break; } |
3265 | } |
3266 | c = grn_hash_entry_at(ctx, hash, id, 0); |
3267 | if (!c) { return NULL; } |
3268 | PREPARE_VAL32(id, c, &cr); |
3269 | head = res; |
3270 | n = *hash->n_entries - 1; |
3271 | tail = res + n; |
3272 | while (n--) { |
3273 | do { |
3274 | id = (id == m) ? 1 : id + 1; |
3275 | } while (!grn_hash_bitmap_at(ctx, hash, id)); |
3276 | e = grn_hash_entry_at(ctx, hash, id, 0); |
3277 | if (!e) { return NULL; } |
3278 | PREPARE_VAL32(id, e, &er); |
3279 | if (COMPARE_VAL32(&cr, &er)) { |
3280 | *head++ = er; |
3281 | } else { |
3282 | *tail-- = er; |
3283 | } |
3284 | } |
3285 | *head = cr; |
3286 | return *hash->n_entries > 2 ? head : NULL; |
3287 | } |
3288 | |
3289 | #define SWAP_VAL32(ap,bp) do {\ |
3290 | val32 cr_ = *ap;\ |
3291 | *ap = *bp;\ |
3292 | *bp = cr_;\ |
3293 | } while (0) |
3294 | |
3295 | inline static val32 * |
3296 | part_val32(grn_ctx *ctx, |
3297 | val32 *b, val32 *e, grn_table_sort_optarg *arg, grn_hash *hash, int dir) |
3298 | { |
3299 | val32 *c; |
3300 | intptr_t d = e - b; |
3301 | if (COMPARE_VAL32(b, e)) { SWAP_VAL32(b, e); } |
3302 | if (d < 2) { return NULL; } |
3303 | c = b + (d >> 1); |
3304 | if (COMPARE_VAL32(b, c)) { |
3305 | SWAP_VAL32(b, c); |
3306 | } else { |
3307 | if (COMPARE_VAL32(c, e)) { SWAP_VAL32(c, e); } |
3308 | } |
3309 | if (d < 3) { return NULL; } |
3310 | b++; |
3311 | SWAP_VAL32(b, c); |
3312 | c = b; |
3313 | for (;;) { |
3314 | do { b++; } while (COMPARE_VAL32(c, b)); |
3315 | do { e--; } while (COMPARE_VAL32(e, c)); |
3316 | if (b >= e) { break; } |
3317 | SWAP_VAL32(b, e); |
3318 | } |
3319 | SWAP_VAL32(c, e); |
3320 | return e; |
3321 | } |
3322 | |
3323 | static void |
3324 | _sort_val32(grn_ctx *ctx, val32 *head, val32 *tail, int limit, |
3325 | grn_table_sort_optarg *arg, grn_hash *hash, int dir) |
3326 | { |
3327 | val32 *c; |
3328 | if (head < tail && (c = part_val32(ctx, head, tail, arg, hash, dir))) { |
3329 | intptr_t rest = limit - 1 - (c - head); |
3330 | _sort_val32(ctx, head, c - 1, limit, arg, hash, dir); |
3331 | if (rest > 0) { _sort_val32(ctx, c + 1, tail, (int)rest, arg, hash, dir); } |
3332 | } |
3333 | } |
3334 | |
3335 | static void |
3336 | sort_val32(grn_ctx *ctx, |
3337 | grn_hash *hash, val32 *res, int limit, grn_table_sort_optarg *arg, int dir) |
3338 | { |
3339 | val32 *c = pack_val32(ctx, hash, res, arg, dir); |
3340 | if (c) { |
3341 | intptr_t rest = limit - 1 - (c - res); |
3342 | _sort_val32(ctx, res, c - 1, limit, arg, hash, dir); |
3343 | if (rest > 0 ) { |
3344 | _sort_val32(ctx, c + 1, res + *hash->n_entries - 1, (int)rest, arg, hash, dir); |
3345 | } |
3346 | } |
3347 | } |
3348 | |
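/*
 * entry2id() maps an entry pointer back to its id by re-running the probe
 * sequence for the entry's stored hash value until it finds the index slot
 * that points at this entry.
 */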
3349 | inline static grn_id |
3350 | entry2id(grn_ctx *ctx, grn_hash *hash, entry *e) |
3351 | { |
3352 | entry *e2; |
3353 | grn_id id, *ep; |
3354 | uint32_t i, h = e->key, s = grn_hash_calculate_step(h); |
3355 | for (i = h; ; i += s) { |
3356 | if (!(ep = grn_hash_idx_at(ctx, hash, i))) { return GRN_ID_NIL; } |
3357 | if (!(id = *ep)) { break; } |
3358 | if (id != GARBAGE) { |
3359 | e2 = grn_hash_entry_at(ctx, hash, id, 0); |
3360 | if (!e2) { return GRN_ID_NIL; } |
3361 | if (e2 == e) { break; } |
3362 | } |
3363 | } |
3364 | return id; |
3365 | } |
3366 | |
3367 | int |
3368 | grn_hash_sort(grn_ctx *ctx, grn_hash *hash, |
3369 | int limit, grn_array *result, grn_table_sort_optarg *optarg) |
3370 | { |
3371 | entry **res; |
3372 | if (!result || !*hash->n_entries) { return 0; } |
3373 | if (grn_hash_error_if_truncated(ctx, hash) != GRN_SUCCESS) { |
3374 | return 0; |
3375 | } |
3376 | if (!(res = GRN_MALLOC(sizeof(entry *) * *hash->n_entries))) { |
3377 | GRN_LOG(ctx, GRN_LOG_ALERT, "allocation of entries failed on grn_hash_sort !" ); |
3378 | return 0; |
3379 | } |
3380 | if (limit < 0) { |
3381 | limit += *hash->n_entries + 1; |
3382 | if (limit < 0) { |
3383 | GRN_LOG(ctx, GRN_LOG_ALERT, "limit is too small in grn_hash_sort !" ); |
3384 | return 0; |
3385 | } |
3386 | } |
3387 | if (limit > *hash->n_entries) { limit = *hash->n_entries; } |
3388 | /* hash->limit = limit; */ |
3389 | if (optarg) { |
3390 | int dir = (optarg->flags & GRN_TABLE_SORT_DESC); |
3391 | if ((optarg->flags & GRN_TABLE_SORT_BY_ID) || |
3392 | (optarg->flags & GRN_TABLE_SORT_BY_VALUE) |
3393 | ? ((hash->value_size - optarg->offset) == sizeof(uint32_t)) |
3394 | : (!(hash->obj.header.flags & GRN_OBJ_KEY_VAR_SIZE) |
3395 | && hash->key_size == sizeof(uint32_t))) { |
3396 | if (sizeof(entry *) != sizeof(val32)) { |
3397 | GRN_FREE(res); |
3398 | if (!(res = GRN_MALLOC(sizeof(val32) * *hash->n_entries))) { |
3399 | GRN_LOG(ctx, GRN_LOG_ALERT, "allocation of entries failed on grn_hash_sort !" ); |
3400 | return 0; |
3401 | } |
3402 | } |
3403 | sort_val32(ctx, hash, (val32 *)res, limit, optarg, dir); |
3404 | { |
3405 | int i; |
3406 | grn_id *v; |
3407 | val32 *rp = (val32 *)res; |
3408 | for (i = 0; i < limit; i++, rp++) { |
3409 | if (!grn_array_add(ctx, result, (void **)&v)) { break; } |
3410 | if (!(*v = rp->id)) { break; } |
3411 | } |
3412 | GRN_FREE(res); |
3413 | return i; |
3414 | } |
3415 | } else { |
3416 | sort(ctx, hash, res, limit, optarg, dir); |
3417 | } |
3418 | } else { |
3419 | grn_table_sort_optarg opt = {0, NULL, NULL, NULL, 0}; |
3420 | sort(ctx, hash, res, limit, &opt, 0); |
3421 | } |
3422 | { |
3423 | int i; |
3424 | grn_id *v; |
3425 | entry **rp = res; |
3426 | for (i = 0; i < limit; i++, rp++) { |
3427 | if (!grn_array_add(ctx, result, (void **)&v)) { break; } |
3428 | if (!(*v = entry2id(ctx, hash, *rp))) { break; } |
3429 | } |
3430 | GRN_FREE(res); |
3431 | return i; |
3432 | } |
3433 | } |
3434 | |
3435 | void |
3436 | grn_hash_check(grn_ctx *ctx, grn_hash *hash) |
3437 | { |
3438 | char buf[8]; |
3439 | grn_hash_header_common *h = hash->header.common; |
3440 | if (grn_hash_error_if_truncated(ctx, hash) != GRN_SUCCESS) { |
3441 | return; |
3442 | } |
3443 | GRN_OUTPUT_ARRAY_OPEN("RESULT" , 1); |
3444 | GRN_OUTPUT_MAP_OPEN("SUMMARY" , 26); |
3445 | GRN_OUTPUT_CSTR("flags" ); |
3446 | grn_itoh(h->flags, buf, 8); |
3447 | GRN_OUTPUT_STR(buf, 8); |
3448 | GRN_OUTPUT_CSTR("key_size" ); |
3449 | GRN_OUTPUT_INT64(hash->key_size); |
3450 | GRN_OUTPUT_CSTR("value_size" ); |
3451 | GRN_OUTPUT_INT64(hash->value_size); |
3452 | GRN_OUTPUT_CSTR("tokenizer" ); |
3453 | GRN_OUTPUT_INT64(h->tokenizer); |
3454 | GRN_OUTPUT_CSTR("normalizer" ); |
3455 | GRN_OUTPUT_INT64(h->normalizer); |
3456 | GRN_OUTPUT_CSTR("curr_rec" ); |
3457 | GRN_OUTPUT_INT64(h->curr_rec); |
3458 | GRN_OUTPUT_CSTR("curr_key_normal" ); |
3459 | GRN_OUTPUT_UINT64(h->curr_key_normal); |
3460 | GRN_OUTPUT_CSTR("curr_key_large" ); |
3461 | GRN_OUTPUT_UINT64(h->curr_key_large); |
3462 | GRN_OUTPUT_CSTR("idx_offset" ); |
3463 | GRN_OUTPUT_INT64(h->idx_offset); |
3464 | GRN_OUTPUT_CSTR("entry_size" ); |
3465 | GRN_OUTPUT_INT64(hash->entry_size); |
3466 | GRN_OUTPUT_CSTR("max_offset" ); |
3467 | GRN_OUTPUT_INT64(*hash->max_offset); |
3468 | GRN_OUTPUT_CSTR("n_entries" ); |
3469 | GRN_OUTPUT_INT64(*hash->n_entries); |
3470 | GRN_OUTPUT_CSTR("n_garbages" ); |
3471 | GRN_OUTPUT_INT64(*hash->n_garbages); |
3472 | GRN_OUTPUT_CSTR("lock" ); |
3473 | GRN_OUTPUT_INT64(h->lock); |
3474 | GRN_OUTPUT_MAP_CLOSE(); |
3475 | GRN_OUTPUT_ARRAY_CLOSE(); |
3476 | } |
3477 | |
3478 | /* rhash : grn_hash with subrecs */ |
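/*
 * An "rhash" is a grn_hash whose value is treated as a grn_rset_recinfo: an
 * accumulated score, a sub-record counter, and room for up to max_n_subrecs
 * (score, body) pairs laid out as an implicit binary heap, so the lowest-ranked
 * kept sub-record sits at the root and can be replaced in O(log n).
 * This support code is only compiled when USE_GRN_INDEX2 is defined.
 */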
3479 | |
3480 | #ifdef USE_GRN_INDEX2 |
3481 | |
3482 | static uint32_t default_flags = GRN_HASH_TINY; |
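
/*
 * grn_rhash_init() prepares `hash` as an rhash: the key holds one record
 * (record_unit/record_size) and the value reserves room for up to
 * max_n_subrecs scored sub-records. default_flags above decides whether the
 * backing store is a tiny in-memory hash or an io hash.
 */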
3483 | |
3484 | grn_rc |
3485 | grn_rhash_init(grn_ctx *ctx, grn_hash *hash, grn_rec_unit record_unit, int record_size, |
3486 | grn_rec_unit subrec_unit, int subrec_size, unsigned int max_n_subrecs) |
3487 | { |
3488 | grn_rc rc; |
3489 | record_size = grn_rec_unit_size(record_unit, record_size); |
3490 | subrec_size = grn_rec_unit_size(subrec_unit, subrec_size); |
3491 | if (record_unit != grn_rec_userdef && subrec_unit != grn_rec_userdef) { |
3492 | subrec_size -= record_size; |
3493 | } |
3494 | if (!hash) { return GRN_INVALID_ARGUMENT; } |
3495 | if (record_size < 0) { return GRN_INVALID_ARGUMENT; } |
3496 | if ((default_flags & GRN_HASH_TINY)) { |
3497 | rc = grn_tiny_hash_init(ctx, hash, NULL, record_size, |
3498 | max_n_subrecs * (GRN_RSET_SCORE_SIZE + subrec_size), |
3499 | default_flags, GRN_ENC_NONE); |
3500 | } else { |
3501 | rc = grn_io_hash_init(ctx, hash, NULL, record_size, |
3502 | max_n_subrecs * (GRN_RSET_SCORE_SIZE + subrec_size), |
3503 | default_flags, GRN_ENC_NONE, 0); |
3504 | } |
3505 | if (rc) { return rc; } |
3506 | hash->record_unit = record_unit; |
3507 | hash->subrec_unit = subrec_unit; |
3508 | hash->subrec_size = subrec_size; |
3509 | hash->max_n_subrecs = max_n_subrecs; |
3510 | return rc; |
3511 | } |
3512 | |
3513 | grn_rc |
3514 | grn_rhash_fin(grn_ctx *ctx, grn_hash *hash) |
3515 | { |
3516 | grn_rc rc; |
3517 | if (grn_hash_is_io_hash(hash)) { |
3518 | rc = grn_io_close(ctx, hash->io); |
3519 | } else { |
3520 | GRN_ASSERT(ctx == hash->ctx); |
3521 | rc = grn_tiny_hash_fin(ctx, hash); |
3522 | } |
3523 | return rc; |
3524 | } |
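
/*
 * subrecs_push() inserts a (score, body) pair into the implicit binary heap
 * stored in `subrecs` (one slot is GRN_RSET_SCORE_SIZE + size bytes): it
 * starts at slot n_subrecs - 1 and sifts the new entry up, moving down any
 * parent that ranks higher under GRN_RSET_SUBRECS_CMP()/dir, so the
 * lowest-ranked kept sub-record always ends up at the root.
 */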
3525 | |
3526 | inline static void |
3527 | subrecs_push(byte *subrecs, int size, int n_subrecs, int score, void *body, int dir) |
3528 | { |
3529 | byte *v; |
3530 | int *c2; |
3531 | int n = n_subrecs - 1, n2; |
3532 | while (n) { |
3533 | n2 = (n - 1) >> 1; |
3534 | c2 = GRN_RSET_SUBRECS_NTH(subrecs,size,n2); |
3535 | if (GRN_RSET_SUBRECS_CMP(score, *c2, dir) > 0) { break; } |
3536 | GRN_RSET_SUBRECS_COPY(subrecs,size,n,c2); |
3537 | n = n2; |
3538 | } |
3539 | v = subrecs + n * (size + GRN_RSET_SCORE_SIZE); |
3540 | *((int *)v) = score; |
3541 | grn_memcpy(v + GRN_RSET_SCORE_SIZE, body, size); |
3542 | } |
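
/*
 * subrecs_replace_min() overwrites the root of the heap -- the lowest-ranked
 * kept sub-record -- with a new (score, body) pair and sifts it down:
 * children that rank below the new entry are promoted (the lower of the two
 * first) until the heap property defined by GRN_RSET_SUBRECS_CMP()/dir holds.
 */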
3543 | |
3544 | inline static void |
3545 | subrecs_replace_min(byte *subrecs, int size, int n_subrecs, int score, void *body, int dir) |
3546 | { |
3547 | byte *v; |
3548 | int n = 0, n1, n2, *c1, *c2; |
3549 | for (;;) { |
3550 | n1 = n * 2 + 1; |
3551 | n2 = n1 + 1; |
3552 | c1 = n1 < n_subrecs ? GRN_RSET_SUBRECS_NTH(subrecs,size,n1) : NULL; |
3553 | c2 = n2 < n_subrecs ? GRN_RSET_SUBRECS_NTH(subrecs,size,n2) : NULL; |
3554 | if (c1 && GRN_RSET_SUBRECS_CMP(score, *c1, dir) > 0) { |
3555 | if (c2 && |
3556 | GRN_RSET_SUBRECS_CMP(score, *c2, dir) > 0 && |
3557 | GRN_RSET_SUBRECS_CMP(*c1, *c2, dir) > 0) { |
3558 | GRN_RSET_SUBRECS_COPY(subrecs,size,n,c2); |
3559 | n = n2; |
3560 | } else { |
3561 | GRN_RSET_SUBRECS_COPY(subrecs,size,n,c1); |
3562 | n = n1; |
3563 | } |
3564 | } else { |
3565 | if (c2 && GRN_RSET_SUBRECS_CMP(score, *c2, dir) > 0) { |
3566 | GRN_RSET_SUBRECS_COPY(subrecs,size,n,c2); |
3567 | n = n2; |
3568 | } else { |
3569 | break; |
3570 | } |
3571 | } |
3572 | } |
3573 | v = subrecs + n * (size + GRN_RSET_SCORE_SIZE); |
3574 | grn_memcpy(v, &score, GRN_RSET_SCORE_SIZE); |
3575 | grn_memcpy(v + GRN_RSET_SCORE_SIZE, body, size); |
3576 | } |
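
/*
 * grn_rhash_add_subrec() accumulates `score` into the record's total,
 * increments its sub-record count, and, when max_n_subrecs is non-zero,
 * keeps only the best-ranked sub-records: while the heap is not yet full the
 * new pair is pushed; afterwards it replaces the current minimum only if it
 * ranks higher.
 */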
3577 | |
3578 | void |
3579 | grn_rhash_add_subrec(grn_hash *s, grn_rset_recinfo *ri, int score, void *body, int dir) |
3580 | { |
3581 | int limit = s->max_n_subrecs; |
3582 | ri->score += score; |
3583 | ri->n_subrecs += 1; |
3584 | if (limit) { |
3585 | int ssize = s->subrec_size; |
3586 | int n_subrecs = GRN_RSET_N_SUBRECS(ri); |
3587 | if (limit < n_subrecs) { |
3588 | if (GRN_RSET_SUBRECS_CMP(score, *ri->subrecs, dir) > 0) { |
3589 | subrecs_replace_min(ri->subrecs, ssize, limit, score, body, dir); |
3590 | } |
3591 | } else { |
3592 | subrecs_push(ri->subrecs, ssize, n_subrecs, score, body, dir); |
3593 | } |
3594 | } |
3595 | } |
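
/*
 * grn_rhash_group() groups the records of `s` by a derived key: either the
 * leading rsize bytes of each record key or, when optarg->func is given, a
 * key produced by that callback. Scores are summed per group and up to
 * `limit` sub-records are kept for each group. The original grn_hash
 * structure is reused for the grouped result and returned; NULL is returned
 * on failure.
 */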
3596 | |
3597 | grn_hash * |
3598 | grn_rhash_group(grn_hash *s, int limit, grn_group_optarg *optarg) |
3599 | { |
3600 | grn_ctx *ctx = s->ctx; |
3601 | grn_hash *g, h; |
3602 | grn_rset_recinfo *ri; |
3603 | grn_rec_unit unit; |
3604 | grn_hash_cursor *c; |
3605 | grn_id rh; |
3606 | byte *key, *ekey, *gkey = NULL; |
3607 | int funcp, dir; |
3608 | unsigned int rsize; |
3609 | if (!s || !s->index) { return NULL; } |
3610 | if (optarg) { |
3611 | unit = grn_rec_userdef; |
3612 | rsize = optarg->key_size; |
3613 | funcp = optarg->func ? 1 : 0; |
3614 | dir = (optarg->mode == grn_sort_ascending) ? -1 : 1; |
3615 | } else { |
3616 | unit = grn_rec_document; |
3617 | rsize = grn_rec_unit_size(unit, sizeof(grn_id)); |
3618 | funcp = 0; |
3619 | dir = 1; |
3620 | } |
3621 | if (funcp) { |
3622 | gkey = GRN_MALLOC(rsize ? rsize : 8192); |
3623 | if (!gkey) { |
      GRN_LOG(ctx, GRN_LOG_ALERT, "allocation for gkey failed !");
3625 | return NULL; |
3626 | } |
3627 | } else { |
3628 | if (s->key_size <= rsize) { return NULL; } |
3629 | } |
3630 | if (!(c = grn_hash_cursor_open(s->ctx, s, NULL, 0, NULL, -1, 0))) { |
    GRN_LOG(ctx, GRN_LOG_ALERT, "grn_hash_cursor_open on grn_hash_group failed !");
3632 | if (gkey) { GRN_FREE(gkey); } |
3633 | return NULL; |
3634 | } |
3635 | grn_memcpy(&h, s, sizeof(grn_hash)); |
3636 | g = s; |
3637 | s = &h; |
3638 | if (grn_rhash_init(ctx, g, unit, rsize, s->record_unit, s->key_size, limit)) { |
    GRN_LOG(ctx, GRN_LOG_ALERT, "grn_rhash_init in grn_hash_group failed !");
3640 | grn_hash_cursor_close(s->ctx, c); |
3641 | if (gkey) { GRN_FREE(gkey); } |
3642 | return NULL; |
3643 | } |
3644 | while ((rh = grn_hash_cursor_next(ctx, c))) { |
3645 | grn_hash_cursor_get_key_value(ctx, c, (void **)&key, NULL, (void **)&ri); |
3646 | if (funcp) { |
3647 | if (optarg->func((grn_records *)s, |
3648 | (grn_recordh *)(intptr_t)rh, gkey, optarg->func_arg)) { continue; } |
3649 | ekey = key; |
3650 | } else { |
3651 | gkey = key; |
3652 | ekey = key + rsize; |
3653 | } |
3654 | { |
3655 | grn_rset_recinfo *gri; |
3656 | if (grn_hash_add(ctx, g, gkey, rsize, (void **)&gri, NULL)) { |
3657 | grn_rhash_add_subrec(g, gri, ri->score, ekey, dir); |
3658 | } |
3659 | } |
3660 | } |
3661 | grn_hash_cursor_close(s->ctx, c); |
3662 | grn_rhash_fin(s->ctx, s); |
3663 | if (funcp) { GRN_FREE(gkey); } |
3664 | return g; |
3665 | } |
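
/*
 * grn_rhash_subrec_info() looks up sub-record `index` of record `rh` and
 * reports its score, a pointer to its body, and the record id / section /
 * position encoded in it, interpreting the stored bytes according to the
 * hash's record_unit and subrec_unit. Any output pointer may be NULL.
 * Returns GRN_INVALID_ARGUMENT for out-of-range arguments.
 */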
3666 | |
3667 | grn_rc |
3668 | grn_rhash_subrec_info(grn_ctx *ctx, grn_hash *s, grn_id rh, int index, |
3669 | grn_id *rid, int *section, int *pos, int *score, void **subrec) |
3670 | { |
3671 | grn_rset_posinfo *pi; |
3672 | grn_rset_recinfo *ri; |
3673 | int *p, unit_size = GRN_RSET_SCORE_SIZE + s->subrec_size; |
3674 | if (!s || !rh || index < 0) { return GRN_INVALID_ARGUMENT; } |
3675 | if ((unsigned int)index >= s->max_n_subrecs) { return GRN_INVALID_ARGUMENT; } |
3676 | { |
3677 | entry_str *ee; |
3678 | if (!grn_hash_bitmap_at(ctx, s, rh)) { return GRN_INVALID_ARGUMENT; } |
3679 | ee = grn_hash_entry_at(ctx, s, rh, 0); |
3680 | if (!ee) { return GRN_INVALID_ARGUMENT; } |
3681 | pi = (grn_rset_posinfo *)get_key(ctx, s, ee); |
3682 | ri = get_value(ctx, s, ee); |
3683 | if (!pi || !ri) { return GRN_INVALID_ARGUMENT; } |
3684 | } |
3685 | if (index >= ri->n_subrecs) { return GRN_INVALID_ARGUMENT; } |
3686 | p = (int *)(ri->subrecs + index * unit_size); |
3687 | if (score) { *score = p[0]; } |
3688 | if (subrec) { *subrec = &p[1]; } |
3689 | switch (s->record_unit) { |
3690 | case grn_rec_document : |
3691 | if (rid) { *rid = pi->rid; } |
3692 | if (section) { *section = (s->subrec_unit != grn_rec_userdef) ? p[1] : 0; } |
3693 | if (pos) { *pos = (s->subrec_unit == grn_rec_position) ? p[2] : 0; } |
3694 | break; |
3695 | case grn_rec_section : |
3696 | if (rid) { *rid = pi->rid; } |
3697 | if (section) { *section = pi->sid; } |
3698 | if (pos) { *pos = (s->subrec_unit == grn_rec_position) ? p[1] : 0; } |
3699 | break; |
3700 | default : |
3701 | pi = (grn_rset_posinfo *)&p[1]; |
3702 | switch (s->subrec_unit) { |
3703 | case grn_rec_document : |
3704 | if (rid) { *rid = pi->rid; } |
3705 | if (section) { *section = 0; } |
3706 | if (pos) { *pos = 0; } |
3707 | break; |
3708 | case grn_rec_section : |
3709 | if (rid) { *rid = pi->rid; } |
3710 | if (section) { *section = pi->sid; } |
3711 | if (pos) { *pos = 0; } |
3712 | break; |
3713 | case grn_rec_position : |
3714 | if (rid) { *rid = pi->rid; } |
3715 | if (section) { *section = pi->sid; } |
3716 | if (pos) { *pos = pi->pos; } |
3717 | break; |
3718 | default : |
3719 | if (rid) { *rid = 0; } |
3720 | if (section) { *section = 0; } |
3721 | if (pos) { *pos = 0; } |
3722 | break; |
3723 | } |
3724 | break; |
3725 | } |
3726 | return GRN_SUCCESS; |
3727 | } |
3728 | #endif /* USE_GRN_INDEX2 */ |
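
/*
 * Total key size accounting: the header keeps two counters, curr_key_normal
 * for ordinary tables and curr_key_large for tables created with
 * GRN_OBJ_KEY_LARGE. The helpers below pick the matching counter and limit,
 * so the remaining key space can be computed as, e.g.:
 *
 *   uint64_t rest = grn_hash_max_total_key_size(ctx, hash) -
 *                   grn_hash_total_key_size(ctx, hash);
 */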
3729 | |
3730 | grn_bool |
3731 | grn_hash_is_large_total_key_size(grn_ctx *ctx, grn_hash *hash) |
3732 | { |
3733 | return (hash->header.common->flags & GRN_OBJ_KEY_LARGE) == GRN_OBJ_KEY_LARGE; |
3734 | } |
3735 | |
3736 | uint64_t |
3737 | grn_hash_total_key_size(grn_ctx *ctx, grn_hash *hash) |
3738 | { |
3739 | if (grn_hash_is_large_total_key_size(ctx, hash)) { |
3740 | return hash->header.common->curr_key_large; |
3741 | } else { |
3742 | return hash->header.common->curr_key_normal; |
3743 | } |
3744 | } |
3745 | |
3746 | uint64_t |
3747 | grn_hash_max_total_key_size(grn_ctx *ctx, grn_hash *hash) |
3748 | { |
3749 | if (grn_hash_is_large_total_key_size(ctx, hash)) { |
3750 | return GRN_HASH_KEY_MAX_TOTAL_SIZE_LARGE; |
3751 | } else { |
3752 | return GRN_HASH_KEY_MAX_TOTAL_SIZE_NORMAL; |
3753 | } |
3754 | } |
3755 | |