/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*======
This file is part of PerconaFT.


Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.

    PerconaFT is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License, version 2,
    as published by the Free Software Foundation.

    PerconaFT is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.

----------------------------------------

    PerconaFT is free software: you can redistribute it and/or modify
    it under the terms of the GNU Affero General Public License, version 3,
    as published by the Free Software Foundation.

    PerconaFT is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License
    along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
======= */

#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."

#pragma once

#include <stdint.h>
#include <memory.h>
#include <toku_portability.h>
#include <toku_race_tools.h>
#include "growable_array.h"

namespace toku {
/**
 * Order Maintenance Tree (OMT)
 *
 * Maintains a collection of totally ordered values, where each value has an integer weight.
 * The OMT is a mutable datatype.
 *
 * The Abstraction:
 *
 * An OMT is a vector of values, $V$, where $|V|$ is the length of the vector.
 * The vector is numbered from $0$ to $|V|-1$.
 * Each value has a weight. The weight of the $i$th element is denoted $w(V_i)$.
 *
 * We can create a new OMT, which is the empty vector.
 *
 * We can insert a new element $x$ into slot $i$, changing $V$ into $V'$ where
 *  $|V'|=1+|V|$ and
 *
 *   V'_j = V_j       if $j<i$
 *          x         if $j=i$
 *          V_{j-1}   if $j>i$.
 *
 * We can specify $i$ using a kind of function instead of as an integer.
 * Let $b$ be a function mapping from values to nonzero integers, such that
 * the signum of $b$ is monotonically increasing.
 * We can specify $i$ as the minimum integer such that $b(V_i)>0$.
 *
 * We look up a value using its index, or using a Heaviside function.
 * For lookups, we allow $b$ to be zero for some values, and again the signum of $b$ must be monotonically increasing.
 * When looking up values, we can look up
 *  $V_i$ where $i$ is the minimum integer such that $b(V_i)=0$. (With a special return code if no such value exists.)
 *      (Rationale: Ordinarily we want $i$ to be unique. But for various reasons we want to allow multiple zeros, and we want the smallest $i$ in that case.)
 *  $V_i$ where $i$ is the minimum integer such that $b(V_i)>0$. (Or an indication that no such value exists.)
 *  $V_i$ where $i$ is the maximum integer such that $b(V_i)<0$. (Or an indication that no such value exists.)
 *
 * When looking up a value using a Heaviside function, we get the value and its index.
 *
 * We can also split an OMT into two OMTs, splitting the weight of the values evenly.
 * Find an index $j$ such that the values to the left of $j$ have about the same total weight as the values to the right of $j$.
 * The resulting two OMTs contain the values to the left of $j$ and the values to the right of $j$ respectively.
 * All of the values from the original OMT go into one of the new OMTs.
 * If the weights of the values don't split exactly evenly, then the implementation has the freedom to choose whether
 * the new left OMT or the new right OMT is larger.
 *
 * Performance:
 *  Insertion and deletion should run with $O(\log |V|)$ time and $O(\log |V|)$ calls to the Heaviside function.
 *  The memory required is O(|V|).
 *
 * Usage:
 *  The omt is templated by two parameters:
 *   - omtdata_t is what will be stored within the omt. These could be pointers or real data types (ints, structs).
 *   - omtdataout_t is what will be returned by find and related functions. By default, it is the same as omtdata_t, but you can set it to (omtdata_t *).
 *  To create an omt which will store "TXNID"s, for example, it is a good idea to typedef the template:
 *   typedef omt<TXNID> txnid_omt_t;
 *  If you are storing structs, you may want to be able to get a pointer to the data actually stored in the omt (see find_zero). To do this, use the second template parameter:
 *   typedef omt<struct foo, struct foo *> foo_omt_t;
 */
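
// Example usage (a minimal sketch, not taken from the PerconaFT sources; the
// element type and values are hypothetical). It exercises create(),
// insert_at(), fetch(), size(), and destroy() as declared below:
//
//     typedef omt<uint32_t> u32_omt_t;
//
//     u32_omt_t m;
//     m.create();                 // empty OMT
//     m.insert_at(10, 0);         // V = [10]
//     m.insert_at(20, 1);         // V = [10, 20]
//     uint32_t v;
//     int r = m.fetch(1, &v);     // r == 0, v == 20
//     // m.size() == 2
//     m.destroy();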

namespace omt_internal {

template<bool subtree_supports_marks>
class subtree_templated {
private:
    uint32_t m_index;
public:
    static const uint32_t NODE_NULL = UINT32_MAX;
    inline void set_to_null(void) {
        m_index = NODE_NULL;
    }

    inline bool is_null(void) const {
        return NODE_NULL == this->get_index();
    }

    inline uint32_t get_index(void) const {
        return m_index;
    }

    inline void set_index(uint32_t index) {
        paranoid_invariant(index != NODE_NULL);
        m_index = index;
    }
} __attribute__((__packed__,aligned(4)));

template<>
class subtree_templated<true> {
private:
    uint32_t m_bitfield;
    static const uint32_t MASK_INDEX = ~(((uint32_t)1) << 31);
    static const uint32_t MASK_BIT = ((uint32_t)1) << 31;
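    // Layout of m_bitfield (inferred from the masks above): bit 31 holds the
    // stolen mark/"marks below" flag, and bits 0..30 hold the node index, so
    // valid indices are strictly less than NODE_NULL (2^31 - 1).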

    inline void set_index_internal(uint32_t new_index) {
        m_bitfield = (m_bitfield & MASK_BIT) | new_index;
    }
public:
    static const uint32_t NODE_NULL = INT32_MAX;
    inline void set_to_null(void) {
        this->set_index_internal(NODE_NULL);
    }

    inline bool is_null(void) const {
        return NODE_NULL == this->get_index();
    }

    inline uint32_t get_index(void) const {
        TOKU_DRD_IGNORE_VAR(m_bitfield);
        const uint32_t bits = m_bitfield;
        TOKU_DRD_STOP_IGNORING_VAR(m_bitfield);
        return bits & MASK_INDEX;
    }

    inline void set_index(uint32_t index) {
        paranoid_invariant(index < NODE_NULL);
        this->set_index_internal(index);
    }

    inline bool get_bit(void) const {
        TOKU_DRD_IGNORE_VAR(m_bitfield);
        const uint32_t bits = m_bitfield;
        TOKU_DRD_STOP_IGNORING_VAR(m_bitfield);
        return (bits & MASK_BIT) != 0;
    }

    inline void enable_bit(void) {
        // These bits may be set by a thread with a write lock on some
        // leaf, and the index can be read by another thread with a (read
        // or write) lock on another leaf. Also, the has_marks_below
        // bit can be set by two threads simultaneously. Neither of these
        // is a real race, so if we are using DRD we should tell it to
        // ignore these bits just while we set this bit. If there were a
        // race in setting the index, that would be a real race.
        TOKU_DRD_IGNORE_VAR(m_bitfield);
        m_bitfield |= MASK_BIT;
        TOKU_DRD_STOP_IGNORING_VAR(m_bitfield);
    }

    inline void disable_bit(void) {
        m_bitfield &= MASK_INDEX;
    }
} __attribute__((__packed__)) ;

template<typename omtdata_t, bool subtree_supports_marks>
class omt_node_templated {
public:
    uint32_t weight;
    subtree_templated<subtree_supports_marks> left;
    subtree_templated<subtree_supports_marks> right;
    omtdata_t value;

    // this needs to be in both implementations because we don't have
    // a "static if" the caller can use
    inline void clear_stolen_bits(void) {}
} __attribute__((__packed__,aligned(4)));

template<typename omtdata_t>
class omt_node_templated<omtdata_t, true> {
public:
    uint32_t weight;
    subtree_templated<true> left;
    subtree_templated<true> right;
    omtdata_t value;
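
    // The "marked" flag lives in the bit stolen from the left subtree's index
    // field, and the "marks below" flag in the bit stolen from the right
    // subtree's index field (see subtree_templated<true> above).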
    inline bool get_marked(void) const {
        return left.get_bit();
    }
    inline void set_marked_bit(void) {
        return left.enable_bit();
    }
    inline void unset_marked_bit(void) {
        return left.disable_bit();
    }

    inline bool get_marks_below(void) const {
        return right.get_bit();
    }
    inline void set_marks_below_bit(void) {
        // This function can be called by multiple threads.
        // Checking first reduces cache invalidation.
        if (!this->get_marks_below()) {
            right.enable_bit();
        }
    }
    inline void unset_marks_below_bit(void) {
        right.disable_bit();
    }

    inline void clear_stolen_bits(void) {
        this->unset_marked_bit();
        this->unset_marks_below_bit();
    }
} __attribute__((__packed__,aligned(4)));

}

template<typename omtdata_t,
         typename omtdataout_t=omtdata_t,
         bool supports_marks=false>
class omt {
public:
    /**
     * Effect: Create an empty OMT.
     * Performance: constant time.
     */
    void create(void);

    /**
     * Effect: Create an empty OMT with no internal allocated space.
     * Performance: constant time.
     * Rationale: In some cases we need a valid omt but don't want to malloc.
     */
    void create_no_array(void);

    /**
     * Effect: Create an OMT containing the given values. The number of values is in numvalues.
     *  The values are copied into this OMT.
     * Requires: this has not been created yet
     * Requires: values != NULL
     * Requires: values is sorted
     * Performance: time=O(numvalues)
     * Rationale: Normally to insert N values takes O(N lg N) amortized time.
     *            If the N values are known in advance, are sorted, and
     *            the structure is empty, we can batch insert them much faster.
     */
    __attribute__((nonnull))
    void create_from_sorted_array(const omtdata_t *const values, const uint32_t numvalues);
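
    // Example (a minimal sketch; the array contents are hypothetical). The
    // input must already be sorted in the OMT's order:
    //
    //     const uint32_t sorted[] = {1, 2, 3, 5, 8};
    //     omt<uint32_t> m;
    //     m.create_from_sorted_array(sorted, 5);   // O(numvalues), not O(N lg N)
    //     // ... use m ...
    //     m.destroy();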

    /**
     * Effect: Create an OMT containing values. The number of values is in numvalues.
     *         On success the OMT takes ownership of *values array, and sets values=NULL.
     * Requires: this has not been created yet
     * Requires: values != NULL
     * Requires: *values is sorted
     * Requires: *values was allocated with toku_malloc
     * Requires: Capacity of the *values array is <= new_capacity
     * Requires: On success, *values may not be accessed again by the caller.
     * Performance: time=O(1)
     * Rationale: create_from_sorted_array takes O(numvalues) time.
     *            By taking ownership of the array, we save a malloc and memcpy,
     *            and possibly a free (if the caller is done with the array).
     */
    void create_steal_sorted_array(omtdata_t **const values, const uint32_t numvalues, const uint32_t new_capacity);

    /**
     * Effect: Create a new OMT, storing it in *newomt.
     *  The values to the right of idx (starting at idx) are moved to *newomt.
     * Requires: newomt != NULL
     * Returns
     *  0 success,
     *  EINVAL if idx > this->size()
     * On nonzero return, this OMT and *newomt are unmodified.
     * Performance: time=O(n)
     * Rationale: We don't need a split-evenly operation. We need to split items so that their total sizes
     *  are even, and other similar splitting criteria. It's easy to split evenly by calling size() and dividing by two.
     */
    __attribute__((nonnull))
    int split_at(omt *const newomt, const uint32_t idx);

    /**
     * Effect: Appends leftomt and rightomt to produce a new omt.
     *  Creates this as the new omt.
     *  leftomt and rightomt are destroyed.
     * Performance: time=O(n) is acceptable, but one can imagine implementations that are O(\log n) worst-case.
     */
    __attribute__((nonnull))
    void merge(omt *const leftomt, omt *const rightomt);
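
    // Example (a hedged sketch; assumes an omt<uint32_t> "m" that already
    // holds some values). split_at() moves the suffix starting at idx into a
    // second OMT; merge() then builds a third OMT from the two pieces:
    //
    //     omt<uint32_t> right;
    //     int r = m.split_at(&right, m.size() / 2);   // 0 on success
    //     omt<uint32_t> joined;
    //     joined.merge(&m, &right);   // per the docs above, this creates
    //                                 // "joined" and destroys m and right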

    /**
     * Effect: Creates a copy of an omt.
     *  Creates this as the clone.
     *  Each element is copied directly. If they are pointers, the underlying data is not duplicated.
     * Performance: O(n) or the running time of fill_array_with_subtree_values()
     */
    void clone(const omt &src);

    /**
     * Effect: Set the tree to be empty.
     *  Note: Will not reallocate or resize any memory.
     * Performance: time=O(1)
     */
    void clear(void);

    /**
     * Effect: Destroy an OMT, freeing all its memory.
     *  If the values being stored are pointers, their underlying data is not freed. See free_items()
     *  Those values may be freed before or after calling destroy().
     * Rationale: Returns no values since free() cannot fail.
     * Rationale: Does not free the underlying pointers to reduce complexity.
     * Performance: time=O(1)
     */
    void destroy(void);

    /**
     * Effect: return |this|.
     * Performance: time=O(1)
     */
    uint32_t size(void) const;


    /**
     * Effect: Insert value into the OMT.
     *  If there is some i such that $h(V_i, v)=0$ then returns DB_KEYEXIST.
     *  Otherwise, let i be the minimum value such that $h(V_i, v)>0$.
     *   If no such i exists, then let i be |V|.
     *  Then this has the same effect as
     *   insert_at(value, i);
     *  If idx!=NULL then i is stored in *idx
     * Requires: The signum of h must be monotonically increasing.
     * Returns:
     *  0 success
     *  DB_KEYEXIST the key is present (h was equal to zero for some value)
     * On nonzero return, omt is unchanged.
     * Performance: time=O(\log N) amortized.
     * Rationale: Some future implementation may be O(\log N) worst-case time, but O(\log N) amortized is good enough for now.
     */
    template<typename omtcmp_t, int (*h)(const omtdata_t &, const omtcmp_t &)>
    int insert(const omtdata_t &value, const omtcmp_t &v, uint32_t *const idx);
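
    // Example (a hedged sketch, not from the original sources): a Heaviside
    // function over uint32_t keys whose signum is monotonically increasing,
    // usable as the h parameter of insert() (and of find_zero()/find() below).
    // The names are hypothetical:
    //
    //     int u32_cmp(const uint32_t &stored, const uint32_t &key) {
    //         if (stored < key) return -1;
    //         if (stored > key) return +1;
    //         return 0;
    //     }
    //
    //     omt<uint32_t> m;
    //     m.create();
    //     uint32_t idx;
    //     int r = m.insert<uint32_t, u32_cmp>(42, 42, &idx);
    //     // r == 0 and idx is 42's slot; inserting 42 again returns DB_KEYEXIST.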

    /**
     * Effect: Increases indexes of all items at slot >= idx by 1.
     *  Insert value into the position at idx.
     * Returns:
     *  0 success
     *  EINVAL if idx > this->size()
     * On error, omt is unchanged.
     * Performance: time=O(\log N) amortized time.
     * Rationale: Some future implementation may be O(\log N) worst-case time, but O(\log N) amortized is good enough for now.
     */
    int insert_at(const omtdata_t &value, const uint32_t idx);

    /**
     * Effect: Replaces the item at idx with value.
     * Returns:
     *  0 success
     *  EINVAL if idx>=this->size()
     * On error, omt is unchanged.
     * Performance: time=O(\log N)
     * Rationale: The FT needs to be able to replace a value with another copy of the same value (allocated in a different location)
     *
     */
    int set_at(const omtdata_t &value, const uint32_t idx);

    /**
     * Effect: Delete the item in slot idx.
     *  Decreases indexes of all items at slot > idx by 1.
     * Returns
     *  0 success
     *  EINVAL if idx>=this->size()
     * On error, omt is unchanged.
     * Rationale: To delete an item, first find its index using find or find_zero, then delete it.
     * Performance: time=O(\log N) amortized.
     */
    int delete_at(const uint32_t idx);

    /**
     * Effect: Iterate over the values of the omt, from left to right, calling f on each value.
     *  The first argument passed to f is a ref-to-const of the value stored in the omt.
     *  The second argument passed to f is the index of the value.
     *  The third argument passed to f is iterate_extra.
     *  The indices run from 0 (inclusive) to this->size() (exclusive).
     * Requires: f != NULL
     * Returns:
     *  If f ever returns nonzero, then the iteration stops, and the value returned by f is returned by iterate.
     *  If f always returns zero, then iterate returns 0.
     * Requires: Don't modify the omt while running. (E.g., f may not insert or delete values from the omt.)
     * Performance: time=O(i+\log N) where i is the number of times f is called, and N is the number of elements in the omt.
     * Rationale: Although the functional iterator requires defining another function (as opposed to a C++-style iterator), it is much easier to read.
     * Rationale: We may at some point use functors, but for now this is a smaller change from the old OMT.
     */
    template<typename iterate_extra_t,
             int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
    int iterate(iterate_extra_t *const iterate_extra) const;
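
    // Example (a hedged sketch; the callback and accumulator names are
    // hypothetical, and "m" is the omt<uint32_t> from the insert() sketch
    // above). A callback must match
    // int (*)(const omtdata_t &, const uint32_t, iterate_extra_t *const):
    //
    //     struct sum_extra {
    //         uint64_t total;
    //     };
    //     int sum_cb(const uint32_t &value, const uint32_t idx __attribute__((unused)), sum_extra *const extra) {
    //         extra->total += value;
    //         return 0;   // nonzero would stop the iteration and be returned
    //     }
    //
    //     sum_extra extra = {0};
    //     int r = m.iterate<sum_extra, sum_cb>(&extra);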

    /**
     * Effect: Iterate over the values of the omt, from left to right, calling f on each value.
     *  The first argument passed to f is a ref-to-const of the value stored in the omt.
     *  The second argument passed to f is the index of the value.
     *  The third argument passed to f is iterate_extra.
     *  The indices run from 0 (inclusive) to this->size() (exclusive).
     *  We will iterate only over [left,right)
     *
     * Requires: left <= right
     * Requires: f != NULL
     * Returns:
     *  EINVAL if right > this->size()
     *  If f ever returns nonzero, then the iteration stops, and the value returned by f is returned by iterate_on_range.
     *  If f always returns zero, then iterate_on_range returns 0.
     * Requires: Don't modify the omt while running. (E.g., f may not insert or delete values from the omt.)
     * Performance: time=O(i+\log N) where i is the number of times f is called, and N is the number of elements in the omt.
     * Rationale: Although the functional iterator requires defining another function (as opposed to a C++-style iterator), it is much easier to read.
     */
    template<typename iterate_extra_t,
             int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
    int iterate_on_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra) const;

    /**
     * Effect: Iterate over the values of the omt, and mark the nodes that are visited.
     *  Other than the marks, this behaves the same as iterate_on_range.
     * Requires: supports_marks == true
     * Performance: time=O(i+\log N) where i is the number of times f is called, and N is the number of elements in the omt.
     * Notes:
     *  This function MAY be called concurrently by multiple threads, but
     *  not concurrently with any other non-const function.
     */
    template<typename iterate_extra_t,
             int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
    int iterate_and_mark_range(const uint32_t left, const uint32_t right, iterate_extra_t *const iterate_extra);

    /**
     * Effect: Iterate over the values of the omt, from left to right, calling f on each value whose node has been marked.
     *  Other than the marks, this behaves the same as iterate.
     * Requires: supports_marks == true
     * Performance: time=O(i+\log N) where i is the number of times f is called, and N is the number of elements in the omt.
     */
    template<typename iterate_extra_t,
             int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
    int iterate_over_marked(iterate_extra_t *const iterate_extra) const;

    /**
     * Effect: Delete all elements from the omt whose nodes have been marked.
     * Requires: supports_marks == true
     * Performance: time=O(N + i\log N) where i is the number of marked elements; it could (and should) be faster.
     */
    void delete_all_marked(void);
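
    // Example (a hedged sketch; requires an omt instantiated with
    // supports_marks == true; visit_cb/visit_extra are hypothetical and have
    // the same shape as the iterate callback sketched above):
    //
    //     typedef omt<uint32_t, uint32_t, true> marked_u32_omt_t;
    //     marked_u32_omt_t mm;   // ... created and populated elsewhere ...
    //
    //     visit_extra extra;
    //     int r = mm.iterate_and_mark_range<visit_extra, visit_cb>(0, mm.size() / 2, &extra);
    //     mm.delete_all_marked();   // removes exactly the elements marked above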

    /**
     * Effect: Verify that the internal state of the marks in the tree is self-consistent.
     *  Crashes the system if the marks are in a bad state.
     * Requires: supports_marks == true
     * Performance: time=O(N)
     * Notes:
     *  Even though this is a const function, it requires exclusive access.
     * Rationale:
     *  The current implementation of the marks relies on a sort of
     *  "cache" bit representing the state of bits below it in the tree.
     *  This allows glass-box testing that these bits are correct.
     */
    void verify_marks_consistent(void) const;

    /**
     * Effect: None
     * Returns whether there are any marks in the tree.
     */
    bool has_marks(void) const;

    /**
     * Effect: Iterate over the values of the omt, from left to right, calling f on each value.
     *  The first argument passed to f is a pointer to the value stored in the omt.
     *  The second argument passed to f is the index of the value.
     *  The third argument passed to f is iterate_extra.
     *  The indices run from 0 (inclusive) to this->size() (exclusive).
     * Requires: same as for iterate()
     * Returns: same as for iterate()
     * Performance: same as for iterate()
     * Rationale: In general, most iterators should use iterate() since they should not modify the data stored in the omt. This function is for iterators which need to modify values (for example, free_items).
     * Rationale: We assume if you are transforming the data in place, you want to do it to everything at once, so there is not yet an iterate_on_range_ptr (but there could be).
     */
    template<typename iterate_extra_t,
             int (*f)(omtdata_t *, const uint32_t, iterate_extra_t *const)>
    void iterate_ptr(iterate_extra_t *const iterate_extra);
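
    // Example (a hedged sketch; scale_cb and noop_extra are hypothetical).
    // The callback gets a mutable pointer to the stored value, so it may edit
    // it in place:
    //
    //     struct noop_extra {};
    //     int scale_cb(uint32_t *value, const uint32_t idx __attribute__((unused)), noop_extra *const extra __attribute__((unused))) {
    //         *value *= 2;   // edit the stored value in place
    //         return 0;
    //     }
    //
    //     noop_extra e;
    //     m.iterate_ptr<noop_extra, scale_cb>(&e);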

    /**
     * Effect: Set *value=V_idx
     * Returns
     *  0 success
     *  EINVAL if idx>=this->size()
     * On nonzero return, *value is unchanged
     * Performance: time=O(\log N)
     */
    int fetch(const uint32_t idx, omtdataout_t *const value) const;

    /**
     * Effect: Find the smallest i such that h(V_i, extra)>=0
     *  If there is such an i and h(V_i,extra)==0 then set *idxp=i, set *value = V_i, and return 0.
     *  If there is such an i and h(V_i,extra)>0 then set *idxp=i and return DB_NOTFOUND.
     *  If there is no such i then set *idxp=this->size() and return DB_NOTFOUND.
     * Note: value is of type omtdataout_t, which may be of type (omtdata_t) or (omtdata_t *) but is fixed by the instantiation.
     *  If it is the value type, then the value is copied out (even if the value type is a pointer to something else)
     *  If it is the pointer type, then *value is set to a pointer to the data within the omt.
     *  This is determined by the type of the omt as initially declared.
     *   If the omt is declared as omt<foo_t>, then foo_t's will be stored and foo_t's will be returned by find and related functions.
     *   If the omt is declared as omt<foo_t, foo_t *>, then foo_t's will be stored, and pointers to the stored items will be returned by find and related functions.
     * Rationale:
     *  Structs too small for malloc should be stored directly in the omt.
     *  These structs may need to be edited as they exist inside the omt, so we need a way to get a pointer within the omt.
     *  Using separate functions for returning pointers and values increases code duplication and reduces type-checking.
     *  That also reduces the ability of the creator of a data structure to give advice to its future users.
     *  Slight overloading in this case seemed to provide a better API and better type checking.
     */
    template<typename omtcmp_t,
             int (*h)(const omtdata_t &, const omtcmp_t &)>
    int find_zero(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const;
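
    // Example (a hedged sketch, reusing the hypothetical u32_cmp comparator
    // and omt<uint32_t> m sketched near insert() above):
    //
    //     uint32_t found, idx;
    //     int r = m.find_zero<uint32_t, u32_cmp>(42, &found, &idx);
    //     // r == 0 with found == 42 if 42 is present;
    //     // r == DB_NOTFOUND with idx at 42's would-be slot otherwise.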

    /**
     * Effect:
     *  If direction >0 then find the smallest i such that h(V_i,extra)>0.
     *  If direction <0 then find the largest i such that h(V_i,extra)<0.
     *  (Direction may not be equal to zero.)
     *  If value!=NULL then store V_i in *value
     *  If idxp!=NULL then store i in *idxp.
     * Requires: The signum of h is monotonically increasing.
     * Returns
     *  0 success
     *  DB_NOTFOUND no such value is found.
     * On nonzero return, *value and *idxp are unchanged
     * Performance: time=O(\log N)
     * Rationale:
     *  Here's how to use the find function to find various things
     *  Cases for find:
     *   find first value:        ( h(v)=+1, direction=+1 )
     *   find last value          ( h(v)=-1, direction=-1 )
     *   find first X             ( h(v)=(v< x) ? -1 : 1    direction=+1 )
     *   find last X              ( h(v)=(v<=x) ? -1 : 1    direction=-1 )
     *   find X or successor to X ( same as find first X. )
     *
     * Rationale: To help understand heaviside functions and behavior of find:
     *  There are 7 kinds of heaviside functions.
     *  The signum of the h must be monotonically increasing.
     *  Given a function of the following form, A is the element
     *  returned for direction>0, B is the element returned
     *  for direction<0, C is the element returned for
     *  direction==0 (see find_zero) (with a return of 0), and D is the element
     *  returned for direction==0 (see find_zero) with a return of DB_NOTFOUND.
     *  If any of A, B, or C are not found, then asking for the
     *  associated direction will return DB_NOTFOUND.
     *  See find_zero for more information.
     *
     *  Let the following represent the signum of the heaviside function.
     *
     *  -...-
     *      A
     *       D
     *
     *  +...+
     *  B
     *  D
     *
     *  0...0
     *  C
     *
     *  -...-0...0
     *      AC
     *
     *  0...0+...+
     *  C    B
     *
     *  -...-+...+
     *      AB
     *      D
     *
     *  -...-0...0+...+
     *      AC   B
     */
    template<typename omtcmp_t,
             int (*h)(const omtdata_t &, const omtcmp_t &)>
    int find(const omtcmp_t &extra, int direction, omtdataout_t *const value, uint32_t *const idxp) const;
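
    // Example (a hedged sketch, again using the hypothetical u32_cmp and m
    // from above): "find first X" with X = 42 and direction = +1, which with
    // a three-way comparator finds the strict successor of 42:
    //
    //     uint32_t succ, idx;
    //     int r = m.find<uint32_t, u32_cmp>(42, +1, &succ, &idx);
    //     // r == 0 and succ is the smallest stored value > 42,
    //     // or r == DB_NOTFOUND if no such value exists.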

    /**
     * Effect: Return the size (in bytes) of the omt, as it resides in main memory. If the data stored are pointers, don't include the size of what they all point to.
     */
    size_t memory_size(void);

private:
    typedef uint32_t node_idx;
    typedef omt_internal::subtree_templated<supports_marks> subtree;
    typedef omt_internal::omt_node_templated<omtdata_t, supports_marks> omt_node;
    ENSURE_POD(subtree);

    struct omt_array {
        uint32_t start_idx;
        uint32_t num_values;
        omtdata_t *values;
    };

    struct omt_tree {
        subtree root;
        uint32_t free_idx;
        omt_node *nodes;
    };

    bool is_array;
    uint32_t capacity;
    union {
        struct omt_array a;
        struct omt_tree t;
    } d;
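
    // Representation note (inferred from the fields above): the omt is stored
    // either as a packed sorted array (d.a, when is_array is true) or as a
    // balanced binary tree of omt_node entries in d.t.nodes; the private
    // helpers below (convert_to_array / convert_to_tree) switch between the
    // two forms.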

    __attribute__((nonnull))
    void unmark(const subtree &subtree, const uint32_t index, GrowableArray<node_idx> *const indexes);

    void create_internal_no_array(const uint32_t new_capacity);

    void create_internal(const uint32_t new_capacity);

    uint32_t nweight(const subtree &subtree) const;

    node_idx node_malloc(void);

    void node_free(const node_idx idx);

    void maybe_resize_array(const uint32_t n);

    __attribute__((nonnull))
    void fill_array_with_subtree_values(omtdata_t *const array, const subtree &subtree) const;

    void convert_to_array(void);

    __attribute__((nonnull))
    void rebuild_from_sorted_array(subtree *const subtree, const omtdata_t *const values, const uint32_t numvalues);

    void convert_to_tree(void);

    void maybe_resize_or_convert(const uint32_t n);

    bool will_need_rebalance(const subtree &subtree, const int leftmod, const int rightmod) const;

    __attribute__((nonnull))
    void insert_internal(subtree *const subtreep, const omtdata_t &value, const uint32_t idx, subtree **const rebalance_subtree);

    void set_at_internal_array(const omtdata_t &value, const uint32_t idx);

    void set_at_internal(const subtree &subtree, const omtdata_t &value, const uint32_t idx);

    void delete_internal(subtree *const subtreep, const uint32_t idx, omt_node *const copyn, subtree **const rebalance_subtree);

    template<typename iterate_extra_t,
             int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
    int iterate_internal_array(const uint32_t left, const uint32_t right,
                               iterate_extra_t *const iterate_extra) const;

    template<typename iterate_extra_t,
             int (*f)(omtdata_t *, const uint32_t, iterate_extra_t *const)>
    void iterate_ptr_internal(const uint32_t left, const uint32_t right,
                              const subtree &subtree, const uint32_t idx,
                              iterate_extra_t *const iterate_extra);

    template<typename iterate_extra_t,
             int (*f)(omtdata_t *, const uint32_t, iterate_extra_t *const)>
    void iterate_ptr_internal_array(const uint32_t left, const uint32_t right,
                                    iterate_extra_t *const iterate_extra);

    template<typename iterate_extra_t,
             int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
    int iterate_internal(const uint32_t left, const uint32_t right,
                         const subtree &subtree, const uint32_t idx,
                         iterate_extra_t *const iterate_extra) const;

    template<typename iterate_extra_t,
             int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
    int iterate_and_mark_range_internal(const uint32_t left, const uint32_t right,
                                        const subtree &subtree, const uint32_t idx,
                                        iterate_extra_t *const iterate_extra);

    template<typename iterate_extra_t,
             int (*f)(const omtdata_t &, const uint32_t, iterate_extra_t *const)>
    int iterate_over_marked_internal(const subtree &subtree, const uint32_t idx,
                                     iterate_extra_t *const iterate_extra) const;

    uint32_t verify_marks_consistent_internal(const subtree &subtree, const bool allow_marks) const;

    void fetch_internal_array(const uint32_t i, omtdataout_t *const value) const;

    void fetch_internal(const subtree &subtree, const uint32_t i, omtdataout_t *const value) const;

    __attribute__((nonnull))
    void fill_array_with_subtree_idxs(node_idx *const array, const subtree &subtree) const;

    __attribute__((nonnull))
    void rebuild_subtree_from_idxs(subtree *const subtree, const node_idx *const idxs, const uint32_t numvalues);

    __attribute__((nonnull))
    void rebalance(subtree *const subtree);

    __attribute__((nonnull))
    static void copyout(omtdata_t *const out, const omt_node *const n);

    __attribute__((nonnull))
    static void copyout(omtdata_t **const out, omt_node *const n);

    __attribute__((nonnull))
    static void copyout(omtdata_t *const out, const omtdata_t *const stored_value_ptr);

    __attribute__((nonnull))
    static void copyout(omtdata_t **const out, omtdata_t *const stored_value_ptr);

    template<typename omtcmp_t,
             int (*h)(const omtdata_t &, const omtcmp_t &)>
    int find_internal_zero_array(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const;

    template<typename omtcmp_t,
             int (*h)(const omtdata_t &, const omtcmp_t &)>
    int find_internal_zero(const subtree &subtree, const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const;

    template<typename omtcmp_t,
             int (*h)(const omtdata_t &, const omtcmp_t &)>
    int find_internal_plus_array(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const;

    template<typename omtcmp_t,
             int (*h)(const omtdata_t &, const omtcmp_t &)>
    int find_internal_plus(const subtree &subtree, const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const;

    template<typename omtcmp_t,
             int (*h)(const omtdata_t &, const omtcmp_t &)>
    int find_internal_minus_array(const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const;

    template<typename omtcmp_t,
             int (*h)(const omtdata_t &, const omtcmp_t &)>
    int find_internal_minus(const subtree &subtree, const omtcmp_t &extra, omtdataout_t *const value, uint32_t *const idxp) const;
};

} // namespace toku

// include the implementation here
#include "omt.cc"