/*****************************************************************************

Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA

*****************************************************************************/

/********************************************************************//**
@file ha/ha0ha.cc
The hash table with external chains

Created 8/22/1994 Heikki Tuuri
*************************************************************************/

#include "ha0ha.h"

#ifdef UNIV_DEBUG
# include "buf0buf.h"
#endif /* UNIV_DEBUG */
#include "btr0sea.h"
#include "page0page.h"

/*************************************************************//**
Creates a hash table with at least n array cells. The actual number
of cells is chosen to be a prime number slightly bigger than n.
@return own: created table */
hash_table_t*
ib_create(
/*======*/
	ulint		n,	/*!< in: number of array cells */
	latch_id_t	id,	/*!< in: latch ID */
	ulint		n_sync_obj,
				/*!< in: number of mutexes to protect the
				hash table: must be a power of 2, or 0 */
	ulint		type)	/*!< in: type of datastructure for which
				the memory heap is going to be used:
				MEM_HEAP_FOR_BTR_SEARCH or
				MEM_HEAP_FOR_PAGE_HASH */
{
	hash_table_t*	table;

	ut_a(type == MEM_HEAP_FOR_BTR_SEARCH
	     || type == MEM_HEAP_FOR_PAGE_HASH);

	ut_ad(ut_is_2pow(n_sync_obj));
	table = hash_create(n);

	/* Creating MEM_HEAP_BTR_SEARCH type heaps can potentially fail,
	but in practice it never should in this case, hence the asserts. */

	if (n_sync_obj == 0) {
		table->heap = mem_heap_create_typed(
			std::min<ulong>(
				4096,
				MEM_MAX_ALLOC_IN_BUF / 2
				- MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)),
			type);
		ut_a(table->heap);

		return(table);
	}

	if (type == MEM_HEAP_FOR_PAGE_HASH) {
		/* We create a hash table protected by rw_locks for
		buf_pool->page_hash. */
		hash_create_sync_obj(
			table, HASH_TABLE_SYNC_RW_LOCK, id, n_sync_obj);
	} else {
		hash_create_sync_obj(
			table, HASH_TABLE_SYNC_MUTEX, id, n_sync_obj);
	}

	table->heaps = static_cast<mem_heap_t**>(
		ut_malloc_nokey(n_sync_obj * sizeof(void*)));

	for (ulint i = 0; i < n_sync_obj; i++) {
		table->heaps[i] = mem_heap_create_typed(
			std::min<ulong>(
				4096,
				MEM_MAX_ALLOC_IN_BUF / 2
				- MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)),
			type);
		ut_a(table->heaps[i]);
	}

	return(table);
}

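/* Usage sketch (illustrative only; the cell count and lock count below are
placeholder assumptions): roughly how a caller creates an rw-lock protected
table such as buf_pool->page_hash. The production call sites are in
btr0sea.cc (adaptive hash index) and buf0buf.cc (buf_pool->page_hash).

	hash_table_t*	t = ib_create(
		2 * n_pages,			// at least this many cells
		LATCH_ID_HASH_TABLE_RW_LOCK,	// latch id for the rw-locks
		64,				// 64 rw-locks; must be 2^k
		MEM_HEAP_FOR_PAGE_HASH);
	// ... HASH_INSERT()/HASH_SEARCH() under the rw-lock covering the fold ...
	ha_clear(t);		// frees the heaps, destroys the rw-locks
	hash_table_free(t);	// frees the cell array and the table itself
*/
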
/** Recreate a hash table with at least n array cells. The actual number
of cells is chosen to be a prime number slightly bigger than n.
The new cells are all cleared. The heaps are recreated.
The sync objects are reused.
@param[in,out]	table	hash table to be resized (to be freed later)
@param[in]	n	number of array cells
@return	resized new table */
hash_table_t*
ib_recreate(
	hash_table_t*	table,
	ulint		n)
{
	/* For now, this function is used only for buf_pool->page_hash. */
	ut_ad(table->type == HASH_TABLE_SYNC_RW_LOCK);
	ut_ad(table->n_sync_obj > 0);

	hash_table_t*	new_table = hash_create(n);

	new_table->type = table->type;
	new_table->n_sync_obj = table->n_sync_obj;
	new_table->sync_obj = table->sync_obj;

	for (ulint i = 0; i < table->n_sync_obj; i++) {
		mem_heap_free(table->heaps[i]);
	}
	ut_free(table->heaps);

	new_table->heaps = static_cast<mem_heap_t**>(
		ut_malloc_nokey(new_table->n_sync_obj * sizeof(void*)));

	for (ulint i = 0; i < new_table->n_sync_obj; i++) {
		new_table->heaps[i] = mem_heap_create_typed(
			std::min<ulong>(
				4096,
				MEM_MAX_ALLOC_IN_BUF / 2
				- MEM_BLOCK_HEADER_SIZE - MEM_SPACE_NEEDED(0)),
			MEM_HEAP_FOR_PAGE_HASH);
		ut_a(new_table->heaps[i]);
	}

	return(new_table);
}

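/* Resize hand-off sketch (illustrative only, loosely modelled on the
buf_pool->page_hash resize in buf0buf.cc; old_hash, new_size and the
surrounding locking are placeholders):

	hash_table_t*	new_hash = ib_recreate(old_hash, 2 * new_size);

	// Move every node from old_hash into new_hash (HASH_DELETE +
	// HASH_INSERT): the cell count changed, so each fold value now
	// maps to a different cell index.
	// ...
	buf_pool->page_hash = new_hash;

	// The rw-locks of old_hash are now shared with new_hash and its
	// heaps were already freed by ib_recreate(); the emptied old table
	// is freed later with hash_table_free(), once no reader can still
	// be looking at it.
*/
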
/*************************************************************//**
Empties a hash table and frees the memory heaps. */
void
ha_clear(
/*=====*/
	hash_table_t*	table)	/*!< in, own: hash table */
{
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#ifdef BTR_CUR_HASH_ADAPT
	ut_ad(!table->adaptive || btr_search_own_all(RW_LOCK_X));
#endif /* BTR_CUR_HASH_ADAPT */

	for (ulint i = 0; i < table->n_sync_obj; i++) {
		mem_heap_free(table->heaps[i]);
	}

	ut_free(table->heaps);

	switch (table->type) {
	case HASH_TABLE_SYNC_MUTEX:
		for (ulint i = 0; i < table->n_sync_obj; ++i) {
			mutex_destroy(&table->sync_obj.mutexes[i]);
		}
		ut_free(table->sync_obj.mutexes);
		table->sync_obj.mutexes = NULL;
		break;

	case HASH_TABLE_SYNC_RW_LOCK:
		for (ulint i = 0; i < table->n_sync_obj; ++i) {
			rw_lock_free(&table->sync_obj.rw_locks[i]);
		}

		ut_free(table->sync_obj.rw_locks);
		table->sync_obj.rw_locks = NULL;
		break;

	case HASH_TABLE_SYNC_NONE:
		/* do nothing */
		break;
	}

	table->n_sync_obj = 0;
	table->type = HASH_TABLE_SYNC_NONE;

	/* Clear the hash table. */
	ulint	n = hash_get_n_cells(table);

	for (ulint i = 0; i < n; i++) {
		hash_get_nth_cell(table, i)->node = NULL;
	}
}

#ifdef BTR_CUR_HASH_ADAPT
# if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/** Maximum number of records in a page */
static const ulint MAX_N_POINTERS
	= UNIV_PAGE_SIZE_MAX / REC_N_NEW_EXTRA_BYTES;
# endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

/*************************************************************//**
Inserts an entry into a hash table. If an entry with the same fold number
is found, its node is updated to point to the new data, and no new node
is inserted. If btr_search_enabled is set to FALSE, we will only allow
updating existing nodes, but no new node is allowed to be added.
@return TRUE if succeed, FALSE if no more memory could be allocated */
ibool
ha_insert_for_fold_func(
/*====================*/
	hash_table_t*	table,	/*!< in: hash table */
	ulint		fold,	/*!< in: folded value of data; if a node with
				the same fold value already exists, it is
				updated to point to the same data, and no new
				node is created! */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	buf_block_t*	block,	/*!< in: buffer block containing the data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	const rec_t*	data)	/*!< in: data, must not be NULL */
{
	hash_cell_t*	cell;
	ha_node_t*	node;
	ha_node_t*	prev_node;
	ulint		hash;

	ut_ad(data);
	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	ut_a(block->frame == page_align(data));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	hash_assert_can_modify(table, fold);
	ut_ad(btr_search_enabled);

	hash = hash_calc_hash(fold, table);

	cell = hash_get_nth_cell(table, hash);

	prev_node = static_cast<ha_node_t*>(cell->node);

	while (prev_node != NULL) {
		if (prev_node->fold == fold) {
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
			if (table->adaptive) {
				buf_block_t*	prev_block = prev_node->block;
				ut_a(prev_block->frame
				     == page_align(prev_node->data));
				ut_a(my_atomic_addlint(&prev_block->n_pointers,
						       ulint(-1))
				     < MAX_N_POINTERS);
				ut_a(my_atomic_addlint(&block->n_pointers, 1)
				     < MAX_N_POINTERS);
			}

			prev_node->block = block;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
			prev_node->data = data;

			return(TRUE);
		}

		prev_node = prev_node->next;
	}

	/* We have to allocate a new chain node */

	node = static_cast<ha_node_t*>(
		mem_heap_alloc(hash_get_heap(table, fold), sizeof(ha_node_t)));

	if (node == NULL) {
		/* It was a btr search type memory heap and at the moment
		no more memory could be allocated: return */

		ut_ad(hash_get_heap(table, fold)->type & MEM_HEAP_BTR_SEARCH);

		return(FALSE);
	}

	ha_node_set_data(node, block, data);

#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	if (table->adaptive) {
		ut_a(my_atomic_addlint(&block->n_pointers, 1)
		     < MAX_N_POINTERS);
	}
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

	node->fold = fold;

	node->next = NULL;

	prev_node = static_cast<ha_node_t*>(cell->node);

	if (prev_node == NULL) {

		cell->node = node;

		return(TRUE);
	}

	while (prev_node->next != NULL) {

		prev_node = prev_node->next;
	}

	prev_node->next = node;

	return(TRUE);
}

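/* Behaviour sketch (illustrative only; t, blk, rec1, rec2 and the fold
value are hypothetical): two inserts with the same fold value leave a
single chain node, pointing at the most recently inserted record.

	ha_insert_for_fold(t, fold, blk, rec1);	// allocates a new chain node
	ha_insert_for_fold(t, fold, blk, rec2);	// reuses that node; it now
						// points to rec2, not rec1
	// ha_search_and_get_data(t, fold) now returns rec2.
*/
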
#ifdef UNIV_DEBUG
/** Verify if latch corresponding to the hash table is x-latched
@param[in]	table	hash table */
static
void
ha_btr_search_latch_x_locked(const hash_table_t* table)
{
	ulint	i;
	for (i = 0; i < btr_ahi_parts; ++i) {
		if (btr_search_sys->hash_tables[i] == table) {
			break;
		}
	}

	ut_ad(i < btr_ahi_parts);
	ut_ad(rw_lock_own(btr_search_latches[i], RW_LOCK_X));
}
#endif /* UNIV_DEBUG */

/***********************************************************//**
Deletes a hash node. */
void
ha_delete_hash_node(
/*================*/
	hash_table_t*	table,		/*!< in: hash table */
	ha_node_t*	del_node)	/*!< in: node to be deleted */
{
	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
	ut_d(ha_btr_search_latch_x_locked(table));
	ut_ad(btr_search_enabled);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	if (table->adaptive) {
		ut_a(del_node->block->frame == page_align(del_node->data));
		ut_a(my_atomic_addlint(&del_node->block->n_pointers, ulint(-1))
		     < MAX_N_POINTERS);
	}
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

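	/* Note: HASH_DELETE_AND_COMPACT() does not only unlink del_node from
	its chain; it also moves the node at the top of the heap into the
	freed slot to keep the heap compact, so pointers to other nodes may
	be invalidated. This is why ha_remove_all_nodes_to_page() below
	restarts its scan after every deletion. */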
	HASH_DELETE_AND_COMPACT(ha_node_t, next, table, del_node);
}

/*********************************************************//**
Looks for an element when we know the pointer to the data, and updates
the pointer to data, if found.
@return TRUE if found */
ibool
ha_search_and_update_if_found_func(
/*===============================*/
	hash_table_t*	table,	/*!< in/out: hash table */
	ulint		fold,	/*!< in: folded value of the searched data */
	const rec_t*	data,	/*!< in: pointer to the data */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	buf_block_t*	new_block,/*!< in: block containing new_data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
	const rec_t*	new_data)/*!< in: new pointer to the data */
{
	ha_node_t*	node;

	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
	hash_assert_can_modify(table, fold);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
	ut_a(new_block->frame == page_align(new_data));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */

	ut_d(ha_btr_search_latch_x_locked(table));

	if (!btr_search_enabled) {
		return(FALSE);
	}

	node = ha_search_with_data(table, fold, data);

	if (node) {
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
		if (table->adaptive) {
			ut_a(my_atomic_addlint(&node->block->n_pointers,
					       ulint(-1))
			     < MAX_N_POINTERS);
			ut_a(my_atomic_addlint(&new_block->n_pointers, 1)
			     < MAX_N_POINTERS);
		}

		node->block = new_block;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
		node->data = new_data;

		return(TRUE);
	}

	return(FALSE);
}

/*****************************************************************//**
Removes from the chain determined by fold all nodes whose data pointer
points to the page given. */
void
ha_remove_all_nodes_to_page(
/*========================*/
	hash_table_t*	table,	/*!< in: hash table */
	ulint		fold,	/*!< in: fold value */
	const page_t*	page)	/*!< in: buffer page */
{
	ha_node_t*	node;

	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
	hash_assert_can_modify(table, fold);
	ut_ad(btr_search_enabled);

	node = ha_chain_get_first(table, fold);

	while (node) {
		if (page_align(ha_node_get_data(node)) == page) {

			/* Remove the hash node */

			ha_delete_hash_node(table, node);

			/* Start again from the first node in the chain
			because the deletion may compact the heap of
			nodes and move other nodes! */

			node = ha_chain_get_first(table, fold);
		} else {
			node = ha_chain_get_next(node);
		}
	}
#ifdef UNIV_DEBUG
	/* Check that all nodes really got deleted */

	node = ha_chain_get_first(table, fold);

	while (node) {
		ut_a(page_align(ha_node_get_data(node)) != page);

		node = ha_chain_get_next(node);
	}
#endif /* UNIV_DEBUG */
}

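/* Usage sketch (illustrative only; folds[] and n_folds are hypothetical):
when a page is removed from the adaptive hash index,
btr_search_drop_page_hash_index() collects the fold values of the records
that may have been hashed from the page and calls this function once per
fold.

	for (ulint i = 0; i < n_folds; i++) {
		ha_remove_all_nodes_to_page(table, folds[i], page);
	}
*/
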
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/*************************************************************//**
Validates a given range of the cells in hash table.
@return TRUE if ok */
ibool
ha_validate(
/*========*/
	hash_table_t*	table,		/*!< in: hash table */
	ulint		start_index,	/*!< in: start index */
	ulint		end_index)	/*!< in: end index */
{
	ibool	ok	= TRUE;
	ulint	i;

	ut_ad(table);
	ut_ad(table->magic_n == HASH_TABLE_MAGIC_N);
	ut_a(start_index <= end_index);
	ut_a(start_index < hash_get_n_cells(table));
	ut_a(end_index < hash_get_n_cells(table));

	for (i = start_index; i <= end_index; i++) {
		ha_node_t*	node;
		hash_cell_t*	cell;

		cell = hash_get_nth_cell(table, i);

		for (node = static_cast<ha_node_t*>(cell->node);
		     node != 0;
		     node = node->next) {

			if (hash_calc_hash(node->fold, table) != i) {
				ib::error() << "Hash table node fold value "
					<< node->fold << " does not match the"
					" cell number " << i << ".";

				ok = FALSE;
			}
		}
	}

	return(ok);
}
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
#endif /* BTR_CUR_HASH_ADAPT */