/*
  Copyright (c) 2000, 2011, Oracle and/or its affiliates

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; version 2 of the License.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */


#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation                          // gcc: Class implementation
#endif

#define MYSQL_SERVER 1
#include "heapdef.h"
#include "sql_priv.h"
#include "sql_plugin.h"
#include "ha_heap.h"
#include "sql_base.h"                           // enum_tdc_remove_table_type

static handler *heap_create_handler(handlerton *hton,
                                    TABLE_SHARE *table,
                                    MEM_ROOT *mem_root);
static int
heap_prepare_hp_create_info(TABLE *table_arg, bool internal_table,
                            HP_CREATE_INFO *hp_create_info);


int heap_panic(handlerton *hton, ha_panic_function flag)
{
  return hp_panic(flag);
}

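/*
  Initialize the MEMORY (HEAP) handlerton. Called once when the plugin is
  loaded (see the plugin declarations at the end of this file).
*/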
int heap_init(void *p)
{
  handlerton *heap_hton;

#ifdef HAVE_PSI_INTERFACE
  init_heap_psi_keys();
#endif

  heap_hton= (handlerton *)p;
  heap_hton->state= SHOW_OPTION_YES;
  heap_hton->db_type= DB_TYPE_HEAP;
  heap_hton->create= heap_create_handler;
  heap_hton->panic= heap_panic;
  heap_hton->flags= HTON_CAN_RECREATE;

  return 0;
}

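/* Allocate a new ha_heap handler instance on the given MEM_ROOT. */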
static handler *heap_create_handler(handlerton *hton,
                                    TABLE_SHARE *table,
                                    MEM_ROOT *mem_root)
{
  return new (mem_root) ha_heap(hton, table);
}


/*****************************************************************************
** HEAP tables
*****************************************************************************/

ha_heap::ha_heap(handlerton *hton, TABLE_SHARE *table_arg)
  :handler(hton, table_arg), file(0), records_changed(0), key_stat_version(0),
   internal_table(0)
{}

/*
  Hash index statistics are updated (copied from HP_KEYDEF::hash_buckets to
  rec_per_key) after a 1/HEAP_STATS_UPDATE_THRESHOLD fraction of the table
  records has been inserted/updated/deleted. delete_all_rows() and a table
  flush cause an immediate update.

  NOTE
    Hash index statistics must be updated when the number of table records
    changes from 0 to a non-zero value and vice versa. Otherwise
    records_in_range() may erroneously return 0 and 'range' may miss records.
*/
#define HEAP_STATS_UPDATE_THRESHOLD 10

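/*
  Open a HEAP table. For internal (temporary) tables, and whenever the shared
  in-memory structures do not exist yet (heap_open() fails with ENOENT), the
  table is created on the fly from the TABLE definition.
*/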
int ha_heap::open(const char *name, int mode, uint test_if_locked)
{
  internal_table= MY_TEST(test_if_locked & HA_OPEN_INTERNAL_TABLE);
  if (internal_table || (!(file= heap_open(name, mode)) && my_errno == ENOENT))
  {
    HP_CREATE_INFO create_info;
    my_bool created_new_share;
    int rc;
    file= 0;
    if (heap_prepare_hp_create_info(table, internal_table, &create_info))
      goto end;
    create_info.pin_share= TRUE;

    rc= heap_create(name, &create_info, &internal_share, &created_new_share);
    my_free(create_info.keydef);
    if (rc)
      goto end;

    implicit_emptied= MY_TEST(created_new_share);
    if (internal_table)
      file= heap_open_from_share(internal_share, mode);
    else
      file= heap_open_from_share_and_register(internal_share, mode);

    if (!file)
    {
      heap_release_share(internal_share, internal_table);
      goto end;
    }
  }

  ref_length= sizeof(HEAP_PTR);
  /* Initialize variables for the opened table */
  set_keys_for_scanning();
  /*
    We cannot run update_key_stats() here because we do not have a
    lock on the table. The 'records' count might just be changed
    temporarily at this moment and we might get wrong statistics (Bug
    #10178). Instead we request an update. It will be done in
    ha_heap::info(), which is always called before key statistics are
    used.
  */
  key_stat_version= file->s->key_stat_version-1;
end:
  return (file ? 0 : 1);
}

int ha_heap::close(void)
{
  return internal_table ? hp_close(file) : heap_close(file);
}


/*
  Create a copy of this table

  DESCRIPTION
    Do the same as the default implementation, but use file->s->name instead
    of table->s->path. This is needed on Windows, where the clone() call sees
    a '/'-delimited path in table->s->path, while ha_heap::open() was called
    with a '\'-delimited path.
*/

handler *ha_heap::clone(const char *name, MEM_ROOT *mem_root)
{
  handler *new_handler= get_new_handler(table->s, mem_root, table->s->db_type());
  if (new_handler && !new_handler->ha_open(table, file->s->name, table->db_stat,
                                           HA_OPEN_IGNORE_IF_LOCKED))
    return new_handler;
  return NULL;                                  /* purecov: inspected */
}


/*
  Compute which keys to use for scanning

  SYNOPSIS
    set_keys_for_scanning()
    no parameter

  DESCRIPTION
    Set the bitmap btree_keys, which is used when the upper layers ask
    which keys to use for scanning. For each btree index the
    corresponding bit is set.

  RETURN
    void
*/

void ha_heap::set_keys_for_scanning(void)
{
  btree_keys.clear_all();
  for (uint i= 0 ; i < table->s->keys ; i++)
  {
    if (table->key_info[i].algorithm == HA_KEY_ALG_BTREE)
      btree_keys.set_bit(i);
  }
}

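/*
  Check whether an ongoing index or rnd scan may continue. If the table data
  has changed under the scan (the version counters of the handler and the
  share differ), HA_ERR_RECORD_CHANGED is returned.
*/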
int ha_heap::can_continue_handler_scan()
{
  int error= 0;
  if ((file->key_version != file->s->key_version && inited == INDEX) ||
      (file->file_version != file->s->file_version && inited == RND))
  {
    /* Data changed, not safe to do index or rnd scan */
    error= HA_ERR_RECORD_CHANGED;
  }
  return error;
}

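/*
  Recompute rec_per_key for all hash indexes from the current number of
  records and hash buckets; btree keys are left untouched. Afterwards
  key_stat_version is synchronized with the share so that the statistics
  are considered up to date.
*/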
void ha_heap::update_key_stats()
{
  for (uint i= 0; i < table->s->keys; i++)
  {
    KEY *key=table->key_info+i;
    if (!key->rec_per_key)
      continue;
    if (key->algorithm != HA_KEY_ALG_BTREE)
    {
      if (key->flags & HA_NOSAME)
        key->rec_per_key[key->user_defined_key_parts-1]= 1;
      else
      {
        ha_rows hash_buckets= file->s->keydef[i].hash_buckets;
        ulong no_records= hash_buckets ? (ulong)(file->s->records/hash_buckets) : 2;
        if (no_records < 2)
          no_records= 2;
        key->rec_per_key[key->user_defined_key_parts-1]= no_records;
      }
    }
  }
  records_changed= 0;
  /* At the end of update_key_stats() we can proudly claim they are OK. */
  key_stat_version= file->s->key_stat_version;
}

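/*
  Insert a row. After a successful write, if more than
  1/HEAP_STATS_UPDATE_THRESHOLD of the rows have changed since the last
  statistics update, bump key_stat_version so that the next info() call
  refreshes the key statistics.
*/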
int ha_heap::write_row(uchar * buf)
{
  int res;
  if (table->next_number_field && buf == table->record[0])
  {
    if ((res= update_auto_increment()))
      return res;
  }
  res= heap_write(file,buf);
  if (!res && (++records_changed*HEAP_STATS_UPDATE_THRESHOLD >
               file->s->records))
  {
    /*
      We can perform this safely since only one writer at a time is
      allowed on the table.
    */
    records_changed= 0;
    file->s->key_stat_version++;
  }
  return res;
}

int ha_heap::update_row(const uchar * old_data, const uchar * new_data)
{
  int res;
  res= heap_update(file,old_data,new_data);
  if (!res && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD >
      file->s->records)
  {
    /*
      We can perform this safely since only one writer at a time is
      allowed on the table.
    */
    records_changed= 0;
    file->s->key_stat_version++;
  }
  return res;
}

int ha_heap::delete_row(const uchar * buf)
{
  int res;
  res= heap_delete(file,buf);
  if (!res && table->s->tmp_table == NO_TMP_TABLE &&
      ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records)
  {
    /*
      We can perform this safely since only one writer at a time is
      allowed on the table.
    */
    records_changed= 0;
    file->s->key_stat_version++;
  }
  return res;
}

int ha_heap::index_read_map(uchar *buf, const uchar *key,
                            key_part_map keypart_map,
                            enum ha_rkey_function find_flag)
{
  DBUG_ASSERT(inited==INDEX);
  int error= heap_rkey(file, buf, active_index, key, keypart_map, find_flag);
  return error;
}

int ha_heap::index_read_last_map(uchar *buf, const uchar *key,
                                 key_part_map keypart_map)
{
  DBUG_ASSERT(inited==INDEX);
  int error= heap_rkey(file, buf, active_index, key, keypart_map,
                       HA_READ_PREFIX_LAST);
  return error;
}

int ha_heap::index_read_idx_map(uchar *buf, uint index, const uchar *key,
                                key_part_map keypart_map,
                                enum ha_rkey_function find_flag)
{
  int error= heap_rkey(file, buf, index, key, keypart_map, find_flag);
  return error;
}

int ha_heap::index_next(uchar * buf)
{
  DBUG_ASSERT(inited==INDEX);
  int error= heap_rnext(file, buf);
  return error;
}

int ha_heap::index_prev(uchar * buf)
{
  DBUG_ASSERT(inited==INDEX);
  int error= heap_rprev(file, buf);
  return error;
}

int ha_heap::index_first(uchar * buf)
{
  DBUG_ASSERT(inited==INDEX);
  int error= heap_rfirst(file, buf, active_index);
  return error;
}

int ha_heap::index_last(uchar * buf)
{
  DBUG_ASSERT(inited==INDEX);
  int error= heap_rlast(file, buf, active_index);
  return error;
}

int ha_heap::rnd_init(bool scan)
{
  return scan ? heap_scan_init(file) : 0;
}

int ha_heap::rnd_next(uchar *buf)
{
  int error= heap_scan(file, buf);
  return error;
}

int ha_heap::rnd_pos(uchar * buf, uchar *pos)
{
  int error;
  HEAP_PTR heap_position;
  memcpy(&heap_position, pos, sizeof(HEAP_PTR));
  error= heap_rrnd(file, buf, heap_position);
  return error;
}

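/*
  Remember the position of the current row: heap_position() returns a
  HEAP_PTR that rnd_pos() can later use to re-read the row.
*/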
void ha_heap::position(const uchar *record)
{
  *(HEAP_PTR*) ref= heap_position(file);        // Ref is aligned
}

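/*
  Fill in handler statistics from heap_info(), and refresh the key statistics
  if they are out of date (key_stat_version differs from the share's version).
*/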
int ha_heap::info(uint flag)
{
  HEAPINFO hp_info;

  if (!table)
    return 1;

  (void) heap_info(file,&hp_info,flag);

  errkey= hp_info.errkey;
  stats.records= hp_info.records;
  stats.deleted= hp_info.deleted;
  stats.mean_rec_length= hp_info.reclength;
  stats.data_file_length= hp_info.data_length;
  stats.index_file_length= hp_info.index_length;
  stats.max_data_file_length= hp_info.max_records * hp_info.reclength;
  stats.delete_length= hp_info.deleted * hp_info.reclength;
  stats.create_time= (ulong) hp_info.create_time;
  if (flag & HA_STATUS_AUTO)
    stats.auto_increment_value= hp_info.auto_increment;
  /*
    If info() is called for the first time after open(), we will still
    have to update the key statistics; hopefully a table lock is in
    place by now.
  */
  if (key_stat_version != file->s->key_stat_version)
    update_key_stats();
  return 0;
}

int ha_heap::extra(enum ha_extra_function operation)
{
  return heap_extra(file, operation);
}


int ha_heap::reset()
{
  return heap_reset(file);
}

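/*
  Delete all rows from the table. For non-temporary tables the key statistics
  are invalidated so that the next info() call refreshes them.
*/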
int ha_heap::delete_all_rows()
{
  heap_clear(file);
  if (table->s->tmp_table == NO_TMP_TABLE)
  {
    /*
      We can perform this safely since only one writer at a time is
      allowed on the table.
    */
    file->s->key_stat_version++;
  }
  return 0;
}


int ha_heap::reset_auto_increment(ulonglong value)
{
  file->s->auto_increment= value;
  return 0;
}


int ha_heap::external_lock(THD *thd, int lock_type)
{
  return 0;                                     // No external locking
}

/*
  Disable indexes.

  SYNOPSIS
    disable_indexes()
    mode        mode of operation:
                HA_KEY_SWITCH_NONUNIQ      disable all non-unique keys
                HA_KEY_SWITCH_ALL          disable all keys
                HA_KEY_SWITCH_NONUNIQ_SAVE disable non-unique keys and make
                                           this persistent
                HA_KEY_SWITCH_ALL_SAVE     disable all keys and make this
                                           persistent

  DESCRIPTION
    Disable indexes and clear keys to use for scanning.

  IMPLEMENTATION
    HA_KEY_SWITCH_NONUNIQ       is not implemented.
    HA_KEY_SWITCH_NONUNIQ_SAVE  is not implemented with HEAP.
    HA_KEY_SWITCH_ALL_SAVE      is not implemented with HEAP.

  RETURN
    0                     ok
    HA_ERR_WRONG_COMMAND  mode not implemented.
*/

int ha_heap::disable_indexes(uint mode)
{
  int error;

  if (mode == HA_KEY_SWITCH_ALL)
  {
    if (!(error= heap_disable_indexes(file)))
      set_keys_for_scanning();
  }
  else
  {
    /* mode not implemented */
    error= HA_ERR_WRONG_COMMAND;
  }
  return error;
}


/*
  Enable indexes.

  SYNOPSIS
    enable_indexes()
    mode        mode of operation:
                HA_KEY_SWITCH_NONUNIQ      enable all non-unique keys
                HA_KEY_SWITCH_ALL          enable all keys
                HA_KEY_SWITCH_NONUNIQ_SAVE enable non-unique keys and make
                                           this persistent
                HA_KEY_SWITCH_ALL_SAVE     enable all keys and make this
                                           persistent

  DESCRIPTION
    Enable indexes and set keys to use for scanning.
    The indexes might have been disabled by disable_indexes() before.
    The function works only if both data and indexes are empty,
    since the HEAP storage engine cannot repair the indexes.
    To be sure, call handler::delete_all_rows() before.

  IMPLEMENTATION
    HA_KEY_SWITCH_NONUNIQ       is not implemented.
    HA_KEY_SWITCH_NONUNIQ_SAVE  is not implemented with HEAP.
    HA_KEY_SWITCH_ALL_SAVE      is not implemented with HEAP.

  RETURN
    0                     ok
    HA_ERR_CRASHED        data or index is non-empty. Delete all rows and
                          retry.
    HA_ERR_WRONG_COMMAND  mode not implemented.
*/

int ha_heap::enable_indexes(uint mode)
{
  int error;

  if (mode == HA_KEY_SWITCH_ALL)
  {
    if (!(error= heap_enable_indexes(file)))
      set_keys_for_scanning();
  }
  else
  {
    /* mode not implemented */
    error= HA_ERR_WRONG_COMMAND;
  }
  return error;
}


/*
  Test if indexes are disabled.

  SYNOPSIS
    indexes_are_disabled()
    no parameters

  RETURN
    0  indexes are not disabled
    1  all indexes are disabled
    [2 non-unique indexes are disabled - NOT YET IMPLEMENTED]
*/

int ha_heap::indexes_are_disabled(void)
{
  return heap_indexes_are_disabled(file);
}

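/*
  Register the table lock for this statement. HEAP uses table-level locking
  only, so we just hand back this handler's THR_LOCK_DATA with the requested
  lock type.
*/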
THR_LOCK_DATA **ha_heap::store_lock(THD *thd,
                                    THR_LOCK_DATA **to,
                                    enum thr_lock_type lock_type)
{
  if (lock_type != TL_IGNORE && file->lock.type == TL_UNLOCK)
    file->lock.type= lock_type;
  *to++= &file->lock;
  return to;
}

/*
  We have to ignore ENOENT errors, as the HEAP table is created on open and
  not when doing a CREATE on the table.
*/

int ha_heap::delete_table(const char *name)
{
  int error= heap_delete_table(name);
  return error == ENOENT ? 0 : error;
}


void ha_heap::drop_table(const char *name)
{
  file->s->delete_on_close= 1;
  ha_close();
}


int ha_heap::rename_table(const char * from, const char * to)
{
  return heap_rename(from,to);
}

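/*
  Estimate the number of rows in the range [min_key, max_key]. Btree keys are
  answered by hp_rb_records_in_range(). Hash keys can only answer exact-match
  ranges, using the rec_per_key statistics maintained by update_key_stats().
*/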
ha_rows ha_heap::records_in_range(uint inx, key_range *min_key,
                                  key_range *max_key)
{
  KEY *key=table->key_info+inx;
  if (key->algorithm == HA_KEY_ALG_BTREE)
    return hp_rb_records_in_range(file, inx, min_key, max_key);

  if (!min_key || !max_key ||
      min_key->length != max_key->length ||
      min_key->length != key->key_length ||
      min_key->flag != HA_READ_KEY_EXACT ||
      max_key->flag != HA_READ_AFTER_KEY)
    return HA_POS_ERROR;                        // Can only use exact keys

  if (stats.records <= 1)
    return stats.records;

  /* Assert that info() did run. We need current statistics here. */
  DBUG_ASSERT(key_stat_version == file->s->key_stat_version);
  return key->rec_per_key[key->user_defined_key_parts-1];
}

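/*
  Build an HP_CREATE_INFO (key definitions, row size, table limits) from the
  server-level TABLE definition. The keydef array is allocated here and must
  be freed by the caller with my_free().
*/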
static int
heap_prepare_hp_create_info(TABLE *table_arg, bool internal_table,
                            HP_CREATE_INFO *hp_create_info)
{
  uint key, parts, mem_per_row= 0, keys= table_arg->s->keys;
  uint auto_key= 0, auto_key_type= 0;
  ha_rows max_rows;
  HP_KEYDEF *keydef;
  HA_KEYSEG *seg;
  TABLE_SHARE *share= table_arg->s;
  bool found_real_auto_increment= 0;

  bzero(hp_create_info, sizeof(*hp_create_info));

  for (key= parts= 0; key < keys; key++)
    parts+= table_arg->key_info[key].user_defined_key_parts;

  if (!(keydef= (HP_KEYDEF*) my_malloc(keys * sizeof(HP_KEYDEF) +
                                       parts * sizeof(HA_KEYSEG),
                                       MYF(MY_WME | MY_THREAD_SPECIFIC))))
    return my_errno;
  seg= reinterpret_cast<HA_KEYSEG*>(keydef + keys);
  for (key= 0; key < keys; key++)
  {
    KEY *pos= table_arg->key_info+key;
    KEY_PART_INFO *key_part= pos->key_part;
    KEY_PART_INFO *key_part_end= key_part + pos->user_defined_key_parts;

    keydef[key].keysegs= (uint) pos->user_defined_key_parts;
    keydef[key].flag= (pos->flags & (HA_NOSAME | HA_NULL_ARE_EQUAL));
    keydef[key].seg= seg;

    switch (pos->algorithm) {
    case HA_KEY_ALG_UNDEF:
    case HA_KEY_ALG_HASH:
      keydef[key].algorithm= HA_KEY_ALG_HASH;
      mem_per_row+= sizeof(char*) * 2;          // = sizeof(HASH_INFO)
      break;
    case HA_KEY_ALG_BTREE:
      keydef[key].algorithm= HA_KEY_ALG_BTREE;
      mem_per_row+= sizeof(TREE_ELEMENT) + pos->key_length + sizeof(char*);
      break;
    default:
      DBUG_ASSERT(0);                           // cannot happen
    }

    for (; key_part != key_part_end; key_part++, seg++)
    {
      Field *field= key_part->field;

      if (pos->algorithm == HA_KEY_ALG_BTREE)
        seg->type= field->key_type();
      else
      {
        if ((seg->type = field->key_type()) != (int) HA_KEYTYPE_TEXT &&
            seg->type != HA_KEYTYPE_VARTEXT1 &&
            seg->type != HA_KEYTYPE_VARTEXT2 &&
            seg->type != HA_KEYTYPE_VARBINARY1 &&
            seg->type != HA_KEYTYPE_VARBINARY2 &&
            seg->type != HA_KEYTYPE_BIT)
          seg->type= HA_KEYTYPE_BINARY;
      }
      seg->start= (uint) key_part->offset;
      seg->length= (uint) key_part->length;
      seg->flag= key_part->key_part_flag;

      if (field->flags & (ENUM_FLAG | SET_FLAG))
        seg->charset= &my_charset_bin;
      else
        seg->charset= field->charset_for_protocol();
      if (field->null_ptr)
      {
        seg->null_bit= field->null_bit;
        seg->null_pos= (uint) (field->null_ptr - (uchar*) table_arg->record[0]);
      }
      else
      {
        seg->null_bit= 0;
        seg->null_pos= 0;
      }
      if (field->flags & AUTO_INCREMENT_FLAG &&
          table_arg->found_next_number_field &&
          key == share->next_number_index)
      {
        /*
          Store key number and type for found auto_increment key
          We have to store type as seg->type can differ from it
        */
        auto_key= key+ 1;
        auto_key_type= field->key_type();
      }
      if (seg->type == HA_KEYTYPE_BIT)
      {
        seg->bit_length= ((Field_bit *) field)->bit_len;
        seg->bit_start= ((Field_bit *) field)->bit_ofs;
        seg->bit_pos= (uint) (((Field_bit *) field)->bit_ptr -
                              (uchar*) table_arg->record[0]);
      }
      else
      {
        seg->bit_length= seg->bit_start= 0;
        seg->bit_pos= 0;
      }
    }
  }
  mem_per_row+= MY_ALIGN(MY_MAX(share->reclength, sizeof(char*)) + 1,
                         sizeof(char*));
  if (table_arg->found_next_number_field)
  {
    keydef[share->next_number_index].flag|= HA_AUTO_KEY;
    found_real_auto_increment= share->next_number_key_offset == 0;
  }
  hp_create_info->auto_key= auto_key;
  hp_create_info->auto_key_type= auto_key_type;
  hp_create_info->max_table_size= current_thd->variables.max_heap_table_size;
  hp_create_info->with_auto_increment= found_real_auto_increment;
  hp_create_info->internal_table= internal_table;

  max_rows= (ha_rows) (hp_create_info->max_table_size / mem_per_row);
  if (share->max_rows && share->max_rows < max_rows)
    max_rows= share->max_rows;

  hp_create_info->max_records= (ulong) MY_MIN(max_rows, ULONG_MAX);
  hp_create_info->min_records= (ulong) MY_MIN(share->min_rows, ULONG_MAX);
  hp_create_info->keys= share->keys;
  hp_create_info->reclength= share->reclength;
  hp_create_info->keydef= keydef;
  return 0;
}

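/*
  Create a new HEAP table: build the HP_CREATE_INFO from the TABLE definition
  and register the table share. No HP_INFO handle is opened here; that
  happens in ha_heap::open().
*/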
int ha_heap::create(const char *name, TABLE *table_arg,
                    HA_CREATE_INFO *create_info)
{
  int error;
  my_bool created;
  HP_CREATE_INFO hp_create_info;

  error= heap_prepare_hp_create_info(table_arg, internal_table,
                                     &hp_create_info);
  if (error)
    return error;
  hp_create_info.auto_increment= (create_info->auto_increment_value ?
                                  create_info->auto_increment_value - 1 : 0);
  error= heap_create(name, &hp_create_info, &internal_share, &created);
  my_free(hp_create_info.keydef);
  DBUG_ASSERT(file == 0);
  return (error);
}

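/*
  Report the current auto_increment value in the create info unless the
  statement specified one explicitly (HA_CREATE_USED_AUTO).
*/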
void ha_heap::update_create_info(HA_CREATE_INFO *create_info)
{
  table->file->info(HA_STATUS_AUTO);
  if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
    create_info->auto_increment_value= stats.auto_increment_value;
}

void ha_heap::get_auto_increment(ulonglong offset, ulonglong increment,
                                 ulonglong nb_desired_values,
                                 ulonglong *first_value,
                                 ulonglong *nb_reserved_values)
{
  ha_heap::info(HA_STATUS_AUTO);
  *first_value= stats.auto_increment_value;
  /* Such tables have only table-level locking, so we reserve up to +inf */
  *nb_reserved_values= ULONGLONG_MAX;
}

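/*
  Decide whether ALTER TABLE can reuse the existing data. Changing the
  auto_increment value, a definition change (IS_EQUAL_NO) or a pack length
  change forces the table to be rebuilt.
*/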
bool ha_heap::check_if_incompatible_data(HA_CREATE_INFO *info,
                                         uint table_changes)
{
  /* Check that auto_increment value was not changed */
  if ((info->used_fields & HA_CREATE_USED_AUTO &&
       info->auto_increment_value != 0) ||
      table_changes == IS_EQUAL_NO ||
      table_changes & IS_EQUAL_PACK_LENGTH)     // Not implemented yet
    return COMPATIBLE_DATA_NO;
  return COMPATIBLE_DATA_YES;
}

/**
  Find a record by unique index (used for temporary tables with such an
  index)

  @param record     (IN|OUT) the record to find
  @param unique_idx (IN)     number of the index (for this engine)

  @note It is like hp_search(), but uses the record-based hash and compare
  functions (hp_rec_hashnr/hp_rec_key_cmp) where hp_search() uses the
  key-based ones.

  @retval  0 OK
  @retval  1 Not found
  @retval -1 Error
*/

int ha_heap::find_unique_row(uchar *record, uint unique_idx)
{
  DBUG_ENTER("ha_heap::find_unique_row");
  HP_SHARE *share= file->s;
  DBUG_ASSERT(inited==NONE);
  HP_KEYDEF *keyinfo= share->keydef + unique_idx;
  DBUG_ASSERT(keyinfo->algorithm == HA_KEY_ALG_HASH);
  DBUG_ASSERT(keyinfo->flag & HA_NOSAME);
  if (!share->records)
    DBUG_RETURN(1);                             // not found
  HASH_INFO *pos= hp_find_hash(&keyinfo->block,
                               hp_mask(hp_rec_hashnr(keyinfo, record),
                                       share->blength, share->records));
  do
  {
    if (!hp_rec_key_cmp(keyinfo, pos->ptr_to_rec, record))
    {
      file->current_hash_ptr= pos;
      file->current_ptr= pos->ptr_to_rec;
      file->update = HA_STATE_AKTIV;
      /*
        Only the key columns were compared, so copy the whole stored
        record back into the caller's buffer.
      */
      memcpy(record, file->current_ptr, (size_t) share->reclength);

      DBUG_RETURN(0);                           // found and position set
    }
  }
  while ((pos= pos->next_key));
  DBUG_RETURN(1);                               // not found
}

struct st_mysql_storage_engine heap_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };

mysql_declare_plugin(heap)
{
  MYSQL_STORAGE_ENGINE_PLUGIN,
  &heap_storage_engine,
  "MEMORY",
  "MySQL AB",
  "Hash based, stored in memory, useful for temporary tables",
  PLUGIN_LICENSE_GPL,
  heap_init,
  NULL,
  0x0100,                       /* 1.0 */
  NULL,                         /* status variables */
  NULL,                         /* system variables */
  NULL,                         /* config options */
  0,                            /* flags */
}
mysql_declare_plugin_end;
maria_declare_plugin(heap)
{
  MYSQL_STORAGE_ENGINE_PLUGIN,
  &heap_storage_engine,
  "MEMORY",
  "MySQL AB",
  "Hash based, stored in memory, useful for temporary tables",
  PLUGIN_LICENSE_GPL,
  heap_init,
  NULL,
  0x0100,                       /* 1.0 */
  NULL,                         /* status variables */
  NULL,                         /* system variables */
  "1.0",                        /* string version */
  MariaDB_PLUGIN_MATURITY_STABLE /* maturity */
}
maria_declare_plugin_end;