| 1 | /***************************************************************************** |
| 2 | |
| 3 | Copyright (c) 1996, 2018, Oracle and/or its affiliates. All Rights Reserved. |
| 4 | Copyright (c) 2012, Facebook Inc. |
| 5 | Copyright (c) 2013, 2018, MariaDB Corporation. |
| 6 | |
| 7 | This program is free software; you can redistribute it and/or modify it under |
| 8 | the terms of the GNU General Public License as published by the Free Software |
| 9 | Foundation; version 2 of the License. |
| 10 | |
| 11 | This program is distributed in the hope that it will be useful, but WITHOUT |
| 12 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
| 13 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. |
| 14 | |
| 15 | You should have received a copy of the GNU General Public License along with |
| 16 | this program; if not, write to the Free Software Foundation, Inc., |
| 17 | 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA |
| 18 | |
| 19 | *****************************************************************************/ |
| 20 | |
| 21 | /******************************************************************//** |
| 22 | @file dict/dict0dict.cc |
| 23 | Data dictionary system |
| 24 | |
| 25 | Created 1/8/1996 Heikki Tuuri |
| 26 | ***********************************************************************/ |
| 27 | |
| 28 | #include <my_config.h> |
| 29 | #include <string> |
| 30 | |
| 31 | #include "ha_prototypes.h" |
| 32 | #include <mysqld.h> |
| 33 | #include <strfunc.h> |
| 34 | |
| 35 | #include "dict0dict.h" |
| 36 | #include "fts0fts.h" |
| 37 | #include "fil0fil.h" |
| 38 | #include <algorithm> |
| 39 | |
| 40 | /** dummy index for ROW_FORMAT=REDUNDANT supremum and infimum records */ |
| 41 | dict_index_t* dict_ind_redundant; |
| 42 | |
| 43 | #if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG |
| 44 | /** Flag to control insert buffer debugging. */ |
| 45 | extern uint ibuf_debug; |
| 46 | #endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */ |
| 47 | |
| 48 | /********************************************************************** |
| 49 | Issue a warning that the row is too big. */ |
| 50 | void |
| 51 | ib_warn_row_too_big(const dict_table_t* table); |
| 52 | |
| 53 | #include "btr0btr.h" |
| 54 | #include "btr0cur.h" |
| 55 | #include "btr0sea.h" |
| 56 | #include "buf0buf.h" |
| 57 | #include "data0type.h" |
| 58 | #include "dict0boot.h" |
| 59 | #include "dict0crea.h" |
| 60 | #include "dict0mem.h" |
| 61 | #include "dict0priv.h" |
| 62 | #include "dict0stats.h" |
| 63 | #include "fsp0sysspace.h" |
| 64 | #include "fts0fts.h" |
| 65 | #include "fts0types.h" |
| 66 | #include "lock0lock.h" |
| 67 | #include "mach0data.h" |
| 68 | #include "mem0mem.h" |
| 69 | #include "os0once.h" |
| 70 | #include "page0page.h" |
| 71 | #include "page0zip.h" |
| 72 | #include "pars0pars.h" |
| 73 | #include "pars0sym.h" |
| 74 | #include "que0que.h" |
| 75 | #include "rem0cmp.h" |
| 76 | #include "row0log.h" |
| 77 | #include "row0merge.h" |
| 78 | #include "row0mysql.h" |
| 79 | #include "row0upd.h" |
| 80 | #include "srv0mon.h" |
| 81 | #include "srv0start.h" |
| 82 | #include "sync0sync.h" |
| 83 | #include "trx0undo.h" |
| 84 | #include "ut0new.h" |
| 85 | |
| 86 | #include <vector> |
| 87 | #include <algorithm> |
| 88 | |
| 89 | /** the dictionary system */ |
| 90 | dict_sys_t* dict_sys = NULL; |
| 91 | |
| 92 | /** @brief the data dictionary rw-latch protecting dict_sys |
| 93 | |
| 94 | table create, drop, etc. reserve this in X-mode; implicit or |
background operations (purge, rollback, foreign key checks) reserve this
in S-mode; we cannot trust that MySQL protects implicit or background
operations from a table drop, since MySQL does not know of them; therefore
| 98 | we need this; NOTE: a transaction which reserves this must keep book |
| 99 | on the mode in trx_t::dict_operation_lock_mode */ |
| 100 | rw_lock_t* dict_operation_lock; |
| 101 | |
| 102 | /** Percentage of compression failures that are allowed in a single |
| 103 | round */ |
| 104 | ulong zip_failure_threshold_pct = 5; |
| 105 | |
| 106 | /** Maximum percentage of a page that can be allowed as a pad to avoid |
| 107 | compression failures */ |
| 108 | ulong zip_pad_max = 50; |
| 109 | |
| 110 | #define DICT_HEAP_SIZE 100 /*!< initial memory heap size when |
| 111 | creating a table or index object */ |
| 112 | #define DICT_POOL_PER_TABLE_HASH 512 /*!< buffer pool max size per table |
| 113 | hash table fixed size in bytes */ |
| 114 | #define DICT_POOL_PER_VARYING 4 /*!< buffer pool max size per data |
| 115 | dictionary varying size in bytes */ |
| 116 | |
| 117 | /** Identifies generated InnoDB foreign key names */ |
static char dict_ibfk[] = "_ibfk_";
| 119 | |
| 120 | bool innodb_table_stats_not_found = false; |
| 121 | bool innodb_index_stats_not_found = false; |
| 122 | static bool innodb_table_stats_not_found_reported = false; |
| 123 | static bool innodb_index_stats_not_found_reported = false; |
| 124 | |
| 125 | /*******************************************************************//** |
| 126 | Tries to find column names for the index and sets the col field of the |
| 127 | index. |
| 128 | @param[in] index index |
| 129 | @param[in] add_v new virtual columns added along with an add index call |
| 130 | @return whether the column names were found */ |
| 131 | static |
| 132 | bool |
| 133 | dict_index_find_cols( |
| 134 | dict_index_t* index, |
| 135 | const dict_add_v_col_t* add_v); |
| 136 | /*******************************************************************//** |
| 137 | Builds the internal dictionary cache representation for a clustered |
| 138 | index, containing also system fields not defined by the user. |
| 139 | @return own: the internal representation of the clustered index */ |
| 140 | static |
| 141 | dict_index_t* |
| 142 | dict_index_build_internal_clust( |
| 143 | /*============================*/ |
| 144 | dict_index_t* index); /*!< in: user representation of |
| 145 | a clustered index */ |
| 146 | /*******************************************************************//** |
| 147 | Builds the internal dictionary cache representation for a non-clustered |
| 148 | index, containing also system fields not defined by the user. |
| 149 | @return own: the internal representation of the non-clustered index */ |
| 150 | static |
| 151 | dict_index_t* |
| 152 | dict_index_build_internal_non_clust( |
| 153 | /*================================*/ |
| 154 | dict_index_t* index); /*!< in: user representation of |
| 155 | a non-clustered index */ |
| 156 | /**********************************************************************//** |
| 157 | Builds the internal dictionary cache representation for an FTS index. |
| 158 | @return own: the internal representation of the FTS index */ |
| 159 | static |
| 160 | dict_index_t* |
| 161 | dict_index_build_internal_fts( |
| 162 | /*==========================*/ |
| 163 | dict_index_t* index); /*!< in: user representation of an FTS index */ |
| 164 | |
| 165 | /**********************************************************************//** |
| 166 | Removes an index from the dictionary cache. */ |
| 167 | static |
| 168 | void |
| 169 | dict_index_remove_from_cache_low( |
| 170 | /*=============================*/ |
| 171 | dict_table_t* table, /*!< in/out: table */ |
| 172 | dict_index_t* index, /*!< in, own: index */ |
ibool lru_evict); /*!< in: TRUE if the table is being evicted
| 174 | to make room in the table LRU list */ |
| 175 | #ifdef UNIV_DEBUG |
| 176 | /**********************************************************************//** |
| 177 | Validate the dictionary table LRU list. |
| 178 | @return TRUE if validate OK */ |
| 179 | static |
| 180 | ibool |
| 181 | dict_lru_validate(void); |
| 182 | /*===================*/ |
| 183 | /**********************************************************************//** |
| 184 | Check if table is in the dictionary table LRU list. |
| 185 | @return TRUE if table found */ |
| 186 | static |
| 187 | ibool |
| 188 | dict_lru_find_table( |
| 189 | /*================*/ |
| 190 | const dict_table_t* find_table); /*!< in: table to find */ |
| 191 | /**********************************************************************//** |
| 192 | Check if a table exists in the dict table non-LRU list. |
| 193 | @return TRUE if table found */ |
| 194 | static |
| 195 | ibool |
| 196 | dict_non_lru_find_table( |
| 197 | /*====================*/ |
| 198 | const dict_table_t* find_table); /*!< in: table to find */ |
| 199 | #endif /* UNIV_DEBUG */ |
| 200 | |
| 201 | /* Stream for storing detailed information about the latest foreign key |
| 202 | and unique key errors. Only created if !srv_read_only_mode */ |
| 203 | FILE* dict_foreign_err_file = NULL; |
| 204 | /* mutex protecting the foreign and unique error buffers */ |
| 205 | ib_mutex_t dict_foreign_err_mutex; |
| 206 | |
| 207 | /********************************************************************//** |
| 208 | Checks if the database name in two table names is the same. |
| 209 | @return TRUE if same db name */ |
| 210 | ibool |
| 211 | dict_tables_have_same_db( |
| 212 | /*=====================*/ |
| 213 | const char* name1, /*!< in: table name in the form |
| 214 | dbname '/' tablename */ |
| 215 | const char* name2) /*!< in: table name in the form |
| 216 | dbname '/' tablename */ |
| 217 | { |
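/* Walk both names in lockstep; names are of the form
dbname '/' tablename, so if the '/' separator is reached before
any mismatch, the database name parts are identical. */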
| 218 | for (; *name1 == *name2; name1++, name2++) { |
| 219 | if (*name1 == '/') { |
| 220 | return(TRUE); |
| 221 | } |
| 222 | ut_a(*name1); /* the names must contain '/' */ |
| 223 | } |
| 224 | return(FALSE); |
| 225 | } |
| 226 | |
| 227 | /********************************************************************//** |
| 228 | Return the end of table name where we have removed dbname and '/'. |
| 229 | @return table name */ |
| 230 | const char* |
| 231 | dict_remove_db_name( |
| 232 | /*================*/ |
| 233 | const char* name) /*!< in: table name in the form |
| 234 | dbname '/' tablename */ |
| 235 | { |
| 236 | const char* s = strchr(name, '/'); |
| 237 | ut_a(s); |
| 238 | |
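/* For example, for "test/t1" this returns a pointer to "t1";
dict_get_db_name_len() below returns 4 for the same name. */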
| 239 | return(s + 1); |
| 240 | } |
| 241 | |
| 242 | /********************************************************************//** |
| 243 | Get the database name length in a table name. |
| 244 | @return database name length */ |
| 245 | ulint |
| 246 | dict_get_db_name_len( |
| 247 | /*=================*/ |
| 248 | const char* name) /*!< in: table name in the form |
| 249 | dbname '/' tablename */ |
| 250 | { |
| 251 | const char* s; |
| 252 | s = strchr(name, '/'); |
| 253 | ut_a(s); |
| 254 | return ulint(s - name); |
| 255 | } |
| 256 | |
| 257 | /** Reserve the dictionary system mutex. */ |
| 258 | void |
| 259 | dict_mutex_enter_for_mysql_func(const char *file, unsigned line) |
| 260 | { |
| 261 | mutex_enter_loc(&dict_sys->mutex, file, line); |
| 262 | } |
| 263 | |
| 264 | /********************************************************************//** |
| 265 | Releases the dictionary system mutex for MySQL. */ |
| 266 | void |
| 267 | dict_mutex_exit_for_mysql(void) |
| 268 | /*===========================*/ |
| 269 | { |
| 270 | mutex_exit(&dict_sys->mutex); |
| 271 | } |
| 272 | |
| 273 | /** Allocate and init a dict_table_t's stats latch. |
| 274 | This function must not be called concurrently on the same table object. |
| 275 | @param[in,out] table_void table whose stats latch to create */ |
| 276 | static |
| 277 | void |
| 278 | dict_table_stats_latch_alloc( |
| 279 | void* table_void) |
| 280 | { |
| 281 | dict_table_t* table = static_cast<dict_table_t*>(table_void); |
| 282 | |
| 283 | /* Note: rw_lock_create() will call the constructor */ |
| 284 | |
| 285 | table->stats_latch = static_cast<rw_lock_t*>( |
| 286 | ut_malloc_nokey(sizeof(rw_lock_t))); |
| 287 | |
| 288 | ut_a(table->stats_latch != NULL); |
| 289 | |
| 290 | rw_lock_create(dict_table_stats_key, table->stats_latch, |
| 291 | SYNC_INDEX_TREE); |
| 292 | } |
| 293 | |
| 294 | /** Deinit and free a dict_table_t's stats latch. |
| 295 | This function must not be called concurrently on the same table object. |
| 296 | @param[in,out] table table whose stats latch to free */ |
| 297 | static |
| 298 | void |
| 299 | dict_table_stats_latch_free( |
| 300 | dict_table_t* table) |
| 301 | { |
| 302 | rw_lock_free(table->stats_latch); |
| 303 | ut_free(table->stats_latch); |
| 304 | } |
| 305 | |
/** Create a dict_table_t's stats latch, or mark it for lazy creation.
This function is only called from either a single-threaded environment
or from a thread that has not yet shared the table object with other threads.
| 309 | @param[in,out] table table whose stats latch to create |
| 310 | @param[in] enabled if false then the latch is disabled |
| 311 | and dict_table_stats_lock()/unlock() become noop on this table. */ |
| 312 | void |
| 313 | dict_table_stats_latch_create( |
| 314 | dict_table_t* table, |
| 315 | bool enabled) |
| 316 | { |
| 317 | if (!enabled) { |
| 318 | table->stats_latch = NULL; |
| 319 | table->stats_latch_created = os_once::DONE; |
| 320 | return; |
| 321 | } |
| 322 | |
| 323 | /* We create this lazily the first time it is used. */ |
| 324 | table->stats_latch = NULL; |
| 325 | table->stats_latch_created = os_once::NEVER_DONE; |
| 326 | } |
| 327 | |
| 328 | /** Destroy a dict_table_t's stats latch. |
This function is only called from either a single-threaded environment
or from a thread that has not yet shared the table object with other threads.
| 331 | @param[in,out] table table whose stats latch to destroy */ |
| 332 | void |
| 333 | dict_table_stats_latch_destroy( |
| 334 | dict_table_t* table) |
| 335 | { |
| 336 | if (table->stats_latch_created == os_once::DONE |
| 337 | && table->stats_latch != NULL) { |
| 338 | |
| 339 | dict_table_stats_latch_free(table); |
| 340 | } |
| 341 | } |
| 342 | |
| 343 | /** Lock the appropriate latch to protect a given table's statistics. |
| 344 | @param[in] table table whose stats to lock |
| 345 | @param[in] latch_mode RW_S_LATCH or RW_X_LATCH */ |
| 346 | void |
| 347 | dict_table_stats_lock( |
| 348 | dict_table_t* table, |
| 349 | ulint latch_mode) |
| 350 | { |
| 351 | ut_ad(table != NULL); |
| 352 | ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); |
| 353 | |
| 354 | os_once::do_or_wait_for_done( |
| 355 | &table->stats_latch_created, |
| 356 | dict_table_stats_latch_alloc, table); |
| 357 | |
| 358 | if (table->stats_latch == NULL) { |
| 359 | /* This is a dummy table object that is private in the current |
| 360 | thread and is not shared between multiple threads, thus we |
| 361 | skip any locking. */ |
| 362 | return; |
| 363 | } |
| 364 | |
| 365 | switch (latch_mode) { |
| 366 | case RW_S_LATCH: |
| 367 | rw_lock_s_lock(table->stats_latch); |
| 368 | break; |
| 369 | case RW_X_LATCH: |
| 370 | rw_lock_x_lock(table->stats_latch); |
| 371 | break; |
| 372 | case RW_NO_LATCH: |
| 373 | /* fall through */ |
| 374 | default: |
| 375 | ut_error; |
| 376 | } |
| 377 | } |
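/* Typical usage (sketch): callers bracket any update of the
statistics fields with this latch, for example:

	dict_table_stats_lock(table, RW_X_LATCH);
	... update table->stat_n_rows and related fields ...
	dict_table_stats_unlock(table, RW_X_LATCH);

The latch itself is allocated lazily by the first call above. */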
| 378 | |
| 379 | /** Unlock the latch that has been locked by dict_table_stats_lock(). |
| 380 | @param[in] table table whose stats to unlock |
| 381 | @param[in] latch_mode RW_S_LATCH or RW_X_LATCH */ |
| 382 | void |
| 383 | dict_table_stats_unlock( |
| 384 | dict_table_t* table, |
| 385 | ulint latch_mode) |
| 386 | { |
| 387 | ut_ad(table != NULL); |
| 388 | ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); |
| 389 | |
| 390 | if (table->stats_latch == NULL) { |
| 391 | /* This is a dummy table object that is private in the current |
| 392 | thread and is not shared between multiple threads, thus we |
| 393 | skip any locking. */ |
| 394 | return; |
| 395 | } |
| 396 | |
| 397 | switch (latch_mode) { |
| 398 | case RW_S_LATCH: |
| 399 | rw_lock_s_unlock(table->stats_latch); |
| 400 | break; |
| 401 | case RW_X_LATCH: |
| 402 | rw_lock_x_unlock(table->stats_latch); |
| 403 | break; |
| 404 | case RW_NO_LATCH: |
| 405 | /* fall through */ |
| 406 | default: |
| 407 | ut_error; |
| 408 | } |
| 409 | } |
| 410 | |
| 411 | /**********************************************************************//** |
| 412 | Try to drop any indexes after an aborted index creation. |
| 413 | This can also be after a server kill during DROP INDEX. */ |
| 414 | static |
| 415 | void |
| 416 | dict_table_try_drop_aborted( |
| 417 | /*========================*/ |
| 418 | dict_table_t* table, /*!< in: table, or NULL if it |
| 419 | needs to be looked up again */ |
| 420 | table_id_t table_id, /*!< in: table identifier */ |
| 421 | ulint ref_count) /*!< in: expected table->n_ref_count */ |
| 422 | { |
| 423 | trx_t* trx; |
| 424 | |
| 425 | trx = trx_create(); |
| 426 | trx->op_info = "try to drop any indexes after an aborted index creation" ; |
| 427 | row_mysql_lock_data_dictionary(trx); |
| 428 | trx_set_dict_operation(trx, TRX_DICT_OP_INDEX); |
| 429 | |
| 430 | if (table == NULL) { |
| 431 | table = dict_table_open_on_id_low( |
| 432 | table_id, DICT_ERR_IGNORE_NONE, FALSE); |
| 433 | } else { |
| 434 | ut_ad(table->id == table_id); |
| 435 | } |
| 436 | |
| 437 | if (table && table->get_ref_count() == ref_count && table->drop_aborted |
| 438 | && !UT_LIST_GET_FIRST(table->locks)) { |
| 439 | /* Silence a debug assertion in row_merge_drop_indexes(). */ |
| 440 | ut_d(table->acquire()); |
| 441 | row_merge_drop_indexes(trx, table, TRUE); |
| 442 | ut_d(table->release()); |
| 443 | ut_ad(table->get_ref_count() == ref_count); |
| 444 | trx_commit_for_mysql(trx); |
| 445 | } |
| 446 | |
| 447 | row_mysql_unlock_data_dictionary(trx); |
| 448 | trx_free(trx); |
| 449 | } |
| 450 | |
| 451 | /**********************************************************************//** |
| 452 | When opening a table, |
| 453 | try to drop any indexes after an aborted index creation. |
| 454 | Release the dict_sys->mutex. */ |
| 455 | static |
| 456 | void |
| 457 | dict_table_try_drop_aborted_and_mutex_exit( |
| 458 | /*=======================================*/ |
| 459 | dict_table_t* table, /*!< in: table (may be NULL) */ |
ibool try_drop) /*!< in: TRUE if we should try to
drop indexes whose online creation
was aborted */
| 463 | { |
| 464 | if (try_drop |
| 465 | && table != NULL |
| 466 | && table->drop_aborted |
| 467 | && table->get_ref_count() == 1 |
| 468 | && dict_table_get_first_index(table)) { |
| 469 | |
| 470 | /* Attempt to drop the indexes whose online creation |
| 471 | was aborted. */ |
| 472 | table_id_t table_id = table->id; |
| 473 | |
| 474 | mutex_exit(&dict_sys->mutex); |
| 475 | |
| 476 | dict_table_try_drop_aborted(table, table_id, 1); |
| 477 | } else { |
| 478 | mutex_exit(&dict_sys->mutex); |
| 479 | } |
| 480 | } |
| 481 | |
| 482 | /********************************************************************//** |
| 483 | Decrements the count of open handles to a table. */ |
| 484 | void |
| 485 | dict_table_close( |
| 486 | /*=============*/ |
| 487 | dict_table_t* table, /*!< in/out: table */ |
| 488 | ibool dict_locked, /*!< in: TRUE=data dictionary locked */ |
| 489 | ibool try_drop) /*!< in: TRUE=try to drop any orphan |
| 490 | indexes after an aborted online |
| 491 | index creation */ |
| 492 | { |
| 493 | if (!dict_locked) { |
| 494 | mutex_enter(&dict_sys->mutex); |
| 495 | } |
| 496 | |
| 497 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 498 | ut_a(table->get_ref_count() > 0); |
| 499 | |
| 500 | const bool last_handle = table->release(); |
| 501 | |
| 502 | /* Force persistent stats re-read upon next open of the table |
| 503 | so that FLUSH TABLE can be used to forcibly fetch stats from disk |
| 504 | if they have been manually modified. We reset table->stat_initialized |
| 505 | only if table reference count is 0 because we do not want too frequent |
| 506 | stats re-reads (e.g. in other cases than FLUSH TABLE). */ |
| 507 | if (last_handle && strchr(table->name.m_name, '/') != NULL |
| 508 | && dict_stats_is_persistent_enabled(table)) { |
| 509 | |
| 510 | dict_stats_deinit(table); |
| 511 | } |
| 512 | |
| 513 | MONITOR_DEC(MONITOR_TABLE_REFERENCE); |
| 514 | |
| 515 | ut_ad(dict_lru_validate()); |
| 516 | |
| 517 | #ifdef UNIV_DEBUG |
| 518 | if (table->can_be_evicted) { |
| 519 | ut_ad(dict_lru_find_table(table)); |
| 520 | } else { |
| 521 | ut_ad(dict_non_lru_find_table(table)); |
| 522 | } |
| 523 | #endif /* UNIV_DEBUG */ |
| 524 | |
| 525 | if (!dict_locked) { |
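/* Copy the table id now: after dict_sys->mutex is released
below, the table object may be evicted and freed, so
dict_table_try_drop_aborted() has to look it up again by id. */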
| 526 | table_id_t table_id = table->id; |
| 527 | const bool drop_aborted = last_handle && try_drop |
| 528 | && table->drop_aborted |
| 529 | && dict_table_get_first_index(table); |
| 530 | |
| 531 | mutex_exit(&dict_sys->mutex); |
| 532 | |
| 533 | if (drop_aborted) { |
| 534 | dict_table_try_drop_aborted(NULL, table_id, 0); |
| 535 | } |
| 536 | } |
| 537 | } |
| 538 | |
| 539 | /********************************************************************//** |
| 540 | Closes the only open handle to a table and drops a table while assuring |
| 541 | that dict_sys->mutex is held the whole time. This assures that the table |
| 542 | is not evicted after the close when the count of open handles goes to zero. |
| 543 | Because dict_sys->mutex is held, we do not need to call |
| 544 | dict_table_prevent_eviction(). */ |
| 545 | void |
| 546 | dict_table_close_and_drop( |
| 547 | /*======================*/ |
| 548 | trx_t* trx, /*!< in: data dictionary transaction */ |
| 549 | dict_table_t* table) /*!< in/out: table */ |
| 550 | { |
| 551 | dberr_t err = DB_SUCCESS; |
| 552 | |
| 553 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 554 | ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); |
| 555 | ut_ad(trx->dict_operation != TRX_DICT_OP_NONE); |
| 556 | ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE)); |
| 557 | |
| 558 | dict_table_close(table, TRUE, FALSE); |
| 559 | |
| 560 | #if defined UNIV_DEBUG || defined UNIV_DDL_DEBUG |
| 561 | /* Nobody should have initialized the stats of the newly created |
| 562 | table when this is called. So we know that it has not been added |
| 563 | for background stats gathering. */ |
| 564 | ut_a(!table->stat_initialized); |
| 565 | #endif /* UNIV_DEBUG || UNIV_DDL_DEBUG */ |
| 566 | |
| 567 | err = row_merge_drop_table(trx, table); |
| 568 | |
| 569 | if (err != DB_SUCCESS) { |
| 570 | ib::error() << "At " << __FILE__ << ":" << __LINE__ |
| 571 | << " row_merge_drop_table returned error: " << err |
| 572 | << " table: " << table->name.m_name; |
| 573 | } |
| 574 | } |
| 575 | |
/** Check if the table has a given (non-virtual) column.
| 577 | @param[in] table table object |
| 578 | @param[in] col_name column name |
| 579 | @param[in] col_nr column number guessed, 0 as default |
| 580 | @return column number if the table has the specified column, |
| 581 | otherwise table->n_def */ |
| 582 | ulint |
| 583 | dict_table_has_column( |
| 584 | const dict_table_t* table, |
| 585 | const char* col_name, |
| 586 | ulint col_nr) |
| 587 | { |
| 588 | ulint col_max = table->n_def; |
| 589 | |
| 590 | ut_ad(table); |
| 591 | ut_ad(col_name); |
| 592 | ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); |
| 593 | |
| 594 | if (col_nr < col_max |
| 595 | && innobase_strcasecmp( |
| 596 | col_name, dict_table_get_col_name(table, col_nr)) == 0) { |
| 597 | return(col_nr); |
| 598 | } |
| 599 | |
/* The order of the columns may have changed; check against the other columns. */
| 601 | for (ulint i = 0; i < col_max; i++) { |
| 602 | if (i != col_nr |
| 603 | && innobase_strcasecmp( |
| 604 | col_name, dict_table_get_col_name(table, i)) == 0) { |
| 605 | |
| 606 | return(i); |
| 607 | } |
| 608 | } |
| 609 | |
| 610 | return(col_max); |
| 611 | } |
| 612 | |
| 613 | /** Retrieve the column name. |
@param[in] table the table that this column belongs to */
| 615 | const char* dict_col_t::name(const dict_table_t& table) const |
| 616 | { |
| 617 | ut_ad(table.magic_n == DICT_TABLE_MAGIC_N); |
| 618 | |
| 619 | size_t col_nr; |
| 620 | const char *s; |
| 621 | |
| 622 | if (is_virtual()) { |
| 623 | col_nr = size_t(reinterpret_cast<const dict_v_col_t*>(this) |
| 624 | - table.v_cols); |
| 625 | ut_ad(col_nr < table.n_v_def); |
| 626 | s = table.v_col_names; |
| 627 | } else { |
| 628 | col_nr = size_t(this - table.cols); |
| 629 | ut_ad(col_nr < table.n_def); |
| 630 | s = table.col_names; |
| 631 | } |
| 632 | |
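/* The column names are stored in a single buffer as consecutive
NUL-terminated strings; skip col_nr of them to reach this column. */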
| 633 | if (s) { |
| 634 | for (size_t i = 0; i < col_nr; i++) { |
| 635 | s += strlen(s) + 1; |
| 636 | } |
| 637 | } |
| 638 | |
| 639 | return(s); |
| 640 | } |
| 641 | |
| 642 | /** Returns a virtual column's name. |
| 643 | @param[in] table target table |
| 644 | @param[in] col_nr virtual column number (nth virtual column) |
| 645 | @return column name or NULL if column number out of range. */ |
| 646 | const char* |
| 647 | dict_table_get_v_col_name( |
| 648 | const dict_table_t* table, |
| 649 | ulint col_nr) |
| 650 | { |
| 651 | const char* s; |
| 652 | |
| 653 | ut_ad(table); |
| 654 | ut_ad(col_nr < table->n_v_def); |
| 655 | ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); |
| 656 | |
| 657 | if (col_nr >= table->n_v_def) { |
| 658 | return(NULL); |
| 659 | } |
| 660 | |
| 661 | s = table->v_col_names; |
| 662 | |
| 663 | if (s != NULL) { |
| 664 | for (ulint i = 0; i < col_nr; i++) { |
| 665 | s += strlen(s) + 1; |
| 666 | } |
| 667 | } |
| 668 | |
| 669 | return(s); |
| 670 | } |
| 671 | |
/** Search for a virtual column's position in InnoDB according to its
position in the original MySQL table definition.
@param[in] table target table
@param[in] col_nr column number (nth column in the MySQL table)
@return virtual column's position in InnoDB, or ULINT_UNDEFINED if not found */
| 677 | static |
| 678 | ulint |
| 679 | dict_table_get_v_col_pos_for_mysql( |
| 680 | const dict_table_t* table, |
| 681 | ulint col_nr) |
| 682 | { |
| 683 | ulint i; |
| 684 | |
| 685 | ut_ad(table); |
| 686 | ut_ad(col_nr < static_cast<ulint>(table->n_t_def)); |
| 687 | ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); |
| 688 | |
| 689 | for (i = 0; i < table->n_v_def; i++) { |
| 690 | if (col_nr == dict_get_v_col_mysql_pos( |
| 691 | table->v_cols[i].m_col.ind)) { |
| 692 | break; |
| 693 | } |
| 694 | } |
| 695 | |
| 696 | if (i == table->n_v_def) { |
| 697 | return(ULINT_UNDEFINED); |
| 698 | } |
| 699 | |
| 700 | return(i); |
| 701 | } |
| 702 | |
| 703 | /** Returns a virtual column's name according to its original |
| 704 | MySQL table position. |
| 705 | @param[in] table target table |
| 706 | @param[in] col_nr column number (nth column in the table) |
| 707 | @return column name. */ |
| 708 | static |
| 709 | const char* |
| 710 | dict_table_get_v_col_name_mysql( |
| 711 | const dict_table_t* table, |
| 712 | ulint col_nr) |
| 713 | { |
| 714 | ulint i = dict_table_get_v_col_pos_for_mysql(table, col_nr); |
| 715 | |
| 716 | if (i == ULINT_UNDEFINED) { |
| 717 | return(NULL); |
| 718 | } |
| 719 | |
| 720 | return(dict_table_get_v_col_name(table, i)); |
| 721 | } |
| 722 | |
| 723 | /** Get nth virtual column according to its original MySQL table position |
| 724 | @param[in] table target table |
| 725 | @param[in] col_nr column number in MySQL Table definition |
| 726 | @return dict_v_col_t ptr */ |
| 727 | dict_v_col_t* |
| 728 | dict_table_get_nth_v_col_mysql( |
| 729 | const dict_table_t* table, |
| 730 | ulint col_nr) |
| 731 | { |
| 732 | ulint i = dict_table_get_v_col_pos_for_mysql(table, col_nr); |
| 733 | |
| 734 | if (i == ULINT_UNDEFINED) { |
| 735 | return(NULL); |
| 736 | } |
| 737 | |
| 738 | return(dict_table_get_nth_v_col(table, i)); |
| 739 | } |
| 740 | |
| 741 | /** Allocate and init the autoinc latch of a given table. |
| 742 | This function must not be called concurrently on the same table object. |
| 743 | @param[in,out] table_void table whose autoinc latch to create */ |
| 744 | static |
| 745 | void |
| 746 | dict_table_autoinc_alloc( |
| 747 | void* table_void) |
| 748 | { |
| 749 | dict_table_t* table = static_cast<dict_table_t*>(table_void); |
| 750 | table->autoinc_mutex = UT_NEW_NOKEY(ib_mutex_t()); |
| 751 | ut_a(table->autoinc_mutex != NULL); |
| 752 | mutex_create(LATCH_ID_AUTOINC, table->autoinc_mutex); |
| 753 | } |
| 754 | |
| 755 | /** Allocate and init the zip_pad_mutex of a given index. |
| 756 | This function must not be called concurrently on the same index object. |
| 757 | @param[in,out] index_void index whose zip_pad_mutex to create */ |
| 758 | static |
| 759 | void |
| 760 | dict_index_zip_pad_alloc( |
| 761 | void* index_void) |
| 762 | { |
| 763 | dict_index_t* index = static_cast<dict_index_t*>(index_void); |
| 764 | index->zip_pad.mutex = UT_NEW_NOKEY(SysMutex()); |
| 765 | ut_a(index->zip_pad.mutex != NULL); |
| 766 | mutex_create(LATCH_ID_ZIP_PAD_MUTEX, index->zip_pad.mutex); |
| 767 | } |
| 768 | |
| 769 | /********************************************************************//** |
| 770 | Acquire the autoinc lock. */ |
| 771 | void |
| 772 | dict_table_autoinc_lock( |
| 773 | /*====================*/ |
| 774 | dict_table_t* table) /*!< in/out: table */ |
| 775 | { |
| 776 | os_once::do_or_wait_for_done( |
| 777 | &table->autoinc_mutex_created, |
| 778 | dict_table_autoinc_alloc, table); |
| 779 | |
| 780 | mutex_enter(table->autoinc_mutex); |
| 781 | } |
| 782 | |
| 783 | /** Acquire the zip_pad_mutex latch. |
| 784 | @param[in,out] index the index whose zip_pad_mutex to acquire.*/ |
| 785 | static |
| 786 | void |
| 787 | dict_index_zip_pad_lock( |
| 788 | dict_index_t* index) |
| 789 | { |
| 790 | os_once::do_or_wait_for_done( |
| 791 | &index->zip_pad.mutex_created, |
| 792 | dict_index_zip_pad_alloc, index); |
| 793 | |
| 794 | mutex_enter(index->zip_pad.mutex); |
| 795 | } |
| 796 | |
| 797 | /** Get all the FTS indexes on a table. |
| 798 | @param[in] table table |
| 799 | @param[out] indexes all FTS indexes on this table |
| 800 | @return number of FTS indexes */ |
| 801 | ulint |
| 802 | dict_table_get_all_fts_indexes( |
| 803 | const dict_table_t* table, |
| 804 | ib_vector_t* indexes) |
| 805 | { |
| 806 | dict_index_t* index; |
| 807 | |
| 808 | ut_a(ib_vector_size(indexes) == 0); |
| 809 | |
| 810 | for (index = dict_table_get_first_index(table); |
| 811 | index; |
| 812 | index = dict_table_get_next_index(index)) { |
| 813 | |
| 814 | if (index->type == DICT_FTS) { |
| 815 | ib_vector_push(indexes, &index); |
| 816 | } |
| 817 | } |
| 818 | |
| 819 | return(ib_vector_size(indexes)); |
| 820 | } |
| 821 | |
| 822 | /********************************************************************//** |
| 823 | Release the autoinc lock. */ |
| 824 | void |
| 825 | dict_table_autoinc_unlock( |
| 826 | /*======================*/ |
| 827 | dict_table_t* table) /*!< in/out: table */ |
| 828 | { |
| 829 | mutex_exit(table->autoinc_mutex); |
| 830 | } |
| 831 | |
| 832 | /** Looks for column n in an index. |
| 833 | @param[in] index index |
| 834 | @param[in] n column number |
| 835 | @param[in] inc_prefix true=consider column prefixes too |
| 836 | @param[in] is_virtual true==virtual column |
| 837 | @param[out] prefix_col_pos col num if prefix |
| 838 | @return position in internal representation of the index; |
| 839 | ULINT_UNDEFINED if not contained */ |
| 840 | ulint |
| 841 | dict_index_get_nth_col_or_prefix_pos( |
| 842 | const dict_index_t* index, |
| 843 | ulint n, |
| 844 | bool inc_prefix, |
| 845 | bool is_virtual, |
| 846 | ulint* prefix_col_pos) |
| 847 | { |
| 848 | const dict_field_t* field; |
| 849 | const dict_col_t* col; |
| 850 | ulint pos; |
| 851 | ulint n_fields; |
| 852 | |
| 853 | ut_ad(index); |
| 854 | ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); |
| 855 | |
| 856 | if (prefix_col_pos) { |
| 857 | *prefix_col_pos = ULINT_UNDEFINED; |
| 858 | } |
| 859 | |
| 860 | if (is_virtual) { |
| 861 | col = &(dict_table_get_nth_v_col(index->table, n)->m_col); |
| 862 | } else { |
| 863 | col = dict_table_get_nth_col(index->table, n); |
| 864 | } |
| 865 | |
| 866 | if (dict_index_is_clust(index)) { |
| 867 | |
| 868 | return(dict_col_get_clust_pos(col, index)); |
| 869 | } |
| 870 | |
| 871 | n_fields = dict_index_get_n_fields(index); |
| 872 | |
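/* For a secondary index, scan the fields. Any match (even on a
column prefix) is reported through *prefix_col_pos, but the
position is only returned if the whole column is indexed or the
caller accepts prefixes (inc_prefix). */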
| 873 | for (pos = 0; pos < n_fields; pos++) { |
| 874 | field = dict_index_get_nth_field(index, pos); |
| 875 | |
| 876 | if (col == field->col) { |
| 877 | if (prefix_col_pos) { |
| 878 | *prefix_col_pos = pos; |
| 879 | } |
| 880 | if (inc_prefix || field->prefix_len == 0) { |
| 881 | return(pos); |
| 882 | } |
| 883 | } |
| 884 | } |
| 885 | |
| 886 | return(ULINT_UNDEFINED); |
| 887 | } |
| 888 | |
| 889 | /** Returns TRUE if the index contains a column or a prefix of that column. |
| 890 | @param[in] index index |
| 891 | @param[in] n column number |
| 892 | @param[in] is_virtual whether it is a virtual col |
| 893 | @return TRUE if contains the column or its prefix */ |
| 894 | bool |
| 895 | dict_index_contains_col_or_prefix( |
| 896 | const dict_index_t* index, |
| 897 | ulint n, |
| 898 | bool is_virtual) |
| 899 | { |
| 900 | const dict_field_t* field; |
| 901 | const dict_col_t* col; |
| 902 | ulint pos; |
| 903 | ulint n_fields; |
| 904 | |
| 905 | ut_ad(index); |
| 906 | ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); |
| 907 | |
| 908 | if (dict_index_is_clust(index)) { |
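/* The clustered index contains all non-virtual columns of the
table; virtual columns are never stored in it. */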
| 909 | return(!is_virtual); |
| 910 | } |
| 911 | |
| 912 | if (is_virtual) { |
| 913 | col = &dict_table_get_nth_v_col(index->table, n)->m_col; |
| 914 | } else { |
| 915 | col = dict_table_get_nth_col(index->table, n); |
| 916 | } |
| 917 | |
| 918 | n_fields = dict_index_get_n_fields(index); |
| 919 | |
| 920 | for (pos = 0; pos < n_fields; pos++) { |
| 921 | field = dict_index_get_nth_field(index, pos); |
| 922 | |
| 923 | if (col == field->col) { |
| 924 | |
| 925 | return(true); |
| 926 | } |
| 927 | } |
| 928 | |
| 929 | return(false); |
| 930 | } |
| 931 | |
| 932 | /********************************************************************//** |
| 933 | Looks for a matching field in an index. The column has to be the same. The |
| 934 | column in index must be complete, or must contain a prefix longer than the |
| 935 | column in index2. That is, we must be able to construct the prefix in index2 |
| 936 | from the prefix in index. |
| 937 | @return position in internal representation of the index; |
| 938 | ULINT_UNDEFINED if not contained */ |
| 939 | ulint |
| 940 | dict_index_get_nth_field_pos( |
| 941 | /*=========================*/ |
| 942 | const dict_index_t* index, /*!< in: index from which to search */ |
| 943 | const dict_index_t* index2, /*!< in: index */ |
| 944 | ulint n) /*!< in: field number in index2 */ |
| 945 | { |
| 946 | const dict_field_t* field; |
| 947 | const dict_field_t* field2; |
| 948 | ulint n_fields; |
| 949 | ulint pos; |
| 950 | |
| 951 | ut_ad(index); |
| 952 | ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); |
| 953 | |
| 954 | field2 = dict_index_get_nth_field(index2, n); |
| 955 | |
| 956 | n_fields = dict_index_get_n_fields(index); |
| 957 | |
/* Are we looking for the MBR (Minimum Bounding Rectangle) field of
a spatial index? */
| 960 | bool is_mbr_fld = (n == 0 && dict_index_is_spatial(index2)); |
| 961 | |
| 962 | for (pos = 0; pos < n_fields; pos++) { |
| 963 | field = dict_index_get_nth_field(index, pos); |
| 964 | |
/* The first field of a spatial index is a transformed
MBR (Minimum Bounding Rectangle) field made out of the original
column, so its field->col still points to the original clustered
index column, but the actual content is different. So we cannot
consider them equal unless both of them are MBR fields. */
| 970 | if (pos == 0 && dict_index_is_spatial(index) && !is_mbr_fld) { |
| 971 | continue; |
| 972 | } |
| 973 | |
| 974 | if (field->col == field2->col |
| 975 | && (field->prefix_len == 0 |
| 976 | || (field->prefix_len >= field2->prefix_len |
| 977 | && field2->prefix_len != 0))) { |
| 978 | |
| 979 | return(pos); |
| 980 | } |
| 981 | } |
| 982 | |
| 983 | return(ULINT_UNDEFINED); |
| 984 | } |
| 985 | |
| 986 | /**********************************************************************//** |
| 987 | Returns a table object based on table id. |
| 988 | @return table, NULL if does not exist */ |
| 989 | dict_table_t* |
| 990 | dict_table_open_on_id( |
| 991 | /*==================*/ |
| 992 | table_id_t table_id, /*!< in: table id */ |
| 993 | ibool dict_locked, /*!< in: TRUE=data dictionary locked */ |
| 994 | dict_table_op_t table_op) /*!< in: operation to perform */ |
| 995 | { |
| 996 | dict_table_t* table; |
| 997 | |
| 998 | if (!dict_locked) { |
| 999 | mutex_enter(&dict_sys->mutex); |
| 1000 | } |
| 1001 | |
| 1002 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 1003 | |
| 1004 | table = dict_table_open_on_id_low( |
| 1005 | table_id, |
| 1006 | table_op == DICT_TABLE_OP_LOAD_TABLESPACE |
| 1007 | ? DICT_ERR_IGNORE_RECOVER_LOCK |
| 1008 | : DICT_ERR_IGNORE_NONE, |
| 1009 | table_op == DICT_TABLE_OP_OPEN_ONLY_IF_CACHED); |
| 1010 | |
| 1011 | if (table != NULL) { |
| 1012 | |
| 1013 | if (table->can_be_evicted) { |
| 1014 | dict_move_to_mru(table); |
| 1015 | } |
| 1016 | |
| 1017 | table->acquire(); |
| 1018 | |
| 1019 | MONITOR_INC(MONITOR_TABLE_REFERENCE); |
| 1020 | } |
| 1021 | |
| 1022 | if (!dict_locked) { |
| 1023 | dict_table_try_drop_aborted_and_mutex_exit( |
| 1024 | table, table_op == DICT_TABLE_OP_DROP_ORPHAN); |
| 1025 | } |
| 1026 | |
| 1027 | return(table); |
| 1028 | } |
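/* Typical usage (sketch): a handle returned by this function must be
released with dict_table_close(), for example:

	if (dict_table_t* t = dict_table_open_on_id(
		    id, FALSE, DICT_TABLE_OP_NORMAL)) {
		... use t ...
		dict_table_close(t, FALSE, FALSE);
	}
*/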
| 1029 | |
| 1030 | /********************************************************************//** |
| 1031 | Looks for column n position in the clustered index. |
| 1032 | @return position in internal representation of the clustered index */ |
| 1033 | ulint |
| 1034 | dict_table_get_nth_col_pos( |
| 1035 | /*=======================*/ |
| 1036 | const dict_table_t* table, /*!< in: table */ |
| 1037 | ulint n, /*!< in: column number */ |
| 1038 | ulint* prefix_col_pos) |
| 1039 | { |
| 1040 | return(dict_index_get_nth_col_pos(dict_table_get_first_index(table), |
| 1041 | n, prefix_col_pos)); |
| 1042 | } |
| 1043 | |
| 1044 | /********************************************************************//** |
| 1045 | Checks if a column is in the ordering columns of the clustered index of a |
| 1046 | table. Column prefixes are treated like whole columns. |
| 1047 | @return TRUE if the column, or its prefix, is in the clustered key */ |
| 1048 | ibool |
| 1049 | dict_table_col_in_clustered_key( |
| 1050 | /*============================*/ |
| 1051 | const dict_table_t* table, /*!< in: table */ |
| 1052 | ulint n) /*!< in: column number */ |
| 1053 | { |
| 1054 | const dict_index_t* index; |
| 1055 | const dict_field_t* field; |
| 1056 | const dict_col_t* col; |
| 1057 | ulint pos; |
| 1058 | ulint n_fields; |
| 1059 | |
| 1060 | ut_ad(table); |
| 1061 | |
| 1062 | col = dict_table_get_nth_col(table, n); |
| 1063 | |
| 1064 | index = dict_table_get_first_index(table); |
| 1065 | |
| 1066 | n_fields = dict_index_get_n_unique(index); |
| 1067 | |
| 1068 | for (pos = 0; pos < n_fields; pos++) { |
| 1069 | field = dict_index_get_nth_field(index, pos); |
| 1070 | |
| 1071 | if (col == field->col) { |
| 1072 | |
| 1073 | return(TRUE); |
| 1074 | } |
| 1075 | } |
| 1076 | |
| 1077 | return(FALSE); |
| 1078 | } |
| 1079 | |
| 1080 | /**********************************************************************//** |
| 1081 | Inits the data dictionary module. */ |
| 1082 | void |
| 1083 | dict_init(void) |
| 1084 | /*===========*/ |
| 1085 | { |
| 1086 | dict_operation_lock = static_cast<rw_lock_t*>( |
| 1087 | ut_zalloc_nokey(sizeof(*dict_operation_lock))); |
| 1088 | |
| 1089 | dict_sys = static_cast<dict_sys_t*>(ut_zalloc_nokey(sizeof(*dict_sys))); |
| 1090 | |
| 1091 | UT_LIST_INIT(dict_sys->table_LRU, &dict_table_t::table_LRU); |
| 1092 | UT_LIST_INIT(dict_sys->table_non_LRU, &dict_table_t::table_LRU); |
| 1093 | |
| 1094 | mutex_create(LATCH_ID_DICT_SYS, &dict_sys->mutex); |
| 1095 | |
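/* Both hash tables are sized relative to the buffer pool: roughly
one hash cell per DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE bytes
of buffer pool. */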
| 1096 | dict_sys->table_hash = hash_create( |
| 1097 | buf_pool_get_curr_size() |
| 1098 | / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE)); |
| 1099 | |
| 1100 | dict_sys->table_id_hash = hash_create( |
| 1101 | buf_pool_get_curr_size() |
| 1102 | / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE)); |
| 1103 | |
| 1104 | rw_lock_create(dict_operation_lock_key, |
| 1105 | dict_operation_lock, SYNC_DICT_OPERATION); |
| 1106 | |
| 1107 | if (!srv_read_only_mode) { |
| 1108 | dict_foreign_err_file = os_file_create_tmpfile(); |
| 1109 | ut_a(dict_foreign_err_file); |
| 1110 | } |
| 1111 | |
| 1112 | mutex_create(LATCH_ID_DICT_FOREIGN_ERR, &dict_foreign_err_mutex); |
| 1113 | } |
| 1114 | |
| 1115 | /**********************************************************************//** |
| 1116 | Move to the most recently used segment of the LRU list. */ |
| 1117 | void |
| 1118 | dict_move_to_mru( |
| 1119 | /*=============*/ |
| 1120 | dict_table_t* table) /*!< in: table to move to MRU */ |
| 1121 | { |
| 1122 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 1123 | ut_ad(dict_lru_validate()); |
| 1124 | ut_ad(dict_lru_find_table(table)); |
| 1125 | |
| 1126 | ut_a(table->can_be_evicted); |
| 1127 | |
| 1128 | UT_LIST_REMOVE(dict_sys->table_LRU, table); |
| 1129 | |
| 1130 | UT_LIST_ADD_FIRST(dict_sys->table_LRU, table); |
| 1131 | |
| 1132 | ut_ad(dict_lru_validate()); |
| 1133 | } |
| 1134 | |
| 1135 | /**********************************************************************//** |
Returns a table object and increments its open handle count.
| 1137 | NOTE! This is a high-level function to be used mainly from outside the |
| 1138 | 'dict' module. Inside this directory dict_table_get_low |
| 1139 | is usually the appropriate function. |
| 1140 | @return table, NULL if does not exist */ |
| 1141 | dict_table_t* |
| 1142 | dict_table_open_on_name( |
| 1143 | /*====================*/ |
| 1144 | const char* table_name, /*!< in: table name */ |
| 1145 | ibool dict_locked, /*!< in: TRUE=data dictionary locked */ |
| 1146 | ibool try_drop, /*!< in: TRUE=try to drop any orphan |
| 1147 | indexes after an aborted online |
| 1148 | index creation */ |
| 1149 | dict_err_ignore_t |
| 1150 | ignore_err) /*!< in: error to be ignored when |
| 1151 | loading a table definition */ |
| 1152 | { |
| 1153 | dict_table_t* table; |
| 1154 | DBUG_ENTER("dict_table_open_on_name" ); |
| 1155 | DBUG_PRINT("dict_table_open_on_name" , ("table: '%s'" , table_name)); |
| 1156 | |
| 1157 | if (!dict_locked) { |
| 1158 | mutex_enter(&dict_sys->mutex); |
| 1159 | } |
| 1160 | |
| 1161 | ut_ad(table_name); |
| 1162 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 1163 | |
| 1164 | table = dict_table_check_if_in_cache_low(table_name); |
| 1165 | |
| 1166 | if (table == NULL) { |
| 1167 | table = dict_load_table(table_name, true, ignore_err); |
| 1168 | } |
| 1169 | |
| 1170 | ut_ad(!table || table->cached); |
| 1171 | |
| 1172 | if (table != NULL) { |
| 1173 | |
| 1174 | /* If table is encrypted or corrupted */ |
| 1175 | if (ignore_err == DICT_ERR_IGNORE_NONE |
| 1176 | && !table->is_readable()) { |
| 1177 | /* Make life easy for drop table. */ |
| 1178 | dict_table_prevent_eviction(table); |
| 1179 | |
| 1180 | if (table->corrupted) { |
| 1181 | |
| 1182 | ib::error() << "Table " << table->name |
| 1183 | << " is corrupted. Please " |
| 1184 | "drop the table and recreate." ; |
| 1185 | if (!dict_locked) { |
| 1186 | mutex_exit(&dict_sys->mutex); |
| 1187 | } |
| 1188 | |
| 1189 | DBUG_RETURN(NULL); |
| 1190 | } |
| 1191 | |
| 1192 | if (table->can_be_evicted) { |
| 1193 | dict_move_to_mru(table); |
| 1194 | } |
| 1195 | |
| 1196 | table->acquire(); |
| 1197 | |
| 1198 | if (!dict_locked) { |
| 1199 | mutex_exit(&dict_sys->mutex); |
| 1200 | } |
| 1201 | |
| 1202 | DBUG_RETURN(table); |
| 1203 | } |
| 1204 | |
| 1205 | if (table->can_be_evicted) { |
| 1206 | dict_move_to_mru(table); |
| 1207 | } |
| 1208 | |
| 1209 | table->acquire(); |
| 1210 | |
| 1211 | MONITOR_INC(MONITOR_TABLE_REFERENCE); |
| 1212 | } |
| 1213 | |
| 1214 | ut_ad(dict_lru_validate()); |
| 1215 | |
| 1216 | if (!dict_locked) { |
| 1217 | dict_table_try_drop_aborted_and_mutex_exit(table, try_drop); |
| 1218 | } |
| 1219 | |
| 1220 | DBUG_RETURN(table); |
| 1221 | } |
| 1222 | |
| 1223 | /**********************************************************************//** |
| 1224 | Adds system columns to a table object. */ |
| 1225 | void |
| 1226 | dict_table_add_system_columns( |
| 1227 | /*==========================*/ |
| 1228 | dict_table_t* table, /*!< in/out: table */ |
| 1229 | mem_heap_t* heap) /*!< in: temporary heap */ |
| 1230 | { |
| 1231 | ut_ad(table); |
| 1232 | ut_ad(table->n_def == (table->n_cols - DATA_N_SYS_COLS)); |
| 1233 | ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); |
| 1234 | ut_ad(!table->cached); |
| 1235 | |
| 1236 | /* NOTE: the system columns MUST be added in the following order |
| 1237 | (so that they can be indexed by the numerical value of DATA_ROW_ID, |
| 1238 | etc.) and as the last columns of the table memory object. |
| 1239 | The clustered index will not always physically contain all system |
| 1240 | columns. */ |
| 1241 | |
| 1242 | dict_mem_table_add_col(table, heap, "DB_ROW_ID" , DATA_SYS, |
| 1243 | DATA_ROW_ID | DATA_NOT_NULL, |
| 1244 | DATA_ROW_ID_LEN); |
| 1245 | |
| 1246 | compile_time_assert(DATA_ROW_ID == 0); |
| 1247 | dict_mem_table_add_col(table, heap, "DB_TRX_ID" , DATA_SYS, |
| 1248 | DATA_TRX_ID | DATA_NOT_NULL, |
| 1249 | DATA_TRX_ID_LEN); |
| 1250 | compile_time_assert(DATA_TRX_ID == 1); |
| 1251 | dict_mem_table_add_col(table, heap, "DB_ROLL_PTR" , DATA_SYS, |
| 1252 | DATA_ROLL_PTR | DATA_NOT_NULL, |
| 1253 | DATA_ROLL_PTR_LEN); |
| 1254 | compile_time_assert(DATA_ROLL_PTR == 2); |
| 1255 | |
| 1256 | /* This check reminds that if a new system column is added to |
| 1257 | the program, it should be dealt with here */ |
| 1258 | compile_time_assert(DATA_N_SYS_COLS == 3); |
| 1259 | } |
| 1260 | |
| 1261 | /** Add the table definition to the data dictionary cache */ |
| 1262 | void |
| 1263 | dict_table_t::add_to_cache() |
| 1264 | { |
| 1265 | ut_ad(dict_lru_validate()); |
| 1266 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 1267 | |
| 1268 | cached = TRUE; |
| 1269 | |
| 1270 | ulint fold = ut_fold_string(name.m_name); |
| 1271 | ulint id_fold = ut_fold_ull(id); |
| 1272 | |
| 1273 | /* Look for a table with the same name: error if such exists */ |
| 1274 | { |
| 1275 | dict_table_t* table2; |
| 1276 | HASH_SEARCH(name_hash, dict_sys->table_hash, fold, |
| 1277 | dict_table_t*, table2, ut_ad(table2->cached), |
| 1278 | !strcmp(table2->name.m_name, name.m_name)); |
| 1279 | ut_a(table2 == NULL); |
| 1280 | |
| 1281 | #ifdef UNIV_DEBUG |
| 1282 | /* Look for the same table pointer with a different name */ |
| 1283 | HASH_SEARCH_ALL(name_hash, dict_sys->table_hash, |
| 1284 | dict_table_t*, table2, ut_ad(table2->cached), |
| 1285 | table2 == this); |
| 1286 | ut_ad(table2 == NULL); |
| 1287 | #endif /* UNIV_DEBUG */ |
| 1288 | } |
| 1289 | |
| 1290 | /* Look for a table with the same id: error if such exists */ |
| 1291 | { |
| 1292 | dict_table_t* table2; |
| 1293 | HASH_SEARCH(id_hash, dict_sys->table_id_hash, id_fold, |
| 1294 | dict_table_t*, table2, ut_ad(table2->cached), |
| 1295 | table2->id == id); |
| 1296 | ut_a(table2 == NULL); |
| 1297 | |
| 1298 | #ifdef UNIV_DEBUG |
| 1299 | /* Look for the same table pointer with a different id */ |
| 1300 | HASH_SEARCH_ALL(id_hash, dict_sys->table_id_hash, |
| 1301 | dict_table_t*, table2, ut_ad(table2->cached), |
| 1302 | table2 == this); |
| 1303 | ut_ad(table2 == NULL); |
| 1304 | #endif /* UNIV_DEBUG */ |
| 1305 | } |
| 1306 | |
| 1307 | /* Add table to hash table of tables */ |
| 1308 | HASH_INSERT(dict_table_t, name_hash, dict_sys->table_hash, fold, |
| 1309 | this); |
| 1310 | |
| 1311 | /* Add table to hash table of tables based on table id */ |
| 1312 | HASH_INSERT(dict_table_t, id_hash, dict_sys->table_id_hash, id_fold, |
| 1313 | this); |
| 1314 | |
| 1315 | if (can_be_evicted) { |
| 1316 | UT_LIST_ADD_FIRST(dict_sys->table_LRU, this); |
| 1317 | } else { |
| 1318 | UT_LIST_ADD_FIRST(dict_sys->table_non_LRU, this); |
| 1319 | } |
| 1320 | |
| 1321 | ut_ad(dict_lru_validate()); |
| 1322 | } |
| 1323 | |
| 1324 | /**********************************************************************//** |
| 1325 | Test whether a table can be evicted from the LRU cache. |
| 1326 | @return TRUE if table can be evicted. */ |
| 1327 | static |
| 1328 | ibool |
| 1329 | dict_table_can_be_evicted( |
| 1330 | /*======================*/ |
| 1331 | const dict_table_t* table) /*!< in: table to test */ |
| 1332 | { |
| 1333 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 1334 | ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); |
| 1335 | |
| 1336 | ut_a(table->can_be_evicted); |
| 1337 | ut_a(table->foreign_set.empty()); |
| 1338 | ut_a(table->referenced_set.empty()); |
| 1339 | |
| 1340 | if (table->get_ref_count() == 0) { |
| 1341 | /* The transaction commit and rollback are called from |
| 1342 | outside the handler interface. This means that there is |
| 1343 | a window where the table->n_ref_count can be zero but |
| 1344 | the table instance is in "use". */ |
| 1345 | |
| 1346 | if (lock_table_has_locks(table)) { |
| 1347 | return(FALSE); |
| 1348 | } |
| 1349 | |
| 1350 | #ifdef BTR_CUR_HASH_ADAPT |
| 1351 | for (dict_index_t* index = dict_table_get_first_index(table); |
| 1352 | index != NULL; |
| 1353 | index = dict_table_get_next_index(index)) { |
| 1354 | |
| 1355 | btr_search_t* info = btr_search_get_info(index); |
| 1356 | |
| 1357 | /* We are not allowed to free the in-memory index |
| 1358 | struct dict_index_t until all entries in the adaptive |
hash index that point to any of the pages belonging to
this b-tree index are dropped. This is so because
| 1361 | dropping of these entries require access to |
| 1362 | dict_index_t struct. To avoid such scenario we keep |
| 1363 | a count of number of such pages in the search_info and |
| 1364 | only free the dict_index_t struct when this count |
| 1365 | drops to zero. |
| 1366 | |
| 1367 | See also: dict_index_remove_from_cache_low() */ |
| 1368 | |
| 1369 | if (btr_search_info_get_ref_count(info, index) > 0) { |
| 1370 | return(FALSE); |
| 1371 | } |
| 1372 | } |
| 1373 | #endif /* BTR_CUR_HASH_ADAPT */ |
| 1374 | |
| 1375 | return(TRUE); |
| 1376 | } |
| 1377 | |
| 1378 | return(FALSE); |
| 1379 | } |
| 1380 | |
| 1381 | /**********************************************************************//** |
| 1382 | Make room in the table cache by evicting an unused table. The unused table |
should not be part of a FK relationship and not currently be in use by any user
| 1384 | transaction. There is no guarantee that it will remove a table. |
| 1385 | @return number of tables evicted. If the number of tables in the dict_LRU |
| 1386 | is less than max_tables it will not do anything. */ |
| 1387 | ulint |
| 1388 | dict_make_room_in_cache( |
| 1389 | /*====================*/ |
| 1390 | ulint max_tables, /*!< in: max tables allowed in cache */ |
| 1391 | ulint pct_check) /*!< in: max percent to check */ |
| 1392 | { |
| 1393 | ulint i; |
| 1394 | ulint len; |
| 1395 | dict_table_t* table; |
| 1396 | ulint check_up_to; |
| 1397 | ulint n_evicted = 0; |
| 1398 | |
| 1399 | ut_a(pct_check > 0); |
| 1400 | ut_a(pct_check <= 100); |
| 1401 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 1402 | ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); |
| 1403 | ut_ad(dict_lru_validate()); |
| 1404 | |
| 1405 | i = len = UT_LIST_GET_LEN(dict_sys->table_LRU); |
| 1406 | |
| 1407 | if (len < max_tables) { |
| 1408 | return(0); |
| 1409 | } |
| 1410 | |
| 1411 | check_up_to = len - ((len * pct_check) / 100); |
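/* Example: with len == 1000 and pct_check == 10, check_up_to is
900, so at most the 100 least recently used tables (the tail of
the LRU list) are examined below. */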
| 1412 | |
| 1413 | /* Check for overflow */ |
| 1414 | ut_a(i == 0 || check_up_to <= i); |
| 1415 | |
| 1416 | /* Find a suitable candidate to evict from the cache. Don't scan the |
| 1417 | entire LRU list. Only scan pct_check list entries. */ |
| 1418 | |
| 1419 | for (table = UT_LIST_GET_LAST(dict_sys->table_LRU); |
| 1420 | table != NULL |
| 1421 | && i > check_up_to |
| 1422 | && (len - n_evicted) > max_tables; |
| 1423 | --i) { |
| 1424 | |
| 1425 | dict_table_t* prev_table; |
| 1426 | |
| 1427 | prev_table = UT_LIST_GET_PREV(table_LRU, table); |
| 1428 | |
| 1429 | if (dict_table_can_be_evicted(table)) { |
| 1430 | |
| 1431 | DBUG_EXECUTE_IF("crash_if_fts_table_is_evicted" , |
| 1432 | { |
| 1433 | if (table->fts && |
| 1434 | dict_table_has_fts_index(table)) { |
| 1435 | ut_ad(0); |
| 1436 | } |
| 1437 | };); |
| 1438 | dict_table_remove_from_cache_low(table, TRUE); |
| 1439 | |
| 1440 | ++n_evicted; |
| 1441 | } |
| 1442 | |
| 1443 | table = prev_table; |
| 1444 | } |
| 1445 | |
| 1446 | return(n_evicted); |
| 1447 | } |
| 1448 | |
| 1449 | /**********************************************************************//** |
| 1450 | Move a table to the non-LRU list from the LRU list. */ |
| 1451 | void |
| 1452 | dict_table_move_from_lru_to_non_lru( |
| 1453 | /*================================*/ |
| 1454 | dict_table_t* table) /*!< in: table to move from LRU to non-LRU */ |
| 1455 | { |
| 1456 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 1457 | ut_ad(dict_lru_find_table(table)); |
| 1458 | |
| 1459 | ut_a(table->can_be_evicted); |
| 1460 | |
| 1461 | UT_LIST_REMOVE(dict_sys->table_LRU, table); |
| 1462 | |
| 1463 | UT_LIST_ADD_LAST(dict_sys->table_non_LRU, table); |
| 1464 | |
| 1465 | table->can_be_evicted = FALSE; |
| 1466 | } |
| 1467 | |
| 1468 | /** Looks for an index with the given id given a table instance. |
| 1469 | @param[in] table table instance |
| 1470 | @param[in] id index id |
| 1471 | @return index or NULL */ |
| 1472 | dict_index_t* |
| 1473 | dict_table_find_index_on_id( |
| 1474 | const dict_table_t* table, |
| 1475 | index_id_t id) |
| 1476 | { |
| 1477 | dict_index_t* index; |
| 1478 | |
| 1479 | for (index = dict_table_get_first_index(table); |
| 1480 | index != NULL; |
| 1481 | index = dict_table_get_next_index(index)) { |
| 1482 | |
| 1483 | if (id == index->id) { |
| 1484 | /* Found */ |
| 1485 | |
| 1486 | return(index); |
| 1487 | } |
| 1488 | } |
| 1489 | |
| 1490 | return(NULL); |
| 1491 | } |
| 1492 | |
| 1493 | /**********************************************************************//** |
| 1494 | Looks for an index with the given id. NOTE that we do not reserve |
| 1495 | the dictionary mutex: this function is for emergency purposes like |
| 1496 | printing info of a corrupt database page! |
| 1497 | @return index or NULL if not found in cache */ |
| 1498 | dict_index_t* |
| 1499 | dict_index_find_on_id_low( |
| 1500 | /*======================*/ |
| 1501 | index_id_t id) /*!< in: index id */ |
| 1502 | { |
| 1503 | dict_table_t* table; |
| 1504 | |
| 1505 | /* This can happen if the system tablespace is the wrong page size */ |
| 1506 | if (dict_sys == NULL) { |
| 1507 | return(NULL); |
| 1508 | } |
| 1509 | |
| 1510 | for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); |
| 1511 | table != NULL; |
| 1512 | table = UT_LIST_GET_NEXT(table_LRU, table)) { |
| 1513 | |
| 1514 | dict_index_t* index = dict_table_find_index_on_id(table, id); |
| 1515 | |
| 1516 | if (index != NULL) { |
| 1517 | return(index); |
| 1518 | } |
| 1519 | } |
| 1520 | |
| 1521 | for (table = UT_LIST_GET_FIRST(dict_sys->table_non_LRU); |
| 1522 | table != NULL; |
| 1523 | table = UT_LIST_GET_NEXT(table_LRU, table)) { |
| 1524 | |
| 1525 | dict_index_t* index = dict_table_find_index_on_id(table, id); |
| 1526 | |
| 1527 | if (index != NULL) { |
| 1528 | return(index); |
| 1529 | } |
| 1530 | } |
| 1531 | |
| 1532 | return(NULL); |
| 1533 | } |
| 1534 | |
| 1535 | /** Function object to remove a foreign key constraint from the |
| 1536 | referenced_set of the referenced table. The foreign key object is |
| 1537 | also removed from the dictionary cache. The foreign key constraint |
| 1538 | is not removed from the foreign_set of the table containing the |
| 1539 | constraint. */ |
| 1540 | struct dict_foreign_remove_partial |
| 1541 | { |
| 1542 | void operator()(dict_foreign_t* foreign) { |
| 1543 | dict_table_t* table = foreign->referenced_table; |
| 1544 | if (table != NULL) { |
| 1545 | table->referenced_set.erase(foreign); |
| 1546 | } |
| 1547 | dict_foreign_free(foreign); |
| 1548 | } |
| 1549 | }; |
| 1550 | |
| 1551 | /**********************************************************************//** |
| 1552 | Renames a table object. |
@return DB_SUCCESS or error code */
| 1554 | dberr_t |
| 1555 | dict_table_rename_in_cache( |
| 1556 | /*=======================*/ |
| 1557 | dict_table_t* table, /*!< in/out: table */ |
| 1558 | const char* new_name, /*!< in: new name */ |
| 1559 | ibool rename_also_foreigns)/*!< in: in ALTER TABLE we want |
| 1560 | to preserve the original table name |
| 1561 | in constraints which reference it */ |
| 1562 | { |
| 1563 | dberr_t err; |
| 1564 | dict_foreign_t* foreign; |
| 1565 | ulint fold; |
| 1566 | char old_name[MAX_FULL_NAME_LEN + 1]; |
| 1567 | os_file_type_t ftype; |
| 1568 | |
| 1569 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 1570 | |
| 1571 | 	/* Store the old/current name in an automatic variable */
| 1572 | if (strlen(table->name.m_name) + 1 <= sizeof(old_name)) { |
| 1573 | strcpy(old_name, table->name.m_name); |
| 1574 | } else { |
| 1575 | ib::fatal() << "Too long table name: " |
| 1576 | << table->name |
| 1577 | << ", max length is " << MAX_FULL_NAME_LEN; |
| 1578 | } |
| 1579 | |
| 1580 | fold = ut_fold_string(new_name); |
| 1581 | |
| 1582 | /* Look for a table with the same name: error if such exists */ |
| 1583 | dict_table_t* table2; |
| 1584 | HASH_SEARCH(name_hash, dict_sys->table_hash, fold, |
| 1585 | dict_table_t*, table2, ut_ad(table2->cached), |
| 1586 | (ut_strcmp(table2->name.m_name, new_name) == 0)); |
| 1587 | 	DBUG_EXECUTE_IF("dict_table_rename_in_cache_failure",
| 1588 | if (table2 == NULL) { |
| 1589 | table2 = (dict_table_t*) -1; |
| 1590 | } ); |
| 1591 | if (table2) { |
| 1592 | ib::error() << "Cannot rename table '" << old_name |
| 1593 | << "' to '" << new_name << "' since the" |
| 1594 | 			" dictionary cache already contains '" << new_name << "'.";
| 1595 | return(DB_ERROR); |
| 1596 | } |
| 1597 | |
| 1598 | /* If the table is stored in a single-table tablespace, rename the |
| 1599 | .ibd file and rebuild the .isl file if needed. */ |
| 1600 | |
| 1601 | if (!table->space) { |
| 1602 | bool exists; |
| 1603 | char* filepath; |
| 1604 | |
| 1605 | ut_ad(dict_table_is_file_per_table(table)); |
| 1606 | ut_ad(!table->is_temporary()); |
| 1607 | |
| 1608 | /* Make sure the data_dir_path is set. */ |
| 1609 | dict_get_and_save_data_dir_path(table, true); |
| 1610 | |
| 1611 | if (DICT_TF_HAS_DATA_DIR(table->flags)) { |
| 1612 | ut_a(table->data_dir_path); |
| 1613 | |
| 1614 | filepath = fil_make_filepath( |
| 1615 | table->data_dir_path, table->name.m_name, |
| 1616 | IBD, true); |
| 1617 | } else { |
| 1618 | filepath = fil_make_filepath( |
| 1619 | NULL, table->name.m_name, IBD, false); |
| 1620 | } |
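| | 
| | 		/* Illustrative note (an assumption about the usual layout, not
| | 		verified here): without DATA DIRECTORY the branch above builds a
| | 		path relative to the data directory, e.g. "./test/t1.ibd" for a
| | 		table named test/t1, while the DATA DIRECTORY branch builds the
| | 		.ibd path under table->data_dir_path and an .isl link file in the
| | 		data directory points to it. */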
| 1621 | |
| 1622 | if (filepath == NULL) { |
| 1623 | return(DB_OUT_OF_MEMORY); |
| 1624 | } |
| 1625 | |
| 1626 | 	fil_delete_tablespace(table->space_id
| 1627 | #ifdef BTR_CUR_HASH_ADAPT |
| 1628 | , true |
| 1629 | #endif /* BTR_CUR_HASH_ADAPT */ |
| 1630 | ); |
| 1631 | |
| 1632 | /* Delete any temp file hanging around. */ |
| 1633 | if (os_file_status(filepath, &exists, &ftype) |
| 1634 | && exists |
| 1635 | && !os_file_delete_if_exists(innodb_temp_file_key, |
| 1636 | filepath, NULL)) { |
| 1637 | |
| 1638 | ib::info() << "Delete of " << filepath << " failed." ; |
| 1639 | } |
| 1640 | ut_free(filepath); |
| 1641 | |
| 1642 | } else if (dict_table_is_file_per_table(table)) { |
| 1643 | char* new_path; |
| 1644 | const char* old_path = UT_LIST_GET_FIRST(table->space->chain) |
| 1645 | ->name; |
| 1646 | |
| 1647 | ut_ad(!table->is_temporary()); |
| 1648 | |
| 1649 | if (DICT_TF_HAS_DATA_DIR(table->flags)) { |
| 1650 | new_path = os_file_make_new_pathname( |
| 1651 | old_path, new_name); |
| 1652 | err = RemoteDatafile::create_link_file( |
| 1653 | new_name, new_path); |
| 1654 | |
| 1655 | if (err != DB_SUCCESS) { |
| 1656 | ut_free(new_path); |
| 1657 | return(DB_TABLESPACE_EXISTS); |
| 1658 | } |
| 1659 | } else { |
| 1660 | new_path = fil_make_filepath( |
| 1661 | NULL, new_name, IBD, false); |
| 1662 | } |
| 1663 | |
| 1664 | /* New filepath must not exist. */ |
| 1665 | err = table->space->rename(new_name, new_path, true); |
| 1666 | ut_free(new_path); |
| 1667 | |
| 1668 | 		/* If the tablespace is remote, a new .isl file was created.
| 1669 | 		If that succeeded, delete the old one; if not, delete the new one. */
| 1670 | if (DICT_TF_HAS_DATA_DIR(table->flags)) { |
| 1671 | RemoteDatafile::delete_link_file( |
| 1672 | err == DB_SUCCESS ? old_name : new_name); |
| 1673 | } |
| 1674 | |
| 1675 | if (err != DB_SUCCESS) { |
| 1676 | return err; |
| 1677 | } |
| 1678 | } |
| 1679 | |
| 1680 | /* Remove table from the hash tables of tables */ |
| 1681 | HASH_DELETE(dict_table_t, name_hash, dict_sys->table_hash, |
| 1682 | ut_fold_string(old_name), table); |
| 1683 | |
| 1684 | if (strlen(new_name) > strlen(table->name.m_name)) { |
| 1685 | 		/* We allocate MAX_FULL_NAME_LEN + 1 bytes here to avoid
| 1686 | 		memory fragmentation; we assume that repeated calls of
| 1687 | 		ut_realloc() with the same size do not cause fragmentation. */
| 1688 | ut_a(strlen(new_name) <= MAX_FULL_NAME_LEN); |
| 1689 | |
| 1690 | table->name.m_name = static_cast<char*>( |
| 1691 | ut_realloc(table->name.m_name, MAX_FULL_NAME_LEN + 1)); |
| 1692 | } |
| 1693 | strcpy(table->name.m_name, new_name); |
| 1694 | |
| 1695 | /* Add table to hash table of tables */ |
| 1696 | HASH_INSERT(dict_table_t, name_hash, dict_sys->table_hash, fold, |
| 1697 | table); |
| 1698 | |
| 1699 | if (!rename_also_foreigns) { |
| 1700 | 		/* In ALTER TABLE we think of the rename table operation
| 1701 | 		in the direction table -> temporary table (#sql...)
| 1702 | 		as dropping the table with the old name and creating
| 1703 | 		a new one with the new name. Thus we kind of drop the
| 1704 | 		constraints from the dictionary cache here. The foreign key
| 1705 | 		constraints will be inherited by the new table from the
| 1706 | 		system tables through a call of dict_load_foreigns. */
| 1707 | |
| 1708 | /* Remove the foreign constraints from the cache */ |
| 1709 | std::for_each(table->foreign_set.begin(), |
| 1710 | table->foreign_set.end(), |
| 1711 | dict_foreign_remove_partial()); |
| 1712 | table->foreign_set.clear(); |
| 1713 | |
| 1714 | /* Reset table field in referencing constraints */ |
| 1715 | for (dict_foreign_set::iterator it |
| 1716 | = table->referenced_set.begin(); |
| 1717 | it != table->referenced_set.end(); |
| 1718 | ++it) { |
| 1719 | |
| 1720 | foreign = *it; |
| 1721 | foreign->referenced_table = NULL; |
| 1722 | foreign->referenced_index = NULL; |
| 1723 | |
| 1724 | } |
| 1725 | |
| 1726 | /* Make the set of referencing constraints empty */ |
| 1727 | table->referenced_set.clear(); |
| 1728 | |
| 1729 | return(DB_SUCCESS); |
| 1730 | } |
| 1731 | |
| 1732 | 	/* Update the table name fields in foreign constraints, and also
| 1733 | 	update the constraint ids of new-format (>= 4.0.18) constraints. Note
| 1734 | 	that at this point we have already changed table->name to the new name. */
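| | 
| | 	/* A sketch of the renaming below (hypothetical names, for illustration
| | 	only): a generated constraint id keeps the table name as its prefix, so
| | 	renaming test/child to test/child2 turns "test/child_ibfk_1" into
| | 	"test/child2_ibfk_1", whereas a user-supplied id such as
| | 	"test/fk_parent" only has its database name prefix replaced. */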
| 1735 | |
| 1736 | dict_foreign_set fk_set; |
| 1737 | |
| 1738 | for (;;) { |
| 1739 | |
| 1740 | dict_foreign_set::iterator it |
| 1741 | = table->foreign_set.begin(); |
| 1742 | |
| 1743 | if (it == table->foreign_set.end()) { |
| 1744 | break; |
| 1745 | } |
| 1746 | |
| 1747 | foreign = *it; |
| 1748 | |
| 1749 | if (foreign->referenced_table) { |
| 1750 | foreign->referenced_table->referenced_set.erase(foreign); |
| 1751 | } |
| 1752 | |
| 1753 | if (ut_strlen(foreign->foreign_table_name) |
| 1754 | < ut_strlen(table->name.m_name)) { |
| 1755 | /* Allocate a longer name buffer; |
| 1756 | TODO: store buf len to save memory */ |
| 1757 | |
| 1758 | foreign->foreign_table_name = mem_heap_strdup( |
| 1759 | foreign->heap, table->name.m_name); |
| 1760 | dict_mem_foreign_table_name_lookup_set(foreign, TRUE); |
| 1761 | } else { |
| 1762 | strcpy(foreign->foreign_table_name, |
| 1763 | table->name.m_name); |
| 1764 | dict_mem_foreign_table_name_lookup_set(foreign, FALSE); |
| 1765 | } |
| 1766 | if (strchr(foreign->id, '/')) { |
| 1767 | /* This is a >= 4.0.18 format id */ |
| 1768 | |
| 1769 | ulint db_len; |
| 1770 | char* old_id; |
| 1771 | char old_name_cs_filename[MAX_TABLE_NAME_LEN+20]; |
| 1772 | uint errors = 0; |
| 1773 | |
| 1774 | /* All table names are internally stored in charset |
| 1775 | my_charset_filename (except the temp tables and the |
| 1776 | partition identifier suffix in partition tables). The |
| 1777 | foreign key constraint names are internally stored |
| 1778 | in UTF-8 charset. The variable fkid here is used |
| 1779 | to store foreign key constraint name in charset |
| 1780 | my_charset_filename for comparison further below. */ |
| 1781 | char fkid[MAX_TABLE_NAME_LEN+20]; |
| 1782 | ibool on_tmp = FALSE; |
| 1783 | |
| 1784 | /* The old table name in my_charset_filename is stored |
| 1785 | in old_name_cs_filename */ |
| 1786 | |
| 1787 | strncpy(old_name_cs_filename, old_name, |
| 1788 | MAX_TABLE_NAME_LEN); |
| 1789 | if (strstr(old_name, TEMP_TABLE_PATH_PREFIX) == NULL) { |
| 1790 | |
| 1791 | innobase_convert_to_system_charset( |
| 1792 | strchr(old_name_cs_filename, '/') + 1, |
| 1793 | strchr(old_name, '/') + 1, |
| 1794 | MAX_TABLE_NAME_LEN, &errors); |
| 1795 | |
| 1796 | if (errors) { |
| 1797 | 					/* There was an error converting
| 1798 | 					the old table name to UTF-8. This
| 1799 | 					probably means that the old table
| 1800 | 					name is actually already in UTF-8. */
| 1801 | innobase_convert_to_filename_charset( |
| 1802 | strchr(old_name_cs_filename, |
| 1803 | '/') + 1, |
| 1804 | strchr(old_name, '/') + 1, |
| 1805 | MAX_TABLE_NAME_LEN); |
| 1806 | } else { |
| 1807 | /* Old name already in |
| 1808 | my_charset_filename */ |
| 1809 | strncpy(old_name_cs_filename, old_name, |
| 1810 | MAX_TABLE_NAME_LEN); |
| 1811 | } |
| 1812 | } |
| 1813 | |
| 1814 | strncpy(fkid, foreign->id, MAX_TABLE_NAME_LEN); |
| 1815 | |
| 1816 | if (strstr(fkid, TEMP_TABLE_PATH_PREFIX) == NULL) { |
| 1817 | innobase_convert_to_filename_charset( |
| 1818 | strchr(fkid, '/') + 1, |
| 1819 | strchr(foreign->id, '/') + 1, |
| 1820 | MAX_TABLE_NAME_LEN+20); |
| 1821 | } else { |
| 1822 | on_tmp = TRUE; |
| 1823 | } |
| 1824 | |
| 1825 | old_id = mem_strdup(foreign->id); |
| 1826 | |
| 1827 | if (ut_strlen(fkid) > ut_strlen(old_name_cs_filename) |
| 1828 | + ((sizeof dict_ibfk) - 1) |
| 1829 | && !memcmp(fkid, old_name_cs_filename, |
| 1830 | ut_strlen(old_name_cs_filename)) |
| 1831 | && !memcmp(fkid + ut_strlen(old_name_cs_filename), |
| 1832 | dict_ibfk, (sizeof dict_ibfk) - 1)) { |
| 1833 | |
| 1834 | /* This is a generated >= 4.0.18 format id */ |
| 1835 | |
| 1836 | 				char	table_name[MAX_TABLE_NAME_LEN] = "";
| 1837 | uint errors = 0; |
| 1838 | |
| 1839 | if (strlen(table->name.m_name) |
| 1840 | > strlen(old_name)) { |
| 1841 | foreign->id = static_cast<char*>( |
| 1842 | mem_heap_alloc( |
| 1843 | foreign->heap, |
| 1844 | strlen(table->name.m_name) |
| 1845 | + strlen(old_id) + 1)); |
| 1846 | } |
| 1847 | |
| 1848 | /* Convert the table name to UTF-8 */ |
| 1849 | strncpy(table_name, table->name.m_name, |
| 1850 | MAX_TABLE_NAME_LEN); |
| 1851 | innobase_convert_to_system_charset( |
| 1852 | strchr(table_name, '/') + 1, |
| 1853 | strchr(table->name.m_name, '/') + 1, |
| 1854 | MAX_TABLE_NAME_LEN, &errors); |
| 1855 | |
| 1856 | if (errors) { |
| 1857 | /* Table name could not be converted |
| 1858 | from charset my_charset_filename to |
| 1859 | UTF-8. This means that the table name |
| 1860 | is already in UTF-8 (#mysql#50). */ |
| 1861 | strncpy(table_name, table->name.m_name, |
| 1862 | MAX_TABLE_NAME_LEN); |
| 1863 | } |
| 1864 | |
| 1865 | /* Replace the prefix 'databasename/tablename' |
| 1866 | with the new names */ |
| 1867 | strcpy(foreign->id, table_name); |
| 1868 | if (on_tmp) { |
| 1869 | strcat(foreign->id, |
| 1870 | old_id + ut_strlen(old_name)); |
| 1871 | } else { |
| 1872 | sprintf(strchr(foreign->id, '/') + 1, |
| 1873 | 						"%s%s",
| 1874 | 						strchr(table_name, '/') + 1,
| 1875 | 						strstr(old_id, "_ibfk_"));
| 1876 | } |
| 1877 | |
| 1878 | } else { |
| 1879 | /* This is a >= 4.0.18 format id where the user |
| 1880 | gave the id name */ |
| 1881 | db_len = dict_get_db_name_len( |
| 1882 | table->name.m_name) + 1; |
| 1883 | |
| 1884 | if (db_len - 1 |
| 1885 | > dict_get_db_name_len(foreign->id)) { |
| 1886 | |
| 1887 | foreign->id = static_cast<char*>( |
| 1888 | mem_heap_alloc( |
| 1889 | foreign->heap, |
| 1890 | db_len + strlen(old_id) + 1)); |
| 1891 | } |
| 1892 | |
| 1893 | /* Replace the database prefix in id with the |
| 1894 | one from table->name */ |
| 1895 | |
| 1896 | ut_memcpy(foreign->id, |
| 1897 | table->name.m_name, db_len); |
| 1898 | |
| 1899 | strcpy(foreign->id + db_len, |
| 1900 | dict_remove_db_name(old_id)); |
| 1901 | } |
| 1902 | |
| 1903 | ut_free(old_id); |
| 1904 | } |
| 1905 | |
| 1906 | table->foreign_set.erase(it); |
| 1907 | fk_set.insert(foreign); |
| 1908 | |
| 1909 | if (foreign->referenced_table) { |
| 1910 | foreign->referenced_table->referenced_set.insert(foreign); |
| 1911 | } |
| 1912 | } |
| 1913 | |
| 1914 | ut_a(table->foreign_set.empty()); |
| 1915 | table->foreign_set.swap(fk_set); |
| 1916 | |
| 1917 | for (dict_foreign_set::iterator it = table->referenced_set.begin(); |
| 1918 | it != table->referenced_set.end(); |
| 1919 | ++it) { |
| 1920 | |
| 1921 | foreign = *it; |
| 1922 | |
| 1923 | if (ut_strlen(foreign->referenced_table_name) |
| 1924 | < ut_strlen(table->name.m_name)) { |
| 1925 | /* Allocate a longer name buffer; |
| 1926 | TODO: store buf len to save memory */ |
| 1927 | |
| 1928 | foreign->referenced_table_name = mem_heap_strdup( |
| 1929 | foreign->heap, table->name.m_name); |
| 1930 | |
| 1931 | dict_mem_referenced_table_name_lookup_set( |
| 1932 | foreign, TRUE); |
| 1933 | } else { |
| 1934 | /* Use the same buffer */ |
| 1935 | strcpy(foreign->referenced_table_name, |
| 1936 | table->name.m_name); |
| 1937 | |
| 1938 | dict_mem_referenced_table_name_lookup_set( |
| 1939 | foreign, FALSE); |
| 1940 | } |
| 1941 | } |
| 1942 | |
| 1943 | return(DB_SUCCESS); |
| 1944 | } |
| 1945 | |
| 1946 | /**********************************************************************//** |
| 1947 | Change the id of a table object in the dictionary cache. This is used in |
| 1948 | DISCARD TABLESPACE. */ |
| 1949 | void |
| 1950 | dict_table_change_id_in_cache( |
| 1951 | /*==========================*/ |
| 1952 | dict_table_t* table, /*!< in/out: table object already in cache */ |
| 1953 | table_id_t new_id) /*!< in: new id to set */ |
| 1954 | { |
| 1955 | ut_ad(table); |
| 1956 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 1957 | ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); |
| 1958 | |
| 1959 | /* Remove the table from the hash table of id's */ |
| 1960 | |
| 1961 | HASH_DELETE(dict_table_t, id_hash, dict_sys->table_id_hash, |
| 1962 | ut_fold_ull(table->id), table); |
| 1963 | table->id = new_id; |
| 1964 | |
| 1965 | /* Add the table back to the hash table */ |
| 1966 | HASH_INSERT(dict_table_t, id_hash, dict_sys->table_id_hash, |
| 1967 | ut_fold_ull(table->id), table); |
| 1968 | } |
| 1969 | |
| 1970 | /**********************************************************************//** |
| 1971 | Removes a table object from the dictionary cache. */ |
| 1972 | void |
| 1973 | dict_table_remove_from_cache_low( |
| 1974 | /*=============================*/ |
| 1975 | dict_table_t* table, /*!< in, own: table */ |
| 1976 | ibool lru_evict) /*!< in: TRUE if table being evicted |
| 1977 | to make room in the table LRU list */ |
| 1978 | { |
| 1979 | dict_foreign_t* foreign; |
| 1980 | dict_index_t* index; |
| 1981 | |
| 1982 | ut_ad(table); |
| 1983 | ut_ad(dict_lru_validate()); |
| 1984 | ut_a(table->get_ref_count() == 0); |
| 1985 | ut_a(table->n_rec_locks == 0); |
| 1986 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 1987 | ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); |
| 1988 | |
| 1989 | /* Remove the foreign constraints from the cache */ |
| 1990 | std::for_each(table->foreign_set.begin(), table->foreign_set.end(), |
| 1991 | dict_foreign_remove_partial()); |
| 1992 | table->foreign_set.clear(); |
| 1993 | |
| 1994 | /* Reset table field in referencing constraints */ |
| 1995 | for (dict_foreign_set::iterator it = table->referenced_set.begin(); |
| 1996 | it != table->referenced_set.end(); |
| 1997 | ++it) { |
| 1998 | |
| 1999 | foreign = *it; |
| 2000 | foreign->referenced_table = NULL; |
| 2001 | foreign->referenced_index = NULL; |
| 2002 | } |
| 2003 | |
| 2004 | /* Remove the indexes from the cache */ |
| 2005 | |
| 2006 | for (index = UT_LIST_GET_LAST(table->indexes); |
| 2007 | index != NULL; |
| 2008 | index = UT_LIST_GET_LAST(table->indexes)) { |
| 2009 | |
| 2010 | dict_index_remove_from_cache_low(table, index, lru_evict); |
| 2011 | } |
| 2012 | |
| 2013 | /* Remove table from the hash tables of tables */ |
| 2014 | |
| 2015 | HASH_DELETE(dict_table_t, name_hash, dict_sys->table_hash, |
| 2016 | ut_fold_string(table->name.m_name), table); |
| 2017 | |
| 2018 | HASH_DELETE(dict_table_t, id_hash, dict_sys->table_id_hash, |
| 2019 | ut_fold_ull(table->id), table); |
| 2020 | |
| 2021 | /* Remove table from LRU or non-LRU list. */ |
| 2022 | if (table->can_be_evicted) { |
| 2023 | ut_ad(dict_lru_find_table(table)); |
| 2024 | UT_LIST_REMOVE(dict_sys->table_LRU, table); |
| 2025 | } else { |
| 2026 | ut_ad(dict_non_lru_find_table(table)); |
| 2027 | UT_LIST_REMOVE(dict_sys->table_non_LRU, table); |
| 2028 | } |
| 2029 | |
| 2030 | ut_ad(dict_lru_validate()); |
| 2031 | |
| 2032 | if (lru_evict && table->drop_aborted) { |
| 2033 | /* When evicting the table definition, |
| 2034 | drop the orphan indexes from the data dictionary |
| 2035 | and free the index pages. */ |
| 2036 | trx_t* trx = trx_create(); |
| 2037 | |
| 2038 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 2039 | ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X)); |
| 2040 | |
| 2041 | /* Mimic row_mysql_lock_data_dictionary(). */ |
| 2042 | trx->dict_operation_lock_mode = RW_X_LATCH; |
| 2043 | |
| 2044 | trx_set_dict_operation(trx, TRX_DICT_OP_INDEX); |
| 2045 | row_merge_drop_indexes_dict(trx, table->id); |
| 2046 | trx_commit_for_mysql(trx); |
| 2047 | trx->dict_operation_lock_mode = 0; |
| 2048 | trx_free(trx); |
| 2049 | } |
| 2050 | |
| 2051 | /* Free virtual column template if any */ |
| 2052 | if (table->vc_templ != NULL) { |
| 2053 | dict_free_vc_templ(table->vc_templ); |
| 2054 | UT_DELETE(table->vc_templ); |
| 2055 | } |
| 2056 | |
| 2057 | dict_mem_table_free(table); |
| 2058 | } |
| 2059 | |
| 2060 | /**********************************************************************//** |
| 2061 | Removes a table object from the dictionary cache. */ |
| 2062 | void |
| 2063 | dict_table_remove_from_cache( |
| 2064 | /*=========================*/ |
| 2065 | dict_table_t* table) /*!< in, own: table */ |
| 2066 | { |
| 2067 | dict_table_remove_from_cache_low(table, FALSE); |
| 2068 | } |
| 2069 | |
| 2070 | /****************************************************************//** |
| 2071 | If the given column name is reserved for InnoDB system columns, return |
| 2072 | TRUE. |
| 2073 | @return TRUE if name is reserved */ |
| 2074 | ibool |
| 2075 | dict_col_name_is_reserved( |
| 2076 | /*======================*/ |
| 2077 | const char* name) /*!< in: column name */ |
| 2078 | { |
| 2079 | static const char* reserved_names[] = { |
| 2080 | 		"DB_ROW_ID", "DB_TRX_ID", "DB_ROLL_PTR"
| 2081 | }; |
| 2082 | |
| 2083 | compile_time_assert(UT_ARR_SIZE(reserved_names) == DATA_N_SYS_COLS); |
| 2084 | |
| 2085 | for (ulint i = 0; i < UT_ARR_SIZE(reserved_names); i++) { |
| 2086 | if (innobase_strcasecmp(name, reserved_names[i]) == 0) { |
| 2087 | |
| 2088 | return(TRUE); |
| 2089 | } |
| 2090 | } |
| 2091 | |
| 2092 | return(FALSE); |
| 2093 | } |
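| | 
| | /* A minimal usage sketch (illustrative): the comparison above is
| | case-insensitive, so user columns cannot shadow the system columns in any
| | letter case.
| | 
| | 	ut_a(dict_col_name_is_reserved("db_row_id"));
| | 	ut_a(!dict_col_name_is_reserved("user_id"));
| | */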
| 2094 | |
| 2095 | /****************************************************************//** |
| 2096 | Return maximum size of the node pointer record. |
| 2097 | @return maximum size of the record in bytes */ |
| 2098 | ulint |
| 2099 | dict_index_node_ptr_max_size( |
| 2100 | /*=========================*/ |
| 2101 | const dict_index_t* index) /*!< in: index */ |
| 2102 | { |
| 2103 | ulint comp; |
| 2104 | ulint i; |
| 2105 | /* maximum possible storage size of a record */ |
| 2106 | ulint rec_max_size; |
| 2107 | |
| 2108 | if (dict_index_is_ibuf(index)) { |
| 2109 | /* cannot estimate accurately */ |
| 2110 | 		/* This is the universal index of the change buffer.
| 2111 | 		The maximum size of an entry is about twice the maximum
| 2112 | 		key length (index key + primary key to be inserted).
| 2113 | 		(The maximum key length is srv_page_size / 16 * 3 in
| 2114 | 		ha_innobase::max_supported_key_length(); MySQL's
| 2115 | 		MAX_KEY_LENGTH = 3072 caps what was historically 3500
| 2116 | 		for the 16K page size.)
| 2117 | 		For the universal index, the node pointer contains most of the
| 2118 | 		entry, and 512 bytes is enough for the ibuf columns and metadata. */
| 2119 | return(srv_page_size / 8 * 3 + 512); |
| 2120 | } |
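| | 
| | 	/* Worked example (illustrative, assuming the default 16KiB page):
| | 	srv_page_size / 8 * 3 + 512 = 6144 + 512 = 6656 bytes, i.e. roughly
| | 	twice the 3072-byte maximum key length plus room for the change
| | 	buffer metadata columns. */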
| 2121 | |
| 2122 | comp = dict_table_is_comp(index->table); |
| 2123 | |
| 2124 | /* Each record has page_no, length of page_no and header. */ |
| 2125 | rec_max_size = comp |
| 2126 | ? REC_NODE_PTR_SIZE + 1 + REC_N_NEW_EXTRA_BYTES |
| 2127 | : REC_NODE_PTR_SIZE + 2 + REC_N_OLD_EXTRA_BYTES; |
| 2128 | |
| 2129 | if (comp) { |
| 2130 | /* Include the "null" flags in the |
| 2131 | maximum possible record size. */ |
| 2132 | rec_max_size += UT_BITS_IN_BYTES(unsigned(index->n_nullable)); |
| 2133 | } else { |
| 2134 | /* For each column, include a 2-byte offset and a |
| 2135 | "null" flag. */ |
| 2136 | rec_max_size += 2 * unsigned(index->n_fields); |
| 2137 | } |
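| | 
| | 	/* For orientation (assuming the usual constants REC_NODE_PTR_SIZE = 4
| | 	and REC_N_NEW_EXTRA_BYTES = 5): a ROW_FORMAT=COMPACT node pointer
| | 	record therefore starts from 4 + 1 + 5 = 10 bytes, plus one null-flag
| | 	bit per nullable key column, before the key fields themselves are
| | 	added in the loop below. */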
| 2138 | |
| 2139 | /* Compute the maximum possible record size. */ |
| 2140 | for (i = 0; i < dict_index_get_n_unique_in_tree(index); i++) { |
| 2141 | const dict_field_t* field |
| 2142 | = dict_index_get_nth_field(index, i); |
| 2143 | const dict_col_t* col |
| 2144 | = dict_field_get_col(field); |
| 2145 | ulint field_max_size; |
| 2146 | ulint field_ext_max_size; |
| 2147 | |
| 2148 | /* Determine the maximum length of the index field. */ |
| 2149 | |
| 2150 | field_max_size = dict_col_get_fixed_size(col, comp); |
| 2151 | if (field_max_size) { |
| 2152 | /* dict_index_add_col() should guarantee this */ |
| 2153 | ut_ad(!field->prefix_len |
| 2154 | || field->fixed_len == field->prefix_len); |
| 2155 | /* Fixed lengths are not encoded |
| 2156 | in ROW_FORMAT=COMPACT. */ |
| 2157 | rec_max_size += field_max_size; |
| 2158 | continue; |
| 2159 | } |
| 2160 | |
| 2161 | field_max_size = dict_col_get_max_size(col); |
| 2162 | field_ext_max_size = field_max_size < 256 ? 1 : 2; |
| 2163 | |
| 2164 | if (field->prefix_len |
| 2165 | && field->prefix_len < field_max_size) { |
| 2166 | field_max_size = field->prefix_len; |
| 2167 | } |
| 2168 | |
| 2169 | if (comp) { |
| 2170 | /* Add the extra size for ROW_FORMAT=COMPACT. |
| 2171 | For ROW_FORMAT=REDUNDANT, these bytes were |
| 2172 | added to rec_max_size before this loop. */ |
| 2173 | rec_max_size += field_ext_max_size; |
| 2174 | } |
| 2175 | |
| 2176 | rec_max_size += field_max_size; |
| 2177 | } |
| 2178 | |
| 2179 | return(rec_max_size); |
| 2180 | } |
| 2181 | |
| 2182 | /****************************************************************//** |
| 2183 | If a record of this index might not fit on a single B-tree page, |
| 2184 | return TRUE. |
| 2185 | @return TRUE if the index record could become too big */ |
| 2186 | static |
| 2187 | ibool |
| 2188 | dict_index_too_big_for_tree( |
| 2189 | /*========================*/ |
| 2190 | const dict_table_t* table, /*!< in: table */ |
| 2191 | const dict_index_t* new_index, /*!< in: index */ |
| 2192 | bool strict) /*!< in: TRUE=report error if |
| 2193 | records could be too big to |
| 2194 | 					fit in a B-tree page */
| 2195 | { |
| 2196 | ulint comp; |
| 2197 | ulint i; |
| 2198 | /* maximum possible storage size of a record */ |
| 2199 | ulint rec_max_size; |
| 2200 | /* maximum allowed size of a record on a leaf page */ |
| 2201 | ulint page_rec_max; |
| 2202 | /* maximum allowed size of a node pointer record */ |
| 2203 | ulint page_ptr_max; |
| 2204 | |
| 2205 | 	/* An FTS index consists of auxiliary tables; these are excluded
| 2206 | 	from the index row size check */
| 2207 | if (new_index->type & DICT_FTS) { |
| 2208 | return(false); |
| 2209 | } |
| 2210 | |
| 2211 | DBUG_EXECUTE_IF( |
| 2212 | 		"ib_force_create_table",
| 2213 | return(FALSE);); |
| 2214 | |
| 2215 | comp = dict_table_is_comp(table); |
| 2216 | |
| 2217 | const page_size_t page_size(dict_tf_get_page_size(table->flags)); |
| 2218 | |
| 2219 | if (page_size.is_compressed() |
| 2220 | && page_size.physical() < srv_page_size) { |
| 2221 | /* On a compressed page, two records must fit in the |
| 2222 | uncompressed page modification log. On compressed pages |
| 2223 | with size.physical() == srv_page_size, |
| 2224 | this limit will never be reached. */ |
| 2225 | ut_ad(comp); |
| 2226 | /* The maximum allowed record size is the size of |
| 2227 | 		an empty page, minus a byte for recording the heap
| 2228 | number in the page modification log. The maximum |
| 2229 | allowed node pointer size is half that. */ |
| 2230 | page_rec_max = page_zip_empty_size(new_index->n_fields, |
| 2231 | page_size.physical()); |
| 2232 | if (page_rec_max) { |
| 2233 | page_rec_max--; |
| 2234 | } |
| 2235 | page_ptr_max = page_rec_max / 2; |
| 2236 | /* On a compressed page, there is a two-byte entry in |
| 2237 | the dense page directory for every record. But there |
| 2238 | is no record header. */ |
| 2239 | rec_max_size = 2; |
| 2240 | } else { |
| 2241 | /* The maximum allowed record size is half a B-tree |
| 2242 | 		page (16k for 64k page size). No additional sparse
| 2243 | page directory entry will be generated for the first |
| 2244 | few user records. */ |
| 2245 | page_rec_max = (comp || srv_page_size < UNIV_PAGE_SIZE_MAX) |
| 2246 | ? page_get_free_space_of_empty(comp) / 2 |
| 2247 | : REDUNDANT_REC_MAX_DATA_SIZE; |
| 2248 | |
| 2249 | page_ptr_max = page_rec_max; |
| 2250 | /* Each record has a header. */ |
| 2251 | rec_max_size = comp |
| 2252 | ? REC_N_NEW_EXTRA_BYTES |
| 2253 | : REC_N_OLD_EXTRA_BYTES; |
| 2254 | } |
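| | 
| | 	/* Rough figures for orientation (estimates, not computed here): with
| | 	the default 16KiB page, page_get_free_space_of_empty(comp) / 2 is on
| | 	the order of 8000 bytes, which is the familiar "row size too large"
| | 	limit reported to users; uncompressed 64KiB ROW_FORMAT=REDUNDANT
| | 	pages are instead capped by REDUNDANT_REC_MAX_DATA_SIZE above. */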
| 2255 | |
| 2256 | if (comp) { |
| 2257 | /* Include the "null" flags in the |
| 2258 | maximum possible record size. */ |
| 2259 | rec_max_size += UT_BITS_IN_BYTES( |
| 2260 | unsigned(new_index->n_nullable)); |
| 2261 | } else { |
| 2262 | /* For each column, include a 2-byte offset and a |
| 2263 | "null" flag. The 1-byte format is only used in short |
| 2264 | records that do not contain externally stored columns. |
| 2265 | Such records could never exceed the page limit, even |
| 2266 | when using the 2-byte format. */ |
| 2267 | rec_max_size += 2 * unsigned(new_index->n_fields); |
| 2268 | } |
| 2269 | |
| 2270 | /* Compute the maximum possible record size. */ |
| 2271 | for (i = 0; i < new_index->n_fields; i++) { |
| 2272 | const dict_field_t* field |
| 2273 | = dict_index_get_nth_field(new_index, i); |
| 2274 | const dict_col_t* col |
| 2275 | = dict_field_get_col(field); |
| 2276 | ulint field_max_size; |
| 2277 | ulint field_ext_max_size; |
| 2278 | |
| 2279 | /* In dtuple_convert_big_rec(), variable-length columns |
| 2280 | that are longer than BTR_EXTERN_LOCAL_STORED_MAX_SIZE |
| 2281 | may be chosen for external storage. |
| 2282 | |
| 2283 | Fixed-length columns, and all columns of secondary |
| 2284 | index records are always stored inline. */ |
| 2285 | |
| 2286 | /* Determine the maximum length of the index field. |
| 2287 | The field_ext_max_size should be computed as the worst |
| 2288 | case in rec_get_converted_size_comp() for |
| 2289 | REC_STATUS_ORDINARY records. */ |
| 2290 | |
| 2291 | field_max_size = dict_col_get_fixed_size(col, comp); |
| 2292 | if (field_max_size && field->fixed_len != 0) { |
| 2293 | /* dict_index_add_col() should guarantee this */ |
| 2294 | ut_ad(!field->prefix_len |
| 2295 | || field->fixed_len == field->prefix_len); |
| 2296 | /* Fixed lengths are not encoded |
| 2297 | in ROW_FORMAT=COMPACT. */ |
| 2298 | field_ext_max_size = 0; |
| 2299 | goto add_field_size; |
| 2300 | } |
| 2301 | |
| 2302 | field_max_size = dict_col_get_max_size(col); |
| 2303 | field_ext_max_size = field_max_size < 256 ? 1 : 2; |
| 2304 | |
| 2305 | if (field->prefix_len) { |
| 2306 | if (field->prefix_len < field_max_size) { |
| 2307 | field_max_size = field->prefix_len; |
| 2308 | } |
| 2309 | } else if (field_max_size > BTR_EXTERN_LOCAL_STORED_MAX_SIZE |
| 2310 | && dict_index_is_clust(new_index)) { |
| 2311 | |
| 2312 | /* In the worst case, we have a locally stored |
| 2313 | column of BTR_EXTERN_LOCAL_STORED_MAX_SIZE bytes. |
| 2314 | The length can be stored in one byte. If the |
| 2315 | column were stored externally, the lengths in |
| 2316 | the clustered index page would be |
| 2317 | BTR_EXTERN_FIELD_REF_SIZE and 2. */ |
| 2318 | field_max_size = BTR_EXTERN_LOCAL_STORED_MAX_SIZE; |
| 2319 | field_ext_max_size = 1; |
| 2320 | } |
| 2321 | |
| 2322 | if (comp) { |
| 2323 | /* Add the extra size for ROW_FORMAT=COMPACT. |
| 2324 | For ROW_FORMAT=REDUNDANT, these bytes were |
| 2325 | added to rec_max_size before this loop. */ |
| 2326 | rec_max_size += field_ext_max_size; |
| 2327 | } |
| 2328 | add_field_size: |
| 2329 | rec_max_size += field_max_size; |
| 2330 | |
| 2331 | /* Check the size limit on leaf pages. */ |
| 2332 | if (rec_max_size >= page_rec_max) { |
| 2333 | ib::error_or_warn(strict) |
| 2334 | << "Cannot add field " << field->name |
| 2335 | << " in table " << table->name |
| 2336 | << " because after adding it, the row size is " |
| 2337 | << rec_max_size |
| 2338 | << " which is greater than maximum allowed" |
| 2339 | " size (" << page_rec_max |
| 2340 | 				<< ") for a record on index leaf page.";
| 2341 | |
| 2342 | return(TRUE); |
| 2343 | } |
| 2344 | |
| 2345 | /* Check the size limit on non-leaf pages. Records |
| 2346 | stored in non-leaf B-tree pages consist of the unique |
| 2347 | columns of the record (the key columns of the B-tree) |
| 2348 | and a node pointer field. When we have processed the |
| 2349 | unique columns, rec_max_size equals the size of the |
| 2350 | node pointer record minus the node pointer column. */ |
| 2351 | if (i + 1 == dict_index_get_n_unique_in_tree(new_index) |
| 2352 | && rec_max_size + REC_NODE_PTR_SIZE >= page_ptr_max) { |
| 2353 | |
| 2354 | return(TRUE); |
| 2355 | } |
| 2356 | } |
| 2357 | |
| 2358 | return(FALSE); |
| 2359 | } |
| 2360 | |
| 2361 | /** Clears the virtual column's index list before the index is
| 2362 | freed.
| 2363 | @param[in] index Index being freed */ |
| 2364 | void dict_index_remove_from_v_col_list(dict_index_t* index) |
| 2365 | { |
| 2366 | /* Index is not completely formed */ |
| 2367 | if (!index->cached) { |
| 2368 | return; |
| 2369 | } |
| 2370 | if (dict_index_has_virtual(index)) { |
| 2371 | const dict_col_t* col; |
| 2372 | const dict_v_col_t* vcol; |
| 2373 | |
| 2374 | for (ulint i = 0; i < dict_index_get_n_fields(index); i++) { |
| 2375 | col = dict_index_get_nth_col(index, i); |
| 2376 | if (col->is_virtual()) { |
| 2377 | vcol = reinterpret_cast<const dict_v_col_t*>( |
| 2378 | col); |
| 2379 | 				/* This can be NULL when a virtual column
| 2380 | 				and an index on it are added in the same
| 2381 | 				ALTER TABLE; then its indexes need not be tracked. */
| 2382 | if (vcol->v_indexes == NULL) { |
| 2383 | continue; |
| 2384 | } |
| 2385 | dict_v_idx_list::iterator it; |
| 2386 | for (it = vcol->v_indexes->begin(); |
| 2387 | it != vcol->v_indexes->end(); ++it) { |
| 2388 | dict_v_idx_t v_index = *it; |
| 2389 | if (v_index.index == index) { |
| 2390 | vcol->v_indexes->erase(it); |
| 2391 | break; |
| 2392 | } |
| 2393 | } |
| 2394 | } |
| 2395 | } |
| 2396 | } |
| 2397 | } |
| 2398 | |
| 2399 | /** Adds an index to the dictionary cache, possibly indexing a newly
| 2400 | added virtual column.
| 2401 | @param[in] index index; NOTE! The index memory |
| 2402 | object is freed in this function! |
| 2403 | @param[in] page_no root page number of the index |
| 2404 | @param[in] strict TRUE=refuse to create the index |
| 2405 | if records could be too big to fit in |
| 2406 | 			a B-tree page
| 2407 | @param[out] err DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION |
| 2408 | @param[in]	add_v	new virtual columns being added along with
| 2409 | 			this index
| 2410 | @return the added index |
| 2411 | @retval NULL on error */ |
| 2412 | dict_index_t* |
| 2413 | dict_index_add_to_cache( |
| 2414 | dict_index_t* index, |
| 2415 | ulint page_no, |
| 2416 | bool strict, |
| 2417 | dberr_t* err, |
| 2418 | const dict_add_v_col_t* add_v) |
| 2419 | { |
| 2420 | dict_index_t* new_index; |
| 2421 | ulint n_ord; |
| 2422 | ulint i; |
| 2423 | |
| 2424 | ut_ad(index); |
| 2425 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 2426 | ut_ad(index->n_def == index->n_fields); |
| 2427 | ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); |
| 2428 | ut_ad(!dict_index_is_online_ddl(index)); |
| 2429 | ut_ad(!dict_index_is_ibuf(index)); |
| 2430 | |
| 2431 | ut_d(mem_heap_validate(index->heap)); |
| 2432 | ut_a(!dict_index_is_clust(index) |
| 2433 | || UT_LIST_GET_LEN(index->table->indexes) == 0); |
| 2434 | ut_ad(dict_index_is_clust(index) || !index->table->no_rollback()); |
| 2435 | |
| 2436 | if (!dict_index_find_cols(index, add_v)) { |
| 2437 | |
| 2438 | dict_mem_index_free(index); |
| 2439 | if (err) *err = DB_CORRUPTION; |
| 2440 | return NULL; |
| 2441 | } |
| 2442 | |
| 2443 | /* Build the cache internal representation of the index, |
| 2444 | containing also the added system fields */ |
| 2445 | |
| 2446 | if (dict_index_is_clust(index)) { |
| 2447 | new_index = dict_index_build_internal_clust(index); |
| 2448 | } else { |
| 2449 | new_index = (index->type & DICT_FTS) |
| 2450 | ? dict_index_build_internal_fts(index) |
| 2451 | : dict_index_build_internal_non_clust(index); |
| 2452 | new_index->n_core_null_bytes = UT_BITS_IN_BYTES( |
| 2453 | unsigned(new_index->n_nullable)); |
| 2454 | } |
| 2455 | |
| 2456 | /* Set the n_fields value in new_index to the actual defined |
| 2457 | number of fields in the cache internal representation */ |
| 2458 | |
| 2459 | new_index->n_fields = new_index->n_def; |
| 2460 | new_index->trx_id = index->trx_id; |
| 2461 | new_index->set_committed(index->is_committed()); |
| 2462 | new_index->nulls_equal = index->nulls_equal; |
| 2463 | #ifdef MYSQL_INDEX_DISABLE_AHI |
| 2464 | new_index->disable_ahi = index->disable_ahi; |
| 2465 | #endif |
| 2466 | |
| 2467 | if (dict_index_too_big_for_tree(index->table, new_index, strict)) { |
| 2468 | |
| 2469 | if (strict) { |
| 2470 | dict_mem_index_free(new_index); |
| 2471 | dict_mem_index_free(index); |
| 2472 | if (err) *err = DB_TOO_BIG_RECORD; |
| 2473 | return NULL; |
| 2474 | } else if (current_thd != NULL) { |
| 2475 | 			/* Avoid printing the warning
| 2476 | 			during recovery. */
| 2477 | ib_warn_row_too_big(index->table); |
| 2478 | } |
| 2479 | } |
| 2480 | |
| 2481 | n_ord = new_index->n_uniq; |
| 2482 | /* Flag the ordering columns and also set column max_prefix */ |
| 2483 | |
| 2484 | for (i = 0; i < n_ord; i++) { |
| 2485 | const dict_field_t* field |
| 2486 | = dict_index_get_nth_field(new_index, i); |
| 2487 | |
| 2488 | 		/* If this column becomes an ordering column for the
| 2489 | 		first time, flag it and record its prefix length. */
| 2490 | 		if (field->col->ord_part == 0) {
| 2491 | field->col->max_prefix = field->prefix_len; |
| 2492 | field->col->ord_part = 1; |
| 2493 | } else if (field->prefix_len == 0) { |
| 2494 | /* Set the max_prefix for a column to 0 if |
| 2495 | its prefix length is 0 (for this index) |
| 2496 | even if it was a part of any other index |
| 2497 | with some prefix length. */ |
| 2498 | field->col->max_prefix = 0; |
| 2499 | } else if (field->col->max_prefix != 0 |
| 2500 | && field->prefix_len |
| 2501 | > field->col->max_prefix) { |
| 2502 | /* Set the max_prefix value based on the |
| 2503 | prefix_len. */ |
| 2504 | field->col->max_prefix = field->prefix_len; |
| 2505 | } |
| 2506 | ut_ad(field->col->ord_part == 1); |
| 2507 | } |
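| | 
| | 	/* Example of the rule above (hypothetical column): if a column first
| | 	becomes an ordering field with prefix_len = 10, its max_prefix becomes
| | 	10; if a later index then orders on the full column (prefix_len = 0),
| | 	max_prefix is reset to 0 and stays 0, because a full-column ordering
| | 	field means the complete value must be available. */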
| 2508 | |
| 2509 | new_index->stat_n_diff_key_vals = |
| 2510 | static_cast<ib_uint64_t*>(mem_heap_zalloc( |
| 2511 | new_index->heap, |
| 2512 | dict_index_get_n_unique(new_index) |
| 2513 | * sizeof(*new_index->stat_n_diff_key_vals))); |
| 2514 | |
| 2515 | new_index->stat_n_sample_sizes = |
| 2516 | static_cast<ib_uint64_t*>(mem_heap_zalloc( |
| 2517 | new_index->heap, |
| 2518 | dict_index_get_n_unique(new_index) |
| 2519 | * sizeof(*new_index->stat_n_sample_sizes))); |
| 2520 | |
| 2521 | new_index->stat_n_non_null_key_vals = |
| 2522 | static_cast<ib_uint64_t*>(mem_heap_zalloc( |
| 2523 | new_index->heap, |
| 2524 | dict_index_get_n_unique(new_index) |
| 2525 | * sizeof(*new_index->stat_n_non_null_key_vals))); |
| 2526 | |
| 2527 | new_index->stat_index_size = 1; |
| 2528 | new_index->stat_n_leaf_pages = 1; |
| 2529 | |
| 2530 | new_index->stat_defrag_n_pages_freed = 0; |
| 2531 | new_index->stat_defrag_n_page_split = 0; |
| 2532 | |
| 2533 | new_index->stat_defrag_sample_next_slot = 0; |
| 2534 | memset(&new_index->stat_defrag_data_size_sample, |
| 2535 | 0x0, sizeof(ulint) * STAT_DEFRAG_DATA_SIZE_N_SAMPLE); |
| 2536 | |
| 2537 | /* Add the new index as the last index for the table */ |
| 2538 | |
| 2539 | UT_LIST_ADD_LAST(new_index->table->indexes, new_index); |
| 2540 | #ifdef BTR_CUR_ADAPT |
| 2541 | new_index->search_info = btr_search_info_create(new_index->heap); |
| 2542 | #endif /* BTR_CUR_ADAPT */ |
| 2543 | |
| 2544 | new_index->page = unsigned(page_no); |
| 2545 | rw_lock_create(index_tree_rw_lock_key, &new_index->lock, |
| 2546 | SYNC_INDEX_TREE); |
| 2547 | |
| 2548 | new_index->n_core_fields = new_index->n_fields; |
| 2549 | |
| 2550 | dict_mem_index_free(index); |
| 2551 | if (err) *err = DB_SUCCESS; |
| 2552 | return new_index; |
| 2553 | } |
| 2554 | |
| 2555 | /**********************************************************************//** |
| 2556 | Removes an index from the dictionary cache. */ |
| 2557 | static |
| 2558 | void |
| 2559 | dict_index_remove_from_cache_low( |
| 2560 | /*=============================*/ |
| 2561 | dict_table_t* table, /*!< in/out: table */ |
| 2562 | dict_index_t* index, /*!< in, own: index */ |
| 2563 | ibool lru_evict) /*!< in: TRUE if index being evicted |
| 2564 | to make room in the table LRU list */ |
| 2565 | { |
| 2566 | ut_ad(table && index); |
| 2567 | ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); |
| 2568 | ut_ad(index->magic_n == DICT_INDEX_MAGIC_N); |
| 2569 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 2570 | |
| 2571 | /* No need to acquire the dict_index_t::lock here because |
| 2572 | there can't be any active operations on this index (or table). */ |
| 2573 | |
| 2574 | if (index->online_log) { |
| 2575 | ut_ad(index->online_status == ONLINE_INDEX_CREATION); |
| 2576 | row_log_free(index->online_log); |
| 2577 | } |
| 2578 | |
| 2579 | #ifdef BTR_CUR_HASH_ADAPT |
| 2580 | 	/* We always create search info, whether or not the adaptive
| 2581 | 	hash index is enabled. */
| 2582 | btr_search_t* info = btr_search_get_info(index); |
| 2583 | ulint retries = 0; |
| 2584 | ut_ad(info); |
| 2585 | |
| 2586 | 	/* We are not allowed to free the in-memory index struct
| 2587 | 	dict_index_t until all entries in the adaptive hash index
| 2588 | 	that point to any of the pages belonging to this B-tree index
| 2589 | 	have been dropped, because dropping those entries requires
| 2590 | 	access to the dict_index_t struct. To avoid such a scenario,
| 2591 | 	we keep a count of the number of such pages in the search_info
| 2592 | 	and only free the dict_index_t struct when this count drops to
| 2593 | 	zero. See also: dict_table_can_be_evicted() */
| 2594 | |
| 2595 | do { |
| 2596 | ulint ref_count = btr_search_info_get_ref_count(info, index); |
| 2597 | |
| 2598 | if (ref_count == 0) { |
| 2599 | break; |
| 2600 | } |
| 2601 | |
| 2602 | /* Sleep for 10ms before trying again. */ |
| 2603 | os_thread_sleep(10000); |
| 2604 | ++retries; |
| 2605 | |
| 2606 | if (retries % 500 == 0) { |
| 2607 | /* No luck after 5 seconds of wait. */ |
| 2608 | ib::error() << "Waited for " << retries / 100 |
| 2609 | << " secs for hash index" |
| 2610 | " ref_count (" << ref_count << ") to drop to 0." |
| 2611 | " index: " << index->name |
| 2612 | << " table: " << table->name; |
| 2613 | } |
| 2614 | |
| 2615 | /* To avoid a hang here we commit suicide if the |
| 2616 | ref_count doesn't drop to zero in 600 seconds. */ |
| 2617 | ut_a(retries < 60000); |
| 2618 | } while (srv_shutdown_state == SRV_SHUTDOWN_NONE || !lru_evict); |
| 2619 | #endif /* BTR_CUR_HASH_ADAPT */ |
| 2620 | |
| 2621 | rw_lock_free(&index->lock); |
| 2622 | |
| 2623 | /* The index is being dropped, remove any compression stats for it. */ |
| 2624 | if (!lru_evict && DICT_TF_GET_ZIP_SSIZE(index->table->flags)) { |
| 2625 | mutex_enter(&page_zip_stat_per_index_mutex); |
| 2626 | page_zip_stat_per_index.erase(index->id); |
| 2627 | mutex_exit(&page_zip_stat_per_index_mutex); |
| 2628 | } |
| 2629 | |
| 2630 | /* Remove the index from the list of indexes of the table */ |
| 2631 | UT_LIST_REMOVE(table->indexes, index); |
| 2632 | |
| 2633 | /* Remove the index from affected virtual column index list */ |
| 2634 | if (dict_index_has_virtual(index)) { |
| 2635 | const dict_col_t* col; |
| 2636 | const dict_v_col_t* vcol; |
| 2637 | |
| 2638 | for (ulint i = 0; i < dict_index_get_n_fields(index); i++) { |
| 2639 | col = dict_index_get_nth_col(index, i); |
| 2640 | if (col->is_virtual()) { |
| 2641 | vcol = reinterpret_cast<const dict_v_col_t*>( |
| 2642 | col); |
| 2643 | |
| 2644 | 				/* This can be NULL when a virtual column
| 2645 | 				and an index on it are added in the same
| 2646 | 				ALTER TABLE; then its indexes need not be tracked. */
| 2647 | if (vcol->v_indexes == NULL) { |
| 2648 | continue; |
| 2649 | } |
| 2650 | |
| 2651 | dict_v_idx_list::iterator it; |
| 2652 | |
| 2653 | for (it = vcol->v_indexes->begin(); |
| 2654 | it != vcol->v_indexes->end(); ++it) { |
| 2655 | dict_v_idx_t v_index = *it; |
| 2656 | if (v_index.index == index) { |
| 2657 | vcol->v_indexes->erase(it); |
| 2658 | break; |
| 2659 | } |
| 2660 | } |
| 2661 | } |
| 2662 | |
| 2663 | } |
| 2664 | } |
| 2665 | |
| 2666 | dict_mem_index_free(index); |
| 2667 | } |
| 2668 | |
| 2669 | /**********************************************************************//** |
| 2670 | Removes an index from the dictionary cache. */ |
| 2671 | void |
| 2672 | dict_index_remove_from_cache( |
| 2673 | /*=========================*/ |
| 2674 | dict_table_t* table, /*!< in/out: table */ |
| 2675 | dict_index_t* index) /*!< in, own: index */ |
| 2676 | { |
| 2677 | dict_index_remove_from_cache_low(table, index, FALSE); |
| 2678 | } |
| 2679 | |
| 2680 | /** Tries to find the columns for the index fields and sets the col field
| 2681 | of each index field. The table is taken from index->table.
| 2682 | @param[in,out]	index	index
| 2684 | @param[in] add_v new virtual columns added along with an add index call |
| 2685 | @return whether the column names were found */ |
| 2686 | static |
| 2687 | bool |
| 2688 | dict_index_find_cols( |
| 2689 | dict_index_t* index, |
| 2690 | const dict_add_v_col_t* add_v) |
| 2691 | { |
| 2692 | std::vector<ulint, ut_allocator<ulint> > col_added; |
| 2693 | std::vector<ulint, ut_allocator<ulint> > v_col_added; |
| 2694 | |
| 2695 | const dict_table_t* table = index->table; |
| 2696 | ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); |
| 2697 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 2698 | |
| 2699 | for (ulint i = 0; i < index->n_fields; i++) { |
| 2700 | ulint j; |
| 2701 | dict_field_t* field = dict_index_get_nth_field(index, i); |
| 2702 | |
| 2703 | for (j = 0; j < table->n_cols; j++) { |
| 2704 | if (!innobase_strcasecmp(dict_table_get_col_name(table, j), |
| 2705 | field->name)) { |
| 2706 | |
| 2707 | 			/* Check if the same column is being assigned again,
| 2708 | 			which suggests that the column has a duplicate name. */
| 2709 | bool exists = |
| 2710 | std::find(col_added.begin(), |
| 2711 | col_added.end(), j) |
| 2712 | != col_added.end(); |
| 2713 | |
| 2714 | if (exists) { |
| 2715 | /* Duplicate column found. */ |
| 2716 | goto dup_err; |
| 2717 | } |
| 2718 | |
| 2719 | field->col = dict_table_get_nth_col(table, j); |
| 2720 | |
| 2721 | col_added.push_back(j); |
| 2722 | |
| 2723 | goto found; |
| 2724 | } |
| 2725 | } |
| 2726 | |
| 2727 | /* Let's check if it is a virtual column */ |
| 2728 | for (j = 0; j < table->n_v_cols; j++) { |
| 2729 | if (!strcmp(dict_table_get_v_col_name(table, j), |
| 2730 | field->name)) { |
| 2731 | |
| 2732 | 				/* Check if the same column is being assigned again,
| 2733 | 				which suggests that the column has a duplicate name. */
| 2734 | bool exists = |
| 2735 | std::find(v_col_added.begin(), |
| 2736 | v_col_added.end(), j) |
| 2737 | != v_col_added.end(); |
| 2738 | |
| 2739 | if (exists) { |
| 2740 | /* Duplicate column found. */ |
| 2741 | break; |
| 2742 | } |
| 2743 | |
| 2744 | field->col = reinterpret_cast<dict_col_t*>( |
| 2745 | dict_table_get_nth_v_col(table, j)); |
| 2746 | |
| 2747 | v_col_added.push_back(j); |
| 2748 | |
| 2749 | goto found; |
| 2750 | } |
| 2751 | } |
| 2752 | |
| 2753 | if (add_v) { |
| 2754 | for (j = 0; j < add_v->n_v_col; j++) { |
| 2755 | if (!strcmp(add_v->v_col_name[j], |
| 2756 | field->name)) { |
| 2757 | field->col = const_cast<dict_col_t*>( |
| 2758 | &add_v->v_col[j].m_col); |
| 2759 | goto found; |
| 2760 | } |
| 2761 | } |
| 2762 | } |
| 2763 | |
| 2764 | dup_err: |
| 2765 | #ifdef UNIV_DEBUG |
| 2766 | /* It is an error not to find a matching column. */ |
| 2767 | ib::error() << "No matching column for " << field->name |
| 2768 | << " in index " << index->name |
| 2769 | << " of table " << table->name; |
| 2770 | #endif /* UNIV_DEBUG */ |
| 2771 | return(FALSE); |
| 2772 | |
| 2773 | found: |
| 2774 | ; |
| 2775 | } |
| 2776 | |
| 2777 | return(TRUE); |
| 2778 | } |
| 2779 | |
| 2780 | /*******************************************************************//** |
| 2781 | Adds a column to index. */ |
| 2782 | void |
| 2783 | dict_index_add_col( |
| 2784 | /*===============*/ |
| 2785 | dict_index_t* index, /*!< in/out: index */ |
| 2786 | const dict_table_t* table, /*!< in: table */ |
| 2787 | dict_col_t* col, /*!< in: column */ |
| 2788 | ulint prefix_len) /*!< in: column prefix length */ |
| 2789 | { |
| 2790 | dict_field_t* field; |
| 2791 | const char* col_name; |
| 2792 | |
| 2793 | if (col->is_virtual()) { |
| 2794 | dict_v_col_t* v_col = reinterpret_cast<dict_v_col_t*>(col); |
| 2795 | |
| 2796 | /* When v_col->v_indexes==NULL, |
| 2797 | ha_innobase::commit_inplace_alter_table(commit=true) |
| 2798 | will evict and reload the table definition, and |
| 2799 | v_col->v_indexes will not be NULL for the new table. */ |
| 2800 | if (v_col->v_indexes != NULL) { |
| 2801 | /* Register the index with the virtual column index |
| 2802 | list */ |
| 2803 | v_col->v_indexes->push_back( |
| 2804 | dict_v_idx_t(index, index->n_def)); |
| 2805 | } |
| 2806 | |
| 2807 | col_name = dict_table_get_v_col_name_mysql( |
| 2808 | table, dict_col_get_no(col)); |
| 2809 | } else { |
| 2810 | col_name = dict_table_get_col_name(table, dict_col_get_no(col)); |
| 2811 | } |
| 2812 | |
| 2813 | dict_mem_index_add_field(index, col_name, prefix_len); |
| 2814 | |
| 2815 | field = dict_index_get_nth_field(index, unsigned(index->n_def) - 1); |
| 2816 | |
| 2817 | field->col = col; |
| 2818 | field->fixed_len = static_cast<unsigned int>( |
| 2819 | dict_col_get_fixed_size( |
| 2820 | col, dict_table_is_comp(table))); |
| 2821 | |
| 2822 | if (prefix_len && field->fixed_len > prefix_len) { |
| 2823 | field->fixed_len = (unsigned int) prefix_len; |
| 2824 | } |
| 2825 | |
| 2826 | /* Long fixed-length fields that need external storage are treated as |
| 2827 | variable-length fields, so that the extern flag can be embedded in |
| 2828 | the length word. */ |
| 2829 | |
| 2830 | if (field->fixed_len > DICT_MAX_FIXED_COL_LEN) { |
| 2831 | field->fixed_len = 0; |
| 2832 | } |
| 2833 | |
| 2834 | /* The comparison limit above must be constant. If it were |
| 2835 | changed, the disk format of some fixed-length columns would |
| 2836 | change, which would be a disaster. */ |
| 2837 | compile_time_assert(DICT_MAX_FIXED_COL_LEN == 768); |
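| | 
| | 	/* For example (an illustration, not checked here): a CHAR(255) column
| | 	in a 4-byte character set has a 1020-byte fixed size in
| | 	ROW_FORMAT=REDUNDANT, which exceeds DICT_MAX_FIXED_COL_LEN, so such a
| | 	field is handled as variable-length above. */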
| 2838 | |
| 2839 | if (!(col->prtype & DATA_NOT_NULL)) { |
| 2840 | index->n_nullable++; |
| 2841 | } |
| 2842 | } |
| 2843 | |
| 2844 | /*******************************************************************//** |
| 2845 | Copies fields contained in index2 to index1. */ |
| 2846 | static |
| 2847 | void |
| 2848 | dict_index_copy( |
| 2849 | /*============*/ |
| 2850 | dict_index_t* index1, /*!< in: index to copy to */ |
| 2851 | const dict_index_t* index2, /*!< in: index to copy from */ |
| 2852 | ulint start, /*!< in: first position to copy */ |
| 2853 | ulint end) /*!< in: last position to copy */ |
| 2854 | { |
| 2855 | dict_field_t* field; |
| 2856 | ulint i; |
| 2857 | |
| 2858 | /* Copy fields contained in index2 */ |
| 2859 | |
| 2860 | for (i = start; i < end; i++) { |
| 2861 | |
| 2862 | field = dict_index_get_nth_field(index2, i); |
| 2863 | |
| 2864 | dict_index_add_col(index1, index2->table, field->col, |
| 2865 | field->prefix_len); |
| 2866 | } |
| 2867 | } |
| 2868 | |
| 2869 | /*******************************************************************//** |
| 2870 | Copies types of fields contained in index to tuple. */ |
| 2871 | void |
| 2872 | dict_index_copy_types( |
| 2873 | /*==================*/ |
| 2874 | dtuple_t* tuple, /*!< in/out: data tuple */ |
| 2875 | const dict_index_t* index, /*!< in: index */ |
| 2876 | ulint n_fields) /*!< in: number of |
| 2877 | field types to copy */ |
| 2878 | { |
| 2879 | ulint i; |
| 2880 | |
| 2881 | if (dict_index_is_ibuf(index)) { |
| 2882 | dtuple_set_types_binary(tuple, n_fields); |
| 2883 | |
| 2884 | return; |
| 2885 | } |
| 2886 | |
| 2887 | for (i = 0; i < n_fields; i++) { |
| 2888 | const dict_field_t* ifield; |
| 2889 | dtype_t* dfield_type; |
| 2890 | |
| 2891 | ifield = dict_index_get_nth_field(index, i); |
| 2892 | dfield_type = dfield_get_type(dtuple_get_nth_field(tuple, i)); |
| 2893 | dict_col_copy_type(dict_field_get_col(ifield), dfield_type); |
| 2894 | if (dict_index_is_spatial(index) |
| 2895 | && DATA_GEOMETRY_MTYPE(dfield_type->mtype)) { |
| 2896 | dfield_type->prtype |= DATA_GIS_MBR; |
| 2897 | } |
| 2898 | } |
| 2899 | } |
| 2900 | |
| 2901 | /** Copies types of virtual columns contained in table to tuple and sets all |
| 2902 | fields of the tuple to the SQL NULL value. This function should |
| 2903 | be called right after dtuple_create(). |
| 2904 | @param[in,out] tuple data tuple |
| 2905 | @param[in] table table |
| 2906 | */ |
| 2907 | void |
| 2908 | dict_table_copy_v_types( |
| 2909 | dtuple_t* tuple, |
| 2910 | const dict_table_t* table) |
| 2911 | { |
| 2912 | 	/* The tuple could have more virtual columns than the table has,
| 2913 | 	if we are calling this while creating an index together with
| 2914 | 	adding virtual columns */
| 2915 | ulint n_fields = ut_min(dtuple_get_n_v_fields(tuple), |
| 2916 | static_cast<ulint>(table->n_v_def)); |
| 2917 | |
| 2918 | for (ulint i = 0; i < n_fields; i++) { |
| 2919 | |
| 2920 | dfield_t* dfield = dtuple_get_nth_v_field(tuple, i); |
| 2921 | dtype_t* dtype = dfield_get_type(dfield); |
| 2922 | |
| 2923 | dfield_set_null(dfield); |
| 2924 | dict_col_copy_type( |
| 2925 | &(dict_table_get_nth_v_col(table, i)->m_col), |
| 2926 | dtype); |
| 2927 | } |
| 2928 | } |
| 2929 | /*******************************************************************//** |
| 2930 | Copies types of columns contained in table to tuple and sets all |
| 2931 | fields of the tuple to the SQL NULL value. This function should |
| 2932 | be called right after dtuple_create(). */ |
| 2933 | void |
| 2934 | dict_table_copy_types( |
| 2935 | /*==================*/ |
| 2936 | dtuple_t* tuple, /*!< in/out: data tuple */ |
| 2937 | const dict_table_t* table) /*!< in: table */ |
| 2938 | { |
| 2939 | ulint i; |
| 2940 | |
| 2941 | for (i = 0; i < dtuple_get_n_fields(tuple); i++) { |
| 2942 | |
| 2943 | dfield_t* dfield = dtuple_get_nth_field(tuple, i); |
| 2944 | dtype_t* dtype = dfield_get_type(dfield); |
| 2945 | |
| 2946 | dfield_set_null(dfield); |
| 2947 | dict_col_copy_type(dict_table_get_nth_col(table, i), dtype); |
| 2948 | } |
| 2949 | |
| 2950 | dict_table_copy_v_types(tuple, table); |
| 2951 | } |
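| | 
| | /* A minimal usage sketch (illustrative; "heap" is assumed to be an existing
| | mem_heap_t*):
| | 
| | 	dtuple_t*	row = dtuple_create(heap, dict_table_get_n_cols(table));
| | 	dict_table_copy_types(row, table);
| | 
| | after which every field of the tuple carries its column type and is set to
| | SQL NULL, as described above. */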
| 2952 | |
| 2953 | /******************************************************************** |
| 2954 | Wait until all the background threads of the given table have exited, i.e., |
| 2955 | bg_threads == 0. Note: bg_threads_mutex must be reserved when |
| 2956 | calling this. */ |
| 2957 | void |
| 2958 | dict_table_wait_for_bg_threads_to_exit( |
| 2959 | /*===================================*/ |
| 2960 | 	dict_table_t*	table,	/*!< in: table */
| 2961 | 	ulint		delay)	/*!< in: time in microseconds to wait between
| 2962 | checks of bg_threads. */ |
| 2963 | { |
| 2964 | fts_t* fts = table->fts; |
| 2965 | |
| 2966 | ut_ad(mutex_own(&fts->bg_threads_mutex)); |
| 2967 | |
| 2968 | while (fts->bg_threads > 0) { |
| 2969 | mutex_exit(&fts->bg_threads_mutex); |
| 2970 | |
| 2971 | os_thread_sleep(delay); |
| 2972 | |
| 2973 | mutex_enter(&fts->bg_threads_mutex); |
| 2974 | } |
| 2975 | } |
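| | 
| | /* A minimal caller sketch (illustrative; the delay value is arbitrary):
| | 
| | 	mutex_enter(&table->fts->bg_threads_mutex);
| | 	dict_table_wait_for_bg_threads_to_exit(table, 250000);
| | 	mutex_exit(&table->fts->bg_threads_mutex);
| | 
| | As the comment above says, bg_threads_mutex must already be held on entry;
| | the loop releases and re-acquires it while waiting. */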
| 2976 | |
| 2977 | /*******************************************************************//** |
| 2978 | Builds the internal dictionary cache representation for a clustered |
| 2979 | index, containing also system fields not defined by the user. |
| 2980 | @return own: the internal representation of the clustered index */ |
| 2981 | static |
| 2982 | dict_index_t* |
| 2983 | dict_index_build_internal_clust( |
| 2984 | /*============================*/ |
| 2985 | dict_index_t* index) /*!< in: user representation of |
| 2986 | a clustered index */ |
| 2987 | { |
| 2988 | dict_table_t* table = index->table; |
| 2989 | dict_index_t* new_index; |
| 2990 | dict_field_t* field; |
| 2991 | ulint trx_id_pos; |
| 2992 | ulint i; |
| 2993 | ibool* indexed; |
| 2994 | |
| 2995 | ut_ad(dict_index_is_clust(index)); |
| 2996 | ut_ad(!dict_index_is_ibuf(index)); |
| 2997 | |
| 2998 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 2999 | |
| 3000 | /* Create a new index object with certainly enough fields */ |
| 3001 | new_index = dict_mem_index_create(index->table, index->name, |
| 3002 | index->type, |
| 3003 | unsigned(index->n_fields |
| 3004 | + table->n_cols)); |
| 3005 | |
| 3006 | /* Copy other relevant data from the old index struct to the new |
| 3007 | struct: it inherits the values */ |
| 3008 | |
| 3009 | new_index->n_user_defined_cols = index->n_fields; |
| 3010 | |
| 3011 | new_index->id = index->id; |
| 3012 | |
| 3013 | /* Copy the fields of index */ |
| 3014 | dict_index_copy(new_index, index, 0, index->n_fields); |
| 3015 | |
| 3016 | if (dict_index_is_unique(index)) { |
| 3017 | /* Only the fields defined so far are needed to identify |
| 3018 | the index entry uniquely */ |
| 3019 | |
| 3020 | new_index->n_uniq = new_index->n_def; |
| 3021 | } else { |
| 3022 | /* Also the row id is needed to identify the entry */ |
| 3023 | new_index->n_uniq = 1 + unsigned(new_index->n_def); |
| 3024 | } |
| 3025 | |
| 3026 | new_index->trx_id_offset = 0; |
| 3027 | |
| 3028 | /* Add system columns, trx id first */ |
| 3029 | |
| 3030 | trx_id_pos = new_index->n_def; |
| 3031 | |
| 3032 | compile_time_assert(DATA_ROW_ID == 0); |
| 3033 | compile_time_assert(DATA_TRX_ID == 1); |
| 3034 | compile_time_assert(DATA_ROLL_PTR == 2); |
| 3035 | |
| 3036 | if (!dict_index_is_unique(index)) { |
| 3037 | dict_index_add_col(new_index, table, |
| 3038 | dict_table_get_sys_col( |
| 3039 | table, DATA_ROW_ID), |
| 3040 | 0); |
| 3041 | trx_id_pos++; |
| 3042 | } |
| 3043 | |
| 3044 | dict_index_add_col( |
| 3045 | new_index, table, |
| 3046 | dict_table_get_sys_col(table, DATA_TRX_ID), 0); |
| 3047 | |
| 3048 | for (i = 0; i < trx_id_pos; i++) { |
| 3049 | |
| 3050 | ulint fixed_size = dict_col_get_fixed_size( |
| 3051 | dict_index_get_nth_col(new_index, i), |
| 3052 | dict_table_is_comp(table)); |
| 3053 | |
| 3054 | if (fixed_size == 0) { |
| 3055 | new_index->trx_id_offset = 0; |
| 3056 | |
| 3057 | break; |
| 3058 | } |
| 3059 | |
| 3060 | dict_field_t* field = dict_index_get_nth_field( |
| 3061 | new_index, i); |
| 3062 | if (field->prefix_len > 0) { |
| 3063 | new_index->trx_id_offset = 0; |
| 3064 | |
| 3065 | break; |
| 3066 | } |
| 3067 | |
| 3068 | /* Add fixed_size to new_index->trx_id_offset. |
| 3069 | Because the latter is a bit-field, an overflow |
| 3070 | can theoretically occur. Check for it. */ |
| 3071 | fixed_size += new_index->trx_id_offset; |
| 3072 | |
| 3073 | new_index->trx_id_offset = unsigned(fixed_size); |
| 3074 | |
| 3075 | if (new_index->trx_id_offset != fixed_size) { |
| 3076 | /* Overflow. Pretend that this is a |
| 3077 | variable-length PRIMARY KEY. */ |
| 3078 | ut_ad(0); |
| 3079 | new_index->trx_id_offset = 0; |
| 3080 | break; |
| 3081 | } |
| 3082 | } |
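| | 
| | 	/* Illustrative outcome (hypothetical table): with PRIMARY KEY(id INT
| | 	NOT NULL) in ROW_FORMAT=COMPACT the loop sees one fixed 4-byte column
| | 	before DB_TRX_ID, so trx_id_offset becomes 4; a VARCHAR key or a column
| | 	prefix leaves it at 0, meaning the offset must be computed per record. */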
| 3083 | |
| 3084 | dict_index_add_col( |
| 3085 | new_index, table, |
| 3086 | dict_table_get_sys_col(table, DATA_ROLL_PTR), 0); |
| 3087 | |
| 3088 | /* Remember the table columns already contained in new_index */ |
| 3089 | indexed = static_cast<ibool*>( |
| 3090 | ut_zalloc_nokey(table->n_cols * sizeof *indexed)); |
| 3091 | |
| 3092 | /* Mark the table columns already contained in new_index */ |
| 3093 | for (i = 0; i < new_index->n_def; i++) { |
| 3094 | |
| 3095 | field = dict_index_get_nth_field(new_index, i); |
| 3096 | |
| 3097 | /* If there is only a prefix of the column in the index |
| 3098 | field, do not mark the column as contained in the index */ |
| 3099 | |
| 3100 | if (field->prefix_len == 0) { |
| 3101 | |
| 3102 | indexed[field->col->ind] = TRUE; |
| 3103 | } |
| 3104 | } |
| 3105 | |
| 3106 | /* Add to new_index non-system columns of table not yet included |
| 3107 | there */ |
| 3108 | for (i = 0; i + DATA_N_SYS_COLS < ulint(table->n_cols); i++) { |
| 3109 | |
| 3110 | dict_col_t* col = dict_table_get_nth_col(table, i); |
| 3111 | ut_ad(col->mtype != DATA_SYS); |
| 3112 | |
| 3113 | if (!indexed[col->ind]) { |
| 3114 | dict_index_add_col(new_index, table, col, 0); |
| 3115 | } |
| 3116 | } |
| 3117 | |
| 3118 | ut_free(indexed); |
| 3119 | |
| 3120 | ut_ad(UT_LIST_GET_LEN(table->indexes) == 0); |
| 3121 | |
| 3122 | new_index->n_core_null_bytes = table->supports_instant() |
| 3123 | ? dict_index_t::NO_CORE_NULL_BYTES |
| 3124 | : UT_BITS_IN_BYTES(unsigned(new_index->n_nullable)); |
| 3125 | new_index->cached = TRUE; |
| 3126 | |
| 3127 | return(new_index); |
| 3128 | } |
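
/* Illustrative sketch (comment only, not compiled): for a table
CREATE TABLE t (a INT NOT NULL, b VARCHAR(10), PRIMARY KEY(a))
in ROW_FORMAT=COMPACT, the cached clustered index built above
contains the fields (a, DB_TRX_ID, DB_ROLL_PTR, b). Because the only
field preceding DB_TRX_ID, the 4-byte column a, has a fixed size and
no prefix, trx_id_offset becomes 4. If the PRIMARY KEY contained a
variable-length column or a column prefix, the loop above would
leave trx_id_offset = 0, meaning that the offset of DB_TRX_ID has to
be computed at run time. */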
| 3129 | |
| 3130 | /*******************************************************************//** |
| 3131 | Builds the internal dictionary cache representation for a non-clustered |
| 3132 | index, containing also system fields not defined by the user. |
| 3133 | @return own: the internal representation of the non-clustered index */ |
| 3134 | static |
| 3135 | dict_index_t* |
| 3136 | dict_index_build_internal_non_clust( |
| 3137 | /*================================*/ |
| 3138 | dict_index_t* index) /*!< in: user representation of |
| 3139 | a non-clustered index */ |
| 3140 | { |
| 3141 | dict_field_t* field; |
| 3142 | dict_index_t* new_index; |
| 3143 | dict_index_t* clust_index; |
| 3144 | dict_table_t* table = index->table; |
| 3145 | ulint i; |
| 3146 | ibool* indexed; |
| 3147 | |
| 3148 | ut_ad(table && index); |
| 3149 | ut_ad(!dict_index_is_clust(index)); |
| 3150 | ut_ad(!dict_index_is_ibuf(index)); |
| 3151 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 3152 | |
| 3153 | /* The clustered index should be the first in the list of indexes */ |
| 3154 | clust_index = UT_LIST_GET_FIRST(table->indexes); |
| 3155 | |
| 3156 | ut_ad(clust_index); |
| 3157 | ut_ad(dict_index_is_clust(clust_index)); |
| 3158 | ut_ad(!dict_index_is_ibuf(clust_index)); |
| 3159 | |
| 3160 | /* Create a new index */ |
| 3161 | new_index = dict_mem_index_create( |
| 3162 | index->table, index->name, index->type, |
| 3163 | ulint(index->n_fields + 1 + clust_index->n_uniq)); |
| 3164 | |
| 3165 | /* Copy other relevant data from the old index |
| 3166 | struct to the new struct: it inherits the values */ |
| 3167 | |
| 3168 | new_index->n_user_defined_cols = index->n_fields; |
| 3169 | |
| 3170 | new_index->id = index->id; |
| 3171 | |
| 3172 | /* Copy fields from index to new_index */ |
| 3173 | dict_index_copy(new_index, index, 0, index->n_fields); |
| 3174 | |
| 3175 | /* Remember the table columns already contained in new_index */ |
| 3176 | indexed = static_cast<ibool*>( |
| 3177 | ut_zalloc_nokey(table->n_cols * sizeof *indexed)); |
| 3178 | |
| 3179 | /* Mark the table columns already contained in new_index */ |
| 3180 | for (i = 0; i < new_index->n_def; i++) { |
| 3181 | |
| 3182 | field = dict_index_get_nth_field(new_index, i); |
| 3183 | |
| 3184 | if (field->col->is_virtual()) { |
| 3185 | continue; |
| 3186 | } |
| 3187 | |
| 3188 | /* If there is only a prefix of the column in the index |
| 3189 | field, do not mark the column as contained in the index */ |
| 3190 | |
| 3191 | if (field->prefix_len == 0) { |
| 3192 | |
| 3193 | indexed[field->col->ind] = TRUE; |
| 3194 | } |
| 3195 | } |
| 3196 | |
| 3197 | /* Add to new_index the columns necessary to determine the clustered |
| 3198 | index entry uniquely */ |
| 3199 | |
| 3200 | for (i = 0; i < clust_index->n_uniq; i++) { |
| 3201 | |
| 3202 | field = dict_index_get_nth_field(clust_index, i); |
| 3203 | |
| 3204 | if (!indexed[field->col->ind]) { |
| 3205 | dict_index_add_col(new_index, table, field->col, |
| 3206 | field->prefix_len); |
| 3207 | } else if (dict_index_is_spatial(index)) { |
/* For a spatial index, we still need to add
the field to the index. */
| 3210 | dict_index_add_col(new_index, table, field->col, |
| 3211 | field->prefix_len); |
| 3212 | } |
| 3213 | } |
| 3214 | |
| 3215 | ut_free(indexed); |
| 3216 | |
| 3217 | if (dict_index_is_unique(index)) { |
| 3218 | new_index->n_uniq = index->n_fields; |
| 3219 | } else { |
| 3220 | new_index->n_uniq = new_index->n_def; |
| 3221 | } |
| 3222 | |
| 3223 | /* Set the n_fields value in new_index to the actual defined |
| 3224 | number of fields */ |
| 3225 | |
| 3226 | new_index->n_fields = new_index->n_def; |
| 3227 | |
| 3228 | new_index->cached = TRUE; |
| 3229 | |
| 3230 | return(new_index); |
| 3231 | } |
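
/* Illustrative sketch (comment only): continuing the example above,
adding KEY k(b) to table t makes this function build a cached
secondary index with the fields (b, a): the clustered index key
column a is appended by the loop above because it is needed to
locate the clustered index record. n_uniq becomes 2 because k is not
declared UNIQUE, while n_user_defined_cols stays 1. */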
| 3232 | |
| 3233 | /*********************************************************************** |
| 3234 | Builds the internal dictionary cache representation for an FTS index. |
| 3235 | @return own: the internal representation of the FTS index */ |
| 3236 | static |
| 3237 | dict_index_t* |
| 3238 | dict_index_build_internal_fts( |
| 3239 | /*==========================*/ |
| 3240 | dict_index_t* index) /*!< in: user representation of an FTS index */ |
| 3241 | { |
| 3242 | dict_index_t* new_index; |
| 3243 | |
| 3244 | ut_ad(index->type == DICT_FTS); |
| 3245 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 3246 | |
| 3247 | /* Create a new index */ |
| 3248 | new_index = dict_mem_index_create(index->table, index->name, |
| 3249 | index->type, index->n_fields); |
| 3250 | |
| 3251 | /* Copy other relevant data from the old index struct to the new |
| 3252 | struct: it inherits the values */ |
| 3253 | |
| 3254 | new_index->n_user_defined_cols = index->n_fields; |
| 3255 | |
| 3256 | new_index->id = index->id; |
| 3257 | |
| 3258 | /* Copy fields from index to new_index */ |
| 3259 | dict_index_copy(new_index, index, 0, index->n_fields); |
| 3260 | |
| 3261 | new_index->n_uniq = 0; |
| 3262 | new_index->cached = TRUE; |
| 3263 | |
| 3264 | dict_table_t* table = index->table; |
| 3265 | |
| 3266 | if (table->fts->cache == NULL) { |
| 3267 | table->fts->cache = fts_cache_create(table); |
| 3268 | } |
| 3269 | |
| 3270 | rw_lock_x_lock(&table->fts->cache->init_lock); |
| 3271 | /* Notify the FTS cache about this index. */ |
| 3272 | fts_cache_index_cache_create(table, new_index); |
| 3273 | rw_lock_x_unlock(&table->fts->cache->init_lock); |
| 3274 | |
| 3275 | return(new_index); |
| 3276 | } |
| 3277 | /*====================== FOREIGN KEY PROCESSING ========================*/ |
| 3278 | |
| 3279 | #define DB_FOREIGN_KEY_IS_PREFIX_INDEX 200 |
| 3280 | #define DB_FOREIGN_KEY_COL_NOT_NULL 201 |
| 3281 | #define DB_FOREIGN_KEY_COLS_NOT_EQUAL 202 |
| 3282 | #define DB_FOREIGN_KEY_INDEX_NOT_FOUND 203 |
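
/* These four codes are local diagnostics for foreign key index
matching: they are reported through the index_error output parameter
of dict_foreign_find_index() and turned into messages by
dict_foreign_push_index_error(). They are not dberr_t values. */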
| 3283 | |
| 3284 | /** Check whether the dict_table_t is a partition. |
| 3285 | A partitioned table on the SQL level is composed of InnoDB tables, |
| 3286 | where each InnoDB table is a [sub]partition including its secondary indexes |
| 3287 | which belongs to the partition. |
| 3288 | @param[in] table Table to check. |
| 3289 | @return true if the dict_table_t is a partition else false. */ |
| 3290 | UNIV_INLINE |
| 3291 | bool |
| 3292 | dict_table_is_partition( |
| 3293 | const dict_table_t* table) |
| 3294 | { |
| 3295 | /* Check both P and p on all platforms in case it was moved to/from |
| 3296 | WIN. */ |
| 3297 | return(strstr(table->name.m_name, "#p#" ) |
| 3298 | || strstr(table->name.m_name, "#P#" )); |
| 3299 | } |
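
/* For example, the InnoDB name of the first partition of test.t1 is
of the form "test/t1#P#p0"; the separator may appear in the other
letter case if the files were created on Windows, which is why both
"#P#" and "#p#" are checked above. */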
| 3300 | |
| 3301 | /*********************************************************************//** |
| 3302 | Checks if a table is referenced by foreign keys. |
| 3303 | @return TRUE if table is referenced by a foreign key */ |
| 3304 | ibool |
| 3305 | dict_table_is_referenced_by_foreign_key( |
| 3306 | /*====================================*/ |
| 3307 | const dict_table_t* table) /*!< in: InnoDB table */ |
| 3308 | { |
| 3309 | return(!table->referenced_set.empty()); |
| 3310 | } |
| 3311 | |
| 3312 | /**********************************************************************//** |
| 3313 | Removes a foreign constraint struct from the dictionary cache. */ |
| 3314 | void |
| 3315 | dict_foreign_remove_from_cache( |
| 3316 | /*===========================*/ |
| 3317 | dict_foreign_t* foreign) /*!< in, own: foreign constraint */ |
| 3318 | { |
| 3319 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 3320 | ut_a(foreign); |
| 3321 | |
| 3322 | if (foreign->referenced_table != NULL) { |
| 3323 | foreign->referenced_table->referenced_set.erase(foreign); |
| 3324 | } |
| 3325 | |
| 3326 | if (foreign->foreign_table != NULL) { |
| 3327 | foreign->foreign_table->foreign_set.erase(foreign); |
| 3328 | } |
| 3329 | |
| 3330 | dict_foreign_free(foreign); |
| 3331 | } |
| 3332 | |
| 3333 | /**********************************************************************//** |
| 3334 | Looks for the foreign constraint from the foreign and referenced lists |
| 3335 | of a table. |
| 3336 | @return foreign constraint */ |
| 3337 | static |
| 3338 | dict_foreign_t* |
| 3339 | dict_foreign_find( |
| 3340 | /*==============*/ |
| 3341 | dict_table_t* table, /*!< in: table object */ |
| 3342 | dict_foreign_t* foreign) /*!< in: foreign constraint */ |
| 3343 | { |
| 3344 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 3345 | |
| 3346 | ut_ad(dict_foreign_set_validate(table->foreign_set)); |
| 3347 | ut_ad(dict_foreign_set_validate(table->referenced_set)); |
| 3348 | |
| 3349 | dict_foreign_set::iterator it = table->foreign_set.find(foreign); |
| 3350 | |
| 3351 | if (it != table->foreign_set.end()) { |
| 3352 | return(*it); |
| 3353 | } |
| 3354 | |
| 3355 | it = table->referenced_set.find(foreign); |
| 3356 | |
| 3357 | if (it != table->referenced_set.end()) { |
| 3358 | return(*it); |
| 3359 | } |
| 3360 | |
| 3361 | return(NULL); |
| 3362 | } |
| 3363 | |
| 3364 | /*********************************************************************//** |
Tries to find an index whose first fields are the columns in the array
in the same order, which is not marked for deletion and is not the same
as types_idx.
| 3368 | @return matching index, NULL if not found */ |
| 3369 | dict_index_t* |
| 3370 | dict_foreign_find_index( |
| 3371 | /*====================*/ |
| 3372 | const dict_table_t* table, /*!< in: table */ |
| 3373 | const char** col_names, |
| 3374 | /*!< in: column names, or NULL |
| 3375 | to use table->col_names */ |
| 3376 | const char** columns,/*!< in: array of column names */ |
| 3377 | ulint n_cols, /*!< in: number of columns */ |
| 3378 | const dict_index_t* types_idx, |
| 3379 | /*!< in: NULL or an index |
| 3380 | whose types the column types |
| 3381 | must match */ |
| 3382 | bool check_charsets, |
| 3383 | /*!< in: whether to check |
| 3384 | charsets. only has an effect |
| 3385 | if types_idx != NULL */ |
| 3386 | ulint check_null, |
| 3387 | /*!< in: nonzero if none of |
| 3388 | the columns must be declared |
| 3389 | NOT NULL */ |
| 3390 | ulint* error, /*!< out: error code */ |
| 3391 | ulint* err_col_no, |
| 3392 | /*!< out: column number where |
| 3393 | error happened */ |
| 3394 | dict_index_t** err_index) |
| 3395 | /*!< out: index where error |
| 3396 | happened */ |
| 3397 | { |
| 3398 | dict_index_t* index; |
| 3399 | |
| 3400 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 3401 | |
| 3402 | if (error) { |
| 3403 | *error = DB_FOREIGN_KEY_INDEX_NOT_FOUND; |
| 3404 | } |
| 3405 | |
| 3406 | index = dict_table_get_first_index(table); |
| 3407 | |
| 3408 | while (index != NULL) { |
| 3409 | if (types_idx != index |
| 3410 | && !(index->type & DICT_FTS) |
| 3411 | && !dict_index_is_spatial(index) |
| 3412 | && !index->to_be_dropped |
| 3413 | && !dict_index_is_online_ddl(index) |
| 3414 | && dict_foreign_qualify_index( |
| 3415 | table, col_names, columns, n_cols, |
| 3416 | index, types_idx, |
| 3417 | check_charsets, check_null, |
error, err_col_no, err_index)) {
| 3419 | if (error) { |
| 3420 | *error = DB_SUCCESS; |
| 3421 | } |
| 3422 | |
| 3423 | return(index); |
| 3424 | } |
| 3425 | |
| 3426 | index = dict_table_get_next_index(index); |
| 3427 | } |
| 3428 | |
| 3429 | return(NULL); |
| 3430 | } |
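
/* Illustrative sketch (not part of the build; assumes a cached child
table object and a foreign key on two of its columns):

	ulint		error;
	ulint		err_col;
	dict_index_t*	err_index = NULL;
	const char*	cols[2] = { "col1", "col2" };

	dict_index_t*	idx = dict_foreign_find_index(
		child_table, NULL, cols, 2,
		NULL,	// no index whose types must match
		true,	// check charsets
		0,	// columns may be declared NULL
		&error, &err_col, &err_index);

If no usable index exists, idx is NULL and error holds one of the
DB_FOREIGN_KEY_* codes defined above. */
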
| 3431 | #ifdef WITH_WSREP |
| 3432 | dict_index_t* |
| 3433 | wsrep_dict_foreign_find_index( |
| 3434 | /*====================*/ |
| 3435 | dict_table_t* table, /*!< in: table */ |
| 3436 | const char** col_names, /*!< in: column names, or NULL |
| 3437 | to use table->col_names */ |
| 3438 | const char** columns,/*!< in: array of column names */ |
| 3439 | ulint n_cols, /*!< in: number of columns */ |
| 3440 | dict_index_t* types_idx, /*!< in: NULL or an index to whose types the |
| 3441 | column types must match */ |
| 3442 | ibool check_charsets, |
| 3443 | /*!< in: whether to check charsets. |
| 3444 | only has an effect if types_idx != NULL */ |
| 3445 | ulint check_null) |
| 3446 | /*!< in: nonzero if none of the columns must |
| 3447 | be declared NOT NULL */ |
| 3448 | { |
| 3449 | return dict_foreign_find_index( |
| 3450 | table, col_names, columns, n_cols, types_idx, check_charsets, |
| 3451 | check_null, NULL, NULL, NULL); |
| 3452 | } |
| 3453 | #endif /* WITH_WSREP */ |
| 3454 | /**********************************************************************//** |
| 3455 | Report an error in a foreign key definition. */ |
| 3456 | static |
| 3457 | void |
| 3458 | dict_foreign_error_report_low( |
| 3459 | /*==========================*/ |
| 3460 | FILE* file, /*!< in: output stream */ |
| 3461 | const char* name) /*!< in: table name */ |
| 3462 | { |
| 3463 | rewind(file); |
| 3464 | ut_print_timestamp(file); |
| 3465 | fprintf(file, " Error in foreign key constraint of table %s:\n" , |
| 3466 | name); |
| 3467 | } |
| 3468 | |
| 3469 | /**********************************************************************//** |
| 3470 | Report an error in a foreign key definition. */ |
| 3471 | static |
| 3472 | void |
| 3473 | dict_foreign_error_report( |
| 3474 | /*======================*/ |
| 3475 | FILE* file, /*!< in: output stream */ |
| 3476 | dict_foreign_t* fk, /*!< in: foreign key constraint */ |
| 3477 | const char* msg) /*!< in: the error message */ |
| 3478 | { |
| 3479 | std::string fk_str; |
| 3480 | mutex_enter(&dict_foreign_err_mutex); |
| 3481 | dict_foreign_error_report_low(file, fk->foreign_table_name); |
| 3482 | fputs(msg, file); |
| 3483 | fputs(" Constraint:\n" , file); |
| 3484 | fk_str = dict_print_info_on_foreign_key_in_create_format(NULL, fk, TRUE); |
| 3485 | fputs(fk_str.c_str(), file); |
| 3486 | putc('\n', file); |
| 3487 | if (fk->foreign_index) { |
| 3488 | fprintf(file, "The index in the foreign key in table is" |
| 3489 | " %s\n%s\n" , fk->foreign_index->name(), |
| 3490 | FOREIGN_KEY_CONSTRAINTS_MSG); |
| 3491 | } |
| 3492 | mutex_exit(&dict_foreign_err_mutex); |
| 3493 | } |
| 3494 | |
| 3495 | /**********************************************************************//** |
| 3496 | Adds a foreign key constraint object to the dictionary cache. May free |
the object if there already is an object with the same identifier in the cache.
| 3498 | At least one of the foreign table and the referenced table must already |
| 3499 | be in the dictionary cache! |
| 3500 | @return DB_SUCCESS or error code */ |
| 3501 | dberr_t |
| 3502 | dict_foreign_add_to_cache( |
| 3503 | /*======================*/ |
| 3504 | dict_foreign_t* foreign, |
| 3505 | /*!< in, own: foreign key constraint */ |
| 3506 | const char** col_names, |
| 3507 | /*!< in: column names, or NULL to use |
| 3508 | foreign->foreign_table->col_names */ |
| 3509 | bool check_charsets, |
| 3510 | /*!< in: whether to check charset |
| 3511 | compatibility */ |
| 3512 | dict_err_ignore_t ignore_err) |
| 3513 | /*!< in: error to be ignored */ |
| 3514 | { |
| 3515 | dict_table_t* for_table; |
| 3516 | dict_table_t* ref_table; |
| 3517 | dict_foreign_t* for_in_cache = NULL; |
| 3518 | dict_index_t* index; |
ibool added_to_referenced_list = FALSE;
| 3520 | FILE* ef = dict_foreign_err_file; |
| 3521 | |
| 3522 | DBUG_ENTER("dict_foreign_add_to_cache" ); |
| 3523 | DBUG_PRINT("dict_foreign_add_to_cache" , ("id: %s" , foreign->id)); |
| 3524 | |
| 3525 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 3526 | |
| 3527 | for_table = dict_table_check_if_in_cache_low( |
| 3528 | foreign->foreign_table_name_lookup); |
| 3529 | |
| 3530 | ref_table = dict_table_check_if_in_cache_low( |
| 3531 | foreign->referenced_table_name_lookup); |
| 3532 | ut_a(for_table || ref_table); |
| 3533 | |
| 3534 | if (for_table) { |
| 3535 | for_in_cache = dict_foreign_find(for_table, foreign); |
| 3536 | } |
| 3537 | |
| 3538 | if (!for_in_cache && ref_table) { |
| 3539 | for_in_cache = dict_foreign_find(ref_table, foreign); |
| 3540 | } |
| 3541 | |
| 3542 | if (for_in_cache) { |
| 3543 | dict_foreign_free(foreign); |
| 3544 | } else { |
| 3545 | for_in_cache = foreign; |
| 3546 | |
| 3547 | } |
| 3548 | |
| 3549 | if (ref_table && !for_in_cache->referenced_table) { |
| 3550 | ulint index_error; |
| 3551 | ulint err_col; |
| 3552 | dict_index_t *err_index=NULL; |
| 3553 | |
| 3554 | index = dict_foreign_find_index( |
| 3555 | ref_table, NULL, |
| 3556 | for_in_cache->referenced_col_names, |
| 3557 | for_in_cache->n_fields, for_in_cache->foreign_index, |
| 3558 | check_charsets, false, &index_error, &err_col, &err_index); |
| 3559 | |
| 3560 | if (index == NULL |
| 3561 | && !(ignore_err & DICT_ERR_IGNORE_FK_NOKEY)) { |
| 3562 | dict_foreign_error_report( |
| 3563 | ef, for_in_cache, |
| 3564 | "there is no index in referenced table" |
| 3565 | " which would contain\n" |
| 3566 | "the columns as the first columns," |
| 3567 | " or the data types in the\n" |
| 3568 | "referenced table do not match" |
| 3569 | " the ones in table." ); |
| 3570 | |
| 3571 | if (for_in_cache == foreign) { |
| 3572 | dict_foreign_free(foreign); |
| 3573 | } |
| 3574 | |
| 3575 | DBUG_RETURN(DB_CANNOT_ADD_CONSTRAINT); |
| 3576 | } |
| 3577 | |
| 3578 | for_in_cache->referenced_table = ref_table; |
| 3579 | for_in_cache->referenced_index = index; |
| 3580 | |
| 3581 | std::pair<dict_foreign_set::iterator, bool> ret |
| 3582 | = ref_table->referenced_set.insert(for_in_cache); |
| 3583 | |
| 3584 | ut_a(ret.second); /* second is true if the insertion |
| 3585 | took place */ |
| 3586 | added_to_referenced_list = TRUE; |
| 3587 | } |
| 3588 | |
| 3589 | if (for_table && !for_in_cache->foreign_table) { |
| 3590 | ulint index_error; |
| 3591 | ulint err_col; |
| 3592 | dict_index_t *err_index=NULL; |
| 3593 | |
| 3594 | index = dict_foreign_find_index( |
| 3595 | for_table, col_names, |
| 3596 | for_in_cache->foreign_col_names, |
| 3597 | for_in_cache->n_fields, |
| 3598 | for_in_cache->referenced_index, check_charsets, |
| 3599 | for_in_cache->type |
| 3600 | & (DICT_FOREIGN_ON_DELETE_SET_NULL |
| 3601 | | DICT_FOREIGN_ON_UPDATE_SET_NULL), |
| 3602 | &index_error, &err_col, &err_index); |
| 3603 | |
| 3604 | if (index == NULL |
| 3605 | && !(ignore_err & DICT_ERR_IGNORE_FK_NOKEY)) { |
| 3606 | dict_foreign_error_report( |
| 3607 | ef, for_in_cache, |
| 3608 | "there is no index in the table" |
| 3609 | " which would contain\n" |
| 3610 | "the columns as the first columns," |
| 3611 | " or the data types in the\n" |
| 3612 | "table do not match" |
| 3613 | " the ones in the referenced table\n" |
| 3614 | "or one of the ON ... SET NULL columns" |
| 3615 | " is declared NOT NULL." ); |
| 3616 | |
| 3617 | if (for_in_cache == foreign) { |
| 3618 | if (added_to_referenced_list) { |
| 3619 | const dict_foreign_set::size_type |
| 3620 | n = ref_table->referenced_set |
| 3621 | .erase(for_in_cache); |
| 3622 | |
| 3623 | ut_a(n == 1); /* the number of |
| 3624 | elements removed must |
| 3625 | be one */ |
| 3626 | } |
| 3627 | |
| 3628 | dict_foreign_free(foreign); |
| 3629 | } |
| 3630 | |
| 3631 | DBUG_RETURN(DB_CANNOT_ADD_CONSTRAINT); |
| 3632 | } |
| 3633 | |
| 3634 | for_in_cache->foreign_table = for_table; |
| 3635 | for_in_cache->foreign_index = index; |
| 3636 | |
| 3637 | std::pair<dict_foreign_set::iterator, bool> ret |
| 3638 | = for_table->foreign_set.insert(for_in_cache); |
| 3639 | |
| 3640 | ut_a(ret.second); /* second is true if the insertion |
| 3641 | took place */ |
| 3642 | } |
| 3643 | |
| 3644 | /* We need to move the table to the non-LRU end of the table LRU |
| 3645 | list. Otherwise it will be evicted from the cache. */ |
| 3646 | |
| 3647 | if (ref_table != NULL) { |
| 3648 | dict_table_prevent_eviction(ref_table); |
| 3649 | } |
| 3650 | |
| 3651 | if (for_table != NULL) { |
| 3652 | dict_table_prevent_eviction(for_table); |
| 3653 | } |
| 3654 | |
| 3655 | ut_ad(dict_lru_validate()); |
| 3656 | DBUG_RETURN(DB_SUCCESS); |
| 3657 | } |
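
/* Note: on success the cache owns 'foreign'; if an equivalent
constraint object was already cached, the passed object is freed and
the cached one is completed instead. Both participating tables are
moved to the non-LRU end of the table list above, so that neither
can be evicted while the constraint is in the cache. */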
| 3658 | |
| 3659 | /*********************************************************************//** |
Scans from pointer onwards. Stops if it is at the start of a copy of
'string', where characters are compared case-insensitively and the
match is only accepted outside `` or "" quotes. Stops also at NUL.
| 3663 | @return scanned up to this */ |
| 3664 | static |
| 3665 | const char* |
| 3666 | dict_scan_to( |
| 3667 | /*=========*/ |
| 3668 | const char* ptr, /*!< in: scan from */ |
| 3669 | const char* string) /*!< in: look for this */ |
| 3670 | { |
| 3671 | char quote = '\0'; |
| 3672 | bool escape = false; |
| 3673 | |
| 3674 | for (; *ptr; ptr++) { |
| 3675 | if (*ptr == quote) { |
| 3676 | /* Closing quote character: do not look for |
| 3677 | starting quote or the keyword. */ |
| 3678 | |
| 3679 | /* If the quote character is escaped by a |
| 3680 | backslash, ignore it. */ |
| 3681 | if (escape) { |
| 3682 | escape = false; |
| 3683 | } else { |
| 3684 | quote = '\0'; |
| 3685 | } |
| 3686 | } else if (quote) { |
| 3687 | /* Within quotes: do nothing. */ |
| 3688 | if (escape) { |
| 3689 | escape = false; |
| 3690 | } else if (*ptr == '\\') { |
| 3691 | escape = true; |
| 3692 | } |
| 3693 | } else if (*ptr == '`' || *ptr == '"' || *ptr == '\'') { |
| 3694 | /* Starting quote: remember the quote character. */ |
| 3695 | quote = *ptr; |
| 3696 | } else { |
| 3697 | /* Outside quotes: look for the keyword. */ |
| 3698 | ulint i; |
| 3699 | for (i = 0; string[i]; i++) { |
| 3700 | if (toupper((int)(unsigned char)(ptr[i])) |
| 3701 | != toupper((int)(unsigned char) |
| 3702 | (string[i]))) { |
| 3703 | goto nomatch; |
| 3704 | } |
| 3705 | } |
| 3706 | break; |
| 3707 | nomatch: |
| 3708 | ; |
| 3709 | } |
| 3710 | } |
| 3711 | |
| 3712 | return(ptr); |
| 3713 | } |
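
/* For example, dict_scan_to("`FOREIGN` x FOREIGN KEY", "FOREIGN")
returns a pointer to the second, unquoted FOREIGN: the first
occurrence is skipped because it is inside backquotes. The function
knows nothing about SQL comments; those are expected to have been
stripped beforehand by dict_strip_comments(). */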
| 3714 | |
| 3715 | /*********************************************************************//** |
| 3716 | Accepts a specified string. Comparisons are case-insensitive. |
| 3717 | @return if string was accepted, the pointer is moved after that, else |
| 3718 | ptr is returned */ |
| 3719 | static |
| 3720 | const char* |
| 3721 | dict_accept( |
| 3722 | /*========*/ |
| 3723 | CHARSET_INFO* cs, /*!< in: the character set of ptr */ |
| 3724 | const char* ptr, /*!< in: scan from this */ |
| 3725 | const char* string, /*!< in: accept only this string as the next |
| 3726 | non-whitespace string */ |
| 3727 | ibool* success)/*!< out: TRUE if accepted */ |
| 3728 | { |
| 3729 | const char* old_ptr = ptr; |
| 3730 | const char* old_ptr2; |
| 3731 | |
| 3732 | *success = FALSE; |
| 3733 | |
| 3734 | while (my_isspace(cs, *ptr)) { |
| 3735 | ptr++; |
| 3736 | } |
| 3737 | |
| 3738 | old_ptr2 = ptr; |
| 3739 | |
| 3740 | ptr = dict_scan_to(ptr, string); |
| 3741 | |
| 3742 | if (*ptr == '\0' || old_ptr2 != ptr) { |
| 3743 | return(old_ptr); |
| 3744 | } |
| 3745 | |
| 3746 | *success = TRUE; |
| 3747 | |
| 3748 | return(ptr + ut_strlen(string)); |
| 3749 | } |
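
/* For example, with ptr pointing at "  FOREIGN KEY (a)",
dict_accept(cs, ptr, "FOREIGN", &ok) sets ok = TRUE and returns a
pointer to " KEY (a)", whereas dict_accept(cs, ptr, "KEY", &ok)
leaves ok = FALSE and returns ptr unchanged, because KEY is not the
next non-whitespace token. */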
| 3750 | |
| 3751 | /*********************************************************************//** |
| 3752 | Scans an id. For the lexical definition of an 'id', see the code below. |
| 3753 | Strips backquotes or double quotes from around the id. |
| 3754 | @return scanned to */ |
| 3755 | static |
| 3756 | const char* |
| 3757 | dict_scan_id( |
| 3758 | /*=========*/ |
| 3759 | CHARSET_INFO* cs, /*!< in: the character set of ptr */ |
| 3760 | const char* ptr, /*!< in: scanned to */ |
| 3761 | mem_heap_t* heap, /*!< in: heap where to allocate the id |
| 3762 | (NULL=id will not be allocated, but it |
| 3763 | will point to string near ptr) */ |
| 3764 | const char** id, /*!< out,own: the id; NULL if no id was |
| 3765 | scannable */ |
| 3766 | ibool table_id,/*!< in: TRUE=convert the allocated id |
| 3767 | as a table name; FALSE=convert to UTF-8 */ |
| 3768 | ibool accept_also_dot) |
| 3769 | /*!< in: TRUE if also a dot can appear in a |
| 3770 | non-quoted id; in a quoted id it can appear |
| 3771 | always */ |
| 3772 | { |
| 3773 | char quote = '\0'; |
| 3774 | ulint len = 0; |
| 3775 | const char* s; |
| 3776 | char* str; |
| 3777 | char* dst; |
| 3778 | |
| 3779 | *id = NULL; |
| 3780 | |
| 3781 | while (my_isspace(cs, *ptr)) { |
| 3782 | ptr++; |
| 3783 | } |
| 3784 | |
| 3785 | if (*ptr == '\0') { |
| 3786 | |
| 3787 | return(ptr); |
| 3788 | } |
| 3789 | |
| 3790 | if (*ptr == '`' || *ptr == '"') { |
| 3791 | quote = *ptr++; |
| 3792 | } |
| 3793 | |
| 3794 | s = ptr; |
| 3795 | |
| 3796 | if (quote) { |
| 3797 | for (;;) { |
| 3798 | if (!*ptr) { |
| 3799 | /* Syntax error */ |
| 3800 | return(ptr); |
| 3801 | } |
| 3802 | if (*ptr == quote) { |
| 3803 | ptr++; |
| 3804 | if (*ptr != quote) { |
| 3805 | break; |
| 3806 | } |
| 3807 | } |
| 3808 | ptr++; |
| 3809 | len++; |
| 3810 | } |
| 3811 | } else { |
| 3812 | while (!my_isspace(cs, *ptr) && *ptr != '(' && *ptr != ')' |
| 3813 | && (accept_also_dot || *ptr != '.') |
| 3814 | && *ptr != ',' && *ptr != '\0') { |
| 3815 | |
| 3816 | ptr++; |
| 3817 | } |
| 3818 | |
| 3819 | len = ulint(ptr - s); |
| 3820 | } |
| 3821 | |
| 3822 | if (heap == NULL) { |
| 3823 | /* no heap given: id will point to source string */ |
| 3824 | *id = s; |
| 3825 | return(ptr); |
| 3826 | } |
| 3827 | |
| 3828 | if (quote) { |
| 3829 | char* d; |
| 3830 | |
| 3831 | str = d = static_cast<char*>( |
| 3832 | mem_heap_alloc(heap, len + 1)); |
| 3833 | |
| 3834 | while (len--) { |
| 3835 | if ((*d++ = *s++) == quote) { |
| 3836 | s++; |
| 3837 | } |
| 3838 | } |
| 3839 | *d++ = 0; |
| 3840 | len = ulint(d - str); |
| 3841 | ut_ad(*s == quote); |
| 3842 | ut_ad(s + 1 == ptr); |
| 3843 | } else { |
| 3844 | str = mem_heap_strdupl(heap, s, len); |
| 3845 | } |
| 3846 | |
| 3847 | if (!table_id) { |
| 3848 | convert_id: |
| 3849 | /* Convert the identifier from connection character set |
| 3850 | to UTF-8. */ |
| 3851 | len = 3 * len + 1; |
| 3852 | *id = dst = static_cast<char*>(mem_heap_alloc(heap, len)); |
| 3853 | |
| 3854 | innobase_convert_from_id(cs, dst, str, len); |
| 3855 | } else if (!strncmp(str, srv_mysql50_table_name_prefix, |
| 3856 | sizeof(srv_mysql50_table_name_prefix) - 1)) { |
| 3857 | /* This is a pre-5.1 table name |
| 3858 | containing chars other than [A-Za-z0-9]. |
| 3859 | Discard the prefix and use raw UTF-8 encoding. */ |
| 3860 | str += sizeof(srv_mysql50_table_name_prefix) - 1; |
| 3861 | len -= sizeof(srv_mysql50_table_name_prefix) - 1; |
| 3862 | goto convert_id; |
| 3863 | } else { |
| 3864 | /* Encode using filename-safe characters. */ |
| 3865 | len = 5 * len + 1; |
| 3866 | *id = dst = static_cast<char*>(mem_heap_alloc(heap, len)); |
| 3867 | |
| 3868 | innobase_convert_from_table_id(cs, dst, str, len); |
| 3869 | } |
| 3870 | |
| 3871 | return(ptr); |
| 3872 | } |
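
/* For example, scanning "`foo``bar` (" with table_id == FALSE sets
*id to the UTF-8 string foo`bar and returns a pointer to " (": a
doubled quote character inside a quoted identifier is collapsed into
a single one by the copy loop above. */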
| 3873 | |
| 3874 | /*********************************************************************//** |
| 3875 | Tries to scan a column name. |
| 3876 | @return scanned to */ |
| 3877 | static |
| 3878 | const char* |
| 3879 | dict_scan_col( |
| 3880 | /*==========*/ |
| 3881 | CHARSET_INFO* cs, /*!< in: the character set of ptr */ |
| 3882 | const char* ptr, /*!< in: scanned to */ |
| 3883 | ibool* success,/*!< out: TRUE if success */ |
| 3884 | dict_table_t* table, /*!< in: table in which the column is */ |
| 3885 | const dict_col_t** column, /*!< out: pointer to column if success */ |
| 3886 | mem_heap_t* heap, /*!< in: heap where to allocate */ |
| 3887 | const char** name) /*!< out,own: the column name; |
| 3888 | NULL if no name was scannable */ |
| 3889 | { |
| 3890 | ulint i; |
| 3891 | |
| 3892 | *success = FALSE; |
| 3893 | |
| 3894 | ptr = dict_scan_id(cs, ptr, heap, name, FALSE, TRUE); |
| 3895 | |
| 3896 | if (*name == NULL) { |
| 3897 | |
| 3898 | return(ptr); /* Syntax error */ |
| 3899 | } |
| 3900 | |
| 3901 | if (table == NULL) { |
| 3902 | *success = TRUE; |
| 3903 | *column = NULL; |
| 3904 | } else { |
| 3905 | for (i = 0; i < dict_table_get_n_cols(table); i++) { |
| 3906 | |
| 3907 | const char* col_name = dict_table_get_col_name( |
| 3908 | table, i); |
| 3909 | |
| 3910 | if (0 == innobase_strcasecmp(col_name, *name)) { |
| 3911 | /* Found */ |
| 3912 | |
| 3913 | *success = TRUE; |
| 3914 | *column = dict_table_get_nth_col(table, i); |
| 3915 | strcpy((char*) *name, col_name); |
| 3916 | |
| 3917 | break; |
| 3918 | } |
| 3919 | } |
| 3920 | |
| 3921 | for (i = 0; i < dict_table_get_n_v_cols(table); i++) { |
| 3922 | |
| 3923 | const char* col_name = dict_table_get_v_col_name( |
| 3924 | table, i); |
| 3925 | |
| 3926 | if (0 == innobase_strcasecmp(col_name, *name)) { |
| 3927 | /* Found */ |
| 3928 | dict_v_col_t * vcol; |
| 3929 | *success = TRUE; |
| 3930 | vcol = dict_table_get_nth_v_col(table, i); |
| 3931 | *column = &vcol->m_col; |
| 3932 | strcpy((char*) *name, col_name); |
| 3933 | |
| 3934 | break; |
| 3935 | } |
| 3936 | } |
| 3937 | } |
| 3938 | |
| 3939 | return(ptr); |
| 3940 | } |
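
/* Note that the lookup above is case-insensitive and covers both
ordinary and virtual columns; on a successful match the scanned name
is overwritten with the spelling stored in the dictionary cache, so
that later code compares and prints the column name exactly as it
was defined. */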
| 3941 | |
| 3942 | /*********************************************************************//** |
| 3943 | Open a table from its database and table name, this is currently used by |
| 3944 | foreign constraint parser to get the referenced table. |
| 3945 | @return complete table name with database and table name, allocated from |
| 3946 | heap memory passed in */ |
| 3947 | char* |
| 3948 | dict_get_referenced_table( |
| 3949 | /*======================*/ |
| 3950 | const char* name, /*!< in: foreign key table name */ |
| 3951 | const char* database_name, /*!< in: table db name */ |
| 3952 | ulint database_name_len, /*!< in: db name length */ |
| 3953 | const char* table_name, /*!< in: table name */ |
| 3954 | ulint table_name_len, /*!< in: table name length */ |
| 3955 | dict_table_t** table, /*!< out: table object or NULL */ |
| 3956 | mem_heap_t* heap) /*!< in/out: heap memory */ |
| 3957 | { |
| 3958 | char* ref; |
| 3959 | const char* db_name; |
| 3960 | |
| 3961 | if (!database_name) { |
| 3962 | /* Use the database name of the foreign key table */ |
| 3963 | |
| 3964 | db_name = name; |
| 3965 | database_name_len = dict_get_db_name_len(name); |
| 3966 | } else { |
| 3967 | db_name = database_name; |
| 3968 | } |
| 3969 | |
| 3970 | /* Copy database_name, '/', table_name, '\0' */ |
| 3971 | ref = static_cast<char*>( |
| 3972 | mem_heap_alloc(heap, database_name_len + table_name_len + 2)); |
| 3973 | |
| 3974 | memcpy(ref, db_name, database_name_len); |
| 3975 | ref[database_name_len] = '/'; |
| 3976 | memcpy(ref + database_name_len + 1, table_name, table_name_len + 1); |
| 3977 | |
| 3978 | /* Values; 0 = Store and compare as given; case sensitive |
| 3979 | 1 = Store and compare in lower; case insensitive |
| 3980 | 2 = Store as given, compare in lower; case semi-sensitive */ |
| 3981 | if (innobase_get_lower_case_table_names() == 2) { |
| 3982 | innobase_casedn_str(ref); |
| 3983 | *table = dict_table_get_low(ref); |
| 3984 | memcpy(ref, db_name, database_name_len); |
| 3985 | ref[database_name_len] = '/'; |
| 3986 | memcpy(ref + database_name_len + 1, table_name, table_name_len + 1); |
| 3987 | |
| 3988 | } else { |
| 3989 | #ifndef _WIN32 |
| 3990 | if (innobase_get_lower_case_table_names() == 1) { |
| 3991 | innobase_casedn_str(ref); |
| 3992 | } |
| 3993 | #else |
| 3994 | innobase_casedn_str(ref); |
| 3995 | #endif /* !_WIN32 */ |
| 3996 | *table = dict_table_get_low(ref); |
| 3997 | } |
| 3998 | |
| 3999 | return(ref); |
| 4000 | } |
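
/* Illustrative sketch: with name = "test/child" (the table that owns
the foreign key), database_name = NULL and table_name = "parent",
the lookup name built above is "test/parent". With
lower_case_table_names=1 the name is folded to lower case before the
cache lookup; with lower_case_table_names=2 the lookup is done on a
lower-cased copy, but the returned string keeps the case given by
the caller. */
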
| 4001 | /*********************************************************************//** |
| 4002 | Scans a table name from an SQL string. |
| 4003 | @return scanned to */ |
| 4004 | static |
| 4005 | const char* |
| 4006 | dict_scan_table_name( |
| 4007 | /*=================*/ |
| 4008 | CHARSET_INFO* cs, /*!< in: the character set of ptr */ |
| 4009 | const char* ptr, /*!< in: scanned to */ |
| 4010 | dict_table_t** table, /*!< out: table object or NULL */ |
| 4011 | const char* name, /*!< in: foreign key table name */ |
| 4012 | ibool* success,/*!< out: TRUE if ok name found */ |
| 4013 | mem_heap_t* heap, /*!< in: heap where to allocate the id */ |
| 4014 | const char** ref_name)/*!< out,own: the table name; |
| 4015 | NULL if no name was scannable */ |
| 4016 | { |
| 4017 | const char* database_name = NULL; |
| 4018 | ulint database_name_len = 0; |
| 4019 | const char* table_name = NULL; |
| 4020 | const char* scan_name; |
| 4021 | |
| 4022 | *success = FALSE; |
| 4023 | *table = NULL; |
| 4024 | |
| 4025 | ptr = dict_scan_id(cs, ptr, heap, &scan_name, TRUE, FALSE); |
| 4026 | |
| 4027 | if (scan_name == NULL) { |
| 4028 | |
| 4029 | return(ptr); /* Syntax error */ |
| 4030 | } |
| 4031 | |
| 4032 | if (*ptr == '.') { |
| 4033 | /* We scanned the database name; scan also the table name */ |
| 4034 | |
| 4035 | ptr++; |
| 4036 | |
| 4037 | database_name = scan_name; |
| 4038 | database_name_len = strlen(database_name); |
| 4039 | |
| 4040 | ptr = dict_scan_id(cs, ptr, heap, &table_name, TRUE, FALSE); |
| 4041 | |
| 4042 | if (table_name == NULL) { |
| 4043 | |
| 4044 | return(ptr); /* Syntax error */ |
| 4045 | } |
| 4046 | } else { |
| 4047 | /* To be able to read table dumps made with InnoDB-4.0.17 or |
| 4048 | earlier, we must allow the dot separator between the database |
| 4049 | name and the table name also to appear within a quoted |
| 4050 | identifier! InnoDB used to print a constraint as: |
| 4051 | ... REFERENCES `databasename.tablename` ... |
| 4052 | starting from 4.0.18 it is |
| 4053 | ... REFERENCES `databasename`.`tablename` ... */ |
| 4054 | const char* s; |
| 4055 | |
| 4056 | for (s = scan_name; *s; s++) { |
| 4057 | if (*s == '.') { |
| 4058 | database_name = scan_name; |
| 4059 | database_name_len = ulint(s - scan_name); |
| 4060 | scan_name = ++s; |
| 4061 | break;/* to do: multiple dots? */ |
| 4062 | } |
| 4063 | } |
| 4064 | |
| 4065 | table_name = scan_name; |
| 4066 | } |
| 4067 | |
| 4068 | *ref_name = dict_get_referenced_table( |
| 4069 | name, database_name, database_name_len, |
| 4070 | table_name, strlen(table_name), table, heap); |
| 4071 | |
| 4072 | *success = TRUE; |
| 4073 | return(ptr); |
| 4074 | } |
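
/* For example, both "... REFERENCES `test`.`parent` (a)" and the
pre-4.0.18 dump form "... REFERENCES `test.parent` (a)" are resolved
by the code above to the same lookup name, test/parent. */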
| 4075 | |
| 4076 | /*********************************************************************//** |
| 4077 | Skips one id. The id is allowed to contain also '.'. |
| 4078 | @return scanned to */ |
| 4079 | static |
| 4080 | const char* |
| 4081 | dict_skip_word( |
| 4082 | /*===========*/ |
| 4083 | CHARSET_INFO* cs, /*!< in: the character set of ptr */ |
| 4084 | const char* ptr, /*!< in: scanned to */ |
| 4085 | ibool* success)/*!< out: TRUE if success, FALSE if just spaces |
| 4086 | left in string or a syntax error */ |
| 4087 | { |
| 4088 | const char* start; |
| 4089 | |
| 4090 | *success = FALSE; |
| 4091 | |
| 4092 | ptr = dict_scan_id(cs, ptr, NULL, &start, FALSE, TRUE); |
| 4093 | |
| 4094 | if (start) { |
| 4095 | *success = TRUE; |
| 4096 | } |
| 4097 | |
| 4098 | return(ptr); |
| 4099 | } |
| 4100 | |
| 4101 | /*********************************************************************//** |
| 4102 | Removes MySQL comments from an SQL string. A comment is either |
| 4103 | (a) '#' to the end of the line, |
| 4104 | (b) '--[space]' to the end of the line, or |
| 4105 | (c) '[slash][asterisk]' till the next '[asterisk][slash]' (like the familiar |
| 4106 | C comment syntax). |
| 4107 | @return own: SQL string stripped from comments; the caller must free |
| 4108 | this with ut_free()! */ |
| 4109 | static |
| 4110 | char* |
dict_strip_comments(
| 4112 | /*================*/ |
| 4113 | const char* sql_string, /*!< in: SQL string */ |
| 4114 | size_t sql_length) /*!< in: length of sql_string */ |
| 4115 | { |
| 4116 | char* str; |
| 4117 | const char* sptr; |
| 4118 | const char* eptr = sql_string + sql_length; |
| 4119 | char* ptr; |
| 4120 | /* unclosed quote character (0 if none) */ |
| 4121 | char quote = 0; |
| 4122 | bool escape = false; |
| 4123 | |
| 4124 | DBUG_ENTER("dict_strip_comments" ); |
| 4125 | |
| 4126 | DBUG_PRINT("dict_strip_comments" , ("%s" , sql_string)); |
| 4127 | |
| 4128 | str = static_cast<char*>(ut_malloc_nokey(sql_length + 1)); |
| 4129 | |
| 4130 | sptr = sql_string; |
| 4131 | ptr = str; |
| 4132 | |
| 4133 | for (;;) { |
| 4134 | scan_more: |
| 4135 | if (sptr >= eptr || *sptr == '\0') { |
| 4136 | end_of_string: |
| 4137 | *ptr = '\0'; |
| 4138 | |
| 4139 | ut_a(ptr <= str + sql_length); |
| 4140 | |
| 4141 | DBUG_PRINT("dict_strip_comments" , ("%s" , str)); |
| 4142 | DBUG_RETURN(str); |
| 4143 | } |
| 4144 | |
| 4145 | if (*sptr == quote) { |
| 4146 | /* Closing quote character: do not look for |
| 4147 | starting quote or comments. */ |
| 4148 | |
| 4149 | /* If the quote character is escaped by a |
| 4150 | backslash, ignore it. */ |
| 4151 | if (escape) { |
| 4152 | escape = false; |
| 4153 | } else { |
| 4154 | quote = 0; |
| 4155 | } |
| 4156 | } else if (quote) { |
| 4157 | /* Within quotes: do not look for |
| 4158 | starting quotes or comments. */ |
| 4159 | if (escape) { |
| 4160 | escape = false; |
| 4161 | } else if (*sptr == '\\') { |
| 4162 | escape = true; |
| 4163 | } |
| 4164 | } else if (*sptr == '"' || *sptr == '`' || *sptr == '\'') { |
| 4165 | /* Starting quote: remember the quote character. */ |
| 4166 | quote = *sptr; |
| 4167 | } else if (*sptr == '#' |
| 4168 | || (sptr[0] == '-' && sptr[1] == '-' |
| 4169 | && sptr[2] == ' ')) { |
| 4170 | for (;;) { |
| 4171 | if (++sptr >= eptr) { |
| 4172 | goto end_of_string; |
| 4173 | } |
| 4174 | |
| 4175 | /* In Unix a newline is 0x0A while in Windows |
| 4176 | it is 0x0D followed by 0x0A */ |
| 4177 | |
| 4178 | switch (*sptr) { |
| 4179 | case (char) 0X0A: |
| 4180 | case (char) 0x0D: |
| 4181 | case '\0': |
| 4182 | goto scan_more; |
| 4183 | } |
| 4184 | } |
| 4185 | } else if (!quote && *sptr == '/' && *(sptr + 1) == '*') { |
| 4186 | sptr += 2; |
| 4187 | for (;;) { |
| 4188 | if (sptr >= eptr) { |
| 4189 | goto end_of_string; |
| 4190 | } |
| 4191 | |
| 4192 | switch (*sptr) { |
| 4193 | case '\0': |
| 4194 | goto scan_more; |
| 4195 | case '*': |
| 4196 | if (sptr[1] == '/') { |
| 4197 | sptr += 2; |
| 4198 | goto scan_more; |
| 4199 | } |
| 4200 | } |
| 4201 | |
| 4202 | sptr++; |
| 4203 | } |
| 4204 | } |
| 4205 | |
| 4206 | *ptr = *sptr; |
| 4207 | |
| 4208 | ptr++; |
| 4209 | sptr++; |
| 4210 | } |
| 4211 | } |
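
/* For example, stripping "CREATE TABLE t (a INT) # trailing comment"
yields "CREATE TABLE t (a INT) ", whereas a '#' or '-- ' sequence
that appears inside a quoted string or identifier is copied
verbatim. */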
| 4212 | |
| 4213 | /*********************************************************************//** |
| 4214 | Finds the highest [number] for foreign key constraints of the table. Looks |
| 4215 | only at the >= 4.0.18-format id's, which are of the form |
| 4216 | databasename/tablename_ibfk_[number]. |
| 4217 | @return highest number, 0 if table has no new format foreign key constraints */ |
| 4218 | ulint |
| 4219 | dict_table_get_highest_foreign_id( |
| 4220 | /*==============================*/ |
| 4221 | dict_table_t* table) /*!< in: table in the dictionary memory cache */ |
| 4222 | { |
| 4223 | dict_foreign_t* foreign; |
| 4224 | char* endp; |
| 4225 | ulint biggest_id = 0; |
| 4226 | ulint id; |
| 4227 | ulint len; |
| 4228 | |
| 4229 | DBUG_ENTER("dict_table_get_highest_foreign_id" ); |
| 4230 | |
| 4231 | ut_a(table); |
| 4232 | |
| 4233 | len = ut_strlen(table->name.m_name); |
| 4234 | |
| 4235 | for (dict_foreign_set::iterator it = table->foreign_set.begin(); |
| 4236 | it != table->foreign_set.end(); |
| 4237 | ++it) { |
| 4238 | char fkid[MAX_TABLE_NAME_LEN+20]; |
| 4239 | foreign = *it; |
| 4240 | |
| 4241 | strcpy(fkid, foreign->id); |
| 4242 | /* Convert foreign key identifier on dictionary memory |
| 4243 | cache to filename charset. */ |
| 4244 | innobase_convert_to_filename_charset( |
| 4245 | strchr(fkid, '/') + 1, |
| 4246 | strchr(foreign->id, '/') + 1, |
| 4247 | MAX_TABLE_NAME_LEN); |
| 4248 | |
| 4249 | if (ut_strlen(fkid) > ((sizeof dict_ibfk) - 1) + len |
| 4250 | && 0 == ut_memcmp(fkid, table->name.m_name, len) |
| 4251 | && 0 == ut_memcmp(fkid + len, |
| 4252 | dict_ibfk, (sizeof dict_ibfk) - 1) |
| 4253 | && fkid[len + ((sizeof dict_ibfk) - 1)] != '0') { |
| 4254 | /* It is of the >= 4.0.18 format */ |
| 4255 | |
| 4256 | id = strtoul(fkid + len |
| 4257 | + ((sizeof dict_ibfk) - 1), |
| 4258 | &endp, 10); |
| 4259 | if (*endp == '\0') { |
| 4260 | ut_a(id != biggest_id); |
| 4261 | |
| 4262 | if (id > biggest_id) { |
| 4263 | biggest_id = id; |
| 4264 | } |
| 4265 | } |
| 4266 | } |
| 4267 | } |
| 4268 | |
| 4269 | DBUG_PRINT("dict_table_get_highest_foreign_id" , |
| 4270 | ("id: " ULINTPF, biggest_id)); |
| 4271 | |
| 4272 | DBUG_RETURN(biggest_id); |
| 4273 | } |
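
/* For example, if test/t currently has the constraint ids
"test/t_ibfk_2", "test/t_ibfk_10" and a user-named "test/fk_custom",
this function returns 10: only ids of the form
databasename/tablename_ibfk_[number] are considered. */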
| 4274 | |
| 4275 | /*********************************************************************//** |
| 4276 | Reports a simple foreign key create clause syntax error. */ |
| 4277 | static |
| 4278 | void |
| 4279 | dict_foreign_report_syntax_err( |
| 4280 | /*===========================*/ |
| 4281 | const char* fmt, /*!< in: syntax err msg */ |
| 4282 | const char* oper, /*!< in: operation */ |
| 4283 | const char* name, /*!< in: table name */ |
| 4284 | const char* start_of_latest_foreign, |
| 4285 | /*!< in: start of the foreign key clause |
| 4286 | in the SQL string */ |
| 4287 | const char* ptr) /*!< in: place of the syntax error */ |
| 4288 | { |
| 4289 | ut_ad(!srv_read_only_mode); |
| 4290 | |
| 4291 | FILE* ef = dict_foreign_err_file; |
| 4292 | |
| 4293 | mutex_enter(&dict_foreign_err_mutex); |
| 4294 | dict_foreign_error_report_low(ef, name); |
| 4295 | fprintf(ef, fmt, oper, name, start_of_latest_foreign, ptr); |
| 4296 | mutex_exit(&dict_foreign_err_mutex); |
| 4297 | } |
| 4298 | |
| 4299 | /*********************************************************************//** |
| 4300 | Push warning message to SQL-layer based on foreign key constraint |
| 4301 | index match error. */ |
| 4302 | static |
| 4303 | void |
| 4304 | dict_foreign_push_index_error( |
| 4305 | /*==========================*/ |
| 4306 | trx_t* trx, /*!< in: trx */ |
| 4307 | const char* operation, /*!< in: operation create or alter |
| 4308 | */ |
| 4309 | const char* create_name, /*!< in: table name in create or |
| 4310 | alter table */ |
| 4311 | const char* latest_foreign, /*!< in: start of latest foreign key |
| 4312 | constraint name */ |
| 4313 | const char** columns, /*!< in: foreign key columns */ |
| 4314 | ulint index_error, /*!< in: error code */ |
| 4315 | ulint err_col, /*!< in: column where error happened |
| 4316 | */ |
| 4317 | dict_index_t* err_index, /*!< in: index where error happened |
| 4318 | */ |
| 4319 | dict_table_t* table, /*!< in: table */ |
| 4320 | FILE* ef) /*!< in: output stream */ |
| 4321 | { |
| 4322 | switch (index_error) { |
| 4323 | case DB_FOREIGN_KEY_INDEX_NOT_FOUND: { |
| 4324 | fprintf(ef, |
| 4325 | "%s table '%s' with foreign key constraint" |
| 4326 | " failed. There is no index in the referenced" |
| 4327 | " table where the referenced columns appear" |
| 4328 | " as the first columns near '%s'.\n" , |
| 4329 | operation, create_name, latest_foreign); |
| 4330 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 4331 | "%s table '%s' with foreign key constraint" |
| 4332 | " failed. There is no index in the referenced" |
| 4333 | " table where the referenced columns appear" |
| 4334 | " as the first columns near '%s'." , |
| 4335 | operation, create_name, latest_foreign); |
| 4336 | break; |
| 4337 | } |
| 4338 | case DB_FOREIGN_KEY_IS_PREFIX_INDEX: { |
| 4339 | fprintf(ef, |
| 4340 | "%s table '%s' with foreign key constraint" |
| 4341 | " failed. There is only prefix index in the referenced" |
| 4342 | " table where the referenced columns appear" |
| 4343 | " as the first columns near '%s'.\n" , |
| 4344 | operation, create_name, latest_foreign); |
| 4345 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 4346 | "%s table '%s' with foreign key constraint" |
| 4347 | " failed. There is only prefix index in the referenced" |
| 4348 | " table where the referenced columns appear" |
| 4349 | " as the first columns near '%s'." , |
| 4350 | operation, create_name, latest_foreign); |
| 4351 | break; |
| 4352 | } |
| 4353 | case DB_FOREIGN_KEY_COL_NOT_NULL: { |
| 4354 | fprintf(ef, |
| 4355 | "%s table %s with foreign key constraint" |
| 4356 | " failed. You have defined a SET NULL condition but " |
| 4357 | "column '%s' on index is defined as NOT NULL near '%s'.\n" , |
| 4358 | operation, create_name, columns[err_col], latest_foreign); |
| 4359 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 4360 | "%s table %s with foreign key constraint" |
| 4361 | " failed. You have defined a SET NULL condition but " |
| 4362 | "column '%s' on index is defined as NOT NULL near '%s'." , |
| 4363 | operation, create_name, columns[err_col], latest_foreign); |
| 4364 | break; |
| 4365 | } |
| 4366 | case DB_FOREIGN_KEY_COLS_NOT_EQUAL: { |
| 4367 | dict_field_t* field; |
| 4368 | const char* col_name; |
| 4369 | field = dict_index_get_nth_field(err_index, err_col); |
| 4370 | |
| 4371 | col_name = field->col->is_virtual() |
| 4372 | ? "(null)" |
| 4373 | : dict_table_get_col_name( |
| 4374 | table, dict_col_get_no(field->col)); |
| 4375 | fprintf(ef, |
| 4376 | "%s table %s with foreign key constraint" |
| 4377 | " failed. Field type or character set for column '%s' " |
| 4378 | "does not mach referenced column '%s' near '%s'.\n" , |
| 4379 | operation, create_name, columns[err_col], col_name, latest_foreign); |
| 4380 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 4381 | "%s table %s with foreign key constraint" |
| 4382 | " failed. Field type or character set for column '%s' " |
| 4383 | "does not mach referenced column '%s' near '%s'." , |
| 4384 | operation, create_name, columns[err_col], col_name, latest_foreign); |
| 4385 | break; |
| 4386 | } |
| 4387 | default: |
| 4388 | ut_error; |
| 4389 | } |
| 4390 | } |
| 4391 | |
| 4392 | /*********************************************************************//** |
| 4393 | Scans a table create SQL string and adds to the data dictionary the foreign key |
| 4394 | constraints declared in the string. This function should be called after the |
| 4395 | indexes for a table have been created. Each foreign key constraint must be |
accompanied by indexes in both participating tables. The indexes are allowed
| 4397 | to contain more fields than mentioned in the constraint. |
| 4398 | @return error code or DB_SUCCESS */ |
| 4399 | static |
| 4400 | dberr_t |
| 4401 | dict_create_foreign_constraints_low( |
| 4402 | trx_t* trx, |
| 4403 | mem_heap_t* heap, |
| 4404 | CHARSET_INFO* cs, |
| 4405 | const char* sql_string, |
| 4406 | const char* name, |
| 4407 | ibool reject_fks) |
| 4408 | { |
| 4409 | dict_table_t* table = NULL; |
| 4410 | dict_table_t* referenced_table = NULL; |
| 4411 | dict_table_t* table_to_alter = NULL; |
| 4412 | dict_table_t* table_to_create = NULL; |
| 4413 | ulint highest_id_so_far = 0; |
| 4414 | ulint number = 1; |
| 4415 | dict_index_t* index = NULL; |
| 4416 | dict_foreign_t* foreign = NULL; |
| 4417 | const char* ptr = sql_string; |
| 4418 | const char* start_of_latest_foreign = sql_string; |
| 4419 | const char* start_of_latest_set = NULL; |
| 4420 | FILE* ef = dict_foreign_err_file; |
| 4421 | ulint index_error = DB_SUCCESS; |
| 4422 | dict_index_t* err_index = NULL; |
| 4423 | ulint err_col; |
| 4424 | const char* constraint_name; |
| 4425 | ibool success; |
| 4426 | dberr_t error; |
| 4427 | const char* ptr1; |
| 4428 | const char* ptr2; |
| 4429 | ulint i; |
| 4430 | ulint j; |
| 4431 | ibool is_on_delete; |
| 4432 | ulint n_on_deletes; |
| 4433 | ulint n_on_updates; |
const dict_col_t* columns[500];
| 4435 | const char* column_names[500]; |
| 4436 | const char* ref_column_names[500]; |
| 4437 | const char* referenced_table_name; |
| 4438 | dict_foreign_set local_fk_set; |
| 4439 | dict_foreign_set_free local_fk_set_free(local_fk_set); |
| 4440 | const char* create_table_name; |
| 4441 | const char* orig; |
| 4442 | char create_name[MAX_TABLE_NAME_LEN + 1]; |
| 4443 | char operation[8]; |
| 4444 | |
| 4445 | ut_ad(!srv_read_only_mode); |
| 4446 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 4447 | |
| 4448 | table = dict_table_get_low(name); |
| 4449 | /* First check if we are actually doing an ALTER TABLE, and in that |
| 4450 | case look for the table being altered */ |
| 4451 | orig = ptr; |
| 4452 | ptr = dict_accept(cs, ptr, "ALTER" , &success); |
| 4453 | |
| 4454 | strcpy((char *)operation, success ? "Alter " : "Create " ); |
| 4455 | |
| 4456 | if (!success) { |
| 4457 | orig = ptr; |
| 4458 | ptr = dict_scan_to(ptr, "CREATE" ); |
| 4459 | ptr = dict_scan_to(ptr, "TABLE" ); |
| 4460 | ptr = dict_accept(cs, ptr, "TABLE" , &success); |
| 4461 | |
| 4462 | if (success) { |
| 4463 | ptr = dict_scan_table_name(cs, ptr, &table_to_create, name, |
| 4464 | &success, heap, &create_table_name); |
| 4465 | } |
| 4466 | |
| 4467 | if (success) { |
| 4468 | char *bufend; |
| 4469 | bufend = innobase_convert_name((char *)create_name, MAX_TABLE_NAME_LEN, |
| 4470 | create_table_name, strlen(create_table_name), |
| 4471 | trx->mysql_thd); |
| 4472 | create_name[bufend-create_name]='\0'; |
| 4473 | ptr = orig; |
| 4474 | } else { |
| 4475 | char *bufend; |
| 4476 | ptr = orig; |
| 4477 | bufend = innobase_convert_name((char *)create_name, MAX_TABLE_NAME_LEN, |
| 4478 | name, strlen(name), trx->mysql_thd); |
| 4479 | create_name[bufend-create_name]='\0'; |
| 4480 | } |
| 4481 | |
| 4482 | goto loop; |
| 4483 | } |
| 4484 | |
| 4485 | if (table == NULL) { |
| 4486 | mutex_enter(&dict_foreign_err_mutex); |
dict_foreign_error_report_low(ef, create_name);
| 4489 | fprintf(ef, "%s table %s with foreign key constraint" |
| 4490 | " failed. Table %s not found from data dictionary." |
| 4491 | " Error close to %s.\n" , |
| 4492 | operation, create_name, create_name, start_of_latest_foreign); |
| 4493 | mutex_exit(&dict_foreign_err_mutex); |
| 4494 | ib_push_warning(trx, DB_ERROR, |
| 4495 | "%s table %s with foreign key constraint" |
| 4496 | " failed. Table %s not found from data dictionary." |
| 4497 | " Error close to %s." , |
| 4498 | operation, create_name, create_name, start_of_latest_foreign); |
| 4499 | |
| 4500 | return(DB_ERROR); |
| 4501 | } |
| 4502 | |
| 4503 | /* If not alter table jump to loop */ |
| 4504 | if (!success) { |
| 4505 | |
| 4506 | goto loop; |
| 4507 | } |
| 4508 | |
| 4509 | orig = ptr; |
| 4510 | ptr = dict_accept(cs, ptr, "TABLE" , &success); |
| 4511 | |
| 4512 | if (!success) { |
| 4513 | |
| 4514 | goto loop; |
| 4515 | } |
| 4516 | |
| 4517 | /* We are doing an ALTER TABLE: scan the table name we are altering */ |
| 4518 | |
| 4519 | orig = ptr; |
| 4520 | ptr = dict_scan_table_name(cs, ptr, &table_to_alter, name, |
| 4521 | &success, heap, &referenced_table_name); |
| 4522 | |
| 4523 | if (table_to_alter) { |
| 4524 | char *bufend; |
| 4525 | bufend = innobase_convert_name((char *)create_name, MAX_TABLE_NAME_LEN, |
| 4526 | table_to_alter->name.m_name, strlen(table_to_alter->name.m_name), |
| 4527 | trx->mysql_thd); |
| 4528 | create_name[bufend-create_name]='\0'; |
| 4529 | } else { |
| 4530 | char *bufend; |
| 4531 | bufend = innobase_convert_name((char *)create_name, MAX_TABLE_NAME_LEN, |
| 4532 | referenced_table_name, strlen(referenced_table_name), |
| 4533 | trx->mysql_thd); |
| 4534 | create_name[bufend-create_name]='\0'; |
| 4535 | |
| 4536 | } |
| 4537 | |
| 4538 | if (!success) { |
ib::error() << "Could not find the table " << create_name
	<< " being " << operation << "near to " << orig;
| 4541 | |
| 4542 | ib_push_warning(trx, DB_ERROR, |
| 4543 | "%s table %s with foreign key constraint" |
| 4544 | " failed. Table %s not found from data dictionary." |
| 4545 | " Error close to %s." , |
| 4546 | operation, create_name, create_name, orig); |
| 4547 | |
| 4548 | return(DB_ERROR); |
| 4549 | } |
| 4550 | |
| 4551 | /* Starting from 4.0.18 and 4.1.2, we generate foreign key id's in the |
| 4552 | format databasename/tablename_ibfk_[number], where [number] is local |
| 4553 | to the table; look for the highest [number] for table_to_alter, so |
| 4554 | that we can assign to new constraints higher numbers. */ |
| 4555 | |
| 4556 | /* If we are altering a temporary table, the table name after ALTER |
| 4557 | TABLE does not correspond to the internal table name, and |
| 4558 | table_to_alter is NULL. TODO: should we fix this somehow? */ |
| 4559 | |
| 4560 | if (table_to_alter == NULL) { |
| 4561 | highest_id_so_far = 0; |
| 4562 | } else { |
| 4563 | highest_id_so_far = dict_table_get_highest_foreign_id( |
| 4564 | table_to_alter); |
| 4565 | } |
| 4566 | |
| 4567 | number = highest_id_so_far + 1; |
| 4568 | /* Scan for foreign key declarations in a loop */ |
| 4569 | loop: |
| 4570 | /* Scan either to "CONSTRAINT" or "FOREIGN", whichever is closer */ |
| 4571 | |
| 4572 | ptr1 = dict_scan_to(ptr, "CONSTRAINT" ); |
| 4573 | ptr2 = dict_scan_to(ptr, "FOREIGN" ); |
| 4574 | |
| 4575 | constraint_name = NULL; |
| 4576 | |
| 4577 | if (ptr1 < ptr2) { |
| 4578 | /* The user may have specified a constraint name. Pick it so |
that we can store 'databasename/constraintname' as the id
of the constraint in the system tables. */
| 4581 | ptr = ptr1; |
| 4582 | |
| 4583 | orig = ptr; |
| 4584 | ptr = dict_accept(cs, ptr, "CONSTRAINT" , &success); |
| 4585 | |
| 4586 | ut_a(success); |
| 4587 | |
| 4588 | if (!my_isspace(cs, *ptr) && *ptr != '"' && *ptr != '`') { |
| 4589 | goto loop; |
| 4590 | } |
| 4591 | |
| 4592 | while (my_isspace(cs, *ptr)) { |
| 4593 | ptr++; |
| 4594 | } |
| 4595 | |
| 4596 | /* read constraint name unless got "CONSTRAINT FOREIGN" */ |
| 4597 | if (ptr != ptr2) { |
| 4598 | ptr = dict_scan_id(cs, ptr, heap, |
| 4599 | &constraint_name, FALSE, FALSE); |
| 4600 | } |
| 4601 | } else { |
| 4602 | ptr = ptr2; |
| 4603 | } |
| 4604 | |
| 4605 | if (*ptr == '\0') { |
| 4606 | /* The proper way to reject foreign keys for temporary |
| 4607 | tables would be to split the lexing and syntactical |
| 4608 | analysis of foreign key clauses from the actual adding |
| 4609 | of them, so that ha_innodb.cc could first parse the SQL |
| 4610 | command, determine if there are any foreign keys, and |
| 4611 | if so, immediately reject the command if the table is a |
| 4612 | temporary one. For now, this kludge will work. */ |
| 4613 | if (reject_fks && !local_fk_set.empty()) { |
| 4614 | mutex_enter(&dict_foreign_err_mutex); |
| 4615 | dict_foreign_error_report_low(ef, create_name); |
| 4616 | fprintf(ef, "%s table %s with foreign key constraint" |
| 4617 | " failed. Temporary tables can't have foreign key constraints." |
| 4618 | " Error close to %s.\n" , |
| 4619 | operation, create_name, start_of_latest_foreign); |
| 4620 | mutex_exit(&dict_foreign_err_mutex); |
| 4621 | |
| 4622 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 4623 | "%s table %s with foreign key constraint" |
| 4624 | " failed. Temporary tables can't have foreign key constraints." |
| 4625 | " Error close to %s." , |
| 4626 | operation, create_name, start_of_latest_foreign); |
| 4627 | |
| 4628 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 4629 | } |
| 4630 | |
| 4631 | if (dict_foreigns_has_s_base_col(local_fk_set, table)) { |
| 4632 | return(DB_NO_FK_ON_S_BASE_COL); |
| 4633 | } |
| 4634 | |
| 4635 | /**********************************************************/ |
| 4636 | /* The following call adds the foreign key constraints |
| 4637 | to the data dictionary system tables on disk */ |
| 4638 | trx->op_info = "adding foreign keys" ; |
| 4639 | |
| 4640 | trx_start_if_not_started_xa(trx, true); |
| 4641 | |
| 4642 | trx_set_dict_operation(trx, TRX_DICT_OP_TABLE); |
| 4643 | |
| 4644 | error = dict_create_add_foreigns_to_dictionary( |
| 4645 | local_fk_set, table, trx); |
| 4646 | |
| 4647 | if (error == DB_SUCCESS) { |
| 4648 | |
| 4649 | table->foreign_set.insert(local_fk_set.begin(), |
| 4650 | local_fk_set.end()); |
| 4651 | std::for_each(local_fk_set.begin(), |
| 4652 | local_fk_set.end(), |
| 4653 | dict_foreign_add_to_referenced_table()); |
| 4654 | local_fk_set.clear(); |
| 4655 | |
| 4656 | dict_mem_table_fill_foreign_vcol_set(table); |
| 4657 | } |
| 4658 | return(error); |
| 4659 | } |
| 4660 | |
| 4661 | start_of_latest_foreign = ptr; |
| 4662 | |
| 4663 | orig = ptr; |
| 4664 | ptr = dict_accept(cs, ptr, "FOREIGN" , &success); |
| 4665 | |
| 4666 | if (!success) { |
| 4667 | goto loop; |
| 4668 | } |
| 4669 | |
| 4670 | if (!my_isspace(cs, *ptr)) { |
| 4671 | goto loop; |
| 4672 | } |
| 4673 | |
| 4674 | orig = ptr; |
| 4675 | ptr = dict_accept(cs, ptr, "KEY" , &success); |
| 4676 | |
| 4677 | if (!success) { |
| 4678 | goto loop; |
| 4679 | } |
| 4680 | |
| 4681 | if (my_isspace(cs, *ptr)) { |
| 4682 | ptr1 = dict_accept(cs, ptr, "IF" , &success); |
| 4683 | |
| 4684 | if (success) { |
| 4685 | if (!my_isspace(cs, *ptr1)) { |
| 4686 | goto loop; |
| 4687 | } |
| 4688 | ptr1 = dict_accept(cs, ptr1, "NOT" , &success); |
| 4689 | if (!success) { |
| 4690 | goto loop; |
| 4691 | } |
| 4692 | ptr1 = dict_accept(cs, ptr1, "EXISTS" , &success); |
| 4693 | if (!success) { |
| 4694 | goto loop; |
| 4695 | } |
| 4696 | ptr = ptr1; |
| 4697 | } |
| 4698 | } |
| 4699 | |
| 4700 | orig = ptr; |
| 4701 | ptr = dict_accept(cs, ptr, "(" , &success); |
| 4702 | |
| 4703 | if (!success) { |
| 4704 | if (constraint_name) { |
| 4705 | /* MySQL also allows an index id before the '('; we
| 4706 | skip it */ |
| 4707 | ptr = dict_skip_word(cs, ptr, &success); |
| 4708 | if (!success) { |
| 4709 | dict_foreign_report_syntax_err( |
| 4710 | "%s table %s with foreign key constraint" |
| 4711 | " failed. Parse error in '%s'" |
| 4712 | " near '%s'.\n" , |
| 4713 | operation, create_name, start_of_latest_foreign, orig); |
| 4714 | |
| 4715 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 4716 | "%s table %s with foreign key constraint" |
| 4717 | " failed. Parse error in '%s'" |
| 4718 | " near '%s'." , |
| 4719 | operation, create_name, start_of_latest_foreign, orig); |
| 4720 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 4721 | } |
| 4722 | } else { |
| 4723 | while (my_isspace(cs, *ptr)) { |
| 4724 | ptr++; |
| 4725 | } |
| 4726 | |
| 4727 | ptr = dict_scan_id(cs, ptr, heap, |
| 4728 | &constraint_name, FALSE, FALSE); |
| 4729 | } |
| 4730 | |
| 4731 | ptr = dict_accept(cs, ptr, "(" , &success); |
| 4732 | |
| 4733 | if (!success) { |
| 4734 | /* We do not flag a syntax error here because in an |
| 4735 | ALTER TABLE we may also have DROP FOREIGN KEY abc */ |
| 4736 | |
| 4737 | goto loop; |
| 4738 | } |
| 4739 | } |
| 4740 | |
| 4741 | i = 0; |
| 4742 | |
| 4743 | /* Scan the columns in the first list */ |
| 4744 | col_loop1: |
| 4745 | ut_a(i < (sizeof column_names) / sizeof *column_names); |
| 4746 | orig = ptr; |
| 4747 | ptr = dict_scan_col(cs, ptr, &success, table, columns + i, |
| 4748 | heap, column_names + i); |
| 4749 | if (!success) { |
| 4750 | mutex_enter(&dict_foreign_err_mutex); |
| 4751 | dict_foreign_error_report_low(ef, create_name); |
| 4752 | fprintf(ef, |
| 4753 | "%s table %s with foreign key constraint" |
| 4754 | " failed. Parse error in '%s'" |
| 4755 | " near '%s'.\n" , |
| 4756 | operation, create_name, start_of_latest_foreign, orig); |
| 4757 | |
| 4758 | mutex_exit(&dict_foreign_err_mutex); |
| 4759 | |
| 4760 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 4761 | "%s table %s with foreign key constraint" |
| 4762 | " failed. Parse error in '%s'" |
| 4763 | " near '%s'." , |
| 4764 | operation, create_name, start_of_latest_foreign, orig); |
| 4765 | |
| 4766 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 4767 | } |
| 4768 | |
| 4769 | i++; |
| 4770 | |
| 4771 | ptr = dict_accept(cs, ptr, "," , &success); |
| 4772 | |
| 4773 | if (success) { |
| 4774 | goto col_loop1; |
| 4775 | } |
| 4776 | |
| 4777 | orig = ptr; |
| 4778 | ptr = dict_accept(cs, ptr, ")" , &success); |
| 4779 | |
| 4780 | if (!success) { |
| 4781 | dict_foreign_report_syntax_err( |
| 4782 | "%s table %s with foreign key constraint" |
| 4783 | " failed. Parse error in '%s'" |
| 4784 | " near '%s'.\n" , |
| 4785 | operation, create_name, start_of_latest_foreign, orig); |
| 4786 | |
| 4787 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 4788 | "%s table %s with foreign key constraint" |
| 4789 | " failed. Parse error in '%s'" |
| 4790 | " near '%s'." , |
| 4791 | operation, create_name, start_of_latest_foreign, orig); |
| 4792 | |
| 4793 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 4794 | } |
| 4795 | |
| 4796 | /* Try to find an index which contains the columns |
| 4797 | as the first fields and in the right order. There is |
| 4798 | no need to check column type match (on types_idx), since |
| 4799 | the referenced table can be NULL if foreign_key_checks is |
| 4800 | set to 0 */ |
| 4801 | |
| 4802 | index = dict_foreign_find_index( |
| 4803 | table, NULL, column_names, i, |
| 4804 | NULL, TRUE, FALSE, &index_error, &err_col, &err_index); |
| 4805 | |
| 4806 | if (!index) { |
| 4807 | mutex_enter(&dict_foreign_err_mutex); |
| 4808 | dict_foreign_error_report_low(ef, create_name); |
| 4809 | fputs("There is no index in table " , ef); |
| 4810 | ut_print_name(ef, NULL, create_name); |
| 4811 | fprintf(ef, " where the columns appear\n" |
| 4812 | "as the first columns. Constraint:\n%s\n%s" , |
| 4813 | start_of_latest_foreign, |
| 4814 | FOREIGN_KEY_CONSTRAINTS_MSG); |
| 4815 | dict_foreign_push_index_error(trx, operation, create_name, start_of_latest_foreign, |
| 4816 | column_names, index_error, err_col, err_index, table, ef); |
| 4817 | |
| 4818 | mutex_exit(&dict_foreign_err_mutex); |
| 4819 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 4820 | } |
| 4821 | |
| 4822 | orig = ptr; |
| 4823 | ptr = dict_accept(cs, ptr, "REFERENCES" , &success); |
| 4824 | |
| 4825 | if (!success || !my_isspace(cs, *ptr)) { |
| 4826 | dict_foreign_report_syntax_err( |
| 4827 | "%s table %s with foreign key constraint" |
| 4828 | " failed. Parse error in '%s'" |
| 4829 | " near '%s'.\n" , |
| 4830 | operation, create_name, start_of_latest_foreign, orig); |
| 4831 | |
| 4832 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 4833 | "%s table %s with foreign key constraint" |
| 4834 | " failed. Parse error in '%s'" |
| 4835 | " near '%s'." , |
| 4836 | operation, create_name, start_of_latest_foreign, orig); |
| 4837 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 4838 | } |
| 4839 | |
| 4840 | /* Don't allow foreign keys on partitioned tables yet. */ |
| 4841 | ptr1 = dict_scan_to(ptr, "PARTITION" ); |
| 4842 | if (ptr1) { |
| 4843 | ptr1 = dict_accept(cs, ptr1, "PARTITION" , &success); |
| 4844 | if (success && my_isspace(cs, *ptr1)) { |
| 4845 | ptr2 = dict_accept(cs, ptr1, "BY" , &success); |
| 4846 | if (success) { |
| 4847 | my_error(ER_FOREIGN_KEY_ON_PARTITIONED,MYF(0)); |
| 4848 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 4849 | } |
| 4850 | } |
| 4851 | } |
| 4852 | if (dict_table_is_partition(table)) { |
| 4853 | my_error(ER_FOREIGN_KEY_ON_PARTITIONED,MYF(0)); |
| 4854 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 4855 | } |
| 4856 | |
| 4857 | /* Let us create a constraint struct */ |
| 4858 | |
| 4859 | foreign = dict_mem_foreign_create(); |
| 4860 | |
| 4861 | if (constraint_name) { |
| 4862 | ulint db_len; |
| 4863 | |
| 4864 | /* Prepend 'databasename/' to the constraint name specified
| 4865 | by the user: we treat the constraint as belonging to the
| 4866 | same MySQL 'database' as the table itself. We store the name
| 4867 | in foreign->id. */
| 4868 | |
| 4869 | db_len = dict_get_db_name_len(table->name.m_name); |
| 4870 | |
| 4871 | foreign->id = static_cast<char*>(mem_heap_alloc( |
| 4872 | foreign->heap, db_len + strlen(constraint_name) + 2)); |
| 4873 | |
| 4874 | ut_memcpy(foreign->id, table->name.m_name, db_len); |
| 4875 | foreign->id[db_len] = '/'; |
| 4876 | strcpy(foreign->id + db_len + 1, constraint_name); |
| 4877 | } |
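| | /* The id stored in foreign->id thus has the form
| | 'databasename/constraintname', e.g. "test/fk_1" (illustrative). If the
| | user gave no name, dict_create_add_foreign_id() below generates one,
| | typically of the form "tablename_ibfk_N". */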
| 4878 | |
| 4879 | if (foreign->id == NULL) { |
| 4880 | error = dict_create_add_foreign_id( |
| 4881 | &number, table->name.m_name, foreign); |
| 4882 | if (error != DB_SUCCESS) { |
| 4883 | dict_foreign_free(foreign); |
| 4884 | return(error); |
| 4885 | } |
| 4886 | } |
| 4887 | |
| 4888 | std::pair<dict_foreign_set::iterator, bool> ret |
| 4889 | = local_fk_set.insert(foreign); |
| 4890 | |
| 4891 | if (!ret.second) { |
| 4892 | /* A duplicate foreign key name has been found */ |
| 4893 | dict_foreign_free(foreign); |
| 4894 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 4895 | } |
| 4896 | |
| 4897 | foreign->foreign_table = table; |
| 4898 | foreign->foreign_table_name = mem_heap_strdup( |
| 4899 | foreign->heap, table->name.m_name); |
| 4900 | dict_mem_foreign_table_name_lookup_set(foreign, TRUE); |
| 4901 | |
| 4902 | foreign->foreign_index = index; |
| 4903 | foreign->n_fields = (unsigned int) i; |
| 4904 | |
| 4905 | foreign->foreign_col_names = static_cast<const char**>( |
| 4906 | mem_heap_alloc(foreign->heap, i * sizeof(void*))); |
| 4907 | |
| 4908 | for (i = 0; i < foreign->n_fields; i++) { |
| 4909 | foreign->foreign_col_names[i] = mem_heap_strdup( |
| 4910 | foreign->heap, column_names[i]); |
| 4911 | } |
| 4912 | |
| 4913 | ptr = dict_scan_table_name(cs, ptr, &referenced_table, name, |
| 4914 | &success, heap, &referenced_table_name); |
| 4915 | |
| 4916 | /* Note that referenced_table can be NULL if the user has suppressed |
| 4917 | checking of foreign key constraints! */ |
| 4918 | |
| 4919 | if (!success || (!referenced_table && trx->check_foreigns)) { |
| 4920 | char buf[MAX_TABLE_NAME_LEN + 1] = "" ; |
| 4921 | char* bufend; |
| 4922 | |
| 4923 | bufend = innobase_convert_name(buf, MAX_TABLE_NAME_LEN, |
| 4924 | referenced_table_name, strlen(referenced_table_name), |
| 4925 | trx->mysql_thd); |
| 4926 | buf[bufend - buf] = '\0'; |
| 4927 | |
| 4928 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 4929 | "%s table %s with foreign key constraint failed. Referenced table %s not found in the data dictionary " |
| 4930 | "near '%s'." , |
| 4931 | operation, create_name, buf, start_of_latest_foreign); |
| 4932 | mutex_enter(&dict_foreign_err_mutex); |
| 4933 | dict_foreign_error_report_low(ef, create_name); |
| 4934 | fprintf(ef, |
| 4935 | "%s table %s with foreign key constraint failed. Referenced table %s not found in the data dictionary " |
| 4936 | "near '%s'.\n" , |
| 4937 | operation, create_name, buf, start_of_latest_foreign); |
| 4938 | |
| 4939 | mutex_exit(&dict_foreign_err_mutex); |
| 4940 | |
| 4941 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 4942 | } |
| 4943 | |
| 4944 | /* Don't allow foreign keys on partitioned tables yet. */ |
| 4945 | if (referenced_table && dict_table_is_partition(referenced_table)) { |
| 4946 | /* How could the referenced table be a partition? */
| 4947 | ut_ad(0); |
| 4948 | my_error(ER_FOREIGN_KEY_ON_PARTITIONED,MYF(0)); |
| 4949 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 4950 | } |
| 4951 | |
| 4952 | ptr = dict_accept(cs, ptr, "(" , &success); |
| 4953 | |
| 4954 | if (!success) { |
| 4955 | dict_foreign_report_syntax_err( |
| 4956 | "%s table %s with foreign key constraint" |
| 4957 | " failed. Parse error in '%s'" |
| 4958 | " near '%s'.\n" , |
| 4959 | operation, create_name, start_of_latest_foreign, orig); |
| 4960 | |
| 4961 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 4962 | "%s table %s with foreign key constraint" |
| 4963 | " failed. Parse error in '%s'" |
| 4964 | " near '%s'." , |
| 4965 | operation, create_name, start_of_latest_foreign, orig); |
| 4966 | |
| 4967 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 4968 | } |
| 4969 | |
| 4970 | /* Scan the columns in the second list */ |
| 4971 | i = 0; |
| 4972 | |
| 4973 | col_loop2: |
| 4974 | orig = ptr; |
| 4975 | ptr = dict_scan_col(cs, ptr, &success, referenced_table, columns + i, |
| 4976 | heap, ref_column_names + i); |
| 4977 | i++; |
| 4978 | |
| 4979 | if (!success) { |
| 4980 | |
| 4981 | mutex_enter(&dict_foreign_err_mutex); |
| 4982 | dict_foreign_error_report_low(ef, create_name); |
| 4983 | fprintf(ef, |
| 4984 | "%s table %s with foreign key constraint" |
| 4985 | " failed. Parse error in '%s'" |
| 4986 | " near '%s'.\n" , |
| 4987 | operation, create_name, start_of_latest_foreign, orig); |
| 4988 | mutex_exit(&dict_foreign_err_mutex); |
| 4989 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 4990 | "%s table %s with foreign key constraint" |
| 4991 | " failed. Parse error in '%s'" |
| 4992 | " near '%s'." , |
| 4993 | operation, create_name, start_of_latest_foreign, orig); |
| 4994 | |
| 4995 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 4996 | } |
| 4997 | |
| 4998 | orig = ptr; |
| 4999 | ptr = dict_accept(cs, ptr, "," , &success); |
| 5000 | |
| 5001 | if (success) { |
| 5002 | goto col_loop2; |
| 5003 | } |
| 5004 | |
| 5005 | orig = ptr; |
| 5006 | ptr = dict_accept(cs, ptr, ")" , &success); |
| 5007 | |
| 5008 | if (!success || foreign->n_fields != i) { |
| 5009 | |
| 5010 | dict_foreign_report_syntax_err( |
| 5011 | "%s table %s with foreign key constraint" |
| 5012 | " failed. Parse error in '%s' near '%s'. Referencing column count does not match referenced column count.\n" , |
| 5013 | operation, create_name, start_of_latest_foreign, orig); |
| 5014 | |
| 5015 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 5016 | "%s table %s with foreign key constraint" |
| 5017 | " failed. Parse error in '%s' near '%s'. Referencing column count %d does not match referenced column count %d.\n" , |
| 5018 | operation, create_name, start_of_latest_foreign, orig, i, foreign->n_fields); |
| 5019 | |
| 5020 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 5021 | } |
| 5022 | |
| 5023 | n_on_deletes = 0; |
| 5024 | n_on_updates = 0; |
| 5025 | |
| 5026 | scan_on_conditions: |
| 5027 | /* Loop here as long as we can find ON ... conditions */ |
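| | /* The clauses accepted below are:
| | ON { DELETE | UPDATE } { RESTRICT | CASCADE | NO ACTION | SET NULL } */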
| 5028 | |
| 5029 | start_of_latest_set = ptr; |
| 5030 | ptr = dict_accept(cs, ptr, "ON" , &success); |
| 5031 | |
| 5032 | if (!success) { |
| 5033 | |
| 5034 | goto try_find_index; |
| 5035 | } |
| 5036 | |
| 5037 | orig = ptr; |
| 5038 | ptr = dict_accept(cs, ptr, "DELETE" , &success); |
| 5039 | |
| 5040 | if (!success) { |
| 5041 | orig = ptr; |
| 5042 | ptr = dict_accept(cs, ptr, "UPDATE" , &success); |
| 5043 | |
| 5044 | if (!success) { |
| 5045 | |
| 5046 | dict_foreign_report_syntax_err( |
| 5047 | "%s table %s with foreign key constraint" |
| 5048 | " failed. Parse error in '%s'" |
| 5049 | " near '%s'.\n" , |
| 5050 | operation, create_name, start_of_latest_foreign, start_of_latest_set); |
| 5051 | |
| 5052 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 5053 | "%s table %s with foreign key constraint" |
| 5054 | " failed. Parse error in '%s'" |
| 5055 | " near '%s'." , |
| 5056 | operation, create_name, start_of_latest_foreign, start_of_latest_set); |
| 5057 | |
| 5058 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 5059 | } |
| 5060 | |
| 5061 | is_on_delete = FALSE; |
| 5062 | n_on_updates++; |
| 5063 | } else { |
| 5064 | is_on_delete = TRUE; |
| 5065 | n_on_deletes++; |
| 5066 | } |
| 5067 | |
| 5068 | orig = ptr; |
| 5069 | ptr = dict_accept(cs, ptr, "RESTRICT" , &success); |
| 5070 | |
| 5071 | if (success) { |
| 5072 | goto scan_on_conditions; |
| 5073 | } |
| 5074 | |
| 5075 | orig = ptr; |
| 5076 | ptr = dict_accept(cs, ptr, "CASCADE" , &success); |
| 5077 | |
| 5078 | if (success) { |
| 5079 | if (is_on_delete) { |
| 5080 | foreign->type |= DICT_FOREIGN_ON_DELETE_CASCADE; |
| 5081 | } else { |
| 5082 | foreign->type |= DICT_FOREIGN_ON_UPDATE_CASCADE; |
| 5083 | } |
| 5084 | |
| 5085 | goto scan_on_conditions; |
| 5086 | } |
| 5087 | |
| 5088 | orig = ptr; |
| 5089 | ptr = dict_accept(cs, ptr, "NO" , &success); |
| 5090 | |
| 5091 | if (success) { |
| 5092 | orig = ptr; |
| 5093 | ptr = dict_accept(cs, ptr, "ACTION" , &success); |
| 5094 | |
| 5095 | if (!success) { |
| 5096 | dict_foreign_report_syntax_err( |
| 5097 | "%s table %s with foreign key constraint" |
| 5098 | " failed. Parse error in '%s'" |
| 5099 | " near '%s'.\n" , |
| 5100 | operation, create_name, start_of_latest_foreign, start_of_latest_set); |
| 5101 | |
| 5102 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 5103 | "%s table %s with foreign key constraint" |
| 5104 | " failed. Parse error in '%s'" |
| 5105 | " near '%s'." , |
| 5106 | operation, create_name, start_of_latest_foreign, start_of_latest_set); |
| 5107 | |
| 5108 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 5109 | } |
| 5110 | |
| 5111 | if (is_on_delete) { |
| 5112 | foreign->type |= DICT_FOREIGN_ON_DELETE_NO_ACTION; |
| 5113 | } else { |
| 5114 | foreign->type |= DICT_FOREIGN_ON_UPDATE_NO_ACTION; |
| 5115 | } |
| 5116 | |
| 5117 | goto scan_on_conditions; |
| 5118 | } |
| 5119 | |
| 5120 | orig = ptr; |
| 5121 | ptr = dict_accept(cs, ptr, "SET" , &success); |
| 5122 | |
| 5123 | if (!success) { |
| 5124 | dict_foreign_report_syntax_err( |
| 5125 | "%s table %s with foreign key constraint" |
| 5126 | " failed. Parse error in '%s'" |
| 5127 | " near '%s'.\n" , |
| 5128 | operation, create_name, start_of_latest_foreign, start_of_latest_set); |
| 5129 | |
| 5130 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 5131 | "%s table %s with foreign key constraint" |
| 5132 | " failed. Parse error in '%s'" |
| 5133 | " near '%s'." , |
| 5134 | operation, create_name, start_of_latest_foreign, start_of_latest_set); |
| 5135 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 5136 | } |
| 5137 | |
| 5138 | orig = ptr; |
| 5139 | ptr = dict_accept(cs, ptr, "NULL" , &success); |
| 5140 | |
| 5141 | if (!success) { |
| 5142 | dict_foreign_report_syntax_err( |
| 5143 | "%s table %s with foreign key constraint" |
| 5144 | " failed. Parse error in '%s'" |
| 5145 | " near '%s'.\n" , |
| 5146 | operation, create_name, start_of_latest_foreign, start_of_latest_set); |
| 5147 | |
| 5148 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 5149 | "%s table %s with foreign key constraint" |
| 5150 | " failed. Parse error in '%s'" |
| 5151 | " near '%s'." , |
| 5152 | operation, create_name, start_of_latest_foreign, start_of_latest_set); |
| 5153 | |
| 5154 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 5155 | } |
| 5156 | |
| 5157 | for (j = 0; j < foreign->n_fields; j++) { |
| 5158 | if ((dict_index_get_nth_col(foreign->foreign_index, j)->prtype) |
| 5159 | & DATA_NOT_NULL) { |
| 5160 | const dict_col_t* col |
| 5161 | = dict_index_get_nth_col(foreign->foreign_index, j); |
| 5162 | const char* col_name = dict_table_get_col_name(foreign->foreign_index->table, |
| 5163 | dict_col_get_no(col)); |
| 5164 | |
| 5165 | /* It is not sensible to define SET NULL |
| 5166 | if the column is not allowed to be NULL! */ |
| 5167 | |
| 5168 | mutex_enter(&dict_foreign_err_mutex); |
| 5169 | dict_foreign_error_report_low(ef, create_name); |
| 5170 | fprintf(ef, |
| 5171 | "%s table %s with foreign key constraint" |
| 5172 | " failed. You have defined a SET NULL condition but column '%s' is defined as NOT NULL" |
| 5173 | " in '%s' near '%s'.\n" , |
| 5174 | operation, create_name, col_name, start_of_latest_foreign, start_of_latest_set); |
| 5175 | mutex_exit(&dict_foreign_err_mutex); |
| 5176 | |
| 5177 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 5178 | "%s table %s with foreign key constraint" |
| 5179 | " failed. You have defined a SET NULL condition but column '%s' is defined as NOT NULL" |
| 5180 | " in '%s' near '%s'." , |
| 5181 | operation, create_name, col_name, start_of_latest_foreign, start_of_latest_set); |
| 5182 | |
| 5183 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 5184 | } |
| 5185 | } |
| 5186 | |
| 5187 | if (is_on_delete) { |
| 5188 | foreign->type |= DICT_FOREIGN_ON_DELETE_SET_NULL; |
| 5189 | } else { |
| 5190 | foreign->type |= DICT_FOREIGN_ON_UPDATE_SET_NULL; |
| 5191 | } |
| 5192 | |
| 5193 | goto scan_on_conditions; |
| 5194 | |
| 5195 | try_find_index: |
| 5196 | if (n_on_deletes > 1 || n_on_updates > 1) { |
| 5197 | /* It is an error to define more than 1 action */ |
| 5198 | |
| 5199 | mutex_enter(&dict_foreign_err_mutex); |
| 5200 | dict_foreign_error_report_low(ef, create_name); |
| 5201 | fprintf(ef, |
| 5202 | "%s table %s with foreign key constraint" |
| 5203 | " failed. You have more than one on delete or on update clause" |
| 5204 | " in '%s' near '%s'.\n" , |
| 5205 | operation, create_name, start_of_latest_foreign, start_of_latest_set); |
| 5206 | mutex_exit(&dict_foreign_err_mutex); |
| 5207 | |
| 5208 | ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT, |
| 5209 | "%s table %s with foreign key constraint" |
| 5210 | " failed. You have more than one on delete or on update clause" |
| 5211 | " in '%s' near '%s'." , |
| 5212 | operation, create_name, start_of_latest_foreign, start_of_latest_set); |
| 5213 | |
| 5214 | dict_foreign_free(foreign); |
| 5215 | |
| 5216 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 5217 | } |
| 5218 | |
| 5219 | /* Try to find an index which contains the columns as the first fields
| 5220 | and in the right order, and whose column types are the same as in
| 5221 | foreign->foreign_index */
| 5222 | |
| 5223 | if (referenced_table) { |
| 5224 | index = dict_foreign_find_index(referenced_table, NULL, |
| 5225 | ref_column_names, i, |
| 5226 | foreign->foreign_index, |
| 5227 | TRUE, FALSE, &index_error, &err_col, &err_index); |
| 5228 | |
| 5229 | if (!index) { |
| 5230 | mutex_enter(&dict_foreign_err_mutex); |
| 5231 | dict_foreign_error_report_low(ef, create_name); |
| 5232 | fprintf(ef, "%s:\n" |
| 5233 | "Cannot find an index in the" |
| 5234 | " referenced table where the\n" |
| 5235 | "referenced columns appear as the" |
| 5236 | " first columns, or column types\n" |
| 5237 | "in the table and the referenced table" |
| 5238 | " do not match for constraint.\n" |
| 5239 | "Note that the internal storage type of" |
| 5240 | " ENUM and SET changed in\n" |
| 5241 | "tables created with >= InnoDB-4.1.12," |
| 5242 | " and such columns in old tables\n" |
| 5243 | "cannot be referenced by such columns" |
| 5244 | " in new tables.\n%s\n" , |
| 5245 | start_of_latest_foreign, |
| 5246 | FOREIGN_KEY_CONSTRAINTS_MSG); |
| 5247 | |
| 5248 | dict_foreign_push_index_error(trx, operation, create_name, start_of_latest_foreign, |
| 5249 | column_names, index_error, err_col, err_index, referenced_table, ef); |
| 5250 | |
| 5251 | mutex_exit(&dict_foreign_err_mutex); |
| 5252 | |
| 5253 | return(DB_CANNOT_ADD_CONSTRAINT); |
| 5254 | } |
| 5255 | } else { |
| 5256 | ut_a(trx->check_foreigns == FALSE); |
| 5257 | index = NULL; |
| 5258 | } |
| 5259 | |
| 5260 | foreign->referenced_index = index; |
| 5261 | foreign->referenced_table = referenced_table; |
| 5262 | |
| 5263 | foreign->referenced_table_name = mem_heap_strdup( |
| 5264 | foreign->heap, referenced_table_name); |
| 5265 | dict_mem_referenced_table_name_lookup_set(foreign, TRUE); |
| 5266 | |
| 5267 | foreign->referenced_col_names = static_cast<const char**>( |
| 5268 | mem_heap_alloc(foreign->heap, i * sizeof(void*))); |
| 5269 | |
| 5270 | for (i = 0; i < foreign->n_fields; i++) { |
| 5271 | foreign->referenced_col_names[i] |
| 5272 | = mem_heap_strdup(foreign->heap, ref_column_names[i]); |
| 5273 | } |
| 5274 | |
| 5275 | goto loop; |
| 5276 | } |
| 5277 | |
| 5278 | /** Scans a table create SQL string and adds to the data dictionary |
| 5279 | the foreign key constraints declared in the string. This function |
| 5280 | should be called after the indexes for a table have been created. |
| 5281 | Each foreign key constraint must be accompanied by indexes in
| 5282 | both participating tables. The indexes are allowed to contain more
| 5283 | fields than mentioned in the constraint. |
| 5284 | |
| 5285 | @param[in] trx transaction |
| 5286 | @param[in] sql_string table create statement where |
| 5287 | foreign keys are declared like: |
| 5288 | FOREIGN KEY (a, b) REFERENCES table2(c, d), |
| 5289 | table2 can also be written with the database
| 5290 | name before it: test.table2; the default
| 5291 | database is the database of the parameter name
| 5292 | @param[in] sql_length length of sql_string |
| 5293 | @param[in] name table full name in normalized form |
| 5294 | @param[in] reject_fks if TRUE, fail with error code |
| 5295 | DB_CANNOT_ADD_CONSTRAINT if any |
| 5296 | foreign keys are found. |
| 5297 | @return error code or DB_SUCCESS */ |
| 5298 | dberr_t |
| 5299 | dict_create_foreign_constraints( |
| 5300 | trx_t* trx, |
| 5301 | const char* sql_string, |
| 5302 | size_t sql_length, |
| 5303 | const char* name, |
| 5304 | ibool reject_fks) |
| 5305 | { |
| 5306 | char* str; |
| 5307 | dberr_t err; |
| 5308 | mem_heap_t* heap; |
| 5309 | |
| 5310 | ut_a(trx); |
| 5311 | ut_a(trx->mysql_thd); |
| 5312 | |
| 5313 | str = dict_strip_comments(sql_string, sql_length); |
| 5314 | heap = mem_heap_create(10000); |
| 5315 | |
| 5316 | err = dict_create_foreign_constraints_low( |
| 5317 | trx, heap, innobase_get_charset(trx->mysql_thd), |
| 5318 | str, name, reject_fks); |
| 5319 | |
| 5320 | mem_heap_free(heap); |
| 5321 | ut_free(str); |
| 5322 | |
| 5323 | return(err); |
| 5324 | } |
| 5325 | |
| 5326 | /**********************************************************************//** |
| 5327 | Parses the CONSTRAINT ids to be dropped in an ALTER TABLE statement.
| 5328 | @return DB_SUCCESS, or DB_CANNOT_DROP_CONSTRAINT if there is a syntax
| 5329 | error or the constraint id does not match */
| 5330 | dberr_t |
| 5331 | dict_foreign_parse_drop_constraints( |
| 5332 | /*================================*/ |
| 5333 | mem_heap_t* heap, /*!< in: heap from which we can |
| 5334 | allocate memory */ |
| 5335 | trx_t* trx, /*!< in: transaction */ |
| 5336 | dict_table_t* table, /*!< in: table */ |
| 5337 | ulint* n, /*!< out: number of constraints |
| 5338 | to drop */ |
| 5339 | const char*** constraints_to_drop) /*!< out: ids of the
| 5340 | constraints to drop */ |
| 5341 | { |
| 5342 | ibool success; |
| 5343 | char* str; |
| 5344 | size_t len; |
| 5345 | const char* ptr; |
| 5346 | const char* ptr1; |
| 5347 | const char* id; |
| 5348 | CHARSET_INFO* cs; |
| 5349 | |
| 5350 | ut_a(trx); |
| 5351 | ut_a(trx->mysql_thd); |
| 5352 | |
| 5353 | cs = innobase_get_charset(trx->mysql_thd); |
| 5354 | |
| 5355 | *n = 0; |
| 5356 | |
| 5357 | *constraints_to_drop = static_cast<const char**>( |
| 5358 | mem_heap_alloc(heap, 1000 * sizeof(char*))); |
| 5359 | |
| 5360 | ptr = innobase_get_stmt_unsafe(trx->mysql_thd, &len); |
| 5361 | |
| 5362 | str = dict_strip_comments(ptr, len); |
| 5363 | |
| 5364 | ptr = str; |
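| | /* Example (illustrative): for "ALTER TABLE t DROP FOREIGN KEY fk_1",
| | the loop below collects "fk_1" into constraints_to_drop. */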
| 5365 | |
| 5366 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 5367 | loop: |
| 5368 | ptr = dict_scan_to(ptr, "DROP" ); |
| 5369 | |
| 5370 | if (*ptr == '\0') { |
| 5371 | ut_free(str); |
| 5372 | |
| 5373 | return(DB_SUCCESS); |
| 5374 | } |
| 5375 | |
| 5376 | ptr = dict_accept(cs, ptr, "DROP" , &success); |
| 5377 | |
| 5378 | if (!my_isspace(cs, *ptr)) { |
| 5379 | |
| 5380 | goto loop; |
| 5381 | } |
| 5382 | |
| 5383 | ptr = dict_accept(cs, ptr, "FOREIGN" , &success); |
| 5384 | |
| 5385 | if (!success || !my_isspace(cs, *ptr)) { |
| 5386 | |
| 5387 | goto loop; |
| 5388 | } |
| 5389 | |
| 5390 | ptr = dict_accept(cs, ptr, "KEY" , &success); |
| 5391 | |
| 5392 | if (!success) { |
| 5393 | |
| 5394 | goto syntax_error; |
| 5395 | } |
| 5396 | |
| 5397 | ptr1 = dict_accept(cs, ptr, "IF" , &success); |
| 5398 | |
| 5399 | if (success && my_isspace(cs, *ptr1)) { |
| 5400 | ptr1 = dict_accept(cs, ptr1, "EXISTS" , &success); |
| 5401 | if (success) { |
| 5402 | ptr = ptr1; |
| 5403 | } |
| 5404 | } |
| 5405 | |
| 5406 | ptr = dict_scan_id(cs, ptr, heap, &id, FALSE, TRUE); |
| 5407 | |
| 5408 | if (id == NULL) { |
| 5409 | |
| 5410 | goto syntax_error; |
| 5411 | } |
| 5412 | |
| 5413 | ut_a(*n < 1000); |
| 5414 | (*constraints_to_drop)[*n] = id; |
| 5415 | (*n)++; |
| 5416 | |
| 5417 | if (std::find_if(table->foreign_set.begin(), |
| 5418 | table->foreign_set.end(), |
| 5419 | dict_foreign_matches_id(id)) |
| 5420 | == table->foreign_set.end()) { |
| 5421 | |
| 5422 | if (!srv_read_only_mode) { |
| 5423 | FILE* ef = dict_foreign_err_file; |
| 5424 | |
| 5425 | mutex_enter(&dict_foreign_err_mutex); |
| 5426 | rewind(ef); |
| 5427 | ut_print_timestamp(ef); |
| 5428 | fputs(" Error in dropping of a foreign key" |
| 5429 | " constraint of table " , ef); |
| 5430 | ut_print_name(ef, NULL, table->name.m_name); |
| 5431 | fprintf(ef, ",\nin SQL command\n%s" |
| 5432 | "\nCannot find a constraint with the" |
| 5433 | " given id %s.\n" , str, id); |
| 5434 | mutex_exit(&dict_foreign_err_mutex); |
| 5435 | } |
| 5436 | |
| 5437 | ut_free(str); |
| 5438 | |
| 5439 | return(DB_CANNOT_DROP_CONSTRAINT); |
| 5440 | } |
| 5441 | |
| 5442 | goto loop; |
| 5443 | |
| 5444 | syntax_error: |
| 5445 | if (!srv_read_only_mode) { |
| 5446 | FILE* ef = dict_foreign_err_file; |
| 5447 | |
| 5448 | mutex_enter(&dict_foreign_err_mutex); |
| 5449 | rewind(ef); |
| 5450 | ut_print_timestamp(ef); |
| 5451 | fputs(" Syntax error in dropping of a" |
| 5452 | " foreign key constraint of table " , ef); |
| 5453 | ut_print_name(ef, NULL, table->name.m_name); |
| 5454 | fprintf(ef, ",\n" |
| 5455 | "close to:\n%s\n in SQL command\n%s\n" , ptr, str); |
| 5456 | mutex_exit(&dict_foreign_err_mutex); |
| 5457 | } |
| 5458 | |
| 5459 | ut_free(str); |
| 5460 | |
| 5461 | return(DB_CANNOT_DROP_CONSTRAINT); |
| 5462 | } |
| 5463 | |
| 5464 | /*==================== END OF FOREIGN KEY PROCESSING ====================*/ |
| 5465 | |
| 5466 | /**********************************************************************//** |
| 5467 | Returns an index object if it is found in the dictionary cache. |
| 5468 | Assumes that dict_sys->mutex is already being held. |
| 5469 | @return index, NULL if not found */ |
| 5470 | dict_index_t* |
| 5471 | dict_index_get_if_in_cache_low( |
| 5472 | /*===========================*/ |
| 5473 | index_id_t index_id) /*!< in: index id */ |
| 5474 | { |
| 5475 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 5476 | |
| 5477 | return(dict_index_find_on_id_low(index_id)); |
| 5478 | } |
| 5479 | |
| 5480 | #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG |
| 5481 | /**********************************************************************//** |
| 5482 | Returns an index object if it is found in the dictionary cache. |
| 5483 | @return index, NULL if not found */ |
| 5484 | dict_index_t* |
| 5485 | dict_index_get_if_in_cache( |
| 5486 | /*=======================*/ |
| 5487 | index_id_t index_id) /*!< in: index id */ |
| 5488 | { |
| 5489 | dict_index_t* index; |
| 5490 | |
| 5491 | if (dict_sys == NULL) { |
| 5492 | return(NULL); |
| 5493 | } |
| 5494 | |
| 5495 | mutex_enter(&dict_sys->mutex); |
| 5496 | |
| 5497 | index = dict_index_get_if_in_cache_low(index_id); |
| 5498 | |
| 5499 | mutex_exit(&dict_sys->mutex); |
| 5500 | |
| 5501 | return(index); |
| 5502 | } |
| 5503 | #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */ |
| 5504 | |
| 5505 | #ifdef UNIV_DEBUG |
| 5506 | /**********************************************************************//** |
| 5507 | Checks that a tuple has an n_fields_cmp value in a sensible range, so that
| 5508 | no comparison can occur with the page number field in a node pointer. |
| 5509 | @return TRUE if ok */ |
| 5510 | ibool |
| 5511 | dict_index_check_search_tuple( |
| 5512 | /*==========================*/ |
| 5513 | const dict_index_t* index, /*!< in: index tree */ |
| 5514 | const dtuple_t* tuple) /*!< in: tuple used in a search */ |
| 5515 | { |
| 5516 | ut_a(index); |
| 5517 | ut_a(dtuple_get_n_fields_cmp(tuple) |
| 5518 | <= dict_index_get_n_unique_in_tree(index)); |
| 5519 | return(TRUE); |
| 5520 | } |
| 5521 | #endif /* UNIV_DEBUG */ |
| 5522 | |
| 5523 | /**********************************************************************//** |
| 5524 | Builds a node pointer out of a physical record and a page number. |
| 5525 | @return own: node pointer */ |
| 5526 | dtuple_t* |
| 5527 | dict_index_build_node_ptr( |
| 5528 | /*======================*/ |
| 5529 | const dict_index_t* index, /*!< in: index */ |
| 5530 | const rec_t* rec, /*!< in: record for which to build node |
| 5531 | pointer */ |
| 5532 | ulint page_no,/*!< in: page number to put in node |
| 5533 | pointer */ |
| 5534 | mem_heap_t* heap, /*!< in: memory heap where pointer |
| 5535 | created */ |
| 5536 | ulint level) /*!< in: level of rec in tree: |
| 5537 | 0 means leaf level */ |
| 5538 | { |
| 5539 | dtuple_t* tuple; |
| 5540 | dfield_t* field; |
| 5541 | byte* buf; |
| 5542 | ulint n_unique; |
| 5543 | |
| 5544 | if (dict_index_is_ibuf(index)) { |
| 5545 | /* In a universal index tree, we take the whole record as |
| 5546 | the node pointer if the record is on the leaf level;
| 5547 | on non-leaf levels we remove the last field, which |
| 5548 | contains the page number of the child page */ |
| 5549 | |
| 5550 | ut_a(!dict_table_is_comp(index->table)); |
| 5551 | n_unique = rec_get_n_fields_old(rec); |
| 5552 | |
| 5553 | if (level > 0) { |
| 5554 | ut_a(n_unique > 1); |
| 5555 | n_unique--; |
| 5556 | } |
| 5557 | } else { |
| 5558 | n_unique = dict_index_get_n_unique_in_tree_nonleaf(index); |
| 5559 | } |
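| | /* The node pointer tuple built below consists of the first n_unique
| | fields of rec followed by a 4-byte child page number field. */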
| 5560 | |
| 5561 | tuple = dtuple_create(heap, n_unique + 1); |
| 5562 | |
| 5563 | /* When searching in the tree for the node pointer, we must not do |
| 5564 | comparison on the last field, the page number field, as on upper |
| 5565 | levels in the tree there may be identical node pointers with a |
| 5566 | different page number; therefore, we set the n_fields_cmp to one |
| 5567 | less: */ |
| 5568 | |
| 5569 | dtuple_set_n_fields_cmp(tuple, n_unique); |
| 5570 | |
| 5571 | dict_index_copy_types(tuple, index, n_unique); |
| 5572 | |
| 5573 | buf = static_cast<byte*>(mem_heap_alloc(heap, 4)); |
| 5574 | |
| 5575 | mach_write_to_4(buf, page_no); |
| 5576 | |
| 5577 | field = dtuple_get_nth_field(tuple, n_unique); |
| 5578 | dfield_set_data(field, buf, 4); |
| 5579 | |
| 5580 | dtype_set(dfield_get_type(field), DATA_SYS_CHILD, DATA_NOT_NULL, 4); |
| 5581 | |
| 5582 | rec_copy_prefix_to_dtuple(tuple, rec, index, !level, n_unique, heap); |
| 5583 | dtuple_set_info_bits(tuple, dtuple_get_info_bits(tuple) |
| 5584 | | REC_STATUS_NODE_PTR); |
| 5585 | |
| 5586 | ut_ad(dtuple_check_typed(tuple)); |
| 5587 | |
| 5588 | return(tuple); |
| 5589 | } |
| 5590 | |
| 5591 | /**********************************************************************//** |
| 5592 | Copies an initial segment of a physical record, long enough to specify an |
| 5593 | index entry uniquely. |
| 5594 | @return pointer to the prefix record */ |
| 5595 | rec_t* |
| 5596 | dict_index_copy_rec_order_prefix( |
| 5597 | /*=============================*/ |
| 5598 | const dict_index_t* index, /*!< in: index */ |
| 5599 | const rec_t* rec, /*!< in: record for which to |
| 5600 | copy prefix */ |
| 5601 | ulint* n_fields,/*!< out: number of fields copied */ |
| 5602 | byte** buf, /*!< in/out: memory buffer for the |
| 5603 | copied prefix, or NULL */ |
| 5604 | ulint* buf_size)/*!< in/out: buffer size */ |
| 5605 | { |
| 5606 | ulint n; |
| 5607 | |
| 5608 | UNIV_PREFETCH_R(rec); |
| 5609 | |
| 5610 | if (dict_index_is_ibuf(index)) { |
| 5611 | ut_ad(!dict_table_is_comp(index->table)); |
| 5612 | n = rec_get_n_fields_old(rec); |
| 5613 | } else { |
| 5614 | if (page_rec_is_leaf(rec)) { |
| 5615 | n = dict_index_get_n_unique_in_tree(index); |
| 5616 | } else if (dict_index_is_spatial(index)) { |
| 5617 | ut_ad(dict_index_get_n_unique_in_tree_nonleaf(index) |
| 5618 | == DICT_INDEX_SPATIAL_NODEPTR_SIZE); |
| 5619 | /* For R-tree, we have to compare |
| 5620 | the child page numbers as well. */ |
| 5621 | n = DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1; |
| 5622 | } else { |
| 5623 | n = dict_index_get_n_unique_in_tree(index); |
| 5624 | } |
| 5625 | } |
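| | /* n is now the number of leading fields needed to specify the index
| | entry uniquely; they are copied into *buf below. */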
| 5626 | |
| 5627 | *n_fields = n; |
| 5628 | return(rec_copy_prefix_to_buf(rec, index, n, buf, buf_size)); |
| 5629 | } |
| 5630 | |
| 5631 | /** Convert a physical record into a search tuple. |
| 5632 | @param[in] rec index record (not necessarily in an index page) |
| 5633 | @param[in] index index |
| 5634 | @param[in] leaf whether rec is in a leaf page |
| 5635 | @param[in] n_fields number of data fields |
| 5636 | @param[in,out] heap memory heap for allocation |
| 5637 | @return own: data tuple */ |
| 5638 | dtuple_t* |
| 5639 | dict_index_build_data_tuple( |
| 5640 | const rec_t* rec, |
| 5641 | const dict_index_t* index, |
| 5642 | bool leaf, |
| 5643 | ulint n_fields, |
| 5644 | mem_heap_t* heap) |
| 5645 | { |
| 5646 | dtuple_t* tuple = dtuple_create(heap, n_fields); |
| 5647 | |
| 5648 | dict_index_copy_types(tuple, index, n_fields); |
| 5649 | |
| 5650 | rec_copy_prefix_to_dtuple(tuple, rec, index, leaf, n_fields, heap); |
| 5651 | |
| 5652 | ut_ad(dtuple_check_typed(tuple)); |
| 5653 | |
| 5654 | return(tuple); |
| 5655 | } |
| 5656 | |
| 5657 | /*********************************************************************//** |
| 5658 | Calculates the minimum record length in an index. */ |
| 5659 | ulint |
| 5660 | dict_index_calc_min_rec_len( |
| 5661 | /*========================*/ |
| 5662 | const dict_index_t* index) /*!< in: index */ |
| 5663 | { |
| 5664 | ulint sum = 0; |
| 5665 | ulint i; |
| 5666 | ulint comp = dict_table_is_comp(index->table); |
| 5667 | |
| 5668 | if (comp) { |
| 5669 | ulint nullable = 0; |
| 5670 | sum = REC_N_NEW_EXTRA_BYTES; |
| 5671 | for (i = 0; i < dict_index_get_n_fields(index); i++) { |
| 5672 | const dict_col_t* col |
| 5673 | = dict_index_get_nth_col(index, i); |
| 5674 | ulint size = dict_col_get_fixed_size(col, comp); |
| 5675 | sum += size; |
| 5676 | if (!size) { |
| 5677 | size = col->len; |
| 5678 | sum += size < 128 ? 1 : 2; |
| 5679 | } |
| 5680 | if (!(col->prtype & DATA_NOT_NULL)) { |
| 5681 | nullable++; |
| 5682 | } |
| 5683 | } |
| 5684 | |
| 5685 | /* round the NULL flags up to full bytes */ |
| 5686 | sum += UT_BITS_IN_BYTES(nullable); |
| 5687 | |
| 5688 | return(sum); |
| 5689 | } |
| 5690 | |
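| | /* ROW_FORMAT=REDUNDANT: each field has a 1-byte end offset if the
| | total record data length fits in 127 bytes, and a 2-byte offset
| | otherwise; this is accounted for after the loop below. */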
| 5691 | for (i = 0; i < dict_index_get_n_fields(index); i++) { |
| 5692 | sum += dict_col_get_fixed_size( |
| 5693 | dict_index_get_nth_col(index, i), comp); |
| 5694 | } |
| 5695 | |
| 5696 | if (sum > 127) { |
| 5697 | sum += 2 * dict_index_get_n_fields(index); |
| 5698 | } else { |
| 5699 | sum += dict_index_get_n_fields(index); |
| 5700 | } |
| 5701 | |
| 5702 | sum += REC_N_OLD_EXTRA_BYTES; |
| 5703 | |
| 5704 | return(sum); |
| 5705 | } |
| 5706 | |
| 5707 | /**********************************************************************//** |
| 5708 | Outputs info on a foreign key of a table in a format suitable for |
| 5709 | CREATE TABLE. */ |
| 5710 | std::string |
| 5711 | dict_print_info_on_foreign_key_in_create_format( |
| 5712 | /*============================================*/ |
| 5713 | trx_t* trx, /*!< in: transaction */ |
| 5714 | dict_foreign_t* foreign, /*!< in: foreign key constraint */ |
| 5715 | ibool add_newline) /*!< in: whether to add a newline */ |
| 5716 | { |
| 5717 | const char* stripped_id; |
| 5718 | ulint i; |
| 5719 | std::string str; |
| 5720 | |
| 5721 | if (strchr(foreign->id, '/')) { |
| 5722 | /* Strip the preceding database name from the constraint id */ |
| 5723 | stripped_id = foreign->id + 1 |
| 5724 | + dict_get_db_name_len(foreign->id); |
| 5725 | } else { |
| 5726 | stripped_id = foreign->id; |
| 5727 | } |
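| | /* Illustrative output (assuming backtick quoting and add_newline):
| | ",\n  CONSTRAINT `fk_1` FOREIGN KEY (`a`) REFERENCES `t2` (`b`)
| | ON DELETE CASCADE" */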
| 5728 | |
| 5729 | str.append("," ); |
| 5730 | |
| 5731 | if (add_newline) { |
| 5732 | /* SHOW CREATE TABLE wants constraints each printed nicely |
| 5733 | on its own line, while error messages want no newlines |
| 5734 | inserted. */ |
| 5735 | str.append("\n " ); |
| 5736 | } |
| 5737 | |
| 5738 | str.append(" CONSTRAINT " ); |
| 5739 | |
| 5740 | str.append(innobase_quote_identifier(trx, stripped_id)); |
| 5741 | str.append(" FOREIGN KEY (" ); |
| 5742 | |
| 5743 | for (i = 0;;) { |
| 5744 | str.append(innobase_quote_identifier(trx, foreign->foreign_col_names[i])); |
| 5745 | |
| 5746 | if (++i < foreign->n_fields) { |
| 5747 | str.append(", " ); |
| 5748 | } else { |
| 5749 | break; |
| 5750 | } |
| 5751 | } |
| 5752 | |
| 5753 | str.append(") REFERENCES " ); |
| 5754 | |
| 5755 | if (dict_tables_have_same_db(foreign->foreign_table_name_lookup, |
| 5756 | foreign->referenced_table_name_lookup)) { |
| 5757 | /* Do not print the database name of the referenced table */ |
| 5758 | str.append(ut_get_name(trx, |
| 5759 | dict_remove_db_name( |
| 5760 | foreign->referenced_table_name))); |
| 5761 | } else { |
| 5762 | str.append(ut_get_name(trx, |
| 5763 | foreign->referenced_table_name)); |
| 5764 | } |
| 5765 | |
| 5766 | str.append(" (" ); |
| 5767 | |
| 5768 | for (i = 0;;) { |
| 5769 | str.append(innobase_quote_identifier(trx, |
| 5770 | foreign->referenced_col_names[i])); |
| 5771 | |
| 5772 | if (++i < foreign->n_fields) { |
| 5773 | str.append(", " ); |
| 5774 | } else { |
| 5775 | break; |
| 5776 | } |
| 5777 | } |
| 5778 | |
| 5779 | str.append(")" ); |
| 5780 | |
| 5781 | if (foreign->type & DICT_FOREIGN_ON_DELETE_CASCADE) { |
| 5782 | str.append(" ON DELETE CASCADE" ); |
| 5783 | } |
| 5784 | |
| 5785 | if (foreign->type & DICT_FOREIGN_ON_DELETE_SET_NULL) { |
| 5786 | str.append(" ON DELETE SET NULL" ); |
| 5787 | } |
| 5788 | |
| 5789 | if (foreign->type & DICT_FOREIGN_ON_DELETE_NO_ACTION) { |
| 5790 | str.append(" ON DELETE NO ACTION" ); |
| 5791 | } |
| 5792 | |
| 5793 | if (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE) { |
| 5794 | str.append(" ON UPDATE CASCADE" ); |
| 5795 | } |
| 5796 | |
| 5797 | if (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL) { |
| 5798 | str.append(" ON UPDATE SET NULL" ); |
| 5799 | } |
| 5800 | |
| 5801 | if (foreign->type & DICT_FOREIGN_ON_UPDATE_NO_ACTION) { |
| 5802 | str.append(" ON UPDATE NO ACTION" ); |
| 5803 | } |
| 5804 | |
| 5805 | return str; |
| 5806 | } |
| 5807 | |
| 5808 | /**********************************************************************//** |
| 5809 | Outputs info on foreign keys of a table. */ |
| 5810 | std::string |
| 5811 | dict_print_info_on_foreign_keys( |
| 5812 | /*============================*/ |
| 5813 | ibool create_table_format, /*!< in: if TRUE then print in |
| 5814 | a format suitable to be inserted into |
| 5815 | a CREATE TABLE, otherwise in the format |
| 5816 | of SHOW TABLE STATUS */ |
| 5817 | trx_t* trx, /*!< in: transaction */ |
| 5818 | dict_table_t* table) /*!< in: table */ |
| 5819 | { |
| 5820 | dict_foreign_t* foreign; |
| 5821 | std::string str; |
| 5822 | |
| 5823 | mutex_enter(&dict_sys->mutex); |
| 5824 | |
| 5825 | for (dict_foreign_set::iterator it = table->foreign_set.begin(); |
| 5826 | it != table->foreign_set.end(); |
| 5827 | ++it) { |
| 5828 | |
| 5829 | foreign = *it; |
| 5830 | |
| 5831 | if (create_table_format) { |
| 5832 | str.append( |
| 5833 | dict_print_info_on_foreign_key_in_create_format( |
| 5834 | trx, foreign, TRUE)); |
| 5835 | } else { |
| 5836 | ulint i; |
| 5837 | str.append("; (" ); |
| 5838 | |
| 5839 | for (i = 0; i < foreign->n_fields; i++) { |
| 5840 | if (i) { |
| 5841 | str.append(" " ); |
| 5842 | } |
| 5843 | |
| 5844 | str.append(innobase_quote_identifier(trx, |
| 5845 | foreign->foreign_col_names[i])); |
| 5846 | } |
| 5847 | |
| 5848 | str.append(") REFER " ); |
| 5849 | str.append(ut_get_name(trx, |
| 5850 | foreign->referenced_table_name)); |
| 5851 | str.append(")" ); |
| 5852 | |
| 5853 | for (i = 0; i < foreign->n_fields; i++) { |
| 5854 | if (i) { |
| 5855 | str.append(" " ); |
| 5856 | } |
| 5857 | str.append(innobase_quote_identifier( |
| 5858 | trx, |
| 5859 | foreign->referenced_col_names[i])); |
| 5860 | } |
| 5861 | |
| 5862 | str.append(")" ); |
| 5863 | |
| 5864 | if (foreign->type == DICT_FOREIGN_ON_DELETE_CASCADE) { |
| 5865 | str.append(" ON DELETE CASCADE" ); |
| 5866 | } |
| 5867 | |
| 5868 | if (foreign->type == DICT_FOREIGN_ON_DELETE_SET_NULL) { |
| 5869 | str.append(" ON DELETE SET NULL" ); |
| 5870 | } |
| 5871 | |
| 5872 | if (foreign->type & DICT_FOREIGN_ON_DELETE_NO_ACTION) { |
| 5873 | str.append(" ON DELETE NO ACTION" ); |
| 5874 | } |
| 5875 | |
| 5876 | if (foreign->type & DICT_FOREIGN_ON_UPDATE_CASCADE) { |
| 5877 | str.append(" ON UPDATE CASCADE" ); |
| 5878 | } |
| 5879 | |
| 5880 | if (foreign->type & DICT_FOREIGN_ON_UPDATE_SET_NULL) { |
| 5881 | str.append(" ON UPDATE SET NULL" ); |
| 5882 | } |
| 5883 | |
| 5884 | if (foreign->type & DICT_FOREIGN_ON_UPDATE_NO_ACTION) { |
| 5885 | str.append(" ON UPDATE NO ACTION" ); |
| 5886 | } |
| 5887 | } |
| 5888 | } |
| 5889 | |
| 5890 | mutex_exit(&dict_sys->mutex); |
| 5891 | return str; |
| 5892 | } |
| 5893 | |
| 5894 | /** Given a space_id of a file-per-table tablespace, search the |
| 5895 | dict_sys->table_LRU list and return the dict_table_t* pointer for it. |
| 5896 | @param space tablespace |
| 5897 | @return table if found, NULL if not */ |
| 5898 | static |
| 5899 | dict_table_t* |
| 5900 | dict_find_single_table_by_space(const fil_space_t* space) |
| 5901 | { |
| 5902 | dict_table_t* table; |
| 5903 | ulint num_item; |
| 5904 | ulint count = 0; |
| 5905 | |
| 5906 | ut_ad(space->id > 0); |
| 5907 | |
| 5908 | if (dict_sys == NULL) { |
| 5909 | /* This can happen during redo log processing. */
| 5910 | return(NULL); |
| 5911 | } |
| 5912 | |
| 5913 | table = UT_LIST_GET_FIRST(dict_sys->table_LRU); |
| 5914 | num_item = UT_LIST_GET_LEN(dict_sys->table_LRU); |
| 5915 | |
| 5916 | /* This function intentionally does not acquire the mutex, as it is
| 5917 | used by error handling code deep in the call stack as a last resort
| 5918 | to avoid killing the server, so it is worth risking some consequences
| 5919 | of this action. */
| 5920 | while (table && count < num_item) { |
| 5921 | if (table->space == space) { |
| 5922 | if (dict_table_is_file_per_table(table)) { |
| 5923 | return(table); |
| 5924 | } |
| 5925 | return(NULL); |
| 5926 | } |
| 5927 | |
| 5928 | table = UT_LIST_GET_NEXT(table_LRU, table); |
| 5929 | count++; |
| 5930 | } |
| 5931 | |
| 5932 | return(NULL); |
| 5933 | } |
| 5934 | |
| 5935 | /**********************************************************************//** |
| 5936 | Flags the table of the specified tablespace as corrupted in the data
| 5937 | dictionary cache.
| 5938 | @return true if successful */ |
| 5939 | bool dict_set_corrupted_by_space(const fil_space_t* space) |
| 5940 | { |
| 5941 | dict_table_t* table; |
| 5942 | |
| 5943 | table = dict_find_single_table_by_space(space); |
| 5944 | |
| 5945 | if (!table) { |
| 5946 | return false; |
| 5947 | } |
| 5948 | |
| 5949 | /* Mark only the table->corrupted bit, since the caller
| 5950 | could be too deep in the call stack to update SYS_INDEXES */
| 5951 | table->corrupted = true; |
| 5952 | table->file_unreadable = true; |
| 5953 | return true; |
| 5954 | } |
| 5955 | |
| 5956 | /** Flag a table whose tablespace is encrypted as unreadable in the data dictionary cache. */
| 5957 | void dict_set_encrypted_by_space(const fil_space_t* space) |
| 5958 | { |
| 5959 | if (dict_table_t* table = dict_find_single_table_by_space(space)) { |
| 5960 | table->file_unreadable = true; |
| 5961 | } |
| 5962 | } |
| 5963 | |
| 5964 | /**********************************************************************//** |
| 5965 | Flags an index as corrupted, both in the data dictionary cache
| 5966 | and in SYS_INDEXES */
| 5967 | void |
| 5968 | dict_set_corrupted( |
| 5969 | /*===============*/ |
| 5970 | dict_index_t* index, /*!< in/out: index */ |
| 5971 | trx_t* trx, /*!< in/out: transaction */ |
| 5972 | const char* ctx) /*!< in: context */ |
| 5973 | { |
| 5974 | mem_heap_t* heap; |
| 5975 | mtr_t mtr; |
| 5976 | dict_index_t* sys_index; |
| 5977 | dtuple_t* tuple; |
| 5978 | dfield_t* dfield; |
| 5979 | byte* buf; |
| 5980 | const char* status; |
| 5981 | btr_cur_t cursor; |
| 5982 | bool locked = RW_X_LATCH == trx->dict_operation_lock_mode; |
| 5983 | |
| 5984 | if (!locked) { |
| 5985 | row_mysql_lock_data_dictionary(trx); |
| 5986 | } |
| 5987 | |
| 5988 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 5989 | ut_ad(!dict_table_is_comp(dict_sys->sys_tables)); |
| 5990 | ut_ad(!dict_table_is_comp(dict_sys->sys_indexes)); |
| 5991 | ut_ad(!sync_check_iterate(dict_sync_check())); |
| 5992 | |
| 5993 | /* Mark the table as corrupted only if the clustered index |
| 5994 | is corrupted */ |
| 5995 | if (dict_index_is_clust(index)) { |
| 5996 | index->table->corrupted = TRUE; |
| 5997 | } |
| 5998 | |
| 5999 | if (index->type & DICT_CORRUPT) { |
| 6000 | /* The index was already flagged corrupted. */ |
| 6001 | ut_ad(!dict_index_is_clust(index) || index->table->corrupted); |
| 6002 | goto func_exit; |
| 6003 | } |
| 6004 | |
| 6005 | /* If this is read-only mode, do not update SYS_INDEXES; just
| 6006 | mark the index as corrupted in memory */
| 6007 | if (srv_read_only_mode) { |
| 6008 | index->type |= DICT_CORRUPT; |
| 6009 | goto func_exit; |
| 6010 | } |
| 6011 | |
| 6012 | heap = mem_heap_create(sizeof(dtuple_t) + 2 * (sizeof(dfield_t) |
| 6013 | + sizeof(que_fork_t) + sizeof(upd_node_t) |
| 6014 | + sizeof(upd_t) + 12)); |
| 6015 | mtr_start(&mtr); |
| 6016 | index->type |= DICT_CORRUPT; |
| 6017 | |
| 6018 | sys_index = UT_LIST_GET_FIRST(dict_sys->sys_indexes->indexes); |
| 6019 | |
| 6020 | /* Find the index row in SYS_INDEXES */ |
| 6021 | tuple = dtuple_create(heap, 2); |
| 6022 | |
| 6023 | dfield = dtuple_get_nth_field(tuple, 0); |
| 6024 | buf = static_cast<byte*>(mem_heap_alloc(heap, 8)); |
| 6025 | mach_write_to_8(buf, index->table->id); |
| 6026 | dfield_set_data(dfield, buf, 8); |
| 6027 | |
| 6028 | dfield = dtuple_get_nth_field(tuple, 1); |
| 6029 | buf = static_cast<byte*>(mem_heap_alloc(heap, 8)); |
| 6030 | mach_write_to_8(buf, index->id); |
| 6031 | dfield_set_data(dfield, buf, 8); |
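| | /* The search tuple (table id, index id) corresponds to the key of the
| | clustered index of SYS_INDEXES, which is searched below. */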
| 6032 | |
| 6033 | dict_index_copy_types(tuple, sys_index, 2); |
| 6034 | |
| 6035 | btr_cur_search_to_nth_level(sys_index, 0, tuple, PAGE_CUR_LE, |
| 6036 | BTR_MODIFY_LEAF, |
| 6037 | &cursor, 0, __FILE__, __LINE__, &mtr); |
| 6038 | |
| 6039 | if (cursor.low_match == dtuple_get_n_fields(tuple)) { |
| 6040 | /* UPDATE SYS_INDEXES SET TYPE=index->type |
| 6041 | WHERE TABLE_ID=index->table->id AND INDEX_ID=index->id */ |
| 6042 | ulint len; |
| 6043 | byte* field = rec_get_nth_field_old( |
| 6044 | btr_cur_get_rec(&cursor), |
| 6045 | DICT_FLD__SYS_INDEXES__TYPE, &len); |
| 6046 | if (len != 4) { |
| 6047 | goto fail; |
| 6048 | } |
| 6049 | mlog_write_ulint(field, index->type, MLOG_4BYTES, &mtr); |
| 6050 | status = "Flagged" ; |
| 6051 | } else { |
| 6052 | fail: |
| 6053 | status = "Unable to flag" ; |
| 6054 | } |
| 6055 | |
| 6056 | mtr_commit(&mtr); |
| 6057 | mem_heap_empty(heap); |
| 6058 | ib::error() << status << " corruption of " << index->name |
| 6059 | << " in table " << index->table->name << " in " << ctx; |
| 6060 | mem_heap_free(heap); |
| 6061 | |
| 6062 | func_exit: |
| 6063 | if (!locked) { |
| 6064 | row_mysql_unlock_data_dictionary(trx); |
| 6065 | } |
| 6066 | } |
| 6067 | |
| 6068 | /** Flags an index as corrupted in the data dictionary cache only.
| 6069 | This is used mostly to mark an index as corrupted when the index's own
| 6070 | dictionary information is corrupted and we force-load it for repair.
| 6071 | @param[in,out] index index which is corrupted */
| 6072 | void |
| 6073 | dict_set_corrupted_index_cache_only( |
| 6074 | dict_index_t* index) |
| 6075 | { |
| 6076 | ut_ad(index != NULL); |
| 6077 | ut_ad(index->table != NULL); |
| 6078 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 6079 | ut_ad(!dict_table_is_comp(dict_sys->sys_tables)); |
| 6080 | ut_ad(!dict_table_is_comp(dict_sys->sys_indexes)); |
| 6081 | |
| 6082 | /* Mark the table as corrupted only if the clustered index |
| 6083 | is corrupted */ |
| 6084 | if (dict_index_is_clust(index)) { |
| 6085 | index->table->corrupted = TRUE; |
| 6086 | } |
| 6087 | |
| 6088 | index->type |= DICT_CORRUPT; |
| 6089 | } |
| 6090 | |
| 6091 | /** Sets merge_threshold in SYS_INDEXES
| 6092 | @param[in,out] index index |
| 6093 | @param[in] merge_threshold value to set */ |
| 6094 | void |
| 6095 | dict_index_set_merge_threshold( |
| 6096 | dict_index_t* index, |
| 6097 | ulint merge_threshold) |
| 6098 | { |
| 6099 | mem_heap_t* heap; |
| 6100 | mtr_t mtr; |
| 6101 | dict_index_t* sys_index; |
| 6102 | dtuple_t* tuple; |
| 6103 | dfield_t* dfield; |
| 6104 | byte* buf; |
| 6105 | btr_cur_t cursor; |
| 6106 | |
| 6107 | ut_ad(index != NULL); |
| 6108 | ut_ad(!dict_table_is_comp(dict_sys->sys_tables)); |
| 6109 | ut_ad(!dict_table_is_comp(dict_sys->sys_indexes)); |
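| | /* The merge_threshold value (presumably the page-fullness percentage
| | below which an index page is merged with a neighbor) is persisted in
| | the MERGE_THRESHOLD field of the SYS_INDEXES record for this index. */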
| 6110 | |
| 6111 | rw_lock_x_lock(dict_operation_lock); |
| 6112 | mutex_enter(&(dict_sys->mutex)); |
| 6113 | |
| 6114 | heap = mem_heap_create(sizeof(dtuple_t) + 2 * (sizeof(dfield_t) |
| 6115 | + sizeof(que_fork_t) + sizeof(upd_node_t) |
| 6116 | + sizeof(upd_t) + 12)); |
| 6117 | |
| 6118 | mtr_start(&mtr); |
| 6119 | |
| 6120 | sys_index = UT_LIST_GET_FIRST(dict_sys->sys_indexes->indexes); |
| 6121 | |
| 6122 | /* Find the index row in SYS_INDEXES */ |
| 6123 | tuple = dtuple_create(heap, 2); |
| 6124 | |
| 6125 | dfield = dtuple_get_nth_field(tuple, 0); |
| 6126 | buf = static_cast<byte*>(mem_heap_alloc(heap, 8)); |
| 6127 | mach_write_to_8(buf, index->table->id); |
| 6128 | dfield_set_data(dfield, buf, 8); |
| 6129 | |
| 6130 | dfield = dtuple_get_nth_field(tuple, 1); |
| 6131 | buf = static_cast<byte*>(mem_heap_alloc(heap, 8)); |
| 6132 | mach_write_to_8(buf, index->id); |
| 6133 | dfield_set_data(dfield, buf, 8); |
| 6134 | |
| 6135 | dict_index_copy_types(tuple, sys_index, 2); |
| 6136 | |
| 6137 | btr_cur_search_to_nth_level(sys_index, 0, tuple, PAGE_CUR_GE, |
| 6138 | BTR_MODIFY_LEAF, |
| 6139 | &cursor, 0, __FILE__, __LINE__, &mtr); |
| 6140 | |
| 6141 | if (cursor.up_match == dtuple_get_n_fields(tuple) |
| 6142 | && rec_get_n_fields_old(btr_cur_get_rec(&cursor)) |
| 6143 | == DICT_NUM_FIELDS__SYS_INDEXES) { |
| 6144 | ulint len; |
| 6145 | byte* field = rec_get_nth_field_old( |
| 6146 | btr_cur_get_rec(&cursor), |
| 6147 | DICT_FLD__SYS_INDEXES__MERGE_THRESHOLD, &len); |
| 6148 | |
| 6149 | ut_ad(len == 4); |
| 6150 | |
| 6151 | if (len == 4) { |
| 6152 | mlog_write_ulint(field, merge_threshold, |
| 6153 | MLOG_4BYTES, &mtr); |
| 6154 | } |
| 6155 | } |
| 6156 | |
| 6157 | mtr_commit(&mtr); |
| 6158 | mem_heap_free(heap); |
| 6159 | |
| 6160 | mutex_exit(&(dict_sys->mutex)); |
| 6161 | rw_lock_x_unlock(dict_operation_lock); |
| 6162 | } |
| 6163 | |
| 6164 | #ifdef UNIV_DEBUG |
| 6165 | /** Sets merge_threshold for all indexes in the list of tables |
| 6166 | @param[in] list pointer to the list of tables */ |
| 6167 | inline |
| 6168 | void |
| 6169 | dict_set_merge_threshold_list_debug( |
| 6170 | UT_LIST_BASE_NODE_T(dict_table_t)* list, |
| 6171 | uint merge_threshold_all) |
| 6172 | { |
| 6173 | for (dict_table_t* table = UT_LIST_GET_FIRST(*list); |
| 6174 | table != NULL; |
| 6175 | table = UT_LIST_GET_NEXT(table_LRU, table)) { |
| 6176 | for (dict_index_t* index = UT_LIST_GET_FIRST(table->indexes); |
| 6177 | index != NULL; |
| 6178 | index = UT_LIST_GET_NEXT(indexes, index)) { |
| 6179 | rw_lock_x_lock(dict_index_get_lock(index)); |
| 6180 | index->merge_threshold = merge_threshold_all; |
| 6181 | rw_lock_x_unlock(dict_index_get_lock(index)); |
| 6182 | } |
| 6183 | } |
| 6184 | } |
| 6185 | |
| 6186 | /** Sets merge_threshold for all indexes in dictionary cache for debug. |
| 6187 | @param[in] merge_threshold_all value to set for all indexes */ |
| 6188 | void |
| 6189 | dict_set_merge_threshold_all_debug( |
| 6190 | uint merge_threshold_all) |
| 6191 | { |
| 6192 | mutex_enter(&dict_sys->mutex); |
| 6193 | |
| 6194 | dict_set_merge_threshold_list_debug( |
| 6195 | &dict_sys->table_LRU, merge_threshold_all); |
| 6196 | dict_set_merge_threshold_list_debug( |
| 6197 | &dict_sys->table_non_LRU, merge_threshold_all); |
| 6198 | |
| 6199 | mutex_exit(&dict_sys->mutex); |
| 6200 | } |
| 6201 | |
| 6202 | #endif /* UNIV_DEBUG */ |
| 6203 | |
| 6204 | /** Initialize dict_ind_redundant. */ |
| 6205 | void |
| 6206 | dict_ind_init() |
| 6207 | { |
| 6208 | dict_table_t* table; |
| 6209 | |
| 6210 | /* create dummy table and index for REDUNDANT infimum and supremum */ |
| 6211 | table = dict_mem_table_create("SYS_DUMMY1" , NULL, 1, 0, 0, 0); |
| 6212 | dict_mem_table_add_col(table, NULL, NULL, DATA_CHAR, |
| 6213 | DATA_ENGLISH | DATA_NOT_NULL, 8); |
| 6214 | |
| 6215 | dict_ind_redundant = dict_mem_index_create(table, "SYS_DUMMY1" , 0, 1); |
| 6216 | dict_index_add_col(dict_ind_redundant, table, |
| 6217 | dict_table_get_nth_col(table, 0), 0); |
| 6218 | /* avoid ut_ad(index->cached) in dict_index_get_n_unique_in_tree */ |
| 6219 | dict_ind_redundant->cached = TRUE; |
| 6220 | } |
| 6221 | |
| 6222 | /** Free dict_ind_redundant. */ |
| 6223 | void |
| 6224 | dict_ind_free() |
| 6225 | { |
| 6226 | dict_table_t* table = dict_ind_redundant->table; |
| 6227 | dict_mem_index_free(dict_ind_redundant); |
| 6228 | dict_ind_redundant = NULL; |
| 6229 | dict_mem_table_free(table); |
| 6230 | } |
| 6231 | |
| 6232 | /** Get an index by name. |
| 6233 | @param[in] table the table where to look for the index |
| 6234 | @param[in] name the index name to look for |
| 6235 | @param[in] committed true=search for committed, |
| 6236 | false=search for uncommitted |
| 6237 | @return index, NULL if does not exist */ |
| 6238 | dict_index_t* |
| 6239 | dict_table_get_index_on_name( |
| 6240 | dict_table_t* table, |
| 6241 | const char* name, |
| 6242 | bool committed) |
| 6243 | { |
| 6244 | dict_index_t* index; |
| 6245 | |
| 6246 | index = dict_table_get_first_index(table); |
| 6247 | |
| 6248 | while (index != NULL) { |
| 6249 | if (index->is_committed() == committed |
| 6250 | && innobase_strcasecmp(index->name, name) == 0) { |
| 6251 | |
| 6252 | return(index); |
| 6253 | } |
| 6254 | |
| 6255 | index = dict_table_get_next_index(index); |
| 6256 | } |
| 6257 | |
| 6258 | return(NULL); |
| 6259 | } |
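| | /* Usage sketch (editor's illustration, not called from this file): a |
| | caller that wants the committed index named "PRIMARY" on a cached table |
| | could do, for example: |
| | |
| | dict_index_t* index = dict_table_get_index_on_name( |
| | table, "PRIMARY", true); |
| | |
| | if (index == NULL) { |
| | // no committed index with that name |
| | } |
| | |
| | The comparison uses innobase_strcasecmp(), so the lookup is |
| | case-insensitive; the third argument selects committed versus |
| | uncommitted (not yet published) indexes. */ |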
| 6260 | |
| 6261 | /**********************************************************************//** |
| 6262 | Replace the index passed in with another equivalent index in the |
| 6263 | foreign key lists of the table. |
| 6264 | @return whether all replacements were found */ |
| 6265 | bool |
| 6266 | dict_foreign_replace_index( |
| 6267 | /*=======================*/ |
| 6268 | dict_table_t* table, /*!< in/out: table */ |
| 6269 | const char** col_names, |
| 6270 | /*!< in: column names, or NULL |
| 6271 | to use table->col_names */ |
| 6272 | const dict_index_t* index) /*!< in: index to be replaced */ |
| 6273 | { |
| 6274 | bool found = true; |
| 6275 | dict_foreign_t* foreign; |
| 6276 | |
| 6277 | ut_ad(index->to_be_dropped); |
| 6278 | ut_ad(index->table == table); |
| 6279 | |
| 6280 | for (dict_foreign_set::iterator it = table->foreign_set.begin(); |
| 6281 | it != table->foreign_set.end(); |
| 6282 | ++it) { |
| 6283 | |
| 6284 | foreign = *it; |
| 6285 | if (foreign->foreign_index == index) { |
| 6286 | ut_ad(foreign->foreign_table == index->table); |
| 6287 | |
| 6288 | dict_index_t* new_index = dict_foreign_find_index( |
| 6289 | foreign->foreign_table, col_names, |
| 6290 | foreign->foreign_col_names, |
| 6291 | foreign->n_fields, index, |
| 6292 | /*check_charsets=*/TRUE, /*check_null=*/FALSE, |
| 6293 | NULL, NULL, NULL); |
| 6294 | if (new_index) { |
| 6295 | ut_ad(new_index->table == index->table); |
| 6296 | ut_ad(!new_index->to_be_dropped); |
| 6297 | } else { |
| 6298 | found = false; |
| 6299 | } |
| 6300 | |
| 6301 | foreign->foreign_index = new_index; |
| 6302 | } |
| 6303 | } |
| 6304 | |
| 6305 | for (dict_foreign_set::iterator it = table->referenced_set.begin(); |
| 6306 | it != table->referenced_set.end(); |
| 6307 | ++it) { |
| 6308 | |
| 6309 | foreign = *it; |
| 6310 | if (foreign->referenced_index == index) { |
| 6311 | ut_ad(foreign->referenced_table == index->table); |
| 6312 | |
| 6313 | dict_index_t* new_index = dict_foreign_find_index( |
| 6314 | foreign->referenced_table, NULL, |
| 6315 | foreign->referenced_col_names, |
| 6316 | foreign->n_fields, index, |
| 6317 | /*check_charsets=*/TRUE, /*check_null=*/FALSE, |
| 6318 | NULL, NULL, NULL); |
| 6319 | /* There must exist an alternative index, |
| 6320 | since this must have been checked earlier. */ |
| 6321 | if (new_index) { |
| 6322 | ut_ad(new_index->table == index->table); |
| 6323 | ut_ad(!new_index->to_be_dropped); |
| 6324 | } else { |
| 6325 | found = false; |
| 6326 | } |
| 6327 | |
| 6328 | foreign->referenced_index = new_index; |
| 6329 | } |
| 6330 | } |
| 6331 | |
| 6332 | return(found); |
| 6333 | } |
| 6334 | |
| 6335 | #ifdef UNIV_DEBUG |
| 6336 | /**********************************************************************//** |
| 6337 | Check for duplicate index entries in a table [using the index name] */ |
| 6338 | void |
| 6339 | dict_table_check_for_dup_indexes( |
| 6340 | /*=============================*/ |
| 6341 | const dict_table_t* table, /*!< in: Check for dup indexes |
| 6342 | in this table */ |
| 6343 | enum check_name check) /*!< in: whether and when to allow |
| 6344 | temporary index names */ |
| 6345 | { |
| 6346 | /* Check for duplicates, ignoring indexes that are marked |
| 6347 | as to be dropped */ |
| 6348 | |
| 6349 | const dict_index_t* index1; |
| 6350 | const dict_index_t* index2; |
| 6351 | |
| 6352 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 6353 | |
| 6354 | /* The primary index _must_ exist */ |
| 6355 | ut_a(UT_LIST_GET_LEN(table->indexes) > 0); |
| 6356 | |
| 6357 | index1 = UT_LIST_GET_FIRST(table->indexes); |
| 6358 | |
| 6359 | do { |
| 6360 | if (!index1->is_committed()) { |
| 6361 | ut_a(!dict_index_is_clust(index1)); |
| 6362 | |
| 6363 | switch (check) { |
| 6364 | case CHECK_ALL_COMPLETE: |
| 6365 | ut_error; |
| 6366 | case CHECK_ABORTED_OK: |
| 6367 | switch (dict_index_get_online_status(index1)) { |
| 6368 | case ONLINE_INDEX_COMPLETE: |
| 6369 | case ONLINE_INDEX_CREATION: |
| 6370 | ut_error; |
| 6371 | break; |
| 6372 | case ONLINE_INDEX_ABORTED: |
| 6373 | case ONLINE_INDEX_ABORTED_DROPPED: |
| 6374 | break; |
| 6375 | } |
| 6376 | /* fall through */ |
| 6377 | case CHECK_PARTIAL_OK: |
| 6378 | break; |
| 6379 | } |
| 6380 | } |
| 6381 | |
| 6382 | for (index2 = UT_LIST_GET_NEXT(indexes, index1); |
| 6383 | index2 != NULL; |
| 6384 | index2 = UT_LIST_GET_NEXT(indexes, index2)) { |
| 6385 | ut_ad(index1->is_committed() |
| 6386 | != index2->is_committed() |
| 6387 | || strcmp(index1->name, index2->name) != 0); |
| 6388 | } |
| 6389 | |
| 6390 | index1 = UT_LIST_GET_NEXT(indexes, index1); |
| 6391 | } while (index1); |
| 6392 | } |
| 6393 | #endif /* UNIV_DEBUG */ |
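| | /* Usage sketch (editor's illustration): debug builds typically wrap DDL |
| | code paths with this check while holding dict_sys->mutex, e.g. |
| | |
| | ut_d(dict_table_check_for_dup_indexes(table, CHECK_ABORTED_OK)); |
| | |
| | Per the switch above, CHECK_ALL_COMPLETE rejects any uncommitted index, |
| | CHECK_ABORTED_OK additionally tolerates aborted online index builds, and |
| | CHECK_PARTIAL_OK accepts any uncommitted index. */ |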
| 6394 | |
| 6395 | /** Auxiliary macro used inside dict_table_schema_check(). */ |
| 6396 | #define CREATE_TYPES_NAMES() \ |
| 6397 | dtype_sql_name((unsigned) req_schema->columns[i].mtype, \ |
| 6398 | (unsigned) req_schema->columns[i].prtype_mask, \ |
| 6399 | (unsigned) req_schema->columns[i].len, \ |
| 6400 | req_type, sizeof(req_type)); \ |
| 6401 | dtype_sql_name(table->cols[j].mtype, \ |
| 6402 | table->cols[j].prtype, \ |
| 6403 | table->cols[j].len, \ |
| 6404 | actual_type, sizeof(actual_type)) |
| 6405 | |
| 6406 | /*********************************************************************//** |
| 6407 | Checks whether a table exists and whether it has the given structure. |
| 6408 | The table must have the same number of columns with the same names and |
| 6409 | types. The order of the columns does not matter. |
| 6410 | The caller must own the dictionary mutex. |
| 6411 | dict_table_schema_check() @{ |
| 6412 | @return DB_SUCCESS if the table exists and contains the necessary columns */ |
| 6413 | dberr_t |
| 6414 | dict_table_schema_check( |
| 6415 | /*====================*/ |
| 6416 | dict_table_schema_t* req_schema, /*!< in/out: required table |
| 6417 | schema */ |
| 6418 | char* errstr, /*!< out: human readable error |
| 6419 | message if != DB_SUCCESS is |
| 6420 | returned */ |
| 6421 | size_t errstr_sz) /*!< in: errstr size */ |
| 6422 | { |
| 6423 | char buf[MAX_FULL_NAME_LEN]; |
| 6424 | char req_type[64]; |
| 6425 | char actual_type[64]; |
| 6426 | dict_table_t* table; |
| 6427 | ulint i; |
| 6428 | |
| 6429 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 6430 | |
| 6431 | table = dict_table_get_low(req_schema->table_name); |
| 6432 | |
| 6433 | if (table == NULL) { |
| 6434 | bool should_print = true; |
| 6435 | /* no such table */ |
| 6436 | |
| 6437 | if (innobase_strcasecmp(req_schema->table_name, "mysql/innodb_table_stats") == 0) { |
| 6438 | if (innodb_table_stats_not_found_reported == false) { |
| 6439 | innodb_table_stats_not_found = true; |
| 6440 | innodb_table_stats_not_found_reported = true; |
| 6441 | } else { |
| 6442 | should_print = false; |
| 6443 | } |
| 6444 | } else if (innobase_strcasecmp(req_schema->table_name, "mysql/innodb_index_stats") == 0) { |
| 6445 | if (innodb_index_stats_not_found_reported == false) { |
| 6446 | innodb_index_stats_not_found = true; |
| 6447 | innodb_index_stats_not_found_reported = true; |
| 6448 | } else { |
| 6449 | should_print = false; |
| 6450 | } |
| 6451 | } |
| 6452 | |
| 6453 | if (should_print) { |
| 6454 | snprintf(errstr, errstr_sz, |
| 6455 | "Table %s not found." , |
| 6456 | ut_format_name(req_schema->table_name, |
| 6457 | buf, sizeof(buf))); |
| 6458 | return(DB_TABLE_NOT_FOUND); |
| 6459 | } else { |
| 6460 | return(DB_STATS_DO_NOT_EXIST); |
| 6461 | } |
| 6462 | } |
| 6463 | |
| 6464 | if (!table->is_readable() && !table->space) { |
| 6465 | /* missing tablespace */ |
| 6466 | |
| 6467 | snprintf(errstr, errstr_sz, |
| 6468 | "Tablespace for table %s is missing." , |
| 6469 | ut_format_name(req_schema->table_name, |
| 6470 | buf, sizeof(buf))); |
| 6471 | |
| 6472 | return(DB_TABLE_NOT_FOUND); |
| 6473 | } |
| 6474 | |
| 6475 | if (ulint(table->n_def - DATA_N_SYS_COLS) != req_schema->n_cols) { |
| 6476 | /* the table has a different number of columns than required */ |
| 6477 | snprintf(errstr, errstr_sz, |
| 6478 | "%s has %d columns but should have " ULINTPF "." , |
| 6479 | ut_format_name(req_schema->table_name, buf, |
| 6480 | sizeof buf), |
| 6481 | table->n_def - DATA_N_SYS_COLS, |
| 6482 | req_schema->n_cols); |
| 6483 | |
| 6484 | return(DB_ERROR); |
| 6485 | } |
| 6486 | |
| 6487 | /* For each column from req_schema->columns[] search |
| 6488 | whether it is present in table->cols[]. |
| 6489 | The following algorithm is O(n_cols^2), but is optimized to |
| 6490 | be O(n_cols) if the columns are in the same order in both arrays. */ |
| 6491 | |
| 6492 | for (i = 0; i < req_schema->n_cols; i++) { |
| 6493 | ulint j = dict_table_has_column( |
| 6494 | table, req_schema->columns[i].name, i); |
| 6495 | |
| 6496 | if (j == table->n_def) { |
| 6497 | |
| 6498 | snprintf(errstr, errstr_sz, |
| 6499 | "required column %s" |
| 6500 | " not found in table %s." , |
| 6501 | req_schema->columns[i].name, |
| 6502 | ut_format_name( |
| 6503 | req_schema->table_name, |
| 6504 | buf, sizeof(buf))); |
| 6505 | |
| 6506 | return(DB_ERROR); |
| 6507 | } |
| 6508 | |
| 6509 | /* we found a column with the same name on j'th position, |
| 6510 | compare column types and flags */ |
| 6511 | |
| 6512 | /* check length for exact match */ |
| 6513 | if (req_schema->columns[i].len != table->cols[j].len) { |
| 6514 | |
| 6515 | CREATE_TYPES_NAMES(); |
| 6516 | |
| 6517 | snprintf(errstr, errstr_sz, |
| 6518 | "Column %s in table %s is %s" |
| 6519 | " but should be %s (length mismatch)." , |
| 6520 | req_schema->columns[i].name, |
| 6521 | ut_format_name(req_schema->table_name, |
| 6522 | buf, sizeof(buf)), |
| 6523 | actual_type, req_type); |
| 6524 | |
| 6525 | return(DB_ERROR); |
| 6526 | } |
| 6527 | |
| 6528 | /* |
| 6529 | check mtype for exact match. |
| 6530 | This check is relaxed to allow us to use TIMESTAMP |
| 6531 | (i.e. INT) for last_update instead of DATA_BINARY. |
| 6532 | We have to test for both values as the innodb_table_stats |
| 6533 | table may come from MySQL and have the old type. |
| 6534 | */ |
| 6535 | if (req_schema->columns[i].mtype != table->cols[j].mtype && |
| 6536 | !(req_schema->columns[i].mtype == DATA_INT && |
| 6537 | table->cols[j].mtype == DATA_FIXBINARY)) |
| 6538 | { |
| 6539 | CREATE_TYPES_NAMES(); |
| 6540 | |
| 6541 | snprintf(errstr, errstr_sz, |
| 6542 | "Column %s in table %s is %s" |
| 6543 | " but should be %s (type mismatch)." , |
| 6544 | req_schema->columns[i].name, |
| 6545 | ut_format_name(req_schema->table_name, |
| 6546 | buf, sizeof(buf)), |
| 6547 | actual_type, req_type); |
| 6548 | |
| 6549 | return(DB_ERROR); |
| 6550 | } |
| 6551 | |
| 6552 | /* check whether required prtype mask is set */ |
| 6553 | if (req_schema->columns[i].prtype_mask != 0 |
| 6554 | && (table->cols[j].prtype |
| 6555 | & req_schema->columns[i].prtype_mask) |
| 6556 | != req_schema->columns[i].prtype_mask) { |
| 6557 | |
| 6558 | CREATE_TYPES_NAMES(); |
| 6559 | |
| 6560 | snprintf(errstr, errstr_sz, |
| 6561 | "Column %s in table %s is %s" |
| 6562 | " but should be %s (flags mismatch)." , |
| 6563 | req_schema->columns[i].name, |
| 6564 | ut_format_name(req_schema->table_name, |
| 6565 | buf, sizeof(buf)), |
| 6566 | actual_type, req_type); |
| 6567 | |
| 6568 | return(DB_ERROR); |
| 6569 | } |
| 6570 | } |
| 6571 | |
| 6572 | if (req_schema->n_foreign != table->foreign_set.size()) { |
| 6573 | snprintf( |
| 6574 | errstr, errstr_sz, |
| 6575 | "Table %s has " ULINTPF " foreign key(s) pointing" |
| 6576 | " to other tables, but it must have " ULINTPF "." , |
| 6577 | ut_format_name(req_schema->table_name, |
| 6578 | buf, sizeof(buf)), |
| 6579 | static_cast<ulint>(table->foreign_set.size()), |
| 6580 | req_schema->n_foreign); |
| 6581 | return(DB_ERROR); |
| 6582 | } |
| 6583 | |
| 6584 | if (req_schema->n_referenced != table->referenced_set.size()) { |
| 6585 | snprintf( |
| 6586 | errstr, errstr_sz, |
| 6587 | "There are " ULINTPF " foreign key(s) pointing to %s, " |
| 6588 | "but there must be " ULINTPF "." , |
| 6589 | static_cast<ulint>(table->referenced_set.size()), |
| 6590 | ut_format_name(req_schema->table_name, |
| 6591 | buf, sizeof(buf)), |
| 6592 | req_schema->n_referenced); |
| 6593 | return(DB_ERROR); |
| 6594 | } |
| 6595 | |
| 6596 | return(DB_SUCCESS); |
| 6597 | } |
| 6598 | /* @} */ |
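| | /* Usage sketch (editor's illustration; the dict_col_meta_t name and its |
| | field order name, mtype, prtype_mask, len are assumed from dict0dict.h, |
| | and the table name and column definitions below are only examples): a |
| | caller holding dict_sys->mutex could validate a table like this: |
| | |
| | dict_col_meta_t columns[] = { |
| | {"database_name", DATA_VARMYSQL, DATA_NOT_NULL, 192}, |
| | {"table_name", DATA_VARMYSQL, DATA_NOT_NULL, 597} |
| | }; |
| | |
| | dict_table_schema_t schema; |
| | schema.table_name = "test/t_to_check"; |
| | schema.n_cols = UT_ARR_SIZE(columns); |
| | schema.columns = columns; |
| | schema.n_foreign = 0; |
| | schema.n_referenced = 0; |
| | |
| | char errstr[512]; |
| | dberr_t err = dict_table_schema_check( |
| | &schema, errstr, sizeof(errstr)); |
| | |
| | if (err != DB_SUCCESS) { |
| | ib::warn() << errstr; |
| | } |
| | |
| | A mismatch in column count, name, length, mtype or prtype mask, or in |
| | the number of foreign keys, yields DB_ERROR with errstr filled in. */ |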
| 6599 | |
| 6600 | /*********************************************************************//** |
| 6601 | Converts a database and table name from filesystem encoding |
| 6602 | (e.g. d@i1b/a@q1b@1Kc, the same format as used in dict_table_t::name) into two |
| 6603 | strings in UTF8 encoding (e.g. dцb and aюbØc). The output buffers must be |
| 6604 | at least MAX_DB_UTF8_LEN and MAX_TABLE_UTF8_LEN bytes. */ |
| 6605 | void |
| 6606 | dict_fs2utf8( |
| 6607 | /*=========*/ |
| 6608 | const char* db_and_table, /*!< in: database and table names, |
| 6609 | e.g. d@i1b/a@q1b@1Kc */ |
| 6610 | char* db_utf8, /*!< out: database name, e.g. dцb */ |
| 6611 | size_t db_utf8_size, /*!< in: db_utf8 size */ |
| 6612 | char* table_utf8, /*!< out: table name, e.g. aюbØc */ |
| 6613 | size_t table_utf8_size)/*!< in: table_utf8 size */ |
| 6614 | { |
| 6615 | char db[MAX_DATABASE_NAME_LEN + 1]; |
| 6616 | ulint db_len; |
| 6617 | uint errors; |
| 6618 | |
| 6619 | db_len = dict_get_db_name_len(db_and_table); |
| 6620 | |
| 6621 | ut_a(db_len <= sizeof(db)); |
| 6622 | |
| 6623 | memcpy(db, db_and_table, db_len); |
| 6624 | db[db_len] = '\0'; |
| 6625 | |
| 6626 | strconvert( |
| 6627 | &my_charset_filename, db, uint(db_len), system_charset_info, |
| 6628 | db_utf8, uint(db_utf8_size), &errors); |
| 6629 | |
| 6630 | /* convert each # to @0023 in table name and store the result in buf */ |
| 6631 | const char* table = dict_remove_db_name(db_and_table); |
| 6632 | const char* table_p; |
| 6633 | char buf[MAX_TABLE_NAME_LEN * 5 + 1]; |
| 6634 | char* buf_p; |
| 6635 | for (table_p = table, buf_p = buf; table_p[0] != '\0'; table_p++) { |
| 6636 | if (table_p[0] != '#') { |
| 6637 | buf_p[0] = table_p[0]; |
| 6638 | buf_p++; |
| 6639 | } else { |
| 6640 | buf_p[0] = '@'; |
| 6641 | buf_p[1] = '0'; |
| 6642 | buf_p[2] = '0'; |
| 6643 | buf_p[3] = '2'; |
| 6644 | buf_p[4] = '3'; |
| 6645 | buf_p += 5; |
| 6646 | } |
| 6647 | ut_a((size_t) (buf_p - buf) < sizeof(buf)); |
| 6648 | } |
| 6649 | buf_p[0] = '\0'; |
| 6650 | |
| 6651 | errors = 0; |
| 6652 | strconvert( |
| 6653 | &my_charset_filename, buf, (uint) (buf_p - buf), |
| 6654 | system_charset_info, |
| 6655 | table_utf8, uint(table_utf8_size), |
| 6656 | &errors); |
| 6657 | |
| 6658 | if (errors != 0) { |
| 6659 | snprintf(table_utf8, table_utf8_size, "%s%s" , |
| 6660 | srv_mysql50_table_name_prefix, table); |
| 6661 | } |
| 6662 | } |
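| | /* Usage sketch (editor's illustration; the table name is hypothetical): |
| | |
| | char db_utf8[MAX_DB_UTF8_LEN]; |
| | char table_utf8[MAX_TABLE_UTF8_LEN]; |
| | |
| | dict_fs2utf8("mydb/my#table", db_utf8, sizeof(db_utf8), |
| | table_utf8, sizeof(table_utf8)); |
| | |
| | The '#' is first rewritten to the @0023 escape (see the loop above) so |
| | that strconvert() with my_charset_filename decodes it back to '#'; if |
| | the conversion reports errors, the table part falls back to the |
| | srv_mysql50_table_name_prefix form. */ |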
| 6663 | |
| 6664 | /** Resize the hash tables based on the current buffer pool size. */ |
| 6665 | void |
| 6666 | dict_resize() |
| 6667 | { |
| 6668 | dict_table_t* table; |
| 6669 | |
| 6670 | mutex_enter(&dict_sys->mutex); |
| 6671 | |
| 6672 | /* all table entries are in table_LRU and table_non_LRU lists */ |
| 6673 | hash_table_free(dict_sys->table_hash); |
| 6674 | hash_table_free(dict_sys->table_id_hash); |
| 6675 | |
| 6676 | dict_sys->table_hash = hash_create( |
| 6677 | buf_pool_get_curr_size() |
| 6678 | / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE)); |
| 6679 | |
| 6680 | dict_sys->table_id_hash = hash_create( |
| 6681 | buf_pool_get_curr_size() |
| 6682 | / (DICT_POOL_PER_TABLE_HASH * UNIV_WORD_SIZE)); |
| 6683 | |
| 6684 | for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); table; |
| 6685 | table = UT_LIST_GET_NEXT(table_LRU, table)) { |
| 6686 | ulint fold = ut_fold_string(table->name.m_name); |
| 6687 | ulint id_fold = ut_fold_ull(table->id); |
| 6688 | |
| 6689 | HASH_INSERT(dict_table_t, name_hash, dict_sys->table_hash, |
| 6690 | fold, table); |
| 6691 | |
| 6692 | HASH_INSERT(dict_table_t, id_hash, dict_sys->table_id_hash, |
| 6693 | id_fold, table); |
| 6694 | } |
| 6695 | |
| 6696 | for (table = UT_LIST_GET_FIRST(dict_sys->table_non_LRU); table; |
| 6697 | table = UT_LIST_GET_NEXT(table_LRU, table)) { |
| 6698 | ulint fold = ut_fold_string(table->name.m_name); |
| 6699 | ulint id_fold = ut_fold_ull(table->id); |
| 6700 | |
| 6701 | HASH_INSERT(dict_table_t, name_hash, dict_sys->table_hash, |
| 6702 | fold, table); |
| 6703 | |
| 6704 | HASH_INSERT(dict_table_t, id_hash, dict_sys->table_id_hash, |
| 6705 | id_fold, table); |
| 6706 | } |
| 6707 | |
| 6708 | mutex_exit(&dict_sys->mutex); |
| 6709 | } |
| 6710 | |
| 6711 | /**********************************************************************//** |
| 6712 | Closes the data dictionary module. */ |
| 6713 | void |
| 6714 | dict_close(void) |
| 6715 | /*============*/ |
| 6716 | { |
| 6717 | if (dict_sys == NULL) { |
| 6718 | /* This should only happen if a failure occurred |
| 6719 | during redo log processing. */ |
| 6720 | return; |
| 6721 | } |
| 6722 | |
| 6723 | /* Acquire only because it's a pre-condition. */ |
| 6724 | mutex_enter(&dict_sys->mutex); |
| 6725 | |
| 6726 | /* Free the hash elements. We don't remove them from the table |
| 6727 | because we are going to destroy the table anyway. */ |
| 6728 | for (ulint i = 0; i < hash_get_n_cells(dict_sys->table_id_hash); i++) { |
| 6729 | dict_table_t* table; |
| 6730 | |
| 6731 | table = static_cast<dict_table_t*>( |
| 6732 | HASH_GET_FIRST(dict_sys->table_hash, i)); |
| 6733 | |
| 6734 | while (table) { |
| 6735 | dict_table_t* prev_table = table; |
| 6736 | |
| 6737 | table = static_cast<dict_table_t*>( |
| 6738 | HASH_GET_NEXT(name_hash, prev_table)); |
| 6739 | ut_ad(prev_table->magic_n == DICT_TABLE_MAGIC_N); |
| 6740 | dict_table_remove_from_cache(prev_table); |
| 6741 | } |
| 6742 | } |
| 6743 | |
| 6744 | hash_table_free(dict_sys->table_hash); |
| 6745 | |
| 6746 | /* The elements are the same instance as in dict_sys->table_hash, |
| 6747 | therefore we don't delete the individual elements. */ |
| 6748 | hash_table_free(dict_sys->table_id_hash); |
| 6749 | |
| 6750 | mutex_exit(&dict_sys->mutex); |
| 6751 | mutex_free(&dict_sys->mutex); |
| 6752 | |
| 6753 | rw_lock_free(dict_operation_lock); |
| 6754 | |
| 6755 | ut_free(dict_operation_lock); |
| 6756 | dict_operation_lock = NULL; |
| 6757 | |
| 6758 | mutex_free(&dict_foreign_err_mutex); |
| 6759 | |
| 6760 | if (dict_foreign_err_file) { |
| 6761 | fclose(dict_foreign_err_file); |
| 6762 | dict_foreign_err_file = NULL; |
| 6763 | } |
| 6764 | |
| 6765 | ut_free(dict_sys); |
| 6766 | |
| 6767 | dict_sys = NULL; |
| 6768 | } |
| 6769 | |
| 6770 | #ifdef UNIV_DEBUG |
| 6771 | /**********************************************************************//** |
| 6772 | Validate the dictionary table LRU list. |
| 6773 | @return TRUE if valid */ |
| 6774 | static |
| 6775 | ibool |
| 6776 | dict_lru_validate(void) |
| 6777 | /*===================*/ |
| 6778 | { |
| 6779 | dict_table_t* table; |
| 6780 | |
| 6781 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 6782 | |
| 6783 | for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); |
| 6784 | table != NULL; |
| 6785 | table = UT_LIST_GET_NEXT(table_LRU, table)) { |
| 6786 | |
| 6787 | ut_a(table->can_be_evicted); |
| 6788 | } |
| 6789 | |
| 6790 | for (table = UT_LIST_GET_FIRST(dict_sys->table_non_LRU); |
| 6791 | table != NULL; |
| 6792 | table = UT_LIST_GET_NEXT(table_LRU, table)) { |
| 6793 | |
| 6794 | ut_a(!table->can_be_evicted); |
| 6795 | } |
| 6796 | |
| 6797 | return(TRUE); |
| 6798 | } |
| 6799 | |
| 6800 | /**********************************************************************//** |
| 6801 | Check if a table exists in the dict table LRU list. |
| 6802 | @return TRUE if table found in LRU list */ |
| 6803 | static |
| 6804 | ibool |
| 6805 | dict_lru_find_table( |
| 6806 | /*================*/ |
| 6807 | const dict_table_t* find_table) /*!< in: table to find */ |
| 6808 | { |
| 6809 | dict_table_t* table; |
| 6810 | |
| 6811 | ut_ad(find_table != NULL); |
| 6812 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 6813 | |
| 6814 | for (table = UT_LIST_GET_FIRST(dict_sys->table_LRU); |
| 6815 | table != NULL; |
| 6816 | table = UT_LIST_GET_NEXT(table_LRU, table)) { |
| 6817 | |
| 6818 | ut_a(table->can_be_evicted); |
| 6819 | |
| 6820 | if (table == find_table) { |
| 6821 | return(TRUE); |
| 6822 | } |
| 6823 | } |
| 6824 | |
| 6825 | return(FALSE); |
| 6826 | } |
| 6827 | |
| 6828 | /**********************************************************************//** |
| 6829 | Check if a table exists in the dict table non-LRU list. |
| 6830 | @return TRUE if table found in non-LRU list */ |
| 6831 | static |
| 6832 | ibool |
| 6833 | dict_non_lru_find_table( |
| 6834 | /*====================*/ |
| 6835 | const dict_table_t* find_table) /*!< in: table to find */ |
| 6836 | { |
| 6837 | dict_table_t* table; |
| 6838 | |
| 6839 | ut_ad(find_table != NULL); |
| 6840 | ut_ad(mutex_own(&dict_sys->mutex)); |
| 6841 | |
| 6842 | for (table = UT_LIST_GET_FIRST(dict_sys->table_non_LRU); |
| 6843 | table != NULL; |
| 6844 | table = UT_LIST_GET_NEXT(table_LRU, table)) { |
| 6845 | |
| 6846 | ut_a(!table->can_be_evicted); |
| 6847 | |
| 6848 | if (table == find_table) { |
| 6849 | return(TRUE); |
| 6850 | } |
| 6851 | } |
| 6852 | |
| 6853 | return(FALSE); |
| 6854 | } |
| 6855 | #endif /* UNIV_DEBUG */ |
| 6856 | /*********************************************************************//** |
| 6857 | Check an index to see whether its first fields are the columns in the array |
| 6858 | in the same order, and that the index is not marked for deletion and is not |
| 6859 | the same as types_idx. |
| 6860 | @return true if the index qualifies, otherwise false */ |
| 6861 | bool |
| 6862 | dict_foreign_qualify_index( |
| 6863 | /*=======================*/ |
| 6864 | const dict_table_t* table, /*!< in: table */ |
| 6865 | const char** col_names, |
| 6866 | /*!< in: column names, or NULL |
| 6867 | to use table->col_names */ |
| 6868 | const char** columns,/*!< in: array of column names */ |
| 6869 | ulint n_cols, /*!< in: number of columns */ |
| 6870 | const dict_index_t* index, /*!< in: index to check */ |
| 6871 | const dict_index_t* types_idx, |
| 6872 | /*!< in: NULL or an index |
| 6873 | whose types the column types |
| 6874 | must match */ |
| 6875 | bool check_charsets, |
| 6876 | /*!< in: whether to check |
| 6877 | charsets. only has an effect |
| 6878 | if types_idx != NULL */ |
| 6879 | ulint check_null, |
| 6880 | /*!< in: nonzero if none of |
| 6881 | the columns must be declared |
| 6882 | NOT NULL */ |
| 6883 | ulint* error, /*!< out: error code */ |
| 6884 | ulint* err_col_no, |
| 6885 | /*!< out: column number where |
| 6886 | error happened */ |
| 6887 | dict_index_t** err_index) |
| 6888 | /*!< out: index where error |
| 6889 | happened */ |
| 6890 | { |
| 6891 | if (dict_index_get_n_fields(index) < n_cols) { |
| 6892 | return(false); |
| 6893 | } |
| 6894 | |
| 6895 | for (ulint i = 0; i < n_cols; i++) { |
| 6896 | dict_field_t* field; |
| 6897 | const char* col_name; |
| 6898 | ulint col_no; |
| 6899 | |
| 6900 | field = dict_index_get_nth_field(index, i); |
| 6901 | col_no = dict_col_get_no(field->col); |
| 6902 | |
| 6903 | if (field->prefix_len != 0) { |
| 6904 | /* We do not accept column prefix |
| 6905 | indexes here */ |
| 6906 | if (error && err_col_no && err_index) { |
| 6907 | *error = DB_FOREIGN_KEY_IS_PREFIX_INDEX; |
| 6908 | *err_col_no = i; |
| 6909 | *err_index = (dict_index_t*)index; |
| 6910 | } |
| 6911 | return(false); |
| 6912 | } |
| 6913 | |
| 6914 | if (check_null |
| 6915 | && (field->col->prtype & DATA_NOT_NULL)) { |
| 6916 | if (error && err_col_no && err_index) { |
| 6917 | *error = DB_FOREIGN_KEY_COL_NOT_NULL; |
| 6918 | *err_col_no = i; |
| 6919 | *err_index = (dict_index_t*)index; |
| 6920 | } |
| 6921 | return(false); |
| 6922 | } |
| 6923 | |
| 6924 | if (field->col->is_virtual()) { |
| 6925 | for (ulint j = 0; j < table->n_v_def; j++) { |
| 6926 | col_name = dict_table_get_v_col_name(table, j); |
| 6927 | if (innobase_strcasecmp(field->name,col_name) == 0) { |
| 6928 | break; |
| 6929 | } |
| 6930 | } |
| 6931 | } else { |
| 6932 | col_name = col_names |
| 6933 | ? col_names[col_no] |
| 6934 | : dict_table_get_col_name(table, col_no); |
| 6935 | } |
| 6936 | |
| 6937 | if (0 != innobase_strcasecmp(columns[i], col_name)) { |
| 6938 | return(false); |
| 6939 | } |
| 6940 | |
| 6941 | if (types_idx && !cmp_cols_are_equal( |
| 6942 | dict_index_get_nth_col(index, i), |
| 6943 | dict_index_get_nth_col(types_idx, i), |
| 6944 | check_charsets)) { |
| 6945 | if (error && err_col_no && err_index) { |
| 6946 | *error = DB_FOREIGN_KEY_COLS_NOT_EQUAL; |
| 6947 | *err_col_no = i; |
| 6948 | *err_index = (dict_index_t*)index; |
| 6949 | } |
| 6950 | |
| 6951 | return(false); |
| 6952 | } |
| 6953 | } |
| 6954 | |
| 6955 | return(true); |
| 6956 | } |
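| | /* Usage sketch (editor's illustration; the column name is hypothetical): |
| | callers such as dict_foreign_find_index() invoke this predicate for each |
| | candidate index of the table. A direct call could look like: |
| | |
| | ulint error; |
| | ulint err_col; |
| | dict_index_t* err_index; |
| | const char* cols[] = {"parent_id"}; |
| | |
| | bool ok = dict_foreign_qualify_index( |
| | table, NULL, cols, 1, index, |
| | /*types_idx=*/NULL, /*check_charsets=*/true, |
| | /*check_null=*/0, &error, &err_col, &err_index); |
| | |
| | The three out-parameters are filled in only when all of them are |
| | non-NULL and the index is rejected for a prefix column, a NOT NULL |
| | column (with check_null set), or a column type mismatch. */ |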
| 6957 | |
| 6958 | /*********************************************************************//** |
| 6959 | Update the state of compression failure padding heuristics. This is |
| 6960 | called whenever a compression operation succeeds or fails. |
| 6961 | The caller must be holding info->mutex */ |
| 6962 | static |
| 6963 | void |
| 6964 | dict_index_zip_pad_update( |
| 6965 | /*======================*/ |
| 6966 | zip_pad_info_t* info, /*<! in/out: info to be updated */ |
| 6967 | ulint zip_threshold) /*<! in: zip threshold value */ |
| 6968 | { |
| 6969 | ulint total; |
| 6970 | ulint fail_pct; |
| 6971 | |
| 6972 | ut_ad(info); |
| 6973 | |
| 6974 | total = info->success + info->failure; |
| 6975 | |
| 6976 | ut_ad(total > 0); |
| 6977 | |
| 6978 | if (zip_threshold == 0) { |
| 6979 | /* User has just disabled the padding. */ |
| 6980 | return; |
| 6981 | } |
| 6982 | |
| 6983 | if (total < ZIP_PAD_ROUND_LEN) { |
| 6984 | /* We are in the middle of a round. Do nothing. */ |
| 6985 | return; |
| 6986 | } |
| 6987 | |
| 6988 | /* We are at a 'round' boundary. Reset the values but first |
| 6989 | calculate fail rate for our heuristic. */ |
| 6990 | fail_pct = (info->failure * 100) / total; |
| 6991 | info->failure = 0; |
| 6992 | info->success = 0; |
| 6993 | |
| 6994 | if (fail_pct > zip_threshold) { |
| 6995 | /* Compression failures exceed the user-defined |
| 6996 | threshold. Increase the pad size to reduce the chance of |
| 6997 | further compression failures. */ |
| 6998 | ut_ad(info->pad % ZIP_PAD_INCR == 0); |
| 6999 | |
| 7000 | /* Only do increment if it won't increase padding |
| 7001 | beyond max pad size. */ |
| 7002 | if (info->pad + ZIP_PAD_INCR |
| 7003 | < (srv_page_size * zip_pad_max) / 100) { |
| 7004 | /* Use atomics even though we have the mutex. |
| 7005 | This is to ensure that we are able to read |
| 7006 | info->pad atomically. */ |
| 7007 | my_atomic_addlint(&info->pad, ZIP_PAD_INCR); |
| 7008 | |
| 7009 | MONITOR_INC(MONITOR_PAD_INCREMENTS); |
| 7010 | } |
| 7011 | |
| 7012 | info->n_rounds = 0; |
| 7013 | |
| 7014 | } else { |
| 7015 | /* Failure rate was OK. Another successful round |
| 7016 | completed. */ |
| 7017 | ++info->n_rounds; |
| 7018 | |
| 7019 | /* If enough successful rounds are completed with |
| 7020 | compression failure rate in control, decrease the |
| 7021 | padding. */ |
| 7022 | if (info->n_rounds >= ZIP_PAD_SUCCESSFUL_ROUND_LIMIT |
| 7023 | && info->pad > 0) { |
| 7024 | |
| 7025 | ut_ad(info->pad % ZIP_PAD_INCR == 0); |
| 7026 | /* Use atomics even though we have the mutex. |
| 7027 | This is to ensure that we are able to read |
| 7028 | info->pad atomically. */ |
| 7029 | my_atomic_addlint(&info->pad, ulint(-ZIP_PAD_INCR)); |
| 7030 | |
| 7031 | info->n_rounds = 0; |
| 7032 | |
| 7033 | MONITOR_INC(MONITOR_PAD_DECREMENTS); |
| 7034 | } |
| 7035 | } |
| 7036 | } |
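| | /* Worked example (editor's note; the figures are only illustrative): |
| | suppose zip_threshold = 5 and a round of 128 operations has completed |
| | (total >= ZIP_PAD_ROUND_LEN) with info->failure = 10. Then |
| | fail_pct = 10 * 100 / 128 = 7, which exceeds the threshold, so the pad |
| | grows by ZIP_PAD_INCR as long as the result stays below |
| | srv_page_size * zip_pad_max / 100. Had fail_pct been at or below the |
| | threshold, n_rounds would advance instead, and after |
| | ZIP_PAD_SUCCESSFUL_ROUND_LIMIT such rounds the pad would shrink by |
| | ZIP_PAD_INCR. */ |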
| 7037 | |
| 7038 | /*********************************************************************//** |
| 7039 | This function should be called whenever a page is successfully |
| 7040 | compressed. Updates the compression padding information. */ |
| 7041 | void |
| 7042 | dict_index_zip_success( |
| 7043 | /*===================*/ |
| 7044 | dict_index_t* index) /*!< in/out: index to be updated. */ |
| 7045 | { |
| 7046 | ut_ad(index); |
| 7047 | |
| 7048 | ulint zip_threshold = zip_failure_threshold_pct; |
| 7049 | if (!zip_threshold) { |
| 7050 | /* Disabled by user. */ |
| 7051 | return; |
| 7052 | } |
| 7053 | |
| 7054 | dict_index_zip_pad_lock(index); |
| 7055 | ++index->zip_pad.success; |
| 7056 | dict_index_zip_pad_update(&index->zip_pad, zip_threshold); |
| 7057 | dict_index_zip_pad_unlock(index); |
| 7058 | } |
| 7059 | |
| 7060 | /*********************************************************************//** |
| 7061 | This function should be called whenever a page compression attempt |
| 7062 | fails. Updates the compression padding information. */ |
| 7063 | void |
| 7064 | dict_index_zip_failure( |
| 7065 | /*===================*/ |
| 7066 | dict_index_t* index) /*!< in/out: index to be updated. */ |
| 7067 | { |
| 7068 | ut_ad(index); |
| 7069 | |
| 7070 | ulint zip_threshold = zip_failure_threshold_pct; |
| 7071 | if (!zip_threshold) { |
| 7072 | /* Disabled by user. */ |
| 7073 | return; |
| 7074 | } |
| 7075 | |
| 7076 | dict_index_zip_pad_lock(index); |
| 7077 | ++index->zip_pad.failure; |
| 7078 | dict_index_zip_pad_update(&index->zip_pad, zip_threshold); |
| 7079 | dict_index_zip_pad_unlock(index); |
| 7080 | } |
| 7081 | |
| 7082 | /*********************************************************************//** |
| 7083 | Return the optimal page size, for which the page will likely compress. |
| 7084 | @return page size beyond which the page might not compress */ |
| 7085 | ulint |
| 7086 | dict_index_zip_pad_optimal_page_size( |
| 7087 | /*=================================*/ |
| 7088 | dict_index_t* index) /*!< in: index for which page size |
| 7089 | is requested */ |
| 7090 | { |
| 7091 | ulint pad; |
| 7092 | ulint min_sz; |
| 7093 | ulint sz; |
| 7094 | |
| 7095 | ut_ad(index); |
| 7096 | |
| 7097 | if (!zip_failure_threshold_pct) { |
| 7098 | /* Disabled by user. */ |
| 7099 | return(srv_page_size); |
| 7100 | } |
| 7101 | |
| 7102 | pad = my_atomic_loadlint(&index->zip_pad.pad); |
| 7103 | |
| 7104 | ut_ad(pad < srv_page_size); |
| 7105 | sz = srv_page_size - pad; |
| 7106 | |
| 7107 | /* Min size allowed by user. */ |
| 7108 | ut_ad(zip_pad_max < 100); |
| 7109 | min_sz = (srv_page_size * (100 - zip_pad_max)) / 100; |
| 7110 | |
| 7111 | return(ut_max(sz, min_sz)); |
| 7112 | } |
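| | /* Worked example (editor's note; assumes the default 16KiB page size |
| | and zip_pad_max = 50, both of which are configuration-dependent): with |
| | index->zip_pad.pad = 1024 the function returns 16384 - 1024 = 15360, |
| | since that is above the floor of 16384 * (100 - 50) / 100 = 8192. Once |
| | the pad has grown far enough, the floor value is returned instead. */ |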
| 7113 | |
| 7114 | /*************************************************************//** |
| 7115 | Convert table flag to row format string. |
| 7116 | @return row format name. */ |
| 7117 | const char* |
| 7118 | dict_tf_to_row_format_string( |
| 7119 | /*=========================*/ |
| 7120 | ulint table_flag) /*!< in: row format setting */ |
| 7121 | { |
| 7122 | switch (dict_tf_get_rec_format(table_flag)) { |
| 7123 | case REC_FORMAT_REDUNDANT: |
| 7124 | return("ROW_TYPE_REDUNDANT" ); |
| 7125 | case REC_FORMAT_COMPACT: |
| 7126 | return("ROW_TYPE_COMPACT" ); |
| 7127 | case REC_FORMAT_COMPRESSED: |
| 7128 | return("ROW_TYPE_COMPRESSED" ); |
| 7129 | case REC_FORMAT_DYNAMIC: |
| 7130 | return("ROW_TYPE_DYNAMIC" ); |
| 7131 | } |
| 7132 | |
| 7133 | ut_error; |
| 7134 | return(0); |
| 7135 | } |
| 7136 | |
| 7137 | /** Calculate the used memory occupied by the data dictionary |
| 7138 | table and index objects. |
| 7139 | @return number of bytes occupied. */ |
| 7140 | UNIV_INTERN |
| 7141 | ulint |
| 7142 | dict_sys_get_size() |
| 7143 | { |
| 7144 | ulint size = 0; |
| 7145 | |
| 7146 | ut_ad(dict_sys); |
| 7147 | |
| 7148 | mutex_enter(&dict_sys->mutex); |
| 7149 | |
| 7150 | for (ulint i = 0; i < hash_get_n_cells(dict_sys->table_hash); i++) { |
| 7151 | dict_table_t* table; |
| 7152 | |
| 7153 | for (table = static_cast<dict_table_t*>(HASH_GET_FIRST(dict_sys->table_hash, i)); |
| 7154 | table != NULL; |
| 7155 | table = static_cast<dict_table_t*>(HASH_GET_NEXT(name_hash, table))) { |
| 7156 | dict_index_t* index; |
| 7157 | size += mem_heap_get_size(table->heap) + strlen(table->name.m_name) + 1; |
| 7158 | |
| 7159 | for (index = dict_table_get_first_index(table); |
| 7160 | index != NULL; |
| 7161 | index = dict_table_get_next_index(index)) { |
| 7162 | size += mem_heap_get_size(index->heap); |
| 7163 | } |
| 7164 | } |
| 7165 | } |
| 7166 | |
| 7167 | mutex_exit(&dict_sys->mutex); |
| 7168 | |
| 7169 | return (size); |
| 7170 | } |
| 7171 | |
| 7172 | /** Look for any dictionary objects that are found in the given tablespace. |
| 7173 | @param[in] space_id Tablespace ID to search for. |
| 7174 | @return true if tablespace is empty. */ |
| 7175 | bool |
| 7176 | dict_space_is_empty( |
| 7177 | ulint space_id) |
| 7178 | { |
| 7179 | btr_pcur_t pcur; |
| 7180 | const rec_t* rec; |
| 7181 | mtr_t mtr; |
| 7182 | bool found = false; |
| 7183 | |
| 7184 | rw_lock_x_lock(dict_operation_lock); |
| 7185 | mutex_enter(&dict_sys->mutex); |
| 7186 | mtr_start(&mtr); |
| 7187 | |
| 7188 | for (rec = dict_startscan_system(&pcur, &mtr, SYS_TABLES); |
| 7189 | rec != NULL; |
| 7190 | rec = dict_getnext_system(&pcur, &mtr)) { |
| 7191 | const byte* field; |
| 7192 | ulint len; |
| 7193 | ulint space_id_for_table; |
| 7194 | |
| 7195 | field = rec_get_nth_field_old( |
| 7196 | rec, DICT_FLD__SYS_TABLES__SPACE, &len); |
| 7197 | ut_ad(len == 4); |
| 7198 | space_id_for_table = mach_read_from_4(field); |
| 7199 | |
| 7200 | if (space_id_for_table == space_id) { |
| 7201 | found = true; |
| 7202 | } |
| 7203 | } |
| 7204 | |
| 7205 | mtr_commit(&mtr); |
| 7206 | mutex_exit(&dict_sys->mutex); |
| 7207 | rw_lock_x_unlock(dict_operation_lock); |
| 7208 | |
| 7209 | return(!found); |
| 7210 | } |
| 7211 | |
| 7212 | /** Find the space_id for the given name in sys_tablespaces. |
| 7213 | @param[in] name Tablespace name to search for. |
| 7214 | @return the tablespace ID. */ |
| 7215 | ulint |
| 7216 | dict_space_get_id( |
| 7217 | const char* name) |
| 7218 | { |
| 7219 | btr_pcur_t pcur; |
| 7220 | const rec_t* rec; |
| 7221 | mtr_t mtr; |
| 7222 | ulint name_len = strlen(name); |
| 7223 | ulint id = ULINT_UNDEFINED; |
| 7224 | |
| 7225 | rw_lock_x_lock(dict_operation_lock); |
| 7226 | mutex_enter(&dict_sys->mutex); |
| 7227 | mtr_start(&mtr); |
| 7228 | |
| 7229 | for (rec = dict_startscan_system(&pcur, &mtr, SYS_TABLESPACES); |
| 7230 | rec != NULL; |
| 7231 | rec = dict_getnext_system(&pcur, &mtr)) { |
| 7232 | const byte* field; |
| 7233 | ulint len; |
| 7234 | |
| 7235 | field = rec_get_nth_field_old( |
| 7236 | rec, DICT_FLD__SYS_TABLESPACES__NAME, &len); |
| 7237 | ut_ad(len > 0); |
| 7238 | ut_ad(len < OS_FILE_MAX_PATH); |
| 7239 | |
| 7240 | if (len == name_len && ut_memcmp(name, field, len) == 0) { |
| 7241 | |
| 7242 | field = rec_get_nth_field_old( |
| 7243 | rec, DICT_FLD__SYS_TABLESPACES__SPACE, &len); |
| 7244 | ut_ad(len == 4); |
| 7245 | id = mach_read_from_4(field); |
| 7246 | |
| 7247 | /* This is normally called by dict_getnext_system() |
| 7248 | at the end of the index. */ |
| 7249 | btr_pcur_close(&pcur); |
| 7250 | break; |
| 7251 | } |
| 7252 | } |
| 7253 | |
| 7254 | mtr_commit(&mtr); |
| 7255 | mutex_exit(&dict_sys->mutex); |
| 7256 | rw_lock_x_unlock(dict_operation_lock); |
| 7257 | |
| 7258 | return(id); |
| 7259 | } |
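| | /* Usage sketch (editor's illustration; the tablespace name is |
| | hypothetical): |
| | |
| | ulint space_id = dict_space_get_id("mydb/mytable"); |
| | |
| | if (space_id == ULINT_UNDEFINED) { |
| | // no SYS_TABLESPACES record with that name |
| | } |
| | */ |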
| 7260 | |
| 7261 | /** Determine the extent size (in pages) for the given table |
| 7262 | @param[in] table the table whose extent size is being |
| 7263 | calculated. |
| 7264 | @return extent size in pages (256, 128 or 64) */ |
| 7265 | ulint |
| 7266 | dict_table_extent_size( |
| 7267 | const dict_table_t* table) |
| 7268 | { |
| 7269 | const ulint mb_1 = 1024 * 1024; |
| 7270 | const ulint mb_2 = 2 * mb_1; |
| 7271 | const ulint mb_4 = 4 * mb_1; |
| 7272 | |
| 7273 | page_size_t page_size = dict_table_page_size(table); |
| 7274 | ulint pages_in_extent = FSP_EXTENT_SIZE; |
| 7275 | |
| 7276 | if (page_size.is_compressed()) { |
| 7277 | |
| 7278 | ulint disk_page_size = page_size.physical(); |
| 7279 | |
| 7280 | switch (disk_page_size) { |
| 7281 | case 1024: |
| 7282 | pages_in_extent = mb_1/1024; |
| 7283 | break; |
| 7284 | case 2048: |
| 7285 | pages_in_extent = mb_1/2048; |
| 7286 | break; |
| 7287 | case 4096: |
| 7288 | pages_in_extent = mb_1/4096; |
| 7289 | break; |
| 7290 | case 8192: |
| 7291 | pages_in_extent = mb_1/8192; |
| 7292 | break; |
| 7293 | case 16384: |
| 7294 | pages_in_extent = mb_1/16384; |
| 7295 | break; |
| 7296 | case 32768: |
| 7297 | pages_in_extent = mb_2/32768; |
| 7298 | break; |
| 7299 | case 65536: |
| 7300 | pages_in_extent = mb_4/65536; |
| 7301 | break; |
| 7302 | default: |
| 7303 | ut_ad(0); |
| 7304 | } |
| 7305 | } |
| 7306 | |
| 7307 | return(pages_in_extent); |
| 7308 | } |
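| | /* Worked example (editor's note): for a ROW_FORMAT=COMPRESSED table |
| | with an 8192-byte physical page size the extent covers 1MiB, i.e. |
| | 1048576 / 8192 = 128 pages; with a 16384-byte physical page it is |
| | 1048576 / 16384 = 64 pages. Uncompressed tables simply use |
| | FSP_EXTENT_SIZE. */ |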
| 7309 | |