| 1 | /* Copyright (c) 2004, 2011, Oracle and/or its affiliates. |
| 2 | |
| 3 | This program is free software; you can redistribute it and/or modify |
| 4 | it under the terms of the GNU General Public License as published by |
| 5 | the Free Software Foundation; version 2 of the License. |
| 6 | |
| 7 | This program is distributed in the hope that it will be useful, |
| 8 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 9 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 10 | GNU General Public License for more details. |
| 11 | |
| 12 | You should have received a copy of the GNU General Public License |
| 13 | along with this program; if not, write to the Free Software |
| 14 | Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ |
| 15 | |
| 16 | /* |
| 17 | Make sure to look at ha_tina.h for more details. |
| 18 | |
| 19 | First off, this is a plaything for me; there are a number of things |
| 20 | wrong with it: |
| 21 | *) It was designed for csv and therefore its performance is highly |
| 22 | questionable. |
| 23 | *) Indexes have not been implemented. This is because the files can |
| 24 | be traded in and out of the table directory without having to worry |
| 25 | about rebuilding anything. |
| 26 | *) NULLs and "" are treated equally (like a spreadsheet). |
| 27 | *) In the beginning there was no point in anyone other than me |
| 28 | seeing this, so there is a good chance that I haven't documented |
| 29 | it very well. |
| 30 | *) Less design, more "make it work" |
| 31 | |
| 32 | Now there are a few cool things with it: |
| 33 | *) Errors can result in corrupted data files. |
| 34 | *) Data files can be read by spreadsheets directly. |
| 35 | |
| 36 | TODO: |
| 37 | *) Move to a block system for larger files |
| 38 | *) Error recovery; it's all there, it just needs to be finished |
| 39 | *) Document how the chains work. |
| 40 | |
| 41 | -Brian |
| 42 | */ |
| 43 | |
| 44 | #include <my_global.h> |
| 45 | #include "sql_priv.h" |
| 46 | #include "sql_class.h" // SSV |
| 47 | #include <mysql/psi/mysql_file.h> |
| 48 | #include "ha_tina.h" |
| 49 | #include "probes_mysql.h" |
| 50 | |
| 51 | /* |
| 52 | uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + uchar |
| 53 | */ |
| 54 | #define META_BUFFER_SIZE sizeof(uchar) + sizeof(uchar) + sizeof(ulonglong) \ |
| 55 | + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(uchar) |
| 56 | #define TINA_CHECK_HEADER 254 // The number we use to determine corruption |
| 57 | #define BLOB_MEMROOT_ALLOC_SIZE 8192 |
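|  | |
|  | /* |
|  | A sketch of the on-disk layout of the META_BUFFER_SIZE bytes, as they |
|  | are written by write_meta_file() below (offsets assume an 8-byte |
|  | ulonglong, i.e. 35 bytes in total; they are derived from the code, not |
|  | from a separate format specification): |
|  | |
|  | offset 0   uchar      TINA_CHECK_HEADER (magic, 254) |
|  | offset 1   uchar      TINA_VERSION |
|  | offset 2   ulonglong  number of rows in the data file |
|  | offset 10  ulonglong  check_point     (written as 0, not used yet) |
|  | offset 18  ulonglong  auto_increment  (written as 0, not used yet) |
|  | offset 26  ulonglong  forced_flushes  (written as 0, not used yet) |
|  | offset 34  uchar      dirty flag (TRUE while a writer is open) |
|  | */ |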
| 58 | |
| 59 | /* The file extension */ |
| 60 | #define CSV_EXT ".CSV" // The data file |
| 61 | #define CSN_EXT ".CSN" // Files used during repair and update |
| 62 | #define CSM_EXT ".CSM" // Meta file |
| 63 | |
| 64 | struct ha_table_option_struct |
| 65 | { |
| 66 | bool ietf_quotes; |
| 67 | }; |
| 68 | |
| 69 | ha_create_table_option csv_table_option_list[]= |
| 70 | { |
| 71 | HA_TOPTION_BOOL("IETF_QUOTES" , ietf_quotes, 0), |
| 72 | HA_TOPTION_END |
| 73 | }; |
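|  | |
|  | /* |
|  | A minimal usage sketch for the option above (assuming the standard |
|  | MariaDB syntax for engine-defined table options; the table and column |
|  | names are made up): |
|  | |
|  | CREATE TABLE t1 (a INT NOT NULL, b VARCHAR(32) NOT NULL) |
|  | ENGINE=CSV IETF_QUOTES=YES; |
|  | |
|  | With IETF_QUOTES enabled, embedded double quotes are doubled ("") on |
|  | output instead of backslash-escaped (\"); see encode_quote() and |
|  | find_current_row() below. |
|  | */ |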
| 74 | |
| 75 | static TINA_SHARE *get_share(const char *table_name, TABLE *table); |
| 76 | static int free_share(TINA_SHARE *share); |
| 77 | static int read_meta_file(File meta_file, ha_rows *rows); |
| 78 | static int write_meta_file(File meta_file, ha_rows rows, bool dirty); |
| 79 | |
| 80 | extern "C" void tina_get_status(void* param, int concurrent_insert); |
| 81 | extern "C" void tina_update_status(void* param); |
| 82 | extern "C" my_bool tina_check_status(void* param); |
| 83 | |
| 84 | /* Stuff for shares */ |
| 85 | mysql_mutex_t tina_mutex; |
| 86 | static HASH tina_open_tables; |
| 87 | static handler *tina_create_handler(handlerton *hton, |
| 88 | TABLE_SHARE *table, |
| 89 | MEM_ROOT *mem_root); |
| 90 | |
| 91 | |
| 92 | /***************************************************************************** |
| 93 | ** TINA tables |
| 94 | *****************************************************************************/ |
| 95 | |
| 96 | /* |
| 97 | Used for sorting chains with qsort(). |
| 98 | */ |
| 99 | int sort_set (tina_set *a, tina_set *b) |
| 100 | { |
| 101 | /* |
| 102 | We assume that intervals do not intersect. So, it is enough to compare |
| 103 | any two points. Here we take start of intervals for comparison. |
| 104 | */ |
| 105 | return ( a->begin > b->begin ? 1 : ( a->begin < b->begin ? -1 : 0 ) ); |
| 106 | } |
| 107 | |
| 108 | static uchar* tina_get_key(TINA_SHARE *share, size_t *length, |
| 109 | my_bool not_used __attribute__((unused))) |
| 110 | { |
| 111 | *length=share->table_name_length; |
| 112 | return (uchar*) share->table_name; |
| 113 | } |
| 114 | |
| 115 | #ifdef HAVE_PSI_INTERFACE |
| 116 | |
| 117 | static PSI_mutex_key csv_key_mutex_tina, csv_key_mutex_TINA_SHARE_mutex; |
| 118 | |
| 119 | static PSI_mutex_info all_tina_mutexes[]= |
| 120 | { |
| 121 | { &csv_key_mutex_tina, "tina" , PSI_FLAG_GLOBAL}, |
| 122 | { &csv_key_mutex_TINA_SHARE_mutex, "TINA_SHARE::mutex" , 0} |
| 123 | }; |
| 124 | |
| 125 | static PSI_file_key csv_key_file_metadata, csv_key_file_data, |
| 126 | csv_key_file_update; |
| 127 | |
| 128 | static PSI_file_info all_tina_files[]= |
| 129 | { |
| 130 | { &csv_key_file_metadata, "metadata" , 0}, |
| 131 | { &csv_key_file_data, "data" , 0}, |
| 132 | { &csv_key_file_update, "update" , 0} |
| 133 | }; |
| 134 | |
| 135 | static void init_tina_psi_keys(void) |
| 136 | { |
| 137 | const char* category= "csv" ; |
| 138 | int count; |
| 139 | |
| 140 | count= array_elements(all_tina_mutexes); |
| 141 | mysql_mutex_register(category, all_tina_mutexes, count); |
| 142 | |
| 143 | count= array_elements(all_tina_files); |
| 144 | mysql_file_register(category, all_tina_files, count); |
| 145 | } |
| 146 | #endif /* HAVE_PSI_INTERFACE */ |
| 147 | |
| 148 | /* |
| 149 | If frm_error() is called in table.cc this is called to find out what file |
| 150 | extensions exist for this handler. |
| 151 | */ |
| 152 | static const char *ha_tina_exts[] = { |
| 153 | CSV_EXT, |
| 154 | CSM_EXT, |
| 155 | CSN_EXT, |
| 156 | NullS |
| 157 | }; |
| 158 | |
| 159 | static int tina_init_func(void *p) |
| 160 | { |
| 161 | handlerton *tina_hton; |
| 162 | |
| 163 | #ifdef HAVE_PSI_INTERFACE |
| 164 | init_tina_psi_keys(); |
| 165 | #endif |
| 166 | |
| 167 | tina_hton= (handlerton *)p; |
| 168 | mysql_mutex_init(csv_key_mutex_tina, &tina_mutex, MY_MUTEX_INIT_FAST); |
| 169 | (void) my_hash_init(&tina_open_tables,system_charset_info,32,0,0, |
| 170 | (my_hash_get_key) tina_get_key,0,0); |
| 171 | tina_hton->state= SHOW_OPTION_YES; |
| 172 | tina_hton->db_type= DB_TYPE_CSV_DB; |
| 173 | tina_hton->create= tina_create_handler; |
| 174 | tina_hton->flags= (HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES | |
| 175 | HTON_NO_PARTITION); |
| 176 | tina_hton->tablefile_extensions= ha_tina_exts; |
| 177 | tina_hton->table_options= csv_table_option_list; |
| 178 | return 0; |
| 179 | } |
| 180 | |
| 181 | static int tina_done_func(void *p) |
| 182 | { |
| 183 | my_hash_free(&tina_open_tables); |
| 184 | mysql_mutex_destroy(&tina_mutex); |
| 185 | |
| 186 | return 0; |
| 187 | } |
| 188 | |
| 189 | |
| 190 | /* |
| 191 | Simple lock controls. |
| 192 | */ |
| 193 | static TINA_SHARE *get_share(const char *table_name, TABLE *table) |
| 194 | { |
| 195 | TINA_SHARE *share; |
| 196 | char meta_file_name[FN_REFLEN]; |
| 197 | MY_STAT file_stat; /* Stat information for the data file */ |
| 198 | char *tmp_name; |
| 199 | uint length; |
| 200 | |
| 201 | mysql_mutex_lock(&tina_mutex); |
| 202 | length=(uint) strlen(table_name); |
| 203 | |
| 204 | /* |
| 205 | If share is not present in the hash, create a new share and |
| 206 | initialize its members. |
| 207 | */ |
| 208 | if (!(share=(TINA_SHARE*) my_hash_search(&tina_open_tables, |
| 209 | (uchar*) table_name, |
| 210 | length))) |
| 211 | { |
| 212 | if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), |
| 213 | &share, sizeof(*share), |
| 214 | &tmp_name, length+1, |
| 215 | NullS)) |
| 216 | { |
| 217 | mysql_mutex_unlock(&tina_mutex); |
| 218 | return NULL; |
| 219 | } |
| 220 | |
| 221 | share->use_count= 0; |
| 222 | share->is_log_table= FALSE; |
| 223 | share->table_name_length= length; |
| 224 | share->table_name= tmp_name; |
| 225 | share->crashed= FALSE; |
| 226 | share->rows_recorded= 0; |
| 227 | share->update_file_opened= FALSE; |
| 228 | share->tina_write_opened= FALSE; |
| 229 | share->data_file_version= 0; |
| 230 | strmov(share->table_name, table_name); |
| 231 | fn_format(share->data_file_name, table_name, "" , CSV_EXT, |
| 232 | MY_REPLACE_EXT|MY_UNPACK_FILENAME); |
| 233 | fn_format(meta_file_name, table_name, "" , CSM_EXT, |
| 234 | MY_REPLACE_EXT|MY_UNPACK_FILENAME); |
| 235 | |
| 236 | if (mysql_file_stat(csv_key_file_data, |
| 237 | share->data_file_name, &file_stat, MYF(MY_WME)) == NULL) |
| 238 | goto error; |
| 239 | share->saved_data_file_length= file_stat.st_size; |
| 240 | |
| 241 | if (my_hash_insert(&tina_open_tables, (uchar*) share)) |
| 242 | goto error; |
| 243 | thr_lock_init(&share->lock); |
| 244 | mysql_mutex_init(csv_key_mutex_TINA_SHARE_mutex, |
| 245 | &share->mutex, MY_MUTEX_INIT_FAST); |
| 246 | |
| 247 | /* |
| 248 | Open or create the meta file. In the latter case, we'll get |
| 249 | an error during read_meta_file and mark the table as crashed. |
| 250 | Usually this will result in auto-repair, and we will get a good |
| 251 | meta-file in the end. |
| 252 | */ |
| 253 | if (((share->meta_file= mysql_file_open(csv_key_file_metadata, |
| 254 | meta_file_name, |
| 255 | O_RDWR|O_CREAT, |
| 256 | MYF(MY_WME))) == -1) || |
| 257 | read_meta_file(share->meta_file, &share->rows_recorded)) |
| 258 | share->crashed= TRUE; |
| 259 | } |
| 260 | |
| 261 | share->use_count++; |
| 262 | mysql_mutex_unlock(&tina_mutex); |
| 263 | |
| 264 | return share; |
| 265 | |
| 266 | error: |
| 267 | mysql_mutex_unlock(&tina_mutex); |
| 268 | my_free(share); |
| 269 | |
| 270 | return NULL; |
| 271 | } |
| 272 | |
| 273 | |
| 274 | /* |
| 275 | Read CSV meta-file |
| 276 | |
| 277 | SYNOPSIS |
| 278 | read_meta_file() |
| 279 | meta_file The meta-file filedes |
| 280 | ha_rows Pointer to the variable we use to store the row count, |
| 281 | which is read from the meta-file. |
| 282 | |
| 283 | DESCRIPTION |
| 284 | |
| 285 | Read the meta-file info. For now we are only interested in |
| 286 | rows count, crashed bit and magic number. |
| 287 | |
| 288 | RETURN |
| 289 | 0 - OK |
| 290 | non-zero - error occurred |
| 291 | */ |
| 292 | |
| 293 | static int read_meta_file(File meta_file, ha_rows *rows) |
| 294 | { |
| 295 | uchar meta_buffer[META_BUFFER_SIZE]; |
| 296 | uchar *ptr= meta_buffer; |
| 297 | |
| 298 | DBUG_ENTER("ha_tina::read_meta_file" ); |
| 299 | |
| 300 | mysql_file_seek(meta_file, 0, MY_SEEK_SET, MYF(0)); |
| 301 | if (mysql_file_read(meta_file, (uchar*)meta_buffer, META_BUFFER_SIZE, 0) |
| 302 | != META_BUFFER_SIZE) |
| 303 | DBUG_RETURN(my_errno= HA_ERR_CRASHED_ON_USAGE); |
| 304 | |
| 305 | /* |
| 306 | Parse out the meta data, we ignore version at the moment |
| 307 | */ |
| 308 | |
| 309 | ptr+= sizeof(uchar)*2; // Move past header |
| 310 | *rows= (ha_rows)uint8korr(ptr); |
| 311 | ptr+= sizeof(ulonglong); // Move past rows |
| 312 | /* |
| 313 | Move past check_point, auto_increment and forced_flushes fields. |
| 314 | They are present in the format, but we do not use them yet. |
| 315 | */ |
| 316 | ptr+= 3*sizeof(ulonglong); |
| 317 | |
| 318 | /* check crashed bit and magic number */ |
| 319 | if ((meta_buffer[0] != (uchar)TINA_CHECK_HEADER) || |
| 320 | ((bool)(*ptr)== TRUE)) |
| 321 | DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); |
| 322 | |
| 323 | mysql_file_sync(meta_file, MYF(MY_WME)); |
| 324 | |
| 325 | DBUG_RETURN(0); |
| 326 | } |
| 327 | |
| 328 | |
| 329 | /* |
| 330 | Write CSV meta-file |
| 331 | |
| 332 | SYNOPSIS |
| 333 | write_meta_file() |
| 334 | meta_file The meta-file filedes |
| 335 | ha_rows The number of rows we have in the datafile. |
| 336 | dirty A flag, which marks whether we have a corrupt table |
| 337 | |
| 338 | DESCRIPTION |
| 339 | |
| 340 | Write meta-info to the file. Only rows count, crashed bit and |
| 341 | magic number matter now. |
| 342 | |
| 343 | RETURN |
| 344 | 0 - OK |
| 345 | non-zero - error occurred |
| 346 | */ |
| 347 | |
| 348 | static int write_meta_file(File meta_file, ha_rows rows, bool dirty) |
| 349 | { |
| 350 | uchar meta_buffer[META_BUFFER_SIZE]; |
| 351 | uchar *ptr= meta_buffer; |
| 352 | |
| 353 | DBUG_ENTER("ha_tina::write_meta_file" ); |
| 354 | |
| 355 | *ptr= (uchar)TINA_CHECK_HEADER; |
| 356 | ptr+= sizeof(uchar); |
| 357 | *ptr= (uchar)TINA_VERSION; |
| 358 | ptr+= sizeof(uchar); |
| 359 | int8store(ptr, (ulonglong)rows); |
| 360 | ptr+= sizeof(ulonglong); |
| 361 | memset(ptr, 0, 3*sizeof(ulonglong)); |
| 362 | /* |
| 363 | Skip over checkpoint, autoincrement and forced_flushes fields. |
| 364 | We'll need them later. |
| 365 | */ |
| 366 | ptr+= 3*sizeof(ulonglong); |
| 367 | *ptr= (uchar)dirty; |
| 368 | |
| 369 | mysql_file_seek(meta_file, 0, MY_SEEK_SET, MYF(0)); |
| 370 | if (mysql_file_write(meta_file, (uchar *)meta_buffer, META_BUFFER_SIZE, 0) |
| 371 | != META_BUFFER_SIZE) |
| 372 | DBUG_RETURN(-1); |
| 373 | |
| 374 | mysql_file_sync(meta_file, MYF(MY_WME)); |
| 375 | |
| 376 | DBUG_RETURN(0); |
| 377 | } |
| 378 | |
| 379 | bool ha_tina::check_and_repair(THD *thd) |
| 380 | { |
| 381 | HA_CHECK_OPT check_opt; |
| 382 | DBUG_ENTER("ha_tina::check_and_repair" ); |
| 383 | |
| 384 | check_opt.init(); |
| 385 | |
| 386 | DBUG_RETURN(repair(thd, &check_opt)); |
| 387 | } |
| 388 | |
| 389 | |
| 390 | int ha_tina::init_tina_writer() |
| 391 | { |
| 392 | DBUG_ENTER("ha_tina::init_tina_writer" ); |
| 393 | |
| 394 | /* |
| 395 | Mark the file as crashed. We will set the flag back when we close |
| 396 | the file. In the case of a crash it will remain marked as crashed, |
| 397 | which enforces recovery. |
| 398 | */ |
| 399 | (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE); |
| 400 | |
| 401 | if ((share->tina_write_filedes= |
| 402 | mysql_file_open(csv_key_file_data, |
| 403 | share->data_file_name, O_RDWR|O_APPEND, |
| 404 | MYF(MY_WME))) == -1) |
| 405 | { |
| 406 | DBUG_PRINT("info" , ("Could not open tina file writes" )); |
| 407 | share->crashed= TRUE; |
| 408 | DBUG_RETURN(my_errno ? my_errno : -1); |
| 409 | } |
| 410 | share->tina_write_opened= TRUE; |
| 411 | |
| 412 | DBUG_RETURN(0); |
| 413 | } |
| 414 | |
| 415 | |
| 416 | bool ha_tina::is_crashed() const |
| 417 | { |
| 418 | DBUG_ENTER("ha_tina::is_crashed" ); |
| 419 | DBUG_RETURN(share->crashed); |
| 420 | } |
| 421 | |
| 422 | /* |
| 423 | Free lock controls. |
| 424 | */ |
| 425 | static int free_share(TINA_SHARE *share) |
| 426 | { |
| 427 | DBUG_ENTER("ha_tina::free_share" ); |
| 428 | mysql_mutex_lock(&tina_mutex); |
| 429 | int result_code= 0; |
| 430 | if (!--share->use_count){ |
| 431 | /* Write the meta file. Mark it as crashed if needed. */ |
| 432 | if (share->meta_file != -1) |
| 433 | { |
| 434 | (void)write_meta_file(share->meta_file, share->rows_recorded, |
| 435 | share->crashed ? TRUE :FALSE); |
| 436 | if (mysql_file_close(share->meta_file, MYF(0))) |
| 437 | result_code= 1; |
| 438 | } |
| 439 | if (share->tina_write_opened) |
| 440 | { |
| 441 | if (mysql_file_close(share->tina_write_filedes, MYF(0))) |
| 442 | result_code= 1; |
| 443 | share->tina_write_opened= FALSE; |
| 444 | } |
| 445 | |
| 446 | my_hash_delete(&tina_open_tables, (uchar*) share); |
| 447 | thr_lock_delete(&share->lock); |
| 448 | mysql_mutex_destroy(&share->mutex); |
| 449 | my_free(share); |
| 450 | } |
| 451 | mysql_mutex_unlock(&tina_mutex); |
| 452 | |
| 453 | DBUG_RETURN(result_code); |
| 454 | } |
| 455 | |
| 456 | |
| 457 | /* |
| 458 | This function finds the end of a line and returns the length |
| 459 | of the line ending. |
| 460 | |
| 461 | We support three kinds of line endings: |
| 462 | '\r' -- Old Mac OS line ending |
| 463 | '\n' -- Traditional Unix and Mac OS X line ending |
| 464 | '\r''\n' -- DOS/Windows line ending |
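|  | |
|  | For example (illustrative): in a buffer holding "ab\r\ncd" a scan from |
|  | offset 0 returns offset 2 (the '\r') with *eoln_len set to 2; in |
|  | "ab\ncd" it returns offset 2 with *eoln_len set to 1. A return value |
|  | of 0 means no line ending was found before 'end'. |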
| 465 | */ |
| 466 | |
| 467 | my_off_t find_eoln_buff(Transparent_file *data_buff, my_off_t begin, |
| 468 | my_off_t end, int *eoln_len) |
| 469 | { |
| 470 | *eoln_len= 0; |
| 471 | |
| 472 | for (my_off_t x= begin; x < end; x++) |
| 473 | { |
| 474 | /* Unix (includes Mac OS X) */ |
| 475 | if (data_buff->get_value(x) == '\n') |
| 476 | *eoln_len= 1; |
| 477 | else |
| 478 | if (data_buff->get_value(x) == '\r') // Mac or Dos |
| 479 | { |
| 480 | /* old Mac line ending */ |
| 481 | if (x + 1 == end || (data_buff->get_value(x + 1) != '\n')) |
| 482 | *eoln_len= 1; |
| 483 | else // DOS style ending |
| 484 | *eoln_len= 2; |
| 485 | } |
| 486 | |
| 487 | if (*eoln_len) // end of line was found |
| 488 | return x; |
| 489 | } |
| 490 | |
| 491 | return 0; |
| 492 | } |
| 493 | |
| 494 | |
| 495 | static handler *tina_create_handler(handlerton *hton, |
| 496 | TABLE_SHARE *table, |
| 497 | MEM_ROOT *mem_root) |
| 498 | { |
| 499 | return new (mem_root) ha_tina(hton, table); |
| 500 | } |
| 501 | |
| 502 | |
| 503 | ha_tina::ha_tina(handlerton *hton, TABLE_SHARE *table_arg) |
| 504 | :handler(hton, table_arg), |
| 505 | /* |
| 506 | These definitions are found in handler.h |
| 507 | They are probably not completely right. |
| 508 | */ |
| 509 | current_position(0), next_position(0), local_saved_data_file_length(0), |
| 510 | file_buff(0), chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH), |
| 511 | local_data_file_version(0), records_is_known(0) |
| 512 | { |
| 513 | /* Set our original buffers from pre-allocated memory */ |
| 514 | buffer.set((char*)byte_buffer, IO_SIZE, &my_charset_bin); |
| 515 | chain= chain_buffer; |
| 516 | file_buff= new Transparent_file(); |
| 517 | init_alloc_root(&blobroot, "ha_tina" , BLOB_MEMROOT_ALLOC_SIZE, 0, MYF(0)); |
| 518 | } |
| 519 | |
| 520 | |
| 521 | /* |
| 522 | Encode a buffer into the quoted format. |
| 523 | */ |
| 524 | |
| 525 | int ha_tina::encode_quote(const uchar *buf) |
| 526 | { |
| 527 | char attribute_buffer[1024]; |
| 528 | String attribute(attribute_buffer, sizeof(attribute_buffer), |
| 529 | &my_charset_bin); |
| 530 | bool ietf_quotes= table_share->option_struct->ietf_quotes; |
| 531 | my_bitmap_map *org_bitmap= dbug_tmp_use_all_columns(table, table->read_set); |
| 532 | buffer.length(0); |
| 533 | |
| 534 | for (Field **field=table->field ; *field ; field++) |
| 535 | { |
| 536 | const char *ptr; |
| 537 | const char *end_ptr; |
| 538 | const bool was_null= (*field)->is_null(); |
| 539 | |
| 540 | /* |
| 541 | assistance for backwards compatibility in production builds. |
| 542 | note: this will not work for ENUM columns. |
| 543 | */ |
| 544 | if (was_null) |
| 545 | { |
| 546 | (*field)->set_default(); |
| 547 | (*field)->set_notnull(); |
| 548 | } |
| 549 | |
| 550 | (*field)->val_str(&attribute,&attribute); |
| 551 | |
| 552 | if (was_null) |
| 553 | (*field)->set_null(); |
| 554 | |
| 555 | if ((*field)->str_needs_quotes()) |
| 556 | { |
| 557 | ptr= attribute.ptr(); |
| 558 | end_ptr= attribute.length() + ptr; |
| 559 | |
| 560 | /* |
| 561 | Ensure that buffer is big enough. This will also speed things up |
| 562 | as we don't have to do any new allocation in the loop below |
| 563 | */ |
| 564 | if (buffer.realloc(buffer.length() + attribute.length()*2+2)) |
| 565 | return 0; // Failure |
| 566 | |
| 567 | buffer.append('"'); |
| 568 | |
| 569 | for (; ptr < end_ptr; ptr++) |
| 570 | { |
| 571 | if (*ptr == '"') |
| 572 | { |
| 573 | buffer.append(ietf_quotes ? '"' : '\\'); |
| 574 | buffer.append('"'); |
| 575 | } |
| 576 | else if (*ptr == '\r') |
| 577 | { |
| 578 | buffer.append('\\'); |
| 579 | buffer.append('r'); |
| 580 | } |
| 581 | else if (*ptr == '\\') |
| 582 | { |
| 583 | buffer.append('\\'); |
| 584 | buffer.append('\\'); |
| 585 | } |
| 586 | else if (*ptr == '\n') |
| 587 | { |
| 588 | buffer.append('\\'); |
| 589 | buffer.append('n'); |
| 590 | } |
| 591 | else |
| 592 | buffer.append(*ptr); |
| 593 | } |
| 594 | buffer.append('"'); |
| 595 | } |
| 596 | else |
| 597 | { |
| 598 | buffer.append(attribute); |
| 599 | } |
| 600 | |
| 601 | buffer.append(','); |
| 602 | } |
| 603 | // Remove the comma, add a line feed |
| 604 | buffer.length(buffer.length() - 1); |
| 605 | buffer.append('\n'); |
| 606 | |
| 607 | //buffer.replace(buffer.length(), 0, "\n", 1); |
| 608 | |
| 609 | dbug_tmp_restore_column_map(table->read_set, org_bitmap); |
| 610 | return (buffer.length()); |
| 611 | } |
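|  | |
|  | /* |
|  | An illustrative example of the encoding above (the values are made up): |
|  | a row whose VARCHAR column contains ab"c followed by a newline, and |
|  | whose INT column contains 7, is written as the line |
|  | |
|  | "ab\"c\n",7 |
|  | |
|  | followed by a real '\n' terminator. With IETF_QUOTES enabled the same |
|  | row is written as "ab""c\n",7 -- only the handling of embedded double |
|  | quotes differs; \r, \n and backslash are always backslash-escaped. |
|  | */ |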
| 612 | |
| 613 | /* |
| 614 | chain_append() adds delete positions to the chain that we use to keep |
| 615 | track of space. The chain is later used to clean up the "holes" that |
| 616 | occur due to deletes and updates. |
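|  | |
|  | For example (illustrative offsets): deleting the row at bytes [100, 120) |
|  | and then the row at [120, 140) does not create two chain entries; since |
|  | the previous entry ends exactly at the new current_position, it is simply |
|  | extended to cover [100, 140). |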
| 617 | */ |
| 618 | int ha_tina::chain_append() |
| 619 | { |
| 620 | if ( chain_ptr != chain && (chain_ptr -1)->end == current_position) |
| 621 | (chain_ptr -1)->end= next_position; |
| 622 | else |
| 623 | { |
| 624 | /* We set up for the next position */ |
| 625 | if ((off_t)(chain_ptr - chain) == (chain_size -1)) |
| 626 | { |
| 627 | my_off_t location= chain_ptr - chain; |
| 628 | chain_size += DEFAULT_CHAIN_LENGTH; |
| 629 | if (chain_alloced) |
| 630 | { |
| 631 | /* Must cast since my_malloc unlike malloc doesn't have a void ptr */ |
| 632 | if ((chain= (tina_set *) my_realloc((uchar*)chain, |
| 633 | chain_size, MYF(MY_WME))) == NULL) |
| 634 | return -1; |
| 635 | } |
| 636 | else |
| 637 | { |
| 638 | tina_set *ptr= (tina_set *) my_malloc(chain_size * sizeof(tina_set), |
| 639 | MYF(MY_WME)); |
| 640 | memcpy(ptr, chain, DEFAULT_CHAIN_LENGTH * sizeof(tina_set)); |
| 641 | chain= ptr; |
| 642 | chain_alloced++; |
| 643 | } |
| 644 | chain_ptr= chain + location; |
| 645 | } |
| 646 | chain_ptr->begin= current_position; |
| 647 | chain_ptr->end= next_position; |
| 648 | chain_ptr++; |
| 649 | } |
| 650 | |
| 651 | return 0; |
| 652 | } |
| 653 | |
| 654 | |
| 655 | /* |
| 656 | Scans for a row. |
| 657 | */ |
| 658 | int ha_tina::find_current_row(uchar *buf) |
| 659 | { |
| 660 | my_off_t end_offset, curr_offset= current_position; |
| 661 | int eoln_len; |
| 662 | my_bitmap_map *org_bitmap; |
| 663 | int error; |
| 664 | bool read_all; |
| 665 | bool ietf_quotes= table_share->option_struct->ietf_quotes; |
| 666 | DBUG_ENTER("ha_tina::find_current_row" ); |
| 667 | |
| 668 | free_root(&blobroot, MYF(0)); |
| 669 | |
| 670 | /* |
| 671 | We do not read further than local_saved_data_file_length in order |
| 672 | not to conflict with an ongoing concurrent insert. |
| 673 | */ |
| 674 | if ((end_offset= |
| 675 | find_eoln_buff(file_buff, current_position, |
| 676 | local_saved_data_file_length, &eoln_len)) == 0) |
| 677 | DBUG_RETURN(HA_ERR_END_OF_FILE); |
| 678 | |
| 679 | /* We must read all columns in case a table is opened for update */ |
| 680 | read_all= !bitmap_is_clear_all(table->write_set); |
| 681 | /* Avoid asserts in ::store() for columns that are not going to be updated */ |
| 682 | org_bitmap= dbug_tmp_use_all_columns(table, table->write_set); |
| 683 | error= HA_ERR_CRASHED_ON_USAGE; |
| 684 | |
| 685 | memset(buf, 0, table->s->null_bytes); |
| 686 | |
| 687 | /* |
| 688 | Parse the line obtained using the following algorithm |
| 689 | |
| 690 | BEGIN |
| 691 | 1) Store the EOL (end of line) for the current row |
| 692 | 2) Until all the fields in the current query have been |
| 693 | filled |
| 694 | 2.1) If the current character is a quote |
| 695 | 2.1.1) Until EOL is reached |
| 696 | a) If end of current field is reached, move |
| 697 | to next field and jump to step 2.3 |
| 698 | b) If current character is a \\ handle |
| 699 | \\n, \\r, \\, and \\" if not in ietf_quotes mode |
| 700 | c) if in ietf_quotes mode and the current character is |
| 701 | a ", handle "" |
| 702 | d) else append the current character into the buffer |
| 703 | before checking that EOL has not been reached. |
| 704 | 2.2) If the current character does not begin with a quote |
| 705 | 2.2.1) Until EOL is reached |
| 706 | a) If the end of field has been reached move to the |
| 707 | next field and jump to step 2.3 |
| 708 | b) If current character begins with \\ handle |
| 709 | \\n, \\r, \\, \\" |
| 710 | c) else append the current character into the buffer |
| 711 | before checking that EOL has not been reached. |
| 712 | 2.3) Store the current field value and jump to 2) |
| 713 | TERMINATE |
| 714 | */ |
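|  | |
|  | /* |
|  | An illustrative example (with IETF_QUOTES disabled): the line |
|  | |
|  | "ab\"c",de\nf,7 |
|  | |
|  | is parsed into three fields: ab"c (quoted, the escaped quote restored), |
|  | de<newline>f (unquoted, \n turned back into a real newline) and 7. |
|  | */ |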
| 715 | |
| 716 | for (Field **field=table->field ; *field ; field++) |
| 717 | { |
| 718 | char curr_char; |
| 719 | |
| 720 | buffer.length(0); |
| 721 | if (curr_offset >= end_offset) |
| 722 | goto err; |
| 723 | curr_char= file_buff->get_value(curr_offset); |
| 724 | /* Handle the case where the first character is a quote */ |
| 725 | if (curr_char == '"') |
| 726 | { |
| 727 | /* Increment past the first quote */ |
| 728 | curr_offset++; |
| 729 | |
| 730 | /* Loop through the row to extract the values for the current field */ |
| 731 | for ( ; curr_offset < end_offset; curr_offset++) |
| 732 | { |
| 733 | curr_char= file_buff->get_value(curr_offset); |
| 734 | /* check for end of the current field */ |
| 735 | if (curr_char == '"' && |
| 736 | (curr_offset == end_offset - 1 || |
| 737 | file_buff->get_value(curr_offset + 1) == ',')) |
| 738 | { |
| 739 | /* Move past the , and the " */ |
| 740 | curr_offset+= 2; |
| 741 | break; |
| 742 | } |
| 743 | if (ietf_quotes && curr_char == '"' |
| 744 | && file_buff->get_value(curr_offset + 1) == '"') |
| 745 | { |
| 746 | /* Embedded IETF quote */ |
| 747 | curr_offset++; |
| 748 | buffer.append('"'); |
| 749 | } |
| 750 | else if (curr_char == '\\' && curr_offset != (end_offset - 1)) |
| 751 | { |
| 752 | /* A quote followed by something other than a comma, end of line, or |
| 753 | (in IETF mode) another quote will be handled as a regular |
| 754 | character. */ |
| 755 | curr_offset++; |
| 756 | curr_char= file_buff->get_value(curr_offset); |
| 757 | if (curr_char == 'r') |
| 758 | buffer.append('\r'); |
| 759 | else if (curr_char == 'n' ) |
| 760 | buffer.append('\n'); |
| 761 | else if (curr_char == '\\' || (!ietf_quotes && curr_char == '"')) |
| 762 | buffer.append(curr_char); |
| 763 | else /* This could only happen with an externally created file */ |
| 764 | { |
| 765 | buffer.append('\\'); |
| 766 | buffer.append(curr_char); |
| 767 | } |
| 768 | } |
| 769 | else // ordinary symbol |
| 770 | { |
| 771 | /* |
| 772 | If we are at final symbol and no last quote was found => |
| 773 | we are working with a damaged file. |
| 774 | */ |
| 775 | if (curr_offset == end_offset - 1) |
| 776 | goto err; |
| 777 | buffer.append(curr_char); |
| 778 | } |
| 779 | } |
| 780 | } |
| 781 | else |
| 782 | { |
| 783 | for ( ; curr_offset < end_offset; curr_offset++) |
| 784 | { |
| 785 | curr_char= file_buff->get_value(curr_offset); |
| 786 | /* Move past the ,*/ |
| 787 | if (curr_char == ',') |
| 788 | { |
| 789 | curr_offset++; |
| 790 | break; |
| 791 | } |
| 792 | if (curr_char == '\\' && curr_offset != (end_offset - 1)) |
| 793 | { |
| 794 | curr_offset++; |
| 795 | curr_char= file_buff->get_value(curr_offset); |
| 796 | if (curr_char == 'r') |
| 797 | buffer.append('\r'); |
| 798 | else if (curr_char == 'n' ) |
| 799 | buffer.append('\n'); |
| 800 | else if (curr_char == '\\' || curr_char == '"') |
| 801 | buffer.append(curr_char); |
| 802 | else /* This could only happen with an externally created file */ |
| 803 | { |
| 804 | buffer.append('\\'); |
| 805 | buffer.append(curr_char); |
| 806 | } |
| 807 | } |
| 808 | else |
| 809 | { |
| 810 | /* |
| 811 | We are at the final symbol and a quote was found for the |
| 812 | unquoted field => We are working with a damaged field. |
| 813 | */ |
| 814 | if (curr_offset == end_offset - 1 && curr_char == '"') |
| 815 | goto err; |
| 816 | buffer.append(curr_char); |
| 817 | } |
| 818 | } |
| 819 | } |
| 820 | |
| 821 | if (read_all || bitmap_is_set(table->read_set, (*field)->field_index)) |
| 822 | { |
| 823 | bool is_enum= ((*field)->real_type() == MYSQL_TYPE_ENUM); |
| 824 | /* |
| 825 | Here CHECK_FIELD_WARN checks that all values in the csv file are valid |
| 826 | which is normally the case, if they were written by |
| 827 | INSERT -> ha_tina::write_row. '0' values on ENUM fields are considered |
| 828 | invalid by Field_enum::store() but it can store them on INSERT anyway. |
| 829 | Thus, for enums we silence the warning, as it doesn't really mean |
| 830 | an invalid value. |
| 831 | */ |
| 832 | if ((*field)->store(buffer.ptr(), buffer.length(), buffer.charset(), |
| 833 | is_enum ? CHECK_FIELD_IGNORE : CHECK_FIELD_WARN)) |
| 834 | { |
| 835 | if (!is_enum) |
| 836 | goto err; |
| 837 | } |
| 838 | if ((*field)->flags & BLOB_FLAG) |
| 839 | { |
| 840 | Field_blob *blob= *(Field_blob**) field; |
| 841 | uchar *src, *tgt; |
| 842 | uint length, packlength; |
| 843 | |
| 844 | packlength= blob->pack_length_no_ptr(); |
| 845 | length= blob->get_length(blob->ptr); |
| 846 | memcpy(&src, blob->ptr + packlength, sizeof(char*)); |
| 847 | if (src) |
| 848 | { |
| 849 | tgt= (uchar*) alloc_root(&blobroot, length); |
| 850 | bmove(tgt, src, length); |
| 851 | memcpy(blob->ptr + packlength, &tgt, sizeof(char*)); |
| 852 | } |
| 853 | } |
| 854 | } |
| 855 | } |
| 856 | next_position= end_offset + eoln_len; |
| 857 | error= 0; |
| 858 | |
| 859 | err: |
| 860 | dbug_tmp_restore_column_map(table->write_set, org_bitmap); |
| 861 | |
| 862 | DBUG_RETURN(error); |
| 863 | } |
| 864 | |
| 865 | /* |
| 866 | Three functions below are needed to enable concurrent insert functionality |
| 867 | for CSV engine. For more details see mysys/thr_lock.c |
| 868 | */ |
| 869 | |
| 870 | void tina_get_status(void* param, my_bool concurrent_insert) |
| 871 | { |
| 872 | ha_tina *tina= (ha_tina*) param; |
| 873 | tina->get_status(); |
| 874 | } |
| 875 | |
| 876 | void tina_update_status(void* param) |
| 877 | { |
| 878 | ha_tina *tina= (ha_tina*) param; |
| 879 | tina->update_status(); |
| 880 | } |
| 881 | |
| 882 | /* this should exist and return 0 for concurrent insert to work */ |
| 883 | my_bool tina_check_status(void* param) |
| 884 | { |
| 885 | return 0; |
| 886 | } |
| 887 | |
| 888 | /* |
| 889 | Save the state of the table |
| 890 | |
| 891 | SYNOPSIS |
| 892 | get_status() |
| 893 | |
| 894 | DESCRIPTION |
| 895 | This function is used to retrieve the file length during the lock |
| 896 | phase of a concurrent insert. For more details see the comment for |
| 897 | ha_tina::update_status below. |
| 898 | */ |
| 899 | |
| 900 | void ha_tina::get_status() |
| 901 | { |
| 902 | if (share->is_log_table) |
| 903 | { |
| 904 | /* |
| 905 | We have to use mutex to follow pthreads memory visibility |
| 906 | rules for share->saved_data_file_length |
| 907 | */ |
| 908 | mysql_mutex_lock(&share->mutex); |
| 909 | local_saved_data_file_length= share->saved_data_file_length; |
| 910 | mysql_mutex_unlock(&share->mutex); |
| 911 | return; |
| 912 | } |
| 913 | local_saved_data_file_length= share->saved_data_file_length; |
| 914 | } |
| 915 | |
| 916 | |
| 917 | /* |
| 918 | Correct the state of the table. Called by unlock routines |
| 919 | before the write lock is released. |
| 920 | |
| 921 | SYNOPSIS |
| 922 | update_status() |
| 923 | |
| 924 | DESCRIPTION |
| 925 | When we employ a concurrent insert lock, we save the current length of the |
| 926 | file during the lock phase. We do not read beyond that saved value, as we |
| 927 | don't want to interfere with an ongoing concurrent insert. Writers update |
| 928 | the file length info during unlock with update_status(). |
| 929 | |
| 930 | NOTE |
| 931 | For log tables concurrent insert works differently. The reason is that |
| 932 | log tables are always open and locked, and as they never unlock the |
| 933 | table, the file length after writes has to be updated in a different |
| 934 | way. For this purpose we need the is_log_table flag. When this flag is |
| 935 | set we call update_status() explicitly after each row write. |
| 936 | */ |
| 937 | |
| 938 | void ha_tina::update_status() |
| 939 | { |
| 940 | /* correct local_saved_data_file_length for writers */ |
| 941 | share->saved_data_file_length= local_saved_data_file_length; |
| 942 | } |
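|  | |
|  | /* |
|  | A rough sketch of how the two routines above cooperate: |
|  | |
|  | reader, at lock time:    get_status()     caches saved_data_file_length |
|  | writer, on each insert:  write_row()      appends and bumps its local copy |
|  | writer, at unlock time:  update_status()  publishes the new length |
|  | |
|  | so a concurrent reader never scans past the length that was valid when |
|  | it took its lock, and therefore only ever sees complete rows. |
|  | */ |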
| 943 | |
| 944 | |
| 945 | /* |
| 946 | Open a database file. Keep in mind that tables are caches, so |
| 947 | this will not be called for every request. Any sort of positions |
| 948 | that need to be reset should be kept in the ::extra() call. |
| 949 | */ |
| 950 | int ha_tina::open(const char *name, int mode, uint open_options) |
| 951 | { |
| 952 | DBUG_ENTER("ha_tina::open" ); |
| 953 | |
| 954 | if (!(share= get_share(name, table))) |
| 955 | DBUG_RETURN(HA_ERR_OUT_OF_MEM); |
| 956 | |
| 957 | if (share->crashed && !(open_options & HA_OPEN_FOR_REPAIR)) |
| 958 | { |
| 959 | free_share(share); |
| 960 | DBUG_RETURN(my_errno ? my_errno : HA_ERR_CRASHED_ON_USAGE); |
| 961 | } |
| 962 | |
| 963 | local_data_file_version= share->data_file_version; |
| 964 | if ((data_file= mysql_file_open(csv_key_file_data, |
| 965 | share->data_file_name, |
| 966 | O_RDONLY, MYF(MY_WME))) == -1) |
| 967 | { |
| 968 | free_share(share); |
| 969 | DBUG_RETURN(my_errno ? my_errno : -1); |
| 970 | } |
| 971 | |
| 972 | /* |
| 973 | Init locking. Pass handler object to the locking routines, |
| 974 | so that they could save/update local_saved_data_file_length value |
| 975 | during locking. This is needed to enable concurrent inserts. |
| 976 | */ |
| 977 | thr_lock_data_init(&share->lock, &lock, (void*) this); |
| 978 | ref_length= sizeof(my_off_t); |
| 979 | init_alloc_root(&blobroot, "ha_tina" , BLOB_MEMROOT_ALLOC_SIZE, 0, MYF(0)); |
| 980 | |
| 981 | share->lock.get_status= tina_get_status; |
| 982 | share->lock.update_status= tina_update_status; |
| 983 | share->lock.check_status= tina_check_status; |
| 984 | |
| 985 | DBUG_RETURN(0); |
| 986 | } |
| 987 | |
| 988 | |
| 989 | /* |
| 990 | Close a database file. We remove ourselves from the shared structure. |
| 991 | If it is empty we destroy it. |
| 992 | */ |
| 993 | int ha_tina::close(void) |
| 994 | { |
| 995 | int rc= 0; |
| 996 | DBUG_ENTER("ha_tina::close" ); |
| 997 | free_root(&blobroot, MYF(0)); |
| 998 | rc= mysql_file_close(data_file, MYF(0)); |
| 999 | DBUG_RETURN(free_share(share) || rc); |
| 1000 | } |
| 1001 | |
| 1002 | /* |
| 1003 | This is an INSERT. At the moment this handler just seeks to the end |
| 1004 | of the file and appends the data. In an error case it really should |
| 1005 | just truncate to the original position (this is not done yet). |
| 1006 | */ |
| 1007 | int ha_tina::write_row(uchar * buf) |
| 1008 | { |
| 1009 | int size; |
| 1010 | DBUG_ENTER("ha_tina::write_row" ); |
| 1011 | |
| 1012 | if (share->crashed) |
| 1013 | DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); |
| 1014 | |
| 1015 | size= encode_quote(buf); |
| 1016 | |
| 1017 | if (!share->tina_write_opened) |
| 1018 | if (init_tina_writer()) |
| 1019 | DBUG_RETURN(-1); |
| 1020 | |
| 1021 | /* use pwrite, as concurrent reader could have changed the position */ |
| 1022 | if (mysql_file_write(share->tina_write_filedes, (uchar*)buffer.ptr(), size, |
| 1023 | MYF(MY_WME | MY_NABP))) |
| 1024 | DBUG_RETURN(-1); |
| 1025 | |
| 1026 | /* update local copy of the max position to see our own changes */ |
| 1027 | local_saved_data_file_length+= size; |
| 1028 | |
| 1029 | /* update shared info */ |
| 1030 | mysql_mutex_lock(&share->mutex); |
| 1031 | share->rows_recorded++; |
| 1032 | /* update status for the log tables */ |
| 1033 | if (share->is_log_table) |
| 1034 | update_status(); |
| 1035 | mysql_mutex_unlock(&share->mutex); |
| 1036 | |
| 1037 | stats.records++; |
| 1038 | DBUG_RETURN(0); |
| 1039 | } |
| 1040 | |
| 1041 | |
| 1042 | int ha_tina::open_update_temp_file_if_needed() |
| 1043 | { |
| 1044 | char updated_fname[FN_REFLEN]; |
| 1045 | |
| 1046 | if (!share->update_file_opened) |
| 1047 | { |
| 1048 | if ((update_temp_file= |
| 1049 | mysql_file_create(csv_key_file_update, |
| 1050 | fn_format(updated_fname, share->table_name, |
| 1051 | "" , CSN_EXT, |
| 1052 | MY_REPLACE_EXT | MY_UNPACK_FILENAME), |
| 1053 | 0, O_RDWR | O_TRUNC, MYF(MY_WME))) < 0) |
| 1054 | return 1; |
| 1055 | share->update_file_opened= TRUE; |
| 1056 | temp_file_length= 0; |
| 1057 | } |
| 1058 | return 0; |
| 1059 | } |
| 1060 | |
| 1061 | /* |
| 1062 | This is called for an update. |
| 1063 | Make sure you put in code to increment the auto increment, once |
| 1064 | auto increment support is added; currently it is not being |
| 1065 | handled since autoincrements have yet to be added to this table handler. |
| 1066 | In a table scan this will be called right after the ::rnd_next() |
| 1067 | call that fetched the row. |
| 1068 | */ |
| 1069 | int ha_tina::update_row(const uchar * old_data, const uchar * new_data) |
| 1070 | { |
| 1071 | int size; |
| 1072 | int rc= -1; |
| 1073 | DBUG_ENTER("ha_tina::update_row" ); |
| 1074 | |
| 1075 | size= encode_quote(new_data); |
| 1076 | |
| 1077 | /* |
| 1078 | During an update we mark each updated record as deleted |
| 1079 | (see chain_append()) and then write the new one to the temporary data file. |
| 1080 | At the end of the sequence, in rnd_end(), we append all non-marked |
| 1081 | records from the data file to the temporary data file and then rename it. |
| 1082 | The temp_file_length is used to calculate the new data file length. |
| 1083 | */ |
| 1084 | if (chain_append()) |
| 1085 | goto err; |
| 1086 | |
| 1087 | if (open_update_temp_file_if_needed()) |
| 1088 | goto err; |
| 1089 | |
| 1090 | if (mysql_file_write(update_temp_file, (uchar*)buffer.ptr(), size, |
| 1091 | MYF(MY_WME | MY_NABP))) |
| 1092 | goto err; |
| 1093 | temp_file_length+= size; |
| 1094 | rc= 0; |
| 1095 | |
| 1096 | /* UPDATE should never happen on the log tables */ |
| 1097 | DBUG_ASSERT(!share->is_log_table); |
| 1098 | |
| 1099 | err: |
| 1100 | DBUG_PRINT("info" ,("rc = %d" , rc)); |
| 1101 | DBUG_RETURN(rc); |
| 1102 | } |
| 1103 | |
| 1104 | |
| 1105 | /* |
| 1106 | Deletes a row. First the database will find the row, and then call this |
| 1107 | method. In the case of a table scan, the previous call to this will be |
| 1108 | the ::rnd_next() that found this row. |
| 1109 | The exception to this is an ORDER BY. This will cause the table handler |
| 1110 | to walk the table noting the positions of all rows that match a query. |
| 1111 | The rows will then be deleted/positioned based on the ORDER (so RANDOM, |
| 1112 | DESC, ASC). |
| 1113 | */ |
| 1114 | int ha_tina::delete_row(const uchar * buf) |
| 1115 | { |
| 1116 | DBUG_ENTER("ha_tina::delete_row" ); |
| 1117 | |
| 1118 | if (chain_append()) |
| 1119 | DBUG_RETURN(-1); |
| 1120 | |
| 1121 | stats.records--; |
| 1122 | /* Update shared info */ |
| 1123 | DBUG_ASSERT(share->rows_recorded); |
| 1124 | mysql_mutex_lock(&share->mutex); |
| 1125 | share->rows_recorded--; |
| 1126 | mysql_mutex_unlock(&share->mutex); |
| 1127 | |
| 1128 | /* DELETE should never happen on the log table */ |
| 1129 | DBUG_ASSERT(!share->is_log_table); |
| 1130 | |
| 1131 | DBUG_RETURN(0); |
| 1132 | } |
| 1133 | |
| 1134 | |
| 1135 | /** |
| 1136 | @brief Initialize the data file. |
| 1137 | |
| 1138 | @details Compare the local version of the data file with the shared one. |
| 1139 | If they differ, there are some changes behind and we have to reopen |
| 1140 | the data file to make the changes visible. |
| 1141 | Call @c file_buff->init_buff() at the end to read the beginning of the |
| 1142 | data file into buffer. |
| 1143 | |
| 1144 | @retval 0 OK. |
| 1145 | @retval 1 There was an error. |
| 1146 | */ |
| 1147 | |
| 1148 | int ha_tina::init_data_file() |
| 1149 | { |
| 1150 | if (local_data_file_version != share->data_file_version) |
| 1151 | { |
| 1152 | local_data_file_version= share->data_file_version; |
| 1153 | if (mysql_file_close(data_file, MYF(0)) || |
| 1154 | (data_file= mysql_file_open(csv_key_file_data, |
| 1155 | share->data_file_name, O_RDONLY, |
| 1156 | MYF(MY_WME))) == -1) |
| 1157 | return my_errno ? my_errno : -1; |
| 1158 | } |
| 1159 | file_buff->init_buff(data_file); |
| 1160 | return 0; |
| 1161 | } |
| 1162 | |
| 1163 | |
| 1164 | /* |
| 1165 | All table scans call this first. |
| 1166 | The order of a table scan is: |
| 1167 | |
| 1168 | ha_tina::store_lock |
| 1169 | ha_tina::external_lock |
| 1170 | ha_tina::info |
| 1171 | ha_tina::rnd_init |
| 1172 | ha_tina::extra |
| 1173 | ENUM HA_EXTRA_CACHE Cache record in HA_rrnd() |
| 1174 | ha_tina::rnd_next |
| 1175 | ha_tina::rnd_next |
| 1176 | ha_tina::rnd_next |
| 1177 | ha_tina::rnd_next |
| 1178 | ha_tina::rnd_next |
| 1179 | ha_tina::rnd_next |
| 1180 | ha_tina::rnd_next |
| 1181 | ha_tina::rnd_next |
| 1182 | ha_tina::rnd_next |
| 1183 | ha_tina::extra |
| 1184 | ENUM HA_EXTRA_NO_CACHE End caching of records (def) |
| 1185 | ha_tina::external_lock |
| 1186 | ha_tina::extra |
| 1187 | ENUM HA_EXTRA_RESET Reset database to after open |
| 1188 | |
| 1189 | Each call to ::rnd_next() represents a row returned in the scan. When no more |
| 1190 | rows can be returned, rnd_next() returns a value of HA_ERR_END_OF_FILE. |
| 1191 | The ::info() call is just for the optimizer. |
| 1192 | |
| 1193 | */ |
| 1194 | |
| 1195 | int ha_tina::rnd_init(bool scan) |
| 1196 | { |
| 1197 | DBUG_ENTER("ha_tina::rnd_init" ); |
| 1198 | |
| 1199 | /* set buffer to the beginning of the file */ |
| 1200 | if (share->crashed || init_data_file()) |
| 1201 | DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); |
| 1202 | |
| 1203 | current_position= next_position= 0; |
| 1204 | stats.records= 0; |
| 1205 | records_is_known= found_end_of_file= 0; |
| 1206 | chain_ptr= chain; |
| 1207 | |
| 1208 | DBUG_RETURN(0); |
| 1209 | } |
| 1210 | |
| 1211 | /* |
| 1212 | ::rnd_next() does all the heavy lifting for a table scan. You will need to |
| 1213 | populate *buf with the correct field data. You can walk the field to |
| 1214 | determine at what position you should store the data (take a look at how |
| 1215 | ::find_current_row() works). The structure is something like: |
| 1216 | 0Foo Dog Friend |
| 1217 | The first offset is for the first attribute. All space before that is |
| 1218 | reserved for the null count. |
| 1219 | Basically this works as a mask showing which fields are NULL (as opposed to |
| 1220 | just empty). |
| 1221 | This table handler doesn't do nulls and does not know the difference between |
| 1222 | NULL and "". This is ok since this table handler is for spreadsheets and |
| 1223 | they don't know about them either :) |
| 1224 | */ |
| 1225 | int ha_tina::rnd_next(uchar *buf) |
| 1226 | { |
| 1227 | int rc; |
| 1228 | DBUG_ENTER("ha_tina::rnd_next" ); |
| 1229 | MYSQL_READ_ROW_START(table_share->db.str, table_share->table_name.str, |
| 1230 | TRUE); |
| 1231 | |
| 1232 | if (share->crashed) |
| 1233 | { |
| 1234 | rc= HA_ERR_CRASHED_ON_USAGE; |
| 1235 | goto end; |
| 1236 | } |
| 1237 | |
| 1238 | current_position= next_position; |
| 1239 | |
| 1240 | /* don't scan an empty file */ |
| 1241 | if (!local_saved_data_file_length) |
| 1242 | { |
| 1243 | rc= HA_ERR_END_OF_FILE; |
| 1244 | goto end; |
| 1245 | } |
| 1246 | |
| 1247 | if ((rc= find_current_row(buf))) |
| 1248 | goto end; |
| 1249 | |
| 1250 | stats.records++; |
| 1251 | rc= 0; |
| 1252 | end: |
| 1253 | found_end_of_file= (rc == HA_ERR_END_OF_FILE); |
| 1254 | MYSQL_READ_ROW_DONE(rc); |
| 1255 | DBUG_RETURN(rc); |
| 1256 | } |
| 1257 | |
| 1258 | /* |
| 1259 | In the case of an order by rows will need to be sorted. |
| 1260 | ::position() is called after each call to ::rnd_next(), |
| 1261 | the data it stores goes into a byte array. You can store this |
| 1262 | data via my_store_ptr(). ref_length is a variable defined in the |
| 1263 | class that is the sizeof() of the position being stored. In our case |
| 1264 | it's just a file offset. Look at the bdb code if you want to see a case |
| 1265 | where something other than a number is stored. |
| 1266 | */ |
| 1267 | void ha_tina::position(const uchar *record) |
| 1268 | { |
| 1269 | DBUG_ENTER("ha_tina::position" ); |
| 1270 | my_store_ptr(ref, ref_length, current_position); |
| 1271 | DBUG_VOID_RETURN; |
| 1272 | } |
| 1273 | |
| 1274 | |
| 1275 | /* |
| 1276 | Used to fetch a row from a position stored with ::position(). |
| 1277 | my_get_ptr() retrieves the data for you. |
| 1278 | */ |
| 1279 | |
| 1280 | int ha_tina::rnd_pos(uchar * buf, uchar *pos) |
| 1281 | { |
| 1282 | int rc; |
| 1283 | DBUG_ENTER("ha_tina::rnd_pos" ); |
| 1284 | MYSQL_READ_ROW_START(table_share->db.str, table_share->table_name.str, |
| 1285 | FALSE); |
| 1286 | current_position= my_get_ptr(pos,ref_length); |
| 1287 | rc= find_current_row(buf); |
| 1288 | MYSQL_READ_ROW_DONE(rc); |
| 1289 | DBUG_RETURN(rc); |
| 1290 | } |
| 1291 | |
| 1292 | /* |
| 1293 | ::info() is used to return information to the optimizer. |
| 1294 | Currently this table handler doesn't implement most of the fields |
| 1295 | really needed. SHOW also makes use of this data |
| 1296 | */ |
| 1297 | int ha_tina::info(uint flag) |
| 1298 | { |
| 1299 | DBUG_ENTER("ha_tina::info" ); |
| 1300 | /* This is a lie, but you don't want the optimizer to see zero or 1 */ |
| 1301 | if (!records_is_known && stats.records < 2) |
| 1302 | stats.records= 2; |
| 1303 | DBUG_RETURN(0); |
| 1304 | } |
| 1305 | |
| 1306 | /* |
| 1307 | Grab bag of flags that are sent to the table handler every so often. |
| 1308 | HA_EXTRA_RESET and HA_EXTRA_RESET_STATE are the most frequently called. |
| 1309 | You are not required to implement any of these. |
| 1310 | */ |
| 1311 | int ha_tina::extra(enum ha_extra_function operation) |
| 1312 | { |
| 1313 | DBUG_ENTER("ha_tina::extra" ); |
| 1314 | if (operation == HA_EXTRA_MARK_AS_LOG_TABLE) |
| 1315 | { |
| 1316 | mysql_mutex_lock(&share->mutex); |
| 1317 | share->is_log_table= TRUE; |
| 1318 | mysql_mutex_unlock(&share->mutex); |
| 1319 | } |
| 1320 | DBUG_RETURN(0); |
| 1321 | } |
| 1322 | |
| 1323 | |
| 1324 | /* |
| 1325 | Set end_pos to the last valid byte of the continuous area in the buffer |
| 1326 | that is closest to the given "hole". "Valid" here means |
| 1327 | not listed in the chain of deleted records ("holes"). |
| 1328 | */ |
| 1329 | bool ha_tina::get_write_pos(my_off_t *end_pos, tina_set *closest_hole) |
| 1330 | { |
| 1331 | if (closest_hole == chain_ptr) /* no more chains */ |
| 1332 | *end_pos= file_buff->end(); |
| 1333 | else |
| 1334 | *end_pos= MY_MIN(file_buff->end(), closest_hole->begin); |
| 1335 | return (closest_hole != chain_ptr) && (*end_pos == closest_hole->begin); |
| 1336 | } |
| 1337 | |
| 1338 | |
| 1339 | /* |
| 1340 | Called after each table scan. In particular after deletes, |
| 1341 | and updates. In the last case we employ chain of deleted |
| 1342 | slots to clean up all of the dead space we have collected while |
| 1343 | performing deletes/updates. |
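|  | |
|  | A rough worked example: with a 100-byte data file and a sorted chain of |
|  | holes {[10, 20), [40, 50)}, the loop below copies bytes [0, 10), |
|  | [20, 40) and [50, 100) into the temporary CSN file (80 bytes in total), |
|  | which then replaces the CSV data file. |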
| 1344 | */ |
| 1345 | int ha_tina::rnd_end() |
| 1346 | { |
| 1347 | char updated_fname[FN_REFLEN]; |
| 1348 | my_off_t file_buffer_start= 0; |
| 1349 | DBUG_ENTER("ha_tina::rnd_end" ); |
| 1350 | |
| 1351 | records_is_known= found_end_of_file; |
| 1352 | |
| 1353 | if ((chain_ptr - chain) > 0) |
| 1354 | { |
| 1355 | tina_set *ptr= chain; |
| 1356 | |
| 1357 | /* |
| 1358 | Re-read the beginning of a file (as the buffer should point to the |
| 1359 | end of file after the scan). |
| 1360 | */ |
| 1361 | file_buff->init_buff(data_file); |
| 1362 | |
| 1363 | /* |
| 1364 | The sort is needed when there were updates/deletes with random orders. |
| 1365 | It sorts so that we move the first blocks to the beginning. |
| 1366 | */ |
| 1367 | my_qsort(chain, (size_t)(chain_ptr - chain), sizeof(tina_set), |
| 1368 | (qsort_cmp)sort_set); |
| 1369 | |
| 1370 | my_off_t write_begin= 0, write_end; |
| 1371 | |
| 1372 | /* create the file to write updated table if it wasn't yet created */ |
| 1373 | if (open_update_temp_file_if_needed()) |
| 1374 | DBUG_RETURN(-1); |
| 1375 | |
| 1376 | /* write the file with updated info */ |
| 1377 | while ((file_buffer_start != (my_off_t)-1)) // while not end of file |
| 1378 | { |
| 1379 | bool in_hole= get_write_pos(&write_end, ptr); |
| 1380 | my_off_t write_length= write_end - write_begin; |
| 1381 | |
| 1382 | /* if there is something to write, write it */ |
| 1383 | if (write_length) |
| 1384 | { |
| 1385 | if (mysql_file_write(update_temp_file, |
| 1386 | (uchar*) (file_buff->ptr() + |
| 1387 | (write_begin - file_buff->start())), |
| 1388 | (size_t)write_length, MYF_RW)) |
| 1389 | goto error; |
| 1390 | temp_file_length+= write_length; |
| 1391 | } |
| 1392 | if (in_hole) |
| 1393 | { |
| 1394 | /* skip hole */ |
| 1395 | while (file_buff->end() <= ptr->end && |
| 1396 | file_buffer_start != (my_off_t)-1) |
| 1397 | file_buffer_start= file_buff->read_next(); |
| 1398 | write_begin= ptr->end; |
| 1399 | ptr++; |
| 1400 | } |
| 1401 | else |
| 1402 | write_begin= write_end; |
| 1403 | |
| 1404 | if (write_end == file_buff->end()) |
| 1405 | file_buffer_start= file_buff->read_next(); /* shift the buffer */ |
| 1406 | |
| 1407 | } |
| 1408 | |
| 1409 | if (mysql_file_sync(update_temp_file, MYF(MY_WME)) || |
| 1410 | mysql_file_close(update_temp_file, MYF(0))) |
| 1411 | DBUG_RETURN(-1); |
| 1412 | |
| 1413 | share->update_file_opened= FALSE; |
| 1414 | |
| 1415 | if (share->tina_write_opened) |
| 1416 | { |
| 1417 | if (mysql_file_close(share->tina_write_filedes, MYF(0))) |
| 1418 | DBUG_RETURN(-1); |
| 1419 | /* |
| 1420 | Mark that the writer fd is closed, so that init_tina_writer() |
| 1421 | will reopen it later. |
| 1422 | */ |
| 1423 | share->tina_write_opened= FALSE; |
| 1424 | } |
| 1425 | |
| 1426 | /* |
| 1427 | Close the open file descriptors. Then move the updated file in place |
| 1428 | of the old datafile. |
| 1429 | */ |
| 1430 | if (mysql_file_close(data_file, MYF(0)) || |
| 1431 | mysql_file_rename(csv_key_file_data, |
| 1432 | fn_format(updated_fname, share->table_name, |
| 1433 | "" , CSN_EXT, |
| 1434 | MY_REPLACE_EXT | MY_UNPACK_FILENAME), |
| 1435 | share->data_file_name, MYF(0))) |
| 1436 | DBUG_RETURN(-1); |
| 1437 | |
| 1438 | /* Open the file again */ |
| 1439 | if ((data_file= mysql_file_open(csv_key_file_data, |
| 1440 | share->data_file_name, |
| 1441 | O_RDONLY, MYF(MY_WME))) == -1) |
| 1442 | DBUG_RETURN(my_errno ? my_errno : -1); |
| 1443 | /* |
| 1444 | As we reopened the data file, increase share->data_file_version |
| 1445 | in order to force other threads that are waiting on a table lock and |
| 1446 | have already opened the table to reopen the data file. |
| 1447 | That makes the latest changes visible to them. |
| 1448 | Update local_data_file_version as there is no need to reopen the file |
| 1449 | in the current thread. |
| 1450 | */ |
| 1451 | share->data_file_version++; |
| 1452 | local_data_file_version= share->data_file_version; |
| 1453 | /* |
| 1454 | The datafile is consistent at this point and the write filedes is |
| 1455 | closed, so nothing worrying will happen to it in case of a crash. |
| 1456 | Here we record this fact to the meta-file. |
| 1457 | */ |
| 1458 | (void)write_meta_file(share->meta_file, share->rows_recorded, FALSE); |
| 1459 | /* |
| 1460 | Update local_saved_data_file_length with the real length of the |
| 1461 | data file. |
| 1462 | */ |
| 1463 | local_saved_data_file_length= temp_file_length; |
| 1464 | } |
| 1465 | |
| 1466 | DBUG_RETURN(0); |
| 1467 | error: |
| 1468 | mysql_file_close(update_temp_file, MYF(0)); |
| 1469 | share->update_file_opened= FALSE; |
| 1470 | DBUG_RETURN(-1); |
| 1471 | } |
| 1472 | |
| 1473 | |
| 1474 | /* |
| 1475 | Repair a CSV table in case it is crashed. |
| 1476 | |
| 1477 | SYNOPSIS |
| 1478 | repair() |
| 1479 | thd The thread, performing repair |
| 1480 | check_opt The options for repair. We do not use it currently. |
| 1481 | |
| 1482 | DESCRIPTION |
| 1483 | If the file is empty, change # of rows in the file and complete recovery. |
| 1484 | Otherwise, scan the table looking for bad rows. If none were found, |
| 1485 | we mark file as a good one and return. If a bad row was encountered, |
| 1486 | we truncate the datafile up to the last good row. |
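|  | |
|  | For example (illustrative numbers): if the first bad row starts at |
|  | offset 940 of a 1000-byte data file, the first 940 bytes are copied |
|  | into a temporary CSN file, which then replaces the CSV file, and |
|  | rows_recorded is set to the number of rows that were successfully read. |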
| 1487 | |
| 1488 | TODO: Make repair more clever - it should try to recover subsequent |
| 1489 | rows (after the first bad one) as well. |
| 1490 | */ |
| 1491 | |
| 1492 | int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt) |
| 1493 | { |
| 1494 | char repaired_fname[FN_REFLEN]; |
| 1495 | uchar *buf; |
| 1496 | File repair_file; |
| 1497 | int rc; |
| 1498 | ha_rows rows_repaired= 0; |
| 1499 | my_off_t write_begin= 0, write_end; |
| 1500 | DBUG_ENTER("ha_tina::repair" ); |
| 1501 | |
| 1502 | /* empty file */ |
| 1503 | if (!share->saved_data_file_length) |
| 1504 | { |
| 1505 | share->rows_recorded= 0; |
| 1506 | goto end; |
| 1507 | } |
| 1508 | |
| 1509 | /* Don't assert in field::val() functions */ |
| 1510 | table->use_all_columns(); |
| 1511 | |
| 1512 | /* position buffer to the start of the file */ |
| 1513 | if (init_data_file()) |
| 1514 | DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR); |
| 1515 | |
| 1516 | if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)))) |
| 1517 | DBUG_RETURN(HA_ERR_OUT_OF_MEM); |
| 1518 | /* |
| 1519 | Local_saved_data_file_length is initialized during the lock phase. |
| 1520 | Sometimes this does not get executed before ::repair (e.g. for |
| 1521 | the log tables), so we set it manually here. |
| 1522 | */ |
| 1523 | local_saved_data_file_length= share->saved_data_file_length; |
| 1524 | /* set current position to the beginning of the file */ |
| 1525 | current_position= next_position= 0; |
| 1526 | |
| 1527 | /* Read the file row-by-row. If everything is ok, repair is not needed. */ |
| 1528 | while (!(rc= find_current_row(buf))) |
| 1529 | { |
| 1530 | thd_inc_error_row(thd); |
| 1531 | rows_repaired++; |
| 1532 | current_position= next_position; |
| 1533 | } |
| 1534 | |
| 1535 | free_root(&blobroot, MYF(0)); |
| 1536 | |
| 1537 | my_free(buf); |
| 1538 | |
| 1539 | if (rc == HA_ERR_END_OF_FILE) |
| 1540 | { |
| 1541 | /* |
| 1542 | All rows were read ok until end of file, the file does not need repair. |
| 1543 | If rows_recorded != rows_repaired, we should update rows_recorded value |
| 1544 | to the current amount of rows. |
| 1545 | */ |
| 1546 | share->rows_recorded= rows_repaired; |
| 1547 | goto end; |
| 1548 | } |
| 1549 | |
| 1550 | /* |
| 1551 | Otherwise we've encountered a bad row => repair is needed. |
| 1552 | Let us create a temporary file. |
| 1553 | */ |
| 1554 | if ((repair_file= mysql_file_create(csv_key_file_update, |
| 1555 | fn_format(repaired_fname, |
| 1556 | share->table_name, |
| 1557 | "" , CSN_EXT, |
| 1558 | MY_REPLACE_EXT|MY_UNPACK_FILENAME), |
| 1559 | 0, O_RDWR | O_TRUNC, MYF(MY_WME))) < 0) |
| 1560 | DBUG_RETURN(HA_ERR_CRASHED_ON_REPAIR); |
| 1561 | |
| 1562 | file_buff->init_buff(data_file); |
| 1563 | |
| 1564 | |
| 1565 | /* We just truncated the file up to the first bad row; update the rows count. */ |
| 1566 | share->rows_recorded= rows_repaired; |
| 1567 | |
| 1568 | /* write repaired file */ |
| 1569 | while (1) |
| 1570 | { |
| 1571 | write_end= MY_MIN(file_buff->end(), current_position); |
| 1572 | if ((write_end - write_begin) && |
| 1573 | (mysql_file_write(repair_file, (uchar*)file_buff->ptr(), |
| 1574 | (size_t) (write_end - write_begin), MYF_RW))) |
| 1575 | DBUG_RETURN(-1); |
| 1576 | |
| 1577 | write_begin= write_end; |
| 1578 | if (write_end== current_position) |
| 1579 | break; |
| 1580 | else |
| 1581 | file_buff->read_next(); /* shift the buffer */ |
| 1582 | } |
| 1583 | |
| 1584 | /* |
| 1585 | Close the files and rename repaired file to the datafile. |
| 1586 | We have to close the files, as on Windows one cannot rename |
| 1587 | a file whose descriptor is still open. EACCES will be returned |
| 1588 | when trying to delete the "to"-file in mysql_file_rename(). |
| 1589 | */ |
| 1590 | if (share->tina_write_opened) |
| 1591 | { |
| 1592 | /* |
| 1593 | The data file might be opened twice: once at table-open time and |
| 1594 | once during write_row execution. We need to close both instances |
| 1595 | to satisfy Windows. |
| 1596 | */ |
| 1597 | if (mysql_file_close(share->tina_write_filedes, MYF(0))) |
| 1598 | DBUG_RETURN(my_errno ? my_errno : -1); |
| 1599 | share->tina_write_opened= FALSE; |
| 1600 | } |
| 1601 | mysql_file_close(data_file, MYF(0)); |
| 1602 | mysql_file_close(repair_file, MYF(0)); |
| 1603 | if (mysql_file_rename(csv_key_file_data, |
| 1604 | repaired_fname, share->data_file_name, MYF(0))) |
| 1605 | DBUG_RETURN(-1); |
| 1606 | |
| 1607 | /* Open the file again, it should now be repaired */ |
| 1608 | if ((data_file= mysql_file_open(csv_key_file_data, |
| 1609 | share->data_file_name, O_RDWR|O_APPEND, |
| 1610 | MYF(MY_WME))) == -1) |
| 1611 | DBUG_RETURN(my_errno ? my_errno : -1); |
| 1612 | |
| 1613 | /* Set new file size. The file size will be updated by ::update_status() */ |
| 1614 | local_saved_data_file_length= (size_t) current_position; |
| 1615 | |
| 1616 | end: |
| 1617 | share->crashed= FALSE; |
| 1618 | DBUG_RETURN(HA_ADMIN_OK); |
| 1619 | } |
| 1620 | |
| 1621 | /* |
| 1622 | DELETE without WHERE calls this |
| 1623 | */ |
| 1624 | |
| 1625 | int ha_tina::delete_all_rows() |
| 1626 | { |
| 1627 | int rc; |
| 1628 | DBUG_ENTER("ha_tina::delete_all_rows" ); |
| 1629 | |
| 1630 | if (!records_is_known) |
| 1631 | DBUG_RETURN(my_errno=HA_ERR_WRONG_COMMAND); |
| 1632 | |
| 1633 | if (!share->tina_write_opened) |
| 1634 | if (init_tina_writer()) |
| 1635 | DBUG_RETURN(-1); |
| 1636 | |
| 1637 | /* Truncate the file to zero size */ |
| 1638 | rc= mysql_file_chsize(share->tina_write_filedes, 0, 0, MYF(MY_WME)); |
| 1639 | |
| 1640 | stats.records=0; |
| 1641 | /* Update shared info */ |
| 1642 | mysql_mutex_lock(&share->mutex); |
| 1643 | share->rows_recorded= 0; |
| 1644 | mysql_mutex_unlock(&share->mutex); |
| 1645 | local_saved_data_file_length= 0; |
| 1646 | DBUG_RETURN(rc); |
| 1647 | } |
| 1648 | |
| 1649 | int ha_tina::external_lock(THD *thd __attribute__((unused)), int lock_type) |
| 1650 | { |
| 1651 | if (lock_type==F_UNLCK && curr_lock_type == F_WRLCK) |
| 1652 | update_status(); |
| 1653 | curr_lock_type= lock_type; |
| 1654 | return 0; |
| 1655 | } |
| 1656 | |
| 1657 | /* |
| 1658 | Called by the database to lock the table. Keep in mind that this |
| 1659 | is an internal lock. |
| 1660 | */ |
| 1661 | THR_LOCK_DATA **ha_tina::store_lock(THD *thd, |
| 1662 | THR_LOCK_DATA **to, |
| 1663 | enum thr_lock_type lock_type) |
| 1664 | { |
| 1665 | if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) |
| 1666 | lock.type=lock_type; |
| 1667 | *to++= &lock; |
| 1668 | return to; |
| 1669 | } |
| 1670 | |
| 1671 | /* |
| 1672 | Create a table. You do not want to leave the table open after a call to |
| 1673 | this (the database will call ::open() if it needs to). |
| 1674 | */ |
| 1675 | |
| 1676 | int ha_tina::create(const char *name, TABLE *table_arg, |
| 1677 | HA_CREATE_INFO *create_info) |
| 1678 | { |
| 1679 | char name_buff[FN_REFLEN]; |
| 1680 | File create_file; |
| 1681 | DBUG_ENTER("ha_tina::create" ); |
| 1682 | |
| 1683 | /* |
| 1684 | check columns |
| 1685 | */ |
| 1686 | for (Field **field= table_arg->s->field; *field; field++) |
| 1687 | { |
| 1688 | if ((*field)->real_maybe_null()) |
| 1689 | { |
| 1690 | my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "nullable columns" ); |
| 1691 | DBUG_RETURN(HA_ERR_UNSUPPORTED); |
| 1692 | } |
| 1693 | } |
| 1694 | |
| 1695 | |
| 1696 | if ((create_file= mysql_file_create(csv_key_file_metadata, |
| 1697 | fn_format(name_buff, name, "" , CSM_EXT, |
| 1698 | MY_REPLACE_EXT|MY_UNPACK_FILENAME), |
| 1699 | 0, O_RDWR | O_TRUNC, MYF(MY_WME))) < 0) |
| 1700 | DBUG_RETURN(-1); |
| 1701 | |
| 1702 | write_meta_file(create_file, 0, FALSE); |
| 1703 | mysql_file_close(create_file, MYF(0)); |
| 1704 | |
| 1705 | if ((create_file= mysql_file_create(csv_key_file_data, |
| 1706 | fn_format(name_buff, name, "" , CSV_EXT, |
| 1707 | MY_REPLACE_EXT|MY_UNPACK_FILENAME), |
| 1708 | 0, O_RDWR | O_TRUNC, MYF(MY_WME))) < 0) |
| 1709 | DBUG_RETURN(-1); |
| 1710 | |
| 1711 | mysql_file_close(create_file, MYF(0)); |
| 1712 | |
| 1713 | DBUG_RETURN(0); |
| 1714 | } |
| 1715 | |
| 1716 | int ha_tina::check(THD* thd, HA_CHECK_OPT* check_opt) |
| 1717 | { |
| 1718 | int rc= 0; |
| 1719 | uchar *buf; |
| 1720 | const char *old_proc_info; |
| 1721 | ha_rows count= share->rows_recorded; |
| 1722 | DBUG_ENTER("ha_tina::check" ); |
| 1723 | |
| 1724 | old_proc_info= thd_proc_info(thd, "Checking table" ); |
| 1725 | |
| 1726 | /* position buffer to the start of the file */ |
| 1727 | if (init_data_file()) |
| 1728 | DBUG_RETURN(HA_ERR_CRASHED); |
| 1729 | |
| 1730 | if (!(buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME)))) |
| 1731 | DBUG_RETURN(HA_ERR_OUT_OF_MEM); |
| 1732 | |
| 1733 | /* |
| 1734 | Local_saved_data_file_length is initialized during the lock phase. |
| 1735 | CHECK does not use store_lock in certain cases, so we set it |
| 1736 | manually here. |
| 1737 | */ |
| 1738 | local_saved_data_file_length= share->saved_data_file_length; |
| 1739 | /* set current position to the beginning of the file */ |
| 1740 | current_position= next_position= 0; |
| 1741 | |
| 1742 | /* Read the file row-by-row. If everything is ok, repair is not needed. */ |
| 1743 | while (!(rc= find_current_row(buf))) |
| 1744 | { |
| 1745 | thd_inc_error_row(thd); |
| 1746 | count--; |
| 1747 | current_position= next_position; |
| 1748 | } |
| 1749 | |
| 1750 | free_root(&blobroot, MYF(0)); |
| 1751 | |
| 1752 | my_free(buf); |
| 1753 | thd_proc_info(thd, old_proc_info); |
| 1754 | |
| 1755 | if ((rc != HA_ERR_END_OF_FILE) || count) |
| 1756 | { |
| 1757 | share->crashed= TRUE; |
| 1758 | DBUG_RETURN(HA_ADMIN_CORRUPT); |
| 1759 | } |
| 1760 | |
| 1761 | DBUG_RETURN(HA_ADMIN_OK); |
| 1762 | } |
| 1763 | |
| 1764 | |
| 1765 | int ha_tina::reset(void) |
| 1766 | { |
| 1767 | free_root(&blobroot, MYF(0)); |
| 1768 | return 0; |
| 1769 | } |
| 1770 | |
| 1771 | |
| 1772 | bool ha_tina::check_if_incompatible_data(HA_CREATE_INFO *info_arg, |
| 1773 | uint table_changes) |
| 1774 | { |
| 1775 | if (info_arg->option_struct->ietf_quotes != |
| 1776 | table_share->option_struct->ietf_quotes) |
| 1777 | return COMPATIBLE_DATA_NO; |
| 1778 | |
| 1779 | return COMPATIBLE_DATA_YES; |
| 1780 | } |
| 1781 | |
| 1782 | struct st_mysql_storage_engine csv_storage_engine= |
| 1783 | { MYSQL_HANDLERTON_INTERFACE_VERSION }; |
| 1784 | |
| 1785 | maria_declare_plugin(csv) |
| 1786 | { |
| 1787 | MYSQL_STORAGE_ENGINE_PLUGIN, |
| 1788 | &csv_storage_engine, |
| 1789 | "CSV" , |
| 1790 | "Brian Aker, MySQL AB" , |
| 1791 | "CSV storage engine" , |
| 1792 | PLUGIN_LICENSE_GPL, |
| 1793 | tina_init_func, /* Plugin Init */ |
| 1794 | tina_done_func, /* Plugin Deinit */ |
| 1795 | 0x0100 /* 1.0 */, |
| 1796 | NULL, /* status variables */ |
| 1797 | NULL, /* system variables */ |
| 1798 | "1.0" , /* string version */ |
| 1799 | MariaDB_PLUGIN_MATURITY_STABLE /* maturity */ |
| 1800 | } |
| 1801 | maria_declare_plugin_end; |
| 1802 | |