/* Copyright (c) 2000, 2016, Oracle and/or its affiliates.
   Copyright (c) 2011, 2016, MariaDB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */


/*
  Single-table and multi-table UPDATE statements.
  Multi-table updates were introduced by Sinisa & Monty.
*/

#include "mariadb.h"                    /* NO_EMBEDDED_ACCESS_CHECKS */
#include "sql_priv.h"
#include "sql_update.h"
#include "sql_cache.h"                          // query_cache_*
#include "sql_base.h"                           // close_tables_for_reopen
#include "sql_parse.h"                          // cleanup_items
#include "sql_partition.h"                      // partition_key_modified
#include "sql_select.h"
#include "sql_view.h"                           // check_key_in_view
#include "sp_head.h"
#include "sql_trigger.h"
#include "sql_statistics.h"
#include "probes_mysql.h"
#include "debug_sync.h"
#include "key.h"                                // is_key_used
#include "sql_acl.h"                            // *_ACL, check_grant
#include "records.h"                            // init_read_record,
                                                // end_read_record
#include "filesort.h"                           // filesort
#include "sql_derived.h"                        // mysql_derived_prepare,
                                                // mysql_handle_derived,
                                                // mysql_derived_filling


#include "sql_insert.h"                         // For vers_insert_history_row()
                                                // that may be needed for
                                                // System Versioning.
/**
  True if the table's input and output record buffers are comparable using
  compare_record(TABLE*).
*/
bool records_are_comparable(const TABLE *table) {
  return ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) == 0) ||
         bitmap_is_subset(table->write_set, table->read_set);
}
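
/*
  Illustrative note (not authoritative): records_are_comparable() holds
  either when the engine reads full rows (no HA_PARTIAL_COLUMN_READ) or
  when every column in write_set was also read. For example, with a
  partial-read engine and

    UPDATE t SET a= 1 WHERE b > 10;

  the buffers are comparable only if column 'a' is in the read_set as
  well as in the write_set.
*/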


/**
  Compares the input and output record buffers of the table to see if a row
  has changed.

  @return true if row has changed.
  @return false otherwise.
*/

bool compare_record(const TABLE *table)
{
  DBUG_ASSERT(records_are_comparable(table));

  if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) != 0)
  {
    /*
      Storage engine may not have read all columns of the record. Fields
      (including NULL bits) not in the write_set may not have been read and
      can therefore not be compared.
    */
    for (Field **ptr= table->field ; *ptr != NULL; ptr++)
    {
      Field *field= *ptr;
      if (bitmap_is_set(table->write_set, field->field_index))
      {
        if (field->real_maybe_null())
        {
          uchar null_byte_index= (uchar)(field->null_ptr - table->record[0]);

          if (((table->record[0][null_byte_index]) & field->null_bit) !=
              ((table->record[1][null_byte_index]) & field->null_bit))
            return TRUE;
        }
        if (field->cmp_binary_offset(table->s->rec_buff_length))
          return TRUE;
      }
    }
    return FALSE;
  }

  /*
    The storage engine has read all columns, so it's safe to compare all bits
    including those not in the write_set. This is cheaper than the
    field-by-field comparison done above.
  */
  if (table->s->can_cmp_whole_record)
    return cmp_record(table,record[1]);
  /* Compare null bits */
  if (memcmp(table->null_flags,
             table->null_flags + table->s->rec_buff_length,
             table->s->null_bytes_for_compare))
    return TRUE;                                // Diff in NULL value
  /* Compare updated fields */
  for (Field **ptr= table->field ; *ptr ; ptr++)
  {
    if (bitmap_is_set(table->write_set, (*ptr)->field_index) &&
        (*ptr)->cmp_binary_offset(table->s->rec_buff_length))
      return TRUE;
  }
  return FALSE;
}
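
/*
  Usage note (illustrative): compare_record() is what lets UPDATE skip
  writes that would not change the row. For example,

    UPDATE t SET a= 1 WHERE b= 2;

  counts rows whose 'a' is already 1 as "found" but not as "updated",
  and no write is sent to the storage engine for them.
*/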


/*
  Check that all fields are real fields

  SYNOPSIS
    check_fields()
    thd             thread handler
    items           Items for check

  RETURN
    TRUE  Items can't be used in UPDATE
    FALSE Items are OK
*/

static bool check_fields(THD *thd, List<Item> &items, bool update_view)
{
  Item *item;
  if (update_view)
  {
    List_iterator<Item> it(items);
    Item_field *field;
    while ((item= it++))
    {
      if (!(field= item->field_for_view_update()))
      {
        /* item has name, because it comes from VIEW SELECT list */
        my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name.str);
        return TRUE;
      }
      /*
        We make a temporary copy of Item_field, to avoid the influence of
        changing result_field on Item_ref objects that refer to this field.
      */
      thd->change_item_tree(it.ref(),
                            new (thd->mem_root) Item_field(thd, field));
    }
  }

  if (thd->variables.sql_mode & MODE_SIMULTANEOUS_ASSIGNMENT)
  {
    // Make sure that a column is updated only once
    List_iterator_fast<Item> it(items);
    while ((item= it++))
    {
      item->field_for_view_update()->field->clear_has_explicit_value();
    }
    it.rewind();
    while ((item= it++))
    {
      Field *f= item->field_for_view_update()->field;
      if (f->has_explicit_value())
      {
        my_error(ER_UPDATED_COLUMN_ONLY_ONCE, MYF(0),
                 *(f->table_name), f->field_name.str);
        return TRUE;
      }
      f->set_has_explicit_value();
    }
  }
  return FALSE;
}
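
/*
  Illustrative example of the MODE_SIMULTANEOUS_ASSIGNMENT check above:

    SET sql_mode= CONCAT(@@sql_mode, ',SIMULTANEOUS_ASSIGNMENT');
    UPDATE t SET a= b, b= a;    -- OK: swaps a and b using the old values
    UPDATE t SET a= 1, a= 2;    -- rejected with ER_UPDATED_COLUMN_ONLY_ONCE
*/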

static bool check_has_vers_fields(TABLE *table, List<Item> &items)
{
  List_iterator<Item> it(items);
  if (!table->versioned())
    return false;

  while (Item *item= it++)
  {
    if (Item_field *item_field= item->field_for_view_update())
    {
      Field *field= item_field->field;
      if (field->table == table && !field->vers_update_unversioned())
        return true;
    }
  }
  return false;
}
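
/*
  Illustrative example (assuming a system-versioned table):

    CREATE TABLE t (a INT) WITH SYSTEM VERSIONING;
    UPDATE t SET a= a + 1;

  Here check_has_vers_fields() returns true because 'a' is a versioned
  column, so the old row version must be archived (see the
  vers_insert_history_row() calls below). It returns false if only
  columns declared WITHOUT SYSTEM VERSIONING are assigned.
*/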

/**
  Re-read record if more columns are needed for error message.

  If we got a duplicate key error, we want to write an error
  message containing the value of the duplicate key. If we do not have
  all fields of the key value in record[0], we need to re-read the
  record with a proper read_set.

  @param[in] error   error number
  @param[in] table   table
*/

static void prepare_record_for_error_message(int error, TABLE *table)
{
  Field **field_p;
  Field *field;
  uint keynr;
  MY_BITMAP unique_map;                         /* Fields in offended unique. */
  my_bitmap_map unique_map_buf[bitmap_buffer_size(MAX_FIELDS)];
  DBUG_ENTER("prepare_record_for_error_message");

  /*
    Only duplicate key errors print the key value.
    If the storage engine always reads all columns, we have the value already.
  */
  if ((error != HA_ERR_FOUND_DUPP_KEY) ||
      !(table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ))
    DBUG_VOID_RETURN;

  /*
    Get the number of the offended index.
    We will see MAX_KEY if the engine cannot determine the affected index.
  */
  if (unlikely((keynr= table->file->get_dup_key(error)) >= MAX_KEY))
    DBUG_VOID_RETURN;

  /* Create unique_map with all fields used by that index. */
  my_bitmap_init(&unique_map, unique_map_buf, table->s->fields, FALSE);
  table->mark_columns_used_by_index(keynr, &unique_map);

  /* Subtract read_set and write_set. */
  bitmap_subtract(&unique_map, table->read_set);
  bitmap_subtract(&unique_map, table->write_set);

  /*
    If the unique index uses columns that are neither in read_set
    nor in write_set, we must re-read the record.
    Otherwise no need to do anything.
  */
  if (bitmap_is_clear_all(&unique_map))
    DBUG_VOID_RETURN;

  /* Get identifier of last read record into table->file->ref. */
  table->file->position(table->record[0]);
  /* Add all fields used by unique index to read_set. */
  bitmap_union(table->read_set, &unique_map);
  /* Tell the engine about the new set. */
  table->file->column_bitmaps_signal();
  /* Read record that is identified by table->file->ref. */
  (void) table->file->ha_rnd_pos(table->record[1], table->file->ref);
  /* Copy the newly read columns into the new record. */
  for (field_p= table->field; (field= *field_p); field_p++)
    if (bitmap_is_set(&unique_map, field->field_index))
      field->copy_from_tmp(table->s->rec_buff_length);

  DBUG_VOID_RETURN;
}
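
/*
  Hedged example: with an engine that sets HA_PARTIAL_COLUMN_READ,

    UPDATE t SET a= 1 WHERE b= 2;    -- t has UNIQUE(c)

  can hit a duplicate key on 'c' even though 'c' was never read. The
  re-read above pulls in the missing key columns so the error message
  can show the duplicate value.
*/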


/*
  Process usual UPDATE

  SYNOPSIS
    mysql_update()
    thd                 thread handler
    fields              fields for update
    values              values of fields for update
    conds               WHERE clause expression
    order_num           number of elements in ORDER BY clause
    order               ORDER BY clause list
    limit               limit clause
    handle_duplicates   how to handle duplicates

  RETURN
    0 - OK
    2 - privilege check and opening table passed, but we need to convert to
        multi-update because of view substitution
    1 - error
*/

int mysql_update(THD *thd,
                 TABLE_LIST *table_list,
                 List<Item> &fields,
                 List<Item> &values,
                 COND *conds,
                 uint order_num, ORDER *order,
                 ha_rows limit,
                 enum enum_duplicates handle_duplicates, bool ignore,
                 ha_rows *found_return, ha_rows *updated_return)
{
  bool using_limit= limit != HA_POS_ERROR;
  bool safe_update= thd->variables.option_bits & OPTION_SAFE_UPDATES;
  bool used_key_is_modified= FALSE, transactional_table;
  bool will_batch= FALSE;
  bool can_compare_record;
  int res;
  int error, loc_error;
  ha_rows dup_key_found;
  bool need_sort= TRUE;
  bool reverse= FALSE;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
  uint want_privilege;
#endif
  uint table_count= 0;
  ha_rows updated, found;
  key_map old_covering_keys;
  TABLE *table;
  SQL_SELECT *select= NULL;
  SORT_INFO *file_sort= 0;
  READ_RECORD info;
  SELECT_LEX *select_lex= &thd->lex->select_lex;
  ulonglong id;
  List<Item> all_fields;
  killed_state killed_status= NOT_KILLED;
  bool has_triggers, binlog_is_row, do_direct_update= FALSE;
  Update_plan query_plan(thd->mem_root);
  Explain_update *explain;
  TABLE_LIST *update_source_table;
  query_plan.index= MAX_KEY;
  query_plan.using_filesort= FALSE;

  // For System Versioning (may need to insert new fields to a table).
  ha_rows updated_sys_ver= 0;

  DBUG_ENTER("mysql_update");

  create_explain_query(thd->lex, thd->mem_root);
  if (open_tables(thd, &table_list, &table_count, 0))
    DBUG_RETURN(1);

  /* Prepare views so they are handled correctly */
  if (mysql_handle_derived(thd->lex, DT_INIT))
    DBUG_RETURN(1);

  if (((update_source_table= unique_table(thd, table_list,
                                          table_list->next_global, 0)) ||
       table_list->is_multitable()))
  {
    DBUG_ASSERT(update_source_table || table_list->view != 0);
    DBUG_PRINT("info", ("Switch to multi-update"));
    /* pass counter value */
    thd->lex->table_count= table_count;
    /* convert to multiupdate */
    DBUG_RETURN(2);
  }
  if (lock_tables(thd, table_list, table_count, 0))
    DBUG_RETURN(1);

  THD_STAGE_INFO(thd, stage_init_update);
  if (table_list->handle_derived(thd->lex, DT_MERGE_FOR_INSERT))
    DBUG_RETURN(1);
  if (table_list->handle_derived(thd->lex, DT_PREPARE))
    DBUG_RETURN(1);

  table= table_list->table;

  if (!table_list->single_table_updatable())
  {
    my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias.str, "UPDATE");
    DBUG_RETURN(1);
  }
  query_plan.updating_a_view= MY_TEST(table_list->view);

  /* Calculate "table->covering_keys" based on the WHERE */
  table->covering_keys= table->s->keys_in_use;
  table->quick_keys.clear_all();

  query_plan.select_lex= &thd->lex->select_lex;
  query_plan.table= table;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
  /* Force privilege re-checking for views after they have been opened. */
  want_privilege= (table_list->view ? UPDATE_ACL :
                   table_list->grant.want_privilege);
#endif
  if (mysql_prepare_update(thd, table_list, &conds, order_num, order))
    DBUG_RETURN(1);

  old_covering_keys= table->covering_keys;      // Keys used in WHERE
  /* Check the fields we are going to modify */
#ifndef NO_EMBEDDED_ACCESS_CHECKS
  table_list->grant.want_privilege= table->grant.want_privilege= want_privilege;
  table_list->register_want_access(want_privilege);
#endif
  /* 'Unfix' fields to allow correct marking by the setup_fields function. */
  if (table_list->is_view())
    unfix_fields(fields);

  if (setup_fields_with_no_wrap(thd, Ref_ptr_array(),
                                fields, MARK_COLUMNS_WRITE, 0, 0))
    DBUG_RETURN(1);                             /* purecov: inspected */
  if (check_fields(thd, fields, table_list->view))
  {
    DBUG_RETURN(1);
  }
  bool has_vers_fields= check_has_vers_fields(table, fields);
  if (check_key_in_view(thd, table_list))
  {
    my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias.str, "UPDATE");
    DBUG_RETURN(1);
  }

  if (table->default_field)
    table->mark_default_fields_for_write(false);

#ifndef NO_EMBEDDED_ACCESS_CHECKS
  /* Check values */
  table_list->grant.want_privilege= table->grant.want_privilege=
    (SELECT_ACL & ~table->grant.privilege);
#endif
  if (setup_fields(thd, Ref_ptr_array(), values, MARK_COLUMNS_READ, 0, NULL, 0))
  {
    free_underlaid_joins(thd, select_lex);
    DBUG_RETURN(1);                             /* purecov: inspected */
  }

  if (check_unique_table(thd, table_list))
    DBUG_RETURN(TRUE);

  switch_to_nullable_trigger_fields(fields, table);
  switch_to_nullable_trigger_fields(values, table);

  /* Apply the IN=>EXISTS transformation to all subqueries and optimize them */
  if (select_lex->optimize_unflattened_subqueries(false))
    DBUG_RETURN(TRUE);

  if (select_lex->inner_refs_list.elements &&
      fix_inner_refs(thd, all_fields, select_lex, select_lex->ref_pointer_array))
    DBUG_RETURN(1);

  if (conds)
  {
    Item::cond_result cond_value;
    conds= conds->remove_eq_conds(thd, &cond_value, true);
    if (cond_value == Item::COND_FALSE)
    {
      limit= 0;                                 // Impossible WHERE
      query_plan.set_impossible_where();
      if (thd->lex->describe || thd->lex->analyze_stmt)
        goto produce_explain_and_leave;
    }
  }

  // Don't count on usage of 'only index' when calculating which key to use
  table->covering_keys.clear_all();

#ifdef WITH_PARTITION_STORAGE_ENGINE
  if (prune_partitions(thd, table, conds))
  {
    free_underlaid_joins(thd, select_lex);

    query_plan.set_no_partitions();
    if (thd->lex->describe || thd->lex->analyze_stmt)
      goto produce_explain_and_leave;

    my_ok(thd);                                 // No matching records
    DBUG_RETURN(0);
  }
#endif
  /* Update the table->file->stats.records number */
  table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
  set_statistics_for_table(thd, table);

  select= make_select(table, 0, 0, conds, (SORT_INFO*) 0, 0, &error);
  if (unlikely(error || !limit || thd->is_error() ||
               (select && select->check_quick(thd, safe_update, limit))))
  {
    query_plan.set_impossible_where();
    if (thd->lex->describe || thd->lex->analyze_stmt)
      goto produce_explain_and_leave;

    delete select;
    free_underlaid_joins(thd, select_lex);
    /*
      There was an error or the error was already sent by
      the quick select evaluation.
      TODO: Add error code output parameter to Item::val_xxx() methods.
      Currently they rely on the user checking DA for
      errors when unwinding the stack after calling Item::val_xxx().
    */
    if (error || thd->is_error())
    {
      DBUG_RETURN(1);                           // Error in where
    }
    my_ok(thd);                                 // No matching records
    DBUG_RETURN(0);
  }

  /* If running in safe sql mode, don't allow updates without keys */
  if (table->quick_keys.is_clear_all())
  {
    thd->set_status_no_index_used();
    if (safe_update && !using_limit)
    {
      my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
                 ER_THD(thd, ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0));
      goto err;
    }
  }
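
  /*
    Illustrative example of the safe-update check above: with
    SQL_SAFE_UPDATES=1 (OPTION_SAFE_UPDATES), a statement such as

      UPDATE t SET a= 1;

    that uses neither a key-based WHERE clause nor a LIMIT is rejected
    with ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE before any row is touched.
  */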
  if (unlikely(init_ftfuncs(thd, select_lex, 1)))
    goto err;

  table->mark_columns_needed_for_update();

  table->update_const_key_parts(conds);
  order= simple_remove_const(order, conds);
  query_plan.scanned_rows= select ? select->records : table->file->stats.records;

  if (select && select->quick && select->quick->unique_key_range())
  {
    /* Single row select (always "ordered"): Ok to use with key field UPDATE */
    need_sort= FALSE;
    query_plan.index= MAX_KEY;
    used_key_is_modified= FALSE;
  }
  else
  {
    ha_rows scanned_limit= query_plan.scanned_rows;
    query_plan.index= get_index_for_order(order, table, select, limit,
                                          &scanned_limit, &need_sort,
                                          &reverse);
    if (!need_sort)
      query_plan.scanned_rows= scanned_limit;

    if (select && select->quick)
    {
      DBUG_ASSERT(need_sort || query_plan.index == select->quick->index);
      used_key_is_modified= (!select->quick->unique_key_range() &&
                             select->quick->is_keys_used(table->write_set));
    }
    else
    {
      if (need_sort)
      {
        /* Assign table scan index to check below for modified key fields: */
        query_plan.index= table->file->key_used_on_scan;
      }
      if (query_plan.index != MAX_KEY)
      {
        /* Check if we are modifying a key that we use to search with: */
        used_key_is_modified= is_key_used(table, query_plan.index,
                                          table->write_set);
      }
    }
  }

  /*
    Query optimization is finished at this point.
     - Save the decisions in the query plan
     - if we're running EXPLAIN UPDATE, get out
  */
  query_plan.select= select;
  query_plan.possible_keys= select ? select->possible_keys : key_map(0);

  if (used_key_is_modified || order ||
      partition_key_modified(table, table->write_set))
  {
    if (order && need_sort)
      query_plan.using_filesort= true;
    else
      query_plan.using_io_buffer= true;
  }

  /*
    Ok, we have generated a query plan for the UPDATE.
     - if we're running EXPLAIN UPDATE, goto produce explain output
     - otherwise, execute the query plan
  */
  if (thd->lex->describe)
    goto produce_explain_and_leave;
  if (!(explain= query_plan.save_explain_update_data(query_plan.mem_root, thd)))
    goto err;

  ANALYZE_START_TRACKING(&explain->command_tracker);

  DBUG_EXECUTE_IF("show_explain_probe_update_exec_start",
                  dbug_serve_apcs(thd, 1););

  has_triggers= (table->triggers &&
                 (table->triggers->has_triggers(TRG_EVENT_UPDATE,
                                                TRG_ACTION_BEFORE) ||
                  table->triggers->has_triggers(TRG_EVENT_UPDATE,
                                                TRG_ACTION_AFTER)));
  DBUG_PRINT("info", ("has_triggers: %s", has_triggers ? "TRUE" : "FALSE"));
  binlog_is_row= thd->is_current_stmt_binlog_format_row();
  DBUG_PRINT("info", ("binlog_is_row: %s", binlog_is_row ? "TRUE" : "FALSE"));

  if (!(select && select->quick))
    status_var_increment(thd->status_var.update_scan_count);

  /*
    We can use direct update (an update that is done silently in the handler)
    if none of the following conditions are true:
    - There are triggers
    - There is binary logging
    - using_io_buffer
      - This means that the partition changed or the key we want
        to use for scanning the table is changed
    - ignore is set
      - Direct updates don't return the number of ignored rows
    - There is a virtual not stored column in the WHERE clause
    - Changing a field used by a stored virtual column, which
      would require the column to be recalculated
    - ORDER BY or LIMIT
      - As this requires the rows to be updated in a specific order
      - Note that Spider can handle ORDER BY and LIMIT in a cluster with
        one data node. These conditions are therefore checked in
        direct_update_rows_init().

    Direct update does not require a WHERE clause.

    Later we also ensure that we are only using one table (no subqueries).
  */
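  /*
    Illustrative example (assuming an engine such as Spider that sets
    HA_CAN_DIRECT_UPDATE_AND_DELETE): a statement like

      UPDATE t SET a= a + 1 WHERE b < 10;

    with no triggers, no row-format binlogging, no IGNORE and no
    io-buffer/virtual-column restrictions can be executed entirely
    inside the handler, avoiding the per-row read/update round trip
    through the server.
  */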
  if ((table->file->ha_table_flags() & HA_CAN_DIRECT_UPDATE_AND_DELETE) &&
      !has_triggers && !binlog_is_row &&
      !query_plan.using_io_buffer && !ignore &&
      !table->check_virtual_columns_marked_for_read() &&
      !table->check_virtual_columns_marked_for_write())
  {
    DBUG_PRINT("info", ("Trying direct update"));
    if (select && select->cond &&
        (select->cond->used_tables() == table->map))
    {
      DBUG_ASSERT(!table->file->pushed_cond);
      if (!table->file->cond_push(select->cond))
        table->file->pushed_cond= select->cond;
    }

    if (!table->file->info_push(INFO_KIND_UPDATE_FIELDS, &fields) &&
        !table->file->info_push(INFO_KIND_UPDATE_VALUES, &values) &&
        !table->file->direct_update_rows_init())
    {
      do_direct_update= TRUE;

      /* Direct update is not using_filesort and is not using_io_buffer */
      goto update_begin;
    }
  }

  if (query_plan.using_filesort || query_plan.using_io_buffer)
  {
    /*
      We can't update the table directly; we must first find all matching
      rows before updating the table!

      Note: we avoid sorting if we sort on the used index.
    */
    if (query_plan.using_filesort)
    {
      /*
        Doing an ORDER BY; let filesort find and sort the rows we are going
        to update
        NOTE: filesort will call table->prepare_for_position()
      */
      Filesort fsort(order, limit, true, select);

      Filesort_tracker *fs_tracker=
        thd->lex->explain->get_upd_del_plan()->filesort_tracker;

      if (!(file_sort= filesort(thd, table, &fsort, fs_tracker)))
        goto err;
      thd->inc_examined_row_count(file_sort->examined_rows);

      /*
        Filesort has already found and selected the rows we want to update,
        so we don't need the where clause
      */
      delete select;
      select= 0;
    }
    else
    {
      MY_BITMAP *save_read_set= table->read_set;
      MY_BITMAP *save_write_set= table->write_set;

      if (query_plan.index < MAX_KEY && old_covering_keys.is_set(query_plan.index))
        table->prepare_for_keyread(query_plan.index);
      else
        table->use_all_columns();

      /*
        We are doing a search on a key that is updated. In this case
        we go through the matching rows, save a pointer to them and
        update these in a separate loop based on the pointer.
      */
      explain->buf_tracker.on_scan_init();
      IO_CACHE tempfile;
      if (open_cached_file(&tempfile, mysql_tmpdir, TEMP_PREFIX,
                           DISK_BUFFER_SIZE, MYF(MY_WME)))
        goto err;

      /* If quick select is used, initialize it before retrieving rows. */
      if (select && select->quick && select->quick->reset())
      {
        close_cached_file(&tempfile);
        goto err;
      }

      table->file->try_semi_consistent_read(1);

      /*
        When we get here, we have one of the following options:
        A. query_plan.index == MAX_KEY
           This means we should use a full table scan, and start it with
           an init_read_record call
        B. query_plan.index != MAX_KEY
           B.1 quick select is used, start the scan with init_read_record
           B.2 quick select is not used, this is a full index scan (with
               LIMIT). A full index scan must be started with
               init_read_record_idx
      */

      if (query_plan.index == MAX_KEY || (select && select->quick))
        error= init_read_record(&info, thd, table, select, NULL, 0, 1, FALSE);
      else
        error= init_read_record_idx(&info, thd, table, 1, query_plan.index,
                                    reverse);

      if (unlikely(error))
      {
        close_cached_file(&tempfile);
        goto err;
      }

      THD_STAGE_INFO(thd, stage_searching_rows_for_update);
      ha_rows tmp_limit= limit;

      while (likely(!(error= info.read_record())) && likely(!thd->killed))
      {
        explain->buf_tracker.on_record_read();
        thd->inc_examined_row_count(1);
        if (!select || (error= select->skip_record(thd)) > 0)
        {
          if (table->file->ha_was_semi_consistent_read())
            continue;  /* repeat the read of the same row if it still exists */

          explain->buf_tracker.on_record_after_where();
          table->file->position(table->record[0]);
          if (unlikely(my_b_write(&tempfile, table->file->ref,
                                  table->file->ref_length)))
          {
            error= 1;                           /* purecov: inspected */
            break;                              /* purecov: inspected */
          }
          if (!--limit && using_limit)
          {
            error= -1;
            break;
          }
        }
        else
        {
          /*
            Don't try unlocking the row if skip_record reported an
            error since in this case the transaction might have been
            rolled back already.
          */
          if (unlikely(error < 0))
          {
            /* Fatal error from select->skip_record() */
            error= 1;
            break;
          }
          else
            table->file->unlock_row();
        }
      }
      if (unlikely(thd->killed) && !error)
        error= 1;                               // Aborted
      limit= tmp_limit;
      table->file->try_semi_consistent_read(0);
      end_read_record(&info);

      /* Change select to use tempfile */
      if (select)
      {
        delete select->quick;
        if (select->free_cond)
          delete select->cond;
        select->quick= 0;
        select->cond= 0;
      }
      else
      {
        if (!(select= new SQL_SELECT))
          goto err;
        select->head= table;
      }

      if (unlikely(reinit_io_cache(&tempfile, READ_CACHE, 0L, 0, 0)))
        error= 1;                               /* purecov: inspected */
      select->file= tempfile;                   // Read row ptrs from this file
      if (unlikely(error >= 0))
        goto err;

      table->file->ha_end_keyread();
      table->column_bitmaps_set(save_read_set, save_write_set);
    }
  }

update_begin:
  if (ignore)
    table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);

  if (select && select->quick && select->quick->reset())
    goto err;
  table->file->try_semi_consistent_read(1);
  if (init_read_record(&info, thd, table, select, file_sort, 0, 1, FALSE))
    goto err;

  updated= found= 0;
  /*
    Generate an error (in TRADITIONAL mode) or warning
    when trying to set a NOT NULL field to NULL.
  */
  thd->count_cuted_fields= CHECK_FIELD_WARN;
  thd->cuted_fields= 0L;

  transactional_table= table->file->has_transactions();
  thd->abort_on_warning= !ignore && thd->is_strict_mode();

  if (do_direct_update)
  {
    /* Direct updating is supported */
    DBUG_PRINT("info", ("Using direct update"));
    table->reset_default_fields();
    if (unlikely(!(error= table->file->ha_direct_update_rows(&updated))))
      error= -1;
    found= updated;
    goto update_end;
  }

  if ((table->file->ha_table_flags() & HA_CAN_FORCE_BULK_UPDATE) &&
      !table->prepare_triggers_for_update_stmt_or_event())
    will_batch= !table->file->start_bulk_update();

  /*
    Assure that we can use position()
    if we need to create an error message.
  */
  if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
    table->prepare_for_position();

  table->reset_default_fields();

  /*
    We can use compare_record() to optimize away updates if
    the table handler is returning all columns OR if
    all updated columns are read
  */
  can_compare_record= records_are_comparable(table);
  explain->tracker.on_scan_init();

  THD_STAGE_INFO(thd, stage_updating);
  while (!(error= info.read_record()) && !thd->killed)
  {
    if (table->versioned() && !table->vers_end_field()->is_max())
    {
      continue;
    }

    explain->tracker.on_record_read();
    thd->inc_examined_row_count(1);
    if (!select || select->skip_record(thd) > 0)
    {
      if (table->file->ha_was_semi_consistent_read())
        continue;  /* repeat the read of the same row if it still exists */

      explain->tracker.on_record_after_where();
      store_record(table, record[1]);

      if (fill_record_n_invoke_before_triggers(thd, table, fields, values, 0,
                                               TRG_EVENT_UPDATE))
        break;                                  /* purecov: inspected */

      found++;

      if (!can_compare_record || compare_record(table))
      {
        if (table->default_field && table->update_default_fields(1, ignore))
        {
          error= 1;
          break;
        }
        if ((res= table_list->view_check_option(thd, ignore)) !=
            VIEW_CHECK_OK)
        {
          found--;
          if (res == VIEW_CHECK_SKIP)
            continue;
          else if (res == VIEW_CHECK_ERROR)
          {
            error= 1;
            break;
          }
        }
        if (will_batch)
        {
          /*
            Typically a batched handler can execute the batched jobs:
            1) When specifically told to do so
            2) When it is not a good idea to batch anymore
            3) When it is necessary to send batches for other reasons
               (one such reason is when READs must be performed)

            1) is covered by exec_bulk_update calls.
            2) and 3) are handled by the bulk_update_row method.

            bulk_update_row can execute the updates including or not
            including the row defined in the bulk_update_row call. This is
            up to the handler implementation and can vary from call to
            call.

            dup_key_found reports the number of duplicate keys found
            in those updates actually executed. It only reports those if
            the extra call with HA_EXTRA_IGNORE_DUP_KEY has been issued.
            If this hasn't been issued the handler returns an error code
            and this number can be ignored. Thus any handler that
            implements batching for UPDATE IGNORE must also handle this
            extra call properly.

            If a duplicate key is found on the record included in this
            call then it should be included in the count of dup_key_found
            and error should be set to 0 (only if these errors are ignored).
          */
          DBUG_PRINT("info", ("Batched update"));
          error= table->file->ha_bulk_update_row(table->record[1],
                                                 table->record[0],
                                                 &dup_key_found);
          limit+= dup_key_found;
          updated-= dup_key_found;
        }
        else
        {
          /* Non-batched update */
          error= table->file->ha_update_row(table->record[1],
                                            table->record[0]);
        }
        if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
        {
          error= 0;
        }
        else if (likely(!error))
        {
          if (has_vers_fields && table->versioned())
          {
            if (table->versioned(VERS_TIMESTAMP))
            {
              store_record(table, record[2]);
              error= vers_insert_history_row(table);
              restore_record(table, record[2]);
            }
            if (likely(!error))
              updated_sys_ver++;
          }
          if (likely(!error))
            updated++;
        }

        if (unlikely(error) &&
            (!ignore || table->file->is_fatal_error(error, HA_CHECK_ALL)))
        {
          /*
            If (ignore && error is ignorable) we don't have to
            do anything; otherwise...
          */
          myf flags= 0;

          if (table->file->is_fatal_error(error, HA_CHECK_ALL))
            flags|= ME_FATALERROR;   /* Other handler errors are fatal */

          prepare_record_for_error_message(error, table);
          table->file->print_error(error, MYF(flags));
          error= 1;
          break;
        }
      }

      if (table->triggers &&
          unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
                                                     TRG_ACTION_AFTER, TRUE)))
      {
        error= 1;
        break;
      }

      if (!--limit && using_limit)
      {
        /*
          We have reached end-of-file in the most common situations: no
          batching has occurred, or batching was supposed to occur but no
          updates were made, or the batch execution was performed without
          error and without finding any duplicate keys.
          If the batched updates were performed with errors we need to
          check, and if there was no error but duplicate keys were found
          we need to continue, since those are not counted in the limit.
        */
        if (will_batch &&
            ((error= table->file->exec_bulk_update(&dup_key_found)) ||
             dup_key_found))
        {
          if (error)
          {
            /* purecov: begin inspected */
            /*
              The handler should not report error of duplicate keys if they
              are ignored. This is a requirement on batching handlers.
            */
            prepare_record_for_error_message(error, table);
            table->file->print_error(error, MYF(0));
            error= 1;
            break;
            /* purecov: end */
          }
          /*
            Either an error was found and we are ignoring errors or there
            were duplicate keys found. In both cases we need to correct
            the counters and continue the loop.
          */
          limit= dup_key_found;  // limit is 0 here, so re-add dup_key_found
          updated-= dup_key_found;
        }
        else
        {
          error= -1;                            // Simulate end of file
          break;
        }
      }
    }
    /*
      Don't try unlocking the row if skip_record reported an error since in
      this case the transaction might have been rolled back already.
    */
    else if (likely(!thd->is_error()))
      table->file->unlock_row();
    else
    {
      error= 1;
      break;
    }
    thd->get_stmt_da()->inc_current_row_for_warning();
    if (unlikely(thd->is_error()))
    {
      error= 1;
      break;
    }
  }
  ANALYZE_STOP_TRACKING(&explain->command_tracker);
  table->auto_increment_field_not_null= FALSE;
  dup_key_found= 0;
  /*
    Caching the killed status to pass as the arg to the query event
    constructor; the cached value cannot change, whereas the killed status
    can (externally) after this point, and a change of the latter won't
    affect binlogging.
    It's assumed that if an error was set in combination with an effective
    killed status then the error is due to killing.
  */
  killed_status= thd->killed;              // get the status of the volatile
  // simulated killing after the loop must be ineffective for binlogging
  DBUG_EXECUTE_IF("simulate_kill_bug27571",
                  {
                    thd->set_killed(KILL_QUERY);
                  };);
  error= (killed_status == NOT_KILLED) ? error : 1;

  if (likely(error) &&
      will_batch &&
      (loc_error= table->file->exec_bulk_update(&dup_key_found)))
    /*
      An error has occurred when a batched update was performed and it
      returned an error indication. It cannot be an allowed duplicate key
      error since we require the batching handler to treat this as normal
      behavior.

      Otherwise we simply remove the number of duplicate key records found
      in the batched update.
    */
  {
    /* purecov: begin inspected */
    prepare_record_for_error_message(loc_error, table);
    table->file->print_error(loc_error, MYF(ME_FATALERROR));
    error= 1;
    /* purecov: end */
  }
  else
    updated-= dup_key_found;
  if (will_batch)
    table->file->end_bulk_update();

update_end:
  table->file->try_semi_consistent_read(0);

  if (!transactional_table && updated > 0)
    thd->transaction.stmt.modified_non_trans_table= TRUE;

  end_read_record(&info);
  delete select;
  select= NULL;
  THD_STAGE_INFO(thd, stage_end);
  (void) table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);

  /*
    Invalidate the table in the query cache if something changed.
    This must be before binlog writing and ha_autocommit_...
  */
  if (updated)
  {
    query_cache_invalidate3(thd, table_list, 1);
  }

  if (thd->transaction.stmt.modified_non_trans_table)
    thd->transaction.all.modified_non_trans_table= TRUE;
  thd->transaction.all.m_unsafe_rollback_flags|=
    (thd->transaction.stmt.m_unsafe_rollback_flags & THD_TRANS::DID_WAIT);

  /*
    error < 0 means really no error at all: we processed all rows until the
    last one without error. error > 0 means an error (e.g. unique key
    violation and no IGNORE or REPLACE). error == 0 is also an error (if
    preparing the record or invoking before triggers fails). See
    ha_autocommit_or_rollback(error >= 0) and DBUG_RETURN(error >= 0) below.
    Sometimes we want to binlog even if we updated no rows, in case the user
    used it to make sure master and slave are in the same state.
  */
  if (likely(error < 0) || thd->transaction.stmt.modified_non_trans_table)
  {
    if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
    {
      int errcode= 0;
      if (likely(error < 0))
        thd->clear_error();
      else
        errcode= query_error_code(thd, killed_status == NOT_KILLED);

      ScopedStatementReplication scoped_stmt_rpl(
        table->versioned(VERS_TRX_ID) ? thd : NULL);

      if (thd->binlog_query(THD::ROW_QUERY_TYPE,
                            thd->query(), thd->query_length(),
                            transactional_table, FALSE, FALSE, errcode))
      {
        error= 1;                               // Rollback update
      }
    }
  }
  DBUG_ASSERT(transactional_table || !updated ||
              thd->transaction.stmt.modified_non_trans_table);
  free_underlaid_joins(thd, select_lex);
  delete file_sort;
  if (table->file->pushed_cond)
  {
    table->file->pushed_cond= 0;
    table->file->cond_pop();
  }

  /* If LAST_INSERT_ID(X) was used, report X */
  id= thd->arg_of_last_insert_id_function ?
    thd->first_successful_insert_id_in_prev_stmt : 0;

  if (likely(error < 0) && likely(!thd->lex->analyze_stmt))
  {
    char buff[MYSQL_ERRMSG_SIZE];
    if (!table->versioned(VERS_TIMESTAMP))
      my_snprintf(buff, sizeof(buff), ER_THD(thd, ER_UPDATE_INFO),
                  (ulong) found, (ulong) updated,
                  (ulong) thd->get_stmt_da()->current_statement_warn_count());
    else
      my_snprintf(buff, sizeof(buff),
                  ER_THD(thd, ER_UPDATE_INFO_WITH_SYSTEM_VERSIONING),
                  (ulong) found, (ulong) updated, (ulong) updated_sys_ver,
                  (ulong) thd->get_stmt_da()->current_statement_warn_count());
    my_ok(thd, (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated,
          id, buff);
    DBUG_PRINT("info", ("%ld records updated", (long) updated));
  }
  thd->count_cuted_fields= CHECK_FIELD_IGNORE;  /* calc cuted fields */
  thd->abort_on_warning= 0;
  if (thd->lex->current_select->first_cond_optimization)
  {
    thd->lex->current_select->save_leaf_tables(thd);
    thd->lex->current_select->first_cond_optimization= 0;
  }
  *found_return= found;
  *updated_return= updated;

  if (unlikely(thd->lex->analyze_stmt))
    goto emit_explain_and_leave;

  DBUG_RETURN((error >= 0 || thd->is_error()) ? 1 : 0);

err:
  delete select;
  delete file_sort;
  free_underlaid_joins(thd, select_lex);
  table->file->ha_end_keyread();
  if (table->file->pushed_cond)
    table->file->cond_pop();
  thd->abort_on_warning= 0;
  DBUG_RETURN(1);

produce_explain_and_leave:
  /*
    We come here for various "degenerate" query plans: impossible WHERE,
    no-partitions-used, impossible-range, etc.
  */
  if (unlikely(!query_plan.save_explain_update_data(query_plan.mem_root, thd)))
    goto err;

emit_explain_and_leave:
  int err2= thd->lex->explain->send_explain(thd);

  delete select;
  free_underlaid_joins(thd, select_lex);
  DBUG_RETURN((err2 || thd->is_error()) ? 1 : 0);
}

/*
  Prepare items in UPDATE statement

  SYNOPSIS
    mysql_prepare_update()
    thd                 thread handler
    table_list          global/local table list
    conds               conditions
    order_num           number of ORDER BY list entries
    order               ORDER BY clause list

  RETURN VALUE
    FALSE OK
    TRUE  error
*/
bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
                          Item **conds, uint order_num, ORDER *order)
{
  Item *fake_conds= 0;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
  TABLE *table= table_list->table;
#endif
  List<Item> all_fields;
  SELECT_LEX *select_lex= &thd->lex->select_lex;
  DBUG_ENTER("mysql_prepare_update");

#ifndef NO_EMBEDDED_ACCESS_CHECKS
  table_list->grant.want_privilege= table->grant.want_privilege=
    (SELECT_ACL & ~table->grant.privilege);
  table_list->register_want_access(SELECT_ACL);
#endif

  thd->lex->allow_sum_func= 0;

  /*
    We do not call DT_MERGE_FOR_INSERT because it makes no sense for a
    simple (not multi-table) update.
  */
  if (mysql_handle_derived(thd->lex, DT_PREPARE))
    DBUG_RETURN(TRUE);

  if (setup_tables_and_check_access(thd, &select_lex->context,
                                    &select_lex->top_join_list,
                                    table_list,
                                    select_lex->leaf_tables,
                                    FALSE, UPDATE_ACL, SELECT_ACL, TRUE) ||
      setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
      select_lex->setup_ref_array(thd, order_num) ||
      setup_order(thd, select_lex->ref_pointer_array,
                  table_list, all_fields, all_fields, order) ||
      setup_ftfuncs(select_lex))
    DBUG_RETURN(TRUE);

  select_lex->fix_prepare_information(thd, conds, &fake_conds);
  DBUG_RETURN(FALSE);
}

/**
  Check that we are not using a table that we are updating in a subselect

  @param thd        Thread handle
  @param table_list List of tables, with the first one to check

  @retval TRUE  Error
  @retval FALSE OK
*/
bool check_unique_table(THD *thd, TABLE_LIST *table_list)
{
  TABLE_LIST *duplicate;
  DBUG_ENTER("check_unique_table");
  if ((duplicate= unique_table(thd, table_list, table_list->next_global, 0)))
  {
    update_non_unique_table_error(table_list, "UPDATE", duplicate);
    DBUG_RETURN(TRUE);
  }
  DBUG_RETURN(FALSE);
}
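
/*
  Illustrative example of the check above:

    UPDATE t1 SET a= (SELECT MAX(a) FROM t1);

  uses t1 both as the update target and inside a subquery, so
  unique_table() finds a duplicate and the statement fails with
  ER_UPDATE_TABLE_USED.
*/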

/***************************************************************************
  Update multiple tables from join
***************************************************************************/

/*
  Get table map for list of Item_field
*/

static table_map get_table_map(List<Item> *items)
{
  List_iterator_fast<Item> item_it(*items);
  Item_field *item;
  table_map map= 0;

  while ((item= (Item_field *) item_it++))
    map|= item->all_used_tables();
  DBUG_PRINT("info", ("table_map: 0x%08lx", (long) map));
  return map;
}

/**
  If one row is updated through two different aliases and the first
  update physically moves the row, the second update will fail
  because the row is no longer located where expected. This function
  checks if the multiple-table update is about to do that and if so
  returns with an error.

  The following update operations physically move rows:
  1) Update of a column in a clustered primary key
  2) Update of a column used to calculate which partition the row belongs to

  This function returns with an error if both of the following are
  true:

  a) A table in the multiple-table update statement is updated
     through multiple aliases (including views)
  b) At least one of the updates on the table from a) may physically
     move the row. Note: Updating a column used to calculate which
     partition a row belongs to does not necessarily mean that the
     row is moved. The new value may or may not belong to the same
     partition.

  @param leaves            First leaf table
  @param tables_for_update Map of tables that are updated

  @return
    true if the update is unsafe, in which case an error message is also set,
    false otherwise.
*/
static
bool unsafe_key_update(List<TABLE_LIST> leaves, table_map tables_for_update)
{
  List_iterator_fast<TABLE_LIST> it(leaves), it2(leaves);
  TABLE_LIST *tl, *tl2;

  while ((tl= it++))
  {
    if (!tl->is_jtbm() && (tl->table->map & tables_for_update))
    {
      TABLE *table1= tl->table;
      bool primkey_clustered= (table1->file->primary_key_is_clustered() &&
                               table1->s->primary_key != MAX_KEY);

      bool table_partitioned= false;
#ifdef WITH_PARTITION_STORAGE_ENGINE
      table_partitioned= (table1->part_info != NULL);
#endif

      if (!table_partitioned && !primkey_clustered)
        continue;

      it2.rewind();
      while ((tl2= it2++))
      {
        if (tl2->is_jtbm())
          continue;
        /*
          Look at "next" tables only since all previous tables have
          already been checked
        */
        TABLE *table2= tl2->table;
        if (tl2 != tl &&
            table2->map & tables_for_update && table1->s == table2->s)
        {
          // A table is updated through two aliases
          if (table_partitioned &&
              (partition_key_modified(table1, table1->write_set) ||
               partition_key_modified(table2, table2->write_set)))
          {
            // Partitioned key is updated
            my_error(ER_MULTI_UPDATE_KEY_CONFLICT, MYF(0),
                     tl->top_table()->alias.str,
                     tl2->top_table()->alias.str);
            return true;
          }

          if (primkey_clustered)
          {
            // The primary key can cover multiple columns
            KEY key_info= table1->key_info[table1->s->primary_key];
            KEY_PART_INFO *key_part= key_info.key_part;
            KEY_PART_INFO *key_part_end= key_part +
                                         key_info.user_defined_key_parts;

            for (; key_part != key_part_end; ++key_part)
            {
              if (bitmap_is_set(table1->write_set, key_part->fieldnr - 1) ||
                  bitmap_is_set(table2->write_set, key_part->fieldnr - 1))
              {
                // Clustered primary key is updated
                my_error(ER_MULTI_UPDATE_KEY_CONFLICT, MYF(0),
                         tl->top_table()->alias.str,
                         tl2->top_table()->alias.str);
                return true;
              }
            }
          }
        }
      }
    }
  }
  return false;
}
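
/*
  Illustrative example of an unsafe multi-table update (assuming t1 uses
  a clustered primary key, e.g. InnoDB):

    UPDATE t1 AS a, t1 AS b SET a.pk= a.pk + 1 WHERE a.pk = b.pk;

  The same table is updated through two aliases and the update may
  physically move rows, so the statement is rejected with
  ER_MULTI_UPDATE_KEY_CONFLICT.
*/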

/**
  Check if there is enough privilege on specific table used by the
  main select list of multi-update directly or indirectly (through
  a view).

  @param[in]     thd               Thread context.
  @param[in]     table             Table list element for the table.
  @param[in]     tables_for_update Bitmap with tables being updated.
  @param[in,out] updated_arg       Set to true if the table in question is
                                   updated, also set to true if it is
                                   a view and one of its underlying
                                   tables is updated. Should be
                                   initialized to false by the caller
                                   before a sequence of calls to this
                                   function.

  @note To determine which tables/views are updated we have to go from
        leaves to root since tables_for_update contains map of leaf
        tables being updated and doesn't include non-leaf tables
        (fields are already resolved to leaf tables).

  @retval false - Success, all necessary privileges on all tables are
                  present or might be present on column-level.
  @retval true  - Failure, some necessary privilege on some table is
                  missing.
*/

static bool multi_update_check_table_access(THD *thd, TABLE_LIST *table,
                                            table_map tables_for_update,
                                            bool *updated_arg)
{
  if (table->view)
  {
    bool updated= false;
    /*
      If it is a mergeable view then we need to check privileges on its
      underlying tables being merged (including views). We also need to
      check if any of them is updated in order to find if this view is
      updated.
      If it is a non-mergeable view then it can't be updated.
    */
    DBUG_ASSERT(table->merge_underlying_list ||
                (!table->updatable &&
                 !(table->table->map & tables_for_update)));

    for (TABLE_LIST *tbl= table->merge_underlying_list; tbl;
         tbl= tbl->next_local)
    {
      if (multi_update_check_table_access(thd, tbl, tables_for_update,
                                          &updated))
      {
        tbl->hide_view_error(thd);
        return true;
      }
    }
    if (check_table_access(thd, updated ? UPDATE_ACL : SELECT_ACL, table,
                           FALSE, 1, FALSE))
      return true;
    *updated_arg|= updated;
    /* We only need SELECT privilege for columns in the values list. */
    table->grant.want_privilege= SELECT_ACL & ~table->grant.privilege;
  }
  else
  {
    /* Must be a base or derived table. */
    const bool updated= table->table->map & tables_for_update;
    if (check_table_access(thd, updated ? UPDATE_ACL : SELECT_ACL, table,
                           FALSE, 1, FALSE))
      return true;
    *updated_arg|= updated;
    /* We only need SELECT privilege for columns in the values list. */
    if (!table->derived)
    {
      table->grant.want_privilege= SELECT_ACL & ~table->grant.privilege;
      table->table->grant.want_privilege= (SELECT_ACL &
                                           ~table->table->grant.privilege);
    }
  }
  return false;
}


/*
  Make UPDATE-specific preparation and checks after opening tables

  SYNOPSIS
    mysql_multi_update_prepare()
    thd         thread handler

  RETURN
    FALSE OK
    TRUE  Error
*/

int mysql_multi_update_prepare(THD *thd)
{
  LEX *lex= thd->lex;
  TABLE_LIST *table_list= lex->query_tables;
  TABLE_LIST *tl;
  List<Item> *fields= &lex->select_lex.item_list;
  table_map tables_for_update;
  bool update_view= 0;
  /*
    If this multi-update was converted from a usual update, the table
    counter is here; otherwise junk is assigned here, but it is then
    replaced with the real count in open_tables().
  */
  uint table_count= lex->table_count;
  const bool using_lock_tables= thd->locked_tables_mode != LTM_NONE;
  bool original_multiupdate= (thd->lex->sql_command == SQLCOM_UPDATE_MULTI);
  DBUG_ENTER("mysql_multi_update_prepare");

  /* The following is needed for prepared statements, to rerun as multi-update */
  thd->lex->sql_command= SQLCOM_UPDATE_MULTI;

  /*
    Open tables and create derived ones, but do not lock and fill them yet.

    During the prepare phase, acquire only S metadata locks instead of SW
    locks to keep prepare of multi-UPDATE compatible with concurrent
    LOCK TABLES WRITE and global read lock.
  */
  if ((original_multiupdate &&
       open_tables(thd, &table_list, &table_count,
                   (thd->stmt_arena->is_stmt_prepare() ?
                    MYSQL_OPEN_FORCE_SHARED_MDL : 0))) ||
      mysql_handle_derived(lex, DT_INIT))
    DBUG_RETURN(TRUE);
  /*
    setup_tables() is needed for VIEWs. JOIN::prepare() will call
    setup_tables() a second time, but that call will do nothing (there is
    a check for a second call in setup_tables()).
  */

  // We need to merge for insert prior to prepare.
  if (mysql_handle_derived(lex, DT_MERGE_FOR_INSERT))
    DBUG_RETURN(TRUE);

  if (mysql_handle_derived(lex, DT_PREPARE))
    DBUG_RETURN(TRUE);

  if (setup_tables_and_check_access(thd, &lex->select_lex.context,
                                    &lex->select_lex.top_join_list,
                                    table_list,
                                    lex->select_lex.leaf_tables, FALSE,
                                    UPDATE_ACL, SELECT_ACL, FALSE))
    DBUG_RETURN(TRUE);

  if (lex->select_lex.handle_derived(thd->lex, DT_MERGE))
    DBUG_RETURN(TRUE);

  if (setup_fields_with_no_wrap(thd, Ref_ptr_array(),
                                *fields, MARK_COLUMNS_WRITE, 0, 0))
    DBUG_RETURN(TRUE);

  for (tl= table_list; tl; tl= tl->next_local)
  {
    if (tl->view)
    {
      update_view= 1;
      break;
    }
  }

  if (check_fields(thd, *fields, update_view))
  {
    DBUG_RETURN(TRUE);
  }

  thd->table_map_for_update= tables_for_update= get_table_map(fields);

  if (unsafe_key_update(lex->select_lex.leaf_tables, tables_for_update))
    DBUG_RETURN(true);

  /*
    Setup timestamp handling and locking mode
  */
  List_iterator<TABLE_LIST> ti(lex->select_lex.leaf_tables);
  while ((tl= ti++))
  {
    TABLE *table= tl->table;

    if (tl->is_jtbm())
      continue;

    /* if table will be updated then check that it is unique */
    if (table->map & tables_for_update)
    {
      if (!tl->single_table_updatable() || check_key_in_view(thd, tl))
      {
        my_error(ER_NON_UPDATABLE_TABLE, MYF(0),
                 tl->top_table()->alias.str, "UPDATE");
        DBUG_RETURN(TRUE);
      }

      DBUG_PRINT("info", ("setting table `%s` for update",
                          tl->top_table()->alias.str));
      /*
        If the table will be updated we should not downgrade the lock for
        it, but leave it as is.
      */
    }
    else
    {
      DBUG_PRINT("info", ("setting table `%s` for read-only", tl->alias.str));
      /*
        If we are using the binary log, we need TL_READ_NO_INSERT to get
        correct order of statements. Otherwise, we use a TL_READ lock to
        improve performance.
        We don't downgrade metadata lock from SW to SR in this case as
        there is no guarantee that the same ticket is not used by
        another table instance used by this statement which is going to
        be write-locked (for example, trigger to be invoked might try
        to update this table).
        Last argument routine_modifies_data for read_lock_type_for_table()
        is ignored, as prelocking placeholder will never be set here.
      */
      DBUG_ASSERT(tl->prelocking_placeholder == false);
      thr_lock_type lock_type= read_lock_type_for_table(thd, lex, tl, true);
      if (using_lock_tables)
        tl->lock_type= lock_type;
      else
        tl->set_lock_type(thd, lock_type);
      tl->updating= 0;
    }
  }

  /*
    Check access privileges for tables being updated or read.
    Note that unlike in the above loop we need to iterate here not only
    through all leaf tables but also through all view hierarchy.
  */
  for (tl= table_list; tl; tl= tl->next_local)
  {
    bool not_used= false;
    if (tl->is_jtbm())
      continue;
    if (multi_update_check_table_access(thd, tl, tables_for_update, &not_used))
      DBUG_RETURN(TRUE);
  }

  /* Check single-table UPDATE for a view compounded from several tables */
  for (tl= table_list; tl; tl= tl->next_local)
  {
    if (tl->is_jtbm())
      continue;
    if (tl->is_merged_derived())
    {
      TABLE_LIST *for_update= 0;
      if (tl->check_single_table(&for_update, tables_for_update, tl))
      {
        my_error(ER_VIEW_MULTIUPDATE, MYF(0),
                 tl->view_db.str, tl->view_name.str);
        DBUG_RETURN(-1);
      }
    }
  }

  /* now lock and fill tables */
  if (!thd->stmt_arena->is_stmt_prepare() &&
      lock_tables(thd, table_list, table_count, 0))
  {
    DBUG_RETURN(TRUE);
  }
  /* @todo: downgrade the metadata locks here. */

  /*
    Check that we are not using a table that we are updating, but we should
    skip all tables of the UPDATE's SELECT itself
  */
  lex->select_lex.exclude_from_table_unique_test= TRUE;
  /* We only need SELECT privilege for columns in the values list */
  ti.rewind();
  while ((tl= ti++))
  {
    if (tl->is_jtbm())
      continue;
    TABLE *table= tl->table;
    TABLE_LIST *tlist;
    if (!(tlist= tl->top_table())->derived)
    {
      tlist->grant.want_privilege=
        (SELECT_ACL & ~tlist->grant.privilege);
      table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege);
    }
    DBUG_PRINT("info", ("table: %s  want_privilege: %u", tl->alias.str,
                        (uint) table->grant.want_privilege));
  }
  /*
    Set exclude_from_table_unique_test value back to FALSE. It is needed for
    further check in multi_update::prepare whether to use record cache.
  */
  lex->select_lex.exclude_from_table_unique_test= FALSE;

  if (lex->save_prep_leaf_tables())
    DBUG_RETURN(TRUE);

  DBUG_RETURN(FALSE);
}


/*
  Setup multi-update handling and call SELECT to do the join
*/

bool mysql_multi_update(THD *thd,
                        TABLE_LIST *table_list,
                        List<Item> *fields,
                        List<Item> *values,
                        COND *conds,
                        ulonglong options,
                        enum enum_duplicates handle_duplicates,
                        bool ignore,
                        SELECT_LEX_UNIT *unit,
                        SELECT_LEX *select_lex,
                        multi_update **result)
{
  bool res;
  DBUG_ENTER("mysql_multi_update");

  if (!(*result= new (thd->mem_root) multi_update(thd, table_list,
                                                  &thd->lex->select_lex.leaf_tables,
                                                  fields, values,
                                                  handle_duplicates, ignore)))
  {
    DBUG_RETURN(TRUE);
  }

  thd->abort_on_warning= !ignore && thd->is_strict_mode();
  List<Item> total_list;

  res= mysql_select(thd,
                    table_list, select_lex->with_wild, total_list, conds,
                    select_lex->order_list.elements,
                    select_lex->order_list.first,
                    (ORDER *) NULL, (Item *) NULL, (ORDER *) NULL,
                    options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
                    OPTION_SETUP_TABLES_DONE,
                    *result, unit, select_lex);

  DBUG_PRINT("info", ("res: %d  report_error: %d", res, (int) thd->is_error()));
  res|= thd->is_error();
  if (unlikely(res))
    (*result)->abort_result_set();
  else
  {
    if (thd->lex->describe || thd->lex->analyze_stmt)
      res= thd->lex->explain->send_explain(thd);
  }
  thd->abort_on_warning= 0;
  DBUG_RETURN(res);
}


multi_update::multi_update(THD *thd_arg, TABLE_LIST *table_list,
                           List<TABLE_LIST> *leaves_list,
                           List<Item> *field_list, List<Item> *value_list,
                           enum enum_duplicates handle_duplicates_arg,
                           bool ignore_arg):
  select_result_interceptor(thd_arg),
  all_tables(table_list), leaves(leaves_list), update_tables(0),
  tmp_tables(0), updated(0), found(0), fields(field_list),
  values(value_list), table_count(0), copy_field(0),
  handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
  transactional_tables(0), ignore(ignore_arg), error_handled(0), prepared(0),
  updated_sys_ver(0)
{
}
1791
1792
1793/*
1794 Connect fields with tables and create list of tables that are updated
1795*/
1796
1797int multi_update::prepare(List<Item> &not_used_values,
1798 SELECT_LEX_UNIT *lex_unit)
1799
1800{
1801 TABLE_LIST *table_ref;
1802 SQL_I_List<TABLE_LIST> update;
1803 table_map tables_to_update;
1804 Item_field *item;
1805 List_iterator_fast<Item> field_it(*fields);
1806 List_iterator_fast<Item> value_it(*values);
1807 uint i, max_fields;
1808 uint leaf_table_count= 0;
1809 List_iterator<TABLE_LIST> ti(*leaves);
1810 DBUG_ENTER("multi_update::prepare");
1811
1812 if (prepared)
1813 DBUG_RETURN(0);
1814 prepared= true;
1815
1816 thd->count_cuted_fields= CHECK_FIELD_WARN;
1817 thd->cuted_fields=0L;
1818 THD_STAGE_INFO(thd, stage_updating_main_table);
1819
1820 tables_to_update= get_table_map(fields);
1821
1822 if (!tables_to_update)
1823 {
1824 my_message(ER_NO_TABLES_USED, ER_THD(thd, ER_NO_TABLES_USED), MYF(0));
1825 DBUG_RETURN(1);
1826 }
1827
1828 /*
    We gather the set of columns read during evaluation of the SET expressions
    in TABLE::tmp_set by pointing TABLE::read_set to it, and restore read_set
    after setup_fields().
1832 */
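  /*
    For example (hypothetical tables), in

      UPDATE t1, t2 SET t1.a = t1.c + t2.b WHERE t1.id = t2.id

    the column t1.c, read by a SET expression of the updated table t1, is
    collected in t1's tmp_set here; t2 is not updated, so its read_set is
    left untouched by this loop.
  */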
1833 while ((table_ref= ti++))
1834 {
1835 if (table_ref->is_jtbm())
1836 continue;
1837
1838 TABLE *table= table_ref->table;
1839 if (tables_to_update & table->map)
1840 {
1841 DBUG_ASSERT(table->read_set == &table->def_read_set);
1842 table->read_set= &table->tmp_set;
1843 bitmap_clear_all(table->read_set);
1844 }
1845 }
1846
1847 /*
1848 We have to check values after setup_tables to get covering_keys right in
1849 reference tables
1850 */
1851
1852 int error= setup_fields(thd, Ref_ptr_array(),
1853 *values, MARK_COLUMNS_READ, 0, NULL, 0);
1854
1855 ti.rewind();
1856 while ((table_ref= ti++))
1857 {
1858 if (table_ref->is_jtbm())
1859 continue;
1860
1861 TABLE *table= table_ref->table;
1862 if (tables_to_update & table->map)
1863 {
1864 table->read_set= &table->def_read_set;
1865 bitmap_union(table->read_set, &table->tmp_set);
1866 }
1867 }
1868 if (unlikely(error))
1869 DBUG_RETURN(1);
1870
1871 /*
    Save the tables being updated in update_tables.
    TABLE_LIST::shared is the position of the table in that list.
    Don't use key read on tables that are updated.
1875 */
1876
1877 update.empty();
1878 ti.rewind();
1879 while ((table_ref= ti++))
1880 {
    /* TODO: add support for views over joins */
1882 if (table_ref->is_jtbm())
1883 continue;
1884 TABLE *table=table_ref->table;
1885 leaf_table_count++;
1886 if (tables_to_update & table->map)
1887 {
1888 TABLE_LIST *tl= (TABLE_LIST*) thd->memdup(table_ref,
1889 sizeof(*tl));
1890 if (!tl)
1891 DBUG_RETURN(1);
1892 update.link_in_list(tl, &tl->next_local);
1893 tl->shared= table_count++;
1894 table->no_keyread=1;
1895 table->covering_keys.clear_all();
1896 table->pos_in_table_list= tl;
1897 table->prepare_triggers_for_update_stmt_or_event();
1898 table->reset_default_fields();
1899 }
1900 }
1901
1902 table_count= update.elements;
1903 update_tables= update.first;
1904
1905 tmp_tables = (TABLE**) thd->calloc(sizeof(TABLE *) * table_count);
1906 tmp_table_param = (TMP_TABLE_PARAM*) thd->calloc(sizeof(TMP_TABLE_PARAM) *
1907 table_count);
1908 fields_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
1909 table_count);
1910 values_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
1911 table_count);
1912 if (unlikely(thd->is_fatal_error))
1913 DBUG_RETURN(1);
1914 for (i=0 ; i < table_count ; i++)
1915 {
1916 fields_for_table[i]= new List_item;
1917 values_for_table[i]= new List_item;
1918 }
1919 if (unlikely(thd->is_fatal_error))
1920 DBUG_RETURN(1);
1921
  /* Split fields into fields_for_table[] and values_for_table[] */
1923
1924 while ((item= (Item_field *) field_it++))
1925 {
1926 Item *value= value_it++;
1927 uint offset= item->field->table->pos_in_table_list->shared;
1928 fields_for_table[offset]->push_back(item, thd->mem_root);
1929 values_for_table[offset]->push_back(value, thd->mem_root);
1930 }
1931 if (unlikely(thd->is_fatal_error))
1932 DBUG_RETURN(1);
1933
1934 /* Allocate copy fields */
1935 max_fields=0;
1936 for (i=0 ; i < table_count ; i++)
1937 {
1938 set_if_bigger(max_fields, fields_for_table[i]->elements + leaf_table_count);
1939 if (fields_for_table[i]->elements)
1940 {
1941 TABLE *table= ((Item_field*)(fields_for_table[i]->head()))->field->table;
1942 switch_to_nullable_trigger_fields(*fields_for_table[i], table);
1943 switch_to_nullable_trigger_fields(*values_for_table[i], table);
1944 }
1945 }
1946 copy_field= new (thd->mem_root) Copy_field[max_fields];
1947 DBUG_RETURN(thd->is_fatal_error != 0);
1948}
1949
1950void multi_update::update_used_tables()
1951{
1952 Item *item;
1953 List_iterator_fast<Item> it(*values);
1954 while ((item= it++))
1955 {
1956 item->update_used_tables();
1957 }
1958}
1959
1960void multi_update::prepare_to_read_rows()
1961{
1962 /*
    Update the column maps now. It cannot be done in ::prepare() before the
    optimizer, because the optimizer might reset them (in
    SELECT_LEX::update_used_tables()), and it cannot be done in
    ::initialize_tables() after the optimizer, because the optimizer
    might read rows from const tables.
1968 */
1969
1970 for (TABLE_LIST *tl= update_tables; tl; tl= tl->next_local)
1971 tl->table->mark_columns_needed_for_update();
1972}
1973
1974
1975/*
1976 Check if table is safe to update on fly
1977
1978 SYNOPSIS
1979 safe_update_on_fly()
    thd            Thread handler
    join_tab       How the table is used in the join
    table_ref      Reference to the table being checked
    all_tables     List of tables
1983
1984 NOTES
1985 We can update the first table in join on the fly if we know that
1986 a row in this table will never be read twice. This is true under
1987 the following conditions:
1988
1989 - No column is both written to and read in SET expressions.
1990
1991 - We are doing a table scan and the data is in a separate file (MyISAM) or
1992 if we don't update a clustered key.
1993
1994 - We are doing a range scan and we don't update the scan key or
1995 the primary key for a clustered table handler.
1996
1997 - Table is not joined to itself.
1998
1999 This function gets information about fields to be updated from
2000 the TABLE::write_set bitmap.
2001
2002 WARNING
    This code is somewhat dependent on how make_join_readinfo() works.
2004
2005 The field table->tmp_set is used for keeping track of which fields are
2006 read during evaluation of the SET expression. See multi_update::prepare.
2007
2008 RETURN
2009 0 Not safe to update
2010 1 Safe to update
2011*/
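/*
  For illustration (hypothetical schema): in

    UPDATE t1, t1 AS t2 SET t1.a = t2.a + 1 WHERE t1.b = t2.b;

  t1 is joined to itself, so unique_table() below is non-NULL, this function
  returns 0, and the update is buffered in a temporary table instead of
  being done on the fly.
*/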
2012
2013static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab,
2014 TABLE_LIST *table_ref, TABLE_LIST *all_tables)
2015{
2016 TABLE *table= join_tab->table;
2017 if (unique_table(thd, table_ref, all_tables, 0))
2018 return 0;
2019 if (join_tab->join->order) // FIXME this is probably too strong
2020 return 0;
2021 switch (join_tab->type) {
2022 case JT_SYSTEM:
2023 case JT_CONST:
2024 case JT_EQ_REF:
2025 return TRUE; // At most one matching row
2026 case JT_REF:
2027 case JT_REF_OR_NULL:
2028 return !is_key_used(table, join_tab->ref.key, table->write_set);
2029 case JT_ALL:
2030 if (bitmap_is_overlapping(&table->tmp_set, table->write_set))
2031 return FALSE;
2032 /* If range search on index */
2033 if (join_tab->quick)
2034 return !join_tab->quick->is_keys_used(table->write_set);
2035 /* If scanning in clustered key */
2036 if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
2037 table->s->primary_key < MAX_KEY)
2038 return !is_key_used(table, table->s->primary_key, table->write_set);
2039 return TRUE;
2040 default:
2041 break; // Avoid compiler warning
2042 }
2043 return FALSE;
2044
2045}
2046
2047
2048/*
  Initialize tables for a multi-table update
2050
2051 IMPLEMENTATION
2052 - Update first table in join on the fly, if possible
2053 - Create temporary tables to store changed values for all other tables
2054 that are updated (and main_table if the above doesn't hold).
2055*/
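/*
  A sketch of the resulting strategy, assuming two updated tables where the
  first one passes safe_update_on_fly():

    t1 (first table in join)   updated in place during the join scan
    t2                         rowid + new values buffered in tmp_tables[],
                               applied afterwards by do_updates()
*/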
2056
2057bool
2058multi_update::initialize_tables(JOIN *join)
2059{
2060 TABLE_LIST *table_ref;
2061 DBUG_ENTER("initialize_tables");
2062
2063 if (unlikely((thd->variables.option_bits & OPTION_SAFE_UPDATES) &&
2064 error_if_full_join(join)))
2065 DBUG_RETURN(1);
2066 main_table=join->join_tab->table;
2067 table_to_update= 0;
2068
2069 /* Any update has at least one pair (field, value) */
2070 DBUG_ASSERT(fields->elements);
2071 /*
2072 Only one table may be modified by UPDATE of an updatable view.
2073 For an updatable view first_table_for_update indicates this
2074 table.
2075 For a regular multi-update it refers to some updated table.
2076 */
2077 TABLE *first_table_for_update= ((Item_field *) fields->head())->field->table;
2078
2079 /* Create a temporary table for keys to all tables, except main table */
2080 for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local)
2081 {
2082 TABLE *table=table_ref->table;
2083 uint cnt= table_ref->shared;
2084 List<Item> temp_fields;
2085 ORDER group;
2086 TMP_TABLE_PARAM *tmp_param;
2087
2088 if (ignore)
2089 table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
2090 if (table == main_table) // First table in join
2091 {
2092 if (safe_update_on_fly(thd, join->join_tab, table_ref, all_tables))
2093 {
2094 table_to_update= table; // Update table on the fly
2095 has_vers_fields= check_has_vers_fields(table, *fields);
2096 continue;
2097 }
2098 }
2099 table->prepare_for_position();
2100 join->map2table[table->tablenr]->keep_current_rowid= true;
2101
2102 /*
      Enable the uncacheable flag if we update a view with a check option
      and the check option has a subselect; otherwise the check option
      could be evaluated after the subselect was freed as independent
      (see full_local in JOIN::join_free()).
2107 */
2108 if (table_ref->check_option && !join->select_lex->uncacheable)
2109 {
2110 SELECT_LEX_UNIT *tmp_unit;
2111 SELECT_LEX *sl;
2112 for (tmp_unit= join->select_lex->first_inner_unit();
2113 tmp_unit;
2114 tmp_unit= tmp_unit->next_unit())
2115 {
2116 for (sl= tmp_unit->first_select(); sl; sl= sl->next_select())
2117 {
2118 if (sl->master_unit()->item)
2119 {
2120 join->select_lex->uncacheable|= UNCACHEABLE_CHECKOPTION;
2121 goto loop_end;
2122 }
2123 }
2124 }
2125 }
2126loop_end:
2127
2128 if (table == first_table_for_update && table_ref->check_option)
2129 {
2130 table_map unupdated_tables= table_ref->check_option->used_tables() &
2131 ~first_table_for_update->map;
2132 List_iterator<TABLE_LIST> ti(*leaves);
2133 TABLE_LIST *tbl_ref;
2134 while ((tbl_ref= ti++) && unupdated_tables)
2135 {
2136 if (unupdated_tables & tbl_ref->table->map)
2137 unupdated_tables&= ~tbl_ref->table->map;
2138 else
2139 continue;
2140 if (unupdated_check_opt_tables.push_back(tbl_ref->table))
2141 DBUG_RETURN(1);
2142 }
2143 }
2144
2145 tmp_param= tmp_table_param+cnt;
2146
2147 /*
2148 Create a temporary table to store all fields that are changed for this
2149 table. The first field in the temporary table is a pointer to the
      original row so that we can find and update it. For an updatable
      VIEW, the next few fields are rowids of the tables used in the CHECK
      OPTION condition.
2153 */
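    /*
      Assuming a single table referenced by the CHECK OPTION, a row of this
      temporary table would look roughly like:

        [rowid of updated table][rowid of check-option table][new values ...]
    */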
2154
2155 List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
2156 TABLE *tbl= table;
2157 do
2158 {
2159 LEX_CSTRING field_name;
2160 field_name.str= tbl->alias.c_ptr();
2161 field_name.length= strlen(field_name.str);
2162 /*
        Signal each table (including tables referenced by the WITH CHECK
        OPTION clause) whose row position we will store in the temporary
        table that the position needs to be read first.
2166 */
2167 tbl->prepare_for_position();
2168 join->map2table[tbl->tablenr]->keep_current_rowid= true;
2169
2170 Item_temptable_rowid *item=
2171 new (thd->mem_root) Item_temptable_rowid(tbl);
2172 if (!item)
2173 DBUG_RETURN(1);
2174 item->fix_fields(thd, 0);
2175 if (temp_fields.push_back(item, thd->mem_root))
2176 DBUG_RETURN(1);
2177 } while ((tbl= tbl_it++));
2178
2179 temp_fields.append(fields_for_table[cnt]);
2180
    /* Make a unique key over the first field to avoid duplicate updates */
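    /*
      If the join later produces the same target row more than once, the
      second ha_write_tmp_row() in send_data() fails with a duplicate-key
      error that is deliberately ignored there; this unique key is what
      de-duplicates the buffered updates.
    */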
2182 bzero((char*) &group, sizeof(group));
2183 group.direction= ORDER::ORDER_ASC;
2184 group.item= (Item**) temp_fields.head_ref();
2185
2186 tmp_param->quick_group= 1;
2187 tmp_param->field_count= temp_fields.elements;
2188 tmp_param->func_count= temp_fields.elements - 1;
2189 calc_group_buffer(tmp_param, &group);
2190 /* small table, ignore SQL_BIG_TABLES */
2191 my_bool save_big_tables= thd->variables.big_tables;
2192 thd->variables.big_tables= FALSE;
2193 tmp_tables[cnt]=create_tmp_table(thd, tmp_param, temp_fields,
2194 (ORDER*) &group, 0, 0,
2195 TMP_TABLE_ALL_COLUMNS, HA_POS_ERROR, &empty_clex_str);
2196 thd->variables.big_tables= save_big_tables;
2197 if (!tmp_tables[cnt])
2198 DBUG_RETURN(1);
2199 tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
2200 }
2201 join->tmp_table_keep_current_rowid= TRUE;
2202 DBUG_RETURN(0);
2203}
2204
2205
2206static TABLE *item_rowid_table(Item *item)
2207{
2208 if (item->type() != Item::FUNC_ITEM)
2209 return NULL;
2210 Item_func *func= (Item_func *)item;
2211 if (func->functype() != Item_func::TEMPTABLE_ROWID)
2212 return NULL;
2213 Item_temptable_rowid *itr= (Item_temptable_rowid *)func;
2214 return itr->table;
2215}
2216
2217
2218/*
2219 multi_update stores a rowid and new field values for every updated row in a
2220 temporary table (one temporary table per updated table). These rowids are
2221 obtained via Item_temptable_rowid's by calling handler::position(). But if
2222 the join is resolved via a temp table, rowids cannot be obtained from
  handler::position() in multi_update::send_data(). So they're stored in
2224 the join's temp table (JOIN::add_fields_for_current_rowid()) and here we
2225 replace Item_temptable_rowid's (that would've done handler::position()) with
2226 Item_field's (that will simply take the corresponding field value from the
2227 temp table).
2228*/
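/*
  Roughly, for every buffered rowid column the rewrite below turns

    tmp_table_param[i].items_to_copy: [..., Item_temptable_rowid(tbl), ...]

  into

    [..., Item_field(<rowid column of the join's temp table>), ...]

  so that the rowid written into multi_update's temporary table is taken
  from the join's temp table rather than from handler::position().
*/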
2229int multi_update::prepare2(JOIN *join)
2230{
2231 if (!join->need_tmp || !join->tmp_table_keep_current_rowid)
2232 return 0;
2233
  // a multi-update join uses at most one tmp table; it follows the exec join tabs
2235 JOIN_TAB *tmptab= join->join_tab + join->exec_join_tab_cnt();
2236
2237 for (Item **it= tmptab->tmp_table_param->items_to_copy; *it ; it++)
2238 {
2239 TABLE *tbl= item_rowid_table(*it);
2240 if (!tbl)
2241 continue;
2242 for (uint i= 0; i < table_count; i++)
2243 {
2244 for (Item **it2= tmp_table_param[i].items_to_copy; *it2; it2++)
2245 {
2246 if (item_rowid_table(*it2) != tbl)
2247 continue;
2248 Item *fld= new (thd->mem_root)
2249 Item_field(thd, (*it)->get_tmp_table_field());
2250 if (!fld)
2251 return 1;
2252 fld->set_result_field((*it2)->get_tmp_table_field());
2253 *it2= fld;
2254 }
2255 }
2256 }
2257 return 0;
2258}
2259
2260
2261multi_update::~multi_update()
2262{
2263 TABLE_LIST *table;
2264 for (table= update_tables ; table; table= table->next_local)
2265 {
2266 table->table->no_keyread= table->table->no_cache= 0;
2267 if (ignore)
2268 table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
2269 }
2270
2271 if (tmp_tables)
2272 {
2273 for (uint cnt = 0; cnt < table_count; cnt++)
2274 {
2275 if (tmp_tables[cnt])
2276 {
2277 free_tmp_table(thd, tmp_tables[cnt]);
2278 tmp_table_param[cnt].cleanup();
2279 }
2280 }
2281 }
2282 if (copy_field)
2283 delete [] copy_field;
2284 thd->count_cuted_fields= CHECK_FIELD_IGNORE; // Restore this setting
2285 DBUG_ASSERT(trans_safe || !updated ||
2286 thd->transaction.all.modified_non_trans_table);
2287}
2288
2289
2290int multi_update::send_data(List<Item> &not_used_values)
2291{
2292 TABLE_LIST *cur_table;
2293 DBUG_ENTER("multi_update::send_data");
2294
2295 for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
2296 {
2297 TABLE *table= cur_table->table;
2298 uint offset= cur_table->shared;
2299 /*
      Check if we are using an outer join and didn't find the row, or if
      we have already updated this row in a previous call to this
      function.
2303
2304 The same row may be presented here several times in a join of type
      UPDATE t1,t2 SET t1.a=t2.a
2306
2307 In this case we will do the update for the first found row combination.
      The join algorithm guarantees that we will not find a row in
      t1 several times.
2310 */
2311 if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
2312 continue;
2313
2314 if (table->versioned() && !table->vers_end_field()->is_max())
2315 {
2316 continue;
2317 }
2318
2319 if (table == table_to_update)
2320 {
2321 /*
2322 We can use compare_record() to optimize away updates if
        the table handler is returning all columns OR if
        all updated columns are read.
2325 */
2326 bool can_compare_record;
2327 can_compare_record= records_are_comparable(table);
2328
2329 table->status|= STATUS_UPDATED;
2330 store_record(table,record[1]);
2331
2332 if (fill_record_n_invoke_before_triggers(thd, table,
2333 *fields_for_table[offset],
2334 *values_for_table[offset], 0,
2335 TRG_EVENT_UPDATE))
2336 DBUG_RETURN(1);
2337 /*
2338 Reset the table->auto_increment_field_not_null as it is valid for
2339 only one row.
2340 */
2341 table->auto_increment_field_not_null= FALSE;
2342 found++;
2343 if (!can_compare_record || compare_record(table))
2344 {
2345 int error;
2346
2347 if (table->default_field &&
2348 unlikely(table->update_default_fields(1, ignore)))
2349 DBUG_RETURN(1);
2350
2351 if ((error= cur_table->view_check_option(thd, ignore)) !=
2352 VIEW_CHECK_OK)
2353 {
2354 found--;
2355 if (error == VIEW_CHECK_SKIP)
2356 continue;
2357 else if (unlikely(error == VIEW_CHECK_ERROR))
2358 DBUG_RETURN(1);
2359 }
2360 if (unlikely(!updated++))
2361 {
2362 /*
2363 Inform the main table that we are going to update the table even
2364 while we may be scanning it. This will flush the read cache
2365 if it's used.
2366 */
2367 main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
2368 }
2369 if (unlikely((error=table->file->ha_update_row(table->record[1],
2370 table->record[0]))) &&
2371 error != HA_ERR_RECORD_IS_THE_SAME)
2372 {
2373 updated--;
2374 if (!ignore ||
2375 table->file->is_fatal_error(error, HA_CHECK_ALL))
2376 {
2377 /*
            If (ignore && the error is ignorable) we don't have to
2379 do anything; otherwise...
2380 */
2381 myf flags= 0;
2382
2383 if (table->file->is_fatal_error(error, HA_CHECK_ALL))
2384 flags|= ME_FATALERROR; /* Other handler errors are fatal */
2385
2386 prepare_record_for_error_message(error, table);
2387 table->file->print_error(error,MYF(flags));
2388 DBUG_RETURN(1);
2389 }
2390 }
2391 else
2392 {
2393 if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
2394 {
2395 error= 0;
2396 updated--;
2397 }
2398 else if (has_vers_fields && table->versioned())
2399 {
2400 if (table->versioned(VERS_TIMESTAMP))
2401 {
2402 store_record(table, record[2]);
2403 if (vers_insert_history_row(table))
2404 {
2405 restore_record(table, record[2]);
2406 error= 1;
2407 break;
2408 }
2409 restore_record(table, record[2]);
2410 }
2411 updated_sys_ver++;
2412 }
          /*
            Either a non-transactional or a transactional table got modified;
            the corresponding multi_update flag is raised in each branch below.
          */
2415 if (table->file->has_transactions())
2416 transactional_tables= TRUE;
2417 else
2418 {
2419 trans_safe= FALSE;
2420 thd->transaction.stmt.modified_non_trans_table= TRUE;
2421 }
2422 }
2423 }
2424 if (table->triggers &&
2425 unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
2426 TRG_ACTION_AFTER, TRUE)))
2427 DBUG_RETURN(1);
2428 }
2429 else
2430 {
2431 int error;
2432 TABLE *tmp_table= tmp_tables[offset];
2433 if (copy_funcs(tmp_table_param[offset].items_to_copy, thd))
2434 DBUG_RETURN(1);
2435 /* Store regular updated fields in the row. */
2436 DBUG_ASSERT(1 + unupdated_check_opt_tables.elements ==
2437 tmp_table_param[offset].func_count);
2438 fill_record(thd, tmp_table,
2439 tmp_table->field + 1 + unupdated_check_opt_tables.elements,
2440 *values_for_table[offset], TRUE, FALSE);
2441
      /* Write the row, ignoring duplicate updates to the same row */
2443 error= tmp_table->file->ha_write_tmp_row(tmp_table->record[0]);
2444 found++;
2445 if (unlikely(error))
2446 {
2447 found--;
2448 if (error != HA_ERR_FOUND_DUPP_KEY &&
2449 error != HA_ERR_FOUND_DUPP_UNIQUE)
2450 {
2451 if (create_internal_tmp_table_from_heap(thd, tmp_table,
2452 tmp_table_param[offset].start_recinfo,
2453 &tmp_table_param[offset].recinfo,
2454 error, 1, NULL))
2455 {
2456 do_update= 0;
2457 DBUG_RETURN(1); // Not a table_is_full error
2458 }
2459 found++;
2460 }
2461 }
2462 }
2463 }
2464 DBUG_RETURN(0);
2465}
2466
2467
2468void multi_update::abort_result_set()
2469{
  /* Return if the error was handled, or nothing was updated (no side effects) */
2471 if (unlikely(error_handled ||
2472 (!thd->transaction.stmt.modified_non_trans_table && !updated)))
2473 return;
2474
  /* Something was already updated, so we have to invalidate the cache */
2476 if (updated)
2477 query_cache_invalidate3(thd, update_tables, 1);
2478 /*
    If all tables that have been updated are transaction safe, then just do a
    rollback. If not, attempt to do the remaining updates.
2481 */
2482
2483 if (! trans_safe)
2484 {
2485 DBUG_ASSERT(thd->transaction.stmt.modified_non_trans_table);
2486 if (do_update && table_count > 1)
2487 {
2488 /* Add warning here */
2489 (void) do_updates();
2490 }
2491 }
2492 if (thd->transaction.stmt.modified_non_trans_table)
2493 {
2494 /*
      The query has to be binlogged because there's a modified
      non-transactional table, either from the query's list or via a stored
      routine: bug#13270,23333
2497 */
2498 if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
2499 {
2500 /*
        THD::killed status might not have been set ON at the time the error
        was caught, and if the kill happens later, the killed error is
        written into the replication event.
2504 */
2505 int errcode= query_error_code(thd, thd->killed == NOT_KILLED);
2506 /* the error of binary logging is ignored */
2507 (void)thd->binlog_query(THD::ROW_QUERY_TYPE,
2508 thd->query(), thd->query_length(),
2509 transactional_tables, FALSE, FALSE, errcode);
2510 }
2511 thd->transaction.all.modified_non_trans_table= TRUE;
2512 }
2513 thd->transaction.all.m_unsafe_rollback_flags|=
2514 (thd->transaction.stmt.m_unsafe_rollback_flags & THD_TRANS::DID_WAIT);
2515 DBUG_ASSERT(trans_safe || !updated || thd->transaction.stmt.modified_non_trans_table);
2516}
2517
2518
2519int multi_update::do_updates()
2520{
2521 TABLE_LIST *cur_table;
2522 int local_error= 0;
2523 ha_rows org_updated;
2524 TABLE *table, *tmp_table, *err_table;
2525 List_iterator_fast<TABLE> check_opt_it(unupdated_check_opt_tables);
2526 DBUG_ENTER("multi_update::do_updates");
2527
2528 do_update= 0; // Don't retry this function
2529 if (!found)
2530 DBUG_RETURN(0);
2531
2532 /*
2533 Update read_set to include all fields that virtual columns may depend on.
2534 Usually they're already in the read_set, but if the previous access
2535 method was keyread, only the virtual column itself will be in read_set,
    not its dependencies.
2537 */
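  /*
    Hypothetical example: if an index over a virtual column vc, defined as
    (a + b), was used for keyread, read_set contains vc but not a and b;
    mark_virtual_col() below is what brings the base columns back in.
  */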
2538 while(TABLE *tbl= check_opt_it++)
2539 {
2540 if (tbl->vcol_set)
2541 {
2542 bitmap_clear_all(tbl->vcol_set);
2543 for (Field **vf= tbl->vfield; *vf; vf++)
2544 {
2545 if (bitmap_is_set(tbl->read_set, (*vf)->field_index))
2546 tbl->mark_virtual_col(*vf);
2547 }
2548 }
2549 }
2550
2551 for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
2552 {
2553 bool can_compare_record;
2554 uint offset= cur_table->shared;
2555
2556 table = cur_table->table;
2557 if (table == table_to_update)
2558 continue; // Already updated
2559 org_updated= updated;
2560 tmp_table= tmp_tables[cur_table->shared];
2561 tmp_table->file->extra(HA_EXTRA_CACHE); // Change to read cache
2562 if (unlikely((local_error= table->file->ha_rnd_init(0))))
2563 {
2564 err_table= table;
2565 goto err;
2566 }
2567 table->file->extra(HA_EXTRA_NO_CACHE);
2568 /*
      We have to clear the base record if we have virtual indexed
      blob fields, as some storage engines will access the blob fields
      to calculate the keys to see if they have changed. Without
      clearing, the blob pointers will contain random values, which can
2573 cause a crash.
2574 This is a workaround for engines that access columns not present in
2575 either read or write set.
2576 */
2577 if (table->vfield)
2578 empty_record(table);
2579
2580 has_vers_fields= check_has_vers_fields(table, *fields);
2581
2582 check_opt_it.rewind();
2583 while(TABLE *tbl= check_opt_it++)
2584 {
2585 if (unlikely((local_error= tbl->file->ha_rnd_init(1))))
2586 {
2587 err_table= tbl;
2588 goto err;
2589 }
2590 tbl->file->extra(HA_EXTRA_CACHE);
2591 }
2592
2593 /*
      Set up copy functions to copy fields from the temporary table
2595 */
2596 List_iterator_fast<Item> field_it(*fields_for_table[offset]);
2597 Field **field;
2598 Copy_field *copy_field_ptr= copy_field, *copy_field_end;
2599
2600 /* Skip row pointers */
2601 field= tmp_table->field + 1 + unupdated_check_opt_tables.elements;
2602 for ( ; *field ; field++)
2603 {
2604 Item_field *item= (Item_field* ) field_it++;
2605 (copy_field_ptr++)->set(item->field, *field, 0);
2606 }
2607 copy_field_end=copy_field_ptr;
2608
2609 if (unlikely((local_error= tmp_table->file->ha_rnd_init(1))))
2610 {
2611 err_table= tmp_table;
2612 goto err;
2613 }
2614
2615 can_compare_record= records_are_comparable(table);
2616
2617 for (;;)
2618 {
2619 if (thd->killed && trans_safe)
2620 {
2621 thd->fatal_error();
2622 goto err2;
2623 }
2624 if (unlikely((local_error=
2625 tmp_table->file->ha_rnd_next(tmp_table->record[0]))))
2626 {
2627 if (local_error == HA_ERR_END_OF_FILE)
2628 break;
2629 err_table= tmp_table;
2630 goto err;
2631 }
2632
2633 /* call rnd_pos() using rowids from temporary table */
2634 check_opt_it.rewind();
2635 TABLE *tbl= table;
2636 uint field_num= 0;
2637 do
2638 {
2639 if (unlikely((local_error=
2640 tbl->file->ha_rnd_pos(tbl->record[0],
2641 (uchar *) tmp_table->
2642 field[field_num]->ptr))))
2643 {
2644 err_table= tbl;
2645 goto err;
2646 }
2647 field_num++;
2648 } while ((tbl= check_opt_it++));
2649
2650 if (table->vfield &&
2651 unlikely(table->update_virtual_fields(table->file,
2652 VCOL_UPDATE_INDEXED_FOR_UPDATE)))
2653 goto err2;
2654
2655 table->status|= STATUS_UPDATED;
2656 store_record(table,record[1]);
2657
2658 /* Copy data from temporary table to current table */
2659 for (copy_field_ptr=copy_field;
2660 copy_field_ptr != copy_field_end;
2661 copy_field_ptr++)
2662 {
2663 (*copy_field_ptr->do_copy)(copy_field_ptr);
2664 copy_field_ptr->to_field->set_has_explicit_value();
2665 }
2666
2667 if (table->triggers &&
2668 table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
2669 TRG_ACTION_BEFORE, TRUE))
2670 goto err2;
2671
2672 if (!can_compare_record || compare_record(table))
2673 {
2674 int error;
2675 if (table->default_field &&
2676 (error= table->update_default_fields(1, ignore)))
2677 goto err2;
2678 if (table->vfield &&
2679 table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_WRITE))
2680 goto err2;
2681 if ((error= cur_table->view_check_option(thd, ignore)) !=
2682 VIEW_CHECK_OK)
2683 {
2684 if (error == VIEW_CHECK_SKIP)
2685 continue;
2686 else if (unlikely(error == VIEW_CHECK_ERROR))
2687 {
2688 thd->fatal_error();
2689 goto err2;
2690 }
2691 }
2692 if (has_vers_fields && table->versioned())
2693 table->vers_update_fields();
2694
2695 if (unlikely((local_error=
2696 table->file->ha_update_row(table->record[1],
2697 table->record[0]))) &&
2698 local_error != HA_ERR_RECORD_IS_THE_SAME)
2699 {
2700 if (!ignore ||
2701 table->file->is_fatal_error(local_error, HA_CHECK_ALL))
2702 {
2703 err_table= table;
2704 goto err;
2705 }
2706 }
2707 if (local_error != HA_ERR_RECORD_IS_THE_SAME)
2708 {
2709 updated++;
2710
2711 if (has_vers_fields && table->versioned())
2712 {
2713 if (table->versioned(VERS_TIMESTAMP))
2714 {
2715 store_record(table, record[2]);
2716 if ((local_error= vers_insert_history_row(table)))
2717 {
2718 restore_record(table, record[2]);
2719 err_table = table;
2720 goto err;
2721 }
2722 restore_record(table, record[2]);
2723 }
2724 updated_sys_ver++;
2725 }
2726 }
2727 else
2728 local_error= 0;
2729 }
2730
2731 if (table->triggers &&
2732 unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
2733 TRG_ACTION_AFTER, TRUE)))
2734 goto err2;
2735 }
2736
2737 if (updated != org_updated)
2738 {
2739 if (table->file->has_transactions())
2740 transactional_tables= TRUE;
2741 else
2742 {
2743 trans_safe= FALSE; // Can't do safe rollback
2744 thd->transaction.stmt.modified_non_trans_table= TRUE;
2745 }
2746 }
2747 (void) table->file->ha_rnd_end();
2748 (void) tmp_table->file->ha_rnd_end();
2749 check_opt_it.rewind();
2750 while (TABLE *tbl= check_opt_it++)
2751 tbl->file->ha_rnd_end();
2752
2753 }
2754 DBUG_RETURN(0);
2755
2756err:
2757 {
2758 prepare_record_for_error_message(local_error, err_table);
2759 err_table->file->print_error(local_error,MYF(ME_FATALERROR));
2760 }
2761
2762err2:
2763 if (table->file->inited)
2764 (void) table->file->ha_rnd_end();
2765 if (tmp_table->file->inited)
2766 (void) tmp_table->file->ha_rnd_end();
2767 check_opt_it.rewind();
2768 while (TABLE *tbl= check_opt_it++)
2769 {
2770 if (tbl->file->inited)
2771 (void) tbl->file->ha_rnd_end();
2772 }
2773
2774 if (updated != org_updated)
2775 {
2776 if (table->file->has_transactions())
2777 transactional_tables= TRUE;
2778 else
2779 {
2780 trans_safe= FALSE;
2781 thd->transaction.stmt.modified_non_trans_table= TRUE;
2782 }
2783 }
2784 DBUG_RETURN(1);
2785}
2786
2787
2788/* out: 1 if error, 0 if success */
2789
2790bool multi_update::send_eof()
2791{
2792 char buff[STRING_BUFFER_USUAL_SIZE];
2793 ulonglong id;
2794 killed_state killed_status= NOT_KILLED;
2795 DBUG_ENTER("multi_update::send_eof");
2796 THD_STAGE_INFO(thd, stage_updating_reference_tables);
2797
2798 /*
    Do the updates for the last n - 1 tables; returns 0 if ok.
    The error takes into account the killed status gained in do_updates().
2801 */
2802 int local_error= thd->is_error();
2803 if (likely(!local_error))
2804 local_error = (table_count) ? do_updates() : 0;
2805 /*
    If local_error is not set until after do_updates(), then a kill
    carried out later should not affect binlogging.
2808 */
2809 killed_status= (local_error == 0) ? NOT_KILLED : thd->killed;
2810 THD_STAGE_INFO(thd, stage_end);
2811
  /*
    We must invalidate the query cache before binlog writing and
    ha_autocommit_...
  */
2814
2815 if (updated)
2816 {
2817 query_cache_invalidate3(thd, update_tables, 1);
2818 }
2819 /*
    Write the SQL statement to the binlog if we updated
    rows and we succeeded, or if we updated some
    non-transactional tables.

    The query has to be binlogged because there's a modified
    non-transactional table, either from the query's list or via a stored
    routine: bug#13270,23333
2826 */
2827
2828 if (thd->transaction.stmt.modified_non_trans_table)
2829 thd->transaction.all.modified_non_trans_table= TRUE;
2830 thd->transaction.all.m_unsafe_rollback_flags|=
2831 (thd->transaction.stmt.m_unsafe_rollback_flags & THD_TRANS::DID_WAIT);
2832
2833 if (likely(local_error == 0 ||
2834 thd->transaction.stmt.modified_non_trans_table))
2835 {
2836 if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
2837 {
2838 int errcode= 0;
2839 if (likely(local_error == 0))
2840 thd->clear_error();
2841 else
2842 errcode= query_error_code(thd, killed_status == NOT_KILLED);
2843
2844 bool force_stmt= false;
2845 for (TABLE *table= all_tables->table; table; table= table->next)
2846 {
2847 if (table->versioned(VERS_TRX_ID))
2848 {
2849 force_stmt= true;
2850 break;
2851 }
2852 }
2853 ScopedStatementReplication scoped_stmt_rpl(force_stmt ? thd : NULL);
2854
2855 if (thd->binlog_query(THD::ROW_QUERY_TYPE, thd->query(),
2856 thd->query_length(), transactional_tables, FALSE,
2857 FALSE, errcode))
2858 {
2859 local_error= 1; // Rollback update
2860 }
2861 }
2862 }
2863 DBUG_ASSERT(trans_safe || !updated ||
2864 thd->transaction.stmt.modified_non_trans_table);
2865
2866 if (likely(local_error != 0))
2867 error_handled= TRUE; // to force early leave from ::abort_result_set()
2868
2869 if (unlikely(local_error > 0)) // if the above log write did not fail ...
2870 {
2871 /* Safety: If we haven't got an error before (can happen in do_updates) */
2872 my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
2873 MYF(0));
2874 DBUG_RETURN(TRUE);
2875 }
2876
2877 if (!thd->lex->analyze_stmt)
2878 {
2879 id= thd->arg_of_last_insert_id_function ?
2880 thd->first_successful_insert_id_in_prev_stmt : 0;
2881 my_snprintf(buff, sizeof(buff), ER_THD(thd, ER_UPDATE_INFO),
2882 (ulong) found, (ulong) updated, (ulong) thd->cuted_fields);
2883 ::my_ok(thd, (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated,
2884 id, buff);
2885 }
2886 DBUG_RETURN(FALSE);
2887}
2888