1 | /***************************************************************************** |
2 | |
3 | Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. |
4 | Copyright (c) 2015, 2018, MariaDB Corporation. |
5 | |
6 | This program is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free Software |
8 | Foundation; version 2 of the License. |
9 | |
10 | This program is distributed in the hope that it will be useful, but WITHOUT |
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
12 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. |
13 | |
14 | You should have received a copy of the GNU General Public License along with |
15 | this program; if not, write to the Free Software Foundation, Inc., |
16 | 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA |
17 | |
18 | *****************************************************************************/ |
19 | |
20 | /**************************************************//** |
21 | @file row/row0upd.cc |
22 | Update of a row |
23 | |
24 | Created 12/27/1996 Heikki Tuuri |
25 | *******************************************************/ |
26 | |
27 | #include "ha_prototypes.h" |
28 | |
29 | #include "row0upd.h" |
30 | #include "dict0dict.h" |
31 | #include "dict0mem.h" |
32 | #include "trx0undo.h" |
33 | #include "rem0rec.h" |
34 | #include "dict0boot.h" |
35 | #include "dict0crea.h" |
36 | #include "mach0data.h" |
37 | #include "btr0btr.h" |
38 | #include "btr0cur.h" |
39 | #include "que0que.h" |
40 | #include "row0ext.h" |
41 | #include "row0ins.h" |
42 | #include "row0log.h" |
43 | #include "row0row.h" |
44 | #include "row0sel.h" |
45 | #include "rem0cmp.h" |
46 | #include "lock0lock.h" |
47 | #include "log0log.h" |
48 | #include "pars0sym.h" |
49 | #include "eval0eval.h" |
50 | #include "buf0lru.h" |
51 | #include "trx0rec.h" |
52 | #include "fts0fts.h" |
53 | #include "fts0types.h" |
54 | #include <algorithm> |
55 | #include <mysql/plugin.h> |
56 | #include <mysql/service_wsrep.h> |
57 | |
58 | /* What kind of latch and lock can we assume when the control comes to |
59 | ------------------------------------------------------------------- |
60 | an update node? |
61 | -------------- |
62 | Efficiency of massive updates would require keeping an x-latch on a |
63 | clustered index page through many updates, and not setting an explicit |
64 | x-lock on clustered index records, as they anyway will get an implicit |
65 | x-lock when they are updated. A problem is that the read nodes in the |
66 | graph should know that they must keep the latch when passing the control |
67 | up to the update node, and not set any record lock on the record which |
68 | will be updated. Another problem occurs if the execution is stopped, |
69 | as the kernel switches to another query thread, or the transaction must |
70 | wait for a lock. Then we should be able to release the latch and, maybe, |
71 | acquire an explicit x-lock on the record. |
72 | Because this seems too complicated, we conclude that the less |
73 | efficient solution of releasing all the latches when the control is |
74 | transferred to another node, and acquiring explicit x-locks, is better. */ |
75 | |
76 | /* How is a delete performed? If there is a delete without an |
77 | explicit cursor, i.e., a searched delete, there are at least |
78 | two different situations: |
79 | the implicit select cursor may run on (1) the clustered index or |
80 | on (2) a secondary index. The delete is performed by setting |
81 | the delete bit in the record and substituting the id of the |
82 | deleting transaction for the original trx id, and substituting a |
83 | new roll ptr for previous roll ptr. The old trx id and roll ptr |
84 | are saved in the undo log record. Thus, no physical changes occur |
85 | in the index tree structure at the time of the delete. Only |
86 | when the undo log is purged will the index records be physically |
87 | deleted from the index trees. |
88 | |
89 | The query graph executing a searched delete would consist of |
90 | a delete node which has as a subtree a select subgraph. |
91 | The select subgraph should return a (persistent) cursor |
92 | in the clustered index, placed on a page which is x-latched. |
93 | The delete node should look for all secondary index records for |
94 | this clustered index entry and mark them as deleted. When is |
95 | the x-latch freed? The most efficient way for performing a |
96 | searched delete is obviously to keep the x-latch for several |
97 | steps of query graph execution. */ |
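/* As a rough illustration of the delete described above (a pseudo-code
sketch only, not a function of this file; DB_TRX_ID and DB_ROLL_PTR are
the system columns of the clustered index record):

	set the delete-mark bit of the record;
	write the old DB_TRX_ID and DB_ROLL_PTR into a new undo log record;
	DB_TRX_ID   := id of the deleting transaction;
	DB_ROLL_PTR := pointer to that undo log record;

The record itself remains in the index tree until the purge of the undo
log record removes it physically. */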
98 | |
99 | /************************************************************************* |
100 | IMPORTANT NOTE: Any operation that generates redo MUST check that there |
101 | is enough space in the redo log before performing that operation. This |
102 | is done by calling log_free_check(). The reason for checking the |
103 | availability of the redo log space before the start of the operation is |
104 | that we MUST not hold any synchronization objects when performing the |
105 | check. |
106 | If you make a change in this module make sure that no codepath is |
107 | introduced where a call to log_free_check() is bypassed. */ |
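/* A minimal sketch of the intended call pattern (illustration only; the
surrounding code varies per caller, but the point is that log_free_check()
is called before any latches are acquired):

	log_free_check();	// reserve redo log space, holding no latches
	mtr_start(&mtr);	// only then start the mini-transaction,
	...			// latch pages and generate redo inside it
	mtr_commit(&mtr);
*/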
108 | |
109 | /***********************************************************//** |
110 | Checks if an update vector changes some of the first ordering fields of an |
111 | index record. This is only used in foreign key checks and we can assume |
112 | that the index does not contain column prefixes. |
113 | @return TRUE if changes */ |
114 | static |
115 | ibool |
116 | row_upd_changes_first_fields_binary( |
117 | /*================================*/ |
118 | dtuple_t* entry, /*!< in: old value of index entry */ |
119 | dict_index_t* index, /*!< in: index of entry */ |
120 | const upd_t* update, /*!< in: update vector for the row */ |
121 | ulint n); /*!< in: how many first fields to check */ |
122 | |
123 | /*********************************************************************//** |
124 | Checks if the index is currently mentioned as a referenced index in a foreign |
125 | key constraint. |
126 | |
127 | NOTE that since we do not hold dict_operation_lock when leaving the |
128 | function, it may be that the referencing table has been dropped when |
129 | we leave this function: this function is only for heuristic use! |
130 | |
131 | @return TRUE if referenced */ |
132 | static |
133 | ibool |
134 | row_upd_index_is_referenced( |
135 | /*========================*/ |
136 | dict_index_t* index, /*!< in: index */ |
137 | trx_t* trx) /*!< in: transaction */ |
138 | { |
139 | dict_table_t* table = index->table; |
140 | ibool froze_data_dict = FALSE; |
141 | ibool is_referenced = FALSE; |
142 | |
143 | if (table->referenced_set.empty()) { |
144 | return(FALSE); |
145 | } |
146 | |
147 | if (trx->dict_operation_lock_mode == 0) { |
148 | row_mysql_freeze_data_dictionary(trx); |
149 | froze_data_dict = TRUE; |
150 | } |
151 | |
152 | dict_foreign_set::iterator it |
153 | = std::find_if(table->referenced_set.begin(), |
154 | table->referenced_set.end(), |
155 | dict_foreign_with_index(index)); |
156 | |
157 | is_referenced = (it != table->referenced_set.end()); |
158 | |
159 | if (froze_data_dict) { |
160 | row_mysql_unfreeze_data_dictionary(trx); |
161 | } |
162 | |
163 | return(is_referenced); |
164 | } |
165 | |
166 | #ifdef WITH_WSREP |
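/*********************************************************************//**
Checks if the index is currently the referencing (foreign) index of a
foreign key constraint defined on its table.

NOTE that, like row_upd_index_is_referenced(), this is only a heuristic,
because dict_operation_lock is not held when the function returns.

@return TRUE if the index is the foreign index of some constraint */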
167 | static |
168 | ibool |
169 | wsrep_row_upd_index_is_foreign( |
170 | /*========================*/ |
171 | dict_index_t* index, /*!< in: index */ |
172 | trx_t* trx) /*!< in: transaction */ |
173 | { |
174 | dict_table_t* table = index->table; |
175 | ibool froze_data_dict = FALSE; |
176 | ibool is_referenced = FALSE; |
177 | |
178 | if (table->foreign_set.empty()) { |
179 | return(FALSE); |
180 | } |
181 | |
182 | if (trx->dict_operation_lock_mode == 0) { |
183 | row_mysql_freeze_data_dictionary(trx); |
184 | froze_data_dict = TRUE; |
185 | } |
186 | |
187 | dict_foreign_set::iterator it |
188 | = std::find_if(table->foreign_set.begin(), |
189 | table->foreign_set.end(), |
190 | dict_foreign_with_foreign_index(index)); |
191 | |
192 | is_referenced = (it != table->foreign_set.end()); |
193 | |
194 | if (froze_data_dict) { |
195 | row_mysql_unfreeze_data_dictionary(trx); |
196 | } |
197 | |
198 | return(is_referenced); |
199 | } |
200 | #endif /* WITH_WSREP */ |
201 | |
202 | /*********************************************************************//** |
203 | Checks if possible foreign key constraints hold after a delete of the record |
204 | under pcur. |
205 | |
206 | NOTE that this function will temporarily commit mtr and lose the |
207 | pcur position! |
208 | |
209 | @return DB_SUCCESS or an error code */ |
210 | static MY_ATTRIBUTE((nonnull, warn_unused_result)) |
211 | dberr_t |
212 | row_upd_check_references_constraints( |
213 | /*=================================*/ |
214 | upd_node_t* node, /*!< in: row update node */ |
215 | btr_pcur_t* pcur, /*!< in: cursor positioned on a record; NOTE: the |
216 | cursor position is lost in this function! */ |
217 | dict_table_t* table, /*!< in: table in question */ |
218 | dict_index_t* index, /*!< in: index of the cursor */ |
219 | ulint* offsets,/*!< in/out: rec_get_offsets(pcur.rec, index) */ |
220 | que_thr_t* thr, /*!< in: query thread */ |
221 | mtr_t* mtr) /*!< in: mtr */ |
222 | { |
223 | dict_foreign_t* foreign; |
224 | mem_heap_t* heap; |
225 | dtuple_t* entry; |
226 | trx_t* trx; |
227 | const rec_t* rec; |
228 | ulint n_ext; |
229 | dberr_t err; |
230 | ibool got_s_lock = FALSE; |
231 | |
232 | DBUG_ENTER("row_upd_check_references_constraints"); |
233 | |
234 | if (table->referenced_set.empty()) { |
235 | DBUG_RETURN(DB_SUCCESS); |
236 | } |
237 | |
238 | trx = thr_get_trx(thr); |
239 | |
240 | rec = btr_pcur_get_rec(pcur); |
241 | ut_ad(rec_offs_validate(rec, index, offsets)); |
242 | |
243 | heap = mem_heap_create(500); |
244 | |
245 | entry = row_rec_to_index_entry(rec, index, offsets, &n_ext, heap); |
246 | |
247 | mtr_commit(mtr); |
248 | |
249 | DEBUG_SYNC_C("foreign_constraint_check_for_update"); |
250 | |
251 | mtr->start(); |
252 | |
253 | if (trx->dict_operation_lock_mode == 0) { |
254 | got_s_lock = TRUE; |
255 | |
256 | row_mysql_freeze_data_dictionary(trx); |
257 | } |
258 | |
259 | DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd, |
260 | "foreign_constraint_check_for_insert"); |
261 | |
262 | for (dict_foreign_set::iterator it = table->referenced_set.begin(); |
263 | it != table->referenced_set.end(); |
264 | ++it) { |
265 | |
266 | foreign = *it; |
267 | |
268 | /* Note that we may have an update which updates the index |
269 | record, but does NOT update the first fields which are |
270 | referenced in a foreign key constraint. Then the update does |
271 | NOT break the constraint. */ |
272 | |
273 | if (foreign->referenced_index == index |
274 | && (node->is_delete |
275 | || row_upd_changes_first_fields_binary( |
276 | entry, index, node->update, |
277 | foreign->n_fields))) { |
278 | dict_table_t* foreign_table = foreign->foreign_table; |
279 | |
280 | dict_table_t* ref_table = NULL; |
281 | |
282 | if (foreign_table == NULL) { |
283 | |
284 | ref_table = dict_table_open_on_name( |
285 | foreign->foreign_table_name_lookup, |
286 | FALSE, FALSE, DICT_ERR_IGNORE_NONE); |
287 | } |
288 | |
289 | /* dict_operation_lock is held both here |
290 | (UPDATE or DELETE with FOREIGN KEY) and by TRUNCATE |
291 | TABLE operations. |
292 | If a TRUNCATE TABLE operation is in progress, |
293 | there can be 2 possible conditions: |
294 | 1) row_truncate_table_for_mysql() is not yet called. |
295 | 2) Truncate releases dict_operation_lock |
296 | during eviction of pages from buffer pool |
297 | for a file-per-table tablespace. |
298 | |
299 | In case of (1), truncate will wait for FK operation |
300 | to complete. |
301 | In case of (2), truncate will be rolled forward even |
302 | if it is interrupted. So if the foreign table is |
303 | undergoing a truncate, ignore the FK check. */ |
304 | |
305 | if (foreign_table) { |
306 | if (foreign_table->space |
307 | && foreign_table->space |
308 | ->is_being_truncated) { |
309 | continue; |
310 | } |
311 | |
312 | foreign_table->inc_fk_checks(); |
313 | } |
314 | |
315 | /* NOTE that if the thread ends up waiting for a lock |
316 | we will release dict_operation_lock temporarily! |
317 | But the inc_fk_checks() protects foreign_table from |
318 | being dropped while the check is running. */ |
319 | |
320 | err = row_ins_check_foreign_constraint( |
321 | FALSE, foreign, table, entry, thr); |
322 | |
323 | if (foreign_table) { |
324 | foreign_table->dec_fk_checks(); |
325 | } |
326 | if (ref_table != NULL) { |
327 | dict_table_close(ref_table, FALSE, FALSE); |
328 | } |
329 | |
330 | if (err != DB_SUCCESS) { |
331 | goto func_exit; |
332 | } |
333 | } |
334 | } |
335 | |
336 | err = DB_SUCCESS; |
337 | |
338 | func_exit: |
339 | if (got_s_lock) { |
340 | row_mysql_unfreeze_data_dictionary(trx); |
341 | } |
342 | |
343 | mem_heap_free(heap); |
344 | |
345 | DEBUG_SYNC_C("foreign_constraint_check_for_update_done"); |
346 | DBUG_RETURN(err); |
347 | } |
348 | |
349 | #ifdef WITH_WSREP |
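/*********************************************************************//**
Checks if the foreign key constraints, in which this table is the
referencing (child) table, hold after an update or delete of the record
under pcur.

NOTE that this function will temporarily commit mtr and lose the
pcur position!

@return DB_SUCCESS or an error code */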
350 | static |
351 | dberr_t |
352 | wsrep_row_upd_check_foreign_constraints( |
353 | /*=================================*/ |
354 | upd_node_t* node, /*!< in: row update node */ |
355 | btr_pcur_t* pcur, /*!< in: cursor positioned on a record; NOTE: the |
356 | cursor position is lost in this function! */ |
357 | dict_table_t* table, /*!< in: table in question */ |
358 | dict_index_t* index, /*!< in: index of the cursor */ |
359 | ulint* offsets,/*!< in/out: rec_get_offsets(pcur.rec, index) */ |
360 | que_thr_t* thr, /*!< in: query thread */ |
361 | mtr_t* mtr) /*!< in: mtr */ |
362 | { |
363 | dict_foreign_t* foreign; |
364 | mem_heap_t* heap; |
365 | dtuple_t* entry; |
366 | trx_t* trx; |
367 | const rec_t* rec; |
368 | ulint n_ext; |
369 | dberr_t err; |
370 | ibool got_s_lock = FALSE; |
371 | ibool opened = FALSE; |
372 | |
373 | if (table->foreign_set.empty()) { |
374 | return(DB_SUCCESS); |
375 | } |
376 | |
377 | trx = thr_get_trx(thr); |
378 | |
379 | /* TODO: make native slave thread bail out here */ |
380 | |
381 | rec = btr_pcur_get_rec(pcur); |
382 | ut_ad(rec_offs_validate(rec, index, offsets)); |
383 | |
384 | heap = mem_heap_create(500); |
385 | |
386 | entry = row_rec_to_index_entry(rec, index, offsets, |
387 | &n_ext, heap); |
388 | |
389 | mtr_commit(mtr); |
390 | |
391 | mtr_start(mtr); |
392 | |
393 | if (trx->dict_operation_lock_mode == 0) { |
394 | got_s_lock = TRUE; |
395 | |
396 | row_mysql_freeze_data_dictionary(trx); |
397 | } |
398 | |
399 | for (dict_foreign_set::iterator it = table->foreign_set.begin(); |
400 | it != table->foreign_set.end(); |
401 | ++it) { |
402 | |
403 | foreign = *it; |
404 | /* Note that we may have an update which updates the index |
405 | record, but does NOT update the first fields which are |
406 | referenced in a foreign key constraint. Then the update does |
407 | NOT break the constraint. */ |
408 | |
409 | if (foreign->foreign_index == index |
410 | && (node->is_delete |
411 | || row_upd_changes_first_fields_binary( |
412 | entry, index, node->update, |
413 | foreign->n_fields))) { |
414 | |
415 | if (foreign->referenced_table == NULL) { |
416 | foreign->referenced_table = |
417 | dict_table_open_on_name( |
418 | foreign->referenced_table_name_lookup, |
419 | FALSE, FALSE, DICT_ERR_IGNORE_NONE); |
420 | opened = (foreign->referenced_table) ? TRUE : FALSE; |
421 | } |
422 | |
423 | /* NOTE that if the thread ends up waiting for a lock |
424 | we will release dict_operation_lock temporarily! |
425 | But the counter on the table protects 'foreign' from |
426 | being dropped while the check is running. */ |
427 | |
428 | err = row_ins_check_foreign_constraint( |
429 | TRUE, foreign, table, entry, thr); |
430 | |
431 | if (foreign->referenced_table) { |
432 | if (opened == TRUE) { |
433 | dict_table_close(foreign->referenced_table, FALSE, FALSE); |
434 | opened = FALSE; |
435 | } |
436 | } |
437 | |
438 | if (err != DB_SUCCESS) { |
439 | goto func_exit; |
440 | } |
441 | } |
442 | } |
443 | |
444 | err = DB_SUCCESS; |
445 | func_exit: |
446 | if (got_s_lock) { |
447 | row_mysql_unfreeze_data_dictionary(trx); |
448 | } |
449 | |
450 | mem_heap_free(heap); |
451 | |
452 | return(err); |
453 | } |
454 | |
455 | /** Determine if a FOREIGN KEY constraint needs to be processed. |
456 | @param[in] node query node |
457 | @param[in] trx transaction |
458 | @return whether the node cannot be ignored */ |
459 | |
460 | inline bool wsrep_must_process_fk(const upd_node_t* node, const trx_t* trx) |
461 | { |
462 | if (!wsrep_on_trx(trx)) { |
463 | return false; |
464 | } |
465 | return que_node_get_type(node->common.parent) != QUE_NODE_UPDATE |
466 | || static_cast<upd_node_t*>(node->common.parent)->cascade_node |
467 | != node; |
468 | } |
469 | #endif /* WITH_WSREP */ |
470 | |
471 | /*********************************************************************//** |
472 | Creates an update node for a query graph. |
473 | @return own: update node */ |
474 | upd_node_t* |
475 | upd_node_create( |
476 | /*============*/ |
477 | mem_heap_t* heap) /*!< in: mem heap where created */ |
478 | { |
479 | upd_node_t* node; |
480 | |
481 | node = static_cast<upd_node_t*>( |
482 | mem_heap_zalloc(heap, sizeof(upd_node_t))); |
483 | |
484 | node->common.type = QUE_NODE_UPDATE; |
485 | node->state = UPD_NODE_UPDATE_CLUSTERED; |
486 | node->heap = mem_heap_create(128); |
487 | node->magic_n = UPD_NODE_MAGIC_N; |
488 | |
489 | return(node); |
490 | } |
491 | |
492 | /*********************************************************************//** |
493 | Updates the trx id and roll ptr field in a clustered index record in database |
494 | recovery. */ |
495 | void |
496 | row_upd_rec_sys_fields_in_recovery( |
497 | /*===============================*/ |
498 | rec_t* rec, /*!< in/out: record */ |
499 | page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */ |
500 | const ulint* offsets,/*!< in: array returned by rec_get_offsets() */ |
501 | ulint pos, /*!< in: TRX_ID position in rec */ |
502 | trx_id_t trx_id, /*!< in: transaction id */ |
503 | roll_ptr_t roll_ptr)/*!< in: roll ptr of the undo log record */ |
504 | { |
505 | ut_ad(rec_offs_validate(rec, NULL, offsets)); |
506 | |
507 | if (page_zip) { |
508 | page_zip_write_trx_id_and_roll_ptr( |
509 | page_zip, rec, offsets, pos, trx_id, roll_ptr); |
510 | } else { |
511 | byte* field; |
512 | ulint len; |
513 | |
514 | field = rec_get_nth_field(rec, offsets, pos, &len); |
515 | ut_ad(len == DATA_TRX_ID_LEN); |
516 | compile_time_assert(DATA_TRX_ID + 1 == DATA_ROLL_PTR); |
517 | trx_write_trx_id(field, trx_id); |
518 | trx_write_roll_ptr(field + DATA_TRX_ID_LEN, roll_ptr); |
519 | } |
520 | } |
521 | |
522 | /*********************************************************************//** |
523 | Sets the trx id or roll ptr field of a clustered index entry. */ |
524 | void |
525 | row_upd_index_entry_sys_field( |
526 | /*==========================*/ |
527 | dtuple_t* entry, /*!< in/out: index entry, where the memory |
528 | buffers for sys fields are already allocated: |
529 | the function just copies the new values to |
530 | them */ |
531 | dict_index_t* index, /*!< in: clustered index */ |
532 | ulint type, /*!< in: DATA_TRX_ID or DATA_ROLL_PTR */ |
533 | ib_uint64_t val) /*!< in: value to write */ |
534 | { |
535 | dfield_t* dfield; |
536 | byte* field; |
537 | ulint pos; |
538 | |
539 | ut_ad(dict_index_is_clust(index)); |
540 | |
541 | pos = dict_index_get_sys_col_pos(index, type); |
542 | |
543 | dfield = dtuple_get_nth_field(entry, pos); |
544 | field = static_cast<byte*>(dfield_get_data(dfield)); |
545 | |
546 | if (type == DATA_TRX_ID) { |
547 | ut_ad(val > 0); |
548 | trx_write_trx_id(field, val); |
549 | } else { |
550 | ut_ad(type == DATA_ROLL_PTR); |
551 | trx_write_roll_ptr(field, val); |
552 | } |
553 | } |
554 | |
555 | /***********************************************************//** |
556 | Returns TRUE if row update changes size of some field in index or if some |
557 | field to be updated is stored externally in rec or update. |
558 | @return TRUE if the update changes the size of some field in index or |
559 | the field is external in rec or update */ |
560 | ibool |
561 | row_upd_changes_field_size_or_external( |
562 | /*===================================*/ |
563 | dict_index_t* index, /*!< in: index */ |
564 | const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */ |
565 | const upd_t* update) /*!< in: update vector */ |
566 | { |
567 | const upd_field_t* upd_field; |
568 | const dfield_t* new_val; |
569 | ulint old_len; |
570 | ulint new_len; |
571 | ulint n_fields; |
572 | ulint i; |
573 | |
574 | ut_ad(rec_offs_validate(NULL, index, offsets)); |
575 | ut_ad(!index->table->skip_alter_undo); |
576 | n_fields = upd_get_n_fields(update); |
577 | |
578 | for (i = 0; i < n_fields; i++) { |
579 | upd_field = upd_get_nth_field(update, i); |
580 | |
581 | /* We should ignore virtual field if the index is not |
582 | a virtual index */ |
583 | if (upd_fld_is_virtual_col(upd_field) |
584 | && dict_index_has_virtual(index) != DICT_VIRTUAL) { |
585 | continue; |
586 | } |
587 | |
588 | new_val = &(upd_field->new_val); |
589 | if (dfield_is_ext(new_val)) { |
590 | return(TRUE); |
591 | } |
592 | new_len = dfield_get_len(new_val); |
593 | ut_ad(new_len != UNIV_SQL_DEFAULT); |
594 | |
595 | if (dfield_is_null(new_val) && !rec_offs_comp(offsets)) { |
596 | /* A bug fixed on Dec 31st, 2004: we looked at the |
597 | SQL NULL size from the wrong field! We may backport |
598 | this fix also to 4.0. The merge to 5.0 will be made |
599 | manually immediately after we commit this to 4.1. */ |
600 | |
601 | new_len = dict_col_get_sql_null_size( |
602 | dict_index_get_nth_col(index, |
603 | upd_field->field_no), |
604 | 0); |
605 | } |
606 | |
607 | if (rec_offs_nth_default(offsets, upd_field->field_no)) { |
608 | /* This is an instantly added column that is |
609 | at the initial default value. */ |
610 | return(TRUE); |
611 | } |
612 | |
613 | if (rec_offs_comp(offsets) |
614 | && rec_offs_nth_sql_null(offsets, upd_field->field_no)) { |
615 | /* Note that in the compact table format, for a |
616 | variable length field, an SQL NULL will use zero |
617 | bytes in the offset array at the start of the physical |
618 | record, but a zero-length value (empty string) will |
619 | use one byte! Thus, we cannot use update-in-place |
620 | if we update an SQL NULL varchar to an empty string! */ |
621 | |
622 | old_len = UNIV_SQL_NULL; |
623 | } else { |
624 | old_len = rec_offs_nth_size(offsets, |
625 | upd_field->field_no); |
626 | } |
627 | |
628 | if (old_len != new_len |
629 | || rec_offs_nth_extern(offsets, upd_field->field_no)) { |
630 | |
631 | return(TRUE); |
632 | } |
633 | } |
634 | |
635 | return(FALSE); |
636 | } |
637 | |
638 | /***********************************************************//** |
639 | Returns true if row update contains disowned external fields. |
640 | @return true if the update contains disowned external fields. */ |
641 | bool |
642 | row_upd_changes_disowned_external( |
643 | /*==============================*/ |
644 | const upd_t* update) /*!< in: update vector */ |
645 | { |
646 | const upd_field_t* upd_field; |
647 | const dfield_t* new_val; |
648 | ulint new_len; |
649 | ulint n_fields; |
650 | ulint i; |
651 | |
652 | n_fields = upd_get_n_fields(update); |
653 | |
654 | for (i = 0; i < n_fields; i++) { |
655 | const byte* field_ref; |
656 | |
657 | upd_field = upd_get_nth_field(update, i); |
658 | new_val = &(upd_field->new_val); |
659 | new_len = dfield_get_len(new_val); |
660 | |
661 | if (!dfield_is_ext(new_val)) { |
662 | continue; |
663 | } |
664 | |
665 | ut_ad(new_len >= BTR_EXTERN_FIELD_REF_SIZE); |
666 | |
667 | field_ref = static_cast<const byte*>(dfield_get_data(new_val)) |
668 | + new_len - BTR_EXTERN_FIELD_REF_SIZE; |
669 | |
670 | if (field_ref[BTR_EXTERN_LEN] & BTR_EXTERN_OWNER_FLAG) { |
671 | return(true); |
672 | } |
673 | } |
674 | |
675 | return(false); |
676 | } |
677 | |
678 | /***********************************************************//** |
679 | Replaces the new column values stored in the update vector in the |
680 | given record. No field size changes are allowed. This function is |
681 | usually invoked on a clustered index. The only use case for a |
682 | secondary index is row_ins_sec_index_entry_by_modify() or its |
683 | counterpart in ibuf_insert_to_index_page(). */ |
684 | void |
685 | row_upd_rec_in_place( |
686 | /*=================*/ |
687 | rec_t* rec, /*!< in/out: record where replaced */ |
688 | dict_index_t* index, /*!< in: the index the record belongs to */ |
689 | const ulint* offsets,/*!< in: array returned by rec_get_offsets() */ |
690 | const upd_t* update, /*!< in: update vector */ |
691 | page_zip_des_t* page_zip)/*!< in: compressed page with enough space |
692 | available, or NULL */ |
693 | { |
694 | const upd_field_t* upd_field; |
695 | const dfield_t* new_val; |
696 | ulint n_fields; |
697 | ulint i; |
698 | |
699 | ut_ad(rec_offs_validate(rec, index, offsets)); |
700 | ut_ad(!index->table->skip_alter_undo); |
701 | |
702 | if (rec_offs_comp(offsets)) { |
703 | #ifdef UNIV_DEBUG |
704 | switch (rec_get_status(rec)) { |
705 | case REC_STATUS_ORDINARY: |
706 | break; |
707 | case REC_STATUS_COLUMNS_ADDED: |
708 | ut_ad(index->is_instant()); |
709 | break; |
710 | case REC_STATUS_NODE_PTR: |
711 | if (index->is_dummy |
712 | && fil_page_get_type(page_align(rec)) |
713 | == FIL_PAGE_RTREE) { |
714 | /* The function rtr_update_mbr_field_in_place() |
715 | is generating MLOG_COMP_REC_UPDATE_IN_PLACE |
716 | and MLOG_REC_UPDATE_IN_PLACE records for |
717 | node pointer pages. */ |
718 | break; |
719 | } |
720 | /* fall through */ |
721 | case REC_STATUS_INFIMUM: |
722 | case REC_STATUS_SUPREMUM: |
723 | ut_ad(!"wrong record status in update"); |
724 | } |
725 | #endif /* UNIV_DEBUG */ |
726 | |
727 | rec_set_info_bits_new(rec, update->info_bits); |
728 | } else { |
729 | rec_set_info_bits_old(rec, update->info_bits); |
730 | } |
731 | |
732 | n_fields = upd_get_n_fields(update); |
733 | |
734 | for (i = 0; i < n_fields; i++) { |
735 | upd_field = upd_get_nth_field(update, i); |
736 | |
737 | /* No need to update virtual columns for non-virtual index */ |
738 | if (upd_fld_is_virtual_col(upd_field) |
739 | && !dict_index_has_virtual(index)) { |
740 | continue; |
741 | } |
742 | |
743 | new_val = &(upd_field->new_val); |
744 | ut_ad(!dfield_is_ext(new_val) == |
745 | !rec_offs_nth_extern(offsets, upd_field->field_no)); |
746 | |
747 | rec_set_nth_field(rec, offsets, upd_field->field_no, |
748 | dfield_get_data(new_val), |
749 | dfield_get_len(new_val)); |
750 | } |
751 | |
752 | if (page_zip) { |
753 | page_zip_write_rec(page_zip, rec, index, offsets, 0); |
754 | } |
755 | } |
756 | |
757 | /*********************************************************************//** |
758 | Writes into the redo log the values of trx id and roll ptr and enough info |
759 | to determine their positions within a clustered index record. |
760 | @return new pointer to mlog */ |
761 | byte* |
762 | row_upd_write_sys_vals_to_log( |
763 | /*==========================*/ |
764 | dict_index_t* index, /*!< in: clustered index */ |
765 | trx_id_t trx_id, /*!< in: transaction id */ |
766 | roll_ptr_t roll_ptr,/*!< in: roll ptr of the undo log record */ |
767 | byte* log_ptr,/*!< pointer to a buffer of size > 20 opened |
768 | in mlog */ |
769 | mtr_t* mtr MY_ATTRIBUTE((unused))) /*!< in: mtr */ |
770 | { |
771 | ut_ad(dict_index_is_clust(index)); |
772 | ut_ad(mtr); |
773 | |
774 | log_ptr += mach_write_compressed(log_ptr, |
775 | dict_index_get_sys_col_pos( |
776 | index, DATA_TRX_ID)); |
777 | |
778 | trx_write_roll_ptr(log_ptr, roll_ptr); |
779 | log_ptr += DATA_ROLL_PTR_LEN; |
780 | |
781 | log_ptr += mach_u64_write_compressed(log_ptr, trx_id); |
782 | |
783 | return(log_ptr); |
784 | } |
785 | |
786 | /*********************************************************************//** |
787 | Parses the log data of system field values. |
788 | @return log data end or NULL */ |
789 | byte* |
790 | row_upd_parse_sys_vals( |
791 | /*===================*/ |
792 | const byte* ptr, /*!< in: buffer */ |
793 | const byte* end_ptr,/*!< in: buffer end */ |
794 | ulint* pos, /*!< out: TRX_ID position in record */ |
795 | trx_id_t* trx_id, /*!< out: trx id */ |
796 | roll_ptr_t* roll_ptr)/*!< out: roll ptr */ |
797 | { |
798 | *pos = mach_parse_compressed(&ptr, end_ptr); |
799 | |
800 | if (ptr == NULL) { |
801 | |
802 | return(NULL); |
803 | } |
804 | |
805 | if (end_ptr < ptr + DATA_ROLL_PTR_LEN) { |
806 | |
807 | return(NULL); |
808 | } |
809 | |
810 | *roll_ptr = trx_read_roll_ptr(ptr); |
811 | ptr += DATA_ROLL_PTR_LEN; |
812 | |
813 | *trx_id = mach_u64_parse_compressed(&ptr, end_ptr); |
814 | |
815 | return(const_cast<byte*>(ptr)); |
816 | } |
817 | |
818 | /***********************************************************//** |
819 | Writes to the redo log the new values of the fields occurring in the index. */ |
820 | void |
821 | row_upd_index_write_log( |
822 | /*====================*/ |
823 | const upd_t* update, /*!< in: update vector */ |
824 | byte* log_ptr,/*!< in: pointer to mlog buffer: must |
825 | contain at least MLOG_BUF_MARGIN bytes |
826 | of free space; the buffer is closed |
827 | within this function */ |
828 | mtr_t* mtr) /*!< in: mtr into whose log to write */ |
829 | { |
830 | const upd_field_t* upd_field; |
831 | const dfield_t* new_val; |
832 | ulint len; |
833 | ulint n_fields; |
834 | byte* buf_end; |
835 | ulint i; |
836 | |
837 | n_fields = upd_get_n_fields(update); |
838 | |
839 | buf_end = log_ptr + MLOG_BUF_MARGIN; |
840 | |
841 | mach_write_to_1(log_ptr, update->info_bits); |
842 | log_ptr++; |
843 | log_ptr += mach_write_compressed(log_ptr, n_fields); |
844 | |
845 | for (i = 0; i < n_fields; i++) { |
846 | compile_time_assert(MLOG_BUF_MARGIN > 30); |
847 | |
848 | if (log_ptr + 30 > buf_end) { |
849 | mlog_close(mtr, log_ptr); |
850 | |
851 | log_ptr = mlog_open(mtr, MLOG_BUF_MARGIN); |
852 | buf_end = log_ptr + MLOG_BUF_MARGIN; |
853 | } |
854 | |
855 | upd_field = upd_get_nth_field(update, i); |
856 | |
857 | new_val = &(upd_field->new_val); |
858 | |
859 | len = dfield_get_len(new_val); |
860 | |
861 | /* If this is a virtual column, mark it using special |
862 | field_no */ |
863 | ulint field_no = upd_fld_is_virtual_col(upd_field) |
864 | ? REC_MAX_N_FIELDS + unsigned(upd_field->field_no) |
865 | : unsigned(upd_field->field_no); |
866 | |
867 | log_ptr += mach_write_compressed(log_ptr, field_no); |
868 | log_ptr += mach_write_compressed(log_ptr, len); |
869 | |
870 | if (len != UNIV_SQL_NULL) { |
871 | if (log_ptr + len < buf_end) { |
872 | memcpy(log_ptr, dfield_get_data(new_val), len); |
873 | |
874 | log_ptr += len; |
875 | } else { |
876 | mlog_close(mtr, log_ptr); |
877 | |
878 | mlog_catenate_string( |
879 | mtr, |
880 | static_cast<byte*>( |
881 | dfield_get_data(new_val)), |
882 | len); |
883 | |
884 | log_ptr = mlog_open(mtr, MLOG_BUF_MARGIN); |
885 | buf_end = log_ptr + MLOG_BUF_MARGIN; |
886 | } |
887 | } |
888 | } |
889 | |
890 | mlog_close(mtr, log_ptr); |
891 | } |
892 | |
893 | /*********************************************************************//** |
894 | Parses the log data written by row_upd_index_write_log. |
895 | @return log data end or NULL */ |
896 | byte* |
897 | row_upd_index_parse( |
898 | /*================*/ |
899 | const byte* ptr, /*!< in: buffer */ |
900 | const byte* end_ptr,/*!< in: buffer end */ |
901 | mem_heap_t* heap, /*!< in: memory heap where update vector is |
902 | built */ |
903 | upd_t** update_out)/*!< out: update vector */ |
904 | { |
905 | upd_t* update; |
906 | upd_field_t* upd_field; |
907 | dfield_t* new_val; |
908 | ulint len; |
909 | ulint n_fields; |
910 | ulint info_bits; |
911 | ulint i; |
912 | |
913 | if (end_ptr < ptr + 1) { |
914 | |
915 | return(NULL); |
916 | } |
917 | |
918 | info_bits = mach_read_from_1(ptr); |
919 | ptr++; |
920 | n_fields = mach_parse_compressed(&ptr, end_ptr); |
921 | |
922 | if (ptr == NULL) { |
923 | |
924 | return(NULL); |
925 | } |
926 | |
927 | update = upd_create(n_fields, heap); |
928 | update->info_bits = info_bits; |
929 | |
930 | for (i = 0; i < n_fields; i++) { |
931 | ulint field_no; |
932 | upd_field = upd_get_nth_field(update, i); |
933 | new_val = &(upd_field->new_val); |
934 | |
935 | field_no = mach_parse_compressed(&ptr, end_ptr); |
936 | |
937 | if (ptr == NULL) { |
938 | |
939 | return(NULL); |
940 | } |
941 | |
942 | /* Check if this is a virtual column; if so, mark |
943 | the prtype accordingly */ |
944 | if (field_no >= REC_MAX_N_FIELDS) { |
945 | new_val->type.prtype |= DATA_VIRTUAL; |
946 | field_no -= REC_MAX_N_FIELDS; |
947 | } |
948 | |
949 | upd_field->field_no = field_no; |
950 | |
951 | len = mach_parse_compressed(&ptr, end_ptr); |
952 | |
953 | if (ptr == NULL) { |
954 | |
955 | return(NULL); |
956 | } |
957 | |
958 | if (len != UNIV_SQL_NULL) { |
959 | |
960 | if (end_ptr < ptr + len) { |
961 | |
962 | return(NULL); |
963 | } |
964 | |
965 | dfield_set_data(new_val, |
966 | mem_heap_dup(heap, ptr, len), len); |
967 | ptr += len; |
968 | } else { |
969 | dfield_set_null(new_val); |
970 | } |
971 | } |
972 | |
973 | *update_out = update; |
974 | |
975 | return(const_cast<byte*>(ptr)); |
976 | } |
977 | |
978 | /***************************************************************//** |
979 | Builds an update vector from those fields which in a secondary index entry |
980 | differ from a record that has the equal ordering fields. NOTE: we compare |
981 | the fields as binary strings! |
982 | @return own: update vector of differing fields */ |
983 | upd_t* |
984 | row_upd_build_sec_rec_difference_binary( |
985 | /*====================================*/ |
986 | const rec_t* rec, /*!< in: secondary index record */ |
987 | dict_index_t* index, /*!< in: index */ |
988 | const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */ |
989 | const dtuple_t* entry, /*!< in: entry to insert */ |
990 | mem_heap_t* heap) /*!< in: memory heap from which allocated */ |
991 | { |
992 | upd_field_t* upd_field; |
993 | const dfield_t* dfield; |
994 | const byte* data; |
995 | ulint len; |
996 | upd_t* update; |
997 | ulint n_diff; |
998 | ulint i; |
999 | |
1000 | /* This function is used only for a secondary index */ |
1001 | ut_a(!dict_index_is_clust(index)); |
1002 | ut_ad(rec_offs_validate(rec, index, offsets)); |
1003 | ut_ad(rec_offs_n_fields(offsets) == dtuple_get_n_fields(entry)); |
1004 | ut_ad(!rec_offs_any_extern(offsets)); |
1005 | ut_ad(!rec_offs_any_default(offsets)); |
1006 | ut_ad(!index->table->skip_alter_undo); |
1007 | |
1008 | update = upd_create(dtuple_get_n_fields(entry), heap); |
1009 | |
1010 | n_diff = 0; |
1011 | |
1012 | for (i = 0; i < dtuple_get_n_fields(entry); i++) { |
1013 | |
1014 | data = rec_get_nth_field(rec, offsets, i, &len); |
1015 | |
1016 | dfield = dtuple_get_nth_field(entry, i); |
1017 | |
1018 | /* NOTE that it may be that len != dfield_get_len(dfield) if we |
1019 | are updating in a character set and collation where strings of |
1020 | different length can be equal in an alphabetical comparison, |
1021 | and also in the case where we have a column prefix index |
1022 | and the last characters in the index field are spaces; the |
1023 | latter case probably caused the assertion failures reported at |
1024 | row0upd.cc line 713 in versions 4.0.14 - 4.0.16. */ |
1025 | |
1026 | /* NOTE: we compare the fields as binary strings! |
1027 | (No collation) */ |
1028 | |
1029 | if (!dfield_data_is_binary_equal(dfield, len, data)) { |
1030 | |
1031 | upd_field = upd_get_nth_field(update, n_diff); |
1032 | |
1033 | dfield_copy(&(upd_field->new_val), dfield); |
1034 | |
1035 | upd_field_set_field_no(upd_field, i, index); |
1036 | |
1037 | n_diff++; |
1038 | } |
1039 | } |
1040 | |
1041 | update->n_fields = n_diff; |
1042 | |
1043 | return(update); |
1044 | } |
1045 | |
1046 | /** Builds an update vector from those fields, excluding the roll ptr and |
1047 | trx id fields, which in an index entry differ from a record that has |
1048 | the equal ordering fields. NOTE: we compare the fields as binary strings! |
1049 | @param[in] index clustered index |
1050 | @param[in] entry clustered index entry to insert |
1051 | @param[in] rec clustered index record |
1052 | @param[in] offsets rec_get_offsets(rec,index), or NULL |
1053 | @param[in] no_sys skip the system columns |
1054 | DB_TRX_ID and DB_ROLL_PTR |
1055 | @param[in] trx transaction (for diagnostics), |
1056 | or NULL |
1057 | @param[in] heap memory heap from which allocated |
1058 | @param[in] mysql_table NULL, or mysql table object when |
1059 | user thread invokes dml |
1060 | @return own: update vector of differing fields, excluding roll ptr and |
1061 | trx id */ |
1062 | upd_t* |
1063 | row_upd_build_difference_binary( |
1064 | dict_index_t* index, |
1065 | const dtuple_t* entry, |
1066 | const rec_t* rec, |
1067 | const ulint* offsets, |
1068 | bool no_sys, |
1069 | trx_t* trx, |
1070 | mem_heap_t* heap, |
1071 | TABLE* mysql_table) |
1072 | { |
1073 | upd_field_t* upd_field; |
1074 | dfield_t* dfield; |
1075 | const byte* data; |
1076 | ulint len; |
1077 | upd_t* update; |
1078 | ulint n_diff; |
1079 | ulint trx_id_pos; |
1080 | ulint i; |
1081 | ulint offsets_[REC_OFFS_NORMAL_SIZE]; |
1082 | ulint n_fld = dtuple_get_n_fields(entry); |
1083 | ulint n_v_fld = dtuple_get_n_v_fields(entry); |
1084 | rec_offs_init(offsets_); |
1085 | |
1086 | /* This function is used only for a clustered index */ |
1087 | ut_a(dict_index_is_clust(index)); |
1088 | ut_ad(!index->table->skip_alter_undo); |
1089 | |
1090 | update = upd_create(n_fld + n_v_fld, heap); |
1091 | |
1092 | n_diff = 0; |
1093 | |
1094 | trx_id_pos = dict_index_get_sys_col_pos(index, DATA_TRX_ID); |
1095 | ut_ad(dict_index_get_sys_col_pos(index, DATA_ROLL_PTR) |
1096 | == trx_id_pos + 1); |
1097 | |
1098 | if (!offsets) { |
1099 | offsets = rec_get_offsets(rec, index, offsets_, true, |
1100 | ULINT_UNDEFINED, &heap); |
1101 | } else { |
1102 | ut_ad(rec_offs_validate(rec, index, offsets)); |
1103 | } |
1104 | |
1105 | for (i = 0; i < n_fld; i++) { |
1106 | data = rec_get_nth_cfield(rec, index, offsets, i, &len); |
1107 | |
1108 | dfield = dtuple_get_nth_field(entry, i); |
1109 | |
1110 | /* NOTE: we compare the fields as binary strings! |
1111 | (No collation) */ |
1112 | if (no_sys) { |
1113 | /* TRX_ID */ |
1114 | if (i == trx_id_pos) { |
1115 | continue; |
1116 | } |
1117 | |
1118 | /* DB_ROLL_PTR */ |
1119 | if (i == trx_id_pos + 1) { |
1120 | continue; |
1121 | } |
1122 | } |
1123 | |
1124 | if (!dfield_is_ext(dfield) |
1125 | != !rec_offs_nth_extern(offsets, i) |
1126 | || !dfield_data_is_binary_equal(dfield, len, data)) { |
1127 | |
1128 | upd_field = upd_get_nth_field(update, n_diff); |
1129 | |
1130 | dfield_copy(&(upd_field->new_val), dfield); |
1131 | |
1132 | upd_field_set_field_no(upd_field, i, index); |
1133 | |
1134 | n_diff++; |
1135 | } |
1136 | } |
1137 | |
1138 | /* Check the virtual column updates. Even if no non-virtual |
1139 | (base) column changes, we still need to build the indexed |
1140 | virtual column values so that the undo log records them |
1141 | (for purge/MVCC purposes) */ |
1142 | if (n_v_fld > 0) { |
1143 | row_ext_t* ext; |
1144 | mem_heap_t* v_heap = NULL; |
1145 | THD* thd; |
1146 | |
1147 | if (trx == NULL) { |
1148 | thd = current_thd; |
1149 | } else { |
1150 | thd = trx->mysql_thd; |
1151 | } |
1152 | |
1153 | ut_ad(!update->old_vrow); |
1154 | |
1155 | for (i = 0; i < n_v_fld; i++) { |
1156 | const dict_v_col_t* col |
1157 | = dict_table_get_nth_v_col(index->table, i); |
1158 | |
1159 | if (!col->m_col.ord_part) { |
1160 | continue; |
1161 | } |
1162 | |
1163 | if (update->old_vrow == NULL) { |
1164 | update->old_vrow = row_build( |
1165 | ROW_COPY_POINTERS, index, rec, offsets, |
1166 | index->table, NULL, NULL, &ext, heap); |
1167 | } |
1168 | |
1169 | dfield = dtuple_get_nth_v_field(entry, i); |
1170 | |
1171 | dfield_t* vfield = innobase_get_computed_value( |
1172 | update->old_vrow, col, index, |
1173 | &v_heap, heap, NULL, thd, mysql_table, |
1174 | NULL, NULL, NULL); |
1175 | |
1176 | if (!dfield_data_is_binary_equal( |
1177 | dfield, vfield->len, |
1178 | static_cast<byte*>(vfield->data))) { |
1179 | upd_field = upd_get_nth_field(update, n_diff); |
1180 | |
1181 | upd_field->old_v_val = static_cast<dfield_t*>( |
1182 | mem_heap_alloc( |
1183 | heap, |
1184 | sizeof *upd_field->old_v_val)); |
1185 | |
1186 | dfield_copy(upd_field->old_v_val, vfield); |
1187 | |
1188 | dfield_copy(&(upd_field->new_val), dfield); |
1189 | |
1190 | upd_field_set_v_field_no( |
1191 | upd_field, i, index); |
1192 | |
1193 | n_diff++; |
1194 | |
1195 | } |
1196 | } |
1197 | |
1198 | if (v_heap) { |
1199 | mem_heap_free(v_heap); |
1200 | } |
1201 | } |
1202 | |
1203 | update->n_fields = n_diff; |
1204 | ut_ad(update->validate()); |
1205 | |
1206 | return(update); |
1207 | } |
1208 | |
1209 | /** Fetch a prefix of an externally stored column. |
1210 | This is similar to row_ext_lookup(), but the row_ext_t holds the old values |
1211 | of the column and must not be poisoned with the new values. |
1212 | @param[in] data 'internally' stored part of the field |
1213 | containing also the reference to the external part |
1214 | @param[in] local_len length of data, in bytes |
1215 | @param[in] page_size BLOB page size |
1216 | @param[in,out] len input - length of prefix to |
1217 | fetch; output: fetched length of the prefix |
1218 | @param[in,out] heap heap where to allocate |
1219 | @return BLOB prefix */ |
1220 | static |
1221 | byte* |
1222 | row_upd_ext_fetch( |
1223 | const byte* data, |
1224 | ulint local_len, |
1225 | const page_size_t& page_size, |
1226 | ulint* len, |
1227 | mem_heap_t* heap) |
1228 | { |
1229 | byte* buf = static_cast<byte*>(mem_heap_alloc(heap, *len)); |
1230 | |
1231 | *len = btr_copy_externally_stored_field_prefix( |
1232 | buf, *len, page_size, data, local_len); |
1233 | |
1234 | /* We should never update records containing a half-deleted BLOB. */ |
1235 | ut_a(*len); |
1236 | |
1237 | return(buf); |
1238 | } |
1239 | |
1240 | /** Replaces the new column value stored in the update vector in |
1241 | the given index entry field. |
1242 | @param[in,out] dfield data field of the index entry |
1243 | @param[in] field index field |
1244 | @param[in] col field->col |
1245 | @param[in] uf update field |
1246 | @param[in,out] heap memory heap for allocating and copying |
1247 | the new value |
1248 | @param[in] page_size page size */ |
1249 | static |
1250 | void |
1251 | row_upd_index_replace_new_col_val( |
1252 | dfield_t* dfield, |
1253 | const dict_field_t* field, |
1254 | const dict_col_t* col, |
1255 | const upd_field_t* uf, |
1256 | mem_heap_t* heap, |
1257 | const page_size_t& page_size) |
1258 | { |
1259 | ulint len; |
1260 | const byte* data; |
1261 | |
1262 | dfield_copy_data(dfield, &uf->new_val); |
1263 | |
1264 | if (dfield_is_null(dfield)) { |
1265 | return; |
1266 | } |
1267 | |
1268 | len = dfield_get_len(dfield); |
1269 | data = static_cast<const byte*>(dfield_get_data(dfield)); |
1270 | |
1271 | if (field->prefix_len > 0) { |
1272 | ibool fetch_ext = dfield_is_ext(dfield) |
1273 | && len < (ulint) field->prefix_len |
1274 | + BTR_EXTERN_FIELD_REF_SIZE; |
1275 | |
1276 | if (fetch_ext) { |
1277 | ulint l = len; |
1278 | |
1279 | len = field->prefix_len; |
1280 | |
1281 | data = row_upd_ext_fetch(data, l, page_size, |
1282 | &len, heap); |
1283 | } |
1284 | |
1285 | len = dtype_get_at_most_n_mbchars(col->prtype, |
1286 | col->mbminlen, col->mbmaxlen, |
1287 | field->prefix_len, len, |
1288 | (const char*) data); |
1289 | |
1290 | dfield_set_data(dfield, data, len); |
1291 | |
1292 | if (!fetch_ext) { |
1293 | dfield_dup(dfield, heap); |
1294 | } |
1295 | |
1296 | return; |
1297 | } |
1298 | |
1299 | switch (uf->orig_len) { |
1300 | byte* buf; |
1301 | case BTR_EXTERN_FIELD_REF_SIZE: |
1302 | /* Restore the original locally stored |
1303 | part of the column. In the undo log, |
1304 | InnoDB writes a longer prefix of externally |
1305 | stored columns, so that column prefixes |
1306 | in secondary indexes can be reconstructed. */ |
1307 | dfield_set_data(dfield, |
1308 | data + len - BTR_EXTERN_FIELD_REF_SIZE, |
1309 | BTR_EXTERN_FIELD_REF_SIZE); |
1310 | dfield_set_ext(dfield); |
1311 | /* fall through */ |
1312 | case 0: |
1313 | dfield_dup(dfield, heap); |
1314 | break; |
1315 | default: |
1316 | /* Reconstruct the original locally |
1317 | stored part of the column. The data |
1318 | will have to be copied. */ |
1319 | ut_a(uf->orig_len > BTR_EXTERN_FIELD_REF_SIZE); |
1320 | buf = static_cast<byte*>(mem_heap_alloc(heap, uf->orig_len)); |
1321 | |
1322 | /* Copy the locally stored prefix. */ |
1323 | memcpy(buf, data, |
1324 | unsigned(uf->orig_len) - BTR_EXTERN_FIELD_REF_SIZE); |
1325 | |
1326 | /* Copy the BLOB pointer. */ |
1327 | memcpy(buf + uf->orig_len - BTR_EXTERN_FIELD_REF_SIZE, |
1328 | data + len - BTR_EXTERN_FIELD_REF_SIZE, |
1329 | BTR_EXTERN_FIELD_REF_SIZE); |
1330 | |
1331 | dfield_set_data(dfield, buf, uf->orig_len); |
1332 | dfield_set_ext(dfield); |
1333 | break; |
1334 | } |
1335 | } |
1336 | |
1337 | /** Apply an update vector to an index entry. |
1338 | @param[in,out] entry index entry to be updated; the clustered index record |
1339 | must be covered by a lock or a page latch to prevent |
1340 | deletion (rollback or purge) |
1341 | @param[in] index index of the entry |
1342 | @param[in] update update vector built for the entry |
1343 | @param[in,out] heap memory heap for copying off-page columns */ |
1344 | void |
1345 | row_upd_index_replace_new_col_vals_index_pos( |
1346 | dtuple_t* entry, |
1347 | const dict_index_t* index, |
1348 | const upd_t* update, |
1349 | mem_heap_t* heap) |
1350 | { |
1351 | ut_ad(!index->table->skip_alter_undo); |
1352 | |
1353 | const page_size_t& page_size = dict_table_page_size(index->table); |
1354 | |
1355 | dtuple_set_info_bits(entry, update->info_bits); |
1356 | |
1357 | for (unsigned i = index->n_fields; i--; ) { |
1358 | const dict_field_t* field; |
1359 | const dict_col_t* col; |
1360 | const upd_field_t* uf; |
1361 | |
1362 | field = dict_index_get_nth_field(index, i); |
1363 | col = dict_field_get_col(field); |
1364 | if (col->is_virtual()) { |
1365 | const dict_v_col_t* vcol = reinterpret_cast< |
1366 | const dict_v_col_t*>( |
1367 | col); |
1368 | |
1369 | uf = upd_get_field_by_field_no( |
1370 | update, vcol->v_pos, true); |
1371 | } else { |
1372 | uf = upd_get_field_by_field_no( |
1373 | update, i, false); |
1374 | } |
1375 | |
1376 | if (uf) { |
1377 | row_upd_index_replace_new_col_val( |
1378 | dtuple_get_nth_field(entry, i), |
1379 | field, col, uf, heap, page_size); |
1380 | } |
1381 | } |
1382 | } |
1383 | |
1384 | /***********************************************************//** |
1385 | Replaces the new column values stored in the update vector in the given |
1386 | index entry. */ |
1387 | void |
1388 | row_upd_index_replace_new_col_vals( |
1389 | /*===============================*/ |
1390 | dtuple_t* entry, /*!< in/out: index entry where replaced; |
1391 | the clustered index record must be |
1392 | covered by a lock or a page latch to |
1393 | prevent deletion (rollback or purge) */ |
1394 | dict_index_t* index, /*!< in: index; NOTE that this may also be a |
1395 | non-clustered index */ |
1396 | const upd_t* update, /*!< in: an update vector built for the |
1397 | CLUSTERED index so that the field number in |
1398 | an upd_field is the clustered index position */ |
1399 | mem_heap_t* heap) /*!< in: memory heap for allocating and |
1400 | copying the new values */ |
1401 | { |
1402 | ulint i; |
1403 | const dict_index_t* clust_index |
1404 | = dict_table_get_first_index(index->table); |
1405 | const page_size_t& page_size = dict_table_page_size(index->table); |
1406 | |
1407 | ut_ad(!index->table->skip_alter_undo); |
1408 | |
1409 | dtuple_set_info_bits(entry, update->info_bits); |
1410 | |
1411 | for (i = 0; i < dict_index_get_n_fields(index); i++) { |
1412 | const dict_field_t* field; |
1413 | const dict_col_t* col; |
1414 | const upd_field_t* uf; |
1415 | |
1416 | field = dict_index_get_nth_field(index, i); |
1417 | col = dict_field_get_col(field); |
1418 | if (col->is_virtual()) { |
1419 | const dict_v_col_t* vcol = reinterpret_cast< |
1420 | const dict_v_col_t*>( |
1421 | col); |
1422 | |
1423 | uf = upd_get_field_by_field_no( |
1424 | update, vcol->v_pos, true); |
1425 | } else { |
1426 | uf = upd_get_field_by_field_no( |
1427 | update, |
1428 | dict_col_get_clust_pos(col, clust_index), |
1429 | false); |
1430 | } |
1431 | |
1432 | if (uf) { |
1433 | row_upd_index_replace_new_col_val( |
1434 | dtuple_get_nth_field(entry, i), |
1435 | field, col, uf, heap, page_size); |
1436 | } |
1437 | } |
1438 | } |
1439 | |
1440 | /** Replaces the virtual column values stored in the update vector. |
1441 | @param[in,out] row row whose column to be set |
1442 | @param[in] field data to set |
1443 | @param[in] len data length |
1444 | @param[in] vcol virtual column info */ |
1445 | static |
1446 | void |
1447 | row_upd_set_vcol_data( |
1448 | dtuple_t* row, |
1449 | const byte* field, |
1450 | ulint len, |
1451 | dict_v_col_t* vcol) |
1452 | { |
1453 | dfield_t* dfield = dtuple_get_nth_v_field(row, vcol->v_pos); |
1454 | |
1455 | if (dfield_get_type(dfield)->mtype == DATA_MISSING) { |
1456 | dict_col_copy_type(&vcol->m_col, dfield_get_type(dfield)); |
1457 | |
1458 | dfield_set_data(dfield, field, len); |
1459 | } |
1460 | } |
1461 | |
1462 | /** Replaces the virtual column values stored in a dtuple with those of |
1463 | an update vector. |
1464 | @param[in,out] row row whose column to be updated |
1465 | @param[in] table table |
1466 | @param[in] update an update vector built for the clustered index |
1467 | @param[in] upd_new update to new or old value |
1468 | @param[in,out] undo_row undo row (if needs to be updated) |
1469 | @param[in] ptr remaining part in update undo log */ |
1470 | void |
1471 | row_upd_replace_vcol( |
1472 | dtuple_t* row, |
1473 | const dict_table_t* table, |
1474 | const upd_t* update, |
1475 | bool upd_new, |
1476 | dtuple_t* undo_row, |
1477 | const byte* ptr) |
1478 | { |
1479 | ulint col_no; |
1480 | ulint i; |
1481 | ulint n_cols; |
1482 | |
1483 | ut_ad(!table->skip_alter_undo); |
1484 | |
1485 | n_cols = dtuple_get_n_v_fields(row); |
1486 | for (col_no = 0; col_no < n_cols; col_no++) { |
1487 | dfield_t* dfield; |
1488 | |
1489 | const dict_v_col_t* col |
1490 | = dict_table_get_nth_v_col(table, col_no); |
1491 | |
1492 | /* If there is no index on the column, do not bother |
1493 | with the value update */ |
1494 | if (!col->m_col.ord_part) { |
1495 | dict_index_t* clust_index |
1496 | = dict_table_get_first_index(table); |
1497 | |
1498 | /* Skip the column if there is no online alter |
1499 | table in progress or it is not being indexed |
1500 | in new table */ |
1501 | if (!dict_index_is_online_ddl(clust_index) |
1502 | || !row_log_col_is_indexed(clust_index, col_no)) { |
1503 | continue; |
1504 | } |
1505 | } |
1506 | |
1507 | dfield = dtuple_get_nth_v_field(row, col_no); |
1508 | |
1509 | for (i = 0; i < upd_get_n_fields(update); i++) { |
1510 | const upd_field_t* upd_field |
1511 | = upd_get_nth_field(update, i); |
1512 | if (!upd_fld_is_virtual_col(upd_field) |
1513 | || upd_field->field_no != col->v_pos) { |
1514 | continue; |
1515 | } |
1516 | |
1517 | if (upd_new) { |
1518 | dfield_copy_data(dfield, &upd_field->new_val); |
1519 | } else { |
1520 | dfield_copy_data(dfield, upd_field->old_v_val); |
1521 | } |
1522 | |
1523 | dfield->type = upd_field->new_val.type; |
1524 | break; |
1525 | } |
1526 | } |
1527 | |
1528 | bool first_v_col = true; |
1529 | bool is_undo_log = true; |
1530 | |
1531 | /* We will read those unchanged (but indexed) virtual columns in */ |
1532 | if (ptr != NULL) { |
1533 | const byte* end_ptr; |
1534 | |
1535 | end_ptr = ptr + mach_read_from_2(ptr); |
1536 | ptr += 2; |
1537 | |
1538 | while (ptr != end_ptr) { |
1539 | const byte* field; |
1540 | ulint field_no; |
1541 | ulint len; |
1542 | ulint orig_len; |
1543 | bool is_v; |
1544 | |
1545 | field_no = mach_read_next_compressed(&ptr); |
1546 | |
1547 | is_v = (field_no >= REC_MAX_N_FIELDS); |
1548 | |
1549 | if (is_v) { |
1550 | ptr = trx_undo_read_v_idx( |
1551 | table, ptr, first_v_col, &is_undo_log, |
1552 | &field_no); |
1553 | first_v_col = false; |
1554 | } |
1555 | |
1556 | ptr = trx_undo_rec_get_col_val( |
1557 | ptr, &field, &len, &orig_len); |
1558 | |
1559 | if (field_no == ULINT_UNDEFINED) { |
1560 | ut_ad(is_v); |
1561 | continue; |
1562 | } |
1563 | |
1564 | if (is_v) { |
1565 | dict_v_col_t* vcol = dict_table_get_nth_v_col( |
1566 | table, field_no); |
1567 | |
1568 | row_upd_set_vcol_data(row, field, len, vcol); |
1569 | |
1570 | if (undo_row) { |
1571 | row_upd_set_vcol_data( |
1572 | undo_row, field, len, vcol); |
1573 | } |
1574 | } |
1575 | ut_ad(ptr <= end_ptr); |
1576 | } |
1577 | } |
1578 | } |
1579 | |
1580 | /***********************************************************//** |
1581 | Replaces the new column values stored in the update vector. */ |
1582 | void |
1583 | row_upd_replace( |
1584 | /*============*/ |
1585 | dtuple_t* row, /*!< in/out: row where replaced, |
1586 | indexed by col_no; |
1587 | the clustered index record must be |
1588 | covered by a lock or a page latch to |
1589 | prevent deletion (rollback or purge) */ |
1590 | row_ext_t** ext, /*!< out, own: NULL, or externally |
1591 | stored column prefixes */ |
1592 | const dict_index_t* index, /*!< in: clustered index */ |
1593 | const upd_t* update, /*!< in: an update vector built for the |
1594 | clustered index */ |
1595 | mem_heap_t* heap) /*!< in: memory heap */ |
1596 | { |
1597 | ulint col_no; |
1598 | ulint i; |
1599 | ulint n_cols; |
1600 | ulint n_ext_cols; |
1601 | ulint* ext_cols; |
1602 | const dict_table_t* table; |
1603 | |
1604 | ut_ad(row); |
1605 | ut_ad(ext); |
1606 | ut_ad(index); |
1607 | ut_ad(dict_index_is_clust(index)); |
1608 | ut_ad(update); |
1609 | ut_ad(heap); |
1610 | ut_ad(update->validate()); |
1611 | |
1612 | n_cols = dtuple_get_n_fields(row); |
1613 | table = index->table; |
1614 | ut_ad(n_cols == dict_table_get_n_cols(table)); |
1615 | |
1616 | ext_cols = static_cast<ulint*>( |
1617 | mem_heap_alloc(heap, n_cols * sizeof *ext_cols)); |
1618 | |
1619 | n_ext_cols = 0; |
1620 | |
1621 | dtuple_set_info_bits(row, update->info_bits); |
1622 | |
1623 | for (col_no = 0; col_no < n_cols; col_no++) { |
1624 | |
1625 | const dict_col_t* col |
1626 | = dict_table_get_nth_col(table, col_no); |
1627 | const ulint clust_pos |
1628 | = dict_col_get_clust_pos(col, index); |
1629 | dfield_t* dfield; |
1630 | |
1631 | if (UNIV_UNLIKELY(clust_pos == ULINT_UNDEFINED)) { |
1632 | |
1633 | continue; |
1634 | } |
1635 | |
1636 | dfield = dtuple_get_nth_field(row, col_no); |
1637 | |
1638 | for (i = 0; i < upd_get_n_fields(update); i++) { |
1639 | |
1640 | const upd_field_t* upd_field |
1641 | = upd_get_nth_field(update, i); |
1642 | |
1643 | if (upd_field->field_no != clust_pos |
1644 | || upd_fld_is_virtual_col(upd_field)) { |
1645 | |
1646 | continue; |
1647 | } |
1648 | |
1649 | dfield_copy_data(dfield, &upd_field->new_val); |
1650 | break; |
1651 | } |
1652 | |
1653 | if (dfield_is_ext(dfield) && col->ord_part) { |
1654 | ext_cols[n_ext_cols++] = col_no; |
1655 | } |
1656 | } |
1657 | |
1658 | if (n_ext_cols) { |
1659 | *ext = row_ext_create(n_ext_cols, ext_cols, table->flags, row, |
1660 | heap); |
1661 | } else { |
1662 | *ext = NULL; |
1663 | } |
1664 | |
1665 | row_upd_replace_vcol(row, table, update, true, NULL, NULL); |
1666 | } |
1667 | |
1668 | /***********************************************************//** |
1669 | Checks if an update vector changes an ordering field of an index record. |
1670 | |
1671 | This function is fast if the update vector is short or the number of ordering |
1672 | fields in the index is small. Otherwise, this can be quadratic. |
1673 | NOTE: we compare the fields as binary strings! |
1674 | @return TRUE if update vector changes an ordering field in the index record */ |
1675 | ibool |
1676 | row_upd_changes_ord_field_binary_func( |
1677 | /*==================================*/ |
1678 | dict_index_t* index, /*!< in: index of the record */ |
1679 | const upd_t* update, /*!< in: update vector for the row; NOTE: the |
1680 | field numbers in this MUST be clustered index |
1681 | positions! */ |
1682 | #ifdef UNIV_DEBUG |
1683 | const que_thr_t*thr, /*!< in: query thread */ |
1684 | #endif /* UNIV_DEBUG */ |
1685 | const dtuple_t* row, /*!< in: old value of row, or NULL if the |
1686 | row and the data values in update are not |
1687 | known when this function is called, e.g., at |
1688 | compile time */ |
1689 | const row_ext_t*ext, /*!< NULL, or prefixes of the externally |
1690 | stored columns in the old row */ |
1691 | ulint flag) /*!< in: ROW_BUILD_NORMAL, |
1692 | ROW_BUILD_FOR_PURGE or ROW_BUILD_FOR_UNDO */ |
1693 | { |
1694 | ulint n_unique; |
1695 | ulint i; |
1696 | const dict_index_t* clust_index; |
1697 | |
1698 | ut_ad(thr); |
1699 | ut_ad(thr->graph); |
1700 | ut_ad(thr->graph->trx); |
1701 | ut_ad(!index->table->skip_alter_undo); |
1702 | |
1703 | n_unique = dict_index_get_n_unique(index); |
1704 | |
1705 | clust_index = dict_table_get_first_index(index->table); |
1706 | |
1707 | for (i = 0; i < n_unique; i++) { |
1708 | |
1709 | const dict_field_t* ind_field; |
1710 | const dict_col_t* col; |
1711 | ulint col_no; |
1712 | const upd_field_t* upd_field; |
1713 | const dfield_t* dfield; |
1714 | dfield_t dfield_ext; |
1715 | ulint dfield_len= 0; |
1716 | const byte* buf; |
1717 | bool is_virtual; |
1718 | const dict_v_col_t* vcol = NULL; |
1719 | |
1720 | ind_field = dict_index_get_nth_field(index, i); |
1721 | col = dict_field_get_col(ind_field); |
1722 | col_no = dict_col_get_no(col); |
1723 | is_virtual = col->is_virtual(); |
1724 | |
1725 | if (is_virtual) { |
1726 | vcol = reinterpret_cast<const dict_v_col_t*>(col); |
1727 | |
1728 | upd_field = upd_get_field_by_field_no( |
1729 | update, vcol->v_pos, true); |
1730 | } else { |
1731 | upd_field = upd_get_field_by_field_no( |
1732 | update, |
1733 | dict_col_get_clust_pos(col, clust_index), |
1734 | false); |
1735 | } |
1736 | |
1737 | if (upd_field == NULL) { |
1738 | continue; |
1739 | } |
1740 | |
1741 | if (row == NULL) { |
1742 | ut_ad(ext == NULL); |
1743 | return(TRUE); |
1744 | } |
1745 | |
1746 | if (is_virtual) { |
1747 | dfield = dtuple_get_nth_v_field( |
1748 | row, vcol->v_pos); |
1749 | } else { |
1750 | dfield = dtuple_get_nth_field(row, col_no); |
1751 | } |
1752 | |
		/* For a spatial index update, different geometry data
		can produce the same MBR. If the new index entry has the
		same MBR as the old entry, the MBR has not changed and
		we don't need to do anything. */
1757 | if (dict_index_is_spatial(index) && i == 0) { |
1758 | double mbr1[SPDIMS * 2]; |
1759 | double mbr2[SPDIMS * 2]; |
1760 | rtr_mbr_t* old_mbr; |
1761 | rtr_mbr_t* new_mbr; |
1762 | uchar* dptr = NULL; |
1763 | ulint flen = 0; |
1764 | ulint dlen = 0; |
1765 | mem_heap_t* temp_heap = NULL; |
1766 | const dfield_t* new_field = &upd_field->new_val; |
1767 | |
1768 | const page_size_t page_size |
1769 | = (ext != NULL) |
1770 | ? ext->page_size |
1771 | : dict_table_page_size( |
1772 | index->table); |
1773 | |
1774 | ut_ad(dfield->data != NULL |
1775 | && dfield->len > GEO_DATA_HEADER_SIZE); |
1776 | ut_ad(dict_col_get_spatial_status(col) != SPATIAL_NONE); |
1777 | |
1778 | /* Get the old mbr. */ |
1779 | if (dfield_is_ext(dfield)) { |
1780 | /* For off-page stored data, we |
1781 | need to read the whole field data. */ |
1782 | flen = dfield_get_len(dfield); |
1783 | dptr = static_cast<byte*>( |
1784 | dfield_get_data(dfield)); |
1785 | temp_heap = mem_heap_create(1000); |
1786 | |
1787 | dptr = btr_copy_externally_stored_field( |
1788 | &dlen, dptr, |
1789 | page_size, |
1790 | flen, |
1791 | temp_heap); |
1792 | } else { |
1793 | dptr = static_cast<uchar*>(dfield->data); |
1794 | dlen = dfield->len; |
1795 | } |
1796 | |
1797 | rtree_mbr_from_wkb(dptr + GEO_DATA_HEADER_SIZE, |
1798 | static_cast<uint>(dlen |
1799 | - GEO_DATA_HEADER_SIZE), |
1800 | SPDIMS, mbr1); |
1801 | old_mbr = reinterpret_cast<rtr_mbr_t*>(mbr1); |
1802 | |
1803 | /* Get the new mbr. */ |
1804 | if (dfield_is_ext(new_field)) { |
1805 | if (flag == ROW_BUILD_FOR_UNDO |
1806 | && dict_table_has_atomic_blobs( |
1807 | index->table)) { |
					/* For undo, and when the table uses
					the Barracuda (atomic BLOB) format,
					we need to skip the prefix data. */
1810 | flen = BTR_EXTERN_FIELD_REF_SIZE; |
1811 | ut_ad(dfield_get_len(new_field) >= |
1812 | BTR_EXTERN_FIELD_REF_SIZE); |
1813 | dptr = static_cast<byte*>( |
1814 | dfield_get_data(new_field)) |
1815 | + dfield_get_len(new_field) |
1816 | - BTR_EXTERN_FIELD_REF_SIZE; |
1817 | } else { |
1818 | flen = dfield_get_len(new_field); |
1819 | dptr = static_cast<byte*>( |
1820 | dfield_get_data(new_field)); |
1821 | } |
1822 | |
1823 | if (temp_heap == NULL) { |
1824 | temp_heap = mem_heap_create(1000); |
1825 | } |
1826 | |
1827 | dptr = btr_copy_externally_stored_field( |
1828 | &dlen, dptr, |
1829 | page_size, |
1830 | flen, |
1831 | temp_heap); |
1832 | } else { |
1833 | dptr = static_cast<uchar*>(upd_field->new_val.data); |
1834 | dlen = upd_field->new_val.len; |
1835 | } |
1836 | rtree_mbr_from_wkb(dptr + GEO_DATA_HEADER_SIZE, |
1837 | static_cast<uint>(dlen |
1838 | - GEO_DATA_HEADER_SIZE), |
1839 | SPDIMS, mbr2); |
1840 | new_mbr = reinterpret_cast<rtr_mbr_t*>(mbr2); |
1841 | |
1842 | if (temp_heap) { |
1843 | mem_heap_free(temp_heap); |
1844 | } |
1845 | |
1846 | if (!MBR_EQUAL_CMP(old_mbr, new_mbr)) { |
1847 | return(TRUE); |
1848 | } else { |
1849 | continue; |
1850 | } |
1851 | } |
1852 | |
1853 | /* This treatment of column prefix indexes is loosely |
1854 | based on row_build_index_entry(). */ |
1855 | |
1856 | if (UNIV_LIKELY(ind_field->prefix_len == 0) |
1857 | || dfield_is_null(dfield)) { |
1858 | /* do nothing special */ |
1859 | } else if (ext) { |
1860 | /* Silence a compiler warning without |
1861 | silencing a Valgrind error. */ |
1862 | dfield_len = 0; |
1863 | UNIV_MEM_INVALID(&dfield_len, sizeof dfield_len); |
1864 | /* See if the column is stored externally. */ |
1865 | buf = row_ext_lookup(ext, col_no, &dfield_len); |
1866 | |
1867 | ut_ad(col->ord_part); |
1868 | |
1869 | if (UNIV_LIKELY_NULL(buf)) { |
1870 | if (UNIV_UNLIKELY(buf == field_ref_zero)) { |
1871 | /* The externally stored field |
1872 | was not written yet. This |
1873 | record should only be seen by |
1874 | recv_recovery_rollback_active(), |
1875 | when the server had crashed before |
1876 | storing the field. */ |
1877 | ut_ad(thr->graph->trx->is_recovered); |
1878 | ut_ad(trx_is_recv(thr->graph->trx)); |
1879 | return(TRUE); |
1880 | } |
1881 | |
1882 | goto copy_dfield; |
1883 | } |
1884 | } else if (dfield_is_ext(dfield)) { |
1885 | dfield_len = dfield_get_len(dfield); |
1886 | ut_a(dfield_len > BTR_EXTERN_FIELD_REF_SIZE); |
1887 | dfield_len -= BTR_EXTERN_FIELD_REF_SIZE; |
1888 | ut_a(dict_index_is_clust(index) |
1889 | || ind_field->prefix_len <= dfield_len); |
1890 | |
1891 | buf = static_cast<byte*>(dfield_get_data(dfield)); |
1892 | copy_dfield: |
1893 | ut_a(dfield_len > 0); |
1894 | dfield_copy(&dfield_ext, dfield); |
1895 | dfield_set_data(&dfield_ext, buf, dfield_len); |
1896 | dfield = &dfield_ext; |
1897 | } |
1898 | |
1899 | if (!dfield_datas_are_binary_equal( |
1900 | dfield, &upd_field->new_val, |
1901 | ind_field->prefix_len)) { |
1902 | |
1903 | return(TRUE); |
1904 | } |
1905 | } |
1906 | |
1907 | return(FALSE); |
1908 | } |
1909 | |
1910 | /***********************************************************//** |
1911 | Checks if an update vector changes an ordering field of an index record. |
1912 | NOTE: we compare the fields as binary strings! |
1913 | @return TRUE if update vector may change an ordering field in an index |
1914 | record */ |
1915 | ibool |
1916 | row_upd_changes_some_index_ord_field_binary( |
1917 | /*========================================*/ |
1918 | const dict_table_t* table, /*!< in: table */ |
1919 | const upd_t* update) /*!< in: update vector for the row */ |
1920 | { |
1921 | upd_field_t* upd_field; |
1922 | dict_index_t* index; |
1923 | ulint i; |
1924 | |
1925 | index = dict_table_get_first_index(table); |
1926 | |
1927 | for (i = 0; i < upd_get_n_fields(update); i++) { |
1928 | |
1929 | upd_field = upd_get_nth_field(update, i); |
1930 | |
1931 | if (upd_fld_is_virtual_col(upd_field)) { |
1932 | if (dict_table_get_nth_v_col(index->table, |
1933 | upd_field->field_no) |
1934 | ->m_col.ord_part) { |
1935 | return(TRUE); |
1936 | } |
1937 | } else { |
1938 | if (dict_field_get_col(dict_index_get_nth_field( |
1939 | index, upd_field->field_no))->ord_part) { |
1940 | return(TRUE); |
1941 | } |
1942 | } |
1943 | } |
1944 | |
1945 | return(FALSE); |
1946 | } |
1947 | |
1948 | /***********************************************************//** |
1949 | Checks if an FTS Doc ID column is affected by an UPDATE. |
1950 | @return whether the Doc ID column is changed */ |
1951 | bool |
1952 | row_upd_changes_doc_id( |
1953 | /*===================*/ |
1954 | dict_table_t* table, /*!< in: table */ |
1955 | upd_field_t* upd_field) /*!< in: field to check */ |
1956 | { |
1957 | ulint col_no; |
1958 | dict_index_t* clust_index; |
1959 | fts_t* fts = table->fts; |
1960 | |
1961 | ut_ad(!table->skip_alter_undo); |
1962 | |
1963 | clust_index = dict_table_get_first_index(table); |
1964 | |
1965 | /* Convert from index-specific column number to table-global |
1966 | column number. */ |
1967 | col_no = dict_index_get_nth_col_no(clust_index, upd_field->field_no); |
1968 | |
1969 | return(col_no == fts->doc_col); |
1970 | } |
1971 | /***********************************************************//** |
1972 | Checks if an FTS indexed column is affected by an UPDATE. |
@return offset within fts_t::indexes if an FTS indexed column was updated,
else ULINT_UNDEFINED */
1975 | ulint |
1976 | row_upd_changes_fts_column( |
1977 | /*=======================*/ |
1978 | dict_table_t* table, /*!< in: table */ |
1979 | upd_field_t* upd_field) /*!< in: field to check */ |
1980 | { |
1981 | ulint col_no; |
1982 | dict_index_t* clust_index; |
1983 | fts_t* fts = table->fts; |
1984 | |
1985 | ut_ad(!table->skip_alter_undo); |
1986 | |
1987 | if (upd_fld_is_virtual_col(upd_field)) { |
1988 | col_no = upd_field->field_no; |
1989 | return(dict_table_is_fts_column(fts->indexes, col_no, true)); |
1990 | } else { |
1991 | clust_index = dict_table_get_first_index(table); |
1992 | |
1993 | /* Convert from index-specific column number to table-global |
1994 | column number. */ |
1995 | col_no = dict_index_get_nth_col_no(clust_index, |
1996 | upd_field->field_no); |
1997 | return(dict_table_is_fts_column(fts->indexes, col_no, false)); |
1998 | } |
1999 | |
2000 | } |
2001 | |
2002 | /***********************************************************//** |
2003 | Checks if an update vector changes some of the first ordering fields of an |
index record. This is only used in foreign key checks, and we can assume
that the index does not contain column prefixes.
2006 | @return TRUE if changes */ |
2007 | static |
2008 | ibool |
2009 | row_upd_changes_first_fields_binary( |
2010 | /*================================*/ |
2011 | dtuple_t* entry, /*!< in: index entry */ |
2012 | dict_index_t* index, /*!< in: index of entry */ |
2013 | const upd_t* update, /*!< in: update vector for the row */ |
2014 | ulint n) /*!< in: how many first fields to check */ |
2015 | { |
2016 | ulint n_upd_fields; |
2017 | ulint i, j; |
2018 | dict_index_t* clust_index; |
2019 | |
2020 | ut_ad(update && index); |
2021 | ut_ad(n <= dict_index_get_n_fields(index)); |
2022 | |
2023 | n_upd_fields = upd_get_n_fields(update); |
2024 | clust_index = dict_table_get_first_index(index->table); |
2025 | |
2026 | for (i = 0; i < n; i++) { |
2027 | |
2028 | const dict_field_t* ind_field; |
2029 | const dict_col_t* col; |
2030 | ulint col_pos; |
2031 | |
2032 | ind_field = dict_index_get_nth_field(index, i); |
2033 | col = dict_field_get_col(ind_field); |
2034 | col_pos = dict_col_get_clust_pos(col, clust_index); |
2035 | |
2036 | ut_a(ind_field->prefix_len == 0); |
2037 | |
2038 | for (j = 0; j < n_upd_fields; j++) { |
2039 | |
2040 | upd_field_t* upd_field |
2041 | = upd_get_nth_field(update, j); |
2042 | |
2043 | if (col_pos == upd_field->field_no |
2044 | && !dfield_datas_are_binary_equal( |
2045 | dtuple_get_nth_field(entry, i), |
2046 | &upd_field->new_val, 0)) { |
2047 | |
2048 | return(TRUE); |
2049 | } |
2050 | } |
2051 | } |
2052 | |
2053 | return(FALSE); |
2054 | } |
2055 | |
2056 | /*********************************************************************//** |
2057 | Copies the column values from a record. */ |
2058 | UNIV_INLINE |
2059 | void |
2060 | row_upd_copy_columns( |
2061 | /*=================*/ |
2062 | rec_t* rec, /*!< in: record in a clustered index */ |
2063 | const ulint* offsets,/*!< in: array returned by rec_get_offsets() */ |
2064 | const dict_index_t* index, /*!< in: index of rec */ |
2065 | sym_node_t* column) /*!< in: first column in a column list, or |
2066 | NULL */ |
2067 | { |
2068 | ut_ad(dict_index_is_clust(index)); |
2069 | |
2070 | const byte* data; |
2071 | ulint len; |
2072 | |
2073 | while (column) { |
2074 | data = rec_get_nth_cfield( |
2075 | rec, index, offsets, |
2076 | column->field_nos[SYM_CLUST_FIELD_NO], &len); |
2077 | eval_node_copy_and_alloc_val(column, data, len); |
2078 | |
2079 | column = UT_LIST_GET_NEXT(col_var_list, column); |
2080 | } |
2081 | } |
2082 | |
2083 | /*********************************************************************//** |
2084 | Calculates the new values for fields to update. Note that row_upd_copy_columns |
2085 | must have been called first. */ |
2086 | UNIV_INLINE |
2087 | void |
2088 | row_upd_eval_new_vals( |
2089 | /*==================*/ |
2090 | upd_t* update) /*!< in/out: update vector */ |
2091 | { |
2092 | que_node_t* exp; |
2093 | upd_field_t* upd_field; |
2094 | ulint n_fields; |
2095 | ulint i; |
2096 | |
2097 | n_fields = upd_get_n_fields(update); |
2098 | |
2099 | for (i = 0; i < n_fields; i++) { |
2100 | upd_field = upd_get_nth_field(update, i); |
2101 | |
2102 | exp = upd_field->exp; |
2103 | |
2104 | eval_exp(exp); |
2105 | |
2106 | dfield_copy_data(&(upd_field->new_val), que_node_get_val(exp)); |
2107 | } |
2108 | } |
2109 | |
/** Stores to the heap the virtual column values that are needed by any index.
@param[in,out]	node		row update node
@param[in]	update		update vector if this is an update, or NULL
				if this is a delete
@param[in]	thd		mysql thread handle
@param[in,out]	mysql_table	mysql table object */
2115 | static |
2116 | void |
2117 | row_upd_store_v_row( |
2118 | upd_node_t* node, |
2119 | const upd_t* update, |
2120 | THD* thd, |
2121 | TABLE* mysql_table) |
2122 | { |
2123 | mem_heap_t* heap = NULL; |
2124 | dict_index_t* index = dict_table_get_first_index(node->table); |
2125 | |
2126 | for (ulint col_no = 0; col_no < dict_table_get_n_v_cols(node->table); |
2127 | col_no++) { |
2128 | |
2129 | const dict_v_col_t* col |
2130 | = dict_table_get_nth_v_col(node->table, col_no); |
2131 | |
2132 | if (col->m_col.ord_part) { |
2133 | dfield_t* dfield |
2134 | = dtuple_get_nth_v_field(node->row, col_no); |
2135 | ulint n_upd |
2136 | = update ? upd_get_n_fields(update) : 0; |
2137 | ulint i = 0; |
2138 | |
			/* Check if the value is already in the update vector */
2140 | for (i = 0; i < n_upd; i++) { |
2141 | const upd_field_t* upd_field |
2142 | = upd_get_nth_field(update, i); |
2143 | if (!(upd_field->new_val.type.prtype |
2144 | & DATA_VIRTUAL) |
2145 | || upd_field->field_no != col->v_pos) { |
2146 | continue; |
2147 | } |
2148 | |
2149 | dfield_copy_data(dfield, upd_field->old_v_val); |
2150 | break; |
2151 | } |
2152 | |
2153 | /* Not updated */ |
2154 | if (i >= n_upd) { |
2155 | /* If this is an update, then the value |
2156 | should be in update->old_vrow */ |
2157 | if (update) { |
2158 | if (update->old_vrow == NULL) { |
						/* This only happens during a
						cascading update, and virtual
						columns cannot be affected,
						so it is OK to set it to NULL */
2163 | dfield_set_null(dfield); |
2164 | } else { |
2165 | dfield_t* vfield |
2166 | = dtuple_get_nth_v_field( |
2167 | update->old_vrow, |
2168 | col_no); |
2169 | dfield_copy_data(dfield, vfield); |
2170 | } |
2171 | } else { |
					/* The value must be computed; this
					happens when deleting the row */
2174 | innobase_get_computed_value( |
2175 | node->row, col, index, |
2176 | &heap, node->heap, NULL, |
2177 | thd, mysql_table, NULL, |
2178 | NULL, NULL); |
2179 | } |
2180 | } |
2181 | } |
2182 | } |
2183 | |
2184 | if (heap) { |
2185 | mem_heap_free(heap); |
2186 | } |
2187 | } |
2188 | |
2189 | /** Stores to the heap the row on which the node->pcur is positioned. |
2190 | @param[in] node row update node |
2191 | @param[in] thd mysql thread handle |
@param[in,out]	mysql_table	NULL, or MySQL table object when
				a user thread invokes DML */
2194 | void |
2195 | row_upd_store_row( |
2196 | upd_node_t* node, |
2197 | THD* thd, |
2198 | TABLE* mysql_table) |
2199 | { |
2200 | dict_index_t* clust_index; |
2201 | rec_t* rec; |
2202 | mem_heap_t* heap = NULL; |
2203 | row_ext_t** ext; |
2204 | ulint offsets_[REC_OFFS_NORMAL_SIZE]; |
2205 | const ulint* offsets; |
2206 | rec_offs_init(offsets_); |
2207 | |
2208 | ut_ad(node->pcur->latch_mode != BTR_NO_LATCHES); |
2209 | |
2210 | if (node->row != NULL) { |
2211 | mem_heap_empty(node->heap); |
2212 | } |
2213 | |
2214 | clust_index = dict_table_get_first_index(node->table); |
2215 | |
2216 | rec = btr_pcur_get_rec(node->pcur); |
2217 | |
2218 | offsets = rec_get_offsets(rec, clust_index, offsets_, true, |
2219 | ULINT_UNDEFINED, &heap); |
2220 | |
2221 | if (dict_table_has_atomic_blobs(node->table)) { |
2222 | /* There is no prefix of externally stored columns in |
2223 | the clustered index record. Build a cache of column |
2224 | prefixes. */ |
2225 | ext = &node->ext; |
2226 | } else { |
2227 | /* REDUNDANT and COMPACT formats store a local |
2228 | 768-byte prefix of each externally stored column. |
2229 | No cache is needed. */ |
2230 | ext = NULL; |
2231 | node->ext = NULL; |
2232 | } |
2233 | |
2234 | node->row = row_build(ROW_COPY_DATA, clust_index, rec, offsets, |
2235 | NULL, NULL, NULL, ext, node->heap); |
2236 | |
2237 | if (node->table->n_v_cols) { |
2238 | row_upd_store_v_row(node, node->is_delete ? NULL : node->update, |
2239 | thd, mysql_table); |
2240 | } |
2241 | |
2242 | if (node->is_delete == PLAIN_DELETE) { |
2243 | node->upd_row = NULL; |
2244 | node->upd_ext = NULL; |
2245 | } else { |
2246 | node->upd_row = dtuple_copy(node->row, node->heap); |
2247 | row_upd_replace(node->upd_row, &node->upd_ext, |
2248 | clust_index, node->update, node->heap); |
2249 | } |
2250 | |
2251 | if (UNIV_LIKELY_NULL(heap)) { |
2252 | mem_heap_free(heap); |
2253 | } |
2254 | } |
2255 | |
2256 | /***********************************************************//** |
2257 | Updates a secondary index entry of a row. |
2258 | @return DB_SUCCESS if operation successfully completed, else error |
2259 | code or DB_LOCK_WAIT */ |
2260 | static MY_ATTRIBUTE((nonnull, warn_unused_result)) |
2261 | dberr_t |
2262 | row_upd_sec_index_entry( |
2263 | /*====================*/ |
2264 | upd_node_t* node, /*!< in: row update node */ |
2265 | que_thr_t* thr) /*!< in: query thread */ |
2266 | { |
2267 | mtr_t mtr; |
2268 | const rec_t* rec; |
2269 | btr_pcur_t pcur; |
2270 | mem_heap_t* heap; |
2271 | dtuple_t* entry; |
2272 | dict_index_t* index; |
2273 | btr_cur_t* btr_cur; |
2274 | ibool referenced; |
2275 | dberr_t err = DB_SUCCESS; |
2276 | trx_t* trx = thr_get_trx(thr); |
2277 | ulint mode; |
2278 | ulint flags; |
2279 | enum row_search_result search_result; |
2280 | |
2281 | ut_ad(trx->id != 0); |
2282 | |
2283 | index = node->index; |
2284 | |
2285 | referenced = row_upd_index_is_referenced(index, trx); |
2286 | #ifdef WITH_WSREP |
2287 | bool foreign = wsrep_row_upd_index_is_foreign(index, trx); |
2288 | #endif /* WITH_WSREP */ |
2289 | |
2290 | heap = mem_heap_create(1024); |
2291 | |
2292 | /* Build old index entry */ |
2293 | entry = row_build_index_entry(node->row, node->ext, index, heap); |
2294 | ut_a(entry); |
2295 | |
2296 | log_free_check(); |
2297 | |
	DEBUG_SYNC_C_IF_THD(trx->mysql_thd,
			    "before_row_upd_sec_index_entry");
2300 | |
2301 | mtr.start(); |
2302 | |
2303 | switch (index->table->space->id) { |
2304 | case SRV_TMP_SPACE_ID: |
2305 | mtr.set_log_mode(MTR_LOG_NO_REDO); |
2306 | flags = BTR_NO_LOCKING_FLAG; |
2307 | break; |
2308 | default: |
2309 | index->set_modified(mtr); |
2310 | /* fall through */ |
2311 | case IBUF_SPACE_ID: |
2312 | flags = index->table->no_rollback() ? BTR_NO_ROLLBACK : 0; |
2313 | break; |
2314 | } |
2315 | |
2316 | if (!index->is_committed()) { |
2317 | /* The index->online_status may change if the index is |
2318 | or was being created online, but not committed yet. It |
2319 | is protected by index->lock. */ |
2320 | |
2321 | mtr_s_lock(dict_index_get_lock(index), &mtr); |
2322 | |
2323 | switch (dict_index_get_online_status(index)) { |
2324 | case ONLINE_INDEX_COMPLETE: |
2325 | /* This is a normal index. Do not log anything. |
2326 | Perform the update on the index tree directly. */ |
2327 | break; |
2328 | case ONLINE_INDEX_CREATION: |
2329 | /* Log a DELETE and optionally INSERT. */ |
2330 | row_log_online_op(index, entry, 0); |
2331 | |
2332 | if (!node->is_delete) { |
2333 | mem_heap_empty(heap); |
2334 | entry = row_build_index_entry( |
2335 | node->upd_row, node->upd_ext, |
2336 | index, heap); |
2337 | ut_a(entry); |
2338 | row_log_online_op(index, entry, trx->id); |
2339 | } |
2340 | /* fall through */ |
2341 | case ONLINE_INDEX_ABORTED: |
2342 | case ONLINE_INDEX_ABORTED_DROPPED: |
2343 | mtr_commit(&mtr); |
2344 | goto func_exit; |
2345 | } |
2346 | |
		/* We can only buffer delete-mark operations if there
		are no foreign key constraints referring to the index.
		Change buffering is disabled for temporary tables and
		spatial indexes. */
2351 | mode = (referenced || index->table->is_temporary() |
2352 | || dict_index_is_spatial(index)) |
2353 | ? BTR_MODIFY_LEAF_ALREADY_S_LATCHED |
2354 | : BTR_DELETE_MARK_LEAF_ALREADY_S_LATCHED; |
2355 | } else { |
2356 | /* For secondary indexes, |
2357 | index->online_status==ONLINE_INDEX_COMPLETE if |
2358 | index->is_committed(). */ |
2359 | ut_ad(!dict_index_is_online_ddl(index)); |
2360 | |
		/* We can only buffer delete-mark operations if there
		are no foreign key constraints referring to the index.
		Change buffering is disabled for temporary tables and
		spatial indexes. */
2365 | mode = (referenced || index->table->is_temporary() |
2366 | || dict_index_is_spatial(index)) |
2367 | ? BTR_MODIFY_LEAF |
2368 | : BTR_DELETE_MARK_LEAF; |
2369 | } |
2370 | |
2371 | if (dict_index_is_spatial(index)) { |
2372 | ut_ad(mode & BTR_MODIFY_LEAF); |
2373 | mode |= BTR_RTREE_DELETE_MARK; |
2374 | } |
2375 | |
2376 | /* Set the query thread, so that ibuf_insert_low() will be |
2377 | able to invoke thd_get_trx(). */ |
2378 | btr_pcur_get_btr_cur(&pcur)->thr = thr; |
2379 | |
2380 | search_result = row_search_index_entry(index, entry, mode, |
2381 | &pcur, &mtr); |
2382 | |
2383 | btr_cur = btr_pcur_get_btr_cur(&pcur); |
2384 | |
2385 | rec = btr_cur_get_rec(btr_cur); |
2386 | |
2387 | switch (search_result) { |
2388 | case ROW_NOT_DELETED_REF: /* should only occur for BTR_DELETE */ |
2389 | ut_error; |
2390 | break; |
2391 | case ROW_BUFFERED: |
2392 | /* Entry was delete marked already. */ |
2393 | break; |
2394 | |
2395 | case ROW_NOT_FOUND: |
2396 | if (!index->is_committed()) { |
2397 | /* When online CREATE INDEX copied the update |
2398 | that we already made to the clustered index, |
2399 | and completed the secondary index creation |
2400 | before we got here, the old secondary index |
2401 | record would not exist. The CREATE INDEX |
2402 | should be waiting for a MySQL meta-data lock |
2403 | upgrade at least until this UPDATE returns. |
2404 | After that point, set_committed(true) would be |
2405 | invoked by commit_inplace_alter_table(). */ |
2406 | break; |
2407 | } |
2408 | |
2409 | if (dict_index_is_spatial(index) && btr_cur->rtr_info->fd_del) { |
			/* We found the record, but it was delete-marked */
2411 | break; |
2412 | } |
2413 | |
2414 | ib::error() |
2415 | << "Record in index " << index->name |
2416 | << " of table " << index->table->name |
2417 | << " was not found on update: " << *entry |
2418 | << " at: " << rec_index_print(rec, index); |
2419 | #ifdef UNIV_DEBUG |
2420 | mtr_commit(&mtr); |
2421 | mtr_start(&mtr); |
2422 | ut_ad(btr_validate_index(index, 0, false)); |
2423 | ut_ad(0); |
2424 | #endif /* UNIV_DEBUG */ |
2425 | break; |
2426 | case ROW_FOUND: |
2427 | ut_ad(err == DB_SUCCESS); |
2428 | |
2429 | /* Delete mark the old index record; it can already be |
2430 | delete marked if we return after a lock wait in |
2431 | row_ins_sec_index_entry() below */ |
2432 | if (!rec_get_deleted_flag( |
2433 | rec, dict_table_is_comp(index->table))) { |
2434 | err = btr_cur_del_mark_set_sec_rec( |
2435 | flags, btr_cur, TRUE, thr, &mtr); |
2436 | if (err != DB_SUCCESS) { |
2437 | break; |
2438 | } |
2439 | #ifdef WITH_WSREP |
2440 | if (!referenced && foreign |
2441 | && wsrep_must_process_fk(node, trx) |
2442 | && !wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { |
2443 | |
2444 | ulint* offsets = rec_get_offsets( |
2445 | rec, index, NULL, true, |
2446 | ULINT_UNDEFINED, &heap); |
2447 | |
2448 | err = wsrep_row_upd_check_foreign_constraints( |
2449 | node, &pcur, index->table, |
2450 | index, offsets, thr, &mtr); |
2451 | |
2452 | switch (err) { |
2453 | case DB_SUCCESS: |
2454 | case DB_NO_REFERENCED_ROW: |
2455 | err = DB_SUCCESS; |
2456 | break; |
2457 | case DB_DEADLOCK: |
2458 | if (wsrep_debug) { |
2459 | ib::warn() << "WSREP: sec index FK check fail for deadlock" |
2460 | << " index " << index->name |
2461 | << " table " << index->table->name; |
2462 | } |
2463 | break; |
2464 | default: |
2465 | ib::error() << "WSREP: referenced FK check fail: " << ut_strerr(err) |
2466 | << " index " << index->name |
2467 | << " table " << index->table->name; |
2468 | |
2469 | break; |
2470 | } |
2471 | } |
2472 | #endif /* WITH_WSREP */ |
2473 | } |
2474 | |
2475 | ut_ad(err == DB_SUCCESS); |
2476 | |
2477 | if (referenced) { |
2478 | |
2479 | ulint* offsets; |
2480 | |
2481 | offsets = rec_get_offsets( |
2482 | rec, index, NULL, true, ULINT_UNDEFINED, |
2483 | &heap); |
2484 | |
2485 | /* NOTE that the following call loses |
2486 | the position of pcur ! */ |
2487 | err = row_upd_check_references_constraints( |
2488 | node, &pcur, index->table, |
2489 | index, offsets, thr, &mtr); |
2490 | } |
2491 | } |
2492 | |
2493 | btr_pcur_close(&pcur); |
2494 | mtr_commit(&mtr); |
2495 | |
2496 | if (node->is_delete == PLAIN_DELETE || err != DB_SUCCESS) { |
2497 | |
2498 | goto func_exit; |
2499 | } |
2500 | |
2501 | mem_heap_empty(heap); |
2502 | |
2503 | /* Build a new index entry */ |
2504 | entry = row_build_index_entry(node->upd_row, node->upd_ext, |
2505 | index, heap); |
2506 | ut_a(entry); |
2507 | |
2508 | /* Insert new index entry */ |
2509 | err = row_ins_sec_index_entry(index, entry, thr, false); |
2510 | |
2511 | func_exit: |
2512 | mem_heap_free(heap); |
2513 | |
2514 | return(err); |
2515 | } |
2516 | |
2517 | /***********************************************************//** |
2518 | Updates the secondary index record if it is changed in the row update or |
2519 | deletes it if this is a delete. |
2520 | @return DB_SUCCESS if operation successfully completed, else error |
2521 | code or DB_LOCK_WAIT */ |
2522 | static MY_ATTRIBUTE((nonnull, warn_unused_result)) |
2523 | dberr_t |
2524 | row_upd_sec_step( |
2525 | /*=============*/ |
2526 | upd_node_t* node, /*!< in: row update node */ |
2527 | que_thr_t* thr) /*!< in: query thread */ |
2528 | { |
2529 | ut_ad((node->state == UPD_NODE_UPDATE_ALL_SEC) |
2530 | || (node->state == UPD_NODE_UPDATE_SOME_SEC)); |
2531 | ut_ad(!dict_index_is_clust(node->index)); |
2532 | |
2533 | if (node->state == UPD_NODE_UPDATE_ALL_SEC |
2534 | || row_upd_changes_ord_field_binary(node->index, node->update, |
2535 | thr, node->row, node->ext)) { |
2536 | return(row_upd_sec_index_entry(node, thr)); |
2537 | } |
2538 | |
2539 | return(DB_SUCCESS); |
2540 | } |
2541 | |
2542 | #ifdef UNIV_DEBUG |
2543 | # define row_upd_clust_rec_by_insert_inherit(rec,offsets,entry,update) \ |
2544 | row_upd_clust_rec_by_insert_inherit_func(rec,offsets,entry,update) |
2545 | #else /* UNIV_DEBUG */ |
2546 | # define row_upd_clust_rec_by_insert_inherit(rec,offsets,entry,update) \ |
2547 | row_upd_clust_rec_by_insert_inherit_func(rec,entry,update) |
2548 | #endif /* UNIV_DEBUG */ |
2549 | /*******************************************************************//** |
2550 | Mark non-updated off-page columns inherited when the primary key is |
2551 | updated. We must mark them as inherited in entry, so that they are not |
2552 | freed in a rollback. A limited version of this function used to be |
2553 | called btr_cur_mark_dtuple_inherited_extern(). |
2554 | @return whether any columns were inherited */ |
2555 | static |
2556 | bool |
2557 | row_upd_clust_rec_by_insert_inherit_func( |
2558 | /*=====================================*/ |
2559 | const rec_t* rec, /*!< in: old record, or NULL */ |
2560 | #ifdef UNIV_DEBUG |
2561 | const ulint* offsets,/*!< in: rec_get_offsets(rec), or NULL */ |
2562 | #endif /* UNIV_DEBUG */ |
2563 | dtuple_t* entry, /*!< in/out: updated entry to be |
2564 | inserted into the clustered index */ |
2565 | const upd_t* update) /*!< in: update vector */ |
2566 | { |
2567 | bool inherit = false; |
2568 | ulint i; |
2569 | |
2570 | ut_ad(!rec == !offsets); |
2571 | ut_ad(!rec || rec_offs_any_extern(offsets)); |
2572 | |
2573 | for (i = 0; i < dtuple_get_n_fields(entry); i++) { |
2574 | dfield_t* dfield = dtuple_get_nth_field(entry, i); |
2575 | byte* data; |
2576 | ulint len; |
2577 | |
2578 | ut_ad(!offsets |
2579 | || !rec_offs_nth_extern(offsets, i) |
2580 | == !dfield_is_ext(dfield) |
2581 | || upd_get_field_by_field_no(update, i, false)); |
2582 | if (!dfield_is_ext(dfield) |
2583 | || upd_get_field_by_field_no(update, i, false)) { |
2584 | continue; |
2585 | } |
2586 | |
2587 | #ifdef UNIV_DEBUG |
2588 | if (UNIV_LIKELY(rec != NULL)) { |
2589 | ut_ad(!rec_offs_nth_default(offsets, i)); |
2590 | const byte* rec_data |
2591 | = rec_get_nth_field(rec, offsets, i, &len); |
2592 | ut_ad(len == dfield_get_len(dfield)); |
2593 | ut_ad(len != UNIV_SQL_NULL); |
2594 | ut_ad(len >= BTR_EXTERN_FIELD_REF_SIZE); |
2595 | |
2596 | rec_data += len - BTR_EXTERN_FIELD_REF_SIZE; |
2597 | |
2598 | /* The pointer must not be zero. */ |
2599 | ut_ad(memcmp(rec_data, field_ref_zero, |
2600 | BTR_EXTERN_FIELD_REF_SIZE)); |
2601 | /* The BLOB must be owned. */ |
2602 | ut_ad(!(rec_data[BTR_EXTERN_LEN] |
2603 | & BTR_EXTERN_OWNER_FLAG)); |
2604 | } |
2605 | #endif /* UNIV_DEBUG */ |
2606 | |
2607 | len = dfield_get_len(dfield); |
2608 | ut_a(len != UNIV_SQL_NULL); |
2609 | ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE); |
2610 | |
2611 | data = static_cast<byte*>(dfield_get_data(dfield)); |
2612 | |
2613 | data += len - BTR_EXTERN_FIELD_REF_SIZE; |
2614 | /* The pointer must not be zero. */ |
2615 | ut_a(memcmp(data, field_ref_zero, BTR_EXTERN_FIELD_REF_SIZE)); |
2616 | |
2617 | /* The BLOB must be owned, unless we are resuming from |
2618 | a lock wait and we already had disowned the BLOB. */ |
2619 | ut_a(rec == NULL |
2620 | || !(data[BTR_EXTERN_LEN] & BTR_EXTERN_OWNER_FLAG)); |
2621 | data[BTR_EXTERN_LEN] &= ~BTR_EXTERN_OWNER_FLAG; |
2622 | data[BTR_EXTERN_LEN] |= BTR_EXTERN_INHERITED_FLAG; |
2623 | /* The BTR_EXTERN_INHERITED_FLAG only matters in |
2624 | rollback of a fresh insert. Purge will always free |
2625 | the extern fields of a delete-marked row. */ |
2626 | |
2627 | inherit = true; |
2628 | } |
2629 | |
2630 | return(inherit); |
2631 | } |
2632 | |
2633 | /***********************************************************//** |
2634 | Marks the clustered index record deleted and inserts the updated version |
2635 | of the record to the index. This function should be used when the ordering |
2636 | fields of the clustered index record change. This should be quite rare in |
2637 | database applications. |
2638 | @return DB_SUCCESS if operation successfully completed, else error |
2639 | code or DB_LOCK_WAIT */ |
2640 | static MY_ATTRIBUTE((nonnull, warn_unused_result)) |
2641 | dberr_t |
2642 | row_upd_clust_rec_by_insert( |
2643 | /*========================*/ |
2644 | upd_node_t* node, /*!< in/out: row update node */ |
2645 | dict_index_t* index, /*!< in: clustered index of the record */ |
2646 | que_thr_t* thr, /*!< in: query thread */ |
2647 | ibool referenced,/*!< in: TRUE if index may be referenced in |
2648 | a foreign key constraint */ |
2649 | #ifdef WITH_WSREP |
2650 | bool foreign,/*!< in: whether this is a foreign key */ |
2651 | #endif |
2652 | mtr_t* mtr) /*!< in/out: mtr; gets committed here */ |
2653 | { |
2654 | mem_heap_t* heap; |
2655 | btr_pcur_t* pcur; |
2656 | btr_cur_t* btr_cur; |
2657 | trx_t* trx; |
2658 | dict_table_t* table; |
2659 | dtuple_t* entry; |
2660 | dberr_t err; |
2661 | rec_t* rec; |
2662 | ulint* offsets = NULL; |
2663 | |
2664 | ut_ad(node); |
2665 | ut_ad(dict_index_is_clust(index)); |
2666 | |
2667 | trx = thr_get_trx(thr); |
2668 | table = node->table; |
2669 | pcur = node->pcur; |
2670 | btr_cur = btr_pcur_get_btr_cur(pcur); |
2671 | |
2672 | heap = mem_heap_create(1000); |
2673 | |
2674 | entry = row_build_index_entry_low(node->upd_row, node->upd_ext, |
2675 | index, heap, ROW_BUILD_FOR_INSERT); |
2676 | if (index->is_instant()) entry->trim(*index); |
2677 | ut_ad(dtuple_get_info_bits(entry) == 0); |
2678 | |
2679 | row_upd_index_entry_sys_field(entry, index, DATA_TRX_ID, trx->id); |
2680 | |
2681 | switch (node->state) { |
2682 | default: |
2683 | ut_error; |
2684 | case UPD_NODE_INSERT_CLUSTERED: |
2685 | /* A lock wait occurred in row_ins_clust_index_entry() in |
2686 | the previous invocation of this function. */ |
2687 | row_upd_clust_rec_by_insert_inherit( |
2688 | NULL, NULL, entry, node->update); |
2689 | break; |
2690 | case UPD_NODE_UPDATE_CLUSTERED: |
2691 | /* This is the first invocation of the function where |
2692 | we update the primary key. Delete-mark the old record |
2693 | in the clustered index and prepare to insert a new entry. */ |
2694 | rec = btr_cur_get_rec(btr_cur); |
2695 | offsets = rec_get_offsets(rec, index, NULL, true, |
2696 | ULINT_UNDEFINED, &heap); |
2697 | ut_ad(page_rec_is_user_rec(rec)); |
2698 | |
2699 | if (rec_get_deleted_flag(rec, rec_offs_comp(offsets))) { |
			/* If the clustered index record is already
			delete-marked, then we are here after a DB_LOCK_WAIT.
			Skip delete-marking the clustered index record and
			disowning its BLOBs. */
2704 | ut_ad(row_get_rec_trx_id(rec, index, offsets) |
2705 | == trx->id); |
2706 | ut_ad(!trx_undo_roll_ptr_is_insert( |
2707 | row_get_rec_roll_ptr(rec, index, |
2708 | offsets))); |
2709 | goto check_fk; |
2710 | } |
2711 | |
2712 | err = btr_cur_del_mark_set_clust_rec( |
2713 | btr_cur_get_block(btr_cur), rec, index, offsets, |
2714 | thr, node->row, mtr); |
2715 | if (err != DB_SUCCESS) { |
2716 | err_exit: |
2717 | mtr_commit(mtr); |
2718 | mem_heap_free(heap); |
2719 | return(err); |
2720 | } |
2721 | |
		/* If the new row inherits externally stored
		fields (off-page columns a.k.a. BLOBs) from the
		delete-marked old record, mark them disowned by the
		old record and owned by the new entry. */
2726 | |
2727 | if (rec_offs_any_extern(offsets)) { |
2728 | if (row_upd_clust_rec_by_insert_inherit( |
2729 | rec, offsets, entry, node->update)) { |
2730 | /* The blobs are disowned here, expecting the |
2731 | insert down below to inherit them. But if the |
2732 | insert fails, then this disown will be undone |
2733 | when the operation is rolled back. */ |
2734 | btr_cur_disown_inherited_fields( |
2735 | btr_cur_get_page_zip(btr_cur), |
2736 | rec, index, offsets, node->update, |
2737 | mtr); |
2738 | } |
2739 | } |
2740 | check_fk: |
2741 | if (referenced) { |
2742 | /* NOTE that the following call loses |
2743 | the position of pcur ! */ |
2744 | |
2745 | err = row_upd_check_references_constraints( |
2746 | node, pcur, table, index, offsets, thr, mtr); |
2747 | |
2748 | if (err != DB_SUCCESS) { |
2749 | goto err_exit; |
2750 | } |
2751 | #ifdef WITH_WSREP |
2752 | } else if (foreign && wsrep_must_process_fk(node, trx)) { |
2753 | err = wsrep_row_upd_check_foreign_constraints( |
2754 | node, pcur, table, index, offsets, thr, mtr); |
2755 | |
2756 | switch (err) { |
2757 | case DB_SUCCESS: |
2758 | case DB_NO_REFERENCED_ROW: |
2759 | err = DB_SUCCESS; |
2760 | break; |
2761 | case DB_DEADLOCK: |
2762 | if (wsrep_debug) { |
2763 | ib::warn() << "WSREP: sec index FK check fail for deadlock" |
2764 | << " index " << index->name |
2765 | << " table " << index->table->name; |
2766 | } |
2767 | goto err_exit; |
2768 | default: |
2769 | ib::error() << "WSREP: referenced FK check fail: " << ut_strerr(err) |
2770 | << " index " << index->name |
2771 | << " table " << index->table->name; |
2772 | goto err_exit; |
2773 | } |
2774 | #endif /* WITH_WSREP */ |
2775 | } |
2776 | } |
2777 | |
2778 | mtr_commit(mtr); |
2779 | |
2780 | err = row_ins_clust_index_entry( |
2781 | index, entry, thr, |
2782 | node->upd_ext ? node->upd_ext->n_ext : 0, false); |
2783 | node->state = UPD_NODE_INSERT_CLUSTERED; |
2784 | |
2785 | mem_heap_free(heap); |
2786 | |
2787 | return(err); |
2788 | } |
2789 | |
2790 | /***********************************************************//** |
2791 | Updates a clustered index record of a row when the ordering fields do |
2792 | not change. |
2793 | @return DB_SUCCESS if operation successfully completed, else error |
2794 | code or DB_LOCK_WAIT */ |
2795 | static MY_ATTRIBUTE((nonnull, warn_unused_result)) |
2796 | dberr_t |
2797 | row_upd_clust_rec( |
2798 | /*==============*/ |
2799 | ulint flags, /*!< in: undo logging and locking flags */ |
2800 | upd_node_t* node, /*!< in: row update node */ |
2801 | dict_index_t* index, /*!< in: clustered index */ |
2802 | ulint* offsets,/*!< in: rec_get_offsets() on node->pcur */ |
2803 | mem_heap_t** offsets_heap, |
2804 | /*!< in/out: memory heap, can be emptied */ |
2805 | que_thr_t* thr, /*!< in: query thread */ |
2806 | mtr_t* mtr) /*!< in: mtr; gets committed here */ |
2807 | { |
2808 | mem_heap_t* heap = NULL; |
2809 | big_rec_t* big_rec = NULL; |
2810 | btr_pcur_t* pcur; |
2811 | btr_cur_t* btr_cur; |
2812 | dberr_t err; |
2813 | const dtuple_t* rebuilt_old_pk = NULL; |
2814 | |
2815 | ut_ad(node); |
2816 | ut_ad(dict_index_is_clust(index)); |
2817 | ut_ad(!thr_get_trx(thr)->in_rollback); |
2818 | ut_ad(!node->table->skip_alter_undo); |
2819 | |
2820 | pcur = node->pcur; |
2821 | btr_cur = btr_pcur_get_btr_cur(pcur); |
2822 | |
2823 | ut_ad(btr_cur_get_index(btr_cur) == index); |
2824 | ut_ad(!rec_get_deleted_flag(btr_cur_get_rec(btr_cur), |
2825 | dict_table_is_comp(index->table))); |
2826 | ut_ad(rec_offs_validate(btr_cur_get_rec(btr_cur), index, offsets)); |
2827 | |
2828 | if (dict_index_is_online_ddl(index)) { |
2829 | rebuilt_old_pk = row_log_table_get_pk( |
2830 | btr_cur_get_rec(btr_cur), index, offsets, NULL, &heap); |
2831 | } |
2832 | |
	/* Try optimistic updating of the record, keeping changes within
	the page; we do not check locks because we assume that we hold an
	x-lock on the record to be updated */
2836 | |
2837 | if (node->cmpl_info & UPD_NODE_NO_SIZE_CHANGE) { |
2838 | err = btr_cur_update_in_place( |
2839 | flags | BTR_NO_LOCKING_FLAG, btr_cur, |
2840 | offsets, node->update, |
2841 | node->cmpl_info, thr, thr_get_trx(thr)->id, mtr); |
2842 | } else { |
2843 | err = btr_cur_optimistic_update( |
2844 | flags | BTR_NO_LOCKING_FLAG, btr_cur, |
2845 | &offsets, offsets_heap, node->update, |
2846 | node->cmpl_info, thr, thr_get_trx(thr)->id, mtr); |
2847 | } |
2848 | |
2849 | if (err == DB_SUCCESS) { |
2850 | goto success; |
2851 | } |
2852 | |
2853 | mtr_commit(mtr); |
2854 | |
2855 | if (buf_LRU_buf_pool_running_out()) { |
2856 | |
2857 | err = DB_LOCK_TABLE_FULL; |
2858 | goto func_exit; |
2859 | } |
2860 | /* We may have to modify the tree structure: do a pessimistic descent |
2861 | down the index tree */ |
2862 | |
2863 | mtr->start(); |
2864 | |
2865 | if (index->table->is_temporary()) { |
2866 | /* Disable locking, because temporary tables are never |
2867 | shared between transactions or connections. */ |
2868 | flags |= BTR_NO_LOCKING_FLAG; |
2869 | mtr->set_log_mode(MTR_LOG_NO_REDO); |
2870 | } else { |
2871 | index->set_modified(*mtr); |
2872 | } |
2873 | |
2874 | /* NOTE: this transaction has an s-lock or x-lock on the record and |
2875 | therefore other transactions cannot modify the record when we have no |
2876 | latch on the page. In addition, we assume that other query threads of |
2877 | the same transaction do not modify the record in the meantime. |
2878 | Therefore we can assert that the restoration of the cursor succeeds. */ |
2879 | |
2880 | ut_a(btr_pcur_restore_position(BTR_MODIFY_TREE, pcur, mtr)); |
2881 | |
2882 | ut_ad(!rec_get_deleted_flag(btr_pcur_get_rec(pcur), |
2883 | dict_table_is_comp(index->table))); |
2884 | |
2885 | if (!heap) { |
2886 | heap = mem_heap_create(1024); |
2887 | } |
2888 | |
2889 | err = btr_cur_pessimistic_update( |
2890 | flags | BTR_NO_LOCKING_FLAG | BTR_KEEP_POS_FLAG, btr_cur, |
2891 | &offsets, offsets_heap, heap, &big_rec, |
2892 | node->update, node->cmpl_info, |
2893 | thr, thr_get_trx(thr)->id, mtr); |
2894 | if (big_rec) { |
2895 | ut_a(err == DB_SUCCESS); |
2896 | |
2897 | DEBUG_SYNC_C("before_row_upd_extern" ); |
2898 | err = btr_store_big_rec_extern_fields( |
2899 | pcur, offsets, big_rec, mtr, BTR_STORE_UPDATE); |
2900 | DEBUG_SYNC_C("after_row_upd_extern" ); |
2901 | } |
2902 | |
2903 | if (err == DB_SUCCESS) { |
2904 | success: |
2905 | if (dict_index_is_online_ddl(index)) { |
2906 | row_log_table_update( |
2907 | btr_cur_get_rec(btr_cur), |
2908 | index, offsets, rebuilt_old_pk); |
2909 | } |
2910 | } |
2911 | |
2912 | mtr_commit(mtr); |
2913 | func_exit: |
2914 | if (heap) { |
2915 | mem_heap_free(heap); |
2916 | } |
2917 | |
2918 | if (big_rec) { |
2919 | dtuple_big_rec_free(big_rec); |
2920 | } |
2921 | |
2922 | return(err); |
2923 | } |
2924 | |
2925 | /***********************************************************//** |
2926 | Delete marks a clustered index record. |
2927 | @return DB_SUCCESS if operation successfully completed, else error code */ |
2928 | static MY_ATTRIBUTE((nonnull, warn_unused_result)) |
2929 | dberr_t |
2930 | row_upd_del_mark_clust_rec( |
2931 | /*=======================*/ |
2932 | upd_node_t* node, /*!< in: row update node */ |
2933 | dict_index_t* index, /*!< in: clustered index */ |
2934 | ulint* offsets,/*!< in/out: rec_get_offsets() for the |
2935 | record under the cursor */ |
2936 | que_thr_t* thr, /*!< in: query thread */ |
2937 | ibool referenced, |
2938 | /*!< in: TRUE if index may be referenced in |
2939 | a foreign key constraint */ |
2940 | #ifdef WITH_WSREP |
2941 | bool foreign,/*!< in: whether this is a foreign key */ |
2942 | #endif |
2943 | mtr_t* mtr) /*!< in: mtr; gets committed here */ |
2944 | { |
2945 | btr_pcur_t* pcur; |
2946 | btr_cur_t* btr_cur; |
2947 | dberr_t err; |
2948 | rec_t* rec; |
2949 | trx_t* trx = thr_get_trx(thr); |
2950 | |
2951 | ut_ad(node); |
2952 | ut_ad(dict_index_is_clust(index)); |
2953 | ut_ad(node->is_delete == PLAIN_DELETE); |
2954 | |
2955 | pcur = node->pcur; |
2956 | btr_cur = btr_pcur_get_btr_cur(pcur); |
2957 | |
	/* Store the row because we also have to build the secondary
	index entries */
2960 | |
2961 | row_upd_store_row(node, trx->mysql_thd, |
2962 | thr->prebuilt && thr->prebuilt->table == node->table |
2963 | ? thr->prebuilt->m_mysql_table : NULL); |
2964 | |
2965 | /* Mark the clustered index record deleted; we do not have to check |
2966 | locks, because we assume that we have an x-lock on the record */ |
2967 | |
2968 | rec = btr_cur_get_rec(btr_cur); |
2969 | |
2970 | err = btr_cur_del_mark_set_clust_rec( |
2971 | btr_cur_get_block(btr_cur), rec, |
2972 | index, offsets, thr, node->row, mtr); |
2973 | |
2974 | if (err != DB_SUCCESS) { |
2975 | } else if (referenced) { |
2976 | /* NOTE that the following call loses the position of pcur ! */ |
2977 | |
2978 | err = row_upd_check_references_constraints( |
2979 | node, pcur, index->table, index, offsets, thr, mtr); |
2980 | #ifdef WITH_WSREP |
2981 | } else if (foreign && wsrep_must_process_fk(node, trx)) { |
2982 | err = wsrep_row_upd_check_foreign_constraints( |
2983 | node, pcur, index->table, index, offsets, thr, mtr); |
2984 | |
2985 | switch (err) { |
2986 | case DB_SUCCESS: |
2987 | case DB_NO_REFERENCED_ROW: |
2988 | err = DB_SUCCESS; |
2989 | break; |
2990 | case DB_DEADLOCK: |
2991 | if (wsrep_debug) { |
2992 | ib::warn() << "WSREP: sec index FK check fail for deadlock" |
2993 | << " index " << index->name |
2994 | << " table " << index->table->name; |
2995 | } |
2996 | break; |
2997 | default: |
2998 | ib::error() << "WSREP: referenced FK check fail: " << ut_strerr(err) |
2999 | << " index " << index->name |
3000 | << " table " << index->table->name; |
3001 | |
3002 | break; |
3003 | } |
3004 | #endif /* WITH_WSREP */ |
3005 | } |
3006 | |
3007 | mtr_commit(mtr); |
3008 | |
3009 | return(err); |
3010 | } |
3011 | |
3012 | /***********************************************************//** |
3013 | Updates the clustered index record. |
3014 | @return DB_SUCCESS if operation successfully completed, DB_LOCK_WAIT |
3015 | in case of a lock wait, else error code */ |
3016 | static MY_ATTRIBUTE((nonnull, warn_unused_result)) |
3017 | dberr_t |
3018 | row_upd_clust_step( |
3019 | /*===============*/ |
3020 | upd_node_t* node, /*!< in: row update node */ |
3021 | que_thr_t* thr) /*!< in: query thread */ |
3022 | { |
3023 | dict_index_t* index; |
3024 | btr_pcur_t* pcur; |
3025 | ibool success; |
3026 | dberr_t err; |
3027 | mtr_t mtr; |
3028 | rec_t* rec; |
3029 | mem_heap_t* heap = NULL; |
3030 | ulint offsets_[REC_OFFS_NORMAL_SIZE]; |
3031 | ulint* offsets; |
3032 | ibool referenced; |
3033 | ulint flags; |
3034 | trx_t* trx = thr_get_trx(thr); |
3035 | |
3036 | rec_offs_init(offsets_); |
3037 | |
3038 | index = dict_table_get_first_index(node->table); |
3039 | |
3040 | referenced = row_upd_index_is_referenced(index, trx); |
3041 | |
3042 | #ifdef WITH_WSREP |
3043 | const bool foreign = wsrep_row_upd_index_is_foreign(index, trx); |
3044 | #endif |
3045 | |
3046 | pcur = node->pcur; |
3047 | |
3048 | /* We have to restore the cursor to its position */ |
3049 | |
3050 | mtr.start(); |
3051 | |
3052 | if (node->table->is_temporary()) { |
3053 | /* Disable locking, because temporary tables are |
3054 | private to the connection (no concurrent access). */ |
3055 | flags = node->table->no_rollback() |
3056 | ? BTR_NO_ROLLBACK |
3057 | : BTR_NO_LOCKING_FLAG; |
3058 | /* Redo logging only matters for persistent tables. */ |
3059 | mtr.set_log_mode(MTR_LOG_NO_REDO); |
3060 | } else { |
3061 | flags = node->table->no_rollback() ? BTR_NO_ROLLBACK : 0; |
3062 | index->set_modified(mtr); |
3063 | } |
3064 | |
3065 | /* If the restoration does not succeed, then the same |
3066 | transaction has deleted the record on which the cursor was, |
3067 | and that is an SQL error. If the restoration succeeds, it may |
3068 | still be that the same transaction has successively deleted |
3069 | and inserted a record with the same ordering fields, but in |
3070 | that case we know that the transaction has at least an |
3071 | implicit x-lock on the record. */ |
3072 | |
3073 | ut_a(pcur->rel_pos == BTR_PCUR_ON); |
3074 | |
3075 | ulint mode; |
3076 | |
	DEBUG_SYNC_C_IF_THD(
		thr_get_trx(thr)->mysql_thd,
		"innodb_row_upd_clust_step_enter");
3080 | |
3081 | if (dict_index_is_online_ddl(index)) { |
3082 | ut_ad(node->table->id != DICT_INDEXES_ID); |
3083 | mode = BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED; |
3084 | mtr_s_lock(dict_index_get_lock(index), &mtr); |
3085 | } else { |
3086 | mode = BTR_MODIFY_LEAF; |
3087 | } |
3088 | |
3089 | success = btr_pcur_restore_position(mode, pcur, &mtr); |
3090 | |
3091 | if (!success) { |
3092 | err = DB_RECORD_NOT_FOUND; |
3093 | |
3094 | mtr_commit(&mtr); |
3095 | |
3096 | return(err); |
3097 | } |
3098 | |
	/* If this is a row in the SYS_INDEXES table of the data dictionary,
	then we have to free the file segments of the index tree associated
	with the index */
3102 | |
3103 | if (node->is_delete == PLAIN_DELETE |
3104 | && node->table->id == DICT_INDEXES_ID) { |
3105 | |
3106 | ut_ad(!dict_index_is_online_ddl(index)); |
3107 | |
3108 | dict_drop_index_tree( |
3109 | btr_pcur_get_rec(pcur), pcur, &mtr); |
3110 | |
3111 | mtr.commit(); |
3112 | |
3113 | mtr.start(); |
3114 | index->set_modified(mtr); |
3115 | |
3116 | success = btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur, |
3117 | &mtr); |
3118 | if (!success) { |
3119 | err = DB_ERROR; |
3120 | |
3121 | mtr.commit(); |
3122 | |
3123 | return(err); |
3124 | } |
3125 | } |
3126 | |
3127 | rec = btr_pcur_get_rec(pcur); |
3128 | offsets = rec_get_offsets(rec, index, offsets_, true, |
3129 | ULINT_UNDEFINED, &heap); |
3130 | |
3131 | if (!flags && !node->has_clust_rec_x_lock) { |
3132 | err = lock_clust_rec_modify_check_and_lock( |
3133 | 0, btr_pcur_get_block(pcur), |
3134 | rec, index, offsets, thr); |
3135 | if (err != DB_SUCCESS) { |
3136 | mtr.commit(); |
3137 | goto exit_func; |
3138 | } |
3139 | } |
3140 | |
3141 | ut_ad(index->table->no_rollback() |
3142 | || lock_trx_has_rec_x_lock(thr_get_trx(thr), index->table, |
3143 | btr_pcur_get_block(pcur), |
3144 | page_rec_get_heap_no(rec))); |
3145 | |
3146 | /* NOTE: the following function calls will also commit mtr */ |
3147 | |
3148 | if (node->is_delete == PLAIN_DELETE) { |
3149 | err = row_upd_del_mark_clust_rec( |
3150 | node, index, offsets, thr, referenced, |
3151 | #ifdef WITH_WSREP |
3152 | foreign, |
3153 | #endif |
3154 | &mtr); |
3155 | |
3156 | if (err == DB_SUCCESS) { |
3157 | node->state = UPD_NODE_UPDATE_ALL_SEC; |
3158 | node->index = dict_table_get_next_index(index); |
3159 | } |
3160 | |
3161 | goto exit_func; |
3162 | } |
3163 | |
3164 | /* If the update is made for MySQL, we already have the update vector |
3165 | ready, else we have to do some evaluation: */ |
3166 | |
3167 | if (UNIV_UNLIKELY(!node->in_mysql_interface)) { |
3168 | /* Copy the necessary columns from clust_rec and calculate the |
3169 | new values to set */ |
3170 | row_upd_copy_columns(rec, offsets, index, |
3171 | UT_LIST_GET_FIRST(node->columns)); |
3172 | row_upd_eval_new_vals(node->update); |
3173 | } |
3174 | |
3175 | if (node->cmpl_info & UPD_NODE_NO_ORD_CHANGE) { |
3176 | |
3177 | err = row_upd_clust_rec( |
3178 | flags, node, index, offsets, &heap, thr, &mtr); |
3179 | goto exit_func; |
3180 | } |
3181 | |
3182 | row_upd_store_row(node, trx->mysql_thd, |
3183 | thr->prebuilt ? thr->prebuilt->m_mysql_table : NULL); |
3184 | |
3185 | if (row_upd_changes_ord_field_binary(index, node->update, thr, |
3186 | node->row, node->ext)) { |
3187 | |
3188 | /* Update causes an ordering field (ordering fields within |
3189 | the B-tree) of the clustered index record to change: perform |
3190 | the update by delete marking and inserting. |
3191 | |
		TODO! What to do about the 'Halloween problem', where an
		update moves the record forward in the index so that it is
		updated again when the cursor arrives there? Solution: the
		read operation must check the undo number of the undo record
		when choosing records to update. Currently MySQL solves the
		problem externally! */
3198 | |
3199 | err = row_upd_clust_rec_by_insert( |
3200 | node, index, thr, referenced, |
3201 | #ifdef WITH_WSREP |
3202 | foreign, |
3203 | #endif |
3204 | &mtr); |
3205 | if (err != DB_SUCCESS) { |
3206 | |
3207 | goto exit_func; |
3208 | } |
3209 | |
3210 | node->state = UPD_NODE_UPDATE_ALL_SEC; |
3211 | } else { |
3212 | err = row_upd_clust_rec( |
3213 | flags, node, index, offsets, &heap, thr, &mtr); |
3214 | |
3215 | if (err != DB_SUCCESS) { |
3216 | |
3217 | goto exit_func; |
3218 | } |
3219 | |
3220 | node->state = UPD_NODE_UPDATE_SOME_SEC; |
3221 | } |
3222 | |
3223 | node->index = dict_table_get_next_index(index); |
3224 | |
3225 | exit_func: |
3226 | if (heap) { |
3227 | mem_heap_free(heap); |
3228 | } |
3229 | return(err); |
3230 | } |
3231 | |
3232 | /***********************************************************//** |
3233 | Updates the affected index records of a row. When the control is transferred |
3234 | to this node, we assume that we have a persistent cursor which was on a |
3235 | record, and the position of the cursor is stored in the cursor. |
3236 | @return DB_SUCCESS if operation successfully completed, else error |
3237 | code or DB_LOCK_WAIT */ |
3238 | static |
3239 | dberr_t |
3240 | row_upd( |
3241 | /*====*/ |
3242 | upd_node_t* node, /*!< in: row update node */ |
3243 | que_thr_t* thr) /*!< in: query thread */ |
3244 | { |
3245 | dberr_t err = DB_SUCCESS; |
3246 | DBUG_ENTER("row_upd" ); |
3247 | |
3248 | ut_ad(!thr_get_trx(thr)->in_rollback); |
3249 | |
3250 | DBUG_PRINT("row_upd" , ("table: %s" , node->table->name.m_name)); |
3251 | DBUG_PRINT("row_upd" , ("info bits in update vector: 0x" ULINTPFx, |
3252 | node->update ? node->update->info_bits: 0)); |
3253 | DBUG_PRINT("row_upd" , ("foreign_id: %s" , |
3254 | node->foreign ? node->foreign->id: "NULL" )); |
3255 | |
3256 | if (UNIV_LIKELY(node->in_mysql_interface)) { |
3257 | |
3258 | /* We do not get the cmpl_info value from the MySQL |
3259 | interpreter: we must calculate it on the fly: */ |
3260 | |
3261 | if (node->is_delete == PLAIN_DELETE |
3262 | || row_upd_changes_some_index_ord_field_binary( |
3263 | node->table, node->update)) { |
3264 | node->cmpl_info = 0; |
3265 | } else { |
3266 | node->cmpl_info = UPD_NODE_NO_ORD_CHANGE; |
3267 | } |
3268 | } |
3269 | |
3270 | switch (node->state) { |
3271 | case UPD_NODE_UPDATE_CLUSTERED: |
3272 | case UPD_NODE_INSERT_CLUSTERED: |
3273 | log_free_check(); |
3274 | |
3275 | err = row_upd_clust_step(node, thr); |
3276 | |
3277 | if (err != DB_SUCCESS) { |
3278 | |
3279 | DBUG_RETURN(err); |
3280 | } |
3281 | } |
3282 | |
	DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd,
			    "after_row_upd_clust");
3285 | |
3286 | if (node->index == NULL |
3287 | || (!node->is_delete |
3288 | && (node->cmpl_info & UPD_NODE_NO_ORD_CHANGE))) { |
3289 | |
3290 | DBUG_RETURN(DB_SUCCESS); |
3291 | } |
3292 | |
	DBUG_EXECUTE_IF("row_upd_skip_sec", node->index = NULL;);
3294 | |
3295 | do { |
3296 | /* Skip corrupted index */ |
3297 | dict_table_skip_corrupt_index(node->index); |
3298 | |
3299 | if (!node->index) { |
3300 | break; |
3301 | } |
3302 | |
3303 | if (node->index->type != DICT_FTS) { |
3304 | err = row_upd_sec_step(node, thr); |
3305 | |
3306 | if (err != DB_SUCCESS) { |
3307 | |
3308 | DBUG_RETURN(err); |
3309 | } |
3310 | } |
3311 | |
3312 | node->index = dict_table_get_next_index(node->index); |
3313 | } while (node->index != NULL); |
3314 | |
3315 | ut_ad(err == DB_SUCCESS); |
3316 | |
3317 | /* Do some cleanup */ |
3318 | |
3319 | if (node->row != NULL) { |
3320 | node->row = NULL; |
3321 | node->ext = NULL; |
3322 | node->upd_row = NULL; |
3323 | node->upd_ext = NULL; |
3324 | mem_heap_empty(node->heap); |
3325 | } |
3326 | |
3327 | node->state = UPD_NODE_UPDATE_CLUSTERED; |
3328 | |
3329 | DBUG_RETURN(err); |
3330 | } |
3331 | |
3332 | /***********************************************************//** |
3333 | Updates a row in a table. This is a high-level function used in SQL execution |
3334 | graphs. |
3335 | @return query thread to run next or NULL */ |
3336 | que_thr_t* |
3337 | row_upd_step( |
3338 | /*=========*/ |
3339 | que_thr_t* thr) /*!< in: query thread */ |
3340 | { |
3341 | upd_node_t* node; |
3342 | sel_node_t* sel_node; |
3343 | que_node_t* parent; |
3344 | dberr_t err = DB_SUCCESS; |
3345 | trx_t* trx; |
3346 | DBUG_ENTER("row_upd_step" ); |
3347 | |
3348 | ut_ad(thr); |
3349 | |
3350 | trx = thr_get_trx(thr); |
3351 | |
3352 | node = static_cast<upd_node_t*>(thr->run_node); |
3353 | |
3354 | sel_node = node->select; |
3355 | |
3356 | parent = que_node_get_parent(node); |
3357 | |
3358 | ut_ad(que_node_get_type(node) == QUE_NODE_UPDATE); |
3359 | |
3360 | if (thr->prev_node == parent) { |
3361 | node->state = UPD_NODE_SET_IX_LOCK; |
3362 | } |
3363 | |
	if (node->state == UPD_NODE_SET_IX_LOCK) {

		if (!node->has_clust_rec_x_lock) {
			/* It may be that the current session has not yet
			started its transaction, or it has been committed: */

			err = lock_table(0, node->table, LOCK_IX, thr);

			if (err != DB_SUCCESS) {

				goto error_handling;
			}
		}

		node->state = UPD_NODE_UPDATE_CLUSTERED;

		if (node->searched_update) {
			/* Reset the cursor */
			sel_node->state = SEL_NODE_OPEN;

			/* Fetch a row to update */

			thr->run_node = sel_node;

			DBUG_RETURN(thr);
		}
	}

	/* sel_node is NULL if we are in the MySQL interface */

	if (sel_node && (sel_node->state != SEL_NODE_FETCH)) {

		if (!node->searched_update) {
			/* An explicit cursor should be positioned on a row
			to update */

			ut_error;

			err = DB_ERROR;

			goto error_handling;
		}

		ut_ad(sel_node->state == SEL_NODE_NO_MORE_ROWS);

		/* No more rows to update, or the select node performed the
		updates directly in-place */

		thr->run_node = parent;

		DBUG_RETURN(thr);
	}

	/* DO THE CHECKS OF THE CONSISTENCY CONSTRAINTS HERE */

	err = row_upd(node, thr);

error_handling:
	trx->error_state = err;

	if (err != DB_SUCCESS) {
		DBUG_RETURN(NULL);
	}

	/* DO THE TRIGGER ACTIONS HERE */

	if (node->searched_update) {
		/* Fetch next row to update */

		thr->run_node = sel_node;
	} else {
		/* It was an explicit cursor update */

		thr->run_node = parent;
	}

	node->state = UPD_NODE_UPDATE_CLUSTERED;

	DBUG_RETURN(thr);
}

/** Write query start time as SQL field data to a buffer. Needed by InnoDB.
@param	thd	Thread object
@param	buf	Buffer to hold start time data */
void thd_get_query_start_data(THD *thd, char *buf);

/** Append the row_start or row_end field to the update vector and set it
to a CURRENT_TIMESTAMP/trx->id value.
Supposed to be called only by make_versioned_update() and
make_versioned_delete().
@param[in]	trx	transaction
@param[in]	idx	table->vers_start or table->vers_end */
void upd_node_t::make_versioned_helper(const trx_t* trx, ulint idx)
{
	ut_ad(in_mysql_interface); // otherwise node->cmpl_info would have
				   // to be recalculated
	ut_ad(idx == table->vers_start || idx == table->vers_end);

	dict_index_t* clust_index = dict_table_get_first_index(table);

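	/* Append one more field to the update vector; it will carry the
	versioning column (row_start or row_end) identified by idx. */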
	update->n_fields++;
	upd_field_t* ufield =
		upd_get_nth_field(update, upd_get_n_fields(update) - 1);
	const dict_col_t* col = dict_table_get_nth_col(table, idx);

	upd_field_set_field_no(ufield, dict_col_get_clust_pos(col, clust_index),
			       clust_index);

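	/* For transaction-precise ("native") versioning the column stores
	the transaction id, written here in InnoDB's 8-byte integer format;
	otherwise the column is timestamp-based and receives the query
	start time of the current statement. */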
	char* where = reinterpret_cast<char*>(update->vers_sys_value);
	if (col->vers_native()) {
		mach_write_to_8(where, trx->id);
	} else {
		thd_get_query_start_data(trx->mysql_thd, where);
	}

	dfield_set_data(&ufield->new_val, update->vers_sys_value, col->len);
}

/** In addition to the columns changed by the UPDATE itself, set
row_start = CURRENT_TIMESTAMP/trx->id on the updated row version.
@param[in]	trx	transaction */
void upd_node_t::make_versioned_update(const trx_t* trx)
{
	make_versioned_helper(trx, table->vers_start);
}

/** Only set row_end = CURRENT_TIMESTAMP/trx->id.
Do not touch other fields at all.
@param[in]	trx	transaction */
void upd_node_t::make_versioned_delete(const trx_t* trx)
{
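	/* A versioned DELETE does not modify any user columns: discard
	whatever the update vector contained and only write row_end. */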
	update->n_fields = 0;
	is_delete = VERSIONED_DELETE;
	make_versioned_helper(trx, table->vers_end);
}
