/*****************************************************************************

Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file trx/trx0i_s.cc
INFORMATION SCHEMA innodb_trx, innodb_locks and
innodb_lock_waits tables fetch code.

The code below fetches information needed to fill those
3 dynamic tables and uploads it into a "transactions
table cache" for later retrieval.

Created July 17, 2007 Vasil Dimov
*******************************************************/

/* Found during the build of 5.5.3 on Linux 2.4 and early 2.6 kernels:
   The includes "univ.i" -> "my_global.h" cause a different path
   to be taken further down with pthread functions and types,
   so they must come first.
   From the symptoms, this is related to bug#46587 in the MySQL bug DB.
*/

#include "ha_prototypes.h"
#include <sql_class.h>

#include "buf0buf.h"
#include "dict0dict.h"
#include "ha0storage.h"
#include "hash0hash.h"
#include "lock0iter.h"
#include "lock0lock.h"
#include "mem0mem.h"
#include "page0page.h"
#include "rem0rec.h"
#include "row0row.h"
#include "srv0srv.h"
#include "sync0rw.h"
#include "sync0sync.h"
#include "trx0i_s.h"
#include "trx0sys.h"
#include "trx0trx.h"
#include "ut0mem.h"
#include "que0que.h"
#include "trx0purge.h"

/** Initial number of rows in the table cache */
#define TABLE_CACHE_INITIAL_ROWSNUM	1024

/** @brief The maximum number of chunks to allocate for a table cache.

The rows of a table cache are stored in a set of chunks. When a new
row is added a new chunk is allocated if necessary. Assuming that the
first one is 1024 rows (TABLE_CACHE_INITIAL_ROWSNUM) and each
subsequent one is N/2 where N is the number of rows we have allocated till
now, then the 39th chunk would accommodate 1677416425 rows and all chunks
would accommodate 3354832851 rows. */
#define MEM_CHUNKS_IN_TABLE_CACHE	39
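
/* For illustration (ignoring integer truncation): the chunk sizes run
1024, 512, 768, 1152, 1728, ...; each new chunk is half of everything
allocated so far, so the total number of allocated rows grows by a
factor of 1.5 with every chunk:
	total_rows(1) = 1024
	total_rows(n) = total_rows(n - 1) * 3 / 2 */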

/** The following are some testing auxiliary macros. Do not enable them
in a production environment. */
/* @{ */

#if 0
/** If this is enabled then lock folds will always be different,
resulting in equal rows being put in different cells of the hash
table. Checking for duplicates will be flawed because a different
fold will be calculated when a row is searched in the hash table. */
#define TEST_LOCK_FOLD_ALWAYS_DIFFERENT
#endif

#if 0
/** This effectively kills the search-for-duplicate-before-adding-a-row
function, but searching in the hash is still performed. It will always
be assumed that the lock is not present and insertion will be performed
into the hash table. */
#define TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
#endif

#if 0
/** This aggressively repeats adding each row many times. Depending on
the above settings this may be a no-op or may result in lots of rows
being added. */
#define TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
#endif

#if 0
/** Very similar to TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T, but the
hash table search is not performed at all. */
#define TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
#endif

#if 0
/** Do not insert each row into the hash table; duplicates may appear
if this is enabled. Also, if this is enabled, searching the hash is a
no-op because it will be empty. */
#define TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
#endif
/* @} */

/** Memory limit passed to ha_storage_put_memlim().
@param cache hash storage
@return maximum allowed allocation size */
#define MAX_ALLOWED_FOR_STORAGE(cache)	\
	(TRX_I_S_MEM_LIMIT		\
	 - (cache)->mem_allocd)

/** Memory limit in table_cache_create_empty_row().
@param cache hash storage
@return maximum allowed allocation size */
#define MAX_ALLOWED_FOR_ALLOC(cache)	\
	(TRX_I_S_MEM_LIMIT		\
	 - (cache)->mem_allocd		\
	 - ha_storage_get_size((cache)->storage))
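
/* Note: both limits above are carved out of the same fixed budget,
TRX_I_S_MEM_LIMIT. What the row chunks have already consumed
(mem_allocd) and what the string storage has consumed are subtracted
from it, so the cache as a whole cannot grow past TRX_I_S_MEM_LIMIT
bytes; once an allocation would exceed it, fetching stops and
is_truncated is set. */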

/** Memory for each table in the intermediate buffer is allocated in
separate chunks. These chunks are considered to be concatenated to
represent one flat array of rows. */
struct i_s_mem_chunk_t {
	ulint	offset;		/*!< offset, in number of rows */
	ulint	rows_allocd;	/*!< the size of this chunk, in number
				of rows */
	void*	base;		/*!< start of the chunk */
};

/** This represents one table's cache. */
struct i_s_table_cache_t {
	ulint		rows_used;	/*!< number of used rows */
	ulint		rows_allocd;	/*!< number of allocated rows */
	ulint		row_size;	/*!< size of a single row */
	i_s_mem_chunk_t	chunks[MEM_CHUNKS_IN_TABLE_CACHE]; /*!< array of
					memory chunks that stores the
					rows */
};

/** This structure describes the intermediate buffer */
struct trx_i_s_cache_t {
	rw_lock_t*	rw_lock;	/*!< read-write lock protecting
					the rest of this structure */
	uintmax_t	last_read;	/*!< last time the cache was read;
					measured in microseconds since
					epoch */
	ib_mutex_t	last_read_mutex;/*!< mutex protecting the
					last_read member - it is updated
					inside a shared lock of the
					rw_lock member */
	i_s_table_cache_t innodb_trx;	/*!< innodb_trx table */
	i_s_table_cache_t innodb_locks;	/*!< innodb_locks table */
	i_s_table_cache_t innodb_lock_waits;/*!< innodb_lock_waits table */
/** the hash table size is LOCKS_HASH_CELLS_NUM * sizeof(void*) bytes */
#define LOCKS_HASH_CELLS_NUM		10000
	hash_table_t*	locks_hash;	/*!< hash table used to eliminate
					duplicate entries in the
					innodb_locks table */
/** Initial size of the cache storage */
#define CACHE_STORAGE_INITIAL_SIZE	1024
/** Number of hash cells in the cache storage */
#define CACHE_STORAGE_HASH_CELLS	2048
	ha_storage_t*	storage;	/*!< storage for external volatile
					data that may become unavailable
					when we release
					lock_sys.mutex or trx_sys.mutex */
	ulint		mem_allocd;	/*!< the amount of memory
					allocated with mem_alloc*() */
	bool		is_truncated;	/*!< this is true if the memory
					limit was hit and thus the data
					in the cache is truncated */
};

/** This is the intermediate buffer where data needed to fill the
INFORMATION SCHEMA tables is fetched and later retrieved by the C++
code in handler/i_s.cc. */
static trx_i_s_cache_t	trx_i_s_cache_static;
/** This is the intermediate buffer where data needed to fill the
INFORMATION SCHEMA tables is fetched and later retrieved by the C++
code in handler/i_s.cc. */
trx_i_s_cache_t*	trx_i_s_cache = &trx_i_s_cache_static;

/*******************************************************************//**
For a record lock that is in the waiting state retrieves the only bit
that is set; for a table lock returns ULINT_UNDEFINED.
@return record number within the heap, or ULINT_UNDEFINED for a table lock */
static
ulint
wait_lock_get_heap_no(
/*==================*/
	const lock_t*	lock)	/*!< in: lock */
{
	ulint	ret;

	switch (lock_get_type(lock)) {
	case LOCK_REC:
		ret = lock_rec_find_set_bit(lock);
		ut_a(ret != ULINT_UNDEFINED);
		break;
	case LOCK_TABLE:
		ret = ULINT_UNDEFINED;
		break;
	default:
		ut_error;
	}

	return(ret);
}

/*******************************************************************//**
Initializes the members of a table cache. */
static
void
table_cache_init(
/*=============*/
	i_s_table_cache_t*	table_cache,	/*!< out: table cache */
	size_t			row_size)	/*!< in: the size of a
						row */
{
	ulint	i;

	table_cache->rows_used = 0;
	table_cache->rows_allocd = 0;
	table_cache->row_size = row_size;

	for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

		/* the memory is actually allocated in
		table_cache_create_empty_row() */
		table_cache->chunks[i].base = NULL;
	}
}

/*******************************************************************//**
Frees a table cache. */
static
void
table_cache_free(
/*=============*/
	i_s_table_cache_t*	table_cache)	/*!< in/out: table cache */
{
	ulint	i;

	for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

		/* the memory is actually allocated in
		table_cache_create_empty_row() */
		if (table_cache->chunks[i].base) {
			ut_free(table_cache->chunks[i].base);
			table_cache->chunks[i].base = NULL;
		}
	}
}

/*******************************************************************//**
Returns an empty row from a table cache. The row is allocated if no more
empty rows are available. The number of used rows is incremented.
If the memory limit is hit then NULL is returned and nothing is
allocated.
@return empty row, or NULL if out of memory */
static
void*
table_cache_create_empty_row(
/*=========================*/
	i_s_table_cache_t*	table_cache,	/*!< in/out: table cache */
	trx_i_s_cache_t*	cache)		/*!< in/out: cache to record
						how many bytes are
						allocated */
{
	ulint	i;
	void*	row;

	ut_a(table_cache->rows_used <= table_cache->rows_allocd);

	if (table_cache->rows_used == table_cache->rows_allocd) {

		/* rows_used == rows_allocd means that a new chunk needs
		to be allocated: either no more empty rows in the
		last allocated chunk or nothing has been allocated yet
		(rows_used == rows_allocd == 0); */

		i_s_mem_chunk_t*	chunk;
		ulint			req_bytes;
		ulint			got_bytes;
		ulint			req_rows;
		ulint			got_rows;

		/* find the first not allocated chunk */
		for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

			if (table_cache->chunks[i].base == NULL) {

				break;
			}
		}

		/* i == MEM_CHUNKS_IN_TABLE_CACHE means that all chunks
		have been allocated :-X */
		ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);

		/* allocate the chunk we just found */

		if (i == 0) {

			/* first chunk, nothing is allocated yet */
			req_rows = TABLE_CACHE_INITIAL_ROWSNUM;
		} else {

			/* Memory is increased by the formula
			new = old + old / 2; We are trying not to be
			aggressive here (= using the common new = old * 2)
			because the allocated memory will not be freed
			until InnoDB exits (it is reused). So it is better
			to allocate the memory in more steps, but have
			less unused/wasted memory, than to use fewer
			allocation steps (which happen once in a
			lifetime) but end up with lots of unused/wasted
			memory. */
			req_rows = table_cache->rows_allocd / 2;
		}
		req_bytes = req_rows * table_cache->row_size;

		if (req_bytes > MAX_ALLOWED_FOR_ALLOC(cache)) {

			return(NULL);
		}

		chunk = &table_cache->chunks[i];

		got_bytes = req_bytes;
		chunk->base = ut_malloc_nokey(req_bytes);

		got_rows = got_bytes / table_cache->row_size;

		cache->mem_allocd += got_bytes;

#if 0
		printf("allocating chunk %d req bytes=%lu, got bytes=%lu,"
		       " row size=%lu,"
		       " req rows=%lu, got rows=%lu\n",
		       i, req_bytes, got_bytes,
		       table_cache->row_size,
		       req_rows, got_rows);
#endif

		chunk->rows_allocd = got_rows;

		table_cache->rows_allocd += got_rows;

		/* adjust the offset of the next chunk */
		if (i < MEM_CHUNKS_IN_TABLE_CACHE - 1) {

			table_cache->chunks[i + 1].offset
				= chunk->offset + chunk->rows_allocd;
		}

		/* return the first empty row in the newly allocated
		chunk */
		row = chunk->base;
	} else {

		char*	chunk_start;
		ulint	offset;

		/* there is an empty row, no need to allocate new
		chunks */

		/* find the first chunk that contains allocated but
		empty/unused rows */
		for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

			if (table_cache->chunks[i].offset
			    + table_cache->chunks[i].rows_allocd
			    > table_cache->rows_used) {

				break;
			}
		}

		/* i == MEM_CHUNKS_IN_TABLE_CACHE means that all chunks
		are full, but
		table_cache->rows_used != table_cache->rows_allocd means
		exactly the opposite - there are allocated but
		empty/unused rows :-X */
		ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);

		chunk_start = (char*) table_cache->chunks[i].base;
		offset = table_cache->rows_used
			- table_cache->chunks[i].offset;

		row = chunk_start + offset * table_cache->row_size;
	}

	table_cache->rows_used++;

	return(row);
}

#ifdef UNIV_DEBUG
/*******************************************************************//**
Validates a row in the locks cache.
@return TRUE if valid */
static
ibool
i_s_locks_row_validate(
/*===================*/
	const i_s_locks_row_t*	row)	/*!< in: row to validate */
{
	ut_ad(row->lock_mode != NULL);
	ut_ad(row->lock_type != NULL);
	ut_ad(row->lock_table != NULL);
	ut_ad(row->lock_table_id != 0);

	if (row->lock_space == ULINT_UNDEFINED) {
		/* table lock */
		ut_ad(!strcmp("TABLE", row->lock_type));
		ut_ad(row->lock_index == NULL);
		ut_ad(row->lock_data == NULL);
		ut_ad(row->lock_page == ULINT_UNDEFINED);
		ut_ad(row->lock_rec == ULINT_UNDEFINED);
	} else {
		/* record lock */
		ut_ad(!strcmp("RECORD", row->lock_type));
		ut_ad(row->lock_index != NULL);
		/* row->lock_data == NULL if buf_page_try_get() == NULL */
		ut_ad(row->lock_page != ULINT_UNDEFINED);
		ut_ad(row->lock_rec != ULINT_UNDEFINED);
	}

	return(TRUE);
}
#endif /* UNIV_DEBUG */

/*******************************************************************//**
Fills i_s_trx_row_t object.
If memory can not be allocated then FALSE is returned.
@return FALSE if allocation fails */
static
ibool
fill_trx_row(
/*=========*/
	i_s_trx_row_t*		row,		/*!< out: result object
						that's filled */
	const trx_t*		trx,		/*!< in: transaction to
						get data from */
	const i_s_locks_row_t*	requested_lock_row,/*!< in: pointer to the
						corresponding row in
						innodb_locks if trx is
						waiting or NULL if trx
						is not waiting */
	trx_i_s_cache_t*	cache)		/*!< in/out: cache into
						which to copy volatile
						strings */
{
	size_t		stmt_len;
	const char*	s;

	ut_ad(lock_mutex_own());

	row->trx_id = trx_get_id_for_print(trx);
	row->trx_started = (ib_time_t) trx->start_time;
	row->trx_state = trx_get_que_state_str(trx);
	row->requested_lock_row = requested_lock_row;
	ut_ad(requested_lock_row == NULL
	      || i_s_locks_row_validate(requested_lock_row));

	if (trx->lock.wait_lock != NULL) {

		ut_a(requested_lock_row != NULL);
		row->trx_wait_started = (ib_time_t) trx->lock.wait_started;
	} else {
		ut_a(requested_lock_row == NULL);
		row->trx_wait_started = 0;
	}

	row->trx_weight = static_cast<uintmax_t>(TRX_WEIGHT(trx));

	if (trx->mysql_thd == NULL) {
		/* For internal transactions, e.g. purge, and for
		transactions being recovered at startup there is no
		associated MySQL thread data structure. */
		row->trx_mysql_thread_id = 0;
		row->trx_query = NULL;
		goto thd_done;
	}

	row->trx_mysql_thread_id = thd_get_thread_id(trx->mysql_thd);

	char	query[TRX_I_S_TRX_QUERY_MAX_LEN + 1];
	stmt_len = innobase_get_stmt_safe(trx->mysql_thd, query, sizeof(query));

	if (stmt_len > 0) {

		row->trx_query = static_cast<const char*>(
			ha_storage_put_memlim(
				cache->storage, query, stmt_len + 1,
				MAX_ALLOWED_FOR_STORAGE(cache)));

		row->trx_query_cs = innobase_get_charset(trx->mysql_thd);

		if (row->trx_query == NULL) {

			return(FALSE);
		}
	} else {

		row->trx_query = NULL;
	}

thd_done:
	s = trx->op_info;

	if (s != NULL && s[0] != '\0') {

		TRX_I_S_STRING_COPY(s, row->trx_operation_state,
				    TRX_I_S_TRX_OP_STATE_MAX_LEN, cache);

		if (row->trx_operation_state == NULL) {

			return(FALSE);
		}
	} else {

		row->trx_operation_state = NULL;
	}

	row->trx_tables_in_use = trx->n_mysql_tables_in_use;

	row->trx_tables_locked = lock_number_of_tables_locked(&trx->lock);

	/* These fields are modified while holding both trx->mutex and
	lock_sys.mutex, or just lock_sys.mutex. For reading, holding
	lock_sys.mutex is therefore sufficient (asserted above). */

	row->trx_lock_structs = UT_LIST_GET_LEN(trx->lock.trx_locks);

	row->trx_lock_memory_bytes = mem_heap_get_size(trx->lock.lock_heap);

	row->trx_rows_locked = lock_number_of_rows_locked(&trx->lock);

	row->trx_rows_modified = trx->undo_no;

	row->trx_concurrency_tickets = trx->n_tickets_to_enter_innodb;

	switch (trx->isolation_level) {
	case TRX_ISO_READ_UNCOMMITTED:
		row->trx_isolation_level = "READ UNCOMMITTED";
		break;
	case TRX_ISO_READ_COMMITTED:
		row->trx_isolation_level = "READ COMMITTED";
		break;
	case TRX_ISO_REPEATABLE_READ:
		row->trx_isolation_level = "REPEATABLE READ";
		break;
	case TRX_ISO_SERIALIZABLE:
		row->trx_isolation_level = "SERIALIZABLE";
		break;
	/* Should not happen; all defined isolation levels are
	handled above. */
	default:
		row->trx_isolation_level = "UNKNOWN";
	}

	row->trx_unique_checks = (ibool) trx->check_unique_secondary;

	row->trx_foreign_key_checks = (ibool) trx->check_foreigns;

	s = trx->detailed_error;

	if (s != NULL && s[0] != '\0') {

		TRX_I_S_STRING_COPY(s,
				    row->trx_foreign_key_error,
				    TRX_I_S_TRX_FK_ERROR_MAX_LEN, cache);

		if (row->trx_foreign_key_error == NULL) {

			return(FALSE);
		}
	} else {
		row->trx_foreign_key_error = NULL;
	}

	row->trx_is_read_only = trx->read_only;

	row->trx_is_autocommit_non_locking = trx_is_autocommit_non_locking(trx);

	return(TRUE);
}

/*******************************************************************//**
Format the nth field of "rec" and put it in "buf". The result is always
NUL-terminated.
@return number of bytes that were written to "buf", including the
terminating NUL */
static
ulint
put_nth_field(
/*==========*/
	char*			buf,	/*!< out: buffer */
	ulint			buf_size,/*!< in: buffer size in bytes */
	ulint			n,	/*!< in: number of field */
	const dict_index_t*	index,	/*!< in: index */
	const rec_t*		rec,	/*!< in: record */
	const ulint*		offsets)/*!< in: record offsets, returned
					by rec_get_offsets() */
{
	const byte*	data;
	ulint		data_len;
	dict_field_t*	dict_field;
	ulint		ret;

	ut_ad(rec_offs_validate(rec, NULL, offsets));

	if (buf_size == 0) {

		return(0);
	}

	ret = 0;

	if (n > 0) {
		/* we must append ", " before the actual data */

		if (buf_size < 3) {

			buf[0] = '\0';
			return(1);
		}

		memcpy(buf, ", ", 3);

		buf += 2;
		buf_size -= 2;
		ret += 2;
	}

	/* now buf_size >= 1 */

	data = rec_get_nth_field(rec, offsets, n, &data_len);

	dict_field = dict_index_get_nth_field(index, n);

	ret += row_raw_format((const char*) data, data_len,
			      dict_field, buf, buf_size);

	return(ret);
}

/*******************************************************************//**
Fills the "lock_data" member of i_s_locks_row_t object.
If memory can not be allocated then FALSE is returned.
@return FALSE if allocation fails */
static
ibool
fill_lock_data(
/*===========*/
	const char**		lock_data,/*!< out: "lock_data" to fill */
	const lock_t*		lock,	/*!< in: lock used to find the data */
	ulint			heap_no,/*!< in: rec num used to find the data */
	trx_i_s_cache_t*	cache)	/*!< in/out: cache where to store
					volatile data */
{
	ut_a(lock_get_type(lock) == LOCK_REC);

	switch (heap_no) {
	case PAGE_HEAP_NO_INFIMUM:
	case PAGE_HEAP_NO_SUPREMUM:
		*lock_data = ha_storage_put_str_memlim(
			cache->storage,
			heap_no == PAGE_HEAP_NO_INFIMUM
			? "infimum pseudo-record"
			: "supremum pseudo-record",
			MAX_ALLOWED_FOR_STORAGE(cache));
		return(*lock_data != NULL);
	}

	mtr_t	mtr;

	const buf_block_t*	block;
	const page_t*		page;
	const rec_t*		rec;
	const dict_index_t*	index;
	ulint			n_fields;
	mem_heap_t*		heap;
	ulint			offsets_onstack[REC_OFFS_NORMAL_SIZE];
	ulint*			offsets;
	char			buf[TRX_I_S_LOCK_DATA_MAX_LEN];
	ulint			buf_used;
	ulint			i;

	mtr_start(&mtr);

	block = buf_page_try_get(page_id_t(lock_rec_get_space_id(lock),
					   lock_rec_get_page_no(lock)),
				 &mtr);

	if (block == NULL) {

		*lock_data = NULL;

		mtr_commit(&mtr);

		return(TRUE);
	}

	page = reinterpret_cast<const page_t*>(buf_block_get_frame(block));

	rec_offs_init(offsets_onstack);
	offsets = offsets_onstack;

	rec = page_find_rec_with_heap_no(page, heap_no);

	index = lock_rec_get_index(lock);

	n_fields = dict_index_get_n_unique(index);

	ut_a(n_fields > 0);

	heap = NULL;
	offsets = rec_get_offsets(rec, index, offsets, true, n_fields, &heap);

	/* format and store the data */

	buf_used = 0;
	for (i = 0; i < n_fields; i++) {

		/* the "- 1" strips the terminating NUL written by
		put_nth_field(), so that the next field overwrites it
		and the result stays one contiguous string */
		buf_used += put_nth_field(
			buf + buf_used, sizeof(buf) - buf_used,
			i, index, rec, offsets) - 1;
	}

	*lock_data = (const char*) ha_storage_put_memlim(
		cache->storage, buf, buf_used + 1,
		MAX_ALLOWED_FOR_STORAGE(cache));

	if (heap != NULL) {

		/* this means that rec_get_offsets() has created a new
		heap and has stored offsets in it; check that this is
		really the case and free the heap */
		ut_a(offsets != offsets_onstack);
		mem_heap_free(heap);
	}

	mtr_commit(&mtr);

	if (*lock_data == NULL) {

		return(FALSE);
	}

	return(TRUE);
}

/*******************************************************************//**
Fills i_s_locks_row_t object.
If memory can not be allocated then FALSE is returned.
@return FALSE if allocation fails */
static
ibool
fill_locks_row(
/*===========*/
	i_s_locks_row_t*	row,	/*!< out: result object that's filled */
	const lock_t*		lock,	/*!< in: lock to get data from */
	ulint			heap_no,/*!< in: lock's record number
					or ULINT_UNDEFINED if the lock
					is a table lock */
	trx_i_s_cache_t*	cache)	/*!< in/out: cache into which to copy
					volatile strings */
{
	row->lock_trx_id = lock_get_trx_id(lock);
	row->lock_mode = lock_get_mode_str(lock);
	row->lock_type = lock_get_type_str(lock);

	row->lock_table = ha_storage_put_str_memlim(
		cache->storage, lock_get_table_name(lock).m_name,
		MAX_ALLOWED_FOR_STORAGE(cache));

	/* memory could not be allocated */
	if (row->lock_table == NULL) {

		return(FALSE);
	}

	switch (lock_get_type(lock)) {
	case LOCK_REC:
		row->lock_index = ha_storage_put_str_memlim(
			cache->storage, lock_rec_get_index_name(lock),
			MAX_ALLOWED_FOR_STORAGE(cache));

		/* memory could not be allocated */
		if (row->lock_index == NULL) {

			return(FALSE);
		}

		row->lock_space = lock_rec_get_space_id(lock);
		row->lock_page = lock_rec_get_page_no(lock);
		row->lock_rec = heap_no;

		if (!fill_lock_data(&row->lock_data, lock, heap_no, cache)) {

			/* memory could not be allocated */
			return(FALSE);
		}

		break;
	case LOCK_TABLE:
		row->lock_index = NULL;

		row->lock_space = ULINT_UNDEFINED;
		row->lock_page = ULINT_UNDEFINED;
		row->lock_rec = ULINT_UNDEFINED;

		row->lock_data = NULL;

		break;
	default:
		ut_error;
	}

	row->lock_table_id = lock_get_table_id(lock);

	row->hash_chain.value = row;
	ut_ad(i_s_locks_row_validate(row));

	return(TRUE);
}

/*******************************************************************//**
Fills i_s_lock_waits_row_t object. Returns its first argument.
@return result object that's filled */
static
i_s_lock_waits_row_t*
fill_lock_waits_row(
/*================*/
	i_s_lock_waits_row_t*	row,		/*!< out: result object
						that's filled */
	const i_s_locks_row_t*	requested_lock_row,/*!< in: pointer to the
						relevant requested lock
						row in innodb_locks */
	const i_s_locks_row_t*	blocking_lock_row)/*!< in: pointer to the
						relevant blocking lock
						row in innodb_locks */
{
	ut_ad(i_s_locks_row_validate(requested_lock_row));
	ut_ad(i_s_locks_row_validate(blocking_lock_row));

	row->requested_lock_row = requested_lock_row;
	row->blocking_lock_row = blocking_lock_row;

	return(row);
}

/*******************************************************************//**
Calculates a hash fold for a lock. For a record lock the fold is
calculated from 4 elements, which uniquely identify a lock at a given
point in time: transaction id, space id, page number, record number.
For a table lock the fold is the table's id.
@return fold */
static
ulint
fold_lock(
/*======*/
	const lock_t*	lock,	/*!< in: lock object to fold */
	ulint		heap_no)/*!< in: lock's record number
				or ULINT_UNDEFINED if the lock
				is a table lock */
{
#ifdef TEST_LOCK_FOLD_ALWAYS_DIFFERENT
	static ulint	fold = 0;

	return(fold++);
#else
	ulint	ret;

	switch (lock_get_type(lock)) {
	case LOCK_REC:
		ut_a(heap_no != ULINT_UNDEFINED);

		ret = ut_fold_ulint_pair((ulint) lock_get_trx_id(lock),
					 lock_rec_get_space_id(lock));

		ret = ut_fold_ulint_pair(ret,
					 lock_rec_get_page_no(lock));

		ret = ut_fold_ulint_pair(ret, heap_no);

		break;
	case LOCK_TABLE:
		/* this check is actually not necessary for continuing
		correct operation, but something must have gone wrong if
		it fails. */
		ut_a(heap_no == ULINT_UNDEFINED);

		ret = (ulint) lock_get_table_id(lock);

		break;
	default:
		ut_error;
	}

	return(ret);
#endif
}
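
/* Note: fold_lock() above and locks_row_eq_lock() below must stay in
sync: two representations of the same lock must produce the same fold,
otherwise the duplicate check would look in the wrong hash cell. The
TEST_LOCK_FOLD_ALWAYS_DIFFERENT macro simulates exactly this breakage. */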

/*******************************************************************//**
Checks whether i_s_locks_row_t object represents a lock_t object.
@return TRUE if they match */
static
ibool
locks_row_eq_lock(
/*==============*/
	const i_s_locks_row_t*	row,	/*!< in: innodb_locks row */
	const lock_t*		lock,	/*!< in: lock object */
	ulint			heap_no)/*!< in: lock's record number
					or ULINT_UNDEFINED if the lock
					is a table lock */
{
	ut_ad(i_s_locks_row_validate(row));
#ifdef TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
	return(0);
#else
	switch (lock_get_type(lock)) {
	case LOCK_REC:
		ut_a(heap_no != ULINT_UNDEFINED);

		return(row->lock_trx_id == lock_get_trx_id(lock)
		       && row->lock_space == lock_rec_get_space_id(lock)
		       && row->lock_page == lock_rec_get_page_no(lock)
		       && row->lock_rec == heap_no);

	case LOCK_TABLE:
		/* this check is actually not necessary for continuing
		correct operation, but something must have gone wrong if
		it fails. */
		ut_a(heap_no == ULINT_UNDEFINED);

		return(row->lock_trx_id == lock_get_trx_id(lock)
		       && row->lock_table_id == lock_get_table_id(lock));

	default:
		ut_error;
		return(FALSE);
	}
#endif
}

/*******************************************************************//**
Searches for a row in the innodb_locks cache that has a specified id.
This happens in O(1) time since a hash table is used. Returns pointer to
the row or NULL if none is found.
@return row or NULL */
static
i_s_locks_row_t*
search_innodb_locks(
/*================*/
	trx_i_s_cache_t*	cache,	/*!< in: cache */
	const lock_t*		lock,	/*!< in: lock to search for */
	ulint			heap_no)/*!< in: lock's record number
					or ULINT_UNDEFINED if the lock
					is a table lock */
{
	i_s_hash_chain_t*	hash_chain;

	HASH_SEARCH(
		/* hash_chain->"next" */
		next,
		/* the hash table */
		cache->locks_hash,
		/* fold */
		fold_lock(lock, heap_no),
		/* the type of the next variable */
		i_s_hash_chain_t*,
		/* auxiliary variable */
		hash_chain,
		/* assertion on every traversed item */
		ut_ad(i_s_locks_row_validate(hash_chain->value)),
		/* this determines if we have found the lock */
		locks_row_eq_lock(hash_chain->value, lock, heap_no));

	if (hash_chain == NULL) {

		return(NULL);
	}
	/* else */

	return(hash_chain->value);
}

/*******************************************************************//**
Adds a new element to the locks cache, enlarging it if necessary.
Returns a pointer to the added row. If the row is already present then
no row is added and a pointer to the existing row is returned.
If the row can not be allocated then NULL is returned.
@return row */
static
i_s_locks_row_t*
add_lock_to_cache(
/*==============*/
	trx_i_s_cache_t*	cache,	/*!< in/out: cache */
	const lock_t*		lock,	/*!< in: the element to add */
	ulint			heap_no)/*!< in: lock's record number
					or ULINT_UNDEFINED if the lock
					is a table lock */
{
	i_s_locks_row_t*	dst_row;

#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
	ulint	i;
	for (i = 0; i < 10000; i++) {
#endif
#ifndef TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
	/* quit if this lock is already present */
	dst_row = search_innodb_locks(cache, lock, heap_no);
	if (dst_row != NULL) {

		ut_ad(i_s_locks_row_validate(dst_row));
		return(dst_row);
	}
#endif

	dst_row = (i_s_locks_row_t*)
		table_cache_create_empty_row(&cache->innodb_locks, cache);

	/* memory could not be allocated */
	if (dst_row == NULL) {

		return(NULL);
	}

	if (!fill_locks_row(dst_row, lock, heap_no, cache)) {

		/* memory could not be allocated */
		cache->innodb_locks.rows_used--;
		return(NULL);
	}

#ifndef TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
	HASH_INSERT(
		/* the type used in the hash chain */
		i_s_hash_chain_t,
		/* hash_chain->"next" */
		next,
		/* the hash table */
		cache->locks_hash,
		/* fold */
		fold_lock(lock, heap_no),
		/* add this data to the hash */
		&dst_row->hash_chain);
#endif
#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
	} /* for()-loop */
#endif

	ut_ad(i_s_locks_row_validate(dst_row));
	return(dst_row);
}

/*******************************************************************//**
Adds a new pair of locks to the lock waits cache.
If memory can not be allocated then FALSE is returned.
@return FALSE if allocation fails */
static
ibool
add_lock_wait_to_cache(
/*===================*/
	trx_i_s_cache_t*	cache,		/*!< in/out: cache */
	const i_s_locks_row_t*	requested_lock_row,/*!< in: pointer to the
						relevant requested lock
						row in innodb_locks */
	const i_s_locks_row_t*	blocking_lock_row)/*!< in: pointer to the
						relevant blocking lock
						row in innodb_locks */
{
	i_s_lock_waits_row_t*	dst_row;

	dst_row = (i_s_lock_waits_row_t*)
		table_cache_create_empty_row(&cache->innodb_lock_waits,
					     cache);

	/* memory could not be allocated */
	if (dst_row == NULL) {

		return(FALSE);
	}

	fill_lock_waits_row(dst_row, requested_lock_row, blocking_lock_row);

	return(TRUE);
}

/*******************************************************************//**
Adds a transaction's relevant (important) locks to the cache.
If the transaction is waiting, then the wait lock is added to
innodb_locks and a pointer to the added row is returned in
requested_lock_row, otherwise requested_lock_row is set to NULL.
If rows can not be allocated then FALSE is returned and the value of
requested_lock_row is undefined.
@return FALSE if allocation fails */
static
ibool
add_trx_relevant_locks_to_cache(
/*============================*/
	trx_i_s_cache_t*	cache,	/*!< in/out: cache */
	const trx_t*		trx,	/*!< in: transaction */
	i_s_locks_row_t**	requested_lock_row)/*!< out: pointer to the
					requested lock row, or NULL or
					undefined */
{
	ut_ad(lock_mutex_own());

	/* If the transaction is waiting, we add the wait lock and all
	locks from other transactions that are blocking the wait lock. */
	if (trx->lock.que_state == TRX_QUE_LOCK_WAIT) {

		const lock_t*		curr_lock;
		ulint			wait_lock_heap_no;
		i_s_locks_row_t*	blocking_lock_row;
		lock_queue_iterator_t	iter;

		ut_a(trx->lock.wait_lock != NULL);

		wait_lock_heap_no
			= wait_lock_get_heap_no(trx->lock.wait_lock);

		/* add the requested lock */
		*requested_lock_row
			= add_lock_to_cache(cache, trx->lock.wait_lock,
					    wait_lock_heap_no);

		/* memory could not be allocated */
		if (*requested_lock_row == NULL) {

			return(FALSE);
		}

		/* then iterate over the locks before the wait lock and
		add the ones that are blocking it */

		lock_queue_iterator_reset(&iter, trx->lock.wait_lock,
					  ULINT_UNDEFINED);

		for (curr_lock = lock_queue_iterator_get_prev(&iter);
		     curr_lock != NULL;
		     curr_lock = lock_queue_iterator_get_prev(&iter)) {

			if (lock_has_to_wait(trx->lock.wait_lock,
					     curr_lock)) {

				/* add the lock that is
				blocking trx->lock.wait_lock */
				blocking_lock_row
					= add_lock_to_cache(
						cache, curr_lock,
						/* heap_no is the same
						for the waiting and the
						blocking locks */
						wait_lock_heap_no);

				/* memory could not be allocated */
				if (blocking_lock_row == NULL) {

					return(FALSE);
				}

				/* add the relation between both locks
				to innodb_lock_waits */
				if (!add_lock_wait_to_cache(
						cache, *requested_lock_row,
						blocking_lock_row)) {

					/* memory could not be allocated */
					return(FALSE);
				}
			}
		}
	} else {

		*requested_lock_row = NULL;
	}

	return(TRUE);
}
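
/* Example of what the function above adds for one waiting transaction:
if trx A waits for a record lock that is blocked by locks of trx B and
trx C, then one row for A's requested lock and one row per blocking
lock go into innodb_locks (duplicates are folded away via the hash),
and the pairs (A's lock, B's lock) and (A's lock, C's lock) go into
innodb_lock_waits. */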

/** The minimum time that a cache must not be updated after it has been
read for the last time; measured in microseconds. We use this technique
to ensure that SELECTs which join several INFORMATION SCHEMA tables read
the same version of the cache. */
#define CACHE_MIN_IDLE_TIME_US	100000	/* 0.1 sec */
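
/* Example: a query that joins innodb_trx and innodb_lock_waits reads
the cache once per table. The reads happen well within 0.1 s of each
other, so no re-fetch is allowed in between and both reads observe the
same snapshot of the transaction and lock state. */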

/*******************************************************************//**
Checks if the cache can safely be updated.
@return TRUE if can be updated */
static
ibool
can_cache_be_updated(
/*=================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	uintmax_t	now;

	/* Here we read cache->last_read without acquiring its mutex
	because last_read is only updated when a shared rw lock on the
	whole cache is being held (see trx_i_s_cache_end_read()) and
	we are currently holding an exclusive rw lock on the cache.
	So it is not possible for last_read to be updated while we are
	reading it. */

	ut_ad(rw_lock_own(cache->rw_lock, RW_LOCK_X));

	now = ut_time_us(NULL);
	if (now - cache->last_read > CACHE_MIN_IDLE_TIME_US) {

		return(TRUE);
	}

	return(FALSE);
}

/*******************************************************************//**
Declare a cache empty, preparing it to be filled up. Not all resources
are freed because they can be reused. */
static
void
trx_i_s_cache_clear(
/*================*/
	trx_i_s_cache_t*	cache)	/*!< out: cache to clear */
{
	cache->innodb_trx.rows_used = 0;
	cache->innodb_locks.rows_used = 0;
	cache->innodb_lock_waits.rows_used = 0;

	hash_table_clear(cache->locks_hash);

	ha_storage_empty(&cache->storage);
}


/**
  Add transactions to innodb_trx's cache.

  We also add all locks that are relevant to each transaction into
  innodb_locks' and innodb_lock_waits' caches.
*/

static void fetch_data_into_cache_low(trx_i_s_cache_t *cache, const trx_t *trx)
{
  i_s_locks_row_t *requested_lock_row;

  assert_trx_nonlocking_or_in_list(trx);

  if (add_trx_relevant_locks_to_cache(cache, trx, &requested_lock_row))
  {
    if (i_s_trx_row_t *trx_row= reinterpret_cast<i_s_trx_row_t*>(
          table_cache_create_empty_row(&cache->innodb_trx, cache)))
    {
      if (fill_trx_row(trx_row, trx, requested_lock_row, cache))
        return;
      --cache->innodb_trx.rows_used;
    }
  }

  /* memory could not be allocated */
  cache->is_truncated= true;
}


/**
  Fetches the data needed to fill the 3 INFORMATION SCHEMA tables into the
  table cache buffer. Cache must be locked for write.
*/

static void fetch_data_into_cache(trx_i_s_cache_t *cache)
{
  ut_ad(lock_mutex_own());
  trx_i_s_cache_clear(cache);

  /* the cache is being refilled, so it starts out untruncated;
  fetch_data_into_cache_low() sets the flag if the memory limit is
  hit while fetching */
  cache->is_truncated= false;

  /* Capture the state of transactions */
  mutex_enter(&trx_sys.mutex);
  for (const trx_t *trx= UT_LIST_GET_FIRST(trx_sys.trx_list);
       trx != NULL;
       trx= UT_LIST_GET_NEXT(trx_list, trx))
  {
    if (trx_is_started(trx) && trx != purge_sys.query->trx)
    {
      fetch_data_into_cache_low(cache, trx);
      if (cache->is_truncated)
        break;
    }
  }
  mutex_exit(&trx_sys.mutex);
}


/*******************************************************************//**
Update the transactions cache if it has not been read for some time.
Called from handler/i_s.cc.
@return 0 - fetched, 1 - not */
int
trx_i_s_possibly_fetch_data_into_cache(
/*===================================*/
	trx_i_s_cache_t*	cache)	/*!< in/out: cache */
{
	if (!can_cache_be_updated(cache)) {

		return(1);
	}

	/* We need to read trx_sys and record/table lock queues */

	lock_mutex_enter();
	fetch_data_into_cache(cache);
	lock_mutex_exit();

	/* update cache last read time; last_read holds microseconds,
	matching what ut_time_us() returns */
	cache->last_read = ut_time_us(NULL);

	return(0);
}

/*******************************************************************//**
Returns TRUE if the data in the cache is truncated due to the memory
limit posed by TRX_I_S_MEM_LIMIT.
@return TRUE if truncated */
bool
trx_i_s_cache_is_truncated(
/*=======================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	return(cache->is_truncated);
}

/*******************************************************************//**
Initialize INFORMATION SCHEMA trx related cache. */
void
trx_i_s_cache_init(
/*===============*/
	trx_i_s_cache_t*	cache)	/*!< out: cache to init */
{
	/* The latching is done in the following order:
	acquire trx_i_s_cache_t::rw_lock, X
	acquire lock mutex
	release lock mutex
	release trx_i_s_cache_t::rw_lock
	acquire trx_i_s_cache_t::rw_lock, S
	acquire trx_i_s_cache_t::last_read_mutex
	release trx_i_s_cache_t::last_read_mutex
	release trx_i_s_cache_t::rw_lock */

	cache->rw_lock = static_cast<rw_lock_t*>(
		ut_malloc_nokey(sizeof(*cache->rw_lock)));

	rw_lock_create(trx_i_s_cache_lock_key, cache->rw_lock,
		       SYNC_TRX_I_S_RWLOCK);

	cache->last_read = 0;

	mutex_create(LATCH_ID_CACHE_LAST_READ, &cache->last_read_mutex);

	table_cache_init(&cache->innodb_trx, sizeof(i_s_trx_row_t));
	table_cache_init(&cache->innodb_locks, sizeof(i_s_locks_row_t));
	table_cache_init(&cache->innodb_lock_waits,
			 sizeof(i_s_lock_waits_row_t));

	cache->locks_hash = hash_create(LOCKS_HASH_CELLS_NUM);

	cache->storage = ha_storage_create(CACHE_STORAGE_INITIAL_SIZE,
					   CACHE_STORAGE_HASH_CELLS);

	cache->mem_allocd = 0;

	cache->is_truncated = false;
}

/*******************************************************************//**
Free the INFORMATION SCHEMA trx related cache. */
void
trx_i_s_cache_free(
/*===============*/
	trx_i_s_cache_t*	cache)	/*!< in, own: cache to free */
{
	rw_lock_free(cache->rw_lock);
	ut_free(cache->rw_lock);
	cache->rw_lock = NULL;

	mutex_free(&cache->last_read_mutex);

	hash_table_free(cache->locks_hash);
	ha_storage_free(cache->storage);
	table_cache_free(&cache->innodb_trx);
	table_cache_free(&cache->innodb_locks);
	table_cache_free(&cache->innodb_lock_waits);
}

/*******************************************************************//**
Issue a shared/read lock on the tables cache. */
void
trx_i_s_cache_start_read(
/*=====================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	rw_lock_s_lock(cache->rw_lock);
}

/*******************************************************************//**
Release a shared/read lock on the tables cache. */
void
trx_i_s_cache_end_read(
/*===================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	uintmax_t	now;

	ut_ad(rw_lock_own(cache->rw_lock, RW_LOCK_S));

	/* update cache last read time */
	now = ut_time_us(NULL);
	mutex_enter(&cache->last_read_mutex);
	cache->last_read = now;
	mutex_exit(&cache->last_read_mutex);

	rw_lock_s_unlock(cache->rw_lock);
}

/*******************************************************************//**
Issue an exclusive/write lock on the tables cache. */
void
trx_i_s_cache_start_write(
/*======================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	rw_lock_x_lock(cache->rw_lock);
}

/*******************************************************************//**
Release an exclusive/write lock on the tables cache. */
void
trx_i_s_cache_end_write(
/*====================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	ut_ad(rw_lock_own(cache->rw_lock, RW_LOCK_X));

	rw_lock_x_unlock(cache->rw_lock);
}

/*******************************************************************//**
Selects an INFORMATION SCHEMA table cache from the whole cache.
@return table cache */
static
i_s_table_cache_t*
cache_select_table(
/*===============*/
	trx_i_s_cache_t*	cache,	/*!< in: whole cache */
	enum i_s_table		table)	/*!< in: which table */
{
	i_s_table_cache_t*	table_cache;

	ut_ad(rw_lock_own(cache->rw_lock, RW_LOCK_S)
	      || rw_lock_own(cache->rw_lock, RW_LOCK_X));

	switch (table) {
	case I_S_INNODB_TRX:
		table_cache = &cache->innodb_trx;
		break;
	case I_S_INNODB_LOCKS:
		table_cache = &cache->innodb_locks;
		break;
	case I_S_INNODB_LOCK_WAITS:
		table_cache = &cache->innodb_lock_waits;
		break;
	default:
		ut_error;
	}

	return(table_cache);
}

/*******************************************************************//**
Retrieves the number of used rows in the cache for a given
INFORMATION SCHEMA table.
@return number of rows */
ulint
trx_i_s_cache_get_rows_used(
/*========================*/
	trx_i_s_cache_t*	cache,	/*!< in: cache */
	enum i_s_table		table)	/*!< in: which table */
{
	i_s_table_cache_t*	table_cache;

	table_cache = cache_select_table(cache, table);

	return(table_cache->rows_used);
}

/*******************************************************************//**
Retrieves the nth row (zero-based) in the cache for a given
INFORMATION SCHEMA table.
@return row */
void*
trx_i_s_cache_get_nth_row(
/*======================*/
	trx_i_s_cache_t*	cache,	/*!< in: cache */
	enum i_s_table		table,	/*!< in: which table */
	ulint			n)	/*!< in: row number */
{
	i_s_table_cache_t*	table_cache;
	ulint			i;
	void*			row;

	table_cache = cache_select_table(cache, table);

	ut_a(n < table_cache->rows_used);

	row = NULL;

	for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

		if (table_cache->chunks[i].offset
		    + table_cache->chunks[i].rows_allocd > n) {

			row = (char*) table_cache->chunks[i].base
				+ (n - table_cache->chunks[i].offset)
				* table_cache->row_size;
			break;
		}
	}

	ut_a(row != NULL);

	return(row);
}
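
/* Worked example for the chunk arithmetic above (hypothetical sizes):
if chunk 0 holds rows 0..1023 (offset 0, rows_allocd 1024) and chunk 1
holds rows 1024..1535 (offset 1024, rows_allocd 512), then row n = 1300
satisfies offset + rows_allocd > n first for chunk 1, and is found at
chunks[1].base + (1300 - 1024) * row_size. */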

/*******************************************************************//**
Crafts a lock id string from an i_s_locks_row_t object. Returns its
second argument. This function aborts if there is not enough space in
lock_id. Be sure to provide at least TRX_I_S_LOCK_ID_MAX_LEN + 1 if you
want to be 100% sure that it will not abort.
@return resulting lock id */
char*
trx_i_s_create_lock_id(
/*===================*/
	const i_s_locks_row_t*	row,	/*!< in: innodb_locks row */
	char*			lock_id,/*!< out: resulting lock_id */
	ulint			lock_id_size)/*!< in: size of the lock id
					buffer */
{
	int	res_len;

	/* please adjust TRX_I_S_LOCK_ID_MAX_LEN if you change this */

	if (row->lock_space != ULINT_UNDEFINED) {
		/* record lock */
		res_len = snprintf(lock_id, lock_id_size,
				   TRX_ID_FMT
				   ":" ULINTPF ":" ULINTPF ":" ULINTPF,
				   row->lock_trx_id, row->lock_space,
				   row->lock_page, row->lock_rec);
	} else {
		/* table lock */
		res_len = snprintf(lock_id, lock_id_size,
				   TRX_ID_FMT ":" UINT64PF,
				   row->lock_trx_id,
				   row->lock_table_id);
	}

	/* the typecast is safe because snprintf(3) never returns
	negative result */
	ut_a(res_len >= 0);
	ut_a((ulint) res_len < lock_id_size);

	return(lock_id);
}
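
/* Example output (illustrative values only): a record lock of
transaction 1234 on space 5, page 3, heap number 7 yields the id
"1234:5:3:7", while a table lock of the same transaction on the table
with id 16 yields "1234:16". */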
1588