1/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License as published by
5 the Free Software Foundation; version 2 of the License.
6
7 This program is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU General Public License for more details.
11
12 You should have received a copy of the GNU General Public License
13 along with this program; if not, write to the Free Software Foundation,
14 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
15
16/**
17 @file storage/perfschema/pfs_instr.cc
18 Performance schema instruments (implementation).
19*/
20
21#include <my_global.h>
22#include <string.h>
23
24#include "my_sys.h"
25#include "pfs.h"
26#include "pfs_stat.h"
27#include "pfs_instr.h"
28#include "pfs_host.h"
29#include "pfs_user.h"
30#include "pfs_account.h"
31#include "pfs_global.h"
32#include "pfs_instr_class.h"
33
34/**
35 @addtogroup Performance_schema_buffers
36 @{
37*/
38
39/** Size of the mutex instances array. @sa mutex_array */
40ulong mutex_max;
41/** True when @c mutex_array is full. */
42bool mutex_full;
43/** Number of mutexes instance lost. @sa mutex_array */
44ulong mutex_lost;
45/** Size of the rwlock instances array. @sa rwlock_array */
46ulong rwlock_max;
47/** True when @c rwlock_array is full. */
48bool rwlock_full;
49/** Number or rwlock instances lost. @sa rwlock_array */
50ulong rwlock_lost;
51/** Size of the conditions instances array. @sa cond_array */
52ulong cond_max;
53/** True when @c cond_array is full. */
54bool cond_full;
55/** Number of conditions instances lost. @sa cond_array */
56ulong cond_lost;
57/** Size of the thread instances array. @sa thread_array */
58ulong thread_max;
59/** True when @c thread_array is full. */
60bool thread_full;
61/** Number or thread instances lost. @sa thread_array */
62ulong thread_lost;
63/** Size of the file instances array. @sa file_array */
64ulong file_max;
65/** True when @c file_array is full. */
66bool file_full;
67/** Number of file instances lost. @sa file_array */
68ulong file_lost;
69/**
70 Size of the file handle array. @sa file_handle_array.
71 Signed value, for easier comparisons with a file descriptor number.
72*/
73long file_handle_max;
74/** True when @c file_handle_array is full. */
75bool file_handle_full;
76/** Number of file handle lost. @sa file_handle_array */
77ulong file_handle_lost;
78/** Size of the table instances array. @sa table_array */
79ulong table_max;
80/** True when @c table_array is full. */
81bool table_full;
82/** Number of table instances lost. @sa table_array */
83ulong table_lost;
84/** Size of the socket instances array. @sa socket_array */
85ulong socket_max;
86/** True when @c socket_array is full. */
87bool socket_full;
88/** Number of socket instances lost. @sa socket_array */
89ulong socket_lost;
90/** Number of EVENTS_WAITS_HISTORY records per thread. */
91ulong events_waits_history_per_thread;
92/** Number of EVENTS_STAGES_HISTORY records per thread. */
93ulong events_stages_history_per_thread;
94/** Number of EVENTS_STATEMENTS_HISTORY records per thread. */
95ulong events_statements_history_per_thread;
96uint statement_stack_max;
97size_t pfs_max_digest_length= 0;
98/** Number of locker lost. @sa LOCKER_STACK_SIZE. */
99ulong locker_lost= 0;
100/** Number of statement lost. @sa STATEMENT_STACK_SIZE. */
101ulong statement_lost= 0;
102/** Size of connection attribute storage per thread */
103ulong session_connect_attrs_size_per_thread;
104/** Number of connection attributes lost */
105ulong session_connect_attrs_lost= 0;
106
107/**
108 Mutex instrumentation instances array.
109 @sa mutex_max
110 @sa mutex_lost
111*/
112PFS_mutex *mutex_array= NULL;
113
114/**
115 RWLock instrumentation instances array.
116 @sa rwlock_max
117 @sa rwlock_lost
118*/
119PFS_rwlock *rwlock_array= NULL;
120
121/**
122 Condition instrumentation instances array.
123 @sa cond_max
124 @sa cond_lost
125*/
126PFS_cond *cond_array= NULL;
127
128/**
129 Thread instrumentation instances array.
130 @sa thread_max
131 @sa thread_lost
132*/
133PFS_thread *thread_array= NULL;
134
135/**
136 File instrumentation instances array.
137 @sa file_max
138 @sa file_lost
139 @sa filename_hash
140*/
141PFS_file *file_array= NULL;
142
143/**
144 File instrumentation handle array.
145 @sa file_handle_max
146 @sa file_handle_lost
147*/
148PFS_file **file_handle_array= NULL;
149
150/**
151 Table instrumentation instances array.
152 @sa table_max
153 @sa table_lost
154*/
155PFS_table *table_array= NULL;
156
157/**
158 Socket instrumentation instances array.
159 @sa socket_max
160 @sa socket_lost
161*/
162PFS_socket *socket_array= NULL;
163
164PFS_stage_stat *global_instr_class_stages_array= NULL;
165PFS_statement_stat *global_instr_class_statements_array= NULL;
166
167static volatile uint64 thread_internal_id_counter= 0;
168
169static uint thread_instr_class_waits_sizing;
170static uint thread_instr_class_stages_sizing;
171static uint thread_instr_class_statements_sizing;
172static PFS_single_stat *thread_instr_class_waits_array= NULL;
173static PFS_stage_stat *thread_instr_class_stages_array= NULL;
174static PFS_statement_stat *thread_instr_class_statements_array= NULL;
175
176static PFS_events_waits *thread_waits_history_array= NULL;
177static PFS_events_stages *thread_stages_history_array= NULL;
178static PFS_events_statements *thread_statements_history_array= NULL;
179static PFS_events_statements *thread_statements_stack_array= NULL;
180static unsigned char *current_stmts_digest_token_array= NULL;
181static unsigned char *history_stmts_digest_token_array= NULL;
182static char *thread_session_connect_attrs_array= NULL;
183
184/** Hash table for instrumented files. */
185LF_HASH filename_hash;
186/** True if filename_hash is initialized. */
187static bool filename_hash_inited= false;
188
189/**
190 Initialize all the instruments instance buffers.
191 @param param sizing parameters
192 @return 0 on success
193*/
194int init_instruments(const PFS_global_param *param)
195{
196 PFS_events_statements *pfs_stmt;
197 unsigned char *pfs_tokens;
198
199 uint thread_waits_history_sizing;
200 uint thread_stages_history_sizing;
201 uint thread_statements_history_sizing;
202 uint thread_statements_stack_sizing;
203 uint thread_session_connect_attrs_sizing;
204 uint index;
205
206 /* Make sure init_event_name_sizing is called */
207 DBUG_ASSERT(wait_class_max != 0);
208
209 mutex_max= param->m_mutex_sizing;
210 mutex_full= false;
211 mutex_lost= 0;
212 rwlock_max= param->m_rwlock_sizing;
213 rwlock_full= false;
214 rwlock_lost= 0;
215 cond_max= param->m_cond_sizing;
216 cond_full= false;
217 cond_lost= 0;
218 file_max= param->m_file_sizing;
219 file_full= false;
220 file_lost= 0;
221 file_handle_max= param->m_file_handle_sizing;
222 file_handle_full= false;
223 file_handle_lost= 0;
224
225 pfs_max_digest_length= param->m_max_digest_length;
226
227 table_max= param->m_table_sizing;
228 table_full= false;
229 table_lost= 0;
230 thread_max= param->m_thread_sizing;
231 thread_full= false;
232 thread_lost= 0;
233 socket_max= param->m_socket_sizing;
234 socket_full= false;
235 socket_lost= 0;
236
237 events_waits_history_per_thread= param->m_events_waits_history_sizing;
238 thread_waits_history_sizing= param->m_thread_sizing
239 * events_waits_history_per_thread;
240
241 thread_instr_class_waits_sizing= param->m_thread_sizing
242 * wait_class_max;
243
244 events_stages_history_per_thread= param->m_events_stages_history_sizing;
245 thread_stages_history_sizing= param->m_thread_sizing
246 * events_stages_history_per_thread;
247
248 events_statements_history_per_thread= param->m_events_statements_history_sizing;
249 thread_statements_history_sizing= param->m_thread_sizing
250 * events_statements_history_per_thread;
251
252 statement_stack_max= 1;
253 thread_statements_stack_sizing= param->m_thread_sizing * statement_stack_max;
254
255 thread_instr_class_stages_sizing= param->m_thread_sizing
256 * param->m_stage_class_sizing;
257
258 thread_instr_class_statements_sizing= param->m_thread_sizing
259 * param->m_statement_class_sizing;
260
261 session_connect_attrs_size_per_thread= param->m_session_connect_attrs_sizing;
262 thread_session_connect_attrs_sizing= param->m_thread_sizing
263 * session_connect_attrs_size_per_thread;
264 session_connect_attrs_lost= 0;
265
266 size_t current_digest_tokens_sizing= param->m_thread_sizing * pfs_max_digest_length * statement_stack_max;
267 size_t history_digest_tokens_sizing= param->m_thread_sizing * pfs_max_digest_length * events_statements_history_per_thread;
268
269 mutex_array= NULL;
270 rwlock_array= NULL;
271 cond_array= NULL;
272 file_array= NULL;
273 file_handle_array= NULL;
274 table_array= NULL;
275 socket_array= NULL;
276 thread_array= NULL;
277 thread_waits_history_array= NULL;
278 thread_stages_history_array= NULL;
279 thread_statements_history_array= NULL;
280 thread_statements_stack_array= NULL;
281 current_stmts_digest_token_array= NULL;
282 history_stmts_digest_token_array= NULL;
283 thread_instr_class_waits_array= NULL;
284 thread_instr_class_stages_array= NULL;
285 thread_instr_class_statements_array= NULL;
286 thread_internal_id_counter= 0;
287
288 if (mutex_max > 0)
289 {
290 mutex_array= PFS_MALLOC_ARRAY(mutex_max, sizeof(PFS_mutex), PFS_mutex, MYF(MY_ZEROFILL));
291 if (unlikely(mutex_array == NULL))
292 return 1;
293 }
294
295 if (rwlock_max > 0)
296 {
297 rwlock_array= PFS_MALLOC_ARRAY(rwlock_max, sizeof(PFS_rwlock), PFS_rwlock, MYF(MY_ZEROFILL));
298 if (unlikely(rwlock_array == NULL))
299 return 1;
300 }
301
302 if (cond_max > 0)
303 {
304 cond_array= PFS_MALLOC_ARRAY(cond_max, sizeof(PFS_cond), PFS_cond, MYF(MY_ZEROFILL));
305 if (unlikely(cond_array == NULL))
306 return 1;
307 }
308
309 if (file_max > 0)
310 {
311 file_array= PFS_MALLOC_ARRAY(file_max, sizeof(PFS_file), PFS_file, MYF(MY_ZEROFILL));
312 if (unlikely(file_array == NULL))
313 return 1;
314 }
315
316 if (file_handle_max > 0)
317 {
318 file_handle_array= PFS_MALLOC_ARRAY(file_handle_max, sizeof(PFS_file*), PFS_file*, MYF(MY_ZEROFILL));
319 if (unlikely(file_handle_array == NULL))
320 return 1;
321 }
322
323 if (table_max > 0)
324 {
325 table_array= PFS_MALLOC_ARRAY(table_max, sizeof(PFS_table), PFS_table, MYF(MY_ZEROFILL));
326 if (unlikely(table_array == NULL))
327 return 1;
328 }
329
330 if (socket_max > 0)
331 {
332 socket_array= PFS_MALLOC_ARRAY(socket_max, sizeof(PFS_socket), PFS_socket, MYF(MY_ZEROFILL));
333 if (unlikely(socket_array == NULL))
334 return 1;
335 }
336
337 if (thread_max > 0)
338 {
339 thread_array= PFS_MALLOC_ARRAY(thread_max, sizeof(PFS_thread), PFS_thread, MYF(MY_ZEROFILL));
340 if (unlikely(thread_array == NULL))
341 return 1;
342 }
343
344 if (thread_waits_history_sizing > 0)
345 {
346 thread_waits_history_array=
347 PFS_MALLOC_ARRAY(thread_waits_history_sizing, sizeof(PFS_events_waits), PFS_events_waits,
348 MYF(MY_ZEROFILL));
349 if (unlikely(thread_waits_history_array == NULL))
350 return 1;
351 }
352
353 if (thread_instr_class_waits_sizing > 0)
354 {
355 thread_instr_class_waits_array=
356 PFS_MALLOC_ARRAY(thread_instr_class_waits_sizing,
357 sizeof(PFS_single_stat), PFS_single_stat, MYF(MY_ZEROFILL));
358 if (unlikely(thread_instr_class_waits_array == NULL))
359 return 1;
360
361 for (index= 0; index < thread_instr_class_waits_sizing; index++)
362 thread_instr_class_waits_array[index].reset();
363 }
364
365 if (thread_stages_history_sizing > 0)
366 {
367 thread_stages_history_array=
368 PFS_MALLOC_ARRAY(thread_stages_history_sizing, sizeof(PFS_events_stages), PFS_events_stages,
369 MYF(MY_ZEROFILL));
370 if (unlikely(thread_stages_history_array == NULL))
371 return 1;
372 }
373
374 if (thread_instr_class_stages_sizing > 0)
375 {
376 thread_instr_class_stages_array=
377 PFS_MALLOC_ARRAY(thread_instr_class_stages_sizing,
378 sizeof(PFS_stage_stat), PFS_stage_stat, MYF(MY_ZEROFILL));
379 if (unlikely(thread_instr_class_stages_array == NULL))
380 return 1;
381
382 for (index= 0; index < thread_instr_class_stages_sizing; index++)
383 thread_instr_class_stages_array[index].reset();
384 }
385
386 if (thread_statements_history_sizing > 0)
387 {
388 thread_statements_history_array=
389 PFS_MALLOC_ARRAY(thread_statements_history_sizing, sizeof(PFS_events_statements),
390 PFS_events_statements, MYF(MY_ZEROFILL));
391 if (unlikely(thread_statements_history_array == NULL))
392 return 1;
393 }
394
395 if (thread_statements_stack_sizing > 0)
396 {
397 thread_statements_stack_array=
398 PFS_MALLOC_ARRAY(thread_statements_stack_sizing, sizeof(PFS_events_statements),
399 PFS_events_statements, MYF(MY_ZEROFILL));
400 if (unlikely(thread_statements_stack_array == NULL))
401 return 1;
402 }
403
404 if (thread_instr_class_statements_sizing > 0)
405 {
406 thread_instr_class_statements_array=
407 PFS_MALLOC_ARRAY(thread_instr_class_statements_sizing,
408 sizeof(PFS_statement_stat), PFS_statement_stat, MYF(MY_ZEROFILL));
409 if (unlikely(thread_instr_class_statements_array == NULL))
410 return 1;
411
412 for (index= 0; index < thread_instr_class_statements_sizing; index++)
413 thread_instr_class_statements_array[index].reset();
414 }
415
416 if (thread_session_connect_attrs_sizing > 0)
417 {
418 thread_session_connect_attrs_array=
419 (char *)pfs_malloc(thread_session_connect_attrs_sizing, MYF(MY_ZEROFILL));
420 if (unlikely(thread_session_connect_attrs_array == NULL))
421 return 1;
422 }
423
424 if (current_digest_tokens_sizing > 0)
425 {
426 current_stmts_digest_token_array=
427 (unsigned char *)pfs_malloc(current_digest_tokens_sizing, MYF(MY_ZEROFILL));
428 if (unlikely(current_stmts_digest_token_array == NULL))
429 return 1;
430 }
431
432 if (history_digest_tokens_sizing > 0)
433 {
434 history_stmts_digest_token_array=
435 (unsigned char *)pfs_malloc(history_digest_tokens_sizing, MYF(MY_ZEROFILL));
436 if (unlikely(history_stmts_digest_token_array == NULL))
437 return 1;
438 }
439
440 for (index= 0; index < thread_max; index++)
441 {
442 thread_array[index].m_waits_history=
443 &thread_waits_history_array[index * events_waits_history_per_thread];
444 thread_array[index].m_instr_class_waits_stats=
445 &thread_instr_class_waits_array[index * wait_class_max];
446 thread_array[index].m_stages_history=
447 &thread_stages_history_array[index * events_stages_history_per_thread];
448 thread_array[index].m_instr_class_stages_stats=
449 &thread_instr_class_stages_array[index * stage_class_max];
450 thread_array[index].m_statements_history=
451 &thread_statements_history_array[index * events_statements_history_per_thread];
452 thread_array[index].m_statement_stack=
453 &thread_statements_stack_array[index * statement_stack_max];
454 thread_array[index].m_instr_class_statements_stats=
455 &thread_instr_class_statements_array[index * statement_class_max];
456 thread_array[index].m_session_connect_attrs=
457 &thread_session_connect_attrs_array[index * session_connect_attrs_size_per_thread];
458 }
459
460 for (index= 0; index < thread_statements_stack_sizing; index++)
461 {
462 pfs_stmt= & thread_statements_stack_array[index];
463
464 pfs_tokens= & current_stmts_digest_token_array[index * pfs_max_digest_length];
465 pfs_stmt->m_digest_storage.reset(pfs_tokens, pfs_max_digest_length);
466 }
467
468 for (index= 0; index < thread_statements_history_sizing; index++)
469 {
470 pfs_stmt= & thread_statements_history_array[index];
471
472 pfs_tokens= & history_stmts_digest_token_array[index * pfs_max_digest_length];
473 pfs_stmt->m_digest_storage.reset(pfs_tokens, pfs_max_digest_length);
474 }
475
476 if (stage_class_max > 0)
477 {
478 global_instr_class_stages_array=
479 PFS_MALLOC_ARRAY(stage_class_max,
480 sizeof(PFS_stage_stat), PFS_stage_stat, MYF(MY_ZEROFILL));
481 if (unlikely(global_instr_class_stages_array == NULL))
482 return 1;
483
484 for (index= 0; index < stage_class_max; index++)
485 global_instr_class_stages_array[index].reset();
486 }
487
488 if (statement_class_max > 0)
489 {
490 global_instr_class_statements_array=
491 PFS_MALLOC_ARRAY(statement_class_max,
492 sizeof(PFS_statement_stat), PFS_statement_stat, MYF(MY_ZEROFILL));
493 if (unlikely(global_instr_class_statements_array == NULL))
494 return 1;
495
496 for (index= 0; index < statement_class_max; index++)
497 global_instr_class_statements_array[index].reset();
498 }
499
500 return 0;
501}
502
503/** Cleanup all the instruments buffers. */
504void cleanup_instruments(void)
505{
506 pfs_free(mutex_array);
507 mutex_array= NULL;
508 mutex_max= 0;
509 pfs_free(rwlock_array);
510 rwlock_array= NULL;
511 rwlock_max= 0;
512 pfs_free(cond_array);
513 cond_array= NULL;
514 cond_max= 0;
515 pfs_free(file_array);
516 file_array= NULL;
517 file_max= 0;
518 pfs_free(file_handle_array);
519 file_handle_array= NULL;
520 file_handle_max= 0;
521 pfs_free(table_array);
522 table_array= NULL;
523 table_max= 0;
524 pfs_free(socket_array);
525 socket_array= NULL;
526 socket_max= 0;
527 pfs_free(thread_array);
528 thread_array= NULL;
529 thread_max= 0;
530 pfs_free(thread_waits_history_array);
531 thread_waits_history_array= NULL;
532 pfs_free(thread_stages_history_array);
533 thread_stages_history_array= NULL;
534 pfs_free(thread_statements_history_array);
535 thread_statements_history_array= NULL;
536 pfs_free(thread_statements_stack_array);
537 thread_statements_stack_array= NULL;
538 pfs_free(thread_instr_class_waits_array);
539 thread_instr_class_waits_array= NULL;
540 pfs_free(global_instr_class_stages_array);
541 global_instr_class_stages_array= NULL;
542 pfs_free(global_instr_class_statements_array);
543 global_instr_class_statements_array= NULL;
544 pfs_free(thread_instr_class_statements_array);
545 thread_instr_class_statements_array= NULL;
546 pfs_free(thread_instr_class_stages_array);
547 thread_instr_class_stages_array= NULL;
548 pfs_free(thread_session_connect_attrs_array);
549 thread_session_connect_attrs_array=NULL;
550 pfs_free(current_stmts_digest_token_array);
551 current_stmts_digest_token_array= NULL;
552 pfs_free(history_stmts_digest_token_array);
553 history_stmts_digest_token_array= NULL;
554}
555
556C_MODE_START
557/** Get hash table key for instrumented files. */
558static uchar *filename_hash_get_key(const uchar *entry, size_t *length,
559 my_bool)
560{
561 const PFS_file * const *typed_entry;
562 const PFS_file *file;
563 const void *result;
564 typed_entry= reinterpret_cast<const PFS_file* const *> (entry);
565 DBUG_ASSERT(typed_entry != NULL);
566 file= *typed_entry;
567 DBUG_ASSERT(file != NULL);
568 *length= file->m_filename_length;
569 result= file->m_filename;
570 return const_cast<uchar*> (reinterpret_cast<const uchar*> (result));
571}
572C_MODE_END
573
574/**
575 Initialize the file name hash.
576 @return 0 on success
577*/
578int init_file_hash(void)
579{
580 if ((! filename_hash_inited) && (file_max > 0))
581 {
582 lf_hash_init(&filename_hash, sizeof(PFS_file*), LF_HASH_UNIQUE,
583 0, 0, filename_hash_get_key, &my_charset_bin);
584 /* filename_hash.size= file_max; */
585 filename_hash_inited= true;
586 }
587 return 0;
588}
589
590/** Cleanup the file name hash. */
591void cleanup_file_hash(void)
592{
593 if (filename_hash_inited)
594 {
595 lf_hash_destroy(&filename_hash);
596 filename_hash_inited= false;
597 }
598}
599
600void PFS_scan::init(uint random, uint max_size)
601{
602 m_pass= 0;
603
604 if (max_size == 0)
605 {
606 /* Degenerated case, no buffer */
607 m_pass_max= 0;
608 return;
609 }
610
611 DBUG_ASSERT(random < max_size);
612
613 if (PFS_MAX_ALLOC_RETRY < max_size)
614 {
615 /*
616 The buffer is big compared to PFS_MAX_ALLOC_RETRY,
617 scan it only partially.
618 */
619 if (random + PFS_MAX_ALLOC_RETRY < max_size)
620 {
621 /*
622 Pass 1: [random, random + PFS_MAX_ALLOC_RETRY - 1]
623 Pass 2: not used.
624 */
625 m_pass_max= 1;
626 m_first[0]= random;
627 m_last[0]= random + PFS_MAX_ALLOC_RETRY;
628 m_first[1]= 0;
629 m_last[1]= 0;
630 }
631 else
632 {
633 /*
634 Pass 1: [random, max_size - 1]
635 Pass 2: [0, ...]
636 The combined length of pass 1 and 2 is PFS_MAX_ALLOC_RETRY.
637 */
638 m_pass_max= 2;
639 m_first[0]= random;
640 m_last[0]= max_size;
641 m_first[1]= 0;
642 m_last[1]= PFS_MAX_ALLOC_RETRY - (max_size - random);
643 }
644 }
645 else
646 {
647 /*
648 The buffer is small compared to PFS_MAX_ALLOC_RETRY,
649 scan it in full in two passes.
650 Pass 1: [random, max_size - 1]
651 Pass 2: [0, random - 1]
652 */
653 m_pass_max= 2;
654 m_first[0]= random;
655 m_last[0]= max_size;
656 m_first[1]= 0;
657 m_last[1]= random;
658 }
659
660 DBUG_ASSERT(m_first[0] < max_size);
661 DBUG_ASSERT(m_first[1] < max_size);
662 DBUG_ASSERT(m_last[1] <= max_size);
663 DBUG_ASSERT(m_last[1] <= max_size);
664 /* The combined length of all passes should not exceed PFS_MAX_ALLOC_RETRY. */
665 DBUG_ASSERT((m_last[0] - m_first[0]) +
666 (m_last[1] - m_first[1]) <= PFS_MAX_ALLOC_RETRY);
667}
668
669/**
670 Create instrumentation for a mutex instance.
671 @param klass the mutex class
672 @param identity the mutex address
673 @return a mutex instance, or NULL
674*/
675PFS_mutex* create_mutex(PFS_mutex_class *klass, const void *identity)
676{
677 static uint PFS_ALIGNED mutex_monotonic_index= 0;
678 uint index;
679 uint attempts= 0;
680 PFS_mutex *pfs;
681
682 if (mutex_full)
683 {
684 /*
685 This is a safety plug.
686 When mutex_array is severely undersized,
687 do not spin to death for each call.
688 */
689 mutex_lost++;
690 return NULL;
691 }
692
693 while (++attempts <= mutex_max)
694 {
695 /*
696 Problem:
697 Multiple threads running concurrently may need to create a new
698 instrumented mutex, and find an empty slot in mutex_array[].
699 With N1 threads running on a N2 core hardware:
700 - up to N2 hardware threads can run concurrently,
701 causing contention if looking at the same array[i] slot.
702 - up to N1 threads can run almost concurrently (with thread scheduling),
703 scanning maybe overlapping regions in the [0-mutex_max] array.
704
705 Solution:
706 Instead of letting different threads compete on the same array[i] entry,
707 this code forces all threads to cooperate with the monotonic_index.
708 Only one thread will be allowed to test a given array[i] slot.
709 All threads do scan from the same region, starting at monotonic_index.
710 Serializing on monotonic_index ensures that when a slot is found occupied
711 in a given loop by a given thread, other threads will not attempt this
712 slot.
713 */
714 index= PFS_atomic::add_u32(& mutex_monotonic_index, 1) % mutex_max;
715 pfs= mutex_array + index;
716
717 if (pfs->m_lock.is_free())
718 {
719 if (pfs->m_lock.free_to_dirty())
720 {
721 pfs->m_identity= identity;
722 pfs->m_class= klass;
723 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
724 pfs->m_timed= klass->m_timed;
725 pfs->m_mutex_stat.reset();
726 pfs->m_owner= NULL;
727 pfs->m_last_locked= 0;
728 pfs->m_lock.dirty_to_allocated();
729 if (klass->is_singleton())
730 klass->m_singleton= pfs;
731 return pfs;
732 }
733 }
734 }
735
736 mutex_lost++;
737 /*
738 Race condition.
739 The mutex_array might not be full if a concurrent thread
740 called destroy_mutex() during the scan, leaving one
741 empty slot we did not find.
742 However, 99.999 percent full tables or 100 percent full tables
743 are treated the same here, we declare the array overloaded.
744 */
745 mutex_full= true;
746 return NULL;
747}
748
749/**
750 Destroy instrumentation for a mutex instance.
751 @param pfs the mutex to destroy
752*/
753void destroy_mutex(PFS_mutex *pfs)
754{
755 DBUG_ASSERT(pfs != NULL);
756 PFS_mutex_class *klass= pfs->m_class;
757 /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME */
758 klass->m_mutex_stat.aggregate(& pfs->m_mutex_stat);
759 pfs->m_mutex_stat.reset();
760 if (klass->is_singleton())
761 klass->m_singleton= NULL;
762 pfs->m_lock.allocated_to_free();
763 mutex_full= false;
764}
765
766/**
767 Create instrumentation for a rwlock instance.
768 @param klass the rwlock class
769 @param identity the rwlock address
770 @return a rwlock instance, or NULL
771*/
772PFS_rwlock* create_rwlock(PFS_rwlock_class *klass, const void *identity)
773{
774 static uint PFS_ALIGNED rwlock_monotonic_index= 0;
775 uint index;
776 uint attempts= 0;
777 PFS_rwlock *pfs;
778
779 if (rwlock_full)
780 {
781 rwlock_lost++;
782 return NULL;
783 }
784
785 while (++attempts <= rwlock_max)
786 {
787 /* See create_mutex() */
788 index= PFS_atomic::add_u32(& rwlock_monotonic_index, 1) % rwlock_max;
789 pfs= rwlock_array + index;
790
791 if (pfs->m_lock.is_free())
792 {
793 if (pfs->m_lock.free_to_dirty())
794 {
795 pfs->m_identity= identity;
796 pfs->m_class= klass;
797 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
798 pfs->m_timed= klass->m_timed;
799 pfs->m_rwlock_stat.reset();
800 pfs->m_lock.dirty_to_allocated();
801 pfs->m_writer= NULL;
802 pfs->m_readers= 0;
803 pfs->m_last_written= 0;
804 pfs->m_last_read= 0;
805 if (klass->is_singleton())
806 klass->m_singleton= pfs;
807 return pfs;
808 }
809 }
810 }
811
812 rwlock_lost++;
813 rwlock_full= true;
814 return NULL;
815}
816
817/**
818 Destroy instrumentation for a rwlock instance.
819 @param pfs the rwlock to destroy
820*/
821void destroy_rwlock(PFS_rwlock *pfs)
822{
823 DBUG_ASSERT(pfs != NULL);
824 PFS_rwlock_class *klass= pfs->m_class;
825 /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME */
826 klass->m_rwlock_stat.aggregate(& pfs->m_rwlock_stat);
827 pfs->m_rwlock_stat.reset();
828 if (klass->is_singleton())
829 klass->m_singleton= NULL;
830 pfs->m_lock.allocated_to_free();
831 rwlock_full= false;
832}
833
834/**
835 Create instrumentation for a condition instance.
836 @param klass the condition class
837 @param identity the condition address
838 @return a condition instance, or NULL
839*/
840PFS_cond* create_cond(PFS_cond_class *klass, const void *identity)
841{
842 static uint PFS_ALIGNED cond_monotonic_index= 0;
843 uint index;
844 uint attempts= 0;
845 PFS_cond *pfs;
846
847 if (cond_full)
848 {
849 cond_lost++;
850 return NULL;
851 }
852
853 while (++attempts <= cond_max)
854 {
855 /* See create_mutex() */
856 index= PFS_atomic::add_u32(& cond_monotonic_index, 1) % cond_max;
857 pfs= cond_array + index;
858
859 if (pfs->m_lock.is_free())
860 {
861 if (pfs->m_lock.free_to_dirty())
862 {
863 pfs->m_identity= identity;
864 pfs->m_class= klass;
865 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
866 pfs->m_timed= klass->m_timed;
867 pfs->m_cond_stat.m_signal_count= 0;
868 pfs->m_cond_stat.m_broadcast_count= 0;
869 pfs->m_wait_stat.reset();
870 pfs->m_lock.dirty_to_allocated();
871 if (klass->is_singleton())
872 klass->m_singleton= pfs;
873 return pfs;
874 }
875 }
876 }
877
878 cond_lost++;
879 cond_full= true;
880 return NULL;
881}
882
883/**
884 Destroy instrumentation for a condition instance.
885 @param pfs the condition to destroy
886*/
887void destroy_cond(PFS_cond *pfs)
888{
889 DBUG_ASSERT(pfs != NULL);
890 PFS_cond_class *klass= pfs->m_class;
891 /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME */
892 klass->m_cond_stat.aggregate(& pfs->m_cond_stat);
893 pfs->m_wait_stat.reset();
894 if (klass->is_singleton())
895 klass->m_singleton= NULL;
896 pfs->m_lock.allocated_to_free();
897 cond_full= false;
898}
899
900PFS_thread* PFS_thread::get_current_thread()
901{
902 PFS_thread *pfs= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
903 return pfs;
904}
905
906void PFS_thread::reset_session_connect_attrs()
907{
908 m_session_connect_attrs_length= 0;
909 m_session_connect_attrs_cs_number= 0;
910
911 if ((m_session_connect_attrs != NULL) &&
912 (session_connect_attrs_size_per_thread > 0) )
913 {
914 /* Do not keep user data */
915 memset(m_session_connect_attrs, 0, session_connect_attrs_size_per_thread);
916 }
917}
918
919/**
920 Create instrumentation for a thread instance.
921 @param klass the thread class
922 @param identity the thread address,
923 or a value characteristic of this thread
924 @param processlist_id the PROCESSLIST id,
925 or 0 if unknown
926 @return a thread instance, or NULL
927*/
928PFS_thread* create_thread(PFS_thread_class *klass, const void *identity,
929 ulonglong processlist_id)
930{
931 static uint PFS_ALIGNED thread_monotonic_index= 0;
932 uint index;
933 uint attempts= 0;
934 PFS_thread *pfs;
935
936 if (thread_full)
937 {
938 thread_lost++;
939 return NULL;
940 }
941
942 while (++attempts <= thread_max)
943 {
944 /* See create_mutex() */
945 index= PFS_atomic::add_u32(& thread_monotonic_index, 1) % thread_max;
946 pfs= thread_array + index;
947
948 if (pfs->m_lock.is_free())
949 {
950 if (pfs->m_lock.free_to_dirty())
951 {
952 pfs->m_thread_internal_id=
953 PFS_atomic::add_u64(&thread_internal_id_counter, 1);
954 pfs->m_parent_thread_internal_id= 0;
955 pfs->m_processlist_id= (ulong)processlist_id;
956 pfs->m_event_id= 1;
957 pfs->m_stmt_lock.set_allocated();
958 pfs->m_session_lock.set_allocated();
959 pfs->m_enabled= true;
960 pfs->m_class= klass;
961 pfs->m_events_waits_current= & pfs->m_events_waits_stack[WAIT_STACK_BOTTOM];
962 pfs->m_waits_history_full= false;
963 pfs->m_waits_history_index= 0;
964 pfs->m_stages_history_full= false;
965 pfs->m_stages_history_index= 0;
966 pfs->m_statements_history_full= false;
967 pfs->m_statements_history_index= 0;
968
969 pfs->reset_stats();
970 pfs->reset_session_connect_attrs();
971
972 pfs->m_filename_hash_pins= NULL;
973 pfs->m_table_share_hash_pins= NULL;
974 pfs->m_setup_actor_hash_pins= NULL;
975 pfs->m_setup_object_hash_pins= NULL;
976 pfs->m_user_hash_pins= NULL;
977 pfs->m_account_hash_pins= NULL;
978 pfs->m_host_hash_pins= NULL;
979 pfs->m_digest_hash_pins= NULL;
980
981 pfs->m_username_length= 0;
982 pfs->m_hostname_length= 0;
983 pfs->m_dbname_length= 0;
984 pfs->m_command= 0;
985 pfs->m_start_time= 0;
986 pfs->m_stage= 0;
987 pfs->m_processlist_info[0]= '\0';
988 pfs->m_processlist_info_length= 0;
989
990 pfs->m_host= NULL;
991 pfs->m_user= NULL;
992 pfs->m_account= NULL;
993 set_thread_account(pfs);
994
995 PFS_events_waits *child_wait;
996 for (index= 0; index < WAIT_STACK_SIZE; index++)
997 {
998 child_wait= & pfs->m_events_waits_stack[index];
999 child_wait->m_thread_internal_id= pfs->m_thread_internal_id;
1000 child_wait->m_event_id= 0;
1001 child_wait->m_end_event_id= 0;
1002 child_wait->m_event_type= EVENT_TYPE_STATEMENT;
1003 child_wait->m_wait_class= NO_WAIT_CLASS;
1004 }
1005
1006 PFS_events_stages *child_stage= & pfs->m_stage_current;
1007 child_stage->m_thread_internal_id= pfs->m_thread_internal_id;
1008 child_stage->m_event_id= 0;
1009 child_stage->m_end_event_id= 0;
1010 child_stage->m_event_type= EVENT_TYPE_STATEMENT;
1011 child_stage->m_class= NULL;
1012 child_stage->m_timer_start= 0;
1013 child_stage->m_timer_end= 0;
1014 child_stage->m_source_file= NULL;
1015 child_stage->m_source_line= 0;
1016
1017 PFS_events_statements *child_statement;
1018 for (index= 0; index < statement_stack_max; index++)
1019 {
1020 child_statement= & pfs->m_statement_stack[index];
1021 child_statement->m_thread_internal_id= pfs->m_thread_internal_id;
1022 child_statement->m_event_id= 0;
1023 child_statement->m_end_event_id= 0;
1024 child_statement->m_event_type= EVENT_TYPE_STATEMENT;
1025 child_statement->m_class= NULL;
1026 child_statement->m_timer_start= 0;
1027 child_statement->m_timer_end= 0;
1028 child_statement->m_lock_time= 0;
1029 child_statement->m_source_file= NULL;
1030 child_statement->m_source_line= 0;
1031 child_statement->m_current_schema_name_length= 0;
1032 child_statement->m_sqltext_length= 0;
1033
1034 child_statement->m_message_text[0]= '\0';
1035 child_statement->m_sql_errno= 0;
1036 child_statement->m_sqlstate[0]= '\0';
1037 child_statement->m_error_count= 0;
1038 child_statement->m_warning_count= 0;
1039 child_statement->m_rows_affected= 0;
1040
1041 child_statement->m_rows_sent= 0;
1042 child_statement->m_rows_examined= 0;
1043 child_statement->m_created_tmp_disk_tables= 0;
1044 child_statement->m_created_tmp_tables= 0;
1045 child_statement->m_select_full_join= 0;
1046 child_statement->m_select_full_range_join= 0;
1047 child_statement->m_select_range= 0;
1048 child_statement->m_select_range_check= 0;
1049 child_statement->m_select_scan= 0;
1050 child_statement->m_sort_merge_passes= 0;
1051 child_statement->m_sort_range= 0;
1052 child_statement->m_sort_rows= 0;
1053 child_statement->m_sort_scan= 0;
1054 child_statement->m_no_index_used= 0;
1055 child_statement->m_no_good_index_used= 0;
1056 }
1057 pfs->m_events_statements_count= 0;
1058
1059 pfs->m_lock.dirty_to_allocated();
1060 return pfs;
1061 }
1062 }
1063 }
1064
1065 thread_lost++;
1066 thread_full= true;
1067 return NULL;
1068}
1069
1070PFS_mutex *sanitize_mutex(PFS_mutex *unsafe)
1071{
1072 SANITIZE_ARRAY_BODY(PFS_mutex, mutex_array, mutex_max, unsafe);
1073}
1074
1075PFS_rwlock *sanitize_rwlock(PFS_rwlock *unsafe)
1076{
1077 SANITIZE_ARRAY_BODY(PFS_rwlock, rwlock_array, rwlock_max, unsafe);
1078}
1079
1080PFS_cond *sanitize_cond(PFS_cond *unsafe)
1081{
1082 SANITIZE_ARRAY_BODY(PFS_cond, cond_array, cond_max, unsafe);
1083}
1084
1085/**
1086 Sanitize a PFS_thread pointer.
1087 Validate that the PFS_thread is part of thread_array.
1088 Sanitizing data is required when the data can be
1089 damaged with expected race conditions, for example
1090 involving EVENTS_WAITS_HISTORY_LONG.
1091 @param unsafe the pointer to sanitize
1092 @return a valid pointer, or NULL
1093*/
1094PFS_thread *sanitize_thread(PFS_thread *unsafe)
1095{
1096 SANITIZE_ARRAY_BODY(PFS_thread, thread_array, thread_max, unsafe);
1097}
1098
1099PFS_file *sanitize_file(PFS_file *unsafe)
1100{
1101 SANITIZE_ARRAY_BODY(PFS_file, file_array, file_max, unsafe);
1102}
1103
1104PFS_socket *sanitize_socket(PFS_socket *unsafe)
1105{
1106 SANITIZE_ARRAY_BODY(PFS_socket, socket_array, socket_max, unsafe);
1107}
1108
1109/**
1110 Destroy instrumentation for a thread instance.
1111 @param pfs the thread to destroy
1112*/
1113void destroy_thread(PFS_thread *pfs)
1114{
1115 DBUG_ASSERT(pfs != NULL);
1116 pfs->reset_session_connect_attrs();
1117 if (pfs->m_account != NULL)
1118 {
1119 pfs->m_account->release();
1120 pfs->m_account= NULL;
1121 DBUG_ASSERT(pfs->m_user == NULL);
1122 DBUG_ASSERT(pfs->m_host == NULL);
1123 }
1124 else
1125 {
1126 if (pfs->m_user != NULL)
1127 {
1128 pfs->m_user->release();
1129 pfs->m_user= NULL;
1130 }
1131 if (pfs->m_host != NULL)
1132 {
1133 pfs->m_host->release();
1134 pfs->m_host= NULL;
1135 }
1136 }
1137 if (pfs->m_filename_hash_pins)
1138 {
1139 lf_hash_put_pins(pfs->m_filename_hash_pins);
1140 pfs->m_filename_hash_pins= NULL;
1141 }
1142 if (pfs->m_table_share_hash_pins)
1143 {
1144 lf_hash_put_pins(pfs->m_table_share_hash_pins);
1145 pfs->m_table_share_hash_pins= NULL;
1146 }
1147 if (pfs->m_setup_actor_hash_pins)
1148 {
1149 lf_hash_put_pins(pfs->m_setup_actor_hash_pins);
1150 pfs->m_setup_actor_hash_pins= NULL;
1151 }
1152 if (pfs->m_setup_object_hash_pins)
1153 {
1154 lf_hash_put_pins(pfs->m_setup_object_hash_pins);
1155 pfs->m_setup_object_hash_pins= NULL;
1156 }
1157 if (pfs->m_user_hash_pins)
1158 {
1159 lf_hash_put_pins(pfs->m_user_hash_pins);
1160 pfs->m_user_hash_pins= NULL;
1161 }
1162 if (pfs->m_account_hash_pins)
1163 {
1164 lf_hash_put_pins(pfs->m_account_hash_pins);
1165 pfs->m_account_hash_pins= NULL;
1166 }
1167 if (pfs->m_host_hash_pins)
1168 {
1169 lf_hash_put_pins(pfs->m_host_hash_pins);
1170 pfs->m_host_hash_pins= NULL;
1171 }
1172 if (pfs->m_digest_hash_pins)
1173 {
1174 lf_hash_put_pins(pfs->m_digest_hash_pins);
1175 pfs->m_digest_hash_pins= NULL;
1176 }
1177 pfs->m_lock.allocated_to_free();
1178 thread_full= false;
1179}
1180
1181/**
1182 Get the hash pins for @filename_hash.
1183 @param thread The running thread.
1184 @returns The LF_HASH pins for the thread.
1185*/
1186LF_PINS* get_filename_hash_pins(PFS_thread *thread)
1187{
1188 if (unlikely(thread->m_filename_hash_pins == NULL))
1189 {
1190 if (! filename_hash_inited)
1191 return NULL;
1192 thread->m_filename_hash_pins= lf_hash_get_pins(&filename_hash);
1193 }
1194 return thread->m_filename_hash_pins;
1195}
1196
1197/**
1198 Find or create instrumentation for a file instance by file name.
1199 @param thread the executing instrumented thread
1200 @param klass the file class
1201 @param filename the file name
1202 @param len the length in bytes of filename
1203 @param create create a file instance if none found
1204 @return a file instance, or NULL
1205*/
1206PFS_file*
1207find_or_create_file(PFS_thread *thread, PFS_file_class *klass,
1208 const char *filename, uint len, bool create)
1209{
1210 PFS_file *pfs;
1211
1212 DBUG_ASSERT(klass != NULL || ! create);
1213
1214 LF_PINS *pins= get_filename_hash_pins(thread);
1215 if (unlikely(pins == NULL))
1216 {
1217 file_lost++;
1218 return NULL;
1219 }
1220
1221 char safe_buffer[FN_REFLEN];
1222 const char *safe_filename;
1223
1224 if (len >= FN_REFLEN)
1225 {
1226 /*
1227 The instrumented code uses file names that exceeds FN_REFLEN.
1228 This could be legal for instrumentation on non mysys APIs,
1229 so we support it.
1230 Truncate the file name so that:
1231 - it fits into pfs->m_filename
1232 - it is safe to use mysys apis to normalize the file name.
1233 */
1234 memcpy(safe_buffer, filename, FN_REFLEN - 1);
1235 safe_buffer[FN_REFLEN - 1]= 0;
1236 safe_filename= safe_buffer;
1237 }
1238 else
1239 safe_filename= filename;
1240
1241 /*
1242 Normalize the file name to avoid duplicates when using aliases:
1243 - absolute or relative paths
1244 - symbolic links
1245 Names are resolved as follows:
1246 - /real/path/to/real_file ==> same
1247 - /path/with/link/to/real_file ==> /real/path/to/real_file
1248 - real_file ==> /real/path/to/real_file
1249 - ./real_file ==> /real/path/to/real_file
1250 - /real/path/to/sym_link ==> same
1251 - /path/with/link/to/sym_link ==> /real/path/to/sym_link
1252 - sym_link ==> /real/path/to/sym_link
1253 - ./sym_link ==> /real/path/to/sym_link
1254 When the last component of a file is a symbolic link,
1255 the last component is *not* resolved, so that all file io
1256 operations on a link (create, read, write, delete) are counted
1257 against the link itself, not the target file.
1258 Resolving the name would lead to create counted against the link,
1259 and read/write/delete counted against the target, leading to
1260 incoherent results and instrumentation leaks.
1261 Also note that, when creating files, this name resolution
1262 works properly for files that do not exist (yet) on the file system.
1263 */
1264 char buffer[FN_REFLEN];
1265 char dirbuffer[FN_REFLEN];
1266 size_t dirlen;
1267 const char *normalized_filename;
1268 int normalized_length;
1269
1270 dirlen= dirname_length(safe_filename);
1271 if (dirlen == 0)
1272 {
1273 dirbuffer[0]= FN_CURLIB;
1274 dirbuffer[1]= FN_LIBCHAR;
1275 dirbuffer[2]= '\0';
1276 }
1277 else
1278 {
1279 memcpy(dirbuffer, safe_filename, dirlen);
1280 dirbuffer[dirlen]= '\0';
1281 }
1282
1283 if (my_realpath(buffer, dirbuffer, MYF(0)) != 0)
1284 {
1285 file_lost++;
1286 return NULL;
1287 }
1288
1289 /* Append the unresolved file name to the resolved path */
1290 char *ptr= buffer + strlen(buffer);
1291 char *buf_end= &buffer[sizeof(buffer)-1];
1292 if ((buf_end > ptr) && (*(ptr-1) != FN_LIBCHAR))
1293 *ptr++= FN_LIBCHAR;
1294 if (buf_end > ptr)
1295 strncpy(ptr, safe_filename + dirlen, buf_end - ptr);
1296 *buf_end= '\0';
1297
1298 normalized_filename= buffer;
1299 normalized_length= (int)strlen(normalized_filename);
1300
1301 PFS_file **entry;
1302 uint retry_count= 0;
1303 const uint retry_max= 3;
1304 static uint PFS_ALIGNED file_monotonic_index= 0;
1305 uint index;
1306 uint attempts= 0;
1307
1308search:
1309
1310 entry= reinterpret_cast<PFS_file**>
1311 (lf_hash_search(&filename_hash, pins,
1312 normalized_filename, normalized_length));
1313 if (entry && (entry != MY_ERRPTR))
1314 {
1315 pfs= *entry;
1316 pfs->m_file_stat.m_open_count++;
1317 lf_hash_search_unpin(pins);
1318 return pfs;
1319 }
1320
1321 lf_hash_search_unpin(pins);
1322
1323 if (! create)
1324 {
1325 /* No lost counter, just looking for the file existence. */
1326 return NULL;
1327 }
1328
1329 if (file_full)
1330 {
1331 file_lost++;
1332 return NULL;
1333 }
1334
1335 while (++attempts <= file_max)
1336 {
1337 /* See create_mutex() */
1338 index= PFS_atomic::add_u32(& file_monotonic_index, 1) % file_max;
1339 pfs= file_array + index;
1340
1341 if (pfs->m_lock.is_free())
1342 {
1343 if (pfs->m_lock.free_to_dirty())
1344 {
1345 pfs->m_class= klass;
1346 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
1347 pfs->m_timed= klass->m_timed;
1348 strncpy(pfs->m_filename, normalized_filename, normalized_length);
1349 pfs->m_filename[normalized_length]= '\0';
1350 pfs->m_filename_length= normalized_length;
1351 pfs->m_file_stat.m_open_count= 1;
1352 pfs->m_file_stat.m_io_stat.reset();
1353 pfs->m_identity= (const void *)pfs;
1354
1355 int res;
1356 res= lf_hash_insert(&filename_hash, thread->m_filename_hash_pins,
1357 &pfs);
1358 if (likely(res == 0))
1359 {
1360 pfs->m_lock.dirty_to_allocated();
1361 if (klass->is_singleton())
1362 klass->m_singleton= pfs;
1363 return pfs;
1364 }
1365
1366 pfs->m_lock.dirty_to_free();
1367
1368 if (res > 0)
1369 {
1370 /* Duplicate insert by another thread */
1371 if (++retry_count > retry_max)
1372 {
1373 /* Avoid infinite loops */
1374 file_lost++;
1375 return NULL;
1376 }
1377 goto search;
1378 }
1379
1380 /* OOM in lf_hash_insert */
1381 file_lost++;
1382 return NULL;
1383 }
1384 }
1385 }
1386
1387 file_lost++;
1388 file_full= true;
1389 return NULL;
1390}
1391
1392/**
1393 Release instrumentation for a file instance.
1394 @param pfs the file to release
1395*/
1396void release_file(PFS_file *pfs)
1397{
1398 DBUG_ASSERT(pfs != NULL);
1399 pfs->m_file_stat.m_open_count--;
1400}
1401
1402/**
1403 Destroy instrumentation for a file instance.
1404 @param thread the executing thread instrumentation
1405 @param pfs the file to destroy
1406*/
1407void destroy_file(PFS_thread *thread, PFS_file *pfs)
1408{
1409 DBUG_ASSERT(thread != NULL);
1410 DBUG_ASSERT(pfs != NULL);
1411 PFS_file_class *klass= pfs->m_class;
1412
1413 /* Aggregate to FILE_SUMMARY_BY_EVENT_NAME */
1414 klass->m_file_stat.aggregate(& pfs->m_file_stat);
1415 pfs->m_file_stat.reset();
1416
1417 if (klass->is_singleton())
1418 klass->m_singleton= NULL;
1419
1420 LF_PINS *pins= get_filename_hash_pins(thread);
1421 DBUG_ASSERT(pins != NULL);
1422
1423 lf_hash_delete(&filename_hash, pins,
1424 pfs->m_filename, pfs->m_filename_length);
1425 if (klass->is_singleton())
1426 klass->m_singleton= NULL;
1427 pfs->m_lock.allocated_to_free();
1428 file_full= false;
1429}
1430
1431/**
1432 Create instrumentation for a table instance.
1433 @param share the table share
1434 @param opening_thread the opening thread
1435 @param identity the table address
1436 @return a table instance, or NULL
1437*/
1438PFS_table* create_table(PFS_table_share *share, PFS_thread *opening_thread,
1439 const void *identity)
1440{
1441 static uint PFS_ALIGNED table_monotonic_index= 0;
1442 uint index;
1443 uint attempts= 0;
1444 PFS_table *pfs;
1445
1446 if (table_full)
1447 {
1448 table_lost++;
1449 return NULL;
1450 }
1451
1452 while (++attempts <= table_max)
1453 {
1454 /* See create_mutex() */
1455 index= PFS_atomic::add_u32(& table_monotonic_index, 1) % table_max;
1456 pfs= table_array + index;
1457
1458 if (pfs->m_lock.is_free())
1459 {
1460 if (pfs->m_lock.free_to_dirty())
1461 {
1462 pfs->m_identity= identity;
1463 pfs->m_share= share;
1464 pfs->m_io_enabled= share->m_enabled &&
1465 flag_global_instrumentation && global_table_io_class.m_enabled;
1466 pfs->m_io_timed= share->m_timed && global_table_io_class.m_timed;
1467 pfs->m_lock_enabled= share->m_enabled &&
1468 flag_global_instrumentation && global_table_lock_class.m_enabled;
1469 pfs->m_lock_timed= share->m_timed && global_table_lock_class.m_timed;
1470 pfs->m_has_io_stats= false;
1471 pfs->m_has_lock_stats= false;
1472 share->inc_refcount();
1473 pfs->m_table_stat.fast_reset();
1474 pfs->m_thread_owner= opening_thread;
1475 pfs->m_lock.dirty_to_allocated();
1476 return pfs;
1477 }
1478 }
1479 }
1480
1481 table_lost++;
1482 table_full= true;
1483 return NULL;
1484}
1485
1486void PFS_table::sanitized_aggregate(void)
1487{
1488 /*
1489 This thread could be a TRUNCATE on an aggregated summary table,
1490 and not own the table handle.
1491 */
1492 PFS_table_share *safe_share= sanitize_table_share(m_share);
1493 if (safe_share != NULL)
1494 {
1495 if (m_has_io_stats && m_has_lock_stats)
1496 {
1497 safe_aggregate(& m_table_stat, safe_share);
1498 m_has_io_stats= false;
1499 m_has_lock_stats= false;
1500 }
1501 else if (m_has_io_stats)
1502 {
1503 safe_aggregate_io(& m_table_stat, safe_share);
1504 m_has_io_stats= false;
1505 }
1506 else if (m_has_lock_stats)
1507 {
1508 safe_aggregate_lock(& m_table_stat, safe_share);
1509 m_has_lock_stats= false;
1510 }
1511 }
1512}
1513
1514void PFS_table::sanitized_aggregate_io(void)
1515{
1516 PFS_table_share *safe_share= sanitize_table_share(m_share);
1517 if (safe_share != NULL && m_has_io_stats)
1518 {
1519 safe_aggregate_io(& m_table_stat, safe_share);
1520 m_has_io_stats= false;
1521 }
1522}
1523
1524void PFS_table::sanitized_aggregate_lock(void)
1525{
1526 PFS_table_share *safe_share= sanitize_table_share(m_share);
1527 if (safe_share != NULL && m_has_lock_stats)
1528 {
1529 safe_aggregate_lock(& m_table_stat, safe_share);
1530 m_has_lock_stats= false;
1531 }
1532}
1533
1534void PFS_table::safe_aggregate(PFS_table_stat *table_stat,
1535 PFS_table_share *table_share)
1536{
1537 DBUG_ASSERT(table_stat != NULL);
1538 DBUG_ASSERT(table_share != NULL);
1539
1540 uint key_count= sanitize_index_count(table_share->m_key_count);
1541
1542 /* Aggregate to TABLE_IO_SUMMARY, TABLE_LOCK_SUMMARY */
1543 table_share->m_table_stat.aggregate(table_stat, key_count);
1544 table_stat->fast_reset();
1545}
1546
1547void PFS_table::safe_aggregate_io(PFS_table_stat *table_stat,
1548 PFS_table_share *table_share)
1549{
1550 DBUG_ASSERT(table_stat != NULL);
1551 DBUG_ASSERT(table_share != NULL);
1552
1553 uint key_count= sanitize_index_count(table_share->m_key_count);
1554
1555 /* Aggregate to TABLE_IO_SUMMARY */
1556 table_share->m_table_stat.aggregate_io(table_stat, key_count);
1557 table_stat->fast_reset_io();
1558}
1559
1560void PFS_table::safe_aggregate_lock(PFS_table_stat *table_stat,
1561 PFS_table_share *table_share)
1562{
1563 DBUG_ASSERT(table_stat != NULL);
1564 DBUG_ASSERT(table_share != NULL);
1565
1566 /* Aggregate to TABLE_LOCK_SUMMARY */
1567 table_share->m_table_stat.aggregate_lock(table_stat);
1568 table_stat->fast_reset_lock();
1569}
1570
1571/**
1572 Destroy instrumentation for a table instance.
1573 @param pfs the table to destroy
1574*/
1575void destroy_table(PFS_table *pfs)
1576{
1577 DBUG_ASSERT(pfs != NULL);
1578 pfs->m_share->dec_refcount();
1579 pfs->m_lock.allocated_to_free();
1580 table_full= false;
1581}
1582
1583/**
1584 Create instrumentation for a socket instance.
1585 @param klass the socket class
1586 @param identity the socket descriptor
1587 @return a socket instance, or NULL
1588*/
1589PFS_socket* create_socket(PFS_socket_class *klass, const my_socket *fd,
1590 const struct sockaddr *addr, socklen_t addr_len)
1591{
1592 static uint PFS_ALIGNED socket_monotonic_index= 0;
1593 uint index;
1594 uint attempts= 0;
1595 PFS_socket *pfs;
1596
1597 if (socket_full)
1598 {
1599 socket_lost++;
1600 return NULL;
1601 }
1602
1603 uint fd_used= 0;
1604 uint addr_len_used= addr_len;
1605
1606 if (fd != NULL)
1607 fd_used= (int)*fd;
1608
1609 if (addr_len_used > sizeof(sockaddr_storage))
1610 addr_len_used= sizeof(sockaddr_storage);
1611
1612 while (++attempts <= socket_max)
1613 {
1614 index= PFS_atomic::add_u32(& socket_monotonic_index, 1) % socket_max;
1615 pfs= socket_array + index;
1616
1617 if (pfs->m_lock.is_free())
1618 {
1619 if (pfs->m_lock.free_to_dirty())
1620 {
1621 pfs->m_fd= fd_used;
1622 /* There is no socket object, so we use the instrumentation. */
1623 pfs->m_identity= pfs;
1624 pfs->m_class= klass;
1625 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
1626 pfs->m_timed= klass->m_timed;
1627 pfs->m_idle= false;
1628 pfs->m_socket_stat.reset();
1629 pfs->m_thread_owner= NULL;
1630
1631 pfs->m_addr_len= addr_len_used;
1632 if ((addr != NULL) && (addr_len_used > 0))
1633 {
1634 pfs->m_addr_len= addr_len_used;
1635 memcpy(&pfs->m_sock_addr, addr, addr_len_used);
1636 }
1637 else
1638 {
1639 pfs->m_addr_len= 0;
1640 }
1641
1642 pfs->m_lock.dirty_to_allocated();
1643
1644 if (klass->is_singleton())
1645 klass->m_singleton= pfs;
1646 return pfs;
1647 }
1648 }
1649 }
1650
1651 socket_lost++;
1652 socket_full= true;
1653 return NULL;
1654}
1655
1656/**
1657 Destroy instrumentation for a socket instance.
1658 @param pfs the socket to destroy
1659*/
1660void destroy_socket(PFS_socket *pfs)
1661{
1662 DBUG_ASSERT(pfs != NULL);
1663 PFS_socket_class *klass= pfs->m_class;
1664
1665 /* Aggregate to SOCKET_SUMMARY_BY_EVENT_NAME */
1666 klass->m_socket_stat.m_io_stat.aggregate(&pfs->m_socket_stat.m_io_stat);
1667
1668 if (klass->is_singleton())
1669 klass->m_singleton= NULL;
1670
1671 /* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME */
1672 PFS_thread *thread= pfs->m_thread_owner;
1673 if (thread != NULL)
1674 {
1675 PFS_single_stat *event_name_array;
1676 event_name_array= thread->m_instr_class_waits_stats;
1677 uint index= pfs->m_class->m_event_name_index;
1678
1679 /* Combine stats for all operations */
1680 PFS_single_stat stat;
1681 pfs->m_socket_stat.m_io_stat.sum_waits(&stat);
1682 event_name_array[index].aggregate(&stat);
1683 }
1684
1685 pfs->m_socket_stat.reset();
1686 pfs->m_thread_owner= NULL;
1687 pfs->m_fd= 0;
1688 pfs->m_addr_len= 0;
1689 pfs->m_lock.allocated_to_free();
1690 socket_full= false;
1691}
1692
1693static void reset_mutex_waits_by_instance(void)
1694{
1695 PFS_mutex *pfs= mutex_array;
1696 PFS_mutex *pfs_last= mutex_array + mutex_max;
1697
1698 for ( ; pfs < pfs_last; pfs++)
1699 pfs->m_mutex_stat.reset();
1700}
1701
1702static void reset_rwlock_waits_by_instance(void)
1703{
1704 PFS_rwlock *pfs= rwlock_array;
1705 PFS_rwlock *pfs_last= rwlock_array + rwlock_max;
1706
1707 for ( ; pfs < pfs_last; pfs++)
1708 pfs->m_rwlock_stat.reset();
1709}
1710
1711static void reset_cond_waits_by_instance(void)
1712{
1713 PFS_cond *pfs= cond_array;
1714 PFS_cond *pfs_last= cond_array + cond_max;
1715
1716 for ( ; pfs < pfs_last; pfs++)
1717 pfs->m_cond_stat.reset();
1718}
1719
1720static void reset_file_waits_by_instance(void)
1721{
1722 PFS_file *pfs= file_array;
1723 PFS_file *pfs_last= file_array + file_max;
1724
1725 for ( ; pfs < pfs_last; pfs++)
1726 pfs->m_file_stat.reset();
1727}
1728
1729static void reset_socket_waits_by_instance(void)
1730{
1731 PFS_socket *pfs= socket_array;
1732 PFS_socket *pfs_last= socket_array + socket_max;
1733
1734 for ( ; pfs < pfs_last; pfs++)
1735 pfs->m_socket_stat.reset();
1736}
1737
1738/** Reset the wait statistics per object instance. */
1739void reset_events_waits_by_instance(void)
1740{
1741 reset_mutex_waits_by_instance();
1742 reset_rwlock_waits_by_instance();
1743 reset_cond_waits_by_instance();
1744 reset_file_waits_by_instance();
1745 reset_socket_waits_by_instance();
1746}
1747
1748/** Reset the io statistics per file instance. */
1749void reset_file_instance_io(void)
1750{
1751 PFS_file *pfs= file_array;
1752 PFS_file *pfs_last= file_array + file_max;
1753
1754 for ( ; pfs < pfs_last; pfs++)
1755 pfs->m_file_stat.m_io_stat.reset();
1756}
1757
1758/** Reset the io statistics per socket instance. */
1759void reset_socket_instance_io(void)
1760{
1761 PFS_socket *pfs= socket_array;
1762 PFS_socket *pfs_last= socket_array + socket_max;
1763
1764 for ( ; pfs < pfs_last; pfs++)
1765 pfs->m_socket_stat.m_io_stat.reset();
1766}
1767
1768void aggregate_all_event_names(PFS_single_stat *from_array,
1769 PFS_single_stat *to_array)
1770{
1771 PFS_single_stat *from;
1772 PFS_single_stat *from_last;
1773 PFS_single_stat *to;
1774
1775 from= from_array;
1776 from_last= from_array + wait_class_max;
1777 to= to_array;
1778
1779 for ( ; from < from_last ; from++, to++)
1780 {
1781 if (from->m_count > 0)
1782 {
1783 to->aggregate(from);
1784 from->reset();
1785 }
1786 }
1787}
1788
1789void aggregate_all_event_names(PFS_single_stat *from_array,
1790 PFS_single_stat *to_array_1,
1791 PFS_single_stat *to_array_2)
1792{
1793 PFS_single_stat *from;
1794 PFS_single_stat *from_last;
1795 PFS_single_stat *to_1;
1796 PFS_single_stat *to_2;
1797
1798 from= from_array;
1799 from_last= from_array + wait_class_max;
1800 to_1= to_array_1;
1801 to_2= to_array_2;
1802
1803 for ( ; from < from_last ; from++, to_1++, to_2++)
1804 {
1805 if (from->m_count > 0)
1806 {
1807 to_1->aggregate(from);
1808 to_2->aggregate(from);
1809 from->reset();
1810 }
1811 }
1812}
1813
1814void aggregate_all_stages(PFS_stage_stat *from_array,
1815 PFS_stage_stat *to_array)
1816{
1817 PFS_stage_stat *from;
1818 PFS_stage_stat *from_last;
1819 PFS_stage_stat *to;
1820
1821 from= from_array;
1822 from_last= from_array + stage_class_max;
1823 to= to_array;
1824
1825 for ( ; from < from_last ; from++, to++)
1826 {
1827 if (from->m_timer1_stat.m_count > 0)
1828 {
1829 to->aggregate(from);
1830 from->reset();
1831 }
1832 }
1833}
1834
1835void aggregate_all_stages(PFS_stage_stat *from_array,
1836 PFS_stage_stat *to_array_1,
1837 PFS_stage_stat *to_array_2)
1838{
1839 PFS_stage_stat *from;
1840 PFS_stage_stat *from_last;
1841 PFS_stage_stat *to_1;
1842 PFS_stage_stat *to_2;
1843
1844 from= from_array;
1845 from_last= from_array + stage_class_max;
1846 to_1= to_array_1;
1847 to_2= to_array_2;
1848
1849 for ( ; from < from_last ; from++, to_1++, to_2++)
1850 {
1851 if (from->m_timer1_stat.m_count > 0)
1852 {
1853 to_1->aggregate(from);
1854 to_2->aggregate(from);
1855 from->reset();
1856 }
1857 }
1858}
1859
1860void aggregate_all_statements(PFS_statement_stat *from_array,
1861 PFS_statement_stat *to_array)
1862{
1863 PFS_statement_stat *from;
1864 PFS_statement_stat *from_last;
1865 PFS_statement_stat *to;
1866
1867 from= from_array;
1868 from_last= from_array + statement_class_max;
1869 to= to_array;
1870
1871 for ( ; from < from_last ; from++, to++)
1872 {
1873 if (from->m_timer1_stat.m_count > 0)
1874 {
1875 to->aggregate(from);
1876 from->reset();
1877 }
1878 }
1879}
1880
1881void aggregate_all_statements(PFS_statement_stat *from_array,
1882 PFS_statement_stat *to_array_1,
1883 PFS_statement_stat *to_array_2)
1884{
1885 PFS_statement_stat *from;
1886 PFS_statement_stat *from_last;
1887 PFS_statement_stat *to_1;
1888 PFS_statement_stat *to_2;
1889
1890 from= from_array;
1891 from_last= from_array + statement_class_max;
1892 to_1= to_array_1;
1893 to_2= to_array_2;
1894
1895 for ( ; from < from_last ; from++, to_1++, to_2++)
1896 {
1897 if (from->m_timer1_stat.m_count > 0)
1898 {
1899 to_1->aggregate(from);
1900 to_2->aggregate(from);
1901 from->reset();
1902 }
1903 }
1904}
1905
1906void aggregate_thread_stats(PFS_thread *thread,
1907 PFS_account *safe_account,
1908 PFS_user *safe_user,
1909 PFS_host *safe_host)
1910{
1911 if (likely(safe_account != NULL))
1912 {
1913 safe_account->m_disconnected_count++;
1914 return;
1915 }
1916
1917 if (safe_user != NULL)
1918 safe_user->m_disconnected_count++;
1919
1920 if (safe_host != NULL)
1921 safe_host->m_disconnected_count++;
1922
1923 /* There is no global table for connections statistics. */
1924 return;
1925}
1926
1927void aggregate_thread(PFS_thread *thread,
1928 PFS_account *safe_account,
1929 PFS_user *safe_user,
1930 PFS_host *safe_host)
1931{
1932 aggregate_thread_waits(thread, safe_account, safe_user, safe_host);
1933 aggregate_thread_stages(thread, safe_account, safe_user, safe_host);
1934 aggregate_thread_statements(thread, safe_account, safe_user, safe_host);
1935 aggregate_thread_stats(thread, safe_account, safe_user, safe_host);
1936}
1937
1938void aggregate_thread_waits(PFS_thread *thread,
1939 PFS_account *safe_account,
1940 PFS_user *safe_user,
1941 PFS_host *safe_host)
1942{
1943 if (likely(safe_account != NULL))
1944 {
1945 /*
1946 Aggregate EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1947 to EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME.
1948 */
1949 aggregate_all_event_names(thread->m_instr_class_waits_stats,
1950 safe_account->m_instr_class_waits_stats);
1951
1952 return;
1953 }
1954
1955 if ((safe_user != NULL) && (safe_host != NULL))
1956 {
1957 /*
1958 Aggregate EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
1959 - EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME
1960 - EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME
1961 in parallel.
1962 */
1963 aggregate_all_event_names(thread->m_instr_class_waits_stats,
1964 safe_user->m_instr_class_waits_stats,
1965 safe_host->m_instr_class_waits_stats);
1966 return;
1967 }
1968
1969 if (safe_user != NULL)
1970 {
1971 /*
1972 Aggregate EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1973 to EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME, directly.
1974 */
1975 aggregate_all_event_names(thread->m_instr_class_waits_stats,
1976 safe_user->m_instr_class_waits_stats);
1977 return;
1978 }
1979
1980 if (safe_host != NULL)
1981 {
1982 /*
1983 Aggregate EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
1984 to EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME, directly.
1985 */
1986 aggregate_all_event_names(thread->m_instr_class_waits_stats,
1987 safe_host->m_instr_class_waits_stats);
1988 return;
1989 }
1990
1991 /* Orphan thread, clean the waits stats. */
1992 thread->reset_waits_stats();
1993}
1994
1995void aggregate_thread_stages(PFS_thread *thread,
1996 PFS_account *safe_account,
1997 PFS_user *safe_user,
1998 PFS_host *safe_host)
1999{
2000 if (likely(safe_account != NULL))
2001 {
2002 /*
2003 Aggregate EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME
2004 to EVENTS_STAGES_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME.
2005 */
2006 aggregate_all_stages(thread->m_instr_class_stages_stats,
2007 safe_account->m_instr_class_stages_stats);
2008
2009 return;
2010 }
2011
2012 if ((safe_user != NULL) && (safe_host != NULL))
2013 {
2014 /*
2015 Aggregate EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
2016 - EVENTS_STAGES_SUMMARY_BY_USER_BY_EVENT_NAME
2017 - EVENTS_STAGES_SUMMARY_BY_HOST_BY_EVENT_NAME
2018 in parallel.
2019 */
2020 aggregate_all_stages(thread->m_instr_class_stages_stats,
2021 safe_user->m_instr_class_stages_stats,
2022 safe_host->m_instr_class_stages_stats);
2023 return;
2024 }
2025
2026 if (safe_user != NULL)
2027 {
2028 /*
2029 Aggregate EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
2030 - EVENTS_STAGES_SUMMARY_BY_USER_BY_EVENT_NAME
2031 - EVENTS_STAGES_SUMMARY_GLOBAL_BY_EVENT_NAME
2032 in parallel.
2033 */
2034 aggregate_all_stages(thread->m_instr_class_stages_stats,
2035 safe_user->m_instr_class_stages_stats,
2036 global_instr_class_stages_array);
2037 return;
2038 }
2039
2040 if (safe_host != NULL)
2041 {
2042 /*
2043 Aggregate EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME
2044 to EVENTS_STAGES_SUMMARY_BY_HOST_BY_EVENT_NAME, directly.
2045 */
2046 aggregate_all_stages(thread->m_instr_class_stages_stats,
2047 safe_host->m_instr_class_stages_stats);
2048 return;
2049 }
2050
2051 /*
2052 Aggregate EVENTS_STAGES_SUMMARY_BY_THREAD_BY_EVENT_NAME
2053 to EVENTS_STAGES_SUMMARY_GLOBAL_BY_EVENT_NAME.
2054 */
2055 aggregate_all_stages(thread->m_instr_class_stages_stats,
2056 global_instr_class_stages_array);
2057}
2058
2059void aggregate_thread_statements(PFS_thread *thread,
2060 PFS_account *safe_account,
2061 PFS_user *safe_user,
2062 PFS_host *safe_host)
2063{
2064 if (likely(safe_account != NULL))
2065 {
2066 /*
2067 Aggregate EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME
2068 to EVENTS_STATEMENTS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME.
2069 */
2070 aggregate_all_statements(thread->m_instr_class_statements_stats,
2071 safe_account->m_instr_class_statements_stats);
2072
2073 return;
2074 }
2075
2076 if ((safe_user != NULL) && (safe_host != NULL))
2077 {
2078 /*
2079 Aggregate EVENTS_STATEMENT_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
2080 - EVENTS_STATEMENT_SUMMARY_BY_USER_BY_EVENT_NAME
2081 - EVENTS_STATEMENT_SUMMARY_BY_HOST_BY_EVENT_NAME
2082 in parallel.
2083 */
2084 aggregate_all_statements(thread->m_instr_class_statements_stats,
2085 safe_user->m_instr_class_statements_stats,
2086 safe_host->m_instr_class_statements_stats);
2087 return;
2088 }
2089
2090 if (safe_user != NULL)
2091 {
2092 /*
2093 Aggregate EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME to:
2094 - EVENTS_STATEMENTS_SUMMARY_BY_USER_BY_EVENT_NAME
2095 - EVENTS_STATEMENTS_SUMMARY_GLOBAL_BY_EVENT_NAME
2096 in parallel.
2097 */
2098 aggregate_all_statements(thread->m_instr_class_statements_stats,
2099 safe_user->m_instr_class_statements_stats,
2100 global_instr_class_statements_array);
2101 return;
2102 }
2103
2104 if (safe_host != NULL)
2105 {
2106 /*
2107 Aggregate EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME
2108 to EVENTS_STATEMENTS_SUMMARY_BY_HOST_BY_EVENT_NAME, directly.
2109 */
2110 aggregate_all_statements(thread->m_instr_class_statements_stats,
2111 safe_host->m_instr_class_statements_stats);
2112 return;
2113 }
2114
2115 /*
2116 Aggregate EVENTS_STATEMENTS_SUMMARY_BY_THREAD_BY_EVENT_NAME
2117 to EVENTS_STATEMENTS_SUMMARY_GLOBAL_BY_EVENT_NAME.
2118 */
2119 aggregate_all_statements(thread->m_instr_class_statements_stats,
2120 global_instr_class_statements_array);
2121}
2122
2123void clear_thread_account(PFS_thread *thread)
2124{
2125 if (thread->m_account != NULL)
2126 {
2127 thread->m_account->release();
2128 thread->m_account= NULL;
2129 }
2130
2131 if (thread->m_user != NULL)
2132 {
2133 thread->m_user->release();
2134 thread->m_user= NULL;
2135 }
2136
2137 if (thread->m_host != NULL)
2138 {
2139 thread->m_host->release();
2140 thread->m_host= NULL;
2141 }
2142}
2143
2144void set_thread_account(PFS_thread *thread)
2145{
2146 DBUG_ASSERT(thread->m_account == NULL);
2147 DBUG_ASSERT(thread->m_user == NULL);
2148 DBUG_ASSERT(thread->m_host == NULL);
2149
2150 thread->m_account= find_or_create_account(thread,
2151 thread->m_username,
2152 thread->m_username_length,
2153 thread->m_hostname,
2154 thread->m_hostname_length);
2155
2156 if ((thread->m_account == NULL) && (thread->m_username_length > 0))
2157 thread->m_user= find_or_create_user(thread,
2158 thread->m_username,
2159 thread->m_username_length);
2160
2161 if ((thread->m_account == NULL) && (thread->m_hostname_length > 0))
2162 thread->m_host= find_or_create_host(thread,
2163 thread->m_hostname,
2164 thread->m_hostname_length);
2165}
2166
2167void update_mutex_derived_flags()
2168{
2169 PFS_mutex *pfs= mutex_array;
2170 PFS_mutex *pfs_last= mutex_array + mutex_max;
2171 PFS_mutex_class *klass;
2172
2173 for ( ; pfs < pfs_last; pfs++)
2174 {
2175 klass= sanitize_mutex_class(pfs->m_class);
2176 if (likely(klass != NULL))
2177 {
2178 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
2179 pfs->m_timed= klass->m_timed;
2180 }
2181 else
2182 {
2183 pfs->m_enabled= false;
2184 pfs->m_timed= false;
2185 }
2186 }
2187}
2188
2189void update_rwlock_derived_flags()
2190{
2191 PFS_rwlock *pfs= rwlock_array;
2192 PFS_rwlock *pfs_last= rwlock_array + rwlock_max;
2193 PFS_rwlock_class *klass;
2194
2195 for ( ; pfs < pfs_last; pfs++)
2196 {
2197 klass= sanitize_rwlock_class(pfs->m_class);
2198 if (likely(klass != NULL))
2199 {
2200 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
2201 pfs->m_timed= klass->m_timed;
2202 }
2203 else
2204 {
2205 pfs->m_enabled= false;
2206 pfs->m_timed= false;
2207 }
2208 }
2209}
2210
2211void update_cond_derived_flags()
2212{
2213 PFS_cond *pfs= cond_array;
2214 PFS_cond *pfs_last= cond_array + cond_max;
2215 PFS_cond_class *klass;
2216
2217 for ( ; pfs < pfs_last; pfs++)
2218 {
2219 klass= sanitize_cond_class(pfs->m_class);
2220 if (likely(klass != NULL))
2221 {
2222 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
2223 pfs->m_timed= klass->m_timed;
2224 }
2225 else
2226 {
2227 pfs->m_enabled= false;
2228 pfs->m_timed= false;
2229 }
2230 }
2231}
2232
2233void update_file_derived_flags()
2234{
2235 PFS_file *pfs= file_array;
2236 PFS_file *pfs_last= file_array + file_max;
2237 PFS_file_class *klass;
2238
2239 for ( ; pfs < pfs_last; pfs++)
2240 {
2241 klass= sanitize_file_class(pfs->m_class);
2242 if (likely(klass != NULL))
2243 {
2244 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
2245 pfs->m_timed= klass->m_timed;
2246 }
2247 else
2248 {
2249 pfs->m_enabled= false;
2250 pfs->m_timed= false;
2251 }
2252 }
2253}
2254
2255void update_table_derived_flags()
2256{
2257 PFS_table *pfs= table_array;
2258 PFS_table *pfs_last= table_array + table_max;
2259 PFS_table_share *share;
2260
2261 for ( ; pfs < pfs_last; pfs++)
2262 {
2263 share= sanitize_table_share(pfs->m_share);
2264 if (likely(share != NULL))
2265 {
2266 pfs->m_io_enabled= share->m_enabled &&
2267 flag_global_instrumentation && global_table_io_class.m_enabled;
2268 pfs->m_io_timed= share->m_timed && global_table_io_class.m_timed;
2269 pfs->m_lock_enabled= share->m_enabled &&
2270 flag_global_instrumentation && global_table_lock_class.m_enabled;
2271 pfs->m_lock_timed= share->m_timed && global_table_lock_class.m_timed;
2272 }
2273 else
2274 {
2275 pfs->m_io_enabled= false;
2276 pfs->m_io_timed= false;
2277 pfs->m_lock_enabled= false;
2278 pfs->m_lock_timed= false;
2279 }
2280 }
2281}
2282
2283void update_socket_derived_flags()
2284{
2285 PFS_socket *pfs= socket_array;
2286 PFS_socket *pfs_last= socket_array + socket_max;
2287 PFS_socket_class *klass;
2288
2289 for ( ; pfs < pfs_last; pfs++)
2290 {
2291 klass= sanitize_socket_class(pfs->m_class);
2292 if (likely(klass != NULL))
2293 {
2294 pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
2295 pfs->m_timed= klass->m_timed;
2296 }
2297 else
2298 {
2299 pfs->m_enabled= false;
2300 pfs->m_timed= false;
2301 }
2302 }
2303}
2304
2305void update_instruments_derived_flags()
2306{
2307 update_mutex_derived_flags();
2308 update_rwlock_derived_flags();
2309 update_cond_derived_flags();
2310 update_file_derived_flags();
2311 update_table_derived_flags();
2312 update_socket_derived_flags();
2313 /* nothing for stages and statements (no instances) */
2314}
2315
2316/** @} */
2317