/* Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; version 2 of the License.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software Foundation,
  51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */

#ifndef PFS_INSTR_H
#define PFS_INSTR_H

/**
  @file storage/perfschema/pfs_instr.h
  Performance schema instruments (declarations).
*/

struct PFS_mutex_class;
struct PFS_rwlock_class;
struct PFS_cond_class;
struct PFS_file_class;
struct PFS_table_share;
struct PFS_thread_class;
struct PFS_socket_class;

#include "my_global.h"
#ifdef __WIN__
#include <winsock2.h>
#else
#include <arpa/inet.h>
#endif
#include "my_compiler.h"
#include "pfs_lock.h"
#include "pfs_stat.h"
#include "pfs_instr_class.h"
#include "pfs_events_waits.h"
#include "pfs_events_stages.h"
#include "pfs_events_statements.h"
#include "pfs_server.h"
#include "lf.h"
#include "pfs_con_slice.h"

/**
  @addtogroup Performance_schema_buffers
  @{
*/

struct PFS_thread;
struct PFS_host;
struct PFS_user;
struct PFS_account;

/** Base structure for wait instruments. */
struct PFS_instr
{
  /** Internal lock. */
  pfs_lock m_lock;
  /** Enabled flag. */
  bool m_enabled;
  /** Timed flag. */
  bool m_timed;
};

/** Instrumented mutex implementation. @see PSI_mutex. */
struct PFS_ALIGNED PFS_mutex : public PFS_instr
{
  /** Mutex identity, typically a pthread_mutex_t. */
  const void *m_identity;
  /** Mutex class. */
  PFS_mutex_class *m_class;
  /** Instrument statistics. */
  PFS_mutex_stat m_mutex_stat;
  /** Current owner. */
  PFS_thread *m_owner;
  /**
    Timestamp of the last lock.
    This statistic is not exposed in user visible tables yet.
  */
  ulonglong m_last_locked;
};

/** Instrumented rwlock implementation. @see PSI_rwlock. */
struct PFS_ALIGNED PFS_rwlock : public PFS_instr
{
  /** RWLock identity, typically a pthread_rwlock_t. */
  const void *m_identity;
  /** RWLock class. */
  PFS_rwlock_class *m_class;
  /** Instrument statistics. */
  PFS_rwlock_stat m_rwlock_stat;
  /** Current writer thread. */
  PFS_thread *m_writer;
  /** Current count of readers. */
  uint m_readers;
  /**
    Timestamp of the last write.
    This statistic is not exposed in user visible tables yet.
  */
  ulonglong m_last_written;
  /**
    Timestamp of the last read.
    This statistic is not exposed in user visible tables yet.
  */
  ulonglong m_last_read;
};

/** Instrumented cond implementation. @see PSI_cond. */
struct PFS_ALIGNED PFS_cond : public PFS_instr
{
  /** Condition identity, typically a pthread_cond_t. */
  const void *m_identity;
  /** Condition class. */
  PFS_cond_class *m_class;
  /** Instrument wait statistics. */
  PFS_single_stat m_wait_stat;
  /** Condition instance usage statistics. */
  PFS_cond_stat m_cond_stat;
};

/** Instrumented File and FILE implementation. @see PSI_file. */
struct PFS_ALIGNED PFS_file : public PFS_instr
{
  uint32 get_version()
  { return m_lock.get_version(); }

  /** File identity */
  const void *m_identity;
  /** File name. */
  char m_filename[FN_REFLEN];
  /** File name length in bytes. */
  uint m_filename_length;
  /** File class. */
  PFS_file_class *m_class;
  /** File usage statistics. */
  PFS_file_stat m_file_stat;
};

/** Instrumented table implementation. @see PSI_table. */
struct PFS_ALIGNED PFS_table
{
  /**
    True if table io instrumentation is enabled.
    This flag is computed.
  */
  bool m_io_enabled;
  /**
    True if table lock instrumentation is enabled.
    This flag is computed.
  */
  bool m_lock_enabled;
  /**
    True if table io instrumentation is timed.
    This flag is computed.
  */
  bool m_io_timed;
  /**
    True if table lock instrumentation is timed.
    This flag is computed.
  */
  bool m_lock_timed;

  /** True if table io statistics have been collected. */
  bool m_has_io_stats;

  /** True if table lock statistics have been collected. */
  bool m_has_lock_stats;

public:
  /**
    Aggregate this table handle's statistics to its parents.
    Only use this method for handles owned by the calling code.
    @sa sanitized_aggregate.
  */
  void aggregate(void)
  {
    if (m_has_io_stats && m_has_lock_stats)
    {
      safe_aggregate(& m_table_stat, m_share);
      m_has_io_stats= false;
      m_has_lock_stats= false;
    }
    else if (m_has_io_stats)
    {
      safe_aggregate_io(& m_table_stat, m_share);
      m_has_io_stats= false;
    }
    else if (m_has_lock_stats)
    {
      safe_aggregate_lock(& m_table_stat, m_share);
      m_has_lock_stats= false;
    }
  }

  /**
    Aggregate this table handle's statistics to its parents.
    This method is safe to call on handles not owned by the calling code.
    @sa aggregate
    @sa sanitized_aggregate_io
    @sa sanitized_aggregate_lock
  */
  void sanitized_aggregate(void);

  /**
    Aggregate this table handle's io statistics to its parents.
    This method is safe to call on handles not owned by the calling code.
  */
  void sanitized_aggregate_io(void);

  /**
    Aggregate this table handle's lock statistics to its parents.
    This method is safe to call on handles not owned by the calling code.
  */
  void sanitized_aggregate_lock(void);

  /** Internal lock. */
  pfs_lock m_lock;
  /** Owner. */
  PFS_thread *m_thread_owner;
  /** Table share. */
  PFS_table_share *m_share;
  /** Table identity, typically a handler. */
  const void *m_identity;
  /** Table statistics. */
  PFS_table_stat m_table_stat;

private:
  static void safe_aggregate(PFS_table_stat *stat,
                             PFS_table_share *safe_share);
  static void safe_aggregate_io(PFS_table_stat *stat,
                                PFS_table_share *safe_share);
  static void safe_aggregate_lock(PFS_table_stat *stat,
                                  PFS_table_share *safe_share);
};
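
/*
  Usage sketch (not part of the original documentation): code that owns a
  table handle aggregates it directly, while code holding a pointer to a
  handle it does not own must use the sanitized variants. The variable and
  call sites below are illustrative assumptions.

    // Owner of the handle, for example when closing it:
    pfs_table->aggregate();

    // Code that does not own the handle, for example a cleanup path:
    pfs_table->sanitized_aggregate();
*/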

/** Instrumented socket implementation. @see PSI_socket. */
struct PFS_ALIGNED PFS_socket : public PFS_instr
{
  uint32 get_version()
  { return m_lock.get_version(); }

  /** Socket identity, typically int */
  const void *m_identity;
  /** Owning thread, if applicable */
  PFS_thread *m_thread_owner;
  /** Socket file descriptor */
  uint m_fd;
  /** Raw socket address */
  struct sockaddr_storage m_sock_addr;
  /** Length of address */
  socklen_t m_addr_len;
  /** Idle flag. */
  bool m_idle;
  /** Socket class. */
  PFS_socket_class *m_class;
  /** Socket usage statistics. */
  PFS_socket_stat m_socket_stat;
};

/**
  @def WAIT_STACK_LOGICAL_SIZE
  Maximum number of nested waits.
  Some waits, such as:
  - "wait/io/table/sql/handler"
  - "wait/lock/table/sql/handler"
  are implemented by calling code in a storage engine,
  which can cause nested waits (file io, mutex, ...).
  Because of partitioned tables, a table io event (on the whole table)
  can contain a nested table io event (on a partition).
  Because of additional debug instrumentation,
  waiting on what looks like a "mutex" (safe_mutex, innodb sync0sync, ...)
  can cause nested waits to be recorded.
  For example, a wait on innodb mutexes can lead to:
  - wait/sync/mutex/innobase/some_mutex
  - wait/sync/mutex/innobase/sync0sync
  - wait/sync/mutex/innobase/os0sync
  The max depth of the event stack must be sufficient
  for these low level details to be visible.
*/
#define WAIT_STACK_LOGICAL_SIZE 5
/**
  @def WAIT_STACK_BOTTOM
  Maximum number of dummy wait records.
  One dummy record is reserved for the parent stage / statement,
  at the bottom of the wait stack.
*/
#define WAIT_STACK_BOTTOM 1
/**
  @def WAIT_STACK_SIZE
  Physical size of the waits stack
*/
#define WAIT_STACK_SIZE (WAIT_STACK_BOTTOM + WAIT_STACK_LOGICAL_SIZE)

/** Max size of the statements stack. */
extern uint statement_stack_max;
/** Max size of the digest token array. */
extern size_t pfs_max_digest_length;

/**
  @def PFS_MAX_ALLOC_RETRY
  Maximum number of times the code attempts to allocate an item
  from internal buffers, before giving up.
*/
#define PFS_MAX_ALLOC_RETRY 1000

/** The maximum number of passes in @sa PFS_scan. */
#define PFS_MAX_SCAN_PASS 2

/**
  Helper to scan circular buffers.
  Given a buffer of size max_size, with valid indexes in [0, max_size - 1],
  and a random starting point in the buffer,
  this helper returns up to two [first, last - 1] intervals that:
  - fit into the [0, max_size - 1] range,
  - have a combined length of at most PFS_MAX_ALLOC_RETRY.
*/
struct PFS_scan
{
public:
  /**
    Initialize a new scan.
    @param random a random index to start from
    @param max_size the max size of the interval to scan
  */
  void init(uint random, uint max_size);

  /**
    Predicate, has a next pass.
    @return true if there is a next pass to perform.
  */
  bool has_pass() const
  { return (m_pass < m_pass_max); }

  /**
    Iterator, proceed to the next pass.
  */
  void next_pass()
  { m_pass++; }

  /** First index for this pass. */
  uint first() const
  { return m_first[m_pass]; }

  /** Last index for this pass. */
  uint last() const
  { return m_last[m_pass]; }

private:
  /** Current pass. */
  uint m_pass;
  /** Maximum number of passes. */
  uint m_pass_max;
  /** First element for each pass. */
  uint m_first[PFS_MAX_SCAN_PASS];
  /** Last element for each pass. */
  uint m_last[PFS_MAX_SCAN_PASS];
};
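
/*
  Example (sketch): how PFS_scan is typically used to look for a free slot
  in a fixed size instrument buffer. The starting index computation and the
  slot claiming step are illustrative assumptions; mutex_array and mutex_max
  are declared later in this file.

    PFS_scan scan;
    uint random= some_random_start;  // hypothetical, in [0, mutex_max - 1]

    for (scan.init(random, mutex_max);
         scan.has_pass();
         scan.next_pass())
    {
      PFS_mutex *pfs= mutex_array + scan.first();
      PFS_mutex *pfs_last= mutex_array + scan.last();
      for ( ; pfs < pfs_last; pfs++)
      {
        // attempt to atomically claim this slot, and stop on success
      }
    }
*/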


/** Instrumented thread implementation. @see PSI_thread. */
struct PFS_ALIGNED PFS_thread : PFS_connection_slice
{
  static PFS_thread* get_current_thread(void);

  /** Thread instrumentation flag. */
  bool m_enabled;
  /** Current wait event in the event stack. */
  PFS_events_waits *m_events_waits_current;
  /** Event ID counter */
  ulonglong m_event_id;
  /**
    Internal lock.
    This lock is exclusively used to protect against races
    when creating and destroying PFS_thread.
    Do not use this lock to protect thread attributes,
    use one of @c m_stmt_lock or @c m_session_lock instead.
  */
  pfs_lock m_lock;
  /** Pins for filename_hash. */
  LF_PINS *m_filename_hash_pins;
  /** Pins for table_share_hash. */
  LF_PINS *m_table_share_hash_pins;
  /** Pins for setup_actor_hash. */
  LF_PINS *m_setup_actor_hash_pins;
  /** Pins for setup_object_hash. */
  LF_PINS *m_setup_object_hash_pins;
  /** Pins for host_hash. */
  LF_PINS *m_host_hash_pins;
  /** Pins for user_hash. */
  LF_PINS *m_user_hash_pins;
  /** Pins for account_hash. */
  LF_PINS *m_account_hash_pins;
  /** Pins for digest_hash. */
  LF_PINS *m_digest_hash_pins;
  /** Internal thread identifier, unique. */
  ulonglong m_thread_internal_id;
  /** Parent internal thread identifier. */
  ulonglong m_parent_thread_internal_id;
  /** External (SHOW PROCESSLIST) thread identifier, not unique. */
  ulong m_processlist_id;
  /** Thread class. */
  PFS_thread_class *m_class;
  /**
    Stack of events waits.
    This member holds the data for the table PERFORMANCE_SCHEMA.EVENTS_WAITS_CURRENT.
    Note that stack[0] is a dummy record that represents the parent stage/statement.
    For example, assuming the following tree:
    - STAGE ID 100
      - WAIT ID 101, parent STAGE 100
        - WAIT ID 102, parent wait 101
    the data in the stack will be:
    stack[0].m_event_id= 100, set by the stage instrumentation
    stack[0].m_event_type= STAGE, set by the stage instrumentation
    stack[0].m_nesting_event_id= unused
    stack[0].m_nesting_event_type= unused
    stack[1].m_event_id= 101
    stack[1].m_event_type= WAIT
    stack[1].m_nesting_event_id= stack[0].m_event_id= 100
    stack[1].m_nesting_event_type= stack[0].m_event_type= STAGE
    stack[2].m_event_id= 102
    stack[2].m_event_type= WAIT
    stack[2].m_nesting_event_id= stack[1].m_event_id= 101
    stack[2].m_nesting_event_type= stack[1].m_event_type= WAIT

    The whole point of the stack[0] record is to allow this optimization
    in the code, in the instrumentation for wait events:
      wait->m_nesting_event_id= (wait-1)->m_event_id;
      wait->m_nesting_event_type= (wait-1)->m_event_type;
    This code works both for the top level wait and for nested waits,
    and works without if conditions, which helps performance.
  */
  PFS_events_waits m_events_waits_stack[WAIT_STACK_SIZE];
  /** True if the circular buffer @c m_waits_history is full. */
  bool m_waits_history_full;
  /** Current index in the circular buffer @c m_waits_history. */
  uint m_waits_history_index;
  /**
    Waits history circular buffer.
    This member holds the data for the table
    PERFORMANCE_SCHEMA.EVENTS_WAITS_HISTORY.
  */
  PFS_events_waits *m_waits_history;

  /** True if the circular buffer @c m_stages_history is full. */
  bool m_stages_history_full;
  /** Current index in the circular buffer @c m_stages_history. */
  uint m_stages_history_index;
  /**
    Stages history circular buffer.
    This member holds the data for the table
    PERFORMANCE_SCHEMA.EVENTS_STAGES_HISTORY.
  */
  PFS_events_stages *m_stages_history;

  /** True if the circular buffer @c m_statements_history is full. */
  bool m_statements_history_full;
  /** Current index in the circular buffer @c m_statements_history. */
  uint m_statements_history_index;
  /**
    Statements history circular buffer.
    This member holds the data for the table
    PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_HISTORY.
  */
  PFS_events_statements *m_statements_history;
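
  /*
    Sketch (illustrative only, not a declaration from this file): each
    history buffer above is a per-thread ring buffer, advanced when an
    event completes, along these lines (thread and completed_wait are
    hypothetical names):

      uint index= thread->m_waits_history_index;
      thread->m_waits_history[index]= *completed_wait;
      index++;
      if (index >= events_waits_history_per_thread)
      {
        index= 0;
        thread->m_waits_history_full= true;
      }
      thread->m_waits_history_index= index;
  */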

  /**
    Internal lock, for session attributes.
    Session attributes are expected to be updated infrequently,
    typically once per session.
  */
  pfs_lock m_session_lock;
  /**
    User name.
    Protected by @c m_session_lock.
  */
  char m_username[USERNAME_LENGTH];
  /**
    Length of @c m_username.
    Protected by @c m_session_lock.
  */
  uint m_username_length;
  /**
    Host name.
    Protected by @c m_session_lock.
  */
  char m_hostname[HOSTNAME_LENGTH];
  /**
    Length of @c m_hostname.
    Protected by @c m_session_lock.
  */
  uint m_hostname_length;
  /**
    Database name.
    Protected by @c m_stmt_lock.
  */
  char m_dbname[NAME_LEN];
  /**
    Length of @c m_dbname.
    Protected by @c m_stmt_lock.
  */
  uint m_dbname_length;
  /** Current command. */
  int m_command;
  /** Start time. */
  time_t m_start_time;
  /**
    Internal lock, for statement attributes.
    Statement attributes are expected to be updated frequently,
    typically per statement execution.
  */
  pfs_lock m_stmt_lock;
  /** Processlist state (derived from stage). */
  PFS_stage_key m_stage;
  /**
    Processlist info.
    Protected by @c m_stmt_lock.
  */
  char m_processlist_info[COL_INFO_SIZE];
  /**
    Length of @c m_processlist_info.
    Protected by @c m_stmt_lock.
  */
  uint m_processlist_info_length;

  /** Current stage event. */
  PFS_events_stages m_stage_current;

  /** Size of @c m_statement_stack. */
  uint m_events_statements_count;
  /** Stack of statement events, for nested statements. */
  PFS_events_statements *m_statement_stack;

  /** Parent host. */
  PFS_host *m_host;
  /** Parent user. */
  PFS_user *m_user;
  /** Parent account. */
  PFS_account *m_account;

  /** Reset session connect attributes */
  void reset_session_connect_attrs();

  /**
    Buffer for the connection attributes.
    Protected by @c m_session_lock.
  */
  char *m_session_connect_attrs;
  /**
    Length used by @c m_session_connect_attrs.
    Protected by @c m_session_lock.
  */
  uint m_session_connect_attrs_length;
  /**
    Character set in which @c m_session_connect_attrs are encoded.
    Protected by @c m_session_lock.
  */
  uint m_session_connect_attrs_cs_number;
};

extern PFS_stage_stat *global_instr_class_stages_array;
extern PFS_statement_stat *global_instr_class_statements_array;

PFS_mutex *sanitize_mutex(PFS_mutex *unsafe);
PFS_rwlock *sanitize_rwlock(PFS_rwlock *unsafe);
PFS_cond *sanitize_cond(PFS_cond *unsafe);
PFS_thread *sanitize_thread(PFS_thread *unsafe);
PFS_file *sanitize_file(PFS_file *unsafe);
PFS_socket *sanitize_socket(PFS_socket *unsafe);

int init_instruments(const PFS_global_param *param);
void cleanup_instruments();
int init_file_hash();
void cleanup_file_hash();
PFS_mutex* create_mutex(PFS_mutex_class *mutex_class, const void *identity);
void destroy_mutex(PFS_mutex *pfs);
PFS_rwlock* create_rwlock(PFS_rwlock_class *klass, const void *identity);
void destroy_rwlock(PFS_rwlock *pfs);
PFS_cond* create_cond(PFS_cond_class *klass, const void *identity);
void destroy_cond(PFS_cond *pfs);

PFS_thread* create_thread(PFS_thread_class *klass, const void *identity,
                          ulonglong processlist_id);

void destroy_thread(PFS_thread *pfs);

PFS_file* find_or_create_file(PFS_thread *thread, PFS_file_class *klass,
                              const char *filename, uint len, bool create);

void release_file(PFS_file *pfs);
void destroy_file(PFS_thread *thread, PFS_file *pfs);
PFS_table* create_table(PFS_table_share *share, PFS_thread *opening_thread,
                        const void *identity);
void destroy_table(PFS_table *pfs);

PFS_socket* create_socket(PFS_socket_class *socket_class,
                          const my_socket *fd,
                          const struct sockaddr *addr,
                          socklen_t addr_len);
void destroy_socket(PFS_socket *pfs);
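
/*
  Example (sketch): lifecycle of an instrumented mutex as driven by the
  instrumentation layer. Where klass and real_mutex come from is an
  illustrative assumption; create_mutex() may return NULL when no free
  slot can be found, in which case the instance is counted as lost
  (see mutex_lost below).

    PFS_mutex *pfs= create_mutex(klass, &real_mutex);
    // ... waits on real_mutex are timed and aggregated in pfs->m_mutex_stat ...
    if (pfs != NULL)
      destroy_mutex(pfs);
*/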

/* For iterators and show status. */

extern ulong mutex_max;
extern ulong mutex_lost;
extern ulong rwlock_max;
extern ulong rwlock_lost;
extern ulong cond_max;
extern ulong cond_lost;
extern ulong thread_max;
extern ulong thread_lost;
extern ulong file_max;
extern ulong file_lost;
extern long file_handle_max;
extern ulong file_handle_lost;
extern ulong table_max;
extern ulong table_lost;
extern ulong socket_max;
extern ulong socket_lost;
extern ulong events_waits_history_per_thread;
extern ulong events_stages_history_per_thread;
extern ulong events_statements_history_per_thread;
extern ulong locker_lost;
extern ulong statement_lost;
extern ulong session_connect_attrs_lost;
extern ulong session_connect_attrs_size_per_thread;

/* Exposing the data directly, for iterators. */

extern PFS_mutex *mutex_array;
extern PFS_rwlock *rwlock_array;
extern PFS_cond *cond_array;
extern PFS_thread *thread_array;
extern PFS_file *file_array;
extern PFS_file **file_handle_array;
extern PFS_table *table_array;
extern PFS_socket *socket_array;

void reset_events_waits_by_instance();
void reset_file_instance_io();
void reset_socket_instance_io();

void aggregate_all_event_names(PFS_single_stat *from_array,
                               PFS_single_stat *to_array);
void aggregate_all_event_names(PFS_single_stat *from_array,
                               PFS_single_stat *to_array_1,
                               PFS_single_stat *to_array_2);

void aggregate_all_stages(PFS_stage_stat *from_array,
                          PFS_stage_stat *to_array);
void aggregate_all_stages(PFS_stage_stat *from_array,
                          PFS_stage_stat *to_array_1,
                          PFS_stage_stat *to_array_2);

void aggregate_all_statements(PFS_statement_stat *from_array,
                              PFS_statement_stat *to_array);
void aggregate_all_statements(PFS_statement_stat *from_array,
                              PFS_statement_stat *to_array_1,
                              PFS_statement_stat *to_array_2);

void aggregate_thread(PFS_thread *thread,
                      PFS_account *safe_account,
                      PFS_user *safe_user,
                      PFS_host *safe_host);
void aggregate_thread_waits(PFS_thread *thread,
                            PFS_account *safe_account,
                            PFS_user *safe_user,
                            PFS_host *safe_host);
void aggregate_thread_stages(PFS_thread *thread,
                             PFS_account *safe_account,
                             PFS_user *safe_user,
                             PFS_host *safe_host);
void aggregate_thread_statements(PFS_thread *thread,
                                 PFS_account *safe_account,
                                 PFS_user *safe_user,
                                 PFS_host *safe_host);
void clear_thread_account(PFS_thread *thread);
void set_thread_account(PFS_thread *thread);

/** Update derived flags for all mutex instances. */
void update_mutex_derived_flags();
/** Update derived flags for all rwlock instances. */
void update_rwlock_derived_flags();
/** Update derived flags for all condition instances. */
void update_cond_derived_flags();
/** Update derived flags for all file handles. */
void update_file_derived_flags();
/** Update derived flags for all table handles. */
void update_table_derived_flags();
/** Update derived flags for all socket instances. */
void update_socket_derived_flags();
/** Update derived flags for all instruments. */
void update_instruments_derived_flags();

extern LF_HASH filename_hash;

/** @} */
#endif