1 | /* Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved. |
2 | |
3 | This program is free software; you can redistribute it and/or modify |
4 | it under the terms of the GNU General Public License as published by |
5 | the Free Software Foundation; version 2 of the License. |
6 | |
7 | This program is distributed in the hope that it will be useful, |
8 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
9 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
10 | GNU General Public License for more details. |
11 | |
12 | You should have received a copy of the GNU General Public License |
13 | along with this program; if not, write to the Free Software Foundation, |
14 | 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */ |
15 | |
16 | #ifndef PFS_STAT_H |
17 | #define PFS_STAT_H |
18 | |
19 | #include "sql_const.h" |
20 | /* memcpy */ |
21 | #include "string.h" |
22 | |
23 | /** |
24 | @file storage/perfschema/pfs_stat.h |
25 | Statistics (declarations). |
26 | */ |
27 | |
28 | /** |
29 | @addtogroup Performance_schema_buffers |
30 | @{ |
31 | */ |
32 | |
33 | /** Single statistic. */ |
34 | struct PFS_single_stat |
35 | { |
36 | /** Count of values. */ |
37 | ulonglong m_count; |
38 | /** Sum of values. */ |
39 | ulonglong m_sum; |
40 | /** Minimum value. */ |
41 | ulonglong m_min; |
42 | /** Maximum value. */ |
43 | ulonglong m_max; |
44 | |
  PFS_single_stat()
  {
    reset();
  }
52 | |
53 | inline void reset(void) |
54 | { |
55 | m_count= 0; |
56 | m_sum= 0; |
57 | m_min= ULONGLONG_MAX; |
58 | m_max= 0; |
59 | } |
60 | |
  /**
    Check if this statistic contains timed data.
    After reset(), m_min (ULONGLONG_MAX) exceeds m_max (0), so this
    returns true only once at least one timed value has been aggregated.
  */
  inline bool has_timed_stats() const
62 | { |
63 | return (m_min <= m_max); |
64 | } |
65 | |
66 | inline void aggregate(const PFS_single_stat *stat) |
67 | { |
68 | m_count+= stat->m_count; |
69 | m_sum+= stat->m_sum; |
70 | if (unlikely(m_min > stat->m_min)) |
71 | m_min= stat->m_min; |
72 | if (unlikely(m_max < stat->m_max)) |
73 | m_max= stat->m_max; |
74 | } |
75 | |
76 | inline void aggregate_counted() |
77 | { |
78 | m_count++; |
79 | } |
80 | |
81 | inline void aggregate_counted(ulonglong count) |
82 | { |
83 | m_count+= count; |
84 | } |
85 | |
86 | inline void aggregate_value(ulonglong value) |
87 | { |
88 | m_count++; |
89 | m_sum+= value; |
90 | if (unlikely(m_min > value)) |
91 | m_min= value; |
92 | if (unlikely(m_max < value)) |
93 | m_max= value; |
94 | } |
95 | }; |
96 | |
97 | /** Combined statistic. */ |
98 | struct PFS_byte_stat : public PFS_single_stat |
99 | { |
100 | /** Byte count statistics */ |
101 | ulonglong m_bytes; |
102 | |
  /** Aggregate wait stats, event count and byte count */
104 | inline void aggregate(const PFS_byte_stat *stat) |
105 | { |
106 | PFS_single_stat::aggregate(stat); |
107 | m_bytes+= stat->m_bytes; |
108 | } |
109 | |
  /** Aggregate individual wait time, event count and byte count */
111 | inline void aggregate(ulonglong wait, ulonglong bytes) |
112 | { |
113 | aggregate_value(wait); |
114 | m_bytes+= bytes; |
115 | } |
116 | |
  /** Aggregate wait stats and event count */
118 | inline void aggregate_waits(const PFS_byte_stat *stat) |
119 | { |
120 | PFS_single_stat::aggregate(stat); |
121 | } |
122 | |
  /** Aggregate event count only */
124 | inline void aggregate_counted() |
125 | { |
126 | PFS_single_stat::aggregate_counted(); |
127 | } |
128 | |
  /** Aggregate event count and byte count */
130 | inline void aggregate_counted(ulonglong bytes) |
131 | { |
132 | PFS_single_stat::aggregate_counted(); |
133 | m_bytes+= bytes; |
134 | } |
135 | |
136 | PFS_byte_stat() |
137 | { |
138 | reset(); |
139 | } |
140 | |
141 | inline void reset(void) |
142 | { |
143 | PFS_single_stat::reset(); |
144 | m_bytes= 0; |
145 | } |
146 | }; |
147 | |
148 | /** Statistics for mutex usage. */ |
149 | struct PFS_mutex_stat |
150 | { |
151 | /** Wait statistics. */ |
152 | PFS_single_stat m_wait_stat; |
153 | /** |
154 | Lock statistics. |
155 | This statistic is not exposed in user visible tables yet. |
156 | */ |
157 | PFS_single_stat m_lock_stat; |
158 | |
159 | inline void aggregate(const PFS_mutex_stat *stat) |
160 | { |
161 | m_wait_stat.aggregate(&stat->m_wait_stat); |
162 | m_lock_stat.aggregate(&stat->m_lock_stat); |
163 | } |
164 | |
165 | inline void reset(void) |
166 | { |
167 | m_wait_stat.reset(); |
168 | m_lock_stat.reset(); |
169 | } |
170 | }; |
171 | |
172 | /** Statistics for rwlock usage. */ |
173 | struct PFS_rwlock_stat |
174 | { |
175 | /** Wait statistics. */ |
176 | PFS_single_stat m_wait_stat; |
177 | /** |
178 | RWLock read lock usage statistics. |
179 | This statistic is not exposed in user visible tables yet. |
180 | */ |
181 | PFS_single_stat m_read_lock_stat; |
182 | /** |
183 | RWLock write lock usage statistics. |
184 | This statistic is not exposed in user visible tables yet. |
185 | */ |
186 | PFS_single_stat m_write_lock_stat; |
187 | |
188 | inline void aggregate(const PFS_rwlock_stat *stat) |
189 | { |
190 | m_wait_stat.aggregate(&stat->m_wait_stat); |
191 | m_read_lock_stat.aggregate(&stat->m_read_lock_stat); |
192 | m_write_lock_stat.aggregate(&stat->m_write_lock_stat); |
193 | } |
194 | |
195 | inline void reset(void) |
196 | { |
197 | m_wait_stat.reset(); |
198 | m_read_lock_stat.reset(); |
199 | m_write_lock_stat.reset(); |
200 | } |
201 | }; |
202 | |
203 | /** Statistics for COND usage. */ |
204 | struct PFS_cond_stat |
205 | { |
206 | /** Wait statistics. */ |
207 | PFS_single_stat m_wait_stat; |
208 | /** |
209 | Number of times a condition was signalled. |
210 | This statistic is not exposed in user visible tables yet. |
211 | */ |
212 | ulonglong m_signal_count; |
213 | /** |
214 | Number of times a condition was broadcast. |
215 | This statistic is not exposed in user visible tables yet. |
216 | */ |
217 | ulonglong m_broadcast_count; |
218 | |
219 | inline void aggregate(const PFS_cond_stat *stat) |
220 | { |
221 | m_wait_stat.aggregate(&stat->m_wait_stat); |
222 | m_signal_count+= stat->m_signal_count; |
223 | m_broadcast_count+= stat->m_broadcast_count; |
224 | } |
225 | |
226 | inline void reset(void) |
227 | { |
228 | m_wait_stat.reset(); |
229 | m_signal_count= 0; |
230 | m_broadcast_count= 0; |
231 | } |
232 | }; |
233 | |
234 | /** Statistics for FILE IO. Used for both waits and byte counts. */ |
235 | struct PFS_file_io_stat |
236 | { |
237 | /** READ statistics */ |
238 | PFS_byte_stat m_read; |
239 | /** WRITE statistics */ |
240 | PFS_byte_stat m_write; |
  /** Miscellaneous statistics */
242 | PFS_byte_stat m_misc; |
243 | |
244 | inline void reset(void) |
245 | { |
246 | m_read.reset(); |
247 | m_write.reset(); |
248 | m_misc.reset(); |
249 | } |
250 | |
251 | inline void aggregate(const PFS_file_io_stat *stat) |
252 | { |
253 | m_read.aggregate(&stat->m_read); |
254 | m_write.aggregate(&stat->m_write); |
255 | m_misc.aggregate(&stat->m_misc); |
256 | } |
257 | |
  /** Sum waits and byte counts */
259 | inline void sum(PFS_byte_stat *stat) |
260 | { |
261 | stat->aggregate(&m_read); |
262 | stat->aggregate(&m_write); |
263 | stat->aggregate(&m_misc); |
264 | } |
265 | |
  /** Sum waits only */
267 | inline void sum_waits(PFS_single_stat *stat) |
268 | { |
269 | stat->aggregate(&m_read); |
270 | stat->aggregate(&m_write); |
271 | stat->aggregate(&m_misc); |
272 | } |
273 | }; |
274 | |
275 | /** Statistics for FILE usage. */ |
276 | struct PFS_file_stat |
277 | { |
278 | /** Number of current open handles. */ |
279 | ulong m_open_count; |
280 | /** File IO statistics. */ |
281 | PFS_file_io_stat m_io_stat; |
282 | |
283 | inline void aggregate(const PFS_file_stat *stat) |
284 | { |
285 | m_io_stat.aggregate(&stat->m_io_stat); |
286 | } |
287 | |
288 | /** Reset file statistics. */ |
289 | inline void reset(void) |
290 | { |
291 | m_io_stat.reset(); |
292 | } |
293 | }; |
294 | |
295 | /** Statistics for stage usage. */ |
296 | struct PFS_stage_stat |
297 | { |
  /** Timed statistics for the stage. */
  PFS_single_stat m_timer1_stat;
299 | |
300 | inline void reset(void) |
301 | { m_timer1_stat.reset(); } |
302 | |
303 | inline void aggregate_counted() |
304 | { m_timer1_stat.aggregate_counted(); } |
305 | |
306 | inline void aggregate_value(ulonglong value) |
307 | { m_timer1_stat.aggregate_value(value); } |
308 | |
  inline void aggregate(const PFS_stage_stat *stat)
  { m_timer1_stat.aggregate(& stat->m_timer1_stat); }
311 | }; |
312 | |
313 | /** Statistics for statement usage. */ |
314 | struct PFS_statement_stat |
315 | { |
  /** Timed statistics for the statement. */
  PFS_single_stat m_timer1_stat;
317 | ulonglong m_error_count; |
318 | ulonglong m_warning_count; |
319 | ulonglong m_rows_affected; |
320 | ulonglong m_lock_time; |
321 | ulonglong m_rows_sent; |
322 | ulonglong m_rows_examined; |
323 | ulonglong m_created_tmp_disk_tables; |
324 | ulonglong m_created_tmp_tables; |
325 | ulonglong m_select_full_join; |
326 | ulonglong m_select_full_range_join; |
327 | ulonglong m_select_range; |
328 | ulonglong m_select_range_check; |
329 | ulonglong m_select_scan; |
330 | ulonglong m_sort_merge_passes; |
331 | ulonglong m_sort_range; |
332 | ulonglong m_sort_rows; |
333 | ulonglong m_sort_scan; |
334 | ulonglong m_no_index_used; |
335 | ulonglong m_no_good_index_used; |
336 | |
  PFS_statement_stat()
  {
    reset();
  }
359 | |
360 | inline void reset(void) |
361 | { |
362 | m_timer1_stat.reset(); |
363 | m_error_count= 0; |
364 | m_warning_count= 0; |
365 | m_rows_affected= 0; |
366 | m_lock_time= 0; |
367 | m_rows_sent= 0; |
368 | m_rows_examined= 0; |
369 | m_created_tmp_disk_tables= 0; |
370 | m_created_tmp_tables= 0; |
371 | m_select_full_join= 0; |
372 | m_select_full_range_join= 0; |
373 | m_select_range= 0; |
374 | m_select_range_check= 0; |
375 | m_select_scan= 0; |
376 | m_sort_merge_passes= 0; |
377 | m_sort_range= 0; |
378 | m_sort_rows= 0; |
379 | m_sort_scan= 0; |
380 | m_no_index_used= 0; |
381 | m_no_good_index_used= 0; |
382 | } |
383 | |
384 | inline void aggregate_counted() |
385 | { m_timer1_stat.aggregate_counted(); } |
386 | |
387 | inline void aggregate_value(ulonglong value) |
388 | { m_timer1_stat.aggregate_value(value); } |
389 | |
  inline void aggregate(const PFS_statement_stat *stat)
391 | { |
392 | m_timer1_stat.aggregate(& stat->m_timer1_stat); |
393 | |
394 | m_error_count+= stat->m_error_count; |
395 | m_warning_count+= stat->m_warning_count; |
396 | m_rows_affected+= stat->m_rows_affected; |
397 | m_lock_time+= stat->m_lock_time; |
398 | m_rows_sent+= stat->m_rows_sent; |
399 | m_rows_examined+= stat->m_rows_examined; |
400 | m_created_tmp_disk_tables+= stat->m_created_tmp_disk_tables; |
401 | m_created_tmp_tables+= stat->m_created_tmp_tables; |
402 | m_select_full_join+= stat->m_select_full_join; |
403 | m_select_full_range_join+= stat->m_select_full_range_join; |
404 | m_select_range+= stat->m_select_range; |
405 | m_select_range_check+= stat->m_select_range_check; |
406 | m_select_scan+= stat->m_select_scan; |
407 | m_sort_merge_passes+= stat->m_sort_merge_passes; |
408 | m_sort_range+= stat->m_sort_range; |
409 | m_sort_rows+= stat->m_sort_rows; |
410 | m_sort_scan+= stat->m_sort_scan; |
411 | m_no_index_used+= stat->m_no_index_used; |
412 | m_no_good_index_used+= stat->m_no_good_index_used; |
413 | } |
414 | }; |
415 | |
416 | /** Single table io statistic. */ |
417 | struct PFS_table_io_stat |
418 | { |
  /** True if the statistics below contain any aggregated data. */
  bool m_has_data;
420 | /** FETCH statistics */ |
421 | PFS_single_stat m_fetch; |
422 | /** INSERT statistics */ |
423 | PFS_single_stat m_insert; |
424 | /** UPDATE statistics */ |
425 | PFS_single_stat m_update; |
426 | /** DELETE statistics */ |
427 | PFS_single_stat m_delete; |
428 | |
429 | PFS_table_io_stat() |
430 | { |
431 | m_has_data= false; |
432 | } |
433 | |
434 | inline void reset(void) |
435 | { |
436 | m_has_data= false; |
437 | m_fetch.reset(); |
438 | m_insert.reset(); |
439 | m_update.reset(); |
440 | m_delete.reset(); |
441 | } |
442 | |
443 | inline void aggregate(const PFS_table_io_stat *stat) |
444 | { |
445 | if (stat->m_has_data) |
446 | { |
447 | m_has_data= true; |
448 | m_fetch.aggregate(&stat->m_fetch); |
449 | m_insert.aggregate(&stat->m_insert); |
450 | m_update.aggregate(&stat->m_update); |
451 | m_delete.aggregate(&stat->m_delete); |
452 | } |
453 | } |
454 | |
455 | inline void sum(PFS_single_stat *result) |
456 | { |
457 | if (m_has_data) |
458 | { |
459 | result->aggregate(& m_fetch); |
460 | result->aggregate(& m_insert); |
461 | result->aggregate(& m_update); |
462 | result->aggregate(& m_delete); |
463 | } |
464 | } |
465 | }; |
466 | |
467 | enum PFS_TL_LOCK_TYPE |
468 | { |
469 | /* Locks from enum thr_lock */ |
470 | PFS_TL_READ= 0, |
471 | PFS_TL_READ_WITH_SHARED_LOCKS= 1, |
472 | PFS_TL_READ_HIGH_PRIORITY= 2, |
473 | PFS_TL_READ_NO_INSERT= 3, |
474 | PFS_TL_WRITE_ALLOW_WRITE= 4, |
475 | PFS_TL_WRITE_CONCURRENT_INSERT= 5, |
476 | PFS_TL_WRITE_DELAYED= 6, |
477 | PFS_TL_WRITE_LOW_PRIORITY= 7, |
478 | PFS_TL_WRITE= 8, |
479 | |
480 | /* Locks for handler::ha_external_lock() */ |
481 | PFS_TL_READ_EXTERNAL= 9, |
482 | PFS_TL_WRITE_EXTERNAL= 10 |
483 | }; |
484 | |
/** Number of lock types in PFS_TL_LOCK_TYPE. Keep in sync with the enum above. */
#define COUNT_PFS_TL_LOCK_TYPE 11
486 | |
487 | /** Statistics for table locks. */ |
488 | struct PFS_table_lock_stat |
489 | { |
490 | PFS_single_stat m_stat[COUNT_PFS_TL_LOCK_TYPE]; |
491 | |
492 | inline void reset(void) |
493 | { |
494 | PFS_single_stat *pfs= & m_stat[0]; |
495 | PFS_single_stat *pfs_last= & m_stat[COUNT_PFS_TL_LOCK_TYPE]; |
496 | for ( ; pfs < pfs_last ; pfs++) |
497 | pfs->reset(); |
498 | } |
499 | |
500 | inline void aggregate(const PFS_table_lock_stat *stat) |
501 | { |
502 | PFS_single_stat *pfs= & m_stat[0]; |
503 | PFS_single_stat *pfs_last= & m_stat[COUNT_PFS_TL_LOCK_TYPE]; |
504 | const PFS_single_stat *pfs_from= & stat->m_stat[0]; |
505 | for ( ; pfs < pfs_last ; pfs++, pfs_from++) |
506 | pfs->aggregate(pfs_from); |
507 | } |
508 | |
509 | inline void sum(PFS_single_stat *result) |
510 | { |
511 | PFS_single_stat *pfs= & m_stat[0]; |
512 | PFS_single_stat *pfs_last= & m_stat[COUNT_PFS_TL_LOCK_TYPE]; |
513 | for ( ; pfs < pfs_last ; pfs++) |
514 | result->aggregate(pfs); |
515 | } |
516 | }; |
517 | |
518 | /** Statistics for TABLE usage. */ |
519 | struct PFS_table_stat |
520 | { |
521 | /** |
522 | Statistics, per index. |
523 | Each index stat is in [0, MAX_INDEXES-1], |
524 | stats when using no index are in [MAX_INDEXES]. |
525 | */ |
526 | PFS_table_io_stat m_index_stat[MAX_INDEXES + 1]; |
527 | |
528 | /** |
529 | Statistics, per lock type. |
530 | */ |
531 | PFS_table_lock_stat m_lock_stat; |
532 | |
533 | /** Reset table io statistic. */ |
534 | inline void reset_io(void) |
535 | { |
536 | PFS_table_io_stat *stat= & m_index_stat[0]; |
537 | PFS_table_io_stat *stat_last= & m_index_stat[MAX_INDEXES + 1]; |
538 | for ( ; stat < stat_last ; stat++) |
539 | stat->reset(); |
540 | } |
541 | |
542 | /** Reset table lock statistic. */ |
543 | inline void reset_lock(void) |
544 | { |
545 | m_lock_stat.reset(); |
546 | } |
547 | |
548 | /** Reset table statistic. */ |
549 | inline void reset(void) |
550 | { |
551 | reset_io(); |
552 | reset_lock(); |
553 | } |
554 | |
  /** Reset table io statistics with a single memcpy of a pre-reset template. */
  inline void fast_reset_io(void)
  {
    memcpy(& m_index_stat, & g_reset_template.m_index_stat, sizeof(m_index_stat));
  }

  /** Reset table lock statistics with a single memcpy of a pre-reset template. */
  inline void fast_reset_lock(void)
  {
    memcpy(& m_lock_stat, & g_reset_template.m_lock_stat, sizeof(m_lock_stat));
  }

  /** Reset all table statistics with a single memcpy of a pre-reset template. */
  inline void fast_reset(void)
  {
    memcpy(this, & g_reset_template, sizeof(*this));
  }
569 | |
570 | inline void aggregate_io(const PFS_table_stat *stat, uint key_count) |
571 | { |
572 | PFS_table_io_stat *to_stat; |
573 | PFS_table_io_stat *to_stat_last; |
574 | const PFS_table_io_stat *from_stat; |
575 | |
576 | DBUG_ASSERT(key_count <= MAX_INDEXES); |
577 | |
578 | /* Aggregate stats for each index, if any */ |
579 | to_stat= & m_index_stat[0]; |
580 | to_stat_last= to_stat + key_count; |
581 | from_stat= & stat->m_index_stat[0]; |
582 | for ( ; to_stat < to_stat_last ; from_stat++, to_stat++) |
583 | to_stat->aggregate(from_stat); |
584 | |
585 | /* Aggregate stats for the table */ |
586 | to_stat= & m_index_stat[MAX_INDEXES]; |
587 | from_stat= & stat->m_index_stat[MAX_INDEXES]; |
588 | to_stat->aggregate(from_stat); |
589 | } |
590 | |
591 | inline void aggregate_lock(const PFS_table_stat *stat) |
592 | { |
593 | m_lock_stat.aggregate(& stat->m_lock_stat); |
594 | } |
595 | |
596 | inline void aggregate(const PFS_table_stat *stat, uint key_count) |
597 | { |
598 | aggregate_io(stat, key_count); |
599 | aggregate_lock(stat); |
600 | } |
601 | |
602 | inline void sum_io(PFS_single_stat *result, uint key_count) |
603 | { |
604 | PFS_table_io_stat *stat; |
605 | PFS_table_io_stat *stat_last; |
606 | |
607 | DBUG_ASSERT(key_count <= MAX_INDEXES); |
608 | |
609 | /* Sum stats for each index, if any */ |
610 | stat= & m_index_stat[0]; |
611 | stat_last= stat + key_count; |
612 | for ( ; stat < stat_last ; stat++) |
613 | stat->sum(result); |
614 | |
615 | /* Sum stats for the table */ |
616 | m_index_stat[MAX_INDEXES].sum(result); |
617 | } |
618 | |
619 | inline void sum_lock(PFS_single_stat *result) |
620 | { |
621 | m_lock_stat.sum(result); |
622 | } |
623 | |
624 | inline void sum(PFS_single_stat *result, uint key_count) |
625 | { |
626 | sum_io(result, key_count); |
627 | sum_lock(result); |
628 | } |
629 | |
  /**
    Statistics template, kept in the reset state, used as the memcpy
    source by the fast_reset* methods above. As a static member it must
    be defined in exactly one translation unit.
  */
  static struct PFS_table_stat g_reset_template;
631 | }; |
632 | |
633 | /** Statistics for SOCKET IO. Used for both waits and byte counts. */ |
634 | struct PFS_socket_io_stat |
635 | { |
636 | /** READ statistics */ |
637 | PFS_byte_stat m_read; |
638 | /** WRITE statistics */ |
639 | PFS_byte_stat m_write; |
  /** Miscellaneous statistics */
641 | PFS_byte_stat m_misc; |
642 | |
643 | inline void reset(void) |
644 | { |
645 | m_read.reset(); |
646 | m_write.reset(); |
647 | m_misc.reset(); |
648 | } |
649 | |
650 | inline void aggregate(const PFS_socket_io_stat *stat) |
651 | { |
652 | m_read.aggregate(&stat->m_read); |
653 | m_write.aggregate(&stat->m_write); |
654 | m_misc.aggregate(&stat->m_misc); |
655 | } |
656 | |
  /** Sum waits and byte counts */
658 | inline void sum(PFS_byte_stat *stat) |
659 | { |
660 | stat->aggregate(&m_read); |
661 | stat->aggregate(&m_write); |
662 | stat->aggregate(&m_misc); |
663 | } |
664 | |
  /** Sum waits only */
666 | inline void sum_waits(PFS_single_stat *stat) |
667 | { |
668 | stat->aggregate(&m_read); |
669 | stat->aggregate(&m_write); |
670 | stat->aggregate(&m_misc); |
671 | } |
672 | }; |
673 | |
674 | /** Statistics for SOCKET usage. */ |
675 | struct PFS_socket_stat |
676 | { |
677 | /** Socket timing and byte count statistics per operation */ |
678 | PFS_socket_io_stat m_io_stat; |
679 | |
680 | /** Reset socket statistics. */ |
681 | inline void reset(void) |
682 | { |
683 | m_io_stat.reset(); |
684 | } |
685 | }; |
686 | |
/** Statistics for connection usage. */
struct PFS_connection_stat
688 | { |
689 | PFS_connection_stat() |
690 | : m_current_connections(0), |
691 | m_total_connections(0) |
692 | {} |
693 | |
  /** Number of currently active connections. */
  ulonglong m_current_connections;
  /** Total number of connections, active and closed. */
  ulonglong m_total_connections;
696 | |
  /** Aggregate a number of active connections; adds to both counters. */
  inline void aggregate_active(ulonglong active)
698 | { |
699 | m_current_connections+= active; |
700 | m_total_connections+= active; |
701 | } |
702 | |
  /** Aggregate a number of closed connections; adds to the total only. */
  inline void aggregate_disconnected(ulonglong disconnected)
704 | { |
705 | m_total_connections+= disconnected; |
706 | } |
707 | }; |
708 | |
709 | /** @} */ |
710 | #endif |
711 | |
712 | |