/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
#ident "$Id$"
/*======
This file is part of PerconaFT.


Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.

    PerconaFT is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License, version 2,
    as published by the Free Software Foundation.

    PerconaFT is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.

----------------------------------------

    PerconaFT is free software: you can redistribute it and/or modify
    it under the terms of the GNU Affero General Public License, version 3,
    as published by the Free Software Foundation.

    PerconaFT is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU Affero General Public License for more details.

    You should have received a copy of the GNU Affero General Public License
    along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
======= */

#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."

#pragma once

#include <db.h>

#include "portability/toku_config.h"
#include "portability/toku_list.h"
#include "portability/toku_race_tools.h"

#include "util/status.h"

//
// Leaf Entry statistics
//
class LE_STATUS_S {
public:
    enum {
        LE_MAX_COMMITTED_XR = 0,
        LE_MAX_PROVISIONAL_XR,
        LE_EXPANDED,
        LE_MAX_MEMSIZE,
        LE_APPLY_GC_BYTES_IN,
        LE_APPLY_GC_BYTES_OUT,
        LE_NORMAL_GC_BYTES_IN,
        LE_NORMAL_GC_BYTES_OUT,
        LE_STATUS_NUM_ROWS
    };

    void init();
    void destroy();

    TOKU_ENGINE_STATUS_ROW_S status[LE_STATUS_NUM_ROWS];

private:
    bool m_initialized;
};
typedef LE_STATUS_S* LE_STATUS;
extern LE_STATUS_S le_status;

// The "VAL" read macro returns the raw counter without synchronization (these
// values are read too often to be worth making the read threadsafe); the "INC"
// macro uses a partitioned counter when one is available and an atomic add otherwise.
#define LE_STATUS_VAL(x) le_status.status[LE_STATUS_S::x].value.num
#define LE_STATUS_INC(x, d)                                                                     \
    do {                                                                                        \
        if (le_status.status[LE_STATUS_S::x].type == PARCOUNT) {                               \
            increment_partitioned_counter(le_status.status[LE_STATUS_S::x].value.parcount, d); \
        } else {                                                                                \
            toku_sync_fetch_and_add(&le_status.status[LE_STATUS_S::x].value.num, d);           \
        }                                                                                       \
    } while (0)
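
// Illustrative usage (a sketch, not a call site defined in this header): a
// leaf-entry garbage-collection path might account for bytes in and out, and a
// status report might read them back, e.g.
//
//     LE_STATUS_INC(LE_APPLY_GC_BYTES_IN, old_le_size);   // old_le_size: hypothetical local
//     LE_STATUS_INC(LE_APPLY_GC_BYTES_OUT, new_le_size);  // new_le_size: hypothetical local
//     uint64_t gc_bytes_in = LE_STATUS_VAL(LE_APPLY_GC_BYTES_IN);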



//
// Checkpoint statistics
//
class CHECKPOINT_STATUS_S {
public:
    enum {
        CP_PERIOD,
        CP_FOOTPRINT,
        CP_TIME_LAST_CHECKPOINT_BEGIN,
        CP_TIME_LAST_CHECKPOINT_BEGIN_COMPLETE,
        CP_TIME_LAST_CHECKPOINT_END,
        CP_TIME_CHECKPOINT_DURATION,
        CP_TIME_CHECKPOINT_DURATION_LAST,
        CP_LAST_LSN,
        CP_CHECKPOINT_COUNT,
        CP_CHECKPOINT_COUNT_FAIL,
        CP_WAITERS_NOW, // how many threads are currently waiting for the checkpoint_safe lock to perform a checkpoint
        CP_WAITERS_MAX, // max threads ever simultaneously waiting for the checkpoint_safe lock to perform a checkpoint
        CP_CLIENT_WAIT_ON_MO, // how many times a client thread waited to take the multi_operation lock, not for checkpoint
        CP_CLIENT_WAIT_ON_CS, // how many times a client thread waited for the checkpoint_safe lock, not for checkpoint
        CP_BEGIN_TIME,
        CP_LONG_BEGIN_TIME,
        CP_LONG_BEGIN_COUNT,
        CP_END_TIME,
        CP_LONG_END_TIME,
        CP_LONG_END_COUNT,
        CP_STATUS_NUM_ROWS // number of rows in this status array; must be last
    };

    void init();
    void destroy();

    TOKU_ENGINE_STATUS_ROW_S status[CP_STATUS_NUM_ROWS];

private:
    bool m_initialized;
};
typedef CHECKPOINT_STATUS_S* CHECKPOINT_STATUS;
extern CHECKPOINT_STATUS_S cp_status;

#define CP_STATUS_VAL(x) cp_status.status[CHECKPOINT_STATUS_S::x].value.num
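
// Illustrative usage (a sketch, not a call site defined in this header): an
// engine-status report might read the time of the last completed checkpoint, e.g.
//
//     uint64_t last_checkpoint_end = CP_STATUS_VAL(CP_TIME_LAST_CHECKPOINT_END);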



//
// Cachetable statistics
//
class CACHETABLE_STATUS_S {
public:
    enum {
        CT_MISS = 0,
        CT_MISSTIME, // how many usec spent waiting for disk read because of cache miss
        CT_PREFETCHES, // how many times has a block been prefetched into the cachetable?
        CT_SIZE_CURRENT, // the sum of the sizes of the nodes represented in the cachetable
        CT_SIZE_LIMIT, // the limit to the sum of the node sizes
        CT_SIZE_WRITING, // the sum of the sizes of the nodes being written
        CT_SIZE_NONLEAF, // number of bytes in cachetable belonging to nonleaf nodes
        CT_SIZE_LEAF, // number of bytes in cachetable belonging to leaf nodes
        CT_SIZE_ROLLBACK, // number of bytes in cachetable belonging to rollback nodes
        CT_SIZE_CACHEPRESSURE, // number of bytes causing cache pressure (sum of buffers and workdone counters)
        CT_SIZE_CLONED, // number of bytes of cloned data in the system
        CT_EVICTIONS,
        CT_CLEANER_EXECUTIONS, // number of times the cleaner thread's loop has executed
        CT_CLEANER_PERIOD,
        CT_CLEANER_ITERATIONS, // number of times the cleaner thread runs the cleaner per period
        CT_WAIT_PRESSURE_COUNT,
        CT_WAIT_PRESSURE_TIME,
        CT_LONG_WAIT_PRESSURE_COUNT,
        CT_LONG_WAIT_PRESSURE_TIME,

        CT_POOL_CLIENT_NUM_THREADS,
        CT_POOL_CLIENT_NUM_THREADS_ACTIVE,
        CT_POOL_CLIENT_QUEUE_SIZE,
        CT_POOL_CLIENT_MAX_QUEUE_SIZE,
        CT_POOL_CLIENT_TOTAL_ITEMS_PROCESSED,
        CT_POOL_CLIENT_TOTAL_EXECUTION_TIME,
        CT_POOL_CACHETABLE_NUM_THREADS,
        CT_POOL_CACHETABLE_NUM_THREADS_ACTIVE,
        CT_POOL_CACHETABLE_QUEUE_SIZE,
        CT_POOL_CACHETABLE_MAX_QUEUE_SIZE,
        CT_POOL_CACHETABLE_TOTAL_ITEMS_PROCESSED,
        CT_POOL_CACHETABLE_TOTAL_EXECUTION_TIME,
        CT_POOL_CHECKPOINT_NUM_THREADS,
        CT_POOL_CHECKPOINT_NUM_THREADS_ACTIVE,
        CT_POOL_CHECKPOINT_QUEUE_SIZE,
        CT_POOL_CHECKPOINT_MAX_QUEUE_SIZE,
        CT_POOL_CHECKPOINT_TOTAL_ITEMS_PROCESSED,
        CT_POOL_CHECKPOINT_TOTAL_EXECUTION_TIME,

        CT_STATUS_NUM_ROWS
    };

    void init();
    void destroy();

    TOKU_ENGINE_STATUS_ROW_S status[CT_STATUS_NUM_ROWS];

private:
    bool m_initialized;
};
typedef CACHETABLE_STATUS_S* CACHETABLE_STATUS;
extern CACHETABLE_STATUS_S ct_status;

#define CT_STATUS_VAL(x) ct_status.status[CACHETABLE_STATUS_S::x].value.num
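
// Illustrative usage (a sketch, not a call site defined in this header):
// cachetable accounting might compare the current footprint to its limit, e.g.
//
//     bool over_limit = CT_STATUS_VAL(CT_SIZE_CURRENT) > CT_STATUS_VAL(CT_SIZE_LIMIT);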



//
// Lock Tree Manager statistics
//
class LTM_STATUS_S {
public:
    enum {
        LTM_SIZE_CURRENT = 0,
        LTM_SIZE_LIMIT,
        LTM_ESCALATION_COUNT,
        LTM_ESCALATION_TIME,
        LTM_ESCALATION_LATEST_RESULT,
        LTM_NUM_LOCKTREES,
        LTM_LOCK_REQUESTS_PENDING,
        LTM_STO_NUM_ELIGIBLE,
        LTM_STO_END_EARLY_COUNT,
        LTM_STO_END_EARLY_TIME,
        LTM_WAIT_COUNT,
        LTM_WAIT_TIME,
        LTM_LONG_WAIT_COUNT,
        LTM_LONG_WAIT_TIME,
        LTM_TIMEOUT_COUNT,
        LTM_WAIT_ESCALATION_COUNT,
        LTM_WAIT_ESCALATION_TIME,
        LTM_LONG_WAIT_ESCALATION_COUNT,
        LTM_LONG_WAIT_ESCALATION_TIME,
        LTM_STATUS_NUM_ROWS // must be last
    };

    void init(void);
    void destroy(void);

    TOKU_ENGINE_STATUS_ROW_S status[LTM_STATUS_NUM_ROWS];

private:
    bool m_initialized;
};
typedef LTM_STATUS_S* LTM_STATUS;
extern LTM_STATUS_S ltm_status;

#define LTM_STATUS_VAL(x) ltm_status.status[LTM_STATUS_S::x].value.num
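
// Illustrative usage (a sketch, not a call site defined in this header): lock
// tree reporting might read the memory currently used for locks, e.g.
//
//     uint64_t lock_memory = LTM_STATUS_VAL(LTM_SIZE_CURRENT);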


//
// Fractal Tree statistics
//
class FT_STATUS_S {
public:
    enum {
        FT_UPDATES = 0,
        FT_UPDATES_BROADCAST,
        FT_DESCRIPTOR_SET,
        FT_MSN_DISCARDS, // how many messages were ignored by leaf because of msn
        FT_TOTAL_RETRIES, // total number of search retries due to TRY_AGAIN
        FT_SEARCH_TRIES_GT_HEIGHT, // number of searches that required more tries than the height of the tree
        FT_SEARCH_TRIES_GT_HEIGHTPLUS3, // number of searches that required more tries than the height of the tree plus three
        FT_DISK_FLUSH_LEAF, // number of leaf nodes flushed to disk, not for checkpoint
        FT_DISK_FLUSH_LEAF_BYTES, // number of leaf node bytes flushed to disk, not for checkpoint
        FT_DISK_FLUSH_LEAF_UNCOMPRESSED_BYTES, // number of uncompressed leaf node bytes flushed to disk, not for checkpoint
        FT_DISK_FLUSH_LEAF_TOKUTIME, // time spent flushing leaf nodes to disk, not for checkpoint
        FT_DISK_FLUSH_NONLEAF, // number of nonleaf nodes flushed to disk, not for checkpoint
        FT_DISK_FLUSH_NONLEAF_BYTES, // number of nonleaf node bytes flushed to disk, not for checkpoint
        FT_DISK_FLUSH_NONLEAF_UNCOMPRESSED_BYTES, // number of uncompressed nonleaf node bytes flushed to disk, not for checkpoint
        FT_DISK_FLUSH_NONLEAF_TOKUTIME, // time spent flushing nonleaf nodes to disk, not for checkpoint
        FT_DISK_FLUSH_LEAF_FOR_CHECKPOINT, // number of leaf nodes flushed to disk for checkpoint
        FT_DISK_FLUSH_LEAF_BYTES_FOR_CHECKPOINT, // number of leaf node bytes flushed to disk for checkpoint
        FT_DISK_FLUSH_LEAF_UNCOMPRESSED_BYTES_FOR_CHECKPOINT, // number of uncompressed leaf node bytes flushed to disk for checkpoint
        FT_DISK_FLUSH_LEAF_TOKUTIME_FOR_CHECKPOINT, // time spent flushing leaf nodes to disk for checkpoint
        FT_DISK_FLUSH_NONLEAF_FOR_CHECKPOINT, // number of nonleaf nodes flushed to disk for checkpoint
        FT_DISK_FLUSH_NONLEAF_BYTES_FOR_CHECKPOINT, // number of nonleaf node bytes flushed to disk for checkpoint
        FT_DISK_FLUSH_NONLEAF_UNCOMPRESSED_BYTES_FOR_CHECKPOINT, // number of uncompressed nonleaf node bytes flushed to disk for checkpoint
        FT_DISK_FLUSH_NONLEAF_TOKUTIME_FOR_CHECKPOINT, // time spent flushing nonleaf nodes to disk for checkpoint
        FT_DISK_FLUSH_LEAF_COMPRESSION_RATIO, // effective compression ratio for leaf bytes flushed to disk
        FT_DISK_FLUSH_NONLEAF_COMPRESSION_RATIO, // effective compression ratio for nonleaf bytes flushed to disk
        FT_DISK_FLUSH_OVERALL_COMPRESSION_RATIO, // effective compression ratio for all bytes flushed to disk
        FT_PARTIAL_EVICTIONS_NONLEAF, // number of nonleaf node partial evictions
        FT_PARTIAL_EVICTIONS_NONLEAF_BYTES, // number of bytes freed by nonleaf node partial evictions
        FT_PARTIAL_EVICTIONS_LEAF, // number of leaf node partial evictions
        FT_PARTIAL_EVICTIONS_LEAF_BYTES, // number of bytes freed by leaf node partial evictions
        FT_FULL_EVICTIONS_LEAF, // number of full cachetable evictions on leaf nodes
        FT_FULL_EVICTIONS_LEAF_BYTES, // number of full cachetable evictions on leaf nodes (bytes)
        FT_FULL_EVICTIONS_NONLEAF, // number of full cachetable evictions on nonleaf nodes
        FT_FULL_EVICTIONS_NONLEAF_BYTES, // number of full cachetable evictions on nonleaf nodes (bytes)
        FT_CREATE_LEAF, // number of leaf nodes created
        FT_CREATE_NONLEAF, // number of nonleaf nodes created
        FT_DESTROY_LEAF, // number of leaf nodes destroyed
        FT_DESTROY_NONLEAF, // number of nonleaf nodes destroyed
        FT_MSG_BYTES_IN, // how many bytes of messages injected at root (for all trees)
        FT_MSG_BYTES_OUT, // how many bytes of messages flushed from h1 nodes to leaves
        FT_MSG_BYTES_CURR, // how many bytes of messages currently in trees (estimate)
        FT_MSG_NUM, // how many messages injected at root
        FT_MSG_NUM_BROADCAST, // how many broadcast messages injected at root
        FT_NUM_BASEMENTS_DECOMPRESSED_NORMAL, // how many basement nodes were decompressed because they were the target of a query
        FT_NUM_BASEMENTS_DECOMPRESSED_AGGRESSIVE, // ... because they were between lc and rc
        FT_NUM_BASEMENTS_DECOMPRESSED_PREFETCH,
        FT_NUM_BASEMENTS_DECOMPRESSED_WRITE,
        FT_NUM_MSG_BUFFER_DECOMPRESSED_NORMAL, // how many msg buffers were decompressed because they were the target of a query
        FT_NUM_MSG_BUFFER_DECOMPRESSED_AGGRESSIVE, // ... because they were between lc and rc
        FT_NUM_MSG_BUFFER_DECOMPRESSED_PREFETCH,
        FT_NUM_MSG_BUFFER_DECOMPRESSED_WRITE,
        FT_NUM_PIVOTS_FETCHED_QUERY, // how many pivots were fetched for a query
        FT_BYTES_PIVOTS_FETCHED_QUERY, // how many bytes of pivots were fetched for a query
        FT_TOKUTIME_PIVOTS_FETCHED_QUERY, // time spent fetching pivots for a query
        FT_NUM_PIVOTS_FETCHED_PREFETCH, // ... for a prefetch
        FT_BYTES_PIVOTS_FETCHED_PREFETCH, // ... for a prefetch
        FT_TOKUTIME_PIVOTS_FETCHED_PREFETCH, // ... for a prefetch
        FT_NUM_PIVOTS_FETCHED_WRITE, // ... for a write
        FT_BYTES_PIVOTS_FETCHED_WRITE, // ... for a write
        FT_TOKUTIME_PIVOTS_FETCHED_WRITE, // ... for a write
        FT_NUM_BASEMENTS_FETCHED_NORMAL, // how many basement nodes were fetched because they were the target of a query
        FT_BYTES_BASEMENTS_FETCHED_NORMAL, // how many bytes of basement nodes were fetched because they were the target of a query
        FT_TOKUTIME_BASEMENTS_FETCHED_NORMAL, // time spent fetching basement nodes that were the target of a query
        FT_NUM_BASEMENTS_FETCHED_AGGRESSIVE, // ... because they were between lc and rc
        FT_BYTES_BASEMENTS_FETCHED_AGGRESSIVE, // ... because they were between lc and rc
        FT_TOKUTIME_BASEMENTS_FETCHED_AGGRESSIVE, // ... because they were between lc and rc
        FT_NUM_BASEMENTS_FETCHED_PREFETCH,
        FT_BYTES_BASEMENTS_FETCHED_PREFETCH,
        FT_TOKUTIME_BASEMENTS_FETCHED_PREFETCH,
        FT_NUM_BASEMENTS_FETCHED_WRITE,
        FT_BYTES_BASEMENTS_FETCHED_WRITE,
        FT_TOKUTIME_BASEMENTS_FETCHED_WRITE,
        FT_NUM_MSG_BUFFER_FETCHED_NORMAL, // how many msg buffers were fetched because they were the target of a query
        FT_BYTES_MSG_BUFFER_FETCHED_NORMAL, // how many bytes of msg buffers were fetched because they were the target of a query
        FT_TOKUTIME_MSG_BUFFER_FETCHED_NORMAL, // time spent fetching msg buffers that were the target of a query
        FT_NUM_MSG_BUFFER_FETCHED_AGGRESSIVE, // ... because they were between lc and rc
        FT_BYTES_MSG_BUFFER_FETCHED_AGGRESSIVE, // ... because they were between lc and rc
        FT_TOKUTIME_MSG_BUFFER_FETCHED_AGGRESSIVE, // ... because they were between lc and rc
        FT_NUM_MSG_BUFFER_FETCHED_PREFETCH,
        FT_BYTES_MSG_BUFFER_FETCHED_PREFETCH,
        FT_TOKUTIME_MSG_BUFFER_FETCHED_PREFETCH,
        FT_NUM_MSG_BUFFER_FETCHED_WRITE,
        FT_BYTES_MSG_BUFFER_FETCHED_WRITE,
        FT_TOKUTIME_MSG_BUFFER_FETCHED_WRITE,
        FT_LEAF_COMPRESS_TOKUTIME, // seconds spent compressing leaf nodes to memory
        FT_LEAF_SERIALIZE_TOKUTIME, // seconds spent serializing leaf nodes to memory
        FT_LEAF_DECOMPRESS_TOKUTIME, // seconds spent decompressing leaf nodes to memory
        FT_LEAF_DESERIALIZE_TOKUTIME, // seconds spent deserializing leaf nodes to memory
        FT_NONLEAF_COMPRESS_TOKUTIME, // seconds spent compressing nonleaf nodes to memory
        FT_NONLEAF_SERIALIZE_TOKUTIME, // seconds spent serializing nonleaf nodes to memory
        FT_NONLEAF_DECOMPRESS_TOKUTIME, // seconds spent decompressing nonleaf nodes to memory
        FT_NONLEAF_DESERIALIZE_TOKUTIME, // seconds spent deserializing nonleaf nodes to memory
        FT_PRO_NUM_ROOT_SPLIT,
        FT_PRO_NUM_ROOT_H0_INJECT,
        FT_PRO_NUM_ROOT_H1_INJECT,
        FT_PRO_NUM_INJECT_DEPTH_0,
        FT_PRO_NUM_INJECT_DEPTH_1,
        FT_PRO_NUM_INJECT_DEPTH_2,
        FT_PRO_NUM_INJECT_DEPTH_3,
        FT_PRO_NUM_INJECT_DEPTH_GT3,
        FT_PRO_NUM_STOP_NONEMPTY_BUF,
        FT_PRO_NUM_STOP_H1,
        FT_PRO_NUM_STOP_LOCK_CHILD,
        FT_PRO_NUM_STOP_CHILD_INMEM,
        FT_PRO_NUM_DIDNT_WANT_PROMOTE,
        FT_BASEMENT_DESERIALIZE_FIXED_KEYSIZE, // how many basement nodes were deserialized with a fixed keysize
        FT_BASEMENT_DESERIALIZE_VARIABLE_KEYSIZE, // how many basement nodes were deserialized with a variable keysize
        FT_PRO_RIGHTMOST_LEAF_SHORTCUT_SUCCESS,
        FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_POS,
        FT_PRO_RIGHTMOST_LEAF_SHORTCUT_FAIL_REACTIVE,
        FT_CURSOR_SKIP_DELETED_LEAF_ENTRY, // how many deleted leaf entries were skipped by a cursor
        FT_STATUS_NUM_ROWS
    };

    void init(void);
    void destroy(void);

    TOKU_ENGINE_STATUS_ROW_S status[FT_STATUS_NUM_ROWS];

private:
    bool m_initialized;
};
typedef FT_STATUS_S* FT_STATUS;
extern FT_STATUS_S ft_status;

#define FT_STATUS_VAL(x)                                                              \
    (ft_status.status[FT_STATUS_S::x].type == PARCOUNT ?                              \
        read_partitioned_counter(ft_status.status[FT_STATUS_S::x].value.parcount) :   \
        ft_status.status[FT_STATUS_S::x].value.num)

#define FT_STATUS_INC(x, d)                                                                     \
    do {                                                                                        \
        if (ft_status.status[FT_STATUS_S::x].type == PARCOUNT) {                               \
            increment_partitioned_counter(ft_status.status[FT_STATUS_S::x].value.parcount, d); \
        } else {                                                                                \
            toku_sync_fetch_and_add(&ft_status.status[FT_STATUS_S::x].value.num, d);           \
        }                                                                                       \
    } while (0)
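
// Illustrative usage (a sketch, not a call site defined in this header):
// message-injection code might bump message counters, and FT_STATUS_VAL reads
// back either the partitioned counter or the plain value depending on the
// row's type, e.g.
//
//     FT_STATUS_INC(FT_MSG_NUM, 1);
//     FT_STATUS_INC(FT_MSG_BYTES_IN, msg_size);  // msg_size: hypothetical local
//     uint64_t msgs_injected = FT_STATUS_VAL(FT_MSG_NUM);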



//
// Flusher statistics
//
class FT_FLUSHER_STATUS_S {
public:
    enum {
        FT_FLUSHER_CLEANER_TOTAL_NODES = 0, // total number of nodes whose buffers are potentially flushed by cleaner thread
        FT_FLUSHER_CLEANER_H1_NODES, // number of nodes of height one whose message buffers are flushed by cleaner thread
        FT_FLUSHER_CLEANER_HGT1_NODES, // number of nodes of height > 1 whose message buffers are flushed by cleaner thread
        FT_FLUSHER_CLEANER_EMPTY_NODES, // number of nodes that are selected by cleaner, but whose buffers are empty
        FT_FLUSHER_CLEANER_NODES_DIRTIED, // number of nodes that are made dirty by the cleaner thread
        FT_FLUSHER_CLEANER_MAX_BUFFER_SIZE, // max number of bytes in message buffer flushed by cleaner thread
        FT_FLUSHER_CLEANER_MIN_BUFFER_SIZE,
        FT_FLUSHER_CLEANER_TOTAL_BUFFER_SIZE,
        FT_FLUSHER_CLEANER_MAX_BUFFER_WORKDONE, // max workdone value of any message buffer flushed by cleaner thread
        FT_FLUSHER_CLEANER_MIN_BUFFER_WORKDONE,
        FT_FLUSHER_CLEANER_TOTAL_BUFFER_WORKDONE,
        FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_STARTED, // number of times cleaner thread tries to merge a leaf
        FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_RUNNING, // number of cleaner thread leaf merges in progress
        FT_FLUSHER_CLEANER_NUM_LEAF_MERGES_COMPLETED, // number of times cleaner thread successfully merges a leaf
        FT_FLUSHER_CLEANER_NUM_DIRTIED_FOR_LEAF_MERGE, // nodes dirtied by the "flush from root" process to merge a leaf node
        FT_FLUSHER_FLUSH_TOTAL, // total number of flushes done by flusher threads or cleaner threads
        FT_FLUSHER_FLUSH_IN_MEMORY, // number of in-memory flushes
        FT_FLUSHER_FLUSH_NEEDED_IO, // number of flushes that had to read a child (or part) off disk
        FT_FLUSHER_FLUSH_CASCADES, // number of flushes that triggered another flush in the child
        FT_FLUSHER_FLUSH_CASCADES_1, // number of flushes that triggered 1 cascading flush
        FT_FLUSHER_FLUSH_CASCADES_2, // number of flushes that triggered 2 cascading flushes
        FT_FLUSHER_FLUSH_CASCADES_3, // number of flushes that triggered 3 cascading flushes
        FT_FLUSHER_FLUSH_CASCADES_4, // number of flushes that triggered 4 cascading flushes
        FT_FLUSHER_FLUSH_CASCADES_5, // number of flushes that triggered 5 cascading flushes
        FT_FLUSHER_FLUSH_CASCADES_GT_5, // number of flushes that triggered more than 5 cascading flushes
        FT_FLUSHER_SPLIT_LEAF, // number of leaf nodes split
        FT_FLUSHER_SPLIT_NONLEAF, // number of nonleaf nodes split
        FT_FLUSHER_MERGE_LEAF, // number of times leaf nodes are merged
        FT_FLUSHER_MERGE_NONLEAF, // number of times nonleaf nodes are merged
        FT_FLUSHER_BALANCE_LEAF, // number of times a leaf node is balanced
        FT_FLUSHER_STATUS_NUM_ROWS
    };

    void init(void);
    void destroy(void);

    TOKU_ENGINE_STATUS_ROW_S status[FT_FLUSHER_STATUS_NUM_ROWS];

private:
    bool m_initialized;
};
typedef FT_FLUSHER_STATUS_S* FT_FLUSHER_STATUS;
extern FT_FLUSHER_STATUS_S fl_status;

#define FL_STATUS_VAL(x) fl_status.status[FT_FLUSHER_STATUS_S::x].value.num
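
// Illustrative usage (a sketch, not a call site defined in this header):
// flusher reporting might read how many flushes cascaded into a child flush, e.g.
//
//     uint64_t cascades = FL_STATUS_VAL(FT_FLUSHER_FLUSH_CASCADES);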



//
// Hot Flusher
//
class FT_HOT_STATUS_S {
public:
    enum {
        FT_HOT_NUM_STARTED = 0, // number of HOT operations that have begun
        FT_HOT_NUM_COMPLETED, // number of HOT operations that have successfully completed
        FT_HOT_NUM_ABORTED, // number of HOT operations that have been aborted
        FT_HOT_MAX_ROOT_FLUSH_COUNT, // max number of flushes from root ever required to optimize a tree
        FT_HOT_STATUS_NUM_ROWS
    };

    void init(void);
    void destroy(void);

    TOKU_ENGINE_STATUS_ROW_S status[FT_HOT_STATUS_NUM_ROWS];

private:
    bool m_initialized;
};
typedef FT_HOT_STATUS_S* FT_HOT_STATUS;
extern FT_HOT_STATUS_S hot_status;

#define HOT_STATUS_VAL(x) hot_status.status[FT_HOT_STATUS_S::x].value.num
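
// Illustrative usage (a sketch, not a call site defined in this header): HOT
// (online optimize) reporting might read how many operations completed, e.g.
//
//     uint64_t hot_completed = HOT_STATUS_VAL(FT_HOT_NUM_COMPLETED);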



//
// Transaction statistics
//
class TXN_STATUS_S {
public:
    enum {
        TXN_BEGIN, // total number of transactions begun (does not include recovered txns)
        TXN_READ_BEGIN, // total number of read only transactions begun (does not include recovered txns)
        TXN_COMMIT, // successful commits
        TXN_ABORT,
        TXN_STATUS_NUM_ROWS
    };

    void init(void);
    void destroy(void);

    TOKU_ENGINE_STATUS_ROW_S status[TXN_STATUS_NUM_ROWS];

private:
    bool m_initialized;
};
typedef TXN_STATUS_S* TXN_STATUS;
extern TXN_STATUS_S txn_status;

#define TXN_STATUS_INC(x, d) increment_partitioned_counter(txn_status.status[TXN_STATUS_S::x].value.parcount, d)
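
// Illustrative usage (a sketch, not a call site defined in this header): unlike
// the *_VAL macros above, TXN_STATUS_INC always increments value.parcount, which
// implies these rows are expected to be partitioned counters.  A commit path
// might record a successful commit with
//
//     TXN_STATUS_INC(TXN_COMMIT, 1);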



//
// Logger statistics
//
class LOGGER_STATUS_S {
public:
    enum {
        LOGGER_NEXT_LSN = 0,
        LOGGER_NUM_WRITES,
        LOGGER_BYTES_WRITTEN,
        LOGGER_UNCOMPRESSED_BYTES_WRITTEN,
        LOGGER_TOKUTIME_WRITES,
        LOGGER_WAIT_BUF_LONG,
        LOGGER_STATUS_NUM_ROWS
    };

    void init(void);
    void destroy(void);

    TOKU_ENGINE_STATUS_ROW_S status[LOGGER_STATUS_NUM_ROWS];

private:
    bool m_initialized;
};
typedef LOGGER_STATUS_S* LOGGER_STATUS;
extern LOGGER_STATUS_S log_status;

#define LOG_STATUS_VAL(x) log_status.status[LOGGER_STATUS_S::x].value.num
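
// Illustrative usage (a sketch, not a call site defined in this header): logger
// reporting might read how many bytes have been written to the log, e.g.
//
//     uint64_t log_bytes = LOG_STATUS_VAL(LOGGER_BYTES_WRITTEN);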

void toku_status_init(void);
void toku_status_destroy(void);
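
// Illustrative call pattern (a sketch; the exact call sites are not shown in
// this header): toku_status_init() presumably initializes the global status
// objects declared above and toku_status_destroy() tears them down, e.g.
//
//     toku_status_init();      // once, before any status counters are touched
//     /* ... run the engine ... */
//     toku_status_destroy();   // once, at shutdown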