1 | /***************************************************************************** |
2 | |
3 | Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved. |
4 | Copyright (c) 2017, 2018, MariaDB Corporation. |
5 | |
6 | This program is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free Software |
8 | Foundation; version 2 of the License. |
9 | |
10 | This program is distributed in the hope that it will be useful, but WITHOUT |
11 | ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
12 | FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. |
13 | |
14 | You should have received a copy of the GNU General Public License along with |
15 | this program; if not, write to the Free Software Foundation, Inc., |
16 | 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA |
17 | |
18 | *****************************************************************************/ |
19 | |
20 | /**************************************************//** |
21 | @file include/sync0types.h |
22 | Global types for sync |
23 | |
24 | Created 9/5/1995 Heikki Tuuri |
25 | *******************************************************/ |
26 | |
27 | #ifndef sync0types_h |
28 | #define sync0types_h |
29 | |
30 | #include <vector> |
31 | #include <iostream> |
32 | #include <my_atomic.h> |
33 | |
34 | #include "ut0new.h" |
35 | #include "ut0counter.h" |
36 | |
37 | #ifdef _WIN32 |
38 | /** Native mutex */ |
39 | typedef CRITICAL_SECTION sys_mutex_t; |
40 | #else |
41 | /** Native mutex */ |
42 | typedef pthread_mutex_t sys_mutex_t; |
43 | #endif /* _WIN32 */ |
44 | |
/** Mutex states. The values are given explicitly; code elsewhere may
store and compare these states numerically (e.g. in a lock word), so
they must not be renumbered. */
enum mutex_state_t {
	/** Mutex is free */
	MUTEX_STATE_UNLOCKED = 0,

	/** Mutex is acquired by some thread. */
	MUTEX_STATE_LOCKED = 1,

	/** Mutex is contended and there are threads waiting on the lock. */
	MUTEX_STATE_WAITERS = 2
};
56 | |
57 | /* |
58 | LATCHING ORDER WITHIN THE DATABASE |
59 | ================================== |
60 | |
61 | The mutex or latch in the central memory object, for instance, a rollback |
62 | segment object, must be acquired before acquiring the latch or latches to |
63 | the corresponding file data structure. In the latching order below, these |
64 | file page object latches are placed immediately below the corresponding |
65 | central memory object latch or mutex. |
66 | |
67 | Synchronization object Notes |
68 | ---------------------- ----- |
69 | |
70 | Dictionary mutex If we have a pointer to a dictionary |
71 | | object, e.g., a table, it can be |
72 | | accessed without reserving the |
73 | | dictionary mutex. We must have a |
74 | | reservation, a memoryfix, to the |
75 | | appropriate table object in this case, |
76 | | and the table must be explicitly |
77 | | released later. |
78 | V |
79 | Dictionary header |
80 | | |
81 | V |
82 | Secondary index tree latch The tree latch protects also all |
83 | | the B-tree non-leaf pages. These |
84 | V can be read with the page only |
85 | Secondary index non-leaf bufferfixed to save CPU time, |
86 | | no s-latch is needed on the page. |
87 | | Modification of a page requires an |
88 | | x-latch on the page, however. If a |
89 | | thread owns an x-latch to the tree, |
90 | | it is allowed to latch non-leaf pages |
91 | | even after it has acquired the fsp |
92 | | latch. |
93 | V |
94 | Secondary index leaf The latch on the secondary index leaf |
95 | | can be kept while accessing the |
96 | | clustered index, to save CPU time. |
97 | V |
98 | Clustered index tree latch To increase concurrency, the tree |
99 | | latch is usually released when the |
100 | | leaf page latch has been acquired. |
101 | V |
102 | Clustered index non-leaf |
103 | | |
104 | V |
105 | Clustered index leaf |
106 | | |
107 | V |
108 | Transaction system header |
109 | | |
110 | V |
111 | Rollback segment mutex The rollback segment mutex must be |
112 | | reserved, if, e.g., a new page must |
113 | | be added to an undo log. The rollback |
114 | | segment and the undo logs in its |
115 | | history list can be seen as an |
116 | | analogue of a B-tree, and the latches |
117 | | reserved similarly, using a version of |
118 | | lock-coupling. If an undo log must be |
119 | | extended by a page when inserting an |
120 | | undo log record, this corresponds to |
121 | | a pessimistic insert in a B-tree. |
122 | V |
123 | Rollback segment header |
124 | | |
125 | V |
126 | Purge system latch |
127 | | |
128 | V |
129 | Undo log pages If a thread owns the trx undo mutex, |
130 | | or for a log in the history list, the |
131 | | rseg mutex, it is allowed to latch |
132 | | undo log pages in any order, and even |
133 | | after it has acquired the fsp latch. |
134 | | If a thread does not have the |
135 | | appropriate mutex, it is allowed to |
136 | | latch only a single undo log page in |
137 | | a mini-transaction. |
138 | V |
139 | File space management latch If a mini-transaction must allocate |
140 | | several file pages, it can do that, |
141 | | because it keeps the x-latch to the |
142 | | file space management in its memo. |
143 | V |
144 | File system pages |
145 | | |
146 | V |
147 | lock_sys_wait_mutex Mutex protecting lock timeout data |
148 | | |
149 | V |
150 | lock_sys_mutex Mutex protecting lock_sys_t |
151 | | |
152 | V |
153 | trx_sys.mutex Mutex protecting trx_sys_t |
154 | | |
155 | V |
156 | Threads mutex Background thread scheduling mutex |
157 | | |
158 | V |
159 | query_thr_mutex Mutex protecting query threads |
160 | | |
161 | V |
162 | trx_mutex Mutex protecting trx_t fields |
163 | | |
164 | V |
165 | Search system mutex |
166 | | |
167 | V |
168 | Buffer pool mutex |
169 | | |
170 | V |
171 | Log mutex |
172 | | |
173 | Any other latch |
174 | | |
175 | V |
176 | Memory pool mutex */ |
177 | |
/** Latching order levels. If you modify these, you have to also update
LatchDebug internals in sync0debug.cc */

enum latch_level_t {
	SYNC_UNKNOWN = 0,

	SYNC_MUTEX = 1,

	RW_LOCK_SX,
	RW_LOCK_X_WAIT,
	RW_LOCK_S,
	RW_LOCK_X,
	RW_LOCK_NOT_LOCKED,

	SYNC_MONITOR_MUTEX,

	SYNC_ANY_LATCH,

	SYNC_DOUBLEWRITE,

	SYNC_BUF_FLUSH_LIST,

	SYNC_BUF_BLOCK,
	SYNC_BUF_PAGE_HASH,

	SYNC_BUF_POOL,

	SYNC_POOL,
	SYNC_POOL_MANAGER,

	SYNC_SEARCH_SYS,

	SYNC_WORK_QUEUE,

	SYNC_FTS_TOKENIZE,
	SYNC_FTS_OPTIMIZE,
	SYNC_FTS_BG_THREADS,
	SYNC_FTS_CACHE_INIT,
	SYNC_RECV,
	SYNC_LOG_FLUSH_ORDER,
	SYNC_LOG,
	SYNC_LOG_WRITE,
	SYNC_PAGE_CLEANER,
	SYNC_PURGE_QUEUE,
	/* NOTE(review): a stray bare "," (an empty enumerator, which is
	ill-formed C++) was removed here. Verify against LatchDebug in
	sync0debug.cc that no latch level is missing at this position,
	since removing an enumerator shifts all subsequent values. */
	SYNC_REC_LOCK,
	SYNC_THREADS,
	SYNC_TRX,
	SYNC_RW_TRX_HASH_ELEMENT,
	SYNC_TRX_SYS,
	SYNC_LOCK_SYS,
	SYNC_LOCK_WAIT_SYS,

	SYNC_INDEX_ONLINE_LOG,

	SYNC_IBUF_BITMAP,
	SYNC_IBUF_BITMAP_MUTEX,
	SYNC_IBUF_TREE_NODE,
	SYNC_IBUF_TREE_NODE_NEW,
	SYNC_IBUF_INDEX_TREE,

	SYNC_IBUF_MUTEX,

	SYNC_FSP_PAGE,
	SYNC_FSP,
	SYNC_EXTERN_STORAGE,
	SYNC_TRX_UNDO_PAGE,
	/* NOTE(review): two stray bare "," removed here as well —
	verify against sync0debug.cc (see note above). */
	SYNC_NOREDO_RSEG,
	SYNC_REDO_RSEG,
	SYNC_PURGE_LATCH,
	SYNC_TREE_NODE,
	SYNC_TREE_NODE_FROM_HASH,
	SYNC_TREE_NODE_NEW,
	SYNC_IBUF_PESS_INSERT_MUTEX,
	SYNC_INDEX_TREE,

	/* NOTE(review): two stray bare "," removed here as well —
	verify against sync0debug.cc (see note above). */
	SYNC_STATS_AUTO_RECALC,
	SYNC_DICT_AUTOINC_MUTEX,
	SYNC_DICT,
	SYNC_FTS_CACHE,

	SYNC_DICT_OPERATION,

	SYNC_TRX_I_S_LAST_READ,

	SYNC_TRX_I_S_RWLOCK,

	SYNC_RECV_WRITER,

	/** Level is varying. Only used with buffer pool page locks, which
	do not have a fixed level, but instead have their level set after
	the page is locked; see e.g. ibuf_bitmap_get_map_page(). */

	SYNC_LEVEL_VARYING,

	/** This can be used to suppress order checking. */
	SYNC_NO_ORDER_CHECK,

	/** Maximum level value */
	SYNC_LEVEL_MAX = SYNC_NO_ORDER_CHECK
};
283 | |
/** Each latch has an ID. This id is used for creating the latch and to look
up its meta-data. See sync0debug.cc. */
enum latch_id_t {
	/** Sentinel: no latch ID assigned */
	LATCH_ID_NONE = 0,
	LATCH_ID_AUTOINC,
	LATCH_ID_BUF_BLOCK_MUTEX,
	LATCH_ID_BUF_POOL,
	LATCH_ID_BUF_POOL_ZIP,
	LATCH_ID_CACHE_LAST_READ,
	LATCH_ID_DICT_FOREIGN_ERR,
	LATCH_ID_DICT_SYS,
	LATCH_ID_FILE_FORMAT_MAX,
	LATCH_ID_FIL_SYSTEM,
	LATCH_ID_FLUSH_LIST,
	LATCH_ID_FTS_BG_THREADS,
	LATCH_ID_FTS_DELETE,
	LATCH_ID_FTS_OPTIMIZE,
	LATCH_ID_FTS_DOC_ID,
	LATCH_ID_FTS_PLL_TOKENIZE,
	LATCH_ID_HASH_TABLE_MUTEX,
	LATCH_ID_IBUF_BITMAP,
	LATCH_ID_IBUF,
	LATCH_ID_IBUF_PESSIMISTIC_INSERT,
	LATCH_ID_LOG_SYS,
	LATCH_ID_LOG_WRITE,
	LATCH_ID_LOG_FLUSH_ORDER,
	LATCH_ID_LIST,
	LATCH_ID_MUTEX_LIST,
	LATCH_ID_PAGE_CLEANER,
	LATCH_ID_PURGE_SYS_PQ,
	LATCH_ID_RECALC_POOL,
	LATCH_ID_RECV_SYS,
	LATCH_ID_RECV_WRITER,
	LATCH_ID_REDO_RSEG,
	LATCH_ID_NOREDO_RSEG,
	LATCH_ID_RW_LOCK_DEBUG,
	LATCH_ID_RTR_SSN_MUTEX,
	LATCH_ID_RTR_ACTIVE_MUTEX,
	LATCH_ID_RTR_MATCH_MUTEX,
	LATCH_ID_RTR_PATH_MUTEX,
	LATCH_ID_RW_LOCK_LIST,
	LATCH_ID_RW_LOCK_MUTEX,
	LATCH_ID_SRV_INNODB_MONITOR,
	LATCH_ID_SRV_MISC_TMPFILE,
	LATCH_ID_SRV_MONITOR_FILE,
	LATCH_ID_BUF_DBLWR,
	LATCH_ID_TRX_POOL,
	LATCH_ID_TRX_POOL_MANAGER,
	LATCH_ID_TRX,
	LATCH_ID_LOCK_SYS,
	LATCH_ID_LOCK_SYS_WAIT,
	LATCH_ID_TRX_SYS,
	LATCH_ID_SRV_SYS,
	LATCH_ID_SRV_SYS_TASKS,
	LATCH_ID_PAGE_ZIP_STAT_PER_INDEX,
	LATCH_ID_EVENT_MANAGER,
	LATCH_ID_EVENT_MUTEX,
	LATCH_ID_SYNC_ARRAY_MUTEX,
	LATCH_ID_ZIP_PAD_MUTEX,
	LATCH_ID_OS_AIO_READ_MUTEX,
	LATCH_ID_OS_AIO_WRITE_MUTEX,
	LATCH_ID_OS_AIO_LOG_MUTEX,
	LATCH_ID_OS_AIO_IBUF_MUTEX,
	LATCH_ID_OS_AIO_SYNC_MUTEX,
	LATCH_ID_ROW_DROP_LIST,
	LATCH_ID_INDEX_ONLINE_LOG,
	LATCH_ID_WORK_QUEUE,
	LATCH_ID_BTR_SEARCH,
	LATCH_ID_BUF_BLOCK_LOCK,
	LATCH_ID_BUF_BLOCK_DEBUG,
	LATCH_ID_DICT_OPERATION,
	LATCH_ID_CHECKPOINT,
	LATCH_ID_FIL_SPACE,
	LATCH_ID_FTS_CACHE,
	LATCH_ID_FTS_CACHE_INIT,
	LATCH_ID_TRX_I_S_CACHE,
	LATCH_ID_TRX_PURGE,
	LATCH_ID_IBUF_INDEX_TREE,
	LATCH_ID_INDEX_TREE,
	LATCH_ID_DICT_TABLE_STATS,
	LATCH_ID_HASH_TABLE_RW_LOCK,
	LATCH_ID_BUF_CHUNK_MAP_LATCH,
	LATCH_ID_SYNC_DEBUG_MUTEX,
	LATCH_ID_SCRUB_STAT_MUTEX,
	LATCH_ID_DEFRAGMENT_MUTEX,
	LATCH_ID_BTR_DEFRAGMENT_MUTEX,
	LATCH_ID_FIL_CRYPT_MUTEX,
	LATCH_ID_FIL_CRYPT_STAT_MUTEX,
	LATCH_ID_FIL_CRYPT_DATA_MUTEX,
	LATCH_ID_FIL_CRYPT_THREADS_MUTEX,
	LATCH_ID_RW_TRX_HASH_ELEMENT,
	LATCH_ID_TEST_MUTEX,
	/** Alias for the highest valid latch ID (not a distinct latch);
	used for sizing/bounds checks. */
	LATCH_ID_MAX = LATCH_ID_TEST_MUTEX
};
378 | |
379 | #ifndef UNIV_INNOCHECKSUM |
380 | /** OS mutex, without any policy. It is a thin wrapper around the |
381 | system mutexes. The interface is different from the policy mutexes, |
382 | to ensure that it is called directly and not confused with the |
383 | policy mutexes. */ |
384 | struct OSMutex { |
385 | |
386 | /** Constructor */ |
387 | OSMutex() |
388 | UNIV_NOTHROW |
389 | { |
390 | ut_d(m_freed = true); |
391 | } |
392 | |
393 | /** Create the mutex by calling the system functions. */ |
394 | void init() |
395 | UNIV_NOTHROW |
396 | { |
397 | ut_ad(m_freed); |
398 | |
399 | #ifdef _WIN32 |
400 | InitializeCriticalSection((LPCRITICAL_SECTION) &m_mutex); |
401 | #else |
402 | { |
403 | int ret = pthread_mutex_init(&m_mutex, NULL); |
404 | ut_a(ret == 0); |
405 | } |
406 | #endif /* _WIN32 */ |
407 | |
408 | ut_d(m_freed = false); |
409 | } |
410 | |
411 | /** Destructor */ |
412 | ~OSMutex() { } |
413 | |
414 | /** Destroy the mutex */ |
415 | void destroy() |
416 | UNIV_NOTHROW |
417 | { |
418 | ut_ad(!m_freed); |
419 | #ifdef _WIN32 |
420 | DeleteCriticalSection((LPCRITICAL_SECTION) &m_mutex); |
421 | #else |
422 | int ret; |
423 | |
424 | ret = pthread_mutex_destroy(&m_mutex); |
425 | |
426 | if (ret != 0) { |
427 | |
428 | ib::error() |
429 | << "Return value " << ret << " when calling " |
430 | << "pthread_mutex_destroy()." ; |
431 | } |
432 | #endif /* _WIN32 */ |
433 | ut_d(m_freed = true); |
434 | } |
435 | |
436 | /** Release the mutex. */ |
437 | void exit() |
438 | UNIV_NOTHROW |
439 | { |
440 | ut_ad(!m_freed); |
441 | #ifdef _WIN32 |
442 | LeaveCriticalSection(&m_mutex); |
443 | #else |
444 | int ret = pthread_mutex_unlock(&m_mutex); |
445 | ut_a(ret == 0); |
446 | #endif /* _WIN32 */ |
447 | } |
448 | |
449 | /** Acquire the mutex. */ |
450 | void enter() |
451 | UNIV_NOTHROW |
452 | { |
453 | ut_ad(!m_freed); |
454 | #ifdef _WIN32 |
455 | EnterCriticalSection((LPCRITICAL_SECTION) &m_mutex); |
456 | #else |
457 | int ret = pthread_mutex_lock(&m_mutex); |
458 | ut_a(ret == 0); |
459 | #endif /* _WIN32 */ |
460 | } |
461 | |
462 | /** @return true if locking succeeded */ |
463 | bool try_lock() |
464 | UNIV_NOTHROW |
465 | { |
466 | ut_ad(!m_freed); |
467 | #ifdef _WIN32 |
468 | return(TryEnterCriticalSection(&m_mutex) != 0); |
469 | #else |
470 | return(pthread_mutex_trylock(&m_mutex) == 0); |
471 | #endif /* _WIN32 */ |
472 | } |
473 | |
474 | /** Required for os_event_t */ |
475 | operator sys_mutex_t*() |
476 | UNIV_NOTHROW |
477 | { |
478 | return(&m_mutex); |
479 | } |
480 | |
481 | private: |
482 | #ifdef DBUG_ASSERT_EXISTS |
483 | /** true if the mutex has been freed/destroyed. */ |
484 | bool m_freed; |
485 | #endif /* DBUG_ASSERT_EXISTS */ |
486 | |
487 | sys_mutex_t m_mutex; |
488 | }; |
489 | |
#ifdef UNIV_PFS_MUTEX
/** Latch element.
Used for mutexes which have PFS keys defined under UNIV_PFS_MUTEX.
@param[in]	id	Latch id
@param[in]	level	Latch level
@param[in]	key	PFS key */
# define LATCH_ADD_MUTEX(id, level, key)	latch_meta[LATCH_ID_ ## id] =\
	UT_NEW_NOKEY(latch_meta_t(LATCH_ID_ ## id, #id, level, #level, key))

#ifdef UNIV_PFS_RWLOCK
/** Latch element.
Used for rwlocks which have PFS keys defined under UNIV_PFS_RWLOCK.
@param[in]	id	Latch id
@param[in]	level	Latch level
@param[in]	key	PFS key */
# define LATCH_ADD_RWLOCK(id, level, key)	latch_meta[LATCH_ID_ ## id] =\
	UT_NEW_NOKEY(latch_meta_t(LATCH_ID_ ## id, #id, level, #level, key))
#else
/** Latch element. PFS mutex keys exist but rwlock keys do not:
the key argument is ignored and the rwlock is registered as
PSI_NOT_INSTRUMENTED. */
# define LATCH_ADD_RWLOCK(id, level, key)	latch_meta[LATCH_ID_ ## id] =\
	UT_NEW_NOKEY(latch_meta_t(LATCH_ID_ ## id, #id, level, #level,	\
		     PSI_NOT_INSTRUMENTED))
#endif /* UNIV_PFS_RWLOCK */

#else
/** Latch element, without PFS instrumentation compiled in:
the key argument is ignored. */
# define LATCH_ADD_MUTEX(id, level, key)	latch_meta[LATCH_ID_ ## id] =\
	UT_NEW_NOKEY(latch_meta_t(LATCH_ID_ ## id, #id, level, #level))
# define LATCH_ADD_RWLOCK(id, level, key)	latch_meta[LATCH_ID_ ## id] =\
	UT_NEW_NOKEY(latch_meta_t(LATCH_ID_ ## id, #id, level, #level))
#endif /* UNIV_PFS_MUTEX */
519 | |
/** Default latch counter: collects spin/wait/call statistics for a
latch. Holds either a single aggregate Count (see sum_register()) or
one Count per latch instance (see single_register()). */
class LatchCounter {

public:
	/** The counts we collect for a mutex */
	struct Count {

		/** Constructor: all counters start at zero and
		collection is disabled. */
		Count()
			UNIV_NOTHROW
			:
			m_spins(),
			m_waits(),
			m_calls(),
			m_enabled()
		{
			/* No op */
		}

		/** Reset the values to zero (m_enabled is left as is) */
		void reset()
			UNIV_NOTHROW
		{
			m_spins = 0;
			m_waits = 0;
			m_calls = 0;
		}

		/** Number of spins trying to acquire the latch. */
		uint32_t	m_spins;

		/** Number of waits trying to acquire the latch */
		uint32_t	m_waits;

		/** Number of times it was called */
		uint32_t	m_calls;

		/** true if enabled */
		bool		m_enabled;
	};

	/** Constructor: monitoring starts out disabled. */
	LatchCounter()
		UNIV_NOTHROW
		:
		m_active(false)
	{
		m_mutex.init();
	}

	/** Destructor. Destroys the mutex first and then frees the
	Count instances; assumes no concurrent use at destruction time. */
	~LatchCounter()
		UNIV_NOTHROW
	{
		m_mutex.destroy();

		for (Counters::iterator it = m_counters.begin();
		     it != m_counters.end();
		     ++it) {

			Count*	count = *it;

			UT_DELETE(count);
		}
	}

	/** Reset all counters to zero. The m_mutex latch guards the
	m_counters vector itself; the individual counter fields are
	plain non-atomic integers, but that is acceptable because the
	information collected is not required for the correct
	functioning of the server. */
	void reset()
		UNIV_NOTHROW
	{
		m_mutex.enter();

		Counters::iterator	end = m_counters.end();

		for (Counters::iterator it = m_counters.begin();
		     it != end;
		     ++it) {

			(*it)->reset();
		}

		m_mutex.exit();
	}

	/** Lazily create (on first call) and return the single
	aggregate counter shared by all instances of the latch.
	@return the aggregate counter */
	Count* sum_register()
		UNIV_NOTHROW
	{
		m_mutex.enter();

		Count*	count;

		if (m_counters.empty()) {
			count = UT_NEW_NOKEY(Count());
			m_counters.push_back(count);
		} else {
			/* Aggregate mode keeps exactly one counter. */
			ut_a(m_counters.size() == 1);
			count = m_counters[0];
		}

		m_mutex.exit();

		return(count);
	}

	/** Register a single instance counter. Ownership of the Count
	is transferred to this object (freed in the destructor). */
	void single_register(Count* count)
		UNIV_NOTHROW
	{
		m_mutex.enter();

		m_counters.push_back(count);

		m_mutex.exit();
	}

	/** Deregister a single instance counter
	@param[in]	count		The count instance to deregister */
	void single_deregister(Count* count)
		UNIV_NOTHROW
	{
		m_mutex.enter();

		m_counters.erase(
			std::remove(
				m_counters.begin(),
				m_counters.end(), count),
			m_counters.end());

		m_mutex.exit();
	}

	/** Iterate over the counters, invoking callback on each.
	NOTE(review): m_mutex is not acquired here — presumably callers
	ensure no concurrent (de)registration; verify at call sites. */
	template <typename Callback>
	void iterate(Callback& callback) const
		UNIV_NOTHROW
	{
		Counters::const_iterator	end = m_counters.end();

		for (Counters::const_iterator it = m_counters.begin();
		     it != end;
		     ++it) {

			callback(*it);
		}
	}

	/** Enable the monitoring: sets m_enabled on every registered
	counter and marks this collector active. */
	void enable()
		UNIV_NOTHROW
	{
		m_mutex.enter();

		Counters::const_iterator	end = m_counters.end();

		for (Counters::const_iterator it = m_counters.begin();
		     it != end;
		     ++it) {

			(*it)->m_enabled = true;
		}

		m_active = true;

		m_mutex.exit();
	}

	/** Disable the monitoring: clears m_enabled on every registered
	counter and marks this collector inactive. */
	void disable()
		UNIV_NOTHROW
	{
		m_mutex.enter();

		Counters::const_iterator	end = m_counters.end();

		for (Counters::const_iterator it = m_counters.begin();
		     it != end;
		     ++it) {

			(*it)->m_enabled = false;
		}

		m_active = false;

		m_mutex.exit();
	}

	/** @return if monitoring is active */
	bool is_enabled() const
		UNIV_NOTHROW
	{
		return(m_active);
	}

private:
	/* Disable copying */
	LatchCounter(const LatchCounter&);
	LatchCounter& operator=(const LatchCounter&);

private:
	typedef OSMutex Mutex;
	typedef std::vector<Count*> Counters;

	/** Mutex protecting m_counters */
	Mutex		m_mutex;

	/** Counters for the latches */
	Counters	m_counters;

	/** if true then we collect the data */
	bool		m_active;
};
735 | |
/** Latch meta data: immutable identity (id, name, ordering level and
optional PFS key) of a latch type, plus its statistics counter.
@tparam Counter the statistics collector type */
template <typename Counter = LatchCounter>
class LatchMeta {

public:
	typedef Counter CounterType;

#ifdef UNIV_PFS_MUTEX
	typedef	mysql_pfs_key_t	pfs_key_t;
#endif /* UNIV_PFS_MUTEX */

	/** Default constructor: creates an unused/sentinel element
	(LATCH_ID_NONE, SYNC_UNKNOWN, null names). */
	LatchMeta()
		:
		m_id(LATCH_ID_NONE),
		m_name(),
		m_level(SYNC_UNKNOWN),
		m_level_name()
#ifdef UNIV_PFS_MUTEX
		,m_pfs_key()
#endif /* UNIV_PFS_MUTEX */
	{
	}

	/** Destructor */
	~LatchMeta() { }

	/** Constructor
	@param[in]	id		Latch id
	@param[in]	name		Latch name
	@param[in]	level		Latch level
	@param[in]	level_name	Latch level text representation
	@param[in]	key		PFS key */
	LatchMeta(
		latch_id_t	id,
		const char*	name,
		latch_level_t	level,
		const char*	level_name
#ifdef UNIV_PFS_MUTEX
		,pfs_key_t	key
#endif /* UNIV_PFS_MUTEX */
	      )
		:
		m_id(id),
		m_name(name),
		m_level(level),
		m_level_name(level_name)
#ifdef UNIV_PFS_MUTEX
		,m_pfs_key(key)
#endif /* UNIV_PFS_MUTEX */
	{
		/* No op */
	}

	/** Less than operator.
	@param[in]	rhs		Instance to compare against
	@return true if this.get_id() < rhs.get_id() */
	bool operator<(const LatchMeta& rhs) const
	{
		return(get_id() < rhs.get_id());
	}

	/** @return the latch id */
	latch_id_t get_id() const
	{
		return(m_id);
	}

	/** @return the latch name */
	const char* get_name() const
	{
		return(m_name);
	}

	/** @return the latch level */
	latch_level_t get_level() const
	{
		return(m_level);
	}

	/** @return the latch level name */
	const char* get_level_name() const
	{
		return(m_level_name);
	}

#ifdef UNIV_PFS_MUTEX
	/** @return the PFS key for the latch */
	pfs_key_t get_pfs_key() const
	{
		return(m_pfs_key);
	}
#endif /* UNIV_PFS_MUTEX */

	/** @return the counter instance */
	Counter* get_counter()
	{
		return(&m_counter);
	}

private:
	/** Latch id */
	latch_id_t	m_id;

	/** Latch name */
	const char*	m_name;

	/** Latch level in the ordering */
	latch_level_t	m_level;

	/** Latch level text representation */
	const char*	m_level_name;

#ifdef UNIV_PFS_MUTEX
	/** PFS key */
	pfs_key_t	m_pfs_key;
#endif /* UNIV_PFS_MUTEX */

	/** For gathering latch statistics */
	Counter		m_counter;
};
857 | |
858 | typedef LatchMeta<LatchCounter> latch_meta_t; |
859 | typedef std::vector<latch_meta_t*, ut_allocator<latch_meta_t*> > LatchMetaData; |
860 | |
861 | /** Note: This is accessed without any mutex protection. It is initialised |
862 | at startup and elements should not be added to or removed from it after |
863 | that. See sync_latch_meta_init() */ |
864 | extern LatchMetaData latch_meta; |
865 | |
866 | /** Get the latch meta-data from the latch ID |
867 | @param[in] id Latch ID |
868 | @return the latch meta data */ |
869 | inline |
870 | latch_meta_t& |
871 | sync_latch_get_meta(latch_id_t id) |
872 | { |
873 | ut_ad(static_cast<size_t>(id) < latch_meta.size()); |
874 | ut_ad(id == latch_meta[id]->get_id()); |
875 | |
876 | return(*latch_meta[id]); |
877 | } |
878 | |
879 | /** Fetch the counter for the latch |
880 | @param[in] id Latch ID |
881 | @return the latch counter */ |
882 | inline |
883 | latch_meta_t::CounterType* |
884 | sync_latch_get_counter(latch_id_t id) |
885 | { |
886 | latch_meta_t& meta = sync_latch_get_meta(id); |
887 | |
888 | return(meta.get_counter()); |
889 | } |
890 | |
891 | /** Get the latch name from the latch ID |
892 | @param[in] id Latch ID |
893 | @return the name, will assert if not found */ |
894 | inline |
895 | const char* |
896 | sync_latch_get_name(latch_id_t id) |
897 | { |
898 | const latch_meta_t& meta = sync_latch_get_meta(id); |
899 | |
900 | return(meta.get_name()); |
901 | } |
902 | |
903 | /** Get the latch ordering level |
904 | @param[in] id Latch id to lookup |
905 | @return the latch level */ |
906 | inline |
907 | latch_level_t |
908 | sync_latch_get_level(latch_id_t id) |
909 | { |
910 | const latch_meta_t& meta = sync_latch_get_meta(id); |
911 | |
912 | return(meta.get_level()); |
913 | } |
914 | |
915 | #ifdef UNIV_PFS_MUTEX |
916 | /** Get the latch PFS key from the latch ID |
917 | @param[in] id Latch ID |
918 | @return the PFS key */ |
919 | inline |
920 | mysql_pfs_key_t |
921 | sync_latch_get_pfs_key(latch_id_t id) |
922 | { |
923 | const latch_meta_t& meta = sync_latch_get_meta(id); |
924 | |
925 | return(meta.get_pfs_key()); |
926 | } |
927 | #endif |
928 | |
929 | /** String representation of the filename and line number where the |
930 | latch was created |
931 | @param[in] id Latch ID |
@param[in]	created		Filename and line number where it was created
933 | @return the string representation */ |
934 | std::string |
935 | sync_mutex_to_string( |
936 | latch_id_t id, |
937 | const std::string& created); |
938 | |
939 | /** Get the latch name from a sync level |
940 | @param[in] level Latch level to lookup |
941 | @return 0 if not found. */ |
942 | const char* |
943 | sync_latch_get_name(latch_level_t level); |
944 | |
945 | /** Print the filename "basename" |
946 | @return the basename */ |
947 | const char* |
948 | sync_basename(const char* filename); |
949 | |
950 | /** Register a latch, called when it is created |
951 | @param[in] ptr Latch instance that was created |
952 | @param[in] filename Filename where it was created |
953 | @param[in] line Line number in filename */ |
954 | void |
955 | sync_file_created_register( |
956 | const void* ptr, |
957 | const char* filename, |
958 | uint16_t line); |
959 | |
960 | /** Deregister a latch, called when it is destroyed |
961 | @param[in] ptr Latch to be destroyed */ |
962 | void |
963 | sync_file_created_deregister(const void* ptr); |
964 | |
965 | /** Get the string where the file was created. Its format is "name:line" |
966 | @param[in] ptr Latch instance |
967 | @return created information or "" if can't be found */ |
968 | std::string |
969 | sync_file_created_get(const void* ptr); |
970 | |
971 | #ifdef UNIV_DEBUG |
972 | |
/** All (ordered) latches, used in debugging, must derive from this class. */
struct latch_t {

	/** Constructor
	@param[in]	id	The latch ID */
	explicit latch_t(latch_id_t id = LATCH_ID_NONE)
		UNIV_NOTHROW
		:
		m_id(id),
		m_rw_lock(),
		m_temp_fsp() { }

	/** Destructor; virtual so subclasses can be deleted through
	a latch_t pointer. */
	virtual ~latch_t() UNIV_NOTHROW { }

	/** @return the latch ID (may be LATCH_ID_NONE) */
	latch_id_t get_id() const
	{
		return(m_id);
	}

	/** @return true if it is a rw-lock */
	bool is_rw_lock() const
		UNIV_NOTHROW
	{
		return(m_rw_lock);
	}

	/** Print the latch context
	@return the string representation */
	virtual std::string to_string() const = 0;

	/** @return "filename:line" from where the latch was last locked */
	virtual std::string locked_from() const = 0;

	/** @return the latch level; asserts that m_id is set */
	latch_level_t get_level() const
		UNIV_NOTHROW
	{
		ut_a(m_id != LATCH_ID_NONE);

		return(sync_latch_get_level(m_id));
	}

	/** @return true if the latch is for a temporary file space */
	bool is_temp_fsp() const
		UNIV_NOTHROW
	{
		return(m_temp_fsp);
	}

	/** Set the temporary tablespace flag. (For internal temporary
	tables, MySQL 5.7 does not always acquire the index->lock. We
	need to figure out the context and add some special rules
	during the checks.) Only valid for the fil_space latch. */
	void set_temp_fsp()
		UNIV_NOTHROW
	{
		ut_ad(get_id() == LATCH_ID_FIL_SPACE);
		m_temp_fsp = true;
	}

	/** @return the latch name; asserts that m_id is set */
	const char* get_name() const
		UNIV_NOTHROW
	{
		ut_a(m_id != LATCH_ID_NONE);

		return(sync_latch_get_name(m_id));
	}

	/** Latch ID */
	latch_id_t	m_id;

	/** true if it is a rw-lock. In debug mode, rw_lock_t derives from
	this class and sets this variable. */
	bool		m_rw_lock;

	/** true if it is a temporary space latch */
	bool		m_temp_fsp;
};
1054 | |
/** Subclass this to iterate over a thread's acquired latch levels. */
struct sync_check_functor_t {
	/** Virtual destructor: subclasses are used polymorphically. */
	virtual ~sync_check_functor_t() { }

	/** Check one latch level held by the current thread
	@return true if the level constitutes a violation */
	virtual bool operator()(const latch_level_t) const = 0;
};
1060 | |
1061 | /** Check that no latch is being held. |
1062 | @tparam some_allowed whether some latches are allowed to be held */ |
1063 | template<bool some_allowed = false> |
1064 | struct sync_checker : public sync_check_functor_t |
1065 | { |
1066 | /** Check the latching constraints |
1067 | @param[in] level The level held by the thread |
1068 | @return whether a latch violation was detected */ |
1069 | bool operator()(const latch_level_t level) const |
1070 | { |
1071 | if (some_allowed) { |
1072 | switch (level) { |
1073 | case SYNC_RECV_WRITER: |
1074 | /* This only happens in |
1075 | recv_apply_hashed_log_recs. */ |
1076 | case SYNC_DICT: |
1077 | case SYNC_DICT_OPERATION: |
1078 | case SYNC_FTS_CACHE: |
1079 | case SYNC_NO_ORDER_CHECK: |
1080 | return(false); |
1081 | default: |
1082 | return(true); |
1083 | } |
1084 | } |
1085 | |
1086 | return(true); |
1087 | } |
1088 | }; |
1089 | |
1090 | /** The strict latch checker (no InnoDB latches may be held) */ |
1091 | typedef struct sync_checker<false> sync_check; |
1092 | /** The sloppy latch checker (can hold InnoDB dictionary or SQL latches) */ |
1093 | typedef struct sync_checker<true> dict_sync_check; |
1094 | |
/** Functor to check for given latching constraints. */
struct sync_allowed_latches : public sync_check_functor_t {

	/** Constructor
	@param[in]	from	first element in an array of latch_level_t
	@param[in]	to	last element in an array of latch_level_t */
	sync_allowed_latches(
		const latch_level_t*	from,
		const latch_level_t*	to)
		: begin(from), end(to) { }

	/** Checks whether the given latch level violates the latch
	constraint. This object maintains a list of allowed latch
	levels, and if the given latch belongs to a latch level that
	is not there in the allowed list, then it is a violation.

	@param[in]	level	The latch level to check
	@return true if there is a latch violation */
	bool operator()(const latch_level_t level) const
	{
		return(std::find(begin, end, level) == end);
	}

private:
	/** First element in an array of allowed latch levels */
	const latch_level_t* const begin;
	/** First element after the end of the array of allowed latch levels */
	const latch_level_t* const end;
};
1124 | |
/** Get the latch id from a latch name.
@param[in]	name	Latch name
@return the latch id if found, else LATCH_ID_NONE. */
1128 | latch_id_t |
1129 | sync_latch_get_id(const char* name); |
1130 | |
/** Bitmask of rw_lock_flag_t values */
typedef ulint rw_lock_flags_t;

/* Flags to specify lock types for rw_lock_own_flagged() */
enum rw_lock_flag_t {
	/** Shared (S) lock */
	RW_LOCK_FLAG_S = 1 << 0,
	/** Exclusive (X) lock */
	RW_LOCK_FLAG_X = 1 << 1,
	/** Shared-exclusive (SX) lock */
	RW_LOCK_FLAG_SX = 1 << 2
};
1139 | |
#endif /* UNIV_DEBUG */
1141 | |
1142 | #endif /* UNIV_INNOCHECKSUM */ |
1143 | |
/** Atomically add B to *A (ulint flavour; on 64-bit Windows ulint is
wider than long, hence the 64-bit primitive there).
@param[in,out]	A	counter to update
@param[in]	B	amount to add
@return the value of *A before the addition */
static inline ulint my_atomic_addlint(ulint *A, ulint B)
{
#ifdef _WIN64
	return ulint(my_atomic_add64((volatile int64*)A, B));
#else
	return ulint(my_atomic_addlong(A, B));
#endif
}
1152 | |
/** Atomically read *A (ulint flavour).
@param[in]	A	counter to read
@return the current value of *A */
static inline ulint my_atomic_loadlint(const ulint *A)
{
#ifdef _WIN64
	return ulint(my_atomic_load64((volatile int64*)A));
#else
	return ulint(my_atomic_loadlong(A));
#endif
}
1161 | |
/** Atomically add B to *A (signed lint flavour).
@param[in,out]	A	counter to update
@param[in]	B	amount to add
@return the value of *A before the addition */
static inline lint my_atomic_addlint(volatile lint *A, lint B)
{
#ifdef _WIN64
	return my_atomic_add64((volatile int64*)A, B);
#else
	return my_atomic_addlong(A, B);
#endif
}
1170 | |
/** Atomically read *A (signed lint flavour).
@param[in]	A	counter to read
@return the current value of *A */
static inline lint my_atomic_loadlint(const lint *A)
{
#ifdef _WIN64
	return lint(my_atomic_load64((volatile int64*)A));
#else
	return my_atomic_loadlong(A);
#endif
}
1179 | |
/** Atomically store B into *A.
@param[out]	A	counter to write
@param[in]	B	value to store */
static inline void my_atomic_storelint(ulint *A, ulint B)
{
#ifdef _WIN64
	my_atomic_store64((volatile int64*)A, B);
#else
	my_atomic_storelong(A, B);
#endif
}
1188 | |
/** Simple non-atomic counter aligned to CPU_LEVEL1_DCACHE_LINESIZE
(cache-line alignment avoids false sharing with neighbouring data)
@tparam Type the integer type of the counter */
template <typename Type>
struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) simple_counter
{
	/** Increment the counter
	@return the value after incrementing */
	Type inc() { return add(1); }
	/** Decrement the counter (adds ~0, relying on modular
	wrap-around; assumes Type is unsigned — TODO confirm)
	@return the value after decrementing */
	Type dec() { return add(Type(~0)); }

	/** Add to the counter
	@param[in]	i	amount to be added
	@return the value of the counter after adding */
	Type add(Type i) { return m_counter += i; }

	/** @return the value of the counter */
	operator Type() const { return m_counter; }

private:
	/** The counter. NOTE(review): not initialised here —
	presumably instances are zero-initialised by their containing
	object; verify at the declaration sites. */
	Type	m_counter;
};
1211 | |
/** Simple atomic counter aligned to CPU_LEVEL1_DCACHE_LINESIZE
(cache-line alignment avoids false sharing with neighbouring data)
@tparam Type lint or ulint */
template <typename Type = ulint>
struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) simple_atomic_counter
{
	/** Increment the counter
	@return the value before the increment */
	Type inc() { return add(1); }
	/** Decrement the counter (adds ~0, relying on modular
	wrap-around)
	@return the value before the decrement */
	Type dec() { return add(Type(~0)); }

	/** Add to the counter
	@param[in]	i	amount to be added
	@return the value of the counter before adding */
	Type add(Type i) { return my_atomic_addlint(&m_counter, i); }

	/** @return the value of the counter (non-atomic access)! */
	operator Type() const { return m_counter; }

private:
	/** The counter. NOTE(review): not initialised here —
	presumably instances are zero-initialised by their containing
	object; verify at the declaration sites. */
	Type	m_counter;
};
1234 | |
1235 | #endif /* sync0types_h */ |
1236 | |