1/*****************************************************************************
2
3Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved.
4Copyright (c) 2017, 2018, MariaDB Corporation.
5
6Portions of this file contain modifications contributed and copyrighted by
7Google, Inc. Those modifications are gratefully acknowledged and are described
8briefly in the InnoDB documentation. The contributions by Google are
9incorporated with their permission, and subject to the conditions contained in
10the file COPYING.Google.
11
12This program is free software; you can redistribute it and/or modify it under
13the terms of the GNU General Public License as published by the Free Software
14Foundation; version 2 of the License.
15
16This program is distributed in the hope that it will be useful, but WITHOUT
17ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
19
20You should have received a copy of the GNU General Public License along with
21this program; if not, write to the Free Software Foundation, Inc.,
2251 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
23
24*****************************************************************************/
25
26/**************************************************//**
27@file sync/sync0debug.cc
28Debug checks for latches.
29
30Created 2012-08-21 Sunny Bains
31*******************************************************/
32
33#include "sync0sync.h"
34#include "sync0debug.h"
35
36#include "ut0new.h"
37#include "srv0start.h"
38
39#include <map>
40#include <vector>
41#include <string>
42#include <algorithm>
43#include <iostream>
44
45#ifdef UNIV_DEBUG
46
47my_bool srv_sync_debug;
48
49/** The global mutex which protects debug info lists of all rw-locks.
50To modify the debug info list of an rw-lock, this mutex has to be
51acquired in addition to the mutex protecting the lock. */
52static SysMutex rw_lock_debug_mutex;
53
54/** The latch held by a thread */
55struct Latched {
56
57 /** Constructor */
58 Latched() : m_latch(), m_level(SYNC_UNKNOWN) { }
59
60 /** Constructor
61 @param[in] latch Latch instance
62 @param[in] level Level of latch held */
63 Latched(const latch_t* latch,
64 latch_level_t level)
65 :
66 m_latch(latch),
67 m_level(level)
68 {
69 /* No op */
70 }
71
72 /** @return the latch level */
73 latch_level_t get_level() const
74 {
75 return(m_level);
76 }
77
78 /** Check if the rhs latch and level match
79 @param[in] rhs instance to compare with
80 @return true on match */
81 bool operator==(const Latched& rhs) const
82 {
83 return(m_latch == rhs.m_latch && m_level == rhs.m_level);
84 }
85
86 /** The latch instance */
87 const latch_t* m_latch;
88
89 /** The latch level. For buffer blocks we can pass a separate latch
90 level to check against, see buf_block_dbg_add_level() */
91 latch_level_t m_level;
92};
93
/** Latches held by a thread, ordered by level in descending order. */
95typedef std::vector<Latched, ut_allocator<Latched> > Latches;
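/* Illustrative sketch, not part of the tracking code itself: the vector
behaves like a stack that mirrors the latching order. An entry is pushed
when a latch is granted and erased when it is released, so with a correct
latching order the levels from front() to back() are non-increasing, e.g.

	Latches	stack;				// hypothetical local example
	stack.push_back(Latched(purge_latch, SYNC_PURGE_LATCH));
	stack.push_back(Latched(rseg_mutex, SYNC_REDO_RSEG));
	stack.push_back(Latched(undo_page, SYNC_TRX_UNDO_PAGE));

purge_latch, rseg_mutex and undo_page are made-up latch_t pointers; the
levels are ones registered later in this file, and they must be acquired
in descending order, which is what LatchDebug::check_order() verifies. */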
96
97/** The deadlock detector. */
98struct LatchDebug {
99
100 /** Debug mutex for control structures, should not be tracked
101 by this module. */
102 typedef OSMutex Mutex;
103
104 /** Comparator for the ThreadMap. */
105 struct os_thread_id_less
106 : public std::binary_function<
107 os_thread_id_t,
108 os_thread_id_t,
109 bool>
110 {
111 /** @return true if lhs < rhs */
112 bool operator()(
113 const os_thread_id_t& lhs,
114 const os_thread_id_t& rhs) const
115 UNIV_NOTHROW
116 {
117 return(os_thread_pf(lhs) < os_thread_pf(rhs));
118 }
119 };
120
121 /** For tracking a thread's latches. */
122 typedef std::map<
123 os_thread_id_t,
124 Latches*,
125 os_thread_id_less,
126 ut_allocator<std::pair<const os_thread_id_t, Latches*> > >
127 ThreadMap;
128
129 /** Constructor */
130 LatchDebug()
131 UNIV_NOTHROW;
132
133 /** Destructor */
134 ~LatchDebug()
135 UNIV_NOTHROW
136 {
137 m_mutex.destroy();
138 }
139
	/** Look up the latches acquired by the current thread, optionally
	creating an empty entry if none exists yet.
142 @param[in] add add an empty entry if one is not
143 found (default no)
144 @return pointer to a thread's acquired latches. */
145 Latches* thread_latches(bool add = false)
146 UNIV_NOTHROW;
147
148 /** Check that all the latches already owned by a thread have a lower
149 level than limit.
150 @param[in] latches the thread's existing (acquired) latches
151 @param[in] limit to check against
	@return a Latched instance with a level <= limit, or NULL if there is none. */
153 const Latched* less(
154 const Latches* latches,
155 latch_level_t limit) const
156 UNIV_NOTHROW;
157
158 /** Checks if the level value exists in the thread's acquired latches.
159 @param[in] latches the thread's existing (acquired) latches
160 @param[in] level to lookup
161 @return latch if found or 0 */
162 const latch_t* find(
		const Latches*	latches,
164 latch_level_t level) const
165 UNIV_NOTHROW;
166
167 /**
168 Checks if the level value exists in the thread's acquired latches.
169 @param[in] level to lookup
170 @return latch if found or 0 */
171 const latch_t* find(latch_level_t level)
172 UNIV_NOTHROW;
173
174 /** Report error and abort.
175 @param[in] latches thread's existing latches
176 @param[in] latched The existing latch causing the
177 invariant to fail
178 @param[in] level The new level request that breaks
179 the order */
180 void crash(
181 const Latches* latches,
182 const Latched* latched,
183 latch_level_t level) const
184 UNIV_NOTHROW;
185
186 /** Do a basic ordering check.
187 @param[in] latches thread's existing latches
188 @param[in] requested_level Level requested by latch
	@param[in]	level		declared lint so that we can
190 do level - 1. The level of the
191 latch that the thread is trying
192 to acquire
193 @return true if passes, else crash with error message. */
194 inline bool basic_check(
195 const Latches* latches,
196 latch_level_t requested_level,
197 lint level) const
198 UNIV_NOTHROW;
199
200 /** Adds a latch and its level in the thread level array. Allocates
201 the memory for the array if called for the first time for this
202 OS thread. Makes the checks against other latch levels stored
203 in the array for this thread.
204
	@param[in]	latch	latch that the thread wants to acquire.
206 @param[in] level latch level to check against */
207 void lock_validate(
208 const latch_t* latch,
209 latch_level_t level)
210 UNIV_NOTHROW
211 {
212 /* Ignore diagnostic latches, starting with '.' */
213
214 if (*latch->get_name() != '.'
215 && latch->get_level() != SYNC_LEVEL_VARYING) {
216
217 ut_ad(level != SYNC_LEVEL_VARYING);
218
219 Latches* latches = check_order(latch, level);
220
221 ut_a(latches->empty()
222 || level == SYNC_LEVEL_VARYING
223 || level == SYNC_NO_ORDER_CHECK
224 || latches->back().get_level()
225 == SYNC_NO_ORDER_CHECK
226 || latches->back().m_latch->get_level()
227 == SYNC_LEVEL_VARYING
228 || latches->back().get_level() >= level);
229 }
230 }
231
	/** Note that a latch has been granted: add it and its level to the
	thread level array. Allocates the memory for the array if called
	for the first time for this OS thread.

	@param[in]	latch	latch that has been granted to the thread.
	@param[in]	level	level of the granted latch */
239 void lock_granted(
240 const latch_t* latch,
241 latch_level_t level)
242 UNIV_NOTHROW
243 {
244 /* Ignore diagnostic latches, starting with '.' */
245
246 if (*latch->get_name() != '.'
247 && latch->get_level() != SYNC_LEVEL_VARYING) {
248
249 Latches* latches = thread_latches(true);
250
251 latches->push_back(Latched(latch, level));
252 }
253 }
254
255 /** For recursive X rw-locks.
256 @param[in] latch The RW-Lock to relock */
257 void relock(const latch_t* latch)
258 UNIV_NOTHROW
259 {
260 ut_a(latch->m_rw_lock);
261
262 latch_level_t level = latch->get_level();
263
264 /* Ignore diagnostic latches, starting with '.' */
265
266 if (*latch->get_name() != '.'
267 && latch->get_level() != SYNC_LEVEL_VARYING) {
268
269 Latches* latches = thread_latches(true);
270
271 Latches::iterator it = std::find(
272 latches->begin(), latches->end(),
273 Latched(latch, level));
274
275 ut_a(latches->empty()
276 || level == SYNC_LEVEL_VARYING
277 || level == SYNC_NO_ORDER_CHECK
278 || latches->back().m_latch->get_level()
279 == SYNC_LEVEL_VARYING
280 || latches->back().m_latch->get_level()
281 == SYNC_NO_ORDER_CHECK
282 || latches->back().get_level() >= level
283 || it != latches->end());
284
285 if (it == latches->end()) {
286 latches->push_back(Latched(latch, level));
287 } else {
288 latches->insert(it, Latched(latch, level));
289 }
290 }
291 }
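	/* Illustrative sketch (hypothetical call sequence, not taken from
	this file): relock() handles the case where the same thread takes a
	recursive X lock on an rw-lock that it already holds, e.g.

		rw_lock_x_lock(&some_index_lock);	// first acquisition
		rw_lock_x_lock(&some_index_lock);	// recursive acquisition by
							// the same thread; the debug
							// layer records it through
							// sync_check_relock()

	In that case the entry is inserted next to the existing one rather
	than being order-checked again, since the thread already owns the
	lock. some_index_lock is a made-up rw_lock_t used only here. */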
292
293 /** Iterate over a thread's latches.
294 @param[in] functor The callback
	@return true if the functor returns true for any latch. */
296 bool for_each(const sync_check_functor_t& functor)
297 UNIV_NOTHROW
298 {
299 if (const Latches* latches = thread_latches()) {
300 Latches::const_iterator end = latches->end();
301 for (Latches::const_iterator it = latches->begin();
302 it != end; ++it) {
303
304 if (functor(it->m_level)) {
305 return(true);
306 }
307 }
308 }
309
310 return(false);
311 }
312
	/** Removes a latch from the thread level array if it is found there.
	It is not an error if the latch is not found, as we presently are not
	able to determine the level for every latch reservation the program
	does.
	@param[in]	latch	The latch that was released */
318 void unlock(const latch_t* latch) UNIV_NOTHROW;
319
320 /** Get the level name
321 @param[in] level The level ID to lookup
322 @return level name */
323 const std::string& get_level_name(latch_level_t level) const
324 UNIV_NOTHROW
325 {
326 Levels::const_iterator it = m_levels.find(level);
327
328 ut_ad(it != m_levels.end());
329
330 return(it->second);
331 }
332
333 /** Initialise the debug data structures */
334 static void init()
335 UNIV_NOTHROW;
336
337 /** Shutdown the latch debug checking */
338 static void shutdown()
339 UNIV_NOTHROW;
340
341 /** @return the singleton instance */
342 static LatchDebug* instance()
343 UNIV_NOTHROW
344 {
345 return(s_instance);
346 }
347
348 /** Create the singleton instance */
349 static void create_instance()
350 UNIV_NOTHROW
351 {
352 ut_ad(s_instance == NULL);
353
354 s_instance = UT_NEW_NOKEY(LatchDebug());
355 }
356
357private:
358 /** Disable copying */
359 LatchDebug(const LatchDebug&);
360 LatchDebug& operator=(const LatchDebug&);
361
362 /** Adds a latch and its level in the thread level array. Allocates
	the memory for the array if called for the first time for this OS
	thread.
364 Makes the checks against other latch levels stored in the array
365 for this thread.
366
367 @param[in] latch pointer to a mutex or an rw-lock
368 @param[in] level level in the latching order
369 @return the thread's latches */
370 Latches* check_order(
371 const latch_t* latch,
372 latch_level_t level)
373 UNIV_NOTHROW;
374
375 /** Print the latches acquired by a thread
376 @param[in] latches Latches acquired by a thread */
377 void print_latches(const Latches* latches) const
378 UNIV_NOTHROW;
379
380 /** Special handling for the RTR mutexes. We need to add proper
381 levels for them if possible.
382 @param[in] latch Latch to check
	@return true if it is an RTR_* mutex */
384 bool is_rtr_mutex(const latch_t* latch) const
385 UNIV_NOTHROW
386 {
387 return(latch->get_id() == LATCH_ID_RTR_ACTIVE_MUTEX
388 || latch->get_id() == LATCH_ID_RTR_PATH_MUTEX
389 || latch->get_id() == LATCH_ID_RTR_MATCH_MUTEX
390 || latch->get_id() == LATCH_ID_RTR_SSN_MUTEX);
391 }
392
393private:
	/** Comparator for the Levels map. */
395 struct latch_level_less
396 : public std::binary_function<
397 latch_level_t,
398 latch_level_t,
399 bool>
400 {
401 /** @return true if lhs < rhs */
402 bool operator()(
403 const latch_level_t& lhs,
404 const latch_level_t& rhs) const
405 UNIV_NOTHROW
406 {
407 return(lhs < rhs);
408 }
409 };
410
411 typedef std::map<
412 latch_level_t,
413 std::string,
414 latch_level_less,
415 ut_allocator<std::pair<const latch_level_t, std::string> > >
416 Levels;
417
418 /** Mutex protecting the deadlock detector data structures. */
419 Mutex m_mutex;
420
421 /** Thread specific data. Protected by m_mutex. */
422 ThreadMap m_threads;
423
	/** Mapping from latch level to its string representation. */
425 Levels m_levels;
426
427 /** The singleton instance. Must be created in single threaded mode. */
428 static LatchDebug* s_instance;
429
430public:
431 /** For checking whether this module has been initialised or not. */
432 static bool s_initialized;
433};
434
/** The latch order checking infrastructure */
436LatchDebug* LatchDebug::s_instance = NULL;
437bool LatchDebug::s_initialized = false;
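/* Illustrative sketch (assumed call flow; the actual call sites live in the
mutex/rw-lock policy code, not in this file): acquiring a tracked latch in a
debug build is expected to pass through this module roughly as

	mutex_enter(&dict_sys->mutex);
		-> sync_check_lock_validate()	// check_order() against held latches
		-> sync_check_lock_granted()	// push a Latched entry for this thread
	...
	mutex_exit(&dict_sys->mutex);
		-> sync_check_unlock()		// erase the Latched entry again

dict_sys->mutex is only an example; it is registered later in this file via
LATCH_ADD_MUTEX(DICT_SYS, SYNC_DICT, ...). */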
438
439#define LEVEL_MAP_INSERT(T) \
440do { \
441 std::pair<Levels::iterator, bool> result = \
442 m_levels.insert(Levels::value_type(T, #T)); \
443 ut_ad(result.second); \
444} while(0)
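/* For illustration, LEVEL_MAP_INSERT(SYNC_MUTEX) expands to roughly:

	do {
		std::pair<Levels::iterator, bool> result =
			m_levels.insert(Levels::value_type(
				SYNC_MUTEX, "SYNC_MUTEX"));
		ut_ad(result.second);
	} while(0)

i.e. each level enum value is mapped to its own spelling via the # stringizing
operator, and the ut_ad() asserts that no level is inserted twice. */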
445
/** Set up the mapping from level ID to level name */
447LatchDebug::LatchDebug()
448{
449 m_mutex.init();
450
451 LEVEL_MAP_INSERT(SYNC_UNKNOWN);
452 LEVEL_MAP_INSERT(SYNC_MUTEX);
453 LEVEL_MAP_INSERT(RW_LOCK_SX);
454 LEVEL_MAP_INSERT(RW_LOCK_X_WAIT);
455 LEVEL_MAP_INSERT(RW_LOCK_S);
456 LEVEL_MAP_INSERT(RW_LOCK_X);
457 LEVEL_MAP_INSERT(RW_LOCK_NOT_LOCKED);
458 LEVEL_MAP_INSERT(SYNC_MONITOR_MUTEX);
459 LEVEL_MAP_INSERT(SYNC_ANY_LATCH);
460 LEVEL_MAP_INSERT(SYNC_DOUBLEWRITE);
461 LEVEL_MAP_INSERT(SYNC_BUF_FLUSH_LIST);
462 LEVEL_MAP_INSERT(SYNC_BUF_BLOCK);
463 LEVEL_MAP_INSERT(SYNC_BUF_PAGE_HASH);
464 LEVEL_MAP_INSERT(SYNC_BUF_POOL);
465 LEVEL_MAP_INSERT(SYNC_POOL);
466 LEVEL_MAP_INSERT(SYNC_POOL_MANAGER);
467 LEVEL_MAP_INSERT(SYNC_SEARCH_SYS);
468 LEVEL_MAP_INSERT(SYNC_WORK_QUEUE);
469 LEVEL_MAP_INSERT(SYNC_FTS_TOKENIZE);
470 LEVEL_MAP_INSERT(SYNC_FTS_OPTIMIZE);
471 LEVEL_MAP_INSERT(SYNC_FTS_BG_THREADS);
472 LEVEL_MAP_INSERT(SYNC_FTS_CACHE_INIT);
473 LEVEL_MAP_INSERT(SYNC_RECV);
474 LEVEL_MAP_INSERT(SYNC_LOG_FLUSH_ORDER);
475 LEVEL_MAP_INSERT(SYNC_LOG);
476 LEVEL_MAP_INSERT(SYNC_LOG_WRITE);
477 LEVEL_MAP_INSERT(SYNC_PAGE_CLEANER);
478 LEVEL_MAP_INSERT(SYNC_PURGE_QUEUE);
479 LEVEL_MAP_INSERT(SYNC_TRX_SYS_HEADER);
480 LEVEL_MAP_INSERT(SYNC_REC_LOCK);
481 LEVEL_MAP_INSERT(SYNC_THREADS);
482 LEVEL_MAP_INSERT(SYNC_TRX);
483 LEVEL_MAP_INSERT(SYNC_RW_TRX_HASH_ELEMENT);
484 LEVEL_MAP_INSERT(SYNC_TRX_SYS);
485 LEVEL_MAP_INSERT(SYNC_LOCK_SYS);
486 LEVEL_MAP_INSERT(SYNC_LOCK_WAIT_SYS);
487 LEVEL_MAP_INSERT(SYNC_INDEX_ONLINE_LOG);
488 LEVEL_MAP_INSERT(SYNC_IBUF_BITMAP);
489 LEVEL_MAP_INSERT(SYNC_IBUF_BITMAP_MUTEX);
490 LEVEL_MAP_INSERT(SYNC_IBUF_TREE_NODE);
491 LEVEL_MAP_INSERT(SYNC_IBUF_TREE_NODE_NEW);
492 LEVEL_MAP_INSERT(SYNC_IBUF_INDEX_TREE);
493 LEVEL_MAP_INSERT(SYNC_IBUF_MUTEX);
494 LEVEL_MAP_INSERT(SYNC_FSP_PAGE);
495 LEVEL_MAP_INSERT(SYNC_FSP);
496 LEVEL_MAP_INSERT(SYNC_EXTERN_STORAGE);
497 LEVEL_MAP_INSERT(SYNC_TRX_UNDO_PAGE);
498 LEVEL_MAP_INSERT(SYNC_RSEG_HEADER);
499 LEVEL_MAP_INSERT(SYNC_RSEG_HEADER_NEW);
500 LEVEL_MAP_INSERT(SYNC_NOREDO_RSEG);
501 LEVEL_MAP_INSERT(SYNC_REDO_RSEG);
502 LEVEL_MAP_INSERT(SYNC_PURGE_LATCH);
503 LEVEL_MAP_INSERT(SYNC_TREE_NODE);
504 LEVEL_MAP_INSERT(SYNC_TREE_NODE_FROM_HASH);
505 LEVEL_MAP_INSERT(SYNC_TREE_NODE_NEW);
506 LEVEL_MAP_INSERT(SYNC_INDEX_TREE);
507 LEVEL_MAP_INSERT(SYNC_IBUF_PESS_INSERT_MUTEX);
508 LEVEL_MAP_INSERT(SYNC_IBUF_HEADER);
509 LEVEL_MAP_INSERT(SYNC_DICT_HEADER);
510 LEVEL_MAP_INSERT(SYNC_STATS_AUTO_RECALC);
511 LEVEL_MAP_INSERT(SYNC_DICT_AUTOINC_MUTEX);
512 LEVEL_MAP_INSERT(SYNC_DICT);
513 LEVEL_MAP_INSERT(SYNC_FTS_CACHE);
514 LEVEL_MAP_INSERT(SYNC_DICT_OPERATION);
515 LEVEL_MAP_INSERT(SYNC_TRX_I_S_LAST_READ);
516 LEVEL_MAP_INSERT(SYNC_TRX_I_S_RWLOCK);
517 LEVEL_MAP_INSERT(SYNC_RECV_WRITER);
518 LEVEL_MAP_INSERT(SYNC_LEVEL_VARYING);
519 LEVEL_MAP_INSERT(SYNC_NO_ORDER_CHECK);
520
521 /* Enum count starts from 0 */
522 ut_ad(m_levels.size() == SYNC_LEVEL_MAX + 1);
523}
524
525/** Print the latches acquired by a thread
526@param[in] latches Latches acquired by a thread */
527void
528LatchDebug::print_latches(const Latches* latches) const
529 UNIV_NOTHROW
530{
531 ib::error() << "Latches already owned by this thread: ";
532
533 Latches::const_iterator end = latches->end();
534
535 for (Latches::const_iterator it = latches->begin();
536 it != end;
537 ++it) {
538
539 ib::error()
540 << sync_latch_get_name(it->m_latch->get_id())
541 << " -> "
542 << it->m_level << " "
543 << "(" << get_level_name(it->m_level) << ")";
544 }
545}
546
547/** Report error and abort
548@param[in] latches thread's existing latches
549@param[in] latched The existing latch causing the invariant to fail
550@param[in] level The new level request that breaks the order */
551void
552LatchDebug::crash(
553 const Latches* latches,
554 const Latched* latched,
555 latch_level_t level) const
556 UNIV_NOTHROW
557{
558 const latch_t* latch = latched->m_latch;
559 const std::string& in_level_name = get_level_name(level);
560
561 const std::string& latch_level_name =
562 get_level_name(latched->m_level);
563
564 ib::error()
565 << "Thread " << os_thread_pf(os_thread_get_curr_id())
566 << " already owns a latch "
567 << sync_latch_get_name(latch->m_id) << " at level"
568 << " " << latched->m_level << " (" << latch_level_name
569 << " ), which is at a lower/same level than the"
570 << " requested latch: "
571 << level << " (" << in_level_name << "). "
572 << latch->to_string();
573
574 print_latches(latches);
575
576 ut_error;
577}
578
579/** Check that all the latches already owned by a thread have a lower
580level than limit.
581@param[in] latches the thread's existing (acquired) latches
582@param[in] limit to check against
@return a Latched instance with a level <= limit, or NULL if there is none. */
584const Latched*
585LatchDebug::less(
586 const Latches* latches,
587 latch_level_t limit) const
588 UNIV_NOTHROW
589{
590 Latches::const_iterator end = latches->end();
591
592 for (Latches::const_iterator it = latches->begin(); it != end; ++it) {
593
594 if (it->m_level <= limit) {
595 return(&(*it));
596 }
597 }
598
599 return(NULL);
600}
601
602/** Do a basic ordering check.
603@param[in] latches thread's existing latches
604@param[in] requested_level Level requested by latch
@param[in]	in_level	declared lint so that we can do level - 1.
606 The level of the latch that the thread is
607 trying to acquire
608@return true if passes, else crash with error message. */
609inline bool
610LatchDebug::basic_check(
611 const Latches* latches,
612 latch_level_t requested_level,
613 lint in_level) const
614 UNIV_NOTHROW
615{
616 latch_level_t level = latch_level_t(in_level);
617
618 ut_ad(level < SYNC_LEVEL_MAX);
619
620 const Latched* latched = less(latches, level);
621
622 if (latched != NULL) {
623 crash(latches, latched, requested_level);
624 return(false);
625 }
626
627 return(true);
628}
629
/** Look up the latches acquired by the current thread, optionally creating
an empty entry if none exists yet.
631@param[in] add add an empty entry if one is not found
632 (default no)
633@return pointer to a thread's acquired latches. */
634Latches*
635LatchDebug::thread_latches(bool add)
636 UNIV_NOTHROW
637{
638 m_mutex.enter();
639
640 os_thread_id_t thread_id = os_thread_get_curr_id();
641 ThreadMap::iterator lb = m_threads.lower_bound(thread_id);
642
643 if (lb != m_threads.end()
644 && !(m_threads.key_comp()(thread_id, lb->first))) {
645
646 Latches* latches = lb->second;
647
648 m_mutex.exit();
649
650 return(latches);
651
652 } else if (!add) {
653
654 m_mutex.exit();
655
656 return(NULL);
657
658 } else {
659 typedef ThreadMap::value_type value_type;
660
661 Latches* latches = UT_NEW_NOKEY(Latches());
662
663 ut_a(latches != NULL);
664
665 latches->reserve(32);
666
667 m_threads.insert(lb, value_type(thread_id, latches));
668
669 m_mutex.exit();
670
671 return(latches);
672 }
673}
674
675/** Checks if the level value exists in the thread's acquired latches.
@param[in]	latches	the thread's existing (acquired) latches
677@param[in] level to lookup
678@return latch if found or 0 */
679const latch_t*
680LatchDebug::find(
681 const Latches* latches,
682 latch_level_t level) const UNIV_NOTHROW
683{
684 Latches::const_iterator end = latches->end();
685
686 for (Latches::const_iterator it = latches->begin(); it != end; ++it) {
687
688 if (it->m_level == level) {
689
690 return(it->m_latch);
691 }
692 }
693
694 return(0);
695}
696
697/** Checks if the level value exists in the thread's acquired latches.
698@param[in] level The level to lookup
699@return latch if found or NULL */
700const latch_t*
701LatchDebug::find(latch_level_t level)
702 UNIV_NOTHROW
703{
704 return(find(thread_latches(), level));
705}
706
707/**
708Adds a latch and its level in the thread level array. Allocates the memory
for the array if called for the first time for this OS thread. Makes the checks
710against other latch levels stored in the array for this thread.
711@param[in] latch pointer to a mutex or an rw-lock
712@param[in] level level in the latching order
713@return the thread's latches */
714Latches*
715LatchDebug::check_order(
716 const latch_t* latch,
717 latch_level_t level)
718 UNIV_NOTHROW
719{
720 ut_ad(latch->get_level() != SYNC_LEVEL_VARYING);
721
722 Latches* latches = thread_latches(true);
723
724 /* NOTE that there is a problem with _NODE and _LEAF levels: if the
725 B-tree height changes, then a leaf can change to an internal node
726 or the other way around. We do not know at present if this can cause
727 unnecessary assertion failures below. */
728
729 switch (level) {
730 case SYNC_NO_ORDER_CHECK:
731 case SYNC_EXTERN_STORAGE:
732 case SYNC_TREE_NODE_FROM_HASH:
733 /* Do no order checking */
734 break;
735
736 case SYNC_TRX_SYS_HEADER:
737
738 if (srv_is_being_started) {
739 /* This is violated during trx_sys_create_rsegs()
			when creating additional rollback segments during
			upgrade in srv_start(). */
742 break;
743 }
744
745 /* Fall through */
746
747 case SYNC_MONITOR_MUTEX:
748 case SYNC_RECV:
749 case SYNC_FTS_BG_THREADS:
750 case SYNC_WORK_QUEUE:
751 case SYNC_FTS_TOKENIZE:
752 case SYNC_FTS_OPTIMIZE:
753 case SYNC_FTS_CACHE:
754 case SYNC_FTS_CACHE_INIT:
755 case SYNC_PAGE_CLEANER:
756 case SYNC_LOG:
757 case SYNC_LOG_WRITE:
758 case SYNC_LOG_FLUSH_ORDER:
759 case SYNC_DOUBLEWRITE:
760 case SYNC_SEARCH_SYS:
761 case SYNC_THREADS:
762 case SYNC_LOCK_SYS:
763 case SYNC_LOCK_WAIT_SYS:
764 case SYNC_RW_TRX_HASH_ELEMENT:
765 case SYNC_TRX_SYS:
766 case SYNC_IBUF_BITMAP_MUTEX:
767 case SYNC_REDO_RSEG:
768 case SYNC_NOREDO_RSEG:
769 case SYNC_PURGE_LATCH:
770 case SYNC_PURGE_QUEUE:
771 case SYNC_DICT_AUTOINC_MUTEX:
772 case SYNC_DICT_OPERATION:
773 case SYNC_DICT_HEADER:
774 case SYNC_TRX_I_S_RWLOCK:
775 case SYNC_TRX_I_S_LAST_READ:
776 case SYNC_IBUF_MUTEX:
777 case SYNC_INDEX_ONLINE_LOG:
778 case SYNC_STATS_AUTO_RECALC:
779 case SYNC_POOL:
780 case SYNC_POOL_MANAGER:
781 case SYNC_RECV_WRITER:
782
783 basic_check(latches, level, level);
784 break;
785
786 case SYNC_ANY_LATCH:
787
788 /* Temporary workaround for LATCH_ID_RTR_*_MUTEX */
789 if (is_rtr_mutex(latch)) {
790
791 const Latched* latched = less(latches, level);
792
793 if (latched == NULL
794 || (latched != NULL
795 && is_rtr_mutex(latched->m_latch))) {
796
797 /* No violation */
798 break;
799
800 }
801
802 crash(latches, latched, level);
803
804 } else {
805 basic_check(latches, level, level);
806 }
807
808 break;
809
810 case SYNC_TRX:
811
812 /* Either the thread must own the lock_sys.mutex, or
813 it is allowed to own only ONE trx_t::mutex. */
814
815 if (less(latches, level) != NULL) {
816 basic_check(latches, level, level - 1);
817 ut_a(find(latches, SYNC_LOCK_SYS) != 0);
818 }
819 break;
820
821 case SYNC_BUF_FLUSH_LIST:
822 case SYNC_BUF_POOL:
823
		/* We can have multiple mutexes of this type; therefore we
		can only check whether the greater-than condition holds. */
826
827 basic_check(latches, level, level - 1);
828 break;
829
830 case SYNC_BUF_PAGE_HASH:
831
832 /* Multiple page_hash locks are only allowed during
833 buf_validate and that is where buf_pool mutex is already
834 held. */
835
836 /* Fall through */
837
838 case SYNC_BUF_BLOCK:
839
840 /* Either the thread must own the (buffer pool) buf_pool->mutex
841 or it is allowed to latch only ONE of (buffer block)
842 block->mutex or buf_pool->zip_mutex. */
843
844 if (less(latches, level) != NULL) {
845 basic_check(latches, level, level - 1);
846 ut_a(find(latches, SYNC_BUF_POOL) != 0);
847 }
848 break;
849
850 case SYNC_REC_LOCK:
851
852 if (find(latches, SYNC_LOCK_SYS) != 0) {
853 basic_check(latches, level, SYNC_REC_LOCK - 1);
854 } else {
855 basic_check(latches, level, SYNC_REC_LOCK);
856 }
857 break;
858
859 case SYNC_IBUF_BITMAP:
860
861 /* Either the thread must own the master mutex to all
862 the bitmap pages, or it is allowed to latch only ONE
863 bitmap page. */
864
865 if (find(latches, SYNC_IBUF_BITMAP_MUTEX) != 0) {
866
867 basic_check(latches, level, SYNC_IBUF_BITMAP - 1);
868
869 } else if (!srv_is_being_started) {
870
871 /* This is violated during trx_sys_create_rsegs()
872 when creating additional rollback segments during
873 upgrade. */
874
875 basic_check(latches, level, SYNC_IBUF_BITMAP);
876 }
877 break;
878
879 case SYNC_FSP_PAGE:
880 ut_a(find(latches, SYNC_FSP) != 0);
881 break;
882
883 case SYNC_FSP:
884
885 ut_a(find(latches, SYNC_FSP) != 0
886 || basic_check(latches, level, SYNC_FSP));
887 break;
888
889 case SYNC_TRX_UNDO_PAGE:
890
891 /* Purge is allowed to read in as many UNDO pages as it likes.
892 The purge thread can read the UNDO pages without any covering
893 mutex. */
894
895 ut_a(find(latches, SYNC_REDO_RSEG) != 0
896 || find(latches, SYNC_NOREDO_RSEG) != 0
897 || basic_check(latches, level, level - 1));
898 break;
899
900 case SYNC_RSEG_HEADER:
901
902 ut_a(find(latches, SYNC_REDO_RSEG) != 0
903 || find(latches, SYNC_NOREDO_RSEG) != 0);
904 break;
905
906 case SYNC_RSEG_HEADER_NEW:
907
908 ut_a(find(latches, SYNC_FSP_PAGE) != 0);
909 break;
910
911 case SYNC_TREE_NODE:
912
913 {
914 const latch_t* fsp_latch;
915
916 fsp_latch = find(latches, SYNC_FSP);
917
918 ut_a((fsp_latch != NULL
919 && fsp_latch->is_temp_fsp())
920 || find(latches, SYNC_INDEX_TREE) != 0
921 || find(latches, SYNC_DICT_OPERATION)
922 || basic_check(latches,
923 level, SYNC_TREE_NODE - 1));
924 }
925
926 break;
927
928 case SYNC_TREE_NODE_NEW:
929
930 ut_a(find(latches, SYNC_FSP_PAGE) != 0);
931 break;
932
933 case SYNC_INDEX_TREE:
934
935 basic_check(latches, level, SYNC_TREE_NODE - 1);
936 break;
937
938 case SYNC_IBUF_TREE_NODE:
939
940 ut_a(find(latches, SYNC_IBUF_INDEX_TREE) != 0
941 || basic_check(latches, level, SYNC_IBUF_TREE_NODE - 1));
942 break;
943
944 case SYNC_IBUF_TREE_NODE_NEW:
945
946 /* ibuf_add_free_page() allocates new pages for the change
947 buffer while only holding the tablespace x-latch. These
948 pre-allocated new pages may only be used while holding
949 ibuf_mutex, in btr_page_alloc_for_ibuf(). */
950
951 ut_a(find(latches, SYNC_IBUF_MUTEX) != 0
952 || find(latches, SYNC_FSP) != 0);
953 break;
954
955 case SYNC_IBUF_INDEX_TREE:
956
957 if (find(latches, SYNC_FSP) != 0) {
958 basic_check(latches, level, level - 1);
959 } else {
960 basic_check(latches, level, SYNC_IBUF_TREE_NODE - 1);
961 }
962 break;
963
964 case SYNC_IBUF_PESS_INSERT_MUTEX:
965
966 basic_check(latches, level, SYNC_FSP - 1);
967 ut_a(find(latches, SYNC_IBUF_MUTEX) == 0);
968 break;
969
970 case SYNC_IBUF_HEADER:
971
972 basic_check(latches, level, SYNC_FSP - 1);
973 ut_a(find(latches, SYNC_IBUF_MUTEX) == NULL);
974 ut_a(find(latches, SYNC_IBUF_PESS_INSERT_MUTEX) == NULL);
975 break;
976
977 case SYNC_DICT:
978 basic_check(latches, level, SYNC_DICT);
979 break;
980
981 case SYNC_MUTEX:
982 case SYNC_UNKNOWN:
983 case SYNC_LEVEL_VARYING:
984 case RW_LOCK_X:
985 case RW_LOCK_X_WAIT:
986 case RW_LOCK_S:
987 case RW_LOCK_SX:
988 case RW_LOCK_NOT_LOCKED:
989 /* These levels should never be set for a latch. */
990 ut_error;
991 break;
992 }
993
994 return(latches);
995}
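/* Illustrative sketch of the kind of violation check_order() catches (the
call sequence is hypothetical, not taken from this file):

	mutex_enter(&fil_system->mutex);	// registered later in this file
						// with level SYNC_ANY_LATCH (low)
	mutex_enter(&dict_sys->mutex);		// registered later in this file
						// with level SYNC_DICT (higher)

For the second request, less() finds the already-held fil_system mutex, whose
level is <= SYNC_DICT, so basic_check() calls crash(), which prints both
latches and asserts. Acquiring the two mutexes in the opposite order satisfies
the latching order and passes. */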
996
/** Removes a latch from the thread level array if it is found there.
It is not an error if the latch is not found, as we presently are not able
to determine the level for every latch reservation the program does.
@param[in]	latch	The latch that was released/unlocked */
1003void
1004LatchDebug::unlock(const latch_t* latch)
1005 UNIV_NOTHROW
1006{
1007 if (latch->get_level() == SYNC_LEVEL_VARYING) {
1008 // We don't have varying level mutexes
1009 ut_ad(latch->m_rw_lock);
1010 }
1011
1012 Latches* latches;
1013
1014 if (*latch->get_name() == '.') {
1015
1016 /* Ignore diagnostic latches, starting with '.' */
1017
1018 } else if ((latches = thread_latches()) != NULL) {
1019
1020 Latches::reverse_iterator rend = latches->rend();
1021
1022 for (Latches::reverse_iterator it = latches->rbegin();
1023 it != rend;
1024 ++it) {
1025
1026 if (it->m_latch != latch) {
1027
1028 continue;
1029 }
1030
1031 Latches::iterator i = it.base();
1032
1033 latches->erase(--i);
1034
1035 /* If this thread doesn't own any more
1036 latches remove from the map.
1037
1038 FIXME: Perhaps use the master thread
1039 to do purge. Or, do it from close connection.
1040 This could be expensive. */
1041
1042 if (latches->empty()) {
1043
1044 m_mutex.enter();
1045
1046 os_thread_id_t thread_id;
1047
1048 thread_id = os_thread_get_curr_id();
1049
1050 m_threads.erase(thread_id);
1051
1052 m_mutex.exit();
1053
1054 UT_DELETE(latches);
1055 }
1056
1057 return;
1058 }
1059
1060 if (latch->get_level() != SYNC_LEVEL_VARYING) {
1061 ib::error()
1062 << "Couldn't find latch "
1063 << sync_latch_get_name(latch->get_id());
1064
1065 print_latches(latches);
1066
1067 /** Must find the latch. */
1068 ut_error;
1069 }
1070 }
1071}
1072
1073/** Get the latch id from a latch name.
1074@param[in] name Latch name
1075@return latch id if found else LATCH_ID_NONE. */
1076latch_id_t
1077sync_latch_get_id(const char* name)
1078{
1079 LatchMetaData::const_iterator end = latch_meta.end();
1080
1081 /* Linear scan should be OK, this should be extremely rare. */
1082
1083 for (LatchMetaData::const_iterator it = latch_meta.begin();
1084 it != end;
1085 ++it) {
1086
1087 if (*it == NULL || (*it)->get_id() == LATCH_ID_NONE) {
1088
1089 continue;
1090
1091 } else if (strcmp((*it)->get_name(), name) == 0) {
1092
1093 return((*it)->get_id());
1094 }
1095 }
1096
1097 return(LATCH_ID_NONE);
1098}
1099
1100/** Get the latch name from a sync level
1101@param[in] level Latch level to lookup
@return latch name, or NULL if not found. */
1103const char*
1104sync_latch_get_name(latch_level_t level)
1105{
1106 LatchMetaData::const_iterator end = latch_meta.end();
1107
1108 /* Linear scan should be OK, this should be extremely rare. */
1109
1110 for (LatchMetaData::const_iterator it = latch_meta.begin();
1111 it != end;
1112 ++it) {
1113
1114 if (*it == NULL || (*it)->get_id() == LATCH_ID_NONE) {
1115
1116 continue;
1117
1118 } else if ((*it)->get_level() == level) {
1119
1120 return((*it)->get_name());
1121 }
1122 }
1123
1124 return(0);
1125}
1126
1127/** Check if it is OK to acquire the latch.
1128@param[in] latch latch type */
1129void
1130sync_check_lock_validate(const latch_t* latch)
1131{
1132 if (LatchDebug::instance() != NULL) {
1133 LatchDebug::instance()->lock_validate(
1134 latch, latch->get_level());
1135 }
1136}
1137
1138/** Note that the lock has been granted
1139@param[in] latch latch type */
1140void
1141sync_check_lock_granted(const latch_t* latch)
1142{
1143 if (LatchDebug::instance() != NULL) {
1144 LatchDebug::instance()->lock_granted(latch, latch->get_level());
1145 }
1146}
1147
1148/** Check if it is OK to acquire the latch.
1149@param[in] latch latch type
1150@param[in] level Latch level */
1151void
1152sync_check_lock(
1153 const latch_t* latch,
1154 latch_level_t level)
1155{
1156 if (LatchDebug::instance() != NULL) {
1157
1158 ut_ad(latch->get_level() == SYNC_LEVEL_VARYING);
1159 ut_ad(latch->get_id() == LATCH_ID_BUF_BLOCK_LOCK);
1160
1161 LatchDebug::instance()->lock_validate(latch, level);
1162 LatchDebug::instance()->lock_granted(latch, level);
1163 }
1164}
1165
1166/** Check if it is OK to re-acquire the lock.
1167@param[in] latch RW-LOCK to relock (recursive X locks) */
1168void
1169sync_check_relock(const latch_t* latch)
1170{
1171 if (LatchDebug::instance() != NULL) {
1172 LatchDebug::instance()->relock(latch);
1173 }
1174}
1175
1176/** Removes a latch from the thread level array if it is found there.
1177@param[in] latch The latch to unlock */
1178void
1179sync_check_unlock(const latch_t* latch)
1180{
1181 if (LatchDebug::instance() != NULL) {
1182 LatchDebug::instance()->unlock(latch);
1183 }
1184}
1185
1186/** Checks if the level array for the current thread contains a
1187mutex or rw-latch at the specified level.
1188@param[in] level to find
1189@return a matching latch, or NULL if not found */
1190const latch_t*
1191sync_check_find(latch_level_t level)
1192{
1193 if (LatchDebug::instance() != NULL) {
1194 return(LatchDebug::instance()->find(level));
1195 }
1196
1197 return(NULL);
1198}
1199
1200/** Iterate over the thread's latches.
1201@param[in,out] functor called for each element.
1202@return true if the functor returns true for any element */
1203bool
1204sync_check_iterate(const sync_check_functor_t& functor)
1205{
1206 if (LatchDebug* debug = LatchDebug::instance()) {
1207 return(debug->for_each(functor));
1208 }
1209
1210 return(false);
1211}
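/* Illustrative sketch: callers typically use this to assert that the current
thread holds no unexpected latches, e.g. (the functor type is assumed to be
declared in sync0types.h; the exact functor used varies by call site):

	ut_ad(!sync_check_iterate(sync_check()));

The functor is invoked once per held latch level, and the iteration stops as
soon as it returns true. */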
1212
1213/** Enable sync order checking.
1214
1215Note: We don't enforce any synchronisation checks. The caller must ensure
1216that no races can occur */
1217void
1218sync_check_enable()
1219{
1220 if (!srv_sync_debug) {
1221
1222 return;
1223 }
1224
1225 /* We should always call this before we create threads. */
1226
1227 LatchDebug::create_instance();
1228}
1229
1230/** Initialise the debug data structures */
1231void
1232LatchDebug::init()
1233 UNIV_NOTHROW
1234{
1235 mutex_create(LATCH_ID_RW_LOCK_DEBUG, &rw_lock_debug_mutex);
1236}
1237
1238/** Shutdown the latch debug checking
1239
1240Note: We don't enforce any synchronisation checks. The caller must ensure
1241that no races can occur */
1242void
1243LatchDebug::shutdown()
1244 UNIV_NOTHROW
1245{
1246 mutex_free(&rw_lock_debug_mutex);
1247
1248 ut_a(s_initialized);
1249
1250 s_initialized = false;
1251
1252 UT_DELETE(s_instance);
1253
1254 LatchDebug::s_instance = NULL;
1255}
1256
1257/** Acquires the debug mutex. We cannot use the mutex defined in sync0sync,
1258because the debug mutex is also acquired in sync0arr while holding the OS
1259mutex protecting the sync array, and the ordinary mutex_enter might
1260recursively call routines in sync0arr, leading to a deadlock on the OS
1261mutex. */
1262void
1263rw_lock_debug_mutex_enter()
1264{
1265 mutex_enter(&rw_lock_debug_mutex);
1266}
1267
1268/** Releases the debug mutex. */
1269void
1270rw_lock_debug_mutex_exit()
1271{
1272 mutex_exit(&rw_lock_debug_mutex);
1273}
1274#endif /* UNIV_DEBUG */
1275
/* Meta data for all the InnoDB latches. If a latch is not recorded
here then it will not be considered for deadlock checks. */
1278LatchMetaData latch_meta;
1279
1280/** Load the latch meta data. */
1281static
1282void
1283sync_latch_meta_init()
1284 UNIV_NOTHROW
1285{
1286 latch_meta.resize(LATCH_ID_MAX);
1287
	/* The latches should be ordered on latch_id_t, so that we can
	index directly into the vector to update and fetch meta-data. */
1290
1291 LATCH_ADD_MUTEX(AUTOINC, SYNC_DICT_AUTOINC_MUTEX, autoinc_mutex_key);
1292
1293#if defined PFS_SKIP_BUFFER_MUTEX_RWLOCK || defined PFS_GROUP_BUFFER_SYNC
1294 LATCH_ADD_MUTEX(BUF_BLOCK_MUTEX, SYNC_BUF_BLOCK, PFS_NOT_INSTRUMENTED);
1295#else
1296 LATCH_ADD_MUTEX(BUF_BLOCK_MUTEX, SYNC_BUF_BLOCK,
1297 buffer_block_mutex_key);
1298#endif /* PFS_SKIP_BUFFER_MUTEX_RWLOCK || PFS_GROUP_BUFFER_SYNC */
1299
1300 LATCH_ADD_MUTEX(BUF_POOL, SYNC_BUF_POOL, buf_pool_mutex_key);
1301
1302 LATCH_ADD_MUTEX(BUF_POOL_ZIP, SYNC_BUF_BLOCK, buf_pool_zip_mutex_key);
1303
1304 LATCH_ADD_MUTEX(CACHE_LAST_READ, SYNC_TRX_I_S_LAST_READ,
1305 cache_last_read_mutex_key);
1306
1307 LATCH_ADD_MUTEX(DICT_FOREIGN_ERR, SYNC_NO_ORDER_CHECK,
1308 dict_foreign_err_mutex_key);
1309
1310 LATCH_ADD_MUTEX(DICT_SYS, SYNC_DICT, dict_sys_mutex_key);
1311
1312 LATCH_ADD_MUTEX(FIL_SYSTEM, SYNC_ANY_LATCH, fil_system_mutex_key);
1313
1314 LATCH_ADD_MUTEX(FLUSH_LIST, SYNC_BUF_FLUSH_LIST, flush_list_mutex_key);
1315
1316 LATCH_ADD_MUTEX(FTS_BG_THREADS, SYNC_FTS_BG_THREADS,
1317 fts_bg_threads_mutex_key);
1318
1319 LATCH_ADD_MUTEX(FTS_DELETE, SYNC_FTS_OPTIMIZE, fts_delete_mutex_key);
1320
1321 LATCH_ADD_MUTEX(FTS_OPTIMIZE, SYNC_FTS_OPTIMIZE,
1322 fts_optimize_mutex_key);
1323
1324 LATCH_ADD_MUTEX(FTS_DOC_ID, SYNC_FTS_OPTIMIZE, fts_doc_id_mutex_key);
1325
1326 LATCH_ADD_MUTEX(FTS_PLL_TOKENIZE, SYNC_FTS_TOKENIZE,
1327 fts_pll_tokenize_mutex_key);
1328
1329 LATCH_ADD_MUTEX(HASH_TABLE_MUTEX, SYNC_BUF_PAGE_HASH,
1330 hash_table_mutex_key);
1331
1332 LATCH_ADD_MUTEX(IBUF_BITMAP, SYNC_IBUF_BITMAP_MUTEX,
1333 ibuf_bitmap_mutex_key);
1334
1335 LATCH_ADD_MUTEX(IBUF, SYNC_IBUF_MUTEX, ibuf_mutex_key);
1336
1337 LATCH_ADD_MUTEX(IBUF_PESSIMISTIC_INSERT, SYNC_IBUF_PESS_INSERT_MUTEX,
1338 ibuf_pessimistic_insert_mutex_key);
1339
1340 LATCH_ADD_MUTEX(LOG_SYS, SYNC_LOG, log_sys_mutex_key);
1341
1342 LATCH_ADD_MUTEX(LOG_WRITE, SYNC_LOG_WRITE, log_sys_write_mutex_key);
1343
1344 LATCH_ADD_MUTEX(LOG_FLUSH_ORDER, SYNC_LOG_FLUSH_ORDER,
1345 log_flush_order_mutex_key);
1346
1347 LATCH_ADD_MUTEX(MUTEX_LIST, SYNC_NO_ORDER_CHECK, mutex_list_mutex_key);
1348
1349 LATCH_ADD_MUTEX(PAGE_CLEANER, SYNC_PAGE_CLEANER,
1350 page_cleaner_mutex_key);
1351
1352 LATCH_ADD_MUTEX(PURGE_SYS_PQ, SYNC_PURGE_QUEUE,
1353 purge_sys_pq_mutex_key);
1354
1355 LATCH_ADD_MUTEX(RECALC_POOL, SYNC_STATS_AUTO_RECALC,
1356 recalc_pool_mutex_key);
1357
1358 LATCH_ADD_MUTEX(RECV_SYS, SYNC_RECV, recv_sys_mutex_key);
1359
1360 LATCH_ADD_MUTEX(RECV_WRITER, SYNC_RECV_WRITER, recv_writer_mutex_key);
1361
1362 LATCH_ADD_MUTEX(REDO_RSEG, SYNC_REDO_RSEG, redo_rseg_mutex_key);
1363
1364 LATCH_ADD_MUTEX(NOREDO_RSEG, SYNC_NOREDO_RSEG, noredo_rseg_mutex_key);
1365
1366#ifdef UNIV_DEBUG
1367 /* Mutex names starting with '.' are not tracked. They are assumed
1368 to be diagnostic mutexes used in debugging. */
1369 latch_meta[LATCH_ID_RW_LOCK_DEBUG] =
1370 LATCH_ADD_MUTEX(RW_LOCK_DEBUG,
1371 SYNC_NO_ORDER_CHECK,
1372 rw_lock_debug_mutex_key);
1373#endif /* UNIV_DEBUG */
1374
1375 LATCH_ADD_MUTEX(RTR_SSN_MUTEX, SYNC_ANY_LATCH, rtr_ssn_mutex_key);
1376
1377 LATCH_ADD_MUTEX(RTR_ACTIVE_MUTEX, SYNC_ANY_LATCH,
1378 rtr_active_mutex_key);
1379
1380 LATCH_ADD_MUTEX(RTR_MATCH_MUTEX, SYNC_ANY_LATCH, rtr_match_mutex_key);
1381
1382 LATCH_ADD_MUTEX(RTR_PATH_MUTEX, SYNC_ANY_LATCH, rtr_path_mutex_key);
1383
1384 LATCH_ADD_MUTEX(RW_LOCK_LIST, SYNC_NO_ORDER_CHECK,
1385 rw_lock_list_mutex_key);
1386
1387 LATCH_ADD_MUTEX(RW_LOCK_MUTEX, SYNC_NO_ORDER_CHECK, rw_lock_mutex_key);
1388
1389 LATCH_ADD_MUTEX(SRV_INNODB_MONITOR, SYNC_NO_ORDER_CHECK,
1390 srv_innodb_monitor_mutex_key);
1391
1392 LATCH_ADD_MUTEX(SRV_MISC_TMPFILE, SYNC_ANY_LATCH,
1393 srv_misc_tmpfile_mutex_key);
1394
1395 LATCH_ADD_MUTEX(SRV_MONITOR_FILE, SYNC_NO_ORDER_CHECK,
1396 srv_monitor_file_mutex_key);
1397
1398 LATCH_ADD_MUTEX(BUF_DBLWR, SYNC_DOUBLEWRITE, buf_dblwr_mutex_key);
1399
1400 LATCH_ADD_MUTEX(TRX_POOL, SYNC_POOL, trx_pool_mutex_key);
1401
1402 LATCH_ADD_MUTEX(TRX_POOL_MANAGER, SYNC_POOL_MANAGER,
1403 trx_pool_manager_mutex_key);
1404
1405 LATCH_ADD_MUTEX(TRX, SYNC_TRX, trx_mutex_key);
1406
1407 LATCH_ADD_MUTEX(LOCK_SYS, SYNC_LOCK_SYS, lock_mutex_key);
1408
1409 LATCH_ADD_MUTEX(LOCK_SYS_WAIT, SYNC_LOCK_WAIT_SYS,
1410 lock_wait_mutex_key);
1411
1412 LATCH_ADD_MUTEX(TRX_SYS, SYNC_TRX_SYS, trx_sys_mutex_key);
1413
1414 LATCH_ADD_MUTEX(SRV_SYS, SYNC_THREADS, srv_sys_mutex_key);
1415
1416 LATCH_ADD_MUTEX(SRV_SYS_TASKS, SYNC_ANY_LATCH, srv_threads_mutex_key);
1417
1418 LATCH_ADD_MUTEX(PAGE_ZIP_STAT_PER_INDEX, SYNC_ANY_LATCH,
1419 page_zip_stat_per_index_mutex_key);
1420
1421#ifndef PFS_SKIP_EVENT_MUTEX
1422 LATCH_ADD_MUTEX(EVENT_MANAGER, SYNC_NO_ORDER_CHECK,
1423 event_manager_mutex_key);
1424#else
1425 LATCH_ADD_MUTEX(EVENT_MANAGER, SYNC_NO_ORDER_CHECK,
1426 PFS_NOT_INSTRUMENTED);
1427#endif /* !PFS_SKIP_EVENT_MUTEX */
1428
1429 LATCH_ADD_MUTEX(EVENT_MUTEX, SYNC_NO_ORDER_CHECK, event_mutex_key);
1430
1431 LATCH_ADD_MUTEX(SYNC_ARRAY_MUTEX, SYNC_NO_ORDER_CHECK,
1432 sync_array_mutex_key);
1433
1434 LATCH_ADD_MUTEX(ZIP_PAD_MUTEX, SYNC_NO_ORDER_CHECK, zip_pad_mutex_key);
1435
1436 LATCH_ADD_MUTEX(OS_AIO_READ_MUTEX, SYNC_NO_ORDER_CHECK,
1437 PFS_NOT_INSTRUMENTED);
1438
1439 LATCH_ADD_MUTEX(OS_AIO_WRITE_MUTEX, SYNC_NO_ORDER_CHECK,
1440 PFS_NOT_INSTRUMENTED);
1441
1442 LATCH_ADD_MUTEX(OS_AIO_LOG_MUTEX, SYNC_NO_ORDER_CHECK,
1443 PFS_NOT_INSTRUMENTED);
1444
1445 LATCH_ADD_MUTEX(OS_AIO_IBUF_MUTEX, SYNC_NO_ORDER_CHECK,
1446 PFS_NOT_INSTRUMENTED);
1447
1448 LATCH_ADD_MUTEX(OS_AIO_SYNC_MUTEX, SYNC_NO_ORDER_CHECK,
1449 PFS_NOT_INSTRUMENTED);
1450
1451 LATCH_ADD_MUTEX(ROW_DROP_LIST, SYNC_NO_ORDER_CHECK,
1452 row_drop_list_mutex_key);
1453
1454 LATCH_ADD_MUTEX(INDEX_ONLINE_LOG, SYNC_INDEX_ONLINE_LOG,
1455 index_online_log_key);
1456
1457 LATCH_ADD_MUTEX(WORK_QUEUE, SYNC_WORK_QUEUE, PFS_NOT_INSTRUMENTED);
1458
1459 // Add the RW locks
1460 LATCH_ADD_RWLOCK(BTR_SEARCH, SYNC_SEARCH_SYS, btr_search_latch_key);
1461
1462 LATCH_ADD_RWLOCK(BUF_BLOCK_LOCK, SYNC_LEVEL_VARYING,
1463 buf_block_lock_key);
1464
1465#ifdef UNIV_DEBUG
1466 LATCH_ADD_RWLOCK(BUF_BLOCK_DEBUG, SYNC_LEVEL_VARYING,
1467 buf_block_debug_latch_key);
1468#endif /* UNIV_DEBUG */
1469
1470 LATCH_ADD_RWLOCK(DICT_OPERATION, SYNC_DICT_OPERATION,
1471 dict_operation_lock_key);
1472
1473 LATCH_ADD_RWLOCK(CHECKPOINT, SYNC_NO_ORDER_CHECK, checkpoint_lock_key);
1474
1475 LATCH_ADD_RWLOCK(FIL_SPACE, SYNC_FSP, fil_space_latch_key);
1476
1477 LATCH_ADD_RWLOCK(FTS_CACHE, SYNC_FTS_CACHE, fts_cache_rw_lock_key);
1478
1479 LATCH_ADD_RWLOCK(FTS_CACHE_INIT, SYNC_FTS_CACHE_INIT,
1480 fts_cache_init_rw_lock_key);
1481
1482 LATCH_ADD_RWLOCK(TRX_I_S_CACHE, SYNC_TRX_I_S_RWLOCK,
1483 trx_i_s_cache_lock_key);
1484
1485 LATCH_ADD_RWLOCK(TRX_PURGE, SYNC_PURGE_LATCH, trx_purge_latch_key);
1486
1487 LATCH_ADD_RWLOCK(IBUF_INDEX_TREE, SYNC_IBUF_INDEX_TREE,
1488 index_tree_rw_lock_key);
1489
1490 LATCH_ADD_RWLOCK(INDEX_TREE, SYNC_INDEX_TREE, index_tree_rw_lock_key);
1491
1492 LATCH_ADD_RWLOCK(DICT_TABLE_STATS, SYNC_INDEX_TREE,
1493 dict_table_stats_key);
1494
1495 LATCH_ADD_RWLOCK(HASH_TABLE_RW_LOCK, SYNC_BUF_PAGE_HASH,
1496 hash_table_locks_key);
1497
1498 LATCH_ADD_MUTEX(SYNC_DEBUG_MUTEX, SYNC_NO_ORDER_CHECK,
1499 PFS_NOT_INSTRUMENTED);
1500
1501 /* JAN: TODO: Add PFS instrumentation */
1502 LATCH_ADD_MUTEX(SCRUB_STAT_MUTEX, SYNC_NO_ORDER_CHECK,
1503 PFS_NOT_INSTRUMENTED);
1504 LATCH_ADD_MUTEX(DEFRAGMENT_MUTEX, SYNC_NO_ORDER_CHECK,
1505 PFS_NOT_INSTRUMENTED);
1506 LATCH_ADD_MUTEX(BTR_DEFRAGMENT_MUTEX, SYNC_NO_ORDER_CHECK,
1507 PFS_NOT_INSTRUMENTED);
1508 LATCH_ADD_MUTEX(FIL_CRYPT_MUTEX, SYNC_NO_ORDER_CHECK,
1509 PFS_NOT_INSTRUMENTED);
1510 LATCH_ADD_MUTEX(FIL_CRYPT_STAT_MUTEX, SYNC_NO_ORDER_CHECK,
1511 PFS_NOT_INSTRUMENTED);
1512 LATCH_ADD_MUTEX(FIL_CRYPT_DATA_MUTEX, SYNC_NO_ORDER_CHECK,
1513 PFS_NOT_INSTRUMENTED);
1514 LATCH_ADD_MUTEX(FIL_CRYPT_THREADS_MUTEX, SYNC_NO_ORDER_CHECK,
1515 PFS_NOT_INSTRUMENTED);
1516 LATCH_ADD_MUTEX(RW_TRX_HASH_ELEMENT, SYNC_RW_TRX_HASH_ELEMENT,
1517 rw_trx_hash_element_mutex_key);
1518
1519 latch_id_t id = LATCH_ID_NONE;
1520
	/* The array should be ordered on latch ID. We need to
1522 index directly into it from the mutex policy to update
1523 the counters and access the meta-data. */
1524
1525 for (LatchMetaData::iterator it = latch_meta.begin();
1526 it != latch_meta.end();
1527 ++it) {
1528
1529 const latch_meta_t* meta = *it;
1530
1531
1532 /* Skip blank entries */
1533 if (meta == NULL || meta->get_id() == LATCH_ID_NONE) {
1534 continue;
1535 }
1536
1537 ut_a(id < meta->get_id());
1538
1539 id = meta->get_id();
1540 }
1541}
1542
1543/** Destroy the latch meta data */
1544static
1545void
1546sync_latch_meta_destroy()
1547{
1548 for (LatchMetaData::iterator it = latch_meta.begin();
1549 it != latch_meta.end();
1550 ++it) {
1551
1552 UT_DELETE(*it);
1553 }
1554
1555 latch_meta.clear();
1556}
1557
/** Track the file name and line number where each mutex was created. This
is to avoid storing { const char* name; uint16_t line; } in every instance,
which would push sizeof(Mutex) above 64 bytes. We use a lookup table to
store it separately. Fetching the values is very rare, only required for
diagnostic purposes, and we don't create/destroy mutexes that frequently. */
1563struct CreateTracker {
1564
1565 /** Constructor */
1566 CreateTracker()
1567 UNIV_NOTHROW
1568 {
1569 m_mutex.init();
1570 }
1571
1572 /** Destructor */
1573 ~CreateTracker()
1574 UNIV_NOTHROW
1575 {
1576 ut_d(m_files.empty());
1577
1578 m_mutex.destroy();
1579 }
1580
1581 /** Register where the latch was created
1582 @param[in] ptr Latch instance
1583 @param[in] filename Where created
1584 @param[in] line Line number in filename */
1585 void register_latch(
1586 const void* ptr,
1587 const char* filename,
1588 uint16_t line)
1589 UNIV_NOTHROW
1590 {
1591 m_mutex.enter();
1592
1593 Files::iterator lb = m_files.lower_bound(ptr);
1594
1595 ut_ad(lb == m_files.end()
1596 || m_files.key_comp()(ptr, lb->first));
1597
1598 typedef Files::value_type value_type;
1599
1600 m_files.insert(lb, value_type(ptr, File(filename, line)));
1601
1602 m_mutex.exit();
1603 }
1604
1605 /** Deregister a latch - when it is destroyed
1606 @param[in] ptr Latch instance being destroyed */
1607 void deregister_latch(const void* ptr)
1608 UNIV_NOTHROW
1609 {
1610 m_mutex.enter();
1611
1612 Files::iterator lb = m_files.lower_bound(ptr);
1613
1614 ut_ad(lb != m_files.end()
1615 && !(m_files.key_comp()(ptr, lb->first)));
1616
1617 m_files.erase(lb);
1618
1619 m_mutex.exit();
1620 }
1621
1622 /** Get the create string, format is "name:line"
1623 @param[in] ptr Latch instance
1624 @return the create string or "" if not found */
1625 std::string get(const void* ptr)
1626 UNIV_NOTHROW
1627 {
1628 m_mutex.enter();
1629
1630 std::string created;
1631
1632 Files::iterator lb = m_files.lower_bound(ptr);
1633
1634 if (lb != m_files.end()
1635 && !(m_files.key_comp()(ptr, lb->first))) {
1636
1637 std::ostringstream msg;
1638
1639 msg << lb->second.m_name << ":" << lb->second.m_line;
1640
1641 created = msg.str();
1642 }
1643
1644 m_mutex.exit();
1645
1646 return(created);
1647 }
1648
1649private:
1650 /** For tracking the filename and line number */
1651 struct File {
1652
1653 /** Constructor */
1654 File() UNIV_NOTHROW : m_name(), m_line() { }
1655
1656 /** Constructor
1657 @param[in] name Filename where created
1658 @param[in] line Line number where created */
1659 File(const char* name, uint16_t line)
1660 UNIV_NOTHROW
1661 :
1662 m_name(sync_basename(name)),
1663 m_line(line)
1664 {
1665 /* No op */
1666 }
1667
1668 /** Filename where created */
1669 std::string m_name;
1670
1671 /** Line number where created */
1672 uint16_t m_line;
1673 };
1674
1675 /** Map the mutex instance to where it was created */
1676 typedef std::map<
1677 const void*,
1678 File,
1679 std::less<const void*>,
1680 ut_allocator<std::pair<const void* const, File> > >
1681 Files;
1682
1683 typedef OSMutex Mutex;
1684
1685 /** Mutex protecting m_files */
1686 Mutex m_mutex;
1687
1688 /** Track the latch creation */
1689 Files m_files;
1690};
1691
/** Track latch creation locations, in order to reduce the size of the latches */
1693static CreateTracker create_tracker;
1694
1695/** Register a latch, called when it is created
1696@param[in] ptr Latch instance that was created
1697@param[in] filename Filename where it was created
1698@param[in] line Line number in filename */
1699void
1700sync_file_created_register(
1701 const void* ptr,
1702 const char* filename,
1703 uint16_t line)
1704{
1705 create_tracker.register_latch(ptr, filename, line);
1706}
1707
1708/** Deregister a latch, called when it is destroyed
1709@param[in] ptr Latch to be destroyed */
1710void
1711sync_file_created_deregister(const void* ptr)
1712{
1713 create_tracker.deregister_latch(ptr);
1714}
1715
/** Get the string describing where the latch was created. Its format is "name:line"
1717@param[in] ptr Latch instance
1718@return created information or "" if can't be found */
1719std::string
1720sync_file_created_get(const void* ptr)
1721{
1722 return(create_tracker.get(ptr));
1723}
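/* Illustrative sketch (hypothetical call sites, not taken from this file):
the latch creation code is expected to use this API roughly as

	// at creation time (e.g. from mutex_create()/rw_lock_create()):
	sync_file_created_register(latch_ptr, __FILE__, __LINE__);

	// for diagnostics, e.g. when printing a latch:
	std::string created_at = sync_file_created_get(latch_ptr);

	// at destruction time:
	sync_file_created_deregister(latch_ptr);

latch_ptr stands for the address of the latch instance being tracked. */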
1724
1725/** Initializes the synchronization data structures. */
1726void
1727sync_check_init()
1728{
1729 ut_ad(!LatchDebug::s_initialized);
1730 ut_d(LatchDebug::s_initialized = true);
1731
1732 sync_latch_meta_init();
1733
1734 /* Init the rw-lock & mutex list and create the mutex to protect it. */
1735
1736 UT_LIST_INIT(rw_lock_list, &rw_lock_t::list);
1737
1738 mutex_create(LATCH_ID_RW_LOCK_LIST, &rw_lock_list_mutex);
1739
1740 ut_d(LatchDebug::init());
1741
1742 sync_array_init();
1743}
1744
1745/** Free the InnoDB synchronization data structures. */
1746void
1747sync_check_close()
1748{
1749 ut_d(LatchDebug::shutdown());
1750
1751 mutex_free(&rw_lock_list_mutex);
1752
1753 sync_array_close();
1754
1755 sync_latch_meta_destroy();
1756}
1757
1758