1 | /* Copyright (c) 2007, 2012, Oracle and/or its affiliates. |
2 | |
3 | This program is free software; you can redistribute it and/or modify |
4 | it under the terms of the GNU General Public License as published by |
5 | the Free Software Foundation; version 2 of the License. |
6 | |
7 | This program is distributed in the hope that it will be useful, |
8 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
9 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
10 | GNU General Public License for more details. |
11 | |
12 | You should have received a copy of the GNU General Public License |
13 | along with this program; if not, write to the Free Software Foundation, |
14 | 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */ |
15 | |
16 | |
17 | #include "mariadb.h" |
18 | #include "sql_class.h" |
19 | #include "debug_sync.h" |
20 | #include "sql_array.h" |
21 | #include "rpl_rli.h" |
22 | #include <lf.h> |
23 | #include "unireg.h" |
24 | #include <mysql/plugin.h> |
25 | #include <mysql/service_thd_wait.h> |
26 | #include <mysql/psi/mysql_stage.h> |
27 | #include "wsrep_mysqld.h" |
28 | #include "wsrep_thd.h" |
29 | |
30 | #ifdef HAVE_PSI_INTERFACE |
31 | static PSI_mutex_key key_MDL_wait_LOCK_wait_status; |
32 | |
33 | static PSI_mutex_info all_mdl_mutexes[]= |
34 | { |
  { &key_MDL_wait_LOCK_wait_status, "MDL_wait::LOCK_wait_status", 0}
36 | }; |
37 | |
38 | static PSI_rwlock_key key_MDL_lock_rwlock; |
39 | static PSI_rwlock_key key_MDL_context_LOCK_waiting_for; |
40 | |
41 | static PSI_rwlock_info all_mdl_rwlocks[]= |
42 | { |
  { &key_MDL_lock_rwlock, "MDL_lock::rwlock", 0},
  { &key_MDL_context_LOCK_waiting_for, "MDL_context::LOCK_waiting_for", 0}
45 | }; |
46 | |
47 | static PSI_cond_key key_MDL_wait_COND_wait_status; |
48 | |
49 | static PSI_cond_info all_mdl_conds[]= |
50 | { |
  { &key_MDL_wait_COND_wait_status, "MDL_context::COND_wait_status", 0}
52 | }; |
53 | |
54 | /** |
55 | Initialise all the performance schema instrumentation points |
56 | used by the MDL subsystem. |
57 | */ |
58 | static void init_mdl_psi_keys(void) |
59 | { |
60 | int count; |
61 | |
  count= array_elements(all_mdl_mutexes);
  mysql_mutex_register("sql", all_mdl_mutexes, count);

  count= array_elements(all_mdl_rwlocks);
  mysql_rwlock_register("sql", all_mdl_rwlocks, count);

  count= array_elements(all_mdl_conds);
  mysql_cond_register("sql", all_mdl_conds, count);
70 | |
71 | MDL_key::init_psi_keys(); |
72 | } |
73 | #endif /* HAVE_PSI_INTERFACE */ |
74 | |
75 | |
76 | /** |
  Thread state names to be used when we have to wait on a resource
  belonging to a certain namespace.
79 | */ |
80 | |
81 | PSI_stage_info MDL_key::m_namespace_to_wait_state_name[NAMESPACE_END]= |
82 | { |
83 | {0, "Waiting for global read lock" , 0}, |
84 | {0, "Waiting for schema metadata lock" , 0}, |
85 | {0, "Waiting for table metadata lock" , 0}, |
86 | {0, "Waiting for stored function metadata lock" , 0}, |
87 | {0, "Waiting for stored procedure metadata lock" , 0}, |
88 | {0, "Waiting for stored package body metadata lock" , 0}, |
89 | {0, "Waiting for trigger metadata lock" , 0}, |
90 | {0, "Waiting for event metadata lock" , 0}, |
91 | {0, "Waiting for commit lock" , 0}, |
92 | {0, "User lock" , 0} /* Be compatible with old status. */ |
93 | }; |
94 | |
95 | #ifdef HAVE_PSI_INTERFACE |
96 | void MDL_key::init_psi_keys() |
97 | { |
98 | int i; |
99 | int count; |
100 | PSI_stage_info *info __attribute__((unused)); |
101 | |
102 | count= array_elements(MDL_key::m_namespace_to_wait_state_name); |
103 | for (i= 0; i<count; i++) |
104 | { |
105 | /* mysql_stage_register wants an array of pointers, registering 1 by 1. */ |
106 | info= & MDL_key::m_namespace_to_wait_state_name[i]; |
107 | mysql_stage_register("sql" , &info, 1); |
108 | } |
109 | } |
110 | #endif |
111 | |
112 | static bool mdl_initialized= 0; |
113 | |
114 | |
115 | /** |
116 | A collection of all MDL locks. A singleton, |
117 | there is only one instance of the map in the server. |
118 | */ |
119 | |
120 | class MDL_map |
121 | { |
122 | public: |
123 | void init(); |
124 | void destroy(); |
125 | MDL_lock *find_or_insert(LF_PINS *pins, const MDL_key *key); |
126 | unsigned long get_lock_owner(LF_PINS *pins, const MDL_key *key); |
127 | void remove(LF_PINS *pins, MDL_lock *lock); |
128 | LF_PINS *get_pins() { return lf_hash_get_pins(&m_locks); } |
129 | private: |
130 | LF_HASH m_locks; /**< All acquired locks in the server. */ |
131 | /** Pre-allocated MDL_lock object for GLOBAL namespace. */ |
132 | MDL_lock *m_global_lock; |
133 | /** Pre-allocated MDL_lock object for COMMIT namespace. */ |
134 | MDL_lock *m_commit_lock; |
135 | friend int mdl_iterate(int (*)(MDL_ticket *, void *), void *); |
136 | }; |
137 | |
138 | |
139 | /** |
  A context of the recursive traversal through all contexts
  in all sessions in search of a deadlock.
142 | */ |
143 | |
144 | class Deadlock_detection_visitor: public MDL_wait_for_graph_visitor |
145 | { |
146 | public: |
147 | Deadlock_detection_visitor(MDL_context *start_node_arg) |
148 | : m_start_node(start_node_arg), |
149 | m_victim(NULL), |
150 | m_current_search_depth(0), |
151 | m_found_deadlock(FALSE) |
152 | {} |
153 | virtual bool enter_node(MDL_context *node); |
154 | virtual void leave_node(MDL_context *node); |
155 | |
156 | virtual bool inspect_edge(MDL_context *dest); |
157 | |
158 | MDL_context *get_victim() const { return m_victim; } |
159 | private: |
160 | /** |
161 | Change the deadlock victim to a new one if it has lower deadlock |
162 | weight. |
163 | */ |
164 | void opt_change_victim_to(MDL_context *new_victim); |
165 | private: |
166 | /** |
167 | The context which has initiated the search. There |
168 | can be multiple searches happening in parallel at the same time. |
169 | */ |
170 | MDL_context *m_start_node; |
171 | /** If a deadlock is found, the context that identifies the victim. */ |
172 | MDL_context *m_victim; |
  /** Set to 0 at start. Increased whenever
174 | we descend into another MDL context (aka traverse to the next |
175 | wait-for graph node). When MAX_SEARCH_DEPTH is reached, we |
176 | assume that a deadlock is found, even if we have not found a |
177 | loop. |
178 | */ |
179 | uint m_current_search_depth; |
180 | /** TRUE if we found a deadlock. */ |
181 | bool m_found_deadlock; |
182 | /** |
183 | Maximum depth for deadlock searches. After this depth is |
184 | achieved we will unconditionally declare that there is a |
185 | deadlock. |
186 | |
187 | @note This depth should be small enough to avoid stack |
188 | being exhausted by recursive search algorithm. |
189 | |
    TODO: Find out what the optimal value for this parameter is.
          The current value is safe, but probably sub-optimal,
          as there is anecdotal evidence that real-life
          deadlocks are typically even shorter.
194 | */ |
195 | static const uint MAX_SEARCH_DEPTH= 32; |
196 | }; |
197 | |
198 | |
199 | /** |
200 | Enter a node of a wait-for graph. After |
201 | a node is entered, inspect_edge() will be called |
202 | for all wait-for destinations of this node. Then |
203 | leave_node() will be called. |
204 | We call "enter_node()" for all nodes we inspect, |
205 | including the starting node. |
206 | |
207 | @retval TRUE Maximum search depth exceeded. |
208 | @retval FALSE OK. |
209 | */ |
210 | |
211 | bool Deadlock_detection_visitor::enter_node(MDL_context *node) |
212 | { |
213 | m_found_deadlock= ++m_current_search_depth >= MAX_SEARCH_DEPTH; |
214 | if (m_found_deadlock) |
215 | { |
216 | DBUG_ASSERT(! m_victim); |
217 | opt_change_victim_to(node); |
218 | } |
219 | return m_found_deadlock; |
220 | } |
221 | |
222 | |
223 | /** |
224 | Done inspecting this node. Decrease the search |
225 | depth. If a deadlock is found, and we are |
226 | backtracking to the start node, optionally |
227 | change the deadlock victim to one with lower |
228 | deadlock weight. |
229 | */ |
230 | |
231 | void Deadlock_detection_visitor::leave_node(MDL_context *node) |
232 | { |
233 | --m_current_search_depth; |
234 | if (m_found_deadlock) |
235 | opt_change_victim_to(node); |
236 | } |
237 | |
238 | |
239 | /** |
240 | Inspect a wait-for graph edge from one MDL context to another. |
241 | |
242 | @retval TRUE A loop is found. |
243 | @retval FALSE No loop is found. |
244 | */ |
245 | |
246 | bool Deadlock_detection_visitor::inspect_edge(MDL_context *node) |
247 | { |
248 | m_found_deadlock= node == m_start_node; |
249 | return m_found_deadlock; |
250 | } |
251 | |
252 | |
253 | /** |
254 | Change the deadlock victim to a new one if it has lower deadlock |
255 | weight. |
256 | |
257 | @retval new_victim Victim is not changed. |
  @retval !new_victim New victim became the current one.
259 | */ |
260 | |
261 | void |
262 | Deadlock_detection_visitor::opt_change_victim_to(MDL_context *new_victim) |
263 | { |
264 | if (m_victim == NULL || |
265 | m_victim->get_deadlock_weight() >= new_victim->get_deadlock_weight()) |
266 | { |
267 | /* Swap victims, unlock the old one. */ |
268 | MDL_context *tmp= m_victim; |
269 | m_victim= new_victim; |
270 | m_victim->lock_deadlock_victim(); |
271 | if (tmp) |
272 | tmp->unlock_deadlock_victim(); |
273 | } |
274 | } |
275 | |
276 | |
277 | /** |
  Get a bit corresponding to an enum_mdl_type value in granted/waiting
  bitmaps and compatibility matrices.
280 | */ |
281 | |
282 | #define MDL_BIT(A) static_cast<MDL_lock::bitmap_t>(1U << A) |
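
/*
  For example (illustrative only), the set "shared or exclusive" is
  built as

    MDL_BIT(MDL_SHARED) | MDL_BIT(MDL_EXCLUSIVE)

  and membership of a ticket's type in such a set is tested with

    bitmap & MDL_BIT(ticket->get_type())

  which is the idiom used by the compatibility checks below.
*/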
283 | |
284 | /** |
285 | The lock context. Created internally for an acquired lock. |
286 | For a given name, there exists only one MDL_lock instance, |
287 | and it exists only when the lock has been granted. |
288 | Can be seen as an MDL subsystem's version of TABLE_SHARE. |
289 | |
  Compatibility rules for lock types are not defined by this class
  itself; they are supplied by the MDL_lock_strategy instance attached
  to each lock (see below).
293 | */ |
294 | |
295 | class MDL_lock |
296 | { |
297 | public: |
298 | typedef unsigned short bitmap_t; |
299 | |
300 | class Ticket_list |
301 | { |
302 | public: |
303 | typedef I_P_List<MDL_ticket, |
304 | I_P_List_adapter<MDL_ticket, |
305 | &MDL_ticket::next_in_lock, |
306 | &MDL_ticket::prev_in_lock>, |
307 | I_P_List_null_counter, |
308 | I_P_List_fast_push_back<MDL_ticket> > |
309 | List; |
310 | operator const List &() const { return m_list; } |
311 | Ticket_list() :m_bitmap(0) {} |
312 | |
313 | void add_ticket(MDL_ticket *ticket); |
314 | void remove_ticket(MDL_ticket *ticket); |
315 | bool is_empty() const { return m_list.is_empty(); } |
316 | bitmap_t bitmap() const { return m_bitmap; } |
317 | private: |
318 | void clear_bit_if_not_in_list(enum_mdl_type type); |
319 | private: |
320 | /** List of tickets. */ |
321 | List m_list; |
322 | /** Bitmap of types of tickets in this list. */ |
323 | bitmap_t m_bitmap; |
324 | }; |
325 | |
326 | typedef Ticket_list::List::Iterator Ticket_iterator; |
327 | |
328 | |
329 | /** |
330 | Helper struct which defines how different types of locks are handled |
331 | for a specific MDL_lock. In practice we use only two strategies: "scoped" |
332 | lock strategy for locks in GLOBAL, COMMIT and SCHEMA namespaces and |
333 | "object" lock strategy for all other namespaces. |
334 | */ |
335 | struct MDL_lock_strategy |
336 | { |
337 | virtual const bitmap_t *incompatible_granted_types_bitmap() const = 0; |
338 | virtual const bitmap_t *incompatible_waiting_types_bitmap() const = 0; |
339 | virtual bool needs_notification(const MDL_ticket *ticket) const = 0; |
340 | virtual bool conflicting_locks(const MDL_ticket *ticket) const = 0; |
341 | virtual bitmap_t hog_lock_types_bitmap() const = 0; |
342 | virtual ~MDL_lock_strategy() {} |
343 | }; |
344 | |
345 | |
346 | /** |
    An implementation of the scoped metadata lock. The only locking
    modes supported at the moment are SHARED, INTENTION EXCLUSIVE and
    EXCLUSIVE.
350 | */ |
351 | struct MDL_scoped_lock : public MDL_lock_strategy |
352 | { |
353 | MDL_scoped_lock() {} |
354 | virtual const bitmap_t *incompatible_granted_types_bitmap() const |
355 | { return m_granted_incompatible; } |
356 | virtual const bitmap_t *incompatible_waiting_types_bitmap() const |
357 | { return m_waiting_incompatible; } |
358 | virtual bool needs_notification(const MDL_ticket *ticket) const |
359 | { return (ticket->get_type() == MDL_SHARED); } |
360 | |
361 | /** |
      Notify threads holding scoped IX locks which conflict with a pending
      S lock.

      A thread which holds a global IX lock can be a handler thread for
      INSERT DELAYED. We need to kill such threads in order to get the
      global shared lock. We do this by calling code outside of MDL.
368 | */ |
369 | virtual bool conflicting_locks(const MDL_ticket *ticket) const |
370 | { return ticket->get_type() == MDL_INTENTION_EXCLUSIVE; } |
371 | |
372 | /* |
      In scoped locks, only an IX lock request could starve because of
      X/S. But that is practically a very rare case. So just return 0
      from this function.
375 | */ |
376 | virtual bitmap_t hog_lock_types_bitmap() const |
377 | { return 0; } |
378 | private: |
379 | static const bitmap_t m_granted_incompatible[MDL_TYPE_END]; |
380 | static const bitmap_t m_waiting_incompatible[MDL_TYPE_END]; |
381 | }; |
382 | |
383 | |
384 | /** |
385 | An implementation of a per-object lock. Supports SHARED, SHARED_UPGRADABLE, |
386 | SHARED HIGH PRIORITY and EXCLUSIVE locks. |
387 | */ |
388 | struct MDL_object_lock : public MDL_lock_strategy |
389 | { |
390 | MDL_object_lock() {} |
391 | virtual const bitmap_t *incompatible_granted_types_bitmap() const |
392 | { return m_granted_incompatible; } |
393 | virtual const bitmap_t *incompatible_waiting_types_bitmap() const |
394 | { return m_waiting_incompatible; } |
395 | virtual bool needs_notification(const MDL_ticket *ticket) const |
396 | { |
397 | return ticket->get_type() == MDL_SHARED_NO_WRITE || |
398 | ticket->get_type() == MDL_SHARED_NO_READ_WRITE || |
399 | ticket->get_type() == MDL_EXCLUSIVE; |
400 | } |
401 | |
402 | /** |
      Notify threads holding shared metadata locks on an object, which
      conflict with a pending X, SNW or SNRW lock.

      If the thread which holds the conflicting lock is waiting on a
      table-level lock or some other non-MDL resource, we might need
      to wake it up by calling code outside of MDL.
409 | */ |
410 | virtual bool conflicting_locks(const MDL_ticket *ticket) const |
411 | { return ticket->get_type() < MDL_SHARED_UPGRADABLE; } |
412 | |
413 | /* |
      To prevent starvation, these lock types are granted at most
      max_write_lock_count times in a row while other lock types
      are waiting.
417 | */ |
418 | virtual bitmap_t hog_lock_types_bitmap() const |
419 | { |
420 | return (MDL_BIT(MDL_SHARED_NO_WRITE) | |
421 | MDL_BIT(MDL_SHARED_NO_READ_WRITE) | |
422 | MDL_BIT(MDL_EXCLUSIVE)); |
423 | } |
424 | |
425 | private: |
426 | static const bitmap_t m_granted_incompatible[MDL_TYPE_END]; |
427 | static const bitmap_t m_waiting_incompatible[MDL_TYPE_END]; |
428 | }; |
429 | |
430 | public: |
431 | /** The key of the object (data) being protected. */ |
432 | MDL_key key; |
433 | /** |
434 | Read-write lock protecting this lock context. |
435 | |
    @note The fact that we use a read-write lock that prefers readers
          here is important, as the deadlock detector won't work
          correctly otherwise.

    For example, imagine that we have the following waiters graph:

                       ctxA -> obj1 -> ctxB -> obj2 -|
                        ^                            |
                        |----------------------------|

    and both ctxA and ctxB start the deadlock detection process:

      ctxA read-locks obj1             ctxB read-locks obj2
      ctxA goes deeper                 ctxB goes deeper

    Now ctxC comes in, wanting to start waiting on obj1, and ctxD
    comes in, wanting to start waiting on obj2.

      ctxC tries to write-lock obj1   ctxD tries to write-lock obj2
      ctxC is blocked                 ctxD is blocked

    Now ctxA and ctxB resume their search:

      ctxA tries to read-lock obj2    ctxB tries to read-lock obj1

    If m_rwlock prefers writers (or is fair), both ctxA and ctxB would
    be blocked because of the pending write locks from ctxD and ctxC
    respectively. Thus we would get a deadlock in the deadlock detector.
    If m_rwlock prefers readers (actually, ignoring pending writers is
    enough), ctxA and ctxB will continue and no deadlock will occur.
465 | */ |
466 | mysql_prlock_t m_rwlock; |
467 | |
468 | bool is_empty() const |
469 | { |
470 | return (m_granted.is_empty() && m_waiting.is_empty()); |
471 | } |
472 | |
473 | const bitmap_t *incompatible_granted_types_bitmap() const |
474 | { return m_strategy->incompatible_granted_types_bitmap(); } |
475 | const bitmap_t *incompatible_waiting_types_bitmap() const |
476 | { return m_strategy->incompatible_waiting_types_bitmap(); } |
477 | |
478 | bool has_pending_conflicting_lock(enum_mdl_type type); |
479 | |
  bool can_grant_lock(enum_mdl_type type, MDL_context *requestor_ctx,
                      bool ignore_lock_priority) const;
482 | |
483 | inline unsigned long get_lock_owner() const; |
484 | |
485 | void reschedule_waiters(); |
486 | |
487 | void remove_ticket(LF_PINS *pins, Ticket_list MDL_lock::*queue, |
488 | MDL_ticket *ticket); |
489 | |
490 | bool visit_subgraph(MDL_ticket *waiting_ticket, |
491 | MDL_wait_for_graph_visitor *gvisitor); |
492 | |
493 | bool needs_notification(const MDL_ticket *ticket) const |
494 | { return m_strategy->needs_notification(ticket); } |
495 | void notify_conflicting_locks(MDL_context *ctx) |
496 | { |
497 | Ticket_iterator it(m_granted); |
498 | MDL_ticket *conflicting_ticket; |
499 | while ((conflicting_ticket= it++)) |
500 | { |
501 | if (conflicting_ticket->get_ctx() != ctx && |
502 | m_strategy->conflicting_locks(conflicting_ticket)) |
503 | { |
504 | MDL_context *conflicting_ctx= conflicting_ticket->get_ctx(); |
505 | |
506 | ctx->get_owner()-> |
507 | notify_shared_lock(conflicting_ctx->get_owner(), |
508 | conflicting_ctx->get_needs_thr_lock_abort()); |
509 | } |
510 | } |
511 | } |
512 | |
513 | bitmap_t hog_lock_types_bitmap() const |
514 | { return m_strategy->hog_lock_types_bitmap(); } |
515 | |
516 | #ifndef DBUG_OFF |
517 | bool check_if_conflicting_replication_locks(MDL_context *ctx); |
518 | #endif |
519 | |
520 | /** List of granted tickets for this lock. */ |
521 | Ticket_list m_granted; |
522 | /** Tickets for contexts waiting to acquire a lock. */ |
523 | Ticket_list m_waiting; |
524 | |
525 | /** |
526 | Number of times high priority lock requests have been granted while |
527 | low priority lock requests were waiting. |
528 | */ |
529 | ulong m_hog_lock_count; |
530 | |
531 | public: |
532 | |
533 | MDL_lock() |
534 | : m_hog_lock_count(0), |
535 | m_strategy(0) |
536 | { mysql_prlock_init(key_MDL_lock_rwlock, &m_rwlock); } |
537 | |
538 | MDL_lock(const MDL_key *key_arg) |
539 | : key(key_arg), |
540 | m_hog_lock_count(0), |
541 | m_strategy(&m_scoped_lock_strategy) |
542 | { |
543 | DBUG_ASSERT(key_arg->mdl_namespace() == MDL_key::GLOBAL || |
544 | key_arg->mdl_namespace() == MDL_key::COMMIT); |
545 | mysql_prlock_init(key_MDL_lock_rwlock, &m_rwlock); |
546 | } |
547 | |
548 | ~MDL_lock() |
549 | { mysql_prlock_destroy(&m_rwlock); } |
550 | |
551 | static void lf_alloc_constructor(uchar *arg) |
552 | { new (arg + LF_HASH_OVERHEAD) MDL_lock(); } |
553 | |
554 | static void lf_alloc_destructor(uchar *arg) |
555 | { ((MDL_lock*)(arg + LF_HASH_OVERHEAD))->~MDL_lock(); } |
556 | |
557 | static void lf_hash_initializer(LF_HASH *hash __attribute__((unused)), |
558 | MDL_lock *lock, MDL_key *key_arg) |
559 | { |
560 | DBUG_ASSERT(key_arg->mdl_namespace() != MDL_key::GLOBAL && |
561 | key_arg->mdl_namespace() != MDL_key::COMMIT); |
562 | new (&lock->key) MDL_key(key_arg); |
563 | if (key_arg->mdl_namespace() == MDL_key::SCHEMA) |
564 | lock->m_strategy= &m_scoped_lock_strategy; |
565 | else |
566 | lock->m_strategy= &m_object_lock_strategy; |
567 | } |
568 | |
569 | const MDL_lock_strategy *m_strategy; |
570 | private: |
571 | static const MDL_scoped_lock m_scoped_lock_strategy; |
572 | static const MDL_object_lock m_object_lock_strategy; |
573 | }; |
574 | |
575 | |
576 | const MDL_lock::MDL_scoped_lock MDL_lock::m_scoped_lock_strategy; |
577 | const MDL_lock::MDL_object_lock MDL_lock::m_object_lock_strategy; |
578 | |
579 | |
580 | static MDL_map mdl_locks; |
581 | |
582 | |
583 | extern "C" |
584 | { |
585 | static uchar * |
586 | mdl_locks_key(const uchar *record, size_t *length, |
587 | my_bool not_used __attribute__((unused))) |
588 | { |
589 | MDL_lock *lock=(MDL_lock*) record; |
590 | *length= lock->key.length(); |
591 | return (uchar*) lock->key.ptr(); |
592 | } |
593 | } /* extern "C" */ |
594 | |
595 | |
596 | /** |
597 | Initialize the metadata locking subsystem. |
598 | |
599 | This function is called at server startup. |
600 | |
601 | In particular, initializes the new global mutex and |
602 | the associated condition variable: LOCK_mdl and COND_mdl. |
603 | These locking primitives are implementation details of the MDL |
604 | subsystem and are private to it. |
605 | */ |
606 | |
607 | void mdl_init() |
608 | { |
609 | DBUG_ASSERT(! mdl_initialized); |
610 | mdl_initialized= TRUE; |
611 | |
612 | #ifdef HAVE_PSI_INTERFACE |
613 | init_mdl_psi_keys(); |
614 | #endif |
615 | |
616 | mdl_locks.init(); |
617 | } |
618 | |
619 | |
620 | /** |
621 | Release resources of metadata locking subsystem. |
622 | |
623 | Destroys the global mutex and the condition variable. |
624 | Called at server shutdown. |
625 | */ |
626 | |
627 | void mdl_destroy() |
628 | { |
629 | if (mdl_initialized) |
630 | { |
631 | mdl_initialized= FALSE; |
632 | mdl_locks.destroy(); |
633 | } |
634 | } |
635 | |
636 | |
637 | struct mdl_iterate_arg |
638 | { |
639 | int (*callback)(MDL_ticket *ticket, void *arg); |
640 | void *argument; |
641 | }; |
642 | |
643 | |
644 | static my_bool mdl_iterate_lock(MDL_lock *lock, mdl_iterate_arg *arg) |
645 | { |
646 | int res= FALSE; |
647 | /* |
    We can skip the check for m_strategy here because m_granted
    must be empty for such locks anyway.
650 | */ |
651 | mysql_prlock_rdlock(&lock->m_rwlock); |
652 | MDL_lock::Ticket_iterator ticket_it(lock->m_granted); |
653 | MDL_ticket *ticket; |
654 | while ((ticket= ticket_it++) && !(res= arg->callback(ticket, arg->argument))) |
655 | /* no-op */; |
656 | mysql_prlock_unlock(&lock->m_rwlock); |
657 | return MY_TEST(res); |
658 | } |
659 | |
660 | |
661 | int mdl_iterate(int (*callback)(MDL_ticket *ticket, void *arg), void *arg) |
662 | { |
663 | DBUG_ENTER("mdl_iterate" ); |
664 | mdl_iterate_arg argument= { callback, arg }; |
665 | LF_PINS *pins= mdl_locks.get_pins(); |
666 | int res= 1; |
667 | |
668 | if (pins) |
669 | { |
670 | res= mdl_iterate_lock(mdl_locks.m_global_lock, &argument) || |
671 | mdl_iterate_lock(mdl_locks.m_commit_lock, &argument) || |
672 | lf_hash_iterate(&mdl_locks.m_locks, pins, |
673 | (my_hash_walk_action) mdl_iterate_lock, &argument); |
674 | lf_hash_put_pins(pins); |
675 | } |
676 | DBUG_RETURN(res); |
677 | } |
678 | |
679 | |
680 | my_hash_value_type mdl_hash_function(CHARSET_INFO *cs, |
681 | const uchar *key, size_t length) |
682 | { |
683 | MDL_key *mdl_key= (MDL_key*) (key - my_offsetof(MDL_key, m_ptr)); |
684 | return mdl_key->hash_value(); |
685 | } |
686 | |
687 | |
688 | /** Initialize the container for all MDL locks. */ |
689 | |
690 | void MDL_map::init() |
691 | { |
  MDL_key global_lock_key(MDL_key::GLOBAL, "", "");
  MDL_key commit_lock_key(MDL_key::COMMIT, "", "");
694 | |
695 | m_global_lock= new (std::nothrow) MDL_lock(&global_lock_key); |
696 | m_commit_lock= new (std::nothrow) MDL_lock(&commit_lock_key); |
697 | |
698 | lf_hash_init(&m_locks, sizeof(MDL_lock), LF_HASH_UNIQUE, 0, 0, |
699 | mdl_locks_key, &my_charset_bin); |
700 | m_locks.alloc.constructor= MDL_lock::lf_alloc_constructor; |
701 | m_locks.alloc.destructor= MDL_lock::lf_alloc_destructor; |
702 | m_locks.initializer= (lf_hash_initializer) MDL_lock::lf_hash_initializer; |
703 | m_locks.hash_function= mdl_hash_function; |
704 | } |
705 | |
706 | |
707 | /** |
708 | Destroy the container for all MDL locks. |
709 | @pre It must be empty. |
710 | */ |
711 | |
712 | void MDL_map::destroy() |
713 | { |
714 | delete m_global_lock; |
715 | delete m_commit_lock; |
716 | |
717 | DBUG_ASSERT(!my_atomic_load32(&m_locks.count)); |
718 | lf_hash_destroy(&m_locks); |
719 | } |
720 | |
721 | |
722 | /** |
723 | Find MDL_lock object corresponding to the key, create it |
724 | if it does not exist. |
725 | |
726 | @retval non-NULL - Success. MDL_lock instance for the key with |
727 | locked MDL_lock::m_rwlock. |
728 | @retval NULL - Failure (OOM). |
729 | */ |
730 | |
731 | MDL_lock* MDL_map::find_or_insert(LF_PINS *pins, const MDL_key *mdl_key) |
732 | { |
733 | MDL_lock *lock; |
734 | |
735 | if (mdl_key->mdl_namespace() == MDL_key::GLOBAL || |
736 | mdl_key->mdl_namespace() == MDL_key::COMMIT) |
737 | { |
738 | /* |
      Avoid taking any mutex when a lock in the GLOBAL or COMMIT
      namespace is requested. Return a pointer to the pre-allocated
      MDL_lock instance instead. Such an optimization allows us to save
      one mutex lock/unlock for any statement changing data.

      It works since these namespaces contain only one element, so keys
      for them look like '<namespace-id>\0\0'.
746 | */ |
747 | DBUG_ASSERT(mdl_key->length() == 3); |
748 | |
749 | lock= (mdl_key->mdl_namespace() == MDL_key::GLOBAL) ? m_global_lock : |
750 | m_commit_lock; |
751 | |
752 | mysql_prlock_wrlock(&lock->m_rwlock); |
753 | |
754 | return lock; |
755 | } |
756 | |
757 | retry: |
758 | while (!(lock= (MDL_lock*) lf_hash_search(&m_locks, pins, mdl_key->ptr(), |
759 | mdl_key->length()))) |
760 | if (lf_hash_insert(&m_locks, pins, (uchar*) mdl_key) == -1) |
761 | return NULL; |
762 | |
763 | mysql_prlock_wrlock(&lock->m_rwlock); |
764 | if (unlikely(!lock->m_strategy)) |
765 | { |
766 | mysql_prlock_unlock(&lock->m_rwlock); |
767 | lf_hash_search_unpin(pins); |
768 | goto retry; |
769 | } |
770 | lf_hash_search_unpin(pins); |
771 | |
772 | return lock; |
773 | } |
774 | |
775 | |
776 | /** |
777 | * Return thread id of the owner of the lock, if it is owned. |
778 | */ |
779 | |
780 | unsigned long |
781 | MDL_map::get_lock_owner(LF_PINS *pins, const MDL_key *mdl_key) |
782 | { |
783 | MDL_lock *lock; |
784 | unsigned long res= 0; |
785 | |
786 | if (mdl_key->mdl_namespace() == MDL_key::GLOBAL || |
787 | mdl_key->mdl_namespace() == MDL_key::COMMIT) |
788 | { |
789 | lock= (mdl_key->mdl_namespace() == MDL_key::GLOBAL) ? m_global_lock : |
790 | m_commit_lock; |
791 | mysql_prlock_rdlock(&lock->m_rwlock); |
792 | res= lock->get_lock_owner(); |
793 | mysql_prlock_unlock(&lock->m_rwlock); |
794 | } |
795 | else |
796 | { |
797 | lock= (MDL_lock*) lf_hash_search(&m_locks, pins, mdl_key->ptr(), |
798 | mdl_key->length()); |
799 | if (lock) |
800 | { |
801 | /* |
        We can skip the check for m_strategy here because m_granted
        must be empty for such locks anyway.
804 | */ |
805 | mysql_prlock_rdlock(&lock->m_rwlock); |
806 | res= lock->get_lock_owner(); |
807 | mysql_prlock_unlock(&lock->m_rwlock); |
808 | lf_hash_search_unpin(pins); |
809 | } |
810 | } |
811 | return res; |
812 | } |
813 | |
814 | |
815 | /** |
  Destroy the MDL_lock object or delegate this responsibility to
  whatever thread holds the last outstanding reference to it.
819 | */ |
820 | |
821 | void MDL_map::remove(LF_PINS *pins, MDL_lock *lock) |
822 | { |
823 | if (lock->key.mdl_namespace() == MDL_key::GLOBAL || |
824 | lock->key.mdl_namespace() == MDL_key::COMMIT) |
825 | { |
826 | /* |
827 | Never destroy pre-allocated MDL_lock objects for GLOBAL and |
828 | COMMIT namespaces. |
829 | */ |
830 | mysql_prlock_unlock(&lock->m_rwlock); |
831 | return; |
832 | } |
833 | |
834 | lock->m_strategy= 0; |
835 | mysql_prlock_unlock(&lock->m_rwlock); |
836 | lf_hash_delete(&m_locks, pins, lock->key.ptr(), lock->key.length()); |
837 | } |
838 | |
839 | |
840 | /** |
841 | Initialize a metadata locking context. |
842 | |
843 | This is to be called when a new server connection is created. |
844 | */ |
845 | |
846 | MDL_context::MDL_context() |
847 | : |
848 | m_owner(NULL), |
849 | m_needs_thr_lock_abort(FALSE), |
850 | m_waiting_for(NULL), |
851 | m_pins(NULL) |
852 | { |
853 | mysql_prlock_init(key_MDL_context_LOCK_waiting_for, &m_LOCK_waiting_for); |
854 | } |
855 | |
856 | |
857 | /** |
858 | Destroy metadata locking context. |
859 | |
860 | Assumes and asserts that there are no active or pending locks |
861 | associated with this context at the time of the destruction. |
862 | |
863 | Currently does nothing. Asserts that there are no pending |
864 | or satisfied lock requests. The pending locks must be released |
865 | prior to destruction. This is a new way to express the assertion |
866 | that all tables are closed before a connection is destroyed. |
867 | */ |
868 | |
869 | void MDL_context::destroy() |
870 | { |
871 | DBUG_ASSERT(m_tickets[MDL_STATEMENT].is_empty()); |
872 | DBUG_ASSERT(m_tickets[MDL_TRANSACTION].is_empty()); |
873 | DBUG_ASSERT(m_tickets[MDL_EXPLICIT].is_empty()); |
874 | |
875 | mysql_prlock_destroy(&m_LOCK_waiting_for); |
876 | if (m_pins) |
877 | lf_hash_put_pins(m_pins); |
878 | } |
879 | |
880 | |
881 | bool MDL_context::fix_pins() |
882 | { |
883 | return m_pins ? false : (m_pins= mdl_locks.get_pins()) == 0; |
884 | } |
885 | |
886 | |
887 | /** |
888 | Initialize a lock request. |
889 | |
890 | This is to be used for every lock request. |
891 | |
892 | Note that initialization and allocation are split into two |
893 | calls. This is to allow flexible memory management of lock |
894 | requests. Normally a lock request is stored in statement memory |
895 | (e.g. is a member of struct TABLE_LIST), but we would also like |
896 | to allow allocation of lock requests in other memory roots, |
897 | for example in the grant subsystem, to lock privilege tables. |
898 | |
899 | The MDL subsystem does not own or manage memory of lock requests. |
900 | |
901 | @param mdl_namespace Id of namespace of object to be locked |
902 | @param db Name of database to which the object belongs |
  @param name Name of the object
904 | @param mdl_type The MDL lock type for the request. |
905 | */ |
906 | |
907 | void MDL_request::init(MDL_key::enum_mdl_namespace mdl_namespace, |
908 | const char *db_arg, |
909 | const char *name_arg, |
910 | enum_mdl_type mdl_type_arg, |
911 | enum_mdl_duration mdl_duration_arg) |
912 | { |
913 | key.mdl_key_init(mdl_namespace, db_arg, name_arg); |
914 | type= mdl_type_arg; |
915 | duration= mdl_duration_arg; |
916 | ticket= NULL; |
917 | } |
918 | |
919 | |
920 | /** |
921 | Initialize a lock request using pre-built MDL_key. |
922 | |
923 | @sa MDL_request::init(namespace, db, name, type). |
924 | |
925 | @param key_arg The pre-built MDL key for the request. |
926 | @param mdl_type_arg The MDL lock type for the request. |
927 | */ |
928 | |
929 | void MDL_request::init(const MDL_key *key_arg, |
930 | enum_mdl_type mdl_type_arg, |
931 | enum_mdl_duration mdl_duration_arg) |
932 | { |
933 | key.mdl_key_init(key_arg); |
934 | type= mdl_type_arg; |
935 | duration= mdl_duration_arg; |
936 | ticket= NULL; |
937 | } |
938 | |
939 | |
940 | /** |
941 | Auxiliary functions needed for creation/destruction of MDL_ticket |
942 | objects. |
943 | |
944 | @todo This naive implementation should be replaced with one that saves |
945 | on memory allocation by reusing released objects. |
946 | */ |
947 | |
948 | MDL_ticket *MDL_ticket::create(MDL_context *ctx_arg, enum_mdl_type type_arg |
949 | #ifndef DBUG_OFF |
950 | , enum_mdl_duration duration_arg |
951 | #endif |
952 | ) |
953 | { |
954 | return new (std::nothrow) |
955 | MDL_ticket(ctx_arg, type_arg |
956 | #ifndef DBUG_OFF |
957 | , duration_arg |
958 | #endif |
959 | ); |
960 | } |
961 | |
962 | |
963 | void MDL_ticket::destroy(MDL_ticket *ticket) |
964 | { |
965 | delete ticket; |
966 | } |
967 | |
968 | |
969 | /** |
970 | Return the 'weight' of this ticket for the |
971 | victim selection algorithm. Requests with |
972 | lower weight are preferred to requests |
973 | with higher weight when choosing a victim. |
974 | */ |
975 | |
976 | uint MDL_ticket::get_deadlock_weight() const |
977 | { |
978 | return (m_lock->key.mdl_namespace() == MDL_key::GLOBAL || |
979 | m_type >= MDL_SHARED_UPGRADABLE ? |
980 | DEADLOCK_WEIGHT_DDL : DEADLOCK_WEIGHT_DML); |
981 | } |
982 | |
983 | |
984 | /** Construct an empty wait slot. */ |
985 | |
986 | MDL_wait::MDL_wait() |
987 | :m_wait_status(EMPTY) |
988 | { |
989 | mysql_mutex_init(key_MDL_wait_LOCK_wait_status, &m_LOCK_wait_status, NULL); |
990 | mysql_cond_init(key_MDL_wait_COND_wait_status, &m_COND_wait_status, NULL); |
991 | } |
992 | |
993 | |
994 | /** Destroy system resources. */ |
995 | |
996 | MDL_wait::~MDL_wait() |
997 | { |
998 | mysql_mutex_destroy(&m_LOCK_wait_status); |
999 | mysql_cond_destroy(&m_COND_wait_status); |
1000 | } |
1001 | |
1002 | |
1003 | /** |
  Set the status unless it's already set. Return FALSE if the status
  was set, TRUE if the slot was already occupied.
1006 | */ |
1007 | |
1008 | bool MDL_wait::set_status(enum_wait_status status_arg) |
1009 | { |
1010 | bool was_occupied= TRUE; |
1011 | mysql_mutex_lock(&m_LOCK_wait_status); |
1012 | if (m_wait_status == EMPTY) |
1013 | { |
1014 | was_occupied= FALSE; |
1015 | m_wait_status= status_arg; |
1016 | mysql_cond_signal(&m_COND_wait_status); |
1017 | } |
1018 | mysql_mutex_unlock(&m_LOCK_wait_status); |
1019 | return was_occupied; |
1020 | } |
1021 | |
1022 | |
1023 | /** Query the current value of the wait slot. */ |
1024 | |
1025 | MDL_wait::enum_wait_status MDL_wait::get_status() |
1026 | { |
1027 | enum_wait_status result; |
1028 | mysql_mutex_lock(&m_LOCK_wait_status); |
1029 | result= m_wait_status; |
1030 | mysql_mutex_unlock(&m_LOCK_wait_status); |
1031 | return result; |
1032 | } |
1033 | |
1034 | |
1035 | /** Clear the current value of the wait slot. */ |
1036 | |
1037 | void MDL_wait::reset_status() |
1038 | { |
1039 | mysql_mutex_lock(&m_LOCK_wait_status); |
1040 | m_wait_status= EMPTY; |
1041 | mysql_mutex_unlock(&m_LOCK_wait_status); |
1042 | } |
1043 | |
1044 | |
1045 | /** |
1046 | Wait for the status to be assigned to this wait slot. |
1047 | |
1048 | @param owner MDL context owner. |
1049 | @param abs_timeout Absolute time after which waiting should stop. |
  @param set_status_on_timeout TRUE - If, in case of a timeout, the
                               waiting context should close the wait
                               slot by sending TIMEOUT to itself.
                               FALSE - Otherwise.
1054 | @param wait_state_name Thread state name to be set for duration of wait. |
1055 | |
1056 | @returns Signal posted. |
1057 | */ |
1058 | |
1059 | MDL_wait::enum_wait_status |
1060 | MDL_wait::timed_wait(MDL_context_owner *owner, struct timespec *abs_timeout, |
1061 | bool set_status_on_timeout, |
1062 | const PSI_stage_info *wait_state_name) |
1063 | { |
1064 | PSI_stage_info old_stage; |
1065 | enum_wait_status result; |
1066 | int wait_result= 0; |
1067 | DBUG_ENTER("MDL_wait::timed_wait" ); |
1068 | |
1069 | mysql_mutex_lock(&m_LOCK_wait_status); |
1070 | |
1071 | owner->ENTER_COND(&m_COND_wait_status, &m_LOCK_wait_status, |
1072 | wait_state_name, & old_stage); |
1073 | thd_wait_begin(NULL, THD_WAIT_META_DATA_LOCK); |
1074 | while (!m_wait_status && !owner->is_killed() && |
1075 | wait_result != ETIMEDOUT && wait_result != ETIME) |
1076 | { |
1077 | #ifdef WITH_WSREP |
1078 | // Allow tests to block the applier thread using the DBUG facilities |
1079 | DBUG_EXECUTE_IF("sync.wsrep_before_mdl_wait" , |
1080 | { |
1081 | const char act[]= |
1082 | "now " |
1083 | "wait_for signal.wsrep_before_mdl_wait" ; |
1084 | DBUG_ASSERT(!debug_sync_set_action((owner->get_thd()), |
1085 | STRING_WITH_LEN(act))); |
1086 | };); |
1087 | if (wsrep_thd_is_BF(owner->get_thd(), false)) |
1088 | { |
1089 | wait_result= mysql_cond_wait(&m_COND_wait_status, &m_LOCK_wait_status); |
1090 | } |
1091 | else |
1092 | #endif /* WITH_WSREP */ |
1093 | wait_result= mysql_cond_timedwait(&m_COND_wait_status, &m_LOCK_wait_status, |
1094 | abs_timeout); |
1095 | } |
1096 | thd_wait_end(NULL); |
1097 | |
1098 | if (m_wait_status == EMPTY) |
1099 | { |
1100 | /* |
      The wait has ended not due to a status being set from another
      thread but due to this connection/statement being killed or a
      timeout.
      To avoid races, which may occur if another thread sets the
      GRANTED status before the code which calls this method
      processes the abort/timeout, we assign the status under
      protection of m_LOCK_wait_status, within the critical
1108 | section. An exception is when set_status_on_timeout is |
1109 | false, which means that the caller intends to restart the |
1110 | wait. |
1111 | */ |
1112 | if (owner->is_killed()) |
1113 | m_wait_status= KILLED; |
1114 | else if (set_status_on_timeout) |
1115 | m_wait_status= TIMEOUT; |
1116 | } |
1117 | result= m_wait_status; |
1118 | |
1119 | owner->EXIT_COND(& old_stage); |
1120 | |
1121 | DBUG_RETURN(result); |
1122 | } |
1123 | |
1124 | |
1125 | /** |
  Clear the bit corresponding to a type of metadata lock in the bitmap
  of such types if the list of tickets does not contain a ticket of
  that type.

  @param[in] type Type of metadata lock to look up in the list.
1132 | */ |
1133 | |
1134 | void MDL_lock::Ticket_list::clear_bit_if_not_in_list(enum_mdl_type type) |
1135 | { |
1136 | MDL_lock::Ticket_iterator it(m_list); |
1137 | const MDL_ticket *ticket; |
1138 | |
1139 | while ((ticket= it++)) |
1140 | if (ticket->get_type() == type) |
1141 | return; |
1142 | m_bitmap&= ~ MDL_BIT(type); |
1143 | } |
1144 | |
1145 | |
1146 | /** |
  Add a ticket to MDL_lock's list of granted or waiting requests and
  update the corresponding bitmap of lock types.
1149 | */ |
1150 | |
1151 | void MDL_lock::Ticket_list::add_ticket(MDL_ticket *ticket) |
1152 | { |
1153 | /* |
1154 | Ticket being added to the list must have MDL_ticket::m_lock set, |
1155 | since for such tickets methods accessing this member might be |
1156 | called by other threads. |
1157 | */ |
1158 | DBUG_ASSERT(ticket->get_lock()); |
1159 | #ifdef WITH_WSREP |
1160 | if ((this == &(ticket->get_lock()->m_waiting)) && |
1161 | wsrep_thd_is_BF(ticket->get_ctx()->get_thd(), false)) |
1162 | { |
1163 | Ticket_iterator itw(ticket->get_lock()->m_waiting); |
1164 | Ticket_iterator itg(ticket->get_lock()->m_granted); |
1165 | |
1166 | DBUG_ASSERT(WSREP_ON); |
1167 | MDL_ticket *waiting, *granted; |
1168 | MDL_ticket *prev=NULL; |
1169 | bool added= false; |
1170 | |
1171 | while ((waiting= itw++) && !added) |
1172 | { |
1173 | if (!wsrep_thd_is_BF(waiting->get_ctx()->get_thd(), true)) |
1174 | { |
1175 | WSREP_DEBUG("MDL add_ticket inserted before: %lu %s" , |
1176 | thd_get_thread_id(waiting->get_ctx()->get_thd()), |
1177 | wsrep_thd_query(waiting->get_ctx()->get_thd())); |
1178 | /* Insert the ticket before the first non-BF waiting thd. */ |
1179 | m_list.insert_after(prev, ticket); |
1180 | added= true; |
1181 | } |
1182 | prev= waiting; |
1183 | } |
1184 | |
1185 | /* Otherwise, insert the ticket at the back of the waiting list. */ |
1186 | if (!added) m_list.push_back(ticket); |
1187 | |
1188 | while ((granted= itg++)) |
1189 | { |
1190 | if (granted->get_ctx() != ticket->get_ctx() && |
1191 | granted->is_incompatible_when_granted(ticket->get_type())) |
1192 | { |
1193 | if (!wsrep_grant_mdl_exception(ticket->get_ctx(), granted, |
1194 | &ticket->get_lock()->key)) |
1195 | { |
1196 | WSREP_DEBUG("MDL victim killed at add_ticket" ); |
1197 | } |
1198 | } |
1199 | } |
1200 | } |
1201 | else |
1202 | #endif /* WITH_WSREP */ |
1203 | { |
1204 | /* |
1205 | Add ticket to the *back* of the queue to ensure fairness |
1206 | among requests with the same priority. |
1207 | */ |
1208 | m_list.push_back(ticket); |
1209 | } |
1210 | m_bitmap|= MDL_BIT(ticket->get_type()); |
1211 | } |
1212 | |
1213 | |
1214 | /** |
  Remove a ticket from MDL_lock's list of requests and
  update the corresponding bitmap of lock types.
1217 | */ |
1218 | |
1219 | void MDL_lock::Ticket_list::remove_ticket(MDL_ticket *ticket) |
1220 | { |
1221 | m_list.remove(ticket); |
1222 | /* |
    Check if the waiting queue has another ticket with the same type as
    the one which was removed. If there is no such ticket, i.e. we have
    removed the last ticket of a particular type, then we need to update
    the bitmap of waiting ticket types.
    Note that in the most common case, i.e. when a shared lock is removed
    from the waiting queue, we are likely to find a ticket of the same
    type early without performing a full iteration through the list.
    So this method should not be too expensive.
1231 | */ |
1232 | clear_bit_if_not_in_list(ticket->get_type()); |
1233 | } |
1234 | |
1235 | |
1236 | /** |
  Determine which of the waiting contexts' requests for the lock can be
  satisfied, grant the lock to them and wake them up.

  @note Together with MDL_lock::add_ticket() this method implements
        fair scheduling among requests with the same priority.
        It tries to grant locks from the head of the waiters list, while
        add_ticket() adds new requests to the back of this list.
1244 | |
1245 | */ |
1246 | |
1247 | void MDL_lock::reschedule_waiters() |
1248 | { |
1249 | MDL_lock::Ticket_iterator it(m_waiting); |
1250 | MDL_ticket *ticket; |
1251 | bool skip_high_priority= false; |
1252 | bitmap_t hog_lock_types= hog_lock_types_bitmap(); |
1253 | |
1254 | if (m_hog_lock_count >= max_write_lock_count) |
1255 | { |
1256 | /* |
      If the number of successively granted high-prio, strong locks has
      exceeded max_write_lock_count, give way to low-prio, weak locks to
      avoid their starvation.
1260 | */ |
1261 | |
1262 | if ((m_waiting.bitmap() & ~hog_lock_types) != 0) |
1263 | { |
1264 | /* |
1265 | Even though normally when m_hog_lock_count is non-0 there is |
1266 | some pending low-prio lock, we still can encounter situation |
1267 | when m_hog_lock_count is non-0 and there are no pending low-prio |
1268 | locks. This, for example, can happen when a ticket for pending |
1269 | low-prio lock was removed from waiters list due to timeout, |
1270 | and reschedule_waiters() is called after that to update the |
1271 | waiters queue. m_hog_lock_count will be reset to 0 at the |
1272 | end of this call in such case. |
1273 | |
1274 | Note that it is not an issue if we fail to wake up any pending |
1275 | waiters for weak locks in the loop below. This would mean that |
1276 | all of them are either killed, timed out or chosen as a victim |
1277 | by deadlock resolver, but have not managed to remove ticket |
1278 | from the waiters list yet. After tickets will be removed from |
1279 | the waiters queue there will be another call to |
1280 | reschedule_waiters() with pending bitmap updated to reflect new |
1281 | state of waiters queue. |
1282 | */ |
1283 | skip_high_priority= true; |
1284 | } |
1285 | } |
1286 | |
1287 | /* |
1288 | Find the first (and hence the oldest) waiting request which |
1289 | can be satisfied (taking into account priority). Grant lock to it. |
1290 | Repeat the process for the remainder of waiters. |
1291 | Note we don't need to re-start iteration from the head of the |
1292 | list after satisfying the first suitable request as in our case |
1293 | all compatible types of requests have the same priority. |
1294 | |
1295 | TODO/FIXME: We should: |
1296 | - Either switch to scheduling without priorities |
1297 | which will allow to stop iteration through the |
1298 | list of waiters once we found the first ticket |
1299 | which can't be satisfied |
1300 | - Or implement some check using bitmaps which will |
1301 | allow to stop iteration in cases when, e.g., we |
1302 | grant SNRW lock and there are no pending S or |
1303 | SH locks. |
1304 | */ |
1305 | while ((ticket= it++)) |
1306 | { |
1307 | /* |
1308 | Skip high-prio, strong locks if earlier we have decided to give way to |
1309 | low-prio, weaker locks. |
1310 | */ |
1311 | if (skip_high_priority && |
1312 | ((MDL_BIT(ticket->get_type()) & hog_lock_types) != 0)) |
1313 | continue; |
1314 | |
1315 | if (can_grant_lock(ticket->get_type(), ticket->get_ctx(), |
1316 | skip_high_priority)) |
1317 | { |
1318 | if (! ticket->get_ctx()->m_wait.set_status(MDL_wait::GRANTED)) |
1319 | { |
1320 | /* |
1321 | Satisfy the found request by updating lock structures. |
1322 | It is OK to do so even after waking up the waiter since any |
1323 | session which tries to get any information about the state of |
1324 | this lock has to acquire MDL_lock::m_rwlock first and thus, |
1325 | when manages to do so, already sees an updated state of the |
1326 | MDL_lock object. |
1327 | */ |
1328 | m_waiting.remove_ticket(ticket); |
1329 | m_granted.add_ticket(ticket); |
1330 | |
1331 | /* |
1332 | Increase counter of successively granted high-priority strong locks, |
1333 | if we have granted one. |
1334 | */ |
1335 | if ((MDL_BIT(ticket->get_type()) & hog_lock_types) != 0) |
1336 | m_hog_lock_count++; |
1337 | } |
1338 | /* |
      If we could not update the wait slot of the waiter,
      it can be due to the fact that its connection/statement was
      killed or it has timed out (i.e. the slot is not empty).
      Since in all such cases the waiter assumes that the lock was
      not granted, we should keep the request in the waiting
      queue and look for another request to reschedule.
1345 | */ |
1346 | } |
1347 | } |
1348 | |
1349 | if ((m_waiting.bitmap() & ~hog_lock_types) == 0) |
1350 | { |
1351 | /* |
      Reset the number of successively granted high-prio, strong locks
      if there are no pending low-prio, weak locks.
      This ensures:
      - That m_hog_lock_count is correctly reset after a strong lock
        is released and weak locks are granted (or there are no
        other lock requests).
      - That the situation when an SNW lock is granted along with some SR
        locks, but SW locks are still blocked, is handled correctly.
      - That m_hog_lock_count is zero in most cases when there are no
        pending weak locks (see the comment at the start of this method
        for an example of an exception). This allows us to save on checks
        at the start of this method.
1363 | */ |
1364 | m_hog_lock_count= 0; |
1365 | } |
1366 | } |
1367 | |
1368 | |
1369 | /** |
  Compatibility (or rather "incompatibility") matrices for scoped metadata
  locks. Arrays of bitmaps whose elements specify which granted/waiting
  locks are incompatible with the type of lock being requested.
1373 | |
  The first array specifies if a particular type of request can be
  satisfied if there is a granted scoped lock of a certain type.
1376 | |
1377 | | Type of active | |
1378 | Request | scoped lock | |
1379 | type | IS(*) IX S X | |
1380 | ---------+------------------+ |
1381 | IS | + + + + | |
1382 | IX | + + - - | |
1383 | S | + - + - | |
1384 | X | + - - - | |
1385 | |
  The second array specifies if a particular type of request can be
  satisfied if there is an already waiting request for the scoped lock
  of a certain type. I.e. it specifies the priority of different lock
  types.
1389 | |
1390 | | Pending | |
1391 | Request | scoped lock | |
1392 | type | IS(*) IX S X | |
1393 | ---------+-----------------+ |
1394 | IS | + + + + | |
1395 | IX | + + - - | |
1396 | S | + + + - | |
1397 | X | + + + + | |
1398 | |
1399 | Here: "+" -- means that request can be satisfied |
1400 | "-" -- means that request can't be satisfied and should wait |
1401 | |
  (*) Since intention shared scoped locks are compatible with all other
      types of locks, we don't even have any accounting for them.
1404 | |
  Note that the relation between scoped locks and object locks requested
  by a statement is not straightforward and is therefore fully defined
  by the SQL layer.
  For example, in order to support the global read lock implementation,
  the SQL layer acquires an IX lock in the GLOBAL namespace for each
  statement that can modify metadata or data (i.e. for each statement
  that needs SW, SU, SNW, SNRW or X object locks). OTOH, to ensure that
  DROP DATABASE works correctly with concurrent DDL, IX metadata locks
  in the SCHEMA namespace are acquired for DDL statements which can
  update metadata in the schema (i.e. which acquire SU, SNW, SNRW and X
  locks on schema objects) and aren't acquired for DML.
1416 | */ |
1417 | |
1418 | const MDL_lock::bitmap_t |
1419 | MDL_lock::MDL_scoped_lock::m_granted_incompatible[MDL_TYPE_END]= |
1420 | { |
1421 | MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED), |
1422 | MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_INTENTION_EXCLUSIVE), |
1423 | 0, 0, 0, 0, 0, 0, 0, |
1424 | MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED) | MDL_BIT(MDL_INTENTION_EXCLUSIVE) |
1425 | }; |
1426 | |
1427 | const MDL_lock::bitmap_t |
1428 | MDL_lock::MDL_scoped_lock::m_waiting_incompatible[MDL_TYPE_END]= |
1429 | { |
1430 | MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED), |
1431 | MDL_BIT(MDL_EXCLUSIVE), 0, 0, 0, 0, 0, 0, 0, 0 |
1432 | }; |
1433 | |
1434 | |
1435 | /** |
  Compatibility (or rather "incompatibility") matrices for per-object
  metadata locks. Arrays of bitmaps whose elements specify which granted/
  waiting locks are incompatible with the type of lock being requested.
1439 | |
  The first array specifies if a particular type of request can be
  satisfied if there is a granted lock of a certain type.
1442 | |
1443 | Request | Granted requests for lock | |
1444 | type | S SH SR SW SU SRO SNW SNRW X | |
1445 | ----------+---------------------------------------+ |
1446 | S | + + + + + + + + - | |
1447 | SH | + + + + + + + + - | |
1448 | SR | + + + + + + + - - | |
1449 | SW | + + + + + - - - - | |
1450 | SU | + + + + - + - - - | |
1451 | SRO | + + + - + + + - - | |
1452 | SNW | + + + - - + - - - | |
1453 | SNRW | + + - - - - - - - | |
1454 | X | - - - - - - - - - | |
1455 | SU -> X | - - - - 0 - 0 0 0 | |
1456 | SNW -> X | - - - 0 0 - 0 0 0 | |
1457 | SNRW -> X | - - 0 0 0 0 0 0 0 | |
1458 | |
  The second array specifies if a particular type of request can be
  satisfied if there is a waiting request for the same lock of a certain
  type. In other words, it specifies the priority of different lock types.
1462 | |
1463 | Request | Pending requests for lock | |
1464 | type | S SH SR SW SU SRO SNW SNRW X | |
1465 | ----------+--------------------------------------+ |
1466 | S | + + + + + + + + - | |
1467 | SH | + + + + + + + + + | |
1468 | SR | + + + + + + + - - | |
1469 | SW | + + + + + + - - - | |
1470 | SU | + + + + + + + + - | |
1471 | SRO | + + + - + + + - - | |
1472 | SNW | + + + + + + + + - | |
1473 | SNRW | + + + + + + + + - | |
1474 | X | + + + + + + + + + | |
1475 | SU -> X | + + + + + + + + + | |
1476 | SNW -> X | + + + + + + + + + | |
1477 | SNRW -> X | + + + + + + + + + | |
1478 | |
1479 | Here: "+" -- means that request can be satisfied |
1480 | "-" -- means that request can't be satisfied and should wait |
1481 | "0" -- means impossible situation which will trigger assert |
1482 | |
  @note In cases when the current context already has a "stronger" type
        of lock on the object, it will be automatically granted
        thanks to the use of the MDL_context::find_ticket() method.
1486 | |
1487 | @note IX locks are excluded since they are not used for per-object |
1488 | metadata locks. |
1489 | */ |
1490 | |
1491 | const MDL_lock::bitmap_t |
1492 | MDL_lock::MDL_object_lock::m_granted_incompatible[MDL_TYPE_END]= |
1493 | { |
1494 | 0, |
1495 | MDL_BIT(MDL_EXCLUSIVE), |
1496 | MDL_BIT(MDL_EXCLUSIVE), |
1497 | MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE), |
1498 | MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) | |
1499 | MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_READ_ONLY), |
1500 | MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) | |
1501 | MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE), |
1502 | MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) | |
1503 | MDL_BIT(MDL_SHARED_WRITE), |
1504 | MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) | |
1505 | MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE) | |
1506 | MDL_BIT(MDL_SHARED_WRITE), |
1507 | MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) | |
1508 | MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_READ_ONLY) | |
1509 | MDL_BIT(MDL_SHARED_UPGRADABLE) | MDL_BIT(MDL_SHARED_WRITE) | |
1510 | MDL_BIT(MDL_SHARED_READ), |
1511 | MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) | |
1512 | MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_READ_ONLY) | |
1513 | MDL_BIT(MDL_SHARED_UPGRADABLE) | MDL_BIT(MDL_SHARED_WRITE) | |
1514 | MDL_BIT(MDL_SHARED_READ) | MDL_BIT(MDL_SHARED_HIGH_PRIO) | |
1515 | MDL_BIT(MDL_SHARED) |
1516 | }; |
1517 | |
1518 | |
1519 | const MDL_lock::bitmap_t |
1520 | MDL_lock::MDL_object_lock::m_waiting_incompatible[MDL_TYPE_END]= |
1521 | { |
1522 | 0, |
1523 | MDL_BIT(MDL_EXCLUSIVE), |
1524 | 0, |
1525 | MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE), |
1526 | MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) | |
1527 | MDL_BIT(MDL_SHARED_NO_WRITE), |
1528 | MDL_BIT(MDL_EXCLUSIVE), |
1529 | MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) | |
1530 | MDL_BIT(MDL_SHARED_WRITE), |
1531 | MDL_BIT(MDL_EXCLUSIVE), |
1532 | MDL_BIT(MDL_EXCLUSIVE), |
1533 | 0 |
1534 | }; |
1535 | |
1536 | |
1537 | /** |
1538 | Check if request for the metadata lock can be satisfied given its |
1539 | current state. |
1540 | |
1541 | @param type_arg The requested lock type. |
1542 | @param requestor_ctx The MDL context of the requestor. |
1543 | @param ignore_lock_priority Ignore lock priority. |
1544 | |
1545 | @retval TRUE Lock request can be satisfied |
1546 | @retval FALSE There is some conflicting lock. |
1547 | |
  @note In cases when the current context already has a "stronger" type
        of lock on the object, it will be automatically granted
        thanks to the use of the MDL_context::find_ticket() method.
1551 | */ |
1552 | |
1553 | bool |
1554 | MDL_lock::can_grant_lock(enum_mdl_type type_arg, |
1555 | MDL_context *requestor_ctx, |
1556 | bool ignore_lock_priority) const |
1557 | { |
1558 | bool can_grant= FALSE; |
1559 | bitmap_t waiting_incompat_map= incompatible_waiting_types_bitmap()[type_arg]; |
1560 | bitmap_t granted_incompat_map= incompatible_granted_types_bitmap()[type_arg]; |
1561 | bool wsrep_can_grant= TRUE; |
1562 | |
1563 | /* |
    A new lock request can be satisfied iff:
    - There are no incompatible types of satisfied requests
      in other contexts
    - There are no waiting requests which have higher priority
      than this request, when priority is not ignored.
1569 | */ |
1570 | if (ignore_lock_priority || !(m_waiting.bitmap() & waiting_incompat_map)) |
1571 | { |
1572 | if (! (m_granted.bitmap() & granted_incompat_map)) |
1573 | can_grant= TRUE; |
1574 | else |
1575 | { |
1576 | Ticket_iterator it(m_granted); |
1577 | MDL_ticket *ticket; |
1578 | |
1579 | /* Check that the incompatible lock belongs to some other context. */ |
1580 | while ((ticket= it++)) |
1581 | { |
1582 | if (ticket->get_ctx() != requestor_ctx && |
1583 | ticket->is_incompatible_when_granted(type_arg)) |
1584 | { |
1585 | #ifdef WITH_WSREP |
1586 | if (wsrep_thd_is_BF(requestor_ctx->get_thd(),false) && |
1587 | key.mdl_namespace() == MDL_key::GLOBAL) |
1588 | { |
1589 | WSREP_DEBUG("global lock granted for BF: %lu %s" , |
1590 | thd_get_thread_id(requestor_ctx->get_thd()), |
1591 | wsrep_thd_query(requestor_ctx->get_thd())); |
1592 | can_grant = true; |
1593 | } |
1594 | else if (!wsrep_grant_mdl_exception(requestor_ctx, ticket, &key)) |
1595 | { |
1596 | wsrep_can_grant= FALSE; |
1597 | if (wsrep_log_conflicts) |
1598 | { |
1599 | MDL_lock * lock = ticket->get_lock(); |
              WSREP_INFO(
                "MDL conflict db=%s table=%s ticket=%d solved by %s",
                lock->key.db_name(), lock->key.name(), ticket->get_type(),
                "abort");
1604 | } |
1605 | } |
1606 | else |
1607 | can_grant= TRUE; |
1608 | /* Continue loop */ |
1609 | #else |
1610 | break; |
1611 | #endif /* WITH_WSREP */ |
1612 | } |
1613 | } |
1614 | if ((ticket == NULL) && wsrep_can_grant) |
1615 | can_grant= TRUE; /* Incompatible locks are our own. */ |
1616 | } |
1617 | } |
1618 | else |
1619 | { |
1620 | if (wsrep_thd_is_BF(requestor_ctx->get_thd(), false) && |
1621 | key.mdl_namespace() == MDL_key::GLOBAL) |
1622 | { |
1623 | WSREP_DEBUG("global lock granted for BF (waiting queue): %lu %s" , |
1624 | thd_get_thread_id(requestor_ctx->get_thd()), |
1625 | wsrep_thd_query(requestor_ctx->get_thd())); |
      can_grant= TRUE;
1627 | } |
1628 | } |
1629 | return can_grant; |
1630 | } |
1631 | |
1632 | |
1633 | /** |
1634 | Return thread id of the thread to which the first ticket was |
1635 | granted. |
1636 | */ |
1637 | |
1638 | inline unsigned long |
1639 | MDL_lock::get_lock_owner() const |
1640 | { |
1641 | Ticket_iterator it(m_granted); |
1642 | MDL_ticket *ticket; |
1643 | |
1644 | if ((ticket= it++)) |
1645 | return ticket->get_ctx()->get_thread_id(); |
1646 | return 0; |
1647 | } |
1648 | |
1649 | |
/** Remove a ticket from the waiting or granted queue and wake up waiters. */
1651 | |
1652 | void MDL_lock::remove_ticket(LF_PINS *pins, Ticket_list MDL_lock::*list, |
1653 | MDL_ticket *ticket) |
1654 | { |
1655 | mysql_prlock_wrlock(&m_rwlock); |
1656 | (this->*list).remove_ticket(ticket); |
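  /*
    Note (editorial): on the is_empty() path below, mdl_locks.remove()
    is expected to release m_rwlock while removing the unused lock
    object from the container, which is why there is no explicit
    unlock on that branch.
  */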
1657 | if (is_empty()) |
1658 | mdl_locks.remove(pins, this); |
1659 | else |
1660 | { |
1661 | /* |
1662 | There can be some contexts waiting to acquire a lock |
1663 | which now might be able to do it. Grant the lock to |
1664 | them and wake them up! |
1665 | |
1666 | We always try to reschedule locks, since there is no easy way |
1667 | (i.e. by looking at the bitmaps) to find out whether it is |
1668 | required or not. |
      In the general case, even when the queue's bitmap is not changed
1670 | after removal of the ticket, there is a chance that some request |
1671 | can be satisfied (due to the fact that a granted request |
1672 | reflected in the bitmap might belong to the same context as a |
1673 | pending request). |
1674 | */ |
1675 | reschedule_waiters(); |
1676 | mysql_prlock_unlock(&m_rwlock); |
1677 | } |
1678 | } |
1679 | |
1680 | |
1681 | /** |
  Check if we have any pending locks which conflict with an existing
  shared lock.
1684 | |
1685 | @pre The ticket must match an acquired lock. |
1686 | |
1687 | @return TRUE if there is a conflicting lock request, FALSE otherwise. |
1688 | */ |
1689 | |
1690 | bool MDL_lock::has_pending_conflicting_lock(enum_mdl_type type) |
1691 | { |
1692 | bool result; |
1693 | |
1694 | mysql_prlock_rdlock(&m_rwlock); |
1695 | result= (m_waiting.bitmap() & incompatible_granted_types_bitmap()[type]); |
1696 | mysql_prlock_unlock(&m_rwlock); |
1697 | return result; |
1698 | } |
1699 | |
1700 | |
1701 | MDL_wait_for_graph_visitor::~MDL_wait_for_graph_visitor() |
1702 | { |
1703 | } |
1704 | |
1705 | |
1706 | MDL_wait_for_subgraph::~MDL_wait_for_subgraph() |
1707 | { |
1708 | } |
1709 | |
1710 | /** |
  Check if the ticket represents a metadata lock of "stronger" or equal
  type than the specified one, i.e. whether the lock represented by the
  ticket does not allow any lock which the specified lock type disallows.
1714 | |
1715 | @return TRUE if ticket has stronger or equal type |
1716 | FALSE otherwise. |
1717 | */ |
1718 | |
1719 | bool MDL_ticket::has_stronger_or_equal_type(enum_mdl_type type) const |
1720 | { |
1721 | const MDL_lock::bitmap_t * |
1722 | granted_incompat_map= m_lock->incompatible_granted_types_bitmap(); |
1723 | |
1724 | return ! (granted_incompat_map[type] & ~(granted_incompat_map[m_type])); |
1725 | } |
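

/*
  Worked example (editorial, illustrative): suppose this ticket holds
  MDL_EXCLUSIVE and 'type' is MDL_SHARED_READ. An X lock conflicts with
  every granted lock type, so granted_incompat_map[MDL_EXCLUSIVE] has
  all relevant bits set, and

    granted_incompat_map[MDL_SHARED_READ] &
      ~granted_incompat_map[MDL_EXCLUSIVE]

  evaluates to 0, i.e. the method returns TRUE: X subsumes SR.
*/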
1726 | |
1727 | |
1728 | bool MDL_ticket::is_incompatible_when_granted(enum_mdl_type type) const |
1729 | { |
1730 | return (MDL_BIT(m_type) & |
1731 | m_lock->incompatible_granted_types_bitmap()[type]); |
1732 | } |
1733 | |
1734 | |
1735 | bool MDL_ticket::is_incompatible_when_waiting(enum_mdl_type type) const |
1736 | { |
1737 | return (MDL_BIT(m_type) & |
1738 | m_lock->incompatible_waiting_types_bitmap()[type]); |
1739 | } |
1740 | |
1741 | |
1742 | /** |
1743 | Check whether the context already holds a compatible lock ticket |
1744 | on an object. |
  Start searching from the list of locks for the same duration as the
  lock being requested. If no ticket is found there, look at the lists
  for other durations.
1747 | |
1748 | @param mdl_request Lock request object for lock to be acquired |
1749 | @param[out] result_duration Duration of lock which was found. |
1750 | |
1751 | @note Tickets which correspond to lock types "stronger" than one |
1752 | being requested are also considered compatible. |
1753 | |
1754 | @return A pointer to the lock ticket for the object or NULL otherwise. |
1755 | */ |
1756 | |
1757 | MDL_ticket * |
1758 | MDL_context::find_ticket(MDL_request *mdl_request, |
1759 | enum_mdl_duration *result_duration) |
1760 | { |
1761 | MDL_ticket *ticket; |
1762 | int i; |
1763 | |
1764 | for (i= 0; i < MDL_DURATION_END; i++) |
1765 | { |
1766 | enum_mdl_duration duration= (enum_mdl_duration)((mdl_request->duration+i) % |
1767 | MDL_DURATION_END); |
1768 | Ticket_iterator it(m_tickets[duration]); |
1769 | |
1770 | while ((ticket= it++)) |
1771 | { |
1772 | if (mdl_request->key.is_equal(&ticket->m_lock->key) && |
1773 | ticket->has_stronger_or_equal_type(mdl_request->type)) |
1774 | { |
1775 | DBUG_PRINT("info" , ("Adding mdl lock %d to %d" , |
1776 | mdl_request->type, ticket->m_type)); |
1777 | *result_duration= duration; |
1778 | return ticket; |
1779 | } |
1780 | } |
1781 | } |
1782 | return NULL; |
1783 | } |
1784 | |
1785 | |
1786 | /** |
1787 | Try to acquire one lock. |
1788 | |
1789 | Unlike exclusive locks, shared locks are acquired one by |
  one. This interface was chosen to simplify the introduction of
1791 | the new locking API to the system. MDL_context::try_acquire_lock() |
1792 | is currently used from open_table(), and there we have only one |
1793 | table to work with. |
1794 | |
1795 | This function may also be used to try to acquire an exclusive |
1796 | lock on a destination table, by ALTER TABLE ... RENAME. |
1797 | |
  Returns immediately without any side effect if it encounters a lock
  conflict. Otherwise takes the lock.
1800 | |
1801 | FIXME: Compared to lock_table_name_if_not_cached() (from 5.1) |
1802 | it gives slightly more false negatives. |
1803 | |
1804 | @param mdl_request [in/out] Lock request object for lock to be acquired |
1805 | |
  @retval FALSE  Success. The lock may not have been acquired.
                 Check the ticket: if it's NULL, a conflicting lock
                 exists.
1809 | @retval TRUE Out of resources, an error has been reported. |
1810 | */ |
1811 | |
1812 | bool |
1813 | MDL_context::try_acquire_lock(MDL_request *mdl_request) |
1814 | { |
1815 | MDL_ticket *ticket; |
1816 | |
1817 | if (try_acquire_lock_impl(mdl_request, &ticket)) |
1818 | return TRUE; |
1819 | |
1820 | if (! mdl_request->ticket) |
1821 | { |
1822 | /* |
1823 | Our attempt to acquire lock without waiting has failed. |
1824 | Let us release resources which were acquired in the process. |
1825 | We can't get here if we allocated a new lock object so there |
1826 | is no need to release it. |
1827 | */ |
1828 | DBUG_ASSERT(! ticket->m_lock->is_empty()); |
1829 | mysql_prlock_unlock(&ticket->m_lock->m_rwlock); |
1830 | MDL_ticket::destroy(ticket); |
1831 | } |
1832 | |
1833 | return FALSE; |
1834 | } |
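

/*
  Usage sketch (editorial, illustrative only; assumes a valid THD
  pointer 'thd', as available in open_table()):

    MDL_request request;
    request.init(MDL_key::TABLE, "db", "t1",
                 MDL_SHARED_READ, MDL_TRANSACTION);

    if (thd->mdl_context.try_acquire_lock(&request))
      return TRUE;                // out of resources, error reported

    if (request.ticket == NULL)
      return FALSE;               // conflict; no side effects to undo

    // Lock granted; the ticket is now owned by the context.
*/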
1835 | |
1836 | |
1837 | /** |
1838 | Auxiliary method for acquiring lock without waiting. |
1839 | |
1840 | @param mdl_request [in/out] Lock request object for lock to be acquired |
1841 | @param out_ticket [out] Ticket for the request in case when lock |
1842 | has not been acquired. |
1843 | |
  @retval FALSE   Success. The lock may not have been acquired.
                  Check MDL_request::ticket: if it's NULL, a conflicting
                  lock exists. In this case the "out_ticket" out parameter
                  points to the ticket which was constructed for the
                  request; MDL_ticket::m_lock points to the corresponding
                  MDL_lock object, and MDL_lock::m_rwlock is left
                  write-locked.
1850 | @retval TRUE Out of resources, an error has been reported. |
1851 | */ |
1852 | |
1853 | bool |
1854 | MDL_context::try_acquire_lock_impl(MDL_request *mdl_request, |
1855 | MDL_ticket **out_ticket) |
1856 | { |
1857 | MDL_lock *lock; |
1858 | MDL_key *key= &mdl_request->key; |
1859 | MDL_ticket *ticket; |
1860 | enum_mdl_duration found_duration; |
1861 | |
1862 | DBUG_ASSERT(mdl_request->type != MDL_EXCLUSIVE || |
1863 | is_lock_owner(MDL_key::GLOBAL, "" , "" , MDL_INTENTION_EXCLUSIVE)); |
1864 | DBUG_ASSERT(mdl_request->ticket == NULL); |
1865 | |
1866 | /* Don't take chances in production. */ |
1867 | mdl_request->ticket= NULL; |
1868 | |
1869 | /* |
1870 | Check whether the context already holds a shared lock on the object, |
1871 | and if so, grant the request. |
1872 | */ |
1873 | if ((ticket= find_ticket(mdl_request, &found_duration))) |
1874 | { |
1875 | DBUG_ASSERT(ticket->m_lock); |
1876 | DBUG_ASSERT(ticket->has_stronger_or_equal_type(mdl_request->type)); |
1877 | /* |
1878 | If the request is for a transactional lock, and we found |
1879 | a transactional lock, just reuse the found ticket. |
1880 | |
1881 | It's possible that we found a transactional lock, |
1882 | but the request is for a HANDLER lock. In that case HANDLER |
1883 | code will clone the ticket (see below why it's needed). |
1884 | |
1885 | If the request is for a transactional lock, and we found |
1886 | a HANDLER lock, create a copy, to make sure that when user |
1887 | does HANDLER CLOSE, the transactional lock is not released. |
1888 | |
1889 | If the request is for a handler lock, and we found a |
1890 | HANDLER lock, also do the clone. HANDLER CLOSE for one alias |
1891 | should not release the lock on the table HANDLER opened through |
1892 | a different alias. |
1893 | */ |
1894 | mdl_request->ticket= ticket; |
1895 | if ((found_duration != mdl_request->duration || |
1896 | mdl_request->duration == MDL_EXPLICIT) && |
1897 | clone_ticket(mdl_request)) |
1898 | { |
1899 | /* Clone failed. */ |
1900 | mdl_request->ticket= NULL; |
1901 | return TRUE; |
1902 | } |
1903 | return FALSE; |
1904 | } |
1905 | |
1906 | if (fix_pins()) |
1907 | return TRUE; |
1908 | |
1909 | if (!(ticket= MDL_ticket::create(this, mdl_request->type |
1910 | #ifndef DBUG_OFF |
1911 | , mdl_request->duration |
1912 | #endif |
1913 | ))) |
1914 | return TRUE; |
1915 | |
1916 | /* The below call implicitly locks MDL_lock::m_rwlock on success. */ |
1917 | if (!(lock= mdl_locks.find_or_insert(m_pins, key))) |
1918 | { |
1919 | MDL_ticket::destroy(ticket); |
1920 | return TRUE; |
1921 | } |
1922 | |
1923 | ticket->m_lock= lock; |
1924 | |
1925 | if (lock->can_grant_lock(mdl_request->type, this, false)) |
1926 | { |
1927 | lock->m_granted.add_ticket(ticket); |
1928 | |
1929 | mysql_prlock_unlock(&lock->m_rwlock); |
1930 | |
1931 | m_tickets[mdl_request->duration].push_front(ticket); |
1932 | |
1933 | mdl_request->ticket= ticket; |
1934 | } |
1935 | else |
1936 | *out_ticket= ticket; |
1937 | |
1938 | return FALSE; |
1939 | } |
1940 | |
1941 | |
1942 | /** |
1943 | Create a copy of a granted ticket. |
1944 | This is used to make sure that HANDLER ticket |
1945 | is never shared with a ticket that belongs to |
1946 | a transaction, so that when we HANDLER CLOSE, |
1947 | we don't release a transactional ticket, and |
1948 | vice versa -- when we COMMIT, we don't mistakenly |
1949 | release a ticket for an open HANDLER. |
1950 | |
1951 | @retval TRUE Out of memory. |
1952 | @retval FALSE Success. |
1953 | */ |
1954 | |
1955 | bool |
1956 | MDL_context::clone_ticket(MDL_request *mdl_request) |
1957 | { |
1958 | MDL_ticket *ticket; |
1959 | |
1960 | |
1961 | /* |
    Since in theory we can clone a ticket belonging to a different
    context, we need to prepare the target context for possible attempts
    to release the lock, and thus for possible removal of the MDL_lock
    from the MDL_map container. So we allocate pins, to be able to work
    with this container, if they are not allocated already.
1967 | */ |
1968 | if (fix_pins()) |
1969 | return TRUE; |
1970 | |
1971 | /* |
1972 | By submitting mdl_request->type to MDL_ticket::create() |
1973 | we effectively downgrade the cloned lock to the level of |
1974 | the request. |
1975 | */ |
1976 | if (!(ticket= MDL_ticket::create(this, mdl_request->type |
1977 | #ifndef DBUG_OFF |
1978 | , mdl_request->duration |
1979 | #endif |
1980 | ))) |
1981 | return TRUE; |
1982 | |
1983 | /* clone() is not supposed to be used to get a stronger lock. */ |
1984 | DBUG_ASSERT(mdl_request->ticket->has_stronger_or_equal_type(ticket->m_type)); |
1985 | |
1986 | ticket->m_lock= mdl_request->ticket->m_lock; |
1987 | mdl_request->ticket= ticket; |
1988 | |
1989 | mysql_prlock_wrlock(&ticket->m_lock->m_rwlock); |
1990 | ticket->m_lock->m_granted.add_ticket(ticket); |
1991 | mysql_prlock_unlock(&ticket->m_lock->m_rwlock); |
1992 | |
1993 | m_tickets[mdl_request->duration].push_front(ticket); |
1994 | |
1995 | return FALSE; |
1996 | } |
1997 | |
1998 | |
1999 | /** |
2000 | Check if there is any conflicting lock that could cause this thread |
2001 | to wait for another thread which is not ready to commit. |
2002 | This is always an error, as the upper level of parallel replication |
  should not allow scheduling of a conflicting DDL until all earlier
  transactions have committed.
2005 | |
2006 | This function is only called for a slave using parallel replication |
2007 | and trying to get an exclusive lock for the table. |
2008 | */ |
2009 | |
2010 | #ifndef DBUG_OFF |
2011 | bool MDL_lock::check_if_conflicting_replication_locks(MDL_context *ctx) |
2012 | { |
2013 | Ticket_iterator it(m_granted); |
2014 | MDL_ticket *conflicting_ticket; |
2015 | rpl_group_info *rgi_slave= ctx->get_thd()->rgi_slave; |
2016 | |
2017 | if (!rgi_slave->gtid_sub_id) |
2018 | return 0; |
2019 | |
2020 | while ((conflicting_ticket= it++)) |
2021 | { |
2022 | if (conflicting_ticket->get_ctx() != ctx) |
2023 | { |
2024 | MDL_context *conflicting_ctx= conflicting_ticket->get_ctx(); |
2025 | rpl_group_info *conflicting_rgi_slave; |
2026 | conflicting_rgi_slave= conflicting_ctx->get_thd()->rgi_slave; |
2027 | |
2028 | /* |
2029 | If the conflicting thread is another parallel replication |
2030 | thread for the same master and it's not in commit stage, then |
2031 | the current transaction has started too early and something is |
2032 | seriously wrong. |
2033 | */ |
2034 | if (conflicting_rgi_slave && |
2035 | conflicting_rgi_slave->gtid_sub_id && |
2036 | conflicting_rgi_slave->rli == rgi_slave->rli && |
2037 | conflicting_rgi_slave->current_gtid.domain_id == |
2038 | rgi_slave->current_gtid.domain_id && |
2039 | !conflicting_rgi_slave->did_mark_start_commit) |
2040 | return 1; // Fatal error |
2041 | } |
2042 | } |
2043 | return 0; |
2044 | } |
2045 | #endif |
2046 | |
2047 | |
2048 | /** |
2049 | Acquire one lock with waiting for conflicting locks to go away if needed. |
2050 | |
2051 | @param mdl_request [in/out] Lock request object for lock to be acquired |
2052 | |
2053 | @param lock_wait_timeout [in] Seconds to wait before timeout. |
2054 | |
2055 | @retval FALSE Success. MDL_request::ticket points to the ticket |
2056 | for the lock. |
  @retval TRUE   Failure (out of resources, or waiting was aborted).
2058 | */ |
2059 | |
2060 | bool |
2061 | MDL_context::acquire_lock(MDL_request *mdl_request, double lock_wait_timeout) |
2062 | { |
2063 | MDL_lock *lock; |
2064 | MDL_ticket *ticket; |
2065 | MDL_wait::enum_wait_status wait_status; |
2066 | DBUG_ENTER("MDL_context::acquire_lock" ); |
2067 | DBUG_PRINT("enter" , ("lock_type: %d" , mdl_request->type)); |
2068 | |
2069 | if (try_acquire_lock_impl(mdl_request, &ticket)) |
2070 | DBUG_RETURN(TRUE); |
2071 | |
2072 | if (mdl_request->ticket) |
2073 | { |
2074 | /* |
2075 | We have managed to acquire lock without waiting. |
2076 | MDL_lock, MDL_context and MDL_request were updated |
2077 | accordingly, so we can simply return success. |
2078 | */ |
2079 | DBUG_PRINT("info" , ("Got lock without waiting" )); |
2080 | DBUG_RETURN(FALSE); |
2081 | } |
2082 | |
2083 | /* |
2084 | Our attempt to acquire lock without waiting has failed. |
2085 | As a result of this attempt we got MDL_ticket with m_lock |
2086 | member pointing to the corresponding MDL_lock object which |
2087 | has MDL_lock::m_rwlock write-locked. |
2088 | */ |
2089 | lock= ticket->m_lock; |
2090 | |
2091 | if (lock_wait_timeout == 0) |
2092 | { |
2093 | mysql_prlock_unlock(&lock->m_rwlock); |
2094 | MDL_ticket::destroy(ticket); |
2095 | my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0)); |
2096 | DBUG_RETURN(TRUE); |
2097 | } |
2098 | |
2099 | lock->m_waiting.add_ticket(ticket); |
2100 | |
2101 | /* |
2102 | Once we added a pending ticket to the waiting queue, |
2103 | we must ensure that our wait slot is empty, so |
2104 | that our lock request can be scheduled. Do that in the |
2105 | critical section formed by the acquired write lock on MDL_lock. |
2106 | */ |
2107 | m_wait.reset_status(); |
2108 | |
2109 | /* |
    Don't break conflicting locks if the timeout is 0, as a timeout of
    0 is used to check whether there are any conflicting locks.
2112 | */ |
2113 | if (lock->needs_notification(ticket) && lock_wait_timeout) |
2114 | lock->notify_conflicting_locks(this); |
2115 | |
2116 | /* |
    Ensure that if we are trying to get an exclusive lock for a slave
    running parallel replication, then we are not blocked by another
    parallel slave thread that has not yet committed. This should never
    happen, as the parallel replication scheduler should never schedule
    a DDL while DMLs are still running.
2122 | */ |
2123 | DBUG_SLOW_ASSERT((mdl_request->type != MDL_INTENTION_EXCLUSIVE && |
2124 | mdl_request->type != MDL_EXCLUSIVE) || |
2125 | !(get_thd()->rgi_slave && |
2126 | get_thd()->rgi_slave->is_parallel_exec && |
2127 | lock->check_if_conflicting_replication_locks(this))); |
2128 | |
2129 | mysql_prlock_unlock(&lock->m_rwlock); |
2130 | |
2131 | will_wait_for(ticket); |
2132 | |
2133 | /* There is a shared or exclusive lock on the object. */ |
2134 | DEBUG_SYNC(get_thd(), "mdl_acquire_lock_wait" ); |
2135 | |
2136 | find_deadlock(); |
2137 | |
2138 | struct timespec abs_timeout, abs_shortwait; |
2139 | set_timespec_nsec(abs_timeout, |
2140 | (ulonglong)(lock_wait_timeout * 1000000000ULL)); |
2141 | set_timespec(abs_shortwait, 1); |
2142 | wait_status= MDL_wait::EMPTY; |
2143 | |
2144 | while (cmp_timespec(abs_shortwait, abs_timeout) <= 0) |
2145 | { |
2146 | /* abs_timeout is far away. Wait a short while and notify locks. */ |
2147 | wait_status= m_wait.timed_wait(m_owner, &abs_shortwait, FALSE, |
2148 | mdl_request->key.get_wait_state_name()); |
2149 | |
2150 | if (wait_status != MDL_wait::EMPTY) |
2151 | break; |
2152 | /* Check if the client is gone while we were waiting. */ |
2153 | if (! thd_is_connected(m_owner->get_thd())) |
2154 | { |
      /*
        The client is disconnected. Don't wait forever:
        assume it's the same as a wait timeout; this
        ensures all error handling is correct.
      */
2160 | wait_status= MDL_wait::TIMEOUT; |
2161 | break; |
2162 | } |
2163 | |
2164 | mysql_prlock_wrlock(&lock->m_rwlock); |
2165 | if (lock->needs_notification(ticket)) |
2166 | lock->notify_conflicting_locks(this); |
2167 | mysql_prlock_unlock(&lock->m_rwlock); |
2168 | set_timespec(abs_shortwait, 1); |
2169 | } |
2170 | if (wait_status == MDL_wait::EMPTY) |
2171 | wait_status= m_wait.timed_wait(m_owner, &abs_timeout, TRUE, |
2172 | mdl_request->key.get_wait_state_name()); |
2173 | |
2174 | done_waiting_for(); |
2175 | |
2176 | if (wait_status != MDL_wait::GRANTED) |
2177 | { |
2178 | lock->remove_ticket(m_pins, &MDL_lock::m_waiting, ticket); |
2179 | MDL_ticket::destroy(ticket); |
2180 | switch (wait_status) |
2181 | { |
2182 | case MDL_wait::VICTIM: |
2183 | my_error(ER_LOCK_DEADLOCK, MYF(0)); |
2184 | break; |
2185 | case MDL_wait::TIMEOUT: |
2186 | my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0)); |
2187 | break; |
2188 | case MDL_wait::KILLED: |
2189 | get_thd()->send_kill_message(); |
2190 | break; |
2191 | default: |
2192 | DBUG_ASSERT(0); |
2193 | break; |
2194 | } |
2195 | DBUG_RETURN(TRUE); |
2196 | } |
2197 | |
2198 | /* |
2199 | We have been granted our request. |
    The state of the MDL_lock object has already been appropriately
    updated by a concurrent thread (@sa MDL_lock::reschedule_waiters()).
2202 | So all we need to do is to update MDL_context and MDL_request objects. |
2203 | */ |
2204 | DBUG_ASSERT(wait_status == MDL_wait::GRANTED); |
2205 | |
2206 | m_tickets[mdl_request->duration].push_front(ticket); |
2207 | |
2208 | mdl_request->ticket= ticket; |
2209 | |
2210 | DBUG_RETURN(FALSE); |
2211 | } |
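

/*
  Usage sketch (editorial, illustrative only; assumes a valid THD
  pointer 'thd'). Note that an X request additionally requires the
  global intention-exclusive lock (see the assert in
  try_acquire_lock_impl()):

    MDL_request request;
    request.init(MDL_key::TABLE, "db", "t1",
                 MDL_EXCLUSIVE, MDL_TRANSACTION);

    if (thd->mdl_context.acquire_lock(&request,
                                      thd->variables.lock_wait_timeout))
      return TRUE;     // deadlock, timeout or kill; error was reported

    // request.ticket now points to the granted ticket.
*/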
2212 | |
2213 | |
2214 | extern "C" int mdl_request_ptr_cmp(const void* ptr1, const void* ptr2) |
2215 | { |
2216 | MDL_request *req1= *(MDL_request**)ptr1; |
2217 | MDL_request *req2= *(MDL_request**)ptr2; |
2218 | return req1->key.cmp(&req2->key); |
2219 | } |
2220 | |
2221 | |
2222 | /** |
2223 | Acquire exclusive locks. There must be no granted locks in the |
2224 | context. |
2225 | |
2226 | This is a replacement of lock_table_names(). It is used in |
2227 | RENAME, DROP and other DDL SQL statements. |
2228 | |
2229 | @param mdl_requests List of requests for locks to be acquired. |
2230 | |
2231 | @param lock_wait_timeout Seconds to wait before timeout. |
2232 | |
2233 | @note The list of requests should not contain non-exclusive lock requests. |
2234 | There should not be any acquired locks in the context. |
2235 | |
2236 | @note Assumes that one already owns scoped intention exclusive lock. |
2237 | |
2238 | @retval FALSE Success |
2239 | @retval TRUE Failure |
2240 | */ |
2241 | |
2242 | bool MDL_context::acquire_locks(MDL_request_list *mdl_requests, |
2243 | double lock_wait_timeout) |
2244 | { |
2245 | MDL_request_list::Iterator it(*mdl_requests); |
2246 | MDL_request **sort_buf, **p_req; |
2247 | MDL_savepoint mdl_svp= mdl_savepoint(); |
2248 | ssize_t req_count= static_cast<ssize_t>(mdl_requests->elements()); |
2249 | DBUG_ENTER("MDL_context::acquire_locks" ); |
2250 | |
2251 | if (req_count == 0) |
2252 | DBUG_RETURN(FALSE); |
2253 | |
  /*
    Sort requests according to MDL_key, so that all contexts acquire
    locks in the same global order. This avoids deadlocks between
    statements that each lock several objects.
  */
2255 | if (! (sort_buf= (MDL_request **)my_malloc(req_count * |
2256 | sizeof(MDL_request*), |
2257 | MYF(MY_WME)))) |
2258 | DBUG_RETURN(TRUE); |
2259 | |
2260 | for (p_req= sort_buf; p_req < sort_buf + req_count; p_req++) |
2261 | *p_req= it++; |
2262 | |
2263 | my_qsort(sort_buf, req_count, sizeof(MDL_request*), |
2264 | mdl_request_ptr_cmp); |
2265 | |
2266 | for (p_req= sort_buf; p_req < sort_buf + req_count; p_req++) |
2267 | { |
2268 | if (acquire_lock(*p_req, lock_wait_timeout)) |
2269 | goto err; |
2270 | } |
2271 | my_free(sort_buf); |
2272 | DBUG_RETURN(FALSE); |
2273 | |
2274 | err: |
2275 | /* |
2276 | Release locks we have managed to acquire so far. |
2277 | Use rollback_to_savepoint() since there may be duplicate |
2278 | requests that got assigned the same ticket. |
2279 | */ |
2280 | rollback_to_savepoint(mdl_svp); |
  /* Reset the lock requests back to their initial state. */
2282 | for (req_count= p_req - sort_buf, p_req= sort_buf; |
2283 | p_req < sort_buf + req_count; p_req++) |
2284 | { |
2285 | (*p_req)->ticket= NULL; |
2286 | } |
2287 | my_free(sort_buf); |
2288 | DBUG_RETURN(TRUE); |
2289 | } |
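

/*
  Usage sketch (editorial, illustrative only; 'req1' and 'req2' are
  hypothetical exclusive-lock requests and 'thd' a valid THD pointer):

    MDL_request_list mdl_requests;
    mdl_requests.push_front(&req1);
    mdl_requests.push_front(&req2);

    if (thd->mdl_context.acquire_locks(&mdl_requests,
                                       thd->variables.lock_wait_timeout))
      return TRUE;     // on failure nothing remains acquired
*/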
2290 | |
2291 | |
2292 | /** |
2293 | Upgrade a shared metadata lock. |
2294 | |
2295 | Used in ALTER TABLE. |
2296 | |
2297 | @param mdl_ticket Lock to upgrade. |
2298 | @param new_type Lock type to upgrade to. |
2299 | @param lock_wait_timeout Seconds to wait before timeout. |
2300 | |
  @note In case of failure to upgrade the lock (e.g. because the
        upgrader was killed), the lock is left in its original state
        (locked in shared mode).
2304 | |
2305 | @note There can be only one upgrader for a lock or we will have deadlock. |
2306 | This invariant is ensured by the fact that upgradeable locks SU, SNW |
2307 | and SNRW are not compatible with each other and themselves. |
2308 | |
2309 | @retval FALSE Success |
2310 | @retval TRUE Failure (thread was killed) |
2311 | */ |
2312 | |
2313 | bool |
2314 | MDL_context::upgrade_shared_lock(MDL_ticket *mdl_ticket, |
2315 | enum_mdl_type new_type, |
2316 | double lock_wait_timeout) |
2317 | { |
2318 | MDL_request mdl_xlock_request; |
2319 | MDL_savepoint mdl_svp= mdl_savepoint(); |
2320 | bool is_new_ticket; |
2321 | DBUG_ENTER("MDL_context::upgrade_shared_lock" ); |
2322 | DBUG_PRINT("enter" ,("new_type: %d lock_wait_timeout: %f" , new_type, |
2323 | lock_wait_timeout)); |
2324 | DEBUG_SYNC(get_thd(), "mdl_upgrade_lock" ); |
2325 | |
2326 | /* |
2327 | Do nothing if already upgraded. Used when we FLUSH TABLE under |
2328 | LOCK TABLES and a table is listed twice in LOCK TABLES list. |
2329 | */ |
2330 | if (mdl_ticket->has_stronger_or_equal_type(new_type)) |
2331 | DBUG_RETURN(FALSE); |
2332 | |
2333 | mdl_xlock_request.init(&mdl_ticket->m_lock->key, new_type, |
2334 | MDL_TRANSACTION); |
2335 | |
2336 | if (acquire_lock(&mdl_xlock_request, lock_wait_timeout)) |
2337 | DBUG_RETURN(TRUE); |
2338 | |
2339 | is_new_ticket= ! has_lock(mdl_svp, mdl_xlock_request.ticket); |
2340 | |
2341 | /* Merge the acquired and the original lock. @todo: move to a method. */ |
2342 | mysql_prlock_wrlock(&mdl_ticket->m_lock->m_rwlock); |
2343 | if (is_new_ticket) |
2344 | mdl_ticket->m_lock->m_granted.remove_ticket(mdl_xlock_request.ticket); |
2345 | /* |
2346 | Set the new type of lock in the ticket. To update state of |
2347 | MDL_lock object correctly we need to temporarily exclude |
2348 | ticket from the granted queue and then include it back. |
2349 | */ |
2350 | mdl_ticket->m_lock->m_granted.remove_ticket(mdl_ticket); |
2351 | mdl_ticket->m_type= new_type; |
2352 | mdl_ticket->m_lock->m_granted.add_ticket(mdl_ticket); |
2353 | |
2354 | mysql_prlock_unlock(&mdl_ticket->m_lock->m_rwlock); |
2355 | |
2356 | if (is_new_ticket) |
2357 | { |
2358 | m_tickets[MDL_TRANSACTION].remove(mdl_xlock_request.ticket); |
2359 | MDL_ticket::destroy(mdl_xlock_request.ticket); |
2360 | } |
2361 | |
2362 | DBUG_RETURN(FALSE); |
2363 | } |
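

/*
  Usage sketch (editorial, illustrative only; 'table' is a hypothetical
  TABLE pointer whose mdl_ticket was acquired in an upgradable shared
  mode, 'thd' a valid THD pointer):

    if (thd->mdl_context.upgrade_shared_lock(
            table->mdl_ticket, MDL_EXCLUSIVE,
            thd->variables.lock_wait_timeout))
      return TRUE;     // on failure the ticket keeps its original type
*/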
2364 | |
2365 | |
2366 | /** |
2367 | A fragment of recursive traversal of the wait-for graph |
2368 | in search for deadlocks. Direct the deadlock visitor to all |
2369 | contexts that own the lock the current node in the wait-for |
2370 | graph is waiting for. |
2371 | As long as the initial node is remembered in the visitor, |
2372 | a deadlock is found when the same node is seen twice. |
2373 | */ |
2374 | |
2375 | bool MDL_lock::visit_subgraph(MDL_ticket *waiting_ticket, |
2376 | MDL_wait_for_graph_visitor *gvisitor) |
2377 | { |
2378 | MDL_ticket *ticket; |
2379 | MDL_context *src_ctx= waiting_ticket->get_ctx(); |
2380 | bool result= TRUE; |
2381 | |
2382 | mysql_prlock_rdlock(&m_rwlock); |
2383 | |
2384 | /* Must be initialized after taking a read lock. */ |
2385 | Ticket_iterator granted_it(m_granted); |
2386 | Ticket_iterator waiting_it(m_waiting); |
2387 | |
2388 | /* |
2389 | MDL_lock's waiting and granted queues and MDL_context::m_waiting_for |
2390 | member are updated by different threads when the lock is granted |
2391 | (see MDL_context::acquire_lock() and MDL_lock::reschedule_waiters()). |
2392 | As a result, here we may encounter a situation when MDL_lock data |
2393 | already reflects the fact that the lock was granted but |
2394 | m_waiting_for member has not been updated yet. |
2395 | |
2396 | For example, imagine that: |
2397 | |
2398 | thread1: Owns SNW lock on table t1. |
2399 | thread2: Attempts to acquire SW lock on t1, |
2400 | but sees an active SNW lock. |
2401 | Thus adds the ticket to the waiting queue and |
2402 | sets m_waiting_for to point to the ticket. |
2403 | thread1: Releases SNW lock, updates MDL_lock object to |
2404 | grant SW lock to thread2 (moves the ticket for |
2405 | SW from waiting to the active queue). |
2406 | Attempts to acquire a new SNW lock on t1, |
2407 | sees an active SW lock (since it is present in the |
2408 | active queue), adds ticket for SNW lock to the waiting |
2409 | queue, sets m_waiting_for to point to this ticket. |
2410 | |
2411 | At this point deadlock detection algorithm run by thread1 will see that: |
2412 | - Thread1 waits for SNW lock on t1 (since m_waiting_for is set). |
2413 | - SNW lock is not granted, because it conflicts with active SW lock |
2414 | owned by thread 2 (since ticket for SW is present in granted queue). |
2415 | - Thread2 waits for SW lock (since its m_waiting_for has not been |
2416 | updated yet!). |
2417 | - SW lock is not granted because there is pending SNW lock from thread1. |
2418 | Therefore deadlock should exist [sic!]. |
2419 | |
2420 | To avoid detection of such false deadlocks we need to check the "actual" |
2421 | status of the ticket being waited for, before analyzing its blockers. |
2422 | We do this by checking the wait status of the context which is waiting |
2423 | for it. To avoid races this has to be done under protection of |
2424 | MDL_lock::m_rwlock lock. |
2425 | */ |
2426 | if (src_ctx->m_wait.get_status() != MDL_wait::EMPTY) |
2427 | { |
2428 | result= FALSE; |
2429 | goto end; |
2430 | } |
2431 | |
2432 | /* |
2433 | To avoid visiting nodes which were already marked as victims of |
2434 | deadlock detection (or whose requests were already satisfied) we |
2435 | enter the node only after peeking at its wait status. |
2436 | This is necessary to avoid active waiting in a situation |
2437 | when previous searches for a deadlock already selected the |
2438 | node we're about to enter as a victim (see the comment |
2439 | in MDL_context::find_deadlock() for explanation why several searches |
2440 | can be performed for the same wait). |
2441 | There is no guarantee that the node isn't chosen a victim while we |
2442 | are visiting it but this is OK: in the worst case we might do some |
2443 | extra work and one more context might be chosen as a victim. |
2444 | */ |
2445 | if (gvisitor->enter_node(src_ctx)) |
2446 | goto end; |
2447 | |
2448 | /* |
2449 | We do a breadth-first search first -- that is, inspect all |
2450 | edges of the current node, and only then follow up to the next |
2451 | node. In workloads that involve wait-for graph loops this |
2452 | has proven to be a more efficient strategy [citation missing]. |
2453 | */ |
2454 | while ((ticket= granted_it++)) |
2455 | { |
2456 | /* Filter out edges that point to the same node. */ |
2457 | if (ticket->get_ctx() != src_ctx && |
2458 | ticket->is_incompatible_when_granted(waiting_ticket->get_type()) && |
2459 | gvisitor->inspect_edge(ticket->get_ctx())) |
2460 | { |
2461 | goto end_leave_node; |
2462 | } |
2463 | } |
2464 | |
2465 | while ((ticket= waiting_it++)) |
2466 | { |
2467 | /* Filter out edges that point to the same node. */ |
2468 | if (ticket->get_ctx() != src_ctx && |
2469 | ticket->is_incompatible_when_waiting(waiting_ticket->get_type()) && |
2470 | gvisitor->inspect_edge(ticket->get_ctx())) |
2471 | { |
2472 | goto end_leave_node; |
2473 | } |
2474 | } |
2475 | |
2476 | /* Recurse and inspect all adjacent nodes. */ |
2477 | granted_it.rewind(); |
2478 | while ((ticket= granted_it++)) |
2479 | { |
2480 | if (ticket->get_ctx() != src_ctx && |
2481 | ticket->is_incompatible_when_granted(waiting_ticket->get_type()) && |
2482 | ticket->get_ctx()->visit_subgraph(gvisitor)) |
2483 | { |
2484 | goto end_leave_node; |
2485 | } |
2486 | } |
2487 | |
2488 | waiting_it.rewind(); |
2489 | while ((ticket= waiting_it++)) |
2490 | { |
2491 | if (ticket->get_ctx() != src_ctx && |
2492 | ticket->is_incompatible_when_waiting(waiting_ticket->get_type()) && |
2493 | ticket->get_ctx()->visit_subgraph(gvisitor)) |
2494 | { |
2495 | goto end_leave_node; |
2496 | } |
2497 | } |
2498 | |
2499 | result= FALSE; |
2500 | |
2501 | end_leave_node: |
2502 | gvisitor->leave_node(src_ctx); |
2503 | |
2504 | end: |
2505 | mysql_prlock_unlock(&m_rwlock); |
2506 | return result; |
2507 | } |
2508 | |
2509 | |
2510 | /** |
2511 | Traverse a portion of wait-for graph which is reachable |
2512 | through the edge represented by this ticket and search |
2513 | for deadlocks. |
2514 | |
2515 | @retval TRUE A deadlock is found. A pointer to deadlock |
2516 | victim is saved in the visitor. |
  @retval FALSE  No deadlock found.
2518 | */ |
2519 | |
2520 | bool MDL_ticket::accept_visitor(MDL_wait_for_graph_visitor *gvisitor) |
2521 | { |
2522 | return m_lock->visit_subgraph(this, gvisitor); |
2523 | } |
2524 | |
2525 | |
2526 | /** |
2527 | A fragment of recursive traversal of the wait-for graph of |
2528 | MDL contexts in the server in search for deadlocks. |
2529 | Assume this MDL context is a node in the wait-for graph, |
2530 | and direct the visitor to all adjacent nodes. As long |
2531 | as the starting node is remembered in the visitor, a |
2532 | deadlock is found when the same node is visited twice. |
2533 | One MDL context is connected to another in the wait-for |
2534 | graph if it waits on a resource that is held by the other |
2535 | context. |
2536 | |
2537 | @retval TRUE A deadlock is found. A pointer to deadlock |
2538 | victim is saved in the visitor. |
  @retval FALSE  No deadlock found.
2540 | */ |
2541 | |
2542 | bool MDL_context::visit_subgraph(MDL_wait_for_graph_visitor *gvisitor) |
2543 | { |
2544 | bool result= FALSE; |
2545 | |
2546 | mysql_prlock_rdlock(&m_LOCK_waiting_for); |
2547 | |
2548 | if (m_waiting_for) |
2549 | result= m_waiting_for->accept_visitor(gvisitor); |
2550 | |
2551 | mysql_prlock_unlock(&m_LOCK_waiting_for); |
2552 | |
2553 | return result; |
2554 | } |
2555 | |
2556 | |
2557 | /** |
2558 | Try to find a deadlock. This function produces no errors. |
2559 | |
2560 | @note If during deadlock resolution context which performs deadlock |
2561 | detection is chosen as a victim it will be informed about the |
2562 | fact by setting VICTIM status to its wait slot. |
2563 | */ |
2564 | |
2565 | void MDL_context::find_deadlock() |
2566 | { |
2567 | while (1) |
2568 | { |
2569 | /* |
2570 | The fact that we use fresh instance of gvisitor for each |
2571 | search performed by find_deadlock() below is important, |
2572 | the code responsible for victim selection relies on this. |
2573 | */ |
2574 | Deadlock_detection_visitor dvisitor(this); |
2575 | MDL_context *victim; |
2576 | |
2577 | if (! visit_subgraph(&dvisitor)) |
2578 | { |
2579 | /* No deadlocks are found! */ |
2580 | break; |
2581 | } |
2582 | |
2583 | victim= dvisitor.get_victim(); |
2584 | |
2585 | /* |
2586 | Failure to change status of the victim is OK as it means |
2587 | that the victim has received some other message and is |
2588 | about to stop its waiting/to break deadlock loop. |
2589 | Even when the initiator of the deadlock search is |
2590 | chosen the victim, we need to set the respective wait |
2591 | result in order to "close" it for any attempt to |
2592 | schedule the request. |
2593 | This is needed to avoid a possible race during |
2594 | cleanup in case when the lock request on which the |
2595 | context was waiting is concurrently satisfied. |
2596 | */ |
2597 | (void) victim->m_wait.set_status(MDL_wait::VICTIM); |
2598 | victim->unlock_deadlock_victim(); |
2599 | |
2600 | if (victim == this) |
2601 | break; |
2602 | /* |
2603 | After adding a new edge to the waiting graph we found that it |
2604 | creates a loop (i.e. there is a deadlock). We decided to destroy |
2605 | this loop by removing an edge, but not the one that we added. |
2606 | Since this doesn't guarantee that all loops created by addition |
2607 | of the new edge are destroyed, we have to repeat the search. |
2608 | */ |
2609 | } |
2610 | } |
2611 | |
2612 | |
2613 | /** |
2614 | Release lock. |
2615 | |
2616 | @param duration Lock duration. |
2617 | @param ticket Ticket for lock to be released. |
2618 | |
2619 | */ |
2620 | |
2621 | void MDL_context::release_lock(enum_mdl_duration duration, MDL_ticket *ticket) |
2622 | { |
2623 | MDL_lock *lock= ticket->m_lock; |
2624 | DBUG_ENTER("MDL_context::release_lock" ); |
2625 | DBUG_PRINT("enter" , ("db: '%s' name: '%s'" , |
2626 | lock->key.db_name(), lock->key.name())); |
2627 | |
2628 | DBUG_ASSERT(this == ticket->get_ctx()); |
2629 | |
2630 | lock->remove_ticket(m_pins, &MDL_lock::m_granted, ticket); |
2631 | |
2632 | m_tickets[duration].remove(ticket); |
2633 | MDL_ticket::destroy(ticket); |
2634 | |
2635 | DBUG_VOID_RETURN; |
2636 | } |
2637 | |
2638 | |
2639 | /** |
2640 | Release lock with explicit duration. |
2641 | |
2642 | @param ticket Ticket for lock to be released. |
2643 | |
2644 | */ |
2645 | |
2646 | void MDL_context::release_lock(MDL_ticket *ticket) |
2647 | { |
2648 | DBUG_SLOW_ASSERT(ticket->m_duration == MDL_EXPLICIT); |
2649 | |
2650 | release_lock(MDL_EXPLICIT, ticket); |
2651 | } |
2652 | |
2653 | |
2654 | /** |
2655 | Release all locks associated with the context. If the sentinel |
2656 | is not NULL, do not release locks stored in the list after and |
2657 | including the sentinel. |
2658 | |
2659 | Statement and transactional locks are added to the beginning of |
2660 | the corresponding lists, i.e. stored in reverse temporal order. |
  This allows this function to be employed to:
  - back off in case of a lock conflict;
  - release all locks at the end of a statement or transaction;
2664 | - rollback to a savepoint. |
2665 | */ |
2666 | |
2667 | void MDL_context::release_locks_stored_before(enum_mdl_duration duration, |
2668 | MDL_ticket *sentinel) |
2669 | { |
2670 | MDL_ticket *ticket; |
2671 | Ticket_iterator it(m_tickets[duration]); |
2672 | DBUG_ENTER("MDL_context::release_locks_stored_before" ); |
2673 | |
2674 | if (m_tickets[duration].is_empty()) |
2675 | DBUG_VOID_RETURN; |
2676 | |
2677 | while ((ticket= it++) && ticket != sentinel) |
2678 | { |
2679 | DBUG_PRINT("info" , ("found lock to release ticket=%p" , ticket)); |
2680 | release_lock(duration, ticket); |
2681 | } |
2682 | |
2683 | DBUG_VOID_RETURN; |
2684 | } |
2685 | |
2686 | |
2687 | /** |
2688 | Release all explicit locks in the context which correspond to the |
2689 | same name/object as this lock request. |
2690 | |
  @param name  One of the locks for the name/object for which all
               locks should be released.
2693 | */ |
2694 | |
2695 | void MDL_context::release_all_locks_for_name(MDL_ticket *name) |
2696 | { |
2697 | /* Use MDL_ticket::m_lock to identify other locks for the same object. */ |
2698 | MDL_lock *lock= name->m_lock; |
2699 | |
2700 | /* Remove matching lock tickets from the context. */ |
2701 | MDL_ticket *ticket; |
2702 | Ticket_iterator it_ticket(m_tickets[MDL_EXPLICIT]); |
2703 | |
2704 | while ((ticket= it_ticket++)) |
2705 | { |
2706 | DBUG_ASSERT(ticket->m_lock); |
2707 | if (ticket->m_lock == lock) |
2708 | release_lock(MDL_EXPLICIT, ticket); |
2709 | } |
2710 | } |
2711 | |
2712 | |
2713 | /** |
  Downgrade an EXCLUSIVE or SHARED_NO_WRITE lock to a shared metadata lock.
2715 | |
2716 | @param type Type of lock to which exclusive lock should be downgraded. |
2717 | */ |
2718 | |
2719 | void MDL_ticket::downgrade_lock(enum_mdl_type type) |
2720 | { |
2721 | /* |
2722 | Do nothing if already downgraded. Used when we FLUSH TABLE under |
2723 | LOCK TABLES and a table is listed twice in LOCK TABLES list. |
2724 | Note that this code might even try to "downgrade" a weak lock |
2725 | (e.g. SW) to a stronger one (e.g SNRW). So we can't even assert |
2726 | here that target lock is weaker than existing lock. |
2727 | */ |
2728 | if (m_type == type || !has_stronger_or_equal_type(type)) |
2729 | return; |
2730 | |
2731 | /* Only allow downgrade from EXCLUSIVE and SHARED_NO_WRITE. */ |
2732 | DBUG_ASSERT(m_type == MDL_EXCLUSIVE || |
2733 | m_type == MDL_SHARED_NO_WRITE); |
2734 | |
2735 | mysql_prlock_wrlock(&m_lock->m_rwlock); |
2736 | /* |
2737 | To update state of MDL_lock object correctly we need to temporarily |
2738 | exclude ticket from the granted queue and then include it back. |
2739 | */ |
2740 | m_lock->m_granted.remove_ticket(this); |
2741 | m_type= type; |
2742 | m_lock->m_granted.add_ticket(this); |
2743 | m_lock->reschedule_waiters(); |
2744 | mysql_prlock_unlock(&m_lock->m_rwlock); |
2745 | } |
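

/*
  Usage sketch (editorial, illustrative only; 'table' is a hypothetical
  TABLE pointer whose mdl_ticket currently holds MDL_EXCLUSIVE): once
  the critical phase of a DDL statement is over, readers can be let in
  again by downgrading the lock, e.g.:

    table->mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE);
*/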
2746 | |
2747 | |
2748 | /** |
  Auxiliary function which allows checking whether we have some kind of
  lock on an object. Returns TRUE if we have a lock of the given or a
  stronger type.
2751 | |
2752 | @param mdl_namespace Id of object namespace |
2753 | @param db Name of the database |
2754 | @param name Name of the object |
2755 | @param mdl_type Lock type. Pass in the weakest type to find |
2756 | out if there is at least some lock. |
2757 | |
2758 | @return TRUE if current context contains satisfied lock for the object, |
2759 | FALSE otherwise. |
2760 | */ |
2761 | |
2762 | bool |
2763 | MDL_context::is_lock_owner(MDL_key::enum_mdl_namespace mdl_namespace, |
2764 | const char *db, const char *name, |
2765 | enum_mdl_type mdl_type) |
2766 | { |
2767 | MDL_request mdl_request; |
2768 | enum_mdl_duration not_unused; |
  /* We don't care about the exact duration of the lock here. */
2770 | mdl_request.init(mdl_namespace, db, name, mdl_type, MDL_TRANSACTION); |
2771 | MDL_ticket *ticket= find_ticket(&mdl_request, ¬_unused); |
2772 | |
2773 | DBUG_ASSERT(ticket == NULL || ticket->m_lock); |
2774 | |
2775 | return ticket; |
2776 | } |
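

/*
  Usage sketch (editorial, illustrative only; assumes a valid THD
  pointer 'thd'):

    if (thd->mdl_context.is_lock_owner(MDL_key::TABLE, "db", "t1",
                                       MDL_SHARED))
    {
      // We hold at least a shared metadata lock on db.t1.
    }
*/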
2777 | |
2778 | |
2779 | /** |
2780 | Return thread id of the owner of the lock or 0 if |
2781 | there is no owner. |
  @note Lock type is not considered at all; the function
        simply checks that there is some lock for the given key.
2784 | |
2785 | @return thread id of the owner of the lock or 0 |
2786 | */ |
2787 | |
2788 | unsigned long |
2789 | MDL_context::get_lock_owner(MDL_key *key) |
2790 | { |
2791 | fix_pins(); |
2792 | return mdl_locks.get_lock_owner(m_pins, key); |
2793 | } |
2794 | |
2795 | |
2796 | /** |
2797 | Check if we have any pending locks which conflict with existing shared lock. |
2798 | |
2799 | @pre The ticket must match an acquired lock. |
2800 | |
2801 | @return TRUE if there is a conflicting lock request, FALSE otherwise. |
2802 | */ |
2803 | |
2804 | bool MDL_ticket::has_pending_conflicting_lock() const |
2805 | { |
2806 | return m_lock->has_pending_conflicting_lock(m_type); |
2807 | } |
2808 | |
2809 | /** Return a key identifying this lock. */ |
2810 | MDL_key *MDL_ticket::get_key() const |
2811 | { |
2812 | return &m_lock->key; |
2813 | } |
2814 | |
2815 | /** |
2816 | Releases metadata locks that were acquired after a specific savepoint. |
2817 | |
2818 | @note Used to release tickets acquired during a savepoint unit. |
  @note It's safe to iterate over and unlock any locks taken after this
        savepoint because other statements that take other special locks
        cause an implicit commit (e.g. LOCK TABLES).
2822 | */ |
2823 | |
2824 | void MDL_context::rollback_to_savepoint(const MDL_savepoint &mdl_savepoint) |
2825 | { |
2826 | DBUG_ENTER("MDL_context::rollback_to_savepoint" ); |
2827 | |
2828 | /* If savepoint is NULL, it is from the start of the transaction. */ |
2829 | release_locks_stored_before(MDL_STATEMENT, mdl_savepoint.m_stmt_ticket); |
2830 | release_locks_stored_before(MDL_TRANSACTION, mdl_savepoint.m_trans_ticket); |
2831 | |
2832 | DBUG_VOID_RETURN; |
2833 | } |
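

/*
  Usage sketch (editorial, illustrative): the typical pattern, as used
  e.g. by acquire_locks() above:

    MDL_savepoint mdl_svp= mdl_savepoint();
    ...attempt to acquire a series of locks...
    if (failure)
      rollback_to_savepoint(mdl_svp);  // release what was acquired
*/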
2834 | |
2835 | |
2836 | /** |
2837 | Release locks acquired by normal statements (SELECT, UPDATE, |
2838 | DELETE, etc) in the course of a transaction. Do not release |
2839 | HANDLER locks, if there are any. |
2840 | |
2841 | This method is used at the end of a transaction, in |
2842 | implementation of COMMIT (implicit or explicit) and ROLLBACK. |
2843 | */ |
2844 | |
2845 | void MDL_context::release_transactional_locks() |
2846 | { |
2847 | DBUG_ENTER("MDL_context::release_transactional_locks" ); |
2848 | release_locks_stored_before(MDL_STATEMENT, NULL); |
2849 | release_locks_stored_before(MDL_TRANSACTION, NULL); |
2850 | DBUG_VOID_RETURN; |
2851 | } |
2852 | |
2853 | |
2854 | void MDL_context::release_statement_locks() |
2855 | { |
  DBUG_ENTER("MDL_context::release_statement_locks");
2857 | release_locks_stored_before(MDL_STATEMENT, NULL); |
2858 | DBUG_VOID_RETURN; |
2859 | } |
2860 | |
2861 | |
2862 | /** |
2863 | Does this savepoint have this lock? |
2864 | |
2865 | @retval TRUE The ticket is older than the savepoint or |
2866 | is an LT, HA or GLR ticket. Thus it belongs |
2867 | to the savepoint or has explicit duration. |
  @retval FALSE  The ticket is newer than the savepoint
                 and is not an LT, HA or GLR ticket.
2870 | */ |
2871 | |
2872 | bool MDL_context::has_lock(const MDL_savepoint &mdl_savepoint, |
2873 | MDL_ticket *mdl_ticket) |
2874 | { |
2875 | MDL_ticket *ticket; |
  /* Start from the beginning; most likely mdl_ticket was just acquired. */
2877 | MDL_context::Ticket_iterator s_it(m_tickets[MDL_STATEMENT]); |
2878 | MDL_context::Ticket_iterator t_it(m_tickets[MDL_TRANSACTION]); |
2879 | |
2880 | while ((ticket= s_it++) && ticket != mdl_savepoint.m_stmt_ticket) |
2881 | { |
2882 | if (ticket == mdl_ticket) |
2883 | return FALSE; |
2884 | } |
2885 | |
2886 | while ((ticket= t_it++) && ticket != mdl_savepoint.m_trans_ticket) |
2887 | { |
2888 | if (ticket == mdl_ticket) |
2889 | return FALSE; |
2890 | } |
2891 | return TRUE; |
2892 | } |
2893 | |
2894 | |
2895 | /** |
2896 | Change lock duration for transactional lock. |
2897 | |
  @param mdl_ticket  Ticket representing the lock.
  @param duration    Lock duration to be set.
2900 | |
2901 | @note This method only supports changing duration of |
2902 | transactional lock to some other duration. |
2903 | */ |
2904 | |
2905 | void MDL_context::set_lock_duration(MDL_ticket *mdl_ticket, |
2906 | enum_mdl_duration duration) |
2907 | { |
2908 | DBUG_SLOW_ASSERT(mdl_ticket->m_duration == MDL_TRANSACTION && |
2909 | duration != MDL_TRANSACTION); |
2910 | |
2911 | m_tickets[MDL_TRANSACTION].remove(mdl_ticket); |
2912 | m_tickets[duration].push_front(mdl_ticket); |
2913 | #ifndef DBUG_OFF |
2914 | mdl_ticket->m_duration= duration; |
2915 | #endif |
2916 | } |
2917 | |
2918 | |
2919 | /** |
2920 | Set explicit duration for all locks in the context. |
2921 | */ |
2922 | |
2923 | void MDL_context::set_explicit_duration_for_all_locks() |
2924 | { |
2925 | int i; |
2926 | MDL_ticket *ticket; |
2927 | |
2928 | /* |
    In the most common case, when this function is called, the list of
    transactional locks is bigger than the list of locks with explicit
    duration. So we start by swapping these two lists, and then move
    the elements from the new list of transactional locks and from the
    list of statement locks to the list of locks with explicit
    duration.
2935 | */ |
2936 | |
2937 | m_tickets[MDL_EXPLICIT].swap(m_tickets[MDL_TRANSACTION]); |
2938 | |
2939 | for (i= 0; i < MDL_EXPLICIT; i++) |
2940 | { |
2941 | Ticket_iterator it_ticket(m_tickets[i]); |
2942 | |
2943 | while ((ticket= it_ticket++)) |
2944 | { |
2945 | m_tickets[i].remove(ticket); |
2946 | m_tickets[MDL_EXPLICIT].push_front(ticket); |
2947 | } |
2948 | } |
2949 | |
2950 | #ifndef DBUG_OFF |
2951 | Ticket_iterator exp_it(m_tickets[MDL_EXPLICIT]); |
2952 | |
2953 | while ((ticket= exp_it++)) |
2954 | ticket->m_duration= MDL_EXPLICIT; |
2955 | #endif |
2956 | } |
2957 | |
2958 | |
2959 | /** |
2960 | Set transactional duration for all locks in the context. |
2961 | */ |
2962 | |
2963 | void MDL_context::set_transaction_duration_for_all_locks() |
2964 | { |
2965 | MDL_ticket *ticket; |
2966 | |
2967 | /* |
    In the most common case, when this function is called, the list of
    explicit locks is bigger than the two other lists (in fact, the
    list of statement locks is always empty). So we start by swapping
    the lists of explicit and transactional locks, and then move the
    contents of the new list of explicit locks to the list of locks
    with transactional duration.
2974 | */ |
2975 | |
2976 | DBUG_ASSERT(m_tickets[MDL_STATEMENT].is_empty()); |
2977 | |
2978 | m_tickets[MDL_TRANSACTION].swap(m_tickets[MDL_EXPLICIT]); |
2979 | |
2980 | Ticket_iterator it_ticket(m_tickets[MDL_EXPLICIT]); |
2981 | |
2982 | while ((ticket= it_ticket++)) |
2983 | { |
2984 | m_tickets[MDL_EXPLICIT].remove(ticket); |
2985 | m_tickets[MDL_TRANSACTION].push_front(ticket); |
2986 | } |
2987 | |
2988 | #ifndef DBUG_OFF |
2989 | Ticket_iterator trans_it(m_tickets[MDL_TRANSACTION]); |
2990 | |
2991 | while ((ticket= trans_it++)) |
2992 | ticket->m_duration= MDL_TRANSACTION; |
2993 | #endif |
2994 | } |
2995 | |
2996 | |
2997 | |
2998 | void MDL_context::release_explicit_locks() |
2999 | { |
3000 | release_locks_stored_before(MDL_EXPLICIT, NULL); |
3001 | } |
3002 | |
bool MDL_context::has_explicit_locks()
{
  return ! m_tickets[MDL_EXPLICIT].is_empty();
}
3016 | |
3017 | #ifdef WITH_WSREP |
3018 | static |
3019 | const char *wsrep_get_mdl_type_name(enum_mdl_type type) |
3020 | { |
3021 | switch (type) |
3022 | { |
3023 | case MDL_INTENTION_EXCLUSIVE : return "intention exclusive" ; |
3024 | case MDL_SHARED : return "shared" ; |
3025 | case MDL_SHARED_HIGH_PRIO : return "shared high prio" ; |
3026 | case MDL_SHARED_READ : return "shared read" ; |
3027 | case MDL_SHARED_WRITE : return "shared write" ; |
3028 | case MDL_SHARED_UPGRADABLE : return "shared upgradable" ; |
3029 | case MDL_SHARED_NO_WRITE : return "shared no write" ; |
3030 | case MDL_SHARED_NO_READ_WRITE : return "shared no read write" ; |
3031 | case MDL_EXCLUSIVE : return "exclusive" ; |
3032 | default: break; |
3033 | } |
3034 | return "UNKNOWN" ; |
3035 | } |
3036 | |
3037 | static |
3038 | const char *wsrep_get_mdl_namespace_name(MDL_key::enum_mdl_namespace ns) |
3039 | { |
3040 | switch (ns) |
3041 | { |
3042 | case MDL_key::GLOBAL : return "GLOBAL" ; |
3043 | case MDL_key::SCHEMA : return "SCHEMA" ; |
3044 | case MDL_key::TABLE : return "TABLE" ; |
3045 | case MDL_key::FUNCTION : return "FUNCTION" ; |
3046 | case MDL_key::PROCEDURE : return "PROCEDURE" ; |
3047 | case MDL_key::PACKAGE_BODY: return "PACKAGE BODY" ; |
3048 | case MDL_key::TRIGGER : return "TRIGGER" ; |
3049 | case MDL_key::EVENT : return "EVENT" ; |
3050 | case MDL_key::COMMIT : return "COMMIT" ; |
3051 | case MDL_key::USER_LOCK : return "USER_LOCK" ; |
3052 | default: break; |
3053 | } |
3054 | return "UNKNOWN" ; |
3055 | } |
3056 | |
3057 | void MDL_ticket::wsrep_report(bool debug) |
3058 | { |
3059 | if (!debug) return; |
3060 | |
3061 | const PSI_stage_info *psi_stage= m_lock->key.get_wait_state_name(); |
3062 | WSREP_DEBUG("MDL ticket: type: %s space: %s db: %s name: %s (%s)" , |
3063 | wsrep_get_mdl_type_name(get_type()), |
3064 | wsrep_get_mdl_namespace_name(m_lock->key.mdl_namespace()), |
3065 | m_lock->key.db_name(), |
3066 | m_lock->key.name(), |
3067 | psi_stage->m_name); |
3068 | } |
3069 | #endif /* WITH_WSREP */ |
3070 | |