/*****************************************************************************

Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2017, 2018, MariaDB Corporation.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file include/sync0rw.ic
The read-write lock (for threads)

Created 9/11/1995 Heikki Tuuri
*******************************************************/

#include "os0event.h"

/******************************************************************//**
Lock an rw-lock in shared mode for the current thread. If the rw-lock is
locked in exclusive mode, or there is an exclusive lock request waiting,
the function spins a preset number of rounds (controlled by
srv_n_spin_wait_rounds), waiting for the lock, before suspending the thread. */
void
rw_lock_s_lock_spin(
/*================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the lock will
				be passed to another thread to unlock */
	const char*	file_name,/*!< in: file name where lock requested */
	unsigned	line);	/*!< in: line where requested */
#ifdef UNIV_DEBUG
/******************************************************************//**
Inserts the debug information for an rw-lock. */
void
rw_lock_add_debug_info(
/*===================*/
	rw_lock_t*	lock,		/*!< in: rw-lock */
	ulint		pass,		/*!< in: pass value */
	ulint		lock_type,	/*!< in: lock type */
	const char*	file_name,	/*!< in: file where requested */
	unsigned	line);		/*!< in: line where requested */
/******************************************************************//**
Removes a debug information struct for an rw-lock. */
void
rw_lock_remove_debug_info(
/*======================*/
	rw_lock_t*	lock,		/*!< in: rw-lock */
	ulint		pass,		/*!< in: pass value */
	ulint		lock_type);	/*!< in: lock type */
#endif /* UNIV_DEBUG */

/******************************************************************//**
Returns the write-status of the lock - this function made more sense
with the old rw_lock implementation.
@return RW_LOCK_NOT_LOCKED, RW_LOCK_X, RW_LOCK_X_WAIT, RW_LOCK_SX */
UNIV_INLINE
ulint
rw_lock_get_writer(
/*===============*/
	const rw_lock_t*	lock)	/*!< in: rw-lock */
{
	int32_t lock_word = my_atomic_load32_explicit(
		const_cast<int32_t*>(&lock->lock_word),
		MY_MEMORY_ORDER_RELAXED);

	ut_ad(lock_word <= X_LOCK_DECR);
	if (lock_word > X_LOCK_HALF_DECR) {
		/* return NOT_LOCKED in s-lock state, like the writer
		member of the old lock implementation. */
		return(RW_LOCK_NOT_LOCKED);
	} else if (lock_word > 0) {
		/* sx-locked, no x-locks */
		return(RW_LOCK_SX);
	} else if (lock_word == 0
		   || lock_word == -X_LOCK_HALF_DECR
		   || lock_word <= -X_LOCK_DECR) {
		/* x-lock with sx-lock is also treated as RW_LOCK_X */
		return(RW_LOCK_X);
	} else {
		/* x-waiter with sx-lock is also treated as RW_LOCK_X_WAIT
		e.g. -X_LOCK_HALF_DECR < lock_word < 0 : without sx
		     -X_LOCK_DECR < lock_word < -X_LOCK_HALF_DECR : with sx */
		return(RW_LOCK_X_WAIT);
	}
}
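
/* Decoding sketch (illustrative only). Assuming the usual definitions
X_LOCK_DECR == 0x20000000 and X_LOCK_HALF_DECR == X_LOCK_DECR / 2, sample
lock_word values map to writer states as follows:

	X_LOCK_DECR		RW_LOCK_NOT_LOCKED (free)
	X_LOCK_DECR - 3		RW_LOCK_NOT_LOCKED (3 s-locks)
	X_LOCK_HALF_DECR	RW_LOCK_SX
	0			RW_LOCK_X
	-X_LOCK_HALF_DECR	RW_LOCK_X (x-lock plus sx-lock)
	-1			RW_LOCK_X_WAIT (x-waiter, 1 s-lock left)
	-X_LOCK_DECR - 1	RW_LOCK_X (recursive x-lock) */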

/******************************************************************//**
Returns the number of readers (s-locks).
@return number of readers */
UNIV_INLINE
ulint
rw_lock_get_reader_count(
/*=====================*/
	const rw_lock_t*	lock)	/*!< in: rw-lock */
{
	int32_t lock_word = my_atomic_load32_explicit(
		const_cast<int32_t*>(&lock->lock_word),
		MY_MEMORY_ORDER_RELAXED);
	ut_ad(lock_word <= X_LOCK_DECR);

	if (lock_word > X_LOCK_HALF_DECR) {
		/* s-locked, no x-waiter */
		return ulint(X_LOCK_DECR - lock_word);
	} else if (lock_word > 0) {
		/* s-locked, with sx-locks only */
		return ulint(X_LOCK_HALF_DECR - lock_word);
	} else if (lock_word == 0) {
		/* x-locked */
		return(0);
	} else if (lock_word > -X_LOCK_HALF_DECR) {
		/* s-locked, with x-waiter */
		return((ulint)(-lock_word));
	} else if (lock_word == -X_LOCK_HALF_DECR) {
		/* x-locked with sx-locks */
		return(0);
	} else if (lock_word > -X_LOCK_DECR) {
		/* s-locked, with x-waiter and sx-lock */
		return((ulint)(-(lock_word + X_LOCK_HALF_DECR)));
	}
	/* no s-locks */
	return(0);
}
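
/* Worked example (illustrative only): with 3 readers and no writer the
lock_word is X_LOCK_DECR - 3, so the first branch returns 3; with 2 readers
under an sx-lock it is X_LOCK_HALF_DECR - 2, so the second branch returns
2. */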

/******************************************************************//**
Returns the number of x-locks held (the x-lock recursion count). Does not
reserve the lock mutex, so the caller must be sure the value is not changed
during the call.
@return x-lock count */
UNIV_INLINE
ulint
rw_lock_get_x_lock_count(
/*=====================*/
	const rw_lock_t*	lock)	/*!< in: rw-lock */
{
	int32_t lock_copy = my_atomic_load32_explicit(
		const_cast<int32_t*>(&lock->lock_word),
		MY_MEMORY_ORDER_RELAXED);
	ut_ad(lock_copy <= X_LOCK_DECR);

	if (lock_copy == 0 || lock_copy == -X_LOCK_HALF_DECR) {
		/* "1 x-lock" or "1 x-lock + sx-locks" */
		return(1);
	} else if (lock_copy > -X_LOCK_DECR) {
		/* s-locks, one or more sx-locks if > 0, or x-waiter if < 0 */
		return(0);
	} else if (lock_copy > -(X_LOCK_DECR + X_LOCK_HALF_DECR)) {
		/* no s-lock, no sx-lock, 2 or more x-locks.
		First 2 x-locks are set with -X_LOCK_DECR,
		all other recursive x-locks are set with -1 */
		return ulint(2 - X_LOCK_DECR - lock_copy);
	} else {
		/* no s-lock, 1 or more sx-locks, 2 or more x-locks.
		First 2 x-locks are set with -(X_LOCK_DECR + X_LOCK_HALF_DECR),
		all other recursive x-locks are set with -1 */
		return ulint(2 - X_LOCK_DECR - X_LOCK_HALF_DECR - lock_copy);
	}
}
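
/* Worked example (illustrative only): the first x-lock moves lock_word from
X_LOCK_DECR to 0, a recursive relock subtracts X_LOCK_DECR, and every further
relock subtracts 1. With 3 recursive x-locks (no sx) lock_word is
-X_LOCK_DECR - 1, and the third branch returns
2 - X_LOCK_DECR - (-X_LOCK_DECR - 1) = 3. */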

/******************************************************************//**
Returns the number of sx-locks held (the sx-lock recursion count). Does not
reserve the lock mutex, so the caller must be sure the value is not changed
during the call.
@return sx-lock count */
UNIV_INLINE
ulint
rw_lock_get_sx_lock_count(
/*======================*/
	const rw_lock_t*	lock)	/*!< in: rw-lock */
{
#ifdef UNIV_DEBUG
	int32_t lock_copy = my_atomic_load32_explicit(
		const_cast<int32_t*>(&lock->lock_word),
		MY_MEMORY_ORDER_RELAXED);

	ut_ad(lock_copy <= X_LOCK_DECR);

	while (lock_copy < 0) {
		lock_copy += X_LOCK_DECR;
	}

	if (lock_copy > 0 && lock_copy <= X_LOCK_HALF_DECR) {
		return(lock->sx_recursive);
	}

	return(0);
#else /* UNIV_DEBUG */
	return(lock->sx_recursive);
#endif /* UNIV_DEBUG */
}
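
/* Worked example (illustrative only): with 1 sx-lock and 2 x-locks the
lock_word is -(X_LOCK_DECR + X_LOCK_HALF_DECR). Adding X_LOCK_DECR twice
normalizes it to X_LOCK_HALF_DECR, which lies in (0, X_LOCK_HALF_DECR], so
the debug build returns lock->sx_recursive. */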

/******************************************************************//**
Recursive x-locks are not supported: they should be handled by the caller and
need not be atomic since they are performed by the current lock holder.
Returns true if the decrement was made, false if not.
@return true if decr occurs */
UNIV_INLINE
bool
rw_lock_lock_word_decr(
/*===================*/
	rw_lock_t*	lock,		/*!< in/out: rw-lock */
	int32_t		amount,		/*!< in: amount to decrement */
	int32_t		threshold)	/*!< in: threshold above which the
					decrement is attempted */
{
	int32_t lock_copy = my_atomic_load32_explicit(&lock->lock_word,
						      MY_MEMORY_ORDER_RELAXED);
	while (lock_copy > threshold) {
		if (my_atomic_cas32_strong_explicit(&lock->lock_word,
						    &lock_copy,
						    lock_copy - amount,
						    MY_MEMORY_ORDER_ACQUIRE,
						    MY_MEMORY_ORDER_RELAXED)) {
			return(true);
		}
		/* On CAS failure, lock_copy was refreshed with the current
		lock_word, so the loop re-checks the threshold. */
	}
	return(false);
}
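
/* Callers encode the lock mode in the (amount, threshold) pair. A sketch of
the combinations used by this file and sync0rw.cc (illustrative):

	s-lock:  rw_lock_lock_word_decr(lock, 1, 0)
		 succeeds while lock_word > 0, i.e. no x-lock or x-waiter
	sx-lock: rw_lock_lock_word_decr(lock, X_LOCK_HALF_DECR,
					X_LOCK_HALF_DECR)
		 succeeds while no x-lock and no other sx-lock is held */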

/******************************************************************//**
Low-level function which tries to lock an rw-lock in s-mode. Performs no
spinning.
@return TRUE if success */
UNIV_INLINE
ibool
rw_lock_s_lock_low(
/*===============*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass MY_ATTRIBUTE((unused)),
				/*!< in: pass value; != 0, if the lock will be
				passed to another thread to unlock */
	const char*	file_name, /*!< in: file name where lock requested */
	unsigned	line)	/*!< in: line where requested */
{
	if (!rw_lock_lock_word_decr(lock, 1, 0)) {
		/* Locking did not succeed */
		return(FALSE);
	}

	ut_d(rw_lock_add_debug_info(lock, pass, RW_LOCK_S, file_name, line));

	return(TRUE);	/* locking succeeded */
}

/******************************************************************//**
NOTE! Use the corresponding macro, not directly this function! Lock an
rw-lock in shared mode for the current thread. If the rw-lock is locked
in exclusive mode, or there is an exclusive lock request waiting, the
function spins a preset number of rounds (controlled by
srv_n_spin_wait_rounds), waiting for the lock, before suspending the thread. */
UNIV_INLINE
void
rw_lock_s_lock_func(
/*================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the lock will
				be passed to another thread to unlock */
	const char*	file_name,/*!< in: file name where lock requested */
	unsigned	line)	/*!< in: line where requested */
{
	/* NOTE: As we do not know the thread ids of threads which have
	s-locked a latch, and s-lockers will be served only after waiting
	x-lock requests have been fulfilled, a thread that already owns an
	s-lock here may end up in a deadlock with another thread which
	requests an x-lock here. Therefore, we forbid recursive s-locking
	of a latch: the following assert warns the programmer of the
	possibility of this kind of a deadlock. If we wanted to implement
	safe recursive s-locking, we would have to keep a list of the
	thread ids of the threads which have s-locked a latch. This would
	use some CPU time. */

	ut_ad(!rw_lock_own(lock, RW_LOCK_S)); /* see NOTE above */
	ut_ad(!rw_lock_own(lock, RW_LOCK_X));

	if (!rw_lock_s_lock_low(lock, pass, file_name, line)) {

		/* Did not succeed, try spin wait */

		rw_lock_s_lock_spin(lock, pass, file_name, line);
	}
}
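
/* Usage sketch (illustrative only). The rw_lock_s_lock() and
rw_lock_s_unlock() macros supply pass == 0 and the caller's __FILE__ and
__LINE__:

	rw_lock_s_lock(lock);
	... read the data protected by the latch ...
	rw_lock_s_unlock(lock);
*/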

/******************************************************************//**
NOTE! Use the corresponding macro, not directly this function! Lock an
rw-lock in exclusive mode for the current thread if the lock can be
obtained immediately.
@return TRUE if success */
UNIV_INLINE
ibool
rw_lock_x_lock_func_nowait(
/*=======================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	const char*	file_name,/*!< in: file name where lock requested */
	unsigned	line)	/*!< in: line where requested */
{
	int32_t oldval = X_LOCK_DECR;

	if (my_atomic_cas32_strong_explicit(&lock->lock_word, &oldval, 0,
					    MY_MEMORY_ORDER_ACQUIRE,
					    MY_MEMORY_ORDER_RELAXED)) {
		lock->writer_thread = os_thread_get_curr_id();

	} else if (os_thread_eq(lock->writer_thread, os_thread_get_curr_id())) {
		/* Relock: even though no other thread can modify (lock, unlock
		or reserve) lock_word while there is an exclusive writer and
		this is the writer thread, we still want concurrent threads to
		observe consistent values. */
		if (oldval == 0 || oldval == -X_LOCK_HALF_DECR) {
			/* There is 1 x-lock */
			my_atomic_add32_explicit(&lock->lock_word, -X_LOCK_DECR,
						 MY_MEMORY_ORDER_RELAXED);
		} else if (oldval <= -X_LOCK_DECR) {
			/* There are 2 or more x-locks */
			my_atomic_add32_explicit(&lock->lock_word, -1,
						 MY_MEMORY_ORDER_RELAXED);
			/* Watch for too many recursive locks */
			ut_ad(oldval < 1);
		} else {
			/* Failure */
			return(FALSE);
		}
	} else {
		/* Failure */
		return(FALSE);
	}

	ut_d(rw_lock_add_debug_info(lock, 0, RW_LOCK_X, file_name, line));

	lock->last_x_file_name = file_name;
	lock->last_x_line = line;

	ut_ad(rw_lock_validate(lock));

	return(TRUE);
}
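
/* Usage sketch (illustrative only; rw_lock_x_lock_nowait() is the macro
that supplies __FILE__ and __LINE__):

	if (rw_lock_x_lock_nowait(lock)) {
		... modify the protected data ...
		rw_lock_x_unlock(lock);
	} else {
		... latch not available; fall back or retry ...
	}
*/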

/******************************************************************//**
Releases a shared mode lock. */
UNIV_INLINE
void
rw_lock_s_unlock_func(
/*==================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the lock may have
				been passed to another thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
#ifdef UNIV_DEBUG
	int32_t dbg_lock_word = my_atomic_load32_explicit(
		&lock->lock_word,
		MY_MEMORY_ORDER_RELAXED);
	ut_ad(dbg_lock_word > -X_LOCK_DECR);
	ut_ad(dbg_lock_word != 0);
	ut_ad(dbg_lock_word < X_LOCK_DECR);
#endif

	ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_S));

	/* Increment lock_word to indicate 1 less reader */
	int32_t lock_word = my_atomic_add32_explicit(
		&lock->lock_word, 1, MY_MEMORY_ORDER_RELEASE) + 1;
	if (lock_word == 0 || lock_word == -X_LOCK_HALF_DECR) {

		/* A wait_ex waiter exists. It may not be asleep, but we
		signal anyway. We do not wake other waiters, because they
		cannot exist without the wait_ex waiter, and the wait_ex
		waiter goes first. */
		os_event_set(lock->wait_ex_event);
		sync_array_object_signalled();

	}

	ut_ad(rw_lock_validate(lock));
}

/******************************************************************//**
Releases an exclusive mode lock. */
UNIV_INLINE
void
rw_lock_x_unlock_func(
/*==================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the lock may have
				been passed to another thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	int32_t lock_word = my_atomic_load32_explicit(&lock->lock_word,
						      MY_MEMORY_ORDER_RELAXED);

	ut_ad(lock_word == 0 || lock_word == -X_LOCK_HALF_DECR
	      || lock_word <= -X_LOCK_DECR);

	if (lock_word == 0) {
		/* Last caller in a possible recursive chain. */
		lock->writer_thread = 0;
	}

	ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_X));

	if (lock_word == 0 || lock_word == -X_LOCK_HALF_DECR) {
		/* Last X-lock owned by this thread, though it may still
		hold SX-locks.
		ACQ_REL due to...
		RELEASE: we release the rw-lock
		ACQUIRE: we want waiters to be loaded after lock_word is
		stored */
		my_atomic_add32_explicit(&lock->lock_word, X_LOCK_DECR,
					 MY_MEMORY_ORDER_ACQ_REL);

		/* This thread no longer has an X-lock, but it may still
		hold an SX-lock, so the latch is now free for S-locks by
		other threads. We need to signal read/write waiters.
		We do not need to signal wait_ex waiters, since they cannot
		exist when there is a writer. */
		if (my_atomic_load32_explicit(&lock->waiters,
					      MY_MEMORY_ORDER_RELAXED)) {
			my_atomic_store32_explicit(&lock->waiters, 0,
						   MY_MEMORY_ORDER_RELAXED);
			os_event_set(lock->event);
			sync_array_object_signalled();
		}
	} else if (lock_word == -X_LOCK_DECR
		   || lock_word == -(X_LOCK_DECR + X_LOCK_HALF_DECR)) {
		/* There are 2 x-locks */
		my_atomic_add32_explicit(&lock->lock_word, X_LOCK_DECR,
					 MY_MEMORY_ORDER_RELAXED);
	} else {
		/* There are more than 2 x-locks. */
		ut_ad(lock_word < -X_LOCK_DECR);
		my_atomic_add32_explicit(&lock->lock_word, 1,
					 MY_MEMORY_ORDER_RELAXED);
	}

	ut_ad(rw_lock_validate(lock));
}
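
/* Usage sketch of recursive x-locking (illustrative only; acquisitions and
releases must pair up, and only the final release frees the latch):

	rw_lock_x_lock(lock);		lock_word: X_LOCK_DECR -> 0
	rw_lock_x_lock(lock);		relock: 0 -> -X_LOCK_DECR
	...
	rw_lock_x_unlock(lock);		-X_LOCK_DECR -> 0
	rw_lock_x_unlock(lock);		0 -> X_LOCK_DECR (latch free)
*/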

/******************************************************************//**
Releases an sx mode lock. */
UNIV_INLINE
void
rw_lock_sx_unlock_func(
/*===================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the lock may have
				been passed to another thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	ut_ad(rw_lock_get_sx_lock_count(lock));
	ut_ad(lock->sx_recursive > 0);

	--lock->sx_recursive;

	ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_SX));

	if (lock->sx_recursive == 0) {
		int32_t lock_word = my_atomic_load32_explicit(
			&lock->lock_word,
			MY_MEMORY_ORDER_RELAXED);
		/* Last caller in a possible recursive chain. */
		if (lock_word > 0) {
			lock->writer_thread = 0;
			ut_ad(lock_word <= INT_MAX32 - X_LOCK_HALF_DECR);

			/* Last SX-lock owned by this thread, which does not
			own an X-lock.
			ACQ_REL due to...
			RELEASE: we release the rw-lock
			ACQUIRE: we want waiters to be loaded after lock_word
			is stored */
			my_atomic_add32_explicit(&lock->lock_word,
						 X_LOCK_HALF_DECR,
						 MY_MEMORY_ORDER_ACQ_REL);

			/* Lock is now free. May have to signal read/write
			waiters. We do not need to signal wait_ex waiters,
			since they cannot exist when there is an sx-lock
			holder. */
			if (my_atomic_load32_explicit(&lock->waiters,
						      MY_MEMORY_ORDER_RELAXED)) {
				my_atomic_store32_explicit(&lock->waiters, 0,
							   MY_MEMORY_ORDER_RELAXED);
				os_event_set(lock->event);
				sync_array_object_signalled();
			}
		} else {
			/* still has x-lock */
			ut_ad(lock_word == -X_LOCK_HALF_DECR
			      || lock_word <= -(X_LOCK_DECR
						+ X_LOCK_HALF_DECR));
			my_atomic_add32_explicit(&lock->lock_word,
						 X_LOCK_HALF_DECR,
						 MY_MEMORY_ORDER_RELAXED);
		}
	}

	ut_ad(rw_lock_validate(lock));
}
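
/* Usage sketch (illustrative only). An sx-lock allows concurrent s-locks but
excludes other sx- and x-locks, which suits long modifications that readers
may still observe:

	rw_lock_sx_lock(lock);		lock_word drops by X_LOCK_HALF_DECR
	... modify, while readers may still take s-locks ...
	rw_lock_sx_unlock(lock);	lock_word is restored
*/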

#ifdef UNIV_PFS_RWLOCK

/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_create_func().
NOTE! Please use the corresponding macro rw_lock_create(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_create_func(
/*====================*/
	mysql_pfs_key_t	key,		/*!< in: key registered with
					performance schema */
	rw_lock_t*	lock,		/*!< in/out: pointer to memory */
# ifdef UNIV_DEBUG
	latch_level_t	level,		/*!< in: level */
# endif /* UNIV_DEBUG */
	const char*	cfile_name,	/*!< in: file name where created */
	unsigned	cline)		/*!< in: file line where created */
{
	ut_d(new(lock) rw_lock_t());

	/* Initialize the rwlock for performance schema */
	lock->pfs_psi = PSI_RWLOCK_CALL(init_rwlock)(key, lock);

	/* The actual function to initialize an rwlock */
	rw_lock_create_func(lock,
#ifdef UNIV_DEBUG
			    level,
#endif /* UNIV_DEBUG */
			    cfile_name,
			    cline);
}
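
/* Lifecycle sketch (illustrative only; buf_block_lock_key and
SYNC_LEVEL_VARYING stand in here for whatever PFS key and latching level
the caller registers):

	rw_lock_t	latch;
	rw_lock_create(buf_block_lock_key, &latch, SYNC_LEVEL_VARYING);
	... s-, sx- and x-lock the latch as needed ...
	rw_lock_free(&latch);
*/
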
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_x_lock_func()
NOTE! Please use the corresponding macro rw_lock_x_lock(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_x_lock_func(
/*====================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the lock will
				be passed to another thread to unlock */
	const char*	file_name,/*!< in: file name where lock requested */
	unsigned	line)	/*!< in: line where requested */
{
	if (lock->pfs_psi != NULL) {
		PSI_rwlock_locker*	locker;
		PSI_rwlock_locker_state	state;

		/* Record the acquisition of a read-write lock in exclusive
		mode in performance schema */
/* MySQL 5.7 New PSI */
#define PSI_RWLOCK_EXCLUSIVELOCK PSI_RWLOCK_WRITELOCK

		locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
			&state, lock->pfs_psi, PSI_RWLOCK_EXCLUSIVELOCK,
			file_name, static_cast<uint>(line));

		rw_lock_x_lock_func(
			lock, pass, file_name, static_cast<uint>(line));

		if (locker != NULL) {
			PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, 0);
		}
	} else {
		rw_lock_x_lock_func(lock, pass, file_name, line);
	}
}
/******************************************************************//**
Performance schema instrumented wrap function for
rw_lock_x_lock_func_nowait()
NOTE! Please use the corresponding macro rw_lock_x_lock_nowait(),
not directly this function!
@return TRUE if success */
UNIV_INLINE
ibool
pfs_rw_lock_x_lock_func_nowait(
/*===========================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	const char*	file_name,/*!< in: file name where lock
				requested */
	unsigned	line)	/*!< in: line where requested */
{
	ibool	ret;

	if (lock->pfs_psi != NULL) {
		PSI_rwlock_locker*	locker;
		PSI_rwlock_locker_state	state;

		/* Record the acquisition of a read-write trylock in exclusive
		mode in performance schema */

#define PSI_RWLOCK_TRYEXCLUSIVELOCK PSI_RWLOCK_TRYWRITELOCK
		locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
			&state, lock->pfs_psi, PSI_RWLOCK_TRYEXCLUSIVELOCK,
			file_name, static_cast<uint>(line));

		ret = rw_lock_x_lock_func_nowait(lock, file_name, line);

		if (locker != NULL) {
			PSI_RWLOCK_CALL(end_rwlock_wrwait)(
				locker, static_cast<int>(ret));
		}
	} else {
		ret = rw_lock_x_lock_func_nowait(lock, file_name, line);
	}

	return(ret);
}
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_free_func()
NOTE! Please use the corresponding macro rw_lock_free(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_free_func(
/*==================*/
	rw_lock_t*	lock)	/*!< in: pointer to rw-lock */
{
	if (lock->pfs_psi != NULL) {
		PSI_RWLOCK_CALL(destroy_rwlock)(lock->pfs_psi);
		lock->pfs_psi = NULL;
	}

	rw_lock_free_func(lock);
}
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_s_lock_func()
NOTE! Please use the corresponding macro rw_lock_s_lock(), not
directly this function! */
UNIV_INLINE
void
pfs_rw_lock_s_lock_func(
/*====================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock will be passed to another
				thread to unlock */
	const char*	file_name,/*!< in: file name where lock
				requested */
	unsigned	line)	/*!< in: line where requested */
{
	if (lock->pfs_psi != NULL) {
		PSI_rwlock_locker*	locker;
		PSI_rwlock_locker_state	state;

#define PSI_RWLOCK_SHAREDLOCK PSI_RWLOCK_READLOCK
		/* Instrumented to inform we are acquiring a shared rwlock */
		locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
			&state, lock->pfs_psi, PSI_RWLOCK_SHAREDLOCK,
			file_name, static_cast<uint>(line));

		rw_lock_s_lock_func(lock, pass, file_name, line);

		if (locker != NULL) {
			PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
		}
	} else {
		rw_lock_s_lock_func(lock, pass, file_name, line);
	}
}
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_sx_lock_func()
NOTE! Please use the corresponding macro rw_lock_sx_lock(), not
directly this function! */
UNIV_INLINE
void
pfs_rw_lock_sx_lock_func(
/*====================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock will be passed to another
				thread to unlock */
	const char*	file_name,/*!< in: file name where lock
				requested */
	unsigned	line)	/*!< in: line where requested */
{
	if (lock->pfs_psi != NULL) {
		PSI_rwlock_locker*	locker;
		PSI_rwlock_locker_state	state;

#define PSI_RWLOCK_SHAREDEXCLUSIVELOCK PSI_RWLOCK_WRITELOCK
		/* Instrumented to inform we are acquiring a shared
		exclusive rwlock */
		locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
			&state, lock->pfs_psi, PSI_RWLOCK_SHAREDEXCLUSIVELOCK,
			file_name, static_cast<uint>(line));

		rw_lock_sx_lock_func(lock, pass, file_name, line);

		if (locker != NULL) {
			PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, 0);
		}
	} else {
		rw_lock_sx_lock_func(lock, pass, file_name, line);
	}
}
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_s_lock_low()
NOTE! Please use the corresponding macro rw_lock_s_lock_nowait(), not
directly this function!
@return TRUE if success */
UNIV_INLINE
ibool
pfs_rw_lock_s_lock_low(
/*===================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock will be passed to another
				thread to unlock */
	const char*	file_name, /*!< in: file name where lock requested */
	unsigned	line)	/*!< in: line where requested */
{
	ibool	ret;

	if (lock->pfs_psi != NULL) {
		PSI_rwlock_locker*	locker;
		PSI_rwlock_locker_state	state;

#define PSI_RWLOCK_TRYSHAREDLOCK PSI_RWLOCK_TRYREADLOCK
		/* Instrumented to inform we are acquiring a shared rwlock */
		locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
			&state, lock->pfs_psi, PSI_RWLOCK_TRYSHAREDLOCK,
			file_name, static_cast<uint>(line));

		ret = rw_lock_s_lock_low(lock, pass, file_name, line);

		if (locker != NULL) {
			PSI_RWLOCK_CALL(end_rwlock_rdwait)(
				locker, static_cast<int>(ret));
		}
	} else {
		ret = rw_lock_s_lock_low(lock, pass, file_name, line);
	}

	return(ret);
}
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_sx_lock_low()
NOTE! Please use the corresponding macro rw_lock_sx_lock_nowait(), not
directly this function!
@return TRUE if success */
UNIV_INLINE
ibool
pfs_rw_lock_sx_lock_low(
/*====================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock will be passed to another
				thread to unlock */
	const char*	file_name, /*!< in: file name where lock requested */
	unsigned	line)	/*!< in: line where requested */
{
	ibool	ret;

	if (lock->pfs_psi != NULL) {
		PSI_rwlock_locker*	locker;
		PSI_rwlock_locker_state	state;

#define PSI_RWLOCK_TRYSHAREDEXCLUSIVELOCK PSI_RWLOCK_TRYWRITELOCK
		/* Instrumented to inform we are acquiring a shared
		exclusive rwlock */
		locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
			&state, lock->pfs_psi,
			PSI_RWLOCK_TRYSHAREDEXCLUSIVELOCK,
			file_name, static_cast<uint>(line));

		ret = rw_lock_sx_lock_low(lock, pass, file_name, line);

		if (locker != NULL) {
			PSI_RWLOCK_CALL(end_rwlock_rdwait)(
				locker, static_cast<int>(ret));
		}
	} else {
		ret = rw_lock_sx_lock_low(lock, pass, file_name, line);
	}

	return(ret);
}
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_x_unlock_func()
NOTE! Please use the corresponding macro rw_lock_x_unlock(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_x_unlock_func(
/*======================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock may have been passed to another
				thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	/* Inform performance schema we are unlocking the lock */
	if (lock->pfs_psi != NULL) {
		PSI_RWLOCK_CALL(unlock_rwlock)(lock->pfs_psi);
	}

	rw_lock_x_unlock_func(
#ifdef UNIV_DEBUG
		pass,
#endif /* UNIV_DEBUG */
		lock);
}

/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_sx_unlock_func()
NOTE! Please use the corresponding macro rw_lock_sx_unlock(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_sx_unlock_func(
/*======================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock may have been passed to another
				thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	/* Inform performance schema we are unlocking the lock */
	if (lock->pfs_psi != NULL) {
		PSI_RWLOCK_CALL(unlock_rwlock)(lock->pfs_psi);
	}

	rw_lock_sx_unlock_func(
#ifdef UNIV_DEBUG
		pass,
#endif /* UNIV_DEBUG */
		lock);
}

/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_s_unlock_func()
NOTE! Please use the corresponding macro rw_lock_s_unlock(), not
directly this function! */
UNIV_INLINE
void
pfs_rw_lock_s_unlock_func(
/*======================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock may have been passed to another
				thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	/* Inform performance schema we are unlocking the lock */
	if (lock->pfs_psi != NULL) {
		PSI_RWLOCK_CALL(unlock_rwlock)(lock->pfs_psi);
	}

	rw_lock_s_unlock_func(
#ifdef UNIV_DEBUG
		pass,
#endif /* UNIV_DEBUG */
		lock);
}
#endif /* UNIV_PFS_RWLOCK */