/*****************************************************************************

Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2017, MariaDB Corporation.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file sync/sync0rw.cc
The read-write lock (for thread synchronization)

Created 9/11/1995 Heikki Tuuri
*******************************************************/

#include "sync0rw.h"
#include "ha_prototypes.h"

#include "os0thread.h"
#include "mem0mem.h"
#include "srv0srv.h"
#include "os0event.h"
#include "srv0mon.h"
#include "sync0debug.h"
43#include "ha_prototypes.h"
#include "my_cpu.h"
#include <my_sys.h>

/*
        IMPLEMENTATION OF THE RW_LOCK
        =============================
The status of a rw_lock is held in lock_word. The initial value of lock_word is
X_LOCK_DECR. lock_word is decremented by 1 for each s-lock and by X_LOCK_DECR
or 1 for each x-lock. This describes the lock state for each value of lock_word:

lock_word == X_LOCK_DECR:       Unlocked.
X_LOCK_HALF_DECR < lock_word < X_LOCK_DECR:
                        S locked, no waiting writers.
                        (X_LOCK_DECR - lock_word) is the number
                        of S locks.
lock_word == X_LOCK_HALF_DECR:  SX locked, no waiting writers.
0 < lock_word < X_LOCK_HALF_DECR:
                        SX locked AND S locked, no waiting writers.
                        (X_LOCK_HALF_DECR - lock_word) is the number
                        of S locks.
lock_word == 0:         X locked, no waiting writers.
-X_LOCK_HALF_DECR < lock_word < 0:
                        S locked, with a waiting writer.
                        (-lock_word) is the number of S locks.
lock_word == -X_LOCK_HALF_DECR: X locked and SX locked, no waiting writers.
-X_LOCK_DECR < lock_word < -X_LOCK_HALF_DECR:
                        S locked, with a waiting writer
                        which has SX lock.
                        -(lock_word + X_LOCK_HALF_DECR) is the number
                        of S locks.
lock_word == -X_LOCK_DECR:      X locked with recursive X lock (2 X locks).
-(X_LOCK_DECR + X_LOCK_HALF_DECR) < lock_word < -X_LOCK_DECR:
                        X locked. The number of the X locks is:
                        2 - (lock_word + X_LOCK_DECR)
lock_word == -(X_LOCK_DECR + X_LOCK_HALF_DECR):
                        X locked with recursive X lock (2 X locks)
                        and SX locked.
lock_word < -(X_LOCK_DECR + X_LOCK_HALF_DECR):
                        X locked and SX locked.
                        The number of the X locks is:
                        2 - (lock_word + X_LOCK_DECR + X_LOCK_HALF_DECR)

        LOCK COMPATIBILITY MATRIX

           | S|SX| X|
         --+--+--+--+
          S| +| +| -|
         --+--+--+--+
         SX| +| -| -|
         --+--+--+--+
          X| -| -| -|
         --+--+--+--+

The lock_word is always read and updated atomically and consistently, so that
it always represents the state of the lock, and the state of the lock changes
with a single atomic operation. This lock_word holds all of the information
that a thread needs in order to determine if it is eligible to gain the lock
or if it must spin or sleep. The one exception to this is that writer_thread
must be verified before recursive write locks: to solve this scenario, we make
writer_thread readable by all threads, but only writeable by the x-lock or
sx-lock holder.

The other members of the lock obey the following rules to remain consistent:

writer_thread:  Is used only in recursive x-locking or sx-locking.
                This field is 0 at lock creation time and is updated
                when x-lock is acquired or when move_ownership is called.
                A thread is only allowed to set the value of this field to
                its own thread_id, i.e. a thread cannot set writer_thread to
                some other thread's id.
waiters:        May be set to 1 anytime, but to avoid unnecessary wake-up
                signals, it should only be set to 1 when there are threads
                waiting on event. Must be 1 when a writer starts waiting to
                ensure the current x-locking thread sends a wake-up signal
                during unlock. May only be reset to 0 immediately before a
                wake-up signal is sent to event. On most platforms, a
                memory barrier is required after waiters is set, and before
                verifying lock_word is still held, to ensure some unlocker
                really does see the flag's new value.
event:          Threads wait on event for read or writer lock when another
                thread has an x-lock or an x-lock reservation (wait_ex). A
                thread may only wait on event after performing the following
                actions in order:
                (1) Record the counter value of event (with os_event_reset).
                (2) Set waiters to 1.
                (3) Verify lock_word <= 0.
                (1) must come before (2) to ensure signal is not missed.
                (2) must come before (3) to ensure a signal is sent.
                These restrictions force the above ordering.
                Immediately before sending the wake-up signal, we should:
                (1) Verify lock_word == X_LOCK_DECR (unlocked)
                (2) Reset waiters to 0.
wait_ex_event:  A thread may only wait on the wait_ex_event after it has
                performed the following actions in order:
                (1) Decrement lock_word by X_LOCK_DECR.
                (2) Record counter value of wait_ex_event (os_event_reset,
                    called from sync_array_reserve_cell).
                (3) Verify that lock_word < 0.
                (1) must come first to ensure no other thread becomes a
                    reader or the next writer, and to notify the unlocker
                    that a signal must be sent.
                (2) must come before (3) to ensure the signal is not missed.
                These restrictions force the above ordering.
                Immediately before sending the wake-up signal, we should:
                Verify lock_word == 0 (waiting thread holds x_lock)
*/
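
/* To make the state table above concrete: as an illustration only (this
helper is not part of InnoDB and is never compiled), a lock_word snapshot
can be decoded mechanically, assuming the X_LOCK_DECR and X_LOCK_HALF_DECR
constants from sync0rw.h:

        static const char* rw_lock_word_describe(int32_t lock_word)
        {
                if (lock_word == X_LOCK_DECR) return("unlocked");
                if (lock_word > X_LOCK_HALF_DECR) return("S locked");
                if (lock_word == X_LOCK_HALF_DECR) return("SX locked");
                if (lock_word > 0) return("SX and S locked");
                if (lock_word == 0) return("X locked");
                if (lock_word > -X_LOCK_HALF_DECR)
                        return("S locked, waiting writer");
                if (lock_word == -X_LOCK_HALF_DECR)
                        return("X and SX locked");
                if (lock_word > -X_LOCK_DECR)
                        return("S locked, waiting writer holding SX");
                return("X locked recursively (possibly with SX)");
        }

For example, after one SX-lock and two S-locks, lock_word reads
X_LOCK_HALF_DECR - 2, which the table above classifies as "SX locked AND
S locked" with two S locks. */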

rw_lock_stats_t rw_lock_stats;

/* The global list of rw-locks */
rw_lock_list_t  rw_lock_list;
ib_mutex_t      rw_lock_list_mutex;

#ifdef UNIV_DEBUG
/******************************************************************//**
Creates a debug info struct. */
static
rw_lock_debug_t*
rw_lock_debug_create(void);
/*======================*/
/******************************************************************//**
Frees a debug info struct. */
static
void
rw_lock_debug_free(
/*===============*/
        rw_lock_debug_t* info);

/******************************************************************//**
Creates a debug info struct.
@return own: debug info struct */
static
rw_lock_debug_t*
rw_lock_debug_create(void)
/*======================*/
{
        return((rw_lock_debug_t*) ut_malloc_nokey(sizeof(rw_lock_debug_t)));
}

/******************************************************************//**
Frees a debug info struct. */
static
void
rw_lock_debug_free(
/*===============*/
        rw_lock_debug_t* info)
{
        ut_free(info);
}
#endif /* UNIV_DEBUG */

/******************************************************************//**
Creates, or rather, initializes an rw-lock object in a specified memory
location (which must be appropriately aligned). The rw-lock is initialized
to the non-locked state. Explicit freeing of the rw-lock with rw_lock_free
is necessary only if the memory block containing it is freed. */
void
rw_lock_create_func(
/*================*/
        rw_lock_t*      lock,           /*!< in: pointer to memory */
#ifdef UNIV_DEBUG
        latch_level_t   level,          /*!< in: level */
#endif /* UNIV_DEBUG */
        const char*     cfile_name,     /*!< in: file name where created */
        unsigned        cline)          /*!< in: file line where created */
{
#if defined(UNIV_DEBUG) && !defined(UNIV_PFS_RWLOCK)
        /* With UNIV_PFS_RWLOCK, the placement new is done in
        pfs_rw_lock_create_func() instead. */
        new(lock) rw_lock_t();
#endif /* UNIV_DEBUG && !UNIV_PFS_RWLOCK */

        lock->lock_word = X_LOCK_DECR;
        lock->waiters = 0;

        lock->sx_recursive = 0;
        lock->writer_thread = 0;

#ifdef UNIV_DEBUG
        lock->m_rw_lock = true;

        UT_LIST_INIT(lock->debug_list, &rw_lock_debug_t::list);

        lock->m_id = sync_latch_get_id(sync_latch_get_name(level));
        ut_a(lock->m_id != LATCH_ID_NONE);

        lock->level = level;
#endif /* UNIV_DEBUG */

        lock->cfile_name = cfile_name;

        /* This should hold in practice. If it doesn't, then we need to
        split the source file anyway. Or create the locks on lines
        less than 8192. cline is unsigned:13. */
        ut_ad(cline <= 8192);
        lock->cline = cline;
        lock->count_os_wait = 0;
        lock->last_x_file_name = "not yet reserved";
        lock->last_x_line = 0;
        lock->event = os_event_create(0);
        lock->wait_ex_event = os_event_create(0);

        lock->is_block_lock = 0;

        mutex_enter(&rw_lock_list_mutex);

        ut_ad(UT_LIST_GET_FIRST(rw_lock_list) == NULL
              || UT_LIST_GET_FIRST(rw_lock_list)->magic_n == RW_LOCK_MAGIC_N);

        UT_LIST_ADD_FIRST(rw_lock_list, lock);

        mutex_exit(&rw_lock_list_mutex);
}
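
/* A minimal usage sketch (illustrative only; "my_latch_key" is a
hypothetical PFS key and callers pass their own latch level): the lock is
created in place via the rw_lock_create() macro and freed explicitly only
because the memory block holding it is about to be released.

        rw_lock_t       latch;
        rw_lock_create(my_latch_key, &latch, SYNC_LEVEL_VARYING);
        rw_lock_s_lock(&latch);
        rw_lock_s_unlock(&latch);
        rw_lock_free(&latch);
*/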

/******************************************************************//**
Calling this function is obligatory only if the memory buffer containing
the rw-lock is freed. Removes an rw-lock object from the global list. The
rw-lock is checked to be in the non-locked state. */
void
rw_lock_free_func(
/*==============*/
        rw_lock_t*      lock)   /*!< in/out: rw-lock */
{
        ut_ad(rw_lock_validate(lock));
        ut_a(my_atomic_load32_explicit(&lock->lock_word,
                                       MY_MEMORY_ORDER_RELAXED)
             == X_LOCK_DECR);

        mutex_enter(&rw_lock_list_mutex);

        os_event_destroy(lock->event);

        os_event_destroy(lock->wait_ex_event);

        UT_LIST_REMOVE(rw_lock_list, lock);

        mutex_exit(&rw_lock_list_mutex);

        /* We did an in-place new in rw_lock_create_func() */
        ut_d(lock->~rw_lock_t());
        /* Sometimes (maybe when compiled with GCC -O3) the above call
        to rw_lock_t::~rw_lock_t() will not actually assign magic_n = 0. */
        ut_d(lock->magic_n = 0);
}

/******************************************************************//**
Lock an rw-lock in shared mode for the current thread. If the rw-lock is
locked in exclusive mode, or there is an exclusive lock request waiting,
the function spins a preset time (controlled by srv_n_spin_wait_rounds),
waiting for the lock, before suspending the thread. */
void
rw_lock_s_lock_spin(
/*================*/
        rw_lock_t*      lock,           /*!< in: pointer to rw-lock */
        ulint           pass,           /*!< in: pass value; != 0, if the
                                        lock will be passed to another
                                        thread to unlock */
        const char*     file_name,      /*!< in: file name where lock
                                        requested */
        unsigned        line)           /*!< in: line where requested */
{
        ulint           i = 0;  /* spin round count */
        sync_array_t*   sync_arr;
        lint            spin_count = 0;
        int64_t         count_os_wait = 0;

        ut_ad(rw_lock_validate(lock));

lock_loop:

        /* Spin waiting for the writer field to become free */
        HMT_low();
        while (i < srv_n_spin_wait_rounds
               && my_atomic_load32_explicit(&lock->lock_word,
                                            MY_MEMORY_ORDER_RELAXED) <= 0) {
                ut_delay(srv_spin_wait_delay);
                i++;
        }

        HMT_medium();
        if (i >= srv_n_spin_wait_rounds) {
                os_thread_yield();
        }

        ++spin_count;

        /* We try once again to obtain the lock */
        if (rw_lock_s_lock_low(lock, pass, file_name, line)) {

                if (count_os_wait > 0) {
                        lock->count_os_wait +=
                                static_cast<uint32_t>(count_os_wait);
                        rw_lock_stats.rw_s_os_wait_count.add(count_os_wait);
                }

                rw_lock_stats.rw_s_spin_round_count.add(spin_count);

                return; /* Success */
        } else {

                if (i < srv_n_spin_wait_rounds) {
                        goto lock_loop;
                }

                ++count_os_wait;

                sync_cell_t*    cell;

                sync_arr = sync_array_get_and_reserve_cell(
                        lock, RW_LOCK_S, file_name, line, &cell);

                /* Set waiters before checking lock_word to ensure wake-up
                signal is sent. This may lead to some unnecessary signals. */
                my_atomic_fas32_explicit(&lock->waiters, 1,
                                         MY_MEMORY_ORDER_ACQUIRE);

                if (rw_lock_s_lock_low(lock, pass, file_name, line)) {

                        sync_array_free_cell(sync_arr, cell);

                        if (count_os_wait > 0) {

                                lock->count_os_wait +=
                                        static_cast<uint32_t>(count_os_wait);

                                rw_lock_stats.rw_s_os_wait_count.add(
                                        count_os_wait);
                        }

                        rw_lock_stats.rw_s_spin_round_count.add(spin_count);

                        return; /* Success */
                }

                /* See the comments in trx_commit_low() about the DEBUG_SYNC
                point "before_trx_state_committed_in_memory", which explain
                why care is needed when invoking the following sync check. */
#ifndef DBUG_OFF
#ifdef UNIV_DEBUG
                if (lock->get_level() != SYNC_DICT_OPERATION) {
                        DEBUG_SYNC_C("rw_s_lock_waiting");
                }
#endif /* UNIV_DEBUG */
#endif /* !DBUG_OFF */
                sync_array_wait_event(sync_arr, cell);

                i = 0;

                goto lock_loop;
        }
}

/******************************************************************//**
This function is used in the insert buffer to move the ownership of an
x-latch on a buffer frame to the current thread. The x-latch was set by
the buffer read operation and it protected the buffer frame while the
read was done. The ownership is moved because we want that the current
thread is able to acquire a second x-latch which is stored in an mtr.
This, in turn, is needed to pass the debug checks of index page
operations. */
void
rw_lock_x_lock_move_ownership(
/*==========================*/
        rw_lock_t*      lock)   /*!< in: lock which was x-locked in the
                                buffer read */
{
        ut_ad(rw_lock_is_locked(lock, RW_LOCK_X));

        lock->writer_thread = os_thread_get_curr_id();
}

/******************************************************************//**
Function for the next writer to call. Waits for readers to exit.
The caller must have already decremented lock_word by X_LOCK_DECR. */
UNIV_INLINE
void
rw_lock_x_lock_wait_func(
/*=====================*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
#ifdef UNIV_DEBUG
        ulint           pass,   /*!< in: pass value; != 0, if the lock will
                                be passed to another thread to unlock */
#endif /* UNIV_DEBUG */
        lint            threshold,/*!< in: threshold to wait for */
        const char*     file_name,/*!< in: file name where lock requested */
        unsigned        line)   /*!< in: line where requested */
{
        ulint           i = 0;
        lint            n_spins = 0;
        sync_array_t*   sync_arr;
        int64_t         count_os_wait = 0;

        ut_ad(my_atomic_load32_explicit(&lock->lock_word,
                                        MY_MEMORY_ORDER_RELAXED)
              <= threshold);

        HMT_low();
        while (my_atomic_load32_explicit(&lock->lock_word,
                                         MY_MEMORY_ORDER_RELAXED)
               < threshold) {
                ut_delay(srv_spin_wait_delay);

                if (i < srv_n_spin_wait_rounds) {
                        i++;
                        continue;
                }

                /* If there is still a reader, then go to sleep. */
                ++n_spins;

                sync_cell_t*    cell;

                sync_arr = sync_array_get_and_reserve_cell(
                        lock, RW_LOCK_X_WAIT, file_name, line, &cell);

                i = 0;

                /* Check lock_word to ensure wake-up isn't missed. */
                if (my_atomic_load32_explicit(&lock->lock_word,
                                              MY_MEMORY_ORDER_RELAXED)
                    < threshold) {

                        ++count_os_wait;

                        /* Add debug info as it is needed to detect possible
                        deadlock. We must add info for WAIT_EX thread for
                        deadlock detection to work properly. */
                        ut_d(rw_lock_add_debug_info(
                                lock, pass, RW_LOCK_X_WAIT,
                                file_name, line));

                        sync_array_wait_event(sync_arr, cell);

                        ut_d(rw_lock_remove_debug_info(
                                lock, pass, RW_LOCK_X_WAIT));

                        /* It is possible to wake when lock_word < 0.
                        We must pass the while-loop check to proceed. */

                } else {
                        sync_array_free_cell(sync_arr, cell);
                        break;
                }
        }
        HMT_medium();
        rw_lock_stats.rw_x_spin_round_count.add(n_spins);

        if (count_os_wait > 0) {
                lock->count_os_wait += static_cast<uint32_t>(count_os_wait);
                rw_lock_stats.rw_x_os_wait_count.add(count_os_wait);
        }
}
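
/* A worked example of the wait threshold above, assuming three readers
hold the lock when the writer arrives (constants as in sync0rw.h):

        start:                  lock_word == X_LOCK_DECR - 3
        writer decrements:      lock_word == -3 (X_LOCK_DECR subtracted)
        readers release:        lock_word -> -2 -> -1 -> 0 == threshold

The caller in rw_lock_x_lock_low() passes threshold == 0 for a plain
X-lock, and threshold == -X_LOCK_HALF_DECR when an SX holder upgrades to
X, since its own SX reservation keeps lock_word offset by
X_LOCK_HALF_DECR. */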

#ifdef UNIV_DEBUG
# define rw_lock_x_lock_wait(L, P, T, F, O)             \
        rw_lock_x_lock_wait_func(L, P, T, F, O)
#else
# define rw_lock_x_lock_wait(L, P, T, F, O)             \
        rw_lock_x_lock_wait_func(L, T, F, O)
#endif /* UNIV_DEBUG */

/******************************************************************//**
Low-level function for acquiring an exclusive lock.
@return FALSE if did not succeed, TRUE if success. */
UNIV_INLINE
ibool
rw_lock_x_lock_low(
/*===============*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
        ulint           pass,   /*!< in: pass value; != 0, if the lock will
                                be passed to another thread to unlock */
        const char*     file_name,/*!< in: file name where lock requested */
        unsigned        line)   /*!< in: line where requested */
{
        if (rw_lock_lock_word_decr(lock, X_LOCK_DECR, X_LOCK_HALF_DECR)) {

                /* As we are going to write our own thread id in that field it
                must be that the current writer_thread value is not active. */
                ut_a(!lock->writer_thread);

                /* Decrement occurred: we are writer or next-writer. */
                if (!pass) {
                        lock->writer_thread = os_thread_get_curr_id();
                }

                rw_lock_x_lock_wait(lock, pass, 0, file_name, line);

        } else {
                os_thread_id_t  thread_id = os_thread_get_curr_id();

                /* Decrement failed: An X or SX lock is held by either
                this thread or another. Try to relock. */
                if (!pass && os_thread_eq(lock->writer_thread, thread_id)) {
                        /* Other S-locks can still be granted. If an X lock
                        is requested recursively while holding an SX lock,
                        this X lock must follow the latching order. */

                        /* The existing X or SX lock is from this thread */
                        if (rw_lock_lock_word_decr(lock, X_LOCK_DECR, 0)) {
                                /* There is at least one SX-lock from this
                                thread, but no X-lock. */

                                /* Wait for any other S-locks to be
                                released. */
                                rw_lock_x_lock_wait(
                                        lock, pass, -X_LOCK_HALF_DECR,
                                        file_name, line);

                        } else {
                                int32_t lock_word = my_atomic_load32_explicit(
                                        &lock->lock_word,
                                        MY_MEMORY_ORDER_RELAXED);
                                /* At least one X lock by this thread already
                                exists. Add another. */
                                if (lock_word == 0
                                    || lock_word == -X_LOCK_HALF_DECR) {
                                        my_atomic_add32_explicit(
                                                &lock->lock_word,
                                                -X_LOCK_DECR,
                                                MY_MEMORY_ORDER_RELAXED);
                                } else {
                                        ut_ad(lock_word <= -X_LOCK_DECR);
                                        my_atomic_add32_explicit(
                                                &lock->lock_word, -1,
                                                MY_MEMORY_ORDER_RELAXED);
                                }
                        }

                } else {
                        /* Another thread locked before us */
                        return(FALSE);
                }
        }

        ut_d(rw_lock_add_debug_info(lock, pass, RW_LOCK_X, file_name, line));

        lock->last_x_file_name = file_name;
        lock->last_x_line = line;

        return(TRUE);
}
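
/* A worked sketch of the recursive X accounting above (the values follow
the state table at the top of this file):

        1st X lock:             X_LOCK_DECR -> 0    (subtract X_LOCK_DECR)
        2nd (recursive) X:      0 -> -X_LOCK_DECR   (subtract X_LOCK_DECR)
        3rd and later:          -X_LOCK_DECR -> -X_LOCK_DECR - 1, ...
                                (subtract 1 each)

If the thread also holds an SX lock, the same transitions apply shifted by
-X_LOCK_HALF_DECR, starting from lock_word == -X_LOCK_HALF_DECR. */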

/******************************************************************//**
Low-level function for acquiring an sx lock.
@return FALSE if did not succeed, TRUE if success. */
ibool
rw_lock_sx_lock_low(
/*================*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
        ulint           pass,   /*!< in: pass value; != 0, if the lock will
                                be passed to another thread to unlock */
        const char*     file_name,/*!< in: file name where lock requested */
        unsigned        line)   /*!< in: line where requested */
{
        if (rw_lock_lock_word_decr(lock, X_LOCK_HALF_DECR,
                                   X_LOCK_HALF_DECR)) {

                /* As we are going to write our own thread id in that field it
                must be that the current writer_thread value is not active. */
                ut_a(!lock->writer_thread);

                /* Decrement occurred: we are the SX lock owner. */
                if (!pass) {
                        lock->writer_thread = os_thread_get_curr_id();
                }

                lock->sx_recursive = 1;
        } else {
                os_thread_id_t  thread_id = os_thread_get_curr_id();

                /* Decrement failed: the lock is already X- or SX-locked by
                this thread or another. If it is this thread, relock;
                otherwise fail. */
                if (!pass && os_thread_eq(lock->writer_thread, thread_id)) {
                        /* This thread owns an X or SX lock */
                        if (lock->sx_recursive++ == 0) {
                                /* This thread is making its first SX-lock
                                request and it must be holding at least one
                                X-lock here because:

                                * There can't be a WAIT_EX thread because we
                                are the thread which has its thread_id
                                written in the writer_thread field and we
                                are not waiting.

                                * Any other X-lock thread cannot exist
                                because it must update the recursive flag
                                only after updating the thread_id. Had there
                                been a concurrent X-locking thread which
                                succeeded in decrementing the lock_word, it
                                must have written its thread_id before
                                setting the recursive flag. Since we passed
                                the if() condition above, we must be the
                                only thread working on this lock, and it is
                                safe to read and write the lock_word. */

#ifdef UNIV_DEBUG
                                int32_t lock_word =
#endif
                                my_atomic_add32_explicit(
                                        &lock->lock_word, -X_LOCK_HALF_DECR,
                                        MY_MEMORY_ORDER_RELAXED);

                                ut_ad((lock_word == 0)
                                      || ((lock_word <= -X_LOCK_DECR)
                                          && (lock_word
                                              > -(X_LOCK_DECR
                                                  + X_LOCK_HALF_DECR))));
                        }
                } else {
                        /* Another thread locked before us */
                        return(FALSE);
                }
        }

        ut_d(rw_lock_add_debug_info(lock, pass, RW_LOCK_SX, file_name, line));

        lock->last_x_file_name = file_name;
        lock->last_x_line = line;

        return(TRUE);
}
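
/* A worked sketch of the SX accounting (again following the state table):

        1st SX lock:            X_LOCK_DECR -> X_LOCK_HALF_DECR
        recursive SX:           lock_word unchanged; only sx_recursive grows
        1st SX while holding X: 0 -> -X_LOCK_HALF_DECR
                                (the my_atomic_add32_explicit() above)

Only the first SX request by a thread touches lock_word; subsequent
recursive SX requests are counted purely in sx_recursive. */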

/******************************************************************//**
NOTE! Use the corresponding macro, not directly this function! Lock an
rw-lock in exclusive mode for the current thread. If the rw-lock is locked
in shared or exclusive mode, or there is an exclusive lock request waiting,
the function spins a preset time (controlled by srv_n_spin_wait_rounds),
waiting for the lock before suspending the thread. If the same thread has
an x-lock on the rw-lock, locking succeeds, with the following exception:
if pass != 0, only a single x-lock may be taken on the lock. NOTE: If the
same thread has an s-lock, locking does not succeed! */
void
rw_lock_x_lock_func(
/*================*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
        ulint           pass,   /*!< in: pass value; != 0, if the lock will
                                be passed to another thread to unlock */
        const char*     file_name,/*!< in: file name where lock requested */
        unsigned        line)   /*!< in: line where requested */
{
        ulint           i = 0;
        sync_array_t*   sync_arr;
        lint            spin_count = 0;
        int64_t         count_os_wait = 0;

        ut_ad(rw_lock_validate(lock));
        ut_ad(!rw_lock_own(lock, RW_LOCK_S));

lock_loop:

        if (rw_lock_x_lock_low(lock, pass, file_name, line)) {

                if (count_os_wait > 0) {
                        lock->count_os_wait +=
                                static_cast<uint32_t>(count_os_wait);
                        rw_lock_stats.rw_x_os_wait_count.add(count_os_wait);
                }

                rw_lock_stats.rw_x_spin_round_count.add(spin_count);

                /* Locking succeeded */
                return;

        } else {

                /* Spin waiting for the lock_word to become free */
                HMT_low();
                while (i < srv_n_spin_wait_rounds
                       && my_atomic_load32_explicit(
                               &lock->lock_word,
                               MY_MEMORY_ORDER_RELAXED)
                          <= X_LOCK_HALF_DECR) {
                        ut_delay(srv_spin_wait_delay);
                        i++;
                }

                HMT_medium();
                spin_count += lint(i);

                if (i >= srv_n_spin_wait_rounds) {

                        os_thread_yield();

                } else {

                        goto lock_loop;
                }
        }

        sync_cell_t*    cell;

        sync_arr = sync_array_get_and_reserve_cell(
                lock, RW_LOCK_X, file_name, line, &cell);

        /* Waiters must be set before checking lock_word, to ensure signal
        is sent. This could lead to a few unnecessary wake-up signals. */
        my_atomic_fas32_explicit(&lock->waiters, 1, MY_MEMORY_ORDER_ACQUIRE);

        if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
                sync_array_free_cell(sync_arr, cell);

                if (count_os_wait > 0) {
                        lock->count_os_wait +=
                                static_cast<uint32_t>(count_os_wait);
                        rw_lock_stats.rw_x_os_wait_count.add(count_os_wait);
                }

                rw_lock_stats.rw_x_spin_round_count.add(spin_count);

                /* Locking succeeded */
                return;
        }

        ++count_os_wait;

        sync_array_wait_event(sync_arr, cell);

        i = 0;

        goto lock_loop;
}

/******************************************************************//**
NOTE! Use the corresponding macro, not directly this function! Lock an
rw-lock in SX mode for the current thread. If the rw-lock is locked
in exclusive mode, or there is an exclusive lock request waiting,
the function spins a preset time (controlled by srv_n_spin_wait_rounds),
waiting for the lock, before suspending the thread. If the same thread has
an x-lock on the rw-lock, locking succeeds, with the following exception:
if pass != 0, only a single sx-lock may be taken on the lock. NOTE: If the
same thread has an s-lock, locking does not succeed! */
void
rw_lock_sx_lock_func(
/*=================*/
        rw_lock_t*      lock,   /*!< in: pointer to rw-lock */
        ulint           pass,   /*!< in: pass value; != 0, if the lock will
                                be passed to another thread to unlock */
        const char*     file_name,/*!< in: file name where lock requested */
        unsigned        line)   /*!< in: line where requested */
{
        ulint           i = 0;
        sync_array_t*   sync_arr;
        lint            spin_count = 0;
        int64_t         count_os_wait = 0;
        lint            spin_wait_count = 0;

        ut_ad(rw_lock_validate(lock));
        ut_ad(!rw_lock_own(lock, RW_LOCK_S));

lock_loop:

        if (rw_lock_sx_lock_low(lock, pass, file_name, line)) {

                if (count_os_wait > 0) {
                        lock->count_os_wait +=
                                static_cast<uint32_t>(count_os_wait);
                        rw_lock_stats.rw_sx_os_wait_count.add(count_os_wait);
                }

                rw_lock_stats.rw_sx_spin_round_count.add(spin_count);
                rw_lock_stats.rw_sx_spin_wait_count.add(spin_wait_count);

                /* Locking succeeded */
                return;

        } else {

                ++spin_wait_count;

                /* Spin waiting for the lock_word to become free */
                while (i < srv_n_spin_wait_rounds
                       && my_atomic_load32_explicit(
                               &lock->lock_word,
                               MY_MEMORY_ORDER_RELAXED)
                          <= X_LOCK_HALF_DECR) {
                        ut_delay(srv_spin_wait_delay);
                        i++;
                }

                spin_count += lint(i);

                if (i >= srv_n_spin_wait_rounds) {

                        os_thread_yield();

                } else {

                        goto lock_loop;
                }
        }

        sync_cell_t*    cell;

        sync_arr = sync_array_get_and_reserve_cell(
                lock, RW_LOCK_SX, file_name, line, &cell);

        /* Waiters must be set before checking lock_word, to ensure signal
        is sent. This could lead to a few unnecessary wake-up signals. */
        my_atomic_fas32_explicit(&lock->waiters, 1, MY_MEMORY_ORDER_ACQUIRE);

        if (rw_lock_sx_lock_low(lock, pass, file_name, line)) {

                sync_array_free_cell(sync_arr, cell);

                if (count_os_wait > 0) {
                        lock->count_os_wait +=
                                static_cast<uint32_t>(count_os_wait);
                        rw_lock_stats.rw_sx_os_wait_count.add(count_os_wait);
                }

                rw_lock_stats.rw_sx_spin_round_count.add(spin_count);
                rw_lock_stats.rw_sx_spin_wait_count.add(spin_wait_count);

                /* Locking succeeded */
                return;
        }

        ++count_os_wait;

        sync_array_wait_event(sync_arr, cell);

        i = 0;

        goto lock_loop;
}

#ifdef UNIV_DEBUG

/******************************************************************//**
Checks that the rw-lock has been initialized and that there are no
simultaneous shared and exclusive locks.
@return true */
bool
rw_lock_validate(
/*=============*/
        const rw_lock_t*        lock)   /*!< in: rw-lock */
{
        int32_t lock_word;

        ut_ad(lock);

        lock_word = my_atomic_load32_explicit(
                const_cast<int32_t*>(&lock->lock_word),
                MY_MEMORY_ORDER_RELAXED);

        ut_ad(lock->magic_n == RW_LOCK_MAGIC_N);
        ut_ad(my_atomic_load32_explicit(const_cast<int32_t*>(&lock->waiters),
                                        MY_MEMORY_ORDER_RELAXED) < 2);
        ut_ad(lock_word > -(2 * X_LOCK_DECR));
        ut_ad(lock_word <= X_LOCK_DECR);

        return(true);
}

/******************************************************************//**
Checks if somebody has locked the rw-lock in the specified mode.
@return true if locked */
bool
rw_lock_is_locked(
/*==============*/
        rw_lock_t*      lock,           /*!< in: rw-lock */
        ulint           lock_type)      /*!< in: lock type: RW_LOCK_S,
                                        RW_LOCK_X or RW_LOCK_SX */
{
        ut_ad(rw_lock_validate(lock));

        switch (lock_type) {
        case RW_LOCK_S:
                return(rw_lock_get_reader_count(lock) > 0);

        case RW_LOCK_X:
                return(rw_lock_get_writer(lock) == RW_LOCK_X);

        case RW_LOCK_SX:
                return(rw_lock_get_sx_lock_count(lock) > 0);

        default:
                ut_error;
        }
        return(false); /* avoid compiler warnings */
}

/******************************************************************//**
Inserts the debug information for an rw-lock. */
void
rw_lock_add_debug_info(
/*===================*/
        rw_lock_t*      lock,           /*!< in: rw-lock */
        ulint           pass,           /*!< in: pass value */
        ulint           lock_type,      /*!< in: lock type */
        const char*     file_name,      /*!< in: file where requested */
        unsigned        line)           /*!< in: line where requested */
{
        ut_ad(file_name != NULL);

        rw_lock_debug_t*        info = rw_lock_debug_create();

        rw_lock_debug_mutex_enter();

        info->pass = pass;
        info->line = line;
        info->lock_type = lock_type;
        info->file_name = file_name;
        info->thread_id = os_thread_get_curr_id();

        UT_LIST_ADD_FIRST(lock->debug_list, info);

        rw_lock_debug_mutex_exit();

        if (pass == 0 && lock_type != RW_LOCK_X_WAIT) {
                int32_t lock_word = my_atomic_load32_explicit(
                        &lock->lock_word, MY_MEMORY_ORDER_RELAXED);

                /* Recursive x while holding SX
                (lock_type == RW_LOCK_X && lock_word == -X_LOCK_HALF_DECR)
                is treated as not-relock (new lock). */

                if ((lock_type == RW_LOCK_X
                     && lock_word < -X_LOCK_HALF_DECR)
                    || (lock_type == RW_LOCK_SX
                        && (lock_word < 0 || lock->sx_recursive == 1))) {

                        sync_check_lock_validate(lock);
                        sync_check_lock_granted(lock);
                } else {
                        sync_check_relock(lock);
                }
        }
}

/******************************************************************//**
Removes a debug information struct for an rw-lock. */
void
rw_lock_remove_debug_info(
/*======================*/
        rw_lock_t*      lock,           /*!< in: rw-lock */
        ulint           pass,           /*!< in: pass value */
        ulint           lock_type)      /*!< in: lock type */
{
        rw_lock_debug_t*        info;

        ut_ad(lock);

        if (pass == 0 && lock_type != RW_LOCK_X_WAIT) {
                sync_check_unlock(lock);
        }

        rw_lock_debug_mutex_enter();

        for (info = UT_LIST_GET_FIRST(lock->debug_list);
             info != 0;
             info = UT_LIST_GET_NEXT(list, info)) {

                if (pass == info->pass
                    && (pass != 0
                        || os_thread_eq(info->thread_id,
                                        os_thread_get_curr_id()))
                    && info->lock_type == lock_type) {

                        /* Found! */
                        UT_LIST_REMOVE(lock->debug_list, info);

                        rw_lock_debug_mutex_exit();

                        rw_lock_debug_free(info);

                        return;
                }
        }

        ut_error;
}

/******************************************************************//**
Checks if the thread has locked the rw-lock in the specified mode, with
the pass value == 0.
@return true if locked */
bool
rw_lock_own(
/*========*/
        rw_lock_t*      lock,           /*!< in: rw-lock */
        ulint           lock_type)      /*!< in: lock type: RW_LOCK_S,
                                        RW_LOCK_X */
{
        ut_ad(lock);
        ut_ad(rw_lock_validate(lock));

        rw_lock_debug_mutex_enter();

        for (const rw_lock_debug_t* info = UT_LIST_GET_FIRST(lock->debug_list);
             info != NULL;
             info = UT_LIST_GET_NEXT(list, info)) {

                if (os_thread_eq(info->thread_id, os_thread_get_curr_id())
                    && info->pass == 0
                    && info->lock_type == lock_type) {

                        rw_lock_debug_mutex_exit();
                        /* Found! */

                        return(true);
                }
        }
        rw_lock_debug_mutex_exit();

        return(false);
}

/** For collecting the debug information for a thread's rw-lock */
typedef std::vector<rw_lock_debug_t*> Infos;

/** Collect the debug information for the locks that the current thread
owns on an rw-lock.
@param[in]      lock    rw-lock to check
@param[out]     infos   debug info structs owned by the current thread */
void
rw_lock_get_debug_info(const rw_lock_t* lock, Infos* infos)
{
        rw_lock_debug_t*        info = NULL;

        ut_ad(rw_lock_validate(lock));

        rw_lock_debug_mutex_enter();

        for (info = UT_LIST_GET_FIRST(lock->debug_list);
             info != NULL;
             info = UT_LIST_GET_NEXT(list, info)) {

                if (os_thread_eq(info->thread_id, os_thread_get_curr_id())) {

                        infos->push_back(info);
                }
        }

        rw_lock_debug_mutex_exit();
}

/** Checks if the thread has locked the rw-lock in the specified mode, with
the pass value == 0.
@param[in]      lock    rw-lock
@param[in]      flags   specify lock types with OR of the
                        rw_lock_flag_t values
@return true if locked */
bool
rw_lock_own_flagged(
        const rw_lock_t*        lock,
        rw_lock_flags_t         flags)
{
        Infos   infos;

        rw_lock_get_debug_info(lock, &infos);

        Infos::const_iterator   end = infos.end();

        for (Infos::const_iterator it = infos.begin(); it != end; ++it) {

                const rw_lock_debug_t*  info = *it;

                ut_ad(os_thread_eq(info->thread_id, os_thread_get_curr_id()));

                if (info->pass != 0) {
                        continue;
                }

                switch (info->lock_type) {
                case RW_LOCK_S:

                        if (flags & RW_LOCK_FLAG_S) {
                                return(true);
                        }
                        break;

                case RW_LOCK_X:

                        if (flags & RW_LOCK_FLAG_X) {
                                return(true);
                        }
                        break;

                case RW_LOCK_SX:

                        if (flags & RW_LOCK_FLAG_SX) {
                                return(true);
                        }
                }
        }

        return(false);
}
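
/* A minimal usage sketch: debug code typically asserts that the current
thread holds a latch in one of several acceptable modes, for example

        ut_ad(rw_lock_own_flagged(lock,
                                  RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX));

which succeeds if the thread holds either an X or an SX lock with
pass == 0. */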

/***************************************************************//**
Prints debug info of currently locked rw-locks. */
void
rw_lock_list_print_info(
/*====================*/
        FILE*   file)   /*!< in: file where to print */
{
        ulint   count = 0;

        mutex_enter(&rw_lock_list_mutex);

        fputs("-------------\n"
              "RW-LATCH INFO\n"
              "-------------\n", file);

        for (const rw_lock_t* lock = UT_LIST_GET_FIRST(rw_lock_list);
             lock != NULL;
             lock = UT_LIST_GET_NEXT(list, lock)) {

                count++;

                if (my_atomic_load32_explicit(
                            const_cast<int32_t*>(&lock->lock_word),
                            MY_MEMORY_ORDER_RELAXED) != X_LOCK_DECR) {

                        fprintf(file, "RW-LOCK: %p ", (void*) lock);

                        if (int32_t waiters = my_atomic_load32_explicit(
                                    const_cast<int32_t*>(&lock->waiters),
                                    MY_MEMORY_ORDER_RELAXED)) {
                                fprintf(file, " (%d waiters)\n", waiters);
                        } else {
                                putc('\n', file);
                        }

                        rw_lock_debug_t*        info;

                        rw_lock_debug_mutex_enter();

                        for (info = UT_LIST_GET_FIRST(lock->debug_list);
                             info != NULL;
                             info = UT_LIST_GET_NEXT(list, info)) {

                                rw_lock_debug_print(file, info);
                        }

                        rw_lock_debug_mutex_exit();
                }
        }

        fprintf(file, "Total number of rw-locks " ULINTPF "\n", count);
        mutex_exit(&rw_lock_list_mutex);
}

/*********************************************************************//**
Prints info of a debug struct. */
void
rw_lock_debug_print(
/*================*/
        FILE*                   f,      /*!< in: output stream */
        const rw_lock_debug_t*  info)   /*!< in: debug struct */
{
        ulint   rwt = info->lock_type;

        fprintf(f, "Locked: thread %lu file %s line %lu ",
                static_cast<ulong>(os_thread_pf(info->thread_id)),
                sync_basename(info->file_name),
                static_cast<ulong>(info->line));

        switch (rwt) {
        case RW_LOCK_S:
                fputs("S-LOCK", f);
                break;
        case RW_LOCK_X:
                fputs("X-LOCK", f);
                break;
        case RW_LOCK_SX:
                fputs("SX-LOCK", f);
                break;
        case RW_LOCK_X_WAIT:
                fputs("WAIT X-LOCK", f);
                break;
        default:
                ut_error;
        }

        if (info->pass != 0) {
                fprintf(f, " pass value %lu", (ulong) info->pass);
        }

        fprintf(f, "\n");
}

/** Print where it was locked from
@return the string representation */
std::string
rw_lock_t::locked_from() const
{
        /* Note: For X locks it can be locked from multiple places because
        the same thread can call X lock recursively. */

        std::ostringstream      msg;
        Infos                   infos;

        rw_lock_get_debug_info(this, &infos);

        ulint   i = 0;
        Infos::const_iterator   end = infos.end();

        for (Infos::const_iterator it = infos.begin(); it != end; ++it, ++i) {

                const rw_lock_debug_t*  info = *it;

                ut_ad(os_thread_eq(info->thread_id, os_thread_get_curr_id()));

                if (i > 0) {
                        msg << ", ";
                }

                msg << info->file_name << ":" << info->line;
        }

        return(msg.str());
}

/** Print the rw-lock information.
@return the string representation */
std::string
rw_lock_t::to_string() const
{
        std::ostringstream      msg;

        msg << "RW-LATCH: "
            << "thread id " << os_thread_pf(os_thread_get_curr_id())
            << " addr: " << this
            << " Locked from: " << locked_from().c_str();

        return(msg.str());
}
#endif /* UNIV_DEBUG */