/* Copyright (C) 2002-2020 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <errno.h>
#include "pthreadP.h"
#include <atomic.h>
#include <stdbool.h>
#include "pthread_rwlock_common.c"


/* See pthread_rwlock_common.c for an overview.  */
int
__pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
  /* For tryrdlock, we could speculate that we will succeed and go ahead and
     register as a reader.  However, if we misspeculate, we have to do the
     same steps as a timed-out rdlock, which will increase contention.
     Therefore, there is a trade-off between being able to use a combinable
     read-modify-write operation and a CAS loop as used below; we pick the
     latter because it simplifies the code, and should perform better when
     tryrdlock is used in cases where writers are infrequent.
     Because POSIX does not require a failed trylock to "synchronize memory",
     relaxed MO is sufficient here and on the failure path of the CAS
     below.  */
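  /* For illustration only, a hypothetical sketch (not compiled code) of the
     speculative variant rejected above: it would register as a reader first
     and undo the registration on misspeculation, roughly:

       unsigned int r = atomic_fetch_add_acquire (&rwlock->__data.__readers,
                                                  1 << PTHREAD_RWLOCK_READER_SHIFT);
       if (<r indicates a conflicting write phase or primary writer>)
         <undo the registration as after a timed-out rdlock; return EBUSY>

     The undo path is what would add the extra contention on __readers.  */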
  unsigned int r = atomic_load_relaxed (&rwlock->__data.__readers);
  unsigned int rnew;
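  /* r packs the lock state into a single word: PTHREAD_RWLOCK_WRPHASE and
     PTHREAD_RWLOCK_WRLOCKED are flag bits in the low bits, and the bits from
     PTHREAD_RWLOCK_READER_SHIFT upwards hold the reader count (see
     pthread_rwlock_common.c for the full layout).  */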
  do
    {
      if ((r & PTHREAD_RWLOCK_WRPHASE) == 0)
        {
          /* If we are in a read phase, try to acquire unless there is a
             primary writer and we prefer writers and there will be no
             recursive read locks.  */
          if (((r & PTHREAD_RWLOCK_WRLOCKED) != 0)
              && (rwlock->__data.__flags
                  == PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP))
            return EBUSY;
          rnew = r + (1 << PTHREAD_RWLOCK_READER_SHIFT);
        }
      else
        {
          /* If there is a writer that has acquired the lock and we are in
             a write phase, fail.  */
          if ((r & PTHREAD_RWLOCK_WRLOCKED) != 0)
            return EBUSY;
          else
            {
              /* If we are in a write phase but no writer has acquired the
                 lock, try to start a read phase.  */
              rnew = (r + (1 << PTHREAD_RWLOCK_READER_SHIFT))
                     ^ PTHREAD_RWLOCK_WRPHASE;
            }
        }
      /* If we could have caused an overflow, or if we would take effect
         during an overflow, we simply return EAGAIN.  There is no need to
         have actually modified the number of readers because we could have
         done that and cleaned up immediately.  */
      if (rnew >= PTHREAD_RWLOCK_READER_OVERFLOW)
        return EAGAIN;
    }
  /* If the CAS fails, we retry; this prevents tryrdlock from failing
     spuriously (i.e., from failing to acquire the lock although there is no
     writer), which is fine for C++14 but not currently allowed by POSIX.
     However, because tryrdlock must not appear to block, we should avoid
     starving this CAS loop due to constant changes to __readers:
     While normal rdlock readers that won't be able to acquire will just block
     (and we expect timeouts on timedrdlock to be longer than one retry of the
     CAS loop), we can have concurrently failing tryrdlock calls due to
     readers or writers that acquire and release in the meantime.  Using
     randomized exponential back-off to make a live-lock unlikely should be
     sufficient.
     TODO Back-off.
     Acquire MO so we synchronize with prior writers.  */
  while (!atomic_compare_exchange_weak_acquire (&rwlock->__data.__readers,
                                                &r, rnew));
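  /* One possible shape for the back-off mentioned in the TODO above (a
     hypothetical sketch, not enabled here): before each CAS retry, spin for
     a randomized, exponentially growing number of iterations, e.g.:

       for (unsigned int spin = 4; <CAS failed>; spin = spin < 1024 ? spin * 2 : spin)
         for (unsigned int i = <random value> % spin; i > 0; i--)
           atomic_spin_nop ();

     The bounds and the randomness source are placeholders; atomic_spin_nop
     is the spin-loop pause hint used elsewhere in glibc.  */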

  if ((r & PTHREAD_RWLOCK_WRPHASE) != 0)
    {
      /* Same as in __pthread_rwlock_rdlock_full:
         We started the read phase, so we are also responsible for
         updating the write-phase futex.  Relaxed MO is sufficient.
         We have to do the same steps as a writer would when handing over the
         read phase to us because other readers cannot distinguish between
         us and a writer.
         Note that __pthread_rwlock_tryrdlock callers will not have to be
         woken up because they will either see the read phase started by us
         or they will try to start it themselves; however, callers of
         __pthread_rwlock_rdlock_full just increase the reader count and then
         check what state the lock is in, so they cannot distinguish between
         us and a writer that acquired and released the lock in the
         meantime.  */
      if ((atomic_exchange_relaxed (&rwlock->__data.__wrphase_futex, 0)
           & PTHREAD_RWLOCK_FUTEX_USED) != 0)
        {
          int private = __pthread_rwlock_get_private (rwlock);
          futex_wake (&rwlock->__data.__wrphase_futex, INT_MAX, private);
        }
    }

  return 0;
}
strong_alias (__pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock)
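
/* Usage sketch (application code, not part of this file): tryrdlock never
   blocks, so callers must be prepared for EBUSY (the lock is held in a
   conflicting mode, or writers are preferred; see
   pthread_rwlockattr_setkind_np) and EAGAIN (the reader count would
   overflow), e.g.:

     pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
     int err = pthread_rwlock_tryrdlock (&lock);
     if (err == 0)
       {
         <read the shared data>
         pthread_rwlock_unlock (&lock);
       }
     else if (err == EBUSY)
       <do other work, or block in pthread_rwlock_rdlock>  */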