1/*
2 Copyright (c) 2005-2019 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15*/
16
17#ifndef __TBB_reader_writer_lock_H
18#define __TBB_reader_writer_lock_H
19
20#include "tbb_thread.h"
21#include "tbb_allocator.h"
22#include "atomic.h"
23
24namespace tbb {
25namespace interface5 {
26//! Writer-preference reader-writer lock with local-only spinning on readers.
27/** Loosely adapted from Mellor-Crummey and Scott pseudocode at
28 http://www.cs.rochester.edu/research/synchronization/pseudocode/rw.html#s_wp
29 @ingroup synchronization */
class reader_writer_lock : tbb::internal::no_copy {
 public:
    // The scoped-lock node types need access to the private queue-management
    // methods (start_write, start_read, end_write, end_read, ...).
    friend class scoped_lock;
    friend class scoped_lock_read;
    //! Status type for nodes associated with lock instances
    /** waiting_nonblocking: the wait state for nonblocking lock
        instances; for writes, these transition straight to active
        states; for reads, these are unused.

        waiting: the start and spin state for all lock instances; these will
        transition to active state when appropriate.  Non-blocking write locks
        transition from this state to waiting_nonblocking immediately.

        active: the active state means that the lock instance holds
        the lock; it will transition to invalid state during node deletion

        invalid: the end state for all nodes; this is set in the
        destructor so if we encounter this state, we are looking at
        memory that has already been freed

        The state diagrams below describe the status transitions.
        Single arrows indicate that the thread that owns the node is
        responsible for the transition; double arrows indicate that
        any thread could make the transition.

        State diagram for scoped_lock status:

        waiting ----------> waiting_nonblocking
          |     _____________/       |
          V    V                     V
        active -----------------> invalid

        State diagram for scoped_lock_read status:

        waiting
          |
          V
        active ----------------->invalid

    */
    enum status_t { waiting_nonblocking, waiting, active, invalid };

    //! Constructs a new reader_writer_lock
    // All real initialization happens in the exported out-of-line routine so
    // the header stays ABI-stable across library versions.
    reader_writer_lock() {
        internal_construct();
    }

    //! Destructs a reader_writer_lock object
    ~reader_writer_lock() {
        internal_destroy();
    }

    //! The scoped lock pattern for write locks
    /** Scoped locks help avoid the common problem of forgetting to release the lock.
        This type also serves as the node for queuing locks. */
    class scoped_lock : tbb::internal::no_copy {
    public:
        friend class reader_writer_lock;

        //! Construct with blocking attempt to acquire write lock on the passed-in lock
        scoped_lock(reader_writer_lock& lock) {
            internal_construct(lock);
        }

        //! Destructor, releases the write lock
        ~scoped_lock() {
            internal_destroy();
        }

        // Allocation is routed through TBB's registered memory-allocation
        // handler so nodes are allocated and freed by the same allocator
        // as the rest of the TBB runtime.
        void* operator new(size_t s) {
            return tbb::internal::allocate_via_handler_v3(s);
        }
        void operator delete(void* p) {
            tbb::internal::deallocate_via_handler_v3(p);
        }

    private:
        //! The pointer to the mutex to lock
        reader_writer_lock *mutex;
        //! The next queued competitor for the mutex
        scoped_lock* next;
        //! Status flag of the thread associated with this node
        // Atomic: other threads update this to hand the lock over (see the
        // state diagram on status_t above); the owner spins on it locally.
        atomic<status_t> status;

        //! Construct scoped_lock that is not holding lock
        // Declared but not defined here; used internally by the library.
        scoped_lock();

        // Out-of-line acquire/release, implemented in the TBB runtime.
        void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&);
        void __TBB_EXPORTED_METHOD internal_destroy();
    };

    //! The scoped lock pattern for read locks
    /** Mirrors scoped_lock, but queues as a reader node. */
    class scoped_lock_read : tbb::internal::no_copy {
    public:
        friend class reader_writer_lock;

        //! Construct with blocking attempt to acquire read lock on the passed-in lock
        scoped_lock_read(reader_writer_lock& lock) {
            internal_construct(lock);
        }

        //! Destructor, releases the read lock
        ~scoped_lock_read() {
            internal_destroy();
        }

        // Same handler-based allocation as scoped_lock (see above).
        void* operator new(size_t s) {
            return tbb::internal::allocate_via_handler_v3(s);
        }
        void operator delete(void* p) {
            tbb::internal::deallocate_via_handler_v3(p);
        }

    private:
        //! The pointer to the mutex to lock
        reader_writer_lock *mutex;
        //! The next queued competitor for the mutex
        scoped_lock_read *next;
        //! Status flag of the thread associated with this node
        atomic<status_t> status;

        //! Construct scoped_lock_read that is not holding lock
        // Declared but not defined here; used internally by the library.
        scoped_lock_read();

        // Out-of-line acquire/release, implemented in the TBB runtime.
        void __TBB_EXPORTED_METHOD internal_construct(reader_writer_lock&);
        void __TBB_EXPORTED_METHOD internal_destroy();
    };

    //! Acquires the reader_writer_lock for write.
    /** If the lock is currently held in write mode by another
        context, the writer will block by spinning on a local
        variable.  Exceptions thrown: improper_lock The context tries
        to acquire a reader_writer_lock that it already has write
        ownership of.*/
    void __TBB_EXPORTED_METHOD lock();

    //! Tries to acquire the reader_writer_lock for write.
    /** This function does not block.  Return Value: True or false,
        depending on whether the lock is acquired or not.  If the lock
        is already held by this acquiring context, try_lock() returns
        false. */
    bool __TBB_EXPORTED_METHOD try_lock();

    //! Acquires the reader_writer_lock for read.
    /** If the lock is currently held by a writer, this reader will
        block and wait until the writers are done.  Exceptions thrown:
        improper_lock The context tries to acquire a
        reader_writer_lock that it already has write ownership of. */
    void __TBB_EXPORTED_METHOD lock_read();

    //! Tries to acquire the reader_writer_lock for read.
    /** This function does not block.  Return Value: True or false,
        depending on whether the lock is acquired or not.  */
    bool __TBB_EXPORTED_METHOD try_lock_read();

    //! Releases the reader_writer_lock
    // Works for both read and write ownership; the implementation determines
    // which mode the calling context holds.
    void __TBB_EXPORTED_METHOD unlock();

 private:
    void __TBB_EXPORTED_METHOD internal_construct();
    void __TBB_EXPORTED_METHOD internal_destroy();

    //! Attempts to acquire write lock
    /** If unavailable, spins in blocking case, returns false in non-blocking case. */
    bool start_write(scoped_lock *);
    //! Sets writer_head to w and attempts to unblock
    void set_next_writer(scoped_lock *w);
    //! Relinquishes write lock to next waiting writer or group of readers
    void end_write(scoped_lock *);
    //! Checks if current thread holds write lock
    bool is_current_writer();

    //! Attempts to acquire read lock
    /** If unavailable, spins in blocking case, returns false in non-blocking case. */
    void start_read(scoped_lock_read *);
    //! Unblocks pending readers
    void unblock_readers();
    //! Relinquishes read lock by decrementing counter; last reader wakes pending writer
    void end_read();

    // NOTE: member order below is part of the binary layout shared with the
    // out-of-line implementation in the TBB runtime -- do not reorder.

    //! The list of pending readers
    atomic<scoped_lock_read*> reader_head;
    //! The list of pending writers
    atomic<scoped_lock*> writer_head;
    //! The last node in the list of pending writers
    atomic<scoped_lock*> writer_tail;
    //! Writer that owns the mutex; tbb_thread::id() otherwise.
    tbb_thread::id my_current_writer;
    //! Status of mutex
    // Packs the active-reader count together with state flag bits; the exact
    // bit layout is defined by the implementation (.cpp), not here.
    atomic<uintptr_t> rdr_count_and_flags; // used with __TBB_AtomicOR, which assumes uintptr_t
};
221
222} // namespace interface5
223
224using interface5::reader_writer_lock;
225
226} // namespace tbb
227
228#endif /* __TBB_reader_writer_lock_H */
229