1 | /* |
2 | * Copyright 2012-present Facebook, Inc. |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. |
6 | * You may obtain a copy of the License at |
7 | * |
8 | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | * |
10 | * Unless required by applicable law or agreed to in writing, software |
11 | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. |
15 | */ |
16 | |
17 | #pragma once |
18 | |
19 | #include <folly/Random.h> |
20 | #include <folly/Synchronized.h> |
21 | #include <folly/container/Foreach.h> |
22 | #include <folly/portability/GTest.h> |
23 | #include <glog/logging.h> |
24 | #include <algorithm> |
25 | #include <condition_variable> |
26 | #include <functional> |
27 | #include <map> |
28 | #include <random> |
29 | #include <thread> |
30 | #include <vector> |
31 | |
32 | namespace folly { |
33 | namespace sync_tests { |
34 | |
35 | inline std::mt19937& getRNG() { |
36 | static const auto seed = folly::randomNumberSeed(); |
37 | static std::mt19937 rng(seed); |
38 | return rng; |
39 | } |
40 | |
41 | void randomSleep(std::chrono::milliseconds min, std::chrono::milliseconds max) { |
42 | std::uniform_int_distribution<> range(min.count(), max.count()); |
43 | std::chrono::milliseconds duration(range(getRNG())); |
44 | /* sleep override */ |
45 | std::this_thread::sleep_for(duration); |
46 | } |
47 | |
/*
 * Run a function simultaneously in a number of different threads.
 *
 * The function will be passed the index number of the thread it is running in.
 * This function makes an attempt to synchronize the start of the threads as
 * best as possible.  It waits for all threads to be allocated and started
 * before invoking the function.
 */
template <class Function>
void runParallel(size_t numThreads, const Function& function) {
  std::vector<std::thread> threads;
  threads.reserve(numThreads);

  // Variables used to synchronize all threads to try and start them
  // as close to the same time as possible
  //
  // threadsReady counts workers that have reached the start barrier;
  // go is flipped exactly once by this thread to release all workers.
  folly::Synchronized<size_t, std::mutex> threadsReady(0);
  std::condition_variable readyCV;
  folly::Synchronized<bool, std::mutex> go(false);
  std::condition_variable goCV;

  auto worker = [&](size_t threadIndex) {
    // Signal that we are ready
    ++(*threadsReady.lock());
    readyCV.notify_one();

    // Wait until we are given the signal to start
    // The purpose of this is to try and make sure all threads start
    // as close to the same time as possible.
    {
      // The predicate re-reads *lockedGo under the lock, so spurious
      // condition-variable wakeups are handled correctly.
      auto lockedGo = go.lock();
      goCV.wait(lockedGo.getUniqueLock(), [&] { return *lockedGo; });
    }

    function(threadIndex);
  };

  // Start all of the threads
  for (size_t threadIndex = 0; threadIndex < numThreads; ++threadIndex) {
    threads.emplace_back([threadIndex, &worker]() { worker(threadIndex); });
  }

  // Wait for all threads to become ready
  {
    auto readyLocked = threadsReady.lock();
    readyCV.wait(readyLocked.getUniqueLock(), [&] {
      return *readyLocked == numThreads;
    });
  }
  // Now signal the threads that they can go
  // (Synchronized's assignment operator takes the lock internally.)
  go = true;
  goCV.notify_all();

  // Wait for all threads to finish
  for (auto& thread : threads) {
    thread.join();
  }
}
105 | |
// testBasic() version for shared lock types
//
// Exercises wlock()/rlock(), copy construction from a locked value,
// scopedUnlock(), and contextualLock() on a const reference.
template <class Mutex>
typename std::enable_if<folly::LockTraits<Mutex>::is_shared>::type
testBasicImpl() {
  folly::Synchronized<std::vector<int>, Mutex> obj;
  const auto& constObj = obj;

  obj.wlock()->resize(1000);

  // Copy-construct obj2 from obj's (locked) current contents.
  folly::Synchronized<std::vector<int>, Mutex> obj2{*obj.wlock()};
  EXPECT_EQ(1000, obj2.rlock()->size());

  {
    auto lockedObj = obj.wlock();
    lockedObj->push_back(10);
    EXPECT_EQ(1001, lockedObj->size());
    EXPECT_EQ(10, lockedObj->back());
    // obj2 is an independent copy; it must not observe the push_back above.
    EXPECT_EQ(1000, obj2.wlock()->size());
    EXPECT_EQ(1000, obj2.rlock()->size());

    {
      // scopedUnlock temporarily releases the write lock, so re-locking
      // obj here must not deadlock.
      auto unlocker = lockedObj.scopedUnlock();
      EXPECT_EQ(1001, obj.wlock()->size());
    }
  }

  {
    auto lockedObj = obj.rlock();
    EXPECT_EQ(1001, lockedObj->size());
    // Take another read lock alongside the one we already hold
    // (permitted for shared lock types).
    EXPECT_EQ(1001, obj.rlock()->size());
    {
      // Releasing the read lock via scopedUnlock allows a write lock here.
      auto unlocker = lockedObj.scopedUnlock();
      EXPECT_EQ(1001, obj.wlock()->size());
    }
  }

  obj.wlock()->front() = 2;

  {
    // contextualLock() on a const reference should grab a shared lock
    auto lockedObj = constObj.contextualLock();
    EXPECT_EQ(2, lockedObj->front());
    EXPECT_EQ(2, constObj.rlock()->front());
    EXPECT_EQ(2, obj.rlock()->front());
  }

  EXPECT_EQ(1001, obj.rlock()->size());
  EXPECT_EQ(2, obj.rlock()->front());
  EXPECT_EQ(10, obj.rlock()->back());
  EXPECT_EQ(1000, obj2.rlock()->size());
}
157 | |
158 | // testBasic() version for non-shared lock types |
159 | template <class Mutex> |
160 | typename std::enable_if<!folly::LockTraits<Mutex>::is_shared>::type |
161 | testBasicImpl() { |
162 | folly::Synchronized<std::vector<int>, Mutex> obj; |
163 | const auto& constObj = obj; |
164 | |
165 | obj.lock()->resize(1000); |
166 | |
167 | folly::Synchronized<std::vector<int>, Mutex> obj2{*obj.lock()}; |
168 | EXPECT_EQ(1000, obj2.lock()->size()); |
169 | |
170 | { |
171 | auto lockedObj = obj.lock(); |
172 | lockedObj->push_back(10); |
173 | EXPECT_EQ(1001, lockedObj->size()); |
174 | EXPECT_EQ(10, lockedObj->back()); |
175 | EXPECT_EQ(1000, obj2.lock()->size()); |
176 | |
177 | { |
178 | auto unlocker = lockedObj.scopedUnlock(); |
179 | EXPECT_EQ(1001, obj.lock()->size()); |
180 | } |
181 | } |
182 | { |
183 | auto lockedObj = constObj.lock(); |
184 | EXPECT_EQ(1001, lockedObj->size()); |
185 | EXPECT_EQ(10, lockedObj->back()); |
186 | EXPECT_EQ(1000, obj2.lock()->size()); |
187 | } |
188 | |
189 | obj.lock()->front() = 2; |
190 | |
191 | EXPECT_EQ(1001, obj.lock()->size()); |
192 | EXPECT_EQ(2, obj.lock()->front()); |
193 | EXPECT_EQ(2, obj.contextualLock()->front()); |
194 | EXPECT_EQ(10, obj.lock()->back()); |
195 | EXPECT_EQ(1000, obj2.lock()->size()); |
196 | } |
197 | |
// Entry point for the basic-functionality tests.  The appropriate
// testBasicImpl() overload is selected via enable_if on whether Mutex
// supports shared (reader) locking.
template <class Mutex>
void testBasic() {
  testBasicImpl<Mutex>();
}
202 | |
// testWithLock() version for shared lock types
//
// Exercises the callback-based withWLock()/withRLock() APIs, their C++14
// generic-lambda forms, and the withWLockPtr()/withRLockPtr() variants that
// expose the LockedPtr (and therefore scopedUnlock()) to the callback.
template <class Mutex>
typename std::enable_if<folly::LockTraits<Mutex>::is_shared>::type
testWithLock() {
  folly::Synchronized<std::vector<int>, Mutex> obj;
  const auto& constObj = obj;

  // Test withWLock() and withRLock()
  obj.withWLock([](std::vector<int>& lockedObj) {
    lockedObj.resize(1000);
    lockedObj.push_back(10);
    lockedObj.push_back(11);
  });
  obj.withWLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
  });
  // withWLock() through a const reference only grants const access.
  constObj.withWLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
    EXPECT_EQ(11, lockedObj.back());
  });
  obj.withRLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
    EXPECT_EQ(11, lockedObj.back());
  });
  constObj.withRLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
  });

  // Repeat the checks with C++14 generic lambdas when available.
#if __cpp_generic_lambdas >= 201304
  obj.withWLock([](auto& lockedObj) { lockedObj.push_back(12); });
  obj.withWLock(
      [](const auto& lockedObj) { EXPECT_EQ(1003, lockedObj.size()); });
  constObj.withWLock([](const auto& lockedObj) {
    EXPECT_EQ(1003, lockedObj.size());
    EXPECT_EQ(12, lockedObj.back());
  });
  obj.withRLock([](const auto& lockedObj) {
    EXPECT_EQ(1003, lockedObj.size());
    EXPECT_EQ(12, lockedObj.back());
  });
  constObj.withRLock(
      [](const auto& lockedObj) { EXPECT_EQ(1003, lockedObj.size()); });
  obj.withWLock([](auto& lockedObj) { lockedObj.pop_back(); });
#endif

  // Test withWLockPtr() and withRLockPtr()
  using SynchType = folly::Synchronized<std::vector<int>, Mutex>;
#if __cpp_generic_lambdas >= 201304
  obj.withWLockPtr([](auto&& lockedObj) { lockedObj->push_back(13); });
  obj.withRLockPtr([](auto&& lockedObj) {
    EXPECT_EQ(1003, lockedObj->size());
    EXPECT_EQ(13, lockedObj->back());
  });
  constObj.withRLockPtr([](auto&& lockedObj) {
    EXPECT_EQ(1003, lockedObj->size());
    EXPECT_EQ(13, lockedObj->back());
  });
  // The LockedPtr form supports scopedUnlock() inside the callback.
  obj.withWLockPtr([&](auto&& lockedObj) {
    lockedObj->push_back(14);
    {
      auto unlocker = lockedObj.scopedUnlock();
      obj.wlock()->push_back(15);
    }
    EXPECT_EQ(15, lockedObj->back());
  });
  constObj.withWLockPtr([](auto&& lockedObj) {
    EXPECT_EQ(1005, lockedObj->size());
    EXPECT_EQ(15, lockedObj->back());
  });
#else
  // Without generic lambdas, spell out the LockedPtr type explicitly and
  // perform the same three pushes so the size checks below still agree.
  obj.withWLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
    lockedObj->push_back(13);
    lockedObj->push_back(14);
    lockedObj->push_back(15);
  });
#endif

  obj.withWLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
    lockedObj->push_back(16);
    EXPECT_EQ(1006, lockedObj->size());
  });
  constObj.withWLockPtr([](typename SynchType::ConstWLockedPtr&& lockedObj) {
    EXPECT_EQ(1006, lockedObj->size());
    EXPECT_EQ(16, lockedObj->back());
  });
  obj.withRLockPtr([](typename SynchType::ConstLockedPtr&& lockedObj) {
    EXPECT_EQ(1006, lockedObj->size());
    EXPECT_EQ(16, lockedObj->back());
  });
  constObj.withRLockPtr([](typename SynchType::ConstLockedPtr&& lockedObj) {
    EXPECT_EQ(1006, lockedObj->size());
    EXPECT_EQ(16, lockedObj->back());
  });
}
297 | |
// testWithLock() version for non-shared lock types
//
// Exercises the callback-based withLock() API, its C++14 generic-lambda
// form, and the withLockPtr() variant that exposes the LockedPtr (and
// therefore scopedUnlock()) to the callback.
template <class Mutex>
typename std::enable_if<!folly::LockTraits<Mutex>::is_shared>::type
testWithLock() {
  folly::Synchronized<std::vector<int>, Mutex> obj;

  // Test withLock()
  obj.withLock([](std::vector<int>& lockedObj) {
    lockedObj.resize(1000);
    lockedObj.push_back(10);
    lockedObj.push_back(11);
  });
  obj.withLock([](const std::vector<int>& lockedObj) {
    EXPECT_EQ(1002, lockedObj.size());
  });

  // Repeat with C++14 generic lambdas when available.
#if __cpp_generic_lambdas >= 201304
  obj.withLock([](auto& lockedObj) { lockedObj.push_back(12); });
  obj.withLock(
      [](const auto& lockedObj) { EXPECT_EQ(1003, lockedObj.size()); });
  obj.withLock([](auto& lockedObj) { lockedObj.pop_back(); });
#endif

  // Test withLockPtr()
  using SynchType = folly::Synchronized<std::vector<int>, Mutex>;
#if __cpp_generic_lambdas >= 201304
  obj.withLockPtr([](auto&& lockedObj) { lockedObj->push_back(13); });
  obj.withLockPtr([](auto&& lockedObj) {
    EXPECT_EQ(1003, lockedObj->size());
    EXPECT_EQ(13, lockedObj->back());
  });
  // The LockedPtr form supports scopedUnlock() inside the callback.
  obj.withLockPtr([&](auto&& lockedObj) {
    lockedObj->push_back(14);
    {
      auto unlocker = lockedObj.scopedUnlock();
      obj.lock()->push_back(15);
    }
    EXPECT_EQ(1005, lockedObj->size());
    EXPECT_EQ(15, lockedObj->back());
  });
#else
  // Without generic lambdas, spell out the LockedPtr type explicitly and
  // perform the same three pushes so the size checks below still agree.
  obj.withLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
    lockedObj->push_back(13);
    lockedObj->push_back(14);
    lockedObj->push_back(15);
  });
#endif

  obj.withLockPtr([](typename SynchType::LockedPtr&& lockedObj) {
    lockedObj->push_back(16);
    EXPECT_EQ(1006, lockedObj->size());
  });
  const auto& constObj = obj;
  // withLockPtr() on a const object yields a ConstLockedPtr.
  constObj.withLockPtr([](typename SynchType::ConstLockedPtr&& lockedObj) {
    EXPECT_EQ(1006, lockedObj->size());
    EXPECT_EQ(16, lockedObj->back());
  });
}
356 | |
357 | template <class Mutex> |
358 | void testUnlockCommon() { |
359 | folly::Synchronized<int, Mutex> value{7}; |
360 | const auto& cv = value; |
361 | |
362 | { |
363 | auto lv = value.contextualLock(); |
364 | EXPECT_EQ(7, *lv); |
365 | *lv = 5; |
366 | lv.unlock(); |
367 | EXPECT_TRUE(lv.isNull()); |
368 | EXPECT_FALSE(lv); |
369 | |
370 | auto rlv = cv.contextualLock(); |
371 | EXPECT_EQ(5, *rlv); |
372 | rlv.unlock(); |
373 | EXPECT_TRUE(rlv.isNull()); |
374 | EXPECT_FALSE(rlv); |
375 | |
376 | auto rlv2 = cv.contextualRLock(); |
377 | EXPECT_EQ(5, *rlv2); |
378 | rlv2.unlock(); |
379 | |
380 | lv = value.contextualLock(); |
381 | EXPECT_EQ(5, *lv); |
382 | *lv = 9; |
383 | } |
384 | |
385 | EXPECT_EQ(9, *value.contextualRLock()); |
386 | } |
387 | |
388 | // testUnlock() version for shared lock types |
389 | template <class Mutex> |
390 | typename std::enable_if<folly::LockTraits<Mutex>::is_shared>::type |
391 | testUnlock() { |
392 | folly::Synchronized<int, Mutex> value{10}; |
393 | { |
394 | auto lv = value.wlock(); |
395 | EXPECT_EQ(10, *lv); |
396 | *lv = 5; |
397 | lv.unlock(); |
398 | EXPECT_FALSE(lv); |
399 | EXPECT_TRUE(lv.isNull()); |
400 | |
401 | auto rlv = value.rlock(); |
402 | EXPECT_EQ(5, *rlv); |
403 | rlv.unlock(); |
404 | EXPECT_FALSE(rlv); |
405 | EXPECT_TRUE(rlv.isNull()); |
406 | |
407 | auto lv2 = value.wlock(); |
408 | EXPECT_EQ(5, *lv2); |
409 | *lv2 = 7; |
410 | |
411 | lv = std::move(lv2); |
412 | EXPECT_FALSE(lv2); |
413 | EXPECT_TRUE(lv2.isNull()); |
414 | EXPECT_FALSE(lv.isNull()); |
415 | EXPECT_EQ(7, *lv); |
416 | } |
417 | |
418 | testUnlockCommon<Mutex>(); |
419 | } |
420 | |
421 | // testUnlock() version for non-shared lock types |
422 | template <class Mutex> |
423 | typename std::enable_if<!folly::LockTraits<Mutex>::is_shared>::type |
424 | testUnlock() { |
425 | folly::Synchronized<int, Mutex> value{10}; |
426 | { |
427 | auto lv = value.lock(); |
428 | EXPECT_EQ(10, *lv); |
429 | *lv = 5; |
430 | lv.unlock(); |
431 | EXPECT_TRUE(lv.isNull()); |
432 | EXPECT_FALSE(lv); |
433 | |
434 | auto lv2 = value.lock(); |
435 | EXPECT_EQ(5, *lv2); |
436 | *lv2 = 6; |
437 | lv2.unlock(); |
438 | EXPECT_TRUE(lv2.isNull()); |
439 | EXPECT_FALSE(lv2); |
440 | |
441 | lv = value.lock(); |
442 | EXPECT_EQ(6, *lv); |
443 | *lv = 7; |
444 | |
445 | lv2 = std::move(lv); |
446 | EXPECT_TRUE(lv.isNull()); |
447 | EXPECT_FALSE(lv); |
448 | EXPECT_FALSE(lv2.isNull()); |
449 | EXPECT_EQ(7, *lv2); |
450 | } |
451 | |
452 | testUnlockCommon<Mutex>(); |
453 | } |
454 | |
// Testing the deprecated SYNCHRONIZED and SYNCHRONIZED_CONST APIs
//
// Inside SYNCHRONIZED(obj) the macro shadows `obj` with a reference to the
// locked data, which is why plain member calls like obj.push_back() work.
template <class Mutex>
void testDeprecated() {
  folly::Synchronized<std::vector<int>, Mutex> obj;

  // operator-> takes the lock just for the duration of the single call.
  obj->resize(1000);

  auto obj2 = obj;
  EXPECT_EQ(1000, obj2->size());

  SYNCHRONIZED(obj) {
    obj.push_back(10);
    EXPECT_EQ(1001, obj.size());
    EXPECT_EQ(10, obj.back());
    EXPECT_EQ(1000, obj2->size());
  }

  SYNCHRONIZED_CONST(obj) {
    EXPECT_EQ(1001, obj.size());
  }

  // Two-argument form: name the locked data explicitly.  The `*&` is
  // presumably there to force an lvalue expression for the macro —
  // TODO(review): confirm against the macro's documentation.
  SYNCHRONIZED(lockedObj, *&obj) {
    lockedObj.front() = 2;
  }

  EXPECT_EQ(1001, obj->size());
  EXPECT_EQ(10, obj->back());
  EXPECT_EQ(1000, obj2->size());

  // Sanity-check the FB_ARG_2_OR_1 helper macro used by SYNCHRONIZED.
  EXPECT_EQ(FB_ARG_2_OR_1(1, 2), 2);
  EXPECT_EQ(FB_ARG_2_OR_1(1), 1);
}
487 | |
488 | template <class Mutex> |
489 | void testConcurrency() { |
490 | folly::Synchronized<std::vector<int>, Mutex> v; |
491 | static const size_t numThreads = 100; |
492 | // Note: I initially tried using itersPerThread = 1000, |
493 | // which works fine for most lock types, but std::shared_timed_mutex |
494 | // appears to be extraordinarily slow. It could take around 30 seconds |
495 | // to run this test with 1000 iterations per thread using shared_timed_mutex. |
496 | static const size_t itersPerThread = 100; |
497 | |
498 | auto pushNumbers = [&](size_t threadIdx) { |
499 | // Test lock() |
500 | for (size_t n = 0; n < itersPerThread; ++n) { |
501 | v.contextualLock()->push_back((itersPerThread * threadIdx) + n); |
502 | std::this_thread::yield(); |
503 | } |
504 | }; |
505 | runParallel(numThreads, pushNumbers); |
506 | |
507 | std::vector<int> result; |
508 | v.swap(result); |
509 | |
510 | EXPECT_EQ(numThreads * itersPerThread, result.size()); |
511 | sort(result.begin(), result.end()); |
512 | |
513 | for (size_t i = 0; i < itersPerThread * numThreads; ++i) { |
514 | EXPECT_EQ(i, result[i]); |
515 | } |
516 | } |
517 | |
// Test acquireLocked(), which locks two Synchronized objects together.
// Half the threads acquire (v, m) and the other half (m, v); without
// deadlock avoidance in acquireLocked() this pattern would be prone to
// deadlock.
template <class Mutex>
void testAcquireLocked() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<std::map<int, int>, Mutex> m;

  auto dualLockWorker = [&](size_t threadIdx) {
    // Note: this will be less awkward with C++ 17's structured
    // binding functionality, which will make it easier to use the returned
    // std::tuple.
    if (threadIdx & 1) {
      auto ret = acquireLocked(v, m);
      std::get<0>(ret)->push_back(threadIdx);
      (*std::get<1>(ret))[threadIdx] = threadIdx + 1;
    } else {
      auto ret = acquireLocked(m, v);
      std::get<1>(ret)->push_back(threadIdx);
      (*std::get<0>(ret))[threadIdx] = threadIdx + 1;
    }
  };
  static const size_t numThreads = 100;
  runParallel(numThreads, dualLockWorker);

  // Each thread pushed its index exactly once; verify nothing was lost.
  std::vector<int> result;
  v.swap(result);

  EXPECT_EQ(numThreads, result.size());
  sort(result.begin(), result.end());

  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
}
550 | |
// Like testAcquireLocked(), but passes the map through a const reference so
// acquireLocked() grants only const access to it.
template <class Mutex>
void testAcquireLockedWithConst() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<std::map<int, int>, Mutex> m;

  auto dualLockWorker = [&](size_t threadIdx) {
    const auto& cm = m;
    if (threadIdx & 1) {
      auto ret = acquireLocked(v, cm);
      // Only const operations are available on the const map's lock.
      (void)std::get<1>(ret)->size();
      std::get<0>(ret)->push_back(threadIdx);
    } else {
      auto ret = acquireLocked(cm, v);
      (void)std::get<0>(ret)->size();
      std::get<1>(ret)->push_back(threadIdx);
    }
  };
  static const size_t numThreads = 100;
  runParallel(numThreads, dualLockWorker);

  // Each thread pushed its index exactly once; verify nothing was lost.
  std::vector<int> result;
  v.swap(result);

  EXPECT_EQ(numThreads, result.size());
  sort(result.begin(), result.end());

  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
}
581 | |
// Testing the deprecated SYNCHRONIZED_DUAL API
//
// Half the threads lock (v, m) and the other half (m, v);
// SYNCHRONIZED_DUAL is expected to order the acquisitions so that this
// opposite-order pattern does not deadlock.
template <class Mutex>
void testDualLocking() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<std::map<int, int>, Mutex> m;

  auto dualLockWorker = [&](size_t threadIdx) {
    if (threadIdx & 1) {
      SYNCHRONIZED_DUAL(lv, v, lm, m) {
        lv.push_back(threadIdx);
        lm[threadIdx] = threadIdx + 1;
      }
    } else {
      SYNCHRONIZED_DUAL(lm, m, lv, v) {
        lv.push_back(threadIdx);
        lm[threadIdx] = threadIdx + 1;
      }
    }
  };
  static const size_t numThreads = 100;
  runParallel(numThreads, dualLockWorker);

  // Each thread pushed its index exactly once; verify nothing was lost.
  std::vector<int> result;
  v.swap(result);

  EXPECT_EQ(numThreads, result.size());
  sort(result.begin(), result.end());

  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
}
614 | |
// Testing the deprecated SYNCHRONIZED_DUAL API
//
// Like testDualLocking(), but the map is accessed through a const
// reference, so SYNCHRONIZED_DUAL grants only const access to it.
template <class Mutex>
void testDualLockingWithConst() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<std::map<int, int>, Mutex> m;

  auto dualLockWorker = [&](size_t threadIdx) {
    const auto& cm = m;
    if (threadIdx & 1) {
      SYNCHRONIZED_DUAL(lv, v, lm, cm) {
        // Only const operations are available on the const map.
        (void)lm.size();
        lv.push_back(threadIdx);
      }
    } else {
      SYNCHRONIZED_DUAL(lm, cm, lv, v) {
        (void)lm.size();
        lv.push_back(threadIdx);
      }
    }
  };
  static const size_t numThreads = 100;
  runParallel(numThreads, dualLockWorker);

  // Each thread pushed its index exactly once; verify nothing was lost.
  std::vector<int> result;
  v.swap(result);

  EXPECT_EQ(numThreads, result.size());
  sort(result.begin(), result.end());

  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
}
648 | |
// Test contextualLock() with a timeout: on timeout the returned LockedPtr
// is null and the worker retries, counting how often that happened.
template <class Mutex>
void testTimed() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<uint64_t, Mutex> numTimeouts;

  auto worker = [&](size_t threadIdx) {
    // Test directly using operator-> on the lock result
    v.contextualLock()->push_back(2 * threadIdx);

    // Test using lock with a timeout
    for (;;) {
      auto lv = v.contextualLock(std::chrono::milliseconds(5));
      if (!lv) {
        // Timed out; count it and retry until the lock is acquired.
        ++(*numTimeouts.contextualLock());
        continue;
      }

      // Sleep for a random time to ensure we trigger timeouts
      // in other threads
      randomSleep(std::chrono::milliseconds(5), std::chrono::milliseconds(15));
      lv->push_back(2 * threadIdx + 1);
      break;
    }
  };

  static const size_t numThreads = 100;
  runParallel(numThreads, worker);

  std::vector<int> result;
  v.swap(result);

  // Each thread pushed exactly two values; verify nothing was lost.
  EXPECT_EQ(2 * numThreads, result.size());
  sort(result.begin(), result.end());

  for (size_t i = 0; i < 2 * numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
  // We generally expect a large number of timeouts here.
  // I'm not adding a check for it since it's theoretically possible that
  // we might get 0 timeouts depending on the CPU scheduling if our threads
  // don't get to run very often.
  LOG(INFO) << "testTimed: " << *numTimeouts.contextualRLock() << " timeouts" ;

  // Make sure we can lock with various timeout duration units
  {
    auto lv = v.contextualLock(std::chrono::milliseconds(5));
    EXPECT_TRUE(bool(lv));
    EXPECT_FALSE(lv.isNull());
    auto lv2 = v.contextualLock(std::chrono::microseconds(5));
    // We may or may not acquire lv2 successfully, depending on whether
    // or not this is a recursive mutex type.
  }
  {
    auto lv = v.contextualLock(std::chrono::seconds(1));
    EXPECT_TRUE(bool(lv));
  }
}
706 | |
// Test rlock() with a timeout on shared lock types: on timeout the
// returned LockedPtr is null and the worker retries, counting how often
// that happened.
template <class Mutex>
void testTimedShared() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<uint64_t, Mutex> numTimeouts;

  auto worker = [&](size_t threadIdx) {
    // Test directly using operator-> on the lock result
    v.wlock()->push_back(threadIdx);

    // Test lock() with a timeout
    for (;;) {
      auto lv = v.rlock(std::chrono::milliseconds(10));
      if (!lv) {
        // Timed out; count it and retry until the read lock is acquired.
        ++(*numTimeouts.contextualLock());
        continue;
      }

      // Sleep while holding the lock.
      //
      // This will block other threads from acquiring the write lock to add
      // their thread index to v, but it won't block threads that have entered
      // the for loop and are trying to acquire a read lock.
      //
      // For lock types that give preference to readers rather than writers,
      // this will tend to serialize all threads on the wlock() above.
      randomSleep(std::chrono::milliseconds(5), std::chrono::milliseconds(15));
      // Our own wlock() push above must be visible through the read lock.
      auto found = std::find(lv->begin(), lv->end(), threadIdx);
      CHECK(found != lv->end());
      break;
    }
  };

  static const size_t numThreads = 100;
  runParallel(numThreads, worker);

  std::vector<int> result;
  v.swap(result);

  // Each thread pushed its index exactly once; verify nothing was lost.
  EXPECT_EQ(numThreads, result.size());
  sort(result.begin(), result.end());

  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
  // We generally expect a small number of timeouts here.
  // For locks that give readers preference over writers this should usually
  // be 0.  With locks that give writers preference we do see a small-ish
  // number of read timeouts.
  LOG(INFO) << "testTimedShared: " << *numTimeouts.contextualRLock()
            << " timeouts" ;
}
758 | |
// Testing the deprecated TIMED_SYNCHRONIZED API
//
// Inside TIMED_SYNCHRONIZED the named variable (lv) is a pointer that is
// null when the lock attempt timed out, hence the if (lv) check.
template <class Mutex>
void testTimedSynchronized() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<uint64_t, Mutex> numTimeouts;

  auto worker = [&](size_t threadIdx) {
    // Test operator->
    v->push_back(2 * threadIdx);

    // Aaand test the TIMED_SYNCHRONIZED macro
    for (;;) {
      TIMED_SYNCHRONIZED(5, lv, v) {
        if (lv) {
          // Sleep for a random time to ensure we trigger timeouts
          // in other threads
          randomSleep(
              std::chrono::milliseconds(5), std::chrono::milliseconds(15));
          lv->push_back(2 * threadIdx + 1);
          return;
        }

        // Lock attempt timed out; count it and retry.
        ++(*numTimeouts.contextualLock());
      }
    }
  };

  static const size_t numThreads = 100;
  runParallel(numThreads, worker);

  std::vector<int> result;
  v.swap(result);

  // Each thread pushed exactly two values; verify nothing was lost.
  EXPECT_EQ(2 * numThreads, result.size());
  sort(result.begin(), result.end());

  for (size_t i = 0; i < 2 * numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
  // We generally expect a large number of timeouts here.
  // I'm not adding a check for it since it's theoretically possible that
  // we might get 0 timeouts depending on the CPU scheduling if our threads
  // don't get to run very often.
  LOG(INFO) << "testTimedSynchronized: " << *numTimeouts.contextualRLock()
            << " timeouts" ;
}
805 | |
// Testing the deprecated TIMED_SYNCHRONIZED_CONST API
//
// Same structure as testTimedSynchronized(), but taking a timed read
// (const) lock; the named variable (lv) is null when the attempt timed out.
template <class Mutex>
void testTimedSynchronizedWithConst() {
  folly::Synchronized<std::vector<int>, Mutex> v;
  folly::Synchronized<uint64_t, Mutex> numTimeouts;

  auto worker = [&](size_t threadIdx) {
    // Test operator->
    v->push_back(threadIdx);

    // Test TIMED_SYNCHRONIZED_CONST
    for (;;) {
      TIMED_SYNCHRONIZED_CONST(10, lv, v) {
        if (lv) {
          // Sleep while holding the lock.
          //
          // This will block other threads from acquiring the write lock to add
          // their thread index to v, but it won't block threads that have
          // entered the for loop and are trying to acquire a read lock.
          //
          // For lock types that give preference to readers rather than writers,
          // this will tend to serialize all threads on the write above.
          randomSleep(
              std::chrono::milliseconds(5), std::chrono::milliseconds(15));
          // Our own push above must be visible through the read lock.
          auto found = std::find(lv->begin(), lv->end(), threadIdx);
          CHECK(found != lv->end());
          return;
        } else {
          // Lock attempt timed out; count it and retry.
          ++(*numTimeouts.contextualLock());
        }
      }
    }
  };

  static const size_t numThreads = 100;
  runParallel(numThreads, worker);

  std::vector<int> result;
  v.swap(result);

  // Each thread pushed its index exactly once; verify nothing was lost.
  EXPECT_EQ(numThreads, result.size());
  sort(result.begin(), result.end());

  for (size_t i = 0; i < numThreads; ++i) {
    EXPECT_EQ(i, result[i]);
  }
  // We generally expect a small number of timeouts here.
  // For locks that give readers preference over writers this should usually
  // be 0.  With locks that give writers preference we do see a small-ish
  // number of read timeouts.
  LOG(INFO) << "testTimedSynchronizedWithConst: "
            << *numTimeouts.contextualRLock() << " timeouts" ;
}
859 | |
860 | template <class Mutex> |
861 | void testConstCopy() { |
862 | std::vector<int> input = {1, 2, 3}; |
863 | const folly::Synchronized<std::vector<int>, Mutex> v(input); |
864 | |
865 | std::vector<int> result; |
866 | |
867 | v.copyInto(result); |
868 | EXPECT_EQ(input, result); |
869 | |
870 | result = v.copy(); |
871 | EXPECT_EQ(input, result); |
872 | } |
873 | |
// Helper type for testInPlaceConstruction(): it can only be constructed in
// place, since every copy and move operation is explicitly deleted.
struct NotCopiableNotMovable {
  NotCopiableNotMovable(int, const char*) {}
  NotCopiableNotMovable(const NotCopiableNotMovable&) = delete;
  NotCopiableNotMovable& operator=(const NotCopiableNotMovable&) = delete;
  NotCopiableNotMovable(NotCopiableNotMovable&&) = delete;
  NotCopiableNotMovable& operator=(NotCopiableNotMovable&&) = delete;
};
881 | |
// Compile-time check that a non-copyable, non-movable type can be
// constructed directly inside a Synchronized via folly::in_place.
// NOTE(review): the Mutex template parameter is not forwarded to
// folly::Synchronized here, so the default mutex type is always used —
// presumably intentional since this only checks in-place construction;
// confirm.
template <class Mutex>
void testInPlaceConstruction() {
  // This won't compile without in_place
  folly::Synchronized<NotCopiableNotMovable> a(folly::in_place, 5, "a" );
}
887 | |
888 | template <class Mutex> |
889 | void testExchange() { |
890 | std::vector<int> input = {1, 2, 3}; |
891 | folly::Synchronized<std::vector<int>, Mutex> v(input); |
892 | std::vector<int> next = {4, 5, 6}; |
893 | auto prev = v.exchange(std::move(next)); |
894 | EXPECT_EQ((std::vector<int>{{1, 2, 3}}), prev); |
895 | EXPECT_EQ((std::vector<int>{{4, 5, 6}}), v.copy()); |
896 | } |
897 | } // namespace sync_tests |
898 | } // namespace folly |
899 | |