/*
 * Copyright 2018-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <folly/Portability.h>

#if FOLLY_HAS_COROUTINES

#include <folly/experimental/coro/Mutex.h>

#include <cassert>

using namespace folly::coro;

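// Implementation note: state_ encodes the current lock state:
// - unlockedState() (a sentinel)  -> the mutex is not held
// - nullptr                       -> held, with no newly queued waiters
// - any other value               -> held, pointing at the head of a LIFO
//                                    list of newly queued LockOperations
// waiters_ holds waiters already dequeued from state_, in FIFO order.
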
Mutex::~Mutex() {
  // Check there are no waiters waiting to acquire the lock.
  assert(
      state_.load(std::memory_order_relaxed) == unlockedState() ||
      state_.load(std::memory_order_relaxed) == nullptr);
  assert(waiters_ == nullptr);
}

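// Releases the lock. If any waiters are queued, ownership of the lock is
// handed off to the next waiter and that waiter's coroutine is resumed
// inline on the current thread; otherwise the mutex returns to the unlocked
// state.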
void Mutex::unlock() noexcept {
  assert(state_.load(std::memory_order_relaxed) != unlockedState());

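  // waiters_ is only ever accessed by the coroutine that currently holds the
  // lock, so it can be read and updated here without extra synchronisation.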
  auto* waitersHead = waiters_;
  if (waitersHead == nullptr) {
    void* currentState = state_.load(std::memory_order_relaxed);
    if (currentState == nullptr) {
      // Looks like there are no waiters waiting to acquire the lock.
      // Try to unlock it - use a compare-exchange to decide the race between
      // unlocking the mutex and another thread enqueueing another waiter.
      const bool releasedLock = state_.compare_exchange_strong(
          currentState,
          unlockedState(),
          std::memory_order_release,
          std::memory_order_relaxed);
      if (releasedLock) {
        return;
      }
    }

    // There are some awaiters that have been newly queued.
    // Dequeue them and reverse their order from LIFO to FIFO.
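    // The 'acquire' on this exchange pairs with the 'release' in
    // lockAsyncImpl() so that writes made to each LockOperation before it
    // was enqueued are visible to this thread.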
    currentState = state_.exchange(nullptr, std::memory_order_acquire);

    assert(currentState != unlockedState());
    assert(currentState != nullptr);

    auto* waiter = static_cast<LockOperation*>(currentState);
    do {
      auto* temp = waiter->next_;
      waiter->next_ = waitersHead;
      waitersHead = waiter;
      waiter = temp;
    } while (waiter != nullptr);
  }

  assert(waitersHead != nullptr);

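  // Hand the lock off directly to the next waiter in FIFO order. Note that
  // state_ is deliberately left in a 'locked' state here; the resumed
  // coroutine resumes already holding the lock.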
  waiters_ = waitersHead->next_;

  waitersHead->awaitingCoroutine_.resume();
}

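// Attempts to acquire the lock on behalf of 'awaiter'.
// Returns false if the lock was acquired synchronously (the awaiting
// coroutine should continue without suspending) and true if the awaiter was
// enqueued as a waiter (the awaiting coroutine should suspend and will be
// resumed by a later call to unlock()).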
bool Mutex::lockAsyncImpl(LockOperation* awaiter) {
  void* oldValue = state_.load(std::memory_order_relaxed);
  while (true) {
    if (oldValue == unlockedState()) {
      // It looks like the mutex is currently unlocked.
      // Try to acquire it synchronously.
      void* newValue = nullptr;
      if (state_.compare_exchange_weak(
              oldValue,
              newValue,
              std::memory_order_acquire,
              std::memory_order_relaxed)) {
        // Acquired synchronously, don't suspend.
        return false;
      }
    } else {
      // It looks like the mutex is currently locked.
      // Try to queue this waiter to the list of waiters.
      void* newValue = awaiter;
      awaiter->next_ = static_cast<LockOperation*>(oldValue);
      if (state_.compare_exchange_weak(
              oldValue,
              newValue,
              std::memory_order_release,
              std::memory_order_relaxed)) {
        // Queued waiter successfully. Awaiting coroutine should suspend.
        return true;
      }
    }
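    // The compare-exchange failed - either another thread modified state_
    // concurrently or compare_exchange_weak failed spuriously. oldValue has
    // been reloaded with the current value of state_, so just retry.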
  }
}

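// A minimal usage sketch (illustrative only, not part of this translation
// unit; it assumes folly::coro::Task and a scoped-lock awaitable such as
// Mutex::co_scoped_lock() declared in the accompanying Mutex.h):
//
//   folly::coro::Mutex mutex;
//   int sharedCounter = 0;
//
//   folly::coro::Task<void> increment() {
//     // Suspends if the mutex is currently held; resumes holding the lock.
//     auto lock = co_await mutex.co_scoped_lock();
//     ++sharedCounter;
//     // 'lock' releases the mutex (possibly resuming a waiter) when it is
//     // destroyed at the end of the scope.
//   }
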
#endif