1 | /* |
2 | * Copyright 2017-present Facebook, Inc. |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. |
6 | * You may obtain a copy of the License at |
7 | * |
8 | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | * |
10 | * Unless required by applicable law or agreed to in writing, software |
11 | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. |
15 | */ |
16 | |
17 | #include <folly/executors/Codel.h> |
18 | |
19 | #include <folly/portability/GFlags.h> |
20 | #include <algorithm> |
21 | |
22 | DEFINE_int32(codel_interval, 100, "Codel default interval time in ms" ); |
23 | DEFINE_int32(codel_target_delay, 5, "Target codel queueing delay in ms" ); |
24 | |
25 | using namespace std::chrono; |
26 | |
27 | namespace folly { |
28 | |
// Constructs an idle Codel tracker: no minimum delay recorded yet, the
// interval deadline initialized to "now" (so the first call to overloaded()
// after one interval elapses starts a fresh measurement interval), the
// reset flag armed, and the not-overloaded state.
Codel::Codel()
    : codelMinDelayNs_(0),
      codelIntervalTimeNs_(
          duration_cast<nanoseconds>(steady_clock::now().time_since_epoch())
              .count()),
      codelResetDelay_(true),
      overloaded_(false) {}
36 | |
// Records the queueing delay of one request and returns true if that request
// should be shed (dropped) under the current overload state.
//
// Once per interval, at most one caller performs the bookkeeping: it
// re-evaluates overloaded_ from the minimum delay seen during the interval
// that just ended, schedules the next interval deadline, and arms the
// reset flag so the minimum-delay tracker starts over.
bool Codel::overloaded(nanoseconds delay) {
  bool ret = false;
  auto now = steady_clock::now();

  // Avoid another thread updating the value at the same time we are using it
  // to calculate the overloaded state
  auto minDelay = nanoseconds(codelMinDelayNs_);

  // Interval deadline has passed: exactly one thread wins the exchange
  // (false -> true) and performs the end-of-interval bookkeeping.
  if (now > steady_clock::time_point(nanoseconds(codelIntervalTimeNs_)) &&
      // testing before exchanging is more cacheline-friendly
      (!codelResetDelay_.load(std::memory_order_acquire) &&
       !codelResetDelay_.exchange(true))) {
    // Next interval ends one interval length from now.
    codelIntervalTimeNs_ =
        duration_cast<nanoseconds>((now + getInterval()).time_since_epoch())
            .count();

    // Overloaded iff even the best-case (minimum) delay over the previous
    // interval exceeded the target delay.
    if (minDelay > getTargetDelay()) {
      overloaded_ = true;
    } else {
      overloaded_ = false;
    }
  }
  // Care must be taken that only a single thread resets codelMinDelay_,
  // and that it happens after the interval reset above
  if (codelResetDelay_.load(std::memory_order_acquire) &&
      codelResetDelay_.exchange(false)) {
    // This thread (the first arrival of the new interval) seeds the
    // minimum-delay tracker with its own delay.
    codelMinDelayNs_ = delay.count();
    // More than one request must come in during an interval before codel
    // starts dropping requests
    return false;
  } else if (delay < nanoseconds(codelMinDelayNs_)) {
    // NOTE: unsynchronized min-tracking; concurrent writers may race, which
    // is tolerated since the value is only a heuristic.
    codelMinDelayNs_ = delay.count();
  }

  // Here is where we apply different logic than codel proper. Instead of
  // adapting the interval until the next drop, we slough off requests with
  // queueing delay > 2*target_delay while in the overloaded regime. This
  // empirically works better for our services than the codel approach of
  // increasingly often dropping packets.
  if (overloaded_ && delay > getSloughTimeout()) {
    ret = true;
  }

  return ret;
}
82 | |
83 | int Codel::getLoad() { |
84 | // it might be better to use the average delay instead of minDelay, but we'd |
85 | // have to track it. aspiring bootcamper? |
86 | return std::min<int>(100, 100 * getMinDelay() / getSloughTimeout()); |
87 | } |
88 | |
89 | nanoseconds Codel::getMinDelay() { |
90 | return nanoseconds(codelMinDelayNs_); |
91 | } |
92 | |
93 | milliseconds Codel::getInterval() { |
94 | return milliseconds(FLAGS_codel_interval); |
95 | } |
96 | |
97 | milliseconds Codel::getTargetDelay() { |
98 | return milliseconds(FLAGS_codel_target_delay); |
99 | } |
100 | |
101 | milliseconds Codel::getSloughTimeout() { |
102 | return getTargetDelay() * 2; |
103 | } |
104 | |
105 | } // namespace folly |
106 | |