1 | /* |
2 | * Copyright 2012-present Facebook, Inc. |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
5 | * you may not use this file except in compliance with the License. |
6 | * You may obtain a copy of the License at |
7 | * |
8 | * http://www.apache.org/licenses/LICENSE-2.0 |
9 | * |
10 | * Unless required by applicable law or agreed to in writing, software |
11 | * distributed under the License is distributed on an "AS IS" BASIS, |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | * See the License for the specific language governing permissions and |
14 | * limitations under the License. |
15 | */ |
16 | |
17 | #include <folly/stats/BucketedTimeSeries-defs.h> |
18 | #include <folly/stats/BucketedTimeSeries.h> |
19 | #include <folly/stats/MultiLevelTimeSeries-defs.h> |
20 | #include <folly/stats/MultiLevelTimeSeries.h> |
21 | #include <folly/stats/detail/Bucket.h> |
22 | |
23 | #include <array> |
24 | |
25 | #include <glog/logging.h> |
26 | |
27 | #include <folly/container/Foreach.h> |
28 | #include <folly/portability/GTest.h> |
29 | |
30 | using folly::BucketedTimeSeries; |
31 | using std::string; |
32 | using std::vector; |
33 | using std::chrono::seconds; |
34 | |
35 | using Bucket = folly::detail::Bucket<int64_t>; |
36 | using StatsClock = folly::LegacyStatsClock<std::chrono::seconds>; |
37 | using TimePoint = StatsClock::time_point; |
38 | using Duration = StatsClock::duration; |
39 | |
40 | /* |
41 | * Helper functions to allow us to directly log time points and duration |
42 | */ |
43 | namespace std { |
44 | std::ostream& operator<<(std::ostream& os, std::chrono::seconds s) { |
45 | os << s.count(); |
46 | return os; |
47 | } |
48 | std::ostream& operator<<(std::ostream& os, TimePoint tp) { |
49 | os << tp.time_since_epoch().count(); |
50 | return os; |
51 | } |
52 | } // namespace std |
53 | |
54 | namespace { |
55 | TimePoint mkTimePoint(int value) { |
56 | return TimePoint(StatsClock::duration(value)); |
57 | } |
58 | |
59 | struct TestData { |
60 | TestData(int d, int b, std::initializer_list<int> starts) |
61 | : duration(d), numBuckets(b) { |
62 | bucketStarts.reserve(starts.size()); |
63 | for (int s : starts) { |
64 | bucketStarts.push_back(mkTimePoint(s)); |
65 | } |
66 | } |
67 | seconds duration; |
68 | size_t numBuckets; |
69 | vector<TimePoint> bucketStarts; |
70 | }; |
71 | vector<TestData> testData = { |
72 | // 71 seconds x 4 buckets |
73 | {71, 4, {0, 18, 36, 54}}, |
74 | // 100 seconds x 10 buckets |
75 | {100, 10, {0, 10, 20, 30, 40, 50, 60, 70, 80, 90}}, |
76 | // 10 seconds x 10 buckets |
77 | {10, 10, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}}, |
78 | // 10 seconds x 1 buckets |
79 | {10, 1, {0}}, |
80 | // 1 second x 1 buckets |
81 | {1, 1, {0}}, |
82 | }; |
83 | } // namespace |
84 | |
85 | TEST(BucketedTimeSeries, getBucketInfo) { |
86 | for (const auto& data : testData) { |
87 | BucketedTimeSeries<int64_t> ts(data.numBuckets, data.duration); |
88 | |
89 | for (uint32_t n = 0; n < 10000; n += 1234) { |
90 | seconds offset(n * data.duration); |
91 | |
92 | for (uint32_t idx = 0; idx < data.numBuckets; ++idx) { |
93 | auto bucketStart = data.bucketStarts[idx]; |
94 | TimePoint nextBucketStart; |
95 | if (idx + 1 < data.numBuckets) { |
96 | nextBucketStart = data.bucketStarts[idx + 1]; |
97 | } else { |
98 | nextBucketStart = TimePoint(data.duration); |
99 | } |
100 | |
101 | TimePoint expectedStart = offset + bucketStart; |
102 | TimePoint expectedNextStart = offset + nextBucketStart; |
103 | TimePoint midpoint = |
104 | expectedStart + (expectedNextStart - expectedStart) / 2; |
105 | |
106 | vector<std::pair<string, TimePoint>> timePoints = { |
107 | {"expectedStart" , expectedStart}, |
108 | {"midpoint" , midpoint}, |
109 | {"expectedEnd" , expectedNextStart - seconds(1)}, |
110 | }; |
111 | |
112 | for (const auto& point : timePoints) { |
113 | // Check that getBucketIdx() returns the expected index |
114 | EXPECT_EQ(idx, ts.getBucketIdx(point.second)) |
115 | << data.duration << "x" << data.numBuckets << ": " << point.first |
116 | << "=" << point.second; |
117 | |
118 | // Check the data returned by getBucketInfo() |
119 | size_t returnedIdx; |
120 | TimePoint returnedStart; |
121 | TimePoint returnedNextStart; |
122 | ts.getBucketInfo( |
123 | expectedStart, &returnedIdx, &returnedStart, &returnedNextStart); |
124 | EXPECT_EQ(idx, returnedIdx) |
125 | << data.duration << "x" << data.numBuckets << ": " << point.first |
126 | << "=" << point.second; |
127 | EXPECT_EQ(expectedStart, returnedStart) |
128 | << data.duration << "x" << data.numBuckets << ": " << point.first |
129 | << "=" << point.second; |
130 | EXPECT_EQ(expectedNextStart, returnedNextStart) |
131 | << data.duration << "x" << data.numBuckets << ": " << point.first |
132 | << "=" << point.second; |
133 | } |
134 | } |
135 | } |
136 | } |
137 | } |
138 | |
// Exercise update() on a 100-second, 10-bucket timeseries that is seeded with
// one data point (value 6) per bucket, with all timestamps shifted by
// `offset`.  Each section below re-seeds the series and then advances time to
// check how many old data points get discarded.
void testUpdate100x10(size_t offset) {
  // This test code only works when offset is a multiple of the bucket width
  CHECK_EQ(0, offset % 10);

  // Create a 100 second timeseries, with 10 buckets
  BucketedTimeSeries<int64_t> ts(10, seconds(100));

  auto setup = [&] {
    ts.clear();
    // Add 1 value to each bucket
    for (int n = 5; n <= 95; n += 10) {
      ts.addValue(seconds(n + offset), 6);
    }

    EXPECT_EQ(10, ts.count());
    EXPECT_EQ(60, ts.sum());
    EXPECT_EQ(6, ts.avg());
  };

  // Update 2 buckets forwards. This should throw away 2 data points.
  setup();
  ts.update(seconds(110 + offset));
  EXPECT_EQ(8, ts.count());
  EXPECT_EQ(48, ts.sum());
  EXPECT_EQ(6, ts.avg());

  // The last time we added was 95.
  // Try updating to 151.  This advances through 6 new buckets, discarding
  // 6 data points and keeping the 4 most recent ones.
  setup();
  ts.update(seconds(151 + offset));
  EXPECT_EQ(4, ts.count());
  // EXPECT_EQ(24, ts.sum());
  EXPECT_EQ(6, ts.avg());

  // The last time we added was 95.
  // Try updating to 193: This is nearly one full loop around,
  // back to the same bucket. update() needs to clear everything
  setup();
  ts.update(seconds(193 + offset));
  EXPECT_EQ(0, ts.count());
  EXPECT_EQ(0, ts.sum());
  EXPECT_EQ(0, ts.avg());

  // The last time we added was 95.
  // Try updating to 197: This is slightly over one full loop around,
  // back to the same bucket. update() needs to clear everything
  setup();
  ts.update(seconds(197 + offset));
  EXPECT_EQ(0, ts.count());
  EXPECT_EQ(0, ts.sum());
  EXPECT_EQ(0, ts.avg());

  // The last time we added was 95.
  // Try updating to 230: This is well over one full loop around,
  // and everything should be cleared.
  setup();
  ts.update(seconds(230 + offset));
  EXPECT_EQ(0, ts.count());
  EXPECT_EQ(0, ts.sum());
  EXPECT_EQ(0, ts.avg());
}
200 | |
201 | TEST(BucketedTimeSeries, update100x10) { |
202 | // Run testUpdate100x10() multiple times, with various offsets. |
203 | // This makes sure the update code works regardless of which bucket it starts |
204 | // at in the modulo arithmetic. |
205 | testUpdate100x10(0); |
206 | testUpdate100x10(50); |
207 | testUpdate100x10(370); |
208 | testUpdate100x10(1937090); |
209 | } |
210 | |
TEST(BucketedTimeSeries, update71x5) {
  // Create a 71 second timeseries, with 5 buckets
  // This tests when the number of buckets does not divide evenly into the
  // duration.  Each bucket covers 14.2 seconds: a time t falls into bucket
  // floor(t * 5 / 71), so the buckets begin at 0, 15, 29, 43, and 57.
  BucketedTimeSeries<int64_t> ts(5, seconds(71));

  auto setup = [&] {
    ts.clear();
    // Add 1 value to each bucket: 11, 24, 42, 43, and 66 land in buckets
    // 0 through 4 respectively.
    ts.addValue(seconds(11), 6);
    ts.addValue(seconds(24), 6);
    ts.addValue(seconds(42), 6);
    ts.addValue(seconds(43), 6);
    ts.addValue(seconds(66), 6);

    EXPECT_EQ(5, ts.count());
    EXPECT_EQ(30, ts.sum());
    EXPECT_EQ(6, ts.avg());
  };

  // Update 2 buckets forwards. This should throw away 2 data points.
  setup();
  ts.update(seconds(99));
  EXPECT_EQ(3, ts.count());
  EXPECT_EQ(18, ts.sum());
  EXPECT_EQ(6, ts.avg());

  // Update 3 buckets forwards. This should throw away 3 data points.
  setup();
  ts.update(seconds(100));
  EXPECT_EQ(2, ts.count());
  EXPECT_EQ(12, ts.sum());
  EXPECT_EQ(6, ts.avg());

  // Update 4 buckets forwards, just under the wrap limit.
  // This should throw everything but the last bucket away.
  setup();
  ts.update(seconds(127));
  EXPECT_EQ(1, ts.count());
  EXPECT_EQ(6, ts.sum());
  EXPECT_EQ(6, ts.avg());

  // Update 5 buckets forwards, exactly at the wrap limit.
  // 128 maps to the same bucket that 66 was added to, one full cycle later,
  // so this should throw everything away.
  setup();
  ts.update(seconds(128));
  EXPECT_EQ(0, ts.count());
  EXPECT_EQ(0, ts.sum());
  EXPECT_EQ(0, ts.avg());

  // Update very far forwards, wrapping multiple times.
  // This should throw everything away.
  setup();
  ts.update(seconds(1234));
  EXPECT_EQ(0, ts.count());
  EXPECT_EQ(0, ts.sum());
  EXPECT_EQ(0, ts.avg());
}
269 | |
TEST(BucketedTimeSeries, elapsed) {
  // 600-second window split into 60 buckets (10 seconds per bucket).
  BucketedTimeSeries<int64_t> ts(60, seconds(600));

  // elapsed() is 0 when no data points have been added
  EXPECT_EQ(0, ts.elapsed().count());

  // With exactly 1 data point, elapsed() should report 1 second of data
  seconds start(239218);
  ts.addValue(start + seconds(0), 200);
  EXPECT_EQ(1, ts.elapsed().count());
  // Adding a data point 10 seconds later should result in an elapsed time of
  // 11 seconds (the time range is [0, 10], inclusive).
  ts.addValue(start + seconds(10), 200);
  EXPECT_EQ(11, ts.elapsed().count());

  // elapsed() returns to 0 after clear()
  ts.clear();
  EXPECT_EQ(0, ts.elapsed().count());

  // Restart, with the starting point on an easier number to work with
  ts.addValue(seconds(10), 200);
  EXPECT_EQ(1, ts.elapsed().count());
  ts.addValue(seconds(580), 200);
  EXPECT_EQ(571, ts.elapsed().count());
  ts.addValue(seconds(590), 200);
  EXPECT_EQ(581, ts.elapsed().count());
  ts.addValue(seconds(598), 200);
  EXPECT_EQ(589, ts.elapsed().count());
  ts.addValue(seconds(599), 200);
  EXPECT_EQ(590, ts.elapsed().count());
  ts.addValue(seconds(600), 200);
  EXPECT_EQ(591, ts.elapsed().count());
  ts.addValue(seconds(608), 200);
  EXPECT_EQ(599, ts.elapsed().count());
  ts.addValue(seconds(609), 200);
  EXPECT_EQ(600, ts.elapsed().count());
  // Once we reach 600 seconds worth of data, when we move on to the next
  // second a full bucket will get thrown out. Now we drop back down to 591
  // seconds worth of data
  ts.addValue(seconds(610), 200);
  EXPECT_EQ(591, ts.elapsed().count());
  ts.addValue(seconds(618), 200);
  EXPECT_EQ(599, ts.elapsed().count());
  ts.addValue(seconds(619), 200);
  EXPECT_EQ(600, ts.elapsed().count());
  ts.addValue(seconds(620), 200);
  EXPECT_EQ(591, ts.elapsed().count());
  // A jump far into the future behaves the same way: elapsed() is capped at
  // the full window, then drops by a bucket as each new bucket starts.
  ts.addValue(seconds(123419), 200);
  EXPECT_EQ(600, ts.elapsed().count());
  ts.addValue(seconds(123420), 200);
  EXPECT_EQ(591, ts.elapsed().count());
  ts.addValue(seconds(123425), 200);
  EXPECT_EQ(596, ts.elapsed().count());

  // Time never moves backwards.
  // Calling update with an old timestamp will just be ignored.
  ts.update(seconds(29));
  EXPECT_EQ(596, ts.elapsed().count());
}
329 | |
TEST(BucketedTimeSeries, rate) {
  // 600-second window split into 60 buckets.
  BucketedTimeSeries<int64_t> ts(60, seconds(600));

  // Add 3 values every 2 seconds, until fill up the buckets:
  // 300 calls, each adding value 200 with a count of 3.
  for (size_t n = 0; n < 600; n += 2) {
    ts.addValue(seconds(n), 200, 3);
  }

  EXPECT_EQ(900, ts.count());
  EXPECT_EQ(180000, ts.sum());
  EXPECT_EQ(200, ts.avg());

  // Really we only entered 599 seconds worth of data: [0, 598] (inclusive)
  // rate() = 180000 / 599, countRate() = 900 / 599.
  EXPECT_EQ(599, ts.elapsed().count());
  EXPECT_NEAR(300.5, ts.rate(), 0.005);
  EXPECT_NEAR(1.5, ts.countRate(), 0.005);

  // If we add 1 more second, now we will have 600 seconds worth of data
  ts.update(seconds(599));
  EXPECT_EQ(600, ts.elapsed().count());
  EXPECT_NEAR(300, ts.rate(), 0.005);
  EXPECT_EQ(300, ts.rate<int>());
  EXPECT_NEAR(1.5, ts.countRate(), 0.005);

  // However, 1 more second after that and we will have filled up all the
  // buckets, and have to drop one.
  ts.update(seconds(600));
  EXPECT_EQ(591, ts.elapsed().count());
  EXPECT_NEAR(299.5, ts.rate(), 0.01);
  EXPECT_EQ(299, ts.rate<int>());
  EXPECT_NEAR(1.5, ts.countRate(), 0.005);
}
362 | |
TEST(BucketedTimeSeries, avgTypeConversion) {
  // Make sure the computed average values are accurate regardless
  // of the input type and return type.

  {
    // Simple sanity tests for small positive integer values
    // sum = 400 + 2000 + 1600 = 4000 over 400 samples -> avg exactly 10.
    BucketedTimeSeries<int64_t> ts(60, seconds(600));
    ts.addValue(seconds(0), 4, 100);
    ts.addValue(seconds(0), 10, 200);
    ts.addValue(seconds(0), 16, 100);

    EXPECT_DOUBLE_EQ(10.0, ts.avg());
    EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
    EXPECT_EQ(10, ts.avg<uint64_t>());
    EXPECT_EQ(10, ts.avg<int64_t>());
    EXPECT_EQ(10, ts.avg<int32_t>());
    EXPECT_EQ(10, ts.avg<int16_t>());
    EXPECT_EQ(10, ts.avg<int8_t>());
    EXPECT_EQ(10, ts.avg<uint8_t>());
  }

  {
    // Test signed integer types with negative values
    // sum = -600 + (-200 * 65535) = -13107600 over 65538 samples,
    // which is exactly -200.
    BucketedTimeSeries<int64_t> ts(60, seconds(600));
    ts.addValue(seconds(0), -100);
    ts.addValue(seconds(0), -200);
    ts.addValue(seconds(0), -300);
    ts.addValue(seconds(0), -200, 65535);

    EXPECT_DOUBLE_EQ(-200.0, ts.avg());
    EXPECT_DOUBLE_EQ(-200.0, ts.avg<float>());
    EXPECT_EQ(-200, ts.avg<int64_t>());
    EXPECT_EQ(-200, ts.avg<int32_t>());
    EXPECT_EQ(-200, ts.avg<int16_t>());
  }

  {
    // Test uint64_t values that would overflow int64_t
    BucketedTimeSeries<uint64_t> ts(60, seconds(600));
    ts.addValueAggregated(
        seconds(0),
        std::numeric_limits<uint64_t>::max(),
        std::numeric_limits<uint64_t>::max());

    EXPECT_DOUBLE_EQ(1.0, ts.avg());
    EXPECT_DOUBLE_EQ(1.0, ts.avg<float>());
    EXPECT_EQ(1, ts.avg<uint64_t>());
    EXPECT_EQ(1, ts.avg<int64_t>());
    EXPECT_EQ(1, ts.avg<int8_t>());
  }

  {
    // Test doubles with small-ish values that will fit in integer types
    BucketedTimeSeries<double> ts(60, seconds(600));
    ts.addValue(seconds(0), 4.0, 100);
    ts.addValue(seconds(0), 10.0, 200);
    ts.addValue(seconds(0), 16.0, 100);

    EXPECT_DOUBLE_EQ(10.0, ts.avg());
    EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
    EXPECT_EQ(10, ts.avg<uint64_t>());
    EXPECT_EQ(10, ts.avg<int64_t>());
    EXPECT_EQ(10, ts.avg<int32_t>());
    EXPECT_EQ(10, ts.avg<int16_t>());
    EXPECT_EQ(10, ts.avg<int8_t>());
    EXPECT_EQ(10, ts.avg<uint8_t>());
  }

  {
    // Test doubles with huge values
    BucketedTimeSeries<double> ts(60, seconds(600));
    ts.addValue(seconds(0), 1e19, 100);
    ts.addValue(seconds(0), 2e19, 200);
    ts.addValue(seconds(0), 3e19, 100);

    EXPECT_DOUBLE_EQ(ts.avg(), 2e19);
    EXPECT_NEAR(ts.avg<float>(), 2e19, 1e11);
  }

  {
    // Test doubles where the sum adds up larger than a uint64_t,
    // but the average fits in an int64_t
    BucketedTimeSeries<double> ts(60, seconds(600));
    uint64_t value = 0x3fffffffffffffff;
    FOR_EACH_RANGE (i, 0, 16) { ts.addValue(seconds(0), value); }

    EXPECT_DOUBLE_EQ(value, ts.avg());
    EXPECT_DOUBLE_EQ(value, ts.avg<float>());
    // Some precision is lost here due to the huge sum, so the
    // integer average returned is off by one.
    EXPECT_NEAR(value, ts.avg<uint64_t>(), 1);
    EXPECT_NEAR(value, ts.avg<int64_t>(), 1);
  }

  {
    // Test BucketedTimeSeries with a smaller integer type
    BucketedTimeSeries<int16_t> ts(60, seconds(600));
    FOR_EACH_RANGE (i, 0, 101) { ts.addValue(seconds(0), i); }

    EXPECT_DOUBLE_EQ(50.0, ts.avg());
    EXPECT_DOUBLE_EQ(50.0, ts.avg<float>());
    EXPECT_EQ(50, ts.avg<uint64_t>());
    EXPECT_EQ(50, ts.avg<int64_t>());
    EXPECT_EQ(50, ts.avg<int16_t>());
    EXPECT_EQ(50, ts.avg<int8_t>());
  }

  {
    // Test BucketedTimeSeries with long double input
    BucketedTimeSeries<long double> ts(60, seconds(600));
    ts.addValueAggregated(seconds(0), 1000.0L, 7);

    long double expected = 1000.0L / 7.0L;
    EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
    EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
    EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
    EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
    EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
  }

  {
    // Test BucketedTimeSeries with int64_t values,
    // but using an average that requires a fair amount of precision.
    BucketedTimeSeries<int64_t> ts(60, seconds(600));
    ts.addValueAggregated(seconds(0), 1000, 7);

    long double expected = 1000.0L / 7.0L;
    EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
    EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
    EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
    EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
    EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
  }
}
497 | |
TEST(BucketedTimeSeries, forEachBucket) {
  typedef BucketedTimeSeries<int64_t>::Bucket BucketSeries;
  // Record of one callback invocation: the bucket visited and the time
  // range [start, nextStart) it was reported to cover.
  struct BucketInfo {
    BucketInfo(const BucketSeries* b, TimePoint s, TimePoint ns)
        : bucket(b), start(s), nextStart(ns) {}

    const BucketSeries* bucket;
    TimePoint start;
    TimePoint nextStart;
  };

  for (const auto& data : testData) {
    BucketedTimeSeries<int64_t> ts(data.numBuckets, seconds(data.duration));

    vector<BucketInfo> info;
    auto fn = [&](const BucketSeries& bucket,
                  TimePoint bucketStart,
                  TimePoint bucketEnd) -> bool {
      info.emplace_back(&bucket, bucketStart, bucketEnd);
      return true;
    };

    // If we haven't yet added any data, the current bucket will start at 0,
    // and all data previous buckets will have negative times.
    ts.forEachBucket(fn);

    CHECK_EQ(data.numBuckets, info.size());

    // Check the data passed in to the function.
    // Iteration is expected to begin at bucket index 1 (the oldest bucket,
    // one cycle in the past), wrap to index 0, and end with the current
    // bucket; `offset` tracks which cycle each bucket's times belong to.
    size_t infoIdx = 0;
    size_t bucketIdx = 1;
    seconds offset = -data.duration;
    for (size_t n = 0; n < data.numBuckets; ++n) {
      if (bucketIdx >= data.numBuckets) {
        bucketIdx = 0;
        offset += data.duration;
      }

      EXPECT_EQ(data.bucketStarts[bucketIdx] + offset, info[infoIdx].start)
          << data.duration << "x" << data.numBuckets
          << ": bucketIdx=" << bucketIdx << ", infoIdx=" << infoIdx;

      size_t nextBucketIdx = bucketIdx + 1;
      seconds nextOffset = offset;
      if (nextBucketIdx >= data.numBuckets) {
        nextBucketIdx = 0;
        nextOffset += data.duration;
      }
      EXPECT_EQ(
          data.bucketStarts[nextBucketIdx] + nextOffset,
          info[infoIdx].nextStart)
          << data.duration << "x" << data.numBuckets
          << ": bucketIdx=" << bucketIdx << ", infoIdx=" << infoIdx;

      EXPECT_EQ(&ts.getBucketByIndex(bucketIdx), info[infoIdx].bucket);

      ++bucketIdx;
      ++infoIdx;
    }
  }
}
559 | |
TEST(BucketedTimeSeries, queryByIntervalSimple) {
  // 12-second window in 3 buckets, so each bucket covers 4 seconds.
  BucketedTimeSeries<int> a(3, seconds(12));
  for (int i = 0; i < 8; i++) {
    a.addValue(seconds(i), 1);
  }
  // We added 1 at each second from 0..7
  // Query from the time period 0..2.
  // This is entirely in the first bucket, which has a sum of 4.
  // The code knows only part of the bucket is covered (2 of its 4 seconds),
  // and correctly estimates the desired sum as 2.
  EXPECT_EQ(2, a.sum(mkTimePoint(0), mkTimePoint(2)));
}
572 | |
TEST(BucketedTimeSeries, queryByInterval) {
  // Set up a BucketedTimeSeries tracking 6 seconds in 3 buckets
  const int kNumBuckets = 3;
  const int kDuration = 6;
  BucketedTimeSeries<double> b(kNumBuckets, seconds(kDuration));

  for (unsigned int i = 0; i < kDuration; ++i) {
    // add value 'i' at time 'i'
    b.addValue(mkTimePoint(i), i);
  }

  // Current bucket state:
  // 0: time=[0, 2): values=(0, 1), sum=1, count=2
  // 1: time=[2, 4): values=(2, 3), sum=5, count=2
  // 2: time=[4, 6): values=(4, 5), sum=9, count=2
  //
  // expectedSums1[i][j] is the expected sum over the interval
  // [currentTime - i - j, currentTime - i); entries of -1 mark combinations
  // where i + j > kDuration, which the loops below never query.
  // clang-format off
  double expectedSums1[kDuration + 1][kDuration + 1] = {
      {0, 4.5, 9, 11.5, 14, 14.5, 15},
      {0, 4.5, 7, 9.5, 10, 10.5, -1},
      {0, 2.5, 5, 5.5, 6, -1, -1},
      {0, 2.5, 3, 3.5, -1, -1, -1},
      {0, 0.5, 1, -1, -1, -1, -1},
      {0, 0.5, -1, -1, -1, -1, -1},
      {0, -1, -1, -1, -1, -1, -1},
  };
  int expectedCounts1[kDuration + 1][kDuration + 1] = {
      {0, 1, 2, 3, 4, 5, 6},
      {0, 1, 2, 3, 4, 5, -1},
      {0, 1, 2, 3, 4, -1, -1},
      {0, 1, 2, 3, -1, -1, -1},
      {0, 1, 2, -1, -1, -1, -1},
      {0, 1, -1, -1, -1, -1, -1},
      {0, -1, -1, -1, -1, -1, -1},
  };
  // clang-format on

  TimePoint currentTime = b.getLatestTime() + seconds(1);
  for (int i = 0; i <= kDuration + 1; i++) {
    for (int j = 0; j <= kDuration - i; j++) {
      TimePoint start = currentTime - seconds(i + j);
      TimePoint end = currentTime - seconds(i);
      double expectedSum = expectedSums1[i][j];
      EXPECT_EQ(expectedSum, b.sum(start, end))
          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
          << ")" ;

      uint64_t expectedCount = expectedCounts1[i][j];
      EXPECT_EQ(expectedCount, b.count(start, end))
          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
          << ")" ;

      double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
      EXPECT_EQ(expectedAvg, b.avg(start, end))
          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
          << ")" ;

      double expectedRate = j ? expectedSum / j : 0;
      EXPECT_EQ(expectedRate, b.rate(start, end))
          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
          << ")" ;
    }
  }

  // Add 3 more values.
  // This will overwrite 1 full bucket, and put us halfway through the next.
  for (unsigned int i = kDuration; i < kDuration + 3; ++i) {
    b.addValue(mkTimePoint(i), i);
  }
  EXPECT_EQ(mkTimePoint(4), b.getEarliestTime());

  // Current bucket state:
  // 0: time=[6, 8): values=(6, 7), sum=13, count=2
  // 1: time=[8, 10): values=(8), sum=8, count=1
  // 2: time=[4, 6): values=(4, 5), sum=9, count=2
  // clang-format off
  double expectedSums2[kDuration + 1][kDuration + 1] = {
      {0, 8, 14.5, 21, 25.5, 30, 30},
      {0, 6.5, 13, 17.5, 22, 22, -1},
      {0, 6.5, 11, 15.5, 15.5, -1, -1},
      {0, 4.5, 9, 9, -1, -1, -1},
      {0, 4.5, 4.5, -1, -1, -1, -1},
      {0, 0, -1, -1, -1, -1, -1},
      {0, -1, -1, -1, -1, -1, -1},
  };
  int expectedCounts2[kDuration + 1][kDuration + 1] = {
      {0, 1, 2, 3, 4, 5, 5},
      {0, 1, 2, 3, 4, 4, -1},
      {0, 1, 2, 3, 3, -1, -1},
      {0, 1, 2, 2, -1, -1, -1},
      {0, 1, 1, -1, -1, -1, -1},
      {0, 0, -1, -1, -1, -1, -1},
      {0, -1, -1, -1, -1, -1, -1},
  };
  // clang-format on

  currentTime = b.getLatestTime() + seconds(1);
  for (int i = 0; i <= kDuration + 1; i++) {
    for (int j = 0; j <= kDuration - i; j++) {
      TimePoint start = currentTime - seconds(i + j);
      TimePoint end = currentTime - seconds(i);
      double expectedSum = expectedSums2[i][j];
      EXPECT_EQ(expectedSum, b.sum(start, end))
          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
          << ")" ;

      uint64_t expectedCount = expectedCounts2[i][j];
      EXPECT_EQ(expectedCount, b.count(start, end))
          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
          << ")" ;

      double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
      EXPECT_EQ(expectedAvg, b.avg(start, end))
          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
          << ")" ;

      // elapsed() only covers the part of the query range that actually
      // contains data, so clamp to the earliest time still in the window.
      TimePoint dataStart = std::max(start, b.getEarliestTime());
      TimePoint dataEnd = std::max(end, dataStart);
      seconds expectedInterval = dataEnd - dataStart;
      EXPECT_EQ(expectedInterval, b.elapsed(start, end))
          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
          << ")" ;

      double expectedRate =
          expectedInterval.count() ? expectedSum / expectedInterval.count() : 0;
      EXPECT_EQ(expectedRate, b.rate(start, end))
          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
          << ")" ;
    }
  }
}
703 | |
TEST(BucketedTimeSeries, rateByInterval) {
  // 10-second window in 5 buckets (2 seconds per bucket).
  const int kNumBuckets = 5;
  const seconds kDuration(10);
  BucketedTimeSeries<double> b(kNumBuckets, kDuration);

  // Add data points at a constant rate of 10 per second.
  // Start adding data points at kDuration, and fill half of the buckets for
  // now.
  TimePoint start(kDuration);
  TimePoint end(kDuration + (kDuration / 2));
  const double kFixedRate = 10.0;
  for (TimePoint i = start; i < end; i += seconds(1)) {
    b.addValue(i, kFixedRate);
  }

  // Querying the rate should yield kFixedRate.
  EXPECT_EQ(kFixedRate, b.rate());
  EXPECT_EQ(kFixedRate, b.rate(start, end));
  EXPECT_EQ(kFixedRate, b.rate(start, start + kDuration));
  EXPECT_EQ(kFixedRate, b.rate(end - kDuration, end));
  EXPECT_EQ(kFixedRate, b.rate(end - seconds(1), end));
  // We have been adding 1 data point per second, so countRate()
  // should be 1.
  EXPECT_EQ(1.0, b.countRate());
  EXPECT_EQ(1.0, b.countRate(start, end));
  EXPECT_EQ(1.0, b.countRate(start, start + kDuration));
  EXPECT_EQ(1.0, b.countRate(end - kDuration, end));
  EXPECT_EQ(1.0, b.countRate(end - seconds(1), end));

  // We haven't added anything before time kDuration.
  // Querying data earlier than this should result in a rate of 0.
  EXPECT_EQ(0.0, b.rate(mkTimePoint(0), mkTimePoint(1)));
  EXPECT_EQ(0.0, b.countRate(mkTimePoint(0), mkTimePoint(1)));

  // Fill the remainder of the timeseries from kDuration to kDuration*2
  start = end;
  end = TimePoint(kDuration * 2);
  for (TimePoint i = start; i < end; i += seconds(1)) {
    b.addValue(i, kFixedRate);
  }

  // Queries past the end of the data, or wider than the retained window,
  // should still report the same constant rate.
  EXPECT_EQ(kFixedRate, b.rate());
  EXPECT_EQ(kFixedRate, b.rate(TimePoint(kDuration), TimePoint(kDuration * 2)));
  EXPECT_EQ(kFixedRate, b.rate(TimePoint(), TimePoint(kDuration * 2)));
  EXPECT_EQ(kFixedRate, b.rate(TimePoint(), TimePoint(kDuration * 10)));
  EXPECT_EQ(1.0, b.countRate());
  EXPECT_EQ(1.0, b.countRate(TimePoint(kDuration), TimePoint(kDuration * 2)));
  EXPECT_EQ(1.0, b.countRate(TimePoint(), TimePoint(kDuration * 2)));
  EXPECT_EQ(1.0, b.countRate(TimePoint(), TimePoint(kDuration * 10)));
}
754 | |
TEST(BucketedTimeSeries, addHistorical) {
  // 10-second window in 5 buckets (2 seconds per bucket).
  const int kNumBuckets = 5;
  const seconds kDuration(10);
  BucketedTimeSeries<double> b(kNumBuckets, kDuration);

  // Initially fill with a constant rate of data
  for (TimePoint i = mkTimePoint(0); i < mkTimePoint(10); i += seconds(1)) {
    b.addValue(i, 10.0);
  }

  EXPECT_EQ(10.0, b.rate());
  EXPECT_EQ(10.0, b.avg());
  EXPECT_EQ(10, b.count());

  // Add some more data points to the middle bucket
  // (times 4 and 5 both fall into the [4, 6) bucket, which still lies
  // within the tracked window even though it is in the past)
  b.addValue(mkTimePoint(4), 40.0);
  b.addValue(mkTimePoint(5), 40.0);
  EXPECT_EQ(15.0, b.avg());
  EXPECT_EQ(18.0, b.rate());
  EXPECT_EQ(12, b.count());

  // Now start adding more current data points, until we are about to roll over
  // the bucket where we added the extra historical data.
  for (TimePoint i = mkTimePoint(10); i < mkTimePoint(14); i += seconds(1)) {
    b.addValue(i, 10.0);
  }
  EXPECT_EQ(15.0, b.avg());
  EXPECT_EQ(18.0, b.rate());
  EXPECT_EQ(12, b.count());

  // Now roll over the middle bucket
  b.addValue(mkTimePoint(14), 10.0);
  b.addValue(mkTimePoint(15), 10.0);
  EXPECT_EQ(10.0, b.avg());
  EXPECT_EQ(10.0, b.rate());
  EXPECT_EQ(10, b.count());

  // Add more historical values past the bucket window.
  // These should be ignored.
  EXPECT_FALSE(b.addValue(mkTimePoint(4), 40.0));
  EXPECT_FALSE(b.addValue(mkTimePoint(5), 40.0));
  EXPECT_EQ(10.0, b.avg());
  EXPECT_EQ(10.0, b.rate());
  EXPECT_EQ(10, b.count());
}
800 | |
801 | TEST(BucketedTimeSeries, reConstructEmptyTimeSeries) { |
802 | auto verify = [](auto timeSeries) { |
803 | EXPECT_TRUE(timeSeries.empty()); |
804 | EXPECT_EQ(0, timeSeries.sum()); |
805 | EXPECT_EQ(0, timeSeries.count()); |
806 | }; |
807 | |
808 | // Create a 100 second timeseries with 10 buckets_ |
809 | BucketedTimeSeries<int64_t> ts(10, seconds(100)); |
810 | |
811 | verify(ts); |
812 | |
813 | auto firstTime = ts.firstTime(); |
814 | auto latestTime = ts.latestTime(); |
815 | auto duration = ts.duration(); |
816 | auto buckets = ts.buckets(); |
817 | |
818 | // Reconstruct the timeseries |
819 | BucketedTimeSeries<int64_t> newTs(firstTime, latestTime, duration, buckets); |
820 | |
821 | verify(newTs); |
822 | } |
823 | |
TEST(BucketedTimeSeries, reConstructWithValidData) {
  // Create a 100 second timeseries with 10 buckets_
  BucketedTimeSeries<int64_t> ts(10, seconds(100));

  auto setup = [&] {
    ts.clear();
    // Add 1 value to each bucket
    for (int n = 5; n <= 95; n += 10) {
      ts.addValue(seconds(n), 6);
    }

    EXPECT_EQ(10, ts.count());
    EXPECT_EQ(60, ts.sum());
    EXPECT_EQ(6, ts.avg());
  };

  setup();

  // Capture the full observable state of the series.
  auto firstTime = ts.firstTime();
  auto latestTime = ts.latestTime();
  auto duration = ts.duration();
  auto buckets = ts.buckets();

  // Reconstruct the timeseries
  BucketedTimeSeries<int64_t> newTs(firstTime, latestTime, duration, buckets);

  // The reconstructed series must match the original exactly,
  // including the per-bucket sums and counts.
  auto compare = [&] {
    EXPECT_EQ(ts.firstTime(), newTs.firstTime());
    EXPECT_EQ(ts.latestTime(), newTs.latestTime());
    EXPECT_EQ(ts.duration(), newTs.duration());
    EXPECT_EQ(ts.buckets().size(), newTs.buckets().size());
    EXPECT_EQ(ts.sum(), newTs.sum());
    EXPECT_EQ(ts.count(), newTs.count());

    for (auto it1 = ts.buckets().begin(), it2 = newTs.buckets().begin();
         it1 != ts.buckets().end();
         it1++, it2++) {
      EXPECT_EQ(it1->sum, it2->sum);
      EXPECT_EQ(it1->count, it2->count);
    }
  };

  compare();
}
868 | |
869 | TEST(BucketedTimeSeries, reConstructWithCorruptedData) { |
870 | // The total should have been 0 as firstTime > latestTime |
871 | EXPECT_THROW( |
872 | { |
873 | std::vector<Bucket> buckets(10); |
874 | buckets[0].sum = 1; |
875 | buckets[0].count = 1; |
876 | |
877 | BucketedTimeSeries<int64_t> ts( |
878 | mkTimePoint(1), mkTimePoint(0), Duration(10), buckets); |
879 | }, |
880 | std::invalid_argument); |
881 | |
882 | // The duration should be no less than latestTime - firstTime |
883 | EXPECT_THROW( |
884 | BucketedTimeSeries<int64_t>( |
885 | mkTimePoint(1), |
886 | mkTimePoint(100), |
887 | Duration(10), |
888 | std::vector<Bucket>(10)), |
889 | std::invalid_argument); |
890 | } |
891 | |
892 | namespace IntMHTS { |
893 | enum Levels { |
894 | MINUTE, |
895 | HOUR, |
896 | ALLTIME, |
897 | NUM_LEVELS, |
898 | }; |
899 | |
900 | const seconds kMinuteHourDurations[] = {seconds(60), seconds(3600), seconds(0)}; |
901 | } // namespace IntMHTS |
902 | |
// Exercise a 3-level (minute / hour / all-time) MultiLevelTimeSeries
// through several phases of input, checking sums, averages, rates, and
// elapsed time at each level after each phase.
TEST(MinuteHourTimeSeries, Basic) {
  // 60 buckets per level, with durations from the IntMHTS table.
  folly::MultiLevelTimeSeries<int> mhts(
      60, IntMHTS::NUM_LEVELS, IntMHTS::kMinuteHourDurations);
  EXPECT_EQ(mhts.numLevels(), IntMHTS::NUM_LEVELS);
  EXPECT_EQ(mhts.numLevels(), 3);

  // A freshly constructed timeseries reports zero at every level.
  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 0);
  EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 0);
  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 0);

  EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 0);
  EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 0);
  EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 0);

  EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 0);
  EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 0);
  EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 0);

  EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 0);
  EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 0);
  EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 0);

  seconds cur_time(0);

  // A single data point makes 1 second of elapsed time visible everywhere.
  mhts.addValue(cur_time++, 10);
  mhts.flush();

  EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 1);
  EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 1);
  EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 1);

  // 300 seconds of value 10: the minute level caps its elapsed time at
  // 60s while the hour and all-time levels track the full 300s.
  for (int i = 0; i < 299; ++i) {
    mhts.addValue(cur_time++, 10);
  }
  mhts.flush();

  EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 60);
  EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 300);
  EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 300);

  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 600);
  EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 300 * 10);
  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 300 * 10);

  EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 10);
  EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 10);
  EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 10);

  EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 10);
  EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 10);
  EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 10);

  // Advance to 3 hours total; the hour level now caps at 3600s.
  for (int i = 0; i < 3600 * 3 - 300; ++i) {
    mhts.addValue(cur_time++, 10);
  }
  mhts.flush();

  EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 60);
  EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 3600);
  EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 3600 * 3);

  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 600);
  EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 3600 * 10);
  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 3600 * 3 * 10);

  EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 10);
  EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 10);
  EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 10);

  EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 10);
  EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 10);
  EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 10);

  // One hour of value 100: the minute and hour windows now see only
  // 100s, while the all-time average mixes the old 10s with the 100s.
  for (int i = 0; i < 3600; ++i) {
    mhts.addValue(cur_time++, 100);
  }
  mhts.flush();

  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60 * 100);
  EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 3600 * 100);
  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 3600 * 3 * 10 + 3600 * 100);

  EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 100);
  EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 100);
  EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 32.5);
  // Requesting an integer average truncates the 32.5 result to 32.
  EXPECT_EQ(mhts.avg<int>(IntMHTS::ALLTIME), 32);

  EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 100);
  EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 100);
  EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 32.5);
  EXPECT_EQ(mhts.rate<int>(IntMHTS::ALLTIME), 32);

  // 30 minutes of value 120: the hour window now spans half 100s, half 120s.
  for (int i = 0; i < 1800; ++i) {
    mhts.addValue(cur_time++, 120);
  }
  mhts.flush();

  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60 * 120);
  EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 1800 * 100 + 1800 * 120);
  EXPECT_EQ(
      mhts.sum(IntMHTS::ALLTIME), 3600 * 3 * 10 + 3600 * 100 + 1800 * 120);

  // One more minute of value 1000 pushes 60s of the 100s out of the
  // hour window (leaving 1740 of them inside it).
  for (int i = 0; i < 60; ++i) {
    mhts.addValue(cur_time++, 1000);
  }
  mhts.flush();

  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60 * 1000);
  EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 1740 * 100 + 1800 * 120 + 60 * 1000);
  EXPECT_EQ(
      mhts.sum(IntMHTS::ALLTIME),
      3600 * 3 * 10 + 3600 * 100 + 1800 * 120 + 60 * 1000);

  // clear() resets every level, including the all-time one.
  mhts.clear();
  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 0);
}
1019 | |
1020 | TEST(MinuteHourTimeSeries, QueryByInterval) { |
1021 | folly::MultiLevelTimeSeries<int> mhts( |
1022 | 60, IntMHTS::NUM_LEVELS, IntMHTS::kMinuteHourDurations); |
1023 | |
1024 | TimePoint curTime; |
1025 | for (curTime = mkTimePoint(0); curTime < mkTimePoint(7200); |
1026 | curTime += seconds(1)) { |
1027 | mhts.addValue(curTime, 1); |
1028 | } |
1029 | for (curTime = mkTimePoint(7200); curTime < mkTimePoint(7200 + 3540); |
1030 | curTime += seconds(1)) { |
1031 | mhts.addValue(curTime, 10); |
1032 | } |
1033 | for (curTime = mkTimePoint(7200 + 3540); curTime < mkTimePoint(7200 + 3600); |
1034 | curTime += seconds(1)) { |
1035 | mhts.addValue(curTime, 100); |
1036 | } |
1037 | mhts.flush(); |
1038 | |
1039 | struct TimeInterval { |
1040 | TimePoint start; |
1041 | TimePoint end; |
1042 | }; |
1043 | TimeInterval intervals[12] = { |
1044 | {curTime - seconds(60), curTime}, |
1045 | {curTime - seconds(3600), curTime}, |
1046 | {curTime - seconds(7200), curTime}, |
1047 | {curTime - seconds(3600), curTime - seconds(60)}, |
1048 | {curTime - seconds(7200), curTime - seconds(60)}, |
1049 | {curTime - seconds(7200), curTime - seconds(3600)}, |
1050 | {curTime - seconds(50), curTime - seconds(20)}, |
1051 | {curTime - seconds(3020), curTime - seconds(20)}, |
1052 | {curTime - seconds(7200), curTime - seconds(20)}, |
1053 | {curTime - seconds(3000), curTime - seconds(1000)}, |
1054 | {curTime - seconds(7200), curTime - seconds(1000)}, |
1055 | {curTime - seconds(7200), curTime - seconds(3600)}, |
1056 | }; |
1057 | |
1058 | int expectedSums[12] = { |
1059 | 6000, |
1060 | 41400, |
1061 | 32400, |
1062 | 35400, |
1063 | 32130, |
1064 | 16200, |
1065 | 3000, |
1066 | 33600, |
1067 | 32310, |
1068 | 20000, |
1069 | 27900, |
1070 | 16200, |
1071 | }; |
1072 | |
1073 | int expectedCounts[12] = { |
1074 | 60, |
1075 | 3600, |
1076 | 7200, |
1077 | 3540, |
1078 | 7140, |
1079 | 3600, |
1080 | 30, |
1081 | 3000, |
1082 | 7180, |
1083 | 2000, |
1084 | 6200, |
1085 | 3600, |
1086 | }; |
1087 | |
1088 | for (int i = 0; i < 12; ++i) { |
1089 | TimeInterval interval = intervals[i]; |
1090 | |
1091 | int s = mhts.sum(interval.start, interval.end); |
1092 | EXPECT_EQ(expectedSums[i], s); |
1093 | |
1094 | int c = mhts.count(interval.start, interval.end); |
1095 | EXPECT_EQ(expectedCounts[i], c); |
1096 | |
1097 | int a = mhts.avg<int>(interval.start, interval.end); |
1098 | EXPECT_EQ(expectedCounts[i] ? (expectedSums[i] / expectedCounts[i]) : 0, a); |
1099 | |
1100 | int r = mhts.rate<int>(interval.start, interval.end); |
1101 | int expectedRate = |
1102 | expectedSums[i] / (interval.end - interval.start).count(); |
1103 | EXPECT_EQ(expectedRate, r); |
1104 | } |
1105 | } |
1106 | |
// Same phased scenario as MinuteHourTimeSeries.Basic, but constructing
// the timeseries from an initializer_list of durations and addressing
// levels by duration instead of by index.
TEST(MultiLevelTimeSeries, Basic) {
  // using constructor with initializer_list parameter
  folly::MultiLevelTimeSeries<int> mhts(
      60, {seconds(60), seconds(3600), seconds(0)});
  EXPECT_EQ(mhts.numLevels(), 3);

  // A freshly constructed timeseries reports zero at every level.
  EXPECT_EQ(mhts.sum(seconds(60)), 0);
  EXPECT_EQ(mhts.sum(seconds(3600)), 0);
  EXPECT_EQ(mhts.sum(seconds(0)), 0);

  EXPECT_EQ(mhts.avg(seconds(60)), 0);
  EXPECT_EQ(mhts.avg(seconds(3600)), 0);
  EXPECT_EQ(mhts.avg(seconds(0)), 0);

  EXPECT_EQ(mhts.rate(seconds(60)), 0);
  EXPECT_EQ(mhts.rate(seconds(3600)), 0);
  EXPECT_EQ(mhts.rate(seconds(0)), 0);

  EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 0);
  EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 0);
  EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 0);

  seconds cur_time(0);

  // A single data point makes 1 second of elapsed time visible everywhere.
  mhts.addValue(cur_time++, 10);
  mhts.flush();

  EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 1);
  EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 1);
  EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 1);

  // 300 seconds of value 10: the 60s level caps its elapsed time at 60s
  // while the 3600s and all-time (0s) levels track the full 300s.
  for (int i = 0; i < 299; ++i) {
    mhts.addValue(cur_time++, 10);
  }
  mhts.flush();

  EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 60);
  EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 300);
  EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 300);

  EXPECT_EQ(mhts.sum(seconds(60)), 600);
  EXPECT_EQ(mhts.sum(seconds(3600)), 300 * 10);
  EXPECT_EQ(mhts.sum(seconds(0)), 300 * 10);

  EXPECT_EQ(mhts.avg(seconds(60)), 10);
  EXPECT_EQ(mhts.avg(seconds(3600)), 10);
  EXPECT_EQ(mhts.avg(seconds(0)), 10);

  EXPECT_EQ(mhts.rate(seconds(60)), 10);
  EXPECT_EQ(mhts.rate(seconds(3600)), 10);
  EXPECT_EQ(mhts.rate(seconds(0)), 10);

  // Advance to 3 hours total; the 3600s level now caps at 3600s.
  for (int i = 0; i < 3600 * 3 - 300; ++i) {
    mhts.addValue(cur_time++, 10);
  }
  mhts.flush();

  EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 60);
  EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 3600);
  EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 3600 * 3);

  EXPECT_EQ(mhts.sum(seconds(60)), 600);
  EXPECT_EQ(mhts.sum(seconds(3600)), 3600 * 10);
  EXPECT_EQ(mhts.sum(seconds(0)), 3600 * 3 * 10);

  EXPECT_EQ(mhts.avg(seconds(60)), 10);
  EXPECT_EQ(mhts.avg(seconds(3600)), 10);
  EXPECT_EQ(mhts.avg(seconds(0)), 10);

  EXPECT_EQ(mhts.rate(seconds(60)), 10);
  EXPECT_EQ(mhts.rate(seconds(3600)), 10);
  EXPECT_EQ(mhts.rate(seconds(0)), 10);

  // One hour of value 100: the 60s and 3600s windows now see only 100s,
  // while the all-time average mixes the old 10s with the 100s.
  for (int i = 0; i < 3600; ++i) {
    mhts.addValue(cur_time++, 100);
  }
  mhts.flush();

  EXPECT_EQ(mhts.sum(seconds(60)), 60 * 100);
  EXPECT_EQ(mhts.sum(seconds(3600)), 3600 * 100);
  EXPECT_EQ(mhts.sum(seconds(0)), 3600 * 3 * 10 + 3600 * 100);

  EXPECT_EQ(mhts.avg(seconds(60)), 100);
  EXPECT_EQ(mhts.avg(seconds(3600)), 100);
  EXPECT_EQ(mhts.avg(seconds(0)), 32.5);
  // Requesting an integer average truncates the 32.5 result to 32.
  EXPECT_EQ(mhts.avg<int>(seconds(0)), 32);

  EXPECT_EQ(mhts.rate(seconds(60)), 100);
  EXPECT_EQ(mhts.rate(seconds(3600)), 100);
  EXPECT_EQ(mhts.rate(seconds(0)), 32.5);
  EXPECT_EQ(mhts.rate<int>(seconds(0)), 32);

  // 30 minutes of value 120: the hour window now spans half 100s, half 120s.
  for (int i = 0; i < 1800; ++i) {
    mhts.addValue(cur_time++, 120);
  }
  mhts.flush();

  EXPECT_EQ(mhts.sum(seconds(60)), 60 * 120);
  EXPECT_EQ(mhts.sum(seconds(3600)), 1800 * 100 + 1800 * 120);
  EXPECT_EQ(mhts.sum(seconds(0)), 3600 * 3 * 10 + 3600 * 100 + 1800 * 120);

  // One more minute of value 1000 pushes 60s of the 100s out of the
  // hour window (leaving 1740 of them inside it).
  for (int i = 0; i < 60; ++i) {
    mhts.addValue(cur_time++, 1000);
  }
  mhts.flush();

  EXPECT_EQ(mhts.sum(seconds(60)), 60 * 1000);
  EXPECT_EQ(mhts.sum(seconds(3600)), 1740 * 100 + 1800 * 120 + 60 * 1000);
  EXPECT_EQ(
      mhts.sum(seconds(0)),
      3600 * 3 * 10 + 3600 * 100 + 1800 * 120 + 60 * 1000);

  // clear() resets every level, including the all-time one.
  mhts.clear();
  EXPECT_EQ(mhts.sum(seconds(0)), 0);
}
1222 | |
// Same interval-query scenario as MinuteHourTimeSeries.QueryByInterval,
// using the initializer_list constructor for the timeseries.
TEST(MultiLevelTimeSeries, QueryByInterval) {
  folly::MultiLevelTimeSeries<int> mhts(
      60, {seconds(60), seconds(3600), seconds(0)});

  // Load 2 hours of value 1, then 59 minutes of value 10, then a final
  // minute of value 100; curTime ends up at t=10800.
  TimePoint curTime;
  for (curTime = mkTimePoint(0); curTime < mkTimePoint(7200);
       curTime += seconds(1)) {
    mhts.addValue(curTime, 1);
  }
  for (curTime = mkTimePoint(7200); curTime < mkTimePoint(7200 + 3540);
       curTime += seconds(1)) {
    mhts.addValue(curTime, 10);
  }
  for (curTime = mkTimePoint(7200 + 3540); curTime < mkTimePoint(7200 + 3600);
       curTime += seconds(1)) {
    mhts.addValue(curTime, 100);
  }
  mhts.flush();

  // A query interval from start to end, expressed relative to curTime.
  struct TimeInterval {
    TimePoint start;
    TimePoint end;
  };

  std::array<TimeInterval, 12> intervals = {{
      {curTime - seconds(60), curTime},
      {curTime - seconds(3600), curTime},
      {curTime - seconds(7200), curTime},
      {curTime - seconds(3600), curTime - seconds(60)},
      {curTime - seconds(7200), curTime - seconds(60)},
      {curTime - seconds(7200), curTime - seconds(3600)},
      {curTime - seconds(50), curTime - seconds(20)},
      {curTime - seconds(3020), curTime - seconds(20)},
      {curTime - seconds(7200), curTime - seconds(20)},
      {curTime - seconds(3000), curTime - seconds(1000)},
      {curTime - seconds(7200), curTime - seconds(1000)},
      {curTime - seconds(7200), curTime - seconds(3600)},
  }};

  // Expected aggregate sum for each interval above.
  std::array<int, 12> expectedSums = {{6000,
                                       41400,
                                       32400,
                                       35400,
                                       32130,
                                       16200,
                                       3000,
                                       33600,
                                       32310,
                                       20000,
                                       27900,
                                       16200}};

  // Expected number of data points in each interval.
  std::array<int, 12> expectedCounts = {
      {60, 3600, 7200, 3540, 7140, 3600, 30, 3000, 7180, 2000, 6200, 3600}};

  for (size_t i = 0; i < intervals.size(); ++i) {
    TimeInterval interval = intervals[i];

    int s = mhts.sum(interval.start, interval.end);
    EXPECT_EQ(expectedSums[i], s);

    int c = mhts.count(interval.start, interval.end);
    EXPECT_EQ(expectedCounts[i], c);

    // avg/rate results are truncated to int; guard against dividing by
    // zero when an interval is expected to be empty.
    int a = mhts.avg<int>(interval.start, interval.end);
    EXPECT_EQ(expectedCounts[i] ? (expectedSums[i] / expectedCounts[i]) : 0, a);

    int r = mhts.rate<int>(interval.start, interval.end);
    int expectedRate =
        expectedSums[i] / (interval.end - interval.start).count();
    EXPECT_EQ(expectedRate, r);
  }
}
1296 | |