// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.

#pragma once

#include <atomic>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <stdexcept>

/// Turn this on to have Thread::current_num_threads_ keep a count of currently-active threads.
#undef COUNT_ACTIVE_THREADS

namespace FASTER {
namespace core {

/// Gives every thread a unique, numeric thread ID, and recycles IDs when threads exit.
class Thread {
 public:
  /// The number of entries in the thread-ID table. Currently this is fixed at 96; the table
  /// never grows. If the table runs out of entries, the current implementation throws a
  /// std::runtime_error.
  static constexpr size_t kMaxNumThreads = 96;

 private:
  /// Encapsulates a thread ID: gets a free ID from the Thread class when the thread starts, and
  /// releases it back to the Thread class when the thread exits.
  class ThreadId {
   public:
    static constexpr uint32_t kInvalidId = UINT32_MAX;

    inline ThreadId();
    inline ~ThreadId();

    inline uint32_t id() const {
      return id_;
    }

   private:
    uint32_t id_;
  };

 public:
  /// Call static method Thread::id() to get the executing thread's ID.
  inline static uint32_t id() {
    return id_.id();
  }

 private:
  /// Methods ReserveEntry() and ReleaseEntry() do the real work.
  inline static uint32_t ReserveEntry() {
#ifdef COUNT_ACTIVE_THREADS
    int32_t result = ++current_num_threads_;
    assert(result <= static_cast<int32_t>(kMaxNumThreads));
#endif
    // Starting from a rotating index, probe every slot and claim the first free one with a
    // compare-and-swap.
    uint32_t start = next_index_++;
    uint32_t end = start + 2 * kMaxNumThreads;
    for(uint32_t id = start; id < end; ++id) {
      bool expected = false;
      if(id_used_[id % kMaxNumThreads].compare_exchange_strong(expected, true)) {
        return id % kMaxNumThreads;
      }
    }
    // All kMaxNumThreads (96) thread IDs are already in use.
    throw std::runtime_error{ "Too many threads!" };
  }
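
  // Illustrative walk-through of the scan above (not part of the implementation): with
  // kMaxNumThreads == 96, a thread that fetches next_index_ == 100 probes slots
  // 100 % 96 == 4, 5, ..., 95, 0, 1, ..., covering each of the 96 slots exactly twice;
  // if every compare-and-swap fails, the loop falls through to the throw.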

  inline static void ReleaseEntry(uint32_t id) {
    assert(id != ThreadId::kInvalidId);
    assert(id_used_[id].load());
    id_used_[id] = false;
#ifdef COUNT_ACTIVE_THREADS
    int32_t result = --current_num_threads_;
    assert(result >= 0);
#endif
  }

  /// The current thread's ID.
  static thread_local ThreadId id_;

  /// Next thread index to consider.
  static std::atomic<uint32_t> next_index_;
  /// Which thread IDs have already been taken.
  static std::atomic<bool> id_used_[kMaxNumThreads];

#ifdef COUNT_ACTIVE_THREADS
  static std::atomic<int32_t> current_num_threads_;
#endif

  friend class ThreadId;
};

inline Thread::ThreadId::ThreadId()
  : id_{ kInvalidId } {
  id_ = Thread::ReserveEntry();
}

inline Thread::ThreadId::~ThreadId() {
  Thread::ReleaseEntry(id_);
}

} // namespace core
} // namespace FASTER
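
/// Example usage (a minimal sketch; `Worker` is a hypothetical function, not part of this header):
///
///   #include <thread>
///
///   void Worker() {
///     // The first call to Thread::id() on a thread constructs the thread_local ThreadId,
///     // reserving a slot; the slot is released automatically when the thread exits and the
///     // ThreadId is destroyed.
///     uint32_t my_id = FASTER::core::Thread::id();  // in [0, Thread::kMaxNumThreads)
///     (void)my_id;
///   }
///
///   // std::thread t{ Worker };
///   // t.join();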