// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file defines an Arena allocator for better allocation performance.

#ifndef GOOGLE_PROTOBUF_ARENA_IMPL_H__
#define GOOGLE_PROTOBUF_ARENA_IMPL_H__

#include <atomic>
#include <limits>
#include <typeinfo>

#include <google/protobuf/stubs/common.h>
#include <google/protobuf/stubs/logging.h>
#include <google/protobuf/stubs/port.h>

#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif  // ADDRESS_SANITIZER

#include <google/protobuf/arenaz_sampler.h>

// Must be included last.
#include <google/protobuf/port_def.inc>


namespace google {
namespace protobuf {
namespace internal {

// To prevent sharing cache lines between threads
#ifdef __cpp_aligned_new
enum { kCacheAlignment = 64 };
#else
enum { kCacheAlignment = alignof(max_align_t) };  // do the best we can
#endif

inline constexpr size_t AlignUpTo8(size_t n) {
  // Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.)
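  // For example: AlignUpTo8(1) == 8, AlignUpTo8(8) == 8, AlignUpTo8(9) == 16.
  // static_cast<size_t>(-8) is ~size_t{7}, i.e. all ones with the low three
  // bits cleared, so the AND rounds the sum down to a multiple of 8.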
  return (n + 7) & static_cast<size_t>(-8);
}

using LifecycleIdAtomic = uint64_t;

// MetricsCollector collects stats for a particular arena.
class PROTOBUF_EXPORT ArenaMetricsCollector {
 public:
  explicit ArenaMetricsCollector(bool record_allocs)
      : record_allocs_(record_allocs) {}

  // Invoked when the arena is about to be destroyed. This method will
  // typically finalize any metric collection and delete the collector.
  // space_allocated is the space used by the arena.
  virtual void OnDestroy(uint64_t space_allocated) = 0;

  // OnReset() is called when the associated arena is reset.
  // space_allocated is the space used by the arena just before the reset.
  virtual void OnReset(uint64_t space_allocated) = 0;

  // OnAlloc is called when an allocation happens.
  // type_info is promised to be static - its lifetime extends to
  // match the program's lifetime (it is obtained from the typeid operator).
  // Note: typeid(void) will be passed as allocated_type every time we
  // intentionally want to avoid monitoring an allocation (e.g. internal
  // allocations for managing the arena).
  virtual void OnAlloc(const std::type_info* allocated_type,
                       uint64_t alloc_size) = 0;

  // Does OnAlloc() need to be called? If false, metric collection overhead
  // will be reduced since we will not do extra work per allocation.
  bool RecordAllocs() { return record_allocs_; }

 protected:
  // This class is destructed by the call to OnDestroy().
  ~ArenaMetricsCollector() = default;
  const bool record_allocs_;
};
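
// A minimal sketch of a concrete collector, assuming it is installed via
// AllocationPolicy::metrics_collector below. The class name and the logging
// are illustrative only, not part of the library:
//
//   class LoggingCollector : public ArenaMetricsCollector {
//    public:
//     LoggingCollector() : ArenaMetricsCollector(/*record_allocs=*/false) {}
//     void OnDestroy(uint64_t space_allocated) override {
//       GOOGLE_LOG(INFO) << "arena destroyed: " << space_allocated;
//       delete this;  // OnDestroy() typically finalizes the collector.
//     }
//     void OnReset(uint64_t space_allocated) override {}
//     void OnAlloc(const std::type_info*, uint64_t) override {}
//   };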

struct AllocationPolicy {
  static constexpr size_t kDefaultStartBlockSize = 256;
  static constexpr size_t kDefaultMaxBlockSize = 8192;

  size_t start_block_size = kDefaultStartBlockSize;
  size_t max_block_size = kDefaultMaxBlockSize;
  void* (*block_alloc)(size_t) = nullptr;
  void (*block_dealloc)(void*, size_t) = nullptr;
  ArenaMetricsCollector* metrics_collector = nullptr;

  bool IsDefault() const {
    return start_block_size == kDefaultStartBlockSize &&
           max_block_size == kDefaultMaxBlockSize && block_alloc == nullptr &&
           block_dealloc == nullptr && metrics_collector == nullptr;
  }
};
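
// For example, a policy that starts with 1 KiB blocks and caps block growth
// at 64 KiB while keeping the default heap allocator (the sizes here are
// illustrative):
//
//   AllocationPolicy policy;
//   policy.start_block_size = 1024;
//   policy.max_block_size = 64 * 1024;
//   // block_alloc / block_dealloc stay nullptr, so the default allocator is
//   // used; policy.IsDefault() is now false.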

// Tagged pointer to an AllocationPolicy.
class TaggedAllocationPolicyPtr {
 public:
  constexpr TaggedAllocationPolicyPtr() : policy_(0) {}

  explicit TaggedAllocationPolicyPtr(AllocationPolicy* policy)
      : policy_(reinterpret_cast<uintptr_t>(policy)) {}

  void set_policy(AllocationPolicy* policy) {
    auto bits = policy_ & kTagsMask;
    policy_ = reinterpret_cast<uintptr_t>(policy) | bits;
  }

  AllocationPolicy* get() {
    return reinterpret_cast<AllocationPolicy*>(policy_ & kPtrMask);
  }
  const AllocationPolicy* get() const {
    return reinterpret_cast<const AllocationPolicy*>(policy_ & kPtrMask);
  }

  AllocationPolicy& operator*() { return *get(); }
  const AllocationPolicy& operator*() const { return *get(); }

  AllocationPolicy* operator->() { return get(); }
  const AllocationPolicy* operator->() const { return get(); }

  bool is_user_owned_initial_block() const {
    return static_cast<bool>(get_mask<kUserOwnedInitialBlock>());
  }
  void set_is_user_owned_initial_block(bool v) {
    set_mask<kUserOwnedInitialBlock>(v);
  }

  bool should_record_allocs() const {
    return static_cast<bool>(get_mask<kRecordAllocs>());
  }
  void set_should_record_allocs(bool v) { set_mask<kRecordAllocs>(v); }

  uintptr_t get_raw() const { return policy_; }

  inline void RecordAlloc(const std::type_info* allocated_type,
                          size_t n) const {
    get()->metrics_collector->OnAlloc(allocated_type, n);
  }

 private:
  enum : uintptr_t {
    kUserOwnedInitialBlock = 1,
    kRecordAllocs = 2,
  };

  static constexpr uintptr_t kTagsMask = 7;
  static constexpr uintptr_t kPtrMask = ~kTagsMask;

  template <uintptr_t kMask>
  uintptr_t get_mask() const {
    return policy_ & kMask;
  }
  template <uintptr_t kMask>
  void set_mask(bool v) {
    if (v) {
      policy_ |= kMask;
    } else {
      policy_ &= ~kMask;
    }
  }
  uintptr_t policy_;
};

enum class AllocationClient { kDefault, kArray };

// A simple arena allocator. Calls to allocate functions must be properly
// serialized by the caller, hence this class cannot be used as a general
// purpose allocator in a multi-threaded program. It serves as a building block
// for ThreadSafeArena, which provides a thread-safe arena allocator.
//
// This class manages
// 1) Arena bump allocation + owning memory blocks.
// 2) Maintaining a cleanup list.
// It delegates the actual memory allocation back to ThreadSafeArena, which
// contains the information on block growth policy and the backing memory
// allocator used.
class PROTOBUF_EXPORT SerialArena {
 public:
  struct Memory {
    void* ptr;
    size_t size;
  };

  // Node contains the ptr of the object to be cleaned up and the associated
  // cleanup function ptr.
  struct CleanupNode {
    void* elem;              // Pointer to the object to be cleaned up.
    void (*cleanup)(void*);  // Function pointer to the destructor or deleter.
  };

  void CleanupList();
  uint64_t SpaceAllocated() const {
    return space_allocated_.load(std::memory_order_relaxed);
  }
  uint64_t SpaceUsed() const;

  bool HasSpace(size_t n) const {
    return n <= static_cast<size_t>(limit_ - ptr_);
  }

  // See comments on `cached_blocks_` member for details.
  PROTOBUF_ALWAYS_INLINE void* TryAllocateFromCachedBlock(size_t size) {
    if (PROTOBUF_PREDICT_FALSE(size < 16)) return nullptr;
    // We round up to the next larger block in case the memory doesn't match
    // the pattern we are looking for.
    const size_t index = Bits::Log2FloorNonZero64(size - 1) - 3;
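    // For example: size 16 maps to index 0, sizes 17..32 to index 1, and
    // sizes 33..64 to index 2, so a cache hit always returns a block at
    // least as large as the request.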

    if (index >= cached_block_length_) return nullptr;
    auto& cached_head = cached_blocks_[index];
    if (cached_head == nullptr) return nullptr;

    void* ret = cached_head;
#ifdef ADDRESS_SANITIZER
    ASAN_UNPOISON_MEMORY_REGION(ret, size);
#endif  // ADDRESS_SANITIZER
    cached_head = cached_head->next;
    return ret;
  }

  // In kArray mode we look through cached blocks.
  // We do not do this by default because most non-array allocations will not
  // have the right size and will fail to find an appropriate cached block.
  //
  // TODO(sbenza): Evaluate if we should use cached blocks for message types of
  // the right size. We can statically know if the allocation size can benefit
  // from it.
  template <AllocationClient alloc_client = AllocationClient::kDefault>
  void* AllocateAligned(size_t n, const AllocationPolicy* policy) {
    GOOGLE_DCHECK_EQ(internal::AlignUpTo8(n), n);  // Must be already aligned.
    GOOGLE_DCHECK_GE(limit_, ptr_);

    if (alloc_client == AllocationClient::kArray) {
      if (void* res = TryAllocateFromCachedBlock(n)) {
        return res;
      }
    }

    if (PROTOBUF_PREDICT_FALSE(!HasSpace(n))) {
      return AllocateAlignedFallback(n, policy);
    }
    return AllocateFromExisting(n);
  }

 private:
  void* AllocateFromExisting(size_t n) {
    void* ret = ptr_;
    ptr_ += n;
#ifdef ADDRESS_SANITIZER
    ASAN_UNPOISON_MEMORY_REGION(ret, n);
#endif  // ADDRESS_SANITIZER
    return ret;
  }

  // See comments on `cached_blocks_` member for details.
  void ReturnArrayMemory(void* p, size_t size) {
    // We only need to check for 32-bit platforms.
    // On 64-bit platforms the minimum allocation size from Repeated*Field is
    // guaranteed to be 16.
    if (sizeof(void*) < 8) {
      if (PROTOBUF_PREDICT_FALSE(size < 16)) return;
    } else {
      GOOGLE_DCHECK(size >= 16);
    }

    // We round down to the next smaller block in case the memory doesn't match
    // the pattern we are looking for. eg, someone might have called Reserve()
    // on the repeated field.
    const size_t index = Bits::Log2FloorNonZero64(size) - 4;
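    // For example: sizes 16..31 land in index 0 and sizes 32..63 in index 1,
    // so a block stored at index i is always at least 2^(i+4) bytes, which
    // matches the rounding-up done in TryAllocateFromCachedBlock().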

    if (PROTOBUF_PREDICT_FALSE(index >= cached_block_length_)) {
      // We can't put this block on the freelist, so make the block itself the
      // new freelist array. It is guaranteed to be larger than the array we
      // have, and large enough to hold an entry for another block of `size`.
      CachedBlock** new_list = static_cast<CachedBlock**>(p);
      size_t new_size = size / sizeof(CachedBlock*);

      std::copy(cached_blocks_, cached_blocks_ + cached_block_length_,
                new_list);
      std::fill(new_list + cached_block_length_, new_list + new_size, nullptr);
      cached_blocks_ = new_list;
      // Make the size fit in uint8_t. Since the index is derived from a log2
      // of the block size, it can never reach 64, so capping the length at 64
      // loses nothing.
      cached_block_length_ =
          static_cast<uint8_t>(std::min(size_t{64}, new_size));

      return;
    }

    auto& cached_head = cached_blocks_[index];
    auto* new_node = static_cast<CachedBlock*>(p);
    new_node->next = cached_head;
    cached_head = new_node;
#ifdef ADDRESS_SANITIZER
    ASAN_POISON_MEMORY_REGION(p, size);
#endif  // ADDRESS_SANITIZER
  }

 public:
  // Allocate space if the current region provides enough space.
  bool MaybeAllocateAligned(size_t n, void** out) {
    GOOGLE_DCHECK_EQ(internal::AlignUpTo8(n), n);  // Must be already aligned.
    GOOGLE_DCHECK_GE(limit_, ptr_);
    if (PROTOBUF_PREDICT_FALSE(!HasSpace(n))) return false;
    *out = AllocateFromExisting(n);
    return true;
  }

  std::pair<void*, CleanupNode*> AllocateAlignedWithCleanup(
      size_t n, const AllocationPolicy* policy) {
    GOOGLE_DCHECK_EQ(internal::AlignUpTo8(n), n);  // Must be already aligned.
    if (PROTOBUF_PREDICT_FALSE(!HasSpace(n + kCleanupSize))) {
      return AllocateAlignedWithCleanupFallback(n, policy);
    }
    return AllocateFromExistingWithCleanupFallback(n);
  }

 private:
  std::pair<void*, CleanupNode*> AllocateFromExistingWithCleanupFallback(
      size_t n) {
    void* ret = ptr_;
    ptr_ += n;
    limit_ -= kCleanupSize;
#ifdef ADDRESS_SANITIZER
    ASAN_UNPOISON_MEMORY_REGION(ret, n);
    ASAN_UNPOISON_MEMORY_REGION(limit_, kCleanupSize);
#endif  // ADDRESS_SANITIZER
    return CreatePair(ret, reinterpret_cast<CleanupNode*>(limit_));
  }

 public:
  void AddCleanup(void* elem, void (*cleanup)(void*),
                  const AllocationPolicy* policy) {
    auto res = AllocateAlignedWithCleanup(0, policy);
    res.second->elem = elem;
    res.second->cleanup = cleanup;
  }
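
  // For example, arranging for a non-trivially-destructible T to run its
  // destructor when the arena cleans up (a sketch; `DestructObject` is an
  // illustrative helper, not part of this header):
  //
  //   template <typename T>
  //   void DestructObject(void* p) { static_cast<T*>(p)->~T(); }
  //   ...
  //   serial_arena->AddCleanup(obj, &DestructObject<MyType>, policy);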

  void* owner() const { return owner_; }
  SerialArena* next() const { return next_; }
  void set_next(SerialArena* next) { next_ = next; }

 private:
  friend class ThreadSafeArena;
  friend class ArenaBenchmark;

  // Creates a new SerialArena inside mem, using the remaining memory for
  // future allocations.
  static SerialArena* New(SerialArena::Memory mem, void* owner,
                          ThreadSafeArenaStats* stats);
  // Free SerialArena returning the memory passed in to New.
  template <typename Deallocator>
  Memory Free(Deallocator deallocator);

  // Blocks are variable length malloc-ed objects. The following structure
  // describes the common header for all blocks.
  struct Block {
    Block(Block* next, size_t size) : next(next), size(size), start(nullptr) {}

    char* Pointer(size_t n) {
      GOOGLE_DCHECK(n <= size);
      return reinterpret_cast<char*>(this) + n;
    }

    Block* const next;
    const size_t size;
    CleanupNode* start;
    // data follows
  };

  void* owner_;            // &ThreadCache of this thread.
  Block* head_;            // Head of linked list of blocks.
  SerialArena* next_;      // Next SerialArena in this linked list.
  size_t space_used_ = 0;  // Necessary for metrics.
  std::atomic<size_t> space_allocated_;

  // Next pointer to allocate from. Always 8-byte aligned. Points inside
  // head_. We keep ptr_ and limit_ here (rather than behind head_) to
  // reduce indirection.
  char* ptr_;
  // Limiting address up to which memory can be allocated from the head block.
  char* limit_;
  // For holding sampling information. The pointer is owned by the
  // ThreadSafeArena that holds this serial arena.
  ThreadSafeArenaStats* arena_stats_;

  // Repeated*Field and Arena play together to reduce memory consumption by
  // reusing blocks. Currently, natural growth of the repeated field types
  // makes them allocate blocks of size `8 + 2^N, N>=3`.
  // When a repeated field grows, it returns its previous block and we put it
  // in this free list.
  // `cached_blocks_[i]` points to the free list for blocks of size
  // `8+2^(i+4)`.
  // The array of freelists is grown when needed in `ReturnArrayMemory()`.
  struct CachedBlock {
    // Simple linked list.
    CachedBlock* next;
  };
  uint8_t cached_block_length_ = 0;
  CachedBlock** cached_blocks_ = nullptr;

  // Constructor is private as only New() should be used.
  inline SerialArena(Block* b, void* owner, ThreadSafeArenaStats* stats);
  void* AllocateAlignedFallback(size_t n, const AllocationPolicy* policy);
  std::pair<void*, CleanupNode*> AllocateAlignedWithCleanupFallback(
      size_t n, const AllocationPolicy* policy);
  void AllocateNewBlock(size_t n, const AllocationPolicy* policy);

  std::pair<void*, CleanupNode*> CreatePair(void* ptr, CleanupNode* node) {
    return {ptr, node};
  }

 public:
  static constexpr size_t kBlockHeaderSize = AlignUpTo8(sizeof(Block));
  static constexpr size_t kCleanupSize = AlignUpTo8(sizeof(CleanupNode));
};

// Tag type used to invoke the constructor of message-owned arena.
// Only message-owned arenas use this constructor for creation.
// Such constructors are internal implementation details of the library.
struct MessageOwned {
  explicit MessageOwned() = default;
};

// This class provides the core Arena memory allocation library. Different
// implementations only need to implement the public interface below.
// Arena is not a template type as that would only be useful if all protos
// in turn would be templates, which will not (and cannot) happen. However, by
// separating the memory allocation part from the cruft of the API users
// expect, we can use #ifdef to select the best implementation based on
// hardware / OS.
class PROTOBUF_EXPORT ThreadSafeArena {
 public:
  ThreadSafeArena() { Init(); }

  // Constructor solely used by message-owned arena.
  ThreadSafeArena(internal::MessageOwned) : tag_and_id_(kMessageOwnedArena) {
    Init();
  }

  ThreadSafeArena(char* mem, size_t size) { InitializeFrom(mem, size); }

  explicit ThreadSafeArena(void* mem, size_t size,
                           const AllocationPolicy& policy) {
    InitializeWithPolicy(mem, size, policy);
  }

  // Destructor deletes all owned heap allocated objects, and destructs objects
  // that have non-trivial destructors, except for proto2 message objects whose
  // destructors can be skipped. Also, frees all blocks except the initial
  // block if it was passed in.
  ~ThreadSafeArena();

  uint64_t Reset();

  uint64_t SpaceAllocated() const;
  uint64_t SpaceUsed() const;

  template <AllocationClient alloc_client = AllocationClient::kDefault>
  void* AllocateAligned(size_t n, const std::type_info* type) {
    SerialArena* arena;
    if (PROTOBUF_PREDICT_TRUE(!alloc_policy_.should_record_allocs() &&
                              GetSerialArenaFast(&arena))) {
      return arena->AllocateAligned<alloc_client>(n, AllocPolicy());
    } else {
      return AllocateAlignedFallback(n, type);
    }
  }

  void ReturnArrayMemory(void* p, size_t size) {
    SerialArena* arena;
    if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
      arena->ReturnArrayMemory(p, size);
    }
  }

  // This function allocates n bytes if the common happy case is true and
  // returns true. Otherwise it does nothing and returns false. These strange
  // semantics are necessary to allow callers to write functions that only
  // have fallback function calls in tail position. This substantially
  // improves the generated code for the happy path.
  PROTOBUF_NDEBUG_INLINE bool MaybeAllocateAligned(size_t n, void** out) {
    SerialArena* arena;
    if (PROTOBUF_PREDICT_TRUE(!alloc_policy_.should_record_allocs() &&
                              GetSerialArenaFromThreadCache(&arena))) {
      return arena->MaybeAllocateAligned(n, out);
    }
    return false;
  }
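
  // A caller-side sketch of that pattern (function names are illustrative):
  //
  //   void* AllocFast(ThreadSafeArena& arena, size_t n) {
  //     void* ptr;
  //     if (arena.MaybeAllocateAligned(n, &ptr)) return ptr;  // happy path
  //     return AllocFallback(arena, n);  // fallback call in tail position
  //   }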

  std::pair<void*, SerialArena::CleanupNode*> AllocateAlignedWithCleanup(
      size_t n, const std::type_info* type);

  // Add object pointer and cleanup function pointer to the list.
  void AddCleanup(void* elem, void (*cleanup)(void*));

  // Checks whether this arena is message-owned.
  PROTOBUF_ALWAYS_INLINE bool IsMessageOwned() const {
    return tag_and_id_ & kMessageOwnedArena;
  }

 private:
  // Unique for each arena. Changes on Reset().
  uint64_t tag_and_id_ = 0;
  // The LSB of tag_and_id_ indicates if the arena is message-owned.
  enum : uint64_t { kMessageOwnedArena = 1 };

  TaggedAllocationPolicyPtr alloc_policy_;  // Tagged pointer to AllocPolicy.

  static_assert(std::is_trivially_destructible<SerialArena>{},
                "SerialArena needs to be trivially destructible.");
  // Pointer to a linked list of SerialArena.
  std::atomic<SerialArena*> threads_;
  std::atomic<SerialArena*> hint_;  // Fast thread-local block access

  const AllocationPolicy* AllocPolicy() const { return alloc_policy_.get(); }
  void InitializeFrom(void* mem, size_t size);
  void InitializeWithPolicy(void* mem, size_t size, AllocationPolicy policy);
  void* AllocateAlignedFallback(size_t n, const std::type_info* type);
  std::pair<void*, SerialArena::CleanupNode*>
  AllocateAlignedWithCleanupFallback(size_t n, const std::type_info* type);

  void Init();
  void SetInitialBlock(void* mem, size_t size);

  // Delete or Destruct all objects owned by the arena.
  void CleanupList();

  inline uint64_t LifeCycleId() const {
    return tag_and_id_ & ~kMessageOwnedArena;
  }

  inline void CacheSerialArena(SerialArena* serial) {
    thread_cache().last_serial_arena = serial;
    thread_cache().last_lifecycle_id_seen = tag_and_id_;
    // TODO(haberman): evaluate whether we would gain efficiency by getting rid
    // of hint_. It's the only write we do to ThreadSafeArena in the allocation
    // path, which will dirty the cache line.

    hint_.store(serial, std::memory_order_release);
  }

  PROTOBUF_NDEBUG_INLINE bool GetSerialArenaFast(SerialArena** arena) {
    if (GetSerialArenaFromThreadCache(arena)) return true;

    // Check whether we own the last accessed SerialArena on this arena. This
    // fast path optimizes the case where a single thread uses multiple arenas.
    ThreadCache* tc = &thread_cache();
    SerialArena* serial = hint_.load(std::memory_order_acquire);
    if (PROTOBUF_PREDICT_TRUE(serial != nullptr && serial->owner() == tc)) {
      *arena = serial;
      return true;
    }
    return false;
  }

  PROTOBUF_NDEBUG_INLINE bool GetSerialArenaFromThreadCache(
      SerialArena** arena) {
    // If this thread already owns a block in this arena then try to use that.
    // This fast path optimizes the case where multiple threads allocate from
    // the same arena.
    ThreadCache* tc = &thread_cache();
    if (PROTOBUF_PREDICT_TRUE(tc->last_lifecycle_id_seen == tag_and_id_)) {
      *arena = tc->last_serial_arena;
      return true;
    }
    return false;
  }
  SerialArena* GetSerialArenaFallback(void* me);

  template <typename Functor>
  void PerSerialArena(Functor fn) {
    // By omitting an Acquire barrier we ensure that any user code that doesn't
    // properly synchronize Reset() or the destructor will throw a TSAN
    // warning.
    SerialArena* serial = threads_.load(std::memory_order_relaxed);

    for (; serial; serial = serial->next()) fn(serial);
  }

  // Releases all memory except the first block, which it returns. The first
  // block might be owned by the user and thus need some extra checks before
  // deleting.
  SerialArena::Memory Free(size_t* space_allocated);

#ifdef _MSC_VER
#pragma warning(disable : 4324)
#endif
  struct alignas(kCacheAlignment) ThreadCache {
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
    // If we are using the ThreadLocalStorage class to store the ThreadCache,
    // then the ThreadCache's default constructor has to be responsible for
    // initializing it.
    ThreadCache()
        : next_lifecycle_id(0),
          last_lifecycle_id_seen(-1),
          last_serial_arena(nullptr) {}
#endif

    // Number of per-thread lifecycle IDs to reserve. Must be a power of two.
    // To reduce contention on a global atomic, each thread reserves a batch of
    // IDs. The following number is calculated based on a stress test with
    // ~6500 threads all frequently allocating a new arena.
    static constexpr size_t kPerThreadIds = 256;
    // Next lifecycle ID available to this thread. We need to reserve a new
    // batch, if `next_lifecycle_id & (kPerThreadIds - 1) == 0`.
    uint64_t next_lifecycle_id;
    // The ThreadCache is considered valid as long as this matches the
    // lifecycle_id of the arena being used.
    uint64_t last_lifecycle_id_seen;
    SerialArena* last_serial_arena;
  };

  // Lifecycle_id can be a highly contended variable when many arenas are
  // created. Make sure that other global variables are not sharing the
  // cache line.
#ifdef _MSC_VER
#pragma warning(disable : 4324)
#endif
  struct alignas(kCacheAlignment) CacheAlignedLifecycleIdGenerator {
    std::atomic<LifecycleIdAtomic> id;
  };
  static CacheAlignedLifecycleIdGenerator lifecycle_id_generator_;
#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
  // iOS does not support __thread keyword so we use a custom thread local
  // storage class we implemented.
  static ThreadCache& thread_cache();
#elif defined(PROTOBUF_USE_DLLS)
  // Thread local variables cannot be exposed through DLL interface but we can
  // wrap them in static functions.
  static ThreadCache& thread_cache();
#else
  static PROTOBUF_THREAD_LOCAL ThreadCache thread_cache_;
  static ThreadCache& thread_cache() { return thread_cache_; }
#endif

  ThreadSafeArenaStatsHandle arena_stats_;

  GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ThreadSafeArena);
  // All protos have pointers back to the arena hence Arena must have
  // pointer stability.
  ThreadSafeArena(ThreadSafeArena&&) = delete;
  ThreadSafeArena& operator=(ThreadSafeArena&&) = delete;

 public:
  // kBlockHeaderSize is sizeof(Block), aligned up to the nearest multiple of 8
  // to protect the invariant that ptr_ always stays at a multiple of 8.
  static constexpr size_t kBlockHeaderSize = SerialArena::kBlockHeaderSize;
  static constexpr size_t kSerialArenaSize =
      (sizeof(SerialArena) + 7) & static_cast<size_t>(-8);
  static_assert(kBlockHeaderSize % 8 == 0,
                "kBlockHeaderSize must be a multiple of 8.");
  static_assert(kSerialArenaSize % 8 == 0,
                "kSerialArenaSize must be a multiple of 8.");
};
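
// A minimal usage sketch of the internal API declared above. Passing
// typeid(void) marks the allocation as intentionally unmonitored (see
// ArenaMetricsCollector::OnAlloc):
//
//   ThreadSafeArena arena;
//   void* p = arena.AllocateAligned(AlignUpTo8(sizeof(double) * 3),
//                                   &typeid(void));
//   uint64_t used = arena.SpaceUsed();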

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#include <google/protobuf/port_undef.inc>

#endif  // GOOGLE_PROTOBUF_ARENA_IMPL_H__
687 | |