| 1 | /* |
| 2 | * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | #ifndef SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_HPP |
| 25 | #define SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_HPP |
| 26 | |
| 27 | #include "jfr/utilities/jfrAllocation.hpp" |
| 28 | #include "jfr/utilities/jfrDoublyLinkedList.hpp" |
| 29 | #include "jfr/utilities/jfrIterator.hpp" |
| 30 | #include "jfr/utilities/jfrTypes.hpp" |
| 31 | #include "runtime/os.hpp" |
| 32 | #include "utilities/globalDefinitions.hpp" |
| 33 | #include "utilities/macros.hpp" |
| 34 | |
| 35 | template <typename T, template <typename> class RetrievalType, typename Callback> |
| 36 | class JfrMemorySpace : public JfrCHeapObj { |
| 37 | public: |
| 38 | typedef T Type; |
| 39 | typedef RetrievalType<JfrMemorySpace<T, RetrievalType, Callback> > Retrieval; |
| 40 | typedef JfrDoublyLinkedList<Type> List; |
| 41 | typedef StopOnNullIterator<List> Iterator; |
| 42 | private: |
| 43 | List _free; |
| 44 | List _full; |
| 45 | size_t _min_elem_size; |
| 46 | size_t _limit_size; |
| 47 | size_t _cache_count; |
| 48 | Callback* _callback; |
| 49 | |
| 50 | bool should_populate_cache() const { return _free.count() < _cache_count; } |
| 51 | |
| 52 | public: |
| 53 | JfrMemorySpace(size_t min_elem_size, size_t limit_size, size_t cache_count, Callback* callback); |
| 54 | ~JfrMemorySpace(); |
| 55 | bool initialize(); |
| 56 | |
| 57 | size_t min_elem_size() const { return _min_elem_size; } |
| 58 | size_t limit_size() const { return _limit_size; } |
| 59 | |
| 60 | bool has_full() const { return _full.head() != NULL; } |
| 61 | bool has_free() const { return _free.head() != NULL; } |
| 62 | bool is_full_empty() const { return !has_full(); } |
| 63 | bool is_free_empty() const { return !has_free(); } |
| 64 | |
| 65 | size_t full_count() const { return _full.count(); } |
| 66 | size_t free_count() const { return _free.count(); } |
| 67 | |
| 68 | List& full() { return _full; } |
| 69 | const List& full() const { return _full; } |
| 70 | List& free() { return _free; } |
| 71 | const List& free() const { return _free; } |
| 72 | |
| 73 | Type* full_head() { return _full.head(); } |
| 74 | Type* full_tail() { return _full.tail(); } |
| 75 | Type* free_head() { return _free.head(); } |
| 76 | Type* free_tail() { return _free.tail(); } |
| 77 | |
| 78 | void insert_free_head(Type* t) { _free.prepend(t); } |
| 79 | void insert_free_tail(Type* t) { _free.append(t); } |
| 80 | void insert_free_tail(Type* t, Type* tail, size_t count) { _free.append_list(t, tail, count); } |
| 81 | void insert_full_head(Type* t) { _full.prepend(t); } |
| 82 | void insert_full_tail(Type* t) { _full.append(t); } |
| 83 | void insert_full_tail(Type* t, Type* tail, size_t count) { _full.append_list(t, tail, count); } |
| 84 | |
| 85 | Type* remove_free(Type* t) { return _free.remove(t); } |
| 86 | Type* remove_full(Type* t) { return _full.remove(t); } |
| 87 | Type* remove_free_tail() { _free.remove(_free.tail()); } |
| 88 | Type* remove_full_tail() { return _full.remove(_full.tail()); } |
| 89 | Type* clear_full(bool return_tail = false) { return _full.clear(return_tail); } |
| 90 | Type* clear_free(bool return_tail = false) { return _free.clear(return_tail); } |
| 91 | void release_full(Type* t); |
| 92 | void release_free(Type* t); |
| 93 | |
| 94 | void register_full(Type* t, Thread* thread) { _callback->register_full(t, thread); } |
| 95 | void lock() { _callback->lock(); } |
| 96 | void unlock() { _callback->unlock(); } |
| 97 | DEBUG_ONLY(bool is_locked() const { return _callback->is_locked(); }) |
| 98 | |
| 99 | Type* allocate(size_t size); |
| 100 | void deallocate(Type* t); |
| 101 | Type* get(size_t size, Thread* thread) { return Retrieval::get(size, this, thread); } |
| 102 | |
| 103 | template <typename IteratorCallback, typename IteratorType> |
| 104 | void iterate(IteratorCallback& callback, bool full = true, jfr_iter_direction direction = forward); |
| 105 | |
| 106 | debug_only(bool in_full_list(const Type* t) const { return _full.in_list(t); }) |
| 107 | debug_only(bool in_free_list(const Type* t) const { return _free.in_list(t); }) |
| 108 | }; |
| 109 | |
| 110 | // allocations are even multiples of the mspace min size |
| 111 | inline u8 align_allocation_size(u8 requested_size, size_t min_elem_size) { |
| 112 | assert((int)min_elem_size % os::vm_page_size() == 0, "invariant" ); |
| 113 | u8 alloc_size_bytes = min_elem_size; |
| 114 | while (requested_size > alloc_size_bytes) { |
| 115 | alloc_size_bytes <<= 1; |
| 116 | } |
| 117 | assert((int)alloc_size_bytes % os::vm_page_size() == 0, "invariant" ); |
| 118 | return alloc_size_bytes; |
| 119 | } |
| 120 | |
| 121 | template <typename T, template <typename> class RetrievalType, typename Callback> |
| 122 | T* JfrMemorySpace<T, RetrievalType, Callback>::allocate(size_t size) { |
| 123 | const u8 aligned_size_bytes = align_allocation_size(size, _min_elem_size); |
| 124 | void* const allocation = JfrCHeapObj::new_array<u1>(aligned_size_bytes + sizeof(T)); |
| 125 | if (allocation == NULL) { |
| 126 | return NULL; |
| 127 | } |
| 128 | T* const t = new (allocation) T; |
| 129 | assert(t != NULL, "invariant" ); |
| 130 | if (!t->initialize(sizeof(T), aligned_size_bytes)) { |
| 131 | JfrCHeapObj::free(t, aligned_size_bytes + sizeof(T)); |
| 132 | return NULL; |
| 133 | } |
| 134 | return t; |
| 135 | } |
| 136 | |
| 137 | template <typename T, template <typename> class RetrievalType, typename Callback> |
| 138 | void JfrMemorySpace<T, RetrievalType, Callback>::deallocate(T* t) { |
| 139 | assert(t != NULL, "invariant" ); |
| 140 | assert(!_free.in_list(t), "invariant" ); |
| 141 | assert(!_full.in_list(t), "invariant" ); |
| 142 | assert(t != NULL, "invariant" ); |
| 143 | JfrCHeapObj::free(t, t->total_size()); |
| 144 | } |
| 145 | |
// RAII guard for an mspace: acquires the mspace lock on construction and
// releases it when the guard goes out of scope.
template <typename Mspace>
class MspaceLock {
 private:
  Mspace* _mspace;
 public:
  MspaceLock(Mspace* mspace) : _mspace(mspace) {
    _mspace->lock();
  }
  ~MspaceLock() {
    _mspace->unlock();
  }
};
| 154 | |
// Iteration operation that releases elements back to an mspace.
// process() is defined out-of-line elsewhere; _release_full controls whether
// elements on the full list are released as well (default true).
template <typename Mspace>
class ReleaseOp : public StackObj {
 private:
  Mspace* _mspace;
  Thread* _thread;
  bool _release_full;
 public:
  typedef typename Mspace::Type Type;
  ReleaseOp(Mspace* mspace, Thread* thread, bool release_full = true) : _mspace(mspace), _thread(thread), _release_full(release_full) {}
  bool process(Type* t);
  // Always reports 0 released bytes; callers use this op for its side effects only.
  size_t processed() const { return 0; }
};
| 167 | |
| 168 | #endif // SHARE_JFR_RECORDER_STORAGE_JFRMEMORYSPACE_HPP |
| 169 | |