1/*
2 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#ifndef SHARE_GC_SHARED_OOPSTORAGE_HPP
26#define SHARE_GC_SHARED_OOPSTORAGE_HPP
27
28#include "memory/allocation.hpp"
29#include "oops/oop.hpp"
30#include "utilities/globalDefinitions.hpp"
31#include "utilities/macros.hpp"
32#include "utilities/singleWriterSynchronizer.hpp"
33
34class Mutex;
35class outputStream;
36
// OopStorage supports management of off-heap references to objects allocated
// in the Java heap. An OopStorage object provides a set of Java object
// references (oop values), which clients refer to via oop* handles to the
// associated OopStorage entries. Clients allocate entries to create a
// (possibly weak) reference to a Java object, use that reference, and release
// the reference when no longer needed.
//
// The garbage collector must know about all OopStorage objects and their
// reference strength. OopStorage provides the garbage collector with support
// for iteration over all the allocated entries.
//
// There are several categories of interaction with an OopStorage object.
//
// (1) allocation and release of entries, by the mutator or the VM.
// (2) iteration by the garbage collector, possibly concurrent with mutator.
// (3) iteration by other, non-GC, tools (only at safepoints).
// (4) cleanup of unused internal storage, possibly concurrent with mutator.
//
// A goal of OopStorage is to make these interactions thread-safe, while
// minimizing potential lock contention issues within and between these
// categories. In particular, support for concurrent iteration by the garbage
// collector, under certain restrictions, is required. Further, it must not
// block nor be blocked by other operations for long periods.
//
// Internally, OopStorage is a set of Block objects, from which entries are
// allocated and released. A block contains an oop[] and a bitmask indicating
// which entries are in use (have been allocated and not yet released). New
// blocks are constructed and added to the storage object when an entry
// allocation request is made and there are no blocks with unused entries.
// Blocks may be removed and deleted when empty.
//
// There are two important (and somewhat intertwined) protocols governing
// concurrent access to a storage object. These are the Concurrent Iteration
// Protocol and the Allocation Protocol. See the ParState class for a
// discussion of concurrent iteration and the management of thread
// interactions for this protocol. Similarly, see the allocate() function for
// a discussion of allocation.
74
class OopStorage : public CHeapObj<mtGC> {
public:
  OopStorage(const char* name, Mutex* allocation_mutex, Mutex* active_mutex);
  ~OopStorage();

  // These count and usage accessors are racy unless at a safepoint.

  // The number of allocated and not yet released entries.
  size_t allocation_count() const;

  // The number of blocks of entries. Useful for sizing parallel iteration.
  size_t block_count() const;

  // Total number of blocks * memory allocation per block, plus
  // bookkeeping overhead, including this storage object.
  size_t total_memory_usage() const;

  // Status of a pointer with respect to this storage object; returned by
  // allocation_status().
  enum EntryStatus {
    INVALID_ENTRY,              // Not an entry of this storage object.
    UNALLOCATED_ENTRY,          // An entry of this storage, not currently allocated.
    ALLOCATED_ENTRY             // A currently allocated entry of this storage.
  };

  // Locks _allocation_mutex.
  // precondition: ptr != NULL.
  EntryStatus allocation_status(const oop* ptr) const;

  // Allocates and returns a new entry. Returns NULL if memory allocation
  // failed. Locks _allocation_mutex.
  // postcondition: *result == NULL.
  oop* allocate();

  // Deallocates ptr. No locking.
  // precondition: ptr is a valid allocated entry.
  // precondition: *ptr == NULL.
  void release(const oop* ptr);

  // Releases all the ptrs. Possibly faster than individual calls to
  // release(oop*). Best if ptrs is sorted by address. No locking.
  // precondition: All elements of ptrs are valid allocated entries.
  // precondition: *ptrs[i] == NULL, for i in [0,size).
  void release(const oop* const* ptrs, size_t size);

  // Applies f to each allocated entry's location. f must be a function or
  // function object. Assume p is either a const oop* or an oop*, depending
  // on whether the associated storage is const or non-const, respectively.
  // Then f(p) must be a valid expression. The result of invoking f(p) must
  // be implicitly convertible to bool. Iteration terminates and returns
  // false if any invocation of f returns false. Otherwise, the result of
  // iteration is true.
  // precondition: at safepoint.
  template<typename F> inline bool iterate_safepoint(F f);
  template<typename F> inline bool iterate_safepoint(F f) const;

  // oops_do and weak_oops_do are wrappers around iterate_safepoint, providing
  // an adaptation layer allowing the use of existing is-alive closures and
  // OopClosures. Assume p is either const oop* or oop*, depending on whether
  // the associated storage is const or non-const, respectively. Then
  //
  // - closure->do_oop(p) must be a valid expression whose value is ignored.
  //
  // - is_alive->do_object_b(*p) must be a valid expression whose value is
  // convertible to bool.
  //
  // For weak_oops_do, if *p == NULL then neither is_alive nor closure will be
  // invoked for p. If is_alive->do_object_b(*p) is false, then closure will
  // not be invoked on p, and *p will be set to NULL.

  template<typename Closure> inline void oops_do(Closure* closure);
  template<typename Closure> inline void oops_do(Closure* closure) const;
  template<typename Closure> inline void weak_oops_do(Closure* closure);

  template<typename IsAliveClosure, typename Closure>
  inline void weak_oops_do(IsAliveClosure* is_alive, Closure* closure);

  // Parallel iteration is for the exclusive use of the GC.
  // Other clients must use serial iteration.
  template<bool concurrent, bool is_const> class ParState;

  // Service thread cleanup support.

  // Called by the service thread to process any pending cleanups for this
  // storage object. Drains the _deferred_updates list, and deletes empty
  // blocks. Stops deleting if there is an in-progress concurrent
  // iteration. Locks both the _allocation_mutex and the _active_mutex, and
  // may safepoint. Deletion may be throttled, with only some available
  // work performed, in order to allow other Service thread subtasks to run.
  // Returns true if there may be more work to do, false if nothing to do.
  bool delete_empty_blocks();

  // Called by safepoint cleanup to notify the service thread (via
  // Service_lock) that there may be some OopStorage objects with pending
  // cleanups to process.
  static void trigger_cleanup_if_needed();

  // Called by the service thread (while holding Service_lock) to test
  // for pending cleanup requests, and resets the request state to allow
  // recognition of new requests. Returns true if there was a pending
  // request.
  static bool has_cleanup_work_and_reset();

  // Debugging and logging support.
  const char* name() const;
  void print_on(outputStream* st) const PRODUCT_RETURN;

  // Provides access to storage internals, for unit testing.
  // Declare, but not define, the public class OopStorage::TestAccess.
  // That class is defined as part of the unit-test. It "exports" the needed
  // private types by providing public typedefs for them.
  class TestAccess;

  // xlC on AIX can't compile test_oopStorage.cpp with following private
  // classes. C++03 introduced access for nested classes with DR45, but xlC
  // version 12 rejects it.
NOT_AIX( private: )
  class Block;                  // Fixed-size array of oops, plus bookkeeping.
  class ActiveArray;            // Array of Blocks, plus bookkeeping.
  class AllocationListEntry;    // Provides AllocationList links in a Block.

  // Doubly-linked list of Blocks. The list only records head and tail;
  // the per-block links are provided by AllocationListEntry (see above).
  class AllocationList {
    const Block* _head;
    const Block* _tail;

    // Noncopyable.
    AllocationList(const AllocationList&);
    AllocationList& operator=(const AllocationList&);

  public:
    AllocationList();
    ~AllocationList();

    // Accessors for the ends of the list; the c-prefixed forms are the
    // const variants.
    Block* head();
    Block* tail();
    const Block* chead() const;
    const Block* ctail() const;

    // Neighbors of block within the list.
    Block* prev(Block& block);
    Block* next(Block& block);

    const Block* prev(const Block& block) const;
    const Block* next(const Block& block) const;

    // Insert block at the front / back of the list, or remove it.
    void push_front(const Block& block);
    void push_back(const Block& block);
    void unlink(const Block& block);
  };

private:
  const char* _name;                  // Name for logging; returned by name().
  ActiveArray* _active_array;         // All blocks of this storage object; see expand_active_array().
  AllocationList _allocation_list;    // Doubly-linked list of blocks, used by allocation; see allocate().
AIX_ONLY(public:)               // xlC 12 on AIX doesn't implement C++ DR45.
  // List of blocks with pending updates; drained by delete_empty_blocks()
  // and reduce_deferred_updates().
  Block* volatile _deferred_updates;
AIX_ONLY(private:)

  Mutex* _allocation_mutex;           // Locked by allocate() and allocation_status().
  Mutex* _active_mutex;               // Locked (with _allocation_mutex) by delete_empty_blocks().

  // Volatile for racy unlocked accesses.
  volatile size_t _allocation_count;

  // Protection for _active_array.
  mutable SingleWriterSynchronizer _protect_active;

  // mutable because this gets set even for const iteration.
  mutable int _concurrent_iteration_count;

  // Set when there may be cleanup work pending; see record_needs_cleanup().
  volatile bool _needs_cleanup;

  // Helpers for allocate().
  bool try_add_block();
  Block* block_for_allocation();

  // The block containing ptr, or NULL if ptr is not an entry of this storage.
  Block* find_block_or_null(const oop* ptr) const;
  void delete_empty_block(const Block& block);
  bool reduce_deferred_updates();
AIX_ONLY(public:)               // xlC 12 on AIX doesn't implement C++ DR45.
  void record_needs_cleanup();
AIX_ONLY(private:)

  // Managing _active_array.
  bool expand_active_array();
  void replace_active_array(ActiveArray* new_array);
  ActiveArray* obtain_active_array() const;
  void relinquish_block_array(ActiveArray* array) const;
  class WithActiveArray;        // RAII helper for active array access.

  // Shared implementation of iterate_safepoint; Storage is OopStorage,
  // possibly const-qualified.
  template<typename F, typename Storage>
  static bool iterate_impl(F f, Storage* storage);

  // Implementation support for parallel iteration
  class BasicParState;

  // Wrapper for OopClosure-style function, so it can be used with
  // iterate. Assume p is of type oop*. Then cl->do_oop(p) must be a
  // valid expression whose value may be ignored.
  template<typename Closure> class OopFn;
  template<typename Closure> static OopFn<Closure> oop_fn(Closure* cl);

  // Wrapper for BoolObjectClosure + iteration handler pair, so they
  // can be used with iterate.
  template<typename IsAlive, typename F> class IfAliveFn;
  template<typename IsAlive, typename F>
  static IfAliveFn<IsAlive, F> if_alive_fn(IsAlive* is_alive, F f);

  // Wrapper for iteration handler, automatically skipping NULL entries.
  template<typename F> class SkipNullFn;
  template<typename F> static SkipNullFn<F> skip_null_fn(F f);
};
284
285#endif // SHARE_GC_SHARED_OOPSTORAGE_HPP
286