/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zUtils.inline.hpp"
#include "memory/universe.hpp"
#include "runtime/mutexLocker.hpp"

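// Checked downcast from the global CollectedHeap to ZCollectedHeap. Only
// valid once Universe::heap() has been initialized with a ZGC heap.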
ZCollectedHeap* ZCollectedHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ZCollectedHeap::heap()");
  assert(heap->kind() == CollectedHeap::Z, "Invalid name");
  return (ZCollectedHeap*)heap;
}

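// Members are constructed in declaration order, so _initialize (which runs
// ZGC's one-time initialization) executes after the barrier set has been
// constructed but before the heap itself.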
ZCollectedHeap::ZCollectedHeap() :
    _soft_ref_policy(),
    _barrier_set(),
    _initialize(&_barrier_set),
    _heap(),
    _director(new ZDirector()),
    _driver(new ZDriver()),
    _uncommitter(new ZUncommitter()),
    _stat(new ZStat()),
    _runtime_workers() {}

CollectedHeap::Name ZCollectedHeap::kind() const {
  return CollectedHeap::Z;
}

const char* ZCollectedHeap::name() const {
  return ZName;
}

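// The heap itself is set up as part of member construction above. All that
// remains here is to check that it succeeded and to register the reserved
// address range with the rest of the VM.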
jint ZCollectedHeap::initialize() {
  if (!_heap.is_initialized()) {
    return JNI_ENOMEM;
  }

  initialize_reserved_region((HeapWord*)ZAddressReservedStart,
                             (HeapWord*)ZAddressReservedEnd);

  return JNI_OK;
}

void ZCollectedHeap::initialize_serviceability() {
  _heap.serviceability_initialize();
}

void ZCollectedHeap::stop() {
  _director->stop();
  _driver->stop();
  _uncommitter->stop();
  _stat->stop();
}

SoftRefPolicy* ZCollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}

size_t ZCollectedHeap::max_capacity() const {
  return _heap.max_capacity();
}

size_t ZCollectedHeap::capacity() const {
  return _heap.capacity();
}

size_t ZCollectedHeap::used() const {
  return _heap.used();
}

size_t ZCollectedHeap::unused() const {
  return _heap.unused();
}

bool ZCollectedHeap::is_maximal_no_gc() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

bool ZCollectedHeap::is_in(const void* p) const {
  return _heap.is_in((uintptr_t)p);
}

uint32_t ZCollectedHeap::hash_oop(oop obj) const {
  return _heap.hash_oop(obj);
}

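// TLAB allocation. The requested size is given in words and converted to
// bytes before being passed on to ZHeap. On success, the actual size always
// equals the requested size (ZGC never hands out a smaller TLAB).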
HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
  const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);

  if (addr != 0) {
    *actual_size = requested_size;
  }

  return (HeapWord*)addr;
}

HeapWord* ZCollectedHeap::mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded) {
  const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(size));
  return (HeapWord*)_heap.alloc_object(size_in_bytes);
}

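// Called after a metadata allocation has failed. Applies progressively more
// expensive remedies: an asynchronous GC plus metaspace expansion first, then
// a synchronous GC that also clears soft references, before giving up.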
MetaWord* ZCollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Start asynchronous GC
  collect(GCCause::_metadata_GC_threshold);

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Start synchronous GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != NULL) {
    return result;
  }

  // Out of memory
  return NULL;
}

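// Collection requests are handed over to the ZDriver thread, which decides,
// based on the GC cause, whether the request blocks until the cycle completes.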
void ZCollectedHeap::collect(GCCause::Cause cause) {
  _driver->collect(cause);
}

void ZCollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  // These collection requests are ignored since ZGC can't run a synchronous
  // GC cycle from within the VM thread. This is considered benign, since the
  // only GC causes coming in here should be heap dumper and heap inspector.
  // Neither of them really needs a GC to happen, but without one the result
  // of their heap iterations might be less accurate, since they might include
  // objects that would otherwise have been collected by a GC.
  assert(Thread::current()->is_VM_thread(), "Should be the VM thread");
  guarantee(cause == GCCause::_heap_dump ||
            cause == GCCause::_heap_inspection, "Invalid cause");
}

void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  // Not supported
  ShouldNotReachHere();
}

bool ZCollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
  return _heap.tlab_capacity();
}

size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
  return _heap.tlab_used();
}

size_t ZCollectedHeap::max_tlab_size() const {
  return _heap.max_tlab_size();
}

size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  return _heap.unsafe_max_tlab_alloc();
}

bool ZCollectedHeap::can_elide_tlab_store_barriers() const {
  return false;
}

bool ZCollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  // Not supported
  ShouldNotReachHere();
  return true;
}

bool ZCollectedHeap::card_mark_must_follow_store() const {
  // Not supported
  ShouldNotReachHere();
  return false;
}

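// Serviceability support. ZGC exposes a single memory manager and a single
// memory pool to the java.lang.management API.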
GrowableArray<GCMemoryManager*> ZCollectedHeap::memory_managers() {
  return GrowableArray<GCMemoryManager*>(1, 1, _heap.serviceability_memory_manager());
}

GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
  return GrowableArray<MemoryPool*>(1, 1, _heap.serviceability_memory_pool());
}

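// Both iteration variants visit every object in the heap, including objects
// that are only weakly reachable (visit_weaks is true).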
void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}

void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  _heap.object_iterate(cl, true /* visit_weaks */);
}

HeapWord* ZCollectedHeap::block_start(const void* addr) const {
  return (HeapWord*)_heap.block_start((uintptr_t)addr);
}

bool ZCollectedHeap::block_is_obj(const HeapWord* addr) const {
  return _heap.block_is_obj((uintptr_t)addr);
}

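// nmethod bookkeeping is delegated to ZNMethod, which maintains the table of
// compiled-code roots known to ZGC.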
void ZCollectedHeap::register_nmethod(nmethod* nm) {
  ZNMethod::register_nmethod(nm);
}

void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
  ZNMethod::unregister_nmethod(nm);
}

void ZCollectedHeap::flush_nmethod(nmethod* nm) {
  ZNMethod::flush_nmethod(nm);
}

void ZCollectedHeap::verify_nmethod(nmethod* nm) {
  // Does nothing
}

WorkGang* ZCollectedHeap::get_safepoint_workers() {
  return _runtime_workers.workers();
}

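// Time since the last GC cycle completed, derived from ZGC's cycle
// statistics and converted to milliseconds.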
jlong ZCollectedHeap::millis_since_last_gc() {
  return ZStatCycle::time_since_last() / MILLIUNITS;
}

void ZCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  tc->do_thread(_director);
  tc->do_thread(_driver);
  tc->do_thread(_uncommitter);
  tc->do_thread(_stat);
  _heap.worker_threads_do(tc);
  _runtime_workers.threads_do(tc);
}

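// The summary is expressed in HeapWords, so the byte-sized capacities are
// converted before the boundaries are computed from the reserved region.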
VirtualSpaceSummary ZCollectedHeap::create_heap_space_summary() {
  const size_t capacity_in_words = capacity() / HeapWordSize;
  const size_t max_capacity_in_words = max_capacity() / HeapWordSize;
  return VirtualSpaceSummary(reserved_region().start(),
                             reserved_region().start() + capacity_in_words,
                             reserved_region().start() + max_capacity_in_words);
}

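// Safepoint begin/end hooks. Concurrent GC threads that are part of the
// suspendible thread set are brought to a stop before the safepoint is
// reached, and released again afterwards.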
void ZCollectedHeap::safepoint_synchronize_begin() {
  SuspendibleThreadSet::synchronize();
}

void ZCollectedHeap::safepoint_synchronize_end() {
  SuspendibleThreadSet::desynchronize();
}

void ZCollectedHeap::prepare_for_verify() {
  // Does nothing
}

void ZCollectedHeap::print_on(outputStream* st) const {
  _heap.print_on(st);
}

void ZCollectedHeap::print_on_error(outputStream* st) const {
  CollectedHeap::print_on_error(st);

  st->print_cr("Address Space");
  st->print_cr(" Start:             " PTR_FORMAT, ZAddressSpaceStart);
  st->print_cr(" End:               " PTR_FORMAT, ZAddressSpaceEnd);
  st->print_cr(" Size:              " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressSpaceSize, ZAddressSpaceSize);
  st->print_cr("Heap");
  st->print_cr(" GlobalPhase:       %u", ZGlobalPhase);
  st->print_cr(" GlobalSeqNum:      %u", ZGlobalSeqNum);
  st->print_cr(" Offset Max:        " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZAddressOffsetMax, ZAddressOffsetMax);
  st->print_cr(" Page Size Small:   " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeSmall, ZPageSizeSmall);
  st->print_cr(" Page Size Medium:  " SIZE_FORMAT_W(-15) " (" PTR_FORMAT ")", ZPageSizeMedium, ZPageSizeMedium);
  st->print_cr("Metadata Bits");
  st->print_cr(" Good:              " PTR_FORMAT, ZAddressGoodMask);
  st->print_cr(" Bad:               " PTR_FORMAT, ZAddressBadMask);
  st->print_cr(" WeakBad:           " PTR_FORMAT, ZAddressWeakBadMask);
  st->print_cr(" Marked:            " PTR_FORMAT, ZAddressMetadataMarked);
  st->print_cr(" Remapped:          " PTR_FORMAT, ZAddressMetadataRemapped);
}

void ZCollectedHeap::print_extended_on(outputStream* st) const {
  _heap.print_extended_on(st);
}

void ZCollectedHeap::print_gc_threads_on(outputStream* st) const {
  _director->print_on(st);
  st->cr();
  _driver->print_on(st);
  st->cr();
  _uncommitter->print_on(st);
  st->cr();
  _stat->print_on(st);
  st->cr();
  _heap.print_worker_threads_on(st);
  _runtime_workers.print_threads_on(st);
}

void ZCollectedHeap::print_tracing_info() const {
  // Does nothing
}

void ZCollectedHeap::verify(VerifyOption option /* ignored */) {
  _heap.verify();
}

bool ZCollectedHeap::is_oop(oop object) const {
  return CollectedHeap::is_oop(object) && _heap.is_oop(object);
}