/*
 * Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/gcArguments.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/globals.hpp"

jint EpsilonHeap::initialize() {
  size_t align = HeapAlignment;
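  // Round both the initial and maximum heap sizes up to the heap alignment
  // before reserving and committing the backing storage.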
  size_t init_byte_size = align_up(InitialHeapSize, align);
  size_t max_byte_size = align_up(MaxHeapSize, align);

  // Initialize backing storage
  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
  MemRegion reserved_region((HeapWord*)_virtual_space.low_boundary(), (HeapWord*)_virtual_space.high_boundary());

  initialize_reserved_region(reserved_region.start(), reserved_region.end());

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
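  //   _max_tlab_size       -- TLAB size cap, in words, bounded by both the global
  //                           TLAB limit and EpsilonMaxTLABSize
  //   _step_counter_update -- bytes allocated between perf counter updates,
  //                           at most 1/16th of the heap per step
  //   _step_heap_print     -- bytes allocated between occupancy log lines;
  //                           SIZE_MAX effectively disables periodic printing
  //   _decay_time_ns       -- elastic TLAB decay interval, in nanoseconds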
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  // All done, print out the configuration
  if (init_byte_size != max_byte_size) {
    log_info(gc)("Resizeable heap; starting at " SIZE_FORMAT "M, max: " SIZE_FORMAT "M, step: " SIZE_FORMAT "M",
                 init_byte_size / M, max_byte_size / M, EpsilonMinHeapExpand / M);
  } else {
    log_info(gc)("Non-resizeable heap; start/max: " SIZE_FORMAT "M", init_byte_size / M);
  }

  if (UseTLAB) {
    log_info(gc)("Using TLAB allocation; max: " SIZE_FORMAT "K", _max_tlab_size * HeapWordSize / K);
    if (EpsilonElasticTLAB) {
      log_info(gc)("Elastic TLABs enabled; elasticity: %.2fx", EpsilonTLABElasticity);
    }
    if (EpsilonElasticTLABDecay) {
      log_info(gc)("Elastic TLABs decay enabled; decay time: " SIZE_FORMAT "ms", EpsilonTLABDecayTime);
    }
  } else {
    log_info(gc)("Not using TLAB allocation");
  }

  return JNI_OK;
}

void EpsilonHeap::post_initialize() {
  CollectedHeap::post_initialize();
}

void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);
  return memory_pools;
}

size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  // Return the max allocatable TLAB size, and let the allocation path
  // figure out the actual TLAB allocation size.
  return _max_tlab_size;
}

EpsilonHeap* EpsilonHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to EpsilonHeap::heap()");
  assert(heap->kind() == CollectedHeap::Epsilon, "Not an Epsilon heap");
  return (EpsilonHeap*)heap;
}

HeapWord* EpsilonHeap::allocate_work(size_t size) {
  assert(is_object_aligned(size), "Allocation size should be aligned: " SIZE_FORMAT, size);

  HeapWord* res = _space->par_allocate(size);

  while (res == NULL) {
    // Allocation failed, attempt expansion, and retry:
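    // Heap_lock serializes the expansion among allocating threads, while the
    // fast path above (lock-free par_allocate) never takes it.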
    MutexLocker ml(Heap_lock);

    size_t space_left = max_capacity() - capacity();
    size_t want_space = MAX2(size, EpsilonMinHeapExpand);

    if (want_space < space_left) {
      // Enough space to expand in bulk:
      bool expand = _virtual_space.expand_by(want_space);
      assert(expand, "Should be able to expand");
    } else if (size < space_left) {
      // No space to expand in bulk, but this allocation is still possible;
      // take all the remaining space:
      bool expand = _virtual_space.expand_by(space_left);
      assert(expand, "Should be able to expand");
    } else {
      // No space left:
      return NULL;
    }

    _space->set_end((HeapWord *) _virtual_space.high());
    res = _space->par_allocate(size);
  }

  size_t used = _space->used();

  // Allocation successful, update counters
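  // _last_counter_update and _last_heap_print record the occupancy at the last
  // update/print. The CAS lets exactly one of the racing allocating threads claim
  // the current step; the others skip the update.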
  {
    size_t last = _last_counter_update;
    if ((used - last >= _step_counter_update) && Atomic::cmpxchg(used, &_last_counter_update, last) == last) {
      _monitoring_support->update_counters();
    }
  }

  // ...and print the occupancy line, if needed
  {
    size_t last = _last_heap_print;
    if ((used - last >= _step_heap_print) && Atomic::cmpxchg(used, &_last_heap_print, last) == last) {
      print_heap_info(used);
      print_metaspace_info();
    }
  }

  assert(is_object_aligned(res), "Object should be aligned: " PTR_FORMAT, p2i(res));
  return res;
}

HeapWord* EpsilonHeap::allocate_new_tlab(size_t min_size,
                                         size_t requested_size,
                                         size_t* actual_size) {
  Thread* thread = Thread::current();

  // Defaults in case elastic paths are not taken
  bool fits = true;
  size_t size = requested_size;
  size_t ergo_tlab = requested_size;
  int64_t time = 0;

  if (EpsilonElasticTLAB) {
    ergo_tlab = EpsilonThreadLocalData::ergo_tlab_size(thread);

    if (EpsilonElasticTLABDecay) {
      int64_t last_time = EpsilonThreadLocalData::last_tlab_time(thread);
      time = (int64_t) os::javaTimeNanos();

      assert(last_time <= time, "time should be monotonic");

      // If the thread had not allocated recently, retract the ergonomic size.
      // This conserves memory when the thread had an initial burst of allocations,
      // and then started allocating only sporadically.
      if (last_time != 0 && (time - last_time > _decay_time_ns)) {
        ergo_tlab = 0;
        EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
      }
    }

    // If we can fit the allocation under current TLAB size, do so.
    // Otherwise, we want to elastically increase the TLAB size.
    fits = (requested_size <= ergo_tlab);
    if (!fits) {
      size = (size_t) (ergo_tlab * EpsilonTLABElasticity);
    }
  }

  // Always honor boundaries
  size = MAX2(min_size, MIN2(_max_tlab_size, size));

  // Always honor alignment
  size = align_up(size, MinObjAlignment);

  // Check that adjustments did not break local and global invariants
  assert(is_object_aligned(size),
         "Size honors object alignment: " SIZE_FORMAT, size);
  assert(min_size <= size,
         "Size honors min size: " SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                  "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_work(size);

  if (res != NULL) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed; reset ergonomics to try to fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}

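// Non-TLAB ("slow path") allocations also come straight from the shared space.
// Epsilon never collects, so the GC overhead limit can never be exceeded.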
HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_work(size);
}

void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
      // re-enter the safepoint again very soon.

      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
      log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
  }
  _monitoring_support->update_counters();
}

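// Epsilon never reclaims memory; full collection requests are routed to collect(),
// which only handles the metaspace sizing causes and ignores everything else.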
void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}

void EpsilonHeap::safe_object_iterate(ObjectClosure *cl) {
  _space->safe_object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  // Cast away constness:
  ((VirtualSpace)_virtual_space).print_on(st);

  st->print_cr("Allocation space:");
  _space->print_on(st);

  MetaspaceUtils::print_on(st);
}

void EpsilonHeap::print_tracing_info() const {
  print_heap_info(used());
  print_metaspace_info();
}

void EpsilonHeap::print_heap_info(size_t used) const {
  size_t reserved = max_capacity();
  size_t committed = capacity();

  if (reserved != 0) {
    log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                 SIZE_FORMAT "%s (%.2f%%) used",
                 byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved),
                 byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                 committed * 100.0 / reserved,
                 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                 used * 100.0 / reserved);
  } else {
    log_info(gc)("Heap: no reliable data");
  }
}

void EpsilonHeap::print_metaspace_info() const {
  size_t reserved = MetaspaceUtils::reserved_bytes();
  size_t committed = MetaspaceUtils::committed_bytes();
  size_t used = MetaspaceUtils::used_bytes();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                            SIZE_FORMAT "%s (%.2f%%) used",
                            byte_size_in_proper_unit(reserved), proper_unit_for_byte_size(reserved),
                            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                            committed * 100.0 / reserved,
                            byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                            used * 100.0 / reserved);
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}