/*
 * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

25 | #include "precompiled.hpp" |
26 | #include "gc/g1/g1AllocRegion.inline.hpp" |
27 | #include "gc/g1/g1EvacStats.inline.hpp" |
28 | #include "gc/g1/g1CollectedHeap.inline.hpp" |
29 | #include "logging/log.hpp" |
30 | #include "logging/logStream.hpp" |
31 | #include "memory/resourceArea.hpp" |
32 | #include "runtime/orderAccess.hpp" |
33 | #include "utilities/align.hpp" |
34 | |
35 | G1CollectedHeap* G1AllocRegion::_g1h = NULL; |
36 | HeapRegion* G1AllocRegion::_dummy_region = NULL; |
37 | |
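// One-time setup of the statics shared by all G1AllocRegion instances:
// the G1CollectedHeap and the dummy region that stands in for "no active
// region". The dummy region is completely full, so every allocation
// attempt on it fails and diverts callers to the slow path.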
void G1AllocRegion::setup(G1CollectedHeap* g1h, HeapRegion* dummy_region) {
  assert(_dummy_region == NULL, "should be set once");
  assert(dummy_region != NULL, "pre-condition");
  assert(dummy_region->free() == 0, "pre-condition");

  // Make sure that any allocation attempt on this region will fail
  // and will not trigger any asserts.
  assert(dummy_region->allocate_no_bot_updates(1) == NULL, "should fail");
  assert(dummy_region->allocate(1) == NULL, "should fail");
  DEBUG_ONLY(size_t assert_tmp);
  assert(dummy_region->par_allocate_no_bot_updates(1, 1, &assert_tmp) == NULL, "should fail");
  assert(dummy_region->par_allocate(1, 1, &assert_tmp) == NULL, "should fail");

  _g1h = g1h;
  _dummy_region = dummy_region;
}

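// Fill the unused tail of alloc_region with a dummy object so that no
// thread can allocate out of it any more. Returns the number of bytes
// wasted in the region: the bytes filled plus any leftover chunk too
// small to hold even a minimal object.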
size_t G1AllocRegion::fill_up_remaining_space(HeapRegion* alloc_region) {
  assert(alloc_region != NULL && alloc_region != _dummy_region,
         "pre-condition");
  size_t result = 0;

  // Other threads might still be trying to allocate using a CAS out
  // of the region we are trying to retire, as they can do so without
  // holding the lock. So, we first have to make sure that no one else
  // can allocate out of it by doing a maximal allocation. Even if our
  // CAS attempt fails a few times, we'll succeed sooner or later
  // given that failed CAS attempts mean that the region is getting
  // close to being full.
  size_t free_word_size = alloc_region->free() / HeapWordSize;

  // This is the minimum free chunk we can turn into a dummy
  // object. If the free space falls below this, then no one can
  // allocate in this region anyway (all allocation requests will be
  // at least this large), so we won't have to perform the dummy
  // allocation.
  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();

  while (free_word_size >= min_word_size_to_fill) {
    HeapWord* dummy = par_allocate(alloc_region, free_word_size);
    if (dummy != NULL) {
      // If the allocation was successful we should fill in the space.
      CollectedHeap::fill_with_object(dummy, free_word_size);
      alloc_region->set_pre_dummy_top(dummy);
      result += free_word_size * HeapWordSize;
      break;
    }

    free_word_size = alloc_region->free() / HeapWordSize;
    // It's also possible that someone else beats us to the
    // allocation and they fill up the region. In that case, we can
    // just get out of the loop.
  }
  result += alloc_region->free();

  assert(alloc_region->free() / HeapWordSize < min_word_size_to_fill,
         "post-condition");
  return result;
}

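// Common retirement path: optionally fill up the remaining space, then
// report the bytes allocated since the region became active to the
// subclass-specific retire_region() hook. Returns the number of bytes
// wasted by the fill-up, if any.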
size_t G1AllocRegion::retire_internal(HeapRegion* alloc_region, bool fill_up) {
  // We never have to check whether the active region is empty or not,
  // and potentially free it if it is, given that it is guaranteed
  // never to be empty.
  size_t waste = 0;
  assert_alloc_region(!alloc_region->is_empty(),
                      "the alloc region should never be empty");

  if (fill_up) {
    waste = fill_up_remaining_space(alloc_region);
  }

  assert_alloc_region(alloc_region->used() >= _used_bytes_before, "invariant");
  size_t allocated_bytes = alloc_region->used() - _used_bytes_before;
  retire_region(alloc_region, allocated_bytes);
  _used_bytes_before = 0;

  return waste;
}

size_t G1AllocRegion::retire(bool fill_up) {
  assert_alloc_region(_alloc_region != NULL, "not initialized properly");

  size_t waste = 0;

  trace("retiring");
  HeapRegion* alloc_region = _alloc_region;
  if (alloc_region != _dummy_region) {
    waste = retire_internal(alloc_region, fill_up);
    reset_alloc_region();
  }
  trace("retired");

  return waste;
}

HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
                                                       bool force) {
  assert_alloc_region(_alloc_region == _dummy_region, "pre-condition");
  assert_alloc_region(_used_bytes_before == 0, "pre-condition");

  trace("attempting region allocation");
  HeapRegion* new_alloc_region = allocate_new_region(word_size, force);
  if (new_alloc_region != NULL) {
    new_alloc_region->reset_pre_dummy_top();
    // Need to do this before the allocation.
    _used_bytes_before = new_alloc_region->used();
    HeapWord* result = allocate(new_alloc_region, word_size);
    assert_alloc_region(result != NULL, "the allocation should have succeeded");

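    // Other threads read _alloc_region without holding a lock, so make
    // sure the allocation above is visible before the new region is
    // published below.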
    OrderAccess::storestore();
    // Note that we first perform the allocation and then we store the
    // region in _alloc_region. This is the reason why an active region
    // can never be empty.
    update_alloc_region(new_alloc_region);
    trace("region allocation successful");
    return result;
  } else {
    trace("region allocation failed");
    return NULL;
  }
  ShouldNotReachHere();
}

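// A rough sketch of the lifecycle of a G1AllocRegion (the lock-free
// allocation entry points, e.g. attempt_allocation() and
// attempt_allocation_locked(), live in g1AllocRegion.inline.hpp):
//
//   region.init();                  // active region is now the dummy region
//   HeapWord* obj = region.attempt_allocation(word_size);
//   // ... on failure, callers take a slow path that retires the
//   // active region and installs a new one ...
//   region.release();               // retire whatever is still active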
void G1AllocRegion::init() {
  trace("initializing");
  assert_alloc_region(_alloc_region == NULL && _used_bytes_before == 0, "pre-condition");
  assert_alloc_region(_dummy_region != NULL, "should have been set");
  _alloc_region = _dummy_region;
  _count = 0;
  trace("initialized");
}

void G1AllocRegion::set(HeapRegion* alloc_region) {
  trace("setting");
  // We explicitly check that the region is not empty to make sure we
  // maintain the "the alloc region cannot be empty" invariant.
  assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition");
  assert_alloc_region(_alloc_region == _dummy_region &&
                      _used_bytes_before == 0 && _count == 0,
                      "pre-condition");

  _used_bytes_before = alloc_region->used();
  _alloc_region = alloc_region;
  _count += 1;
  trace("set");
}

void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
  trace("update");
  // We explicitly check that the region is not empty to make sure we
  // maintain the "the alloc region cannot be empty" invariant.
  assert_alloc_region(alloc_region != NULL && !alloc_region->is_empty(), "pre-condition");

  _alloc_region = alloc_region;
  _count += 1;
  trace("updated");
}

HeapRegion* G1AllocRegion::release() {
  trace("releasing");
  HeapRegion* alloc_region = _alloc_region;
  retire(false /* fill_up */);
  assert_alloc_region(_alloc_region == _dummy_region, "post-condition of retire()");
  _alloc_region = NULL;
  trace("released");
  return (alloc_region == _dummy_region) ? NULL : alloc_region;
}

#ifndef PRODUCT
void G1AllocRegion::trace(const char* str, size_t min_word_size, size_t desired_word_size, size_t actual_word_size, HeapWord* result) {
  // All the calls to trace that pass either just the sizes or the
  // sizes and the result are considered part of detailed tracing and
  // are skipped during other tracing.

  Log(gc, alloc, region) log;

  if (!log.is_debug()) {
    return;
  }

  bool detailed_info = log.is_trace();

  if ((actual_word_size == 0 && result == NULL) || detailed_info) {
    ResourceMark rm;
    LogStream ls_trace(log.trace());
    LogStream ls_debug(log.debug());
    outputStream* out = detailed_info ? &ls_trace : &ls_debug;

    out->print("%s: %u ", _name, _count);

    if (_alloc_region == NULL) {
      out->print("NULL");
    } else if (_alloc_region == _dummy_region) {
      out->print("DUMMY");
    } else {
      out->print(HR_FORMAT, HR_FORMAT_PARAMS(_alloc_region));
    }

    out->print(" : %s", str);

    if (detailed_info) {
      if (result != NULL) {
        out->print(" min " SIZE_FORMAT " desired " SIZE_FORMAT " actual " SIZE_FORMAT " " PTR_FORMAT,
                   min_word_size, desired_word_size, actual_word_size, p2i(result));
      } else if (min_word_size != 0) {
        out->print(" min " SIZE_FORMAT " desired " SIZE_FORMAT, min_word_size, desired_word_size);
      }
    }
    out->cr();
  }
}
#endif // PRODUCT

G1AllocRegion::G1AllocRegion(const char* name,
                             bool bot_updates)
  : _alloc_region(NULL),
    _count(0),
    _used_bytes_before(0),
    _bot_updates(bot_updates),
    _name(name)
{ }

HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
                                                    bool force) {
  return _g1h->new_mutator_alloc_region(word_size, force);
}

void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
                                       size_t allocated_bytes) {
  _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
}

void MutatorAllocRegion::init() {
  assert(_retained_alloc_region == NULL, "pre-condition");
  G1AllocRegion::init();
  _wasted_bytes = 0;
}

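// Keep a region as the retained alloc region only if it can still fit
// a TLAB and has more free space than the region retained so far.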
bool MutatorAllocRegion::should_retain(HeapRegion* region) {
  size_t free_bytes = region->free();
  if (free_bytes < MinTLABSize) {
    return false;
  }

  if (_retained_alloc_region != NULL &&
      free_bytes < _retained_alloc_region->free()) {
    return false;
  }

  return true;
}

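// Retire the current mutator region, either by keeping it as the new
// retained region (retiring the previously retained one in its place)
// or by retiring it outright. All waste ends up in _wasted_bytes.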
size_t MutatorAllocRegion::retire(bool fill_up) {
  size_t waste = 0;
  trace("retiring");
  HeapRegion* current_region = get();
  if (current_region != NULL) {
    // Retain the current region if it can fit a TLAB and has more
    // free space than the currently retained region.
    if (should_retain(current_region)) {
      trace("mutator retained");
      if (_retained_alloc_region != NULL) {
        waste = retire_internal(_retained_alloc_region, true);
      }
      _retained_alloc_region = current_region;
    } else {
      waste = retire_internal(current_region, fill_up);
    }
    reset_alloc_region();
  }

  _wasted_bytes += waste;
  trace("retired");
  return waste;
}

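// Returns the number of bytes used in the current and retained alloc
// regions, if any.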
size_t MutatorAllocRegion::used_in_alloc_regions() {
  size_t used = 0;
  HeapRegion* hr = get();
  if (hr != NULL) {
    used += hr->used();
  }

  hr = _retained_alloc_region;
  if (hr != NULL) {
    used += hr->used();
  }
  return used;
}

HeapRegion* MutatorAllocRegion::release() {
  HeapRegion* ret = G1AllocRegion::release();

  // The retained alloc region must be retired and this must be
  // done after the above call to release the mutator alloc region,
  // since that call might update the _retained_alloc_region member.
  if (_retained_alloc_region != NULL) {
    _wasted_bytes += retire_internal(_retained_alloc_region, false);
    _retained_alloc_region = NULL;
  }
  log_debug(gc, alloc, region)("Mutator Allocation stats, regions: %u, wasted size: " SIZE_FORMAT "%s (%4.1f%%)",
                               count(),
                               byte_size_in_proper_unit(_wasted_bytes),
                               proper_unit_for_byte_size(_wasted_bytes),
                               percent_of(_wasted_bytes, count() * HeapRegion::GrainBytes));
  return ret;
}

HeapRegion* G1GCAllocRegion::allocate_new_region(size_t word_size,
                                                 bool force) {
  assert(!force, "not supported for GC alloc regions");
  return _g1h->new_gc_alloc_region(word_size, _purpose);
}

void G1GCAllocRegion::retire_region(HeapRegion* alloc_region,
                                    size_t allocated_bytes) {
  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, _purpose);
}

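// As G1AllocRegion::retire(), but additionally records the end-of-region
// waste (in words) in the evacuation statistics, unless only the dummy
// region was active.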
size_t G1GCAllocRegion::retire(bool fill_up) {
  HeapRegion* retired = get();
  size_t end_waste = G1AllocRegion::retire(fill_up);
  // Do not count retirement of the dummy allocation region.
  if (retired != NULL) {
    _stats->add_region_end_waste(end_waste / HeapWordSize);
  }
  return end_waste;
}

HeapRegion* OldGCAllocRegion::release() {
  HeapRegion* cur = get();
  if (cur != NULL) {
    // Determine how far we are from the next card boundary. If it is smaller than
    // the minimum object size we can allocate into, expand into the next card.
    HeapWord* top = cur->top();
    HeapWord* aligned_top = align_up(top, BOTConstants::N_bytes);

    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);

    if (to_allocate_words != 0) {
      // We are not at a card boundary. Fill up, possibly into the next
      // card, taking the end of the region and the minimum object size
      // into account.
      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
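      // For example, assuming 512 byte cards and a two-word minimum fill
      // size on a 64 bit VM: if top is one word short of a card boundary,
      // to_allocate_words is bumped from 1 to 2 and the fill object below
      // extends one word into the next card.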

      // Skip allocation if there is not enough space to allocate even the smallest
      // possible object. In this case this region will not be retained, so the
      // original problem cannot occur.
      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
        HeapWord* dummy = attempt_allocation(to_allocate_words);
        CollectedHeap::fill_with_object(dummy, to_allocate_words);
      }
    }
  }
  return G1AllocRegion::release();
}