/*
 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1AllocRegion.inline.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1EvacuationInfo.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heapRegionType.hpp"
#include "utilities/align.hpp"

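// G1Allocator owns the allocation regions used outside of collections (the
// mutator alloc region) and during collections (the survivor and old GC
// alloc regions), as well as the old region retained between collections.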
G1Allocator::G1Allocator(G1CollectedHeap* heap) :
  _g1h(heap),
  _survivor_is_full(false),
  _old_is_full(false),
  _mutator_alloc_region(),
  _survivor_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Young)),
  _old_gc_alloc_region(heap->alloc_buffer_stats(G1HeapRegionAttr::Old)),
  _retained_old_gc_alloc_region(NULL) {
}

void G1Allocator::init_mutator_alloc_region() {
  assert(_mutator_alloc_region.get() == NULL, "pre-condition");
  _mutator_alloc_region.init();
}

void G1Allocator::release_mutator_alloc_region() {
  _mutator_alloc_region.release();
  assert(_mutator_alloc_region.get() == NULL, "post-condition");
}

bool G1Allocator::is_retained_old_region(HeapRegion* hr) {
  return _retained_old_gc_alloc_region == hr;
}

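// Check whether the old GC alloc region retained from the previous collection
// can be reused for this collection and, if so, install it as the current
// old GC alloc region.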
void G1Allocator::reuse_retained_old_region(G1EvacuationInfo& evacuation_info,
                                            OldGCAllocRegion* old,
                                            HeapRegion** retained_old) {
  HeapRegion* retained_region = *retained_old;
  *retained_old = NULL;
  assert(retained_region == NULL || !retained_region->is_archive(),
         "Archive region should not be alloc region (index %u)", retained_region->hrm_index());

  // We will discard the current GC alloc region if:
  // a) it's in the collection set (it can happen!),
  // b) it's already full (no point in using it),
  // c) it's empty (this means that it was emptied during
  // a cleanup and it should be on the free list now), or
  // d) it's humongous (this means that it was emptied
  // during a cleanup and was added to the free list, but
  // has been subsequently used to allocate a humongous
  // object that may be less than the region size).
  if (retained_region != NULL &&
      !retained_region->in_collection_set() &&
      !(retained_region->top() == retained_region->end()) &&
      !retained_region->is_empty() &&
      !retained_region->is_humongous()) {
    // The retained region was added to the old region set when it was
    // retired. We have to remove it now, since we don't allow regions
    // we allocate to in the region sets. We'll re-add it later, when
    // it's retired again.
    _g1h->old_set_remove(retained_region);
    old->set(retained_region);
    _g1h->hr_printer()->reuse(retained_region);
    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  }
}

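// Prepare the GC alloc regions for a new collection: clear the "full" flags,
// (re)initialize the survivor and old alloc regions, and try to reuse the
// old region retained from the previous collection.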
void G1Allocator::init_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
  assert_at_safepoint_on_vm_thread();

  _survivor_is_full = false;
  _old_is_full = false;

  _survivor_gc_alloc_region.init();
  _old_gc_alloc_region.init();
  reuse_retained_old_region(evacuation_info,
                            &_old_gc_alloc_region,
                            &_retained_old_gc_alloc_region);
}

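// Release the GC alloc regions at the end of a collection. The old GC alloc
// region (if any) is remembered in _retained_old_gc_alloc_region so the next
// collection can continue allocating into it.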
void G1Allocator::release_gc_alloc_regions(G1EvacuationInfo& evacuation_info) {
  evacuation_info.set_allocation_regions(survivor_gc_alloc_region()->count() +
                                         old_gc_alloc_region()->count());
  survivor_gc_alloc_region()->release();
  // If we have an old GC alloc region to release, we'll save it in
  // _retained_old_gc_alloc_region. If we don't
  // _retained_old_gc_alloc_region will become NULL. This is what we
  // want either way so no reason to check explicitly for either
  // condition.
  _retained_old_gc_alloc_region = old_gc_alloc_region()->release();
}

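// Forget the retained old GC alloc region. Both GC alloc regions must already
// have been released at this point (see the asserts below).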
void G1Allocator::abandon_gc_alloc_regions() {
  assert(survivor_gc_alloc_region()->get() == NULL, "pre-condition");
  assert(old_gc_alloc_region()->get() == NULL, "pre-condition");
  _retained_old_gc_alloc_region = NULL;
}

bool G1Allocator::survivor_is_full() const {
  return _survivor_is_full;
}

bool G1Allocator::old_is_full() const {
  return _old_is_full;
}

void G1Allocator::set_survivor_full() {
  _survivor_is_full = true;
}

void G1Allocator::set_old_full() {
  _old_is_full = true;
}

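// Estimate of the largest TLAB that could be handed out right now. "Unsafe"
// because the value is computed without taking any locks and may therefore be
// stale by the time the caller uses it; it is a hint only.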
size_t G1Allocator::unsafe_max_tlab_alloc() {
  // Return the remaining space in the cur alloc region, but not less than
  // the min TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow tlabs to grow big enough to accommodate
  // humongous objects.

  HeapRegion* hr = mutator_alloc_region()->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}

size_t G1Allocator::used_in_alloc_regions() {
  assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
  return mutator_alloc_region()->used_in_alloc_regions();
}

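// GC-time allocation of exactly word_size words into the given destination
// (survivor or old). This overload forwards to the min/desired variant below
// and asserts that the exact requested size was obtained.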
HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                              size_t word_size) {
  size_t temp = 0;
  HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp);
  assert(result == NULL || temp == word_size,
         "Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
         word_size, temp, p2i(result));
  return result;
}

HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
                                              size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
    case G1HeapRegionAttr::Old:
      return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

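// Allocate into the survivor GC alloc region. The first attempt is lock-free
// against the current alloc region; if it fails and survivor space is not yet
// known to be full, a second, locked attempt (under FreeList_lock) may retire
// the current region and install a new one. A failed locked attempt marks
// survivor space as full so later allocations can skip the lock.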
HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
                                                   size_t desired_word_size,
                                                   size_t* actual_word_size) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = survivor_gc_alloc_region()->attempt_allocation(min_word_size,
                                                                    desired_word_size,
                                                                    actual_word_size);
  if (result == NULL && !survivor_is_full()) {
    MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = survivor_gc_alloc_region()->attempt_allocation_locked(min_word_size,
                                                                   desired_word_size,
                                                                   actual_word_size);
    if (result == NULL) {
      set_survivor_full();
    }
  }
  if (result != NULL) {
    _g1h->dirty_young_block(result, *actual_word_size);
  }
  return result;
}

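// Old-generation counterpart of survivor_attempt_allocation(): the same
// lock-free then locked protocol, but without the young-block dirtying.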
HeapWord* G1Allocator::old_attempt_allocation(size_t min_word_size,
                                              size_t desired_word_size,
                                              size_t* actual_word_size) {
  assert(!_g1h->is_humongous(desired_word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = old_gc_alloc_region()->attempt_allocation(min_word_size,
                                                               desired_word_size,
                                                               actual_word_size);
  if (result == NULL && !old_is_full()) {
    MutexLocker x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = old_gc_alloc_region()->attempt_allocation_locked(min_word_size,
                                                              desired_word_size,
                                                              actual_word_size);
    if (result == NULL) {
      set_old_full();
    }
  }
  return result;
}

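// Returns the survivor-space alignment in bytes, or 0 if survivor objects do
// not need any alignment beyond the regular object alignment.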
uint G1PLABAllocator::calc_survivor_alignment_bytes() {
  assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
  if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
    // No need to align objects in the survivors differently, return 0
    // which means "survivor alignment is not used".
    return 0;
  } else {
    assert(SurvivorAlignmentInBytes > 0, "sanity");
    return SurvivorAlignmentInBytes;
  }
}

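// G1PLABAllocator manages the promotion-local allocation buffers (PLABs) used
// during evacuation, one per destination (survivor and old), and tracks the
// number of words that were allocated directly, bypassing the PLABs.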
G1PLABAllocator::G1PLABAllocator(G1Allocator* allocator) :
  _g1h(G1CollectedHeap::heap()),
  _allocator(allocator),
  _surviving_alloc_buffer(_g1h->desired_plab_sz(G1HeapRegionAttr::Young)),
  _tenured_alloc_buffer(_g1h->desired_plab_sz(G1HeapRegionAttr::Old)),
  _survivor_alignment_bytes(calc_survivor_alignment_bytes()) {
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    _direct_allocated[state] = 0;
    _alloc_buffers[state] = NULL;
  }
  _alloc_buffers[G1HeapRegionAttr::Young] = &_surviving_alloc_buffer;
  _alloc_buffers[G1HeapRegionAttr::Old] = &_tenured_alloc_buffer;
}

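// A PLAB that cannot satisfy the current request may be retired (wasting its
// remaining space) only if the failed request is smaller than
// ParallelGCBufferWastePct percent of a full buffer. Because the request did
// not fit, the space left in the buffer is smaller than the request, so the
// waste is bounded by the same percentage. For example, with a 1024-word
// buffer and ParallelGCBufferWastePct == 10, requests of up to 102 words
// still allow the buffer to be thrown away.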
bool G1PLABAllocator::may_throw_away_buffer(size_t const allocation_word_sz, size_t const buffer_size) const {
  return (allocation_word_sz * 100 < buffer_size * ParallelGCBufferWastePct);
}

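// Slow path for PLAB allocation: either retire the current PLAB and refill it
// from the destination's GC alloc region, or, if the request is too large for
// that to be worthwhile, allocate the object directly in the GC alloc region
// and account for it in _direct_allocated.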
HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest,
                                                       size_t word_sz,
                                                       bool* plab_refill_failed) {
  size_t plab_word_size = _g1h->desired_plab_sz(dest);
  size_t required_in_plab = PLAB::size_required_for_allocation(word_sz);

  // Only get a new PLAB if the allocation fits and it would not waste more than
  // ParallelGCBufferWastePct in the existing buffer.
  if ((required_in_plab <= plab_word_size) &&
      may_throw_away_buffer(required_in_plab, plab_word_size)) {

    PLAB* alloc_buf = alloc_buffer(dest);
    alloc_buf->retire();

    size_t actual_plab_size = 0;
    HeapWord* buf = _allocator->par_allocate_during_gc(dest,
                                                       required_in_plab,
                                                       plab_word_size,
                                                       &actual_plab_size);

    assert(buf == NULL || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
           "Requested at minimum " SIZE_FORMAT ", desired " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
           required_in_plab, plab_word_size, actual_plab_size, p2i(buf));

    if (buf != NULL) {
      alloc_buf->set_buf(buf, actual_plab_size);

      HeapWord* const obj = alloc_buf->allocate(word_sz);
      assert(obj != NULL, "PLAB should have been big enough, tried to allocate "
                          SIZE_FORMAT " requiring " SIZE_FORMAT " PLAB size " SIZE_FORMAT,
             word_sz, required_in_plab, plab_word_size);
      return obj;
    }
    // Otherwise.
    *plab_refill_failed = true;
  }
  // Try direct allocation.
  HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz);
  if (result != NULL) {
    _direct_allocated[dest.type()] += word_sz;
  }
  return result;
}

void G1PLABAllocator::undo_allocation(G1HeapRegionAttr dest, HeapWord* obj, size_t word_sz) {
  alloc_buffer(dest)->undo_allocation(obj, word_sz);
}

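// Retire both PLABs at the end of evacuation, folding their usage and waste
// into the per-destination G1EvacStats together with the directly allocated
// words, and reset the direct-allocation counters.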
void G1PLABAllocator::flush_and_retire_stats() {
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      G1EvacStats* stats = _g1h->alloc_buffer_stats(state);
      buf->flush_and_retire_stats(stats);
      stats->add_direct_allocated(_direct_allocated[state]);
      _direct_allocated[state] = 0;
    }
  }
}

size_t G1PLABAllocator::waste() const {
  size_t result = 0;
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      result += buf->waste();
    }
  }
  return result;
}

size_t G1PLABAllocator::undo_waste() const {
  size_t result = 0;
  for (uint state = 0; state < G1HeapRegionAttr::Num; state++) {
    PLAB* const buf = _alloc_buffers[state];
    if (buf != NULL) {
      result += buf->undo_waste();
    }
  }
  return result;
}

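// G1ArchiveAllocator: allocation support for closed and open archive regions,
// used when writing archived heap objects (CDS). The static region maps allow
// the rest of the GC to quickly test whether an address lies in an archive
// region once archive checking has been enabled.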
bool G1ArchiveAllocator::_archive_check_enabled = false;
G1ArchiveRegionMap G1ArchiveAllocator::_closed_archive_region_map;
G1ArchiveRegionMap G1ArchiveAllocator::_open_archive_region_map;

G1ArchiveAllocator* G1ArchiveAllocator::create_allocator(G1CollectedHeap* g1h, bool open) {
  // Create the archive allocator, and also enable archive object checking
  // in mark-sweep, since we will be creating archive regions.
  G1ArchiveAllocator* result = new G1ArchiveAllocator(g1h, open);
  enable_archive_object_check();
  return result;
}

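// Take the highest free region in the heap for archive allocation, tag it as
// open or closed archive, and make it the current allocation region. Only the
// lowest min_region_size'd chunk is made available for allocation at first;
// archive_mem_allocate() advances through the region chunk by chunk.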
bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the archive set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  if (_open) {
    hr->set_open_archive();
  } else {
    hr->set_closed_archive();
  }
  _g1h->policy()->remset_tracker()->update_at_allocate(hr);
  _g1h->archive_set_add(hr);
  _g1h->hr_printer()->alloc(hr);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), _open);

  // Since we've modified the archive set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}

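// Bump-pointer allocation within the current min_region_size'd chunk of the
// current archive region. If the request does not fit in the chunk (or would
// leave a tail too small to fill), the remainder is filled with a dummy
// object and allocation continues in the next chunk, taking a new region when
// the current one is exhausted.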
HeapWord* G1ArchiveAllocator::archive_mem_allocate(size_t word_size) {
  assert(word_size != 0, "size must not be zero");
  if (_allocation_region == NULL) {
    if (!alloc_new_region()) {
      return NULL;
    }
  }
  HeapWord* old_top = _allocation_region->top();
  assert(_bottom >= _allocation_region->bottom(),
         "inconsistent allocation state: " PTR_FORMAT " < " PTR_FORMAT,
         p2i(_bottom), p2i(_allocation_region->bottom()));
  assert(_max <= _allocation_region->end(),
         "inconsistent allocation state: " PTR_FORMAT " > " PTR_FORMAT,
         p2i(_max), p2i(_allocation_region->end()));
  assert(_bottom <= old_top && old_top <= _max,
         "inconsistent allocation state: expected "
         PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT,
         p2i(_bottom), p2i(old_top), p2i(_max));

  // Allocate the next word_size words in the current allocation chunk.
  // If allocation would cross the _max boundary, insert a filler and begin
  // at the base of the next min_region_size'd chunk. Also advance to the next
  // chunk if we don't yet cross the boundary, but the remainder would be too
  // small to fill.
  HeapWord* new_top = old_top + word_size;
  size_t remainder = pointer_delta(_max, new_top);
  if ((new_top > _max) ||
      ((new_top < _max) && (remainder < CollectedHeap::min_fill_size()))) {
    if (old_top != _max) {
      size_t fill_size = pointer_delta(_max, old_top);
      CollectedHeap::fill_with_object(old_top, fill_size);
      _summary_bytes_used += fill_size * HeapWordSize;
    }
    _allocation_region->set_top(_max);
    old_top = _bottom = _max;

    // Check if we've just used up the last min_region_size'd chunk
    // in the current region, and if so, allocate a new one.
    if (_bottom != _allocation_region->end()) {
      _max = _bottom + HeapRegion::min_region_size_in_words();
    } else {
      if (!alloc_new_region()) {
        return NULL;
      }
      old_top = _allocation_region->bottom();
    }
  }
  _allocation_region->set_top(old_top + word_size);
  _summary_bytes_used += word_size * HeapWordSize;

  return old_top;
}

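// Finish the current archive allocation pass: optionally pad the top of the
// last region to the requested end alignment, then report the allocated
// address ranges to the caller as MemRegions (coalescing contiguous regions)
// and reset the allocator state.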
void G1ArchiveAllocator::complete_archive(GrowableArray<MemRegion>* ranges,
                                          size_t end_alignment_in_bytes) {
  assert((end_alignment_in_bytes >> LogHeapWordSize) < HeapRegion::min_region_size_in_words(),
         "alignment " SIZE_FORMAT " too large", end_alignment_in_bytes);
  assert(is_aligned(end_alignment_in_bytes, HeapWordSize),
         "alignment " SIZE_FORMAT " is not HeapWord (%u) aligned", end_alignment_in_bytes, HeapWordSize);

  // If we've allocated nothing, simply return.
  if (_allocation_region == NULL) {
    return;
  }

  // If an end alignment was requested, insert filler objects.
  if (end_alignment_in_bytes != 0) {
    HeapWord* currtop = _allocation_region->top();
    HeapWord* newtop = align_up(currtop, end_alignment_in_bytes);
    size_t fill_size = pointer_delta(newtop, currtop);
    if (fill_size != 0) {
      if (fill_size < CollectedHeap::min_fill_size()) {
        // If the required fill is smaller than we can represent,
        // bump up to the next aligned address. We know we won't exceed the current
        // region boundary because the max supported alignment is smaller than the min
        // region size, and because the allocation code never leaves space smaller than
        // the min_fill_size at the top of the current allocation region.
        newtop = align_up(currtop + CollectedHeap::min_fill_size(),
                          end_alignment_in_bytes);
        fill_size = pointer_delta(newtop, currtop);
      }
      HeapWord* fill = archive_mem_allocate(fill_size);
      CollectedHeap::fill_with_objects(fill, fill_size);
    }
  }

  // Loop through the allocated regions, and create MemRegions summarizing
  // the allocated address range, combining contiguous ranges. Add the
  // MemRegions to the GrowableArray provided by the caller.
  int index = _allocated_regions.length() - 1;
  assert(_allocated_regions.at(index) == _allocation_region,
         "expected region %u at end of array, found %u",
         _allocation_region->hrm_index(), _allocated_regions.at(index)->hrm_index());
  HeapWord* base_address = _allocation_region->bottom();
  HeapWord* top = base_address;

  while (index >= 0) {
    HeapRegion* next = _allocated_regions.at(index);
    HeapWord* new_base = next->bottom();
    HeapWord* new_top = next->top();
    if (new_base != top) {
      ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
      base_address = new_base;
    }
    top = new_top;
    index = index - 1;
  }

  assert(top != base_address, "zero-sized range, address " PTR_FORMAT, p2i(base_address));
  ranges->append(MemRegion(base_address, pointer_delta(top, base_address)));
  _allocated_regions.clear();
  _allocation_region = NULL;
}
| 504 | |