/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_HEAPREGION_INLINE_HPP
#define SHARE_GC_G1_HEAPREGION_INLINE_HPP

#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/space.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/prefetch.inline.hpp"
#include "utilities/align.hpp"

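// Try to allocate between min_word_size and desired_word_size words from the
// current allocation point. On success, returns the start of the allocated
// block, bumps top() and reports the actually allocated size via actual_size;
// returns NULL if even min_word_size words are unavailable. Single-threaded
// callers only: top() is read and updated without synchronization.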
inline HeapWord* G1ContiguousSpace::allocate_impl(size_t min_word_size,
                                                  size_t desired_word_size,
                                                  size_t* actual_size) {
  HeapWord* obj = top();
  size_t available = pointer_delta(end(), obj);
  size_t want_to_allocate = MIN2(available, desired_word_size);
  if (want_to_allocate >= min_word_size) {
    HeapWord* new_top = obj + want_to_allocate;
    set_top(new_top);
    assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment");
    *actual_size = want_to_allocate;
    return obj;
  } else {
    return NULL;
  }
}

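// Lock-free variant of allocate_impl for concurrent callers: the bump of
// top() is performed with a CAS, and the attempt is retried until it either
// succeeds or not even min_word_size words remain.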
inline HeapWord* G1ContiguousSpace::par_allocate_impl(size_t min_word_size,
                                                      size_t desired_word_size,
                                                      size_t* actual_size) {
  do {
    HeapWord* obj = top();
    size_t available = pointer_delta(end(), obj);
    size_t want_to_allocate = MIN2(available, desired_word_size);
    if (want_to_allocate >= min_word_size) {
      HeapWord* new_top = obj + want_to_allocate;
      HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj);
      // result can be one of two values:
      //   the old top value: the exchange succeeded
      //   otherwise: the new value of top is returned and the exchange
      //     failed; retry with the updated top.
      if (result == obj) {
        assert(is_object_aligned(obj) && is_object_aligned(new_top), "checking alignment");
        *actual_size = want_to_allocate;
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

inline HeapWord* G1ContiguousSpace::allocate(size_t min_word_size,
                                             size_t desired_word_size,
                                             size_t* actual_size) {
  HeapWord* res = allocate_impl(min_word_size, desired_word_size, actual_size);
  if (res != NULL) {
    _bot_part.alloc_block(res, *actual_size);
  }
  return res;
}

inline HeapWord* G1ContiguousSpace::allocate(size_t word_size) {
  size_t temp;
  return allocate(word_size, word_size, &temp);
}

inline HeapWord* G1ContiguousSpace::par_allocate(size_t word_size) {
  size_t temp;
  return par_allocate(word_size, word_size, &temp);
}

// Because of the requirement of keeping "_bot_part" up to date with the
// allocations, we sequentialize these with a lock. Therefore, this is best
// used for larger LAB allocations only.
inline HeapWord* G1ContiguousSpace::par_allocate(size_t min_word_size,
                                                 size_t desired_word_size,
                                                 size_t* actual_size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(min_word_size, desired_word_size, actual_size);
}

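// The block offset table (BOT) maps a heap address to the start of the block
// covering it; these forwarders answer "where does the block containing p
// begin?" for this space.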
inline HeapWord* G1ContiguousSpace::block_start(const void* p) {
  return _bot_part.block_start(p);
}

inline HeapWord*
G1ContiguousSpace::block_start_const(const void* p) const {
  return _bot_part.block_start_const(p);
}

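// Returns whether obj is dead with respect to the previous marking and, as a
// side effect, stores the size of the block starting at obj in *size. For a
// dead object with class unloading enabled the size must come from the mark
// bitmap, because the object's klass may already have been unloaded.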
inline bool HeapRegion::is_obj_dead_with_size(const oop obj, const G1CMBitMap* const prev_bitmap, size_t* size) const {
  HeapWord* addr = (HeapWord*) obj;

  assert(addr < top(), "must be");
  assert(!is_closed_archive(),
         "Closed archive regions should not have references into other regions");
  assert(!is_humongous(), "Humongous objects not handled here");
  bool obj_is_dead = is_obj_dead(obj, prev_bitmap);

  if (ClassUnloadingWithConcurrentMark && obj_is_dead) {
    assert(!block_is_obj(addr), "must be");
    *size = block_size_using_bitmap(addr, prev_bitmap);
  } else {
    assert(block_is_obj(addr), "must be");
    *size = obj->size();
  }
  return obj_is_dead;
}

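// Returns whether the block starting at p can be parsed as an object. With
// class unloading enabled a dead object's klass may be gone, so only live
// objects qualify; without it, every block below top() is a valid object.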
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  if (!this->is_in(p)) {
    assert(is_continues_humongous(), "This case can only happen for humongous regions");
    return (p == humongous_start_region()->bottom());
  }
  if (ClassUnloadingWithConcurrentMark) {
    return !g1h->is_obj_dead(oop(p), this);
  }
  return p < top();
}

inline size_t HeapRegion::block_size_using_bitmap(const HeapWord* addr, const G1CMBitMap* const prev_bitmap) const {
  assert(ClassUnloadingWithConcurrentMark,
         "All blocks should be objects if class unloading isn't used, so this method should not be called. "
         "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
         "addr: " PTR_FORMAT,
         p2i(bottom()), p2i(top()), p2i(end()), p2i(addr));

  // Old regions' dead objects may have dead classes. We need to find the
  // next live object using the bitmap; the size of the dead block is the
  // distance to that next marked address.
  HeapWord* next = prev_bitmap->get_next_marked_addr(addr, prev_top_at_mark_start());

  assert(next > addr, "must get the next live object");
  return pointer_delta(next, addr);
}

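// An object is live if it was allocated after the previous marking started
// (above prev TAMS, so implicitly live), if the previous bitmap has it
// marked, or if it is an open archive object (always considered live).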
inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMap* const prev_bitmap) const {
  assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj));
  return !obj_allocated_since_prev_marking(obj) &&
         !prev_bitmap->is_marked((HeapWord*)obj) &&
         !is_open_archive();
}

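// Size of the block starting at addr: the remaining gap up to end() for the
// top() boundary itself, the object's own size for a parsable object, and
// the bitmap-derived size for a dead block under class unloading.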
inline size_t HeapRegion::block_size(const HeapWord *addr) const {
  if (addr == top()) {
    return pointer_delta(end(), addr);
  }

  if (block_is_obj(addr)) {
    return oop(addr)->size();
  }

  return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prev_mark_bitmap());
}

inline void HeapRegion::complete_compaction() {
  // Reset space and bot after compaction is complete if needed.
  reset_after_compaction();
  if (used_region().is_empty()) {
    reset_bot();
  }

  // After a compaction the mark bitmap is invalid, so we must
  // treat all objects as being inside the unmarked area.
  zero_marked_bytes();
  init_top_at_mark_start();

  // Clear unused heap memory in debug builds.
  if (ZapUnusedHeapArea) {
    mangle_unused_area();
  }
}

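// Walk all objects marked in the given bitmap between bottom() and the scan
// limit, applying the closure to each. The closure's apply() must return the
// size of the object it processed so the scan can step past it.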
template<typename ApplyToMarkedClosure>
inline void HeapRegion::apply_to_marked_objects(G1CMBitMap* bitmap, ApplyToMarkedClosure* closure) {
  HeapWord* limit = scan_limit();
  HeapWord* next_addr = bottom();

  while (next_addr < limit) {
    Prefetch::write(next_addr, PrefetchScanIntervalInBytes);
    // This explicit is_marked check is a way to avoid
    // some extra work done by get_next_marked_addr for
    // the case where next_addr is marked.
    if (bitmap->is_marked(next_addr)) {
      oop current = oop(next_addr);
      next_addr += closure->apply(current);
    } else {
      next_addr = bitmap->get_next_marked_addr(next_addr, limit);
    }
  }

  assert(next_addr == limit, "Should stop the scan at the limit.");
}

inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
                                                         size_t desired_word_size,
                                                         size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  size_t temp;
  return allocate_no_bot_updates(word_size, word_size, &temp);
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t min_word_size,
                                                     size_t desired_word_size,
                                                     size_t* actual_word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(min_word_size, desired_word_size, actual_word_size);
}

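// At the start of a marking cycle, record the current top() as the next
// top-at-mark-start (TAMS): objects allocated above it during marking are
// implicitly considered live.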
inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

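// At the end of marking, the "next" marking information becomes the "prev"
// information used for liveness queries until the following cycle completes.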
inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _next_top_at_mark_start = bottom();
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;
}

inline bool HeapRegion::in_collection_set() const {
  return G1CollectedHeap::heap()->is_in_cset(this);
}

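// Process the references of the one humongous object that spans this region,
// restricted to the card region mr where possible. Returns false only when a
// concurrent caller sees an object whose klass has not yet been published;
// the card must then be revisited later.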
template <class Closure, bool is_gc_active>
bool HeapRegion::do_oops_on_card_in_humongous(MemRegion mr,
                                              Closure* cl,
                                              G1CollectedHeap* g1h) {
  assert(is_humongous(), "precondition");
  HeapRegion* sr = humongous_start_region();
  oop obj = oop(sr->bottom());

  // If concurrent and klass_or_null is NULL, then space has been
  // allocated but the object has not yet been published by setting
  // the klass. That can only happen if the card is stale. However,
  // we've already set the card clean, so we must return failure,
  // since the allocating thread could have performed a write to the
  // card that might be missed otherwise.
  if (!is_gc_active && (obj->klass_or_null_acquire() == NULL)) {
    return false;
  }

  // We have a well-formed humongous object at the start of sr.
  // Only filler objects follow a humongous object in the containing
  // regions, and we can ignore those. So only process the one
  // humongous object.
  if (!g1h->is_obj_dead(obj, sr)) {
    if (obj->is_objArray() || (sr->bottom() < mr.start())) {
      // objArrays are always marked precisely, so limit processing
      // with mr. Non-objArrays might be precisely marked, and since
      // it's humongous it's worthwhile avoiding full processing.
      // However, the card could be stale and only cover filler
      // objects. That should be rare, so not worth checking for;
      // instead let it fall out from the bounded iteration.
      obj->oop_iterate(cl, mr);
    } else {
      // If obj is not an objArray and mr contains the start of the
      // obj, then this could be an imprecise mark, and we need to
      // process the entire object.
      obj->oop_iterate(cl);
    }
  }
  return true;
}

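// Iterate "carefully" over the (possibly dead) objects intersecting the card
// region mr, applying the closure to the references of each live object.
// Returns true on success, false only in the stale-card humongous case
// handled above.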
template <bool is_gc_active, class Closure>
bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
                                                  Closure* cl) {
  assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Special handling for humongous regions.
  if (is_humongous()) {
    return do_oops_on_card_in_humongous<Closure, is_gc_active>(mr, cl, g1h);
  }
  assert(is_old() || is_archive(), "Wrongly trying to iterate over region %u type %s", _hrm_index, get_type_str());

  // Because mr has been trimmed to what's been allocated in this
  // region, the parts of the heap that are examined here are always
  // parsable; there's no need to use klass_or_null to detect
  // in-progress allocation.

  // Cache the boundaries of the memory region in some const locals
  HeapWord* const start = mr.start();
  HeapWord* const end = mr.end();

  // Find the obj that extends onto mr.start().
  // Update BOT as needed while finding start of (possibly dead)
  // object containing the start of the region.
  HeapWord* cur = block_start(start);

#ifdef ASSERT
  {
    assert(cur <= start,
           "cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start));
    HeapWord* next = cur + block_size(cur);
    assert(start < next,
           "start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next));
  }
#endif

  const G1CMBitMap* const bitmap = g1h->concurrent_mark()->prev_mark_bitmap();
  do {
    oop obj = oop(cur);
    assert(oopDesc::is_oop(obj, true), "Not an oop at " PTR_FORMAT, p2i(cur));
    assert(obj->klass_or_null() != NULL,
           "Unparsable heap at " PTR_FORMAT, p2i(cur));

    size_t size;
    bool is_dead = is_obj_dead_with_size(obj, bitmap, &size);

    cur += size;
    if (!is_dead) {
      // Process live object's references.

      // Non-objArrays are usually marked imprecise at the object
      // start, in which case we need to iterate over them in full.
      // objArrays are precisely marked, but can still be iterated
      // over in full if completely covered.
      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
        obj->oop_iterate(cl);
      } else {
        obj->oop_iterate(cl, mr);
      }
    }
  } while (cur < end);

  return true;
}

#endif // SHARE_GC_G1_HEAPREGION_INLINE_HPP