/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1HeapRegionTraceType.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionBounds.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionTracer.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "utilities/growableArray.hpp"

int    HeapRegion::LogOfHRGrainBytes = 0;
int    HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes        = 0;
size_t HeapRegion::GrainWords        = 0;
size_t HeapRegion::CardsPerRegion    = 0;

size_t HeapRegion::max_region_size() {
  return HeapRegionBounds::max_size();
}

size_t HeapRegion::min_region_size_in_words() {
  return HeapRegionBounds::min_size() >> LogHeapWordSize;
}

void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
  size_t region_size = G1HeapRegionSize;
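  // If G1HeapRegionSize was not set on the command line, size regions
  // ergonomically: aim for about HeapRegionBounds::target_number() regions
  // based on the average of the initial and maximum heap size, but never
  // less than HeapRegionBounds::min_size() each.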
  if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
    region_size = MAX2(average_heap_size / HeapRegionBounds::target_number(),
                       HeapRegionBounds::min_size());
  }

  int region_size_log = log2_long((jlong) region_size);
  // Recalculate the region size to make sure it's a power of
  // 2. This means that region_size is the largest power of 2 that's
  // <= what we've calculated so far.
  region_size = ((size_t)1 << region_size_log);
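  // For example, a 24G average heap with the default target of 2048 regions
  // gives 12M per region, which rounds down to an 8M region size here.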

  // Now make sure that we don't go over or under our limits.
  if (region_size < HeapRegionBounds::min_size()) {
    region_size = HeapRegionBounds::min_size();
  } else if (region_size > HeapRegionBounds::max_size()) {
    region_size = HeapRegionBounds::max_size();
  }

  // And recalculate the log.
  region_size_log = log2_long((jlong) region_size);

  // Now, set up the globals.
  guarantee(LogOfHRGrainBytes == 0, "we should only set it once");
  LogOfHRGrainBytes = region_size_log;

  guarantee(LogOfHRGrainWords == 0, "we should only set it once");
  LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;
  guarantee(GrainBytes == 0, "we should only set it once");
  // region_size has already been clamped to the min/max region size
  // bounds above, so this assignment is safe.
  GrainBytes = region_size;
  log_info(gc, heap)("Heap region size: " SIZE_FORMAT "M", GrainBytes / M);

  guarantee(GrainWords == 0, "we should only set it once");
  GrainWords = GrainBytes >> LogHeapWordSize;
  guarantee((size_t) 1 << LogOfHRGrainWords == GrainWords, "sanity");

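  // Each card covers 2^G1CardTable::card_shift (by default 512) bytes, so
  // e.g. an 8M region is covered by 16384 cards.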
  guarantee(CardsPerRegion == 0, "we should only set it once");
  CardsPerRegion = GrainBytes >> G1CardTable::card_shift;

  if (G1HeapRegionSize != GrainBytes) {
    FLAG_SET_ERGO(G1HeapRegionSize, GrainBytes);
  }
}

void HeapRegion::hr_clear(bool keep_remset, bool clear_space, bool locked) {
  assert(_humongous_start_region == NULL,
         "we should have already filtered out humongous regions");
  assert(!in_collection_set(),
         "Should not clear heap region %u in the collection set", hrm_index());

  set_young_index_in_cset(-1);
  clear_index_in_opt_cset();
  uninstall_surv_rate_group();
  set_free();
  reset_pre_dummy_top();

  if (!keep_remset) {
    if (locked) {
      rem_set()->clear_locked();
    } else {
      rem_set()->clear();
    }
  }

  zero_marked_bytes();

  init_top_at_mark_start();
  if (clear_space) clear(SpaceDecorator::Mangle);
}

void HeapRegion::clear_cardtable() {
  G1CardTable* ct = G1CollectedHeap::heap()->card_table();
  ct->clear(MemRegion(bottom(), end()));
}

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
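  // In other words, bytes reclaimable per millisecond of predicted
  // collection time; the policy uses this value to rank old regions when
  // selecting collection set candidates.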
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1Policy* policy = g1h->policy();

  // Retrieve a prediction of the elapsed time for this region for
  // a mixed gc because the region will only be evacuated during a
  // mixed gc.
  double region_elapsed_time_ms =
    policy->predict_region_elapsed_time_ms(this, false /* for_young_gc */);
  _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
}

void HeapRegion::set_free() {
  report_region_type_change(G1HeapRegionTraceType::Free);
  _type.set_free();
}

void HeapRegion::set_eden() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden();
}

void HeapRegion::set_eden_pre_gc() {
  report_region_type_change(G1HeapRegionTraceType::Eden);
  _type.set_eden_pre_gc();
}

void HeapRegion::set_survivor() {
  report_region_type_change(G1HeapRegionTraceType::Survivor);
  _type.set_survivor();
}

void HeapRegion::move_to_old() {
  if (_type.relabel_as_old()) {
    report_region_type_change(G1HeapRegionTraceType::Old);
  }
}

void HeapRegion::set_old() {
  report_region_type_change(G1HeapRegionTraceType::Old);
  _type.set_old();
}

void HeapRegion::set_open_archive() {
  report_region_type_change(G1HeapRegionTraceType::OpenArchive);
  _type.set_open_archive();
}

void HeapRegion::set_closed_archive() {
  report_region_type_change(G1HeapRegionTraceType::ClosedArchive);
  _type.set_closed_archive();
}

void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");

  report_region_type_change(G1HeapRegionTraceType::StartsHumongous);
  _type.set_starts_humongous();
  _humongous_start_region = this;

  _bot_part.set_for_starts_humongous(obj_top, fill_size);
}

void HeapRegion::set_continues_humongous(HeapRegion* first_hr) {
  assert(!is_humongous(), "sanity / pre-condition");
  assert(top() == bottom(), "should be empty");
  assert(first_hr->is_starts_humongous(), "pre-condition");

  report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous);
  _type.set_continues_humongous();
  _humongous_start_region = first_hr;

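  // The humongous object starts in first_hr, so block starts in this region
  // may resolve to addresses outside it; tell the BOT that objects can span
  // region boundaries here.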
  _bot_part.set_object_can_span(true);
}

void HeapRegion::clear_humongous() {
  assert(is_humongous(), "pre-condition");

  assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
  _humongous_start_region = NULL;

  _bot_part.set_object_can_span(false);
}

HeapRegion::HeapRegion(uint hrm_index,
                       G1BlockOffsetTable* bot,
                       MemRegion mr) :
  G1ContiguousSpace(bot),
  _rem_set(NULL),
  _hrm_index(hrm_index),
  _type(),
  _humongous_start_region(NULL),
  _evacuation_failed(false),
  _next(NULL), _prev(NULL),
#ifdef ASSERT
  _containing_set(NULL),
#endif
  _prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
  _index_in_opt_cset(InvalidCSetIndex), _young_index_in_cset(-1),
  _surv_rate_group(NULL), _age_index(-1),
  _prev_top_at_mark_start(NULL), _next_top_at_mark_start(NULL),
  _recorded_rs_length(0), _predicted_elapsed_time_ms(0)
{
  _rem_set = new HeapRegionRemSet(bot, this);

  initialize(mr);
}

void HeapRegion::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  assert(_rem_set->is_empty(), "Remembered set must be empty");

  G1ContiguousSpace::initialize(mr, clear_space, mangle_space);

  hr_clear(false /*keep_remset*/, false /*clear_space*/);
  set_top(bottom());
}

void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
  HeapRegionTracer::send_region_type_change(_hrm_index,
                                            get_trace_type(),
                                            to,
                                            (uintptr_t)bottom(),
                                            used());
}

void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
                                                    bool during_conc_mark) {
  // We always recreate the prev marking info and we'll explicitly
  // mark all objects we find to be self-forwarded on the prev
  // bitmap. So all objects need to be below PTAMS.
  _prev_marked_bytes = 0;

  if (during_initial_mark) {
    // During initial-mark, we'll also explicitly mark all objects
    // we find to be self-forwarded on the next bitmap. So all
    // objects need to be below NTAMS.
    _next_top_at_mark_start = top();
    _next_marked_bytes = 0;
  } else if (during_conc_mark) {
    // During concurrent mark, all objects in the CSet (including
    // the ones we find to be self-forwarded) are implicitly live.
    // So all objects need to be above NTAMS.
    _next_top_at_mark_start = bottom();
    _next_marked_bytes = 0;
  }
}

void HeapRegion::note_self_forwarding_removal_end(size_t marked_bytes) {
  assert(marked_bytes <= used(),
         "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
  _prev_top_at_mark_start = top();
  _prev_marked_bytes = marked_bytes;
}

// Code roots support

void HeapRegion::add_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root(nm);
}

void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_strong_code_root_locked(nm);
}

void HeapRegion::remove_strong_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->remove_strong_code_root(nm);
}

void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->strong_code_roots_do(blk);
}

class VerifyStrongCodeRootOopClosure: public OopClosure {
  const HeapRegion* _hr;
  bool _failures;
  bool _has_oops_in_region;

  template <class T> void do_oop_work(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);

      // Note: not all the oops embedded in the nmethod are in the
      // current region. We only look at those which are.
      if (_hr->is_in(obj)) {
        // Object is in the region. Check that it's below top.
        if (_hr->top() <= (HeapWord*)obj) {
          // Object is above top
          log_error(gc, verify)("Object " PTR_FORMAT " in region " HR_FORMAT " is above top",
                                p2i(obj), HR_FORMAT_PARAMS(_hr));
          _failures = true;
          return;
        }
        // Nmethod has at least one oop in the current region
        _has_oops_in_region = true;
      }
    }
  }

public:
  VerifyStrongCodeRootOopClosure(const HeapRegion* hr):
    _hr(hr), _failures(false), _has_oops_in_region(false) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }

  bool failures()           { return _failures; }
  bool has_oops_in_region() { return _has_oops_in_region; }
};

class VerifyStrongCodeRootCodeBlobClosure: public CodeBlobClosure {
  const HeapRegion* _hr;
  bool _failures;
public:
  VerifyStrongCodeRootCodeBlobClosure(const HeapRegion* hr) :
    _hr(hr), _failures(false) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb == NULL) ? NULL : cb->as_compiled_method()->as_nmethod_or_null();
    if (nm != NULL) {
      // Verify that the nmethod is live
      if (!nm->is_alive()) {
        log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has dead nmethod " PTR_FORMAT " in its strong code roots",
                              p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
        _failures = true;
      } else {
        VerifyStrongCodeRootOopClosure oop_cl(_hr);
        nm->oops_do(&oop_cl);
        if (!oop_cl.has_oops_in_region()) {
          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has nmethod " PTR_FORMAT " in its strong code roots with no pointers into region",
                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        } else if (oop_cl.failures()) {
          log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] has other failures for nmethod " PTR_FORMAT,
                                p2i(_hr->bottom()), p2i(_hr->end()), p2i(nm));
          _failures = true;
        }
      }
    }
  }

  bool failures() { return _failures; }
};

void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const {
  if (!G1VerifyHeapRegionCodeRoots) {
    // We're not verifying code roots.
    return;
  }
  if (vo == VerifyOption_G1UseFullMarking) {
    // Marking verification during a full GC is performed after class
    // unloading, code cache unloading, etc so the strong code roots
    // attached to each heap region are in an inconsistent state. They won't
    // be consistent until the strong code roots are rebuilt after the
    // actual GC. Skip verifying the strong code roots at this point in
    // time.
    assert(VerifyDuringGC, "only way to get here");
    return;
  }

  HeapRegionRemSet* hrrs = rem_set();
  size_t strong_code_roots_length = hrrs->strong_code_roots_list_length();

  // If this region is empty then there should be no entries
  // on its strong code root list.
  if (is_empty()) {
    if (strong_code_roots_length > 0) {
      log_error(gc, verify)("region " HR_FORMAT " is empty but has " SIZE_FORMAT " code root entries",
                            HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  if (is_continues_humongous()) {
    if (strong_code_roots_length > 0) {
      log_error(gc, verify)("region " HR_FORMAT " is a continuation of a humongous region but has " SIZE_FORMAT " code root entries",
                            HR_FORMAT_PARAMS(this), strong_code_roots_length);
      *failures = true;
    }
    return;
  }

  VerifyStrongCodeRootCodeBlobClosure cb_cl(this);
  strong_code_roots_do(&cb_cl);

  if (cb_cl.failures()) {
    *failures = true;
  }
}

void HeapRegion::print() const { print_on(tty); }
void HeapRegion::print_on(outputStream* st) const {
  st->print("|%4u", this->_hrm_index);
  st->print("|" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT,
            p2i(bottom()), p2i(top()), p2i(end()));
  st->print("|%3d%%", (int) ((double) used() * 100 / capacity()));
  st->print("|%2s", get_short_type_str());
  if (in_collection_set()) {
    st->print("|CS");
  } else {
    st->print("|  ");
  }
  st->print_cr("|TAMS " PTR_FORMAT ", " PTR_FORMAT "| %s ",
               p2i(prev_top_at_mark_start()), p2i(next_top_at_mark_start()), rem_set()->get_state_str());
}

class G1VerificationClosure : public BasicOopIterateClosure {
protected:
  G1CollectedHeap* _g1h;
  G1CardTable* _ct;
  oop _containing_obj;
  bool _failures;
  int _n_failures;
  VerifyOption _vo;
public:
  // _vo == UsePrevMarking -> use "prev" marking information,
  // _vo == UseNextMarking -> use "next" marking information,
  // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS.
  G1VerificationClosure(G1CollectedHeap* g1h, VerifyOption vo) :
    _g1h(g1h), _ct(g1h->card_table()),
    _containing_obj(NULL), _failures(false), _n_failures(0), _vo(vo) {
  }

  void set_containing_obj(oop obj) {
    _containing_obj = obj;
  }

  bool failures() { return _failures; }
  int n_failures() { return _n_failures; }

  void print_object(outputStream* out, oop obj) {
#ifdef PRODUCT
    Klass* k = obj->klass();
    const char* class_name = k->external_name();
    out->print_cr("class name %s", class_name);
#else // PRODUCT
    obj->print_on(out);
#endif // PRODUCT
  }

  // This closure provides its own oop verification code.
  debug_only(virtual bool should_verify_oops() { return false; })
};

class VerifyLiveClosure : public G1VerificationClosure {
public:
  VerifyLiveClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    verify_liveness(p);
  }

  template <class T>
  void verify_liveness(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    Log(gc, verify) log;
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);
      bool failed = false;
      if (!_g1h->is_in(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
        MutexLocker x(ParGCRareEvent_lock,
                      Mutex::_no_safepoint_check_flag);

        if (!_failures) {
          log.error("----------");
        }
        ResourceMark rm;
        if (!_g1h->is_in(obj)) {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region " HR_FORMAT,
                    p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
          LogStream ls(log.error());
          print_object(&ls, _containing_obj);
          HeapRegion* const to = _g1h->heap_region_containing(obj);
          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s",
                    p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
        } else {
          HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
          HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region " HR_FORMAT,
                    p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
          LogStream ls(log.error());
          print_object(&ls, _containing_obj);
          log.error("points to dead obj " PTR_FORMAT " in region " HR_FORMAT,
                    p2i(obj), HR_FORMAT_PARAMS(to));
          print_object(&ls, obj);
        }
        log.error("----------");
        _failures = true;
        failed = true;
        _n_failures++;
      }
    }
  }
};

class VerifyRemSetClosure : public G1VerificationClosure {
public:
  VerifyRemSetClosure(G1CollectedHeap* g1h, VerifyOption vo) : G1VerificationClosure(g1h, vo) {}
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  template <class T>
  void do_oop_work(T* p) {
    assert(_containing_obj != NULL, "Precondition");
    assert(!_g1h->is_obj_dead_cond(_containing_obj, _vo),
           "Precondition");
    verify_remembered_set(p);
  }

  template <class T>
  void verify_remembered_set(T* p) {
    T heap_oop = RawAccess<>::oop_load(p);
    Log(gc, verify) log;
    if (!CompressedOops::is_null(heap_oop)) {
      oop obj = CompressedOops::decode_not_null(heap_oop);
      HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
      HeapRegion* to = _g1h->heap_region_containing(obj);
      if (from != NULL && to != NULL &&
          from != to &&
          !to->is_pinned() &&
          to->rem_set()->is_complete()) {
        jbyte cv_obj = *_ct->byte_for_const(_containing_obj);
        jbyte cv_field = *_ct->byte_for_const(p);
        const jbyte dirty = G1CardTable::dirty_card_val();

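        // A missing remembered set entry is only an error if the source
        // region is not young (young regions are always collected, so they
        // need no entries), the target's remembered set lacks the card of
        // the field, and no dirty card defers the reference: object arrays
        // are scanned in card-sized chunks, so only the field's own card
        // counts for them, while for other objects a dirty card under
        // either the object header or the field is enough.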
        bool is_bad = !(from->is_young()
                        || to->rem_set()->contains_reference(p)
                        || (_containing_obj->is_objArray() ?
                              cv_field == dirty :
                              cv_obj == dirty || cv_field == dirty));
        if (is_bad) {
          MutexLocker x(ParGCRareEvent_lock,
                        Mutex::_no_safepoint_check_flag);

          if (!_failures) {
            log.error("----------");
          }
          log.error("Missing rem set entry:");
          log.error("Field " PTR_FORMAT " of obj " PTR_FORMAT " in region " HR_FORMAT,
                    p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
          ResourceMark rm;
          LogStream ls(log.error());
          _containing_obj->print_on(&ls);
          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s",
                    p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
          if (oopDesc::is_oop(obj)) {
            obj->print_on(&ls);
          }
          log.error("Obj head CTE = %d, field CTE = %d.", cv_obj, cv_field);
          log.error("----------");
          _failures = true;
          _n_failures++;
        }
      }
    }
  }
};

// Closure that applies the given two closures in sequence.
class G1Mux2Closure : public BasicOopIterateClosure {
  OopClosure* _c1;
  OopClosure* _c2;
public:
  G1Mux2Closure(OopClosure* c1, OopClosure* c2) { _c1 = c1; _c2 = c2; }
  template <class T> inline void do_oop_work(T* p) {
    // Apply first closure; then apply the second.
    _c1->do_oop(p);
    _c2->do_oop(p);
  }
  virtual inline void do_oop(oop* p) { do_oop_work(p); }
  virtual inline void do_oop(narrowOop* p) { do_oop_work(p); }

  // This closure provides its own oop verification code.
  debug_only(virtual bool should_verify_oops() { return false; })
};

// This really ought to be commoned up into OffsetTableContigSpace somehow.
// We would need a mechanism to make that code skip dead objects.

void HeapRegion::verify(VerifyOption vo,
                        bool* failures) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyLiveClosure vl_cl(g1h, vo);
  VerifyRemSetClosure vr_cl(g1h, vo);
  bool is_region_humongous = is_humongous();
  size_t object_num = 0;
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);
    object_num += 1;

    if (!g1h->is_obj_dead_cond(obj, this, vo)) {
      if (oopDesc::is_oop(obj)) {
        Klass* klass = obj->klass();
        bool is_metaspace_object = Metaspace::contains(klass);
        if (!is_metaspace_object) {
          log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                "not metadata", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else if (!klass->is_klass()) {
          log_error(gc, verify)("klass " PTR_FORMAT " of object " PTR_FORMAT " "
                                "not a klass", p2i(klass), p2i(obj));
          *failures = true;
          return;
        } else {
          vl_cl.set_containing_obj(obj);
          if (!g1h->collector_state()->in_full_gc() || G1VerifyRSetsDuringFullGC) {
            // verify liveness and rem_set
            vr_cl.set_containing_obj(obj);
            G1Mux2Closure mux(&vl_cl, &vr_cl);
            obj->oop_iterate(&mux);

            if (vr_cl.failures()) {
              *failures = true;
            }
            if (G1MaxVerifyFailures >= 0 &&
                vr_cl.n_failures() >= G1MaxVerifyFailures) {
              return;
            }
          } else {
            // verify only liveness
            obj->oop_iterate(&vl_cl);
          }
          if (vl_cl.failures()) {
            *failures = true;
          }
          if (G1MaxVerifyFailures >= 0 &&
              vl_cl.n_failures() >= G1MaxVerifyFailures) {
            return;
          }
        }
      } else {
        log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }
    prev_p = p;
    p += obj_size;
  }

  if (!is_young() && !is_empty()) {
    _bot_part.verify();
  }

  if (is_region_humongous) {
    oop obj = oop(this->humongous_start_region()->bottom());
    if ((HeapWord*)obj > bottom() || (HeapWord*)obj + obj->size() < bottom()) {
      log_error(gc, verify)("this humongous region is not part of its humongous object " PTR_FORMAT, p2i(obj));
      *failures = true;
      return;
    }
  }

  if (!is_region_humongous && p != top()) {
    log_error(gc, verify)("end of last object " PTR_FORMAT " "
                          "does not match top " PTR_FORMAT, p2i(p), p2i(top()));
    *failures = true;
    return;
  }

  HeapWord* the_end = end();
  // Do some extra BOT consistency checking for addresses in the
  // range [top, end). BOT look-ups in this range should yield
  // top. No point in doing that if top == end (there's nothing there).
  if (p < the_end) {
    // Look up top
    HeapWord* addr_1 = p;
    HeapWord* b_start_1 = _bot_part.block_start_const(addr_1);
    if (b_start_1 != p) {
      log_error(gc, verify)("BOT look up for top: " PTR_FORMAT " "
                            " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                            p2i(addr_1), p2i(b_start_1), p2i(p));
      *failures = true;
      return;
    }

    // Look up top + 1
    HeapWord* addr_2 = p + 1;
    if (addr_2 < the_end) {
      HeapWord* b_start_2 = _bot_part.block_start_const(addr_2);
      if (b_start_2 != p) {
        log_error(gc, verify)("BOT look up for top + 1: " PTR_FORMAT " "
                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                              p2i(addr_2), p2i(b_start_2), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up an address between top and end
    size_t diff = pointer_delta(the_end, p) / 2;
    HeapWord* addr_3 = p + diff;
    if (addr_3 < the_end) {
      HeapWord* b_start_3 = _bot_part.block_start_const(addr_3);
      if (b_start_3 != p) {
        log_error(gc, verify)("BOT look up for top + diff: " PTR_FORMAT " "
                              " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                              p2i(addr_3), p2i(b_start_3), p2i(p));
        *failures = true;
        return;
      }
    }

    // Look up end - 1
    HeapWord* addr_4 = the_end - 1;
    HeapWord* b_start_4 = _bot_part.block_start_const(addr_4);
    if (b_start_4 != p) {
      log_error(gc, verify)("BOT look up for end - 1: " PTR_FORMAT " "
                            " yielded " PTR_FORMAT ", expecting " PTR_FORMAT,
                            p2i(addr_4), p2i(b_start_4), p2i(p));
      *failures = true;
      return;
    }
  }

  verify_strong_code_roots(vo, failures);
}

void HeapRegion::verify() const {
  bool dummy = false;
  verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
}

void HeapRegion::verify_rem_set(VerifyOption vo, bool* failures) const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  *failures = false;
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyRemSetClosure vr_cl(g1h, vo);
  while (p < top()) {
    oop obj = oop(p);
    size_t obj_size = block_size(p);

    if (!g1h->is_obj_dead_cond(obj, this, vo)) {
      if (oopDesc::is_oop(obj)) {
        vr_cl.set_containing_obj(obj);
        obj->oop_iterate(&vr_cl);

        if (vr_cl.failures()) {
          *failures = true;
        }
        if (G1MaxVerifyFailures >= 0 &&
            vr_cl.n_failures() >= G1MaxVerifyFailures) {
          return;
        }
      } else {
        log_error(gc, verify)(PTR_FORMAT " not an oop", p2i(obj));
        *failures = true;
        return;
      }
    }

    prev_p = p;
    p += obj_size;
  }
}

void HeapRegion::verify_rem_set() const {
  bool failures = false;
  verify_rem_set(VerifyOption_G1UsePrevMarking, &failures);
  guarantee(!failures, "HeapRegion RemSet verification failed");
}

void HeapRegion::prepare_for_compaction(CompactPoint* cp) {
  // Not used for G1 anymore, but pure virtual in Space.
  ShouldNotReachHere();
}

// G1OffsetTableContigSpace code; copied from space.cpp. Hope this can go
// away eventually.

void G1ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  CompactibleSpace::clear(mangle_space);
  reset_bot();
}
#ifndef PRODUCT
void G1ContiguousSpace::mangle_unused_area() {
  mangle_unused_area_complete();
}

void G1ContiguousSpace::mangle_unused_area_complete() {
  SpaceMangler::mangle_region(MemRegion(top(), end()));
}
#endif

void G1ContiguousSpace::print() const {
  print_short();
  tty->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                p2i(bottom()), p2i(top()), p2i(_bot_part.threshold()), p2i(end()));
}

HeapWord* G1ContiguousSpace::initialize_threshold() {
  return _bot_part.initialize_threshold();
}

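// Called when an allocation crosses the current BOT threshold; records the
// block [start, end) in the block offset table and returns the next address
// at which the table will need updating.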
HeapWord* G1ContiguousSpace::cross_threshold(HeapWord* start,
                                             HeapWord* end) {
  _bot_part.alloc_block(start, end);
  return _bot_part.threshold();
}

void G1ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void G1ContiguousSpace::object_iterate(ObjectClosure* blk) {
  HeapWord* p = bottom();
  while (p < top()) {
    if (block_is_obj(p)) {
      blk->do_object(oop(p));
    }
    p += block_size(p);
  }
}

G1ContiguousSpace::G1ContiguousSpace(G1BlockOffsetTable* bot) :
  _top(NULL),
  _bot_part(bot, this),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true),
  _pre_dummy_top(NULL)
{
}

void G1ContiguousSpace::initialize(MemRegion mr, bool clear_space, bool mangle_space) {
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _top = bottom();
  set_saved_mark_word(NULL);
  reset_bot();
}