/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"

const char* HeapRegionRemSet::_state_strings[] = {"Untracked", "Updating", "Complete"};
const char* HeapRegionRemSet::_short_state_strings[] = {"UNTRA", "UPDAT", "CMPLT"};
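
// A remembered set tracks incoming references at three granularities: a
// sparse table of per-region card lists, fine-grain PerRegionTables (one
// card bitmap per referencing region), and a coarse bitmap with one bit per
// referencing region. Entries are promoted from sparse to fine to coarse as
// they fill up; see OtherRegionsTable::add_reference() below.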
class PerRegionTable: public CHeapObj<mtGC> {
  friend class OtherRegionsTable;
  friend class HeapRegionRemSetIterator;

  HeapRegion*     _hr;
  CHeapBitMap     _bm;
  jint            _occupied;

  // next pointer for free/allocated 'all' list
  PerRegionTable* _next;

  // prev pointer for the allocated 'all' list
  PerRegionTable* _prev;

  // next pointer in collision list
  PerRegionTable* _collision_list_next;

  // Global free list of PRTs
  static PerRegionTable* volatile _free_list;
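  // PRTs are recycled through this list, which is treated as a lock-free
  // stack: alloc() pops the head with a CAS retry loop, and free() and
  // bulk_free() push with a CAS retry loop.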

protected:
  // We need access in order to union things into the base table.
  BitMap* bm() { return &_bm; }

  PerRegionTable(HeapRegion* hr) :
    _hr(hr),
    _bm(HeapRegion::CardsPerRegion, mtGC),
    _occupied(0),
    _next(NULL), _prev(NULL),
    _collision_list_next(NULL)
  {}

  void add_card_work(CardIdx_t from_card, bool par) {
    if (!_bm.at(from_card)) {
      if (par) {
        if (_bm.par_at_put(from_card, 1)) {
          Atomic::inc(&_occupied);
        }
      } else {
        _bm.at_put(from_card, 1);
        _occupied++;
      }
    }
  }

  void add_reference_work(OopOrNarrowOopStar from, bool par) {
    // Must make this robust in case "from" is not in "_hr", because of
    // concurrency.

    HeapRegion* loc_hr = hr();
    // If the test below fails, then this table was reused concurrently
    // with this operation. This is OK, since the old table was coarsened,
    // and adding a bit to the new table is never incorrect.
    if (loc_hr->is_in_reserved(from)) {
      CardIdx_t from_card = OtherRegionsTable::card_within_region(from, loc_hr);
      add_card_work(from_card, par);
    }
  }

public:
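
  // Pairs with the release_store in init(): a reader that observes the newly
  // published HeapRegion* also observes the fully cleared bitmap.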
  HeapRegion* hr() const { return OrderAccess::load_acquire(&_hr); }

  jint occupied() const {
    // Overkill, but if we ever need it...
    // guarantee(_occupied == _bm.count_one_bits(), "Check");
    return _occupied;
  }

  void init(HeapRegion* hr, bool clear_links_to_all_list) {
    if (clear_links_to_all_list) {
      set_next(NULL);
      set_prev(NULL);
    }
    _collision_list_next = NULL;
    _occupied = 0;
    _bm.clear();
    // Make sure that the bitmap clearing above has been finished before publishing
    // this PRT to concurrent threads.
    OrderAccess::release_store(&_hr, hr);
  }

  void add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ true);
  }

  void seq_add_reference(OopOrNarrowOopStar from) {
    add_reference_work(from, /*parallel*/ false);
  }

  void add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ true);
  }

  void seq_add_card(CardIdx_t from_card_index) {
    add_card_work(from_card_index, /*parallel*/ false);
  }

  // (Destructively) union the bitmap of the current table into the given
  // bitmap (which is assumed to be of the same size).
  void union_bitmap_into(BitMap* bm) {
    bm->set_union(_bm);
  }

  // Mem size in bytes.
  size_t mem_size() const {
    return sizeof(PerRegionTable) + _bm.size_in_words() * HeapWordSize;
  }

  // Requires "from" to be in "hr()".
  bool contains_reference(OopOrNarrowOopStar from) const {
    assert(hr()->is_in_reserved(from), "Precondition.");
    size_t card_ind = pointer_delta(from, hr()->bottom(),
                                    G1CardTable::card_size);
    return _bm.at(card_ind);
  }

  // Bulk-free the PRTs from prt to last, assumes that they are
  // linked together using their _next field.
  static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
    while (true) {
      PerRegionTable* fl = _free_list;
      last->set_next(fl);
      PerRegionTable* res = Atomic::cmpxchg(prt, &_free_list, fl);
      if (res == fl) {
        return;
      }
    }
    ShouldNotReachHere();
  }

  static void free(PerRegionTable* prt) {
    bulk_free(prt, prt);
  }

  // Returns an initialized PerRegionTable instance.
  static PerRegionTable* alloc(HeapRegion* hr) {
    PerRegionTable* fl = _free_list;
    while (fl != NULL) {
      PerRegionTable* nxt = fl->next();
      PerRegionTable* res = Atomic::cmpxchg(nxt, &_free_list, fl);
      if (res == fl) {
        fl->init(hr, true);
        return fl;
      } else {
        fl = _free_list;
      }
    }
    assert(fl == NULL, "Loop condition.");
    return new PerRegionTable(hr);
  }

  PerRegionTable* next() const { return _next; }
  void set_next(PerRegionTable* next) { _next = next; }
  PerRegionTable* prev() const { return _prev; }
  void set_prev(PerRegionTable* prev) { _prev = prev; }

  // Accessor and Modification routines for the pointer for the
  // singly linked collision list that links the PRTs within the
  // OtherRegionsTable::_fine_grain_regions hash table.
  //
  // It might be useful to also make the collision list doubly linked
  // to avoid iteration over the collisions list during scrubbing/deletion.
  // OTOH there might not be many collisions.

  PerRegionTable* collision_list_next() const {
    return _collision_list_next;
  }

  void set_collision_list_next(PerRegionTable* next) {
    _collision_list_next = next;
  }

  PerRegionTable** collision_list_next_addr() {
    return &_collision_list_next;
  }

  static size_t fl_mem_size() {
    PerRegionTable* cur = _free_list;
    size_t res = 0;
    while (cur != NULL) {
      res += cur->mem_size();
      cur = cur->next();
    }
    return res;
  }

  static void test_fl_mem_size();
};

PerRegionTable* volatile PerRegionTable::_free_list = NULL;

size_t OtherRegionsTable::_max_fine_entries = 0;
size_t OtherRegionsTable::_mod_max_fine_entries_mask = 0;
size_t OtherRegionsTable::_fine_eviction_stride = 0;
size_t OtherRegionsTable::_fine_eviction_sample_size = 0;
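
// The fine-grain table is a chained hash table of PRTs, indexed by the
// from-region index masked with _mod_max_fine_entries_mask.
// _max_fine_entries is derived from G1RSetRegionEntries as a power of two,
// so that masking is a valid modulus.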
OtherRegionsTable::OtherRegionsTable(Mutex* m) :
  _g1h(G1CollectedHeap::heap()),
  _m(m),
  _coarse_map(G1CollectedHeap::heap()->max_regions(), mtGC),
  _n_coarse_entries(0),
  _fine_grain_regions(NULL),
  _n_fine_entries(0),
  _first_all_fine_prts(NULL),
  _last_all_fine_prts(NULL),
  _fine_eviction_start(0),
  _sparse_table()
{
  typedef PerRegionTable* PerRegionTablePtr;

  if (_max_fine_entries == 0) {
    assert(_mod_max_fine_entries_mask == 0, "Both or none.");
    size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
    _max_fine_entries = (size_t)1 << max_entries_log;
    _mod_max_fine_entries_mask = _max_fine_entries - 1;

    assert(_fine_eviction_sample_size == 0
           && _fine_eviction_stride == 0, "All init at same time.");
    _fine_eviction_sample_size = MAX2((size_t)4, max_entries_log);
    _fine_eviction_stride = _max_fine_entries / _fine_eviction_sample_size;
  }

  _fine_grain_regions = NEW_C_HEAP_ARRAY3(PerRegionTablePtr, _max_fine_entries,
                                          mtGC, CURRENT_PC, AllocFailStrategy::RETURN_NULL);

  if (_fine_grain_regions == NULL) {
    vm_exit_out_of_memory(sizeof(void*)*_max_fine_entries, OOM_MALLOC_ERROR,
                          "Failed to allocate _fine_grain_entries.");
  }

  for (size_t i = 0; i < _max_fine_entries; i++) {
    _fine_grain_regions[i] = NULL;
  }
}

void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
  // We always append to the beginning of the list for convenience;
  // the order of entries in this list does not matter.
  if (_first_all_fine_prts != NULL) {
    assert(_first_all_fine_prts->prev() == NULL, "invariant");
    _first_all_fine_prts->set_prev(prt);
    prt->set_next(_first_all_fine_prts);
  } else {
    // this is the first element we insert. Adjust the "last" pointer
    _last_all_fine_prts = prt;
    assert(prt->next() == NULL, "just checking");
  }
  // the new element is always the first element without a predecessor
  prt->set_prev(NULL);
  _first_all_fine_prts = prt;

  assert(prt->prev() == NULL, "just checking");
  assert(_first_all_fine_prts == prt, "just checking");
  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}

void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
  if (prt->prev() != NULL) {
    assert(_first_all_fine_prts != prt, "just checking");
    prt->prev()->set_next(prt->next());
    // removing the last element in the list?
    if (_last_all_fine_prts == prt) {
      _last_all_fine_prts = prt->prev();
    }
  } else {
    assert(_first_all_fine_prts == prt, "just checking");
    _first_all_fine_prts = prt->next();
    // list is empty now?
    if (_first_all_fine_prts == NULL) {
      _last_all_fine_prts = NULL;
    }
  }

  if (prt->next() != NULL) {
    prt->next()->set_prev(prt->prev());
  }

  prt->set_next(NULL);
  prt->set_prev(NULL);

  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
         "just checking");
  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
         "just checking");
  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
         "just checking");
}
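
// Example (assuming the usual 512-byte cards, i.e. CardTable::card_shift == 9,
// and 8-byte HeapWords): a reference located 0x1200 bytes above hr->bottom()
// is 0x240 words into the region, and 0x240 >> (9 - LogHeapWordSize) ==
// 0x240 >> 6 == 9, i.e. card 9 of the region.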
CardIdx_t OtherRegionsTable::card_within_region(OopOrNarrowOopStar within_region, HeapRegion* hr) {
  assert(hr->is_in_reserved(within_region),
         "HeapWord " PTR_FORMAT " is outside of region %u [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(within_region), hr->hrm_index(), p2i(hr->bottom()), p2i(hr->end()));
  CardIdx_t result = (CardIdx_t)(pointer_delta((HeapWord*)within_region, hr->bottom()) >> (CardTable::card_shift - LogHeapWordSize));
  return result;
}
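
// Add a reference into the remembered set, promoting between granularities
// as needed: a from-region that is already coarsened needs no further work;
// otherwise the card is added to the matching fine-grain PRT if one exists,
// or else to the sparse table. When the sparse entry overflows, its cards
// migrate into a newly allocated fine-grain PRT; when the fine table itself
// is full, delete_region_table() evicts (and thereby coarsens) a victim PRT.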
void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
  // Note that this may be a continuation region of a humongous ("H") object.
  HeapRegion* from_hr = _g1h->heap_region_containing(from);
  RegionIdx_t from_hrm_ind = (RegionIdx_t) from_hr->hrm_index();

  // If the region is already coarsened, return.
  if (_coarse_map.at(from_hrm_ind)) {
    assert(contains_reference(from), "We just found " PTR_FORMAT " in the Coarse table", p2i(from));
    return;
  }

  // Otherwise find a per-region table to add it to.
  size_t ind = from_hrm_ind & _mod_max_fine_entries_mask;
  PerRegionTable* prt = find_region_table(ind, from_hr);
  if (prt == NULL) {
    MutexLocker x(_m, Mutex::_no_safepoint_check_flag);
    // Confirm that it's really not there...
    prt = find_region_table(ind, from_hr);
    if (prt == NULL) {

      CardIdx_t card_index = card_within_region(from, from_hr);

      if (_sparse_table.add_card(from_hrm_ind, card_index)) {
        assert(contains_reference_locked(from), "We just added " PTR_FORMAT " to the Sparse table", p2i(from));
        return;
      }

      if (_n_fine_entries == _max_fine_entries) {
        prt = delete_region_table();
        // There is no need to clear the links to the 'all' list here:
        // prt will be reused immediately, i.e. remain in the 'all' list.
        prt->init(from_hr, false /* clear_links_to_all_list */);
      } else {
        prt = PerRegionTable::alloc(from_hr);
        link_to_all(prt);
      }

      PerRegionTable* first_prt = _fine_grain_regions[ind];
      prt->set_collision_list_next(first_prt);
      // The assignment into _fine_grain_regions allows the prt to
      // start being used concurrently. In addition to
      // collision_list_next which must be visible (else concurrent
      // parsing of the list, if any, may fail to see other entries),
      // the content of the prt must be visible (else for instance
      // some mark bits may not yet seem cleared or a 'later' update
      // performed by a concurrent thread could be undone when the
      // zeroing becomes visible). This requires store ordering.
      OrderAccess::release_store(&_fine_grain_regions[ind], prt);
      _n_fine_entries++;

      // Transfer from sparse to fine-grain.
      SparsePRTEntry* sprt_entry = _sparse_table.get_entry(from_hrm_ind);
      assert(sprt_entry != NULL, "There should have been an entry");
      for (int i = 0; i < sprt_entry->num_valid_cards(); i++) {
        CardIdx_t c = sprt_entry->card(i);
        prt->add_card(c);
      }
      // Now we can delete the sparse entry.
      bool res = _sparse_table.delete_entry(from_hrm_ind);
      assert(res, "It should have been there.");
    }
    assert(prt != NULL && prt->hr() == from_hr, "consequence");
  }
  // Note that we can't assert "prt->hr() == from_hr", because of the
  // possibility of concurrent reuse. But see head comment of
  // OtherRegionsTable for why this is OK.
  assert(prt != NULL, "Inv");

  prt->add_reference(from);
  assert(contains_reference(from), "We just added " PTR_FORMAT " to the PRT (%d)", p2i(from), prt->contains_reference(from));
}

PerRegionTable*
OtherRegionsTable::find_region_table(size_t ind, HeapRegion* hr) const {
  assert(ind < _max_fine_entries, "Preconditions.");
  PerRegionTable* prt = _fine_grain_regions[ind];
  while (prt != NULL && prt->hr() != hr) {
    prt = prt->collision_list_next();
  }
  // Loop postcondition is the method postcondition.
  return prt;
}

jint OtherRegionsTable::_n_coarsenings = 0;
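
// Evict a fine-grain table to make room for a new one: sample the hash
// chains of _fine_eviction_sample_size buckets (starting at a rotating
// offset, _fine_eviction_stride buckets apart), pick the most occupied PRT
// seen, mark its region in the coarse map, and unsplice the PRT for reuse.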
PerRegionTable* OtherRegionsTable::delete_region_table() {
  assert(_m->owned_by_self(), "Precondition");
  assert(_n_fine_entries == _max_fine_entries, "Precondition");
  PerRegionTable* max = NULL;
  jint max_occ = 0;
  PerRegionTable** max_prev = NULL;
  size_t max_ind;

  size_t i = _fine_eviction_start;
  for (size_t k = 0; k < _fine_eviction_sample_size; k++) {
    size_t ii = i;
    // Make sure we get a non-NULL sample.
    while (_fine_grain_regions[ii] == NULL) {
      ii++;
      if (ii == _max_fine_entries) ii = 0;
      guarantee(ii != i, "We must find one.");
    }
    PerRegionTable** prev = &_fine_grain_regions[ii];
    PerRegionTable* cur = *prev;
    while (cur != NULL) {
      jint cur_occ = cur->occupied();
      if (max == NULL || cur_occ > max_occ) {
        max = cur;
        max_prev = prev;
        max_ind = i;
        max_occ = cur_occ;
      }
      prev = cur->collision_list_next_addr();
      cur = cur->collision_list_next();
    }
    i = i + _fine_eviction_stride;
    if (i >= _n_fine_entries) i = i - _n_fine_entries;
  }

  _fine_eviction_start++;

  if (_fine_eviction_start >= _n_fine_entries) {
    _fine_eviction_start -= _n_fine_entries;
  }

  guarantee(max != NULL, "Since _n_fine_entries > 0");
  guarantee(max_prev != NULL, "Since max != NULL.");

  // Set the corresponding coarse bit.
  size_t max_hrm_index = (size_t) max->hr()->hrm_index();
  if (!_coarse_map.at(max_hrm_index)) {
    _coarse_map.at_put(max_hrm_index, true);
    _n_coarse_entries++;
  }

  // Unsplice.
  *max_prev = max->collision_list_next();
  Atomic::inc(&_n_coarsenings);
  _n_fine_entries--;
  return max;
}

bool OtherRegionsTable::occupancy_less_or_equal_than(size_t limit) const {
  if (limit <= (size_t)G1RSetSparseRegionEntries) {
    return occ_coarse() == 0 && _first_all_fine_prts == NULL && occ_sparse() <= limit;
  } else {
    // Current uses of this method may only pass values less than
    // G1RSetSparseRegionEntries for the limit. The obvious alternative,
    // comparing against occupied(), would be too slow at this time.
    Unimplemented();
    return false;
  }
}

bool OtherRegionsTable::is_empty() const {
  return occ_sparse() == 0 && occ_coarse() == 0 && _first_all_fine_prts == NULL;
}

size_t OtherRegionsTable::occupied() const {
  size_t sum = occ_fine();
  sum += occ_sparse();
  sum += occ_coarse();
  return sum;
}

size_t OtherRegionsTable::occ_fine() const {
  size_t sum = 0;

  size_t num = 0;
  PerRegionTable* cur = _first_all_fine_prts;
  while (cur != NULL) {
    sum += cur->occupied();
    cur = cur->next();
    num++;
  }
  guarantee(num == _n_fine_entries, "just checking");
  return sum;
}

size_t OtherRegionsTable::occ_coarse() const {
  return (_n_coarse_entries * HeapRegion::CardsPerRegion);
}

size_t OtherRegionsTable::occ_sparse() const {
  return _sparse_table.occupied();
}

size_t OtherRegionsTable::mem_size() const {
  size_t sum = 0;
  // all PRTs are of the same size so it is sufficient to query only one of them.
  if (_first_all_fine_prts != NULL) {
    assert(_last_all_fine_prts != NULL &&
           _first_all_fine_prts->mem_size() == _last_all_fine_prts->mem_size(), "check that mem_size() is constant");
    sum += _first_all_fine_prts->mem_size() * _n_fine_entries;
  }
  sum += (sizeof(PerRegionTable*) * _max_fine_entries);
  sum += (_coarse_map.size_in_words() * HeapWordSize);
  sum += (_sparse_table.mem_size());
  sum += sizeof(OtherRegionsTable) - sizeof(_sparse_table); // Avoid double counting above.
  return sum;
}

size_t OtherRegionsTable::static_mem_size() {
  return G1FromCardCache::static_mem_size();
}

size_t OtherRegionsTable::fl_mem_size() {
  return PerRegionTable::fl_mem_size();
}

void OtherRegionsTable::clear() {
  // if there are no entries, skip this step
  if (_first_all_fine_prts != NULL) {
    guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
    PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
    memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
  } else {
    guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
  }

  _first_all_fine_prts = _last_all_fine_prts = NULL;
  _sparse_table.clear();
  if (_n_coarse_entries > 0) {
    _coarse_map.clear();
  }
  _n_fine_entries = 0;
  _n_coarse_entries = 0;
}

bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
  // Cast away const in this case.
  MutexLocker x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
  return contains_reference_locked(from);
}

bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const {
  HeapRegion* hr = _g1h->heap_region_containing(from);
  RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index();
  // Is this region in the coarse map?
  if (_coarse_map.at(hr_ind)) return true;

  PerRegionTable* prt = find_region_table(hr_ind & _mod_max_fine_entries_mask,
                                          hr);
  if (prt != NULL) {
    return prt->contains_reference(from);

  } else {
    CardIdx_t card_index = card_within_region(from, hr);
    return _sparse_table.contains_card(hr_ind, card_index);
  }
}

HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetTable* bot,
                                   HeapRegion* hr)
  : _bot(bot),
    _code_roots(),
    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
    _other_regions(&_m),
    _hr(hr),
    _state(Untracked)
{
}

void HeapRegionRemSet::clear_fcc() {
  G1FromCardCache::clear(_hr->hrm_index());
}
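
// Scale the remembered set sizing flags with the region size if the user has
// not set them explicitly. E.g. (a sketch assuming base values of
// G1RSetSparseRegionEntriesBase == 4 and G1RSetRegionEntriesBase == 256,
// their usual defaults): with 8M regions, region_size_log_mb == 3, so
// G1RSetSparseRegionEntries becomes 4 * 2^4 == 64 and G1RSetRegionEntries
// becomes 256 * 4 == 1024.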
void HeapRegionRemSet::setup_remset_size() {
  const int LOG_M = 20;
  guarantee(HeapRegion::LogOfHRGrainBytes >= LOG_M, "Code assumes the region size >= 1M, but is " SIZE_FORMAT "B", HeapRegion::GrainBytes);

  int region_size_log_mb = HeapRegion::LogOfHRGrainBytes - LOG_M;
  if (FLAG_IS_DEFAULT(G1RSetSparseRegionEntries)) {
    G1RSetSparseRegionEntries = G1RSetSparseRegionEntriesBase * ((size_t)1 << (region_size_log_mb + 1));
  }
  if (FLAG_IS_DEFAULT(G1RSetRegionEntries)) {
    G1RSetRegionEntries = G1RSetRegionEntriesBase * (region_size_log_mb + 1);
  }
  guarantee(G1RSetSparseRegionEntries > 0 && G1RSetRegionEntries > 0, "Sanity");
}

void HeapRegionRemSet::clear(bool only_cardset) {
  MutexLocker x(&_m, Mutex::_no_safepoint_check_flag);
  clear_locked(only_cardset);
}

void HeapRegionRemSet::clear_locked(bool only_cardset) {
  if (!only_cardset) {
    _code_roots.clear();
  }
  clear_fcc();
  _other_regions.clear();
  set_state_empty();
  assert(occupied_locked() == 0, "Should be clear.");
}

// Code roots support
//
// The code root set is protected by two separate locking schemes.
// When at a safepoint, the per-hrrs lock must be held during modifications,
// except when doing a full gc.
// When not at a safepoint, the CodeCache_lock must be held during modifications.
// When concurrent readers access the contains() function
// (during the evacuation phase) no removals are allowed.

void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((!CodeCache_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint()),
         "should call add_strong_code_root_locked instead. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s",
         BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()));
  // Optimistic unlocked contains-check
  if (!_code_roots.contains(nm)) {
    MutexLocker ml(&_m, Mutex::_no_safepoint_check_flag);
    add_strong_code_root_locked(nm);
  }
}

void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert((CodeCache_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          (_m.owned_by_self() || Thread::current()->is_VM_thread()))),
         "not safely locked. CodeCache_lock->owned_by_self(): %s, is_at_safepoint(): %s, _m.owned_by_self(): %s, Thread::current()->is_VM_thread(): %s",
         BOOL_TO_STR(CodeCache_lock->owned_by_self()), BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),
         BOOL_TO_STR(_m.owned_by_self()), BOOL_TO_STR(Thread::current()->is_VM_thread()));
  _code_roots.add(nm);
}

void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
  assert(nm != NULL, "sanity");
  assert_locked_or_safepoint(CodeCache_lock);

  MutexLocker ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
  _code_roots.remove(nm);

  // Check that there were no duplicates
  guarantee(!_code_roots.contains(nm), "duplicate entry found");
}

void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
  _code_roots.nmethods_do(blk);
}

void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
  _code_roots.clean(hr);
}

size_t HeapRegionRemSet::strong_code_roots_mem_size() {
  return _code_roots.mem_size();
}
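
// The iterator yields card indices from each granularity in turn: the
// sparse table first, then every fine-grain PRT on the 'all' list, and
// finally the regions marked in the coarse map; _is tracks the current
// phase.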
HeapRegionRemSetIterator::HeapRegionRemSetIterator(HeapRegionRemSet* hrrs) :
  _hrrs(hrrs),
  _coarse_map(&hrrs->_other_regions._coarse_map),
  _bot(hrrs->_bot),
  _g1h(G1CollectedHeap::heap()),
  _n_yielded_fine(0),
  _n_yielded_coarse(0),
  _n_yielded_sparse(0),
  _is(Sparse),
  _cur_region_card_offset(0),
  // Set these values so that we increment to the first region.
  _coarse_cur_region_index(-1),
  _coarse_cur_region_cur_card(HeapRegion::CardsPerRegion-1),
  _fine_cur_prt(NULL),
  _cur_card_in_prt(HeapRegion::CardsPerRegion),
  _sparse_iter(&hrrs->_other_regions._sparse_table) {}

bool HeapRegionRemSetIterator::coarse_has_next(size_t& card_index) {
  if (_hrrs->_other_regions._n_coarse_entries == 0) return false;
  // Go to the next card.
  _coarse_cur_region_cur_card++;
  // Was that the last card in the current region?
  if (_coarse_cur_region_cur_card == HeapRegion::CardsPerRegion) {
    // Yes: find the next region. This may leave _coarse_cur_region_index
    // set to the last index, in which case there are no more coarse
    // regions.
    _coarse_cur_region_index =
      (int) _coarse_map->get_next_one_offset(_coarse_cur_region_index + 1);
    if ((size_t)_coarse_cur_region_index < _coarse_map->size()) {
      _coarse_cur_region_cur_card = 0;
      HeapWord* r_bot =
        _g1h->region_at((uint) _coarse_cur_region_index)->bottom();
      _cur_region_card_offset = _bot->index_for_raw(r_bot);
    } else {
      return false;
    }
  }
  // If we didn't return false above, then we can yield a card.
  card_index = _cur_region_card_offset + _coarse_cur_region_cur_card;
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
  if (fine_has_next()) {
    _cur_card_in_prt =
      _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }
  if (_cur_card_in_prt == HeapRegion::CardsPerRegion) {
    // _fine_cur_prt may still be NULL if there are no PRTs at all for
    // the remembered set.
    if (_fine_cur_prt == NULL || _fine_cur_prt->next() == NULL) {
      return false;
    }
    PerRegionTable* next_prt = _fine_cur_prt->next();
    switch_to_prt(next_prt);
    _cur_card_in_prt = _fine_cur_prt->_bm.get_next_one_offset(_cur_card_in_prt + 1);
  }

  card_index = _cur_region_card_offset + _cur_card_in_prt;
  guarantee(_cur_card_in_prt < HeapRegion::CardsPerRegion,
            "Card index " SIZE_FORMAT " must be within the region", _cur_card_in_prt);
  return true;
}

bool HeapRegionRemSetIterator::fine_has_next() {
  return _cur_card_in_prt != HeapRegion::CardsPerRegion;
}

void HeapRegionRemSetIterator::switch_to_prt(PerRegionTable* prt) {
  assert(prt != NULL, "Cannot switch to NULL prt");
  _fine_cur_prt = prt;

  HeapWord* r_bot = _fine_cur_prt->hr()->bottom();
  _cur_region_card_offset = _bot->index_for_raw(r_bot);

  // The bitmap scan for the PRT always scans from _cur_region_cur_card + 1.
  // To avoid special-casing this start case, and not miss the first bitmap
  // entry, initialize _cur_region_cur_card with -1 instead of 0.
  _cur_card_in_prt = (size_t)-1;
}

bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
  switch (_is) {
  case Sparse: {
    if (_sparse_iter.has_next(card_index)) {
      _n_yielded_sparse++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Fine;
    PerRegionTable* initial_fine_prt = _hrrs->_other_regions._first_all_fine_prts;
    if (initial_fine_prt != NULL) {
      switch_to_prt(_hrrs->_other_regions._first_all_fine_prts);
    }
  }
  case Fine:
    if (fine_has_next(card_index)) {
      _n_yielded_fine++;
      return true;
    }
    // Otherwise, deliberate fall-through
    _is = Coarse;
  case Coarse:
    if (coarse_has_next(card_index)) {
      _n_yielded_coarse++;
      return true;
    }
    // Otherwise...
    break;
  }
  return false;
}

#ifndef PRODUCT
void HeapRegionRemSet::test() {
  os::sleep(Thread::current(), (jlong)5000, false);
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  // Run with "-XX:G1LogRSetRegionEntries=2", so that 1 and 5 end up in same
  // hash bucket.
  HeapRegion* hr0 = g1h->region_at(0);
  HeapRegion* hr1 = g1h->region_at(1);
  HeapRegion* hr2 = g1h->region_at(5);
  HeapRegion* hr3 = g1h->region_at(6);
  HeapRegion* hr4 = g1h->region_at(7);
  HeapRegion* hr5 = g1h->region_at(8);

  HeapWord* hr1_start = hr1->bottom();
  HeapWord* hr1_mid = hr1_start + HeapRegion::GrainWords/2;
  HeapWord* hr1_last = hr1->end() - 1;

  HeapWord* hr2_start = hr2->bottom();
  HeapWord* hr2_mid = hr2_start + HeapRegion::GrainWords/2;
  HeapWord* hr2_last = hr2->end() - 1;

  HeapWord* hr3_start = hr3->bottom();
  HeapWord* hr3_mid = hr3_start + HeapRegion::GrainWords/2;
  HeapWord* hr3_last = hr3->end() - 1;

  HeapRegionRemSet* hrrs = hr0->rem_set();

  // Make three references from region 0x101...
  hrrs->add_reference((OopOrNarrowOopStar)hr1_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr1_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr2_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr2_last);

  hrrs->add_reference((OopOrNarrowOopStar)hr3_start);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_mid);
  hrrs->add_reference((OopOrNarrowOopStar)hr3_last);

  // Now cause a coarsening.
  hrrs->add_reference((OopOrNarrowOopStar)hr4->bottom());
  hrrs->add_reference((OopOrNarrowOopStar)hr5->bottom());

  // Now, does iteration yield these three?
  HeapRegionRemSetIterator iter(hrrs);
  size_t sum = 0;
  size_t card_index;
  while (iter.has_next(card_index)) {
    HeapWord* card_start = g1h->bot()->address_for_index(card_index);
    tty->print_cr("  Card " PTR_FORMAT ".", p2i(card_start));
    sum++;
  }
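  // 11 references were added in total; coarsening one region replaces its
  // 3 distinct cards with all CardsPerRegion cards (2048, assuming 1M
  // regions and 512-byte cards), hence 11 - 3 + 2048.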
  guarantee(sum == 11 - 3 + 2048, "Failure");
  guarantee(sum == hrrs->occupied(), "Failure");
}
#endif