| 1 | /* |
| 2 | * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #include "precompiled.hpp" |
| 26 | #include "gc/cms/cmsCardTable.hpp" |
| 27 | #include "gc/cms/cmsHeap.hpp" |
| 28 | #include "gc/shared/cardTableBarrierSet.hpp" |
| 29 | #include "gc/shared/cardTableRS.hpp" |
| 30 | #include "gc/shared/collectedHeap.hpp" |
| 31 | #include "gc/shared/space.inline.hpp" |
| 32 | #include "memory/allocation.inline.hpp" |
| 33 | #include "memory/virtualspace.hpp" |
| 34 | #include "oops/oop.inline.hpp" |
| 35 | #include "runtime/java.hpp" |
| 36 | #include "runtime/mutexLocker.hpp" |
| 37 | #include "runtime/orderAccess.hpp" |
| 38 | #include "runtime/vmThread.hpp" |
| 39 | |
| 40 | CMSCardTable::CMSCardTable(MemRegion whole_heap) : |
| 41 | CardTableRS(whole_heap, CMSPrecleaningEnabled /* scanned_concurrently */) { |
| 42 | } |
| 43 | |
| 44 | // Returns the number of chunks necessary to cover "mr". |
| 45 | size_t CMSCardTable::chunks_to_cover(MemRegion mr) { |
| 46 | return (size_t)(addr_to_chunk_index(mr.last()) - |
| 47 | addr_to_chunk_index(mr.start()) + 1); |
| 48 | } |
| 49 | |
| 50 | // Returns the index of the chunk in a stride which |
| 51 | // covers the given address. |
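// The index is computed from the raw card-table byte address for "addr",
// divided by ParGCCardsPerStrideChunk; callers use it either modulo the
// number of strides or as an offset from a base chunk index.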
| 52 | uintptr_t CMSCardTable::addr_to_chunk_index(const void* addr) { |
| 53 | uintptr_t card = (uintptr_t) byte_for(addr); |
| 54 | return card / ParGCCardsPerStrideChunk; |
| 55 | } |
| 56 | |
| 57 | void CMSCardTable:: |
| 58 | non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr, |
| 59 | OopsInGenClosure* cl, |
| 60 | CardTableRS* ct, |
| 61 | uint n_threads) { |
  assert(n_threads > 0, "expected n_threads > 0");
  assert(n_threads <= ParallelGCThreads,
         "n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads);
| 65 | |
  // Make sure the LNC (lowest non-clean card) array is valid for the space.
| 67 | CardValue** lowest_non_clean; |
| 68 | uintptr_t lowest_non_clean_base_chunk_index; |
| 69 | size_t lowest_non_clean_chunk_size; |
| 70 | get_LNC_array_for_space(sp, lowest_non_clean, |
| 71 | lowest_non_clean_base_chunk_index, |
| 72 | lowest_non_clean_chunk_size); |
| 73 | |
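  // Work is split into n_threads * ParGCStridesPerThread strides. Chunks of
  // ParGCCardsPerStrideChunk cards are dealt round-robin to the strides, so
  // each claimed stride visits every n_strides-th chunk of the card table
  // (see process_stride() below).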
| 74 | uint n_strides = n_threads * ParGCStridesPerThread; |
| 75 | SequentialSubTasksDone* pst = sp->par_seq_tasks(); |
| 76 | // Sets the condition for completion of the subtask (how many threads |
| 77 | // need to finish in order to be done). |
| 78 | pst->set_n_threads(n_threads); |
| 79 | pst->set_n_tasks(n_strides); |
| 80 | |
| 81 | uint stride = 0; |
| 82 | while (pst->try_claim_task(/* reference */ stride)) { |
| 83 | process_stride(sp, mr, stride, n_strides, |
| 84 | cl, ct, |
| 85 | lowest_non_clean, |
| 86 | lowest_non_clean_base_chunk_index, |
| 87 | lowest_non_clean_chunk_size); |
| 88 | } |
| 89 | if (pst->all_tasks_completed()) { |
| 90 | // Clear lowest_non_clean array for next time. |
| 91 | intptr_t first_chunk_index = addr_to_chunk_index(mr.start()); |
| 92 | uintptr_t last_chunk_index = addr_to_chunk_index(mr.last()); |
| 93 | for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) { |
| 94 | intptr_t ind = ch - lowest_non_clean_base_chunk_index; |
      assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
             "Bounds error");
| 97 | lowest_non_clean[ind] = NULL; |
| 98 | } |
| 99 | } |
| 100 | } |
| 101 | |
| 102 | void |
| 103 | CMSCardTable:: |
| 104 | process_stride(Space* sp, |
| 105 | MemRegion used, |
| 106 | jint stride, int n_strides, |
| 107 | OopsInGenClosure* cl, |
| 108 | CardTableRS* ct, |
| 109 | CardValue** lowest_non_clean, |
| 110 | uintptr_t lowest_non_clean_base_chunk_index, |
| 111 | size_t lowest_non_clean_chunk_size) { |
  // Note: the loop below walks the chunks of this stride from lower to
  // higher addresses; going from higher to lower instead wouldn't help much
  // because of the strided parallelism pattern used here.
| 114 | |
| 115 | // Find the first card address of the first chunk in the stride that is |
| 116 | // at least "bottom" of the used region. |
| 117 | CardValue* start_card = byte_for(used.start()); |
| 118 | CardValue* end_card = byte_after(used.last()); |
| 119 | uintptr_t start_chunk = addr_to_chunk_index(used.start()); |
| 120 | uintptr_t start_chunk_stride_num = start_chunk % n_strides; |
| 121 | CardValue* chunk_card_start; |
| 122 | |
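  // Chunks are assigned to strides round-robin by chunk index modulo
  // n_strides; find the first chunk at or above used.start() that belongs
  // to the requested stride.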
| 123 | if ((uintptr_t)stride >= start_chunk_stride_num) { |
| 124 | chunk_card_start = (start_card + |
| 125 | (stride - start_chunk_stride_num) * ParGCCardsPerStrideChunk); |
| 126 | } else { |
| 127 | // Go ahead to the next chunk group boundary, then to the requested stride. |
| 128 | chunk_card_start = (start_card + |
| 129 | (n_strides - start_chunk_stride_num + stride) * ParGCCardsPerStrideChunk); |
| 130 | } |
| 131 | |
| 132 | while (chunk_card_start < end_card) { |
| 133 | // Even though we go from lower to higher addresses below, the |
| 134 | // strided parallelism can interleave the actual processing of the |
| 135 | // dirty pages in various ways. For a specific chunk within this |
| 136 | // stride, we take care to avoid double scanning or missing a card |
| 137 | // by suitably initializing the "min_done" field in process_chunk_boundaries() |
| 138 | // below, together with the dirty region extension accomplished in |
| 139 | // DirtyCardToOopClosure::do_MemRegion(). |
| 140 | CardValue* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk; |
| 141 | // Invariant: chunk_mr should be fully contained within the "used" region. |
| 142 | MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start), |
| 143 | chunk_card_end >= end_card ? |
| 144 | used.end() : addr_for(chunk_card_end)); |
    assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
    assert(used.contains(chunk_mr), "chunk_mr should be subset of used");
| 147 | |
| 148 | // This function is used by the parallel card table iteration. |
| 149 | const bool parallel = true; |
| 150 | |
| 151 | DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(), |
| 152 | cl->gen_boundary(), |
| 153 | parallel); |
| 154 | ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel); |
| 155 | |
| 156 | |
| 157 | // Process the chunk. |
| 158 | process_chunk_boundaries(sp, |
| 159 | dcto_cl, |
| 160 | chunk_mr, |
| 161 | used, |
| 162 | lowest_non_clean, |
| 163 | lowest_non_clean_base_chunk_index, |
| 164 | lowest_non_clean_chunk_size); |
| 165 | |
| 166 | // We want the LNC array updates above in process_chunk_boundaries |
| 167 | // to be visible before any of the card table value changes as a |
| 168 | // result of the dirty card iteration below. |
| 169 | OrderAccess::storestore(); |
| 170 | |
| 171 | // We want to clear the cards: clear_cl here does the work of finding |
| 172 | // contiguous dirty ranges of cards to process and clear. |
| 173 | clear_cl.do_MemRegion(chunk_mr); |
| 174 | |
| 175 | // Find the next chunk of the stride. |
| 176 | chunk_card_start += ParGCCardsPerStrideChunk * n_strides; |
| 177 | } |
| 178 | } |
| 179 | |
| 180 | void |
| 181 | CMSCardTable:: |
| 182 | process_chunk_boundaries(Space* sp, |
| 183 | DirtyCardToOopClosure* dcto_cl, |
| 184 | MemRegion chunk_mr, |
| 185 | MemRegion used, |
| 186 | CardValue** lowest_non_clean, |
| 187 | uintptr_t lowest_non_clean_base_chunk_index, |
| 188 | size_t lowest_non_clean_chunk_size) |
| 189 | { |
| 190 | // We must worry about non-array objects that cross chunk boundaries, |
| 191 | // because such objects are both precisely and imprecisely marked: |
| 192 | // .. if the head of such an object is dirty, the entire object |
| 193 | // needs to be scanned, under the interpretation that this |
| 194 | // was an imprecise mark |
| 195 | // .. if the head of such an object is not dirty, we can assume |
| 196 | // precise marking and it's efficient to scan just the dirty |
| 197 | // cards. |
| 198 | // In either case, each scanned reference must be scanned precisely |
| 199 | // once so as to avoid cloning of a young referent. For efficiency, |
| 200 | // our closures depend on this property and do not protect against |
| 201 | // double scans. |
| 202 | |
| 203 | uintptr_t start_chunk_index = addr_to_chunk_index(chunk_mr.start()); |
  assert(start_chunk_index >= lowest_non_clean_base_chunk_index, "Bounds error.");
| 205 | uintptr_t cur_chunk_index = start_chunk_index - lowest_non_clean_base_chunk_index; |
| 206 | |
| 207 | // First, set "our" lowest_non_clean entry, which would be |
| 208 | // used by the thread scanning an adjoining left chunk with |
| 209 | // a non-array object straddling the mutual boundary. |
| 210 | // Find the object that spans our boundary, if one exists. |
| 211 | // first_block is the block possibly straddling our left boundary. |
| 212 | HeapWord* first_block = sp->block_start(chunk_mr.start()); |
  assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()),
         "First chunk should always have a co-initial block");
| 215 | // Does the block straddle the chunk's left boundary, and is it |
| 216 | // a non-array object? |
| 217 | if (first_block < chunk_mr.start() // first block straddles left bdry |
| 218 | && sp->block_is_obj(first_block) // first block is an object |
| 219 | && !(oop(first_block)->is_objArray() // first block is not an array (arrays are precisely dirtied) |
| 220 | || oop(first_block)->is_typeArray())) { |
| 221 | // Find our least non-clean card, so that a left neighbor |
| 222 | // does not scan an object straddling the mutual boundary |
| 223 | // too far to the right, and attempt to scan a portion of |
| 224 | // that object twice. |
| 225 | CardValue* first_dirty_card = NULL; |
| 226 | CardValue* last_card_of_first_obj = |
| 227 | byte_for(first_block + sp->block_size(first_block) - 1); |
| 228 | CardValue* first_card_of_cur_chunk = byte_for(chunk_mr.start()); |
| 229 | CardValue* last_card_of_cur_chunk = byte_for(chunk_mr.last()); |
| 230 | CardValue* last_card_to_check = MIN2(last_card_of_cur_chunk, last_card_of_first_obj); |
| 231 | // Note that this does not need to go beyond our last card |
| 232 | // if our first object completely straddles this chunk. |
| 233 | for (CardValue* cur = first_card_of_cur_chunk; |
| 234 | cur <= last_card_to_check; cur++) { |
| 235 | CardValue val = *cur; |
| 236 | if (card_will_be_scanned(val)) { |
| 237 | first_dirty_card = cur; |
| 238 | break; |
| 239 | } else { |
        assert(!card_may_have_been_dirty(val), "Error");
| 241 | } |
| 242 | } |
| 243 | if (first_dirty_card != NULL) { |
      assert(cur_chunk_index < lowest_non_clean_chunk_size, "Bounds error.");
      assert(lowest_non_clean[cur_chunk_index] == NULL,
             "Write exactly once : value should be stable hereafter for this round");
| 247 | lowest_non_clean[cur_chunk_index] = first_dirty_card; |
| 248 | } |
| 249 | } else { |
| 250 | // In this case we can help our neighbor by just asking them |
| 251 | // to stop at our first card (even though it may not be dirty). |
    assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
| 253 | CardValue* first_card_of_cur_chunk = byte_for(chunk_mr.start()); |
| 254 | lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk; |
| 255 | } |
| 256 | |
  // Next, set our own max_to_do, which strictly (exclusively) bounds the
  // highest address that we will scan past the right end of our chunk.
| 259 | HeapWord* max_to_do = NULL; |
| 260 | if (chunk_mr.end() < used.end()) { |
| 261 | // This is not the last chunk in the used region. |
| 262 | // What is our last block? We check the first block of |
| 263 | // the next (right) chunk rather than strictly check our last block |
| 264 | // because it's potentially more efficient to do so. |
| 265 | HeapWord* const last_block = sp->block_start(chunk_mr.end()); |
    assert(last_block <= chunk_mr.end(), "In case this property changes.");
| 267 | if ((last_block == chunk_mr.end()) // our last block does not straddle boundary |
| 268 | || !sp->block_is_obj(last_block) // last_block isn't an object |
| 269 | || oop(last_block)->is_objArray() // last_block is an array (precisely marked) |
| 270 | || oop(last_block)->is_typeArray()) { |
| 271 | max_to_do = chunk_mr.end(); |
| 272 | } else { |
      assert(last_block < chunk_mr.end(), "Tautology");
| 274 | // It is a non-array object that straddles the right boundary of this chunk. |
| 275 | // last_obj_card is the card corresponding to the start of the last object |
| 276 | // in the chunk. Note that the last object may not start in |
| 277 | // the chunk. |
| 278 | CardValue* const last_obj_card = byte_for(last_block); |
| 279 | const CardValue val = *last_obj_card; |
| 280 | if (!card_will_be_scanned(val)) { |
        assert(!card_may_have_been_dirty(val), "Error");
| 282 | // The card containing the head is not dirty. Any marks on |
| 283 | // subsequent cards still in this chunk must have been made |
| 284 | // precisely; we can cap processing at the end of our chunk. |
| 285 | max_to_do = chunk_mr.end(); |
| 286 | } else { |
| 287 | // The last object must be considered dirty, and extends onto the |
| 288 | // following chunk. Look for a dirty card in that chunk that will |
| 289 | // bound our processing. |
| 290 | CardValue* limit_card = NULL; |
| 291 | const size_t last_block_size = sp->block_size(last_block); |
| 292 | CardValue* const last_card_of_last_obj = |
| 293 | byte_for(last_block + last_block_size - 1); |
| 294 | CardValue* const first_card_of_next_chunk = byte_for(chunk_mr.end()); |
| 295 | // This search potentially goes a long distance looking |
| 296 | // for the next card that will be scanned, terminating |
| 297 | // at the end of the last_block, if no earlier dirty card |
| 298 | // is found. |
        assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk,
               "last card of next chunk may be wrong");
| 301 | for (CardValue* cur = first_card_of_next_chunk; |
| 302 | cur <= last_card_of_last_obj; cur++) { |
| 303 | const CardValue val = *cur; |
| 304 | if (card_will_be_scanned(val)) { |
| 305 | limit_card = cur; break; |
| 306 | } else { |
            assert(!card_may_have_been_dirty(val), "Error: card can't be skipped");
| 308 | } |
| 309 | } |
| 310 | if (limit_card != NULL) { |
| 311 | max_to_do = addr_for(limit_card); |
          assert(limit_card != NULL && max_to_do != NULL, "Error");
| 313 | } else { |
| 314 | // The following is a pessimistic value, because it's possible |
| 315 | // that a dirty card on a subsequent chunk has been cleared by |
| 316 | // the time we get to look at it; we'll correct for that further below, |
| 317 | // using the LNC array which records the least non-clean card |
| 318 | // before cards were cleared in a particular chunk. |
| 319 | limit_card = last_card_of_last_obj; |
| 320 | max_to_do = last_block + last_block_size; |
          assert(limit_card != NULL && max_to_do != NULL, "Error");
| 322 | } |
        assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size,
               "Bounds error.");
| 325 | // It is possible that a dirty card for the last object may have been |
| 326 | // cleared before we had a chance to examine it. In that case, the value |
| 327 | // will have been logged in the LNC for that chunk. |
| 328 | // We need to examine as many chunks to the right as this object |
| 329 | // covers. However, we need to bound this checking to the largest |
| 330 | // entry in the LNC array: this is because the heap may expand |
| 331 | // after the LNC array has been created but before we reach this point, |
| 332 | // and the last block in our chunk may have been expanded to include |
| 333 | // the expansion delta (and possibly subsequently allocated from, so |
| 334 | // it wouldn't be sufficient to check whether that last block was |
| 335 | // or was not an object at this point). |
| 336 | uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1) |
| 337 | - lowest_non_clean_base_chunk_index; |
| 338 | const uintptr_t last_chunk_index = addr_to_chunk_index(used.last()) |
| 339 | - lowest_non_clean_base_chunk_index; |
| 340 | if (last_chunk_index_to_check > last_chunk_index) { |
| 341 | assert(last_block + last_block_size > used.end(), |
| 342 | "Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]" |
| 343 | " does not exceed used.end() = " PTR_FORMAT "," |
| 344 | " yet last_chunk_index_to_check " INTPTR_FORMAT |
| 345 | " exceeds last_chunk_index " INTPTR_FORMAT, |
| 346 | p2i(last_block), p2i(last_block + last_block_size), |
| 347 | p2i(used.end()), |
| 348 | last_chunk_index_to_check, last_chunk_index); |
| 349 | assert(sp->used_region().end() > used.end(), |
| 350 | "Expansion did not happen: " |
| 351 | "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")" , |
| 352 | p2i(sp->used_region().start()), p2i(sp->used_region().end()), |
| 353 | p2i(used.start()), p2i(used.end())); |
| 354 | last_chunk_index_to_check = last_chunk_index; |
| 355 | } |
| 356 | for (uintptr_t lnc_index = cur_chunk_index + 1; |
| 357 | lnc_index <= last_chunk_index_to_check; |
| 358 | lnc_index++) { |
| 359 | CardValue* lnc_card = lowest_non_clean[lnc_index]; |
| 360 | if (lnc_card != NULL) { |
| 361 | // we can stop at the first non-NULL entry we find |
| 362 | if (lnc_card <= limit_card) { |
| 363 | limit_card = lnc_card; |
| 364 | max_to_do = addr_for(limit_card); |
              assert(limit_card != NULL && max_to_do != NULL, "Error");
| 366 | } |
| 367 | // In any case, we break now |
| 368 | break; |
| 369 | } // else continue to look for a non-NULL entry if any |
| 370 | } |
        assert(limit_card != NULL && max_to_do != NULL, "Error");
| 372 | } |
| 373 | assert(max_to_do != NULL, "OOPS 1 !" ); |
| 374 | } |
| 375 | assert(max_to_do != NULL, "OOPS 2!" ); |
| 376 | } else { |
| 377 | max_to_do = used.end(); |
| 378 | } |
| 379 | assert(max_to_do != NULL, "OOPS 3!" ); |
| 380 | // Now we can set the closure we're using so it doesn't to beyond |
| 381 | // max_to_do. |
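  // max_to_do becomes the closure's min_done: do_MemRegion() will not scan at
  // or beyond it, leaving that portion to the scanner of the chunk(s) to the
  // right (see the comment on process_stride()'s chunk loop above).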
| 382 | dcto_cl->set_min_done(max_to_do); |
| 383 | #ifndef PRODUCT |
| 384 | dcto_cl->set_last_bottom(max_to_do); |
| 385 | #endif |
| 386 | } |
| 387 | |
| 388 | void |
| 389 | CMSCardTable:: |
| 390 | get_LNC_array_for_space(Space* sp, |
| 391 | CardValue**& lowest_non_clean, |
| 392 | uintptr_t& lowest_non_clean_base_chunk_index, |
| 393 | size_t& lowest_non_clean_chunk_size) { |
| 394 | |
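  // The LNC ("lowest non-clean") array has one entry per chunk of the covered
  // region. During a scan, each entry records the leftmost card of its chunk
  // that will be scanned, before any cards are cleared, so that the scanner of
  // the chunk to the left can bound its scan of an object straddling the
  // mutual boundary (see process_chunk_boundaries()).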
| 395 | int i = find_covering_region_containing(sp->bottom()); |
| 396 | MemRegion covered = _covered[i]; |
| 397 | size_t n_chunks = chunks_to_cover(covered); |
| 398 | |
| 399 | // Only the first thread to obtain the lock will resize the |
| 400 | // LNC array for the covered region. Any later expansion can't affect |
| 401 | // the used_at_save_marks region. |
| 402 | // (I observed a bug in which the first thread to execute this would |
| 403 | // resize, and then it would cause "expand_and_allocate" that would |
| 404 | // increase the number of chunks in the covered region. Then a second |
| 405 | // thread would come and execute this, see that the size didn't match, |
| 406 | // and free and allocate again. So the first thread would be using a |
| 407 | // freed "_lowest_non_clean" array.) |
| 408 | |
| 409 | // Do a dirty read here. If we pass the conditional then take the rare |
| 410 | // event lock and do the read again in case some other thread had already |
| 411 | // succeeded and done the resize. |
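  // _last_LNC_resizing_collection[i] stamps the collection in which the LNC
  // array for this covered region was last (re)sized, so the resize happens
  // at most once per collection.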
| 412 | int cur_collection = CMSHeap::heap()->total_collections(); |
  // The updated _last_LNC_resizing_collection[i] must not become visible
  // before _lowest_non_clean and friends are visible. Therefore use
  // acquire/release to guarantee this on non-TSO architectures.
| 416 | if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) { |
| 417 | MutexLocker x(ParGCRareEvent_lock); |
| 418 | // This load_acquire is here for clarity only. The MutexLocker already fences. |
| 419 | if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) { |
| 420 | if (_lowest_non_clean[i] == NULL || |
| 421 | n_chunks != _lowest_non_clean_chunk_size[i]) { |
| 422 | |
| 423 | // Should we delete the old? |
| 424 | if (_lowest_non_clean[i] != NULL) { |
          assert(n_chunks != _lowest_non_clean_chunk_size[i],
                 "logical consequence");
| 427 | FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]); |
| 428 | _lowest_non_clean[i] = NULL; |
| 429 | } |
| 430 | // Now allocate a new one if necessary. |
| 431 | if (_lowest_non_clean[i] == NULL) { |
| 432 | _lowest_non_clean[i] = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC); |
| 433 | _lowest_non_clean_chunk_size[i] = n_chunks; |
| 434 | _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start()); |
| 435 | for (int j = 0; j < (int)n_chunks; j++) |
| 436 | _lowest_non_clean[i][j] = NULL; |
| 437 | } |
| 438 | } |
      // Make sure this store becomes visible only after _lowest_non_clean*
      // has been initialized.
| 440 | OrderAccess::release_store(&_last_LNC_resizing_collection[i], cur_collection); |
| 441 | } |
| 442 | } |
| 443 | // In any case, now do the initialization. |
| 444 | lowest_non_clean = _lowest_non_clean[i]; |
| 445 | lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i]; |
| 446 | lowest_non_clean_chunk_size = _lowest_non_clean_chunk_size[i]; |
| 447 | } |
| 448 | |
| 449 | #ifdef ASSERT |
| 450 | void CMSCardTable::verify_used_region_at_save_marks(Space* sp) const { |
| 451 | MemRegion ur = sp->used_region(); |
| 452 | MemRegion urasm = sp->used_region_at_save_marks(); |
| 453 | |
| 454 | if (!ur.contains(urasm)) { |
| 455 | log_warning(gc)("CMS+ParNew: Did you forget to call save_marks()? " |
| 456 | "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in " |
| 457 | "[" PTR_FORMAT ", " PTR_FORMAT ")" , |
| 458 | p2i(urasm.start()), p2i(urasm.end()), p2i(ur.start()), p2i(ur.end())); |
| 459 | MemRegion ur2 = sp->used_region(); |
| 460 | MemRegion urasm2 = sp->used_region_at_save_marks(); |
| 461 | if (!ur.equals(ur2)) { |
| 462 | log_warning(gc)("CMS+ParNew: Flickering used_region()!!" ); |
| 463 | } |
| 464 | if (!urasm.equals(urasm2)) { |
| 465 | log_warning(gc)("CMS+ParNew: Flickering used_region_at_save_marks()!!" ); |
| 466 | } |
| 467 | ShouldNotReachHere(); |
| 468 | } |
| 469 | } |
| 470 | #endif // ASSERT |
| 471 | |