| 1 | /* |
| 2 | * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #include "precompiled.hpp" |
| 26 | #include "gc/cms/cmsHeap.hpp" |
| 27 | #include "gc/cms/cmsLockVerifier.hpp" |
| 28 | #include "gc/cms/compactibleFreeListSpace.hpp" |
| 29 | #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp" |
| 30 | #include "gc/cms/concurrentMarkSweepThread.hpp" |
| 31 | #include "gc/shared/blockOffsetTable.inline.hpp" |
| 32 | #include "gc/shared/collectedHeap.inline.hpp" |
| 33 | #include "gc/shared/genOopClosures.inline.hpp" |
| 34 | #include "gc/shared/space.inline.hpp" |
| 35 | #include "gc/shared/spaceDecorator.hpp" |
| 36 | #include "logging/log.hpp" |
| 37 | #include "logging/logStream.hpp" |
| 38 | #include "memory/allocation.inline.hpp" |
| 39 | #include "memory/binaryTreeDictionary.inline.hpp" |
| 40 | #include "memory/iterator.inline.hpp" |
| 41 | #include "memory/resourceArea.hpp" |
| 42 | #include "memory/universe.hpp" |
| 43 | #include "oops/access.inline.hpp" |
| 44 | #include "oops/compressedOops.inline.hpp" |
| 45 | #include "oops/oop.inline.hpp" |
| 46 | #include "runtime/globals.hpp" |
| 47 | #include "runtime/handles.inline.hpp" |
| 48 | #include "runtime/init.hpp" |
| 49 | #include "runtime/java.hpp" |
| 50 | #include "runtime/orderAccess.hpp" |
| 51 | #include "runtime/vmThread.hpp" |
| 52 | #include "utilities/align.hpp" |
| 53 | #include "utilities/copy.hpp" |
| 54 | |
// Specialize for AdaptiveFreeList, which tries to avoid splitting a
// chunk of a size that is under-populated in favor of an over-populated
// size. The general get_better_list() just returns the current list.
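//
// For illustration (hypothetical sizes): if the candidate list is for
// 300-word chunks with surplus() <= 0 and hint() == 512, and the 512-word
// list has a surplus, the walk below moves to the 512-word list, records
// 512 as the new hint for the 300-word list, and returns the 512-word
// TreeList so the split is taken from the over-populated size instead.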
| 59 | template <> |
| 60 | TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* |
| 61 | TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >::get_better_list( |
| 62 | BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* dictionary) { |
| 63 | // A candidate chunk has been found. If it is already under |
| 64 | // populated, get a chunk associated with the hint for this |
| 65 | // chunk. |
| 66 | |
| 67 | TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* curTL = this; |
| 68 | if (curTL->surplus() <= 0) { |
| 69 | /* Use the hint to find a size with a surplus, and reset the hint. */ |
| 70 | TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* hintTL = this; |
| 71 | while (hintTL->hint() != 0) { |
| 72 | assert(hintTL->hint() > hintTL->size(), |
| 73 | "hint points in the wrong direction" ); |
| 74 | hintTL = dictionary->find_list(hintTL->hint()); |
| 75 | assert(curTL != hintTL, "Infinite loop" ); |
| 76 | if (hintTL == NULL || |
| 77 | hintTL == curTL /* Should not happen but protect against it */ ) { |
| 78 | // No useful hint. Set the hint to NULL and go on. |
| 79 | curTL->set_hint(0); |
| 80 | break; |
| 81 | } |
| 82 | assert(hintTL->size() > curTL->size(), "hint is inconsistent" ); |
| 83 | if (hintTL->surplus() > 0) { |
| 84 | // The hint led to a list that has a surplus. Use it. |
| 85 | // Set the hint for the candidate to an overpopulated |
| 86 | // size. |
| 87 | curTL->set_hint(hintTL->size()); |
| 88 | // Change the candidate. |
| 89 | curTL = hintTL; |
| 90 | break; |
| 91 | } |
| 92 | } |
| 93 | } |
| 94 | return curTL; |
| 95 | } |
| 96 | |
| 97 | void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth) { |
| 98 | TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* nd = find_list(size); |
| 99 | if (nd) { |
| 100 | if (split) { |
| 101 | if (birth) { |
| 102 | nd->increment_split_births(); |
| 103 | nd->increment_surplus(); |
| 104 | } else { |
| 105 | nd->increment_split_deaths(); |
| 106 | nd->decrement_surplus(); |
| 107 | } |
| 108 | } else { |
| 109 | if (birth) { |
| 110 | nd->increment_coal_births(); |
| 111 | nd->increment_surplus(); |
| 112 | } else { |
| 113 | nd->increment_coal_deaths(); |
| 114 | nd->decrement_surplus(); |
| 115 | } |
| 116 | } |
| 117 | } |
  // A list for this size may not be found (nd == 0) if either:
  //   - this is a death where the appropriate list is now empty
  //     and has been removed from the dictionary, or
  //   - this is a birth associated with a LinAB; the chunk backing
  //     the LinAB is not kept in the dictionary.
| 123 | } |
| 124 | |
| 125 | bool AFLBinaryTreeDictionary::coal_dict_over_populated(size_t size) { |
| 126 | if (FLSAlwaysCoalesceLarge) return true; |
| 127 | |
| 128 | TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* list_of_size = find_list(size); |
  // If there is no list of the requested size, treat that size as over-populated.
| 130 | return list_of_size == NULL || list_of_size->coal_desired() <= 0 || |
| 131 | list_of_size->count() > list_of_size->coal_desired(); |
| 132 | } |
| 133 | |
| 134 | // For each list in the tree, calculate the desired, desired |
| 135 | // coalesce, count before sweep, and surplus before sweep. |
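//
// For example (hypothetical numbers): if compute_desired() arrives at
// desired() == 100 and the supplied coalesce fraction is 0.95, then
// coal_desired is set to 95; coal_dict_over_populated() later treats the
// size as over-populated once its count exceeds that figure.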
| 136 | class BeginSweepClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > { |
| 137 | double _percentage; |
| 138 | float _inter_sweep_current; |
| 139 | float _inter_sweep_estimate; |
| 140 | float _intra_sweep_estimate; |
| 141 | |
| 142 | public: |
| 143 | BeginSweepClosure(double p, float inter_sweep_current, |
| 144 | float inter_sweep_estimate, |
| 145 | float intra_sweep_estimate) : |
| 146 | _percentage(p), |
| 147 | _inter_sweep_current(inter_sweep_current), |
| 148 | _inter_sweep_estimate(inter_sweep_estimate), |
| 149 | _intra_sweep_estimate(intra_sweep_estimate) { } |
| 150 | |
| 151 | void do_list(AdaptiveFreeList<FreeChunk>* fl) { |
| 152 | double coalSurplusPercent = _percentage; |
| 153 | fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate, _intra_sweep_estimate); |
| 154 | fl->set_coal_desired((ssize_t)((double)fl->desired() * coalSurplusPercent)); |
| 155 | fl->set_before_sweep(fl->count()); |
| 156 | fl->set_bfr_surp(fl->surplus()); |
| 157 | } |
| 158 | }; |
| 159 | |
| 160 | void AFLBinaryTreeDictionary::begin_sweep_dict_census(double coalSurplusPercent, |
| 161 | float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) { |
| 162 | BeginSweepClosure bsc(coalSurplusPercent, inter_sweep_current, |
| 163 | inter_sweep_estimate, |
| 164 | intra_sweep_estimate); |
| 165 | bsc.do_tree(root()); |
| 166 | } |
| 167 | |
| 168 | // Calculate surpluses for the lists in the tree. |
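//
// For example (hypothetical numbers): with count() == 120, desired() == 100
// and a split-surplus fraction of 1.10, the surplus becomes
// 120 - (ssize_t)(100 * 1.10) == 10; a negative result marks the size as
// under-populated for splitting purposes.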
| 169 | class setTreeSurplusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > { |
| 170 | double percentage; |
| 171 | public: |
| 172 | setTreeSurplusClosure(double v) { percentage = v; } |
| 173 | |
| 174 | void do_list(AdaptiveFreeList<FreeChunk>* fl) { |
| 175 | double splitSurplusPercent = percentage; |
| 176 | fl->set_surplus(fl->count() - |
| 177 | (ssize_t)((double)fl->desired() * splitSurplusPercent)); |
| 178 | } |
| 179 | }; |
| 180 | |
| 181 | void AFLBinaryTreeDictionary::set_tree_surplus(double splitSurplusPercent) { |
| 182 | setTreeSurplusClosure sts(splitSurplusPercent); |
| 183 | sts.do_tree(root()); |
| 184 | } |
| 185 | |
| 186 | // Set hints for the lists in the tree. |
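//
// The closure descends from the largest size to the smallest, so each list
// ends up with a hint naming the nearest larger size that was seen with a
// surplus (or 0 if there is none). For illustration (hypothetical sizes):
// if the 512- and 384-word lists have surpluses and the 300-word list does
// not, the 300-word list is given hint == 384.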
| 187 | class setTreeHintsClosure : public DescendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > { |
| 188 | size_t hint; |
| 189 | public: |
| 190 | setTreeHintsClosure(size_t v) { hint = v; } |
| 191 | |
| 192 | void do_list(AdaptiveFreeList<FreeChunk>* fl) { |
| 193 | fl->set_hint(hint); |
| 194 | assert(fl->hint() == 0 || fl->hint() > fl->size(), |
| 195 | "Current hint is inconsistent" ); |
| 196 | if (fl->surplus() > 0) { |
| 197 | hint = fl->size(); |
| 198 | } |
| 199 | } |
| 200 | }; |
| 201 | |
| 202 | void AFLBinaryTreeDictionary::set_tree_hints(void) { |
| 203 | setTreeHintsClosure sth(0); |
| 204 | sth.do_tree(root()); |
| 205 | } |
| 206 | |
| 207 | // Save count before previous sweep and splits and coalesces. |
| 208 | class clearTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > { |
| 209 | void do_list(AdaptiveFreeList<FreeChunk>* fl) { |
| 210 | fl->set_prev_sweep(fl->count()); |
| 211 | fl->set_coal_births(0); |
| 212 | fl->set_coal_deaths(0); |
| 213 | fl->set_split_births(0); |
| 214 | fl->set_split_deaths(0); |
| 215 | } |
| 216 | }; |
| 217 | |
| 218 | void AFLBinaryTreeDictionary::clear_tree_census(void) { |
| 219 | clearTreeCensusClosure ctc; |
| 220 | ctc.do_tree(root()); |
| 221 | } |
| 222 | |
| 223 | // Do reporting and post sweep clean up. |
| 224 | void AFLBinaryTreeDictionary::end_sweep_dict_census(double splitSurplusPercent) { |
| 225 | // Does walking the tree 3 times hurt? |
| 226 | set_tree_surplus(splitSurplusPercent); |
| 227 | set_tree_hints(); |
| 228 | LogTarget(Trace, gc, freelist, stats) log; |
| 229 | if (log.is_enabled()) { |
| 230 | LogStream out(log); |
| 231 | report_statistics(&out); |
| 232 | } |
| 233 | clear_tree_census(); |
| 234 | } |
| 235 | |
| 236 | // Print census information - counts, births, deaths, etc. |
| 237 | // for each list in the tree. Also print some summary |
| 238 | // information. |
| 239 | class PrintTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > { |
| 240 | int _print_line; |
| 241 | size_t _total_free; |
| 242 | AdaptiveFreeList<FreeChunk> _total; |
| 243 | |
| 244 | public: |
| 245 | PrintTreeCensusClosure() { |
| 246 | _print_line = 0; |
| 247 | _total_free = 0; |
| 248 | } |
| 249 | AdaptiveFreeList<FreeChunk>* total() { return &_total; } |
| 250 | size_t total_free() { return _total_free; } |
| 251 | |
| 252 | void do_list(AdaptiveFreeList<FreeChunk>* fl) { |
| 253 | LogStreamHandle(Debug, gc, freelist, census) out; |
| 254 | |
| 255 | if (++_print_line >= 40) { |
| 256 | AdaptiveFreeList<FreeChunk>::print_labels_on(&out, "size" ); |
| 257 | _print_line = 0; |
| 258 | } |
| 259 | fl->print_on(&out); |
| 260 | _total_free += fl->count() * fl->size() ; |
| 261 | total()->set_count( total()->count() + fl->count() ); |
| 262 | total()->set_bfr_surp( total()->bfr_surp() + fl->bfr_surp() ); |
    total()->set_surplus( total()->surplus() + fl->surplus() );
| 264 | total()->set_desired( total()->desired() + fl->desired() ); |
| 265 | total()->set_prev_sweep( total()->prev_sweep() + fl->prev_sweep() ); |
| 266 | total()->set_before_sweep(total()->before_sweep() + fl->before_sweep()); |
| 267 | total()->set_coal_births( total()->coal_births() + fl->coal_births() ); |
| 268 | total()->set_coal_deaths( total()->coal_deaths() + fl->coal_deaths() ); |
| 269 | total()->set_split_births(total()->split_births() + fl->split_births()); |
| 270 | total()->set_split_deaths(total()->split_deaths() + fl->split_deaths()); |
| 271 | } |
| 272 | }; |
| 273 | |
| 274 | void AFLBinaryTreeDictionary::print_dict_census(outputStream* st) const { |
| 275 | |
| 276 | st->print_cr("BinaryTree" ); |
| 277 | AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size" ); |
| 278 | PrintTreeCensusClosure ptc; |
| 279 | ptc.do_tree(root()); |
| 280 | |
| 281 | AdaptiveFreeList<FreeChunk>* total = ptc.total(); |
| 282 | AdaptiveFreeList<FreeChunk>::print_labels_on(st, " " ); |
| 283 | total->print_on(st, "TOTAL\t" ); |
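  // "growth" below is the net number of chunks gained since the previous
  // sweep (split and coalesce births minus deaths), normalized by the count
  // at the previous sweep; "deficit" is the shortfall relative to the
  // desired count. Both divisions are guarded against a zero denominator.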
| 284 | st->print_cr("total_free(words): " SIZE_FORMAT_W(16) " growth: %8.5f deficit: %8.5f" , |
| 285 | ptc.total_free(), |
| 286 | (double)(total->split_births() + total->coal_births() |
| 287 | - total->split_deaths() - total->coal_deaths()) |
| 288 | /(total->prev_sweep() != 0 ? (double)total->prev_sweep() : 1.0), |
| 289 | (double)(total->desired() - total->count()) |
| 290 | /(total->desired() != 0 ? (double)total->desired() : 1.0)); |
| 291 | } |
| 292 | |
| 293 | ///////////////////////////////////////////////////////////////////////// |
| 294 | //// CompactibleFreeListSpace |
| 295 | ///////////////////////////////////////////////////////////////////////// |
| 296 | |
// Highest rank used by the free list locks.
| 298 | int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3; |
| 299 | |
| 300 | // Defaults are 0 so things will break badly if incorrectly initialized. |
| 301 | size_t CompactibleFreeListSpace::IndexSetStart = 0; |
| 302 | size_t CompactibleFreeListSpace::IndexSetStride = 0; |
| 303 | size_t CompactibleFreeListSpace::_min_chunk_size_in_bytes = 0; |
| 304 | |
| 305 | size_t MinChunkSize = 0; |
| 306 | |
| 307 | void CompactibleFreeListSpace::set_cms_values() { |
| 308 | // Set CMS global values |
| 309 | assert(MinChunkSize == 0, "already set" ); |
| 310 | |
| 311 | // MinChunkSize should be a multiple of MinObjAlignment and be large enough |
| 312 | // for chunks to contain a FreeChunk. |
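  // For example (hypothetical figures): if sizeof(FreeChunk) were 24 bytes
  // and MinObjAlignmentInBytes were 8, _min_chunk_size_in_bytes would be
  // align_up(24, 8) == 24 and MinChunkSize would be 24 / BytesPerWord == 3
  // words on a 64-bit VM.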
| 313 | _min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes); |
| 314 | MinChunkSize = _min_chunk_size_in_bytes / BytesPerWord; |
| 315 | |
| 316 | assert(IndexSetStart == 0 && IndexSetStride == 0, "already set" ); |
| 317 | IndexSetStart = MinChunkSize; |
| 318 | IndexSetStride = MinObjAlignment; |
| 319 | } |
| 320 | |
| 321 | // Constructor |
| 322 | CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) : |
| 323 | _rescan_task_size(CardTable::card_size_in_words * BitsPerWord * |
| 324 | CMSRescanMultiple), |
| 325 | _marking_task_size(CardTable::card_size_in_words * BitsPerWord * |
| 326 | CMSConcMarkMultiple), |
| 327 | _bt(bs, mr), |
| 328 | _collector(NULL), |
| 329 | // free list locks are in the range of values taken by _lockRank |
| 330 | // This range currently is [_leaf+2, _leaf+3] |
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
| 334 | _freelistLock(_lockRank--, "CompactibleFreeListSpace_lock" , true, |
| 335 | Monitor::_safepoint_check_never), |
| 336 | _preconsumptionDirtyCardClosure(NULL), |
| 337 | _parDictionaryAllocLock(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1 |
| 338 | "CompactibleFreeListSpace_dict_par_lock" , true, |
| 339 | Monitor::_safepoint_check_never) |
| 340 | { |
| 341 | assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize, |
| 342 | "FreeChunk is larger than expected" ); |
| 343 | _bt.set_space(this); |
| 344 | initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle); |
| 345 | |
| 346 | _dictionary = new AFLBinaryTreeDictionary(mr); |
| 347 | |
| 348 | assert(_dictionary != NULL, "CMS dictionary initialization" ); |
| 349 | // The indexed free lists are initially all empty and are lazily |
| 350 | // filled in on demand. Initialize the array elements to NULL. |
| 351 | initializeIndexedFreeListArray(); |
| 352 | |
| 353 | _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, |
| 354 | SmallForLinearAlloc); |
| 355 | |
| 356 | // CMSIndexedFreeListReplenish should be at least 1 |
| 357 | CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish); |
| 358 | _promoInfo.setSpace(this); |
| 359 | if (UseCMSBestFit) { |
| 360 | _fitStrategy = FreeBlockBestFitFirst; |
| 361 | } else { |
| 362 | _fitStrategy = FreeBlockStrategyNone; |
| 363 | } |
| 364 | check_free_list_consistency(); |
| 365 | |
| 366 | // Initialize locks for parallel case. |
| 367 | for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
| 368 | _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1 |
| 369 | "a freelist par lock" , true, Mutex::_safepoint_check_never); |
| 370 | DEBUG_ONLY( |
| 371 | _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]); |
| 372 | ) |
| 373 | } |
| 374 | _dictionary->set_par_lock(&_parDictionaryAllocLock); |
| 375 | } |
| 376 | |
| 377 | // Like CompactibleSpace forward() but always calls cross_threshold() to |
| 378 | // update the block offset table. Removed initialize_threshold call because |
| 379 | // CFLS does not use a block offset array for contiguous spaces. |
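// In outline: (1) if placing the adjusted object at compact_top would leave
// a residual fragment smaller than MinChunkSize, advance to the next
// compaction space (falling back to the young gen's first compaction space
// when this chain is exhausted); (2) install the forwarding pointer, or
// re-initialize the mark word if the object is not moving; (3) bump
// compact_top and update the block offset table via cross_threshold().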
| 380 | HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size, |
| 381 | CompactPoint* cp, HeapWord* compact_top) { |
| 382 | // q is alive |
| 383 | // First check if we should switch compaction space |
| 384 | assert(this == cp->space, "'this' should be current compaction space." ); |
| 385 | size_t compaction_max_size = pointer_delta(end(), compact_top); |
| 386 | assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size), |
| 387 | "virtual adjustObjectSize_v() method is not correct" ); |
| 388 | size_t adjusted_size = adjustObjectSize(size); |
| 389 | assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0, |
| 390 | "no small fragments allowed" ); |
| 391 | assert(minimum_free_block_size() == MinChunkSize, |
| 392 | "for de-virtualized reference below" ); |
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
| 394 | if (adjusted_size + MinChunkSize > compaction_max_size && |
| 395 | adjusted_size != compaction_max_size) { |
| 396 | do { |
| 397 | // switch to next compaction space |
| 398 | cp->space->set_compaction_top(compact_top); |
| 399 | cp->space = cp->space->next_compaction_space(); |
| 400 | if (cp->space == NULL) { |
| 401 | cp->gen = CMSHeap::heap()->young_gen(); |
| 402 | assert(cp->gen != NULL, "compaction must succeed" ); |
| 403 | cp->space = cp->gen->first_compaction_space(); |
| 404 | assert(cp->space != NULL, "generation must have a first compaction space" ); |
| 405 | } |
| 406 | compact_top = cp->space->bottom(); |
| 407 | cp->space->set_compaction_top(compact_top); |
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method here; it was not used above so as to save the
      // virtual dispatch.
| 412 | adjusted_size = cp->space->adjust_object_size_v(size); |
| 413 | compaction_max_size = pointer_delta(cp->space->end(), compact_top); |
| 414 | assert(cp->space->minimum_free_block_size() == 0, "just checking" ); |
| 415 | } while (adjusted_size > compaction_max_size); |
| 416 | } |
| 417 | |
| 418 | // store the forwarding pointer into the mark word |
| 419 | if ((HeapWord*)q != compact_top) { |
| 420 | q->forward_to(oop(compact_top)); |
| 421 | assert(q->is_gc_marked(), "encoding the pointer should preserve the mark" ); |
| 422 | } else { |
| 423 | // if the object isn't moving we can just set the mark to the default |
| 424 | // mark and handle it specially later on. |
| 425 | q->init_mark_raw(); |
| 426 | assert(q->forwardee() == NULL, "should be forwarded to NULL" ); |
| 427 | } |
| 428 | |
| 429 | compact_top += adjusted_size; |
| 430 | |
| 431 | // we need to update the offset table so that the beginnings of objects can be |
| 432 | // found during scavenge. Note that we are updating the offset table based on |
| 433 | // where the object will be once the compaction phase finishes. |
| 434 | |
  // Always call cross_threshold(). A contiguous space could call it only
  // when compact_top crosses the current threshold, but that shortcut does
  // not apply to a non-contiguous space.
| 438 | cp->threshold = |
| 439 | cp->space->cross_threshold(compact_top - adjusted_size, compact_top); |
| 440 | return compact_top; |
| 441 | } |
| 442 | |
| 443 | // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt |
| 444 | // and use of single_block instead of alloc_block. The name here is not really |
| 445 | // appropriate - maybe a more general name could be invented for both the |
| 446 | // contiguous and noncontiguous spaces. |
| 447 | |
| 448 | HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) { |
| 449 | _bt.single_block(start, the_end); |
| 450 | return end(); |
| 451 | } |
| 452 | |
| 453 | // Initialize them to NULL. |
| 454 | void CompactibleFreeListSpace::initializeIndexedFreeListArray() { |
| 455 | for (size_t i = 0; i < IndexSetSize; i++) { |
| 456 | // Note that on platforms where objects are double word aligned, |
| 457 | // the odd array elements are not used. It is convenient, however, |
| 458 | // to map directly from the object size to the array element. |
| 459 | _indexedFreeList[i].reset(IndexSetSize); |
| 460 | _indexedFreeList[i].set_size(i); |
| 461 | assert(_indexedFreeList[i].count() == 0, "reset check failed" ); |
| 462 | assert(_indexedFreeList[i].head() == NULL, "reset check failed" ); |
| 463 | assert(_indexedFreeList[i].tail() == NULL, "reset check failed" ); |
| 464 | assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed" ); |
| 465 | } |
| 466 | } |
| 467 | |
| 468 | size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const { |
| 469 | return adjustObjectSize(oop(addr)->size()); |
| 470 | } |
| 471 | |
| 472 | void CompactibleFreeListSpace::resetIndexedFreeListArray() { |
| 473 | for (size_t i = 1; i < IndexSetSize; i++) { |
| 474 | assert(_indexedFreeList[i].size() == (size_t) i, |
| 475 | "Indexed free list sizes are incorrect" ); |
| 476 | _indexedFreeList[i].reset(IndexSetSize); |
| 477 | assert(_indexedFreeList[i].count() == 0, "reset check failed" ); |
| 478 | assert(_indexedFreeList[i].head() == NULL, "reset check failed" ); |
| 479 | assert(_indexedFreeList[i].tail() == NULL, "reset check failed" ); |
| 480 | assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed" ); |
| 481 | } |
| 482 | } |
| 483 | |
| 484 | void CompactibleFreeListSpace::reset(MemRegion mr) { |
| 485 | resetIndexedFreeListArray(); |
| 486 | dictionary()->reset(); |
| 487 | if (BlockOffsetArrayUseUnallocatedBlock) { |
| 488 | assert(end() == mr.end(), "We are compacting to the bottom of CMS gen" ); |
| 489 | // Everything's allocated until proven otherwise. |
| 490 | _bt.set_unallocated_block(end()); |
| 491 | } |
| 492 | if (!mr.is_empty()) { |
| 493 | assert(mr.word_size() >= MinChunkSize, "Chunk size is too small" ); |
| 494 | _bt.single_block(mr.start(), mr.word_size()); |
| 495 | FreeChunk* fc = (FreeChunk*) mr.start(); |
| 496 | fc->set_size(mr.word_size()); |
| 497 | if (mr.word_size() >= IndexSetSize ) { |
| 498 | returnChunkToDictionary(fc); |
| 499 | } else { |
| 500 | _bt.verify_not_unallocated((HeapWord*)fc, fc->size()); |
| 501 | _indexedFreeList[mr.word_size()].return_chunk_at_head(fc); |
| 502 | } |
| 503 | coalBirth(mr.word_size()); |
| 504 | } |
| 505 | _promoInfo.reset(); |
| 506 | _smallLinearAllocBlock._ptr = NULL; |
| 507 | _smallLinearAllocBlock._word_size = 0; |
| 508 | } |
| 509 | |
| 510 | void CompactibleFreeListSpace::reset_after_compaction() { |
| 511 | // Reset the space to the new reality - one free chunk. |
| 512 | MemRegion mr(compaction_top(), end()); |
| 513 | reset(mr); |
| 514 | // Now refill the linear allocation block(s) if possible. |
| 515 | refillLinearAllocBlocksIfNeeded(); |
| 516 | } |
| 517 | |
| 518 | // Walks the entire dictionary, returning a coterminal |
| 519 | // chunk, if it exists. Use with caution since it involves |
| 520 | // a potentially complete walk of a potentially large tree. |
| 521 | FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() { |
| 522 | |
| 523 | assert_lock_strong(&_freelistLock); |
| 524 | |
| 525 | return dictionary()->find_chunk_ends_at(end()); |
| 526 | } |
| 527 | |
| 528 | |
| 529 | #ifndef PRODUCT |
| 530 | void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() { |
| 531 | for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
| 532 | _indexedFreeList[i].allocation_stats()->set_returned_bytes(0); |
| 533 | } |
| 534 | } |
| 535 | |
| 536 | size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() { |
| 537 | size_t sum = 0; |
| 538 | for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
| 539 | sum += _indexedFreeList[i].allocation_stats()->returned_bytes(); |
| 540 | } |
| 541 | return sum; |
| 542 | } |
| 543 | |
| 544 | size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const { |
| 545 | size_t count = 0; |
| 546 | for (size_t i = IndexSetStart; i < IndexSetSize; i++) { |
| 547 | debug_only( |
| 548 | ssize_t total_list_count = 0; |
| 549 | for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; |
| 550 | fc = fc->next()) { |
| 551 | total_list_count++; |
| 552 | } |
| 553 | assert(total_list_count == _indexedFreeList[i].count(), |
| 554 | "Count in list is incorrect" ); |
| 555 | ) |
| 556 | count += _indexedFreeList[i].count(); |
| 557 | } |
| 558 | return count; |
| 559 | } |
| 560 | |
| 561 | size_t CompactibleFreeListSpace::totalCount() { |
| 562 | size_t num = totalCountInIndexedFreeLists(); |
| 563 | num += dictionary()->total_count(); |
| 564 | if (_smallLinearAllocBlock._word_size != 0) { |
| 565 | num++; |
| 566 | } |
| 567 | return num; |
| 568 | } |
| 569 | #endif |
| 570 | |
| 571 | bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const { |
| 572 | FreeChunk* fc = (FreeChunk*) p; |
| 573 | return fc->is_free(); |
| 574 | } |
| 575 | |
| 576 | size_t CompactibleFreeListSpace::used() const { |
| 577 | return capacity() - free(); |
| 578 | } |
| 579 | |
| 580 | size_t CompactibleFreeListSpace::free() const { |
| 581 | // "MT-safe, but not MT-precise"(TM), if you will: i.e. |
| 582 | // if you do this while the structures are in flux you |
| 583 | // may get an approximate answer only; for instance |
| 584 | // because there is concurrent allocation either |
| 585 | // directly by mutators or for promotion during a GC. |
| 586 | // It's "MT-safe", however, in the sense that you are guaranteed |
| 587 | // not to crash and burn, for instance, because of walking |
| 588 | // pointers that could disappear as you were walking them. |
| 589 | // The approximation is because the various components |
| 590 | // that are read below are not read atomically (and |
| 591 | // further the computation of totalSizeInIndexedFreeLists() |
  // is itself a non-atomic computation). The normal use of
| 593 | // this is during a resize operation at the end of GC |
| 594 | // and at that time you are guaranteed to get the |
| 595 | // correct actual value. However, for instance, this is |
| 596 | // also read completely asynchronously by the "perf-sampler" |
| 597 | // that supports jvmstat, and you are apt to see the values |
| 598 | // flicker in such cases. |
| 599 | assert(_dictionary != NULL, "No _dictionary?" ); |
| 600 | return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) + |
| 601 | totalSizeInIndexedFreeLists() + |
| 602 | _smallLinearAllocBlock._word_size) * HeapWordSize; |
| 603 | } |
| 604 | |
| 605 | size_t CompactibleFreeListSpace::max_alloc_in_words() const { |
| 606 | assert(_dictionary != NULL, "No _dictionary?" ); |
| 607 | assert_locked(); |
| 608 | size_t res = _dictionary->max_chunk_size(); |
| 609 | res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size, |
| 610 | (size_t) SmallForLinearAlloc - 1)); |
| 611 | // XXX the following could potentially be pretty slow; |
| 612 | // should one, pessimistically for the rare cases when res |
| 613 | // calculated above is less than IndexSetSize, |
| 614 | // just return res calculated above? My reasoning was that |
| 615 | // those cases will be so rare that the extra time spent doesn't |
| 616 | // really matter.... |
| 617 | // Note: do not change the loop test i >= res + IndexSetStride |
| 618 | // to i > res below, because i is unsigned and res may be zero. |
| 619 | for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride; |
| 620 | i -= IndexSetStride) { |
| 621 | if (_indexedFreeList[i].head() != NULL) { |
| 622 | assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList" ); |
| 623 | return i; |
| 624 | } |
| 625 | } |
| 626 | return res; |
| 627 | } |
| 628 | |
| 629 | void LinearAllocBlock::print_on(outputStream* st) const { |
| 630 | st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT |
| 631 | ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT, |
| 632 | p2i(_ptr), _word_size, _refillSize, _allocation_size_limit); |
| 633 | } |
| 634 | |
| 635 | void CompactibleFreeListSpace::print_on(outputStream* st) const { |
| 636 | st->print_cr("COMPACTIBLE FREELIST SPACE" ); |
| 637 | st->print_cr(" Space:" ); |
| 638 | Space::print_on(st); |
| 639 | |
| 640 | st->print_cr("promoInfo:" ); |
| 641 | _promoInfo.print_on(st); |
| 642 | |
| 643 | st->print_cr("_smallLinearAllocBlock" ); |
| 644 | _smallLinearAllocBlock.print_on(st); |
| 645 | |
| 646 | // dump_memory_block(_smallLinearAllocBlock->_ptr, 128); |
| 647 | |
| 648 | st->print_cr(" _fitStrategy = %s" , BOOL_TO_STR(_fitStrategy)); |
| 649 | } |
| 650 | |
| 651 | void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st) |
| 652 | const { |
| 653 | reportIndexedFreeListStatistics(st); |
| 654 | st->print_cr("Layout of Indexed Freelists" ); |
| 655 | st->print_cr("---------------------------" ); |
| 656 | AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size" ); |
| 657 | for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
| 658 | _indexedFreeList[i].print_on(st); |
| 659 | for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) { |
| 660 | st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s" , |
| 661 | p2i(fc), p2i((HeapWord*)fc + i), |
| 662 | fc->cantCoalesce() ? "\t CC" : "" ); |
| 663 | } |
| 664 | } |
| 665 | } |
| 666 | |
| 667 | void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st) |
| 668 | const { |
| 669 | _promoInfo.print_on(st); |
| 670 | } |
| 671 | |
| 672 | void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st) |
| 673 | const { |
| 674 | _dictionary->report_statistics(st); |
| 675 | st->print_cr("Layout of Freelists in Tree" ); |
| 676 | st->print_cr("---------------------------" ); |
| 677 | _dictionary->print_free_lists(st); |
| 678 | } |
| 679 | |
| 680 | class BlkPrintingClosure: public BlkClosure { |
| 681 | const CMSCollector* _collector; |
| 682 | const CompactibleFreeListSpace* _sp; |
| 683 | const CMSBitMap* _live_bit_map; |
  const bool _post_remark;
| 685 | outputStream* _st; |
| 686 | public: |
| 687 | BlkPrintingClosure(const CMSCollector* collector, |
| 688 | const CompactibleFreeListSpace* sp, |
| 689 | const CMSBitMap* live_bit_map, |
| 690 | outputStream* st): |
| 691 | _collector(collector), |
| 692 | _sp(sp), |
| 693 | _live_bit_map(live_bit_map), |
| 694 | _post_remark(collector->abstract_state() > CMSCollector::FinalMarking), |
| 695 | _st(st) { } |
| 696 | size_t do_blk(HeapWord* addr); |
| 697 | }; |
| 698 | |
| 699 | size_t BlkPrintingClosure::do_blk(HeapWord* addr) { |
| 700 | size_t sz = _sp->block_size_no_stall(addr, _collector); |
| 701 | assert(sz != 0, "Should always be able to compute a size" ); |
| 702 | if (_sp->block_is_obj(addr)) { |
| 703 | const bool dead = _post_remark && !_live_bit_map->isMarked(addr); |
| 704 | _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s" , |
| 705 | p2i(addr), |
| 706 | dead ? "dead" : "live" , |
| 707 | sz, |
| 708 | (!dead && CMSPrintObjectsInDump) ? ":" : "." ); |
| 709 | if (CMSPrintObjectsInDump && !dead) { |
| 710 | oop(addr)->print_on(_st); |
| 711 | _st->print_cr("--------------------------------------" ); |
| 712 | } |
| 713 | } else { // free block |
| 714 | _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s" , |
| 715 | p2i(addr), sz, CMSPrintChunksInDump ? ":" : "." ); |
| 716 | if (CMSPrintChunksInDump) { |
| 717 | ((FreeChunk*)addr)->print_on(_st); |
| 718 | _st->print_cr("--------------------------------------" ); |
| 719 | } |
| 720 | } |
| 721 | return sz; |
| 722 | } |
| 723 | |
| 724 | void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) { |
| 725 | st->print_cr("=========================" ); |
| 726 | st->print_cr("Block layout in CMS Heap:" ); |
| 727 | st->print_cr("=========================" ); |
| 728 | BlkPrintingClosure bpcl(c, this, c->markBitMap(), st); |
| 729 | blk_iterate(&bpcl); |
| 730 | |
| 731 | st->print_cr("=======================================" ); |
| 732 | st->print_cr("Order & Layout of Promotion Info Blocks" ); |
| 733 | st->print_cr("=======================================" ); |
| 734 | print_promo_info_blocks(st); |
| 735 | |
| 736 | st->print_cr("===========================" ); |
| 737 | st->print_cr("Order of Indexed Free Lists" ); |
| 738 | st->print_cr("=========================" ); |
| 739 | print_indexed_free_lists(st); |
| 740 | |
| 741 | st->print_cr("=================================" ); |
| 742 | st->print_cr("Order of Free Lists in Dictionary" ); |
| 743 | st->print_cr("=================================" ); |
| 744 | print_dictionary_free_lists(st); |
| 745 | } |
| 746 | |
| 747 | |
| 748 | void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const { |
| 749 | assert_lock_strong(&_freelistLock); |
| 750 | Log(gc, freelist, stats) log; |
| 751 | if (!log.is_debug()) { |
| 752 | return; |
| 753 | } |
| 754 | log.debug("%s" , title); |
| 755 | |
| 756 | LogStream out(log.debug()); |
| 757 | _dictionary->report_statistics(&out); |
| 758 | |
| 759 | if (log.is_trace()) { |
| 760 | LogStream trace_out(log.trace()); |
| 761 | reportIndexedFreeListStatistics(&trace_out); |
| 762 | size_t total_size = totalSizeInIndexedFreeLists() + |
| 763 | _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())); |
| 764 | log.trace(" free=" SIZE_FORMAT " frag=%1.4f" , total_size, flsFrag()); |
| 765 | } |
| 766 | } |
| 767 | |
| 768 | void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const { |
| 769 | assert_lock_strong(&_freelistLock); |
| 770 | st->print_cr("Statistics for IndexedFreeLists:" ); |
| 771 | st->print_cr("--------------------------------" ); |
| 772 | size_t total_size = totalSizeInIndexedFreeLists(); |
| 773 | size_t free_blocks = numFreeBlocksInIndexedFreeLists(); |
| 774 | st->print_cr("Total Free Space: " SIZE_FORMAT, total_size); |
| 775 | st->print_cr("Max Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists()); |
| 776 | st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks); |
| 777 | if (free_blocks != 0) { |
| 778 | st->print_cr("Av. Block Size: " SIZE_FORMAT, total_size/free_blocks); |
| 779 | } |
| 780 | } |
| 781 | |
| 782 | size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const { |
| 783 | size_t res = 0; |
| 784 | for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
| 785 | debug_only( |
| 786 | ssize_t recount = 0; |
| 787 | for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; |
| 788 | fc = fc->next()) { |
| 789 | recount += 1; |
| 790 | } |
| 791 | assert(recount == _indexedFreeList[i].count(), |
| 792 | "Incorrect count in list" ); |
| 793 | ) |
| 794 | res += _indexedFreeList[i].count(); |
| 795 | } |
| 796 | return res; |
| 797 | } |
| 798 | |
| 799 | size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const { |
| 800 | for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) { |
| 801 | if (_indexedFreeList[i].head() != NULL) { |
| 802 | assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList" ); |
| 803 | return (size_t)i; |
| 804 | } |
| 805 | } |
| 806 | return 0; |
| 807 | } |
| 808 | |
| 809 | void CompactibleFreeListSpace::set_end(HeapWord* value) { |
| 810 | HeapWord* prevEnd = end(); |
| 811 | assert(prevEnd != value, "unnecessary set_end call" ); |
| 812 | assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(), |
| 813 | "New end is below unallocated block" ); |
| 814 | _end = value; |
| 815 | if (prevEnd != NULL) { |
| 816 | // Resize the underlying block offset table. |
| 817 | _bt.resize(pointer_delta(value, bottom())); |
| 818 | if (value <= prevEnd) { |
| 819 | assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(), |
| 820 | "New end is below unallocated block" ); |
| 821 | } else { |
| 822 | // Now, take this new chunk and add it to the free blocks. |
| 823 | // Note that the BOT has not yet been updated for this block. |
| 824 | size_t newFcSize = pointer_delta(value, prevEnd); |
| 825 | // Add the block to the free lists, if possible coalescing it |
| 826 | // with the last free block, and update the BOT and census data. |
| 827 | addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize); |
| 828 | } |
| 829 | } |
| 830 | } |
| 831 | |
| 832 | class FreeListSpaceDCTOC : public FilteringDCTOC { |
| 833 | CompactibleFreeListSpace* _cfls; |
| 834 | CMSCollector* _collector; |
| 835 | bool _parallel; |
| 836 | protected: |
| 837 | // Override. |
| 838 | #define walk_mem_region_with_cl_DECL(ClosureType) \ |
| 839 | virtual void walk_mem_region_with_cl(MemRegion mr, \ |
| 840 | HeapWord* bottom, HeapWord* top, \ |
| 841 | ClosureType* cl); \ |
| 842 | void walk_mem_region_with_cl_par(MemRegion mr, \ |
| 843 | HeapWord* bottom, HeapWord* top, \ |
| 844 | ClosureType* cl); \ |
| 845 | void walk_mem_region_with_cl_nopar(MemRegion mr, \ |
| 846 | HeapWord* bottom, HeapWord* top, \ |
| 847 | ClosureType* cl) |
| 848 | walk_mem_region_with_cl_DECL(OopIterateClosure); |
| 849 | walk_mem_region_with_cl_DECL(FilteringClosure); |
| 850 | |
| 851 | public: |
| 852 | FreeListSpaceDCTOC(CompactibleFreeListSpace* sp, |
| 853 | CMSCollector* collector, |
| 854 | OopIterateClosure* cl, |
| 855 | CardTable::PrecisionStyle precision, |
| 856 | HeapWord* boundary, |
| 857 | bool parallel) : |
| 858 | FilteringDCTOC(sp, cl, precision, boundary), |
| 859 | _cfls(sp), _collector(collector), _parallel(parallel) {} |
| 860 | }; |
| 861 | |
| 862 | // We de-virtualize the block-related calls below, since we know that our |
| 863 | // space is a CompactibleFreeListSpace. |
| 864 | |
| 865 | #define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \ |
| 866 | void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr, \ |
| 867 | HeapWord* bottom, \ |
| 868 | HeapWord* top, \ |
| 869 | ClosureType* cl) { \ |
| 870 | if (_parallel) { \ |
| 871 | walk_mem_region_with_cl_par(mr, bottom, top, cl); \ |
| 872 | } else { \ |
| 873 | walk_mem_region_with_cl_nopar(mr, bottom, top, cl); \ |
| 874 | } \ |
| 875 | } \ |
| 876 | void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr, \ |
| 877 | HeapWord* bottom, \ |
| 878 | HeapWord* top, \ |
| 879 | ClosureType* cl) { \ |
| 880 | /* Skip parts that are before "mr", in case "block_start" sent us \ |
| 881 | back too far. */ \ |
| 882 | HeapWord* mr_start = mr.start(); \ |
| 883 | size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \ |
| 884 | HeapWord* next = bottom + bot_size; \ |
| 885 | while (next < mr_start) { \ |
| 886 | bottom = next; \ |
| 887 | bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom); \ |
| 888 | next = bottom + bot_size; \ |
| 889 | } \ |
| 890 | \ |
| 891 | while (bottom < top) { \ |
| 892 | if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) && \ |
| 893 | !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \ |
| 894 | oop(bottom)) && \ |
| 895 | !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \ |
| 896 | size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr); \ |
| 897 | bottom += _cfls->adjustObjectSize(word_sz); \ |
| 898 | } else { \ |
| 899 | bottom += _cfls->CompactibleFreeListSpace::block_size(bottom); \ |
| 900 | } \ |
| 901 | } \ |
| 902 | } \ |
| 903 | void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr, \ |
| 904 | HeapWord* bottom, \ |
| 905 | HeapWord* top, \ |
| 906 | ClosureType* cl) { \ |
| 907 | /* Skip parts that are before "mr", in case "block_start" sent us \ |
| 908 | back too far. */ \ |
| 909 | HeapWord* mr_start = mr.start(); \ |
| 910 | size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \ |
| 911 | HeapWord* next = bottom + bot_size; \ |
| 912 | while (next < mr_start) { \ |
| 913 | bottom = next; \ |
| 914 | bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \ |
| 915 | next = bottom + bot_size; \ |
| 916 | } \ |
| 917 | \ |
| 918 | while (bottom < top) { \ |
| 919 | if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) && \ |
| 920 | !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks( \ |
| 921 | oop(bottom)) && \ |
| 922 | !_collector->CMSCollector::is_dead_obj(oop(bottom))) { \ |
| 923 | size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr); \ |
| 924 | bottom += _cfls->adjustObjectSize(word_sz); \ |
| 925 | } else { \ |
| 926 | bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom); \ |
| 927 | } \ |
| 928 | } \ |
| 929 | } |
| 930 | |
| 931 | // (There are only two of these, rather than N, because the split is due |
| 932 | // only to the introduction of the FilteringClosure, a local part of the |
| 933 | // impl of this abstraction.) |
| 934 | FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopIterateClosure) |
| 935 | FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure) |
| 936 | |
| 937 | DirtyCardToOopClosure* |
| 938 | CompactibleFreeListSpace::new_dcto_cl(OopIterateClosure* cl, |
| 939 | CardTable::PrecisionStyle precision, |
| 940 | HeapWord* boundary, |
| 941 | bool parallel) { |
| 942 | return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel); |
| 943 | } |
| 944 | |
| 945 | |
| 946 | // Note on locking for the space iteration functions: |
| 947 | // since the collector's iteration activities are concurrent with |
| 948 | // allocation activities by mutators, absent a suitable mutual exclusion |
| 949 | // mechanism the iterators may go awry. For instance a block being iterated |
| 950 | // may suddenly be allocated or divided up and part of it allocated and |
| 951 | // so on. |
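// Hence the assert_lock_strong(freelistLock()) checks in the iterators
// below, which exclude at least direct allocation from this space while a
// walk is in progress.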
| 952 | |
| 953 | // Apply the given closure to each block in the space. |
| 954 | void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) { |
| 955 | assert_lock_strong(freelistLock()); |
| 956 | HeapWord *cur, *limit; |
| 957 | for (cur = bottom(), limit = end(); cur < limit; |
| 958 | cur += cl->do_blk_careful(cur)); |
| 959 | } |
| 960 | |
| 961 | // Apply the given closure to each block in the space. |
| 962 | void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) { |
| 963 | assert_lock_strong(freelistLock()); |
| 964 | HeapWord *cur, *limit; |
| 965 | for (cur = bottom(), limit = end(); cur < limit; |
| 966 | cur += cl->do_blk(cur)); |
| 967 | } |
| 968 | |
| 969 | // Apply the given closure to each oop in the space. |
| 970 | void CompactibleFreeListSpace::oop_iterate(OopIterateClosure* cl) { |
| 971 | assert_lock_strong(freelistLock()); |
| 972 | HeapWord *cur, *limit; |
| 973 | size_t curSize; |
| 974 | for (cur = bottom(), limit = end(); cur < limit; |
| 975 | cur += curSize) { |
| 976 | curSize = block_size(cur); |
| 977 | if (block_is_obj(cur)) { |
| 978 | oop(cur)->oop_iterate(cl); |
| 979 | } |
| 980 | } |
| 981 | } |
| 982 | |
| 983 | // NOTE: In the following methods, in order to safely be able to |
| 984 | // apply the closure to an object, we need to be sure that the |
| 985 | // object has been initialized. We are guaranteed that an object |
| 986 | // is initialized if we are holding the Heap_lock with the |
| 987 | // world stopped. |
| 988 | void CompactibleFreeListSpace::verify_objects_initialized() const { |
| 989 | if (is_init_completed()) { |
| 990 | assert_locked_or_safepoint(Heap_lock); |
| 991 | if (Universe::is_fully_initialized()) { |
| 992 | guarantee(SafepointSynchronize::is_at_safepoint(), |
| 993 | "Required for objects to be initialized" ); |
| 994 | } |
| 995 | } // else make a concession at vm start-up |
| 996 | } |
| 997 | |
| 998 | // Apply the given closure to each object in the space |
| 999 | void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) { |
| 1000 | assert_lock_strong(freelistLock()); |
| 1001 | NOT_PRODUCT(verify_objects_initialized()); |
| 1002 | HeapWord *cur, *limit; |
| 1003 | size_t curSize; |
| 1004 | for (cur = bottom(), limit = end(); cur < limit; |
| 1005 | cur += curSize) { |
| 1006 | curSize = block_size(cur); |
| 1007 | if (block_is_obj(cur)) { |
| 1008 | blk->do_object(oop(cur)); |
| 1009 | } |
| 1010 | } |
| 1011 | } |
| 1012 | |
// Apply the given closure to each live object in the space.
// Because CompactibleFreeListSpace is used by ConcurrentMarkSweepGeneration
// for concurrent GCs, objects in the space may hold references to objects
// that are no longer valid. For example, an object may reference another
// object that has already been swept up (collected). This method uses
// obj_is_alive() to determine whether it is safe to apply the closure to
// an object. See obj_is_alive() for details on how the liveness of an
// object is decided.
| 1022 | |
| 1023 | void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) { |
| 1024 | assert_lock_strong(freelistLock()); |
| 1025 | NOT_PRODUCT(verify_objects_initialized()); |
| 1026 | HeapWord *cur, *limit; |
| 1027 | size_t curSize; |
| 1028 | for (cur = bottom(), limit = end(); cur < limit; |
| 1029 | cur += curSize) { |
| 1030 | curSize = block_size(cur); |
| 1031 | if (block_is_obj(cur) && obj_is_alive(cur)) { |
| 1032 | blk->do_object(oop(cur)); |
| 1033 | } |
| 1034 | } |
| 1035 | } |
| 1036 | |
| 1037 | void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr, |
| 1038 | UpwardsObjectClosure* cl) { |
| 1039 | assert_locked(freelistLock()); |
| 1040 | NOT_PRODUCT(verify_objects_initialized()); |
| 1041 | assert(!mr.is_empty(), "Should be non-empty" ); |
| 1042 | // We use MemRegion(bottom(), end()) rather than used_region() below |
| 1043 | // because the two are not necessarily equal for some kinds of |
| 1044 | // spaces, in particular, certain kinds of free list spaces. |
| 1045 | // We could use the more complicated but more precise: |
| 1046 | // MemRegion(used_region().start(), align_up(used_region().end(), CardSize)) |
| 1047 | // but the slight imprecision seems acceptable in the assertion check. |
| 1048 | assert(MemRegion(bottom(), end()).contains(mr), |
| 1049 | "Should be within used space" ); |
| 1050 | HeapWord* prev = cl->previous(); // max address from last time |
| 1051 | if (prev >= mr.end()) { // nothing to do |
| 1052 | return; |
| 1053 | } |
  // This assert will not work when we go from cms space to perm
  // space and use the same closure. Easy fix deferred for later. XXX YSR
| 1056 | // assert(prev == NULL || contains(prev), "Should be within space"); |
| 1057 | |
| 1058 | bool last_was_obj_array = false; |
| 1059 | HeapWord *blk_start_addr, *region_start_addr; |
| 1060 | if (prev > mr.start()) { |
| 1061 | region_start_addr = prev; |
| 1062 | blk_start_addr = prev; |
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
    // in this region due to a particular coalescing policy.
| 1066 | // Relax the assertion so that the case where the unallocated |
| 1067 | // block is maintained and "prev" is beyond the unallocated |
| 1068 | // block does not cause the assertion to fire. |
| 1069 | assert((BlockOffsetArrayUseUnallocatedBlock && |
| 1070 | (!is_in(prev))) || |
| 1071 | (blk_start_addr == block_start(region_start_addr)), "invariant" ); |
| 1072 | } else { |
| 1073 | region_start_addr = mr.start(); |
| 1074 | blk_start_addr = block_start(region_start_addr); |
| 1075 | } |
| 1076 | HeapWord* region_end_addr = mr.end(); |
| 1077 | MemRegion derived_mr(region_start_addr, region_end_addr); |
| 1078 | while (blk_start_addr < region_end_addr) { |
| 1079 | const size_t size = block_size(blk_start_addr); |
| 1080 | if (block_is_obj(blk_start_addr)) { |
| 1081 | last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr); |
| 1082 | } else { |
| 1083 | last_was_obj_array = false; |
| 1084 | } |
| 1085 | blk_start_addr += size; |
| 1086 | } |
| 1087 | if (!last_was_obj_array) { |
| 1088 | assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()), |
| 1089 | "Should be within (closed) used space" ); |
| 1090 | assert(blk_start_addr > prev, "Invariant" ); |
| 1091 | cl->set_previous(blk_start_addr); // min address for next time |
| 1092 | } |
| 1093 | } |
| 1094 | |
| 1095 | // Callers of this iterator beware: The closure application should |
| 1096 | // be robust in the face of uninitialized objects and should (always) |
| 1097 | // return a correct size so that the next addr + size below gives us a |
| 1098 | // valid block boundary. [See for instance, |
| 1099 | // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful() |
| 1100 | // in ConcurrentMarkSweepGeneration.cpp.] |
| 1101 | HeapWord* |
| 1102 | CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr, |
| 1103 | ObjectClosureCareful* cl) { |
| 1104 | assert_lock_strong(freelistLock()); |
| 1105 | // Can't use used_region() below because it may not necessarily |
| 1106 | // be the same as [bottom(),end()); although we could |
| 1107 | // use [used_region().start(),align_up(used_region().end(),CardSize)), |
| 1108 | // that appears too cumbersome, so we just do the simpler check |
| 1109 | // in the assertion below. |
| 1110 | assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr), |
| 1111 | "mr should be non-empty and within used space" ); |
| 1112 | HeapWord *addr, *end; |
| 1113 | size_t size; |
| 1114 | for (addr = block_start_careful(mr.start()), end = mr.end(); |
| 1115 | addr < end; addr += size) { |
| 1116 | FreeChunk* fc = (FreeChunk*)addr; |
| 1117 | if (fc->is_free()) { |
| 1118 | // Since we hold the free list lock, which protects direct |
| 1119 | // allocation in this generation by mutators, a free object |
| 1120 | // will remain free throughout this iteration code. |
| 1121 | size = fc->size(); |
| 1122 | } else { |
| 1123 | // Note that the object need not necessarily be initialized, |
| 1124 | // because (for instance) the free list lock does NOT protect |
| 1125 | // object initialization. The closure application below must |
| 1126 | // therefore be correct in the face of uninitialized objects. |
| 1127 | size = cl->do_object_careful_m(oop(addr), mr); |
| 1128 | if (size == 0) { |
| 1129 | // An unparsable object found. Signal early termination. |
| 1130 | return addr; |
| 1131 | } |
| 1132 | } |
| 1133 | } |
| 1134 | return NULL; |
| 1135 | } |
| 1136 | |
| 1137 | |
| 1138 | HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const { |
| 1139 | NOT_PRODUCT(verify_objects_initialized()); |
| 1140 | return _bt.block_start(p); |
| 1141 | } |
| 1142 | |
| 1143 | HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const { |
| 1144 | return _bt.block_start_careful(p); |
| 1145 | } |
| 1146 | |
| 1147 | size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const { |
| 1148 | NOT_PRODUCT(verify_objects_initialized()); |
  // The chunk must be read through a volatile pointer, or else there is a
  // danger that the compiler will compile the code below into a
  // sometimes-infinite loop, by keeping the value read the first time in a
  // register.
| 1152 | while (true) { |
| 1153 | // We must do this until we get a consistent view of the object. |
| 1154 | if (FreeChunk::indicatesFreeChunk(p)) { |
| 1155 | volatile FreeChunk* fc = (volatile FreeChunk*)p; |
| 1156 | size_t res = fc->size(); |
| 1157 | |
| 1158 | // Bugfix for systems with weak memory model (PPC64/IA64). The |
| 1159 | // block's free bit was set and we have read the size of the |
| 1160 | // block. Acquire and check the free bit again. If the block is |
| 1161 | // still free, the read size is correct. |
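      // In other words, the chunk may be concurrently allocated between the
      // first indicatesFreeChunk() check and the size read above; re-checking
      // the free bit after the acquire ensures the size returned belonged to
      // a chunk that was still free when it was read.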
| 1162 | OrderAccess::acquire(); |
| 1163 | |
| 1164 | // If the object is still a free chunk, return the size, else it |
| 1165 | // has been allocated so try again. |
| 1166 | if (FreeChunk::indicatesFreeChunk(p)) { |
| 1167 | assert(res != 0, "Block size should not be 0" ); |
| 1168 | return res; |
| 1169 | } |
| 1170 | } else { |
| 1171 | // Ensure klass read before size. |
| 1172 | Klass* k = oop(p)->klass_or_null_acquire(); |
| 1173 | if (k != NULL) { |
| 1174 | assert(k->is_klass(), "Should really be klass oop." ); |
| 1175 | oop o = (oop)p; |
| 1176 | assert(oopDesc::is_oop(o, true /* ignore mark word */), "Should be an oop." ); |
| 1177 | |
| 1178 | size_t res = o->size_given_klass(k); |
| 1179 | res = adjustObjectSize(res); |
| 1180 | assert(res != 0, "Block size should not be 0" ); |
| 1181 | return res; |
| 1182 | } |
| 1183 | } |
| 1184 | } |
| 1185 | } |
| 1186 | |
| 1187 | // TODO: Now that is_parsable is gone, we should combine these two functions. |
| 1188 | // A variant of the above that uses the Printezis bits for |
| 1189 | // unparsable but allocated objects. This avoids any possible |
| 1190 | // stalls waiting for mutators to initialize objects, and is |
| 1191 | // thus potentially faster than the variant above. However, |
| 1192 | // this variant may return a zero size for a block that is |
| 1193 | // under mutation and for which a consistent size cannot be |
| 1194 | // inferred without stalling; see CMSCollector::block_size_if_printezis_bits(). |
| 1195 | size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p, |
| 1196 | const CMSCollector* c) |
| 1197 | const { |
| 1198 | assert(MemRegion(bottom(), end()).contains(p), "p not in space" ); |
  // The chunk must be read through a volatile pointer, or else there is a
  // danger that the compiler will compile the code below into a
  // sometimes-infinite loop, by keeping the value read the first time in a
  // register.
| 1202 | DEBUG_ONLY(uint loops = 0;) |
| 1203 | while (true) { |
| 1204 | // We must do this until we get a consistent view of the object. |
| 1205 | if (FreeChunk::indicatesFreeChunk(p)) { |
| 1206 | volatile FreeChunk* fc = (volatile FreeChunk*)p; |
| 1207 | size_t res = fc->size(); |
| 1208 | |
| 1209 | // Bugfix for systems with weak memory model (PPC64/IA64). The |
| 1210 | // free bit of the block was set and we have read the size of |
| 1211 | // the block. Acquire and check the free bit again. If the |
| 1212 | // block is still free, the read size is correct. |
| 1213 | OrderAccess::acquire(); |
| 1214 | |
| 1215 | if (FreeChunk::indicatesFreeChunk(p)) { |
| 1216 | assert(res != 0, "Block size should not be 0" ); |
| 1217 | assert(loops == 0, "Should be 0" ); |
| 1218 | return res; |
| 1219 | } |
| 1220 | } else { |
| 1221 | // Ensure klass read before size. |
| 1222 | Klass* k = oop(p)->klass_or_null_acquire(); |
| 1223 | if (k != NULL) { |
| 1224 | assert(k->is_klass(), "Should really be klass oop." ); |
| 1225 | oop o = (oop)p; |
| 1226 | assert(oopDesc::is_oop(o), "Should be an oop" ); |
| 1227 | |
| 1228 | size_t res = o->size_given_klass(k); |
| 1229 | res = adjustObjectSize(res); |
| 1230 | assert(res != 0, "Block size should not be 0" ); |
| 1231 | return res; |
| 1232 | } else { |
| 1233 | // May return 0 if P-bits not present. |
| 1234 | return c->block_size_if_printezis_bits(p); |
| 1235 | } |
| 1236 | } |
| 1237 | assert(loops == 0, "Can loop at most once" ); |
| 1238 | DEBUG_ONLY(loops++;) |
| 1239 | } |
| 1240 | } |
| 1241 | |
| 1242 | size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const { |
| 1243 | NOT_PRODUCT(verify_objects_initialized()); |
| 1244 | assert(MemRegion(bottom(), end()).contains(p), "p not in space" ); |
| 1245 | FreeChunk* fc = (FreeChunk*)p; |
| 1246 | if (fc->is_free()) { |
| 1247 | return fc->size(); |
| 1248 | } else { |
| 1249 | // Ignore mark word because this may be a recently promoted |
| 1250 | // object whose mark word is used to chain together grey |
| 1251 | // objects (the last one would have a null value). |
| 1252 | assert(oopDesc::is_oop(oop(p), true), "Should be an oop" ); |
| 1253 | return adjustObjectSize(oop(p)->size()); |
| 1254 | } |
| 1255 | } |
| 1256 | |
| 1257 | // This implementation assumes that the property of "being an object" is |
| 1258 | // stable. But being a free chunk may not be (because of parallel |
| 1259 | // promotion.) |
| 1260 | bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const { |
| 1261 | FreeChunk* fc = (FreeChunk*)p; |
| 1262 | assert(is_in_reserved(p), "Should be in space" ); |
| 1263 | if (FreeChunk::indicatesFreeChunk(p)) return false; |
| 1264 | Klass* k = oop(p)->klass_or_null_acquire(); |
| 1265 | if (k != NULL) { |
| 1266 | // Ignore mark word because it may have been used to |
| 1267 | // chain together promoted objects (the last one |
| 1268 | // would have a null value). |
| 1269 | assert(oopDesc::is_oop(oop(p), true), "Should be an oop" ); |
| 1270 | return true; |
| 1271 | } else { |
| 1272 | return false; // Was not an object at the start of collection. |
| 1273 | } |
| 1274 | } |
| 1275 | |
| 1276 | // Check if the object is alive. This fact is checked either by consulting |
| 1277 | // the main marking bitmap in the sweeping phase or, if it's a permanent |
| 1278 | // generation and we're not in the sweeping phase, by checking the |
| 1279 | // perm_gen_verify_bit_map where we store the "deadness" information if |
| 1280 | // we did not sweep the perm gen in the most recent previous GC cycle. |
| 1281 | bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const { |
| 1282 | assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(), |
| 1283 | "Else races are possible" ); |
| 1284 | assert(block_is_obj(p), "The address should point to an object" ); |
| 1285 | |
| 1286 | // If we're sweeping, we use object liveness information from the main bit map |
| 1287 | // for both perm gen and old gen. |
| 1288 | // We don't need to lock the bitmap (live_map or dead_map below), because |
| 1289 | // EITHER we are in the middle of the sweeping phase, and the |
| 1290 | // main marking bit map (live_map below) is locked, |
| 1291 | // OR we're in other phases and perm_gen_verify_bit_map (dead_map below) |
| 1292 | // is stable, because it's mutated only in the sweeping phase. |
| 1293 | // NOTE: This method is also used by jmap where, if class unloading is |
| 1294 | // off, the results can return "false" for legitimate perm objects, |
| 1295 | // when we are not in the midst of a sweeping phase, which can result |
| 1296 | // in jmap not reporting certain perm gen objects. This will be moot |
| 1297 | // if/when the perm gen goes away in the future. |
| 1298 | if (_collector->abstract_state() == CMSCollector::Sweeping) { |
| 1299 | CMSBitMap* live_map = _collector->markBitMap(); |
| 1300 | return live_map->par_isMarked((HeapWord*) p); |
| 1301 | } |
| 1302 | return true; |
| 1303 | } |
| 1304 | |
| 1305 | bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const { |
| 1306 | FreeChunk* fc = (FreeChunk*)p; |
| 1307 | assert(is_in_reserved(p), "Should be in space" ); |
| 1308 | assert(_bt.block_start(p) == p, "Should be a block boundary" ); |
| 1309 | if (!fc->is_free()) { |
| 1310 | // Ignore mark word because it may have been used to |
| 1311 | // chain together promoted objects (the last one |
| 1312 | // would have a null value). |
| 1313 | assert(oopDesc::is_oop(oop(p), true), "Should be an oop" ); |
| 1314 | return true; |
| 1315 | } |
| 1316 | return false; |
| 1317 | } |
| 1318 | |
| 1319 | // "MT-safe but not guaranteed MT-precise" (TM); you may get an |
| 1320 | // approximate answer if you don't hold the freelistLock() when you call this.
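|      | // Illustrative arithmetic (example figures only): with 3 chunks on the
|      | // 8-word list and 2 chunks on the 16-word list, the sum computed below is
|      | // 3*8 + 2*16 = 56 words.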
| 1321 | size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const { |
| 1322 | size_t size = 0; |
| 1323 | for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
| 1324 | debug_only( |
| 1325 | // We may be calling here without the lock in which case we |
| 1326 | // won't do this modest sanity check. |
| 1327 | if (freelistLock()->owned_by_self()) { |
| 1328 | size_t total_list_size = 0; |
| 1329 | for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; |
| 1330 | fc = fc->next()) { |
| 1331 | total_list_size += i; |
| 1332 | } |
| 1333 | assert(total_list_size == i * _indexedFreeList[i].count(), |
| 1334 | "Count in list is incorrect" ); |
| 1335 | } |
| 1336 | ) |
| 1337 | size += i * _indexedFreeList[i].count(); |
| 1338 | } |
| 1339 | return size; |
| 1340 | } |
| 1341 | |
| 1342 | HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) { |
| 1343 | MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag); |
| 1344 | return allocate(size); |
| 1345 | } |
| 1346 | |
| 1347 | HeapWord* |
| 1348 | CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) { |
| 1349 | return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size); |
| 1350 | } |
| 1351 | |
| 1352 | HeapWord* CompactibleFreeListSpace::allocate(size_t size) { |
| 1353 | assert_lock_strong(freelistLock()); |
| 1354 | HeapWord* res = NULL; |
| 1355 | assert(size == adjustObjectSize(size), |
| 1356 | "use adjustObjectSize() before calling into allocate()" ); |
| 1357 | |
| 1358 | res = allocate_adaptive_freelists(size); |
| 1359 | |
| 1360 | if (res != NULL) { |
| 1361 | // check that res does lie in this space! |
| 1362 | assert(is_in_reserved(res), "Not in this space!" ); |
| 1363 | assert(is_aligned((void*)res), "alignment check" ); |
| 1364 | |
| 1365 | FreeChunk* fc = (FreeChunk*)res; |
| 1366 | fc->markNotFree(); |
| 1367 | assert(!fc->is_free(), "shouldn't be marked free" ); |
| 1368 | assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized" ); |
| 1369 | // Verify that the block offset table shows this to |
| 1370 | // be a single block, but not one which is unallocated. |
| 1371 | _bt.verify_single_block(res, size); |
| 1372 | _bt.verify_not_unallocated(res, size); |
| 1373 | // mangle a just allocated object with a distinct pattern. |
| 1374 | debug_only(fc->mangleAllocated(size)); |
| 1375 | } |
| 1376 | |
| 1377 | return res; |
| 1378 | } |
| 1379 | |
| 1380 | HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) { |
| 1381 | assert_lock_strong(freelistLock()); |
| 1382 | HeapWord* res = NULL; |
| 1383 | assert(size == adjustObjectSize(size), |
| 1384 | "use adjustObjectSize() before calling into allocate()" ); |
| 1385 | |
| 1386 | // Strategy |
| 1387 | // if small |
| 1388 | // exact size from small object indexed list if small |
| 1389 | // small or large linear allocation block (linAB) as appropriate |
| 1390 | // take from lists of greater sized chunks |
| 1391 | // else |
| 1392 | // dictionary |
| 1393 | // small or large linear allocation block if it has the space |
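|      | // Illustrative (sizes are examples only, assuming the usual default of
|      | // IndexSetSize == SmallForDictionary == 257 words): a 24-word request is
|      | // served from _indexedFreeList[24] if possible, then from the small linAB,
|      | // then by splitting a larger indexed chunk; a 300-word request goes
|      | // straight to the dictionary.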
| 1394 | // Try allocating exact size from indexTable first |
| 1395 | if (size < IndexSetSize) { |
| 1396 | res = (HeapWord*) getChunkFromIndexedFreeList(size); |
| 1397 | if(res != NULL) { |
| 1398 | assert(res != (HeapWord*)_indexedFreeList[size].head(), |
| 1399 | "Not removed from free list" ); |
| 1400 | // no block offset table adjustment is necessary on blocks in |
| 1401 | // the indexed lists. |
| 1402 | |
| 1403 | // Try allocating from the small LinAB |
| 1404 | } else if (size < _smallLinearAllocBlock._allocation_size_limit && |
| 1405 | (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) { |
| 1406 | // if successful, the above also adjusts block offset table |
| 1407 | // Note that this call will refill the LinAB to |
| 1408 | // satisfy the request, which differs from the
| 1409 | // evm behavior.
| 1410 | // Don't record chunk off a LinAB? smallSplitBirth(size); |
| 1411 | } else { |
| 1412 | // Raid the exact free lists larger than size, even if they are not |
| 1413 | // overpopulated. |
| 1414 | res = (HeapWord*) getChunkFromGreater(size); |
| 1415 | } |
| 1416 | } else { |
| 1417 | // Big objects get allocated directly from the dictionary. |
| 1418 | res = (HeapWord*) getChunkFromDictionaryExact(size); |
| 1419 | if (res == NULL) { |
| 1420 | // Try hard not to fail since an allocation failure will likely |
| 1421 | // trigger a synchronous GC. Try to get the space from the |
| 1422 | // allocation blocks. |
| 1423 | res = getChunkFromSmallLinearAllocBlockRemainder(size); |
| 1424 | } |
| 1425 | } |
| 1426 | |
| 1427 | return res; |
| 1428 | } |
| 1429 | |
| 1430 | // A worst-case estimate of the space required (in HeapWords) to expand the heap |
| 1431 | // when promoting an object of obj_size words.
| 1432 | size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const { |
| 1433 | // Depending on the object size, expansion may require refilling either a
| 1434 | // bigLAB or a smallLAB plus refilling a PromotionInfo object. 2 * MinChunkSize
| 1435 | // is added as slack because the dictionary may over-allocate to avoid fragmentation.
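|      | // Worked example with illustrative numbers: a 10-word object with a
|      | // promotion-info refill size of 512 words and a 7-word MinChunkSize
|      | // yields 10 + 512 + 2*7 = 536 words.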
| 1436 | size_t space = obj_size; |
| 1437 | space += _promoInfo.refillSize() + 2 * MinChunkSize; |
| 1438 | return space; |
| 1439 | } |
| 1440 | |
| 1441 | FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) { |
| 1442 | FreeChunk* ret; |
| 1443 | |
| 1444 | assert(numWords >= MinChunkSize, "Size is less than minimum" ); |
| 1445 | assert(linearAllocationWouldFail() || bestFitFirst(), |
| 1446 | "Should not be here" ); |
| 1447 | |
| 1448 | size_t i; |
| 1449 | size_t currSize = numWords + MinChunkSize; |
| 1450 | assert(is_object_aligned(currSize), "currSize should be aligned" ); |
| 1451 | for (i = currSize; i < IndexSetSize; i += IndexSetStride) { |
| 1452 | AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i]; |
| 1453 | if (fl->head()) { |
| 1454 | ret = getFromListGreater(fl, numWords); |
| 1455 | assert(ret == NULL || ret->is_free(), "Should be returning a free chunk" ); |
| 1456 | return ret; |
| 1457 | } |
| 1458 | } |
| 1459 | |
| 1460 | currSize = MAX2((size_t)SmallForDictionary, |
| 1461 | (size_t)(numWords + MinChunkSize)); |
| 1462 | |
| 1463 | /* Try to get a chunk that satisfies request, while avoiding |
| 1464 | fragmentation that can't be handled. */ |
| 1465 | { |
| 1466 | ret = dictionary()->get_chunk(currSize); |
| 1467 | if (ret != NULL) { |
| 1468 | assert(ret->size() - numWords >= MinChunkSize, |
| 1469 | "Chunk is too small" ); |
| 1470 | _bt.allocated((HeapWord*)ret, ret->size()); |
| 1471 | /* Carve returned chunk. */ |
| 1472 | (void) splitChunkAndReturnRemainder(ret, numWords); |
| 1473 | /* Label this as no longer a free chunk. */ |
| 1474 | assert(ret->is_free(), "This chunk should be free" ); |
| 1475 | ret->link_prev(NULL); |
| 1476 | } |
| 1477 | assert(ret == NULL || ret->is_free(), "Should be returning a free chunk" ); |
| 1478 | return ret; |
| 1479 | } |
| 1480 | ShouldNotReachHere(); |
| 1481 | } |
| 1482 | |
| 1483 | bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const { |
| 1484 | assert(fc->size() < IndexSetSize, "Size of chunk is too large" ); |
| 1485 | return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc); |
| 1486 | } |
| 1487 | |
| 1488 | bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const { |
| 1489 | assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) || |
| 1490 | (_smallLinearAllocBlock._word_size == fc->size()), |
| 1491 | "Linear allocation block shows incorrect size" ); |
| 1492 | return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) && |
| 1493 | (_smallLinearAllocBlock._word_size == fc->size())); |
| 1494 | } |
| 1495 | |
| 1496 | // Check if the purported free chunk is present either as a linear |
| 1497 | // allocation block, the size-indexed table of (smaller) free blocks, |
| 1498 | // or the larger free blocks kept in the binary tree dictionary. |
| 1499 | bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const { |
| 1500 | if (verify_chunk_is_linear_alloc_block(fc)) { |
| 1501 | return true; |
| 1502 | } else if (fc->size() < IndexSetSize) { |
| 1503 | return verifyChunkInIndexedFreeLists(fc); |
| 1504 | } else { |
| 1505 | return dictionary()->verify_chunk_in_free_list(fc); |
| 1506 | } |
| 1507 | } |
| 1508 | |
| 1509 | #ifndef PRODUCT |
| 1510 | void CompactibleFreeListSpace::assert_locked() const { |
| 1511 | CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock()); |
| 1512 | } |
| 1513 | |
| 1514 | void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const { |
| 1515 | CMSLockVerifier::assert_locked(lock); |
| 1516 | } |
| 1517 | #endif |
| 1518 | |
| 1519 | FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) { |
| 1520 | // In the parallel case, the main thread holds the free list lock |
| 1521 | // on behalf of the parallel threads.
| 1522 | FreeChunk* fc; |
| 1523 | { |
| 1524 | // If GC is parallel, this might be called by several threads. |
| 1525 | // This should be rare enough that the locking overhead won't affect |
| 1526 | // the sequential code. |
| 1527 | MutexLocker x(parDictionaryAllocLock(), |
| 1528 | Mutex::_no_safepoint_check_flag); |
| 1529 | fc = getChunkFromDictionary(size); |
| 1530 | } |
| 1531 | if (fc != NULL) { |
| 1532 | fc->dontCoalesce(); |
| 1533 | assert(fc->is_free(), "Should be free, but not coalescable" ); |
| 1534 | // Verify that the block offset table shows this to |
| 1535 | // be a single block, but not one which is unallocated. |
| 1536 | _bt.verify_single_block((HeapWord*)fc, fc->size()); |
| 1537 | _bt.verify_not_unallocated((HeapWord*)fc, fc->size()); |
| 1538 | } |
| 1539 | return fc; |
| 1540 | } |
| 1541 | |
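|      | // Illustrative caller pattern (a sketch only; the variable names below are
|      | // made up, and the real callers live in the CMS generation code):
|      | //   oop new_obj = cfls->promote(old_obj, (size_t)old_obj->size());
|      | //   if (new_obj == NULL) { /* promotion failed; caller must recover */ }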
| 1542 | oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) { |
| 1543 | assert(obj_size == (size_t)obj->size(), "bad obj_size passed in" ); |
| 1544 | assert_locked(); |
| 1545 | |
| 1546 | // if we are tracking promotions, then first ensure space for |
| 1547 | // promotion (including spooling space for saving header if necessary). |
| 1548 | // then allocate and copy, then track promoted info if needed. |
| 1549 | // When tracking (see PromotionInfo::track()), the mark word may |
| 1550 | // be displaced and in this case restoration of the mark word |
| 1551 | // occurs in the (oop_since_save_marks_)iterate phase. |
| 1552 | if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) { |
| 1553 | return NULL; |
| 1554 | } |
| 1555 | // Call the allocate(size_t, bool) form directly to avoid the |
| 1556 | // additional call through the allocate(size_t) form. Having |
| 1557 | // the compiler inline the call is problematic because allocate(size_t)
| 1558 | // is a virtual method. |
| 1559 | HeapWord* res = allocate(adjustObjectSize(obj_size)); |
| 1560 | if (res != NULL) { |
| 1561 | Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size); |
| 1562 | // if we should be tracking promotions, do so. |
| 1563 | if (_promoInfo.tracking()) { |
| 1564 | _promoInfo.track((PromotedObject*)res); |
| 1565 | } |
| 1566 | } |
| 1567 | return oop(res); |
| 1568 | } |
| 1569 | |
| 1570 | HeapWord* |
| 1571 | CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) { |
| 1572 | assert_locked(); |
| 1573 | assert(size >= MinChunkSize, "minimum chunk size" ); |
| 1574 | assert(size < _smallLinearAllocBlock._allocation_size_limit, |
| 1575 | "maximum from smallLinearAllocBlock" ); |
| 1576 | return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size); |
| 1577 | } |
| 1578 | |
| 1579 | HeapWord* |
| 1580 | CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk, |
| 1581 | size_t size) { |
| 1582 | assert_locked(); |
| 1583 | assert(size >= MinChunkSize, "too small" ); |
| 1584 | HeapWord* res = NULL; |
| 1585 | // Try to do linear allocation from blk, bailing out if the block is currently empty.
| 1586 | if (blk->_word_size == 0) { |
| 1587 | // We have probably been unable to fill this either in the prologue or |
| 1588 | // when it was exhausted at the last linear allocation. Bail out until |
| 1589 | // next time. |
| 1590 | assert(blk->_ptr == NULL, "consistency check" ); |
| 1591 | return NULL; |
| 1592 | } |
| 1593 | assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check" ); |
| 1594 | res = getChunkFromLinearAllocBlockRemainder(blk, size); |
| 1595 | if (res != NULL) return res; |
| 1596 | |
| 1597 | // about to exhaust this linear allocation block |
| 1598 | if (blk->_word_size == size) { // exactly satisfied |
| 1599 | res = blk->_ptr; |
| 1600 | _bt.allocated(res, blk->_word_size); |
| 1601 | } else if (size + MinChunkSize <= blk->_refillSize) { |
| 1602 | size_t sz = blk->_word_size; |
| 1603 | // Update _unallocated_block if the size is such that chunk would be |
| 1604 | // returned to the indexed free list. All other chunks in the indexed |
| 1605 | // free lists are allocated from the dictionary so that _unallocated_block |
| 1606 | // has already been adjusted for them. Do it here so that the cost
| 1607 | // is incurred only for chunks added back to the indexed free lists.
| 1608 | if (sz < SmallForDictionary) { |
| 1609 | _bt.allocated(blk->_ptr, sz); |
| 1610 | } |
| 1611 | // Return the chunk that isn't big enough, and then refill below. |
| 1612 | addChunkToFreeLists(blk->_ptr, sz); |
| 1613 | split_birth(sz); |
| 1614 | // Don't keep statistics on adding back chunk from a LinAB. |
| 1615 | } else { |
| 1616 | // A refilled block would not satisfy the request. |
| 1617 | return NULL; |
| 1618 | } |
| 1619 | |
| 1620 | blk->_ptr = NULL; blk->_word_size = 0; |
| 1621 | refillLinearAllocBlock(blk); |
| 1622 | assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize, |
| 1623 | "block was replenished" ); |
| 1624 | if (res != NULL) { |
| 1625 | split_birth(size); |
| 1626 | repairLinearAllocBlock(blk); |
| 1627 | } else if (blk->_ptr != NULL) { |
| 1628 | res = blk->_ptr; |
| 1629 | size_t blk_size = blk->_word_size; |
| 1630 | blk->_word_size -= size; |
| 1631 | blk->_ptr += size; |
| 1632 | split_birth(size); |
| 1633 | repairLinearAllocBlock(blk); |
| 1634 | // Update BOT last so that other (parallel) GC threads see a consistent |
| 1635 | // view of the BOT and free blocks. |
| 1636 | // Above must occur before BOT is updated below. |
| 1637 | OrderAccess::storestore(); |
| 1638 | _bt.split_block(res, blk_size, size); // adjust block offset table |
| 1639 | } |
| 1640 | return res; |
| 1641 | } |
| 1642 | |
| 1643 | HeapWord* CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder( |
| 1644 | LinearAllocBlock* blk, |
| 1645 | size_t size) { |
| 1646 | assert_locked(); |
| 1647 | assert(size >= MinChunkSize, "too small" ); |
| 1648 | |
| 1649 | HeapWord* res = NULL; |
| 1650 | // This is the common case. Keep it simple. |
| 1651 | if (blk->_word_size >= size + MinChunkSize) { |
| 1652 | assert(blk->_ptr != NULL, "consistency check" ); |
| 1653 | res = blk->_ptr; |
| 1654 | // Note that the BOT is up-to-date for the linAB before allocation. It |
| 1655 | // indicates the start of the linAB. The split_block() updates the |
| 1656 | // BOT for the linAB after the allocation (indicates the start of the |
| 1657 | // next chunk to be allocated). |
| 1658 | size_t blk_size = blk->_word_size; |
| 1659 | blk->_word_size -= size; |
| 1660 | blk->_ptr += size; |
| 1661 | split_birth(size); |
| 1662 | repairLinearAllocBlock(blk); |
| 1663 | // Update BOT last so that other (parallel) GC threads see a consistent |
| 1664 | // view of the BOT and free blocks. |
| 1665 | // Above must occur before BOT is updated below. |
| 1666 | OrderAccess::storestore(); |
| 1667 | _bt.split_block(res, blk_size, size); // adjust block offset table |
| 1668 | _bt.allocated(res, size); |
| 1669 | } |
| 1670 | return res; |
| 1671 | } |
| 1672 | |
| 1673 | FreeChunk* |
| 1674 | CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) { |
| 1675 | assert_locked(); |
| 1676 | assert(size < SmallForDictionary, "just checking" ); |
| 1677 | FreeChunk* res; |
| 1678 | res = _indexedFreeList[size].get_chunk_at_head(); |
| 1679 | if (res == NULL) { |
| 1680 | res = getChunkFromIndexedFreeListHelper(size); |
| 1681 | } |
| 1682 | _bt.verify_not_unallocated((HeapWord*) res, size); |
| 1683 | assert(res == NULL || res->size() == size, "Incorrect block size" ); |
| 1684 | return res; |
| 1685 | } |
| 1686 | |
| 1687 | FreeChunk* |
| 1688 | CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size, |
| 1689 | bool replenish) { |
| 1690 | assert_locked(); |
| 1691 | FreeChunk* fc = NULL; |
| 1692 | if (size < SmallForDictionary) { |
| 1693 | assert(_indexedFreeList[size].head() == NULL || |
| 1694 | _indexedFreeList[size].surplus() <= 0, |
| 1695 | "List for this size should be empty or under populated" ); |
| 1696 | // Try best fit in exact lists before replenishing the list |
| 1697 | if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) { |
| 1698 | // Replenish list. |
| 1699 | // |
| 1700 | // Things tried that failed. |
| 1701 | // Tried allocating out of the two LinAB's first before |
| 1702 | // replenishing lists. |
| 1703 | // Tried small linAB of size 256 (size in indexed list) |
| 1704 | // and replenishing indexed lists from the small linAB. |
| 1705 | // |
| 1706 | FreeChunk* newFc = NULL; |
| 1707 | const size_t replenish_size = CMSIndexedFreeListReplenish * size; |
| 1708 | if (replenish_size < SmallForDictionary) { |
| 1709 | // Do not replenish from an underpopulated size. |
| 1710 | if (_indexedFreeList[replenish_size].surplus() > 0 && |
| 1711 | _indexedFreeList[replenish_size].head() != NULL) { |
| 1712 | newFc = _indexedFreeList[replenish_size].get_chunk_at_head(); |
| 1713 | } else if (bestFitFirst()) { |
| 1714 | newFc = bestFitSmall(replenish_size); |
| 1715 | } |
| 1716 | } |
| 1717 | if (newFc == NULL && replenish_size > size) { |
| 1718 | assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant" ); |
| 1719 | newFc = getChunkFromIndexedFreeListHelper(replenish_size, false); |
| 1720 | } |
| 1721 | // Note: The stats update re split-death of block obtained above |
| 1722 | // will be recorded below precisely when we know we are going to |
| 1723 | // be actually splitting it into more than one piece below.
| 1724 | if (newFc != NULL) { |
| 1725 | if (replenish || CMSReplenishIntermediate) { |
| 1726 | // Replenish this list and return one block to caller. |
| 1727 | size_t i; |
| 1728 | FreeChunk *curFc, *nextFc; |
| 1729 | size_t num_blk = newFc->size() / size; |
| 1730 | assert(num_blk >= 1, "Smaller than requested?" ); |
| 1731 | assert(newFc->size() % size == 0, "Should be integral multiple of request" ); |
| 1732 | if (num_blk > 1) { |
| 1733 | // we are sure we will be splitting the block just obtained |
| 1734 | // into multiple pieces; record the split-death of the original |
| 1735 | splitDeath(replenish_size); |
| 1736 | } |
| 1737 | // carve up and link blocks 0, ..., num_blk - 2 |
| 1738 | // The last chunk is not added to the lists but is returned as the |
| 1739 | // free chunk. |
| 1740 | for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size), |
| 1741 | i = 0; |
| 1742 | i < (num_blk - 1); |
| 1743 | curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size), |
| 1744 | i++) { |
| 1745 | curFc->set_size(size); |
| 1746 | // Don't record this as a return in order to try and |
| 1747 | // determine the "returns" from a GC. |
| 1748 | _bt.verify_not_unallocated((HeapWord*) fc, size); |
| 1749 | _indexedFreeList[size].return_chunk_at_tail(curFc, false); |
| 1750 | _bt.mark_block((HeapWord*)curFc, size); |
| 1751 | split_birth(size); |
| 1752 | // Don't record the initial population of the indexed list |
| 1753 | // as a split birth. |
| 1754 | } |
| 1755 | |
| 1756 | // check that the arithmetic was OK above |
| 1757 | assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size, |
| 1758 | "inconsistency in carving newFc" ); |
| 1759 | curFc->set_size(size); |
| 1760 | _bt.mark_block((HeapWord*)curFc, size); |
| 1761 | split_birth(size); |
| 1762 | fc = curFc; |
| 1763 | } else { |
| 1764 | // Return entire block to caller |
| 1765 | fc = newFc; |
| 1766 | } |
| 1767 | } |
| 1768 | } |
| 1769 | } else { |
| 1770 | // Get a free chunk from the free chunk dictionary to be returned to |
| 1771 | // replenish the indexed free list. |
| 1772 | fc = getChunkFromDictionaryExact(size); |
| 1773 | } |
| 1774 | // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk"); |
| 1775 | return fc; |
| 1776 | } |
| 1777 | |
| 1778 | FreeChunk* |
| 1779 | CompactibleFreeListSpace::getChunkFromDictionary(size_t size) { |
| 1780 | assert_locked(); |
| 1781 | FreeChunk* fc = _dictionary->get_chunk(size); |
| 1782 | if (fc == NULL) { |
| 1783 | return NULL; |
| 1784 | } |
| 1785 | _bt.allocated((HeapWord*)fc, fc->size()); |
| 1786 | if (fc->size() >= size + MinChunkSize) { |
| 1787 | fc = splitChunkAndReturnRemainder(fc, size); |
| 1788 | } |
| 1789 | assert(fc->size() >= size, "chunk too small" ); |
| 1790 | assert(fc->size() < size + MinChunkSize, "chunk too big" ); |
| 1791 | _bt.verify_single_block((HeapWord*)fc, fc->size()); |
| 1792 | return fc; |
| 1793 | } |
| 1794 | |
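|      | // Unlike getChunkFromDictionary() above, which may hand back a block up to
|      | // MinChunkSize - 1 words larger than requested, this variant trims the
|      | // result to exactly 'size' words, retrying the dictionary with
|      | // size + MinChunkSize when the first block is too small to split.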
| 1795 | FreeChunk* |
| 1796 | CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) { |
| 1797 | assert_locked(); |
| 1798 | FreeChunk* fc = _dictionary->get_chunk(size); |
| 1799 | if (fc == NULL) { |
| 1800 | return fc; |
| 1801 | } |
| 1802 | _bt.allocated((HeapWord*)fc, fc->size()); |
| 1803 | if (fc->size() == size) { |
| 1804 | _bt.verify_single_block((HeapWord*)fc, size); |
| 1805 | return fc; |
| 1806 | } |
| 1807 | assert(fc->size() > size, "get_chunk() guarantee" ); |
| 1808 | if (fc->size() < size + MinChunkSize) { |
| 1809 | // Return the chunk to the dictionary and go get a bigger one. |
| 1810 | returnChunkToDictionary(fc); |
| 1811 | fc = _dictionary->get_chunk(size + MinChunkSize); |
| 1812 | if (fc == NULL) { |
| 1813 | return NULL; |
| 1814 | } |
| 1815 | _bt.allocated((HeapWord*)fc, fc->size()); |
| 1816 | } |
| 1817 | assert(fc->size() >= size + MinChunkSize, "tautology" ); |
| 1818 | fc = splitChunkAndReturnRemainder(fc, size); |
| 1819 | assert(fc->size() == size, "chunk is wrong size" ); |
| 1820 | _bt.verify_single_block((HeapWord*)fc, size); |
| 1821 | return fc; |
| 1822 | } |
| 1823 | |
| 1824 | void |
| 1825 | CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) { |
| 1826 | assert_locked(); |
| 1827 | |
| 1828 | size_t size = chunk->size(); |
| 1829 | _bt.verify_single_block((HeapWord*)chunk, size); |
| 1830 | // adjust _unallocated_block downward, as necessary |
| 1831 | _bt.freed((HeapWord*)chunk, size); |
| 1832 | _dictionary->return_chunk(chunk); |
| 1833 | #ifndef PRODUCT |
| 1834 | if (CMSCollector::abstract_state() != CMSCollector::Sweeping) { |
| 1835 | TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk); |
| 1836 | TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list(); |
| 1837 | tl->verify_stats(); |
| 1838 | } |
| 1839 | #endif // PRODUCT |
| 1840 | } |
| 1841 | |
| 1842 | void |
| 1843 | CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) { |
| 1844 | assert_locked(); |
| 1845 | size_t size = fc->size(); |
| 1846 | _bt.verify_single_block((HeapWord*) fc, size); |
| 1847 | _bt.verify_not_unallocated((HeapWord*) fc, size); |
| 1848 | _indexedFreeList[size].return_chunk_at_tail(fc); |
| 1849 | #ifndef PRODUCT |
| 1850 | if (CMSCollector::abstract_state() != CMSCollector::Sweeping) { |
| 1851 | _indexedFreeList[size].verify_stats(); |
| 1852 | } |
| 1853 | #endif // PRODUCT |
| 1854 | } |
| 1855 | |
| 1856 | // Add chunk to end of last block -- if it's the largest |
| 1857 | // block -- and update BOT and census data. We would |
| 1858 | // of course have preferred to coalesce it with the |
| 1859 | // last block, but it's currently less expensive to find the |
| 1860 | // largest block than it is to find the last. |
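|      | // Illustrative (addresses are examples only): if the largest dictionary
|      | // block covers [0x1000, 0x1200) and the chunk being added starts at 0x1200,
|      | // the two are coterminal; the old block is removed, the sizes are summed,
|      | // and the combined block is re-added under the appropriate lock.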
| 1861 | void |
| 1862 | CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats( |
| 1863 | HeapWord* chunk, size_t size) { |
| 1864 | // check that the chunk does lie in this space! |
| 1865 | assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!" ); |
| 1866 | // One of the parallel gc task threads may be here |
| 1867 | // whilst others are allocating. |
| 1868 | Mutex* lock = &_parDictionaryAllocLock; |
| 1869 | FreeChunk* ec; |
| 1870 | { |
| 1871 | MutexLocker x(lock, Mutex::_no_safepoint_check_flag); |
| 1872 | ec = dictionary()->find_largest_dict(); // get largest block |
| 1873 | if (ec != NULL && ec->end() == (uintptr_t*) chunk) { |
| 1874 | // It's a coterminal block - we can coalesce. |
| 1875 | size_t old_size = ec->size(); |
| 1876 | coalDeath(old_size); |
| 1877 | removeChunkFromDictionary(ec); |
| 1878 | size += old_size; |
| 1879 | } else { |
| 1880 | ec = (FreeChunk*)chunk; |
| 1881 | } |
| 1882 | } |
| 1883 | ec->set_size(size); |
| 1884 | debug_only(ec->mangleFreed(size)); |
| 1885 | if (size < SmallForDictionary) { |
| 1886 | lock = _indexedFreeListParLocks[size]; |
| 1887 | } |
| 1888 | MutexLocker x(lock, Mutex::_no_safepoint_check_flag); |
| 1889 | addChunkAndRepairOffsetTable((HeapWord*)ec, size, true); |
| 1890 | // record the birth under the lock since the recording involves |
| 1891 | // manipulation of the list on which the chunk lives and |
| 1892 | // if the chunk is allocated and is the last on the list, |
| 1893 | // the list can go away. |
| 1894 | coalBirth(size); |
| 1895 | } |
| 1896 | |
| 1897 | void |
| 1898 | CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk, |
| 1899 | size_t size) { |
| 1900 | // check that the chunk does lie in this space! |
| 1901 | assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!" ); |
| 1902 | assert_locked(); |
| 1903 | _bt.verify_single_block(chunk, size); |
| 1904 | |
| 1905 | FreeChunk* fc = (FreeChunk*) chunk; |
| 1906 | fc->set_size(size); |
| 1907 | debug_only(fc->mangleFreed(size)); |
| 1908 | if (size < SmallForDictionary) { |
| 1909 | returnChunkToFreeList(fc); |
| 1910 | } else { |
| 1911 | returnChunkToDictionary(fc); |
| 1912 | } |
| 1913 | } |
| 1914 | |
| 1915 | void |
| 1916 | CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk, |
| 1917 | size_t size, bool coalesced) { |
| 1918 | assert_locked(); |
| 1919 | assert(chunk != NULL, "null chunk" ); |
| 1920 | if (coalesced) { |
| 1921 | // repair BOT |
| 1922 | _bt.single_block(chunk, size); |
| 1923 | } |
| 1924 | addChunkToFreeLists(chunk, size); |
| 1925 | } |
| 1926 | |
| 1927 | // We _must_ find the purported chunk on our free lists; |
| 1928 | // we assert if we don't. |
| 1929 | void |
| 1930 | CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) { |
| 1931 | size_t size = fc->size(); |
| 1932 | assert_locked(); |
| 1933 | debug_only(verifyFreeLists()); |
| 1934 | if (size < SmallForDictionary) { |
| 1935 | removeChunkFromIndexedFreeList(fc); |
| 1936 | } else { |
| 1937 | removeChunkFromDictionary(fc); |
| 1938 | } |
| 1939 | _bt.verify_single_block((HeapWord*)fc, size); |
| 1940 | debug_only(verifyFreeLists()); |
| 1941 | } |
| 1942 | |
| 1943 | void |
| 1944 | CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) { |
| 1945 | size_t size = fc->size(); |
| 1946 | assert_locked(); |
| 1947 | assert(fc != NULL, "null chunk" ); |
| 1948 | _bt.verify_single_block((HeapWord*)fc, size); |
| 1949 | _dictionary->remove_chunk(fc); |
| 1950 | // adjust _unallocated_block upward, as necessary |
| 1951 | _bt.allocated((HeapWord*)fc, size); |
| 1952 | } |
| 1953 | |
| 1954 | void |
| 1955 | CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) { |
| 1956 | assert_locked(); |
| 1957 | size_t size = fc->size(); |
| 1958 | _bt.verify_single_block((HeapWord*)fc, size); |
| 1959 | NOT_PRODUCT( |
| 1960 | if (FLSVerifyIndexTable) { |
| 1961 | verifyIndexedFreeList(size); |
| 1962 | } |
| 1963 | ) |
| 1964 | _indexedFreeList[size].remove_chunk(fc); |
| 1965 | NOT_PRODUCT( |
| 1966 | if (FLSVerifyIndexTable) { |
| 1967 | verifyIndexedFreeList(size); |
| 1968 | } |
| 1969 | ) |
| 1970 | } |
| 1971 | |
| 1972 | FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) { |
| 1973 | /* A hint is the next larger size that has a surplus. |
| 1974 | Start search at a size large enough to guarantee that |
| 1975 | the excess is >= MIN_CHUNK. */ |
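|      | /* Illustrative (sizes are examples only): for a 20-word request with a
|      | 7-word MinChunkSize, the search starts at list 27; if that list's hint
|      | points at 32 and list 32 has a surplus, a 32-word chunk is split so that
|      | 20 words are returned and the 12-word remainder goes back on a free list. */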
| 1976 | size_t start = align_object_size(numWords + MinChunkSize); |
| 1977 | if (start < IndexSetSize) { |
| 1978 | AdaptiveFreeList<FreeChunk>* it = _indexedFreeList; |
| 1979 | size_t hint = _indexedFreeList[start].hint(); |
| 1980 | while (hint < IndexSetSize) { |
| 1981 | assert(is_object_aligned(hint), "hint should be aligned" ); |
| 1982 | AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint]; |
| 1983 | if (fl->surplus() > 0 && fl->head() != NULL) { |
| 1984 | // Found a list with surplus, reset original hint |
| 1985 | // and split out a free chunk which is returned. |
| 1986 | _indexedFreeList[start].set_hint(hint); |
| 1987 | FreeChunk* res = getFromListGreater(fl, numWords); |
| 1988 | assert(res == NULL || res->is_free(), |
| 1989 | "Should be returning a free chunk" ); |
| 1990 | return res; |
| 1991 | } |
| 1992 | hint = fl->hint(); /* keep looking */ |
| 1993 | } |
| 1994 | /* None found. */ |
| 1995 | it[start].set_hint(IndexSetSize); |
| 1996 | } |
| 1997 | return NULL; |
| 1998 | } |
| 1999 | |
| 2000 | /* Requires fl->size >= numWords + MinChunkSize */ |
| 2001 | FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, |
| 2002 | size_t numWords) { |
| 2003 | FreeChunk *curr = fl->head();
| 2004 | assert(curr != NULL, "List is empty");
| 2005 | size_t oldNumWords = curr->size();
| 2006 | assert(numWords >= MinChunkSize, "Word size is too small");
| 2007 | assert(oldNumWords >= numWords + MinChunkSize, |
| 2008 | "Size of chunks in the list is too small" ); |
| 2009 | |
| 2010 | fl->remove_chunk(curr); |
| 2011 | // recorded indirectly by splitChunkAndReturnRemainder - |
| 2012 | // smallSplit(oldNumWords, numWords); |
| 2013 | FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords); |
| 2014 | // Does anything have to be done for the remainder in terms of |
| 2015 | // fixing the card table? |
| 2016 | assert(new_chunk == NULL || new_chunk->is_free(), |
| 2017 | "Should be returning a free chunk" ); |
| 2018 | return new_chunk; |
| 2019 | } |
| 2020 | |
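|      | // Split 'chunk' so that its first new_size words are returned to the caller
|      | // and the remainder is re-inserted as a free block (indexed lists or
|      | // dictionary, depending on its size). Illustrative: splitting a 100-word
|      | // chunk for a 60-word request leaves a 40-word remainder on the indexed
|      | // free lists (40 being below SmallForDictionary at its usual default of 257).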
| 2021 | FreeChunk* |
| 2022 | CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk, |
| 2023 | size_t new_size) { |
| 2024 | assert_locked(); |
| 2025 | size_t size = chunk->size(); |
| 2026 | assert(size > new_size, "Split from a smaller block?" ); |
| 2027 | assert(is_aligned(chunk), "alignment problem" ); |
| 2028 | assert(size == adjustObjectSize(size), "alignment problem" ); |
| 2029 | size_t rem_sz = size - new_size; |
| 2030 | assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem" ); |
| 2031 | assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum" ); |
| 2032 | FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size); |
| 2033 | assert(is_aligned(ffc), "alignment problem" ); |
| 2034 | ffc->set_size(rem_sz); |
| 2035 | ffc->link_next(NULL); |
| 2036 | ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads. |
| 2037 | // Above must occur before BOT is updated below. |
| 2038 | // adjust block offset table |
| 2039 | OrderAccess::storestore(); |
| 2040 | assert(chunk->is_free() && ffc->is_free(), "Error" ); |
| 2041 | _bt.split_block((HeapWord*)chunk, chunk->size(), new_size); |
| 2042 | if (rem_sz < SmallForDictionary) { |
| 2043 | // The freeList lock is held, but multiple GC task threads might be executing in parallel. |
| 2044 | bool is_par = Thread::current()->is_GC_task_thread(); |
| 2045 | if (is_par) _indexedFreeListParLocks[rem_sz]->lock_without_safepoint_check(); |
| 2046 | returnChunkToFreeList(ffc); |
| 2047 | split(size, rem_sz); |
| 2048 | if (is_par) _indexedFreeListParLocks[rem_sz]->unlock(); |
| 2049 | } else { |
| 2050 | returnChunkToDictionary(ffc); |
| 2051 | split(size, rem_sz); |
| 2052 | } |
| 2053 | chunk->set_size(new_size); |
| 2054 | return chunk; |
| 2055 | } |
| 2056 | |
| 2057 | void |
| 2058 | CompactibleFreeListSpace::sweep_completed() { |
| 2059 | // Now that space is probably plentiful, refill linear |
| 2060 | // allocation blocks as needed. |
| 2061 | refillLinearAllocBlocksIfNeeded(); |
| 2062 | } |
| 2063 | |
| 2064 | void |
| 2065 | CompactibleFreeListSpace::gc_prologue() { |
| 2066 | assert_locked(); |
| 2067 | reportFreeListStatistics("Before GC:" ); |
| 2068 | refillLinearAllocBlocksIfNeeded(); |
| 2069 | } |
| 2070 | |
| 2071 | void |
| 2072 | CompactibleFreeListSpace::gc_epilogue() { |
| 2073 | assert_locked(); |
| 2074 | assert(_promoInfo.noPromotions(), "_promoInfo inconsistency" ); |
| 2075 | _promoInfo.stopTrackingPromotions(); |
| 2076 | repairLinearAllocationBlocks(); |
| 2077 | reportFreeListStatistics("After GC:" ); |
| 2078 | } |
| 2079 | |
| 2080 | // Iteration support, mostly delegated from a CMS generation |
| 2081 | |
| 2082 | void CompactibleFreeListSpace::save_marks() { |
| 2083 | assert(Thread::current()->is_VM_thread(), |
| 2084 | "Global variable should only be set when single-threaded" ); |
| 2085 | // Mark the "end" of the used space at the time of this call; |
| 2086 | // note, however, that promoted objects from this point |
| 2087 | // on are tracked in the _promoInfo below. |
| 2088 | set_saved_mark_word(unallocated_block()); |
| 2089 | #ifdef ASSERT |
| 2090 | // Check the sanity of save_marks() etc. |
| 2091 | MemRegion ur = used_region(); |
| 2092 | MemRegion urasm = used_region_at_save_marks(); |
| 2093 | assert(ur.contains(urasm), |
| 2094 | " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")" |
| 2095 | " should contain [" PTR_FORMAT "," PTR_FORMAT ")" , |
| 2096 | p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())); |
| 2097 | #endif |
| 2098 | // inform allocator that promotions should be tracked. |
| 2099 | assert(_promoInfo.noPromotions(), "_promoInfo inconsistency" ); |
| 2100 | _promoInfo.startTrackingPromotions(); |
| 2101 | } |
| 2102 | |
| 2103 | bool CompactibleFreeListSpace::no_allocs_since_save_marks() { |
| 2104 | assert(_promoInfo.tracking(), "No preceding save_marks?" ); |
| 2105 | return _promoInfo.noPromotions(); |
| 2106 | } |
| 2107 | |
| 2108 | bool CompactibleFreeListSpace::linearAllocationWouldFail() const { |
| 2109 | return _smallLinearAllocBlock._word_size == 0; |
| 2110 | } |
| 2111 | |
| 2112 | void CompactibleFreeListSpace::repairLinearAllocationBlocks() { |
| 2113 | // Fix up linear allocation blocks to look like free blocks |
| 2114 | repairLinearAllocBlock(&_smallLinearAllocBlock); |
| 2115 | } |
| 2116 | |
| 2117 | void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) { |
| 2118 | assert_locked(); |
| 2119 | if (blk->_ptr != NULL) { |
| 2120 | assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize, |
| 2121 | "Minimum block size requirement" ); |
| 2122 | FreeChunk* fc = (FreeChunk*)(blk->_ptr); |
| 2123 | fc->set_size(blk->_word_size); |
| 2124 | fc->link_prev(NULL); // mark as free |
| 2125 | fc->dontCoalesce(); |
| 2126 | assert(fc->is_free(), "just marked it free" ); |
| 2127 | assert(fc->cantCoalesce(), "just marked it uncoalescable" ); |
| 2128 | } |
| 2129 | } |
| 2130 | |
| 2131 | void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() { |
| 2132 | assert_locked(); |
| 2133 | if (_smallLinearAllocBlock._ptr == NULL) { |
| 2134 | assert(_smallLinearAllocBlock._word_size == 0, |
| 2135 | "Size of linAB should be zero if the ptr is NULL" ); |
| 2136 | // Reset the linAB refill and allocation size limit. |
| 2137 | _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc); |
| 2138 | } |
| 2139 | refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock); |
| 2140 | } |
| 2141 | |
| 2142 | void |
| 2143 | CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) { |
| 2144 | assert_locked(); |
| 2145 | assert((blk->_ptr == NULL && blk->_word_size == 0) || |
| 2146 | (blk->_ptr != NULL && blk->_word_size >= MinChunkSize), |
| 2147 | "blk invariant" ); |
| 2148 | if (blk->_ptr == NULL) { |
| 2149 | refillLinearAllocBlock(blk); |
| 2150 | } |
| 2151 | } |
| 2152 | |
| 2153 | void |
| 2154 | CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) { |
| 2155 | assert_locked(); |
| 2156 | assert(blk->_word_size == 0 && blk->_ptr == NULL, |
| 2157 | "linear allocation block should be empty" ); |
| 2158 | FreeChunk* fc; |
| 2159 | if (blk->_refillSize < SmallForDictionary && |
| 2160 | (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) { |
| 2161 | // A linAB's strategy might be to use small sizes to reduce |
| 2162 | // fragmentation but still get the benefits of allocation from a |
| 2163 | // linAB. |
| 2164 | } else { |
| 2165 | fc = getChunkFromDictionary(blk->_refillSize); |
| 2166 | } |
| 2167 | if (fc != NULL) { |
| 2168 | blk->_ptr = (HeapWord*)fc; |
| 2169 | blk->_word_size = fc->size(); |
| 2170 | fc->dontCoalesce(); // to prevent sweeper from sweeping us up |
| 2171 | } |
| 2172 | } |
| 2173 | |
| 2174 | // Support for compaction |
| 2175 | void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) { |
| 2176 | scan_and_forward(this, cp); |
| 2177 | // prepare_for_compaction() uses the space between live objects
| 2178 | // so that a later phase can skip dead space quickly. As a result,
| 2179 | // verification of the free lists does not work after this point.
| 2180 | } |
| 2181 | |
| 2182 | void CompactibleFreeListSpace::adjust_pointers() { |
| 2183 | // In other versions of adjust_pointers(), a bail out |
| 2184 | // based on the amount of live data in the generation |
| 2185 | // (i.e., if 0, bail out) may be used. |
| 2186 | // Cannot test used() == 0 here because the free lists have already |
| 2187 | // been mangled by the compaction. |
| 2188 | |
| 2189 | scan_and_adjust_pointers(this); |
| 2190 | // See note about verification in prepare_for_compaction(). |
| 2191 | } |
| 2192 | |
| 2193 | void CompactibleFreeListSpace::compact() { |
| 2194 | scan_and_compact(this); |
| 2195 | } |
| 2196 | |
| 2197 | // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2] |
| 2198 | // where fbs is free block sizes |
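|      | // For example, free blocks of sizes {2, 2, 4} give
|      | //   1 - (4 + 4 + 16) / (2 + 2 + 4)^2 = 1 - 24/64 = 0.625,
|      | // while a single free block of any size gives 0 (no fragmentation).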
| 2199 | double CompactibleFreeListSpace::flsFrag() const { |
| 2200 | size_t itabFree = totalSizeInIndexedFreeLists(); |
| 2201 | double frag = 0.0; |
| 2202 | size_t i; |
| 2203 | |
| 2204 | for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
| 2205 | double sz = i; |
| 2206 | frag += _indexedFreeList[i].count() * (sz * sz); |
| 2207 | } |
| 2208 | |
| 2209 | double totFree = itabFree + |
| 2210 | _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())); |
| 2211 | if (totFree > 0) { |
| 2212 | frag = ((frag + _dictionary->sum_of_squared_block_sizes()) / |
| 2213 | (totFree * totFree)); |
| 2214 | frag = (double)1.0 - frag; |
| 2215 | } else { |
| 2216 | assert(frag == 0.0, "Follows from totFree == 0" ); |
| 2217 | } |
| 2218 | return frag; |
| 2219 | } |
| 2220 | |
| 2221 | void CompactibleFreeListSpace::beginSweepFLCensus( |
| 2222 | float inter_sweep_current, |
| 2223 | float inter_sweep_estimate, |
| 2224 | float intra_sweep_estimate) { |
| 2225 | assert_locked(); |
| 2226 | size_t i; |
| 2227 | for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
| 2228 | AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i]; |
| 2229 | log_trace(gc, freelist)("size[" SIZE_FORMAT "] : " , i); |
| 2230 | fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate); |
| 2231 | fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent)); |
| 2232 | fl->set_before_sweep(fl->count()); |
| 2233 | fl->set_bfr_surp(fl->surplus()); |
| 2234 | } |
| 2235 | _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent, |
| 2236 | inter_sweep_current, |
| 2237 | inter_sweep_estimate, |
| 2238 | intra_sweep_estimate); |
| 2239 | } |
| 2240 | |
| 2241 | void CompactibleFreeListSpace::setFLSurplus() { |
| 2242 | assert_locked(); |
| 2243 | size_t i; |
| 2244 | for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
| 2245 | AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i]; |
| 2246 | fl->set_surplus(fl->count() - |
| 2247 | (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent)); |
| 2248 | } |
| 2249 | } |
| 2250 | |
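|      | // Recompute each list's hint: the hint for size i is the smallest larger
|      | // size that currently has a surplus, or IndexSetSize if there is none.
|      | // Illustrative (ignoring IndexSetStride): if only the 16- and 40-word lists
|      | // have a surplus, sizes below 16 get hint 16, sizes from 16 up to (but not
|      | // including) 40 get hint 40, and sizes 40 and above get IndexSetSize.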
| 2251 | void CompactibleFreeListSpace::setFLHints() { |
| 2252 | assert_locked(); |
| 2253 | size_t i; |
| 2254 | size_t h = IndexSetSize; |
| 2255 | for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) { |
| 2256 | AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i]; |
| 2257 | fl->set_hint(h); |
| 2258 | if (fl->surplus() > 0) { |
| 2259 | h = i; |
| 2260 | } |
| 2261 | } |
| 2262 | } |
| 2263 | |
| 2264 | void CompactibleFreeListSpace::clearFLCensus() { |
| 2265 | assert_locked(); |
| 2266 | size_t i; |
| 2267 | for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
| 2268 | AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i]; |
| 2269 | fl->set_prev_sweep(fl->count()); |
| 2270 | fl->set_coal_births(0); |
| 2271 | fl->set_coal_deaths(0); |
| 2272 | fl->set_split_births(0); |
| 2273 | fl->set_split_deaths(0); |
| 2274 | } |
| 2275 | } |
| 2276 | |
| 2277 | void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) { |
| 2278 | log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict())); |
| 2279 | setFLSurplus(); |
| 2280 | setFLHints(); |
| 2281 | printFLCensus(sweep_count); |
| 2282 | clearFLCensus(); |
| 2283 | assert_locked(); |
| 2284 | _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent); |
| 2285 | } |
| 2286 | |
| 2287 | bool CompactibleFreeListSpace::coalOverPopulated(size_t size) { |
| 2288 | if (size < SmallForDictionary) { |
| 2289 | AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size]; |
| 2290 | return (fl->coal_desired() < 0) || |
| 2291 | ((int)fl->count() > fl->coal_desired()); |
| 2292 | } else { |
| 2293 | return dictionary()->coal_dict_over_populated(size); |
| 2294 | } |
| 2295 | } |
| 2296 | |
| 2297 | void CompactibleFreeListSpace::smallCoalBirth(size_t size) { |
| 2298 | assert(size < SmallForDictionary, "Size too large for indexed list" ); |
| 2299 | AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size]; |
| 2300 | fl->increment_coal_births(); |
| 2301 | fl->increment_surplus(); |
| 2302 | } |
| 2303 | |
| 2304 | void CompactibleFreeListSpace::smallCoalDeath(size_t size) { |
| 2305 | assert(size < SmallForDictionary, "Size too large for indexed list" ); |
| 2306 | AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size]; |
| 2307 | fl->increment_coal_deaths(); |
| 2308 | fl->decrement_surplus(); |
| 2309 | } |
| 2310 | |
| 2311 | void CompactibleFreeListSpace::coalBirth(size_t size) { |
| 2312 | if (size < SmallForDictionary) { |
| 2313 | smallCoalBirth(size); |
| 2314 | } else { |
| 2315 | dictionary()->dict_census_update(size, |
| 2316 | false /* split */, |
| 2317 | true /* birth */); |
| 2318 | } |
| 2319 | } |
| 2320 | |
| 2321 | void CompactibleFreeListSpace::coalDeath(size_t size) { |
| 2322 | if(size < SmallForDictionary) { |
| 2323 | smallCoalDeath(size); |
| 2324 | } else { |
| 2325 | dictionary()->dict_census_update(size, |
| 2326 | false /* split */, |
| 2327 | false /* birth */); |
| 2328 | } |
| 2329 | } |
| 2330 | |
| 2331 | void CompactibleFreeListSpace::smallSplitBirth(size_t size) { |
| 2332 | assert(size < SmallForDictionary, "Size too large for indexed list" ); |
| 2333 | AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size]; |
| 2334 | fl->increment_split_births(); |
| 2335 | fl->increment_surplus(); |
| 2336 | } |
| 2337 | |
| 2338 | void CompactibleFreeListSpace::smallSplitDeath(size_t size) { |
| 2339 | assert(size < SmallForDictionary, "Size too large for indexed list" ); |
| 2340 | AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size]; |
| 2341 | fl->increment_split_deaths(); |
| 2342 | fl->decrement_surplus(); |
| 2343 | } |
| 2344 | |
| 2345 | void CompactibleFreeListSpace::split_birth(size_t size) { |
| 2346 | if (size < SmallForDictionary) { |
| 2347 | smallSplitBirth(size); |
| 2348 | } else { |
| 2349 | dictionary()->dict_census_update(size, |
| 2350 | true /* split */, |
| 2351 | true /* birth */); |
| 2352 | } |
| 2353 | } |
| 2354 | |
| 2355 | void CompactibleFreeListSpace::splitDeath(size_t size) { |
| 2356 | if (size < SmallForDictionary) { |
| 2357 | smallSplitDeath(size); |
| 2358 | } else { |
| 2359 | dictionary()->dict_census_update(size, |
| 2360 | true /* split */, |
| 2361 | false /* birth */); |
| 2362 | } |
| 2363 | } |
| 2364 | |
| 2365 | void CompactibleFreeListSpace::split(size_t from, size_t to1) { |
| 2366 | size_t to2 = from - to1; |
| 2367 | splitDeath(from); |
| 2368 | split_birth(to1); |
| 2369 | split_birth(to2); |
| 2370 | } |
| 2371 | |
| 2372 | void CompactibleFreeListSpace::print() const { |
| 2373 | print_on(tty); |
| 2374 | } |
| 2375 | |
| 2376 | void CompactibleFreeListSpace::prepare_for_verify() { |
| 2377 | assert_locked(); |
| 2378 | repairLinearAllocationBlocks(); |
| 2379 | // Verify that the SpoolBlocks look like free blocks of |
| 2380 | // appropriate sizes... To be done ... |
| 2381 | } |
| 2382 | |
| 2383 | class VerifyAllBlksClosure: public BlkClosure { |
| 2384 | private: |
| 2385 | const CompactibleFreeListSpace* _sp; |
| 2386 | const MemRegion _span; |
| 2387 | HeapWord* _last_addr; |
| 2388 | size_t _last_size; |
| 2389 | bool _last_was_obj; |
| 2390 | bool _last_was_live; |
| 2391 | |
| 2392 | public: |
| 2393 | VerifyAllBlksClosure(const CompactibleFreeListSpace* sp, |
| 2394 | MemRegion span) : _sp(sp), _span(span), |
| 2395 | _last_addr(NULL), _last_size(0), |
| 2396 | _last_was_obj(false), _last_was_live(false) { } |
| 2397 | |
| 2398 | virtual size_t do_blk(HeapWord* addr) { |
| 2399 | size_t res; |
| 2400 | bool was_obj = false; |
| 2401 | bool was_live = false; |
| 2402 | if (_sp->block_is_obj(addr)) { |
| 2403 | was_obj = true; |
| 2404 | oop p = oop(addr); |
| 2405 | guarantee(oopDesc::is_oop(p), "Should be an oop" ); |
| 2406 | res = _sp->adjustObjectSize(p->size()); |
| 2407 | if (_sp->obj_is_alive(addr)) { |
| 2408 | was_live = true; |
| 2409 | oopDesc::verify(p); |
| 2410 | } |
| 2411 | } else { |
| 2412 | FreeChunk* fc = (FreeChunk*)addr; |
| 2413 | res = fc->size(); |
| 2414 | if (FLSVerifyLists && !fc->cantCoalesce()) { |
| 2415 | guarantee(_sp->verify_chunk_in_free_list(fc), |
| 2416 | "Chunk should be on a free list" ); |
| 2417 | } |
| 2418 | } |
| 2419 | if (res == 0) { |
| 2420 | Log(gc, verify) log; |
| 2421 | log.error("Livelock: no rank reduction!" ); |
| 2422 | log.error(" Current: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n" |
| 2423 | " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n" , |
| 2424 | p2i(addr), res, was_obj ?"true" :"false" , was_live ?"true" :"false" , |
| 2425 | p2i(_last_addr), _last_size, _last_was_obj?"true" :"false" , _last_was_live?"true" :"false" ); |
| 2426 | LogStream ls(log.error()); |
| 2427 | _sp->print_on(&ls); |
| 2428 | guarantee(false, "Verification failed." ); |
| 2429 | } |
| 2430 | _last_addr = addr; |
| 2431 | _last_size = res; |
| 2432 | _last_was_obj = was_obj; |
| 2433 | _last_was_live = was_live; |
| 2434 | return res; |
| 2435 | } |
| 2436 | }; |
| 2437 | |
| 2438 | class VerifyAllOopsClosure: public BasicOopIterateClosure { |
| 2439 | private: |
| 2440 | const CMSCollector* _collector; |
| 2441 | const CompactibleFreeListSpace* _sp; |
| 2442 | const MemRegion _span; |
| 2443 | const bool _past_remark;
| 2444 | const CMSBitMap* _bit_map; |
| 2445 | |
| 2446 | protected: |
| 2447 | void do_oop(void* p, oop obj) { |
| 2448 | if (_span.contains(obj)) { // the interior oop points into CMS heap |
| 2449 | if (!_span.contains(p)) { // reference from outside CMS heap |
| 2450 | // Should be a valid object; the first disjunct below allows |
| 2451 | // us to sidestep an assertion in block_is_obj() that insists |
| 2452 | // that p be in _sp. Note that several generations (and spaces) |
| 2453 | // are spanned by _span (CMS heap) above. |
| 2454 | guarantee(!_sp->is_in_reserved(obj) || |
| 2455 | _sp->block_is_obj((HeapWord*)obj), |
| 2456 | "Should be an object" ); |
| 2457 | guarantee(oopDesc::is_oop(obj), "Should be an oop" ); |
| 2458 | oopDesc::verify(obj); |
| 2459 | if (_past_remark) { |
| 2460 | // Remark has been completed, the object should be marked |
| 2461 | guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
| 2462 | } |
| 2463 | } else { // reference within CMS heap |
| 2464 | if (_past_remark) { |
| 2465 | // Remark has been completed -- so the referent should have |
| 2466 | // been marked, if referring object is. |
| 2467 | if (_bit_map->isMarked(_collector->block_start(p))) { |
| 2468 | guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?" ); |
| 2469 | } |
| 2470 | } |
| 2471 | } |
| 2472 | } else if (_sp->is_in_reserved(p)) { |
| 2473 | // the reference is from FLS, and points out of FLS |
| 2474 | guarantee(oopDesc::is_oop(obj), "Should be an oop" ); |
| 2475 | oopDesc::verify(obj); |
| 2476 | } |
| 2477 | } |
| 2478 | |
| 2479 | template <class T> void do_oop_work(T* p) { |
| 2480 | T heap_oop = RawAccess<>::oop_load(p); |
| 2481 | if (!CompressedOops::is_null(heap_oop)) { |
| 2482 | oop obj = CompressedOops::decode_not_null(heap_oop); |
| 2483 | do_oop(p, obj); |
| 2484 | } |
| 2485 | } |
| 2486 | |
| 2487 | public: |
| 2488 | VerifyAllOopsClosure(const CMSCollector* collector, |
| 2489 | const CompactibleFreeListSpace* sp, MemRegion span, |
| 2490 | bool past_remark, CMSBitMap* bit_map) :
| 2491 | _collector(collector), _sp(sp), _span(span), |
| 2492 | _past_remark(past_remark), _bit_map(bit_map) { } |
| 2493 | |
| 2494 | virtual void do_oop(oop* p) { VerifyAllOopsClosure::do_oop_work(p); } |
| 2495 | virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); } |
| 2496 | }; |
| 2497 | |
| 2498 | void CompactibleFreeListSpace::verify() const { |
| 2499 | assert_lock_strong(&_freelistLock); |
| 2500 | verify_objects_initialized(); |
| 2501 | MemRegion span = _collector->_span; |
| 2502 | bool past_remark = (_collector->abstract_state() ==
| 2503 | CMSCollector::Sweeping); |
| 2504 | |
| 2505 | ResourceMark rm; |
| 2506 | HandleMark hm; |
| 2507 | |
| 2508 | // Check integrity of CFL data structures |
| 2509 | _promoInfo.verify(); |
| 2510 | _dictionary->verify(); |
| 2511 | if (FLSVerifyIndexTable) { |
| 2512 | verifyIndexedFreeLists(); |
| 2513 | } |
| 2514 | // Check integrity of all objects and free blocks in space |
| 2515 | { |
| 2516 | VerifyAllBlksClosure cl(this, span); |
| 2517 | ((CompactibleFreeListSpace*)this)->blk_iterate(&cl); // cast off const |
| 2518 | } |
| 2519 | // Check that all references in the heap to FLS |
| 2520 | // are to valid objects in FLS or that references in |
| 2521 | // FLS are to valid objects elsewhere in the heap |
| 2522 | if (FLSVerifyAllHeapReferences) |
| 2523 | { |
| 2524 | VerifyAllOopsClosure cl(_collector, this, span, past_remark, |
| 2525 | _collector->markBitMap()); |
| 2526 | |
| 2527 | // Iterate over all oops in the heap. |
| 2528 | CMSHeap::heap()->oop_iterate(&cl); |
| 2529 | } |
| 2530 | |
| 2531 | if (VerifyObjectStartArray) { |
| 2532 | // Verify the block offset table |
| 2533 | _bt.verify(); |
| 2534 | } |
| 2535 | } |
| 2536 | |
| 2537 | #ifndef PRODUCT |
| 2538 | void CompactibleFreeListSpace::verifyFreeLists() const { |
| 2539 | if (FLSVerifyLists) { |
| 2540 | _dictionary->verify(); |
| 2541 | verifyIndexedFreeLists(); |
| 2542 | } else { |
| 2543 | if (FLSVerifyDictionary) { |
| 2544 | _dictionary->verify(); |
| 2545 | } |
| 2546 | if (FLSVerifyIndexTable) { |
| 2547 | verifyIndexedFreeLists(); |
| 2548 | } |
| 2549 | } |
| 2550 | } |
| 2551 | #endif |
| 2552 | |
| 2553 | void CompactibleFreeListSpace::verifyIndexedFreeLists() const { |
| 2554 | size_t i = 0; |
| 2555 | for (; i < IndexSetStart; i++) { |
| 2556 | guarantee(_indexedFreeList[i].head() == NULL, "should be NULL" ); |
| 2557 | } |
| 2558 | for (; i < IndexSetSize; i++) { |
| 2559 | verifyIndexedFreeList(i); |
| 2560 | } |
| 2561 | } |
| 2562 | |
| 2563 | void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const { |
| 2564 | FreeChunk* fc = _indexedFreeList[size].head(); |
| 2565 | FreeChunk* tail = _indexedFreeList[size].tail(); |
| 2566 | size_t num = _indexedFreeList[size].count(); |
| 2567 | size_t n = 0; |
| 2568 | guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL, |
| 2569 | "Slot should have been empty" ); |
| 2570 | for (; fc != NULL; fc = fc->next(), n++) { |
    guarantee(fc->size() == size, "Size inconsistency");
    guarantee(fc->is_free(), "!free?");
    guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
    guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
  }
  guarantee(n == num, "Incorrect count");
| 2577 | } |
| 2578 | |
| 2579 | #ifndef PRODUCT |
| 2580 | void CompactibleFreeListSpace::check_free_list_consistency() const { |
  assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
         "Some sizes can't be allocated without recourse to"
         " linear allocation buffers");
  assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
         "else MIN_TREE_CHUNK_SIZE is wrong");
  assert(IndexSetStart != 0, "IndexSetStart not initialized");
  assert(IndexSetStride != 0, "IndexSetStride not initialized");
| 2588 | } |
| 2589 | #endif |
| 2590 | |
| 2591 | void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const { |
| 2592 | assert_lock_strong(&_freelistLock); |
| 2593 | LogTarget(Debug, gc, freelist, census) log; |
| 2594 | if (!log.is_enabled()) { |
| 2595 | return; |
| 2596 | } |
| 2597 | AdaptiveFreeList<FreeChunk> total; |
| 2598 | log.print("end sweep# " SIZE_FORMAT, sweep_count); |
| 2599 | ResourceMark rm; |
| 2600 | LogStream ls(log); |
| 2601 | outputStream* out = &ls; |
  AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
| 2603 | size_t total_free = 0; |
| 2604 | for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) { |
| 2605 | const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i]; |
| 2606 | total_free += fl->count() * fl->size(); |
| 2607 | if (i % (40*IndexSetStride) == 0) { |
      AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
| 2609 | } |
| 2610 | fl->print_on(out); |
| 2611 | total.set_bfr_surp( total.bfr_surp() + fl->bfr_surp() ); |
| 2612 | total.set_surplus( total.surplus() + fl->surplus() ); |
| 2613 | total.set_desired( total.desired() + fl->desired() ); |
| 2614 | total.set_prev_sweep( total.prev_sweep() + fl->prev_sweep() ); |
| 2615 | total.set_before_sweep(total.before_sweep() + fl->before_sweep()); |
| 2616 | total.set_count( total.count() + fl->count() ); |
| 2617 | total.set_coal_births( total.coal_births() + fl->coal_births() ); |
| 2618 | total.set_coal_deaths( total.coal_deaths() + fl->coal_deaths() ); |
| 2619 | total.set_split_births(total.split_births() + fl->split_births()); |
| 2620 | total.set_split_deaths(total.split_deaths() + fl->split_deaths()); |
| 2621 | } |
  total.print_on(out, "TOTAL");
  log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
| 2624 | log.print("growth: %8.5f deficit: %8.5f" , |
| 2625 | (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/ |
| 2626 | (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0), |
| 2627 | (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0)); |
| 2628 | _dictionary->print_dict_census(out); |
| 2629 | } |
| 2630 | |
| 2631 | /////////////////////////////////////////////////////////////////////////// |
| 2632 | // CompactibleFreeListSpaceLAB |
| 2633 | /////////////////////////////////////////////////////////////////////////// |
| 2634 | |
| 2635 | #define VECTOR_257(x) \ |
| 2636 | /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \ |
| 2637 | { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
| 2638 | x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
| 2639 | x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
| 2640 | x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
| 2641 | x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
| 2642 | x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
| 2643 | x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
| 2644 | x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \ |
| 2645 | x } |
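// Note: VECTOR_257() expands to an initializer list with 8 * 32 + 1 = 257
// copies of its argument, one per slot of the indexed free list table
// (CompactibleFreeListSpace::IndexSetSize), as the assert in the
// CompactibleFreeListSpaceLAB constructor below checks.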
| 2646 | |
| 2647 | // Initialize with default setting for CMS, _not_ |
| 2648 | // generic OldPLABSize, whose static default is different; if overridden at the |
| 2649 | // command-line, this will get reinitialized via a call to |
| 2650 | // modify_initialization() below. |
| 2651 | AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[] = |
| 2652 | VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size)); |
| 2653 | size_t CompactibleFreeListSpaceLAB::_global_num_blocks[] = VECTOR_257(0); |
| 2654 | uint CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0); |
| 2655 | |
| 2656 | CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) : |
| 2657 | _cfls(cfls) |
| 2658 | { |
  assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
| 2660 | for (size_t i = CompactibleFreeListSpace::IndexSetStart; |
| 2661 | i < CompactibleFreeListSpace::IndexSetSize; |
| 2662 | i += CompactibleFreeListSpace::IndexSetStride) { |
| 2663 | _indexedFreeList[i].set_size(i); |
| 2664 | _num_blocks[i] = 0; |
| 2665 | } |
| 2666 | } |
| 2667 | |
| 2668 | static bool _CFLS_LAB_modified = false; |
| 2669 | |
| 2670 | void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) { |
  assert(!_CFLS_LAB_modified, "Call only once");
| 2672 | _CFLS_LAB_modified = true; |
| 2673 | for (size_t i = CompactibleFreeListSpace::IndexSetStart; |
| 2674 | i < CompactibleFreeListSpace::IndexSetSize; |
| 2675 | i += CompactibleFreeListSpace::IndexSetStride) { |
| 2676 | _blocks_to_claim[i].modify(n, wt, true /* force */); |
| 2677 | } |
| 2678 | } |
| 2679 | |
| 2680 | HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) { |
| 2681 | FreeChunk* res; |
  assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
| 2683 | if (word_sz >= CompactibleFreeListSpace::IndexSetSize) { |
| 2684 | // This locking manages sync with other large object allocations. |
| 2685 | MutexLocker x(_cfls->parDictionaryAllocLock(), |
| 2686 | Mutex::_no_safepoint_check_flag); |
| 2687 | res = _cfls->getChunkFromDictionaryExact(word_sz); |
| 2688 | if (res == NULL) return NULL; |
| 2689 | } else { |
| 2690 | AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz]; |
| 2691 | if (fl->count() == 0) { |
| 2692 | // Attempt to refill this local free list. |
| 2693 | get_from_global_pool(word_sz, fl); |
| 2694 | // If it didn't work, give up. |
| 2695 | if (fl->count() == 0) return NULL; |
| 2696 | } |
| 2697 | res = fl->get_chunk_at_head(); |
    assert(res != NULL, "Why was count non-zero?");
  }
  res->markNotFree();
  assert(!res->is_free(), "shouldn't be marked free");
  assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
| 2703 | // mangle a just allocated object with a distinct pattern. |
| 2704 | debug_only(res->mangleAllocated(word_sz)); |
| 2705 | return (HeapWord*)res; |
| 2706 | } |
| 2707 | |
| 2708 | // Get a chunk of blocks of the right size and update related |
| 2709 | // book-keeping stats |
| 2710 | void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) { |
| 2711 | // Get the #blocks we want to claim |
| 2712 | size_t n_blks = (size_t)_blocks_to_claim[word_sz].average(); |
  assert(n_blks > 0, "Error");
  assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
| 2715 | // In some cases, when the application has a phase change, |
| 2716 | // there may be a sudden and sharp shift in the object survival |
| 2717 | // profile, and updating the counts at the end of a scavenge |
| 2718 | // may not be quick enough, giving rise to large scavenge pauses |
| 2719 | // during these phase changes. It is beneficial to detect such |
| 2720 | // changes on-the-fly during a scavenge and avoid such a phase-change |
| 2721 | // pothole. The following code is a heuristic attempt to do that. |
| 2722 | // It is protected by a product flag until we have gained |
| 2723 | // enough experience with this heuristic and fine-tuned its behavior. |
| 2724 | // WARNING: This might increase fragmentation if we overreact to |
| 2725 | // small spikes, so some kind of historical smoothing based on |
| 2726 | // previous experience with the greater reactivity might be useful. |
| 2727 | // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by |
| 2728 | // default. |
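  // Concretely: "multiple" below measures how many whole times this round's
  // demand (_num_blocks[word_sz]) exceeds the nominal budget of
  // CMSOldPLABToleranceFactor * CMSOldPLABNumRefills * n_blks blocks; the
  // claim is then boosted by CMSOldPLABReactivityFactor * multiple * n_blks
  // and capped at CMSOldPLABMax.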
| 2729 | if (ResizeOldPLAB && CMSOldPLABResizeQuicker) { |
| 2730 | // |
| 2731 | // On a 32-bit VM, the denominator can become zero because of integer overflow, |
| 2732 | // which is why there is a cast to double. |
| 2733 | // |
| 2734 | size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks)); |
| 2735 | n_blks += CMSOldPLABReactivityFactor*multiple*n_blks; |
| 2736 | n_blks = MIN2(n_blks, CMSOldPLABMax); |
| 2737 | } |
  assert(n_blks > 0, "Error");
| 2739 | _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl); |
| 2740 | // Update stats table entry for this block size |
| 2741 | _num_blocks[word_sz] += fl->count(); |
| 2742 | } |
| 2743 | |
| 2744 | void CompactibleFreeListSpaceLAB::compute_desired_plab_size() { |
| 2745 | for (size_t i = CompactibleFreeListSpace::IndexSetStart; |
| 2746 | i < CompactibleFreeListSpace::IndexSetSize; |
| 2747 | i += CompactibleFreeListSpace::IndexSetStride) { |
| 2748 | assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0), |
| 2749 | "Counter inconsistency" ); |
| 2750 | if (_global_num_workers[i] > 0) { |
| 2751 | // Need to smooth wrt historical average |
| 2752 | if (ResizeOldPLAB) { |
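        // Sample the number of blocks consumed per worker per refill,
        // clamped to [CMSOldPLABMin, CMSOldPLABMax], into the weighted average.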
| 2753 | _blocks_to_claim[i].sample( |
| 2754 | MAX2(CMSOldPLABMin, |
| 2755 | MIN2(CMSOldPLABMax, |
| 2756 | _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills))); |
| 2757 | } |
| 2758 | // Reset counters for next round |
| 2759 | _global_num_workers[i] = 0; |
| 2760 | _global_num_blocks[i] = 0; |
| 2761 | log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average()); |
| 2762 | } |
| 2763 | } |
| 2764 | } |
| 2765 | |
| 2766 | // If this is changed in the future to allow parallel |
| 2767 | // access, one would need to take the FL locks and, |
| 2768 | // depending on how it is used, stagger access from |
| 2769 | // parallel threads to reduce contention. |
| 2770 | void CompactibleFreeListSpaceLAB::retire(int tid) { |
| 2771 | // We run this single threaded with the world stopped; |
| 2772 | // so no need for locks and such. |
| 2773 | NOT_PRODUCT(Thread* t = Thread::current();) |
| 2774 | assert(Thread::current()->is_VM_thread(), "Error" ); |
| 2775 | for (size_t i = CompactibleFreeListSpace::IndexSetStart; |
| 2776 | i < CompactibleFreeListSpace::IndexSetSize; |
| 2777 | i += CompactibleFreeListSpace::IndexSetStride) { |
| 2778 | assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(), |
| 2779 | "Can't retire more than what we obtained" ); |
| 2780 | if (_num_blocks[i] > 0) { |
| 2781 | size_t num_retire = _indexedFreeList[i].count(); |
      assert(_num_blocks[i] > num_retire, "Should have used at least one");
| 2783 | { |
| 2784 | // MutexLocker x(_cfls->_indexedFreeListParLocks[i], |
| 2785 | // Mutex::_no_safepoint_check_flag); |
| 2786 | |
| 2787 | // Update globals stats for num_blocks used |
| 2788 | _global_num_blocks[i] += (_num_blocks[i] - num_retire); |
| 2789 | _global_num_workers[i]++; |
        assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
| 2791 | if (num_retire > 0) { |
| 2792 | _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]); |
| 2793 | // Reset this list. |
| 2794 | _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>(); |
| 2795 | _indexedFreeList[i].set_size(i); |
| 2796 | } |
| 2797 | } |
| 2798 | log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT, |
| 2799 | tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average()); |
| 2800 | // Reset stats for next round |
| 2801 | _num_blocks[i] = 0; |
| 2802 | } |
| 2803 | } |
| 2804 | } |
| 2805 | |
| 2806 | // Used by par_get_chunk_of_blocks() for the chunks from the |
| 2807 | // indexed_free_lists. Looks for a chunk with size that is a multiple |
| 2808 | // of "word_sz" and if found, splits it into "word_sz" chunks and add |
| 2809 | // to the free list "fl". "n" is the maximum number of chunks to |
| 2810 | // be added to "fl". |
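// Returns true if at least one chunk was added to "fl"; otherwise returns
// false and leaves "fl" unchanged, in which case the caller falls back to
// splitting a block from the dictionary.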
| 2811 | bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) { |
| 2812 | |
| 2813 | // We'll try all multiples of word_sz in the indexed set, starting with |
| 2814 | // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples, |
| 2815 | // then try getting a big chunk and splitting it. |
| 2816 | { |
| 2817 | bool found; |
| 2818 | int k; |
| 2819 | size_t cur_sz; |
| 2820 | for (k = 1, cur_sz = k * word_sz, found = false; |
| 2821 | (cur_sz < CompactibleFreeListSpace::IndexSetSize) && |
| 2822 | (CMSSplitIndexedFreeListBlocks || k <= 1); |
| 2823 | k++, cur_sz = k * word_sz) { |
| 2824 | AdaptiveFreeList<FreeChunk> fl_for_cur_sz; // Empty. |
| 2825 | fl_for_cur_sz.set_size(cur_sz); |
| 2826 | { |
| 2827 | MutexLocker x(_indexedFreeListParLocks[cur_sz], |
| 2828 | Mutex::_no_safepoint_check_flag); |
| 2829 | AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz]; |
| 2830 | if (gfl->count() != 0) { |
| 2831 | // nn is the number of chunks of size cur_sz that |
| 2832 | // we'd need to split k-ways each, in order to create |
| 2833 | // "n" chunks of size word_sz each. |
| 2834 | const size_t nn = MAX2(n/k, (size_t)1); |
| 2835 | gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz); |
| 2836 | found = true; |
| 2837 | if (k > 1) { |
| 2838 | // Update split death stats for the cur_sz-size blocks list: |
| 2839 | // we increment the split death count by the number of blocks |
| 2840 | // we just took from the cur_sz-size blocks list and which |
| 2841 | // we will be splitting below. |
| 2842 | ssize_t deaths = gfl->split_deaths() + |
| 2843 | fl_for_cur_sz.count(); |
| 2844 | gfl->set_split_deaths(deaths); |
| 2845 | } |
| 2846 | } |
| 2847 | } |
| 2848 | // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1. |
| 2849 | if (found) { |
| 2850 | if (k == 1) { |
| 2851 | fl->prepend(&fl_for_cur_sz); |
| 2852 | } else { |
| 2853 | // Divide each block on fl_for_cur_sz up k ways. |
| 2854 | FreeChunk* fc; |
| 2855 | while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) { |
| 2856 | // Must do this in reverse order, so that anybody attempting to |
| 2857 | // access the main chunk sees it as a single free block until we |
| 2858 | // change it. |
| 2859 | size_t fc_size = fc->size(); |
            assert(fc->is_free(), "Error");
| 2861 | for (int i = k-1; i >= 0; i--) { |
| 2862 | FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz); |
| 2863 | assert((i != 0) || |
| 2864 | ((fc == ffc) && ffc->is_free() && |
| 2865 | (ffc->size() == k*word_sz) && (fc_size == word_sz)), |
| 2866 | "Counting error" ); |
| 2867 | ffc->set_size(word_sz); |
| 2868 | ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads. |
| 2869 | ffc->link_next(NULL); |
| 2870 | // Above must occur before BOT is updated below. |
| 2871 | OrderAccess::storestore(); |
| 2872 | // splitting from the right, fc_size == i * word_sz |
| 2873 | _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */); |
| 2874 | fc_size -= word_sz; |
              assert(fc_size == i*word_sz, "Error");
| 2876 | _bt.verify_not_unallocated((HeapWord*)ffc, word_sz); |
| 2877 | _bt.verify_single_block((HeapWord*)fc, fc_size); |
| 2878 | _bt.verify_single_block((HeapWord*)ffc, word_sz); |
| 2879 | // Push this on "fl". |
| 2880 | fl->return_chunk_at_head(ffc); |
| 2881 | } |
| 2882 | // TRAP |
            assert(fl->tail()->next() == NULL, "List invariant.");
| 2884 | } |
| 2885 | } |
| 2886 | // Update birth stats for this block size. |
| 2887 | size_t num = fl->count(); |
| 2888 | MutexLocker x(_indexedFreeListParLocks[word_sz], |
| 2889 | Mutex::_no_safepoint_check_flag); |
| 2890 | ssize_t births = _indexedFreeList[word_sz].split_births() + num; |
| 2891 | _indexedFreeList[word_sz].set_split_births(births); |
| 2892 | return true; |
| 2893 | } |
| 2894 | } |
| 2895 | return found; |
| 2896 | } |
| 2897 | } |
| 2898 | |
| 2899 | FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) { |
| 2900 | |
| 2901 | FreeChunk* fc = NULL; |
| 2902 | FreeChunk* rem_fc = NULL; |
| 2903 | size_t rem; |
| 2904 | { |
| 2905 | MutexLocker x(parDictionaryAllocLock(), |
| 2906 | Mutex::_no_safepoint_check_flag); |
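    // Ask the dictionary for a block large enough to be carved into n chunks
    // of word_sz words; if that fails, retry with progressively smaller
    // multiples of word_sz until a block is found or n drops to zero.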
| 2907 | while (n > 0) { |
| 2908 | fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size())); |
| 2909 | if (fc != NULL) { |
| 2910 | break; |
| 2911 | } else { |
| 2912 | n--; |
| 2913 | } |
| 2914 | } |
| 2915 | if (fc == NULL) return NULL; |
| 2916 | // Otherwise, split up that block. |
    assert((ssize_t)n >= 1, "Control point invariant");
    assert(fc->is_free(), "Error: should be a free block");
| 2919 | _bt.verify_single_block((HeapWord*)fc, fc->size()); |
| 2920 | const size_t nn = fc->size() / word_sz; |
| 2921 | n = MIN2(nn, n); |
    assert((ssize_t)n >= 1, "Control point invariant");
| 2923 | rem = fc->size() - n * word_sz; |
| 2924 | // If there is a remainder, and it's too small, allocate one fewer. |
| 2925 | if (rem > 0 && rem < MinChunkSize) { |
| 2926 | n--; rem += word_sz; |
| 2927 | } |
| 2928 | // Note that at this point we may have n == 0. |
    assert((ssize_t)n >= 0, "Control point invariant");
| 2930 | |
| 2931 | // If n is 0, the chunk fc that was found is not large |
| 2932 | // enough to leave a viable remainder. We are unable to |
| 2933 | // allocate even one block. Return fc to the |
| 2934 | // dictionary and return, leaving "fl" empty. |
| 2935 | if (n == 0) { |
| 2936 | returnChunkToDictionary(fc); |
| 2937 | return NULL; |
| 2938 | } |
| 2939 | |
| 2940 | _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */); // update _unallocated_blk |
| 2941 | dictionary()->dict_census_update(fc->size(), |
| 2942 | true /*split*/, |
| 2943 | false /*birth*/); |
| 2944 | |
| 2945 | // First return the remainder, if any. |
| 2946 | // Note that we hold the lock until we decide if we're going to give |
| 2947 | // back the remainder to the dictionary, since a concurrent allocation |
| 2948 | // may otherwise see the heap as empty. (We're willing to take that |
| 2949 | // hit if the block is a small block.) |
| 2950 | if (rem > 0) { |
| 2951 | size_t prefix_size = n * word_sz; |
| 2952 | rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size); |
| 2953 | rem_fc->set_size(rem); |
| 2954 | rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads. |
| 2955 | rem_fc->link_next(NULL); |
| 2956 | // Above must occur before BOT is updated below. |
      assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
      OrderAccess::storestore();
      _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
      assert(fc->is_free(), "Error");
| 2961 | fc->set_size(prefix_size); |
| 2962 | if (rem >= IndexSetSize) { |
| 2963 | returnChunkToDictionary(rem_fc); |
| 2964 | dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/); |
| 2965 | rem_fc = NULL; |
| 2966 | } |
| 2967 | // Otherwise, return it to the small list below. |
| 2968 | } |
| 2969 | } |
| 2970 | if (rem_fc != NULL) { |
| 2971 | MutexLocker x(_indexedFreeListParLocks[rem], |
| 2972 | Mutex::_no_safepoint_check_flag); |
| 2973 | _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size()); |
| 2974 | _indexedFreeList[rem].return_chunk_at_head(rem_fc); |
| 2975 | smallSplitBirth(rem); |
| 2976 | } |
| 2977 | assert(n * word_sz == fc->size(), |
| 2978 | "Chunk size " SIZE_FORMAT " is not exactly splittable by " |
| 2979 | SIZE_FORMAT " sized chunks of size " SIZE_FORMAT, |
| 2980 | fc->size(), n, word_sz); |
| 2981 | return fc; |
| 2982 | } |
| 2983 | |
| 2984 | void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) { |
| 2985 | |
| 2986 | FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks); |
| 2987 | |
| 2988 | if (fc == NULL) { |
| 2989 | return; |
| 2990 | } |
| 2991 | |
| 2992 | size_t n = fc->size() / word_sz; |
| 2993 | |
  assert((ssize_t)n > 0, "Consistency");
| 2995 | // Now do the splitting up. |
| 2996 | // Must do this in reverse order, so that anybody attempting to |
| 2997 | // access the main chunk sees it as a single free block until we |
| 2998 | // change it. |
| 2999 | size_t fc_size = n * word_sz; |
| 3000 | // All but first chunk in this loop |
| 3001 | for (ssize_t i = n-1; i > 0; i--) { |
| 3002 | FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz); |
| 3003 | ffc->set_size(word_sz); |
| 3004 | ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads. |
| 3005 | ffc->link_next(NULL); |
| 3006 | // Above must occur before BOT is updated below. |
| 3007 | OrderAccess::storestore(); |
| 3008 | // splitting from the right, fc_size == (n - i + 1) * wordsize |
| 3009 | _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */); |
| 3010 | fc_size -= word_sz; |
| 3011 | _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size()); |
| 3012 | _bt.verify_single_block((HeapWord*)ffc, ffc->size()); |
| 3013 | _bt.verify_single_block((HeapWord*)fc, fc_size); |
| 3014 | // Push this on "fl". |
| 3015 | fl->return_chunk_at_head(ffc); |
| 3016 | } |
| 3017 | // First chunk |
  assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
| 3019 | // The blocks above should show their new sizes before the first block below |
| 3020 | fc->set_size(word_sz); |
| 3021 | fc->link_prev(NULL); // idempotent wrt free-ness, see assert above |
| 3022 | fc->link_next(NULL); |
| 3023 | _bt.verify_not_unallocated((HeapWord*)fc, fc->size()); |
| 3024 | _bt.verify_single_block((HeapWord*)fc, fc->size()); |
| 3025 | fl->return_chunk_at_head(fc); |
| 3026 | |
  assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
| 3028 | { |
| 3029 | // Update the stats for this block size. |
| 3030 | MutexLocker x(_indexedFreeListParLocks[word_sz], |
| 3031 | Mutex::_no_safepoint_check_flag); |
| 3032 | const ssize_t births = _indexedFreeList[word_sz].split_births() + n; |
| 3033 | _indexedFreeList[word_sz].set_split_births(births); |
| 3034 | // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n; |
| 3035 | // _indexedFreeList[word_sz].set_surplus(new_surplus); |
| 3036 | } |
| 3037 | |
| 3038 | // TRAP |
  assert(fl->tail()->next() == NULL, "List invariant.");
| 3040 | } |
| 3041 | |
| 3042 | void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) { |
  assert(fl->count() == 0, "Precondition.");
  assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
         "Precondition");
| 3046 | |
| 3047 | if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) { |
| 3048 | // Got it |
| 3049 | return; |
| 3050 | } |
| 3051 | |
| 3052 | // Otherwise, we'll split a block from the dictionary. |
| 3053 | par_get_chunk_of_blocks_dictionary(word_sz, n, fl); |
| 3054 | } |
| 3055 | |
| 3056 | const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const { |
| 3057 | const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord); |
| 3058 | return ergo_max; |
| 3059 | } |
| 3060 | |
| 3061 | // Set up the space's par_seq_tasks structure for work claiming |
| 3062 | // for parallel rescan. See CMSParRemarkTask where this is currently used. |
| 3063 | // XXX Need to suitably abstract and generalize this and the next |
| 3064 | // method into one. |
| 3065 | void |
| 3066 | CompactibleFreeListSpace:: |
| 3067 | initialize_sequential_subtasks_for_rescan(int n_threads) { |
| 3068 | // The "size" of each task is fixed according to rescan_task_size. |
  assert(n_threads > 0, "Unexpected n_threads argument");
| 3070 | const size_t task_size = rescan_task_size(); |
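  // The number of tasks is the used region size in words divided by the task
  // size and rounded up, so the final task may cover a partial stride.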
| 3071 | size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size; |
  assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
| 3073 | assert(n_tasks == 0 || |
| 3074 | ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) && |
| 3075 | (used_region().start() + n_tasks*task_size >= used_region().end())), |
| 3076 | "n_tasks calculation incorrect" ); |
| 3077 | SequentialSubTasksDone* pst = conc_par_seq_tasks(); |
  assert(!pst->valid(), "Clobbering existing data?");
| 3079 | // Sets the condition for completion of the subtask (how many threads |
| 3080 | // need to finish in order to be done). |
| 3081 | pst->set_n_threads(n_threads); |
| 3082 | pst->set_n_tasks((int)n_tasks); |
| 3083 | } |
| 3084 | |
| 3085 | // Set up the space's par_seq_tasks structure for work claiming |
| 3086 | // for parallel concurrent marking. See CMSConcMarkTask where this is currently used. |
| 3087 | void |
| 3088 | CompactibleFreeListSpace:: |
| 3089 | initialize_sequential_subtasks_for_marking(int n_threads, |
| 3090 | HeapWord* low) { |
| 3091 | // The "size" of each task is fixed according to rescan_task_size. |
  assert(n_threads > 0, "Unexpected n_threads argument");
| 3093 | const size_t task_size = marking_task_size(); |
| 3094 | assert(task_size > CardTable::card_size_in_words && |
| 3095 | (task_size % CardTable::card_size_in_words == 0), |
| 3096 | "Otherwise arithmetic below would be incorrect" ); |
| 3097 | MemRegion span = _old_gen->reserved(); |
| 3098 | if (low != NULL) { |
| 3099 | if (span.contains(low)) { |
| 3100 | // Align low down to a card boundary so that |
| 3101 | // we can use block_offset_careful() on span boundaries. |
| 3102 | HeapWord* aligned_low = align_down(low, CardTable::card_size); |
| 3103 | // Clip span prefix at aligned_low |
| 3104 | span = span.intersection(MemRegion(aligned_low, span.end())); |
| 3105 | } else if (low > span.end()) { |
| 3106 | span = MemRegion(low, low); // Null region |
| 3107 | } // else use entire span |
| 3108 | } |
| 3109 | assert(span.is_empty() || |
| 3110 | ((uintptr_t)span.start() % CardTable::card_size == 0), |
| 3111 | "span should start at a card boundary" ); |
| 3112 | size_t n_tasks = (span.word_size() + task_size - 1)/task_size; |
  assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
| 3114 | assert(n_tasks == 0 || |
| 3115 | ((span.start() + (n_tasks - 1)*task_size < span.end()) && |
| 3116 | (span.start() + n_tasks*task_size >= span.end())), |
| 3117 | "n_tasks calculation incorrect" ); |
| 3118 | SequentialSubTasksDone* pst = conc_par_seq_tasks(); |
  assert(!pst->valid(), "Clobbering existing data?");
| 3120 | // Sets the condition for completion of the subtask (how many threads |
| 3121 | // need to finish in order to be done). |
| 3122 | pst->set_n_threads(n_threads); |
| 3123 | pst->set_n_tasks((int)n_tasks); |
| 3124 | } |
| 3125 | |