| 1 | /* |
| 2 | * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #ifndef SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP |
| 26 | #define SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP |
| 27 | |
| 28 | #include "gc/cms/adaptiveFreeList.hpp" |
| 29 | #include "gc/cms/promotionInfo.hpp" |
| 30 | #include "gc/shared/blockOffsetTable.hpp" |
| 31 | #include "gc/shared/cardTable.hpp" |
| 32 | #include "gc/shared/space.hpp" |
| 33 | #include "logging/log.hpp" |
| 34 | #include "memory/binaryTreeDictionary.hpp" |
| 35 | #include "memory/freeList.hpp" |
| 36 | |
// Classes in support of keeping track of promotions into a non-contiguous
| 38 | // space, in this case a CompactibleFreeListSpace. |
| 39 | |
| 40 | // Forward declarations |
| 41 | class CMSCollector; |
| 42 | class CompactibleFreeListSpace; |
| 43 | class ConcurrentMarkSweepGeneration; |
| 44 | class BlkClosure; |
| 45 | class BlkClosureCareful; |
| 46 | class FreeChunk; |
| 47 | class UpwardsObjectClosure; |
| 48 | class ObjectClosureCareful; |
| 49 | class Klass; |
| 50 | |
| 51 | class AFLBinaryTreeDictionary : public BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> > { |
| 52 | public: |
| 53 | AFLBinaryTreeDictionary(MemRegion mr) |
| 54 | : BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> >(mr) {} |
| 55 | |
| 56 | // Find the list with size "size" in the binary tree and update |
| 57 | // the statistics in the list according to "split" (chunk was |
  // split or coalesced) and "birth" (chunk was added or removed).
| 59 | void dict_census_update(size_t size, bool split, bool birth); |
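  // For illustration only (hypothetical values): code that carves a 512-word
  // chunk out of a larger dictionary chunk could record a split birth for
  // that size with
  //   dict_census_update(512, true /* split */, true /* birth */);
  // while removing a 512-word chunk because it was coalesced into a
  // neighbour would be a coalesce death, i.e. split == false, birth == false.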
| 60 | // Return true if the dictionary is overpopulated (more chunks of |
| 61 | // this size than desired) for size "size". |
| 62 | bool coal_dict_over_populated(size_t size); |
| 63 | // Methods called at the beginning of a sweep to prepare the |
| 64 | // statistics for the sweep. |
| 65 | void begin_sweep_dict_census(double coalSurplusPercent, |
| 66 | float inter_sweep_current, |
| 67 | float inter_sweep_estimate, |
| 68 | float intra_sweep_estimate); |
| 69 | // Methods called after the end of a sweep to modify the |
| 70 | // statistics for the sweep. |
| 71 | void end_sweep_dict_census(double splitSurplusPercent); |
| 72 | // Accessors for statistics |
| 73 | void set_tree_surplus(double splitSurplusPercent); |
| 74 | void set_tree_hints(void); |
| 75 | // Reset statistics for all the lists in the tree. |
| 76 | void clear_tree_census(void); |
  // Print the statistics for all the lists in the tree. May also
  // print out summaries.
| 79 | void print_dict_census(outputStream* st) const; |
| 80 | }; |
| 81 | |
| 82 | class LinearAllocBlock { |
| 83 | public: |
| 84 | LinearAllocBlock() : _ptr(0), _word_size(0), _refillSize(0), |
| 85 | _allocation_size_limit(0) {} |
| 86 | void set(HeapWord* ptr, size_t word_size, size_t refill_size, |
| 87 | size_t allocation_size_limit) { |
| 88 | _ptr = ptr; |
| 89 | _word_size = word_size; |
| 90 | _refillSize = refill_size; |
| 91 | _allocation_size_limit = allocation_size_limit; |
| 92 | } |
| 93 | HeapWord* _ptr; |
| 94 | size_t _word_size; |
| 95 | size_t _refillSize; |
| 96 | size_t _allocation_size_limit; // Largest size that will be allocated |
| 97 | |
| 98 | void print_on(outputStream* st) const; |
| 99 | }; |
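
// A minimal usage sketch (names are illustrative, not from this file): the
// space refills a LinearAllocBlock by pointing it at a fresh free chunk,
//   blk.set(chunk_addr, chunk_words, refill_words, alloc_limit_words);
// after which requests no larger than _allocation_size_limit are carved from
// the front of [_ptr, _ptr + _word_size), shrinking _word_size as they go.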
| 100 | |
| 101 | // Concrete subclass of CompactibleSpace that implements |
| 102 | // a free list space, such as used in the concurrent mark sweep |
| 103 | // generation. |
| 104 | |
| 105 | class CompactibleFreeListSpace: public CompactibleSpace { |
| 106 | friend class VMStructs; |
| 107 | friend class ConcurrentMarkSweepGeneration; |
| 108 | friend class CMSCollector; |
| 109 | // Local alloc buffer for promotion into this space. |
| 110 | friend class CompactibleFreeListSpaceLAB; |
| 111 | // Allow scan_and_* functions to call (private) overrides of the auxiliary functions on this class |
| 112 | template <typename SpaceType> |
| 113 | friend void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space); |
| 114 | template <typename SpaceType> |
| 115 | friend void CompactibleSpace::scan_and_compact(SpaceType* space); |
| 116 | template <typename SpaceType> |
| 117 | friend void CompactibleSpace::verify_up_to_first_dead(SpaceType* space); |
| 118 | template <typename SpaceType> |
| 119 | friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp); |
| 120 | |
| 121 | // "Size" of chunks of work (executed during parallel remark phases |
| 122 | // of CMS collection); this probably belongs in CMSCollector, although |
| 123 | // it's cached here because it's used in |
| 124 | // initialize_sequential_subtasks_for_rescan() which modifies |
| 125 | // par_seq_tasks which also lives in Space. XXX |
| 126 | const size_t _rescan_task_size; |
| 127 | const size_t _marking_task_size; |
| 128 | |
| 129 | // Yet another sequential tasks done structure. This supports |
| 130 | // CMS GC, where we have threads dynamically |
| 131 | // claiming sub-tasks from a larger parallel task. |
| 132 | SequentialSubTasksDone _conc_par_seq_tasks; |
| 133 | |
| 134 | BlockOffsetArrayNonContigSpace _bt; |
| 135 | |
| 136 | CMSCollector* _collector; |
| 137 | ConcurrentMarkSweepGeneration* _old_gen; |
| 138 | |
| 139 | // Data structures for free blocks (used during allocation/sweeping) |
| 140 | |
| 141 | // Allocation is done linearly from two different blocks depending on |
| 142 | // whether the request is small or large, in an effort to reduce |
| 143 | // fragmentation. We assume that any locking for allocation is done |
| 144 | // by the containing generation. Thus, none of the methods in this |
| 145 | // space are re-entrant. |
| 146 | enum SomeConstants { |
    SmallForLinearAlloc = 16,        // if size < this, use _sLAB
    SmallForDictionary  = 257,       // if size < this, use _indexedFreeList
| 149 | IndexSetSize = SmallForDictionary // keep this odd-sized |
| 150 | }; |
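  // Roughly, per the constants above: a request of, say, 8 words would be
  // satisfied from the small linear allocation block, a 100-word request
  // from the indexed (array-based) free lists, and a 300-word request from
  // the dictionary of large blocks. (Illustrative only; the exact policy is
  // implemented by the allocation methods below.)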
| 151 | static size_t IndexSetStart; |
| 152 | static size_t IndexSetStride; |
| 153 | static size_t _min_chunk_size_in_bytes; |
| 154 | |
| 155 | private: |
| 156 | enum FitStrategyOptions { |
| 157 | FreeBlockStrategyNone = 0, |
| 158 | FreeBlockBestFitFirst |
| 159 | }; |
| 160 | |
| 161 | PromotionInfo _promoInfo; |
| 162 | |
| 163 | // Helps to impose a global total order on freelistLock ranks; |
  // assumes that CFLSpaces are allocated in global total order.
| 165 | static int _lockRank; |
| 166 | |
| 167 | // A lock protecting the free lists and free blocks; |
| 168 | // mutable because of ubiquity of locking even for otherwise const methods |
| 169 | mutable Mutex _freelistLock; |
| 170 | |
| 171 | // Locking verifier convenience function |
| 172 | void assert_locked() const PRODUCT_RETURN; |
| 173 | void assert_locked(const Mutex* lock) const PRODUCT_RETURN; |
| 174 | |
| 175 | // Linear allocation blocks |
| 176 | LinearAllocBlock _smallLinearAllocBlock; |
| 177 | |
| 178 | AFLBinaryTreeDictionary* _dictionary; // Pointer to dictionary for large size blocks |
| 179 | |
| 180 | // Indexed array for small size blocks |
| 181 | AdaptiveFreeList<FreeChunk> _indexedFreeList[IndexSetSize]; |
| 182 | |
| 183 | // Allocation strategy |
| 184 | bool _fitStrategy; // Use best fit strategy |
| 185 | |
| 186 | // This is an address close to the largest free chunk in the heap. |
| 187 | // It is currently assumed to be at the end of the heap. Free |
| 188 | // chunks with addresses greater than nearLargestChunk are coalesced |
| 189 | // in an effort to maintain a large chunk at the end of the heap. |
| 190 | HeapWord* _nearLargestChunk; |
| 191 | |
| 192 | // Used to keep track of limit of sweep for the space |
| 193 | HeapWord* _sweep_limit; |
| 194 | |
| 195 | // Used to make the young collector update the mod union table |
| 196 | MemRegionClosure* _preconsumptionDirtyCardClosure; |
| 197 | |
| 198 | // Support for compacting cms |
| 199 | HeapWord* cross_threshold(HeapWord* start, HeapWord* end); |
| 200 | HeapWord* forward(oop q, size_t size, CompactPoint* cp, HeapWord* compact_top); |
| 201 | |
| 202 | // Initialization helpers. |
| 203 | void initializeIndexedFreeListArray(); |
| 204 | |
| 205 | // Extra stuff to manage promotion parallelism. |
| 206 | |
| 207 | // A lock protecting the dictionary during par promotion allocation. |
| 208 | mutable Mutex _parDictionaryAllocLock; |
| 209 | Mutex* parDictionaryAllocLock() const { return &_parDictionaryAllocLock; } |
| 210 | |
| 211 | // Locks protecting the exact lists during par promotion allocation. |
| 212 | Mutex* _indexedFreeListParLocks[IndexSetSize]; |
| 213 | |
  // Attempt to obtain up to "n" blocks of size "word_sz" (which is
  // required to be smaller than "IndexSetSize"). If successful,
  // adds them to "fl", which is required to be an empty free list.
  // If the count of "fl" is negative, its absolute value indicates the
  // number of free chunks that had been previously "borrowed" from the
  // global list of size "word_sz", and must now be decremented.
| 220 | void par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl); |
| 221 | |
| 222 | // Used by par_get_chunk_of_blocks() for the chunks from the |
| 223 | // indexed_free_lists. |
| 224 | bool par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl); |
| 225 | |
  // Used by par_get_chunk_of_blocks_dictionary() to get a chunk
  // evenly splittable into "n" "word_sz" chunks. Returns such a chunk,
  // splitting a larger chunk to obtain it if necessary.
| 230 | FreeChunk* get_n_way_chunk_to_split(size_t word_sz, size_t n); |
| 231 | |
| 232 | // Used by par_get_chunk_of_blocks() for the chunks from the |
| 233 | // dictionary. |
| 234 | void par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl); |
| 235 | |
| 236 | // Allocation helper functions |
| 237 | // Allocate using a strategy that takes from the indexed free lists |
| 238 | // first. This allocation strategy assumes a companion sweeping |
| 239 | // strategy that attempts to keep the needed number of chunks in each |
  // indexed free list.
| 241 | HeapWord* allocate_adaptive_freelists(size_t size); |
| 242 | |
| 243 | // Gets a chunk from the linear allocation block (LinAB). If there |
| 244 | // is not enough space in the LinAB, refills it. |
| 245 | HeapWord* getChunkFromLinearAllocBlock(LinearAllocBlock* blk, size_t size); |
| 246 | HeapWord* getChunkFromSmallLinearAllocBlock(size_t size); |
| 247 | // Get a chunk from the space remaining in the linear allocation block. Do |
  // not attempt to refill if the space is not available; return NULL instead.
  // Do the repairs on the linear allocation block as appropriate.
| 250 | HeapWord* getChunkFromLinearAllocBlockRemainder(LinearAllocBlock* blk, size_t size); |
| 251 | inline HeapWord* getChunkFromSmallLinearAllocBlockRemainder(size_t size); |
| 252 | |
| 253 | // Helper function for getChunkFromIndexedFreeList. |
| 254 | // Replenish the indexed free list for this "size". Do not take from an |
| 255 | // underpopulated size. |
| 256 | FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true); |
| 257 | |
| 258 | // Get a chunk from the indexed free list. If the indexed free list |
| 259 | // does not have a free chunk, try to replenish the indexed free list |
| 260 | // then get the free chunk from the replenished indexed free list. |
| 261 | inline FreeChunk* getChunkFromIndexedFreeList(size_t size); |
| 262 | |
| 263 | // The returned chunk may be larger than requested (or null). |
| 264 | FreeChunk* getChunkFromDictionary(size_t size); |
| 265 | // The returned chunk is the exact size requested (or null). |
| 266 | FreeChunk* getChunkFromDictionaryExact(size_t size); |
| 267 | |
| 268 | // Find a chunk in the indexed free list that is the best |
| 269 | // fit for size "numWords". |
| 270 | FreeChunk* bestFitSmall(size_t numWords); |
| 271 | // For free list "fl" of chunks of size > numWords, |
| 272 | // remove a chunk, split off a chunk of size numWords |
  // and return it. The split-off remainder is returned to
| 274 | // the free lists. The old name for getFromListGreater |
| 275 | // was lookInListGreater. |
| 276 | FreeChunk* getFromListGreater(AdaptiveFreeList<FreeChunk>* fl, size_t numWords); |
| 277 | // Get a chunk in the indexed free list or dictionary, |
| 278 | // by considering a larger chunk and splitting it. |
| 279 | FreeChunk* getChunkFromGreater(size_t numWords); |
| 280 | // Verify that the given chunk is in the indexed free lists. |
| 281 | bool verifyChunkInIndexedFreeLists(FreeChunk* fc) const; |
| 282 | // Remove the specified chunk from the indexed free lists. |
| 283 | void removeChunkFromIndexedFreeList(FreeChunk* fc); |
| 284 | // Remove the specified chunk from the dictionary. |
| 285 | void removeChunkFromDictionary(FreeChunk* fc); |
| 286 | // Split a free chunk into a smaller free chunk of size "new_size". |
| 287 | // Return the smaller free chunk and return the remainder to the |
| 288 | // free lists. |
| 289 | FreeChunk* splitChunkAndReturnRemainder(FreeChunk* chunk, size_t new_size); |
| 290 | // Add a chunk to the free lists. |
| 291 | void addChunkToFreeLists(HeapWord* chunk, size_t size); |
| 292 | // Add a chunk to the free lists, preferring to suffix it |
  // to the last free chunk at the end of the space if possible, and
  // updating the block census stats as well as the block offset table.
| 295 | // Take any locks as appropriate if we are multithreaded. |
| 296 | void addChunkToFreeListsAtEndRecordingStats(HeapWord* chunk, size_t size); |
| 297 | // Add a free chunk to the indexed free lists. |
| 298 | void returnChunkToFreeList(FreeChunk* chunk); |
| 299 | // Add a free chunk to the dictionary. |
| 300 | void returnChunkToDictionary(FreeChunk* chunk); |
| 301 | |
| 302 | // Functions for maintaining the linear allocation buffers (LinAB). |
| 303 | // Repairing a linear allocation block refers to operations |
| 304 | // performed on the remainder of a LinAB after an allocation |
| 305 | // has been made from it. |
| 306 | void repairLinearAllocationBlocks(); |
| 307 | void repairLinearAllocBlock(LinearAllocBlock* blk); |
| 308 | void refillLinearAllocBlock(LinearAllocBlock* blk); |
| 309 | void refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk); |
| 310 | void refillLinearAllocBlocksIfNeeded(); |
| 311 | |
| 312 | void verify_objects_initialized() const; |
| 313 | |
| 314 | // Statistics reporting helper functions |
| 315 | void reportFreeListStatistics(const char* title) const; |
| 316 | void reportIndexedFreeListStatistics(outputStream* st) const; |
| 317 | size_t maxChunkSizeInIndexedFreeLists() const; |
| 318 | size_t numFreeBlocksInIndexedFreeLists() const; |
| 319 | // Accessor |
| 320 | HeapWord* unallocated_block() const { |
| 321 | if (BlockOffsetArrayUseUnallocatedBlock) { |
| 322 | HeapWord* ub = _bt.unallocated_block(); |
      assert(ub >= bottom() &&
             ub <= end(), "space invariant");
| 325 | return ub; |
| 326 | } else { |
| 327 | return end(); |
| 328 | } |
| 329 | } |
| 330 | void freed(HeapWord* start, size_t size) { |
| 331 | _bt.freed(start, size); |
| 332 | } |
| 333 | |
| 334 | // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support. |
| 335 | // See comments for CompactibleSpace for more information. |
| 336 | inline HeapWord* scan_limit() const { |
| 337 | return end(); |
| 338 | } |
| 339 | |
| 340 | inline bool scanned_block_is_obj(const HeapWord* addr) const { |
| 341 | return CompactibleFreeListSpace::block_is_obj(addr); // Avoid virtual call |
| 342 | } |
| 343 | |
| 344 | inline size_t scanned_block_size(const HeapWord* addr) const { |
| 345 | return CompactibleFreeListSpace::block_size(addr); // Avoid virtual call |
| 346 | } |
| 347 | |
| 348 | inline size_t adjust_obj_size(size_t size) const { |
| 349 | return adjustObjectSize(size); |
| 350 | } |
| 351 | |
| 352 | inline size_t obj_size(const HeapWord* addr) const; |
| 353 | |
| 354 | protected: |
| 355 | // Reset the indexed free list to its initial empty condition. |
| 356 | void resetIndexedFreeListArray(); |
| 357 | // Reset to an initial state with a single free block described |
| 358 | // by the MemRegion parameter. |
| 359 | void reset(MemRegion mr); |
| 360 | // Return the total number of words in the indexed free lists. |
| 361 | size_t totalSizeInIndexedFreeLists() const; |
| 362 | |
| 363 | public: |
| 364 | // Constructor |
| 365 | CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr); |
| 366 | // Accessors |
| 367 | bool bestFitFirst() { return _fitStrategy == FreeBlockBestFitFirst; } |
| 368 | AFLBinaryTreeDictionary* dictionary() const { return _dictionary; } |
| 369 | HeapWord* nearLargestChunk() const { return _nearLargestChunk; } |
| 370 | void set_nearLargestChunk(HeapWord* v) { _nearLargestChunk = v; } |
| 371 | |
| 372 | // Set CMS global values. |
| 373 | static void set_cms_values(); |
| 374 | |
| 375 | // Return the free chunk at the end of the space. If no such |
| 376 | // chunk exists, return NULL. |
| 377 | FreeChunk* find_chunk_at_end(); |
| 378 | |
| 379 | void set_collector(CMSCollector* collector) { _collector = collector; } |
| 380 | |
| 381 | // Support for parallelization of rescan and marking. |
| 382 | const size_t rescan_task_size() const { return _rescan_task_size; } |
| 383 | const size_t marking_task_size() const { return _marking_task_size; } |
| 384 | // Return ergonomic max size for CMSRescanMultiple and CMSConcMarkMultiple. |
| 385 | const size_t max_flag_size_for_task_size() const; |
| 386 | SequentialSubTasksDone* conc_par_seq_tasks() {return &_conc_par_seq_tasks; } |
| 387 | void initialize_sequential_subtasks_for_rescan(int n_threads); |
| 388 | void initialize_sequential_subtasks_for_marking(int n_threads, |
| 389 | HeapWord* low = NULL); |
| 390 | |
| 391 | virtual MemRegionClosure* preconsumptionDirtyCardClosure() const { |
| 392 | return _preconsumptionDirtyCardClosure; |
| 393 | } |
| 394 | |
| 395 | void setPreconsumptionDirtyCardClosure(MemRegionClosure* cl) { |
| 396 | _preconsumptionDirtyCardClosure = cl; |
| 397 | } |
| 398 | |
| 399 | // Space enquiries |
| 400 | size_t used() const; |
| 401 | size_t free() const; |
| 402 | size_t max_alloc_in_words() const; |
| 403 | // XXX: should have a less conservative used_region() than that of |
| 404 | // Space; we could consider keeping track of highest allocated |
| 405 | // address and correcting that at each sweep, as the sweeper |
| 406 | // goes through the entire allocated part of the generation. We |
| 407 | // could also use that information to keep the sweeper from |
| 408 | // sweeping more than is necessary. The allocator and sweeper will |
| 409 | // of course need to synchronize on this, since the sweeper will |
| 410 | // try to bump down the address and the allocator will try to bump it up. |
| 411 | // For now, however, we'll just use the default used_region() |
| 412 | // which overestimates the region by returning the entire |
| 413 | // committed region (this is safe, but inefficient). |
| 414 | |
| 415 | // Returns a subregion of the space containing all the objects in |
| 416 | // the space. |
| 417 | MemRegion used_region() const { |
| 418 | return MemRegion(bottom(), |
| 419 | BlockOffsetArrayUseUnallocatedBlock ? |
| 420 | unallocated_block() : end()); |
| 421 | } |
| 422 | |
| 423 | virtual bool is_free_block(const HeapWord* p) const; |
| 424 | |
| 425 | // Resizing support |
| 426 | void set_end(HeapWord* value); // override |
| 427 | |
| 428 | // Never mangle CompactibleFreeListSpace |
| 429 | void mangle_unused_area() {} |
| 430 | void mangle_unused_area_complete() {} |
| 431 | |
| 432 | // Mutual exclusion support |
| 433 | Mutex* freelistLock() const { return &_freelistLock; } |
| 434 | |
| 435 | // Iteration support |
| 436 | void oop_iterate(OopIterateClosure* cl); |
| 437 | |
| 438 | void object_iterate(ObjectClosure* blk); |
  // Apply the closure to each object in the space whose references
  // point to objects in the heap. The use of CompactibleFreeListSpace
  // by the ConcurrentMarkSweepGeneration for concurrent GCs means the
  // space may contain objects with references to objects that are no
  // longer valid. For example, an object may reference another object
  // that has already been swept up (collected). This method uses
  // obj_is_alive() to determine whether it is safe to iterate over
  // an object.
| 447 | void safe_object_iterate(ObjectClosure* blk); |
| 448 | |
| 449 | // Iterate over all objects that intersect with mr, calling "cl->do_object" |
| 450 | // on each. There is an exception to this: if this closure has already |
| 451 | // been invoked on an object, it may skip such objects in some cases. This is |
  // most likely to happen in an "upwards" (ascending address) iteration of
| 453 | // MemRegions. |
| 454 | void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl); |
| 455 | |
| 456 | // Requires that "mr" be entirely within the space. |
| 457 | // Apply "cl->do_object" to all objects that intersect with "mr". |
| 458 | // If the iteration encounters an unparseable portion of the region, |
| 459 | // terminate the iteration and return the address of the start of the |
| 460 | // subregion that isn't done. Return of "NULL" indicates that the |
| 461 | // iteration completed. |
| 462 | HeapWord* object_iterate_careful_m(MemRegion mr, |
| 463 | ObjectClosureCareful* cl); |
| 464 | |
| 465 | // Override: provides a DCTO_CL specific to this kind of space. |
| 466 | DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl, |
| 467 | CardTable::PrecisionStyle precision, |
| 468 | HeapWord* boundary, |
| 469 | bool parallel); |
| 470 | |
| 471 | void blk_iterate(BlkClosure* cl); |
| 472 | void blk_iterate_careful(BlkClosureCareful* cl); |
| 473 | HeapWord* block_start_const(const void* p) const; |
| 474 | HeapWord* block_start_careful(const void* p) const; |
| 475 | size_t block_size(const HeapWord* p) const; |
| 476 | size_t block_size_no_stall(HeapWord* p, const CMSCollector* c) const; |
| 477 | bool block_is_obj(const HeapWord* p) const; |
| 478 | bool obj_is_alive(const HeapWord* p) const; |
| 479 | size_t block_size_nopar(const HeapWord* p) const; |
| 480 | bool block_is_obj_nopar(const HeapWord* p) const; |
| 481 | |
| 482 | // Iteration support for promotion |
| 483 | void save_marks(); |
| 484 | bool no_allocs_since_save_marks(); |
| 485 | |
| 486 | // Iteration support for sweeping |
| 487 | void save_sweep_limit() { |
| 488 | _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ? |
| 489 | unallocated_block() : end(); |
| 490 | log_develop_trace(gc, sweep)(">>>>> Saving sweep limit " PTR_FORMAT |
| 491 | " for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<" , |
| 492 | p2i(_sweep_limit), p2i(bottom()), p2i(end())); |
| 493 | } |
| 494 | NOT_PRODUCT( |
| 495 | void clear_sweep_limit() { _sweep_limit = NULL; } |
| 496 | ) |
| 497 | HeapWord* sweep_limit() { return _sweep_limit; } |
| 498 | |
| 499 | // Apply "blk->do_oop" to the addresses of all reference fields in objects |
| 500 | // promoted into this generation since the most recent save_marks() call. |
| 501 | // Fields in objects allocated by applications of the closure |
| 502 | // *are* included in the iteration. Thus, when the iteration completes |
| 503 | // there should be no further such objects remaining. |
| 504 | template <typename OopClosureType> |
| 505 | void oop_since_save_marks_iterate(OopClosureType* blk); |
| 506 | |
| 507 | // Allocation support |
| 508 | HeapWord* allocate(size_t size); |
| 509 | HeapWord* par_allocate(size_t size); |
| 510 | |
| 511 | oop promote(oop obj, size_t obj_size); |
| 512 | void gc_prologue(); |
| 513 | void gc_epilogue(); |
| 514 | |
| 515 | // This call is used by a containing CMS generation / collector |
| 516 | // to inform the CFLS space that a sweep has been completed |
| 517 | // and that the space can do any related house-keeping functions. |
| 518 | void sweep_completed(); |
| 519 | |
| 520 | // For an object in this space, the mark-word's two |
  // LSBs having the value [11] indicates that it has been
| 522 | // promoted since the most recent call to save_marks() on |
| 523 | // this generation and has not subsequently been iterated |
| 524 | // over (using oop_since_save_marks_iterate() above). |
| 525 | // This property holds only for single-threaded collections, |
| 526 | // and is typically used for Cheney scans; for MT scavenges, |
| 527 | // the property holds for all objects promoted during that |
| 528 | // scavenge for the duration of the scavenge and is used |
| 529 | // by card-scanning to avoid scanning objects (being) promoted |
| 530 | // during that scavenge. |
| 531 | bool obj_allocated_since_save_marks(const oop obj) const { |
    assert(is_in_reserved(obj), "Wrong space?");
| 533 | return ((PromotedObject*)obj)->hasPromotedMark(); |
| 534 | } |
| 535 | |
| 536 | // A worst-case estimate of the space required (in HeapWords) to expand the |
| 537 | // heap when promoting an obj of size obj_size. |
| 538 | size_t expansionSpaceRequired(size_t obj_size) const; |
| 539 | |
| 540 | FreeChunk* allocateScratch(size_t size); |
| 541 | |
| 542 | // Returns true if either the small or large linear allocation buffer is empty. |
| 543 | bool linearAllocationWouldFail() const; |
| 544 | |
| 545 | // Adjust the chunk for the minimum size. This version is called in |
| 546 | // most cases in CompactibleFreeListSpace methods. |
| 547 | inline static size_t adjustObjectSize(size_t size) { |
| 548 | return align_object_size(MAX2(size, (size_t)MinChunkSize)); |
| 549 | } |
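  // Worked example with hypothetical numbers: if MinChunkSize were 4 words
  // and object alignment one word, adjustObjectSize(2) would return 4 (padded
  // up, presumably so the block can later be treated as a FreeChunk), while
  // adjustObjectSize(9) would return 9 unchanged.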
| 550 | // This is a virtual version of adjustObjectSize() that is called |
| 551 | // only occasionally when the compaction space changes and the type |
  // of the new compaction space is only known to be CompactibleSpace.
| 553 | size_t adjust_object_size_v(size_t size) const { |
| 554 | return adjustObjectSize(size); |
| 555 | } |
| 556 | // Minimum size of a free block. |
| 557 | virtual size_t minimum_free_block_size() const { return MinChunkSize; } |
| 558 | void removeFreeChunkFromFreeLists(FreeChunk* chunk); |
| 559 | void addChunkAndRepairOffsetTable(HeapWord* chunk, size_t size, |
| 560 | bool coalesced); |
| 561 | |
| 562 | // Support for compaction. |
| 563 | void prepare_for_compaction(CompactPoint* cp); |
| 564 | void adjust_pointers(); |
| 565 | void compact(); |
| 566 | // Reset the space to reflect the fact that a compaction of the |
| 567 | // space has been done. |
| 568 | virtual void reset_after_compaction(); |
| 569 | |
| 570 | // Debugging support. |
| 571 | void print() const; |
| 572 | void print_on(outputStream* st) const; |
| 573 | void prepare_for_verify(); |
| 574 | void verify() const; |
| 575 | void verifyFreeLists() const PRODUCT_RETURN; |
| 576 | void verifyIndexedFreeLists() const; |
| 577 | void verifyIndexedFreeList(size_t size) const; |
| 578 | // Verify that the given chunk is in the free lists: |
| 579 | // i.e. either the binary tree dictionary, the indexed free lists |
| 580 | // or the linear allocation block. |
| 581 | bool verify_chunk_in_free_list(FreeChunk* fc) const; |
| 582 | // Verify that the given chunk is the linear allocation block. |
| 583 | bool verify_chunk_is_linear_alloc_block(FreeChunk* fc) const; |
  // Do some basic checks on the free lists.
| 585 | void check_free_list_consistency() const PRODUCT_RETURN; |
| 586 | |
| 587 | // Printing support |
| 588 | void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st); |
| 589 | void print_indexed_free_lists(outputStream* st) const; |
| 590 | void print_dictionary_free_lists(outputStream* st) const; |
| 591 | void print_promo_info_blocks(outputStream* st) const; |
| 592 | |
| 593 | NOT_PRODUCT ( |
| 594 | void initializeIndexedFreeListArrayReturnedBytes(); |
| 595 | size_t sumIndexedFreeListArrayReturnedBytes(); |
| 596 | // Return the total number of chunks in the indexed free lists. |
| 597 | size_t totalCountInIndexedFreeLists() const; |
| 598 | // Return the total number of chunks in the space. |
| 599 | size_t totalCount(); |
| 600 | ) |
| 601 | |
  // The census consists of counts of quantities such as
  // the current count of the free chunks, the number of chunks
  // created as a result of the split of a larger chunk or the
  // coalescing of smaller chunks, etc. The counts in the
  // census are used to make decisions on splitting and
  // coalescing of chunks during the sweep of garbage.
| 608 | |
| 609 | // Print the statistics for the free lists. |
| 610 | void printFLCensus(size_t sweep_count) const; |
| 611 | |
| 612 | // Statistics functions |
| 613 | // Initialize census for lists before the sweep. |
| 614 | void beginSweepFLCensus(float inter_sweep_current, |
| 615 | float inter_sweep_estimate, |
| 616 | float intra_sweep_estimate); |
| 617 | // Set the surplus for each of the free lists. |
| 618 | void setFLSurplus(); |
| 619 | // Set the hint for each of the free lists. |
| 620 | void setFLHints(); |
| 621 | // Clear the census for each of the free lists. |
| 622 | void clearFLCensus(); |
| 623 | // Perform functions for the census after the end of the sweep. |
| 624 | void endSweepFLCensus(size_t sweep_count); |
| 625 | // Return true if the count of free chunks is greater |
| 626 | // than the desired number of free chunks. |
| 627 | bool coalOverPopulated(size_t size); |
| 628 | |
| 629 | // Record (for each size): |
| 630 | // |
| 631 | // split-births = #chunks added due to splits in (prev-sweep-end, |
| 632 | // this-sweep-start) |
| 633 | // split-deaths = #chunks removed for splits in (prev-sweep-end, |
| 634 | // this-sweep-start) |
| 635 | // num-curr = #chunks at start of this sweep |
| 636 | // num-prev = #chunks at end of previous sweep |
| 637 | // |
| 638 | // The above are quantities that are measured. Now define: |
| 639 | // |
| 640 | // num-desired := num-prev + split-births - split-deaths - num-curr |
| 641 | // |
| 642 | // Roughly, num-prev + split-births is the supply, |
| 643 | // split-deaths is demand due to other sizes |
| 644 | // and num-curr is what we have left. |
| 645 | // |
| 646 | // Thus, num-desired is roughly speaking the "legitimate demand" |
| 647 | // for blocks of this size and what we are striving to reach at the |
| 648 | // end of the current sweep. |
| 649 | // |
| 650 | // For a given list, let num-len be its current population. |
| 651 | // Define, for a free list of a given size: |
| 652 | // |
| 653 | // coal-overpopulated := num-len >= num-desired * coal-surplus |
| 654 | // (coal-surplus is set to 1.05, i.e. we allow a little slop when |
| 655 | // coalescing -- we do not coalesce unless we think that the current |
| 656 | // supply has exceeded the estimated demand by more than 5%). |
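  //
  // A worked example with made-up numbers: suppose that for 24-word chunks
  // num-prev = 100, split-births = 30, split-deaths = 10 and num-curr = 90.
  // Then num-desired = 100 + 30 - 10 - 90 = 30, and with coal-surplus = 1.05
  // the 24-word list is coal-overpopulated once num-len >= 31.5, i.e. once
  // it holds 32 or more chunks, at which point chunks of this size become
  // candidates for coalescing.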
| 657 | // |
| 658 | // For the set of sizes in the binary tree, which is neither dense nor |
| 659 | // closed, it may be the case that for a particular size we have never |
| 660 | // had, or do not now have, or did not have at the previous sweep, |
| 661 | // chunks of that size. We need to extend the definition of |
| 662 | // coal-overpopulated to such sizes as well: |
| 663 | // |
| 664 | // For a chunk in/not in the binary tree, extend coal-overpopulated |
| 665 | // defined above to include all sizes as follows: |
| 666 | // |
| 667 | // . a size that is non-existent is coal-overpopulated |
| 668 | // . a size that has a num-desired <= 0 as defined above is |
| 669 | // coal-overpopulated. |
| 670 | // |
| 671 | // Also define, for a chunk heap-offset C and mountain heap-offset M: |
| 672 | // |
| 673 | // close-to-mountain := C >= 0.99 * M |
| 674 | // |
| 675 | // Now, the coalescing strategy is: |
| 676 | // |
| 677 | // Coalesce left-hand chunk with right-hand chunk if and |
| 678 | // only if: |
| 679 | // |
| 680 | // EITHER |
| 681 | // . left-hand chunk is of a size that is coal-overpopulated |
| 682 | // OR |
| 683 | // . right-hand chunk is close-to-mountain |
| 684 | void smallCoalBirth(size_t size); |
| 685 | void smallCoalDeath(size_t size); |
| 686 | void coalBirth(size_t size); |
| 687 | void coalDeath(size_t size); |
| 688 | void smallSplitBirth(size_t size); |
| 689 | void smallSplitDeath(size_t size); |
| 690 | void split_birth(size_t size); |
| 691 | void splitDeath(size_t size); |
| 692 | void split(size_t from, size_t to1); |
| 693 | |
| 694 | double flsFrag() const; |
| 695 | }; |
| 696 | |
| 697 | // A parallel-GC-thread-local allocation buffer for allocation into a |
| 698 | // CompactibleFreeListSpace. |
| 699 | class CompactibleFreeListSpaceLAB : public CHeapObj<mtGC> { |
| 700 | // The space that this buffer allocates into. |
| 701 | CompactibleFreeListSpace* _cfls; |
| 702 | |
| 703 | // Our local free lists. |
| 704 | AdaptiveFreeList<FreeChunk> _indexedFreeList[CompactibleFreeListSpace::IndexSetSize]; |
| 705 | |
| 706 | // Initialized from a command-line arg. |
| 707 | |
| 708 | // Allocation statistics in support of dynamic adjustment of |
| 709 | // #blocks to claim per get_from_global_pool() call below. |
| 710 | static AdaptiveWeightedAverage |
| 711 | _blocks_to_claim [CompactibleFreeListSpace::IndexSetSize]; |
| 712 | static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize]; |
| 713 | static uint _global_num_workers[CompactibleFreeListSpace::IndexSetSize]; |
| 714 | size_t _num_blocks [CompactibleFreeListSpace::IndexSetSize]; |
| 715 | |
| 716 | // Internal work method |
| 717 | void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl); |
| 718 | |
| 719 | public: |
| 720 | static const int _default_dynamic_old_plab_size = 16; |
| 721 | static const int _default_static_old_plab_size = 50; |
| 722 | |
| 723 | CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls); |
| 724 | |
| 725 | // Allocate and return a block of the given size, or else return NULL. |
| 726 | HeapWord* alloc(size_t word_sz); |
| 727 | |
| 728 | // Return any unused portions of the buffer to the global pool. |
| 729 | void retire(int tid); |
| 730 | |
| 731 | // Dynamic OldPLABSize sizing |
| 732 | static void compute_desired_plab_size(); |
| 733 | // When the settings are modified from default static initialization |
| 734 | static void modify_initialization(size_t n, unsigned wt); |
| 735 | }; |
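
// A minimal usage sketch (illustrative only; promotion code, not this header,
// drives these calls): during a parallel scavenge each worker owns one LAB,
//   CompactibleFreeListSpaceLAB lab(cfls);
//   HeapWord* p = lab.alloc(word_sz);   // NULL if the request cannot be met
//   ...
//   lab.retire(tid);                    // at the end of the scavenge
// so that per-thread allocation avoids contending on the shared free lists.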
| 736 | |
| 737 | size_t PromotionInfo::refillSize() const { |
| 738 | const size_t CMSSpoolBlockSize = 256; |
| 739 | const size_t sz = heap_word_size(sizeof(SpoolBlock) + sizeof(markOop) |
| 740 | * CMSSpoolBlockSize); |
| 741 | return CompactibleFreeListSpace::adjustObjectSize(sz); |
| 742 | } |
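
// For intuition (hypothetical sizes): on a 64-bit VM where markOop is 8 bytes
// and sizeof(SpoolBlock) were, say, 40 bytes, sz would be
// heap_word_size(40 + 8 * 256) = 261 words, which adjustObjectSize() then
// pads and aligns as needed before the spool block is allocated.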
| 743 | |
| 744 | #endif // SHARE_GC_CMS_COMPACTIBLEFREELISTSPACE_HPP |
| 745 | |