/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_GENERATION_HPP
#define SHARE_GC_SHARED_GENERATION_HPP

#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/mutex.hpp"
#include "runtime/perfData.hpp"

// A Generation models a heap area for similarly-aged objects.
// It contains one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation                       - abstract base class
// - DefNewGeneration               - allocation area (copy collected)
//   - ParNewGeneration             - a DefNewGeneration that is collected by
//                                    several threads
// - CardGeneration                 - abstract class adding offset array behavior
//   - TenuredGeneration            - tenured (old object) space (markSweepCompact)
//   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
//                                     (Detlefs-Printezis refinement of
//                                      Boehm-Demers-Schenker)
//
// The system configurations currently allowed are:
//
//   DefNewGeneration + TenuredGeneration
//
//   ParNewGeneration + ConcurrentMarkSweepGeneration
//

class DefNewGeneration;
class GCMemoryManager;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class ScanClosure;
class FastScanClosure;
class GenCollectedHeap;
class GCStats;

// A "ScratchBlock" represents a block of memory in one generation usable by
// another.  It represents "num_words" free words, starting at and including
// the address of "this".
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // first two fields are word-sized.)
};
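
// An illustrative sketch (not part of this header) of how a consumer might
// total the space on a scratch list built up by contribute_scratch() below;
// the helper name "total_scratch_words" is hypothetical:
//
//   size_t total_scratch_words(const ScratchBlock* list) {
//     size_t sum = 0;
//     for (const ScratchBlock* b = list; b != NULL; b = b->next) {
//       sum += b->num_words;  // num_words counts the two header words too
//     }
//     return sum;
//   }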

class Generation: public CHeapObj<mtGC> {
  friend class VMStructs;
 private:
  jlong _time_of_last_gc;       // time when last gc on this generation happened (ms)
  MemRegion _prev_used_region;  // for collectors that want to "remember" a value for
                                // used region at some specific point during collection.

  GCMemoryManager* _gc_manager;

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code. Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // ("Weak") Reference processing support
  SpanSubjectToDiscoveryClosure _span_based_discoverer;
  ReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size);

  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into younger generations.
  // The iteration is only over objects allocated at the start of the
  // iterations; objects allocated as a result of applying the closure are
  // not included.
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);

 public:
  // The set of possible generation kinds.
  enum Name {
    DefNew,
    ParNew,
    MarkSweepCompact,
    ConcurrentMarkSweep,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples
    // of GenGrain.
    // Note: on ARM we add 1 bit for card_table_base to be properly aligned
    // (we expect its low byte to be zero - see implementation of post_barrier)
    LogOfGenGrain = 16 ARM32_ONLY(+1),
    GenGrain = 1 << LogOfGenGrain
  };
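
  // For example, a requested generation size would be brought to a GenGrain
  // boundary like this (illustrative sketch; align_up is the rounding helper
  // from utilities/align.hpp):
  //
  //   size_t aligned_size = align_up(requested_size, (size_t)Generation::GenGrain);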

  // allocate and initialize ("weak") refs processing support
  virtual void ref_processor_init();
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true; }
  virtual bool refs_discovery_is_mt()     const { return false; }

  // Space inquiries (results in bytes)
  size_t initial_size();
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.

  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion.  (Assumes it is called at a safepoint.)
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount are
  // likely to succeed without a promotion failure.
  // Promotion of the full amount is not guaranteed but
  // might be attempted in the worst case.
  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;

  // For a non-young generation, this interface can be used to inform a
  // generation that a promotion attempt into that generation failed.
  // Typically used to enable diagnostic output for post-mortem analysis,
  // but other uses of the interface are not ruled out.
  virtual void promotion_failure_occurred() { /* does nothing */ }

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;

  // Returns true if this generation cannot be expanded further
  // without a GC.  Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void save_used_region()    { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into the committed areas in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product JVMs, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  // Returns "TRUE" iff "p" points into the reserved area of the generation.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size.  Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }

  // Allocates and returns a block of the requested size, or returns "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;
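
  // An illustrative caller-side sketch (hypothetical call site; the real
  // slow path lives in GenCollectedHeap): try a thread-safe allocation
  // first, then fall back to expanding the generation on failure:
  //
  //   HeapWord* result = gen->par_allocate(word_size, false /* is_tlab */);
  //   if (result == NULL) {
  //     result = gen->expand_and_allocate(word_size, false /* is_tlab */);
  //   }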

  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (More precisely, this means the style of allocation that
  // increments "*top_addr()" with a CAS.)  (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock free
  // allocation will be concurrent with plain "allocate" calls.
  virtual bool supports_inline_contig_alloc() const { return false; }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord* volatile* top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }
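
  // A simplified sketch of the lock-free allocation style these addresses
  // enable (compiled code emits the equivalent inline; ContiguousSpace has
  // the real retry loop):
  //
  //   HeapWord* volatile* top = gen->top_addr();
  //   HeapWord* old_top = *top;
  //   HeapWord* new_top = old_top + word_size;
  //   if (new_top <= *gen->end_addr() &&
  //       Atomic::cmpxchg(new_top, top, old_top) == old_top) {
  //     // the words in [old_top, new_top) now belong to this thread
  //   }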

  // Thread-local allocation buffers
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t tlab_used() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

  // "obj" is the address of an object in a younger generation.  Allocate space
  // for "obj" in the current (or some higher) generation, and copy "obj" into
  // the newly allocated space, if possible, returning the result (or NULL if
  // the allocation failed).
  //
  // The "obj_size" argument is just obj->size(), passed along so the caller can
  // avoid repeating the virtual call to retrieve it.
  virtual oop promote(oop obj, size_t obj_size);
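
  // Caller-side sketch (hypothetical call site):
  //
  //   oop survivor = old_gen->promote(obj, obj->size());
  //   if (survivor == NULL) {
  //     // promotion failed; the caller must handle the failure
  //   }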

  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz".  If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*).  Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num, oop obj, markOop m, size_t word_sz);
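
  // A much-simplified sketch of the copy protocol described above (the real
  // implementations, including the careful klass-last ordering, live in the
  // concrete generations; "new_loc" stands for a per-thread allocation of
  // word_sz words):
  //
  //   Copy::aligned_disjoint_words((HeapWord*)obj, new_loc, word_sz);
  //   oop new_obj = oop(new_loc);
  //   new_obj->set_mark_raw(m);  // restore the saved mark word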

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset.  Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset.  Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}

  // Returns "true" iff collect() should subsequently be called on this
  // generation.  See comment below.
  // This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Returns true if the collection is likely to be safely
  // completed.  Even if this method returns true, a collection
  // may not be guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost.
  virtual bool collection_attempt_is_safe() {
    guarantee(false, "Are you sure you want to call this method?");
    return true;
  }

  // Perform a garbage collection.
  // If full is true, attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size".  If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block).  If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;

  // Some generations may require some cleanup or preparation actions before
  // allowing a collection.  The default is to do nothing.
  virtual void gc_prologue(bool full) {}

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {}

  // Save the high water marks for the used space in a generation.
  virtual void record_spaces_top() {}

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again.  The default is to do nothing.
  virtual void ensure_parsability() {}

  // Time (in ms) when we were last collected or now if a collection is
  // in progress.
  virtual jlong time_of_last_gc(jlong now) {
    // Both _time_of_last_gc and now are set using a time source
    // that guarantees monotonically non-decreasing values provided
    // the underlying platform provides such a source.  So we still
    // have to guard against non-monotonicity.
    NOT_PRODUCT(
      if (now < _time_of_last_gc) {
        log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, _time_of_last_gc, now);
      }
    )
    return _time_of_last_gc;
  }

  virtual void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  // Generations may keep statistics about collection.  This method
  // updates those statistics.  current_generation is the generation
  // that was most recently collected.  This allows the generation to
  // decide what statistics are valid to collect.  For example, the
  // generation can decide to gather the amount of promoted data if
  // the collection of the young generation has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(Generation* current_generation, bool full) {}

#if INCLUDE_SERIALGC
  // Mark sweep support phase2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase3
  virtual void adjust_pointers();
  // Mark sweep support phase4
  virtual void compact();
  virtual void post_compact() { ShouldNotReachHere(); }
#endif

  // Support for CMS's rescan.  In this general form we return a pointer
  // to an abstract object that can be used, based on specific previously
  // decided protocols, to exchange information between generations,
  // information that may be useful for speeding up certain types of
  // garbage collectors.  A NULL value indicates to the client that
  // no data recording is expected by the provider.  The data-recorder is
  // expected to be GC worker thread-local, with the worker index
  // indicated by "thr_num".
  virtual void* get_data_recorder(int thr_num) { return NULL; }
  virtual void sample_eden_chunk() {}

  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {}

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections.  For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks".  That
  // is, it should only be called when the generation is empty.
  virtual void reset_saved_marks() {}

  // This function returns "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;
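
  // A typical shape for the driving loop (illustrative sketch; the actual
  // loop lives in the collector, and the scanning step is provided by
  // concrete subclasses via their oop_since_save_marks_iterate methods):
  //
  //   gen->save_marks();
  //   // ... scan roots, promoting objects into "gen" ...
  //   while (!gen->no_allocs_since_save_marks()) {
  //     // scan (and thereby advance the saved mark past) the objects
  //     // allocated since the last mark; scanning may promote more objects
  //   }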

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space.  If
  // the target is not the requestor, no gc actions will be required
  // of the target.  The requestor promises to allocate no more than
  // "max_alloc_words" in the target generation (via promotion say,
  // if the requestor is a young generation and the target is older).
  // If the target generation can provide any scratch space, it adds
  // it to "list", leaving "list" pointing to the head of the
  // augmented list.  The default is to offer no space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}
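
  // A sketch (hypothetical) of a target generation offering part of its free
  // space, threading a new block onto the head of "list"; "free_start" and
  // "free_words" stand for the bounds of the donated range:
  //
  //   ScratchBlock* sb = (ScratchBlock*)free_start;
  //   sb->num_words = free_words;
  //   sb->next = list;
  //   list = sb;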

  // Give each generation an opportunity to do clean up for any
  // contributed scratch.
  virtual void reset_scratch() {}

  // When an older generation has been collected, and perhaps resized,
  // this method will be invoked on all younger generations (from older to
  // younger), allowing them to resize themselves as appropriate.
  virtual void compute_new_size() = 0;

  // Printing
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;

  // Reference Processing accessor
  ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Iteration.

  // Iterate over all the ref-containing fields of all objects in the
  // generation, calling "cl.do_oop" on each.
  virtual void oop_iterate(OopIterateClosure* cl);

  // Iterate over all objects in the generation, calling "cl.do_object" on
  // each.
  virtual void object_iterate(ObjectClosure* cl);

  // Iterate over all safe objects in the generation, calling "cl.do_object" on
  // each.  An object is safe if its references point to other objects in
  // the heap.  This defaults to object_iterate() unless overridden.
  virtual void safe_object_iterate(ObjectClosure* cl);

  // Apply "cl->do_oop" to (the address of) all and only all the ref fields
  // in the current generation that contain pointers to objects in younger
  // generations.  Objects allocated since the last "save_marks" call are
  // excluded.
  virtual void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) = 0;

  // Inform a generation that it no longer contains references to objects
  // in any younger generation.  [e.g. Because younger gens are empty,
  // clear the card table.]
  virtual void clear_remembered_set() { }

  // Inform a generation that some of its objects have moved.  [e.g. The
  // generation's spaces were compacted, invalidating the card table.]
  virtual void invalidate_remembered_set() { }

  // Block abstraction.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "block" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;
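
  // Together these three support a linear walk over a parsable range
  // (sketch, assuming a safepoint; "bottom" and "top" bound the walk and
  // "do_something" is a hypothetical per-object action):
  //
  //   HeapWord* cur = bottom;
  //   while (cur < top) {
  //     if (gen->block_is_obj(cur)) {
  //       do_something(oop(cur));
  //     }
  //     cur += gen->block_size(cur);
  //   }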

  void print_heap_change(size_t prev_used) const;

  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify() = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
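
  // Sketch of how a collector might maintain its record around a collection
  // (hypothetical call site; elapsedTimer comes from runtime/timer.hpp):
  //
  //   StatRecord* sr = gen->stat_record();
  //   sr->invocations++;
  //   sr->accumulated_time.start();
  //   // ... perform the collection ...
  //   sr->accumulated_time.stop();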
 private:
  StatRecord _stat_record;
 public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info_on(outputStream* st);

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }

  GCMemoryManager* gc_manager() const {
    assert(_gc_manager != NULL, "not initialized yet");
    return _gc_manager;
  }

  void set_gc_manager(GCMemoryManager* gc_manager) {
    _gc_manager = gc_manager;
  }

};

#endif // SHARE_GC_SHARED_GENERATION_HPP
| 556 | |