/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SERIAL_DEFNEWGENERATION_HPP
#define SHARE_GC_SERIAL_DEFNEWGENERATION_HPP

#include "gc/serial/cSpaceCounters.hpp"
#include "gc/shared/ageTable.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/generationCounters.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "utilities/align.hpp"
#include "utilities/stack.hpp"

class CardTableRS;
class ContiguousSpace;
class FastScanClosure;
class ScanClosure;
class STWGCTimer;
class CSpaceCounters;
class ScanWeakRefClosure;
class SerialHeap;

// DefNewGeneration is a young generation containing eden, from- and
// to-space.

class DefNewGeneration: public Generation {
  friend class VMStructs;

protected:
  Generation* _old_gen;
  uint        _tenuring_threshold;  // Tenuring threshold for next collection.
  AgeTable    _age_table;
  // Size of object to pretenure in words; command line provides bytes
  size_t      _pretenure_size_threshold_words;
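  // For illustration (a sketch, not from this file): with
  // -XX:PretenureSizeThreshold=1m and 8-byte heap words, this is
  // 131072 words; should_allocate() below then rejects non-TLAB requests
  // of that size or larger, so the heap can place them directly in the
  // old generation instead of eden.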

  AgeTable* age_table() { return &_age_table; }

  // Initialize state to optimistically assume no promotion failure will
  // happen.
  void init_assuming_no_promotion_failure();
  // True iff a promotion has failed in the current collection.
  bool _promotion_failed;
  bool promotion_failed() { return _promotion_failed; }
  PromotionFailedInfo _promotion_failed_info;

  // Handling promotion failure. A young generation collection
  // can fail if a live object cannot be copied out of its
  // location in eden or from-space during the collection. If
  // a collection fails, the young generation is left in a
  // consistent state such that it can be collected by a
  // full collection.
  //   Before the collection
  //     Objects are in eden or from-space
  //     All roots into the young generation point into eden or from-space.
  //
  //   After a failed collection
  //     Objects may be in eden, from-space, or to-space
  //     An object A in eden or from-space may have a copy B
  //       in to-space. If B exists, all roots that once pointed
  //       to A must now point to B.
  //     All objects in the young generation are unmarked.
  //     Eden, from-space, and to-space will all be collected by
  //       the full collection.
  void handle_promotion_failure(oop);

  // In the absence of promotion failure, we wouldn't look at "from-space"
  // objects after a young-gen collection. When promotion fails, however,
  // the subsequent full collection will look at from-space objects:
  // therefore we must remove their forwarding pointers.
  void remove_forwarding_pointers();

  virtual void restore_preserved_marks();

  // Preserved marks
  PreservedMarksSet _preserved_marks_set;

  // Promotion failure handling
  OopIterateClosure* _promo_failure_scan_stack_closure;
  void set_promo_failure_scan_stack_closure(OopIterateClosure* scan_stack_closure) {
    _promo_failure_scan_stack_closure = scan_stack_closure;
  }

  Stack<oop, mtGC> _promo_failure_scan_stack;
  void drain_promo_failure_scan_stack(void);
  bool _promo_failure_drain_in_progress;

  // Performance Counters
  GenerationCounters* _gen_counters;
  CSpaceCounters* _eden_counters;
  CSpaceCounters* _from_counters;
  CSpaceCounters* _to_counters;

  // Sizing information
  size_t _max_eden_size;
  size_t _max_survivor_size;

  // Allocation support
  bool _should_allocate_from_space;
  bool should_allocate_from_space() const {
    return _should_allocate_from_space;
  }
  void clear_should_allocate_from_space() {
    _should_allocate_from_space = false;
  }
  void set_should_allocate_from_space() {
    _should_allocate_from_space = true;
  }

  // Tenuring
  void adjust_desired_tenuring_threshold();
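  // A sketch of the policy (assuming the usual AgeTable-based
  // computation): the desired survivor occupancy is TargetSurvivorRatio
  // percent of the survivor space, and the threshold becomes the smallest
  // age at which the cumulative size of surviving objects exceeds that
  // target, capped at MaxTenuringThreshold. E.g. with a 1024 KB target
  // and 400 KB surviving at age 1, 500 KB at age 2 and 300 KB at age 3,
  // the cumulative size first exceeds the target at age 3, so the
  // threshold becomes 3.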

  // Spaces
  ContiguousSpace* _eden_space;
  ContiguousSpace* _from_space;
  ContiguousSpace* _to_space;

  STWGCTimer* _gc_timer;

  enum SomeProtectedConstants {
    // Minimum number of free words in to-space that makes it worth
    // offering as a scratch block (see contribute_scratch()).
    MinFreeScratchWords = 100
  };

  // Return the size of a survivor space if this generation were of size
  // gen_size.
  size_t compute_survivor_size(size_t gen_size, size_t alignment) const {
    size_t n = gen_size / (SurvivorRatio + 2);
    return n > alignment ? align_down(n, alignment) : alignment;
  }
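
  // Worked example (a sketch, with assumed values): for gen_size = 10 MB,
  // SurvivorRatio = 8 and a 64 KB alignment, n = 10 MB / (8 + 2) = 1 MB,
  // which is already 64 KB-aligned, so each survivor space gets 1 MB and
  // eden gets the remaining 8 MB. If n were smaller than the alignment,
  // the survivor size would be the alignment itself.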

public:  // was "protected" but caused compile error on win32
  class IsAliveClosure: public BoolObjectClosure {
    Generation* _young_gen;
  public:
    IsAliveClosure(Generation* young_gen);
    bool do_object_b(oop p);
  };

  class KeepAliveClosure: public OopClosure {
  protected:
    ScanWeakRefClosure* _cl;
    CardTableRS* _rs;
    template <class T> void do_oop_work(T* p);
  public:
    KeepAliveClosure(ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class FastKeepAliveClosure: public KeepAliveClosure {
  protected:
    HeapWord* _boundary;
    template <class T> void do_oop_work(T* p);
  public:
    FastKeepAliveClosure(DefNewGeneration* g, ScanWeakRefClosure* cl);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);
  };

  class FastEvacuateFollowersClosure: public VoidClosure {
    SerialHeap* _heap;
    FastScanClosure* _scan_cur_or_nonheap;
    FastScanClosure* _scan_older;
  public:
    FastEvacuateFollowersClosure(SerialHeap* heap,
                                 FastScanClosure* cur,
                                 FastScanClosure* older);
    void do_void();
  };

public:
  DefNewGeneration(ReservedSpace rs,
                   size_t initial_byte_size,
                   size_t min_byte_size,
                   size_t max_byte_size,
                   const char* policy="Serial young collection pauses");

  virtual void ref_processor_init();

  virtual Generation::Name kind() { return Generation::DefNew; }

  // Accessing spaces
  ContiguousSpace* eden() const { return _eden_space; }
  ContiguousSpace* from() const { return _from_space; }
  ContiguousSpace* to()   const { return _to_space; }

  virtual CompactibleSpace* first_compaction_space() const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;
  size_t max_capacity() const;
  size_t capacity_before_gc() const;
  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  size_t max_eden_size() const     { return _max_eden_size; }
  size_t max_survivor_size() const { return _max_survivor_size; }

  bool supports_inline_contig_alloc() const { return true; }
  HeapWord* volatile* top_addr() const;
  HeapWord** end_addr() const;

  // Thread-local allocation buffers
  bool supports_tlab_allocation() const { return true; }
  size_t tlab_capacity() const;
  size_t tlab_used() const;
  size_t unsafe_max_tlab_alloc() const;

  // Grow the generation by the specified number of bytes; bytes is
  // assumed to be properly aligned. Return true if the expansion was
  // successful.
  bool expand(size_t bytes);

  // DefNewGeneration cannot currently expand except at
  // a GC.
  virtual bool is_maximal_no_gc() const { return true; }

  // Iteration
  void object_iterate(ObjectClosure* blk);

  void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads);

  void space_iterate(SpaceClosure* blk, bool usedOnly = false);

  // Allocation support
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    assert(UseTLAB || !is_tlab, "Should not allocate tlab");

    // Any word count at or above this limit would overflow a size_t when
    // converted to a byte size.
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);

    const bool non_zero      = word_size > 0;
    const bool overflows     = word_size >= overflow_limit;
    const bool check_too_big = _pretenure_size_threshold_words > 0;
    const bool not_too_big   = word_size < _pretenure_size_threshold_words;
    const bool size_ok       = is_tlab || !check_too_big || not_too_big;

    return non_zero && !overflows && size_ok;
  }
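
  // Worked example (a sketch, assuming a 64-bit size_t and 8-byte heap
  // words): overflow_limit = (size_t)1 << (64 - 3) = 2^61 words, which is
  // exactly 2^64 bytes; any word_size of 2^61 or more is therefore
  // rejected because its byte size cannot be represented in a size_t.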

  HeapWord* allocate(size_t word_size, bool is_tlab);
  HeapWord* allocate_from_space(size_t word_size);

  HeapWord* par_allocate(size_t word_size, bool is_tlab);

  virtual void gc_epilogue(bool full);

  // Save the tops for eden, from, and to
  virtual void record_spaces_top();

  // Accessing marks
  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  // Apply cl to all oops in objects allocated in this generation since
  // the last call to save_marks().
  template <typename OopClosureType>
  void oop_since_save_marks_iterate(OopClosureType* cl);
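  // Typical use during a scavenge (a sketch, not a verbatim excerpt from
  // the collector):
  //
  //   save_marks();
  //   // copy the roots; survivors are allocated past the saved marks
  //   do {
  //     oop_since_save_marks_iterate(&cl);  // scan newly copied objects
  //   } while (!no_allocs_since_save_marks());
  //
  // i.e. objects copied while scanning are themselves scanned, until
  // evacuation reaches a fixed point.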

  // For non-youngest collection, the DefNewGeneration can contribute
  // "to-space".
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words);
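  // A sketch of the intended behavior (see MinFreeScratchWords above):
  // the free portion of to-space is offered as a scratch block only when
  // it is at least MinFreeScratchWords words; smaller fragments are not
  // worth handing out.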

  // Reset for contribution of "to-space".
  virtual void reset_scratch();

  // GC support
  virtual void compute_new_size();

  // Returns true if the collection is likely to complete safely. Even if
  // this method returns true, the collection is not guaranteed to
  // succeed, and the system should be able to safely unwind and recover
  // from a failure, albeit at some additional cost. Overrides the
  // superclass's implementation.
  virtual bool collection_attempt_is_safe();

  virtual void collect(bool full,
                       bool clear_all_soft_refs,
                       size_t size,
                       bool is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  oop copy_to_survivor_space(oop old);
  uint tenuring_threshold() { return _tenuring_threshold; }

  // Performance Counter support
  void update_counters();

  // Printing
  virtual const char* name() const;
  virtual const char* short_name() const { return "DefNew"; }

  void print_on(outputStream* st) const;

  void verify();

  bool promo_failure_scan_is_complete() const {
    return _promo_failure_scan_stack.is_empty();
  }

protected:
  // If clear_space is true, clear the survivor spaces. Eden is
  // cleared if the minimum size of eden is 0. If mangle_space
  // is true, also mangle the space in debug mode.
  void compute_space_boundaries(uintx minimum_eden_size,
                                bool clear_space,
                                bool mangle_space);

  // Return adjusted new size for NewSizeThreadIncrease.
  // If any overflow happens, revert to previous new size.
  size_t adjust_for_thread_increase(size_t new_size_candidate,
                                    size_t new_size_before,
                                    size_t alignment) const;

  // Scavenge support
  void swap_spaces();
};

#endif // SHARE_GC_SERIAL_DEFNEWGENERATION_HPP