| 1 | /* |
| 2 | * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #ifndef SHARE_GC_CMS_PARNEWGENERATION_HPP |
| 26 | #define SHARE_GC_CMS_PARNEWGENERATION_HPP |
| 27 | |
| 28 | #include "gc/cms/parOopClosures.hpp" |
| 29 | #include "gc/serial/defNewGeneration.hpp" |
| 30 | #include "gc/shared/copyFailedInfo.hpp" |
| 31 | #include "gc/shared/gcTrace.hpp" |
| 32 | #include "gc/shared/oopStorageParState.hpp" |
| 33 | #include "gc/shared/plab.hpp" |
| 34 | #include "gc/shared/preservedMarks.hpp" |
| 35 | #include "gc/shared/taskqueue.hpp" |
| 36 | #include "memory/padded.hpp" |
| 37 | |
| 38 | class ChunkArray; |
| 39 | class CMSHeap; |
| 40 | class ParScanWithoutBarrierClosure; |
| 41 | class ParScanWithBarrierClosure; |
| 42 | class ParRootScanWithoutBarrierClosure; |
| 43 | class ParRootScanWithBarrierTwoGensClosure; |
| 44 | class ParEvacuateFollowersClosure; |
| 45 | class StrongRootsScope; |
| 46 | |
| 47 | // It would be better if these types could be kept local to the .cpp file, |
| 48 | // but they must be here to allow ParScanClosure::do_oop_work to be defined |
| 49 | // in genOopClosures.inline.hpp. |
| 50 | |
| 51 | typedef Padded<OopTaskQueue> ObjToScanQueue; |
| 52 | typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet; |
| 53 | |
// Keep-alive closure used during reference processing in a parallel
// young-gen collection.  Unlike the serial DefNewGeneration version, it
// wraps a per-thread ParScanWeakRefClosure so each worker can keep
// referents alive using its own scan state.
class ParKeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
 private:
  ParScanWeakRefClosure* _par_cl;   // per-thread weak-ref scan closure this wraps
 protected:
  // Shared implementation for both oop widths; defined out of line.
  template <class T> void do_oop_work(T* p);
 public:
  ParKeepAliveClosure(ParScanWeakRefClosure* cl);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};
| 64 | |
| 65 | // The state needed by thread performing parallel young-gen collection. |
| 66 | class ParScanThreadState { |
| 67 | friend class ParScanThreadStateSet; |
| 68 | private: |
| 69 | ObjToScanQueue *_work_queue; |
| 70 | Stack<oop, mtGC>* const _overflow_stack; |
| 71 | PreservedMarks* const _preserved_marks; |
| 72 | |
| 73 | PLAB _to_space_alloc_buffer; |
| 74 | |
| 75 | ParScanWithoutBarrierClosure _to_space_closure; // scan_without_gc_barrier |
| 76 | ParScanWithBarrierClosure _old_gen_closure; // scan_with_gc_barrier |
| 77 | ParRootScanWithoutBarrierClosure _to_space_root_closure; // scan_root_without_gc_barrier |
| 78 | // Will be passed to process_roots to set its generation. |
| 79 | ParRootScanWithBarrierTwoGensClosure _older_gen_closure; |
| 80 | // This closure will always be bound to the old gen; it will be used |
| 81 | // in evacuate_followers. |
| 82 | ParRootScanWithBarrierTwoGensClosure _old_gen_root_closure; // scan_old_root_with_gc_barrier |
| 83 | ParEvacuateFollowersClosure _evacuate_followers; |
| 84 | DefNewGeneration::IsAliveClosure _is_alive_closure; |
| 85 | ParScanWeakRefClosure _scan_weak_ref_closure; |
| 86 | ParKeepAliveClosure _keep_alive_closure; |
| 87 | |
| 88 | Space* _to_space; |
| 89 | Space* to_space() { return _to_space; } |
| 90 | |
| 91 | ParNewGeneration* _young_gen; |
| 92 | ParNewGeneration* young_gen() const { return _young_gen; } |
| 93 | |
| 94 | Generation* _old_gen; |
| 95 | Generation* old_gen() { return _old_gen; } |
| 96 | |
| 97 | HeapWord *_young_old_boundary; |
| 98 | |
| 99 | int _thread_num; |
| 100 | AgeTable _ageTable; |
| 101 | |
| 102 | bool _to_space_full; |
| 103 | |
| 104 | #if TASKQUEUE_STATS |
| 105 | size_t _term_attempts; |
| 106 | size_t _overflow_refills; |
| 107 | size_t _overflow_refill_objs; |
| 108 | #endif // TASKQUEUE_STATS |
| 109 | |
| 110 | // Stats for promotion failure |
| 111 | PromotionFailedInfo _promotion_failed_info; |
| 112 | |
| 113 | // Timing numbers. |
| 114 | double _start; |
| 115 | double _start_strong_roots; |
| 116 | double _strong_roots_time; |
| 117 | double _start_term; |
| 118 | double _term_time; |
| 119 | |
| 120 | // Helper for trim_queues. Scans subset of an array and makes |
| 121 | // remainder available for work stealing. |
| 122 | void scan_partial_array_and_push_remainder(oop obj); |
| 123 | |
| 124 | // In support of CMS' parallel rescan of survivor space. |
| 125 | ChunkArray* _survivor_chunk_array; |
| 126 | ChunkArray* survivor_chunk_array() { return _survivor_chunk_array; } |
| 127 | |
| 128 | void record_survivor_plab(HeapWord* plab_start, size_t plab_word_size); |
| 129 | |
| 130 | ParScanThreadState(Space* to_space_, ParNewGeneration* gen_, |
| 131 | Generation* old_gen_, int thread_num_, |
| 132 | ObjToScanQueueSet* work_queue_set_, |
| 133 | Stack<oop, mtGC>* overflow_stacks_, |
| 134 | PreservedMarks* preserved_marks_, |
| 135 | size_t desired_plab_sz_, |
| 136 | TaskTerminator& term_); |
| 137 | |
| 138 | public: |
| 139 | AgeTable* age_table() {return &_ageTable;} |
| 140 | |
| 141 | ObjToScanQueue* work_queue() { return _work_queue; } |
| 142 | |
| 143 | PreservedMarks* preserved_marks() const { return _preserved_marks; } |
| 144 | |
| 145 | PLAB* to_space_alloc_buffer() { |
| 146 | return &_to_space_alloc_buffer; |
| 147 | } |
| 148 | |
| 149 | ParEvacuateFollowersClosure& evacuate_followers_closure() { return _evacuate_followers; } |
| 150 | DefNewGeneration::IsAliveClosure& is_alive_closure() { return _is_alive_closure; } |
| 151 | ParScanWeakRefClosure& scan_weak_ref_closure() { return _scan_weak_ref_closure; } |
| 152 | ParKeepAliveClosure& keep_alive_closure() { return _keep_alive_closure; } |
| 153 | ParScanClosure& older_gen_closure() { return _older_gen_closure; } |
| 154 | ParRootScanWithoutBarrierClosure& to_space_root_closure() { return _to_space_root_closure; }; |
| 155 | |
| 156 | // Decrease queue size below "max_size". |
| 157 | void trim_queues(int max_size); |
| 158 | |
| 159 | // Private overflow stack usage |
| 160 | Stack<oop, mtGC>* overflow_stack() { return _overflow_stack; } |
| 161 | bool take_from_overflow_stack(); |
| 162 | void push_on_overflow_stack(oop p); |
| 163 | |
| 164 | // Is new_obj a candidate for scan_partial_array_and_push_remainder method. |
| 165 | inline bool should_be_partially_scanned(oop new_obj, oop old_obj) const; |
| 166 | |
| 167 | int thread_num() { return _thread_num; } |
| 168 | |
| 169 | // Allocate a to-space block of size "sz", or else return NULL. |
| 170 | HeapWord* alloc_in_to_space_slow(size_t word_sz); |
| 171 | |
| 172 | inline HeapWord* alloc_in_to_space(size_t word_sz); |
| 173 | |
| 174 | HeapWord* young_old_boundary() { return _young_old_boundary; } |
| 175 | |
| 176 | void set_young_old_boundary(HeapWord *boundary) { |
| 177 | _young_old_boundary = boundary; |
| 178 | } |
| 179 | |
| 180 | // Undo the most recent allocation ("obj", of "word_sz"). |
| 181 | void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz); |
| 182 | |
| 183 | // Promotion failure stats |
| 184 | void register_promotion_failure(size_t sz) { |
| 185 | _promotion_failed_info.register_copy_failure(sz); |
| 186 | } |
| 187 | PromotionFailedInfo& promotion_failed_info() { |
| 188 | return _promotion_failed_info; |
| 189 | } |
| 190 | bool promotion_failed() { |
| 191 | return _promotion_failed_info.has_failed(); |
| 192 | } |
| 193 | void print_promotion_failure_size(); |
| 194 | |
| 195 | #if TASKQUEUE_STATS |
| 196 | TaskQueueStats & taskqueue_stats() const { return _work_queue->stats; } |
| 197 | |
| 198 | size_t term_attempts() const { return _term_attempts; } |
| 199 | size_t overflow_refills() const { return _overflow_refills; } |
| 200 | size_t overflow_refill_objs() const { return _overflow_refill_objs; } |
| 201 | |
| 202 | void note_term_attempt() { ++_term_attempts; } |
| 203 | void note_overflow_refill(size_t objs) { |
| 204 | ++_overflow_refills; _overflow_refill_objs += objs; |
| 205 | } |
| 206 | |
| 207 | void reset_stats(); |
| 208 | #endif // TASKQUEUE_STATS |
| 209 | |
| 210 | void start_strong_roots() { |
| 211 | _start_strong_roots = os::elapsedTime(); |
| 212 | } |
| 213 | void end_strong_roots() { |
| 214 | _strong_roots_time += (os::elapsedTime() - _start_strong_roots); |
| 215 | } |
| 216 | double strong_roots_time() const { return _strong_roots_time; } |
| 217 | void start_term_time() { |
| 218 | TASKQUEUE_STATS_ONLY(note_term_attempt()); |
| 219 | _start_term = os::elapsedTime(); |
| 220 | } |
| 221 | void end_term_time() { |
| 222 | _term_time += (os::elapsedTime() - _start_term); |
| 223 | } |
| 224 | double term_time() const { return _term_time; } |
| 225 | |
| 226 | double elapsed_time() const { |
| 227 | return os::elapsedTime() - _start; |
| 228 | } |
| 229 | }; |
| 230 | |
// Gang task executed by the parallel GC worker threads for a ParNew
// collection; work(worker_id) runs once per worker (defined in the .cpp).
class ParNewGenTask: public AbstractGangTask {
 private:
  ParNewGeneration* _young_gen;
  Generation* _old_gen;
  HeapWord* _young_old_boundary;              // boundary between young and old gen
  class ParScanThreadStateSet* _state_set;    // per-worker scan states
  StrongRootsScope* _strong_roots_scope;      // scope active for root scanning

 public:
  ParNewGenTask(ParNewGeneration* young_gen,
                Generation* old_gen,
                HeapWord* young_old_boundary,
                ParScanThreadStateSet* state_set,
                StrongRootsScope* strong_roots_scope);

  HeapWord* young_old_boundary() { return _young_old_boundary; }

  // Entry point for each worker thread.
  void work(uint worker_id);
};
| 250 | |
// Serial (single-threaded) keep-alive closure for reference processing,
// wrapping a ScanWeakRefClosure; counterpart of ParKeepAliveClosure for
// the non-parallel path.
class KeepAliveClosure: public DefNewGeneration::KeepAliveClosure {
 protected:
  // Shared implementation for both oop widths; defined out of line.
  template <class T> void do_oop_work(T* p);
 public:
  KeepAliveClosure(ScanWeakRefClosure* cl);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};
| 259 | |
// VoidClosure that drives the transitive evacuation of objects reachable
// from already-copied ones, parameterized over the two oop closures used
// for the current/non-heap roots and for the older generation.
// do_void() is defined out of line (presumably looping until all
// follower work is drained — confirm in the .cpp).
template <typename OopClosureType1, typename OopClosureType2>
class EvacuateFollowersClosureGeneral: public VoidClosure {
 private:
  CMSHeap* _heap;
  OopClosureType1* _scan_cur_or_nonheap;  // applied to current-gen / non-heap references
  OopClosureType2* _scan_older;           // applied to older-gen references
 public:
  EvacuateFollowersClosureGeneral(CMSHeap* heap,
                                  OopClosureType1* cur,
                                  OopClosureType2* older);
  virtual void do_void();
};
| 272 | |
// Closure for scanning ParNewGeneration.
// Same as ScanClosure, except does parallel GC barrier.
class ScanClosureWithParBarrier: public OopsInClassLoaderDataOrGenClosure {
 private:
  ParNewGeneration* _g;        // the young generation being scanned
  HeapWord* _boundary;         // young/old boundary used to classify references
  bool _gc_barrier;            // whether to apply the (parallel) GC barrier

  // Shared implementation for both oop widths; defined out of line.
  template <class T> void do_oop_work(T* p);

 public:
  ScanClosureWithParBarrier(ParNewGeneration* g, bool gc_barrier);
  virtual void do_oop(oop* p);
  virtual void do_oop(narrowOop* p);
};
| 288 | |
// Implements AbstractRefProcTaskExecutor for ParNew.
// Runs reference-processing tasks on the GC worker threads, giving each
// worker its ParScanThreadState from the shared state set.
class ParNewRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
  ParNewGeneration& _young_gen;
  Generation& _old_gen;
  ParScanThreadStateSet& _state_set;   // per-worker states used while executing tasks
 public:
  ParNewRefProcTaskExecutor(ParNewGeneration& young_gen,
                            Generation& old_gen,
                            ParScanThreadStateSet& state_set)
    : _young_gen(young_gen), _old_gen(old_gen), _state_set(state_set)
  { }

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task, uint ergo_workers);
  // Switch to single threaded mode.
  virtual void set_single_threaded_mode();
};
| 307 | |
| 308 | |
| 309 | // A Generation that does parallel young-gen collection. |
| 310 | |
// A Generation that does parallel young-gen collection.  Extends the
// serial DefNewGeneration with per-worker task queues, overflow
// stacks/list, and PLAB statistics.
class ParNewGeneration: public DefNewGeneration {
  friend class ParNewGenTask;
  friend class ParNewRefProcTask;
  friend class ParNewRefProcTaskExecutor;
  friend class ParScanThreadStateSet;
  friend class ParEvacuateFollowersClosure;

 private:
  // The per-worker-thread work queues
  ObjToScanQueueSet* _task_queues;

  // Per-worker-thread local overflow stacks
  Stack<oop, mtGC>* _overflow_stacks;

  // Desired size of survivor space plab's
  PLABStats _plab_stats;

  // A list of from-space images of to-be-scanned objects, threaded through
  // klass-pointers (klass information already copied to the forwarded
  // image.) Manipulated with CAS.
  oopDesc* volatile _overflow_list;
  NOT_PRODUCT(ssize_t _num_par_pushes;)   // debug-only count of parallel pushes

  // This closure is used by the reference processor to filter out
  // references to live referent.
  DefNewGeneration::IsAliveClosure _is_alive_closure;

  // GC tracer that should be used during collection.
  ParNewTracer _gc_tracer;

  // Slow path of real_forwardee(); defined in the .cpp.
  static oop real_forwardee_slow(oop obj);
  static void waste_some_time();

  void handle_promotion_failed(CMSHeap* gch, ParScanThreadStateSet& thread_state_set);

 protected:

  void restore_preserved_marks();

 public:
  ParNewGeneration(ReservedSpace rs,
                   size_t initial_byte_size,
                   size_t min_byte_size,
                   size_t max_byte_size);

  // Delete each per-worker queue before deleting the queue set itself.
  ~ParNewGeneration() {
    for (uint i = 0; i < ParallelGCThreads; i++)
      delete _task_queues->queue(i);

    delete _task_queues;
  }

  virtual void ref_processor_init();
  virtual Generation::Name kind() { return Generation::ParNew; }
  virtual const char* name() const;
  virtual const char* short_name() const { return "ParNew" ; }

  // override
  virtual bool refs_discovery_is_mt() const {
    return ParallelGCThreads > 1;
  }

  // Make the collection virtual.
  virtual void collect(bool full,
                       bool clear_all_soft_refs,
                       size_t size,
                       bool is_tlab);

  // This needs to be visible to the closure function.
  // "obj" is the object to be copied, "m" is a recent value of its mark
  // that must not contain a forwarding pointer (though one might be
  // inserted in "obj"s mark word by a parallel thread).
  oop copy_to_survivor_space(ParScanThreadState* par_scan_state,
                             oop obj, size_t obj_sz, markOop m);

  // in support of testing overflow code
  NOT_PRODUCT(int _overflow_counter;)
  NOT_PRODUCT(bool should_simulate_overflow();)

  // Accessor for overflow list
  oop overflow_list() { return _overflow_list; }

  // Push the given (from-space) object on the global overflow list.
  void push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state);

  // If the global overflow list is non-empty, move some tasks from it
  // onto "work_q" (which need not be empty). No more than 1/4 of the
  // available space on "work_q" is used.
  bool take_from_overflow_list(ParScanThreadState* par_scan_state);
  bool take_from_overflow_list_work(ParScanThreadState* par_scan_state);

  // The task queues to be used by parallel GC threads.
  ObjToScanQueueSet* task_queues() {
    return _task_queues;
  }

  PLABStats* plab_stats() {
    return &_plab_stats;
  }

  size_t desired_plab_sz();

  const ParNewTracer* gc_tracer() const {
    return &_gc_tracer;
  }

  // Returns the forwardee of "obj" (spinning past a just-installed
  // forwarding pointer via real_forwardee_slow when necessary).
  static oop real_forwardee(oop obj);
};
| 419 | |
| 420 | #endif // SHARE_GC_CMS_PARNEWGENERATION_HPP |
| 421 | |