/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP

#include "gc/shared/markBitMap.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahSharedVariables.hpp"
#include "services/memoryManager.hpp"

class ConcurrentGCTimer;
class ReferenceProcessor;
class ShenandoahAllocTracker;
class ShenandoahCollectorPolicy;
class ShenandoahControlThread;
class ShenandoahGCSession;
class ShenandoahGCStateResetter;
class ShenandoahHeuristics;
class ShenandoahMarkingContext;
class ShenandoahPhaseTimings;
class ShenandoahHeap;
class ShenandoahHeapRegion;
class ShenandoahHeapRegionClosure;
class ShenandoahCollectionSet;
class ShenandoahFreeSet;
class ShenandoahConcurrentMark;
class ShenandoahMarkCompact;
class ShenandoahMonitoringSupport;
class ShenandoahPacer;
class ShenandoahTraversalGC;
class ShenandoahVerifier;
class ShenandoahWorkGang;
class VMStructs;

class ShenandoahRegionIterator : public StackObj {
private:
  ShenandoahHeap* _heap;

  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
  volatile size_t _index;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);

  // No implicit copying: iterators should be passed by reference to capture the state
  ShenandoahRegionIterator(const ShenandoahRegionIterator& that);
  ShenandoahRegionIterator& operator=(const ShenandoahRegionIterator& o);

public:
  ShenandoahRegionIterator();
  ShenandoahRegionIterator(ShenandoahHeap* heap);

  // Reset iterator to default state
  void reset();

  // Returns next region, or NULL if there are no more regions.
  // This is multi-thread-safe.
  inline ShenandoahHeapRegion* next();

  // This is *not* MT safe. However, in the absence of multithreaded access, it
  // can be used to determine if there is more work to do.
  bool has_next() const;
};
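
// A minimal usage sketch (illustrative, not part of this header): each call to
// next() hands out a distinct region, so several workers may safely drain one
// shared iterator; process_region() below is a hypothetical per-region routine.
//
//   ShenandoahRegionIterator it; // binds to ShenandoahHeap::heap()
//   for (ShenandoahHeapRegion* r = it.next(); r != NULL; r = it.next()) {
//     process_region(r);
//   }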

class ShenandoahHeapRegionClosure : public StackObj {
public:
  virtual void heap_region_do(ShenandoahHeapRegion* r) = 0;
  virtual bool is_thread_safe() { return false; }
};

#ifdef ASSERT
class ShenandoahAssertToSpaceClosure : public OopClosure {
private:
  template <class T>
  void do_oop_work(T* p);
public:
  void do_oop(narrowOop* p);
  void do_oop(oop* p);
};
#endif

typedef ShenandoahLock    ShenandoahHeapLock;
typedef ShenandoahLocker  ShenandoahHeapLocker;
// Shenandoah GC is a low-pause concurrent GC that uses Brooks forwarding pointers
// to encode forwarding data. See BrooksPointer for details on forwarding data encoding.
// See ShenandoahControlThread for GC cycle structure.
//
class ShenandoahHeap : public CollectedHeap {
  friend class ShenandoahAsserts;
  friend class VMStructs;
  friend class ShenandoahGCSession;
  friend class ShenandoahGCStateResetter;

// ---------- Locks that guard important data structures in Heap
//
private:
  ShenandoahHeapLock _lock;

public:
  ShenandoahHeapLock* lock() {
    return &_lock;
  }

  void assert_heaplock_owned_by_current_thread()     NOT_DEBUG_RETURN;
  void assert_heaplock_not_owned_by_current_thread() NOT_DEBUG_RETURN;
  void assert_heaplock_or_safepoint()                NOT_DEBUG_RETURN;
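
  // A minimal usage sketch (illustrative): the RAII locker pairs with lock()
  // to guard heap structure updates; the scope below is hypothetical.
  //
  //   ShenandoahHeap* heap = ShenandoahHeap::heap();
  //   {
  //     ShenandoahHeapLocker locker(heap->lock());
  //     // ... update region states / free set under the heap lock ...
  //   }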

// ---------- Initialization, termination, identification, printing routines
//
public:
  static ShenandoahHeap* heap();
  static ShenandoahHeap* heap_no_check();

  const char* name()          const { return "Shenandoah"; }
  ShenandoahHeap::Name kind() const { return CollectedHeap::Shenandoah; }
  ShenandoahHeap(ShenandoahCollectorPolicy* policy);
  jint initialize();
  void post_initialize();
  void initialize_heuristics();

  void initialize_serviceability();

  void print_on(outputStream* st) const;
  void print_extended_on(outputStream* st) const;
  void print_tracing_info() const;
  void print_gc_threads_on(outputStream* st) const;
  void print_heap_regions_on(outputStream* st) const;

  void stop();

  void prepare_for_verify();
  void verify(VerifyOption vo);

// ---------- Heap counters and metrics
//
private:
  size_t _initial_size;
  size_t _minimum_size;
  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
  volatile size_t _used;
  volatile size_t _committed;
  volatile size_t _bytes_allocated_since_gc_start;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);

public:
  void increase_used(size_t bytes);
  void decrease_used(size_t bytes);
  void set_used(size_t bytes);

  void increase_committed(size_t bytes);
  void decrease_committed(size_t bytes);
  void increase_allocated(size_t bytes);

  size_t bytes_allocated_since_gc_start();
  void reset_bytes_allocated_since_gc_start();

  size_t min_capacity()     const;
  size_t max_capacity()     const;
  size_t initial_capacity() const;
  size_t capacity()         const;
  size_t used()             const;
  size_t committed()        const;

// ---------- Workers handling
//
private:
  uint _max_workers;
  ShenandoahWorkGang* _workers;
  ShenandoahWorkGang* _safepoint_workers;

public:
  uint max_workers();
  void assert_gc_workers(uint nworker) NOT_DEBUG_RETURN;

  WorkGang* workers() const;
  WorkGang* get_safepoint_workers();

  void gc_threads_do(ThreadClosure* tcl) const;

// ---------- Heap regions handling machinery
//
private:
  MemRegion _heap_region;
  bool      _heap_region_special;
  size_t    _num_regions;
  ShenandoahHeapRegion** _regions;
  ShenandoahRegionIterator _update_refs_iterator;

public:
  inline size_t num_regions() const { return _num_regions; }
  inline bool is_heap_region_special() { return _heap_region_special; }

  inline ShenandoahHeapRegion* const heap_region_containing(const void* addr) const;
  inline size_t heap_region_index_containing(const void* addr) const;

  inline ShenandoahHeapRegion* const get_region(size_t region_idx) const;

  void heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
  void parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const;
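
  // A minimal sketch (illustrative): implementing a region closure and driving
  // it over the heap; the closure name and the is_humongous() filter are just
  // an example, assuming the usual ShenandoahHeapRegion query methods.
  //
  //   class CountHumongousClosure : public ShenandoahHeapRegionClosure {
  //   public:
  //     size_t _count;
  //     CountHumongousClosure() : _count(0) {}
  //     void heap_region_do(ShenandoahHeapRegion* r) {
  //       if (r->is_humongous()) _count++;
  //     }
  //   };
  //
  //   CountHumongousClosure cl;
  //   ShenandoahHeap::heap()->heap_region_iterate(&cl);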

// ---------- GC state machinery
//
// GC state describes the important parts of collector state that may be
// used to make barrier selection decisions in the native and generated code.
// Multiple bits can be set at once.
//
// Important invariant: when GC state is zero, the heap is stable, and no barriers
// are required.
//
public:
  enum GCStateBitPos {
    // Heap has forwarded objects: needs LRB barriers.
    HAS_FORWARDED_BITPOS = 0,

    // Heap is under marking: needs SATB barriers.
    MARKING_BITPOS = 1,

    // Heap is under evacuation: needs LRB barriers. (Set together with HAS_FORWARDED)
    EVACUATION_BITPOS = 2,

    // Heap is under updating: needs no additional barriers.
    UPDATEREFS_BITPOS = 3,

    // Heap is under traversal collection.
    TRAVERSAL_BITPOS = 4
  };

  enum GCState {
    STABLE        = 0,
    HAS_FORWARDED = 1 << HAS_FORWARDED_BITPOS,
    MARKING       = 1 << MARKING_BITPOS,
    EVACUATION    = 1 << EVACUATION_BITPOS,
    UPDATEREFS    = 1 << UPDATEREFS_BITPOS,
    TRAVERSAL     = 1 << TRAVERSAL_BITPOS
  };

private:
  ShenandoahSharedBitmap _gc_state;
  ShenandoahSharedFlag   _degenerated_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_in_progress;
  ShenandoahSharedFlag   _full_gc_move_in_progress;
  ShenandoahSharedFlag   _progress_last_gc;

  void set_gc_state_all_threads(char state);
  void set_gc_state_mask(uint mask, bool value);

public:
  char gc_state() const;
  static address gc_state_addr();

  void set_concurrent_mark_in_progress(bool in_progress);
  void set_evacuation_in_progress(bool in_progress);
  void set_update_refs_in_progress(bool in_progress);
  void set_degenerated_gc_in_progress(bool in_progress);
  void set_full_gc_in_progress(bool in_progress);
  void set_full_gc_move_in_progress(bool in_progress);
  void set_concurrent_traversal_in_progress(bool in_progress);
  void set_has_forwarded_objects(bool cond);

  inline bool is_stable() const;
  inline bool is_idle() const;
  inline bool is_concurrent_mark_in_progress() const;
  inline bool is_update_refs_in_progress() const;
  inline bool is_evacuation_in_progress() const;
  inline bool is_degenerated_gc_in_progress() const;
  inline bool is_full_gc_in_progress() const;
  inline bool is_full_gc_move_in_progress() const;
  inline bool is_concurrent_traversal_in_progress() const;
  inline bool has_forwarded_objects() const;
  inline bool is_gc_in_progress_mask(uint mask) const;
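
  // A minimal sketch (illustrative): because the state is a bitmap, compound
  // checks can test several phases with one mask; the mask chosen here is an
  // example, not a prescribed combination.
  //
  //   ShenandoahHeap* heap = ShenandoahHeap::heap();
  //   if (heap->is_gc_in_progress_mask(MARKING | TRAVERSAL)) {
  //     // some concurrent marking-like phase is running: SATB barriers apply
  //   }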

// ---------- GC cancellation and degeneration machinery
//
// Cancelled GC flag is used to notify concurrent phases that they should terminate.
//
public:
  enum ShenandoahDegenPoint {
    _degenerated_unset,
    _degenerated_traversal,
    _degenerated_outside_cycle,
    _degenerated_mark,
    _degenerated_evac,
    _degenerated_updaterefs,
    _DEGENERATED_LIMIT
  };

  static const char* degen_point_to_string(ShenandoahDegenPoint point) {
    switch (point) {
      case _degenerated_unset:
        return "<UNSET>";
      case _degenerated_traversal:
        return "Traversal";
      case _degenerated_outside_cycle:
        return "Outside of Cycle";
      case _degenerated_mark:
        return "Mark";
      case _degenerated_evac:
        return "Evacuation";
      case _degenerated_updaterefs:
        return "Update Refs";
      default:
        ShouldNotReachHere();
        return "ERROR";
    }
  }

private:
  enum CancelState {
    // Normal state. GC has not been cancelled and is open for cancellation.
    // Worker threads can suspend for safepoint.
    CANCELLABLE,

    // GC has been cancelled. Worker threads cannot suspend for
    // safepoint but must finish their work as soon as possible.
    CANCELLED,

    // GC has not been cancelled and must not be cancelled. At least
    // one worker thread checks for pending safepoint and may suspend
    // if a safepoint is pending.
    NOT_CANCELLED
  };

  ShenandoahSharedEnumFlag<CancelState> _cancelled_gc;
  bool try_cancel_gc();

public:
  static address cancelled_gc_addr();

  inline bool cancelled_gc() const;
  inline bool check_cancelled_gc_and_yield(bool sts_active = true);

  inline void clear_cancelled_gc();

  void cancel_gc(GCCause::Cause cause);
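
  // A minimal sketch (illustrative): a concurrent phase polls for cancellation
  // and winds down promptly; has_work() and do_some_work() are hypothetical.
  //
  //   while (has_work()) {
  //     if (heap->check_cancelled_gc_and_yield()) {
  //       return; // cancelled: terminate this phase as soon as possible
  //     }
  //     do_some_work();
  //   }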

// ---------- GC operations entry points
//
public:
  // Entry points to STW GC operations; these cause a related safepoint that
  // then calls the entry method below.
  void vmop_entry_init_mark();
  void vmop_entry_final_mark();
  void vmop_entry_final_evac();
  void vmop_entry_init_updaterefs();
  void vmop_entry_final_updaterefs();
  void vmop_entry_init_traversal();
  void vmop_entry_final_traversal();
  void vmop_entry_full(GCCause::Cause cause);
  void vmop_degenerated(ShenandoahDegenPoint point);

  // Entry methods to normally STW GC operations. These set up logging and
  // monitoring, and provide the workers for the actual VM operation.
  void entry_init_mark();
  void entry_final_mark();
  void entry_final_evac();
  void entry_init_updaterefs();
  void entry_final_updaterefs();
  void entry_init_traversal();
  void entry_final_traversal();
  void entry_full(GCCause::Cause cause);
  void entry_degenerated(int point);

  // Entry methods to normally concurrent GC operations. These set up logging
  // and monitoring for the concurrent operation.
  void entry_reset();
  void entry_mark();
  void entry_preclean();
  void entry_cleanup();
  void entry_evac();
  void entry_updaterefs();
  void entry_traversal();
  void entry_uncommit(double shrink_before);

private:
  // Actual work for the phases
  void op_init_mark();
  void op_final_mark();
  void op_final_evac();
  void op_init_updaterefs();
  void op_final_updaterefs();
  void op_init_traversal();
  void op_final_traversal();
  void op_full(GCCause::Cause cause);
  void op_degenerated(ShenandoahDegenPoint point);
  void op_degenerated_fail();
  void op_degenerated_futile();

  void op_reset();
  void op_mark();
  void op_preclean();
  void op_cleanup();
  void op_conc_evac();
  void op_stw_evac();
  void op_updaterefs();
  void op_traversal();
  void op_uncommit(double shrink_before);

  // Messages for GC trace events; they have to be immortal because they are
  // passed around the logging/tracing systems.
  const char* init_mark_event_message() const;
  const char* final_mark_event_message() const;
  const char* conc_mark_event_message() const;
  const char* degen_event_message(ShenandoahDegenPoint point) const;

// ---------- GC subsystems
//
private:
  ShenandoahControlThread*   _control_thread;
  ShenandoahCollectorPolicy* _shenandoah_policy;
  ShenandoahHeuristics*      _heuristics;
  ShenandoahFreeSet*         _free_set;
  ShenandoahConcurrentMark*  _scm;
  ShenandoahTraversalGC*     _traversal_gc;
  ShenandoahMarkCompact*     _full_gc;
  ShenandoahPacer*           _pacer;
  ShenandoahVerifier*        _verifier;

  ShenandoahAllocTracker*    _alloc_tracker;
  ShenandoahPhaseTimings*    _phase_timings;

  ShenandoahControlThread* control_thread() { return _control_thread; }
  ShenandoahMarkCompact*   full_gc()        { return _full_gc;        }

public:
  ShenandoahCollectorPolicy* shenandoah_policy() const { return _shenandoah_policy; }
  ShenandoahHeuristics*      heuristics()       const { return _heuristics;        }
  ShenandoahFreeSet*         free_set()         const { return _free_set;          }
  ShenandoahConcurrentMark*  concurrent_mark()        { return _scm;               }
  ShenandoahTraversalGC*     traversal_gc()           { return _traversal_gc;      }
  ShenandoahPacer*           pacer()            const { return _pacer;             }

  ShenandoahPhaseTimings*    phase_timings()    const { return _phase_timings;     }
  ShenandoahAllocTracker*    alloc_tracker()    const { return _alloc_tracker;     }

  ShenandoahVerifier* verifier();

// ---------- VM subsystem bindings
//
private:
  ShenandoahMonitoringSupport* _monitoring_support;
  MemoryPool*                  _memory_pool;
  GCMemoryManager              _stw_memory_manager;
  GCMemoryManager              _cycle_memory_manager;
  ConcurrentGCTimer*           _gc_timer;
  SoftRefPolicy                _soft_ref_policy;

  // For exporting to SA
  int                          _log_min_obj_alignment_in_bytes;
public:
  ShenandoahMonitoringSupport* monitoring_support() { return _monitoring_support;    }
  GCMemoryManager* cycle_memory_manager()           { return &_cycle_memory_manager; }
  GCMemoryManager* stw_memory_manager()             { return &_stw_memory_manager;   }
  SoftRefPolicy* soft_ref_policy()                  { return &_soft_ref_policy;      }

  GrowableArray<GCMemoryManager*> memory_managers();
  GrowableArray<MemoryPool*> memory_pools();
  MemoryUsage memory_usage();
  GCTracer* tracer();
  GCTimer* gc_timer() const;

// ---------- Reference processing
//
private:
  AlwaysTrueClosure    _subject_to_discovery;
  ReferenceProcessor*  _ref_processor;
  ShenandoahSharedFlag _process_references;

  void ref_processing_init();

public:
  ReferenceProcessor* ref_processor() { return _ref_processor; }
  void set_process_references(bool pr);
  bool process_references() const;

// ---------- Class Unloading
//
private:
  ShenandoahSharedFlag _unload_classes;

public:
  void set_unload_classes(bool uc);
  bool unload_classes() const;

  // Delete entries for dead interned strings and clean up unreferenced symbols
  // in the symbol table, possibly in parallel.
  void unload_classes_and_cleanup_tables(bool full_gc);

// ---------- Generic interface hooks
// Minor things that the super-interface expects us to implement to play nice with
// the rest of the runtime. Some of the things here are not required to be implemented,
// and can be stubbed out.
//
public:
  AdaptiveSizePolicy* size_policy() shenandoah_not_implemented_return(NULL);
  bool is_maximal_no_gc() const shenandoah_not_implemented_return(false);

  bool is_in(const void* p) const;

  void collect(GCCause::Cause cause);
  void do_full_collection(bool clear_all_soft_refs);

  // Used for parsing heap during error printing
  HeapWord* block_start(const void* addr) const;
  bool block_is_obj(const HeapWord* addr) const;

  // Used for native heap walkers: heap dumpers, mostly
  void object_iterate(ObjectClosure* cl);
  void safe_object_iterate(ObjectClosure* cl);

  // Used by RMI
  jlong millis_since_last_gc();

// ---------- Safepoint interface hooks
//
public:
  void safepoint_synchronize_begin();
  void safepoint_synchronize_end();

// ---------- Code roots handling hooks
//
public:
  void register_nmethod(nmethod* nm);
  void unregister_nmethod(nmethod* nm);
  void flush_nmethod(nmethod* nm) {}
  void verify_nmethod(nmethod* nm) {}

// ---------- Pinning hooks
//
public:
  // Shenandoah supports per-object (per-region) pinning
  bool supports_object_pinning() const { return true; }

  oop pin_object(JavaThread* thread, oop obj);
  void unpin_object(JavaThread* thread, oop obj);

// ---------- Allocation support
//
private:
  HeapWord* allocate_memory_under_lock(ShenandoahAllocRequest& request, bool& in_new_region);
  inline HeapWord* allocate_from_gclab(Thread* thread, size_t size);
  HeapWord* allocate_from_gclab_slow(Thread* thread, size_t size);
  HeapWord* allocate_new_gclab(size_t min_size, size_t word_size, size_t* actual_size);
  void retire_and_reset_gclabs();

public:
  HeapWord* allocate_memory(ShenandoahAllocRequest& request);
  HeapWord* mem_allocate(size_t size, bool* what);
  MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                               size_t size,
                                               Metaspace::MetadataType mdtype);

  void notify_mutator_alloc_words(size_t words, bool waste);

  // Shenandoah supports TLAB allocation
  bool supports_tlab_allocation() const { return true; }

  HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size);
  size_t tlab_capacity(Thread* thr) const;
  size_t unsafe_max_tlab_alloc(Thread* thread) const;
  size_t max_tlab_size() const;
  size_t tlab_used(Thread* ignored) const;

  void resize_tlabs();

  void ensure_parsability(bool retire_tlabs);
  void make_parsable(bool retire_tlabs);
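
  // A minimal sketch (illustrative): shared-path allocations are described by
  // an explicit request object rather than a raw size; the for_shared() factory
  // is assumed from ShenandoahAllocRequest.
  //
  //   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(word_size);
  //   HeapWord* mem = heap->allocate_memory(req);
  //   if (mem == NULL) {
  //     // allocation failure path: a GC has been requested / is in progress
  //   }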

// ---------- Marking support
//
private:
  ShenandoahMarkingContext* _marking_context;
  MemRegion  _bitmap_region;
  MemRegion  _aux_bitmap_region;
  MarkBitMap _verification_bit_map;
  MarkBitMap _aux_bit_map;

  size_t _bitmap_size;
  size_t _bitmap_regions_per_slice;
  size_t _bitmap_bytes_per_slice;

  bool _bitmap_region_special;
  bool _aux_bitmap_region_special;

  // Used for buffering per-region liveness data.
  // Needed since ShenandoahHeapRegion uses atomics to update liveness.
  //
  // The array has max_workers elements, each of which is an array of
  // max_regions jushorts. The choice of jushort is not accidental: there is
  // a tradeoff between static/dynamic footprint, which translates into cache
  // pressure (already high during marking), and the number of atomic updates.
  // size_t/jint is too large, jbyte is too small.
  jushort** _liveness_cache;

public:
  inline ShenandoahMarkingContext* complete_marking_context() const;
  inline ShenandoahMarkingContext* marking_context() const;
  inline void mark_complete_marking_context();
  inline void mark_incomplete_marking_context();

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl);

  template<class T>
  inline void marked_object_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);

  template<class T>
  inline void marked_object_oop_iterate(ShenandoahHeapRegion* region, T* cl, HeapWord* limit);
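
  // A minimal sketch (illustrative): walking the live objects of one region
  // with an ObjectClosure-style visitor, assuming marking is complete; the
  // closure type is an example.
  //
  //   class LiveWordsClosure : public ObjectClosure {
  //   public:
  //     size_t _words;
  //     LiveWordsClosure() : _words(0) {}
  //     void do_object(oop obj) { _words += obj->size(); }
  //   };
  //
  //   LiveWordsClosure cl;
  //   heap->marked_object_iterate(region, &cl);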

  void reset_mark_bitmap();

  // SATB barriers hooks
  template<bool RESOLVE>
  inline bool requires_marking(const void* entry) const;
  void force_satb_flush_all_threads();

  // Support for bitmap uncommits
  bool commit_bitmap_slice(ShenandoahHeapRegion* r);
  bool uncommit_bitmap_slice(ShenandoahHeapRegion* r);
  bool is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self = false);

  // Liveness caching support
  jushort* get_liveness_cache(uint worker_id);
  void flush_liveness_cache(uint worker_id);
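
  // A minimal sketch (illustrative): a marking worker accumulates liveness in
  // its local cache and publishes it once at the end; callers are expected to
  // guard against jushort overflow, which is elided here.
  //
  //   jushort* ld = heap->get_liveness_cache(worker_id);
  //   ld[region_index] += (jushort) live_words; // uncontended, no atomics
  //   ...
  //   heap->flush_liveness_cache(worker_id);    // atomic publish to regions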

// ---------- Evacuation support
//
private:
  ShenandoahCollectionSet* _collection_set;
  ShenandoahEvacOOMHandler _oom_evac_handler;

  void evacuate_and_update_roots();

public:
  static address in_cset_fast_test_addr();

  ShenandoahCollectionSet* collection_set() const { return _collection_set; }

  template <class T>
  inline bool in_collection_set(T obj) const;

  // Avoid accidentally calling the method above with ShenandoahHeapRegion*, which would be *wrong*.
  inline bool in_collection_set(ShenandoahHeapRegion* r) shenandoah_not_implemented_return(false);

  // Evacuates object src. Returns the evacuated object, which may have been
  // evacuated either by this thread or by some other thread.
  inline oop evacuate_object(oop src, Thread* thread);

  // Call before/after evacuation.
  void enter_evacuation();
  void leave_evacuation();
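
  // A minimal sketch (illustrative): evacuation work is bracketed by the
  // OOM-during-evac protocol, and evacuate_object() resolves copy races by
  // returning the canonical forwardee.
  //
  //   heap->enter_evacuation();
  //   if (heap->in_collection_set(obj)) {
  //     oop fwd = heap->evacuate_object(obj, Thread::current());
  //     // fwd is the winning copy, whether made by this or another thread
  //   }
  //   heap->leave_evacuation();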

// ---------- Helper functions
//
public:
  template <class T>
  inline oop evac_update_with_forwarded(T* p);

  template <class T>
  inline oop maybe_update_with_forwarded(T* p);

  template <class T>
  inline oop maybe_update_with_forwarded_not_null(T* p, oop obj);

  template <class T>
  inline oop update_with_forwarded_not_null(T* p, oop obj);

  static inline oop cas_oop(oop n, narrowOop* addr, oop c);
  static inline oop cas_oop(oop n, oop* addr, oop c);
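
  // A minimal sketch (illustrative): an update-refs style oop closure funnels
  // every visited slot through the helper above, CASing forwarded targets back
  // into the slot; the closure shape is an example.
  //
  //   template <class T>
  //   inline void do_oop_work(T* p) {
  //     ShenandoahHeap::heap()->maybe_update_with_forwarded(p);
  //   }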

  void trash_humongous_region_at(ShenandoahHeapRegion* r);

  void deduplicate_string(oop str);

  void stop_concurrent_marking();

private:
  void trash_cset_regions();
  void update_heap_references(bool concurrent);

// ---------- Testing helper functions
//
private:
  ShenandoahSharedFlag _inject_alloc_failure;

  void try_inject_alloc_failure();
  bool should_inject_alloc_failure();
};

#endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAP_HPP