| 1 | /* |
| 2 | * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #ifndef SHARE_GC_SHARED_COLLECTEDHEAP_HPP |
| 26 | #define SHARE_GC_SHARED_COLLECTEDHEAP_HPP |
| 27 | |
| 28 | #include "gc/shared/gcCause.hpp" |
| 29 | #include "gc/shared/gcWhen.hpp" |
| 30 | #include "gc/shared/verifyOption.hpp" |
| 31 | #include "memory/allocation.hpp" |
| 32 | #include "runtime/handles.hpp" |
| 33 | #include "runtime/perfData.hpp" |
| 34 | #include "runtime/safepoint.hpp" |
| 35 | #include "services/memoryUsage.hpp" |
| 36 | #include "utilities/debug.hpp" |
| 37 | #include "utilities/events.hpp" |
| 38 | #include "utilities/formatBuffer.hpp" |
| 39 | #include "utilities/growableArray.hpp" |
| 40 | |
| 41 | // A "CollectedHeap" is an implementation of a java heap for HotSpot. This |
| 42 | // is an abstract class: there may be many different kinds of heaps. This |
| 43 | // class defines the functions that a heap must implement, and contains |
| 44 | // infrastructure common to all heaps. |
| 45 | |
| 46 | class AdaptiveSizePolicy; |
| 47 | class BarrierSet; |
| 48 | class GCHeapSummary; |
| 49 | class GCTimer; |
| 50 | class GCTracer; |
| 51 | class GCMemoryManager; |
| 52 | class MemoryPool; |
| 53 | class MetaspaceSummary; |
| 54 | class SoftRefPolicy; |
| 55 | class Thread; |
| 56 | class ThreadClosure; |
| 57 | class VirtualSpaceSummary; |
| 58 | class WorkGang; |
| 59 | class nmethod; |
| 60 | |
class GCMessage : public FormatBuffer<1024> {
 public:
  bool is_before;

  GCMessage() {}
};
| 68 | |
| 69 | class CollectedHeap; |
| 70 | |
| 71 | class GCHeapLog : public EventLogBase<GCMessage> { |
| 72 | private: |
| 73 | void log_heap(CollectedHeap* heap, bool before); |
| 74 | |
| 75 | public: |
  GCHeapLog() : EventLogBase<GCMessage>("GC Heap History", "gc") {}
| 77 | |
| 78 | void log_heap_before(CollectedHeap* heap) { |
| 79 | log_heap(heap, true); |
| 80 | } |
| 81 | void log_heap_after(CollectedHeap* heap) { |
| 82 | log_heap(heap, false); |
| 83 | } |
| 84 | }; |
| 85 | |
| 86 | // |
| 87 | // CollectedHeap |
| 88 | // GenCollectedHeap |
| 89 | // SerialHeap |
| 90 | // CMSHeap |
| 91 | // G1CollectedHeap |
| 92 | // ParallelScavengeHeap |
| 93 | // ShenandoahHeap |
| 94 | // ZCollectedHeap |
| 95 | // |
| 96 | class CollectedHeap : public CHeapObj<mtInternal> { |
| 97 | friend class VMStructs; |
| 98 | friend class JVMCIVMStructs; |
  friend class IsGCActiveMark; // Block-structured external access to _is_gc_active
| 100 | friend class MemAllocator; |
| 101 | |
| 102 | private: |
| 103 | GCHeapLog* _gc_heap_log; |
| 104 | |
| 105 | MemRegion _reserved; |
| 106 | |
| 107 | protected: |
| 108 | bool _is_gc_active; |
| 109 | |
| 110 | // Used for filler objects (static, but initialized in ctor). |
| 111 | static size_t _filler_array_max_size; |
| 112 | |
| 113 | unsigned int _total_collections; // ... started |
| 114 | unsigned int _total_full_collections; // ... started |
| 115 | NOT_PRODUCT(volatile size_t _promotion_failure_alot_count;) |
| 116 | NOT_PRODUCT(volatile size_t _promotion_failure_alot_gc_number;) |
| 117 | |
  // Reason for the current garbage collection. Between collections this
  // should hold a value indicating that no collection is in progress.
| 120 | GCCause::Cause _gc_cause; |
| 121 | GCCause::Cause _gc_lastcause; |
| 122 | PerfStringVariable* _perf_gc_cause; |
| 123 | PerfStringVariable* _perf_gc_lastcause; |
| 124 | |
| 125 | // Constructor |
| 126 | CollectedHeap(); |
| 127 | |
  // Create a new TLAB. All TLAB allocations must go through this.
  // To allow more flexible TLAB allocations, min_size specifies the
  // minimum size needed, while requested_size is the requested size
  // based on ergonomics. The actually allocated size will be returned
  // in actual_size.
| 133 | virtual HeapWord* allocate_new_tlab(size_t min_size, |
| 134 | size_t requested_size, |
| 135 | size_t* actual_size); |
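
  // Illustrative sketch of the size contract above; MyHeap and
  // attempt_tlab_allocation() are hypothetical names, not part of this API:
  //
  //   HeapWord* MyHeap::allocate_new_tlab(size_t min_size,
  //                                       size_t requested_size,
  //                                       size_t* actual_size) {
  //     // Prefer the ergonomic size; fall back to the minimum if needed.
  //     size_t size = requested_size;
  //     HeapWord* mem = attempt_tlab_allocation(size);
  //     if (mem == NULL && min_size < requested_size) {
  //       size = min_size;
  //       mem = attempt_tlab_allocation(size);
  //     }
  //     if (mem != NULL) {
  //       *actual_size = size;  // report what was actually handed out
  //     }
  //     return mem;
  //   }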
| 136 | |
  // Reinitialize TLABs before resuming mutators.
| 138 | virtual void resize_all_tlabs(); |
| 139 | |
  // Raw memory allocation facilities.
  // The obj and array allocate methods are covers for these methods.
  // mem_allocate() should never be called to allocate TLABs, only
  // individual objects.
| 144 | virtual HeapWord* mem_allocate(size_t size, |
| 145 | bool* gc_overhead_limit_was_exceeded) = 0; |
| 146 | |
| 147 | // Filler object utilities. |
| 148 | static inline size_t filler_array_hdr_size(); |
| 149 | static inline size_t filler_array_min_size(); |
| 150 | |
| 151 | DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);) |
| 152 | DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);) |
| 153 | |
| 154 | // Fill with a single array; caller must ensure filler_array_min_size() <= |
| 155 | // words <= filler_array_max_size(). |
| 156 | static inline void fill_with_array(HeapWord* start, size_t words, bool zap = true); |
| 157 | |
| 158 | // Fill with a single object (either an int array or a java.lang.Object). |
| 159 | static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true); |
| 160 | |
| 161 | virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer); |
| 162 | |
| 163 | // Verification functions |
| 164 | virtual void check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) |
| 165 | PRODUCT_RETURN; |
  DEBUG_ONLY(static void check_for_valid_allocation_state();)
| 167 | |
| 168 | public: |
| 169 | enum Name { |
| 170 | None, |
| 171 | Serial, |
| 172 | Parallel, |
| 173 | CMS, |
| 174 | G1, |
| 175 | Epsilon, |
| 176 | Z, |
| 177 | Shenandoah |
| 178 | }; |
| 179 | |
| 180 | static inline size_t filler_array_max_size() { |
| 181 | return _filler_array_max_size; |
| 182 | } |
| 183 | |
| 184 | virtual Name kind() const = 0; |
| 185 | |
| 186 | virtual const char* name() const = 0; |
| 187 | |
| 188 | /** |
| 189 | * Returns JNI error code JNI_ENOMEM if memory could not be allocated, |
| 190 | * and JNI_OK on success. |
| 191 | */ |
| 192 | virtual jint initialize() = 0; |
| 193 | |
  // In many heaps, there will be a need to perform some initialization
  // activities after the Universe is fully formed, but before general heap
  // allocation is allowed. This is the correct place for such initialization
  // methods.
| 197 | virtual void post_initialize(); |
| 198 | |
  // Stop any ongoing concurrent work and prepare for exit.
| 200 | virtual void stop() {} |
| 201 | |
| 202 | // Stop and resume concurrent GC threads interfering with safepoint operations |
| 203 | virtual void safepoint_synchronize_begin() {} |
| 204 | virtual void safepoint_synchronize_end() {} |
| 205 | |
| 206 | void initialize_reserved_region(HeapWord *start, HeapWord *end); |
| 207 | MemRegion reserved_region() const { return _reserved; } |
| 208 | address base() const { return (address)reserved_region().start(); } |
| 209 | |
| 210 | virtual size_t capacity() const = 0; |
| 211 | virtual size_t used() const = 0; |
| 212 | |
| 213 | // Returns unused capacity. |
| 214 | virtual size_t unused() const; |
| 215 | |
| 216 | // Return "true" if the part of the heap that allocates Java |
| 217 | // objects has reached the maximal committed limit that it can |
| 218 | // reach, without a garbage collection. |
| 219 | virtual bool is_maximal_no_gc() const = 0; |
| 220 | |
| 221 | // Support for java.lang.Runtime.maxMemory(): return the maximum amount of |
| 222 | // memory that the vm could make available for storing 'normal' java objects. |
| 223 | // This is based on the reserved address space, but should not include space |
| 224 | // that the vm uses internally for bookkeeping or temporary storage |
| 225 | // (e.g., in the case of the young gen, one of the survivor |
| 226 | // spaces). |
| 227 | virtual size_t max_capacity() const = 0; |
| 228 | |
| 229 | // Returns "TRUE" if "p" points into the reserved area of the heap. |
| 230 | bool is_in_reserved(const void* p) const { |
| 231 | return _reserved.contains(p); |
| 232 | } |
| 233 | |
| 234 | bool is_in_reserved_or_null(const void* p) const { |
| 235 | return p == NULL || is_in_reserved(p); |
| 236 | } |
| 237 | |
| 238 | // Returns "TRUE" iff "p" points into the committed areas of the heap. |
| 239 | // This method can be expensive so avoid using it in performance critical |
| 240 | // code. |
| 241 | virtual bool is_in(const void* p) const = 0; |
| 242 | |
| 243 | DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == NULL || is_in(p); }) |
| 244 | |
| 245 | virtual uint32_t hash_oop(oop obj) const; |
| 246 | |
| 247 | void set_gc_cause(GCCause::Cause v) { |
| 248 | if (UsePerfData) { |
| 249 | _gc_lastcause = _gc_cause; |
| 250 | _perf_gc_lastcause->set_value(GCCause::to_string(_gc_lastcause)); |
| 251 | _perf_gc_cause->set_value(GCCause::to_string(v)); |
| 252 | } |
| 253 | _gc_cause = v; |
| 254 | } |
| 255 | GCCause::Cause gc_cause() { return _gc_cause; } |
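  // The cause is normally set and restored in a block-structured way via
  // GCCauseSetter (defined at the end of this file).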
| 256 | |
| 257 | virtual oop obj_allocate(Klass* klass, int size, TRAPS); |
| 258 | virtual oop array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS); |
| 259 | virtual oop class_allocate(Klass* klass, int size, TRAPS); |
| 260 | |
| 261 | // Utilities for turning raw memory into filler objects. |
| 262 | // |
| 263 | // min_fill_size() is the smallest region that can be filled. |
| 264 | // fill_with_objects() can fill arbitrary-sized regions of the heap using |
| 265 | // multiple objects. fill_with_object() is for regions known to be smaller |
| 266 | // than the largest array of integers; it uses a single object to fill the |
| 267 | // region and has slightly less overhead. |
| 268 | static size_t min_fill_size() { |
| 269 | return size_t(align_object_size(oopDesc::header_size())); |
| 270 | } |
| 271 | |
| 272 | static void fill_with_objects(HeapWord* start, size_t words, bool zap = true); |
| 273 | |
| 274 | static void fill_with_object(HeapWord* start, size_t words, bool zap = true); |
| 275 | static void fill_with_object(MemRegion region, bool zap = true) { |
| 276 | fill_with_object(region.start(), region.word_size(), zap); |
| 277 | } |
| 278 | static void fill_with_object(HeapWord* start, HeapWord* end, bool zap = true) { |
| 279 | fill_with_object(start, pointer_delta(end, start), zap); |
| 280 | } |
| 281 | |
| 282 | virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap); |
| 283 | virtual size_t min_dummy_object_size() const; |
| 284 | size_t tlab_alloc_reserve() const; |
| 285 | |
| 286 | // Return the address "addr" aligned by "alignment_in_bytes" if such |
| 287 | // an address is below "end". Return NULL otherwise. |
| 288 | inline static HeapWord* align_allocation_or_fail(HeapWord* addr, |
| 289 | HeapWord* end, |
| 290 | unsigned short alignment_in_bytes); |
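
  // Illustrative use (sketch): requesting a 16-byte-aligned address within
  // [addr, end); NULL means there was not enough room to align:
  //
  //   HeapWord* aligned = align_allocation_or_fail(addr, end, 16);
  //   if (aligned == NULL) {
  //     // fall back, e.g. allocate without the stricter alignment
  //   }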
| 291 | |
| 292 | // Some heaps may offer a contiguous region for shared non-blocking |
| 293 | // allocation, via inlined code (by exporting the address of the top and |
| 294 | // end fields defining the extent of the contiguous allocation region.) |
| 295 | |
| 296 | // This function returns "true" iff the heap supports this kind of |
| 297 | // allocation. (Default is "no".) |
| 298 | virtual bool supports_inline_contig_alloc() const { |
| 299 | return false; |
| 300 | } |
| 301 | // These functions return the addresses of the fields that define the |
| 302 | // boundaries of the contiguous allocation area. (These fields should be |
| 303 | // physically near to one another.) |
| 304 | virtual HeapWord* volatile* top_addr() const { |
| 305 | guarantee(false, "inline contiguous allocation not supported" ); |
| 306 | return NULL; |
| 307 | } |
| 308 | virtual HeapWord** end_addr() const { |
| 309 | guarantee(false, "inline contiguous allocation not supported" ); |
| 310 | return NULL; |
| 311 | } |
| 312 | |
  // Some heaps may be in an unparsable state at certain times between
  // collections. This may be necessary for efficient implementation of
  // certain allocation-related activities. Calling this function before
  // attempting to parse a heap ensures that the heap is in a parsable
  // state (provided other concurrent activity does not introduce
  // unparsability). It is normally expected, therefore, that this
  // method is invoked with the world stopped.
  // NOTE: if you override this method, make sure you call the superclass
  // implementation, CollectedHeap::ensure_parsability, so that the
  // non-generational part of the work gets done. See, for instance,
  // GenCollectedHeap::ensure_parsability().
| 325 | // The argument "retire_tlabs" controls whether existing TLABs |
| 326 | // are merely filled or also retired, thus preventing further |
| 327 | // allocation from them and necessitating allocation of new TLABs. |
| 328 | virtual void ensure_parsability(bool retire_tlabs); |
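
  // Illustrative override (MyHeap is hypothetical):
  //
  //   void MyHeap::ensure_parsability(bool retire_tlabs) {
  //     CollectedHeap::ensure_parsability(retire_tlabs);  // fill/retire TLABs
  //     // ... then make any collector-private regions walkable ...
  //   }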
| 329 | |
  // Section on thread-local allocation buffers (TLABs).
  // If the heap supports thread-local allocation buffers, it should override
  // the following methods:
  // Returns "true" iff the heap supports thread-local allocation buffers.
| 335 | virtual bool supports_tlab_allocation() const = 0; |
| 336 | |
| 337 | // The amount of space available for thread-local allocation buffers. |
| 338 | virtual size_t tlab_capacity(Thread *thr) const = 0; |
| 339 | |
| 340 | // The amount of used space for thread-local allocation buffers for the given thread. |
| 341 | virtual size_t tlab_used(Thread *thr) const = 0; |
| 342 | |
| 343 | virtual size_t max_tlab_size() const; |
| 344 | |
| 345 | // An estimate of the maximum allocation that could be performed |
| 346 | // for thread-local allocation buffers without triggering any |
| 347 | // collection or expansion activity. |
| 348 | virtual size_t unsafe_max_tlab_alloc(Thread *thr) const { |
| 349 | guarantee(false, "thread-local allocation buffers not supported" ); |
| 350 | return 0; |
| 351 | } |
| 352 | |
| 353 | // Perform a collection of the heap; intended for use in implementing |
| 354 | // "System.gc". This probably implies as full a collection as the |
| 355 | // "CollectedHeap" supports. |
| 356 | virtual void collect(GCCause::Cause cause) = 0; |
| 357 | |
| 358 | // Perform a full collection |
| 359 | virtual void do_full_collection(bool clear_all_soft_refs) = 0; |
| 360 | |
| 361 | // This interface assumes that it's being called by the |
| 362 | // vm thread. It collects the heap assuming that the |
| 363 | // heap lock is already held and that we are executing in |
| 364 | // the context of the vm thread. |
| 365 | virtual void collect_as_vm_thread(GCCause::Cause cause); |
| 366 | |
| 367 | virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data, |
| 368 | size_t size, |
| 369 | Metaspace::MetadataType mdtype); |
| 370 | |
| 371 | // Returns "true" iff there is a stop-world GC in progress. (I assume |
| 372 | // that it should answer "false" for the concurrent part of a concurrent |
| 373 | // collector -- dld). |
| 374 | bool is_gc_active() const { return _is_gc_active; } |
| 375 | |
| 376 | // Total number of GC collections (started) |
| 377 | unsigned int total_collections() const { return _total_collections; } |
| 378 | unsigned int total_full_collections() const { return _total_full_collections;} |
| 379 | |
| 380 | // Increment total number of GC collections (started) |
| 381 | // Should be protected but used by PSMarkSweep - cleanup for 1.4.2 |
| 382 | void increment_total_collections(bool full = false) { |
| 383 | _total_collections++; |
| 384 | if (full) { |
| 385 | increment_total_full_collections(); |
| 386 | } |
| 387 | } |
| 388 | |
| 389 | void increment_total_full_collections() { _total_full_collections++; } |
| 390 | |
  // Return the SoftRefPolicy for the heap.
| 392 | virtual SoftRefPolicy* soft_ref_policy() = 0; |
| 393 | |
| 394 | virtual MemoryUsage memory_usage(); |
| 395 | virtual GrowableArray<GCMemoryManager*> memory_managers() = 0; |
| 396 | virtual GrowableArray<MemoryPool*> memory_pools() = 0; |
| 397 | |
| 398 | // Iterate over all objects, calling "cl.do_object" on each. |
| 399 | virtual void object_iterate(ObjectClosure* cl) = 0; |
| 400 | |
| 401 | // Similar to object_iterate() except iterates only |
| 402 | // over live objects. |
| 403 | virtual void safe_object_iterate(ObjectClosure* cl) = 0; |
| 404 | |
| 405 | // NOTE! There is no requirement that a collector implement these |
| 406 | // functions. |
| 407 | // |
| 408 | // A CollectedHeap is divided into a dense sequence of "blocks"; that is, |
| 409 | // each address in the (reserved) heap is a member of exactly |
| 410 | // one block. The defining characteristic of a block is that it is |
| 411 | // possible to find its size, and thus to progress forward to the next |
| 412 | // block. (Blocks may be of different sizes.) Thus, blocks may |
| 413 | // represent Java objects, or they might be free blocks in a |
| 414 | // free-list-based heap (or subheap), as long as the two kinds are |
| 415 | // distinguishable and the size of each is determinable. |
| 416 | |
  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a block may be either an object or a
  // non-object.
| 421 | virtual HeapWord* block_start(const void* addr) const = 0; |
| 422 | |
| 423 | // Requires "addr" to be the start of a block, and returns "TRUE" iff |
| 424 | // the block is an object. |
| 425 | virtual bool block_is_obj(const HeapWord* addr) const = 0; |
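
  // Illustrative use (sketch): resolving an interior pointer to the object
  // containing it, for collectors that implement the block interface:
  //
  //   HeapWord* base = heap->block_start(addr);
  //   if (base != NULL && heap->block_is_obj(base)) {
  //     oop obj = oop(base);  // addr points into obj
  //   }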
| 426 | |
| 427 | // Returns the longest time (in ms) that has elapsed since the last |
| 428 | // time that any part of the heap was examined by a garbage collection. |
| 429 | virtual jlong millis_since_last_gc() = 0; |
| 430 | |
| 431 | // Perform any cleanup actions necessary before allowing a verification. |
| 432 | virtual void prepare_for_verify() = 0; |
| 433 | |
| 434 | // Generate any dumps preceding or following a full gc |
| 435 | private: |
| 436 | void full_gc_dump(GCTimer* timer, bool before); |
| 437 | |
| 438 | virtual void initialize_serviceability() = 0; |
| 439 | |
| 440 | public: |
| 441 | void pre_full_gc_dump(GCTimer* timer); |
| 442 | void post_full_gc_dump(GCTimer* timer); |
| 443 | |
| 444 | virtual VirtualSpaceSummary create_heap_space_summary(); |
| 445 | GCHeapSummary create_heap_summary(); |
| 446 | |
| 447 | MetaspaceSummary create_metaspace_summary(); |
| 448 | |
| 449 | // Print heap information on the given outputStream. |
| 450 | virtual void print_on(outputStream* st) const = 0; |
| 451 | // The default behavior is to call print_on() on tty. |
| 452 | virtual void print() const; |
| 453 | |
| 454 | // Print more detailed heap information on the given |
| 455 | // outputStream. The default behavior is to call print_on(). It is |
| 456 | // up to each subclass to override it and add any additional output |
| 457 | // it needs. |
| 458 | virtual void print_extended_on(outputStream* st) const { |
| 459 | print_on(st); |
| 460 | } |
| 461 | |
| 462 | virtual void print_on_error(outputStream* st) const; |
| 463 | |
| 464 | // Print all GC threads (other than the VM thread) |
| 465 | // used by this heap. |
| 466 | virtual void print_gc_threads_on(outputStream* st) const = 0; |
| 467 | // The default behavior is to call print_gc_threads_on() on tty. |
| 468 | void print_gc_threads() { |
| 469 | print_gc_threads_on(tty); |
| 470 | } |
| 471 | // Iterator for all GC threads (other than VM thread) |
| 472 | virtual void gc_threads_do(ThreadClosure* tc) const = 0; |
| 473 | |
| 474 | // Print any relevant tracing info that flags imply. |
| 475 | // Default implementation does nothing. |
| 476 | virtual void print_tracing_info() const = 0; |
| 477 | |
| 478 | void print_heap_before_gc(); |
| 479 | void print_heap_after_gc(); |
| 480 | |
| 481 | // Registering and unregistering an nmethod (compiled code) with the heap. |
| 482 | virtual void register_nmethod(nmethod* nm) = 0; |
| 483 | virtual void unregister_nmethod(nmethod* nm) = 0; |
| 484 | // Callback for when nmethod is about to be deleted. |
| 485 | virtual void flush_nmethod(nmethod* nm) = 0; |
| 486 | virtual void verify_nmethod(nmethod* nm) = 0; |
| 487 | |
| 488 | void trace_heap_before_gc(const GCTracer* gc_tracer); |
| 489 | void trace_heap_after_gc(const GCTracer* gc_tracer); |
| 490 | |
| 491 | // Heap verification |
| 492 | virtual void verify(VerifyOption option) = 0; |
| 493 | |
| 494 | // Return true if concurrent phase control (via |
| 495 | // request_concurrent_phase_control) is supported by this collector. |
| 496 | // The default implementation returns false. |
| 497 | virtual bool supports_concurrent_phase_control() const; |
| 498 | |
| 499 | // Request the collector enter the indicated concurrent phase, and |
| 500 | // wait until it does so. Supports WhiteBox testing. Only one |
| 501 | // request may be active at a time. Phases are designated by name; |
| 502 | // the set of names and their meaning is GC-specific. Once the |
| 503 | // requested phase has been reached, the collector will attempt to |
| 504 | // avoid transitioning to a new phase until a new request is made. |
| 505 | // [Note: A collector might not be able to remain in a given phase. |
| 506 | // For example, a full collection might cancel an in-progress |
| 507 | // concurrent collection.] |
| 508 | // |
| 509 | // Returns true when the phase is reached. Returns false for an |
| 510 | // unknown phase. The default implementation returns false. |
| 511 | virtual bool request_concurrent_phase(const char* phase); |
| 512 | |
| 513 | // Provides a thread pool to SafepointSynchronize to use |
| 514 | // for parallel safepoint cleanup. |
| 515 | // GCs that use a GC worker thread pool may want to share |
| 516 | // it for use during safepoint cleanup. This is only possible |
| 517 | // if the GC can pause and resume concurrent work (e.g. G1 |
| 518 | // concurrent marking) for an intermittent non-GC safepoint. |
| 519 | // If this method returns NULL, SafepointSynchronize will |
| 520 | // perform cleanup tasks serially in the VMThread. |
| 521 | virtual WorkGang* get_safepoint_workers() { return NULL; } |
| 522 | |
| 523 | // Support for object pinning. This is used by JNI Get*Critical() |
| 524 | // and Release*Critical() family of functions. If supported, the GC |
| 525 | // must guarantee that pinned objects never move. |
| 526 | virtual bool supports_object_pinning() const; |
| 527 | virtual oop pin_object(JavaThread* thread, oop obj); |
| 528 | virtual void unpin_object(JavaThread* thread, oop obj); |
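
  // Illustrative sketch of the pattern behind JNI's Get/Release*Critical when
  // the collector returns true from supports_object_pinning():
  //
  //   oop pinned = heap->pin_object(thread, obj);   // obj will not move
  //   // ... raw access to the pinned object's payload ...
  //   heap->unpin_object(thread, pinned);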
| 529 | |
| 530 | // Deduplicate the string, iff the GC supports string deduplication. |
| 531 | virtual void deduplicate_string(oop str); |
| 532 | |
| 533 | virtual bool is_oop(oop object) const; |
| 534 | |
| 535 | virtual size_t obj_size(oop obj) const; |
| 536 | |
  // Cells are memory slices allocated by the allocator. Objects are
  // initialized in cells. The cell itself may have a header, found at a
  // negative offset from the oop. Usually the size of the cell header
  // is 0, but it may be larger.
  virtual ptrdiff_t cell_header_size() const { return 0; }
| 541 | |
  // Non-product verification and debugging.
| 543 | #ifndef PRODUCT |
| 544 | // Support for PromotionFailureALot. Return true if it's time to cause a |
| 545 | // promotion failure. The no-argument version uses |
| 546 | // this->_promotion_failure_alot_count as the counter. |
| 547 | bool promotion_should_fail(volatile size_t* count); |
| 548 | bool promotion_should_fail(); |
| 549 | |
| 550 | // Reset the PromotionFailureALot counters. Should be called at the end of a |
| 551 | // GC in which promotion failure occurred. |
| 552 | void reset_promotion_should_fail(volatile size_t* count); |
| 553 | void reset_promotion_should_fail(); |
| 554 | #endif // #ifndef PRODUCT |
| 555 | }; |
| 556 | |
| 557 | // Class to set and reset the GC cause for a CollectedHeap. |
| 558 | |
| 559 | class GCCauseSetter : StackObj { |
| 560 | CollectedHeap* _heap; |
| 561 | GCCause::Cause _previous_cause; |
| 562 | public: |
| 563 | GCCauseSetter(CollectedHeap* heap, GCCause::Cause cause) { |
| 564 | _heap = heap; |
| 565 | _previous_cause = _heap->gc_cause(); |
| 566 | _heap->set_gc_cause(cause); |
| 567 | } |
| 568 | |
| 569 | ~GCCauseSetter() { |
| 570 | _heap->set_gc_cause(_previous_cause); |
| 571 | } |
| 572 | }; |
| 573 | |
| 574 | #endif // SHARE_GC_SHARED_COLLECTEDHEAP_HPP |
| 575 | |