/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_GCLOCKER_HPP
#define SHARE_GC_SHARED_GCLOCKER_HPP

#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

class JavaThread;

// The direct lock/unlock calls do not force a collection if an unlock
// decrements the count to zero. Avoid calling these if at all possible.

class GCLocker: public AllStatic {
 private:
  // The _jni_lock_count keeps track of the number of threads that are
  // currently in a critical region. It's only kept up to date when
  // _needs_gc is true. The current value is computed during
  // safepointing and decremented during the slow path of GCLocker
  // unlocking.
  static volatile jint _jni_lock_count;  // number of threads in JNI critical regions
  static volatile bool _needs_gc;        // heap is filling, we need a GC
                                         // note: bool is typedef'd as jint
  static volatile bool _doing_gc;        // unlock_critical() is doing a GC

#ifdef ASSERT
  // This lock count is updated for all operations and is used to
  // validate the jni_lock_count that is computed during safepoints.
  static volatile jint _debug_jni_lock_count;
#endif

  // At a safepoint, visit all threads and count the number of active
  // critical sections. This is used to ensure the global count is valid.
  static void verify_critical_count() NOT_DEBUG_RETURN;
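
  // As an illustration of that check, a debug-only verification could tally
  // the per-thread critical counts at a safepoint and compare the total with
  // _jni_lock_count. The sketch below is only an approximation of what
  // gcLocker.cpp does, not its actual definition:
  //
  //   #ifdef ASSERT
  //   void GCLocker::verify_critical_count() {
  //     if (is_at_safepoint()) {
  //       int count = 0;
  //       for (JavaThreadIteratorWithHandle jtiwh; JavaThread* thr = jtiwh.next(); ) {
  //         if (thr->in_critical()) {
  //           count++;
  //         }
  //       }
  //       assert(!needs_gc() || _jni_lock_count == count, "must agree at a safepoint");
  //     }
  //   }
  //   #endif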

  static void jni_lock(JavaThread* thread);
  static void jni_unlock(JavaThread* thread);

  static bool is_active_internal() {
    verify_critical_count();
    return _jni_lock_count > 0;
  }

  static void log_debug_jni(const char* msg);

  static bool is_at_safepoint();

 public:
  // Accessors
  static bool is_active() {
    assert(GCLocker::is_at_safepoint(), "only read at safepoint");
    return is_active_internal();
  }
  static bool needs_gc() { return _needs_gc; }

  // Shorthand
  static bool is_active_and_needs_gc() {
    // Use is_active_internal since _needs_gc can change from true to
    // false outside of a safepoint, triggering the assert in
    // is_active.
    return needs_gc() && is_active_internal();
  }

  // In debug mode track the locking state at all times
  static void increment_debug_jni_lock_count() NOT_DEBUG_RETURN;
  static void decrement_debug_jni_lock_count() NOT_DEBUG_RETURN;

  // Set the current lock count
  static void set_jni_lock_count(int count) {
    _jni_lock_count = count;
    verify_critical_count();
  }

  // Sets _needs_gc if is_active() is true. Returns is_active().
  static bool check_active_before_gc();

  // Stalls the caller (who should not be in a jni critical section)
  // until needs_gc() clears. Note however that needs_gc() may be
  // set at a subsequent safepoint and/or cleared under the
  // JNICritical_lock, so the caller may not safely assert upon
  // return from this method that "!needs_gc()" since that is
  // not a stable predicate.
  static void stall_until_clear();
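
  // For illustration only, the two sides of this handshake typically look
  // roughly like the following sketch. It is hedged pseudocode rather than
  // actual HotSpot callers; names such as do_collection() and
  // retry_allocation() are hypothetical:
  //
  //   // GC side, at a safepoint:
  //   if (GCLocker::check_active_before_gc()) {
  //     // _needs_gc is now set; the last thread leaving a critical
  //     // region will arrange for the deferred collection.
  //     return;
  //   }
  //   do_collection();
  //
  //   // Allocation side, outside any JNI critical section:
  //   if (GCLocker::is_active_and_needs_gc()) {
  //     GCLocker::stall_until_clear();  // wait for the deferred GC
  //     result = retry_allocation();    // hypothetical retry helper
  //   }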

  // The following two methods are used for JNI critical regions.
  // If we find that we failed to perform a GC because the GCLocker
  // was active, arrange for one as soon as possible by allowing
  // all threads in critical regions to complete, but not allowing
  // other critical regions to be entered. The reasons for that are:
  // 1) a GC request won't be starved by overlapping JNI critical
  //    region activities, which can cause unnecessary OutOfMemory errors.
  // 2) even if allocation requests can still be satisfied before the GC locker
  //    becomes inactive, for example, in the tenured generation possibly with
  //    heap expansion, those allocations can trigger lots of safepointing
  //    attempts (ineffective GC attempts) and require the Heap_lock, which
  //    slows down allocations tremendously.
  //
  // Note that critical regions can be nested in a single thread, so
  // we must allow threads already in critical regions to continue.
  //
  // JNI critical regions are the only participants in this scheme
  // because they are, by spec, well bounded while in a critical region.
  //
  // Each of the following two methods is split into a fast path and a
  // slow path. JNICritical_lock is only grabbed in the slow path.
  // _needs_gc is initially false and every Java thread will go
  // through the fast path, which simply increments or decrements the
  // current thread's critical count. When GC happens at a safepoint,
  // GCLocker::is_active() is checked. Since there is no safepoint in
  // the fast path of lock_critical() and unlock_critical(), there is
  // no race condition between the fast path and GC. After _needs_gc
  // is set at a safepoint, every thread will go through the slow path
  // after the safepoint: each of the following two methods is then
  // either entered from the method entry, falling into the slow path,
  // or resumed from a safepoint inside the method, and such safepoints
  // exist only in the slow path. So when _needs_gc is set, the slow
  // path is always taken until _needs_gc is cleared; a sketch of both
  // paths follows the declarations below.
  inline static void lock_critical(JavaThread* thread);
  inline static void unlock_critical(JavaThread* thread);
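
  // For illustration, the fast-path/slow-path split described above looks
  // roughly like the sketch below. This is an approximation, not the actual
  // definitions, which live in gcLocker.inline.hpp:
  //
  //   inline void GCLocker::lock_critical(JavaThread* thread) {
  //     if (!thread->in_critical()) {
  //       if (needs_gc()) {
  //         jni_lock(thread);        // slow path: grabs JNICritical_lock
  //         return;
  //       }
  //       increment_debug_jni_lock_count();
  //     }
  //     thread->enter_critical();    // fast path: per-thread count only
  //   }
  //
  //   inline void GCLocker::unlock_critical(JavaThread* thread) {
  //     if (thread->in_last_critical()) {
  //       if (needs_gc()) {
  //         jni_unlock(thread);      // slow path: may start the deferred GC
  //         return;
  //       }
  //       decrement_debug_jni_lock_count();
  //     }
  //     thread->exit_critical();
  //   }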

  static address needs_gc_address() { return (address) &_needs_gc; }
};

#endif // SHARE_GC_SHARED_GCLOCKER_HPP