| 1 | /* |
| 2 | * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #include "precompiled.hpp" |
| 26 | #include "code/codeCache.hpp" |
| 27 | #include "code/nativeInst.hpp" |
| 28 | #include "gc/shared/barrierSetNMethod.hpp" |
| 29 | #include "logging/log.hpp" |
| 30 | #include "memory/resourceArea.hpp" |
| 31 | #include "runtime/sharedRuntime.hpp" |
| 32 | #include "runtime/thread.hpp" |
| 33 | #include "utilities/align.hpp" |
| 34 | #include "utilities/debug.hpp" |
| 35 | |
| 36 | class NativeNMethodCmpBarrier: public NativeInstruction { |
| 37 | public: |
| 38 | enum Intel_specific_constants { |
| 39 | instruction_code = 0x81, |
| 40 | instruction_size = 8, |
| 41 | imm_offset = 4, |
| 42 | instruction_rex_prefix = Assembler::REX | Assembler::REX_B, |
| 43 | instruction_modrm = 0x7f // [r15 + offset] |
| 44 | }; |
| 45 | |
| 46 | address instruction_address() const { return addr_at(0); } |
| 47 | address immediate_address() const { return addr_at(imm_offset); } |
| 48 | |
| 49 | jint get_immedate() const { return int_at(imm_offset); } |
| 50 | void set_immediate(jint imm) { set_int_at(imm_offset, imm); } |
| 51 | void verify() const; |
| 52 | }; |
| 53 | |
| 54 | void NativeNMethodCmpBarrier::verify() const { |
| 55 | if (((uintptr_t) instruction_address()) & 0x7) { |
| 56 | fatal("Not properly aligned" ); |
| 57 | } |
| 58 | |
| 59 | int prefix = ubyte_at(0); |
| 60 | if (prefix != instruction_rex_prefix) { |
| 61 | tty->print_cr("Addr: " INTPTR_FORMAT " Prefix: 0x%x" , p2i(instruction_address()), |
| 62 | prefix); |
| 63 | fatal("not a cmp barrier" ); |
| 64 | } |
| 65 | |
| 66 | int inst = ubyte_at(1); |
| 67 | if (inst != instruction_code) { |
| 68 | tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x" , p2i(instruction_address()), |
| 69 | inst); |
| 70 | fatal("not a cmp barrier" ); |
| 71 | } |
| 72 | |
| 73 | int modrm = ubyte_at(2); |
| 74 | if (modrm != instruction_modrm) { |
| 75 | tty->print_cr("Addr: " INTPTR_FORMAT " mod/rm: 0x%x" , p2i(instruction_address()), |
| 76 | modrm); |
| 77 | fatal("not a cmp barrier" ); |
| 78 | } |
| 79 | } |
| 80 | |
// Called from the entry-barrier stub when the nmethod must be deoptimized
// instead of entered. Rewrites stack slots so that, when the stub unwinds,
// control transfers to the handle-wrong-method stub with the caller's frame
// restored. return_address_ptr points at the slot holding the return address
// into the (dead) nmethod frame.
void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {
  /*
   * Stack picture on entry (higher addresses toward the top):
   *
   * [ callers frame ]
   * [ callers return address ] <- callers rsp
   * [ callers rbp ] <- callers rbp
   * [ callers frame slots ]
   * [ return_address ] <- return_address_ptr
   * [ cookie ] <- used to write the new rsp (callers rsp)
   * [ stub rbp ]
   * [ stub stuff ]
   */

  // Stub's saved rbp slot: two slots below the nmethod return address.
  address* stub_rbp = return_address_ptr - 2;
  // Skipping the whole nmethod frame lands on the caller's return-address slot.
  address* callers_rsp = return_address_ptr + nm->frame_size(); /* points to callers return_address now */
  // The caller's saved rbp sits one slot below its return address (see diagram).
  address* callers_rbp = callers_rsp - 1;
  // Cookie slot, pre-filled with -1 by the stub (asserted below before reuse).
  address* cookie = return_address_ptr - 1;

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    // Fetching the thread name is only paid for when tracing is enabled.
    Thread* thread = Thread::current();
    assert(thread->is_Java_thread(), "must be JavaThread" );
    JavaThread* jth = (JavaThread*) thread;
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %p, return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p" ,
                                nm, (address *) return_address_ptr, nm->is_osr_method(), jth,
                                jth->get_thread_name(), callers_rsp, nm->verified_entry_point());
  }

  // frame_size >= 3 guarantees the slots computed above do not overlap.
  assert(nm->frame_size() >= 3, "invariant" );
  // The stub wrote -1 into the cookie slot; verify we are looking at it.
  assert(*cookie == (address) -1, "invariant" );

  // Preserve caller rbp.
  *stub_rbp = *callers_rbp;

  // At the cookie address put the callers rsp.
  *cookie = (address) callers_rsp; // should point to the return address

  // In the slot that used to be the callers rbp we put the address that our stub needs to jump to at the end.
  // Overwriting the caller rbp should be okay since our stub rbp has the same value.
  address* jmp_addr_ptr = callers_rbp;
  *jmp_addr_ptr = SharedRuntime::get_handle_wrong_method_stub();
}
| 123 | |
// This is the offset of the entry barrier from where the frame is completed.
// If any code changes between the end of the verified entry where the entry
// barrier resides, and the completion of the frame, then
// NativeNMethodCmpBarrier::verify() will immediately complain when it does
// not find the expected native instruction at this offset, which needs updating.
// Note that this offset is invariant of PreserveFramePointer.
// NOTE(review): -19 is a code-layout invariant of the x86_64 verified-entry
// emission; any change to that emitted sequence requires updating this value.
static const int entry_barrier_offset = -19;

// Locate the entry-barrier CMP instruction inside nm by walking back
// entry_barrier_offset bytes from the frame-complete point. In debug builds
// the located bytes are verified to actually be the barrier instruction.
static NativeNMethodCmpBarrier* native_nmethod_barrier(nmethod* nm) {
  address barrier_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset;
  NativeNMethodCmpBarrier* barrier = reinterpret_cast<NativeNMethodCmpBarrier*>(barrier_address);
  debug_only(barrier->verify());
  return barrier;
}
| 138 | |
| 139 | void BarrierSetNMethod::disarm(nmethod* nm) { |
| 140 | if (!supports_entry_barrier(nm)) { |
| 141 | return; |
| 142 | } |
| 143 | |
| 144 | NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm); |
| 145 | cmp->set_immediate(disarmed_value()); |
| 146 | } |
| 147 | |
| 148 | bool BarrierSetNMethod::is_armed(nmethod* nm) { |
| 149 | if (!supports_entry_barrier(nm)) { |
| 150 | return false; |
| 151 | } |
| 152 | |
| 153 | NativeNMethodCmpBarrier* cmp = native_nmethod_barrier(nm); |
| 154 | return (disarmed_value() != cmp->get_immedate()); |
| 155 | } |
| 156 | |