/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_NATIVEINST_X86_HPP
#define CPU_X86_NATIVEINST_X86_HPP

#include "asm/assembler.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativePltCall
// - - NativeCall
// - - NativeCallReg
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeLoadAddress
// - - NativeLoadGot
// - - NativeJump
// - - NativeFarJump
// - - NativeGeneralJump
// - - NativeGotJump
// - - NativePopReg
// - - NativeIllegalInstruction
// - - NativeReturn
// - - NativeReturnX (return with argument)
// - - NativeTstRegMem

// The base class for different kinds of native instruction abstractions.
// It provides the primitive operations to read and manipulate code
// relative to the instruction's own address (this).

class NativeInstruction {
  friend class Relocation;

 public:
  enum Intel_specific_constants {
    nop_instruction_code = 0x90,
    nop_instruction_size = 1
  };

  bool is_nop()                        { return ubyte_at(0) == nop_instruction_code; }
  inline bool is_call();
  inline bool is_call_reg();
  inline bool is_illegal();
  inline bool is_return();
  inline bool is_jump();
  inline bool is_jump_reg();
  inline bool is_far_jump();
  inline bool is_cond_jump();
  inline bool is_safepoint_poll();
  inline bool is_mov_literal64();

 protected:
  address addr_at(int offset) const    { return address(this) + offset; }

  s_char sbyte_at(int offset) const    { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const    { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const        { return *(jint*) addr_at(offset); }

  intptr_t ptr_at(int offset) const    { return *(intptr_t*) addr_at(offset); }

  oop oop_at(int offset) const         { return *(oop*) addr_at(offset); }


  void set_char_at(int offset, char c)        { *addr_at(offset) = (u_char)c; wrote(offset); }
  void set_int_at(int offset, jint i)         { *(jint*)addr_at(offset) = i;  wrote(offset); }
  void set_ptr_at(int offset, intptr_t ptr)   { *(intptr_t*) addr_at(offset) = ptr;  wrote(offset); }
  void set_oop_at(int offset, oop o)          { *(oop*) addr_at(offset) = o;  wrote(offset); }

  // This doesn't really do anything on Intel, but it is the place where
  // cache invalidation belongs, generically:
  void wrote(int offset);

 public:

  // unit test stuff
  static void test() {}                 // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);
};

inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
  //inst->verify();
#endif
  return inst;
}
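
// Illustrative sketch (an editor's addition, not from the original
// header): decoding typically starts at the base class and narrows via
// the is_* predicates; `pc` is a hypothetical code address.
//
//   NativeInstruction* ni = nativeInstruction_at(pc);
//   if (ni->is_call()) {
//     // safe to reinterpret the same bytes via nativeCall_at(pc)
//   } else if (ni->is_nop()) {
//     pc += NativeInstruction::nop_instruction_size;  // skip the 0x90 byte
//   }
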
| 110 | |
| 111 | class NativePltCall: public NativeInstruction { |
| 112 | public: |
| 113 | enum Intel_specific_constants { |
| 114 | instruction_code = 0xE8, |
| 115 | instruction_size = 5, |
| 116 | instruction_offset = 0, |
| 117 | displacement_offset = 1, |
| 118 | return_address_offset = 5 |
| 119 | }; |
| 120 | address instruction_address() const { return addr_at(instruction_offset); } |
| 121 | address next_instruction_address() const { return addr_at(return_address_offset); } |
| 122 | address displacement_address() const { return addr_at(displacement_offset); } |
| 123 | int displacement() const { return (jint) int_at(displacement_offset); } |
| 124 | address return_address() const { return addr_at(return_address_offset); } |
| 125 | address destination() const; |
| 126 | address plt_entry() const; |
| 127 | address plt_jump() const; |
| 128 | address plt_load_got() const; |
| 129 | address plt_resolve_call() const; |
| 130 | address plt_c2i_stub() const; |
| 131 | void set_stub_to_clean(); |
| 132 | |
| 133 | void reset_to_plt_resolve_call(); |
| 134 | void set_destination_mt_safe(address dest); |
| 135 | |
| 136 | void verify() const; |
| 137 | }; |
| 138 | |
| 139 | inline NativePltCall* nativePltCall_at(address address) { |
| 140 | NativePltCall* call = (NativePltCall*) address; |
| 141 | #ifdef ASSERT |
| 142 | call->verify(); |
| 143 | #endif |
| 144 | return call; |
| 145 | } |
| 146 | |
| 147 | inline NativePltCall* nativePltCall_before(address addr) { |
| 148 | address at = addr - NativePltCall::instruction_size; |
| 149 | return nativePltCall_at(at); |
| 150 | } |
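
// Illustrative sketch (editor's addition): given a hypothetical return
// address `ret_addr` of a PLT call site, the call itself starts
// instruction_size bytes earlier, so these two lookups are equivalent:
//
//   NativePltCall* a = nativePltCall_before(ret_addr);
//   NativePltCall* b = nativePltCall_at(ret_addr - NativePltCall::instruction_size);
//   assert(a == b, "same call site");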

class NativeCall;
inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native
// call imm32/rel32off instructions (used to manipulate inline caches,
// primitive & DLL calls, etc.).

class NativeCall: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code       = 0xE8,
    instruction_size       =    5,
    instruction_offset     =    0,
    displacement_offset    =    1,
    return_address_offset  =    5
  };

  enum { cache_line_size = BytesPerWord };  // conservative estimate!

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(return_address_offset); }
  int   displacement() const                { return (jint) int_at(displacement_offset); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  address return_address() const            { return addr_at(return_address_offset); }
  address destination() const;
  void  set_destination(address dest)       {
#ifdef AMD64
    intptr_t disp = dest - return_address();
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
    set_int_at(displacement_offset, dest - return_address());
  }
  void  set_destination_mt_safe(address dest);

  void  verify_alignment() { assert((intptr_t)addr_at(displacement_offset) % BytesPerInt == 0, "must be aligned"); }
  void  verify();
  void  print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_at(address instr) {
    return ((*instr) & 0xFF) == NativeCall::instruction_code;
  }

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
      nativeCall_at(instr)->destination() == target;
  }

#if INCLUDE_AOT
  static bool is_far_call(address instr, address target) {
    intptr_t disp = target - (instr + sizeof(int32_t));
    return !Assembler::is_simm32(disp);
  }
#endif

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);
};

inline NativeCall* nativeCall_at(address address) {
  NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}
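
// Illustrative sketch (editor's addition): patching the target of a call
// site whose return address is known, e.g. from a frame walk. `ret_addr`
// and `new_entry` are hypothetical values supplied by the caller.
//
//   if (NativeCall::is_call_before(ret_addr)) {
//     NativeCall* call = nativeCall_before(ret_addr);
//     call->set_destination_mt_safe(new_entry);  // rewrites the 4-byte
//   }                                            // displacement atomically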

class NativeCallReg: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code             = 0xFF,
    instruction_offset           =    0,
    return_address_offset_norex  =    2,
    return_address_offset_rex    =    3
  };

  int next_instruction_offset() const {
    if (ubyte_at(0) == NativeCallReg::instruction_code) {
      return return_address_offset_norex;
    } else {
      return return_address_offset_rex;
    }
  }
};

// An interface for accessing/manipulating native mov reg, imm32 instructions
// (used to manipulate inlined 32-bit data, DLL calls, etc.).
class NativeMovConstReg: public NativeInstruction {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_code         = 0xB8,
    instruction_size         =    1 + rex_size + wordSize,
    instruction_offset       =    0,
    data_offset              =    1 + rex_size,
    next_instruction_offset  =    instruction_size,
    register_mask            = 0x07
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  intptr_t data() const                     { return ptr_at(data_offset); }
  void  set_data(intptr_t x)                { set_ptr_at(data_offset, x); }

  void  verify();
  void  print();

  // unit test stuff
  static void test() {}

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

inline NativeMovConstReg* nativeMovConstReg_before(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}
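
// Illustrative sketch (editor's addition): rewriting the inlined
// constant of a mov reg, imm. `pc` points at the start of the mov (its
// REX prefix on AMD64); `new_value` is a hypothetical constant.
//
//   NativeMovConstReg* mov = nativeMovConstReg_at(pc);
//   intptr_t old_value = mov->data();  // read the inlined immediate
//   mov->set_data(new_value);          // patch wordSize bytes in place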

class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};

// An interface for accessing/manipulating native moves of the form:
//      mov[b/w/l/q] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l/q] reg, [reg + offset]   (instruction_code_mem2reg)
//      mov[s/z]x[w/b/q] reg, [reg + offset]
//      fld_s  [reg+offset]
//      fld_d  [reg+offset]
//      fstp_s [reg + offset]
//      fstp_d [reg + offset]
//      mov_literal64 scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte, word, or long
// macros. For example, the load_unsigned_byte macro generates an
// xor reg, reg instruction prior to the movb instruction; this class
// must skip the xor instruction.

class NativeMovRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide_lo       = Assembler::REX,
    instruction_prefix_wide_hi       = Assembler::REX_WRXB,
    instruction_code_xor             = 0x33,
    instruction_extended_prefix      = 0x0F,
    instruction_code_mem2reg_movslq  = 0x63,
    instruction_code_mem2reg_movzxb  = 0xB6,
    instruction_code_mem2reg_movsxb  = 0xBE,
    instruction_code_mem2reg_movzxw  = 0xB7,
    instruction_code_mem2reg_movsxw  = 0xBF,
    instruction_operandsize_prefix   = 0x66,
    instruction_code_reg2mem         = 0x89,
    instruction_code_mem2reg         = 0x8b,
    instruction_code_reg2memb        = 0x88,
    instruction_code_mem2regb        = 0x8a,
    instruction_code_float_s         = 0xd9,
    instruction_code_float_d         = 0xdd,
    instruction_code_long_volatile   = 0xdf,
    instruction_code_xmm_ss_prefix   = 0xf3,
    instruction_code_xmm_sd_prefix   = 0xf2,
    instruction_code_xmm_code        = 0x0f,
    instruction_code_xmm_load        = 0x10,
    instruction_code_xmm_store       = 0x11,
    instruction_code_xmm_lpd         = 0x12,

    instruction_code_lea             = 0x8d,

    instruction_VEX_prefix_2bytes    = Assembler::VEX_2bytes,
    instruction_VEX_prefix_3bytes    = Assembler::VEX_3bytes,
    instruction_EVEX_prefix_4bytes   = Assembler::EVEX_4bytes,

    instruction_size                 = 4,
    instruction_offset               = 0,
    data_offset                      = 2,
    next_instruction_offset          = 4
  };

  // helper
  int instruction_start() const;

  address instruction_address() const;

  address next_instruction_address() const;

  int offset() const;

  void set_offset(int x);

  void add_offset_in_bytes(int add_offset) { set_offset(offset() + add_offset); }

  void verify();
  void print();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at(address address);
};

inline NativeMovRegMem* nativeMovRegMem_at(address address) {
  NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}
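
// Illustrative sketch (editor's addition): sliding a memory access by a
// constant number of bytes. `pc` and `delta` are hypothetical values.
//
//   NativeMovRegMem* mov = nativeMovRegMem_at(pc);
//   mov->add_offset_in_bytes(delta);   // equivalent to offset() += delta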

// An interface for accessing/manipulating native leal instructions of
// the form: leal reg, [reg + offset].

class NativeLoadAddress: public NativeMovRegMem {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide           = Assembler::REX_W,
    instruction_prefix_wide_extended  = Assembler::REX_WB,
    lea_instruction_code              = 0x8D,
    mov64_instruction_code            = 0xB8
  };

  void verify();
  void print();

  // unit test stuff
  static void test() {}

 private:
  friend NativeLoadAddress* nativeLoadAddress_at(address address) {
    NativeLoadAddress* test = (NativeLoadAddress*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};

// A rip-relative load from the GOT; the destination is rbx or rax:
//   mov rbx|rax, [rip + offset]
class NativeLoadGot: public NativeInstruction {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif
 public:
  enum Intel_specific_constants {
    rex_prefix = 0x48,
    instruction_code = 0x8b,
    modrm_rbx_code = 0x1d,
    modrm_rax_code = 0x05,
    instruction_length = 6 + rex_size,
    offset_offset = 2 + rex_size
  };

  address instruction_address() const { return addr_at(0); }
  address rip_offset_address() const { return addr_at(offset_offset); }
  int rip_offset() const { return int_at(offset_offset); }
  address return_address() const { return addr_at(instruction_length); }
  address got_address() const { return return_address() + rip_offset(); }
  address next_instruction_address() const { return return_address(); }
  intptr_t data() const;
  void set_data(intptr_t data) {
    intptr_t *addr = (intptr_t *) got_address();
    *addr = data;
  }

  void verify() const;
 private:
  void report_and_fail() const;
};

inline NativeLoadGot* nativeLoadGot_at(address addr) {
  NativeLoadGot* load = (NativeLoadGot*) addr;
#ifdef ASSERT
  load->verify();
#endif
  return load;
}
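
// Illustrative sketch (editor's addition): the load goes through a
// rip-relative GOT slot, so got_address() == return_address() +
// rip_offset(), and rebinding writes the slot rather than the
// instruction bytes. `pc` and `new_value` are hypothetical.
//
//   NativeLoadGot* load = nativeLoadGot_at(pc);
//   intptr_t v = load->data();   // reads *(intptr_t*)load->got_address()
//   load->set_data(new_value);   // stores back through the GOT slot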

// jump rel32off

class NativeJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code         = 0xe9,
    instruction_size         =    5,
    instruction_offset       =    0,
    data_offset              =    1,
    next_instruction_offset  =    5
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  address jump_destination() const          {
    address dest = (int_at(data_offset) + next_instruction_address());
    // 32-bit used to encode an unresolved jmp as jmp -1; 64-bit can't
    // produce this, so it used a jump to self instead. Now both 32-bit
    // and 64-bit use jump-to-self as the unresolved address, which the
    // inline cache code (and relocs) know about.

    // Return -1 if this is a jump to self.
    dest = (dest == (address) this) ? (address) -1 : dest;
    return dest;
  }

  void  set_jump_destination(address dest)  {
    intptr_t val = dest - next_instruction_address();
    if (dest == (address) -1) {
      val = -5; // jump to self
    }
#ifdef AMD64
    assert((labs(val) & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1");
#endif // AMD64
    set_int_at(data_offset, (jint)val);
  }

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address address) {
  NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}
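
// Illustrative sketch (editor's addition) of the jump-to-self convention
// documented in jump_destination(). `pc` is a hypothetical address of a
// jmp rel32off instruction.
//
//   NativeJump* jump = nativeJump_at(pc);
//   jump->set_jump_destination((address)-1);  // encoded as a jump to self
//   assert(jump->jump_destination() == (address)-1, "reads back as unresolved");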

// far jump reg
class NativeFarJump: public NativeInstruction {
 public:
  address jump_destination() const;

  // Creation
  inline friend NativeFarJump* nativeFarJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

};

inline NativeFarJump* nativeFarJump_at(address address) {
  NativeFarJump* jump = (NativeFarJump*)(address);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}

// Handles all kinds of jumps on Intel: long/far and conditional/unconditional.
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    // These constants do not all apply, since the lengths and offsets
    // depend on the actual jump instruction used.
    // Instruction codes:
    //   Unconditional jumps: 0xE9   (rel32off), 0xEB (rel8off)
    //   Conditional jumps:   0x0F8x (rel32off), 0x7x (rel8off)
    unconditional_long_jump  = 0xe9,
    unconditional_short_jump = 0xeb,
    instruction_size = 5
  };

  address instruction_address() const { return addr_at(0); }
  address jump_destination() const;

  // Creation
  inline friend NativeGeneralJump* nativeGeneralJump_at(address address);

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  NativeGeneralJump* jump = (NativeGeneralJump*)(address);
  debug_only(jump->verify();)
  return jump;
}
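
// Illustrative sketch (editor's addition): planting an unconditional
// 5-byte jump over existing code. `code_pos` and `entry` are
// hypothetical addresses supplied by the caller.
//
//   NativeGeneralJump::insert_unconditional(code_pos, entry);
//   assert(nativeGeneralJump_at(code_pos)->jump_destination() == entry, "sanity");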

class NativeGotJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code = 0xff,
    instruction_offset = 0,
    instruction_size = 6,
    rip_offset = 2
  };

  void verify() const;
  address instruction_address() const { return addr_at(instruction_offset); }
  address destination() const;
  address return_address() const { return addr_at(instruction_size); }
  int got_offset() const { return (jint) int_at(rip_offset); }
  address got_address() const { return return_address() + got_offset(); }
  address next_instruction_address() const { return addr_at(instruction_size); }
  bool is_GotJump() const { return ubyte_at(0) == instruction_code; }

  void set_jump_destination(address dest) {
    address *got_entry = (address *) got_address();
    *got_entry = dest;
  }
};

inline NativeGotJump* nativeGotJump_at(address addr) {
  NativeGotJump* jump = (NativeGotJump*)(addr);
  debug_only(jump->verify());
  return jump;
}
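
// Illustrative sketch (editor's addition): a GOT jump is redirected by
// overwriting the GOT entry it jumps through, not the instruction
// itself, so a single word-sized store suffices. `pc` and `new_dest`
// are hypothetical.
//
//   NativeGotJump* jump = nativeGotJump_at(pc);
//   guarantee(jump->is_GotJump(), "must be jmp [rip + offset]");
//   jump->set_jump_destination(new_dest);  // writes *got_address()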

class NativePopReg : public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code         = 0x58,
    instruction_size         =    1,
    instruction_offset       =    0,
    data_offset              =    1,
    next_instruction_offset  =    1
  };

  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};


class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code         = 0x0B0F, // Real byte order is: 0x0F, 0x0B
    instruction_size         =    2,
    instruction_offset       =    0,
    next_instruction_offset  =    2
  };

  // Insert an illegal opcode at the specified address
  static void insert(address code_pos);
};

// return instruction that does not pop values off the stack
class NativeReturn: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code         = 0xC3,
    instruction_size         =    1,
    instruction_offset       =    0,
    next_instruction_offset  =    1
  };
};

// return instruction that does pop values off the stack
class NativeReturnX: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code         = 0xC2,
    instruction_size         =    2,
    instruction_offset       =    0,
    next_instruction_offset  =    2
  };
};

// Simple test of a register against memory (used by safepoint polls)
class NativeTstRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_rex_prefix_mask = 0xF0,
    instruction_rex_prefix      = Assembler::REX,
    instruction_rex_b_prefix    = Assembler::REX_B,
    instruction_code_memXregl   = 0x85,
    modrm_mask                  = 0x38, // select reg from the ModRM byte
    modrm_reg                   = 0x00  // rax
  };
};

inline bool NativeInstruction::is_illegal()   { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; }
inline bool NativeInstruction::is_call()      { return ubyte_at(0) == NativeCall::instruction_code; }
inline bool NativeInstruction::is_call_reg()  { return ubyte_at(0) == NativeCallReg::instruction_code ||
                                                       (ubyte_at(1) == NativeCallReg::instruction_code &&
                                                        (ubyte_at(0) == Assembler::REX || ubyte_at(0) == Assembler::REX_B)); }
inline bool NativeInstruction::is_return()    { return ubyte_at(0) == NativeReturn::instruction_code ||
                                                       ubyte_at(0) == NativeReturnX::instruction_code; }
inline bool NativeInstruction::is_jump()      { return ubyte_at(0) == NativeJump::instruction_code ||
                                                       ubyte_at(0) == 0xEB; /* short jump */ }
inline bool NativeInstruction::is_jump_reg()  {
  int pos = 0;
  if (ubyte_at(0) == Assembler::REX_B) pos = 1;
  return ubyte_at(pos) == 0xFF && (ubyte_at(pos + 1) & 0xF0) == 0xE0;
}
inline bool NativeInstruction::is_far_jump()  { return is_mov_literal64(); }
inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                       (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
inline bool NativeInstruction::is_safepoint_poll() {
  if (SafepointMechanism::uses_thread_local_poll()) {
#ifdef AMD64
    const bool has_rex_prefix = ubyte_at(0) == NativeTstRegMem::instruction_rex_b_prefix;
    const int test_offset = has_rex_prefix ? 1 : 0;
#else
    const int test_offset = 0;
#endif
    const bool is_test_opcode = ubyte_at(test_offset) == NativeTstRegMem::instruction_code_memXregl;
    const bool is_rax_target = (ubyte_at(test_offset + 1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg;
    return is_test_opcode && is_rax_target;
  }
#ifdef AMD64
  // Try decoding a near safepoint first:
  if (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
      ubyte_at(1) == 0x05) { // 00 rax 101
    address fault = addr_at(6) + int_at(2);
    NOT_JVMCI(assert(!Assembler::is_polling_page_far(), "unexpected poll encoding");)
    return os::is_poll_address(fault);
  }
  // Now try decoding a far safepoint:
  // two cases, depending on the choice of the base register in the address.
  if (((ubyte_at(0) & NativeTstRegMem::instruction_rex_prefix_mask) == NativeTstRegMem::instruction_rex_prefix &&
       ubyte_at(1) == NativeTstRegMem::instruction_code_memXregl &&
       (ubyte_at(2) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg) ||
      (ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
       (ubyte_at(1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg)) {
    NOT_JVMCI(assert(Assembler::is_polling_page_far(), "unexpected poll encoding");)
    return true;
  }
  return false;
#else
  return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2reg ||
           ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl ) &&
         (ubyte_at(1) & 0xC7) == 0x05 && /* Mod R/M == disp32 */
         (os::is_poll_address((address)int_at(2)));
#endif // AMD64
}
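
// For reference (editor's addition), the AMD64 poll encodings matched
// above, with displacement bytes elided as xx and optional REX bytes
// bracketed:
//   thread-local poll: [41] 85 /r          test eax, [r15 + offset]
//   near global poll:  85 05 xx xx xx xx   test eax, [rip + disp32]
//   far global poll:   [4x] 85 /r          test eax, [reg], where reg
//                                          holds the polling page address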

inline bool NativeInstruction::is_mov_literal64() {
#ifdef AMD64
  return ((ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB) &&
          (ubyte_at(1) & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
#else
  return false;
#endif // AMD64
}

#endif // CPU_X86_NATIVEINST_X86_HPP