/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/compiledIC.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/ostream.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

void NativeInstruction::wrote(int offset) {
  ICache::invalidate_word(addr_at(offset));
}

void NativeLoadGot::report_and_fail() const {
  tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
  fatal("not an indirect rip mov to rbx");
}

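// NativeLoadGot matches a rip-relative GOT load of the form
//   [REX] mov rbx|rax, [rip+off]
// i.e. an optional REX prefix, the mov opcode, and a rip-relative ModRM byte
// selecting rbx or rax.  (This layout sketch is inferred from verify() below;
// the exact byte constants live in nativeInst_x86.hpp.)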
void NativeLoadGot::verify() const {
  if (has_rex) {
    int rex = ubyte_at(0);
    if (rex != rex_prefix) {
      report_and_fail();
    }
  }

  int inst = ubyte_at(rex_size);
  if (inst != instruction_code) {
    report_and_fail();
  }
  int modrm = ubyte_at(rex_size + 1);
  if (modrm != modrm_rbx_code && modrm != modrm_rax_code) {
    report_and_fail();
  }
}

intptr_t NativeLoadGot::data() const {
  return *(intptr_t *) got_address();
}

address NativePltCall::destination() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  return jump->destination();
}

address NativePltCall::plt_entry() const {
  return return_address() + displacement();
}

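// A PLT entry reached by this call has one of two shapes, as decoded by the
// accessors below (sketch inferred from plt_jump(), plt_load_got() and
// plt_resolve_call(), not a definitive stub layout):
//   virtual call:  GOT load, GOT jump, resolve GOT jump
//   static call:   GOT jump, c2i stub (GOT load + GOT jump), resolve GOT jump
// The two are told apart by whether the first instruction of the entry is a
// GOT jump.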
address NativePltCall::plt_jump() const {
  address entry = plt_entry();
  // Virtual PLT code has move instruction first
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    return nativeLoadGot_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_load_got() const {
  address entry = plt_entry();
  if (!((NativeGotJump*)entry)->is_GotJump()) {
    // Virtual PLT code has move instruction first
    return entry;
  } else {
    // Static PLT code has move instruction second (from c2i stub)
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

address NativePltCall::plt_c2i_stub() const {
  address entry = plt_load_got();
  // This method should be called only for static calls, which have a c2i stub.
  NativeLoadGot* load = nativeLoadGot_at(entry);
  return entry;
}

address NativePltCall::plt_resolve_call() const {
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address entry = jump->next_instruction_address();
  if (((NativeGotJump*)entry)->is_GotJump()) {
    return entry;
  } else {
    // The c2i stub is two instructions (GOT load + GOT jump); skip both.
    entry = nativeLoadGot_at(entry)->next_instruction_address();
    return nativeGotJump_at(entry)->next_instruction_address();
  }
}

void NativePltCall::reset_to_plt_resolve_call() {
  set_destination_mt_safe(plt_resolve_call());
}

void NativePltCall::set_destination_mt_safe(address dest) {
  // Rewriting the value in the GOT; it should always be aligned, so the
  // write is atomic with respect to concurrently executing code.
  NativeGotJump* jump = nativeGotJump_at(plt_jump());
  address* got = (address *) jump->got_address();
  *got = dest;
}

void NativePltCall::set_stub_to_clean() {
  NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub());
  NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
  method_loader->set_data(0);
  jump->set_jump_destination((address)-1);
}

void NativePltCall::verify() const {
  // Make sure code pattern is actually a call rip+off32 instruction.
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
                  inst);
    fatal("not a call rip+off32");
  }
}

address NativeGotJump::destination() const {
  address *got_entry = (address *) got_address();
  return *got_entry;
}

void NativeGotJump::verify() const {
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
                  inst);
    fatal("not an indirect rip jump");
  }
}

void NativeCall::verify() {
  // Make sure code pattern is actually a call imm32 instruction.
  int inst = ubyte_at(0);
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
                  inst);
    fatal("not a call disp32");
  }
}

address NativeCall::destination() const {
  // Getting the destination of a call isn't safe because that call can
  // be getting patched while you're calling this.  There are only a few
  // special places where this can be called, but they are not automatically
  // verifiable by checking which locks are held.  The real solution would be
  // truly atomic patching on x86, which is not yet implemented.
  return return_address() + displacement();
}


void NativeCall::print() {
  tty->print_cr(PTR_FORMAT ": call " PTR_FORMAT,
                p2i(instruction_address()), p2i(destination()));
}

// Inserts a native call instruction at a given pc
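// The call is encoded as E8 <disp32>, where the 32-bit displacement is
// measured from the end of the 5-byte instruction.  For example (illustration
// only), a call at code_pos 0x1000 targeting entry 0x1234 stores
// disp = 0x1234 - (0x1000 + 5) = 0x22f.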
void NativeCall::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
  *code_pos = instruction_code;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}

// MT-safe patching of a call instruction.
// First patches the first word of the instruction to two jmps that jump to
// themselves (a spinlock).  Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
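// Illustration of the byte-level sequence (not authoritative): the call
// E8 xx xx xx xx is first overwritten with EB FE EB FE (two self-looping
// short jmps), then the 5th byte is written, and finally the first 4 bytes
// of the new call replace the spin jmps in a single 32-bit store.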
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert(instr_addr != NULL, "illegal address for code patching");

  NativeCall* n_call = nativeCall_at(instr_addr); // checking that it is a call
  guarantee((intptr_t)instr_addr % BytesPerWord == 0, "must be aligned");

  // Build the dummy spin jmps
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(jint), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(jint*)instr_addr = *(jint *)patch;

  // Invalidate.  Opteron requires a flush after every write.
  n_call->wrote(0);

  // Patch the 5th byte
  instr_addr[4] = code_buffer[4];

  n_call->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint *)code_buffer;

  n_call->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif

}

// Similar to replace_mt_safe, but just changes the destination.  The
// important thing is that free-running threads are able to execute this
// call instruction at all times.  If the displacement field is aligned
// we can simply rely on atomicity of 32-bit writes to make sure other threads
// will see no intermediate states.  Otherwise, the first two bytes of the
// call are guaranteed to be aligned, and can be atomically patched to a
// self-loop to guard the instruction while we change the other bytes.

// We cannot rely on locks here, since the free-running threads must run at
// full speed.
//
// Used in the runtime linkage of calls; see class CompiledIC.
// (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
void NativeCall::set_destination_mt_safe(address dest) {
  debug_only(verify());
  // Make sure patching code is locked.  No two threads can patch at the same
  // time but one may be executing this code.
  assert(Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint() ||
         CompiledICLocker::is_safe(instruction_address()), "concurrent code patching");
  // Both C1 and C2 should now be generating code which aligns the patched address
  // to be within a single cache line.
  bool is_aligned = ((uintptr_t)displacement_address() + 0) / cache_line_size ==
                    ((uintptr_t)displacement_address() + 3) / cache_line_size;

  guarantee(is_aligned, "destination must be aligned");

  // The destination lies within a single cache line.
  set_destination(dest);
}

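// For reference (illustration only): on AMD64 a mov reg64, imm64 is encoded
// as REX.W (0x48, or 0x49 = REX.WB for r8-r15) followed by B8+reg and the
// 64-bit immediate, e.g. 48 BB <imm64> for mov rbx, imm64.  That is the
// pattern verify() checks below.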
void NativeMovConstReg::verify() {
#ifdef AMD64
  // make sure code pattern is actually a mov reg64, imm64 instruction
  if ((ubyte_at(0) != Assembler::REX_W && ubyte_at(0) != Assembler::REX_WB) ||
      (ubyte_at(1) & (0xff ^ register_mask)) != 0xB8) {
    print();
    fatal("not a REX.W[B] mov reg64, imm64");
  }
#else
  // make sure code pattern is actually a mov reg, imm32 instruction
  u_char test_byte = *(u_char*)instruction_address();
  u_char test_byte_2 = test_byte & (0xff ^ register_mask);
  if (test_byte_2 != instruction_code) fatal("not a mov reg, imm32");
#endif // AMD64
}


void NativeMovConstReg::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, " INTPTR_FORMAT,
                p2i(instruction_address()), data());
}

//-------------------------------------------------------------------

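// instruction_start() returns the offset of the opcode byte, skipping any
// REX/VEX/EVEX, operand-size and SSE prefixes (and an optional leading xor).
// For example (illustration only), movq r10, [r11+0x10] encodes as
// 4D 8B 53 10: the REX prefix 0x4D is skipped and the function returns 1.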
int NativeMovRegMem::instruction_start() const {
  int off = 0;
  u_char instr_0 = ubyte_at(off);

  // See comment in Assembler::locate_operand() about VEX prefixes.
  if (instr_0 == instruction_VEX_prefix_2bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 2;
  }
  if (instr_0 == instruction_VEX_prefix_3bytes) {
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    NOT_LP64(assert((0xC0 & ubyte_at(1)) == 0xC0, "shouldn't have LDS and LES instructions"));
    return 3;
  }
  if (instr_0 == instruction_EVEX_prefix_4bytes) {
    assert(VM_Version::supports_evex(), "shouldn't have EVEX prefix");
    return 4;
  }

  // First check to see if we have a (prefixed or not) xor
  if (instr_0 >= instruction_prefix_wide_lo &&      // 0x40
      instr_0 <= instruction_prefix_wide_hi) {      // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_code_xor) {
    off += 2;
    instr_0 = ubyte_at(off);
  }

  // Now look for the real instruction and the many prefix/size specifiers.

  if (instr_0 == instruction_operandsize_prefix) {  // 0x66
    off++; // Not SSE instructions
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_code_xmm_ss_prefix ||  // 0xf3
      instr_0 == instruction_code_xmm_sd_prefix) {  // 0xf2
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 >= instruction_prefix_wide_lo &&      // 0x40
      instr_0 <= instruction_prefix_wide_hi) {      // 0x4f
    off++;
    instr_0 = ubyte_at(off);
  }

  if (instr_0 == instruction_extended_prefix) {     // 0x0f
    off++;
  }

  return off;
}

address NativeMovRegMem::instruction_address() const {
  return addr_at(instruction_start());
}

address NativeMovRegMem::next_instruction_address() const {
  address ret = instruction_address() + instruction_size;
  u_char instr_0 = *(u_char*) instruction_address();
  switch (instr_0) {
  case instruction_operandsize_prefix:
    fatal("should have skipped instruction_operandsize_prefix");
    break;

  case instruction_extended_prefix:
    fatal("should have skipped instruction_extended_prefix");
    break;

  case instruction_code_mem2reg_movslq: // 0x63
  case instruction_code_mem2reg_movzxb: // 0xB6
  case instruction_code_mem2reg_movsxb: // 0xBE
  case instruction_code_mem2reg_movzxw: // 0xB7
  case instruction_code_mem2reg_movsxw: // 0xBF
  case instruction_code_reg2mem:        // 0x89 (q/l)
  case instruction_code_mem2reg:        // 0x8B (q/l)
  case instruction_code_reg2memb:       // 0x88
  case instruction_code_mem2regb:       // 0x8a

  case instruction_code_lea:            // 0x8d

  case instruction_code_float_s:        // 0xd9 fld_s a
  case instruction_code_float_d:        // 0xdd fld_d a

  case instruction_code_xmm_load:       // 0x10
  case instruction_code_xmm_store:      // 0x11
  case instruction_code_xmm_lpd:        // 0x12
    {
      // If there is an SIB byte then the instruction is one byte longer
      // than expected.
      u_char mod_rm = *(u_char*)(instruction_address() + 1);
      if ((mod_rm & 7) == 0x4) {
        ret++;
      }
    }
    break;

  case instruction_code_xor:
    fatal("should have skipped xor lead in");
    break;

  default:
    fatal("not a NativeMovRegMem");
  }
  return ret;
}

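// Note on the SIB adjustment below (illustration only): when the ModRM byte's
// r/m field is 100 (rsp or r12 as the base register), an SIB byte follows the
// ModRM byte, so the displacement starts one byte later than in the simple
// ModRM form.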
int NativeMovRegMem::offset() const {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
  // the encoding to use an SIB byte, which pushes the nnnn
  // displacement field out by one byte.
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  return int_at(off);
}

void NativeMovRegMem::set_offset(int x) {
  int off = data_offset + instruction_start();
  u_char mod_rm = *(u_char*)(instruction_address() + 1);
  // nnnn(r12|rsp) isn't coded as simple mod/rm since that is
  // the encoding to use an SIB byte, which pushes the nnnn
  // displacement field out by one byte.
  if ((mod_rm & 7) == 0x4) {
    off++;
  }
  set_int_at(off, x);
}

void NativeMovRegMem::verify() {
  // make sure the code pattern is actually one of the mov/lea/fld/xmm
  // memory forms listed below
  u_char test_byte = *(u_char*)instruction_address();
  switch (test_byte) {
    case instruction_code_reg2memb:  // 0x88 movb a, r
    case instruction_code_reg2mem:   // 0x89 movl a, r (can be movq in 64bit)
    case instruction_code_mem2regb:  // 0x8a movb r, a
    case instruction_code_mem2reg:   // 0x8b movl r, a (can be movq in 64bit)
      break;

    case instruction_code_mem2reg_movslq: // 0x63 movsql r, a
    case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb)
    case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw)
    case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb)
    case instruction_code_mem2reg_movsxw: // 0xbf movswl r, a (movsxw)
      break;

    case instruction_code_float_s:   // 0xd9 fld_s a
    case instruction_code_float_d:   // 0xdd fld_d a
    case instruction_code_xmm_load:  // 0x10 movsd xmm, a
    case instruction_code_xmm_store: // 0x11 movsd a, xmm
    case instruction_code_xmm_lpd:   // 0x12 movlpd xmm, a
      break;

    case instruction_code_lea:       // 0x8d lea r, a
      break;

    default:
      fatal("not a mov [reg+offs], reg instruction");
  }
}


void NativeMovRegMem::print() {
  tty->print_cr(PTR_FORMAT ": mov reg, [reg + %x]", p2i(instruction_address()), offset());
}

//-------------------------------------------------------------------

void NativeLoadAddress::verify() {
  // make sure code pattern is actually a lea reg, [reg+offset] instruction
  u_char test_byte = *(u_char*)instruction_address();
#ifdef _LP64
  if ((test_byte == instruction_prefix_wide ||
       test_byte == instruction_prefix_wide_extended)) {
    test_byte = *(u_char*)(instruction_address() + 1);
  }
#endif // _LP64
  if (!((test_byte == lea_instruction_code)
        LP64_ONLY(|| (test_byte == mov64_instruction_code)))) {
    fatal("not a lea reg, [reg+offs] instruction");
  }
}


void NativeLoadAddress::print() {
  tty->print_cr(PTR_FORMAT ": lea [reg + %x], reg", p2i(instruction_address()), offset());
}

//--------------------------------------------------------------------------------

void NativeJump::verify() {
  if (*(u_char*)instruction_address() != instruction_code) {
    // far jump
    NativeMovConstReg* mov = nativeMovConstReg_at(instruction_address());
    NativeInstruction* jmp = nativeInstruction_at(mov->next_instruction_address());
    if (!jmp->is_jump_reg()) {
      fatal("not a jump instruction");
    }
  }
}


void NativeJump::insert(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = instruction_code;
  *((int32_t*)(code_pos + 1)) = (int32_t)disp;

  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeJump::check_verified_entry_alignment(address entry, address verified_entry) {
  // Patching to not_entrant can happen while activations of the method are
  // in use.  The patching in that instance must happen only when certain
  // alignment restrictions are true.  These guarantees check those
  // conditions.
#ifdef AMD64
  const int linesize = 64;
#else
  const int linesize = 32;
#endif // AMD64

  // Must be wordSize aligned
  guarantee(((uintptr_t) verified_entry & (wordSize -1)) == 0,
            "illegal address for code patching 2");
  // First 5 bytes must be within the same cache line - 4827828
  guarantee((uintptr_t) verified_entry / linesize ==
            ((uintptr_t) verified_entry + 4) / linesize,
            "illegal address for code patching 3");
}


// MT-safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
// The problem: jmp <dest> is a 5-byte instruction, but an atomic write can only cover 4 bytes.
// First patches the first word atomically to be a jump to itself.
// Then patches the last byte, and then atomically patches the first word (4 bytes),
// thus inserting the desired jump.
// This code is mt-safe with the following conditions: the entry point is 4-byte aligned,
// the entry point is in the same cache line as the unverified entry point, and the
// instruction being patched is >= 5 bytes (the size of the patch).
//
// In C2 the 5+ byte sized instruction is enforced by code in MachPrologNode::emit.
// In C1 the restriction is enforced by CodeEmitter::method_entry.
// In JVMCI, the restriction is enforced by HotSpotFrameContext.enter(...).
//
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // complete jump instruction (to be inserted) is in code_buffer;
  unsigned char code_buffer[5];
  code_buffer[0] = instruction_code;
  intptr_t disp = (intptr_t)dest - ((intptr_t)verified_entry + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64
  *(int32_t*)(code_buffer + 1) = (int32_t)disp;

  check_verified_entry_alignment(entry, verified_entry);

  // Can't call nativeJump_at() because its asserts require that a jump already exists
  NativeJump* n_jump = (NativeJump*) verified_entry;

  // Build the dummy spin jmps
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)verified_entry = *(int32_t *)patch;

  n_jump->wrote(0);

  // Patch 5th byte (from jump instruction)
  verified_entry[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3 (from jump instruction)
  *(int32_t*)verified_entry = *(int32_t *)code_buffer;
  // Invalidate.  Opteron requires a flush after every write.
  n_jump->wrote(0);

}

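// A far jump is the two-instruction sequence
//   mov reg64, imm64   (NativeMovConstReg holding the destination)
//   jmp reg
// as checked by NativeFarJump::verify() below; jump_destination() simply
// reads the 64-bit immediate back out of the mov.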
address NativeFarJump::jump_destination() const {
  NativeMovConstReg* mov = nativeMovConstReg_at(addr_at(0));
  return (address)mov->data();
}

void NativeFarJump::verify() {
  if (is_far_jump()) {
    NativeMovConstReg* mov = nativeMovConstReg_at(addr_at(0));
    NativeInstruction* jmp = nativeInstruction_at(mov->next_instruction_address());
    if (jmp->is_jump_reg()) return;
  }
  fatal("not a jump instruction");
}

void NativePopReg::insert(address code_pos, Register reg) {
  assert(reg->encoding() < 8, "no space for REX");
  assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update");
  *code_pos = (u_char)(instruction_code | reg->encoding());
  ICache::invalidate_range(code_pos, instruction_size);
}


void NativeIllegalInstruction::insert(address code_pos) {
  assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update");
  *(short *)code_pos = instruction_code;
  ICache::invalidate_range(code_pos, instruction_size);
}

void NativeGeneralJump::verify() {
  assert(((NativeInstruction *)this)->is_jump() ||
         ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
}


void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  intptr_t disp = (intptr_t)entry - ((intptr_t)code_pos + 1 + 4);
#ifdef AMD64
  guarantee(disp == (intptr_t)(int32_t)disp, "must be 32-bit offset");
#endif // AMD64

  *code_pos = unconditional_long_jump;
  *((int32_t *)(code_pos+1)) = (int32_t) disp;
  ICache::invalidate_range(code_pos, instruction_size);
}


// MT-safe patching of a long jump instruction.
// First patches the first word of the instruction to two jmps that jump to
// themselves (a spinlock).  Then patches the last byte, and then atomically
// replaces the jmps with the first 4 bytes of the new instruction.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(instr_addr != NULL, "illegal address for code patching (4)");
  NativeGeneralJump* n_jump = nativeGeneralJump_at(instr_addr); // checking that it is a jump

  // Temporary code
  unsigned char patch[4];
  assert(sizeof(patch)==sizeof(int32_t), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(int32_t*)instr_addr = *(int32_t *)patch;
  n_jump->wrote(0);

  // Patch the 5th byte
  instr_addr[4] = code_buffer[4];

  n_jump->wrote(4);

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint *)code_buffer;

  n_jump->wrote(0);

#ifdef ASSERT
  // verify patching
  for (int i = 0; i < instruction_size; i++) {
    address ptr = (address)((intptr_t)code_buffer + i);
    int a_byte = (*ptr) & 0xFF;
    assert(*((address)((intptr_t)instr_addr + i)) == a_byte, "mt safe patching failed");
  }
#endif

}


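// jump_destination() below decodes three encodings (byte values for
// illustration only):
//   E9 rel32       - 5-byte unconditional jump
//   0F 8x rel32    - 6-byte conditional jump
//   anything else  - 2-byte short jump with a rel8 displacement
// In each case the target is the end of the instruction plus the displacement.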
address NativeGeneralJump::jump_destination() const {
  int op_code = ubyte_at(0);
  bool is_rel32off = (op_code == 0xE9 || op_code == 0x0F);
  int offset = (op_code == 0x0F) ? 2 : 1;
  int length = offset + ((is_rel32off) ? 4 : 1);

  if (is_rel32off)
    return addr_at(0) + length + int_at(offset);
  else
    return addr_at(0) + length + sbyte_at(offset);
}