/*
 * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"


#define __ ce->masm()->

float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

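// Note on why this stub exists: the fast path for f2i/d2i uses
// cvttss2si/cvttsd2si, which produce the x86 "integer indefinite" value
// 0x80000000 when the input is NaN or does not fit in an int. The stub
// fixes the result up to what the Java language requires: 0 for NaN,
// maxInt for positive overflow, and minInt for negative overflow (which
// the result register already holds).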
void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");

  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    LP64_ONLY(ShouldNotReachHere());
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

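  // Both the comiss/comisd above and the x87 ftst/sahf sequence leave PF set
  // when the compare against zero is unordered (i.e. the input is NaN) and
  // CF set when the input is below zero; a negative input keeps the
  // 0x80000000 (minInt) already in the result register.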
  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  ce->store_parameter(m, 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on the stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
    ce->store_parameter(_array->as_pointer_register(), 1);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

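// The patch record emitted in emit_code() below ends exactly where the stub's
// 5-byte call into the runtime begins, so the runtime finds the record at this
// fixed negative offset from the call's return address (a rel32 call is the
// same size as a NativeGeneralJump; see the assert after the call below).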
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on Intel, and we need to make sure
  // that other processors never see a piece of the instruction. It appears
  // mostly impossible on Intel to simply invalidate other processors' caches,
  // and since they may prefetch aggressively it's very hard to guess what code
  // might be in the icache. Force the instruction to be double-word aligned
  // so that it doesn't span a cache line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // Static field accesses have special semantics while the class
  // initializer is being run, so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = NULL;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // being_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset_in_bytes()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes.
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

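  // Record layout (disassembles as "movl eax, imm32"):
  //   0xB8 | 0x00 | being_initialized_entry_offset | bytes_to_skip | bytes_to_copy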
  __ emit_int8((unsigned char)0xB8);
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since the concurrent
  // code rewrite may transiently create the illegal instruction sequence.
  for (int j = __ offset(); j < jmp_off + 5; j++) {
    __ nop();
  }
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on the stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go.
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT };
  SharedRuntime::java_calling_convention(signature, args, 5, true);

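  // java_calling_convention assigns each argument either a register or a
  // stack slot in the outgoing frame, so the loop below only needs to store
  // the stack-slot args; register args are already in place (checked by the
  // assert in the loop).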
  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // store any args that were assigned stack slots
  for (int i = 0; i < 5; i++) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr(Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

#undef __