1 | /* |
2 | * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "asm/assembler.hpp" |
27 | #include "c1/c1_Defs.hpp" |
28 | #include "c1/c1_MacroAssembler.hpp" |
29 | #include "c1/c1_Runtime1.hpp" |
30 | #include "ci/ciUtilities.hpp" |
31 | #include "gc/shared/cardTable.hpp" |
32 | #include "gc/shared/cardTableBarrierSet.hpp" |
33 | #include "interpreter/interpreter.hpp" |
34 | #include "memory/universe.hpp" |
35 | #include "nativeInst_x86.hpp" |
36 | #include "oops/compiledICHolder.hpp" |
37 | #include "oops/oop.inline.hpp" |
38 | #include "prims/jvmtiExport.hpp" |
39 | #include "register_x86.hpp" |
40 | #include "runtime/sharedRuntime.hpp" |
41 | #include "runtime/signature.hpp" |
42 | #include "runtime/vframeArray.hpp" |
43 | #include "utilities/macros.hpp" |
44 | #include "vmreg_x86.inline.hpp" |
45 | |
46 | // Implementation of StubAssembler |
47 | |
48 | int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, int args_size) { |
49 | // setup registers |
  const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is a callee-saved register (Visual C++ calling conventions)
51 | assert(!(oop_result1->is_valid() || metadata_result->is_valid()) || oop_result1 != metadata_result, "registers must be different" ); |
52 | assert(oop_result1 != thread && metadata_result != thread, "registers must be different" ); |
53 | assert(args_size >= 0, "illegal args_size" ); |
54 | bool align_stack = false; |
55 | #ifdef _LP64 |
56 | // At a method handle call, the stack may not be properly aligned |
57 | // when returning with an exception. |
58 | align_stack = (stub_id() == Runtime1::handle_exception_from_callee_id); |
59 | #endif |
60 | |
61 | #ifdef _LP64 |
62 | mov(c_rarg0, thread); |
63 | set_num_rt_args(0); // Nothing on stack |
64 | #else |
65 | set_num_rt_args(1 + args_size); |
66 | |
67 | // push java thread (becomes first argument of C function) |
68 | get_thread(thread); |
69 | push(thread); |
70 | #endif // _LP64 |
71 | |
72 | int call_offset; |
73 | if (!align_stack) { |
74 | set_last_Java_frame(thread, noreg, rbp, NULL); |
75 | } else { |
76 | address the_pc = pc(); |
77 | call_offset = offset(); |
78 | set_last_Java_frame(thread, noreg, rbp, the_pc); |
79 | andptr(rsp, -(StackAlignmentInBytes)); // Align stack |
80 | } |
81 | |
82 | // do the call |
83 | call(RuntimeAddress(entry)); |
84 | if (!align_stack) { |
85 | call_offset = offset(); |
86 | } |
87 | // verify callee-saved register |
88 | #ifdef ASSERT |
89 | guarantee(thread != rax, "change this code" ); |
90 | push(rax); |
91 | { Label L; |
92 | get_thread(rax); |
93 | cmpptr(thread, rax); |
94 | jcc(Assembler::equal, L); |
95 | int3(); |
96 | stop("StubAssembler::call_RT: rdi not callee saved?" ); |
97 | bind(L); |
98 | } |
99 | pop(rax); |
100 | #endif |
101 | reset_last_Java_frame(thread, true); |
102 | |
103 | // discard thread and arguments |
104 | NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord)); |
105 | |
106 | // check for pending exceptions |
107 | { Label L; |
108 | cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); |
109 | jcc(Assembler::equal, L); |
110 | // exception pending => remove activation and forward to exception handler |
111 | movptr(rax, Address(thread, Thread::pending_exception_offset())); |
112 | // make sure that the vm_results are cleared |
113 | if (oop_result1->is_valid()) { |
114 | movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); |
115 | } |
116 | if (metadata_result->is_valid()) { |
117 | movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); |
118 | } |
119 | if (frame_size() == no_frame_size) { |
120 | leave(); |
121 | jump(RuntimeAddress(StubRoutines::forward_exception_entry())); |
122 | } else if (_stub_id == Runtime1::forward_exception_id) { |
123 | should_not_reach_here(); |
124 | } else { |
125 | jump(RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id))); |
126 | } |
127 | bind(L); |
128 | } |
129 | // get oop results if there are any and reset the values in the thread |
130 | if (oop_result1->is_valid()) { |
131 | get_vm_result(oop_result1, thread); |
132 | } |
133 | if (metadata_result->is_valid()) { |
134 | get_vm_result_2(metadata_result, thread); |
135 | } |
136 | return call_offset; |
137 | } |
138 | |
139 | |
140 | int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1) { |
141 | #ifdef _LP64 |
142 | mov(c_rarg1, arg1); |
143 | #else |
144 | push(arg1); |
145 | #endif // _LP64 |
146 | return call_RT(oop_result1, metadata_result, entry, 1); |
147 | } |
148 | |
149 | |
150 | int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2) { |
151 | #ifdef _LP64 |
152 | if (c_rarg1 == arg2) { |
153 | if (c_rarg2 == arg1) { |
154 | xchgq(arg1, arg2); |
155 | } else { |
156 | mov(c_rarg2, arg2); |
157 | mov(c_rarg1, arg1); |
158 | } |
159 | } else { |
160 | mov(c_rarg1, arg1); |
161 | mov(c_rarg2, arg2); |
162 | } |
163 | #else |
164 | push(arg2); |
165 | push(arg1); |
166 | #endif // _LP64 |
167 | return call_RT(oop_result1, metadata_result, entry, 2); |
168 | } |
169 | |
170 | |
171 | int StubAssembler::call_RT(Register oop_result1, Register metadata_result, address entry, Register arg1, Register arg2, Register arg3) { |
172 | #ifdef _LP64 |
173 | // if there is any conflict use the stack |
  if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
      arg2 == c_rarg1 || arg2 == c_rarg3 ||
      arg3 == c_rarg1 || arg3 == c_rarg2) {
177 | push(arg3); |
178 | push(arg2); |
179 | push(arg1); |
180 | pop(c_rarg1); |
181 | pop(c_rarg2); |
182 | pop(c_rarg3); |
183 | } else { |
184 | mov(c_rarg1, arg1); |
185 | mov(c_rarg2, arg2); |
186 | mov(c_rarg3, arg3); |
187 | } |
188 | #else |
189 | push(arg3); |
190 | push(arg2); |
191 | push(arg1); |
192 | #endif // _LP64 |
193 | return call_RT(oop_result1, metadata_result, entry, 3); |
194 | } |
195 | |
196 | |
197 | // Implementation of StubFrame |
198 | |
199 | class StubFrame: public StackObj { |
200 | private: |
201 | StubAssembler* _sasm; |
202 | |
203 | public: |
204 | StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments); |
205 | void load_argument(int offset_in_words, Register reg); |
206 | |
207 | ~StubFrame(); |
208 | }; |
209 | |
210 | void StubAssembler::prologue(const char* name, bool must_gc_arguments) { |
211 | set_info(name, must_gc_arguments); |
212 | enter(); |
213 | } |
214 | |
215 | void StubAssembler::epilogue() { |
216 | leave(); |
217 | ret(0); |
218 | } |
219 | |
220 | #define __ _sasm-> |
221 | |
222 | StubFrame::StubFrame(StubAssembler* sasm, const char* name, bool must_gc_arguments) { |
223 | _sasm = sasm; |
224 | __ prologue(name, must_gc_arguments); |
225 | } |
226 | |
227 | // load parameters that were stored with LIR_Assembler::store_parameter |
228 | // Note: offsets for store_parameter and load_argument must match |
229 | void StubFrame::load_argument(int offset_in_words, Register reg) { |
230 | __ load_parameter(offset_in_words, reg); |
231 | } |
232 | |
233 | |
234 | StubFrame::~StubFrame() { |
235 | __ epilogue(); |
236 | } |
237 | |
238 | #undef __ |
239 | |
240 | |
241 | // Implementation of Runtime1 |
242 | |
243 | const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2; |
244 | const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2; |
245 | |
246 | // Stack layout for saving/restoring all the registers needed during a runtime |
247 | // call (this includes deoptimization) |
// Note that users of this frame may well have arguments to some runtime call
// while these values are on the stack. These positions neglect those arguments
// but the code in save_live_registers will take the argument count into
// account.
252 | // |
253 | #ifdef _LP64 |
254 | #define SLOT2(x) x, |
255 | #define SLOT_PER_WORD 2 |
256 | #else |
257 | #define SLOT2(x) |
258 | #define SLOT_PER_WORD 1 |
259 | #endif // _LP64 |
260 | |
261 | enum reg_save_layout { |
  // 64-bit needs to keep the stack 16-byte aligned, so we add some alignment
  // dummies to make that happen, and assert if the stack size we create is misaligned
264 | #ifdef _LP64 |
265 | align_dummy_0, align_dummy_1, |
266 | #endif // _LP64 |
267 | #ifdef _WIN64 |
  // Windows always allocates space for its argument registers (see
269 | // frame::arg_reg_save_area_bytes). |
270 | arg_reg_save_1, arg_reg_save_1H, // 0, 4 |
271 | arg_reg_save_2, arg_reg_save_2H, // 8, 12 |
272 | arg_reg_save_3, arg_reg_save_3H, // 16, 20 |
273 | arg_reg_save_4, arg_reg_save_4H, // 24, 28 |
274 | #endif // _WIN64 |
275 | xmm_regs_as_doubles_off, // 32 |
276 | float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160 |
277 | fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224 |
278 | // fpu_state_end_off is exclusive |
279 | fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352 |
280 | marker = fpu_state_end_off, SLOT2(markerH) // 352, 356 |
281 | , // 360 |
282 | #ifdef _LP64 |
283 | r15_off = extra_space_offset, r15H_off, // 360, 364 |
284 | r14_off, r14H_off, // 368, 372 |
285 | r13_off, r13H_off, // 376, 380 |
286 | r12_off, r12H_off, // 384, 388 |
287 | r11_off, r11H_off, // 392, 396 |
288 | r10_off, r10H_off, // 400, 404 |
289 | r9_off, r9H_off, // 408, 412 |
290 | r8_off, r8H_off, // 416, 420 |
291 | rdi_off, rdiH_off, // 424, 428 |
292 | #else |
293 | rdi_off = extra_space_offset, |
294 | #endif // _LP64 |
295 | rsi_off, SLOT2(rsiH_off) // 432, 436 |
296 | rbp_off, SLOT2(rbpH_off) // 440, 444 |
297 | rsp_off, SLOT2(rspH_off) // 448, 452 |
298 | rbx_off, SLOT2(rbxH_off) // 456, 460 |
299 | rdx_off, SLOT2(rdxH_off) // 464, 468 |
300 | rcx_off, SLOT2(rcxH_off) // 472, 476 |
301 | rax_off, SLOT2(raxH_off) // 480, 484 |
302 | saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492 |
303 | return_off, SLOT2(returnH_off) // 496, 500 |
304 | reg_save_frame_size // As noted: neglects any parameters to runtime // 504 |
305 | }; |
306 | |
307 | // Save off registers which might be killed by calls into the runtime. |
// Tries to be smart about FP registers.  In particular we separate
309 | // saving and describing the FPU registers for deoptimization since we |
310 | // have to save the FPU registers twice if we describe them and on P4 |
311 | // saving FPU registers which don't contain anything appears |
312 | // expensive. The deopt blob is the only thing which needs to |
313 | // describe FPU registers. In all other cases it should be sufficient |
314 | // to simply save their current value. |
315 | |
316 | static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args, |
317 | bool save_fpu_registers = true) { |
318 | |
319 | // In 64bit all the args are in regs so there are no additional stack slots |
320 | LP64_ONLY(num_rt_args = 0); |
321 | LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned" );) |
322 | int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread |
323 | sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word); |
324 | |
325 | // record saved value locations in an OopMap |
326 | // locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread |
327 | OopMap* map = new OopMap(frame_size_in_slots, 0); |
328 | map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg()); |
329 | map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg()); |
330 | map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg()); |
331 | map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg()); |
332 | map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg()); |
333 | map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg()); |
334 | #ifdef _LP64 |
335 | map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args), r8->as_VMReg()); |
336 | map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args), r9->as_VMReg()); |
337 | map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg()); |
338 | map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg()); |
339 | map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg()); |
340 | map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg()); |
341 | map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg()); |
342 | map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg()); |
343 | |
344 | // This is stupid but needed. |
345 | map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next()); |
346 | map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next()); |
347 | map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next()); |
348 | map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next()); |
349 | map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next()); |
350 | map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next()); |
351 | |
352 | map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args), r8->as_VMReg()->next()); |
353 | map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args), r9->as_VMReg()->next()); |
354 | map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next()); |
355 | map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next()); |
356 | map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next()); |
357 | map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next()); |
358 | map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next()); |
359 | map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next()); |
360 | #endif // _LP64 |
361 | |
362 | int xmm_bypass_limit = FrameMap::nof_xmm_regs; |
363 | #ifdef _LP64 |
364 | if (UseAVX < 3) { |
365 | xmm_bypass_limit = xmm_bypass_limit / 2; |
366 | } |
367 | #endif |
368 | |
369 | if (save_fpu_registers) { |
370 | if (UseSSE < 2) { |
371 | int fpu_off = float_regs_as_doubles_off; |
372 | for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { |
373 | VMReg fpu_name_0 = FrameMap::fpu_regname(n); |
374 | map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + num_rt_args), fpu_name_0); |
375 | // %%% This is really a waste but we'll keep things as they were for now |
376 | if (true) { |
377 | map->set_callee_saved(VMRegImpl::stack2reg(fpu_off + 1 + num_rt_args), fpu_name_0->next()); |
378 | } |
379 | fpu_off += 2; |
380 | } |
381 | assert(fpu_off == fpu_state_off, "incorrect number of fpu stack slots" ); |
382 | } |
383 | |
384 | if (UseSSE >= 2) { |
385 | int xmm_off = xmm_regs_as_doubles_off; |
386 | for (int n = 0; n < FrameMap::nof_xmm_regs; n++) { |
387 | if (n < xmm_bypass_limit) { |
388 | VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg(); |
389 | map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0); |
390 | // %%% This is really a waste but we'll keep things as they were for now |
391 | if (true) { |
392 | map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + 1 + num_rt_args), xmm_name_0->next()); |
393 | } |
394 | } |
395 | xmm_off += 2; |
396 | } |
397 | assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers" ); |
398 | |
399 | } else if (UseSSE == 1) { |
400 | int xmm_off = xmm_regs_as_doubles_off; |
401 | for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { |
402 | VMReg xmm_name_0 = as_XMMRegister(n)->as_VMReg(); |
403 | map->set_callee_saved(VMRegImpl::stack2reg(xmm_off + num_rt_args), xmm_name_0); |
404 | xmm_off += 2; |
405 | } |
406 | assert(xmm_off == float_regs_as_doubles_off, "incorrect number of xmm registers" ); |
407 | } |
408 | } |
409 | |
410 | return map; |
411 | } |
412 | |
413 | #define __ this-> |
414 | |
415 | void C1_MacroAssembler::save_live_registers_no_oop_map(bool save_fpu_registers) { |
416 | __ block_comment("save_live_registers" ); |
417 | |
418 | __ pusha(); // integer registers |
419 | |
420 | // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset"); |
421 | // assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset"); |
422 | |
423 | __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); |
424 | |
425 | #ifdef ASSERT |
426 | __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef); |
427 | #endif |
428 | |
429 | if (save_fpu_registers) { |
430 | if (UseSSE < 2) { |
431 | // save FPU stack |
432 | __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); |
433 | __ fwait(); |
434 | |
435 | #ifdef ASSERT |
436 | Label ok; |
437 | __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std()); |
438 | __ jccb(Assembler::equal, ok); |
439 | __ stop("corrupted control word detected" ); |
440 | __ bind(ok); |
441 | #endif |
442 | |
443 | // Reset the control word to guard against exceptions being unmasked |
444 | // since fstp_d can cause FPU stack underflow exceptions. Write it |
445 | // into the on stack copy and then reload that to make sure that the |
446 | // current and future values are correct. |
447 | __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std()); |
448 | __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); |
449 | |
450 | // Save the FPU registers in de-opt-able form |
451 | int offset = 0; |
452 | for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { |
453 | __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); |
454 | offset += 8; |
455 | } |
456 | } |
457 | |
458 | if (UseSSE >= 2) { |
459 | // save XMM registers |
460 | // XMM registers can contain float or double values, but this is not known here, |
461 | // so always save them as doubles. |
462 | // note that float values are _not_ converted automatically, so for float values |
463 | // the second word contains only garbage data. |
464 | int xmm_bypass_limit = FrameMap::nof_xmm_regs; |
465 | int offset = 0; |
466 | #ifdef _LP64 |
467 | if (UseAVX < 3) { |
468 | xmm_bypass_limit = xmm_bypass_limit / 2; |
469 | } |
470 | #endif |
471 | for (int n = 0; n < xmm_bypass_limit; n++) { |
472 | XMMRegister xmm_name = as_XMMRegister(n); |
473 | __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name); |
474 | offset += 8; |
475 | } |
476 | } else if (UseSSE == 1) { |
      // save XMM registers as floats, because doubles are not supported without SSE2 (num MMX == num fpu)
478 | int offset = 0; |
479 | for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { |
480 | XMMRegister xmm_name = as_XMMRegister(n); |
481 | __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset), xmm_name); |
482 | offset += 8; |
483 | } |
484 | } |
485 | } |
486 | |
487 | // FPU stack must be empty now |
488 | __ verify_FPU(0, "save_live_registers" ); |
489 | } |
490 | |
491 | #undef __ |
492 | #define __ sasm-> |
493 | |
494 | static void restore_fpu(C1_MacroAssembler* sasm, bool restore_fpu_registers) { |
495 | if (restore_fpu_registers) { |
496 | if (UseSSE >= 2) { |
497 | // restore XMM registers |
498 | int xmm_bypass_limit = FrameMap::nof_xmm_regs; |
499 | #ifdef _LP64 |
500 | if (UseAVX < 3) { |
501 | xmm_bypass_limit = xmm_bypass_limit / 2; |
502 | } |
503 | #endif |
504 | int offset = 0; |
505 | for (int n = 0; n < xmm_bypass_limit; n++) { |
506 | XMMRegister xmm_name = as_XMMRegister(n); |
507 | __ movdbl(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); |
508 | offset += 8; |
509 | } |
510 | } else if (UseSSE == 1) { |
      // restore XMM registers (num MMX == num fpu)
512 | int offset = 0; |
513 | for (int n = 0; n < FrameMap::nof_fpu_regs; n++) { |
514 | XMMRegister xmm_name = as_XMMRegister(n); |
515 | __ movflt(xmm_name, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + offset)); |
516 | offset += 8; |
517 | } |
518 | } |
519 | |
520 | if (UseSSE < 2) { |
521 | __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size)); |
522 | } else { |
523 | // check that FPU stack is really empty |
524 | __ verify_FPU(0, "restore_live_registers" ); |
525 | } |
526 | |
527 | } else { |
528 | // check that FPU stack is really empty |
529 | __ verify_FPU(0, "restore_live_registers" ); |
530 | } |
531 | |
532 | #ifdef ASSERT |
533 | { |
534 | Label ok; |
535 | __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef); |
536 | __ jcc(Assembler::equal, ok); |
537 | __ stop("bad offsets in frame" ); |
538 | __ bind(ok); |
539 | } |
540 | #endif // ASSERT |
541 | |
542 | __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size); |
543 | } |
544 | |
545 | #undef __ |
546 | #define __ this-> |
547 | |
548 | void C1_MacroAssembler::restore_live_registers(bool restore_fpu_registers) { |
549 | __ block_comment("restore_live_registers" ); |
550 | |
551 | restore_fpu(this, restore_fpu_registers); |
552 | __ popa(); |
553 | } |
554 | |
555 | |
556 | void C1_MacroAssembler::restore_live_registers_except_rax(bool restore_fpu_registers) { |
557 | __ block_comment("restore_live_registers_except_rax" ); |
558 | |
559 | restore_fpu(this, restore_fpu_registers); |
560 | |
561 | #ifdef _LP64 |
562 | __ movptr(r15, Address(rsp, 0)); |
563 | __ movptr(r14, Address(rsp, wordSize)); |
564 | __ movptr(r13, Address(rsp, 2 * wordSize)); |
565 | __ movptr(r12, Address(rsp, 3 * wordSize)); |
566 | __ movptr(r11, Address(rsp, 4 * wordSize)); |
567 | __ movptr(r10, Address(rsp, 5 * wordSize)); |
568 | __ movptr(r9, Address(rsp, 6 * wordSize)); |
569 | __ movptr(r8, Address(rsp, 7 * wordSize)); |
570 | __ movptr(rdi, Address(rsp, 8 * wordSize)); |
571 | __ movptr(rsi, Address(rsp, 9 * wordSize)); |
572 | __ movptr(rbp, Address(rsp, 10 * wordSize)); |
573 | // skip rsp |
574 | __ movptr(rbx, Address(rsp, 12 * wordSize)); |
575 | __ movptr(rdx, Address(rsp, 13 * wordSize)); |
576 | __ movptr(rcx, Address(rsp, 14 * wordSize)); |
577 | |
578 | __ addptr(rsp, 16 * wordSize); |
579 | #else |
580 | |
581 | __ pop(rdi); |
582 | __ pop(rsi); |
583 | __ pop(rbp); |
584 | __ pop(rbx); // skip this value |
585 | __ pop(rbx); |
586 | __ pop(rdx); |
587 | __ pop(rcx); |
588 | __ addptr(rsp, BytesPerWord); |
589 | #endif // _LP64 |
590 | } |
591 | |
592 | #undef __ |
593 | #define __ sasm-> |
594 | |
595 | static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args, |
596 | bool save_fpu_registers = true) { |
597 | __ save_live_registers_no_oop_map(save_fpu_registers); |
598 | return generate_oop_map(sasm, num_rt_args, save_fpu_registers); |
599 | } |
600 | |
601 | static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) { |
602 | __ restore_live_registers(restore_fpu_registers); |
603 | } |
604 | |
605 | static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_fpu_registers = true) { |
606 | sasm->restore_live_registers_except_rax(restore_fpu_registers); |
607 | } |
608 | |
609 | |
610 | void Runtime1::initialize_pd() { |
611 | // nothing to do |
612 | } |
613 | |
614 | |
615 | // Target: the entry point of the method that creates and posts the exception oop. |
616 | // has_argument: true if the exception needs arguments (passed on the stack because |
617 | // registers must be preserved). |
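// For example (see generate_code_for below):
//   oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true);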
618 | OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address target, bool has_argument) { |
619 | // Preserve all registers. |
620 | int num_rt_args = has_argument ? (2 + 1) : 1; |
621 | OopMap* oop_map = save_live_registers(sasm, num_rt_args); |
622 | |
623 | // Now all registers are saved and can be used freely. |
624 | // Verify that no old value is used accidentally. |
625 | __ invalidate_registers(true, true, true, true, true, true); |
626 | |
627 | // Registers used by this stub. |
628 | const Register temp_reg = rbx; |
629 | |
630 | // Load arguments for exception that are passed as arguments into the stub. |
631 | if (has_argument) { |
632 | #ifdef _LP64 |
633 | __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord)); |
634 | __ movptr(c_rarg2, Address(rbp, 3*BytesPerWord)); |
635 | #else |
636 | __ movptr(temp_reg, Address(rbp, 3*BytesPerWord)); |
637 | __ push(temp_reg); |
638 | __ movptr(temp_reg, Address(rbp, 2*BytesPerWord)); |
639 | __ push(temp_reg); |
640 | #endif // _LP64 |
641 | } |
642 | int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1); |
643 | |
644 | OopMapSet* oop_maps = new OopMapSet(); |
645 | oop_maps->add_gc_map(call_offset, oop_map); |
646 | |
647 | __ stop("should not reach here" ); |
648 | |
649 | return oop_maps; |
650 | } |
651 | |
652 | |
653 | OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) { |
654 | __ block_comment("generate_handle_exception" ); |
655 | |
656 | // incoming parameters |
657 | const Register exception_oop = rax; |
658 | const Register exception_pc = rdx; |
659 | // other registers used in this stub |
660 | const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); |
661 | |
662 | // Save registers, if required. |
663 | OopMapSet* oop_maps = new OopMapSet(); |
664 | OopMap* oop_map = NULL; |
665 | switch (id) { |
666 | case forward_exception_id: |
667 | // We're handling an exception in the context of a compiled frame. |
668 | // The registers have been saved in the standard places. Perform |
669 | // an exception lookup in the caller and dispatch to the handler |
    // if found. Otherwise unwind and dispatch to the caller's
671 | // exception handler. |
672 | oop_map = generate_oop_map(sasm, 1 /*thread*/); |
673 | |
674 | // load and clear pending exception oop into RAX |
675 | __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset())); |
676 | __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD); |
677 | |
678 | // load issuing PC (the return address for this stub) into rdx |
679 | __ movptr(exception_pc, Address(rbp, 1*BytesPerWord)); |
680 | |
681 | // make sure that the vm_results are cleared (may be unnecessary) |
682 | __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); |
683 | __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); |
684 | break; |
685 | case handle_exception_nofpu_id: |
686 | case handle_exception_id: |
687 | // At this point all registers MAY be live. |
688 | oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id); |
689 | break; |
690 | case handle_exception_from_callee_id: { |
691 | // At this point all registers except exception oop (RAX) and |
692 | // exception pc (RDX) are dead. |
693 | const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord); |
694 | oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0); |
695 | sasm->set_frame_size(frame_size); |
696 | WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes)); |
697 | break; |
698 | } |
699 | default: ShouldNotReachHere(); |
700 | } |
701 | |
702 | #ifdef TIERED |
703 | // C2 can leave the fpu stack dirty |
704 | if (UseSSE < 2) { |
705 | __ empty_FPU_stack(); |
706 | } |
707 | #endif // TIERED |
708 | |
  // verify that only rax and rdx are valid at this time
710 | __ invalidate_registers(false, true, true, false, true, true); |
  // verify that rax contains a valid exception
712 | __ verify_not_null_oop(exception_oop); |
713 | |
714 | // load address of JavaThread object for thread-local data |
715 | NOT_LP64(__ get_thread(thread);) |
716 | |
717 | #ifdef ASSERT |
718 | // check that fields in JavaThread for exception oop and issuing pc are |
719 | // empty before writing to them |
720 | Label oop_empty; |
721 | __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD); |
722 | __ jcc(Assembler::equal, oop_empty); |
723 | __ stop("exception oop already set" ); |
724 | __ bind(oop_empty); |
725 | |
726 | Label pc_empty; |
727 | __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0); |
728 | __ jcc(Assembler::equal, pc_empty); |
729 | __ stop("exception pc already set" ); |
730 | __ bind(pc_empty); |
731 | #endif |
732 | |
733 | // save exception oop and issuing pc into JavaThread |
734 | // (exception handler will load it from here) |
735 | __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop); |
736 | __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc); |
737 | |
738 | // patch throwing pc into return address (has bci & oop map) |
739 | __ movptr(Address(rbp, 1*BytesPerWord), exception_pc); |
740 | |
741 | // compute the exception handler. |
742 | // the exception oop and the throwing pc are read from the fields in JavaThread |
743 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); |
744 | oop_maps->add_gc_map(call_offset, oop_map); |
745 | |
746 | // rax: handler address |
  //      will be the deopt blob if the nmethod was deoptimized while we looked up
  //      the handler, regardless of whether a handler existed in the nmethod.
749 | |
  // only rax is valid at this time; all other registers have been destroyed by the runtime call
751 | __ invalidate_registers(false, true, true, true, true, true); |
752 | |
753 | // patch the return address, this stub will directly return to the exception handler |
754 | __ movptr(Address(rbp, 1*BytesPerWord), rax); |
755 | |
756 | switch (id) { |
757 | case forward_exception_id: |
758 | case handle_exception_nofpu_id: |
759 | case handle_exception_id: |
760 | // Restore the registers that were saved at the beginning. |
761 | restore_live_registers(sasm, id != handle_exception_nofpu_id); |
762 | break; |
763 | case handle_exception_from_callee_id: |
764 | // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP |
765 | // since we do a leave anyway. |
766 | |
767 | // Pop the return address. |
768 | __ leave(); |
769 | __ pop(rcx); |
770 | __ jmp(rcx); // jump to exception handler |
771 | break; |
772 | default: ShouldNotReachHere(); |
773 | } |
774 | |
775 | return oop_maps; |
776 | } |
777 | |
778 | |
779 | void Runtime1::generate_unwind_exception(StubAssembler *sasm) { |
780 | // incoming parameters |
781 | const Register exception_oop = rax; |
782 | // callee-saved copy of exception_oop during runtime call |
783 | const Register exception_oop_callee_saved = NOT_LP64(rsi) LP64_ONLY(r14); |
784 | // other registers used in this stub |
785 | const Register exception_pc = rdx; |
786 | const Register handler_addr = rbx; |
787 | const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); |
788 | |
  // verify that only rax is valid at this time
790 | __ invalidate_registers(false, true, true, true, true, true); |
791 | |
792 | #ifdef ASSERT |
793 | // check that fields in JavaThread for exception oop and issuing pc are empty |
794 | NOT_LP64(__ get_thread(thread);) |
795 | Label oop_empty; |
796 | __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0); |
797 | __ jcc(Assembler::equal, oop_empty); |
798 | __ stop("exception oop must be empty" ); |
799 | __ bind(oop_empty); |
800 | |
801 | Label pc_empty; |
802 | __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0); |
803 | __ jcc(Assembler::equal, pc_empty); |
804 | __ stop("exception pc must be empty" ); |
805 | __ bind(pc_empty); |
806 | #endif |
807 | |
808 | // clear the FPU stack in case any FPU results are left behind |
809 | __ empty_FPU_stack(); |
810 | |
811 | // save exception_oop in callee-saved register to preserve it during runtime calls |
812 | __ verify_not_null_oop(exception_oop); |
813 | __ movptr(exception_oop_callee_saved, exception_oop); |
814 | |
815 | NOT_LP64(__ get_thread(thread);) |
816 | // Get return address (is on top of stack after leave). |
817 | __ movptr(exception_pc, Address(rsp, 0)); |
818 | |
819 | // search the exception handler address of the caller (using the return address) |
820 | __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), thread, exception_pc); |
821 | // rax: exception handler address of the caller |
822 | |
  // Only RAX and RSI are valid at this time; all other registers have been destroyed by the call.
824 | __ invalidate_registers(false, true, true, true, false, true); |
825 | |
826 | // move result of call into correct register |
827 | __ movptr(handler_addr, rax); |
828 | |
829 | // Restore exception oop to RAX (required convention of exception handler). |
830 | __ movptr(exception_oop, exception_oop_callee_saved); |
831 | |
832 | // verify that there is really a valid exception in rax |
833 | __ verify_not_null_oop(exception_oop); |
834 | |
835 | // get throwing pc (= return address). |
836 | // rdx has been destroyed by the call, so it must be set again |
837 | // the pop is also necessary to simulate the effect of a ret(0) |
838 | __ pop(exception_pc); |
839 | |
840 | // continue at exception handler (return address removed) |
841 | // note: do *not* remove arguments when unwinding the |
842 | // activation since the caller assumes having |
843 | // all arguments on the stack when entering the |
844 | // runtime to determine the exception handler |
845 | // (GC happens at call site with arguments!) |
846 | // rax: exception oop |
847 | // rdx: throwing pc |
848 | // rbx: exception handler |
849 | __ jmp(handler_addr); |
850 | } |
851 | |
852 | |
853 | OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) { |
854 | // use the maximum number of runtime-arguments here because it is difficult to |
855 | // distinguish each RT-Call. |
  // Note: This number also affects the RT-Call in generate_handle_exception because
857 | // the oop-map is shared for all calls. |
858 | const int num_rt_args = 2; // thread + dummy |
859 | |
860 | DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); |
861 | assert(deopt_blob != NULL, "deoptimization blob must have been created" ); |
862 | |
863 | OopMap* oop_map = save_live_registers(sasm, num_rt_args); |
864 | |
865 | #ifdef _LP64 |
866 | const Register thread = r15_thread; |
867 | // No need to worry about dummy |
868 | __ mov(c_rarg0, thread); |
869 | #else |
870 | __ push(rax); // push dummy |
871 | |
872 | const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions) |
873 | // push java thread (becomes first argument of C function) |
874 | __ get_thread(thread); |
875 | __ push(thread); |
876 | #endif // _LP64 |
877 | __ set_last_Java_frame(thread, noreg, rbp, NULL); |
878 | // do the call |
879 | __ call(RuntimeAddress(target)); |
880 | OopMapSet* oop_maps = new OopMapSet(); |
881 | oop_maps->add_gc_map(__ offset(), oop_map); |
882 | // verify callee-saved register |
883 | #ifdef ASSERT |
884 | guarantee(thread != rax, "change this code" ); |
885 | __ push(rax); |
886 | { Label L; |
887 | __ get_thread(rax); |
888 | __ cmpptr(thread, rax); |
889 | __ jcc(Assembler::equal, L); |
890 | __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?" ); |
891 | __ bind(L); |
892 | } |
893 | __ pop(rax); |
894 | #endif |
895 | __ reset_last_Java_frame(thread, true); |
896 | #ifndef _LP64 |
897 | __ pop(rcx); // discard thread arg |
898 | __ pop(rcx); // discard dummy |
899 | #endif // _LP64 |
900 | |
901 | // check for pending exceptions |
902 | { Label L; |
903 | __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD); |
904 | __ jcc(Assembler::equal, L); |
905 | // exception pending => remove activation and forward to exception handler |
906 | |
907 | __ testptr(rax, rax); // have we deoptimized? |
908 | __ jump_cc(Assembler::equal, |
909 | RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id))); |
910 | |
911 | // the deopt blob expects exceptions in the special fields of |
912 | // JavaThread, so copy and clear pending exception. |
913 | |
914 | // load and clear pending exception |
915 | __ movptr(rax, Address(thread, Thread::pending_exception_offset())); |
916 | __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD); |
917 | |
918 | // check that there is really a valid exception |
919 | __ verify_not_null_oop(rax); |
920 | |
921 | // load throwing pc: this is the return address of the stub |
922 | __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size)); |
923 | |
924 | #ifdef ASSERT |
925 | // check that fields in JavaThread for exception oop and issuing pc are empty |
926 | Label oop_empty; |
927 | __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD); |
928 | __ jcc(Assembler::equal, oop_empty); |
929 | __ stop("exception oop must be empty" ); |
930 | __ bind(oop_empty); |
931 | |
932 | Label pc_empty; |
933 | __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD); |
934 | __ jcc(Assembler::equal, pc_empty); |
935 | __ stop("exception pc must be empty" ); |
936 | __ bind(pc_empty); |
937 | #endif |
938 | |
939 | // store exception oop and throwing pc to JavaThread |
940 | __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax); |
941 | __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx); |
942 | |
943 | restore_live_registers(sasm); |
944 | |
945 | __ leave(); |
946 | __ addptr(rsp, BytesPerWord); // remove return address from stack |
947 | |
    // Forward the exception directly to the deopt blob. We must not blow any
    // registers and must leave the throwing pc on the stack. A patch may have
    // values live in registers, so enter the deopt blob at the entry point
    // that expects the exception in TLS.
952 | __ jump(RuntimeAddress(deopt_blob->unpack_with_exception_in_tls())); |
953 | |
954 | __ bind(L); |
955 | } |
956 | |
957 | |
958 | // Runtime will return true if the nmethod has been deoptimized during |
959 | // the patching process. In that case we must do a deopt reexecute instead. |
960 | |
961 | Label cont; |
962 | |
963 | __ testptr(rax, rax); // have we deoptimized? |
964 | __ jcc(Assembler::equal, cont); // no |
965 | |
  // Will reexecute. The proper return address is already on the stack; we just
  // restore registers, pop all of our frame but the return address, and jump to the deopt blob.
968 | restore_live_registers(sasm); |
969 | __ leave(); |
970 | __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); |
971 | |
972 | __ bind(cont); |
973 | restore_live_registers(sasm); |
974 | __ leave(); |
975 | __ ret(0); |
976 | |
977 | return oop_maps; |
978 | } |
979 | |
980 | |
981 | OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) { |
982 | |
983 | // for better readability |
984 | const bool must_gc_arguments = true; |
985 | const bool dont_gc_arguments = false; |
986 | |
987 | // default value; overwritten for some optimized stubs that are called from methods that do not use the fpu |
988 | bool save_fpu_registers = true; |
989 | |
990 | // stub code & info for the different stubs |
991 | OopMapSet* oop_maps = NULL; |
992 | switch (id) { |
993 | case forward_exception_id: |
994 | { |
995 | oop_maps = generate_handle_exception(id, sasm); |
996 | __ leave(); |
997 | __ ret(0); |
998 | } |
999 | break; |
1000 | |
1001 | case new_instance_id: |
1002 | case fast_new_instance_id: |
1003 | case fast_new_instance_init_check_id: |
1004 | { |
1005 | Register klass = rdx; // Incoming |
1006 | Register obj = rax; // Result |
1007 | |
1008 | if (id == new_instance_id) { |
1009 | __ set_info("new_instance" , dont_gc_arguments); |
1010 | } else if (id == fast_new_instance_id) { |
1011 | __ set_info("fast new_instance" , dont_gc_arguments); |
1012 | } else { |
1013 | assert(id == fast_new_instance_init_check_id, "bad StubID" ); |
1014 | __ set_info("fast new_instance init check" , dont_gc_arguments); |
1015 | } |
1016 | |
1017 | // If TLAB is disabled, see if there is support for inlining contiguous |
1018 | // allocations. |
1019 | // Otherwise, just go to the slow path. |
1020 | if ((id == fast_new_instance_id || id == fast_new_instance_init_check_id) && !UseTLAB |
1021 | && Universe::heap()->supports_inline_contig_alloc()) { |
1022 | Label slow_path; |
1023 | Register obj_size = rcx; |
1024 | Register t1 = rbx; |
1025 | Register t2 = rsi; |
1026 | assert_different_registers(klass, obj, obj_size, t1, t2); |
1027 | |
1028 | __ push(rdi); |
1029 | __ push(rbx); |
1030 | |
1031 | if (id == fast_new_instance_init_check_id) { |
1032 | // make sure the klass is initialized |
1033 | __ cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); |
1034 | __ jcc(Assembler::notEqual, slow_path); |
1035 | } |
1036 | |
1037 | #ifdef ASSERT |
1038 | // assert object can be fast path allocated |
1039 | { |
1040 | Label ok, not_ok; |
1041 | __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); |
1042 | __ cmpl(obj_size, 0); // make sure it's an instance (LH > 0) |
1043 | __ jcc(Assembler::lessEqual, not_ok); |
1044 | __ testl(obj_size, Klass::_lh_instance_slow_path_bit); |
1045 | __ jcc(Assembler::zero, ok); |
1046 | __ bind(not_ok); |
1047 | __ stop("assert(can be fast path allocated)" ); |
1048 | __ should_not_reach_here(); |
1049 | __ bind(ok); |
1050 | } |
1051 | #endif // ASSERT |
1052 | |
1053 | const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); |
1054 | NOT_LP64(__ get_thread(thread)); |
1055 | |
        // get the instance size (size is positive so movl is fine for 64bit)
1057 | __ movl(obj_size, Address(klass, Klass::layout_helper_offset())); |
1058 | |
1059 | __ eden_allocate(thread, obj, obj_size, 0, t1, slow_path); |
1060 | |
1061 | __ initialize_object(obj, klass, obj_size, 0, t1, t2, /* is_tlab_allocated */ false); |
1062 | __ verify_oop(obj); |
1063 | __ pop(rbx); |
1064 | __ pop(rdi); |
1065 | __ ret(0); |
1066 | |
1067 | __ bind(slow_path); |
1068 | __ pop(rbx); |
1069 | __ pop(rdi); |
1070 | } |
1071 | |
1072 | __ enter(); |
1073 | OopMap* map = save_live_registers(sasm, 2); |
1074 | int call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_instance), klass); |
1075 | oop_maps = new OopMapSet(); |
1076 | oop_maps->add_gc_map(call_offset, map); |
1077 | restore_live_registers_except_rax(sasm); |
1078 | __ verify_oop(obj); |
1079 | __ leave(); |
1080 | __ ret(0); |
1081 | |
1082 | // rax,: new instance |
1083 | } |
1084 | |
1085 | break; |
1086 | |
1087 | case counter_overflow_id: |
1088 | { |
1089 | Register bci = rax, method = rbx; |
1090 | __ enter(); |
1091 | OopMap* map = save_live_registers(sasm, 3); |
1092 | // Retrieve bci |
1093 | __ movl(bci, Address(rbp, 2*BytesPerWord)); |
1094 | // And a pointer to the Method* |
1095 | __ movptr(method, Address(rbp, 3*BytesPerWord)); |
1096 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method); |
1097 | oop_maps = new OopMapSet(); |
1098 | oop_maps->add_gc_map(call_offset, map); |
1099 | restore_live_registers(sasm); |
1100 | __ leave(); |
1101 | __ ret(0); |
1102 | } |
1103 | break; |
1104 | |
1105 | case new_type_array_id: |
1106 | case new_object_array_id: |
1107 | { |
1108 | Register length = rbx; // Incoming |
1109 | Register klass = rdx; // Incoming |
1110 | Register obj = rax; // Result |
1111 | |
1112 | if (id == new_type_array_id) { |
1113 | __ set_info("new_type_array" , dont_gc_arguments); |
1114 | } else { |
1115 | __ set_info("new_object_array" , dont_gc_arguments); |
1116 | } |
1117 | |
1118 | #ifdef ASSERT |
1119 | // assert object type is really an array of the proper kind |
1120 | { |
1121 | Label ok; |
1122 | Register t0 = obj; |
1123 | __ movl(t0, Address(klass, Klass::layout_helper_offset())); |
1124 | __ sarl(t0, Klass::_lh_array_tag_shift); |
1125 | int tag = ((id == new_type_array_id) |
1126 | ? Klass::_lh_array_tag_type_value |
1127 | : Klass::_lh_array_tag_obj_value); |
1128 | __ cmpl(t0, tag); |
1129 | __ jcc(Assembler::equal, ok); |
1130 | __ stop("assert(is an array klass)" ); |
1131 | __ should_not_reach_here(); |
1132 | __ bind(ok); |
1133 | } |
1134 | #endif // ASSERT |
1135 | |
1136 | // If TLAB is disabled, see if there is support for inlining contiguous |
1137 | // allocations. |
1138 | // Otherwise, just go to the slow path. |
1139 | if (!UseTLAB && Universe::heap()->supports_inline_contig_alloc()) { |
1140 | Register arr_size = rsi; |
1141 | Register t1 = rcx; // must be rcx for use as shift count |
1142 | Register t2 = rdi; |
1143 | Label slow_path; |
1144 | |
1145 | // get the allocation size: round_up(hdr + length << (layout_helper & 0x1F)) |
1146 | // since size is positive movl does right thing on 64bit |
1147 | __ movl(t1, Address(klass, Klass::layout_helper_offset())); |
        // since size is positive movl does right thing on 64bit
1149 | __ movl(arr_size, length); |
1150 | assert(t1 == rcx, "fixed register usage" ); |
1151 | __ shlptr(arr_size /* by t1=rcx, mod 32 */); |
1152 | __ shrptr(t1, Klass::_lh_header_size_shift); |
1153 | __ andptr(t1, Klass::_lh_header_size_mask); |
1154 | __ addptr(arr_size, t1); |
1155 | __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up |
1156 | __ andptr(arr_size, ~MinObjAlignmentInBytesMask); |
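
        // For illustration only (the header size is an assumption, not read
        // from this code): an int[] of length 10 has log2(element size) == 2,
        // so with a 16-byte array header
        //   arr_size = align_up(16 + (10 << 2), 8) = 56 bytes.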
1157 | |
1158 | // Using t2 for non 64-bit. |
1159 | const Register thread = NOT_LP64(t2) LP64_ONLY(r15_thread); |
1160 | NOT_LP64(__ get_thread(thread)); |
1161 | __ eden_allocate(thread, obj, arr_size, 0, t1, slow_path); // preserves arr_size |
1162 | |
1163 | __ initialize_header(obj, klass, length, t1, t2); |
1164 | __ movb(t1, Address(klass, in_bytes(Klass::layout_helper_offset()) + (Klass::_lh_header_size_shift / BitsPerByte))); |
1165 | assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise" ); |
1166 | assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise" ); |
1167 | __ andptr(t1, Klass::_lh_header_size_mask); |
1168 | __ subptr(arr_size, t1); // body length |
1169 | __ addptr(t1, obj); // body start |
1170 | __ initialize_body(t1, arr_size, 0, t2); |
1171 | __ verify_oop(obj); |
1172 | __ ret(0); |
1173 | |
1174 | __ bind(slow_path); |
1175 | } |
1176 | |
1177 | __ enter(); |
1178 | OopMap* map = save_live_registers(sasm, 3); |
1179 | int call_offset; |
1180 | if (id == new_type_array_id) { |
1181 | call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_type_array), klass, length); |
1182 | } else { |
1183 | call_offset = __ call_RT(obj, noreg, CAST_FROM_FN_PTR(address, new_object_array), klass, length); |
1184 | } |
1185 | |
1186 | oop_maps = new OopMapSet(); |
1187 | oop_maps->add_gc_map(call_offset, map); |
1188 | restore_live_registers_except_rax(sasm); |
1189 | |
1190 | __ verify_oop(obj); |
1191 | __ leave(); |
1192 | __ ret(0); |
1193 | |
1194 | // rax,: new array |
1195 | } |
1196 | break; |
1197 | |
1198 | case new_multi_array_id: |
1199 | { StubFrame f(sasm, "new_multi_array" , dont_gc_arguments); |
1200 | // rax,: klass |
1201 | // rbx,: rank |
1202 | // rcx: address of 1st dimension |
1203 | OopMap* map = save_live_registers(sasm, 4); |
1204 | int call_offset = __ call_RT(rax, noreg, CAST_FROM_FN_PTR(address, new_multi_array), rax, rbx, rcx); |
1205 | |
1206 | oop_maps = new OopMapSet(); |
1207 | oop_maps->add_gc_map(call_offset, map); |
1208 | restore_live_registers_except_rax(sasm); |
1209 | |
1210 | // rax,: new multi array |
1211 | __ verify_oop(rax); |
1212 | } |
1213 | break; |
1214 | |
1215 | case register_finalizer_id: |
1216 | { |
1217 | __ set_info("register_finalizer" , dont_gc_arguments); |
1218 | |
        // This is called via call_runtime, so the arguments
        // will be placed in C ABI locations.
1221 | |
1222 | #ifdef _LP64 |
1223 | __ verify_oop(c_rarg0); |
1224 | __ mov(rax, c_rarg0); |
1225 | #else |
        // The object is passed on the stack and we haven't pushed a
        // frame yet, so it is one word away from the top of the stack.
1228 | __ movptr(rax, Address(rsp, 1 * BytesPerWord)); |
1229 | __ verify_oop(rax); |
1230 | #endif // _LP64 |
1231 | |
        // load the klass and check the has-finalizer flag
1233 | Label register_finalizer; |
1234 | Register t = rsi; |
1235 | __ load_klass(t, rax); |
1236 | __ movl(t, Address(t, Klass::access_flags_offset())); |
1237 | __ testl(t, JVM_ACC_HAS_FINALIZER); |
1238 | __ jcc(Assembler::notZero, register_finalizer); |
1239 | __ ret(0); |
1240 | |
1241 | __ bind(register_finalizer); |
1242 | __ enter(); |
1243 | OopMap* oop_map = save_live_registers(sasm, 2 /*num_rt_args */); |
1244 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, SharedRuntime::register_finalizer), rax); |
1245 | oop_maps = new OopMapSet(); |
1246 | oop_maps->add_gc_map(call_offset, oop_map); |
1247 | |
1248 | // Now restore all the live registers |
1249 | restore_live_registers(sasm); |
1250 | |
1251 | __ leave(); |
1252 | __ ret(0); |
1253 | } |
1254 | break; |
1255 | |
1256 | case throw_range_check_failed_id: |
1257 | { StubFrame f(sasm, "range_check_failed" , dont_gc_arguments); |
1258 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_range_check_exception), true); |
1259 | } |
1260 | break; |
1261 | |
1262 | case throw_index_exception_id: |
1263 | { StubFrame f(sasm, "index_range_check_failed" , dont_gc_arguments); |
1264 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_index_exception), true); |
1265 | } |
1266 | break; |
1267 | |
1268 | case throw_div0_exception_id: |
1269 | { StubFrame f(sasm, "throw_div0_exception" , dont_gc_arguments); |
1270 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_div0_exception), false); |
1271 | } |
1272 | break; |
1273 | |
1274 | case throw_null_pointer_exception_id: |
1275 | { StubFrame f(sasm, "throw_null_pointer_exception" , dont_gc_arguments); |
1276 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_null_pointer_exception), false); |
1277 | } |
1278 | break; |
1279 | |
1280 | case handle_exception_nofpu_id: |
1281 | case handle_exception_id: |
1282 | { StubFrame f(sasm, "handle_exception" , dont_gc_arguments); |
1283 | oop_maps = generate_handle_exception(id, sasm); |
1284 | } |
1285 | break; |
1286 | |
1287 | case handle_exception_from_callee_id: |
1288 | { StubFrame f(sasm, "handle_exception_from_callee" , dont_gc_arguments); |
1289 | oop_maps = generate_handle_exception(id, sasm); |
1290 | } |
1291 | break; |
1292 | |
1293 | case unwind_exception_id: |
1294 | { __ set_info("unwind_exception" , dont_gc_arguments); |
1295 | // note: no stubframe since we are about to leave the current |
1296 | // activation and we are calling a leaf VM function only. |
1297 | generate_unwind_exception(sasm); |
1298 | } |
1299 | break; |
1300 | |
1301 | case throw_array_store_exception_id: |
1302 | { StubFrame f(sasm, "throw_array_store_exception" , dont_gc_arguments); |
1303 | // tos + 0: link |
1304 | // + 1: return address |
1305 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_array_store_exception), true); |
1306 | } |
1307 | break; |
1308 | |
1309 | case throw_class_cast_exception_id: |
1310 | { StubFrame f(sasm, "throw_class_cast_exception" , dont_gc_arguments); |
1311 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_class_cast_exception), true); |
1312 | } |
1313 | break; |
1314 | |
1315 | case throw_incompatible_class_change_error_id: |
1316 | { StubFrame f(sasm, "throw_incompatible_class_cast_exception" , dont_gc_arguments); |
1317 | oop_maps = generate_exception_throw(sasm, CAST_FROM_FN_PTR(address, throw_incompatible_class_change_error), false); |
1318 | } |
1319 | break; |
1320 | |
1321 | case slow_subtype_check_id: |
1322 | { |
1323 | // Typical calling sequence: |
1324 | // __ push(klass_RInfo); // object klass or other subclass |
1325 | // __ push(sup_k_RInfo); // array element klass or other superclass |
1326 | // __ call(slow_subtype_check); |
1327 | // Note that the subclass is pushed first, and is therefore deepest. |
1328 | // Previous versions of this code reversed the names 'sub' and 'super'. |
1329 | // This was operationally harmless but made the code unreadable. |
1330 | enum layout { |
1331 | rax_off, SLOT2(raxH_off) |
1332 | rcx_off, SLOT2(rcxH_off) |
1333 | rsi_off, SLOT2(rsiH_off) |
1334 | rdi_off, SLOT2(rdiH_off) |
1335 | // saved_rbp_off, SLOT2(saved_rbpH_off) |
1336 | return_off, SLOT2(returnH_off) |
1337 | sup_k_off, SLOT2(sup_kH_off) |
1338 | klass_off, SLOT2(superH_off) |
1339 | framesize, |
1340 | result_off = klass_off // deepest argument is also the return value |
1341 | }; |
1342 | |
1343 | __ set_info("slow_subtype_check" , dont_gc_arguments); |
1344 | __ push(rdi); |
1345 | __ push(rsi); |
1346 | __ push(rcx); |
1347 | __ push(rax); |
1348 | |
1349 | // This is called by pushing args and not with C abi |
1350 | __ movptr(rsi, Address(rsp, (klass_off) * VMRegImpl::stack_slot_size)); // subclass |
1351 | __ movptr(rax, Address(rsp, (sup_k_off) * VMRegImpl::stack_slot_size)); // superclass |
1352 | |
1353 | Label miss; |
1354 | __ check_klass_subtype_slow_path(rsi, rax, rcx, rdi, NULL, &miss); |
1355 | |
1356 | // fallthrough on success: |
1357 | __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), 1); // result |
1358 | __ pop(rax); |
1359 | __ pop(rcx); |
1360 | __ pop(rsi); |
1361 | __ pop(rdi); |
1362 | __ ret(0); |
1363 | |
1364 | __ bind(miss); |
1365 | __ movptr(Address(rsp, (result_off) * VMRegImpl::stack_slot_size), NULL_WORD); // result |
1366 | __ pop(rax); |
1367 | __ pop(rcx); |
1368 | __ pop(rsi); |
1369 | __ pop(rdi); |
1370 | __ ret(0); |
1371 | } |
1372 | break; |
1373 | |
1374 | case monitorenter_nofpu_id: |
1375 | save_fpu_registers = false; |
1376 | // fall through |
1377 | case monitorenter_id: |
1378 | { |
1379 | StubFrame f(sasm, "monitorenter" , dont_gc_arguments); |
1380 | OopMap* map = save_live_registers(sasm, 3, save_fpu_registers); |
1381 | |
1382 | // Called with store_parameter and not C abi |
1383 | |
1384 | f.load_argument(1, rax); // rax,: object |
1385 | f.load_argument(0, rbx); // rbx,: lock address |
1386 | |
1387 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorenter), rax, rbx); |
1388 | |
1389 | oop_maps = new OopMapSet(); |
1390 | oop_maps->add_gc_map(call_offset, map); |
1391 | restore_live_registers(sasm, save_fpu_registers); |
1392 | } |
1393 | break; |
1394 | |
1395 | case monitorexit_nofpu_id: |
1396 | save_fpu_registers = false; |
1397 | // fall through |
1398 | case monitorexit_id: |
1399 | { |
1400 | StubFrame f(sasm, "monitorexit" , dont_gc_arguments); |
1401 | OopMap* map = save_live_registers(sasm, 2, save_fpu_registers); |
1402 | |
1403 | // Called with store_parameter and not C abi |
1404 | |
1405 | f.load_argument(0, rax); // rax,: lock address |
1406 | |
1407 | // note: really a leaf routine but must setup last java sp |
1408 | // => use call_RT for now (speed can be improved by |
1409 | // doing last java sp setup manually) |
1410 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, monitorexit), rax); |
1411 | |
1412 | oop_maps = new OopMapSet(); |
1413 | oop_maps->add_gc_map(call_offset, map); |
1414 | restore_live_registers(sasm, save_fpu_registers); |
1415 | } |
1416 | break; |
1417 | |
1418 | case deoptimize_id: |
1419 | { |
1420 | StubFrame f(sasm, "deoptimize" , dont_gc_arguments); |
1421 | const int num_rt_args = 2; // thread, trap_request |
1422 | OopMap* oop_map = save_live_registers(sasm, num_rt_args); |
1423 | f.load_argument(0, rax); |
1424 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), rax); |
1425 | oop_maps = new OopMapSet(); |
1426 | oop_maps->add_gc_map(call_offset, oop_map); |
1427 | restore_live_registers(sasm); |
1428 | DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); |
1429 | assert(deopt_blob != NULL, "deoptimization blob must have been created" ); |
1430 | __ leave(); |
1431 | __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); |
1432 | } |
1433 | break; |
1434 | |
1435 | case access_field_patching_id: |
1436 | { StubFrame f(sasm, "access_field_patching" , dont_gc_arguments); |
1437 | // we should set up register map |
1438 | oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, access_field_patching)); |
1439 | } |
1440 | break; |
1441 | |
1442 | case load_klass_patching_id: |
1443 | { StubFrame f(sasm, "load_klass_patching" , dont_gc_arguments); |
1444 | // we should set up register map |
1445 | oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_klass_patching)); |
1446 | } |
1447 | break; |
1448 | |
1449 | case load_mirror_patching_id: |
1450 | { StubFrame f(sasm, "load_mirror_patching" , dont_gc_arguments); |
1451 | // we should set up register map |
1452 | oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_mirror_patching)); |
1453 | } |
1454 | break; |
1455 | |
1456 | case load_appendix_patching_id: |
1457 | { StubFrame f(sasm, "load_appendix_patching" , dont_gc_arguments); |
1458 | // we should set up register map |
1459 | oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching)); |
1460 | } |
1461 | break; |
1462 | |
1463 | case dtrace_object_alloc_id: |
1464 | { // rax,: object |
1465 | StubFrame f(sasm, "dtrace_object_alloc" , dont_gc_arguments); |
1466 | // we can't gc here so skip the oopmap but make sure that all |
1467 | // the live registers get saved. |
1468 | save_live_registers(sasm, 1); |
1469 | |
1470 | __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax)); |
1471 | __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc))); |
1472 | NOT_LP64(__ pop(rax)); |
1473 | |
1474 | restore_live_registers(sasm); |
1475 | } |
1476 | break; |
1477 | |
1478 | case fpu2long_stub_id: |
1479 | { |
        // rax and rdx are destroyed, but that is acceptable since the result is returned in them
        // preserve rsi and rcx
1482 | __ push(rsi); |
1483 | __ push(rcx); |
1484 | LP64_ONLY(__ push(rdx);) |
1485 | |
1486 | // check for NaN |
1487 | Label return0, do_return, return_min_jlong, do_convert; |
1488 | |
1489 | Address value_high_word(rsp, wordSize + 4); |
1490 | Address value_low_word(rsp, wordSize); |
1491 | Address result_high_word(rsp, 3*wordSize + 4); |
1492 | Address result_low_word(rsp, 3*wordSize); |
1493 | |
1494 | __ subptr(rsp, 32); // more than enough on 32bit |
1495 | __ fst_d(value_low_word); |
1496 | __ movl(rax, value_high_word); |
1497 | __ andl(rax, 0x7ff00000); |
1498 | __ cmpl(rax, 0x7ff00000); |
1499 | __ jcc(Assembler::notEqual, do_convert); |
1500 | __ movl(rax, value_high_word); |
1501 | __ andl(rax, 0xfffff); |
1502 | __ orl(rax, value_low_word); |
1503 | __ jcc(Assembler::notZero, return0); |
1504 | |
1505 | __ bind(do_convert); |
1506 | __ fnstcw(Address(rsp, 0)); |
1507 | __ movzwl(rax, Address(rsp, 0)); |
1508 | __ orl(rax, 0xc00); |
1509 | __ movw(Address(rsp, 2), rax); |
1510 | __ fldcw(Address(rsp, 2)); |
1511 | __ fwait(); |
1512 | __ fistp_d(result_low_word); |
1513 | __ fldcw(Address(rsp, 0)); |
1514 | __ fwait(); |
1515 | // This gets the entire long in rax on 64bit |
1516 | __ movptr(rax, result_low_word); |
1517 | // testing of high bits |
1518 | __ movl(rdx, result_high_word); |
1519 | __ mov(rcx, rax); |
1520 | // What the heck is the point of the next instruction??? |
1521 | __ xorl(rcx, 0x0); |
1522 | __ movl(rsi, 0x80000000); |
1523 | __ xorl(rsi, rdx); |
1524 | __ orl(rcx, rsi); |
1525 | __ jcc(Assembler::notEqual, do_return); |
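        // Only min_jlong (the x87 "integer indefinite" value) falls through here;
        // fistp produces it for out-of-range inputs (NaN was filtered out above),
        // so compare the original value against zero to choose between returning
        // min_jlong and max_jlong.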
1526 | __ fldz(); |
1527 | __ fcomp_d(value_low_word); |
1528 | __ fnstsw_ax(); |
1529 | #ifdef _LP64 |
1530 | __ testl(rax, 0x4100); // ZF & CF == 0 |
1531 | __ jcc(Assembler::equal, return_min_jlong); |
1532 | #else |
1533 | __ sahf(); |
1534 | __ jcc(Assembler::above, return_min_jlong); |
1535 | #endif // _LP64 |
1536 | // return max_jlong |
1537 | #ifndef _LP64 |
1538 | __ movl(rdx, 0x7fffffff); |
1539 | __ movl(rax, 0xffffffff); |
1540 | #else |
1541 | __ mov64(rax, CONST64(0x7fffffffffffffff)); |
1542 | #endif // _LP64 |
1543 | __ jmp(do_return); |
1544 | |
1545 | __ bind(return_min_jlong); |
1546 | #ifndef _LP64 |
1547 | __ movl(rdx, 0x80000000); |
1548 | __ xorl(rax, rax); |
1549 | #else |
1550 | __ mov64(rax, UCONST64(0x8000000000000000)); |
1551 | #endif // _LP64 |
1552 | __ jmp(do_return); |
1553 | |
1554 | __ bind(return0); |
1555 | __ fpop(); |
1556 | #ifndef _LP64 |
1557 | __ xorptr(rdx,rdx); |
1558 | __ xorptr(rax,rax); |
1559 | #else |
1560 | __ xorptr(rax, rax); |
1561 | #endif // _LP64 |
1562 | |
1563 | __ bind(do_return); |
1564 | __ addptr(rsp, 32); |
1565 | LP64_ONLY(__ pop(rdx);) |
1566 | __ pop(rcx); |
1567 | __ pop(rsi); |
1568 | __ ret(0); |
1569 | } |
1570 | break; |
1571 | |
1572 | case predicate_failed_trap_id: |
1573 | { |
1574 | StubFrame f(sasm, "predicate_failed_trap" , dont_gc_arguments); |
1575 | |
1576 | OopMap* map = save_live_registers(sasm, 1); |
1577 | |
1578 | int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, predicate_failed_trap)); |
1579 | oop_maps = new OopMapSet(); |
1580 | oop_maps->add_gc_map(call_offset, map); |
1581 | restore_live_registers(sasm); |
1582 | __ leave(); |
1583 | DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob(); |
1584 | assert(deopt_blob != NULL, "deoptimization blob must have been created" ); |
1585 | |
1586 | __ jump(RuntimeAddress(deopt_blob->unpack_with_reexecution())); |
1587 | } |
1588 | break; |
1589 | |
1590 | default: |
1591 | { StubFrame f(sasm, "unimplemented entry" , dont_gc_arguments); |
1592 | __ movptr(rax, (int)id); |
1593 | __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax); |
1594 | __ should_not_reach_here(); |
1595 | } |
1596 | break; |
1597 | } |
1598 | return oop_maps; |
1599 | } |
1600 | |
1601 | #undef __ |
1602 | |
1603 | const char *Runtime1::pd_name_for_address(address entry) { |
1604 | return "<unknown function>" ; |
1605 | } |
1606 | |