1 | /* |
2 | * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "c1/c1_FrameMap.hpp" |
27 | #include "c1/c1_LIR.hpp" |
28 | #include "runtime/sharedRuntime.hpp" |
29 | #include "vmreg_x86.inline.hpp" |
30 | |
31 | const int FrameMap::pd_c_runtime_reserved_arg_size = 0; |
32 | |
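// Map a VMRegPair handed back by the shared calling-convention code to a C1
// LIR operand of the given type: stack slots become rsp-relative addresses,
// registers become CPU, FPU or XMM operands.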
33 | LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) { |
34 | LIR_Opr opr = LIR_OprFact::illegalOpr; |
35 | VMReg r_1 = reg->first(); |
36 | VMReg r_2 = reg->second(); |
37 | if (r_1->is_stack()) { |
38 | // Convert stack slot to an SP offset |
39 | // The calling convention does not count the SharedRuntime::out_preserve_stack_slots() value |
40 | // so we must add it in here. |
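    // For example, assuming stack_slot_size of 4 bytes and no preserved out
    // slots, stack slot 3 would map to byte offset 12 from rsp.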
41 | int st_off = (r_1->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size; |
42 | opr = LIR_OprFact::address(new LIR_Address(rsp_opr, st_off, type)); |
43 | } else if (r_1->is_Register()) { |
44 | Register reg = r_1->as_Register(); |
45 | if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) { |
46 | Register reg2 = r_2->as_Register(); |
47 | #ifdef _LP64 |
      assert(reg2 == reg, "must be same register");
49 | opr = as_long_opr(reg); |
50 | #else |
51 | opr = as_long_opr(reg2, reg); |
52 | #endif // _LP64 |
53 | } else if (type == T_OBJECT || type == T_ARRAY) { |
54 | opr = as_oop_opr(reg); |
55 | } else if (type == T_METADATA) { |
56 | opr = as_metadata_opr(reg); |
57 | } else { |
58 | opr = as_opr(reg); |
59 | } |
60 | } else if (r_1->is_FloatRegister()) { |
    assert(type == T_DOUBLE || type == T_FLOAT, "wrong type");
62 | int num = r_1->as_FloatRegister()->encoding(); |
63 | if (type == T_FLOAT) { |
64 | opr = LIR_OprFact::single_fpu(num); |
65 | } else { |
66 | opr = LIR_OprFact::double_fpu(num); |
67 | } |
68 | } else if (r_1->is_XMMRegister()) { |
    assert(type == T_DOUBLE || type == T_FLOAT, "wrong type");
70 | int num = r_1->as_XMMRegister()->encoding(); |
71 | if (type == T_FLOAT) { |
72 | opr = LIR_OprFact::single_xmm(num); |
73 | } else { |
74 | opr = LIR_OprFact::double_xmm(num); |
75 | } |
76 | } else { |
77 | ShouldNotReachHere(); |
78 | } |
79 | return opr; |
80 | } |
81 | |
82 | |
83 | LIR_Opr FrameMap::rsi_opr; |
84 | LIR_Opr FrameMap::rdi_opr; |
85 | LIR_Opr FrameMap::rbx_opr; |
86 | LIR_Opr FrameMap::rax_opr; |
87 | LIR_Opr FrameMap::rdx_opr; |
88 | LIR_Opr FrameMap::rcx_opr; |
89 | LIR_Opr FrameMap::rsp_opr; |
90 | LIR_Opr FrameMap::rbp_opr; |
91 | |
92 | LIR_Opr FrameMap::receiver_opr; |
93 | |
94 | LIR_Opr FrameMap::rsi_oop_opr; |
95 | LIR_Opr FrameMap::rdi_oop_opr; |
96 | LIR_Opr FrameMap::rbx_oop_opr; |
97 | LIR_Opr FrameMap::rax_oop_opr; |
98 | LIR_Opr FrameMap::rdx_oop_opr; |
99 | LIR_Opr FrameMap::rcx_oop_opr; |
100 | |
101 | LIR_Opr FrameMap::rsi_metadata_opr; |
102 | LIR_Opr FrameMap::rdi_metadata_opr; |
103 | LIR_Opr FrameMap::rbx_metadata_opr; |
104 | LIR_Opr FrameMap::rax_metadata_opr; |
105 | LIR_Opr FrameMap::rdx_metadata_opr; |
106 | LIR_Opr FrameMap::rcx_metadata_opr; |
107 | |
108 | LIR_Opr FrameMap::long0_opr; |
109 | LIR_Opr FrameMap::long1_opr; |
110 | LIR_Opr FrameMap::fpu0_float_opr; |
111 | LIR_Opr FrameMap::fpu0_double_opr; |
112 | LIR_Opr FrameMap::xmm0_float_opr; |
113 | LIR_Opr FrameMap::xmm0_double_opr; |
114 | |
115 | #ifdef _LP64 |
116 | |
117 | LIR_Opr FrameMap::r8_opr; |
118 | LIR_Opr FrameMap::r9_opr; |
119 | LIR_Opr FrameMap::r10_opr; |
120 | LIR_Opr FrameMap::r11_opr; |
121 | LIR_Opr FrameMap::r12_opr; |
122 | LIR_Opr FrameMap::r13_opr; |
123 | LIR_Opr FrameMap::r14_opr; |
124 | LIR_Opr FrameMap::r15_opr; |
125 | |
126 | // r10 and r15 can never contain oops since they aren't available to |
127 | // the allocator |
128 | LIR_Opr FrameMap::r8_oop_opr; |
129 | LIR_Opr FrameMap::r9_oop_opr; |
130 | LIR_Opr FrameMap::r11_oop_opr; |
131 | LIR_Opr FrameMap::r12_oop_opr; |
132 | LIR_Opr FrameMap::r13_oop_opr; |
133 | LIR_Opr FrameMap::r14_oop_opr; |
134 | |
135 | LIR_Opr FrameMap::r8_metadata_opr; |
136 | LIR_Opr FrameMap::r9_metadata_opr; |
137 | LIR_Opr FrameMap::r11_metadata_opr; |
138 | LIR_Opr FrameMap::r12_metadata_opr; |
139 | LIR_Opr FrameMap::r13_metadata_opr; |
140 | LIR_Opr FrameMap::r14_metadata_opr; |
141 | #endif // _LP64 |
142 | |
143 | LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0, }; |
144 | LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, }; |
145 | LIR_Opr FrameMap::_caller_save_xmm_regs[] = { 0, }; |
146 | |
147 | XMMRegister FrameMap::_xmm_regs [] = { 0, }; |
148 | |
149 | XMMRegister FrameMap::nr2xmmreg(int rnr) { |
  assert(_init_done, "tables not initialized");
151 | return _xmm_regs[rnr]; |
152 | } |
153 | |
154 | //-------------------------------------------------------- |
155 | // FrameMap |
156 | //-------------------------------------------------------- |
157 | |
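// Establish C1's register numbering. Allocatable registers get the low
// numbers; registers the allocator must never hand out (rsp and rbp, plus
// r10 and r15 on 64-bit) are mapped at the end of the table.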
158 | void FrameMap::initialize() { |
  assert(!_init_done, "once");
160 | |
  assert(nof_cpu_regs == LP64_ONLY(16) NOT_LP64(8), "wrong number of CPU registers");
162 | map_register(0, rsi); rsi_opr = LIR_OprFact::single_cpu(0); |
163 | map_register(1, rdi); rdi_opr = LIR_OprFact::single_cpu(1); |
164 | map_register(2, rbx); rbx_opr = LIR_OprFact::single_cpu(2); |
165 | map_register(3, rax); rax_opr = LIR_OprFact::single_cpu(3); |
166 | map_register(4, rdx); rdx_opr = LIR_OprFact::single_cpu(4); |
167 | map_register(5, rcx); rcx_opr = LIR_OprFact::single_cpu(5); |
168 | |
169 | #ifndef _LP64 |
170 | // The unallocatable registers are at the end |
171 | map_register(6, rsp); |
172 | map_register(7, rbp); |
173 | #else |
174 | map_register( 6, r8); r8_opr = LIR_OprFact::single_cpu(6); |
175 | map_register( 7, r9); r9_opr = LIR_OprFact::single_cpu(7); |
176 | map_register( 8, r11); r11_opr = LIR_OprFact::single_cpu(8); |
177 | map_register( 9, r13); r13_opr = LIR_OprFact::single_cpu(9); |
178 | map_register(10, r14); r14_opr = LIR_OprFact::single_cpu(10); |
179 | // r12 is allocated conditionally. With compressed oops it holds |
180 | // the heapbase value and is not visible to the allocator. |
181 | map_register(11, r12); r12_opr = LIR_OprFact::single_cpu(11); |
182 | // The unallocatable registers are at the end |
183 | map_register(12, r10); r10_opr = LIR_OprFact::single_cpu(12); |
184 | map_register(13, r15); r15_opr = LIR_OprFact::single_cpu(13); |
185 | map_register(14, rsp); |
186 | map_register(15, rbp); |
187 | #endif // _LP64 |
188 | |
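  // long0/long1 describe fixed register pairs for long operands: on 32-bit
  // they pair eax with edx and ebx with ecx; on 64-bit a long fits in a
  // single register, so the same register number is used for both halves.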
189 | #ifdef _LP64 |
190 | long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 3 /*eax*/); |
191 | long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 2 /*ebx*/); |
192 | #else |
193 | long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 4 /*edx*/); |
194 | long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 5 /*ecx*/); |
195 | #endif // _LP64 |
196 | fpu0_float_opr = LIR_OprFact::single_fpu(0); |
197 | fpu0_double_opr = LIR_OprFact::double_fpu(0); |
198 | xmm0_float_opr = LIR_OprFact::single_xmm(0); |
199 | xmm0_double_opr = LIR_OprFact::double_xmm(0); |
200 | |
201 | _caller_save_cpu_regs[0] = rsi_opr; |
202 | _caller_save_cpu_regs[1] = rdi_opr; |
203 | _caller_save_cpu_regs[2] = rbx_opr; |
204 | _caller_save_cpu_regs[3] = rax_opr; |
205 | _caller_save_cpu_regs[4] = rdx_opr; |
206 | _caller_save_cpu_regs[5] = rcx_opr; |
207 | |
208 | #ifdef _LP64 |
209 | _caller_save_cpu_regs[6] = r8_opr; |
210 | _caller_save_cpu_regs[7] = r9_opr; |
211 | _caller_save_cpu_regs[8] = r11_opr; |
212 | _caller_save_cpu_regs[9] = r13_opr; |
213 | _caller_save_cpu_regs[10] = r14_opr; |
214 | _caller_save_cpu_regs[11] = r12_opr; |
215 | #endif // _LP64 |
216 | |
217 | |
218 | _xmm_regs[0] = xmm0; |
219 | _xmm_regs[1] = xmm1; |
220 | _xmm_regs[2] = xmm2; |
221 | _xmm_regs[3] = xmm3; |
222 | _xmm_regs[4] = xmm4; |
223 | _xmm_regs[5] = xmm5; |
224 | _xmm_regs[6] = xmm6; |
225 | _xmm_regs[7] = xmm7; |
226 | |
227 | #ifdef _LP64 |
228 | _xmm_regs[8] = xmm8; |
229 | _xmm_regs[9] = xmm9; |
230 | _xmm_regs[10] = xmm10; |
231 | _xmm_regs[11] = xmm11; |
232 | _xmm_regs[12] = xmm12; |
233 | _xmm_regs[13] = xmm13; |
234 | _xmm_regs[14] = xmm14; |
235 | _xmm_regs[15] = xmm15; |
236 | _xmm_regs[16] = xmm16; |
237 | _xmm_regs[17] = xmm17; |
238 | _xmm_regs[18] = xmm18; |
239 | _xmm_regs[19] = xmm19; |
240 | _xmm_regs[20] = xmm20; |
241 | _xmm_regs[21] = xmm21; |
242 | _xmm_regs[22] = xmm22; |
243 | _xmm_regs[23] = xmm23; |
244 | _xmm_regs[24] = xmm24; |
245 | _xmm_regs[25] = xmm25; |
246 | _xmm_regs[26] = xmm26; |
247 | _xmm_regs[27] = xmm27; |
248 | _xmm_regs[28] = xmm28; |
249 | _xmm_regs[29] = xmm29; |
250 | _xmm_regs[30] = xmm30; |
251 | _xmm_regs[31] = xmm31; |
252 | #endif // _LP64 |
253 | |
254 | for (int i = 0; i < 8; i++) { |
255 | _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i); |
256 | } |
257 | |
258 | int num_caller_save_xmm_regs = get_num_caller_save_xmms(); |
259 | for (int i = 0; i < num_caller_save_xmm_regs; i++) { |
260 | _caller_save_xmm_regs[i] = LIR_OprFact::single_xmm(i); |
261 | } |
262 | |
263 | _init_done = true; |
264 | |
265 | rsi_oop_opr = as_oop_opr(rsi); |
266 | rdi_oop_opr = as_oop_opr(rdi); |
267 | rbx_oop_opr = as_oop_opr(rbx); |
268 | rax_oop_opr = as_oop_opr(rax); |
269 | rdx_oop_opr = as_oop_opr(rdx); |
270 | rcx_oop_opr = as_oop_opr(rcx); |
271 | |
272 | rsi_metadata_opr = as_metadata_opr(rsi); |
273 | rdi_metadata_opr = as_metadata_opr(rdi); |
274 | rbx_metadata_opr = as_metadata_opr(rbx); |
275 | rax_metadata_opr = as_metadata_opr(rax); |
276 | rdx_metadata_opr = as_metadata_opr(rdx); |
277 | rcx_metadata_opr = as_metadata_opr(rcx); |
278 | |
279 | rsp_opr = as_pointer_opr(rsp); |
280 | rbp_opr = as_pointer_opr(rbp); |
281 | |
282 | #ifdef _LP64 |
283 | r8_oop_opr = as_oop_opr(r8); |
284 | r9_oop_opr = as_oop_opr(r9); |
285 | r11_oop_opr = as_oop_opr(r11); |
286 | r12_oop_opr = as_oop_opr(r12); |
287 | r13_oop_opr = as_oop_opr(r13); |
288 | r14_oop_opr = as_oop_opr(r14); |
289 | |
290 | r8_metadata_opr = as_metadata_opr(r8); |
291 | r9_metadata_opr = as_metadata_opr(r9); |
292 | r11_metadata_opr = as_metadata_opr(r11); |
293 | r12_metadata_opr = as_metadata_opr(r12); |
294 | r13_metadata_opr = as_metadata_opr(r13); |
295 | r14_metadata_opr = as_metadata_opr(r14); |
296 | #endif // _LP64 |
297 | |
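  // Ask the Java calling convention where the receiver (the first object
  // argument) is passed and record that register as an oop operand.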
298 | VMRegPair regs; |
299 | BasicType sig_bt = T_OBJECT; |
300 | SharedRuntime::java_calling_convention(&sig_bt, ®s, 1, true); |
301 | receiver_opr = as_oop_opr(regs.first()->as_Register()); |
302 | |
303 | } |
304 | |
305 | |
306 | Address FrameMap::make_new_address(ByteSize sp_offset) const { |
  // for an rbp-based address use this:
308 | // return Address(rbp, in_bytes(sp_offset) - (framesize() - 2) * 4); |
309 | return Address(rsp, in_bytes(sp_offset)); |
310 | } |
311 | |
312 | |
313 | // ----------------mapping----------------------- |
// all mapping is based on rbp addressing, except for simple leaf methods where we access
// the locals rsp-based (and no frame is built)
316 | |
317 | |
318 | // Frame for simple leaf methods (quick entries) |
319 | // |
320 | // +----------+ |
321 | // | ret addr | <- TOS |
322 | // +----------+ |
323 | // | args | |
324 | // | ...... | |
325 | |
326 | // Frame for standard methods |
327 | // |
328 | // | .........| <- TOS |
329 | // | locals | |
330 | // +----------+ |
331 | // | old rbp, | <- EBP |
332 | // +----------+ |
333 | // | ret addr | |
334 | // +----------+ |
335 | // | args | |
336 | // | .........| |
337 | |
338 | |
// For OopMaps, map a local variable or spill index to a VMRegImpl name.
// This is the offset from sp() in the frame of the slot for the index,
// skewed by VMRegImpl::stack0 to indicate a stack location (vs. a register).
342 | // |
//   framesize +
//   stack0          stack0            0  <- VMReg
//     |               |  <registers>  |
//  ..........|..............|..............|
//    0 1 2 3   x x  4 5 6 ...  |              <- local indices
//    ^         ^             sp()               (x x indicate link
//    |         |                                 and return addr)
//  arguments   non-argument locals
351 | |
352 | |
353 | VMReg FrameMap::fpu_regname (int n) { |
  // Return the VMReg name for the fpu stack slot "n"
  // A spilled fpu stack slot comprises two single-word VMRegs.
356 | return as_FloatRegister(n)->as_VMReg(); |
357 | } |
358 | |
359 | LIR_Opr FrameMap::stack_pointer() { |
360 | return FrameMap::rsp_opr; |
361 | } |
362 | |
363 | // JSR 292 |
364 | // On x86, there is no need to save the SP, because neither |
365 | // method handle intrinsics, nor compiled lambda forms modify it. |
366 | LIR_Opr FrameMap::method_handle_invoke_SP_save_opr() { |
367 | return LIR_OprFact::illegalOpr; |
368 | } |
369 | |
370 | bool FrameMap::validate_frame() { |
371 | return true; |
372 | } |
373 | |