/*
 * Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/convertnode.hpp"
#include "opto/locknode.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/node.hpp"
#include "opto/parse.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"

//--------------------gen_stub-------------------------------
void GraphKit::gen_stub(address C_function,
                        const char *name,
                        int is_fancy_jump,
                        bool pass_tls,
                        bool return_pc) {
  ResourceMark rm;

  const TypeTuple *jdomain = C->tf()->domain();
  const TypeTuple *jrange = C->tf()->range();

  // The procedure start
  StartNode* start = new StartNode(root(), jdomain);
  _gvn.set_type_bottom(start);

  // Make a map, with JVM state
  uint parm_cnt = jdomain->cnt();
  uint max_map = MAX2(2*parm_cnt+1, jrange->cnt());
  // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
  assert(SynchronizationEntryBCI == InvocationEntryBci, "" );
  JVMState* jvms = new (C) JVMState(0);
  jvms->set_bci(InvocationEntryBci);
  jvms->set_monoff(max_map);
  jvms->set_scloff(max_map);
  jvms->set_endoff(max_map);
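  // With monoff == scloff == endoff, the stub's JVM state carries no
  // monitors and no scalar-replaced objects.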
  {
    SafePointNode *map = new SafePointNode( max_map, jvms );
    jvms->set_map(map);
    set_jvms(jvms);
    assert(map == this->map(), "kit.map is set" );
  }

  // Make up the parameters
  uint i;
  for (i = 0; i < parm_cnt; i++) {
    map()->init_req(i, _gvn.transform(new ParmNode(start, i)));
  }
  for ( ; i<map()->req(); i++) {
    map()->init_req(i, top()); // For nicer debugging
  }

  // GraphKit requires memory to be a MergeMemNode:
  set_all_memory(map()->memory());

  // Get base of thread-local storage area
  Node* thread = _gvn.transform(new ThreadLocalNode());

  const int NoAlias = Compile::AliasIdxBot;
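  // The per-thread fields accessed below are raw memory; the bottom alias
  // index tells C2 to treat these accesses as aliasing with all of memory.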

  Node* adr_last_Java_pc = basic_plus_adr(top(),
                                          thread,
                                          in_bytes(JavaThread::frame_anchor_offset()) +
                                          in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
#if defined(SPARC)
  Node* adr_flags = basic_plus_adr(top(),
                                   thread,
                                   in_bytes(JavaThread::frame_anchor_offset()) +
                                   in_bytes(JavaFrameAnchor::flags_offset()));
#endif /* defined(SPARC) */


  // Drop in the last_Java_sp. last_Java_fp is not touched.
  // Always do this after the other "last_Java_frame" fields are set, since
  // as soon as last_Java_sp != NULL, has_last_Java_frame() is true and
  // users will look at the other fields.
  //
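  // last_Java_sp is the current frame pointer adjusted by STACK_BIAS
  // (non-zero only on 64-bit SPARC).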
  Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset()));
  Node *last_sp = basic_plus_adr(top(), frameptr(), (intptr_t) STACK_BIAS);
  store_to_memory(NULL, adr_sp, last_sp, T_ADDRESS, NoAlias, MemNode::unordered);

  // Set _thread_in_native
  // The order of stores into TLS is critical! Setting _thread_in_native MUST
  // be last, because a GC is allowed at any time after setting it and the GC
  // will require last_Java_pc and last_Java_sp.

  //-----------------------------
  // Compute signature for C call. Varies from the Java signature!

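  // The C call takes the Java arguments (with ints possibly widened to
  // longs), followed by the thread pointer and, if requested, the caller's
  // return PC.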
  const Type **fields = TypeTuple::fields(2*parm_cnt+2);
  uint cnt = TypeFunc::Parms;
  // The C routine gets the base of thread-local storage passed in as an
  // extra argument. Not all calls need it, but it is cheap to add it here.
  for (uint pcnt = cnt; pcnt < parm_cnt; pcnt++, cnt++) {
    const Type *f = jdomain->field_at(pcnt);
    if (CCallingConventionRequiresIntsAsLongs && f->isa_int()) {
      fields[cnt++] = TypeLong::LONG;
      fields[cnt] = Type::HALF; // Must add an additional half for a long.
    } else {
      fields[cnt] = f;
    }
  }
  fields[cnt++] = TypeRawPtr::BOTTOM; // Thread-local storage
  // Also pass in the caller's PC, if asked for.
  if (return_pc) {
    fields[cnt++] = TypeRawPtr::BOTTOM; // Return PC
  }
  const TypeTuple* domain = TypeTuple::make(cnt, fields);

  // The C routine we are about to call cannot return an oop; it can block on
  // exit and a GC will trash the oop while it sits in C-land. Instead, we
  // return the oop through TLS for runtime calls.
  // Also, C routines returning integer subword values leave the high
  // order bits dirty; these must be cleaned up by explicit sign- or
  // zero-extension.
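  // retval is the Java-level return type; Type::TOP means the stub returns void.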
  const Type* retval = (jrange->cnt() == TypeFunc::Parms) ? Type::TOP : jrange->field_at(TypeFunc::Parms);
  // Make a private copy of jrange's fields.
  const Type **rfields = TypeTuple::fields(jrange->cnt() - TypeFunc::Parms);
  // Fixup oop returns
  int retval_ptr = retval->isa_oop_ptr();
  if (retval_ptr) {
    assert( pass_tls, "Oop must be returned thru TLS" );
    // Fancy-jumps return address; others return void
    rfields[TypeFunc::Parms] = is_fancy_jump ? TypeRawPtr::BOTTOM : Type::TOP;

  } else if (retval->isa_int()) { // Returning any integer subtype?
    // "Fatten" byte, char & short return types to 'int' to show that
    // the native C code can return values with junk high order bits.
    // The result is sign- or zero-extended below.
    rfields[TypeFunc::Parms] = TypeInt::INT; // It's "dirty" and needs sign-ext

  } else if (jrange->cnt() >= TypeFunc::Parms+1) { // Else copy other types
    rfields[TypeFunc::Parms] = jrange->field_at(TypeFunc::Parms);
    if (jrange->cnt() == TypeFunc::Parms+2) {
      rfields[TypeFunc::Parms+1] = jrange->field_at(TypeFunc::Parms+1);
    }
  }
  const TypeTuple* range = TypeTuple::make(jrange->cnt(), rfields);

  // Final C signature
  const TypeFunc *c_sig = TypeFunc::make(domain, range);

  //-----------------------------
  // Make the call node.
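  // The call's memory effect is TypePtr::BOTTOM: the C function may read or
  // write any memory, so it is treated as touching the whole memory state.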
  CallRuntimeNode *call = new CallRuntimeNode(c_sig, C_function, name, TypePtr::BOTTOM);
  //-----------------------------

  // Fix-up the debug info for the call.
  call->set_jvms(new (C) JVMState(0));
  call->jvms()->set_bci(0);
  call->jvms()->set_offsets(cnt);

  // Set fixed predefined input arguments.
  cnt = 0;
  for (i = 0; i < TypeFunc::Parms; i++) {
    call->init_req(cnt++, map()->in(i));
  }
  // A little too aggressive on the parm copy; return address is not an input.
  call->set_req(TypeFunc::ReturnAdr, top());
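  // Copy the Java arguments. Where the C calling convention passes ints as
  // longs, each int argument is widened to a long and padded with a half slot.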
  for (; i < parm_cnt; i++) { // Regular input arguments.
    const Type *f = jdomain->field_at(i);
    if (CCallingConventionRequiresIntsAsLongs && f->isa_int()) {
      call->init_req(cnt++, _gvn.transform(new ConvI2LNode(map()->in(i))));
      call->init_req(cnt++, top());
    } else {
      call->init_req(cnt++, map()->in(i));
    }
  }
  call->init_req(cnt++, thread);
  if (return_pc) { // Return PC, if asked for.
    call->init_req(cnt++, returnadr());
  }

  _gvn.transform_no_reclaim(call);

  //-----------------------------
  // Now set up the return results
  set_control( _gvn.transform( new ProjNode(call,TypeFunc::Control)) );
  set_i_o( _gvn.transform( new ProjNode(call,TypeFunc::I_O )) );
  set_all_memory_call(call);
  if (range->cnt() > TypeFunc::Parms) {
    Node* retnode = _gvn.transform( new ProjNode(call,TypeFunc::Parms) );
    // C-land is allowed to return sub-word values. Convert to integer type.
    assert( retval != Type::TOP, "" );
    if (retval == TypeInt::BOOL) {
      retnode = _gvn.transform( new AndINode(retnode, intcon(0xFF)) );
    } else if (retval == TypeInt::CHAR) {
      retnode = _gvn.transform( new AndINode(retnode, intcon(0xFFFF)) );
    } else if (retval == TypeInt::BYTE) {
      retnode = _gvn.transform( new LShiftINode(retnode, intcon(24)) );
      retnode = _gvn.transform( new RShiftINode(retnode, intcon(24)) );
    } else if (retval == TypeInt::SHORT) {
      retnode = _gvn.transform( new LShiftINode(retnode, intcon(16)) );
      retnode = _gvn.transform( new RShiftINode(retnode, intcon(16)) );
    }
    map()->set_req( TypeFunc::Parms, retnode );
  }

  //-----------------------------
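  // The native call has returned; tear down the last_Java_frame state.
  // last_Java_sp is cleared first so has_last_Java_frame() becomes false
  // before the other anchor fields are cleared.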

  // Clear last_Java_sp
  store_to_memory(NULL, adr_sp, null(), T_ADDRESS, NoAlias, MemNode::unordered);
  // Clear last_Java_pc and (on SPARC) the frame-anchor _flags
  store_to_memory(NULL, adr_last_Java_pc, null(), T_ADDRESS, NoAlias, MemNode::unordered);
#if defined(SPARC)
  store_to_memory(NULL, adr_flags, intcon(0), T_INT, NoAlias, MemNode::unordered);
#endif /* defined(SPARC) */
#if (defined(IA64) && !defined(AIX))
  Node* adr_last_Java_fp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_fp_offset()));
  store_to_memory(NULL, adr_last_Java_fp, null(), T_ADDRESS, NoAlias, MemNode::unordered);
#endif

  // For is-fancy-jump, the C-return value is also the branch target
  Node* target = map()->in(TypeFunc::Parms);
  // Runtime call returning oop in TLS? Fetch it out
  if( pass_tls ) {
    Node* adr = basic_plus_adr(top(), thread, in_bytes(JavaThread::vm_result_offset()));
    Node* vm_result = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered);
    map()->set_req(TypeFunc::Parms, vm_result); // vm_result passed as result
    // Clear the oop out of thread-local storage (TLS)
    store_to_memory(NULL, adr, null(), T_ADDRESS, NoAlias, MemNode::unordered);
  }

  //-----------------------------
  // check exception
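  // If the thread has a pending exception, branch to the shared
  // forward_exception stub instead of returning normally; the exception
  // path gets minimal branch probability.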
  Node* adr = basic_plus_adr(top(), thread, in_bytes(Thread::pending_exception_offset()));
  Node* pending = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, MemNode::unordered);

  Node* exit_memory = reset_memory();

  Node* cmp = _gvn.transform( new CmpPNode(pending, null()) );
  Node* bo = _gvn.transform( new BoolNode(cmp, BoolTest::ne) );
  IfNode *iff = create_and_map_if(control(), bo, PROB_MIN, COUNT_UNKNOWN);

  Node* if_null = _gvn.transform( new IfFalseNode(iff) );
  Node* if_not_null = _gvn.transform( new IfTrueNode(iff) );

  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before" );
  Node *exc_target = makecon(TypeRawPtr::make( StubRoutines::forward_exception_entry() ));
  Node *to_exc = new TailCallNode(if_not_null,
                                  i_o(),
                                  exit_memory,
                                  frameptr(),
                                  returnadr(),
                                  exc_target, null());
  root()->add_req(_gvn.transform(to_exc)); // bind to root to keep live
  C->init_start(start);

  //-----------------------------
  // If this is a normal subroutine return, issue the return and be done.
  Node *ret = NULL;
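  // is_fancy_jump selects the epilogue: 0 - a plain return to the caller,
  // 1 - a tail call to the address returned by the C function, and
  // 2 - a tail jump that also pops the return address (the rethrow case).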
  switch( is_fancy_jump ) {
  case 0: // Make a return instruction
    // Return to caller, free any space for return address
    ret = new ReturnNode(TypeFunc::Parms, if_null,
                         i_o(),
                         exit_memory,
                         frameptr(),
                         returnadr());
    if (C->tf()->range()->cnt() > TypeFunc::Parms)
      ret->add_req( map()->in(TypeFunc::Parms) );
    break;
  case 1: // This is a fancy tail-call jump. Jump to computed address.
    // Jump to new callee; leave old return address alone.
    ret = new TailCallNode(if_null,
                           i_o(),
                           exit_memory,
                           frameptr(),
                           returnadr(),
                           target, map()->in(TypeFunc::Parms));
    break;
  case 2: // Pop return address & jump
    // Throw away old return address; jump to new computed address
    //assert(C_function == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C), "fancy_jump==2 only for rethrow");
    ret = new TailJumpNode(if_null,
                           i_o(),
                           exit_memory,
                           frameptr(),
                           target, map()->in(TypeFunc::Parms));
    break;
  default:
    ShouldNotReachHere();
  }
  root()->add_req(_gvn.transform(ret));
}