/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_C1_C1_GRAPHBUILDER_HPP
#define SHARE_C1_C1_GRAPHBUILDER_HPP

#include "c1/c1_IR.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_ValueMap.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciMethodData.hpp"
#include "ci/ciStreams.hpp"
#include "compiler/compileLog.hpp"

class MemoryBuffer;

class GraphBuilder {
 private:
  // Per-scope data. These are pushed and popped as we descend into
  // inlined methods. Currently in order to generate good code in the
  // inliner we have to attempt to inline methods directly into the
  // basic block we are parsing; this adds complexity.
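  //
  // An illustrative sketch (not actual state): while parsing a(), which
  // inlines b(), which in turn inlines c(), _scope_data refers to c's
  // ScopeData, and its parent() chain leads back through b's ScopeData
  // to the root scope's ScopeData for a().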
  class ScopeData: public CompilationResourceObj {
   private:
    ScopeData*        _parent;
    // bci-to-block mapping
    BlockList*        _bci2block;
    // Scope
    IRScope*          _scope;
    // Whether this scope or any parent scope has exception handlers
    bool              _has_handler;
    // The bytecodes
    ciBytecodeStream* _stream;

    // Work list
    BlockList*        _work_list;

    // Maximum inline size for this scope
    intx              _max_inline_size;
    // Expression stack depth at point where inline occurred
    int               _caller_stack_size;

    // The continuation point for the inline. Currently only used in
    // multi-block inlines, but eventually would like to use this for
    // all inlines for uniformity and simplicity; in this case would
    // get the continuation point from the BlockList instead of
    // fabricating it anew because Invokes would be considered to be
    // BlockEnds.
    BlockBegin*       _continuation;

    // Was this ScopeData created only for the parsing and inlining of
    // a jsr?
    bool              _parsing_jsr;
    // We track the destination bci of the jsr only to determine
    // bailout conditions, since we only handle a subset of all of the
    // possible jsr-ret control structures. Recursive invocations of a
    // jsr are disallowed by the verifier.
    int               _jsr_entry_bci;
    // We need to track the local variable in which the return address
    // was stored to ensure we can handle inlining the jsr, because we
    // don't handle arbitrary jsr/ret constructs.
    int               _jsr_ret_addr_local;
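    // An illustrative bytecode shape this does handle (a sketch, with
    // hypothetical labels):
    //     jsr L_sub          // pushes the return address
    //     ...
    //   L_sub:
    //     astore_1           // _jsr_ret_addr_local would be 1 here
    //     ...
    //     ret 1
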
    // If we are parsing a jsr, the continuation point for rets
    BlockBegin*       _jsr_continuation;
    // Cloned XHandlers for jsr-related ScopeDatas
    XHandlers*        _jsr_xhandlers;

    // Number of returns seen in this scope
    int               _num_returns;

    // In order to generate profitable code for inlining, we currently
    // have to perform an optimization for single-block inlined
    // methods where we continue parsing into the same block. This
    // allows us to perform CSE across inlined scopes and to avoid
    // storing parameters to the stack. Having a global register
    // allocator and being able to perform global CSE would allow this
    // code to be removed and thereby simplify the inliner.
    BlockBegin*       _cleanup_block;       // The block to which the return was added
    Instruction*      _cleanup_return_prev; // Instruction before return instruction
    ValueStack*       _cleanup_state;       // State of that block (not yet pinned)
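
    // Illustrative flow (a sketch of how the inliner uses these): while
    // parsing the callee's return, the current (_block, _last, _state)
    // are recorded via set_inline_cleanup_info(); once the callee has
    // been parsed, the caller restores them via
    // restore_inline_cleanup_info() and continues appending into the
    // same basic block.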

    // When inlining do not push the result on the stack
    bool              _ignore_return;

   public:
    ScopeData(ScopeData* parent);

    ScopeData* parent() const                      { return _parent; }

    BlockList* bci2block() const                   { return _bci2block; }
    void       set_bci2block(BlockList* bci2block) { _bci2block = bci2block; }

    // NOTE: this has a different effect when parsing jsrs
    BlockBegin* block_at(int bci);

    IRScope* scope() const                         { return _scope; }
    // Has side-effect of setting has_handler flag
    void set_scope(IRScope* scope);

    // Whether this or any parent scope has exception handlers
    bool has_handler() const                       { return _has_handler; }
    void set_has_handler()                         { _has_handler = true; }

    // Exception handlers list to be used for this scope
    XHandlers* xhandlers() const;

    // How to get a block to be parsed
    void add_to_work_list(BlockBegin* block);
    // How to remove the next block to be parsed; returns NULL if none left
    BlockBegin* remove_from_work_list();
    // Indicates parse is over
    bool is_work_list_empty() const;

    ciBytecodeStream* stream()                     { return _stream; }
    void set_stream(ciBytecodeStream* stream)      { _stream = stream; }

    intx max_inline_size() const                   { return _max_inline_size; }

    BlockBegin* continuation() const               { return _continuation; }
    void set_continuation(BlockBegin* cont)        { _continuation = cont; }

    // Indicates whether this ScopeData was pushed only for the
    // parsing and inlining of a jsr
    bool parsing_jsr() const                       { return _parsing_jsr; }
    void set_parsing_jsr()                         { _parsing_jsr = true; }
    int  jsr_entry_bci() const                     { return _jsr_entry_bci; }
    void set_jsr_entry_bci(int bci)                { _jsr_entry_bci = bci; }
    void set_jsr_return_address_local(int local_no){ _jsr_ret_addr_local = local_no; }
    int  jsr_return_address_local() const          { return _jsr_ret_addr_local; }
    // Must be called after scope is set up for jsr ScopeData
    void setup_jsr_xhandlers();

    // The jsr continuation is only used when parsing_jsr is true, and
    // is different from the "normal" continuation since we can end up
    // doing a return (rather than a ret) from within a subroutine
    BlockBegin* jsr_continuation() const           { return _jsr_continuation; }
    void set_jsr_continuation(BlockBegin* cont)    { _jsr_continuation = cont; }

    int num_returns();
    void incr_num_returns();

    void set_inline_cleanup_info(BlockBegin* block,
                                 Instruction* return_prev,
                                 ValueStack* return_state);
    BlockBegin*  inline_cleanup_block() const      { return _cleanup_block; }
    Instruction* inline_cleanup_return_prev() const{ return _cleanup_return_prev; }
    ValueStack*  inline_cleanup_state() const      { return _cleanup_state; }

    bool ignore_return() const                     { return _ignore_return; }
    void set_ignore_return(bool ignore_return)     { _ignore_return = ignore_return; }
  };

  // for all GraphBuilders
  static bool       _can_trap[Bytecodes::number_of_java_codes];

  // for each instance of GraphBuilder
  ScopeData*        _scope_data;                   // Per-scope data; used for inlining
  Compilation*      _compilation;                  // the current compilation
  ValueMap*         _vmap;                         // the map of values encountered (for CSE)
  MemoryBuffer*     _memory;
  const char*       _inline_bailout_msg;           // non-null if most recent inline attempt failed
  int               _instruction_count;            // for bailing out in pathological jsr/ret cases
  BlockBegin*       _start;                        // the start block
  BlockBegin*       _osr_entry;                    // the osr entry block
  ValueStack*       _initial_state;                // The state for the start block

  // for each call to connect_to_end; can also be set by inliner
  BlockBegin*       _block;                        // the current block
  ValueStack*       _state;                        // the current execution state
  Instruction*      _last;                         // the last instruction added
  bool              _skip_block;                   // skip processing of the rest of this block

  // accessors
  ScopeData*        scope_data() const             { return _scope_data; }
  Compilation*      compilation() const            { return _compilation; }
  BlockList*        bci2block() const              { return scope_data()->bci2block(); }
  ValueMap*         vmap() const                   { assert(UseLocalValueNumbering, "should not access otherwise"); return _vmap; }
  bool              has_handler() const            { return scope_data()->has_handler(); }

  BlockBegin*       block() const                  { return _block; }
  ValueStack*       state() const                  { return _state; }
  void              set_state(ValueStack* state)   { _state = state; }
  IRScope*          scope() const                  { return scope_data()->scope(); }
  ciMethod*         method() const                 { return scope()->method(); }
  ciBytecodeStream* stream() const                 { return scope_data()->stream(); }
  Instruction*      last() const                   { return _last; }
  Bytecodes::Code   code() const                   { return stream()->cur_bc(); }
  int               bci() const                    { return stream()->cur_bci(); }
  int               next_bci() const               { return stream()->next_bci(); }

  // unified bailout support
  void bailout(const char* msg) const              { compilation()->bailout(msg); }
  bool bailed_out() const                          { return compilation()->bailed_out(); }

  // stack manipulation helpers
  void ipush(Value t) const                        { state()->ipush(t); }
  void lpush(Value t) const                        { state()->lpush(t); }
  void fpush(Value t) const                        { state()->fpush(t); }
  void dpush(Value t) const                        { state()->dpush(t); }
  void apush(Value t) const                        { state()->apush(t); }
  void push(ValueType* type, Value t) const        { state()-> push(type, t); }

  Value ipop()                                     { return state()->ipop(); }
  Value lpop()                                     { return state()->lpop(); }
  Value fpop()                                     { return state()->fpop(); }
  Value dpop()                                     { return state()->dpop(); }
  Value apop()                                     { return state()->apop(); }
  Value pop(ValueType* type)                       { return state()-> pop(type); }
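
  // A typical bytecode handler pairs these; e.g. for iadd (an
  // illustrative sketch, `result` stands for the appended instruction):
  //   Value y = ipop();   // right operand (pushed last, so popped first)
  //   Value x = ipop();   // left operand
  //   ipush(result);      // push the computed sum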

  // instruction helpers
  void load_constant();
  void load_local(ValueType* type, int index);
  void store_local(ValueType* type, int index);
  void store_local(ValueStack* state, Value value, int index);
  void load_indexed (BasicType type);
  void store_indexed(BasicType type);
  void stack_op(Bytecodes::Code code);
  void arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before = NULL);
  void negate_op(ValueType* type);
  void shift_op(ValueType* type, Bytecodes::Code code);
  void logic_op(ValueType* type, Bytecodes::Code code);
  void compare_op(ValueType* type, Bytecodes::Code code);
  void convert(Bytecodes::Code op, BasicType from, BasicType to);
  void increment();
  void _goto(int from_bci, int to_bci);
  void if_node(Value x, If::Condition cond, Value y, ValueStack* stack_before);
  void if_zero(ValueType* type, If::Condition cond);
  void if_null(ValueType* type, If::Condition cond);
  void if_same(ValueType* type, If::Condition cond);
  void jsr(int dest);
  void ret(int local_index);
  void table_switch();
  void lookup_switch();
  void method_return(Value x, bool ignore_return = false);
  void call_register_finalizer();
  void access_field(Bytecodes::Code code);
  void invoke(Bytecodes::Code code);
  void new_instance(int klass_index);
  void new_type_array();
  void new_object_array();
  void check_cast(int klass_index);
  void instance_of(int klass_index);
  void monitorenter(Value x, int bci);
  void monitorexit(Value x, int bci);
  void new_multi_array(int dimensions);
  void throw_op(int bci);
  Value round_fp(Value fp_value);

  // stack/code manipulation helpers
  Instruction* append_with_bci(Instruction* instr, int bci);
  Instruction* append(Instruction* instr);
  Instruction* append_split(StateSplit* instr);

  // other helpers
  BlockBegin* block_at(int bci)                  { return scope_data()->block_at(bci); }
  XHandlers* handle_exception(Instruction* instruction);
  void connect_to_end(BlockBegin* beg);
  void null_check(Value value);
  void eliminate_redundant_phis(BlockBegin* start);
  BlockEnd* iterate_bytecodes_for_block(int bci);
  void iterate_all_blocks(bool start_in_current_block_for_inlining = false);
  Dependencies* dependency_recorder() const; // = compilation()->dependencies()
  bool direct_compare(ciKlass* k);
  Value make_constant(ciConstant value, ciField* field);

  void kill_all();

  // use of state copy routines (try to minimize unnecessary state
  // object allocations):

  // - if the instruction unconditionally needs a full copy of the
  // state (for patching for example), then use copy_state_before*

  // - if the instruction needs a full copy of the state only for
  // handler generation (Instruction::needs_exception_state() returns
  // false) then use copy_state_exhandling*
  // - if the instruction needs both a full copy of the state for
  // handler generation and at least a minimal copy of the state (as
  // returned by Instruction::exception_state()) for debug info
  // generation (that is when Instruction::needs_exception_state()
  // returns true) then use copy_state_for_exception*

  ValueStack* copy_state_before_with_bci(int bci);
  ValueStack* copy_state_before();
  ValueStack* copy_state_exhandling_with_bci(int bci);
  ValueStack* copy_state_exhandling();
  ValueStack* copy_state_for_exception_with_bci(int bci);
  ValueStack* copy_state_for_exception();
  ValueStack* copy_state_if_bb(bool is_bb)       { return (is_bb || compilation()->is_optimistic()) ? copy_state_before() : NULL; }
  ValueStack* copy_state_indexed_access()        { return compilation()->is_optimistic() ? copy_state_before() : copy_state_for_exception(); }
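
  // Illustrative choices (a sketch, not an exhaustive rule): a field
  // access that may require patching takes copy_state_before(); an
  // integer division, which only needs state for its implicit
  // ArithmeticException, takes copy_state_for_exception(); array loads
  // and stores use copy_state_indexed_access() so that an optimistic
  // compilation can deoptimize on a failed range check.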

  //
  // Inlining support
  //

  // accessors
  bool parsing_jsr() const                               { return scope_data()->parsing_jsr(); }
  BlockBegin* continuation() const                       { return scope_data()->continuation(); }
  BlockBegin* jsr_continuation() const                   { return scope_data()->jsr_continuation(); }
  void set_continuation(BlockBegin* continuation)        { scope_data()->set_continuation(continuation); }
  void set_inline_cleanup_info(BlockBegin* block,
                               Instruction* return_prev,
                               ValueStack* return_state) { scope_data()->set_inline_cleanup_info(block,
                                                                                                 return_prev,
                                                                                                 return_state); }
  void set_inline_cleanup_info() {
    set_inline_cleanup_info(_block, _last, _state);
  }
  BlockBegin*  inline_cleanup_block() const              { return scope_data()->inline_cleanup_block(); }
  Instruction* inline_cleanup_return_prev() const        { return scope_data()->inline_cleanup_return_prev(); }
  ValueStack*  inline_cleanup_state() const              { return scope_data()->inline_cleanup_state(); }
  void restore_inline_cleanup_info() {
    _block = inline_cleanup_block();
    _last  = inline_cleanup_return_prev();
    _state = inline_cleanup_state();
  }
  void incr_num_returns()                                { scope_data()->incr_num_returns(); }
  int  num_returns() const                               { return scope_data()->num_returns(); }
  intx max_inline_size() const                           { return scope_data()->max_inline_size(); }
  int  inline_level() const                              { return scope()->level(); }
  int  recursive_inline_level(ciMethod* callee) const;

  // inlining of synchronized methods
  void inline_sync_entry(Value lock, BlockBegin* sync_handler);
  void fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler = false);

  void build_graph_for_intrinsic(ciMethod* callee, bool ignore_return);

  // inliners
  bool try_inline(           ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = NULL);
  bool try_inline_intrinsics(ciMethod* callee, bool ignore_return = false);
  bool try_inline_full(      ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = NULL);
  bool try_inline_jsr(int jsr_dest_bci);

  const char* check_can_parse(ciMethod* callee) const;
  const char* should_not_inline(ciMethod* callee) const;

  // JSR 292 support
  bool try_method_handle_inline(ciMethod* callee, bool ignore_return);

  // helpers
  void inline_bailout(const char* msg);
  BlockBegin* header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state);
  BlockBegin* setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* init_state);
  void setup_osr_entry_block();
  void clear_inline_bailout();
  ValueStack* state_at_entry();
  void push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start);
  void push_scope(ciMethod* callee, BlockBegin* continuation);
  void push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci);
  void pop_scope();
  void pop_scope_for_jsr();

  void append_unsafe_get_obj(ciMethod* callee, BasicType t, bool is_volatile);
  void append_unsafe_put_obj(ciMethod* callee, BasicType t, bool is_volatile);
  void append_unsafe_get_raw(ciMethod* callee, BasicType t);
  void append_unsafe_put_raw(ciMethod* callee, BasicType t);
  void append_unsafe_CAS(ciMethod* callee);
  void append_unsafe_get_and_set_obj(ciMethod* callee, bool is_add);
  void append_char_access(ciMethod* callee, bool is_store);

  void print_inlining(ciMethod* callee, const char* msg = NULL, bool success = true);

  void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder, Values* obj_args, bool inlined);
  void profile_return_type(Value ret, ciMethod* callee, ciMethod* m = NULL, int bci = -1);
  void profile_invocation(ciMethod* inlinee, ValueStack* state);

  // Shortcuts to profiling control.
  bool is_profiling()          { return _compilation->is_profiling(); }
  bool count_invocations()     { return _compilation->count_invocations(); }
  bool count_backedges()       { return _compilation->count_backedges(); }
  bool profile_branches()      { return _compilation->profile_branches(); }
  bool profile_calls()         { return _compilation->profile_calls(); }
  bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
  bool profile_checkcasts()    { return _compilation->profile_checkcasts(); }
  bool profile_parameters()    { return _compilation->profile_parameters(); }
  bool profile_arguments()     { return _compilation->profile_arguments(); }
  bool profile_return()        { return _compilation->profile_return(); }

  Values* args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver);
  Values* collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver);
  void check_args_for_profiling(Values* obj_args, int expected);

 public:
  NOT_PRODUCT(void print_stats();)

  // initialization
  static void initialize();

  // public
  static bool can_trap(ciMethod* method, Bytecodes::Code code) {
    assert(0 <= code && code < Bytecodes::number_of_java_codes, "illegal bytecode");
    if (_can_trap[code]) return true;
    // special handling for finalizer registration
    return code == Bytecodes::_return && method->intrinsic_id() == vmIntrinsics::_Object_init;
  }
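
  // Illustrative example: can_trap(m, Bytecodes::_athrow) is always
  // true, while _return normally cannot trap unless m is Object.<init>,
  // where returning may register a finalizer (the special case above).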

  // creation
  GraphBuilder(Compilation* compilation, IRScope* scope);
  static void sort_top_into_worklist(BlockList* worklist, BlockBegin* top);

  BlockBegin* start() const                      { return _start; }
};

#endif // SHARE_C1_C1_GRAPHBUILDER_HPP