/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_PARSE_HPP
#define SHARE_OPTO_PARSE_HPP

#include "ci/ciMethodData.hpp"
#include "ci/ciTypeFlow.hpp"
#include "compiler/methodLiveness.hpp"
#include "libadt/vectset.hpp"
#include "oops/generateOopMap.hpp"
#include "opto/graphKit.hpp"
#include "opto/subnode.hpp"

class BytecodeParseHistogram;
class InlineTree;
class Parse;
class SwitchRange;


//------------------------------InlineTree-------------------------------------
class InlineTree : public ResourceObj {
  friend class VMStructs;

  Compile*    C;                  // cache
  JVMState*   _caller_jvms;       // state of caller
  ciMethod*   _method;            // method being called by the caller_jvms
  InlineTree* _caller_tree;
  uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
  // Call-site count / interpreter invocation count, scaled recursively.
  // Always between 0.0 and 1.0.  Represents the fraction of the method's
  // total execution time used at this call site.
  const float _site_invoke_ratio;
  const int   _max_inline_level;  // the maximum inline level for this sub-tree (may be adjusted)
  float compute_callee_frequency(int caller_bci) const;
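  // Illustrative sketch (not part of the original header): the ratio is a
  // product of per-site frequencies accumulated down the inline chain.  If a
  // caller's own ratio is 0.8 and compute_callee_frequency(caller_bci)
  // reports 0.5 (the site runs on about half of the caller's invocations),
  // the callee subtree would carry roughly 0.8 * 0.5 = 0.4.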

  GrowableArray<InlineTree*> _subtrees;

  bool pass_initial_checks(ciMethod* caller_method, int caller_bci, ciMethod* callee_method);

  void print_impl(outputStream* st, int indent) const PRODUCT_RETURN;
  const char* _msg;
protected:
  InlineTree(Compile* C,
             const InlineTree* caller_tree,
             ciMethod* callee_method,
             JVMState* caller_jvms,
             int caller_bci,
             float site_invoke_ratio,
             int max_inline_level);
  InlineTree* build_inline_tree_for_callee(ciMethod* callee_method,
                                           JVMState* caller_jvms,
                                           int caller_bci);
  bool        try_to_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            JVMState* jvms,
                            ciCallProfile& profile,
                            WarmCallInfo* wci_result,
                            bool& should_delay);
  bool        should_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            ciCallProfile& profile,
                            WarmCallInfo* wci_result);
  bool        should_not_inline(ciMethod* callee_method,
                                ciMethod* caller_method,
                                JVMState* jvms,
                                WarmCallInfo* wci_result);
  bool        is_not_reached(ciMethod* callee_method,
                             ciMethod* caller_method,
                             int caller_bci,
                             ciCallProfile& profile);
  void        print_inlining(ciMethod* callee_method, int caller_bci,
                             ciMethod* caller_method, bool success) const;

  InlineTree* caller_tree()       const { return _caller_tree; }
  InlineTree* callee_at(int bci, ciMethod* m) const;
  int         inline_level()      const { return stack_depth(); }
  int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
  const char* msg()               const { return _msg; }
  void        set_msg(const char* msg)  { _msg = msg; }
public:
  static const char* check_can_parse(ciMethod* callee);

  static InlineTree* build_inline_tree_root();
  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee);

  // For temporary (stack-allocated, stateless) ilts:
  InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level);

  // See if it is OK to inline.
  // The receiver is the inline tree for the caller.
  //
  // The result is a temperature indication.  If it is hot or cold,
  // inlining is immediate or undesirable.  Otherwise, the info block
  // returned is newly allocated and may be enqueued.
  //
  // If the method is inlinable, a new inline subtree is created on the fly,
  // and may be accessed by find_subtree_from_root.
  // The call_method is the dest_method for a special or static invocation.
  // The call_method is an optimized virtual method candidate otherwise.
  WarmCallInfo* ok_to_inline(ciMethod* call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci, bool& should_delay);
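  // Hedged usage sketch (the caller-side names here are hypothetical; only
  // the ok_to_inline() signature above is taken from this header):
  //
  //   bool should_delay = false;
  //   WarmCallInfo scratch;
  //   WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile,
  //                                        &scratch, should_delay);
  //   bool allow_inline = (ci != NULL && !ci->is_cold());
  //
  // A hot result means "inline now", a cold one "do not"; anything in
  // between is an allocated info block that a caller may enqueue.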

  // Information about inlined method
  JVMState*   caller_jvms()       const { return _caller_jvms; }
  ciMethod*   method()            const { return _method; }
  int         caller_bci()        const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
  uint        count_inline_bcs()  const { return _count_inline_bcs; }
  float       site_invoke_ratio() const { return _site_invoke_ratio; }

#ifndef PRODUCT
private:
  uint        _count_inlines;     // Count of inlined methods
public:
  // Debug information collected during parse
  uint        count_inlines()     const { return _count_inlines; }
#endif
  GrowableArray<InlineTree*> subtrees() { return _subtrees; }

  void print_value_on(outputStream* st) const PRODUCT_RETURN;

  bool        _forced_inline;     // Inlining was forced by CompilerOracle, ciReplay or annotation
  bool        forced_inline()     const { return _forced_inline; }
  // Count number of nodes in this subtree
  int         count() const;
  // Dump inlining replay data to the stream.
  void dump_replay_data(outputStream* out);
};


//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
class Parse : public GraphKit {
 public:
  // Per-block information needed by the parser:
  class Block {
   private:
    ciTypeFlow::Block* _flow;
    int                _pred_count;     // how many predecessors in CFG?
    int                _preds_parsed;   // how many of these have been parsed?
    uint               _count;          // how many times executed?  Currently only set by _goto's
    bool               _is_parsed;      // has this block been parsed yet?
    bool               _is_handler;     // is this block an exception handler?
    bool               _has_merged_backedge; // does this block have merged backedge?
    SafePointNode*     _start_map;      // all values flowing into this block
    MethodLivenessResult _live_locals;  // lazily initialized liveness bitmap
    bool               _has_predicates; // Were predicates added before parsing of the loop head?

    int                _num_successors; // Includes only normal control flow.
    int                _all_successors; // Includes exception paths also.
    Block**            _successors;

   public:

    // Set up the block data structure itself.
    Block(Parse* outer, int rpo);

    // Set up the block's relations to other blocks.
    void init_graph(Parse* outer);

    ciTypeFlow::Block* flow() const { return _flow; }
    int pred_count()          const { return _pred_count; }
    int preds_parsed()        const { return _preds_parsed; }
    bool is_parsed()          const { return _is_parsed; }
    bool is_handler()         const { return _is_handler; }
    void set_count(uint x)          { _count = x; }
    uint count()              const { return _count; }

    SafePointNode* start_map() const     { assert(is_merged(), ""); return _start_map; }
    void set_start_map(SafePointNode* m) { assert(!is_merged(), ""); _start_map = m; }

    // True after any predecessor flows control into this block
    bool is_merged() const { return _start_map != NULL; }

#ifdef ASSERT
    // True after backedge predecessor flows control into this block
    bool has_merged_backedge() const { return _has_merged_backedge; }
    void mark_merged_backedge(Block* pred) {
      assert(is_SEL_head(), "should be loop head");
      if (pred != NULL && is_SEL_backedge(pred)) {
        assert(is_parsed(), "block should be parsed before merging backedges");
        _has_merged_backedge = true;
      }
    }
#endif

    // True when all non-exception predecessors have been parsed.
    bool is_ready() const { return preds_parsed() == pred_count(); }

    bool has_predicates() const { return _has_predicates; }
    void set_has_predicates()   { _has_predicates = true; }

    int num_successors() const { return _num_successors; }
    int all_successors() const { return _all_successors; }
    Block* successor_at(int i) const {
      assert((uint)i < (uint)all_successors(), "");
      return _successors[i];
    }
    Block* successor_for_bci(int bci);

    int start() const    { return flow()->start(); }
    int limit() const    { return flow()->limit(); }
    int rpo() const      { return flow()->rpo(); }
    int start_sp() const { return flow()->stack_size(); }

    bool is_loop_head() const { return flow()->is_loop_head(); }
    bool is_SEL_head() const  { return flow()->is_single_entry_loop_head(); }
    bool is_SEL_backedge(Block* pred) const { return is_SEL_head() && pred->rpo() >= rpo(); }
    bool is_invariant_local(uint i) const {
      const JVMState* jvms = start_map()->jvms();
      if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
      return flow()->is_invariant_local(i - jvms->locoff());
    }
    bool can_elide_SEL_phi(uint i) const { assert(is_SEL_head(), ""); return is_invariant_local(i); }

    const Type* peek(int off = 0) const { return stack_type_at(start_sp() - (off + 1)); }
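    // For example (illustrative): with start_sp() == 3, peek() returns the
    // type of the topmost stack slot (stack_type_at(2)), and peek(1) the
    // slot just below it (stack_type_at(1)).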

    const Type* stack_type_at(int i) const;
    const Type* local_type_at(int i) const;
    static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }

    bool has_trap_at(int bci) const { return flow()->has_trap() && flow()->trap_bci() == bci; }

    // Call this just before parsing a block.
    void mark_parsed() {
      assert(!_is_parsed, "must parse each block exactly once");
      _is_parsed = true;
    }

    // Return the phi/region input index for the "current" pred,
    // and bump the pred number.  For historical reasons these index
    // numbers are handed out in descending order.  The last index is
    // always PhiNode::Input (i.e., 1).  The value returned is known
    // as a "path number" because it distinguishes by which path we are
    // entering the block.
    int next_path_num() {
      assert(preds_parsed() < pred_count(), "too many preds?");
      return pred_count() - _preds_parsed++;
    }
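
    // Worked example (illustrative; the merge step shown is hypothetical):
    // a block with pred_count() == 3 hands out path numbers 3, 2, 1 on
    // successive calls, the last being PhiNode::Input (1):
    //
    //   int pnum = target->next_path_num();
    //   region->set_req(pnum, incoming_control);
    //   phi->set_req(pnum, incoming_value);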

    // Add a previously unaccounted predecessor to this block.
    // This operates by increasing the size of the block's region
    // and all its phi nodes (if any).  The value returned is a
    // path number ("pnum").
    int add_new_path();
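    // Sketch (illustrative): if pred_count() was 2, the region and every
    // phi grow by one input slot and the call returns 3 as the pnum for
    // the newly discovered edge.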

    // Initialize me by recording the parser's map.  My own map must be NULL.
    void record_state(Parse* outer);
  };

#ifndef PRODUCT
  // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
  class BytecodeParseHistogram : public ResourceObj {
   private:
    enum BPHType {
      BPH_transforms,
      BPH_values
    };
    static bool _initialized;
    static uint _bytecodes_parsed [Bytecodes::number_of_codes];
    static uint _nodes_constructed[Bytecodes::number_of_codes];
    static uint _nodes_transformed[Bytecodes::number_of_codes];
    static uint _new_values       [Bytecodes::number_of_codes];

    Bytecodes::Code _initial_bytecode;
    int             _initial_node_count;
    int             _initial_transforms;
    int             _initial_values;

    Parse*   _parser;
    Compile* _compiler;

    // Initialization
    static void reset();

    // Return info being collected, select with global flag 'BytecodeParseInfo'
    int current_count(BPHType info_selector);

   public:
    BytecodeParseHistogram(Parse* p, Compile* c);
    static bool initialized();

    // Record info when starting to parse one bytecode
    void set_initial_state(Bytecodes::Code bc);
    // Record results of parsing one bytecode
    void record_change();

    // Profile printing
    static void print(float cutoff = 0.01F);  // cutoff in percent
  };

 public:
  // Record work done during parsing
  BytecodeParseHistogram* _parse_histogram;
  void set_parse_histogram(BytecodeParseHistogram* bph) { _parse_histogram = bph; }
  BytecodeParseHistogram* parse_histogram()             { return _parse_histogram; }
#endif

 private:
  friend class Block;

  // Variables which characterize this compilation as a whole:

  JVMState*     _caller;        // JVMS which carries incoming args & state.
  float         _expected_uses; // expected number of calls to this code
  float         _prof_factor;   // discount applied to my profile counts
  int           _depth;         // Inline tree depth, for debug printouts
  const TypeFunc* _tf;          // My kind of function type
  int           _entry_bci;     // the osr bci or InvocationEntryBci

  ciTypeFlow*   _flow;          // Results of previous flow pass.
  Block*        _blocks;        // Array of basic-block structs.
  int           _block_count;   // Number of elements in _blocks.

  GraphKit      _exits;         // Record all normal returns and throws here.
  bool          _wrote_final;   // Did we write a final field?
  bool          _wrote_volatile; // Did we write a volatile field?
  bool          _wrote_stable;  // Did we write a @Stable field?
  bool          _wrote_fields;  // Did we write any field?
  bool          _count_invocations; // update and test invocation counter
  bool          _method_data_update; // update method data oop
  Node*         _alloc_with_final; // An allocation node with final field

  // Variables which track Java semantics during bytecode parsing:

  Block*            _block;     // block currently getting parsed
  ciBytecodeStream  _iter;      // stream of this method's bytecodes

  const FastLockNode* _synch_lock; // FastLockNode for synchronized method

#ifndef PRODUCT
  int _max_switch_depth;        // Debugging SwitchRanges.
  int _est_switch_depth;        // Debugging SwitchRanges.
#endif

  bool          _first_return;  // true if return is the first to be parsed
  bool          _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
  uint          _new_idx;       // any node with _idx above this value is new in this parse; used to trim the replaced-nodes list.

 public:
  // Constructor
  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);

  virtual Parse* is_Parse() const { return (Parse*)this; }

  // Accessors.
  JVMState*     caller()        const { return _caller; }
  float         expected_uses() const { return _expected_uses; }
  float         prof_factor()   const { return _prof_factor; }
  int           depth()         const { return _depth; }
  const TypeFunc* tf()          const { return _tf; }
  // entry_bci()     -- see osr_bci, etc.

  ciTypeFlow*   flow()          const { return _flow; }
  // blocks()        -- see rpo_at, start_block, etc.
  int           block_count()   const { return _block_count; }

  GraphKit&     exits()               { return _exits; }
  bool          wrote_final() const   { return _wrote_final; }
  void      set_wrote_final(bool z)   { _wrote_final = z; }
  bool          wrote_volatile() const { return _wrote_volatile; }
  void      set_wrote_volatile(bool z) { _wrote_volatile = z; }
  bool          wrote_stable() const  { return _wrote_stable; }
  void      set_wrote_stable(bool z)  { _wrote_stable = z; }
  bool          wrote_fields() const  { return _wrote_fields; }
  void      set_wrote_fields(bool z)  { _wrote_fields = z; }
  bool          count_invocations() const { return _count_invocations; }
  bool          method_data_update() const { return _method_data_update; }
  Node*         alloc_with_final() const { return _alloc_with_final; }
  void      set_alloc_with_final(Node* n) {
    assert((_alloc_with_final == NULL) || (_alloc_with_final == n), "different init objects?");
    _alloc_with_final = n;
  }

  Block*        block()   const { return _block; }
  ciBytecodeStream& iter()      { return _iter; }
  Bytecodes::Code bc()    const { return _iter.cur_bc(); }

  void set_block(Block* b)      { _block = b; }

  // Derived accessors:
  bool is_normal_parse() const { return _entry_bci == InvocationEntryBci; }
  bool is_osr_parse()    const { return _entry_bci != InvocationEntryBci; }
  int  osr_bci()         const { assert(is_osr_parse(), ""); return _entry_bci; }
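
  // Example (illustrative): a standard compilation is constructed with
  // _entry_bci == InvocationEntryBci, so is_normal_parse() holds; an
  // on-stack-replacement compile instead passes the bci of a loop head,
  // and osr_bci() reports where parsing must enter the method.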

  void set_parse_bci(int bci);

  // Must this parse be aborted?
  bool failing() { return C->failing(); }

  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return NULL if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node* fetch_interpreter_state(int index, BasicType bt, Node* local_addrs, Node* local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode*& bad_type_exit);
  void  load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  // implementation of monitorenter/monitorexit
  void do_monitor_enter();
  void do_monitor_exit();

  // Eagerly create phis throughout the state, to cope with back edges.
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path(int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode* ensure_phi(int idx, bool nocreate = false);
  PhiNode* ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);
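
  // Rough sequencing sketch (illustrative; only the declarations above are
  // from this header): a typical merge finds the target block, obtains a
  // path number from it, and funnels state through merge_common():
  //
  //   Block* target = successor_for_bci(target_bci);
  //   int pnum = target->next_path_num();   // or target->add_new_path()
  //   merge_common(target, pnum);           // uses ensure_phi() and
  //                                         // merge_memory_edges() per cell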

  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // helper function to generate array store check
  void array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type** result2 = NULL);

  void clinit_deopt();

  void rtm_deopt();

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
  void maybe_add_safepoint(int target_bci) {
    if (UseLoopSafepoints && target_bci <= bci()) {
      add_safepoint();
    }
  }
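  // Example: for a "goto 12" parsed at bci 40, target_bci (12) <= bci() (40)
  // marks a back-branch closing a loop, so a safepoint is emitted; a forward
  // branch leaves the graph untouched.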

  // Note:  Intrinsic generation routines may be found in library_call.cpp.

  // Helper function to setup Ideal Call nodes
  void do_call();

  // Helper function to uncommon-trap or bailout for non-compilable call-sites
  bool can_not_compile_call_site(ciMethod* dest_method, ciInstanceKlass* klass);

  // Helper function to setup for type-profile based inlining
  bool prepare_type_profile_inline(ciInstanceKlass* prof_klass, ciMethod* prof_method);

  // Helper functions for type checking bytecodes:
  void do_checkcast();
  void do_instanceof();

  // Helper functions for shifting & arithmetic
  void modf();
  void modd();
  void l2f();

  void do_irem();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true,  false); }
  void do_getfield () { do_field_access(true,  true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);
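  // The flag mapping implied by the dispatchers above, spelled out:
  //   getstatic -> (is_get = true,  is_field = false)
  //   getfield  -> (is_get = true,  is_field = true)
  //   putstatic -> (is_get = false, is_field = false)
  //   putfield  -> (is_get = false, is_field = true)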

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field, bool is_field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // implementation of object creation bytecodes
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_anewarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  float dynamic_branch_prediction(float& cnt, BoolTest::mask btest, Node* test);
  float branch_prediction(float& cnt, BoolTest::mask btest, int target_bci, Node* test);
  bool  seems_never_taken(float prob) const;
  bool  path_is_suitable_for_uncommon_trap(float prob) const;
  bool  seems_stable_comparison() const;

  void  do_ifnull(BoolTest::mask btest, Node* c);
  void  do_if(BoolTest::mask btest, Node* c);
  int   repush_if_args();
  void  adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                            Block* path, Block* other_path);
  void  sharpen_type_after_if(BoolTest::mask btest,
                              Node* con, const Type* tcon,
                              Node* val, const Type* tval);
  void  maybe_add_predicate_after_if(Block* path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt);
  Node* jump_if_join(Node* iffalse, Node* iftrue);
  void  jump_if_true_fork(IfNode* ifNode, int dest_bci_if_true, int prof_table_index, bool unc);
  void  jump_if_false_fork(IfNode* ifNode, int dest_bci_if_false, int prof_table_index, bool unc);
  void  jump_if_always_fork(int dest_bci_if_true, int prof_table_index, bool unc);

  friend class SwitchRange;
  void  do_tableswitch();
  void  do_lookupswitch();
  void  jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool  create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);
  void  linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi);

  void decrement_age();
  // helper functions for methodData style profiling
  void test_counter_against_threshold(Node* cnt, int limit);
  void increment_and_test_invocation_counter(int limit);
  void test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, int limit);
  Node* method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant);

  void profile_method_entry();
  void profile_taken_branch(int target_bci, bool force_update = false);
  void profile_not_taken_branch(bool force_update = false);
  void profile_call(Node* receiver);
  void profile_generic_call();
  void profile_receiver_type(Node* receiver);
  void profile_ret(int target_bci);
  void profile_null_checkcast();
  void profile_switch_case(int table_index);

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;

  Node_Notes* make_node_notes(Node_Notes* caller_nn);

  // Helper functions for handling normal and abnormal exits.
  void build_exits();

  // Fix up all exceptional control flow exiting a single bytecode.
  void do_exceptions();

  // Fix up all exiting control flow at the end of the parse.
  void do_exits();

  // Add Catch/CatchProjs
  // The call is either a Java call or the VM's rethrow stub
  void catch_call_exceptions(ciExceptionHandlerStream&);

  // Handle all exceptions thrown by the inlined method.
  // Also handles exceptions for individual bytecodes.
  void catch_inline_exceptions(SafePointNode* ex_map);

  // Merge the given map into correct exceptional exit state.
  // Assumes that there is no applicable local handler.
  void throw_to_exit(SafePointNode* ex_map);

  // Use speculative type to optimize CmpP node
  Node* optimize_cmp_with_klass(Node* c);

 public:
#ifndef PRODUCT
  // Handle PrintOpto, etc.
  void show_parse_info();
  void dump_map_adr_mem() const;
  static void print_statistics(); // Print some performance counters
  void dump();
  void dump_bci(int bci);
#endif
};

#endif // SHARE_OPTO_PARSE_HPP