/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CFGPrinter.hpp"
#include "c1/c1_Canonicalizer.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_GraphBuilder.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "ci/ciCallSite.hpp"
#include "ci/ciField.hpp"
#include "ci/ciKlass.hpp"
#include "ci/ciMemberName.hpp"
#include "ci/ciUtilities.inline.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/bytecode.hpp"
#include "jfr/jfrEvents.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/bitMap.inline.hpp"

class BlockListBuilder {
 private:
  Compilation* _compilation;
  IRScope*     _scope;

  BlockList    _blocks;                // internal list of all blocks
  BlockList*   _bci2block;             // mapping from bci to blocks for GraphBuilder

  // fields used by mark_loops
  ResourceBitMap _active;              // for iteration of control flow graph
  ResourceBitMap _visited;             // for iteration of control flow graph
  intArray       _loop_map;            // caches the information if a block is contained in a loop
  int            _next_loop_index;     // next free loop number
  int            _next_block_number;   // for reverse postorder numbering of blocks

  // accessors
  Compilation*  compilation() const              { return _compilation; }
  IRScope*      scope() const                    { return _scope; }
  ciMethod*     method() const                   { return scope()->method(); }
  XHandlers*    xhandlers() const                { return scope()->xhandlers(); }

  // unified bailout support
  void          bailout(const char* msg) const   { compilation()->bailout(msg); }
  bool          bailed_out() const               { return compilation()->bailed_out(); }

  // helper functions
  BlockBegin* make_block_at(int bci, BlockBegin* predecessor);
  void handle_exceptions(BlockBegin* current, int cur_bci);
  void handle_jsr(BlockBegin* current, int sr_bci, int next_bci);
  void store_one(BlockBegin* current, int local);
  void store_two(BlockBegin* current, int local);
  void set_entries(int osr_bci);
  void set_leaders();

  void make_loop_header(BlockBegin* block);
  void mark_loops();
  int  mark_loops(BlockBegin* b, bool in_subroutine);

  // debugging
#ifndef PRODUCT
  void print();
#endif

 public:
  // creation
  BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci);

  // accessors for GraphBuilder
  BlockList*    bci2block() const                { return _bci2block; }
};


// Implementation of BlockListBuilder

BlockListBuilder::BlockListBuilder(Compilation* compilation, IRScope* scope, int osr_bci)
 : _compilation(compilation)
 , _scope(scope)
 , _blocks(16)
 , _bci2block(new BlockList(scope->method()->code_size(), NULL))
 , _active()         // size not known yet
 , _visited()        // size not known yet
 , _loop_map()       // size not known yet
 , _next_loop_index(0)
 , _next_block_number(0)
{
  set_entries(osr_bci);
  set_leaders();
  CHECK_BAILOUT();

  mark_loops();
  NOT_PRODUCT(if (PrintInitialBlockList) print());

#ifndef PRODUCT
  if (PrintCFGToFile) {
    stringStream title;
    title.print("BlockListBuilder ");
    scope->method()->print_name(&title);
    CFGPrinter::print_cfg(_bci2block, title.as_string(), false, false);
  }
#endif
}


void BlockListBuilder::set_entries(int osr_bci) {
  // generate start blocks
  BlockBegin* std_entry = make_block_at(0, NULL);
  if (scope()->caller() == NULL) {
    std_entry->set(BlockBegin::std_entry_flag);
  }
  if (osr_bci != -1) {
    BlockBegin* osr_entry = make_block_at(osr_bci, NULL);
    osr_entry->set(BlockBegin::osr_entry_flag);
  }

  // generate exception entry blocks
  XHandlers* list = xhandlers();
  const int n = list->length();
  for (int i = 0; i < n; i++) {
    XHandler* h = list->handler_at(i);
    BlockBegin* entry = make_block_at(h->handler_bci(), NULL);
    entry->set(BlockBegin::exception_entry_flag);
    h->set_entry_block(entry);
  }
}


BlockBegin* BlockListBuilder::make_block_at(int cur_bci, BlockBegin* predecessor) {
  assert(method()->bci_block_start().at(cur_bci), "wrong block starts of MethodLivenessAnalyzer");

  BlockBegin* block = _bci2block->at(cur_bci);
  if (block == NULL) {
    block = new BlockBegin(cur_bci);
    block->init_stores_to_locals(method()->max_locals());
    _bci2block->at_put(cur_bci, block);
    _blocks.append(block);

    assert(predecessor == NULL || predecessor->bci() < cur_bci, "targets for backward branches must already exist");
  }

  if (predecessor != NULL) {
    if (block->is_set(BlockBegin::exception_entry_flag)) {
      BAILOUT_("Exception handler can be reached by both normal and exceptional control flow", block);
    }

    predecessor->add_successor(block);
    block->increment_total_preds();
  }

  return block;
}


inline void BlockListBuilder::store_one(BlockBegin* current, int local) {
  current->stores_to_locals().set_bit(local);
}
inline void BlockListBuilder::store_two(BlockBegin* current, int local) {
  store_one(current, local);
  store_one(current, local + 1);
}
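
// Note: store_two marks both local slots occupied by a category-2 value
// (long or double), so the phi-placement decision in mark_loops() sees a
// write to either half of the value.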


void BlockListBuilder::handle_exceptions(BlockBegin* current, int cur_bci) {
  // Draws edges from a block to its exception handlers
  XHandlers* list = xhandlers();
  const int n = list->length();

  for (int i = 0; i < n; i++) {
    XHandler* h = list->handler_at(i);

    if (h->covers(cur_bci)) {
      BlockBegin* entry = h->entry_block();
      assert(entry != NULL && entry == _bci2block->at(h->handler_bci()), "entry must be set");
      assert(entry->is_set(BlockBegin::exception_entry_flag), "flag must be set");

      // add each exception handler only once
      if (!current->is_successor(entry)) {
        current->add_successor(entry);
        entry->increment_total_preds();
      }

      // stop when reaching catchall
      if (h->catch_type() == 0) break;
    }
  }
}

void BlockListBuilder::handle_jsr(BlockBegin* current, int sr_bci, int next_bci) {
  // start a new block after the jsr bytecode and link this block into the cfg
  make_block_at(next_bci, current);

  // start a new block at the subroutine entry and mark it with a special flag
  BlockBegin* sr_block = make_block_at(sr_bci, current);
  if (!sr_block->is_set(BlockBegin::subroutine_entry_flag)) {
    sr_block->set(BlockBegin::subroutine_entry_flag);
  }
}


void BlockListBuilder::set_leaders() {
  bool has_xhandlers = xhandlers()->has_handlers();
  BlockBegin* current = NULL;

  // Knowing which bcis start a new block simplifies the analysis.
  // Without it, backward branches could jump to a bci where no block was created
  // during bytecode iteration. This would require the creation of a new block at the
  // branch target and a modification of the successor lists.
  const BitMap& bci_block_start = method()->bci_block_start();

  ciBytecodeStream s(method());
  while (s.next() != ciBytecodeStream::EOBC()) {
    int cur_bci = s.cur_bci();

    if (bci_block_start.at(cur_bci)) {
      current = make_block_at(cur_bci, current);
    }
    assert(current != NULL, "must have current block");

    if (has_xhandlers && GraphBuilder::can_trap(method(), s.cur_bc())) {
      handle_exceptions(current, cur_bci);
    }

    switch (s.cur_bc()) {
      // track stores to local variables for selective creation of phi functions
      case Bytecodes::_iinc:     store_one(current, s.get_index()); break;
      case Bytecodes::_istore:   store_one(current, s.get_index()); break;
      case Bytecodes::_lstore:   store_two(current, s.get_index()); break;
      case Bytecodes::_fstore:   store_one(current, s.get_index()); break;
      case Bytecodes::_dstore:   store_two(current, s.get_index()); break;
      case Bytecodes::_astore:   store_one(current, s.get_index()); break;
      case Bytecodes::_istore_0: store_one(current, 0); break;
      case Bytecodes::_istore_1: store_one(current, 1); break;
      case Bytecodes::_istore_2: store_one(current, 2); break;
      case Bytecodes::_istore_3: store_one(current, 3); break;
      case Bytecodes::_lstore_0: store_two(current, 0); break;
      case Bytecodes::_lstore_1: store_two(current, 1); break;
      case Bytecodes::_lstore_2: store_two(current, 2); break;
      case Bytecodes::_lstore_3: store_two(current, 3); break;
      case Bytecodes::_fstore_0: store_one(current, 0); break;
      case Bytecodes::_fstore_1: store_one(current, 1); break;
      case Bytecodes::_fstore_2: store_one(current, 2); break;
      case Bytecodes::_fstore_3: store_one(current, 3); break;
      case Bytecodes::_dstore_0: store_two(current, 0); break;
      case Bytecodes::_dstore_1: store_two(current, 1); break;
      case Bytecodes::_dstore_2: store_two(current, 2); break;
      case Bytecodes::_dstore_3: store_two(current, 3); break;
      case Bytecodes::_astore_0: store_one(current, 0); break;
      case Bytecodes::_astore_1: store_one(current, 1); break;
      case Bytecodes::_astore_2: store_one(current, 2); break;
      case Bytecodes::_astore_3: store_one(current, 3); break;

      // track bytecodes that affect the control flow
      case Bytecodes::_athrow:  // fall through
      case Bytecodes::_ret:     // fall through
      case Bytecodes::_ireturn: // fall through
      case Bytecodes::_lreturn: // fall through
      case Bytecodes::_freturn: // fall through
      case Bytecodes::_dreturn: // fall through
      case Bytecodes::_areturn: // fall through
      case Bytecodes::_return:
        current = NULL;
        break;

      case Bytecodes::_ifeq:      // fall through
      case Bytecodes::_ifne:      // fall through
      case Bytecodes::_iflt:      // fall through
      case Bytecodes::_ifge:      // fall through
      case Bytecodes::_ifgt:      // fall through
      case Bytecodes::_ifle:      // fall through
      case Bytecodes::_if_icmpeq: // fall through
      case Bytecodes::_if_icmpne: // fall through
      case Bytecodes::_if_icmplt: // fall through
      case Bytecodes::_if_icmpge: // fall through
      case Bytecodes::_if_icmpgt: // fall through
      case Bytecodes::_if_icmple: // fall through
      case Bytecodes::_if_acmpeq: // fall through
      case Bytecodes::_if_acmpne: // fall through
      case Bytecodes::_ifnull:    // fall through
      case Bytecodes::_ifnonnull:
        make_block_at(s.next_bci(), current);
        make_block_at(s.get_dest(), current);
        current = NULL;
        break;

      case Bytecodes::_goto:
        make_block_at(s.get_dest(), current);
        current = NULL;
        break;

      case Bytecodes::_goto_w:
        make_block_at(s.get_far_dest(), current);
        current = NULL;
        break;

      case Bytecodes::_jsr:
        handle_jsr(current, s.get_dest(), s.next_bci());
        current = NULL;
        break;

      case Bytecodes::_jsr_w:
        handle_jsr(current, s.get_far_dest(), s.next_bci());
        current = NULL;
        break;

      case Bytecodes::_tableswitch: {
        // set block for each case
        Bytecode_tableswitch sw(&s);
        int l = sw.length();
        for (int i = 0; i < l; i++) {
          make_block_at(cur_bci + sw.dest_offset_at(i), current);
        }
        make_block_at(cur_bci + sw.default_offset(), current);
        current = NULL;
        break;
      }

      case Bytecodes::_lookupswitch: {
        // set block for each case
        Bytecode_lookupswitch sw(&s);
        int l = sw.number_of_pairs();
        for (int i = 0; i < l; i++) {
          make_block_at(cur_bci + sw.pair_at(i).offset(), current);
        }
        make_block_at(cur_bci + sw.default_offset(), current);
        current = NULL;
        break;
      }

      default:
        break;
    }
  }
}


void BlockListBuilder::mark_loops() {
  ResourceMark rm;

  _active.initialize(BlockBegin::number_of_blocks());
  _visited.initialize(BlockBegin::number_of_blocks());
  _loop_map = intArray(BlockBegin::number_of_blocks(), BlockBegin::number_of_blocks(), 0);
  _next_loop_index = 0;
  _next_block_number = _blocks.length();

  // recursively iterate the control flow graph
  mark_loops(_bci2block->at(0), false);
  assert(_next_block_number >= 0, "invalid block numbers");

  // Remove dangling Resource pointers before the ResourceMark goes out-of-scope.
  _active.resize(0);
  _visited.resize(0);
}

void BlockListBuilder::make_loop_header(BlockBegin* block) {
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    // exception edges may look like loops but don't mark them as such
    // since it screws up block ordering.
    return;
  }
  if (!block->is_set(BlockBegin::parser_loop_header_flag)) {
    block->set(BlockBegin::parser_loop_header_flag);

    assert(_loop_map.at(block->block_id()) == 0, "must not be set yet");
    assert(0 <= _next_loop_index && _next_loop_index < BitsPerInt, "_next_loop_index is used as a bit-index in integer");
    _loop_map.at_put(block->block_id(), 1 << _next_loop_index);
    if (_next_loop_index < 31) _next_loop_index++;
  } else {
    // block already marked as loop header
    assert(is_power_of_2((unsigned int)_loop_map.at(block->block_id())), "exactly one bit must be set");
  }
}
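
// Note: each loop header gets its own bit in the loop map, so up to 31 loops
// can be told apart. Once _next_loop_index reaches 31 it stops advancing, and
// all further loop headers share bit 31 (see the comment in mark_loops()
// below about methods with 32 or more loops).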

int BlockListBuilder::mark_loops(BlockBegin* block, bool in_subroutine) {
  int block_id = block->block_id();

  if (_visited.at(block_id)) {
    if (_active.at(block_id)) {
      // reached block via backward branch
      make_loop_header(block);
    }
    // return cached loop information for this block
    return _loop_map.at(block_id);
  }

  if (block->is_set(BlockBegin::subroutine_entry_flag)) {
    in_subroutine = true;
  }

  // set active and visited bits before successors are processed
  _visited.set_bit(block_id);
  _active.set_bit(block_id);

  intptr_t loop_state = 0;
  for (int i = block->number_of_sux() - 1; i >= 0; i--) {
    // recursively process all successors
    loop_state |= mark_loops(block->sux_at(i), in_subroutine);
  }

  // clear active-bit after all successors are processed
  _active.clear_bit(block_id);

  // reverse-post-order numbering of all blocks
  block->set_depth_first_number(_next_block_number);
  _next_block_number--;

  if (loop_state != 0 || in_subroutine) {
    // block is contained at least in one loop, so phi functions are necessary
    // phi functions are also necessary for all locals stored in a subroutine
    scope()->requires_phi_function().set_union(block->stores_to_locals());
  }

  if (block->is_set(BlockBegin::parser_loop_header_flag)) {
    int header_loop_state = _loop_map.at(block_id);
    assert(is_power_of_2((unsigned)header_loop_state), "exactly one bit must be set");

    // If the highest bit is set (i.e. when integer value is negative), the method
    // has 32 or more loops. This bit is never cleared because it is used for multiple loops
    if (header_loop_state >= 0) {
      clear_bits(loop_state, header_loop_state);
    }
  }

  // cache and return loop information for this block
  _loop_map.at_put(block_id, loop_state);
  return loop_state;
}
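
// Example of the propagation above: for a CFG B0 -> B1 -> B2 -> B1, the
// backward branch B2 -> B1 makes B1 a loop header with bit 0 set. The
// recursion returns that bit for B2, so B1 sees loop_state == 1 (and records
// phis for its stored locals); being the header, B1 then clears its own bit
// before caching and returning, so B0, which lies outside the loop, sees 0.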


#ifndef PRODUCT

int compare_depth_first(BlockBegin** a, BlockBegin** b) {
  return (*a)->depth_first_number() - (*b)->depth_first_number();
}

void BlockListBuilder::print() {
  tty->print("----- initial block list of BlockListBuilder for method ");
  method()->print_short_name();
  tty->cr();

  // better readability if blocks are sorted in processing order
  _blocks.sort(compare_depth_first);

  for (int i = 0; i < _blocks.length(); i++) {
    BlockBegin* cur = _blocks.at(i);
    tty->print("%4d: B%-4d bci: %-4d preds: %-4d ", cur->depth_first_number(), cur->block_id(), cur->bci(), cur->total_preds());

    tty->print(cur->is_set(BlockBegin::std_entry_flag)          ? " std" : "    ");
    tty->print(cur->is_set(BlockBegin::osr_entry_flag)          ? " osr" : "    ");
    tty->print(cur->is_set(BlockBegin::exception_entry_flag)    ? " ex"  : "   ");
    tty->print(cur->is_set(BlockBegin::subroutine_entry_flag)   ? " sr"  : "   ");
    tty->print(cur->is_set(BlockBegin::parser_loop_header_flag) ? " lh"  : "   ");

    if (cur->number_of_sux() > 0) {
      tty->print(" sux: ");
      for (int j = 0; j < cur->number_of_sux(); j++) {
        BlockBegin* sux = cur->sux_at(j);
        tty->print("B%d ", sux->block_id());
      }
    }
    tty->cr();
  }
}

#endif


// A simple growable array of Values indexed by ciFields
class FieldBuffer: public CompilationResourceObj {
 private:
  GrowableArray<Value> _values;

 public:
  FieldBuffer() {}

  void kill() {
    _values.trunc_to(0);
  }

  Value at(ciField* field) {
    assert(field->holder()->is_loaded(), "must be a loaded field");
    int offset = field->offset();
    if (offset < _values.length()) {
      return _values.at(offset);
    } else {
      return NULL;
    }
  }

  void at_put(ciField* field, Value value) {
    assert(field->holder()->is_loaded(), "must be a loaded field");
    int offset = field->offset();
    _values.at_put_grow(offset, value, NULL);
  }

};
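
// Note: values are keyed by the field's byte offset within the object, so
// the growable array is sparse (only slots at valid field offsets are ever
// written) and the offset doubles as a simple alias key within one buffer.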


// MemoryBuffer is a fairly simple model of the current state of memory.
// It partitions memory into several pieces. The first piece is
// generic memory where little is known about the owner of the memory.
// This is conceptually represented by the tuple <O, F, V> which says
// that the field F of object O has value V. This is flattened so
// that F is represented by the offset of the field and the parallel
// arrays _objects and _values are used for O and V. Loads of O.F can
// simply use V. Newly allocated objects are kept in a separate list
// along with a parallel array for each object which represents the
// current value of its fields. Stores of the default value to fields
// which have never been stored to before are eliminated since they
// are redundant. Once newly allocated objects are stored into
// another object or they are passed out of the current compile they
// are treated like generic memory.
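// For example, given "p = new Point(); p.x = 0;" the store of the default
// value 0 to a field of the fresh object is dropped entirely, while after
// "p.x = 5; k = p.x;" the load of p.x is folded to the recorded value 5,
// as long as p has not escaped into another object.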

class MemoryBuffer: public CompilationResourceObj {
 private:
  FieldBuffer                 _values;
  GrowableArray<Value>        _objects;
  GrowableArray<Value>        _newobjects;
  GrowableArray<FieldBuffer*> _fields;

 public:
  MemoryBuffer() {}

  StoreField* store(StoreField* st) {
    if (!EliminateFieldAccess) {
      return st;
    }

    Value object = st->obj();
    Value value = st->value();
    ciField* field = st->field();
    if (field->holder()->is_loaded()) {
      int offset = field->offset();
      int index = _newobjects.find(object);
      if (index != -1) {
        // newly allocated object with no other stores performed on this field
        FieldBuffer* buf = _fields.at(index);
        if (buf->at(field) == NULL && is_default_value(value)) {
#ifndef PRODUCT
          if (PrintIRDuringConstruction && Verbose) {
            tty->print_cr("Eliminated store for object %d:", index);
            st->print_line();
          }
#endif
          return NULL;
        } else {
          buf->at_put(field, value);
        }
      } else {
        _objects.at_put_grow(offset, object, NULL);
        _values.at_put(field, value);
      }

      store_value(value);
    } else {
      // if we held onto field names we could alias based on names but
      // we don't know what's being stored to so kill it all.
      kill();
    }
    return st;
  }


  // return true if this value corresponds to the default value of a field.
  bool is_default_value(Value value) {
    Constant* con = value->as_Constant();
    if (con) {
      switch (con->type()->tag()) {
        case intTag:    return con->type()->as_IntConstant()->value() == 0;
        case longTag:   return con->type()->as_LongConstant()->value() == 0;
        case floatTag:  return jint_cast(con->type()->as_FloatConstant()->value()) == 0;
        case doubleTag: return jlong_cast(con->type()->as_DoubleConstant()->value()) == jlong_cast(0);
        case objectTag: return con->type() == objectNull;
        default:        ShouldNotReachHere();
      }
    }
    return false;
  }


  // return either the actual value of a load or the load itself
  Value load(LoadField* load) {
    if (!EliminateFieldAccess) {
      return load;
    }

    if (RoundFPResults && UseSSE < 2 && load->type()->is_float_kind()) {
      // can't skip load since value might get rounded as a side effect
      return load;
    }

    ciField* field = load->field();
    Value object   = load->obj();
    if (field->holder()->is_loaded() && !field->is_volatile()) {
      int offset = field->offset();
      Value result = NULL;
      int index = _newobjects.find(object);
      if (index != -1) {
        result = _fields.at(index)->at(field);
      } else if (_objects.at_grow(offset, NULL) == object) {
        result = _values.at(field);
      }
      if (result != NULL) {
#ifndef PRODUCT
        if (PrintIRDuringConstruction && Verbose) {
          tty->print_cr("Eliminated load: ");
          load->print_line();
        }
#endif
        assert(result->type()->tag() == load->type()->tag(), "wrong types");
        return result;
      }
    }
    return load;
  }

  // Record this newly allocated object
  void new_instance(NewInstance* object) {
    int index = _newobjects.length();
    _newobjects.append(object);
    if (_fields.at_grow(index, NULL) == NULL) {
      _fields.at_put(index, new FieldBuffer());
    } else {
      _fields.at(index)->kill();
    }
  }

  void store_value(Value value) {
    int index = _newobjects.find(value);
    if (index != -1) {
      // stored a newly allocated object into another object.
      // Assume we've lost track of it as separate slice of memory.
      // We could do better by keeping track of whether individual
      // fields could alias each other.
      _newobjects.remove_at(index);
      // pull out the field info and append it to the end of the field
      // info list so it can be reused later.
      _fields.append(_fields.at(index));
      _fields.remove_at(index);
    }
  }

  void kill() {
    _newobjects.trunc_to(0);
    _objects.trunc_to(0);
    _values.kill();
  }
};


// Implementation of GraphBuilder's ScopeData

GraphBuilder::ScopeData::ScopeData(ScopeData* parent)
  : _parent(parent)
  , _bci2block(NULL)
  , _scope(NULL)
  , _has_handler(false)
  , _stream(NULL)
  , _work_list(NULL)
  , _caller_stack_size(-1)
  , _continuation(NULL)
  , _parsing_jsr(false)
  , _jsr_xhandlers(NULL)
  , _num_returns(0)
  , _cleanup_block(NULL)
  , _cleanup_return_prev(NULL)
  , _cleanup_state(NULL)
  , _ignore_return(false)
{
  if (parent != NULL) {
    _max_inline_size = (intx) ((float) NestedInliningSizeRatio * (float) parent->max_inline_size() / 100.0f);
  } else {
    _max_inline_size = MaxInlineSize;
  }
  if (_max_inline_size < MaxTrivialSize) {
    _max_inline_size = MaxTrivialSize;
  }
}
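
// Note: the effective inlining budget shrinks geometrically with inlining
// depth. With NestedInliningSizeRatio at 90, for instance, each nested scope
// is allowed only 90% of its caller's _max_inline_size, but never less than
// MaxTrivialSize.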


void GraphBuilder::kill_all() {
  if (UseLocalValueNumbering) {
    vmap()->kill_all();
  }
  _memory->kill();
}


BlockBegin* GraphBuilder::ScopeData::block_at(int bci) {
  if (parsing_jsr()) {
    // It is necessary to clone all blocks associated with a
    // subroutine, including those for exception handlers in the scope
    // of the method containing the jsr (because those exception
    // handlers may contain ret instructions in some cases).
    BlockBegin* block = bci2block()->at(bci);
    if (block != NULL && block == parent()->bci2block()->at(bci)) {
      BlockBegin* new_block = new BlockBegin(block->bci());
      if (PrintInitialBlockList) {
        tty->print_cr("CFG: cloned block %d (bci %d) as block %d for jsr",
                      block->block_id(), block->bci(), new_block->block_id());
      }
      // copy data from cloned block
      new_block->set_depth_first_number(block->depth_first_number());
      if (block->is_set(BlockBegin::parser_loop_header_flag)) new_block->set(BlockBegin::parser_loop_header_flag);
      // Preserve certain flags for assertion checking
      if (block->is_set(BlockBegin::subroutine_entry_flag)) new_block->set(BlockBegin::subroutine_entry_flag);
      if (block->is_set(BlockBegin::exception_entry_flag))  new_block->set(BlockBegin::exception_entry_flag);

      // copy was_visited_flag to allow early detection of bailouts
      // if a block that is used in a jsr has already been visited before,
      // it is shared between the normal control flow and a subroutine
      // BlockBegin::try_merge returns false when the flag is set, this leads
      // to a compilation bailout
      if (block->is_set(BlockBegin::was_visited_flag))      new_block->set(BlockBegin::was_visited_flag);

      bci2block()->at_put(bci, new_block);
      block = new_block;
    }
    return block;
  } else {
    return bci2block()->at(bci);
  }
}


XHandlers* GraphBuilder::ScopeData::xhandlers() const {
  if (_jsr_xhandlers == NULL) {
    assert(!parsing_jsr(), "");
    return scope()->xhandlers();
  }
  assert(parsing_jsr(), "");
  return _jsr_xhandlers;
}


void GraphBuilder::ScopeData::set_scope(IRScope* scope) {
  _scope = scope;
  bool parent_has_handler = false;
  if (parent() != NULL) {
    parent_has_handler = parent()->has_handler();
  }
  _has_handler = parent_has_handler || scope->xhandlers()->has_handlers();
}


void GraphBuilder::ScopeData::set_inline_cleanup_info(BlockBegin* block,
                                                      Instruction* return_prev,
                                                      ValueStack* return_state) {
  _cleanup_block       = block;
  _cleanup_return_prev = return_prev;
  _cleanup_state       = return_state;
}


void GraphBuilder::ScopeData::add_to_work_list(BlockBegin* block) {
  if (_work_list == NULL) {
    _work_list = new BlockList();
  }

  if (!block->is_set(BlockBegin::is_on_work_list_flag)) {
    // Do not start parsing the continuation block while in a
    // sub-scope
    if (parsing_jsr()) {
      if (block == jsr_continuation()) {
        return;
      }
    } else {
      if (block == continuation()) {
        return;
      }
    }
    block->set(BlockBegin::is_on_work_list_flag);
    _work_list->push(block);

    sort_top_into_worklist(_work_list, block);
  }
}


void GraphBuilder::sort_top_into_worklist(BlockList* worklist, BlockBegin* top) {
  assert(worklist->top() == top, "");
  // sort block descending into work list
  const int dfn = top->depth_first_number();
  assert(dfn != -1, "unknown depth first number");
  int i = worklist->length()-2;
  while (i >= 0) {
    BlockBegin* b = worklist->at(i);
    if (b->depth_first_number() < dfn) {
      worklist->at_put(i+1, b);
    } else {
      break;
    }
    i --;
  }
  if (i >= -1) worklist->at_put(i + 1, top);
}
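
// Note: this is one step of an insertion sort keeping the work list ordered
// by descending depth-first number, so pop() hands out the block with the
// smallest number first. Since the numbers are assigned in reverse post-order,
// this tends to parse a block's predecessors before the block itself.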


BlockBegin* GraphBuilder::ScopeData::remove_from_work_list() {
  if (is_work_list_empty()) {
    return NULL;
  }
  return _work_list->pop();
}


bool GraphBuilder::ScopeData::is_work_list_empty() const {
  return (_work_list == NULL || _work_list->length() == 0);
}


void GraphBuilder::ScopeData::setup_jsr_xhandlers() {
  assert(parsing_jsr(), "");
  // clone all the exception handlers from the scope
  XHandlers* handlers = new XHandlers(scope()->xhandlers());
  const int n = handlers->length();
  for (int i = 0; i < n; i++) {
    // The XHandlers need to be adjusted to dispatch to the cloned
    // handler block instead of the default one but the synthetic
    // unlocker needs to be handled specially.  The synthetic unlocker
    // should be left alone since there can be only one and all code
    // should dispatch to the same one.
    XHandler* h = handlers->handler_at(i);
    assert(h->handler_bci() != SynchronizationEntryBCI, "must be real");
    h->set_entry_block(block_at(h->handler_bci()));
  }
  _jsr_xhandlers = handlers;
}


int GraphBuilder::ScopeData::num_returns() {
  if (parsing_jsr()) {
    return parent()->num_returns();
  }
  return _num_returns;
}


void GraphBuilder::ScopeData::incr_num_returns() {
  if (parsing_jsr()) {
    parent()->incr_num_returns();
  } else {
    ++_num_returns;
  }
}


// Implementation of GraphBuilder

#define INLINE_BAILOUT(msg)        { inline_bailout(msg); return false; }


void GraphBuilder::load_constant() {
  ciConstant con = stream()->get_constant();
  if (con.basic_type() == T_ILLEGAL) {
    // FIXME: an unresolved Dynamic constant can get here,
    // and that should not terminate the whole compilation.
    BAILOUT("could not resolve a constant");
  } else {
    ValueType* t = illegalType;
    ValueStack* patch_state = NULL;
    switch (con.basic_type()) {
      case T_BOOLEAN: t = new IntConstant   (con.as_boolean()); break;
      case T_BYTE   : t = new IntConstant   (con.as_byte   ()); break;
      case T_CHAR   : t = new IntConstant   (con.as_char   ()); break;
      case T_SHORT  : t = new IntConstant   (con.as_short  ()); break;
      case T_INT    : t = new IntConstant   (con.as_int    ()); break;
      case T_LONG   : t = new LongConstant  (con.as_long   ()); break;
      case T_FLOAT  : t = new FloatConstant (con.as_float  ()); break;
      case T_DOUBLE : t = new DoubleConstant(con.as_double ()); break;
      case T_ARRAY  : t = new ArrayConstant (con.as_object()->as_array()); break;
      case T_OBJECT :
       {
        ciObject* obj = con.as_object();
        if (!obj->is_loaded()
            || (PatchALot && obj->klass() != ciEnv::current()->String_klass())) {
          // A Class, MethodType, MethodHandle, or String.
          // Unloaded condy nodes show up as T_ILLEGAL, above.
          patch_state = copy_state_before();
          t = new ObjectConstant(obj);
        } else {
          // Might be a Class, MethodType, MethodHandle, or Dynamic constant
          // result, which might turn out to be an array.
          if (obj->is_null_object())
            t = objectNull;
          else if (obj->is_array())
            t = new ArrayConstant(obj->as_array());
          else
            t = new InstanceConstant(obj->as_instance());
        }
        break;
       }
      default       : ShouldNotReachHere();
    }
    Value x;
    if (patch_state != NULL) {
      x = new Constant(t, patch_state);
    } else {
      x = new Constant(t);
    }
    push(t, append(x));
  }
}


void GraphBuilder::load_local(ValueType* type, int index) {
  Value x = state()->local_at(index);
  assert(x != NULL && !x->type()->is_illegal(), "access of illegal local variable");
  push(type, x);
}


void GraphBuilder::store_local(ValueType* type, int index) {
  Value x = pop(type);
  store_local(state(), x, index);
}


void GraphBuilder::store_local(ValueStack* state, Value x, int index) {
  if (parsing_jsr()) {
    // We need to do additional tracking of the location of the return
    // address for jsrs since we don't handle arbitrary jsr/ret
    // constructs. Here we are figuring out in which circumstances we
    // need to bail out.
    if (x->type()->is_address()) {
      scope_data()->set_jsr_return_address_local(index);

      // Also check parent jsrs (if any) at this time to see whether
      // they are using this local. We don't handle skipping over a
      // ret.
      for (ScopeData* cur_scope_data = scope_data()->parent();
           cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
           cur_scope_data = cur_scope_data->parent()) {
        if (cur_scope_data->jsr_return_address_local() == index) {
          BAILOUT("subroutine overwrites return address from previous subroutine");
        }
      }
    } else if (index == scope_data()->jsr_return_address_local()) {
      scope_data()->set_jsr_return_address_local(-1);
    }
  }

  state->store_local(index, round_fp(x));
}


void GraphBuilder::load_indexed(BasicType type) {
  // In case of in block code motion in range check elimination
  ValueStack* state_before = copy_state_indexed_access();
  compilation()->set_has_access_indexed(true);
  Value index = ipop();
  Value array = apop();
  Value length = NULL;
  if (CSEArrayLength ||
      (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
      (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
    length = append(new ArrayLength(array, state_before));
  }
  push(as_ValueType(type), append(new LoadIndexed(array, index, length, type, state_before)));
}


void GraphBuilder::store_indexed(BasicType type) {
  // In case of in block code motion in range check elimination
  ValueStack* state_before = copy_state_indexed_access();
  compilation()->set_has_access_indexed(true);
  Value value = pop(as_ValueType(type));
  Value index = ipop();
  Value array = apop();
  Value length = NULL;
  if (CSEArrayLength ||
      (array->as_AccessField() && array->as_AccessField()->field()->is_constant()) ||
      (array->as_NewArray() && array->as_NewArray()->length() && array->as_NewArray()->length()->type()->is_constant())) {
    length = append(new ArrayLength(array, state_before));
  }
  ciType* array_type = array->declared_type();
  bool check_boolean = false;
  if (array_type != NULL) {
    if (array_type->is_loaded() &&
        array_type->as_array_klass()->element_type()->basic_type() == T_BOOLEAN) {
      assert(type == T_BYTE, "boolean store uses bastore");
      Value mask = append(new Constant(new IntConstant(1)));
      value = append(new LogicOp(Bytecodes::_iand, value, mask));
    }
  } else if (type == T_BYTE) {
    check_boolean = true;
  }
  StoreIndexed* result = new StoreIndexed(array, index, length, type, value, state_before, check_boolean);
  append(result);
  _memory->store_value(value);

  if (type == T_OBJECT && is_profiling()) {
    // Note that we'd collect profile data in this method if we wanted it.
    compilation()->set_would_profile(true);

    if (profile_checkcasts()) {
      result->set_profiled_method(method());
      result->set_profiled_bci(bci());
      result->set_should_profile(true);
    }
  }
}


void GraphBuilder::stack_op(Bytecodes::Code code) {
  switch (code) {
    case Bytecodes::_pop:
      { state()->raw_pop();
      }
      break;
    case Bytecodes::_pop2:
      { state()->raw_pop();
        state()->raw_pop();
      }
      break;
    case Bytecodes::_dup:
      { Value w = state()->raw_pop();
        state()->raw_push(w);
        state()->raw_push(w);
      }
      break;
    case Bytecodes::_dup_x1:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        state()->raw_push(w1);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup_x2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        Value w3 = state()->raw_pop();
        state()->raw_push(w1);
        state()->raw_push(w3);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        state()->raw_push(w2);
        state()->raw_push(w1);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup2_x1:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        Value w3 = state()->raw_pop();
        state()->raw_push(w2);
        state()->raw_push(w1);
        state()->raw_push(w3);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_dup2_x2:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        Value w3 = state()->raw_pop();
        Value w4 = state()->raw_pop();
        state()->raw_push(w2);
        state()->raw_push(w1);
        state()->raw_push(w4);
        state()->raw_push(w3);
        state()->raw_push(w2);
        state()->raw_push(w1);
      }
      break;
    case Bytecodes::_swap:
      { Value w1 = state()->raw_pop();
        Value w2 = state()->raw_pop();
        state()->raw_push(w1);
        state()->raw_push(w2);
      }
      break;
    default:
      ShouldNotReachHere();
      break;
  }
}
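
// Note: raw_push/raw_pop move untyped stack slots, so a category-2 value
// (long or double) occupies two of them. That is what lets e.g. _dup2 cover
// both "duplicate two category-1 values" and "duplicate one category-2 value"
// with the same two-slot shuffle.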


void GraphBuilder::arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before) {
  Value y = pop(type);
  Value x = pop(type);
  // NOTE: strictfp can be queried from current method since we don't
  // inline methods with differing strictfp bits
  Value res = new ArithmeticOp(code, x, y, method()->is_strict(), state_before);
  // Note: currently single-precision floating-point rounding on Intel is handled at the LIRGenerator level
  res = append(res);
  if (method()->is_strict()) {
    res = round_fp(res);
  }
  push(type, res);
}


void GraphBuilder::negate_op(ValueType* type) {
  push(type, append(new NegateOp(pop(type))));
}


void GraphBuilder::shift_op(ValueType* type, Bytecodes::Code code) {
  Value s = ipop();
  Value x = pop(type);
  // try to simplify
  // Note: This code should go into the canonicalizer as soon as it can
  //       handle canonicalized forms that contain more than one node.
  if (CanonicalizeNodes && code == Bytecodes::_iushr) {
    // pattern: x >>> s
    IntConstant* s1 = s->type()->as_IntConstant();
    if (s1 != NULL) {
      // pattern: x >>> s1, with s1 constant
      ShiftOp* l = x->as_ShiftOp();
      if (l != NULL && l->op() == Bytecodes::_ishl) {
        // pattern: (a << b) >>> s1
        IntConstant* s0 = l->y()->type()->as_IntConstant();
        if (s0 != NULL) {
          // pattern: (a << s0) >>> s1
          const int s0c = s0->value() & 0x1F; // only the low 5 bits are significant for shifts
          const int s1c = s1->value() & 0x1F; // only the low 5 bits are significant for shifts
          if (s0c == s1c) {
            if (s0c == 0) {
              // pattern: (a << 0) >>> 0 => simplify to: a
              ipush(l->x());
            } else {
              // pattern: (a << s0c) >>> s0c => simplify to: a & m, with m constant
              assert(0 < s0c && s0c < BitsPerInt, "adjust code below to handle corner cases");
              const int m = (1 << (BitsPerInt - s0c)) - 1;
              Value s = append(new Constant(new IntConstant(m)));
              ipush(append(new LogicOp(Bytecodes::_iand, l->x(), s)));
            }
            return;
          }
        }
      }
    }
  }
  // could not simplify
  push(type, append(new ShiftOp(code, x, s)));
}
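
// Worked example of the simplification above: with BitsPerInt == 32, the
// sequence (a << 3) >>> 3 keeps only the low 29 bits of a, so it is rewritten
// as a & ((1 << 29) - 1), i.e. a & 0x1FFFFFFF, avoiding both shifts.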


void GraphBuilder::logic_op(ValueType* type, Bytecodes::Code code) {
  Value y = pop(type);
  Value x = pop(type);
  push(type, append(new LogicOp(code, x, y)));
}


void GraphBuilder::compare_op(ValueType* type, Bytecodes::Code code) {
  ValueStack* state_before = copy_state_before();
  Value y = pop(type);
  Value x = pop(type);
  ipush(append(new CompareOp(code, x, y, state_before)));
}


void GraphBuilder::convert(Bytecodes::Code op, BasicType from, BasicType to) {
  push(as_ValueType(to), append(new Convert(op, pop(as_ValueType(from)), as_ValueType(to))));
}


void GraphBuilder::increment() {
  int index = stream()->get_index();
  int delta = stream()->is_wide() ? (signed short)Bytes::get_Java_u2(stream()->cur_bcp() + 4) : (signed char)(stream()->cur_bcp()[2]);
  load_local(intType, index);
  ipush(append(new Constant(new IntConstant(delta))));
  arithmetic_op(intType, Bytecodes::_iadd);
  store_local(intType, index);
}


void GraphBuilder::_goto(int from_bci, int to_bci) {
  Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
  if (is_profiling()) {
    compilation()->set_would_profile(true);
    x->set_profiled_bci(bci());
    if (profile_branches()) {
      x->set_profiled_method(method());
      x->set_should_profile(true);
    }
  }
  append(x);
}
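
// Note: the second Goto constructor argument (to_bci <= from_bci) requests a
// safepoint for backward branches, matching the safepoint checks asserted for
// the canonicalized branches in if_node() below.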


void GraphBuilder::if_node(Value x, If::Condition cond, Value y, ValueStack* state_before) {
  BlockBegin* tsux = block_at(stream()->get_dest());
  BlockBegin* fsux = block_at(stream()->next_bci());
  bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
  // In case of loop invariant code motion or predicate insertion
  // before the body of a loop the state is needed
  Instruction *i = append(new If(x, cond, false, y, tsux, fsux, (is_bb || compilation()->is_optimistic()) ? state_before : NULL, is_bb));

  assert(i->as_Goto() == NULL ||
         (i->as_Goto()->sux_at(0) == tsux && i->as_Goto()->is_safepoint() == tsux->bci() < stream()->cur_bci()) ||
         (i->as_Goto()->sux_at(0) == fsux && i->as_Goto()->is_safepoint() == fsux->bci() < stream()->cur_bci()),
         "safepoint state of Goto returned by canonicalizer incorrect");

  if (is_profiling()) {
    If* if_node = i->as_If();
    if (if_node != NULL) {
      // Note that we'd collect profile data in this method if we wanted it.
      compilation()->set_would_profile(true);
      // At level 2 we need the proper bci to count backedges
      if_node->set_profiled_bci(bci());
      if (profile_branches()) {
        // Successors can be rotated by the canonicalizer, check for this case.
        if_node->set_profiled_method(method());
        if_node->set_should_profile(true);
        if (if_node->tsux() == fsux) {
          if_node->set_swapped(true);
        }
      }
      return;
    }

    // Check if this If was reduced to Goto.
    Goto *goto_node = i->as_Goto();
    if (goto_node != NULL) {
      compilation()->set_would_profile(true);
      goto_node->set_profiled_bci(bci());
      if (profile_branches()) {
        goto_node->set_profiled_method(method());
        goto_node->set_should_profile(true);
        // Find out which successor is used.
        if (goto_node->default_sux() == tsux) {
          goto_node->set_direction(Goto::taken);
        } else if (goto_node->default_sux() == fsux) {
          goto_node->set_direction(Goto::not_taken);
        } else {
          ShouldNotReachHere();
        }
      }
      return;
    }
  }
}


void GraphBuilder::if_zero(ValueType* type, If::Condition cond) {
  Value y = append(new Constant(intZero));
  ValueStack* state_before = copy_state_before();
  Value x = ipop();
  if_node(x, cond, y, state_before);
}


void GraphBuilder::if_null(ValueType* type, If::Condition cond) {
  Value y = append(new Constant(objectNull));
  ValueStack* state_before = copy_state_before();
  Value x = apop();
  if_node(x, cond, y, state_before);
}


void GraphBuilder::if_same(ValueType* type, If::Condition cond) {
  ValueStack* state_before = copy_state_before();
  Value y = pop(type);
  Value x = pop(type);
  if_node(x, cond, y, state_before);
}


void GraphBuilder::jsr(int dest) {
  // We only handle well-formed jsrs (those which are "block-structured").
  // If the bytecodes are strange (jumping out of a jsr block) then we
  // might end up trying to re-parse a block containing a jsr which
  // has already been activated. Watch for this case and bail out.
  for (ScopeData* cur_scope_data = scope_data();
       cur_scope_data != NULL && cur_scope_data->parsing_jsr() && cur_scope_data->scope() == scope();
       cur_scope_data = cur_scope_data->parent()) {
    if (cur_scope_data->jsr_entry_bci() == dest) {
      BAILOUT("too-complicated jsr/ret structure");
    }
  }

  push(addressType, append(new Constant(new AddressConstant(next_bci()))));
  if (!try_inline_jsr(dest)) {
    return; // bailed out while parsing and inlining subroutine
  }
}


void GraphBuilder::ret(int local_index) {
  if (!parsing_jsr()) BAILOUT("ret encountered while not parsing subroutine");

  if (local_index != scope_data()->jsr_return_address_local()) {
    BAILOUT("can not handle complicated jsr/ret constructs");
  }

  // Rets simply become (NON-SAFEPOINT) gotos to the jsr continuation
  append(new Goto(scope_data()->jsr_continuation(), false));
}


void GraphBuilder::table_switch() {
  Bytecode_tableswitch sw(stream());
  const int l = sw.length();
  if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) {
    // total of 2 successors => use If instead of switch
    // Note: This code should go into the canonicalizer as soon as it can
    //       handle canonicalized forms that contain more than one node.
    Value key = append(new Constant(new IntConstant(sw.low_key())));
    BlockBegin* tsux = block_at(bci() + sw.dest_offset_at(0));
    BlockBegin* fsux = block_at(bci() + sw.default_offset());
    bool is_bb = tsux->bci() < bci() || fsux->bci() < bci();
    // In case of loop invariant code motion or predicate insertion
    // before the body of a loop the state is needed
    ValueStack* state_before = copy_state_if_bb(is_bb);
    append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb));
  } else {
    // collect successors
    BlockList* sux = new BlockList(l + 1, NULL);
    int i;
    bool has_bb = false;
    for (i = 0; i < l; i++) {
      sux->at_put(i, block_at(bci() + sw.dest_offset_at(i)));
      if (sw.dest_offset_at(i) < 0) has_bb = true;
    }
    // add default successor
    if (sw.default_offset() < 0) has_bb = true;
    sux->at_put(i, block_at(bci() + sw.default_offset()));
    // In case of loop invariant code motion or predicate insertion
    // before the body of a loop the state is needed
    ValueStack* state_before = copy_state_if_bb(has_bb);
    Instruction* res = append(new TableSwitch(ipop(), sux, sw.low_key(), state_before, has_bb));
#ifdef ASSERT
    if (res->as_Goto()) {
      for (i = 0; i < l; i++) {
        if (sux->at(i) == res->as_Goto()->sux_at(0)) {
          assert(res->as_Goto()->is_safepoint() == sw.dest_offset_at(i) < 0, "safepoint state of Goto returned by canonicalizer incorrect");
        }
      }
    }
#endif
  }
}
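
// Note: has_bb records whether any case (or the default) branches backward;
// such a switch needs the state captured before it so that the resulting
// TableSwitch/LookupSwitch (or canonicalized Goto) can act as a safepoint,
// as the assertion above checks.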
| 1367 | |
| 1368 | |
| 1369 | void GraphBuilder::lookup_switch() { |
| 1370 | Bytecode_lookupswitch sw(stream()); |
| 1371 | const int l = sw.number_of_pairs(); |
| 1372 | if (CanonicalizeNodes && l == 1 && compilation()->env()->comp_level() != CompLevel_full_profile) { |
| 1373 | // total of 2 successors => use If instead of switch |
| 1374 | // Note: This code should go into the canonicalizer as soon as it can |
| 1375 | // can handle canonicalized forms that contain more than one node. |
| 1376 | // simplify to If |
| 1377 | LookupswitchPair pair = sw.pair_at(0); |
| 1378 | Value key = append(new Constant(new IntConstant(pair.match()))); |
| 1379 | BlockBegin* tsux = block_at(bci() + pair.offset()); |
| 1380 | BlockBegin* fsux = block_at(bci() + sw.default_offset()); |
| 1381 | bool is_bb = tsux->bci() < bci() || fsux->bci() < bci(); |
| 1382 | // In case of loop invariant code motion or predicate insertion |
| 1383 | // before the body of a loop the state is needed |
| 1384 | ValueStack* state_before = copy_state_if_bb(is_bb);; |
| 1385 | append(new If(ipop(), If::eql, true, key, tsux, fsux, state_before, is_bb)); |
| 1386 | } else { |
| 1387 | // collect successors & keys |
| 1388 | BlockList* sux = new BlockList(l + 1, NULL); |
| 1389 | intArray* keys = new intArray(l, l, 0); |
| 1390 | int i; |
| 1391 | bool has_bb = false; |
| 1392 | for (i = 0; i < l; i++) { |
| 1393 | LookupswitchPair pair = sw.pair_at(i); |
| 1394 | if (pair.offset() < 0) has_bb = true; |
| 1395 | sux->at_put(i, block_at(bci() + pair.offset())); |
| 1396 | keys->at_put(i, pair.match()); |
| 1397 | } |
| 1398 | // add default successor |
| 1399 | if (sw.default_offset() < 0) has_bb = true; |
| 1400 | sux->at_put(i, block_at(bci() + sw.default_offset())); |
| 1401 | // In case of loop invariant code motion or predicate insertion
| 1402 | // before the body of a loop, the state is needed
| 1403 | ValueStack* state_before = copy_state_if_bb(has_bb); |
| 1404 | Instruction* res = append(new LookupSwitch(ipop(), sux, keys, state_before, has_bb)); |
| 1405 | #ifdef ASSERT |
| 1406 | if (res->as_Goto()) { |
| 1407 | for (i = 0; i < l; i++) { |
| 1408 | if (sux->at(i) == res->as_Goto()->sux_at(0)) { |
| 1409 | assert(res->as_Goto()->is_safepoint() == (sw.pair_at(i).offset() < 0), "safepoint state of Goto returned by canonicalizer incorrect" );
| 1410 | } |
| 1411 | } |
| 1412 | } |
| 1413 | #endif |
| 1414 | } |
| 1415 | } |
| 1416 | |
| 1417 | void GraphBuilder::call_register_finalizer() { |
| 1418 | // If the receiver requires finalization then emit code to perform |
| 1419 | // the registration on return. |
| 1420 | |
| 1421 | // Gather some type information about the receiver |
| 1422 | Value receiver = state()->local_at(0); |
| 1423 | assert(receiver != NULL, "must have a receiver" ); |
| 1424 | ciType* declared_type = receiver->declared_type(); |
| 1425 | ciType* exact_type = receiver->exact_type(); |
| 1426 | if (exact_type == NULL && |
| 1427 | receiver->as_Local() && |
| 1428 | receiver->as_Local()->java_index() == 0) { |
| 1429 | ciInstanceKlass* ik = compilation()->method()->holder(); |
| 1430 | if (ik->is_final()) { |
| 1431 | exact_type = ik; |
| 1432 | } else if (UseCHA && !(ik->has_subklass() || ik->is_interface())) { |
| 1433 | // test class is leaf class |
| 1434 | compilation()->dependency_recorder()->assert_leaf_type(ik); |
| 1435 | exact_type = ik; |
| 1436 | } else { |
| 1437 | declared_type = ik; |
| 1438 | } |
| 1439 | } |
| 1440 | |
| 1441 | // see if we know statically that registration isn't required |
| 1442 | bool needs_check = true; |
| 1443 | if (exact_type != NULL) { |
| 1444 | needs_check = exact_type->as_instance_klass()->has_finalizer(); |
| 1445 | } else if (declared_type != NULL) { |
| 1446 | ciInstanceKlass* ik = declared_type->as_instance_klass(); |
| 1447 | if (!Dependencies::has_finalizable_subclass(ik)) { |
| 1448 | compilation()->dependency_recorder()->assert_has_no_finalizable_subclasses(ik); |
| 1449 | needs_check = false; |
| 1450 | } |
| 1451 | } |
| 1452 | |
| 1453 | if (needs_check) { |
| 1454 | // Perform the registration of finalizable objects. |
| 1455 | ValueStack* state_before = copy_state_for_exception(); |
| 1456 | load_local(objectType, 0); |
| 1457 | append_split(new Intrinsic(voidType, vmIntrinsics::_Object_init, |
| 1458 | state()->pop_arguments(1), |
| 1459 | true, state_before, true)); |
| 1460 | } |
| 1461 | } |
| 1462 | |
| 1463 | |
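| | // Emit the return of value x from the current method. This handles
| | // finalizer registration for Object.<init>, a trailing memory barrier
| | // for constructors that wrote final or volatile fields, narrowing of
| | // sub-int return values, and returns from inlined methods (which are
| | // turned into Gotos to the continuation block).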
| 1464 | void GraphBuilder::method_return(Value x, bool ignore_return) { |
| 1465 | if (RegisterFinalizersAtInit && |
| 1466 | method()->intrinsic_id() == vmIntrinsics::_Object_init) { |
| 1467 | call_register_finalizer(); |
| 1468 | } |
| 1469 | |
| 1470 | bool need_mem_bar = false; |
| 1471 | if (method()->name() == ciSymbol::object_initializer_name() && |
| 1472 | (scope()->wrote_final() || (AlwaysSafeConstructors && scope()->wrote_fields()) |
| 1473 | || (support_IRIW_for_not_multiple_copy_atomic_cpu && scope()->wrote_volatile()) |
| 1474 | )){ |
| 1475 | need_mem_bar = true; |
| 1476 | } |
| 1477 | |
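| | // Normalize sub-int return values to their declared type: byte and
| | // short are sign-extended with a shift pair, char and boolean are
| | // masked, since only int-sized values live on the operand stack.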
| 1478 | BasicType bt = method()->return_type()->basic_type(); |
| 1479 | switch (bt) { |
| 1480 | case T_BYTE: |
| 1481 | { |
| 1482 | Value shift = append(new Constant(new IntConstant(24))); |
| 1483 | x = append(new ShiftOp(Bytecodes::_ishl, x, shift)); |
| 1484 | x = append(new ShiftOp(Bytecodes::_ishr, x, shift)); |
| 1485 | break; |
| 1486 | } |
| 1487 | case T_SHORT: |
| 1488 | { |
| 1489 | Value shift = append(new Constant(new IntConstant(16))); |
| 1490 | x = append(new ShiftOp(Bytecodes::_ishl, x, shift)); |
| 1491 | x = append(new ShiftOp(Bytecodes::_ishr, x, shift)); |
| 1492 | break; |
| 1493 | } |
| 1494 | case T_CHAR: |
| 1495 | { |
| 1496 | Value mask = append(new Constant(new IntConstant(0xFFFF))); |
| 1497 | x = append(new LogicOp(Bytecodes::_iand, x, mask)); |
| 1498 | break; |
| 1499 | } |
| 1500 | case T_BOOLEAN: |
| 1501 | { |
| 1502 | Value mask = append(new Constant(new IntConstant(1))); |
| 1503 | x = append(new LogicOp(Bytecodes::_iand, x, mask)); |
| 1504 | break; |
| 1505 | } |
| 1506 | default: |
| 1507 | break; |
| 1508 | } |
| 1509 | |
| 1510 | // Check to see whether we are inlining. If so, Return |
| 1511 | // instructions become Gotos to the continuation point. |
| 1512 | if (continuation() != NULL) { |
| 1513 | |
| 1514 | int invoke_bci = state()->caller_state()->bci(); |
| 1515 | |
| 1516 | if (x != NULL && !ignore_return) { |
| 1517 | ciMethod* caller = state()->scope()->caller()->method(); |
| 1518 | Bytecodes::Code invoke_raw_bc = caller->raw_code_at_bci(invoke_bci); |
| 1519 | if (invoke_raw_bc == Bytecodes::_invokehandle || invoke_raw_bc == Bytecodes::_invokedynamic) { |
| 1520 | ciType* declared_ret_type = caller->get_declared_signature_at_bci(invoke_bci)->return_type(); |
| 1521 | if (declared_ret_type->is_klass() && x->exact_type() == NULL && |
| 1522 | x->declared_type() != declared_ret_type && declared_ret_type != compilation()->env()->Object_klass()) { |
| 1523 | x = append(new TypeCast(declared_ret_type->as_klass(), x, copy_state_before())); |
| 1524 | } |
| 1525 | } |
| 1526 | } |
| 1527 | |
| 1528 | assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet" ); |
| 1529 | |
| 1530 | if (compilation()->env()->dtrace_method_probes()) { |
| 1531 | // Report exit from inline methods |
| 1532 | Values* args = new Values(1); |
| 1533 | args->push(append(new Constant(new MethodConstant(method())))); |
| 1534 | append(new RuntimeCall(voidType, "dtrace_method_exit" , CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args)); |
| 1535 | } |
| 1536 | |
| 1537 | // If the inlined method is synchronized, the monitor must be |
| 1538 | // released before we jump to the continuation block. |
| 1539 | if (method()->is_synchronized()) { |
| 1540 | assert(state()->locks_size() == 1, "receiver must be locked here" ); |
| 1541 | monitorexit(state()->lock_at(0), SynchronizationEntryBCI); |
| 1542 | } |
| 1543 | |
| 1544 | if (need_mem_bar) { |
| 1545 | append(new MemBar(lir_membar_storestore)); |
| 1546 | } |
| 1547 | |
| 1548 | // The state at the end of an inlined method is the state of the caller
| 1549 | // without the method parameters on the stack, but with the return
| 1550 | // value, if any, of the inlined method on the operand stack.
| 1551 | set_state(state()->caller_state()->copy_for_parsing()); |
| 1552 | if (x != NULL) { |
| 1553 | if (!ignore_return) { |
| 1554 | state()->push(x->type(), x); |
| 1555 | } |
| 1556 | if (profile_return() && x->type()->is_object_kind()) { |
| 1557 | ciMethod* caller = state()->scope()->method(); |
| 1558 | profile_return_type(x, method(), caller, invoke_bci); |
| 1559 | } |
| 1560 | } |
| 1561 | Goto* goto_callee = new Goto(continuation(), false); |
| 1562 | |
| 1563 | // See whether this is the first return; if so, store off some |
| 1564 | // of the state for later examination |
| 1565 | if (num_returns() == 0) { |
| 1566 | set_inline_cleanup_info(); |
| 1567 | } |
| 1568 | |
| 1569 | // The current bci() is in the wrong scope, so use the bci() of |
| 1570 | // the continuation point. |
| 1571 | append_with_bci(goto_callee, scope_data()->continuation()->bci()); |
| 1572 | incr_num_returns(); |
| 1573 | return; |
| 1574 | } |
| 1575 | |
| 1576 | state()->truncate_stack(0); |
| 1577 | if (method()->is_synchronized()) { |
| 1578 | // perform the unlocking before exiting the method |
| 1579 | Value receiver; |
| 1580 | if (!method()->is_static()) { |
| 1581 | receiver = _initial_state->local_at(0); |
| 1582 | } else { |
| 1583 | receiver = append(new Constant(new ClassConstant(method()->holder()))); |
| 1584 | } |
| 1585 | append_split(new MonitorExit(receiver, state()->unlock())); |
| 1586 | } |
| 1587 | |
| 1588 | if (need_mem_bar) { |
| 1589 | append(new MemBar(lir_membar_storestore)); |
| 1590 | } |
| 1591 | |
| 1592 | assert(!ignore_return, "Ignoring return value works only for inlining" ); |
| 1593 | append(new Return(x)); |
| 1594 | } |
| 1595 | |
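| | // Try to fold a field's constant value into a Constant instruction.
| | // Returns NULL if the value is invalid or if an object field should
| | // not be treated as a compile-time constant.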
| 1596 | Value GraphBuilder::make_constant(ciConstant field_value, ciField* field) { |
| 1597 | if (!field_value.is_valid()) return NULL; |
| 1598 | |
| 1599 | BasicType field_type = field_value.basic_type(); |
| 1600 | ValueType* value = as_ValueType(field_value); |
| 1601 | |
| 1602 | // Attach dimension info to stable arrays. |
| 1603 | if (FoldStableValues && |
| 1604 | field->is_stable() && field_type == T_ARRAY && !field_value.is_null_or_zero()) { |
| 1605 | ciArray* array = field_value.as_object()->as_array(); |
| 1606 | jint dimension = field->type()->as_array_klass()->dimension(); |
| 1607 | value = new StableArrayConstant(array, dimension); |
| 1608 | } |
| 1609 | |
| 1610 | switch (field_type) { |
| 1611 | case T_ARRAY: |
| 1612 | case T_OBJECT: |
| 1613 | if (field_value.as_object()->should_be_constant()) { |
| 1614 | return new Constant(value); |
| 1615 | } |
| 1616 | return NULL; // Not a constant. |
| 1617 | default: |
| 1618 | return new Constant(value); |
| 1619 | } |
| 1620 | } |
| 1621 | |
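| | // Translate getstatic/putstatic/getfield/putfield: fold loads of
| | // compile-time constant fields, mask boolean stores to 0/1, and fall
| | // back to runtime patching when the field or holder is unresolved.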
| 1622 | void GraphBuilder::access_field(Bytecodes::Code code) { |
| 1623 | bool will_link; |
| 1624 | ciField* field = stream()->get_field(will_link); |
| 1625 | ciInstanceKlass* holder = field->holder(); |
| 1626 | BasicType field_type = field->type()->basic_type(); |
| 1627 | ValueType* type = as_ValueType(field_type); |
| 1628 | // call will_link again to determine if the field is valid. |
| 1629 | const bool needs_patching = !holder->is_loaded() || |
| 1630 | !field->will_link(method(), code) || |
| 1631 | PatchALot; |
| 1632 | |
| 1633 | ValueStack* state_before = NULL; |
| 1634 | if (!holder->is_initialized() || needs_patching) { |
| 1635 | // save state before instruction for debug info when |
| 1636 | // deoptimization happens during patching |
| 1637 | state_before = copy_state_before(); |
| 1638 | } |
| 1639 | |
| 1640 | Value obj = NULL; |
| 1641 | if (code == Bytecodes::_getstatic || code == Bytecodes::_putstatic) { |
| 1642 | if (state_before != NULL) { |
| 1643 | // build a patching constant |
| 1644 | obj = new Constant(new InstanceConstant(holder->java_mirror()), state_before); |
| 1645 | } else { |
| 1646 | obj = new Constant(new InstanceConstant(holder->java_mirror())); |
| 1647 | } |
| 1648 | } |
| 1649 | |
| 1650 | if (field->is_final() && (code == Bytecodes::_putfield)) { |
| 1651 | scope()->set_wrote_final(); |
| 1652 | } |
| 1653 | |
| 1654 | if (code == Bytecodes::_putfield) { |
| 1655 | scope()->set_wrote_fields(); |
| 1656 | if (field->is_volatile()) { |
| 1657 | scope()->set_wrote_volatile(); |
| 1658 | } |
| 1659 | } |
| 1660 | |
| 1661 | const int offset = !needs_patching ? field->offset() : -1; |
| 1662 | switch (code) { |
| 1663 | case Bytecodes::_getstatic: { |
| 1664 | // check for compile-time constants, i.e., initialized static final fields |
| 1665 | Value constant = NULL; |
| 1666 | if (field->is_static_constant() && !PatchALot) { |
| 1667 | ciConstant field_value = field->constant_value(); |
| 1668 | assert(!field->is_stable() || !field_value.is_null_or_zero(), |
| 1669 | "stable static w/ default value shouldn't be a constant" ); |
| 1670 | constant = make_constant(field_value, field); |
| 1671 | } |
| 1672 | if (constant != NULL) { |
| 1673 | push(type, append(constant)); |
| 1674 | } else { |
| 1675 | if (state_before == NULL) { |
| 1676 | state_before = copy_state_for_exception(); |
| 1677 | } |
| 1678 | push(type, append(new LoadField(append(obj), offset, field, true, |
| 1679 | state_before, needs_patching))); |
| 1680 | } |
| 1681 | break; |
| 1682 | } |
| 1683 | case Bytecodes::_putstatic: { |
| 1684 | Value val = pop(type); |
| 1685 | if (state_before == NULL) { |
| 1686 | state_before = copy_state_for_exception(); |
| 1687 | } |
| 1688 | if (field->type()->basic_type() == T_BOOLEAN) { |
| 1689 | Value mask = append(new Constant(new IntConstant(1))); |
| 1690 | val = append(new LogicOp(Bytecodes::_iand, val, mask)); |
| 1691 | } |
| 1692 | append(new StoreField(append(obj), offset, field, val, true, state_before, needs_patching)); |
| 1693 | break; |
| 1694 | } |
| 1695 | case Bytecodes::_getfield: { |
| 1696 | // Check for compile-time constants, i.e., trusted final non-static fields. |
| 1697 | Value constant = NULL; |
| 1698 | obj = apop(); |
| 1699 | ObjectType* obj_type = obj->type()->as_ObjectType(); |
| 1700 | if (field->is_constant() && obj_type->is_constant() && !PatchALot) { |
| 1701 | ciObject* const_oop = obj_type->constant_value(); |
| 1702 | if (!const_oop->is_null_object() && const_oop->is_loaded()) { |
| 1703 | ciConstant field_value = field->constant_value_of(const_oop); |
| 1704 | if (field_value.is_valid()) { |
| 1705 | constant = make_constant(field_value, field); |
| 1706 | // For CallSite objects add a dependency for invalidation of the optimization. |
| 1707 | if (field->is_call_site_target()) { |
| 1708 | ciCallSite* call_site = const_oop->as_call_site(); |
| 1709 | if (!call_site->is_constant_call_site()) { |
| 1710 | ciMethodHandle* target = field_value.as_object()->as_method_handle(); |
| 1711 | dependency_recorder()->assert_call_site_target_value(call_site, target); |
| 1712 | } |
| 1713 | } |
| 1714 | } |
| 1715 | } |
| 1716 | } |
| 1717 | if (constant != NULL) { |
| 1718 | push(type, append(constant)); |
| 1719 | } else { |
| 1720 | if (state_before == NULL) { |
| 1721 | state_before = copy_state_for_exception(); |
| 1722 | } |
| 1723 | LoadField* load = new LoadField(obj, offset, field, false, state_before, needs_patching); |
| 1724 | Value replacement = !needs_patching ? _memory->load(load) : load; |
| 1725 | if (replacement != load) { |
| 1726 | assert(replacement->is_linked() || !replacement->can_be_linked(), "should already be linked" );
| 1727 | push(type, replacement); |
| 1728 | } else { |
| 1729 | push(type, append(load)); |
| 1730 | } |
| 1731 | } |
| 1732 | break; |
| 1733 | } |
| 1734 | case Bytecodes::_putfield: { |
| 1735 | Value val = pop(type); |
| 1736 | obj = apop(); |
| 1737 | if (state_before == NULL) { |
| 1738 | state_before = copy_state_for_exception(); |
| 1739 | } |
| 1740 | if (field->type()->basic_type() == T_BOOLEAN) { |
| 1741 | Value mask = append(new Constant(new IntConstant(1))); |
| 1742 | val = append(new LogicOp(Bytecodes::_iand, val, mask)); |
| 1743 | } |
| 1744 | StoreField* store = new StoreField(obj, offset, field, val, false, state_before, needs_patching); |
| 1745 | if (!needs_patching) store = _memory->store(store); |
| 1746 | if (store != NULL) { |
| 1747 | append(store); |
| 1748 | } |
| 1749 | break; |
| 1750 | } |
| 1751 | default: |
| 1752 | ShouldNotReachHere(); |
| 1753 | break; |
| 1754 | } |
| 1755 | } |
| 1756 | |
| 1757 | |
| 1758 | Dependencies* GraphBuilder::dependency_recorder() const { |
| 1759 | assert(DeoptC1, "need debug information" ); |
| 1760 | return compilation()->dependency_recorder(); |
| 1761 | } |
| 1762 | |
| 1763 | // How many arguments do we want to profile? |
| 1764 | Values* GraphBuilder::args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver) { |
| 1765 | int n = 0; |
| 1766 | bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci())); |
| 1767 | start = has_receiver ? 1 : 0; |
| 1768 | if (profile_arguments()) { |
| 1769 | ciProfileData* data = method()->method_data()->bci_to_data(bci()); |
| 1770 | if (data != NULL && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) { |
| 1771 | n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments(); |
| 1772 | } |
| 1773 | } |
| 1774 | // If we are inlining, then we need to collect arguments to profile parameters for the target
| 1775 | if (profile_parameters() && target != NULL) { |
| 1776 | if (target->method_data() != NULL && target->method_data()->parameters_type_data() != NULL) { |
| 1777 | // The receiver is profiled on method entry so it's included in |
| 1778 | // the number of parameters but here we're only interested in |
| 1779 | // actual arguments. |
| 1780 | n = MAX2(n, target->method_data()->parameters_type_data()->number_of_parameters() - start); |
| 1781 | } |
| 1782 | } |
| 1783 | if (n > 0) { |
| 1784 | return new Values(n); |
| 1785 | } |
| 1786 | return NULL; |
| 1787 | } |
| 1788 | |
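| | // Debug-only sanity check that we did not miss an argument to
| | // profile; method handle intrinsics are exempt because some
| | // arguments may have been popped.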
| 1789 | void GraphBuilder::check_args_for_profiling(Values* obj_args, int expected) { |
| 1790 | #ifdef ASSERT |
| 1791 | bool ignored_will_link; |
| 1792 | ciSignature* declared_signature = NULL; |
| 1793 | ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature); |
| 1794 | assert(expected == obj_args->max_length() || real_target->is_method_handle_intrinsic(), "missed on arg?" ); |
| 1795 | #endif |
| 1796 | } |
| 1797 | |
| 1798 | // Collect arguments that we want to profile in a list |
| 1799 | Values* GraphBuilder::collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver) { |
| 1800 | int start = 0; |
| 1801 | Values* obj_args = args_list_for_profiling(target, start, may_have_receiver); |
| 1802 | if (obj_args == NULL) { |
| 1803 | return NULL; |
| 1804 | } |
| 1805 | int s = obj_args->max_length(); |
| 1806 | // if called through method handle invoke, some arguments may have been popped |
| 1807 | for (int i = start, j = 0; j < s && i < args->length(); i++) { |
| 1808 | if (args->at(i)->type()->is_object_kind()) { |
| 1809 | obj_args->push(args->at(i)); |
| 1810 | j++; |
| 1811 | } |
| 1812 | } |
| 1813 | check_args_for_profiling(obj_args, s); |
| 1814 | return obj_args; |
| 1815 | } |
| 1816 | |
| 1817 | |
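| | // Translate an invoke* bytecode: try to devirtualize the call using
| | // the exact receiver type or CHA, attempt to inline the target, and
| | // otherwise emit a regular Invoke instruction, profiling as requested.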
| 1818 | void GraphBuilder::invoke(Bytecodes::Code code) { |
| 1819 | bool will_link; |
| 1820 | ciSignature* declared_signature = NULL; |
| 1821 | ciMethod* target = stream()->get_method(will_link, &declared_signature); |
| 1822 | ciKlass* holder = stream()->get_declared_method_holder(); |
| 1823 | const Bytecodes::Code bc_raw = stream()->cur_bc_raw(); |
| 1824 | assert(declared_signature != NULL, "cannot be null" ); |
| 1825 | assert(will_link == target->is_loaded(), "" ); |
| 1826 | |
| 1827 | ciInstanceKlass* klass = target->holder(); |
| 1828 | assert(!target->is_loaded() || klass->is_loaded(), "loaded target must imply loaded klass" ); |
| 1829 | |
| 1830 | // check if CHA possible: if so, change the code to invoke_special |
| 1831 | ciInstanceKlass* calling_klass = method()->holder(); |
| 1832 | ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder); |
| 1833 | ciInstanceKlass* actual_recv = callee_holder; |
| 1834 | |
| 1835 | CompileLog* log = compilation()->log(); |
| 1836 | if (log != NULL) |
| 1837 | log->elem("call method='%d' instr='%s'" , |
| 1838 | log->identify(target), |
| 1839 | Bytecodes::name(code)); |
| 1840 | |
| 1841 | // invoke-special-super |
| 1842 | if (bc_raw == Bytecodes::_invokespecial && !target->is_object_initializer()) { |
| 1843 | ciInstanceKlass* sender_klass = |
| 1844 | calling_klass->is_unsafe_anonymous() ? calling_klass->unsafe_anonymous_host() : |
| 1845 | calling_klass; |
| 1846 | if (sender_klass->is_interface()) { |
| 1847 | int index = state()->stack_size() - (target->arg_size_no_receiver() + 1); |
| 1848 | Value receiver = state()->stack_at(index); |
| 1849 | CheckCast* c = new CheckCast(sender_klass, receiver, copy_state_before()); |
| 1850 | c->set_invokespecial_receiver_check(); |
| 1851 | state()->stack_at_put(index, append_split(c)); |
| 1852 | } |
| 1853 | } |
| 1854 | |
| 1855 | // Some methods are obviously bindable without any type checks so |
| 1856 | // convert them directly to an invokespecial or invokestatic. |
| 1857 | if (target->is_loaded() && !target->is_abstract() && target->can_be_statically_bound()) { |
| 1858 | switch (bc_raw) { |
| 1859 | case Bytecodes::_invokevirtual: |
| 1860 | code = Bytecodes::_invokespecial; |
| 1861 | break; |
| 1862 | case Bytecodes::_invokehandle: |
| 1863 | code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial; |
| 1864 | break; |
| 1865 | default: |
| 1866 | break; |
| 1867 | } |
| 1868 | } else { |
| 1869 | if (bc_raw == Bytecodes::_invokehandle) { |
| 1870 | assert(!will_link, "should come here only for unlinked call" ); |
| 1871 | code = Bytecodes::_invokespecial; |
| 1872 | } |
| 1873 | } |
| 1874 | |
| 1875 | // Push the appendix argument (MethodType, CallSite, etc.), if one is present.
| 1876 | bool patch_for_appendix = false; |
| 1877 | int patching_appendix_arg = 0; |
| 1878 | if (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot)) { |
| 1879 | Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before())); |
| 1880 | apush(arg); |
| 1881 | patch_for_appendix = true; |
| 1882 | patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1; |
| 1883 | } else if (stream()->has_appendix()) { |
| 1884 | ciObject* appendix = stream()->get_appendix(); |
| 1885 | Value arg = append(new Constant(new ObjectConstant(appendix))); |
| 1886 | apush(arg); |
| 1887 | } |
| 1888 | |
| 1889 | ciMethod* cha_monomorphic_target = NULL; |
| 1890 | ciMethod* exact_target = NULL; |
| 1891 | Value better_receiver = NULL; |
| 1892 | if (UseCHA && DeoptC1 && target->is_loaded() && |
| 1893 | !(// %%% FIXME: Are both of these relevant? |
| 1894 | target->is_method_handle_intrinsic() || |
| 1895 | target->is_compiled_lambda_form()) && |
| 1896 | !patch_for_appendix) { |
| 1897 | Value receiver = NULL; |
| 1898 | ciInstanceKlass* receiver_klass = NULL; |
| 1899 | bool type_is_exact = false; |
| 1900 | // try to find a precise receiver type |
| 1901 | if (will_link && !target->is_static()) { |
| 1902 | int index = state()->stack_size() - (target->arg_size_no_receiver() + 1); |
| 1903 | receiver = state()->stack_at(index); |
| 1904 | ciType* type = receiver->exact_type(); |
| 1905 | if (type != NULL && type->is_loaded() && |
| 1906 | type->is_instance_klass() && !type->as_instance_klass()->is_interface()) { |
| 1907 | receiver_klass = (ciInstanceKlass*) type; |
| 1908 | type_is_exact = true; |
| 1909 | } |
| 1910 | if (type == NULL) { |
| 1911 | type = receiver->declared_type(); |
| 1912 | if (type != NULL && type->is_loaded() && |
| 1913 | type->is_instance_klass() && !type->as_instance_klass()->is_interface()) { |
| 1914 | receiver_klass = (ciInstanceKlass*) type; |
| 1915 | if (receiver_klass->is_leaf_type() && !receiver_klass->is_final()) { |
| 1916 | // Insert a dependency on this type since |
| 1917 | // find_monomorphic_target may assume it's already done. |
| 1918 | dependency_recorder()->assert_leaf_type(receiver_klass); |
| 1919 | type_is_exact = true; |
| 1920 | } |
| 1921 | } |
| 1922 | } |
| 1923 | } |
| 1924 | if (receiver_klass != NULL && type_is_exact && |
| 1925 | receiver_klass->is_loaded() && code != Bytecodes::_invokespecial) { |
| 1926 | // If we have the exact receiver type we can bind directly to |
| 1927 | // the method to call. |
| 1928 | exact_target = target->resolve_invoke(calling_klass, receiver_klass); |
| 1929 | if (exact_target != NULL) { |
| 1930 | target = exact_target; |
| 1931 | code = Bytecodes::_invokespecial; |
| 1932 | } |
| 1933 | } |
| 1934 | if (receiver_klass != NULL && |
| 1935 | receiver_klass->is_subtype_of(actual_recv) && |
| 1936 | actual_recv->is_initialized()) { |
| 1937 | actual_recv = receiver_klass; |
| 1938 | } |
| 1939 | |
| 1940 | if ((code == Bytecodes::_invokevirtual && callee_holder->is_initialized()) || |
| 1941 | (code == Bytecodes::_invokeinterface && callee_holder->is_initialized() && !actual_recv->is_interface())) { |
| 1942 | // Use CHA on the receiver to select a more precise method. |
| 1943 | cha_monomorphic_target = target->find_monomorphic_target(calling_klass, callee_holder, actual_recv); |
| 1944 | } else if (code == Bytecodes::_invokeinterface && callee_holder->is_loaded() && receiver != NULL) { |
| 1945 | assert(callee_holder->is_interface(), "invokeinterface to non-interface?" );
| 1946 | // If there is only one implementor of this interface then we
| 1947 | // may be able to bind this invoke directly to the implementing
| 1948 | // klass but we need both a dependence on the single interface
| 1949 | // and on the method we bind to. Additionally since all we know
| 1950 | // about the receiver type is that it's supposed to implement the
| 1951 | // interface we have to insert a check that it's the class we
| 1952 | // expect. Interface types are not checked by the verifier so
| 1953 | // they are roughly equivalent to Object.
| 1954 | // The number of implementors for declared_interface is less than
| 1955 | // or equal to the number of implementors for target->holder() so
| 1956 | // if the number of implementors of target->holder() == 1 then
| 1957 | // the number of implementors for declared_interface is 0 or 1. If
| 1958 | // it's 0 then no class implements declared_interface and there's
| 1959 | // no point in inlining.
| 1960 | ciInstanceKlass* singleton = NULL; |
| 1961 | ciInstanceKlass* declared_interface = callee_holder; |
| 1962 | if (declared_interface->nof_implementors() == 1 && |
| 1963 | (!target->is_default_method() || target->is_overpass()) /* CHA doesn't support default methods yet. */) { |
| 1964 | singleton = declared_interface->implementor(); |
| 1965 | assert(singleton != NULL && singleton != declared_interface, "" ); |
| 1966 | cha_monomorphic_target = target->find_monomorphic_target(calling_klass, declared_interface, singleton); |
| 1967 | if (cha_monomorphic_target != NULL) { |
| 1968 | if (cha_monomorphic_target->holder() != compilation()->env()->Object_klass()) { |
| 1969 | // If CHA is able to bind this invoke then update the class |
| 1970 | // to match that class, otherwise klass will refer to the |
| 1971 | // interface. |
| 1972 | klass = cha_monomorphic_target->holder(); |
| 1973 | actual_recv = declared_interface; |
| 1974 | |
| 1975 | // insert a check that it's really the expected class.
| 1976 | CheckCast* c = new CheckCast(klass, receiver, copy_state_for_exception()); |
| 1977 | c->set_incompatible_class_change_check(); |
| 1978 | c->set_direct_compare(klass->is_final()); |
| 1979 | // pass the result of the checkcast so that the compiler has |
| 1980 | // more accurate type info in the inlinee |
| 1981 | better_receiver = append_split(c); |
| 1982 | } else { |
| 1983 | cha_monomorphic_target = NULL; // subtype check against Object is useless |
| 1984 | } |
| 1985 | } |
| 1986 | } |
| 1987 | } |
| 1988 | } |
| 1989 | |
| 1990 | if (cha_monomorphic_target != NULL) { |
| 1991 | assert(!target->can_be_statically_bound() || target == cha_monomorphic_target, "" ); |
| 1992 | assert(!cha_monomorphic_target->is_abstract(), "" ); |
| 1993 | if (!cha_monomorphic_target->can_be_statically_bound(actual_recv)) { |
| 1994 | // If we inlined because CHA revealed only a single target method, |
| 1995 | // then we are dependent on that target method not getting overridden |
| 1996 | // by dynamic class loading. Be sure to test the "static" receiver |
| 1997 | // dest_method here, as opposed to the actual receiver, which may |
| 1998 | // falsely lead us to believe that the receiver is final or private. |
| 1999 | dependency_recorder()->assert_unique_concrete_method(actual_recv, cha_monomorphic_target); |
| 2000 | } |
| 2001 | code = Bytecodes::_invokespecial; |
| 2002 | } |
| 2003 | |
| 2004 | // check if we could do inlining |
| 2005 | if (!PatchALot && Inline && target->is_loaded() && |
| 2006 | (klass->is_initialized() || (klass->is_interface() && target->holder()->is_initialized())) |
| 2007 | && !patch_for_appendix) { |
| 2008 | // callee is known => check if we have static binding |
| 2009 | if (code == Bytecodes::_invokestatic || |
| 2010 | code == Bytecodes::_invokespecial || |
| 2011 | (code == Bytecodes::_invokevirtual && target->is_final_method()) || |
| 2012 | code == Bytecodes::_invokedynamic) { |
| 2013 | ciMethod* inline_target = (cha_monomorphic_target != NULL) ? cha_monomorphic_target : target; |
| 2014 | // static binding => check if callee is ok |
| 2015 | bool success = try_inline(inline_target, (cha_monomorphic_target != NULL) || (exact_target != NULL), false, code, better_receiver); |
| 2016 | |
| 2017 | CHECK_BAILOUT(); |
| 2018 | clear_inline_bailout(); |
| 2019 | |
| 2020 | if (success) { |
| 2021 | // Register a dependence if JVMTI has either breakpoint
| 2022 | // setting or method hotswapping capabilities, since either
| 2023 | // may cause deoptimization.
| 2024 | if (compilation()->env()->jvmti_can_hotswap_or_post_breakpoint()) { |
| 2025 | dependency_recorder()->assert_evol_method(inline_target); |
| 2026 | } |
| 2027 | return; |
| 2028 | } |
| 2029 | } else { |
| 2030 | print_inlining(target, "no static binding" , /*success*/ false); |
| 2031 | } |
| 2032 | } else { |
| 2033 | print_inlining(target, "not inlineable" , /*success*/ false); |
| 2034 | } |
| 2035 | |
| 2036 | // If we attempted an inline which did not succeed because of a |
| 2037 | // bailout during construction of the callee graph, the entire |
| 2038 | // compilation has to be aborted. This is fairly rare and currently |
| 2039 | // seems to only occur for jasm-generated classes which contain |
| 2040 | // jsr/ret pairs which are not associated with finally clauses and |
| 2041 | // do not have exception handlers in the containing method, and are |
| 2042 | // therefore not caught early enough to abort the inlining without |
| 2043 | // corrupting the graph. (We currently bail out with a non-empty |
| 2044 | // stack at a ret in these situations.) |
| 2045 | CHECK_BAILOUT(); |
| 2046 | |
| 2047 | // inlining not successful => standard invoke |
| 2048 | ValueType* result_type = as_ValueType(declared_signature->return_type()); |
| 2049 | ValueStack* state_before = copy_state_exhandling(); |
| 2050 | |
| 2051 | // The bytecode (code) might change in this method so we are checking this very late. |
| 2052 | const bool has_receiver = |
| 2053 | code == Bytecodes::_invokespecial || |
| 2054 | code == Bytecodes::_invokevirtual || |
| 2055 | code == Bytecodes::_invokeinterface; |
| 2056 | Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg); |
| 2057 | Value recv = has_receiver ? apop() : NULL; |
| 2058 | int vtable_index = Method::invalid_vtable_index; |
| 2059 | |
| 2060 | #ifdef SPARC |
| 2061 | // Currently only supported on Sparc. |
| 2062 | // The UseInlineCaches flag only controls dispatch to invokevirtuals for
| 2063 | // loaded classes which we weren't able to statically bind. |
| 2064 | if (!UseInlineCaches && target->is_loaded() && code == Bytecodes::_invokevirtual |
| 2065 | && !target->can_be_statically_bound()) { |
| 2066 | // Find a vtable index if one is available |
| 2067 | // For arrays, callee_holder is Object. Resolving the call with |
| 2068 | // Object would allow an illegal call to finalize() on an |
| 2069 | // array. We use holder instead: illegal calls to finalize() won't |
| 2070 | // be compiled as vtable calls (IC call resolution will catch the |
| 2071 | // illegal call) and the few legal calls on array types won't be |
| 2072 | // either. |
| 2073 | vtable_index = target->resolve_vtable_index(calling_klass, holder); |
| 2074 | } |
| 2075 | #endif |
| 2076 | |
| 2077 | // A null check is required here (when there is a receiver) for any of the following cases |
| 2078 | // - invokespecial, always need a null check. |
| 2079 | // - invokevirtual, when the target is final and loaded. Calls to final targets will become optimized |
| 2080 | // and require null checking. If the target is loaded a null check is emitted here. |
| 2081 | // If the target isn't loaded the null check must happen after the call resolution. We achieve that |
| 2082 | // by using the target method's unverified entry point (see CompiledIC::compute_monomorphic_entry).
| 2083 | // (The JVM specification requires that LinkageError must be thrown before a NPE. An unloaded target may |
| 2084 | // potentially fail, and can't have the null check before the resolution.) |
| 2085 | // - A call that will be profiled. (But we can't add a null check when the target is unloaded, for the same
| 2086 | // reason as above, so calls with a receiver to unloaded targets can't be profiled.) |
| 2087 | // |
| 2088 | // Normal invokevirtual will perform the null check during lookup |
| 2089 | |
| 2090 | bool need_null_check = (code == Bytecodes::_invokespecial) || |
| 2091 | (target->is_loaded() && (target->is_final_method() || (is_profiling() && profile_calls()))); |
| 2092 | |
| 2093 | if (need_null_check) { |
| 2094 | if (recv != NULL) { |
| 2095 | null_check(recv); |
| 2096 | } |
| 2097 | |
| 2098 | if (is_profiling()) { |
| 2099 | // Note that we'd collect profile data in this method if we wanted it. |
| 2100 | compilation()->set_would_profile(true); |
| 2101 | |
| 2102 | if (profile_calls()) { |
| 2103 | assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set" ); |
| 2104 | ciKlass* target_klass = NULL; |
| 2105 | if (cha_monomorphic_target != NULL) { |
| 2106 | target_klass = cha_monomorphic_target->holder(); |
| 2107 | } else if (exact_target != NULL) { |
| 2108 | target_klass = exact_target->holder(); |
| 2109 | } |
| 2110 | profile_call(target, recv, target_klass, collect_args_for_profiling(args, NULL, false), false); |
| 2111 | } |
| 2112 | } |
| 2113 | } |
| 2114 | |
| 2115 | Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before); |
| 2116 | // push result |
| 2117 | append_split(result); |
| 2118 | |
| 2119 | if (result_type != voidType) { |
| 2120 | if (method()->is_strict()) { |
| 2121 | push(result_type, round_fp(result)); |
| 2122 | } else { |
| 2123 | push(result_type, result); |
| 2124 | } |
| 2125 | } |
| 2126 | if (profile_return() && result_type->is_object_kind()) { |
| 2127 | profile_return_type(result, target); |
| 2128 | } |
| 2129 | } |
| 2130 | |
| 2131 | |
| 2132 | void GraphBuilder::new_instance(int klass_index) { |
| 2133 | ValueStack* state_before = copy_state_exhandling(); |
| 2134 | bool will_link; |
| 2135 | ciKlass* klass = stream()->get_klass(will_link); |
| 2136 | assert(klass->is_instance_klass(), "must be an instance klass" ); |
| 2137 | NewInstance* new_instance = new NewInstance(klass->as_instance_klass(), state_before, stream()->is_unresolved_klass()); |
| 2138 | _memory->new_instance(new_instance); |
| 2139 | apush(append_split(new_instance)); |
| 2140 | } |
| 2141 | |
| 2142 | |
| 2143 | void GraphBuilder::new_type_array() { |
| 2144 | ValueStack* state_before = copy_state_exhandling(); |
| 2145 | apush(append_split(new NewTypeArray(ipop(), (BasicType)stream()->get_index(), state_before))); |
| 2146 | } |
| 2147 | |
| 2148 | |
| 2149 | void GraphBuilder::new_object_array() { |
| 2150 | bool will_link; |
| 2151 | ciKlass* klass = stream()->get_klass(will_link); |
| 2152 | ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling(); |
| 2153 | NewArray* n = new NewObjectArray(klass, ipop(), state_before); |
| 2154 | apush(append_split(n)); |
| 2155 | } |
| 2156 | |
| 2157 | |
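| | // Returns true if a type check against k can be implemented as a
| | // single pointer comparison, i.e. when k is a final class or a leaf
| | // class guarded by a CHA dependency.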
| 2158 | bool GraphBuilder::direct_compare(ciKlass* k) { |
| 2159 | if (k->is_loaded() && k->is_instance_klass() && !UseSlowPath) { |
| 2160 | ciInstanceKlass* ik = k->as_instance_klass(); |
| 2161 | if (ik->is_final()) { |
| 2162 | return true; |
| 2163 | } else { |
| 2164 | if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) { |
| 2165 | // test class is leaf class |
| 2166 | dependency_recorder()->assert_leaf_type(ik); |
| 2167 | return true; |
| 2168 | } |
| 2169 | } |
| 2170 | } |
| 2171 | return false; |
| 2172 | } |
| 2173 | |
| 2174 | |
| 2175 | void GraphBuilder::check_cast(int klass_index) { |
| 2176 | bool will_link; |
| 2177 | ciKlass* klass = stream()->get_klass(will_link); |
| 2178 | ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_for_exception(); |
| 2179 | CheckCast* c = new CheckCast(klass, apop(), state_before); |
| 2180 | apush(append_split(c)); |
| 2181 | c->set_direct_compare(direct_compare(klass)); |
| 2182 | |
| 2183 | if (is_profiling()) { |
| 2184 | // Note that we'd collect profile data in this method if we wanted it. |
| 2185 | compilation()->set_would_profile(true); |
| 2186 | |
| 2187 | if (profile_checkcasts()) { |
| 2188 | c->set_profiled_method(method()); |
| 2189 | c->set_profiled_bci(bci()); |
| 2190 | c->set_should_profile(true); |
| 2191 | } |
| 2192 | } |
| 2193 | } |
| 2194 | |
| 2195 | |
| 2196 | void GraphBuilder::instance_of(int klass_index) { |
| 2197 | bool will_link; |
| 2198 | ciKlass* klass = stream()->get_klass(will_link); |
| 2199 | ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling(); |
| 2200 | InstanceOf* i = new InstanceOf(klass, apop(), state_before); |
| 2201 | ipush(append_split(i)); |
| 2202 | i->set_direct_compare(direct_compare(klass)); |
| 2203 | |
| 2204 | if (is_profiling()) { |
| 2205 | // Note that we'd collect profile data in this method if we wanted it. |
| 2206 | compilation()->set_would_profile(true); |
| 2207 | |
| 2208 | if (profile_checkcasts()) { |
| 2209 | i->set_profiled_method(method()); |
| 2210 | i->set_profiled_bci(bci()); |
| 2211 | i->set_should_profile(true); |
| 2212 | } |
| 2213 | } |
| 2214 | } |
| 2215 | |
| 2216 | |
| 2217 | void GraphBuilder::monitorenter(Value x, int bci) { |
| 2218 | // save state before locking in case of deoptimization after a NullPointerException |
| 2219 | ValueStack* state_before = copy_state_for_exception_with_bci(bci); |
| 2220 | append_with_bci(new MonitorEnter(x, state()->lock(x), state_before), bci); |
| 2221 | kill_all(); |
| 2222 | } |
| 2223 | |
| 2224 | |
| 2225 | void GraphBuilder::monitorexit(Value x, int bci) { |
| 2226 | append_with_bci(new MonitorExit(x, state()->unlock()), bci); |
| 2227 | kill_all(); |
| 2228 | } |
| 2229 | |
| 2230 | |
| 2231 | void GraphBuilder::new_multi_array(int dimensions) { |
| 2232 | bool will_link; |
| 2233 | ciKlass* klass = stream()->get_klass(will_link); |
| 2234 | ValueStack* state_before = !klass->is_loaded() || PatchALot ? copy_state_before() : copy_state_exhandling(); |
| 2235 | |
| 2236 | Values* dims = new Values(dimensions, dimensions, NULL); |
| 2237 | // fill in all dimensions |
| 2238 | int i = dimensions; |
| 2239 | while (i-- > 0) dims->at_put(i, ipop()); |
| 2240 | // create array |
| 2241 | NewArray* n = new NewMultiArray(klass, dims, state_before); |
| 2242 | apush(append_split(n)); |
| 2243 | } |
| 2244 | |
| 2245 | |
| 2246 | void GraphBuilder::throw_op(int bci) { |
| 2247 | // We require that the debug info for a Throw be the "state before" |
| 2248 | // the Throw (i.e., exception oop is still on TOS) |
| 2249 | ValueStack* state_before = copy_state_before_with_bci(bci); |
| 2250 | Throw* t = new Throw(apop(), state_before); |
| 2251 | // operand stack not needed after a throw |
| 2252 | state()->truncate_stack(0); |
| 2253 | append_with_bci(t, bci); |
| 2254 | } |
| 2255 | |
| 2256 | |
| 2257 | Value GraphBuilder::round_fp(Value fp_value) { |
| 2258 | // no rounding needed if SSE2 is used |
| 2259 | if (RoundFPResults && UseSSE < 2) { |
| 2260 | // Must currently insert rounding node for doubleword values that |
| 2261 | // are results of expressions (i.e., not loads from memory or |
| 2262 | // constants) |
| 2263 | if (fp_value->type()->tag() == doubleTag && |
| 2264 | fp_value->as_Constant() == NULL && |
| 2265 | fp_value->as_Local() == NULL && // method parameters need no rounding |
| 2266 | fp_value->as_RoundFP() == NULL) { |
| 2267 | return append(new RoundFP(fp_value)); |
| 2268 | } |
| 2269 | } |
| 2270 | return fp_value; |
| 2271 | } |
| 2272 | |
| 2273 | |
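| | // Append an instruction at the given bci: canonicalize it, try to
| | // eliminate it by local value numbering, link it into the block, and
| | // attach state and exception handlers where required.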
| 2274 | Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) { |
| 2275 | Canonicalizer canon(compilation(), instr, bci); |
| 2276 | Instruction* i1 = canon.canonical(); |
| 2277 | if (i1->is_linked() || !i1->can_be_linked()) { |
| 2278 | // Canonicalizer returned an instruction which was already |
| 2279 | // appended so simply return it. |
| 2280 | return i1; |
| 2281 | } |
| 2282 | |
| 2283 | if (UseLocalValueNumbering) { |
| 2284 | // Lookup the instruction in the ValueMap and add it to the map if |
| 2285 | // it's not found. |
| 2286 | Instruction* i2 = vmap()->find_insert(i1); |
| 2287 | if (i2 != i1) { |
| 2288 | // found an entry in the value map, so just return it. |
| 2289 | assert(i2->is_linked(), "should already be linked" ); |
| 2290 | return i2; |
| 2291 | } |
| 2292 | ValueNumberingEffects vne(vmap()); |
| 2293 | i1->visit(&vne); |
| 2294 | } |
| 2295 | |
| 2296 | // i1 was not eliminated => append it |
| 2297 | assert(i1->next() == NULL, "shouldn't already be linked" ); |
| 2298 | _last = _last->set_next(i1, canon.bci()); |
| 2299 | |
| 2300 | if (++_instruction_count >= InstructionCountCutoff && !bailed_out()) { |
| 2301 | // set the bailout state but complete normal processing. We |
| 2302 | // might do a little more work before noticing the bailout so we |
| 2303 | // want processing to continue normally until it's noticed. |
| 2304 | bailout("Method and/or inlining is too large" ); |
| 2305 | } |
| 2306 | |
| 2307 | #ifndef PRODUCT |
| 2308 | if (PrintIRDuringConstruction) { |
| 2309 | InstructionPrinter ip; |
| 2310 | ip.print_line(i1); |
| 2311 | if (Verbose) { |
| 2312 | state()->print(); |
| 2313 | } |
| 2314 | } |
| 2315 | #endif |
| 2316 | |
| 2317 | // save state after modification of operand stack for StateSplit instructions |
| 2318 | StateSplit* s = i1->as_StateSplit(); |
| 2319 | if (s != NULL) { |
| 2320 | if (EliminateFieldAccess) { |
| 2321 | Intrinsic* intrinsic = s->as_Intrinsic(); |
| 2322 | if (s->as_Invoke() != NULL || (intrinsic && !intrinsic->preserves_state())) { |
| 2323 | _memory->kill(); |
| 2324 | } |
| 2325 | } |
| 2326 | s->set_state(state()->copy(ValueStack::StateAfter, canon.bci())); |
| 2327 | } |
| 2328 | |
| 2329 | // set up exception handlers for this instruction if necessary |
| 2330 | if (i1->can_trap()) { |
| 2331 | i1->set_exception_handlers(handle_exception(i1)); |
| 2332 | assert(i1->exception_state() != NULL || !i1->needs_exception_state() || bailed_out(), "handle_exception must set exception state" ); |
| 2333 | } |
| 2334 | return i1; |
| 2335 | } |
| 2336 | |
| 2337 | |
| 2338 | Instruction* GraphBuilder::append(Instruction* instr) { |
| 2339 | assert(instr->as_StateSplit() == NULL || instr->as_BlockEnd() != NULL, "wrong append used" ); |
| 2340 | return append_with_bci(instr, bci()); |
| 2341 | } |
| 2342 | |
| 2343 | |
| 2344 | Instruction* GraphBuilder::append_split(StateSplit* instr) { |
| 2345 | return append_with_bci(instr, bci()); |
| 2346 | } |
| 2347 | |
| 2348 | |
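| | // Emit an explicit NullCheck unless the value is provably non-null,
| | // i.e. a freshly allocated object or array, or a loaded non-null
| | // constant.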
| 2349 | void GraphBuilder::null_check(Value value) { |
| 2350 | if (value->as_NewArray() != NULL || value->as_NewInstance() != NULL) { |
| 2351 | return; |
| 2352 | } else { |
| 2353 | Constant* con = value->as_Constant(); |
| 2354 | if (con) { |
| 2355 | ObjectType* c = con->type()->as_ObjectType(); |
| 2356 | if (c && c->is_loaded()) { |
| 2357 | ObjectConstant* oc = c->as_ObjectConstant(); |
| 2358 | if (!oc || !oc->value()->is_null_object()) { |
| 2359 | return; |
| 2360 | } |
| 2361 | } |
| 2362 | } |
| 2363 | } |
| 2364 | append(new NullCheck(value, copy_state_for_exception())); |
| 2365 | } |
| 2366 | |
| 2367 | |
| 2368 | |
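| | // Compute the list of exception handlers covering this instruction,
| | // walking outward from the current scope through the caller scopes of
| | // inlined methods and joining the instruction's state into each
| | // matching handler's entry block.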
| 2369 | XHandlers* GraphBuilder::handle_exception(Instruction* instruction) { |
| 2370 | if (!has_handler() && (!instruction->needs_exception_state() || instruction->exception_state() != NULL)) { |
| 2371 | assert(instruction->exception_state() == NULL |
| 2372 | || instruction->exception_state()->kind() == ValueStack::EmptyExceptionState |
| 2373 | || (instruction->exception_state()->kind() == ValueStack::ExceptionState && _compilation->env()->should_retain_local_variables()), |
| 2374 | "exception_state should be of exception kind" ); |
| 2375 | return new XHandlers(); |
| 2376 | } |
| 2377 | |
| 2378 | XHandlers* exception_handlers = new XHandlers(); |
| 2379 | ScopeData* cur_scope_data = scope_data(); |
| 2380 | ValueStack* cur_state = instruction->state_before(); |
| 2381 | ValueStack* prev_state = NULL; |
| 2382 | int scope_count = 0; |
| 2383 | |
| 2384 | assert(cur_state != NULL, "state_before must be set" ); |
| 2385 | do { |
| 2386 | int cur_bci = cur_state->bci(); |
| 2387 | assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match" ); |
| 2388 | assert(cur_bci == SynchronizationEntryBCI || cur_bci == cur_scope_data->stream()->cur_bci(), "invalid bci" ); |
| 2389 | |
| 2390 | // join with all potential exception handlers |
| 2391 | XHandlers* list = cur_scope_data->xhandlers(); |
| 2392 | const int n = list->length(); |
| 2393 | for (int i = 0; i < n; i++) { |
| 2394 | XHandler* h = list->handler_at(i); |
| 2395 | if (h->covers(cur_bci)) { |
| 2396 | // h is a potential exception handler => join it |
| 2397 | compilation()->set_has_exception_handlers(true); |
| 2398 | |
| 2399 | BlockBegin* entry = h->entry_block(); |
| 2400 | if (entry == block()) { |
| 2401 | // It's acceptable for an exception handler to cover itself |
| 2402 | // but we don't handle that in the parser currently. It's |
| 2403 | // very rare so we bail out instead of trying to handle it.
| 2404 | BAILOUT_("exception handler covers itself" , exception_handlers); |
| 2405 | } |
| 2406 | assert(entry->bci() == h->handler_bci(), "must match" ); |
| 2407 | assert(entry->bci() == -1 || entry == cur_scope_data->block_at(entry->bci()), "blocks must correspond" ); |
| 2408 | |
| 2409 | // previously this was a BAILOUT, but this is not necessary |
| 2410 | // now because asynchronous exceptions are not handled this way. |
| 2411 | assert(entry->state() == NULL || cur_state->total_locks_size() == entry->state()->total_locks_size(), "locks do not match" ); |
| 2412 | |
| 2413 | // xhandlers start with an empty expression stack
| 2414 | if (cur_state->stack_size() != 0) { |
| 2415 | cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci()); |
| 2416 | } |
| 2417 | if (instruction->exception_state() == NULL) { |
| 2418 | instruction->set_exception_state(cur_state); |
| 2419 | } |
| 2420 | |
| 2421 | // Note: Usually this join must work. However, very |
| 2422 | // complicated jsr-ret structures where we don't ret from |
| 2423 | // the subroutine can cause the objects on the monitor |
| 2424 | // stacks to not match because blocks can be parsed twice. |
| 2425 | // The only test case we've seen so far which exhibits this |
| 2426 | // problem is caught by the infinite recursion test in |
| 2427 | // GraphBuilder::jsr() if the join doesn't work. |
| 2428 | if (!entry->try_merge(cur_state)) { |
| 2429 | BAILOUT_("error while joining with exception handler, prob. due to complicated jsr/rets" , exception_handlers); |
| 2430 | } |
| 2431 | |
| 2432 | // add current state for correct handling of phi functions at begin of xhandler |
| 2433 | int phi_operand = entry->add_exception_state(cur_state); |
| 2434 | |
| 2435 | // add entry to the list of xhandlers of this block |
| 2436 | _block->add_exception_handler(entry); |
| 2437 | |
| 2438 | // add back-edge from xhandler entry to this block |
| 2439 | if (!entry->is_predecessor(_block)) { |
| 2440 | entry->add_predecessor(_block); |
| 2441 | } |
| 2442 | |
| 2443 | // clone XHandler because phi_operand and scope_count can not be shared |
| 2444 | XHandler* new_xhandler = new XHandler(h); |
| 2445 | new_xhandler->set_phi_operand(phi_operand); |
| 2446 | new_xhandler->set_scope_count(scope_count); |
| 2447 | exception_handlers->append(new_xhandler); |
| 2448 | |
| 2449 | // fill in exception handler subgraph lazily |
| 2450 | assert(!entry->is_set(BlockBegin::was_visited_flag), "entry must not be visited yet" ); |
| 2451 | cur_scope_data->add_to_work_list(entry); |
| 2452 | |
| 2453 | // stop when reaching catchall |
| 2454 | if (h->catch_type() == 0) { |
| 2455 | return exception_handlers; |
| 2456 | } |
| 2457 | } |
| 2458 | } |
| 2459 | |
| 2460 | if (exception_handlers->length() == 0) { |
| 2461 | // This scope and all callees do not handle exceptions, so the local |
| 2462 | // variables of this scope are not needed. However, the scope itself is |
| 2463 | // required for a correct exception stack trace -> clear out the locals. |
| 2464 | if (_compilation->env()->should_retain_local_variables()) { |
| 2465 | cur_state = cur_state->copy(ValueStack::ExceptionState, cur_state->bci()); |
| 2466 | } else { |
| 2467 | cur_state = cur_state->copy(ValueStack::EmptyExceptionState, cur_state->bci()); |
| 2468 | } |
| 2469 | if (prev_state != NULL) { |
| 2470 | prev_state->set_caller_state(cur_state); |
| 2471 | } |
| 2472 | if (instruction->exception_state() == NULL) { |
| 2473 | instruction->set_exception_state(cur_state); |
| 2474 | } |
| 2475 | } |
| 2476 | |
| 2477 | // Set up iteration for next time. |
| 2478 | // If parsing a jsr, do not grab exception handlers from the |
| 2479 | // parent scopes for this method (already got them, and they |
| 2480 | // needed to be cloned) |
| 2481 | |
| 2482 | while (cur_scope_data->parsing_jsr()) { |
| 2483 | cur_scope_data = cur_scope_data->parent(); |
| 2484 | } |
| 2485 | |
| 2486 | assert(cur_scope_data->scope() == cur_state->scope(), "scopes do not match" ); |
| 2487 | assert(cur_state->locks_size() == 0 || cur_state->locks_size() == 1, "unlocking must be done in a catchall exception handler" ); |
| 2488 | |
| 2489 | prev_state = cur_state; |
| 2490 | cur_state = cur_state->caller_state(); |
| 2491 | cur_scope_data = cur_scope_data->parent(); |
| 2492 | scope_count++; |
| 2493 | } while (cur_scope_data != NULL); |
| 2494 | |
| 2495 | return exception_handlers; |
| 2496 | } |
| 2497 | |
| 2498 | |
| 2499 | // Helper class for simplifying Phis. |
| 2500 | class PhiSimplifier : public BlockClosure { |
| 2501 | private: |
| 2502 | bool _has_substitutions; |
| 2503 | Value simplify(Value v); |
| 2504 | |
| 2505 | public: |
| 2506 | PhiSimplifier(BlockBegin* start) : _has_substitutions(false) { |
| 2507 | start->iterate_preorder(this); |
| 2508 | if (_has_substitutions) { |
| 2509 | SubstitutionResolver sr(start); |
| 2510 | } |
| 2511 | } |
| 2512 | void block_do(BlockBegin* b); |
| 2513 | bool has_substitutions() const { return _has_substitutions; } |
| 2514 | }; |
| 2515 | |
| 2516 | |
| 2517 | Value PhiSimplifier::simplify(Value v) { |
| 2518 | Phi* phi = v->as_Phi(); |
| 2519 | |
| 2520 | if (phi == NULL) { |
| 2521 | // no phi function |
| 2522 | return v; |
| 2523 | } else if (v->has_subst()) { |
| 2524 | // already substituted; subst can be phi itself -> simplify |
| 2525 | return simplify(v->subst()); |
| 2526 | } else if (phi->is_set(Phi::cannot_simplify)) { |
| 2527 | // already tried to simplify phi before |
| 2528 | return phi; |
| 2529 | } else if (phi->is_set(Phi::visited)) { |
| 2530 | // break cycles in phi functions |
| 2531 | return phi; |
| 2532 | } else if (phi->type()->is_illegal()) { |
| 2533 | // illegal phi functions are ignored anyway |
| 2534 | return phi; |
| 2535 | |
| 2536 | } else { |
| 2537 | // mark phi function as processed to break cycles in phi functions |
| 2538 | phi->set(Phi::visited); |
| 2539 | |
| 2540 | // simplify x = [y, x] and x = [y, y] to y |
| 2541 | Value subst = NULL; |
| 2542 | int opd_count = phi->operand_count(); |
| 2543 | for (int i = 0; i < opd_count; i++) { |
| 2544 | Value opd = phi->operand_at(i); |
| 2545 | assert(opd != NULL, "Operand must exist!" ); |
| 2546 | |
| 2547 | if (opd->type()->is_illegal()) { |
| 2548 | // if one operand is illegal, the entire phi function is illegal |
| 2549 | phi->make_illegal(); |
| 2550 | phi->clear(Phi::visited); |
| 2551 | return phi; |
| 2552 | } |
| 2553 | |
| 2554 | Value new_opd = simplify(opd); |
| 2555 | assert(new_opd != NULL, "Simplified operand must exist!" ); |
| 2556 | |
| 2557 | if (new_opd != phi && new_opd != subst) { |
| 2558 | if (subst == NULL) { |
| 2559 | subst = new_opd; |
| 2560 | } else { |
| 2561 | // no simplification possible |
| 2562 | phi->set(Phi::cannot_simplify); |
| 2563 | phi->clear(Phi::visited); |
| 2564 | return phi; |
| 2565 | } |
| 2566 | } |
| 2567 | } |
| 2568 | |
| 2569 | // successfully simplified phi function
| 2570 | assert(subst != NULL, "illegal phi function" ); |
| 2571 | _has_substitutions = true; |
| 2572 | phi->clear(Phi::visited); |
| 2573 | phi->set_subst(subst); |
| 2574 | |
| 2575 | #ifndef PRODUCT |
| 2576 | if (PrintPhiFunctions) { |
| 2577 | tty->print_cr("simplified phi function %c%d to %c%d (Block B%d)" , phi->type()->tchar(), phi->id(), subst->type()->tchar(), subst->id(), phi->block()->block_id()); |
| 2578 | } |
| 2579 | #endif |
| 2580 | |
| 2581 | return subst; |
| 2582 | } |
| 2583 | } |
| 2584 | |
| 2585 | |
| 2586 | void PhiSimplifier::block_do(BlockBegin* b) { |
| 2587 | for_each_phi_fun(b, phi, |
| 2588 | simplify(phi); |
| 2589 | ); |
| 2590 | |
| 2591 | #ifdef ASSERT |
| 2592 | for_each_phi_fun(b, phi, |
| 2593 | assert(phi->operand_count() != 1 || phi->subst() != phi, "missed trivial simplification" ); |
| 2594 | ); |
| 2595 | |
| 2596 | ValueStack* state = b->state()->caller_state(); |
| 2597 | for_each_state_value(state, value, |
| 2598 | Phi* phi = value->as_Phi(); |
| 2599 | assert(phi == NULL || phi->block() != b, "must not have phi function to simplify in caller state" ); |
| 2600 | ); |
| 2601 | #endif |
| 2602 | } |
| 2603 | |
| 2604 | // This method is called after all blocks are filled with HIR instructions.
| 2605 | // It eliminates all Phi functions of the form x = [y, y] and x = [y, x].
| 2606 | void GraphBuilder::eliminate_redundant_phis(BlockBegin* start) { |
| 2607 | PhiSimplifier simplifier(start); |
| 2608 | } |
| 2609 | |
| 2610 | |
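| | // Resume parsing at block beg: reset the current block and parser
| | // state from the block's entry state, then iterate its bytecodes.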
| 2611 | void GraphBuilder::connect_to_end(BlockBegin* beg) { |
| 2612 | // setup iteration |
| 2613 | kill_all(); |
| 2614 | _block = beg; |
| 2615 | _state = beg->state()->copy_for_parsing(); |
| 2616 | _last = beg; |
| 2617 | iterate_bytecodes_for_block(beg->bci()); |
| 2618 | } |
| 2619 | |
| 2620 | |
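| | // Parse the bytecodes of the current block starting at bci, appending
| | // HIR instructions until a block end, a bailout, or the start of
| | // another basic block is reached.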
| 2621 | BlockEnd* GraphBuilder::iterate_bytecodes_for_block(int bci) { |
| 2622 | #ifndef PRODUCT |
| 2623 | if (PrintIRDuringConstruction) { |
| 2624 | tty->cr(); |
| 2625 | InstructionPrinter ip; |
| 2626 | ip.print_instr(_block); tty->cr(); |
| 2627 | ip.print_stack(_block->state()); tty->cr(); |
| 2628 | ip.print_inline_level(_block); |
| 2629 | ip.print_head(); |
| 2630 | tty->print_cr("locals size: %d stack size: %d" , state()->locals_size(), state()->stack_size()); |
| 2631 | } |
| 2632 | #endif |
| 2633 | _skip_block = false; |
| 2634 | assert(state() != NULL, "ValueStack missing!" ); |
| 2635 | CompileLog* log = compilation()->log(); |
| 2636 | ciBytecodeStream s(method()); |
| 2637 | s.reset_to_bci(bci); |
| 2638 | int prev_bci = bci; |
| 2639 | scope_data()->set_stream(&s); |
| 2640 | // iterate |
| 2641 | Bytecodes::Code code = Bytecodes::_illegal; |
| 2642 | bool push_exception = false; |
| 2643 | |
| 2644 | if (block()->is_set(BlockBegin::exception_entry_flag) && block()->next() == NULL) { |
| 2645 | // first thing in the exception entry block should be the exception object. |
| 2646 | push_exception = true; |
| 2647 | } |
| 2648 | |
| 2649 | bool ignore_return = scope_data()->ignore_return(); |
| 2650 | |
| 2651 | while (!bailed_out() && last()->as_BlockEnd() == NULL && |
| 2652 | (code = stream()->next()) != ciBytecodeStream::EOBC() && |
| 2653 | (block_at(s.cur_bci()) == NULL || block_at(s.cur_bci()) == block())) { |
| 2654 | assert(state()->kind() == ValueStack::Parsing, "invalid state kind" ); |
| 2655 | |
| 2656 | if (log != NULL) |
| 2657 | log->set_context("bc code='%d' bci='%d'" , (int)code, s.cur_bci()); |
| 2658 | |
| 2659 | // Check for active jsr during OSR compilation |
| 2660 | if (compilation()->is_osr_compile() |
| 2661 | && scope()->is_top_scope() |
| 2662 | && parsing_jsr() |
| 2663 | && s.cur_bci() == compilation()->osr_bci()) { |
| 2664 | bailout("OSR not supported while a jsr is active" ); |
| 2665 | } |
| 2666 | |
| 2667 | if (push_exception) { |
| 2668 | apush(append(new ExceptionObject())); |
| 2669 | push_exception = false; |
| 2670 | } |
| 2671 | |
| 2672 | // handle bytecode |
| 2673 | switch (code) { |
| 2674 | case Bytecodes::_nop : /* nothing to do */ break; |
| 2675 | case Bytecodes::_aconst_null : apush(append(new Constant(objectNull ))); break; |
| 2676 | case Bytecodes::_iconst_m1 : ipush(append(new Constant(new IntConstant (-1)))); break; |
| 2677 | case Bytecodes::_iconst_0 : ipush(append(new Constant(intZero ))); break; |
| 2678 | case Bytecodes::_iconst_1 : ipush(append(new Constant(intOne ))); break; |
| 2679 | case Bytecodes::_iconst_2 : ipush(append(new Constant(new IntConstant ( 2)))); break; |
| 2680 | case Bytecodes::_iconst_3 : ipush(append(new Constant(new IntConstant ( 3)))); break; |
| 2681 | case Bytecodes::_iconst_4 : ipush(append(new Constant(new IntConstant ( 4)))); break; |
| 2682 | case Bytecodes::_iconst_5 : ipush(append(new Constant(new IntConstant ( 5)))); break; |
| 2683 | case Bytecodes::_lconst_0 : lpush(append(new Constant(new LongConstant ( 0)))); break; |
| 2684 | case Bytecodes::_lconst_1 : lpush(append(new Constant(new LongConstant ( 1)))); break; |
| 2685 | case Bytecodes::_fconst_0 : fpush(append(new Constant(new FloatConstant ( 0)))); break; |
| 2686 | case Bytecodes::_fconst_1 : fpush(append(new Constant(new FloatConstant ( 1)))); break; |
| 2687 | case Bytecodes::_fconst_2 : fpush(append(new Constant(new FloatConstant ( 2)))); break; |
| 2688 | case Bytecodes::_dconst_0 : dpush(append(new Constant(new DoubleConstant( 0)))); break; |
| 2689 | case Bytecodes::_dconst_1 : dpush(append(new Constant(new DoubleConstant( 1)))); break; |
| 2690 | case Bytecodes::_bipush : ipush(append(new Constant(new IntConstant(((signed char*)s.cur_bcp())[1])))); break; |
| 2691 | case Bytecodes::_sipush : ipush(append(new Constant(new IntConstant((short)Bytes::get_Java_u2(s.cur_bcp()+1))))); break; |
| 2692 | case Bytecodes::_ldc : // fall through |
| 2693 | case Bytecodes::_ldc_w : // fall through |
| 2694 | case Bytecodes::_ldc2_w : load_constant(); break; |
| 2695 | case Bytecodes::_iload : load_local(intType , s.get_index()); break; |
| 2696 | case Bytecodes::_lload : load_local(longType , s.get_index()); break; |
| 2697 | case Bytecodes::_fload : load_local(floatType , s.get_index()); break; |
| 2698 | case Bytecodes::_dload : load_local(doubleType , s.get_index()); break; |
| 2699 | case Bytecodes::_aload : load_local(instanceType, s.get_index()); break; |
| 2700 | case Bytecodes::_iload_0 : load_local(intType , 0); break; |
| 2701 | case Bytecodes::_iload_1 : load_local(intType , 1); break; |
| 2702 | case Bytecodes::_iload_2 : load_local(intType , 2); break; |
| 2703 | case Bytecodes::_iload_3 : load_local(intType , 3); break; |
| 2704 | case Bytecodes::_lload_0 : load_local(longType , 0); break; |
| 2705 | case Bytecodes::_lload_1 : load_local(longType , 1); break; |
| 2706 | case Bytecodes::_lload_2 : load_local(longType , 2); break; |
| 2707 | case Bytecodes::_lload_3 : load_local(longType , 3); break; |
| 2708 | case Bytecodes::_fload_0 : load_local(floatType , 0); break; |
| 2709 | case Bytecodes::_fload_1 : load_local(floatType , 1); break; |
| 2710 | case Bytecodes::_fload_2 : load_local(floatType , 2); break; |
| 2711 | case Bytecodes::_fload_3 : load_local(floatType , 3); break; |
| 2712 | case Bytecodes::_dload_0 : load_local(doubleType, 0); break; |
| 2713 | case Bytecodes::_dload_1 : load_local(doubleType, 1); break; |
| 2714 | case Bytecodes::_dload_2 : load_local(doubleType, 2); break; |
| 2715 | case Bytecodes::_dload_3 : load_local(doubleType, 3); break; |
| 2716 | case Bytecodes::_aload_0 : load_local(objectType, 0); break; |
| 2717 | case Bytecodes::_aload_1 : load_local(objectType, 1); break; |
| 2718 | case Bytecodes::_aload_2 : load_local(objectType, 2); break; |
| 2719 | case Bytecodes::_aload_3 : load_local(objectType, 3); break; |
| 2720 | case Bytecodes::_iaload : load_indexed(T_INT ); break; |
| 2721 | case Bytecodes::_laload : load_indexed(T_LONG ); break; |
| 2722 | case Bytecodes::_faload : load_indexed(T_FLOAT ); break; |
| 2723 | case Bytecodes::_daload : load_indexed(T_DOUBLE); break; |
| 2724 | case Bytecodes::_aaload : load_indexed(T_OBJECT); break; |
| 2725 | case Bytecodes::_baload : load_indexed(T_BYTE ); break; |
| 2726 | case Bytecodes::_caload : load_indexed(T_CHAR ); break; |
| 2727 | case Bytecodes::_saload : load_indexed(T_SHORT ); break; |
| 2728 | case Bytecodes::_istore : store_local(intType , s.get_index()); break; |
| 2729 | case Bytecodes::_lstore : store_local(longType , s.get_index()); break; |
| 2730 | case Bytecodes::_fstore : store_local(floatType , s.get_index()); break; |
| 2731 | case Bytecodes::_dstore : store_local(doubleType, s.get_index()); break; |
| 2732 | case Bytecodes::_astore : store_local(objectType, s.get_index()); break; |
| 2733 | case Bytecodes::_istore_0 : store_local(intType , 0); break; |
| 2734 | case Bytecodes::_istore_1 : store_local(intType , 1); break; |
| 2735 | case Bytecodes::_istore_2 : store_local(intType , 2); break; |
| 2736 | case Bytecodes::_istore_3 : store_local(intType , 3); break; |
| 2737 | case Bytecodes::_lstore_0 : store_local(longType , 0); break; |
| 2738 | case Bytecodes::_lstore_1 : store_local(longType , 1); break; |
| 2739 | case Bytecodes::_lstore_2 : store_local(longType , 2); break; |
| 2740 | case Bytecodes::_lstore_3 : store_local(longType , 3); break; |
| 2741 | case Bytecodes::_fstore_0 : store_local(floatType , 0); break; |
| 2742 | case Bytecodes::_fstore_1 : store_local(floatType , 1); break; |
| 2743 | case Bytecodes::_fstore_2 : store_local(floatType , 2); break; |
| 2744 | case Bytecodes::_fstore_3 : store_local(floatType , 3); break; |
| 2745 | case Bytecodes::_dstore_0 : store_local(doubleType, 0); break; |
| 2746 | case Bytecodes::_dstore_1 : store_local(doubleType, 1); break; |
| 2747 | case Bytecodes::_dstore_2 : store_local(doubleType, 2); break; |
| 2748 | case Bytecodes::_dstore_3 : store_local(doubleType, 3); break; |
| 2749 | case Bytecodes::_astore_0 : store_local(objectType, 0); break; |
| 2750 | case Bytecodes::_astore_1 : store_local(objectType, 1); break; |
| 2751 | case Bytecodes::_astore_2 : store_local(objectType, 2); break; |
| 2752 | case Bytecodes::_astore_3 : store_local(objectType, 3); break; |
| 2753 | case Bytecodes::_iastore : store_indexed(T_INT ); break; |
| 2754 | case Bytecodes::_lastore : store_indexed(T_LONG ); break; |
| 2755 | case Bytecodes::_fastore : store_indexed(T_FLOAT ); break; |
| 2756 | case Bytecodes::_dastore : store_indexed(T_DOUBLE); break; |
| 2757 | case Bytecodes::_aastore : store_indexed(T_OBJECT); break; |
| 2758 | case Bytecodes::_bastore : store_indexed(T_BYTE ); break; |
| 2759 | case Bytecodes::_castore : store_indexed(T_CHAR ); break; |
| 2760 | case Bytecodes::_sastore : store_indexed(T_SHORT ); break; |
| 2761 | case Bytecodes::_pop : // fall through |
| 2762 | case Bytecodes::_pop2 : // fall through |
| 2763 | case Bytecodes::_dup : // fall through |
| 2764 | case Bytecodes::_dup_x1 : // fall through |
| 2765 | case Bytecodes::_dup_x2 : // fall through |
| 2766 | case Bytecodes::_dup2 : // fall through |
| 2767 | case Bytecodes::_dup2_x1 : // fall through |
| 2768 | case Bytecodes::_dup2_x2 : // fall through |
| 2769 | case Bytecodes::_swap : stack_op(code); break; |
| 2770 | case Bytecodes::_iadd : arithmetic_op(intType , code); break; |
| 2771 | case Bytecodes::_ladd : arithmetic_op(longType , code); break; |
| 2772 | case Bytecodes::_fadd : arithmetic_op(floatType , code); break; |
| 2773 | case Bytecodes::_dadd : arithmetic_op(doubleType, code); break; |
| 2774 | case Bytecodes::_isub : arithmetic_op(intType , code); break; |
| 2775 | case Bytecodes::_lsub : arithmetic_op(longType , code); break; |
| 2776 | case Bytecodes::_fsub : arithmetic_op(floatType , code); break; |
| 2777 | case Bytecodes::_dsub : arithmetic_op(doubleType, code); break; |
| 2778 | case Bytecodes::_imul : arithmetic_op(intType , code); break; |
| 2779 | case Bytecodes::_lmul : arithmetic_op(longType , code); break; |
| 2780 | case Bytecodes::_fmul : arithmetic_op(floatType , code); break; |
| 2781 | case Bytecodes::_dmul : arithmetic_op(doubleType, code); break; |
| 2782 | case Bytecodes::_idiv : arithmetic_op(intType , code, copy_state_for_exception()); break; |
| 2783 | case Bytecodes::_ldiv : arithmetic_op(longType , code, copy_state_for_exception()); break; |
| 2784 | case Bytecodes::_fdiv : arithmetic_op(floatType , code); break; |
| 2785 | case Bytecodes::_ddiv : arithmetic_op(doubleType, code); break; |
| 2786 | case Bytecodes::_irem : arithmetic_op(intType , code, copy_state_for_exception()); break; |
| 2787 | case Bytecodes::_lrem : arithmetic_op(longType , code, copy_state_for_exception()); break; |
| 2788 | case Bytecodes::_frem : arithmetic_op(floatType , code); break; |
| 2789 | case Bytecodes::_drem : arithmetic_op(doubleType, code); break; |
| 2790 | case Bytecodes::_ineg : negate_op(intType ); break; |
| 2791 | case Bytecodes::_lneg : negate_op(longType ); break; |
| 2792 | case Bytecodes::_fneg : negate_op(floatType ); break; |
| 2793 | case Bytecodes::_dneg : negate_op(doubleType); break; |
| 2794 | case Bytecodes::_ishl : shift_op(intType , code); break; |
| 2795 | case Bytecodes::_lshl : shift_op(longType, code); break; |
| 2796 | case Bytecodes::_ishr : shift_op(intType , code); break; |
| 2797 | case Bytecodes::_lshr : shift_op(longType, code); break; |
| 2798 | case Bytecodes::_iushr : shift_op(intType , code); break; |
| 2799 | case Bytecodes::_lushr : shift_op(longType, code); break; |
| 2800 | case Bytecodes::_iand : logic_op(intType , code); break; |
| 2801 | case Bytecodes::_land : logic_op(longType, code); break; |
| 2802 | case Bytecodes::_ior : logic_op(intType , code); break; |
| 2803 | case Bytecodes::_lor : logic_op(longType, code); break; |
| 2804 | case Bytecodes::_ixor : logic_op(intType , code); break; |
| 2805 | case Bytecodes::_lxor : logic_op(longType, code); break; |
| 2806 | case Bytecodes::_iinc : increment(); break; |
| 2807 | case Bytecodes::_i2l : convert(code, T_INT , T_LONG ); break; |
| 2808 | case Bytecodes::_i2f : convert(code, T_INT , T_FLOAT ); break; |
| 2809 | case Bytecodes::_i2d : convert(code, T_INT , T_DOUBLE); break; |
| 2810 | case Bytecodes::_l2i : convert(code, T_LONG , T_INT ); break; |
| 2811 | case Bytecodes::_l2f : convert(code, T_LONG , T_FLOAT ); break; |
| 2812 | case Bytecodes::_l2d : convert(code, T_LONG , T_DOUBLE); break; |
| 2813 | case Bytecodes::_f2i : convert(code, T_FLOAT , T_INT ); break; |
| 2814 | case Bytecodes::_f2l : convert(code, T_FLOAT , T_LONG ); break; |
| 2815 | case Bytecodes::_f2d : convert(code, T_FLOAT , T_DOUBLE); break; |
| 2816 | case Bytecodes::_d2i : convert(code, T_DOUBLE, T_INT ); break; |
| 2817 | case Bytecodes::_d2l : convert(code, T_DOUBLE, T_LONG ); break; |
| 2818 | case Bytecodes::_d2f : convert(code, T_DOUBLE, T_FLOAT ); break; |
| 2819 | case Bytecodes::_i2b : convert(code, T_INT , T_BYTE ); break; |
| 2820 | case Bytecodes::_i2c : convert(code, T_INT , T_CHAR ); break; |
| 2821 | case Bytecodes::_i2s : convert(code, T_INT , T_SHORT ); break; |
| 2822 | case Bytecodes::_lcmp : compare_op(longType , code); break; |
| 2823 | case Bytecodes::_fcmpl : compare_op(floatType , code); break; |
| 2824 | case Bytecodes::_fcmpg : compare_op(floatType , code); break; |
| 2825 | case Bytecodes::_dcmpl : compare_op(doubleType, code); break; |
| 2826 | case Bytecodes::_dcmpg : compare_op(doubleType, code); break; |
| 2827 | case Bytecodes::_ifeq : if_zero(intType , If::eql); break; |
| 2828 | case Bytecodes::_ifne : if_zero(intType , If::neq); break; |
| 2829 | case Bytecodes::_iflt : if_zero(intType , If::lss); break; |
| 2830 | case Bytecodes::_ifge : if_zero(intType , If::geq); break; |
| 2831 | case Bytecodes::_ifgt : if_zero(intType , If::gtr); break; |
| 2832 | case Bytecodes::_ifle : if_zero(intType , If::leq); break; |
| 2833 | case Bytecodes::_if_icmpeq : if_same(intType , If::eql); break; |
| 2834 | case Bytecodes::_if_icmpne : if_same(intType , If::neq); break; |
| 2835 | case Bytecodes::_if_icmplt : if_same(intType , If::lss); break; |
| 2836 | case Bytecodes::_if_icmpge : if_same(intType , If::geq); break; |
| 2837 | case Bytecodes::_if_icmpgt : if_same(intType , If::gtr); break; |
| 2838 | case Bytecodes::_if_icmple : if_same(intType , If::leq); break; |
| 2839 | case Bytecodes::_if_acmpeq : if_same(objectType, If::eql); break; |
| 2840 | case Bytecodes::_if_acmpne : if_same(objectType, If::neq); break; |
| 2841 | case Bytecodes::_goto : _goto(s.cur_bci(), s.get_dest()); break; |
| 2842 | case Bytecodes::_jsr : jsr(s.get_dest()); break; |
| 2843 | case Bytecodes::_ret : ret(s.get_index()); break; |
| 2844 | case Bytecodes::_tableswitch : table_switch(); break; |
| 2845 | case Bytecodes::_lookupswitch : lookup_switch(); break; |
| 2846 | case Bytecodes::_ireturn : method_return(ipop(), ignore_return); break; |
| 2847 | case Bytecodes::_lreturn : method_return(lpop(), ignore_return); break; |
| 2848 | case Bytecodes::_freturn : method_return(fpop(), ignore_return); break; |
| 2849 | case Bytecodes::_dreturn : method_return(dpop(), ignore_return); break; |
| 2850 | case Bytecodes::_areturn : method_return(apop(), ignore_return); break; |
| 2851 | case Bytecodes::_return : method_return(NULL , ignore_return); break; |
| 2852 | case Bytecodes::_getstatic : // fall through |
| 2853 | case Bytecodes::_putstatic : // fall through |
| 2854 | case Bytecodes::_getfield : // fall through |
| 2855 | case Bytecodes::_putfield : access_field(code); break; |
| 2856 | case Bytecodes::_invokevirtual : // fall through |
| 2857 | case Bytecodes::_invokespecial : // fall through |
| 2858 | case Bytecodes::_invokestatic : // fall through |
| 2859 | case Bytecodes::_invokedynamic : // fall through |
| 2860 | case Bytecodes::_invokeinterface: invoke(code); break; |
| 2861 | case Bytecodes::_new : new_instance(s.get_index_u2()); break; |
| 2862 | case Bytecodes::_newarray : new_type_array(); break; |
| 2863 | case Bytecodes::_anewarray : new_object_array(); break; |
| 2864 | case Bytecodes::_arraylength : { ValueStack* state_before = copy_state_for_exception(); ipush(append(new ArrayLength(apop(), state_before))); break; } |
| 2865 | case Bytecodes::_athrow : throw_op(s.cur_bci()); break; |
| 2866 | case Bytecodes::_checkcast : check_cast(s.get_index_u2()); break; |
| 2867 | case Bytecodes::_instanceof : instance_of(s.get_index_u2()); break; |
| 2868 | case Bytecodes::_monitorenter : monitorenter(apop(), s.cur_bci()); break; |
| 2869 | case Bytecodes::_monitorexit : monitorexit (apop(), s.cur_bci()); break; |
| 2870 | case Bytecodes::_wide : ShouldNotReachHere(); break; |
| 2871 | case Bytecodes::_multianewarray : new_multi_array(s.cur_bcp()[3]); break; |
| 2872 | case Bytecodes::_ifnull : if_null(objectType, If::eql); break; |
| 2873 | case Bytecodes::_ifnonnull : if_null(objectType, If::neq); break; |
| 2874 | case Bytecodes::_goto_w : _goto(s.cur_bci(), s.get_far_dest()); break; |
| 2875 | case Bytecodes::_jsr_w : jsr(s.get_far_dest()); break; |
| 2876 | case Bytecodes::_breakpoint : BAILOUT_("concurrent setting of breakpoint" , NULL); |
| 2877 | default : ShouldNotReachHere(); break; |
| 2878 | } |
| 2879 | |
| 2880 | if (log != NULL) |
| 2881 | log->clear_context(); // skip marker if nothing was printed |
| 2882 | |
| 2883 | // save current bci to setup Goto at the end |
| 2884 | prev_bci = s.cur_bci(); |
| 2885 | |
| 2886 | } |
| 2887 | CHECK_BAILOUT_(NULL); |
| 2888 | // stop processing of this block (see try_inline_full) |
| 2889 | if (_skip_block) { |
| 2890 | _skip_block = false; |
| 2891 | assert(_last && _last->as_BlockEnd(), "" ); |
| 2892 | return _last->as_BlockEnd(); |
| 2893 | } |
| 2894 | // check whether the last instruction is a BlockEnd instruction
| 2895 | BlockEnd* end = last()->as_BlockEnd(); |
| 2896 | if (end == NULL) { |
| 2897 | // all blocks must end with a BlockEnd instruction => add a Goto |
| 2898 | end = new Goto(block_at(s.cur_bci()), false); |
| 2899 | append(end); |
| 2900 | } |
| 2901 | assert(end == last()->as_BlockEnd(), "inconsistency" ); |
| 2902 | |
| 2903 | assert(end->state() != NULL, "state must already be present" ); |
| 2904 | assert(end->as_Return() == NULL || end->as_Throw() == NULL || end->state()->stack_size() == 0, "stack not needed for return and throw" ); |
| 2905 | |
| 2906 | // connect to begin & set state |
| 2907 | // NOTE that inlining may have changed the block we are parsing |
| 2908 | block()->set_end(end); |
| 2909 | // propagate state |
| 2910 | for (int i = end->number_of_sux() - 1; i >= 0; i--) { |
| 2911 | BlockBegin* sux = end->sux_at(i); |
| 2912 | assert(sux->is_predecessor(block()), "predecessor missing" ); |
| 2913 | // be careful, bailout if bytecodes are strange |
| 2914 | if (!sux->try_merge(end->state())) BAILOUT_("block join failed" , NULL); |
| 2915 | scope_data()->add_to_work_list(end->sux_at(i)); |
| 2916 | } |
| 2917 | |
| 2918 | scope_data()->set_stream(NULL); |
| 2919 | |
| 2920 | // done |
| 2921 | return end; |
| 2922 | } |
| 2923 | |
| 2924 | |
| 2925 | void GraphBuilder::iterate_all_blocks(bool start_in_current_block_for_inlining) { |
| 2926 | do { |
| 2927 | if (start_in_current_block_for_inlining && !bailed_out()) { |
| 2928 | iterate_bytecodes_for_block(0); |
| 2929 | start_in_current_block_for_inlining = false; |
| 2930 | } else { |
| 2931 | BlockBegin* b; |
| 2932 | while ((b = scope_data()->remove_from_work_list()) != NULL) { |
| 2933 | if (!b->is_set(BlockBegin::was_visited_flag)) { |
| 2934 | if (b->is_set(BlockBegin::osr_entry_flag)) { |
| 2935 | // we're about to parse the osr entry block, so make sure |
| 2936 | // we setup the OSR edge leading into this block so that |
| 2937 | // Phis get setup correctly. |
| 2938 | setup_osr_entry_block(); |
| 2939 | // this is no longer the osr entry block, so clear it. |
| 2940 | b->clear(BlockBegin::osr_entry_flag); |
| 2941 | } |
| 2942 | b->set(BlockBegin::was_visited_flag); |
| 2943 | connect_to_end(b); |
| 2944 | } |
| 2945 | } |
| 2946 | } |
| 2947 | } while (!bailed_out() && !scope_data()->is_work_list_empty()); |
| 2948 | } |
| 2949 | |
| 2950 | |
| 2951 | bool GraphBuilder::_can_trap [Bytecodes::number_of_java_codes]; |
| 2952 | |
| 2953 | void GraphBuilder::initialize() { |
| 2954 | // the following bytecodes are assumed to potentially |
| 2955 | // throw exceptions in compiled code - note that e.g. |
| 2956 | // monitorexit & the return bytecodes do not throw |
| 2957 | // exceptions since successful monitor pairing has
| 2958 | // proved that they succeed
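| | // E.g. array loads/stores may throw NullPointerException or
| | // ArrayIndexOutOfBoundsException, idiv/irem may throw ArithmeticException,
| | // and field, invoke and allocation bytecodes may raise errors during
| | // resolution, initialization or allocation.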
| 2959 | Bytecodes::Code can_trap_list[] = |
| 2960 | { Bytecodes::_ldc |
| 2961 | , Bytecodes::_ldc_w |
| 2962 | , Bytecodes::_ldc2_w |
| 2963 | , Bytecodes::_iaload |
| 2964 | , Bytecodes::_laload |
| 2965 | , Bytecodes::_faload |
| 2966 | , Bytecodes::_daload |
| 2967 | , Bytecodes::_aaload |
| 2968 | , Bytecodes::_baload |
| 2969 | , Bytecodes::_caload |
| 2970 | , Bytecodes::_saload |
| 2971 | , Bytecodes::_iastore |
| 2972 | , Bytecodes::_lastore |
| 2973 | , Bytecodes::_fastore |
| 2974 | , Bytecodes::_dastore |
| 2975 | , Bytecodes::_aastore |
| 2976 | , Bytecodes::_bastore |
| 2977 | , Bytecodes::_castore |
| 2978 | , Bytecodes::_sastore |
| 2979 | , Bytecodes::_idiv |
| 2980 | , Bytecodes::_ldiv |
| 2981 | , Bytecodes::_irem |
| 2982 | , Bytecodes::_lrem |
| 2983 | , Bytecodes::_getstatic |
| 2984 | , Bytecodes::_putstatic |
| 2985 | , Bytecodes::_getfield |
| 2986 | , Bytecodes::_putfield |
| 2987 | , Bytecodes::_invokevirtual |
| 2988 | , Bytecodes::_invokespecial |
| 2989 | , Bytecodes::_invokestatic |
| 2990 | , Bytecodes::_invokedynamic |
| 2991 | , Bytecodes::_invokeinterface |
| 2992 | , Bytecodes::_new |
| 2993 | , Bytecodes::_newarray |
| 2994 | , Bytecodes::_anewarray |
| 2995 | , Bytecodes::_arraylength |
| 2996 | , Bytecodes::_athrow |
| 2997 | , Bytecodes::_checkcast |
| 2998 | , Bytecodes::_instanceof |
| 2999 | , Bytecodes::_monitorenter |
| 3000 | , Bytecodes::_multianewarray |
| 3001 | }; |
| 3002 | |
| 3003 | // initialize trap tables
| 3004 | for (int i = 0; i < Bytecodes::number_of_java_codes; i++) { |
| 3005 | _can_trap[i] = false; |
| 3006 | } |
| 3007 | // set standard trap info |
| 3008 | for (uint j = 0; j < ARRAY_SIZE(can_trap_list); j++) { |
| 3009 | _can_trap[can_trap_list[j]] = true; |
| 3010 | } |
| 3011 | } |
| 3012 | |
| 3013 | |
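| | // Creates an empty header block that simply jumps to entry, giving entry a
| | // dedicated predecessor so that phi functions (for backward-branch targets)
| | // and invocation counting have a block of their own.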
| 3014 | BlockBegin* GraphBuilder::header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state) {
| 3015 | assert(entry->is_set(f), "entry/flag mismatch" ); |
| 3016 | // create header block |
| 3017 | BlockBegin* h = new BlockBegin(entry->bci()); |
| 3018 | h->set_depth_first_number(0); |
| 3019 | |
| 3020 | Value l = h; |
| 3021 | BlockEnd* g = new Goto(entry, false); |
| 3022 | l->set_next(g, entry->bci()); |
| 3023 | h->set_end(g); |
| 3024 | h->set(f); |
| 3025 | // setup header block end state |
| 3026 | ValueStack* s = state->copy(ValueStack::StateAfter, entry->bci()); // can use copy since stack is empty (=> no phis) |
| 3027 | assert(s->stack_is_empty(), "must have empty stack at entry point" ); |
| 3028 | g->set_state(s); |
| 3029 | return h; |
| 3030 | } |
| 3031 | |
| 3032 | |
| 3033 | |
| 3034 | BlockBegin* GraphBuilder::setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* state) { |
| 3035 | BlockBegin* start = new BlockBegin(0); |
| 3036 | |
| 3037 | // This code eliminates the empty start block at the beginning of |
| 3038 | // each method. Previously, each method started with the |
| 3039 | // start-block created below, and this block was followed by the |
| 3040 | // header block that was always empty. This header block is only |
| 3041 | // necessary if std_entry is also a backward branch target because
| 3042 | // then phi functions may be necessary in the header block. It's |
| 3043 | // also necessary when profiling so that there's a single block that |
| 3044 | // can increment the interpreter_invocation_count. |
| 3045 | BlockBegin* new_header_block;
| 3046 | if (std_entry->number_of_preds() > 0 || count_invocations() || count_backedges()) { |
| 3047 | new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state); |
| 3048 | } else { |
| 3049 | new_header_block = std_entry; |
| 3050 | } |
| 3051 | |
| 3052 | // setup start block (root for the IR graph) |
| 3053 | Base* base = |
| 3054 | new Base( |
| 3055 | new_header_block, |
| 3056 | osr_entry |
| 3057 | ); |
| 3058 | start->set_next(base, 0); |
| 3059 | start->set_end(base); |
| 3060 | // create & setup state for start block |
| 3061 | start->set_state(state->copy(ValueStack::StateAfter, std_entry->bci())); |
| 3062 | base->set_state(state->copy(ValueStack::StateAfter, std_entry->bci())); |
| 3063 | |
| 3064 | if (base->std_entry()->state() == NULL) { |
| 3065 | // setup states for header blocks |
| 3066 | base->std_entry()->merge(state); |
| 3067 | } |
| 3068 | |
| 3069 | assert(base->std_entry()->state() != NULL, "" ); |
| 3070 | return start; |
| 3071 | } |
| 3072 | |
| 3073 | |
| 3074 | void GraphBuilder::setup_osr_entry_block() { |
| 3075 | assert(compilation()->is_osr_compile(), "only for osrs" ); |
| 3076 | |
| 3077 | int osr_bci = compilation()->osr_bci(); |
| 3078 | ciBytecodeStream s(method()); |
| 3079 | s.reset_to_bci(osr_bci); |
| 3080 | s.next(); |
| 3081 | scope_data()->set_stream(&s); |
| 3082 | |
| 3083 | // create a new block to be the osr setup code |
| 3084 | _osr_entry = new BlockBegin(osr_bci); |
| 3085 | _osr_entry->set(BlockBegin::osr_entry_flag); |
| 3086 | _osr_entry->set_depth_first_number(0); |
| 3087 | BlockBegin* target = bci2block()->at(osr_bci); |
| 3088 | assert(target != NULL && target->is_set(BlockBegin::osr_entry_flag), "must be there" ); |
| 3089 | // the osr entry has no values for locals |
| 3090 | ValueStack* state = target->state()->copy(); |
| 3091 | _osr_entry->set_state(state); |
| 3092 | |
| 3093 | kill_all(); |
| 3094 | _block = _osr_entry; |
| 3095 | _state = _osr_entry->state()->copy(); |
| 3096 | assert(_state->bci() == osr_bci, "mismatch" ); |
| 3097 | _last = _osr_entry; |
| 3098 | Value e = append(new OsrEntry()); |
| 3099 | e->set_needs_null_check(false); |
| 3100 | |
| 3101 | // OSR buffer is |
| 3102 | // |
| 3103 | // locals[nlocals-1..0] |
| 3104 | // monitors[number_of_locks-1..0] |
| 3105 | // |
| 3106 | // locals is a direct copy of the interpreter frame, so in the osr buffer
| 3107 | // the first slot in the local array is the last local from the interpreter
| 3108 | // and the last slot is local[0] (the receiver) from the interpreter
| 3109 | // |
| 3110 | // Similarly with locks. The first lock slot in the osr buffer is the nth lock
| 3111 | // from the interpreter frame, and the nth lock slot in the osr buffer is the
| 3112 | // 0th lock in the interpreter frame (the method lock for a synchronized method)
| 3113 | |
| 3114 | // Initialize monitors in the compiled activation. |
| 3115 | |
| 3116 | int index; |
| 3117 | Value local; |
| 3118 | |
| 3119 | // find all the locals that the interpreter thinks contain live oops |
| 3120 | const ResourceBitMap live_oops = method()->live_local_oops_at_bci(osr_bci); |
| 3121 | |
| 3122 | // compute the offset into the locals so that we can treat the buffer |
| 3123 | // as if the locals were still in the interpreter frame |
| 3124 | int locals_offset = BytesPerWord * (method()->max_locals() - 1); |
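| | // Worked example (assuming 8-byte words): with max_locals == 3,
| | // locals_offset == 16, so interpreter local 0 is read at buffer offset 16
| | // and local 2 (the first slot in the buffer) at offset 0.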
| 3125 | for_each_local_value(state, index, local) { |
| 3126 | int offset = locals_offset - (index + local->type()->size() - 1) * BytesPerWord; |
| 3127 | Value get; |
| 3128 | if (local->type()->is_object_kind() && !live_oops.at(index)) { |
| 3129 | // The interpreter thinks this local is dead but the compiler |
| 3130 | // doesn't, so pretend that the interpreter passed in null.
| 3131 | get = append(new Constant(objectNull)); |
| 3132 | } else { |
| 3133 | get = append(new UnsafeGetRaw(as_BasicType(local->type()), e, |
| 3134 | append(new Constant(new IntConstant(offset))), |
| 3135 | 0, |
| 3136 | true /*unaligned*/, true /*wide*/)); |
| 3137 | } |
| 3138 | _state->store_local(index, get); |
| 3139 | } |
| 3140 | |
| 3141 | // the storage for the OSR buffer is freed manually in the LIRGenerator. |
| 3142 | |
| 3143 | assert(state->caller_state() == NULL, "should be top scope" ); |
| 3144 | state->clear_locals(); |
| 3145 | Goto* g = new Goto(target, false); |
| 3146 | append(g); |
| 3147 | _osr_entry->set_end(g); |
| 3148 | target->merge(_osr_entry->end()->state()); |
| 3149 | |
| 3150 | scope_data()->set_stream(NULL); |
| 3151 | } |
| 3152 | |
| 3153 | |
| 3154 | ValueStack* GraphBuilder::state_at_entry() { |
| 3155 | ValueStack* state = new ValueStack(scope(), NULL); |
| 3156 | |
| 3157 | // Set up locals for receiver |
| 3158 | int idx = 0; |
| 3159 | if (!method()->is_static()) { |
| 3160 | // we should always see the receiver |
| 3161 | state->store_local(idx, new Local(method()->holder(), objectType, idx, true)); |
| 3162 | idx = 1; |
| 3163 | } |
| 3164 | |
| 3165 | // Set up locals for incoming arguments |
| 3166 | ciSignature* sig = method()->signature(); |
| 3167 | for (int i = 0; i < sig->count(); i++) { |
| 3168 | ciType* type = sig->type_at(i); |
| 3169 | BasicType basic_type = type->basic_type(); |
| 3170 | // don't allow T_ARRAY to propagate into locals types |
| 3171 | if (basic_type == T_ARRAY) basic_type = T_OBJECT; |
| 3172 | ValueType* vt = as_ValueType(basic_type); |
| 3173 | state->store_local(idx, new Local(type, vt, idx, false)); |
| 3174 | idx += type->size(); |
| 3175 | } |
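| | // E.g. for an instance method (int i, long l, Object o): the receiver
| | // occupies slot 0, i slot 1, l slots 2-3 (longs and doubles take two
| | // slots), and o slot 4.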
| 3176 | |
| 3177 | // lock synchronized method |
| 3178 | if (method()->is_synchronized()) { |
| 3179 | state->lock(NULL); |
| 3180 | } |
| 3181 | |
| 3182 | return state; |
| 3183 | } |
| 3184 | |
| 3185 | |
| 3186 | GraphBuilder::GraphBuilder(Compilation* compilation, IRScope* scope) |
| 3187 | : _scope_data(NULL) |
| 3188 | , _compilation(compilation) |
| 3189 | , _memory(new MemoryBuffer()) |
| 3190 | , _inline_bailout_msg(NULL) |
| 3191 | , _instruction_count(0) |
| 3192 | , _osr_entry(NULL) |
| 3193 | { |
| 3194 | int osr_bci = compilation->osr_bci(); |
| 3195 | |
| 3196 | // determine entry points and bci2block mapping |
| 3197 | BlockListBuilder blm(compilation, scope, osr_bci); |
| 3198 | CHECK_BAILOUT(); |
| 3199 | |
| 3200 | BlockList* bci2block = blm.bci2block(); |
| 3201 | BlockBegin* start_block = bci2block->at(0); |
| 3202 | |
| 3203 | push_root_scope(scope, bci2block, start_block); |
| 3204 | |
| 3205 | // setup state for std entry |
| 3206 | _initial_state = state_at_entry(); |
| 3207 | start_block->merge(_initial_state); |
| 3208 | |
| 3209 | // complete graph |
| 3210 | _vmap = new ValueMap(); |
| 3211 | switch (scope->method()->intrinsic_id()) { |
| 3212 | case vmIntrinsics::_dabs : // fall through |
| 3213 | case vmIntrinsics::_dsqrt : // fall through |
| 3214 | case vmIntrinsics::_dsin : // fall through |
| 3215 | case vmIntrinsics::_dcos : // fall through |
| 3216 | case vmIntrinsics::_dtan : // fall through |
| 3217 | case vmIntrinsics::_dlog : // fall through |
| 3218 | case vmIntrinsics::_dlog10 : // fall through |
| 3219 | case vmIntrinsics::_dexp : // fall through |
| 3220 | case vmIntrinsics::_dpow : // fall through |
| 3221 | { |
| 3222 | // Compiles where the root method is an intrinsic need a special |
| 3223 | // compilation environment because the bytecodes for the method |
| 3224 | // shouldn't be parsed during the compilation, only the special |
| 3225 | // Intrinsic node should be emitted. If this isn't done, the
| 3226 | // code for the inlined version will be different from the root
| 3227 | // compiled version, which could lead to monotonicity problems on
| 3228 | // Intel.
| 3229 | if (CheckIntrinsics && !scope->method()->intrinsic_candidate()) { |
| 3230 | BAILOUT("failed to inline intrinsic, method not annotated" ); |
| 3231 | } |
| 3232 | |
| 3233 | // Set up a stream so that appending instructions works properly. |
| 3234 | ciBytecodeStream s(scope->method()); |
| 3235 | s.reset_to_bci(0); |
| 3236 | scope_data()->set_stream(&s); |
| 3237 | s.next(); |
| 3238 | |
| 3239 | // setup the initial block state |
| 3240 | _block = start_block; |
| 3241 | _state = start_block->state()->copy_for_parsing(); |
| 3242 | _last = start_block; |
| 3243 | load_local(doubleType, 0); |
| 3244 | if (scope->method()->intrinsic_id() == vmIntrinsics::_dpow) { |
| 3245 | load_local(doubleType, 2); |
| 3246 | } |
| 3247 | |
| 3248 | // Emit the intrinsic node. |
| 3249 | bool result = try_inline_intrinsics(scope->method()); |
| 3250 | if (!result) BAILOUT("failed to inline intrinsic" ); |
| 3251 | method_return(dpop()); |
| 3252 | |
| 3253 | // connect the begin and end blocks and we're all done. |
| 3254 | BlockEnd* end = last()->as_BlockEnd(); |
| 3255 | block()->set_end(end); |
| 3256 | break; |
| 3257 | } |
| 3258 | |
| 3259 | case vmIntrinsics::_Reference_get: |
| 3260 | { |
| 3261 | { |
| 3262 | // With java.lang.ref.Reference.get() we must go through the
| 3263 | // intrinsic - when G1 is enabled - even when get() is the root |
| 3264 | // method of the compile so that, if necessary, the value in |
| 3265 | // the referent field of the reference object gets recorded by |
| 3266 | // the pre-barrier code. |
| 3267 | // Specifically, if G1 is enabled, the value in the referent |
| 3268 | // field is recorded by the G1 SATB pre barrier. This will |
| 3269 | // result in the referent being marked live and the reference |
| 3270 | // object removed from the list of discovered references during |
| 3271 | // reference processing. |
| 3272 | if (CheckIntrinsics && !scope->method()->intrinsic_candidate()) { |
| 3273 | BAILOUT("failed to inline intrinsic, method not annotated" ); |
| 3274 | } |
| 3275 | |
| 3276 | // We also need the intrinsic to prevent commoning reads of this field
| 3277 | // across safepoints, since GC can change its value.
| 3278 | |
| 3279 | // Set up a stream so that appending instructions works properly. |
| 3280 | ciBytecodeStream s(scope->method()); |
| 3281 | s.reset_to_bci(0); |
| 3282 | scope_data()->set_stream(&s); |
| 3283 | s.next(); |
| 3284 | |
| 3285 | // setup the initial block state |
| 3286 | _block = start_block; |
| 3287 | _state = start_block->state()->copy_for_parsing(); |
| 3288 | _last = start_block; |
| 3289 | load_local(objectType, 0); |
| 3290 | |
| 3291 | // Emit the intrinsic node. |
| 3292 | bool result = try_inline_intrinsics(scope->method()); |
| 3293 | if (!result) BAILOUT("failed to inline intrinsic" ); |
| 3294 | method_return(apop()); |
| 3295 | |
| 3296 | // connect the begin and end blocks and we're all done. |
| 3297 | BlockEnd* end = last()->as_BlockEnd(); |
| 3298 | block()->set_end(end); |
| 3299 | break; |
| 3300 | } |
| 3301 | // Otherwise, fall thru |
| 3302 | } |
| 3303 | |
| 3304 | default: |
| 3305 | scope_data()->add_to_work_list(start_block); |
| 3306 | iterate_all_blocks(); |
| 3307 | break; |
| 3308 | } |
| 3309 | CHECK_BAILOUT(); |
| 3310 | |
| 3311 | _start = setup_start_block(osr_bci, start_block, _osr_entry, _initial_state); |
| 3312 | |
| 3313 | eliminate_redundant_phis(_start); |
| 3314 | |
| 3315 | NOT_PRODUCT(if (PrintValueNumbering && Verbose) print_stats()); |
| 3316 | // for osr compile, bailout if some requirements are not fulfilled |
| 3317 | if (osr_bci != -1) { |
| 3318 | BlockBegin* osr_block = blm.bci2block()->at(osr_bci); |
| 3319 | if (!osr_block->is_set(BlockBegin::was_visited_flag)) { |
| 3320 | BAILOUT("osr entry must have been visited for osr compile" ); |
| 3321 | } |
| 3322 | |
| 3323 | // check if osr entry point has empty stack - we cannot handle non-empty stacks at osr entry points |
| 3324 | if (!osr_block->state()->stack_is_empty()) { |
| 3325 | BAILOUT("stack not empty at OSR entry point" ); |
| 3326 | } |
| 3327 | } |
| 3328 | #ifndef PRODUCT |
| 3329 | if (PrintCompilation && Verbose) tty->print_cr("Created %d Instructions" , _instruction_count); |
| 3330 | #endif |
| 3331 | } |
| 3332 | |
| 3333 | |
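| | // The copy_state_* helpers below snapshot the current ValueStack:
| | // copy_state_before keeps the full state, copy_state_exhandling returns
| | // NULL when no exception handler could use the state, and
| | // copy_state_for_exception falls back to a (possibly empty) exception
| | // state depending on whether local variables must be retained.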
| 3334 | ValueStack* GraphBuilder::copy_state_before() { |
| 3335 | return copy_state_before_with_bci(bci()); |
| 3336 | } |
| 3337 | |
| 3338 | ValueStack* GraphBuilder::copy_state_exhandling() { |
| 3339 | return copy_state_exhandling_with_bci(bci()); |
| 3340 | } |
| 3341 | |
| 3342 | ValueStack* GraphBuilder::copy_state_for_exception() { |
| 3343 | return copy_state_for_exception_with_bci(bci()); |
| 3344 | } |
| 3345 | |
| 3346 | ValueStack* GraphBuilder::copy_state_before_with_bci(int bci) { |
| 3347 | return state()->copy(ValueStack::StateBefore, bci); |
| 3348 | } |
| 3349 | |
| 3350 | ValueStack* GraphBuilder::copy_state_exhandling_with_bci(int bci) { |
| 3351 | if (!has_handler()) return NULL; |
| 3352 | return state()->copy(ValueStack::StateBefore, bci); |
| 3353 | } |
| 3354 | |
| 3355 | ValueStack* GraphBuilder::copy_state_for_exception_with_bci(int bci) { |
| 3356 | ValueStack* s = copy_state_exhandling_with_bci(bci); |
| 3357 | if (s == NULL) { |
| 3358 | if (_compilation->env()->should_retain_local_variables()) { |
| 3359 | s = state()->copy(ValueStack::ExceptionState, bci); |
| 3360 | } else { |
| 3361 | s = state()->copy(ValueStack::EmptyExceptionState, bci); |
| 3362 | } |
| 3363 | } |
| 3364 | return s; |
| 3365 | } |
| 3366 | |
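| | // Counts how often cur_callee already occurs on the current scope chain,
| | // e.g. once fib() has been inlined into fib(), asking about fib() again
| | // returns 2, letting the MaxRecursiveInlineLevel heuristic cut it off.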
| 3367 | int GraphBuilder::recursive_inline_level(ciMethod* cur_callee) const { |
| 3368 | int recur_level = 0; |
| 3369 | for (IRScope* s = scope(); s != NULL; s = s->caller()) { |
| 3370 | if (s->method() == cur_callee) { |
| 3371 | ++recur_level; |
| 3372 | } |
| 3373 | } |
| 3374 | return recur_level; |
| 3375 | } |
| 3376 | |
| 3377 | |
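| | // Inlining is attempted in order: explicit exclusion filters, method
| | // handle intrinsics, VM intrinsics, and finally regular bytecode
| | // inlining via try_inline_full.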
| 3378 | bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc, Value receiver) { |
| 3379 | const char* msg = NULL; |
| 3380 | |
| 3381 | // clear out any existing inline bailout condition |
| 3382 | clear_inline_bailout(); |
| 3383 | |
| 3384 | // exclude methods we don't want to inline |
| 3385 | msg = should_not_inline(callee); |
| 3386 | if (msg != NULL) { |
| 3387 | print_inlining(callee, msg, /*success*/ false); |
| 3388 | return false; |
| 3389 | } |
| 3390 | |
| 3391 | // method handle invokes |
| 3392 | if (callee->is_method_handle_intrinsic()) { |
| 3393 | if (try_method_handle_inline(callee, ignore_return)) { |
| 3394 | if (callee->has_reserved_stack_access()) { |
| 3395 | compilation()->set_has_reserved_stack_access(true); |
| 3396 | } |
| 3397 | return true; |
| 3398 | } |
| 3399 | return false; |
| 3400 | } |
| 3401 | |
| 3402 | // handle intrinsics |
| 3403 | if (callee->intrinsic_id() != vmIntrinsics::_none && |
| 3404 | (CheckIntrinsics ? callee->intrinsic_candidate() : true)) { |
| 3405 | if (try_inline_intrinsics(callee, ignore_return)) { |
| 3406 | print_inlining(callee, "intrinsic" ); |
| 3407 | if (callee->has_reserved_stack_access()) { |
| 3408 | compilation()->set_has_reserved_stack_access(true); |
| 3409 | } |
| 3410 | return true; |
| 3411 | } |
| 3412 | // try normal inlining |
| 3413 | } |
| 3414 | |
| 3415 | // certain methods cannot be parsed at all |
| 3416 | msg = check_can_parse(callee); |
| 3417 | if (msg != NULL) { |
| 3418 | print_inlining(callee, msg, /*success*/ false); |
| 3419 | return false; |
| 3420 | } |
| 3421 | |
| 3422 | // If bytecode not set use the current one. |
| 3423 | if (bc == Bytecodes::_illegal) { |
| 3424 | bc = code(); |
| 3425 | } |
| 3426 | if (try_inline_full(callee, holder_known, ignore_return, bc, receiver)) { |
| 3427 | if (callee->has_reserved_stack_access()) { |
| 3428 | compilation()->set_has_reserved_stack_access(true); |
| 3429 | } |
| 3430 | return true; |
| 3431 | } |
| 3432 | |
| 3433 | // Entire compilation could fail during try_inline_full call. |
| 3434 | // In that case printing inlining decision info is useless. |
| 3435 | if (!bailed_out()) |
| 3436 | print_inlining(callee, _inline_bailout_msg, /*success*/ false); |
| 3437 | |
| 3438 | return false; |
| 3439 | } |
| 3440 | |
| 3441 | |
| 3442 | const char* GraphBuilder::check_can_parse(ciMethod* callee) const { |
| 3443 | // Certain methods cannot be parsed at all: |
| 3444 | if ( callee->is_native()) return "native method" ; |
| 3445 | if ( callee->is_abstract()) return "abstract method" ; |
| 3446 | if (!callee->can_be_compiled()) return "not compilable (disabled)" ; |
| 3447 | if (!callee->can_be_parsed()) return "cannot be parsed" ; |
| 3448 | return NULL; |
| 3449 | } |
| 3450 | |
| 3451 | // negative filter: should callee NOT be inlined? returns NULL, ok to inline, or rejection msg |
| 3452 | const char* GraphBuilder::should_not_inline(ciMethod* callee) const { |
| 3453 | if ( compilation()->directive()->should_not_inline(callee)) return "disallowed by CompileCommand" ; |
| 3454 | if ( callee->dont_inline()) return "don't inline by annotation" ; |
| 3455 | return NULL; |
| 3456 | } |
| 3457 | |
| 3458 | void GraphBuilder::build_graph_for_intrinsic(ciMethod* callee, bool ignore_return) { |
| 3459 | vmIntrinsics::ID id = callee->intrinsic_id(); |
| 3460 | assert(id != vmIntrinsics::_none, "must be a VM intrinsic" ); |
| 3461 | |
| 3462 | // Some intrinsics need special IR nodes. |
| 3463 | switch(id) { |
| 3464 | case vmIntrinsics::_getReference : append_unsafe_get_obj(callee, T_OBJECT, false); return; |
| 3465 | case vmIntrinsics::_getBoolean : append_unsafe_get_obj(callee, T_BOOLEAN, false); return; |
| 3466 | case vmIntrinsics::_getByte : append_unsafe_get_obj(callee, T_BYTE, false); return; |
| 3467 | case vmIntrinsics::_getShort : append_unsafe_get_obj(callee, T_SHORT, false); return; |
| 3468 | case vmIntrinsics::_getChar : append_unsafe_get_obj(callee, T_CHAR, false); return; |
| 3469 | case vmIntrinsics::_getInt : append_unsafe_get_obj(callee, T_INT, false); return; |
| 3470 | case vmIntrinsics::_getLong : append_unsafe_get_obj(callee, T_LONG, false); return; |
| 3471 | case vmIntrinsics::_getFloat : append_unsafe_get_obj(callee, T_FLOAT, false); return; |
| 3472 | case vmIntrinsics::_getDouble : append_unsafe_get_obj(callee, T_DOUBLE, false); return; |
| 3473 | case vmIntrinsics::_putReference : append_unsafe_put_obj(callee, T_OBJECT, false); return; |
| 3474 | case vmIntrinsics::_putBoolean : append_unsafe_put_obj(callee, T_BOOLEAN, false); return; |
| 3475 | case vmIntrinsics::_putByte : append_unsafe_put_obj(callee, T_BYTE, false); return; |
| 3476 | case vmIntrinsics::_putShort : append_unsafe_put_obj(callee, T_SHORT, false); return; |
| 3477 | case vmIntrinsics::_putChar : append_unsafe_put_obj(callee, T_CHAR, false); return; |
| 3478 | case vmIntrinsics::_putInt : append_unsafe_put_obj(callee, T_INT, false); return; |
| 3479 | case vmIntrinsics::_putLong : append_unsafe_put_obj(callee, T_LONG, false); return; |
| 3480 | case vmIntrinsics::_putFloat : append_unsafe_put_obj(callee, T_FLOAT, false); return; |
| 3481 | case vmIntrinsics::_putDouble : append_unsafe_put_obj(callee, T_DOUBLE, false); return; |
| 3482 | case vmIntrinsics::_getShortUnaligned : append_unsafe_get_obj(callee, T_SHORT, false); return; |
| 3483 | case vmIntrinsics::_getCharUnaligned : append_unsafe_get_obj(callee, T_CHAR, false); return; |
| 3484 | case vmIntrinsics::_getIntUnaligned : append_unsafe_get_obj(callee, T_INT, false); return; |
| 3485 | case vmIntrinsics::_getLongUnaligned : append_unsafe_get_obj(callee, T_LONG, false); return; |
| 3486 | case vmIntrinsics::_putShortUnaligned : append_unsafe_put_obj(callee, T_SHORT, false); return; |
| 3487 | case vmIntrinsics::_putCharUnaligned : append_unsafe_put_obj(callee, T_CHAR, false); return; |
| 3488 | case vmIntrinsics::_putIntUnaligned : append_unsafe_put_obj(callee, T_INT, false); return; |
| 3489 | case vmIntrinsics::_putLongUnaligned : append_unsafe_put_obj(callee, T_LONG, false); return; |
| 3490 | case vmIntrinsics::_getReferenceVolatile : append_unsafe_get_obj(callee, T_OBJECT, true); return; |
| 3491 | case vmIntrinsics::_getBooleanVolatile : append_unsafe_get_obj(callee, T_BOOLEAN, true); return; |
| 3492 | case vmIntrinsics::_getByteVolatile : append_unsafe_get_obj(callee, T_BYTE, true); return; |
| 3493 | case vmIntrinsics::_getShortVolatile : append_unsafe_get_obj(callee, T_SHORT, true); return; |
| 3494 | case vmIntrinsics::_getCharVolatile : append_unsafe_get_obj(callee, T_CHAR, true); return; |
| 3495 | case vmIntrinsics::_getIntVolatile : append_unsafe_get_obj(callee, T_INT, true); return; |
| 3496 | case vmIntrinsics::_getLongVolatile : append_unsafe_get_obj(callee, T_LONG, true); return; |
| 3497 | case vmIntrinsics::_getFloatVolatile : append_unsafe_get_obj(callee, T_FLOAT, true); return; |
| 3498 | case vmIntrinsics::_getDoubleVolatile : append_unsafe_get_obj(callee, T_DOUBLE, true); return; |
| 3499 | case vmIntrinsics::_putReferenceVolatile : append_unsafe_put_obj(callee, T_OBJECT, true); return; |
| 3500 | case vmIntrinsics::_putBooleanVolatile : append_unsafe_put_obj(callee, T_BOOLEAN, true); return; |
| 3501 | case vmIntrinsics::_putByteVolatile : append_unsafe_put_obj(callee, T_BYTE, true); return; |
| 3502 | case vmIntrinsics::_putShortVolatile : append_unsafe_put_obj(callee, T_SHORT, true); return; |
| 3503 | case vmIntrinsics::_putCharVolatile : append_unsafe_put_obj(callee, T_CHAR, true); return; |
| 3504 | case vmIntrinsics::_putIntVolatile : append_unsafe_put_obj(callee, T_INT, true); return; |
| 3505 | case vmIntrinsics::_putLongVolatile : append_unsafe_put_obj(callee, T_LONG, true); return; |
| 3506 | case vmIntrinsics::_putFloatVolatile : append_unsafe_put_obj(callee, T_FLOAT, true); return; |
| 3507 | case vmIntrinsics::_putDoubleVolatile : append_unsafe_put_obj(callee, T_DOUBLE, true); return; |
| 3508 | case vmIntrinsics::_compareAndSetLong: |
| 3509 | case vmIntrinsics::_compareAndSetInt: |
| 3510 | case vmIntrinsics::_compareAndSetReference : append_unsafe_CAS(callee); return; |
| 3511 | case vmIntrinsics::_getAndAddInt: |
| 3512 | case vmIntrinsics::_getAndAddLong : append_unsafe_get_and_set_obj(callee, true); return; |
| 3513 | case vmIntrinsics::_getAndSetInt : |
| 3514 | case vmIntrinsics::_getAndSetLong : |
| 3515 | case vmIntrinsics::_getAndSetReference : append_unsafe_get_and_set_obj(callee, false); return; |
| 3516 | case vmIntrinsics::_getCharStringU : append_char_access(callee, false); return; |
| 3517 | case vmIntrinsics::_putCharStringU : append_char_access(callee, true); return; |
| 3518 | default: |
| 3519 | break; |
| 3520 | } |
| 3521 | |
| 3522 | // create intrinsic node |
| 3523 | const bool has_receiver = !callee->is_static(); |
| 3524 | ValueType* result_type = as_ValueType(callee->return_type()); |
| 3525 | ValueStack* state_before = copy_state_for_exception(); |
| 3526 | |
| 3527 | Values* args = state()->pop_arguments(callee->arg_size()); |
| 3528 | |
| 3529 | if (is_profiling()) { |
| 3530 | // Don't profile in the special case where the root method |
| 3531 | // is the intrinsic |
| 3532 | if (callee != method()) { |
| 3533 | // Note that we'd collect profile data in this method if we wanted it. |
| 3534 | compilation()->set_would_profile(true); |
| 3535 | if (profile_calls()) { |
| 3536 | Value recv = NULL; |
| 3537 | if (has_receiver) { |
| 3538 | recv = args->at(0); |
| 3539 | null_check(recv); |
| 3540 | } |
| 3541 | profile_call(callee, recv, NULL, collect_args_for_profiling(args, callee, true), true); |
| 3542 | } |
| 3543 | } |
| 3544 | } |
| 3545 | |
| 3546 | Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), |
| 3547 | args, has_receiver, state_before, |
| 3548 | vmIntrinsics::preserves_state(id), |
| 3549 | vmIntrinsics::can_trap(id)); |
| 3550 | // append instruction & push result |
| 3551 | Value value = append_split(result); |
| 3552 | if (result_type != voidType && !ignore_return) { |
| 3553 | push(result_type, value); |
| 3554 | } |
| 3555 | |
| 3556 | if (callee != method() && profile_return() && result_type->is_object_kind()) { |
| 3557 | profile_return_type(result, callee); |
| 3558 | } |
| 3559 | } |
| 3560 | |
| 3561 | bool GraphBuilder::try_inline_intrinsics(ciMethod* callee, bool ignore_return) { |
| 3562 | // For calling is_intrinsic_available we need to transition to |
| 3563 | // the '_thread_in_vm' state because is_intrinsic_available() |
| 3564 | // accesses critical VM-internal data. |
| 3565 | bool is_available = false; |
| 3566 | { |
| 3567 | VM_ENTRY_MARK; |
| 3568 | methodHandle mh(THREAD, callee->get_Method()); |
| 3569 | is_available = _compilation->compiler()->is_intrinsic_available(mh, _compilation->directive()); |
| 3570 | } |
| 3571 | |
| 3572 | if (!is_available) { |
| 3573 | if (!InlineNatives) { |
| 3574 | // Return false and also set message that the inlining of |
| 3575 | // intrinsics has been disabled in general. |
| 3576 | INLINE_BAILOUT("intrinsic method inlining disabled" ); |
| 3577 | } else { |
| 3578 | return false; |
| 3579 | } |
| 3580 | } |
| 3581 | build_graph_for_intrinsic(callee, ignore_return); |
| 3582 | return true; |
| 3583 | } |
| 3584 | |
| 3585 | |
| 3586 | bool GraphBuilder::try_inline_jsr(int jsr_dest_bci) { |
| 3587 | // Introduce a new callee continuation point - all Ret instructions |
| 3588 | // will be replaced with Gotos to this point. |
| 3589 | BlockBegin* cont = block_at(next_bci()); |
| 3590 | assert(cont != NULL, "continuation must exist (BlockListBuilder starts a new block after a jsr)" );
| 3591 | |
| 3592 | // Note: cannot assign state to continuation yet, as we have to
| 3593 | // pick up the state from the Ret instructions. |
| 3594 | |
| 3595 | // Push callee scope |
| 3596 | push_scope_for_jsr(cont, jsr_dest_bci); |
| 3597 | |
| 3598 | // Temporarily set up bytecode stream so we can append instructions |
| 3599 | // (only using the bci of this stream) |
| 3600 | scope_data()->set_stream(scope_data()->parent()->stream()); |
| 3601 | |
| 3602 | BlockBegin* jsr_start_block = block_at(jsr_dest_bci); |
| 3603 | assert(jsr_start_block != NULL, "jsr start block must exist" ); |
| 3604 | assert(!jsr_start_block->is_set(BlockBegin::was_visited_flag), "should not have visited jsr yet" ); |
| 3605 | Goto* goto_sub = new Goto(jsr_start_block, false); |
| 3606 | // Must copy state to avoid wrong sharing when parsing bytecodes |
| 3607 | assert(jsr_start_block->state() == NULL, "should have fresh jsr starting block" ); |
| 3608 | jsr_start_block->set_state(copy_state_before_with_bci(jsr_dest_bci)); |
| 3609 | append(goto_sub); |
| 3610 | _block->set_end(goto_sub); |
| 3611 | _last = _block = jsr_start_block; |
| 3612 | |
| 3613 | // Clear out bytecode stream |
| 3614 | scope_data()->set_stream(NULL); |
| 3615 | |
| 3616 | scope_data()->add_to_work_list(jsr_start_block); |
| 3617 | |
| 3618 | // Ready to resume parsing in subroutine |
| 3619 | iterate_all_blocks(); |
| 3620 | |
| 3621 | // If we bailed out during parsing, return immediately (this is bad news) |
| 3622 | CHECK_BAILOUT_(false); |
| 3623 | |
| 3624 | // Detect whether the continuation can actually be reached. If not, |
| 3625 | // it has not had state set by the join() operations in |
| 3626 | // iterate_bytecodes_for_block()/ret() and we should not touch the |
| 3627 | // iteration state. The calling activation of |
| 3628 | // iterate_bytecodes_for_block will then complete normally. |
| 3629 | if (cont->state() != NULL) { |
| 3630 | if (!cont->is_set(BlockBegin::was_visited_flag)) { |
| 3631 | // add continuation to work list instead of parsing it immediately |
| 3632 | scope_data()->parent()->add_to_work_list(cont); |
| 3633 | } |
| 3634 | } |
| 3635 | |
| 3636 | assert(jsr_continuation() == cont, "continuation must not have changed" ); |
| 3637 | assert(!jsr_continuation()->is_set(BlockBegin::was_visited_flag) || |
| 3638 | jsr_continuation()->is_set(BlockBegin::parser_loop_header_flag), |
| 3639 | "continuation can only be visited in case of backward branches" ); |
| 3640 | assert(_last && _last->as_BlockEnd(), "block must have end" ); |
| 3641 | |
| 3642 | // continuation is in work list, so end iteration of current block |
| 3643 | _skip_block = true; |
| 3644 | pop_scope_for_jsr(); |
| 3645 | |
| 3646 | return true; |
| 3647 | } |
| 3648 | |
| 3649 | |
| 3650 | // Inline the entry of a synchronized method as a monitor enter and |
| 3651 | // register the exception handler which releases the monitor if an |
| 3652 | // exception is thrown within the callee. Note that the monitor enter |
| 3653 | // cannot throw an exception itself, because the receiver is |
| 3654 | // guaranteed to be non-null by the explicit null check at the |
| 3655 | // beginning of inlining. |
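| | // In effect the inlined callee behaves as if its body were wrapped in
| | // synchronized (receiver) { ... }, with a hidden handler that unlocks
| | // the monitor and rethrows (see fill_sync_handler below).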
| 3656 | void GraphBuilder::inline_sync_entry(Value lock, BlockBegin* sync_handler) { |
| 3657 | assert(lock != NULL && sync_handler != NULL, "lock or handler missing" ); |
| 3658 | |
| 3659 | monitorenter(lock, SynchronizationEntryBCI); |
| 3660 | assert(_last->as_MonitorEnter() != NULL, "monitor enter expected" ); |
| 3661 | _last->set_needs_null_check(false); |
| 3662 | |
| 3663 | sync_handler->set(BlockBegin::exception_entry_flag); |
| 3664 | sync_handler->set(BlockBegin::is_on_work_list_flag); |
| 3665 | |
| 3666 | ciExceptionHandler* desc = new ciExceptionHandler(method()->holder(), 0, method()->code_size(), -1, 0); |
| 3667 | XHandler* h = new XHandler(desc); |
| 3668 | h->set_entry_block(sync_handler); |
| 3669 | scope_data()->xhandlers()->append(h); |
| 3670 | scope_data()->set_has_handler(); |
| 3671 | } |
| 3672 | |
| 3673 | |
| 3674 | // If an exception is thrown and not handled within an inlined |
| 3675 | // synchronized method, the monitor must be released before the |
| 3676 | // exception is rethrown in the outer scope. Generate the appropriate |
| 3677 | // instructions here. |
| 3678 | void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler) { |
| 3679 | BlockBegin* orig_block = _block; |
| 3680 | ValueStack* orig_state = _state; |
| 3681 | Instruction* orig_last = _last; |
| 3682 | _last = _block = sync_handler; |
| 3683 | _state = sync_handler->state()->copy(); |
| 3684 | |
| 3685 | assert(sync_handler != NULL, "handler missing" ); |
| 3686 | assert(!sync_handler->is_set(BlockBegin::was_visited_flag), "is visited here" ); |
| 3687 | |
| 3688 | assert(lock != NULL || default_handler, "lock or handler missing" ); |
| 3689 | |
| 3690 | XHandler* h = scope_data()->xhandlers()->remove_last(); |
| 3691 | assert(h->entry_block() == sync_handler, "corrupt list of handlers" ); |
| 3692 | |
| 3693 | block()->set(BlockBegin::was_visited_flag); |
| 3694 | Value exception = append_with_bci(new ExceptionObject(), SynchronizationEntryBCI); |
| 3695 | assert(exception->is_pinned(), "must be" ); |
| 3696 | |
| 3697 | int bci = SynchronizationEntryBCI; |
| 3698 | if (compilation()->env()->dtrace_method_probes()) { |
| 3699 | // Report exit from inline methods. We don't have a stream here |
| 3700 | // so pass an explicit bci of SynchronizationEntryBCI. |
| 3701 | Values* args = new Values(1); |
| 3702 | args->push(append_with_bci(new Constant(new MethodConstant(method())), bci)); |
| 3703 | append_with_bci(new RuntimeCall(voidType, "dtrace_method_exit" , CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args), bci); |
| 3704 | } |
| 3705 | |
| 3706 | if (lock) { |
| 3707 | assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing" ); |
| 3708 | if (!lock->is_linked()) { |
| 3709 | lock = append_with_bci(lock, bci); |
| 3710 | } |
| 3711 | |
| 3712 | // exit the monitor in the context of the synchronized method |
| 3713 | monitorexit(lock, bci); |
| 3714 | |
| 3715 | // exit the context of the synchronized method |
| 3716 | if (!default_handler) { |
| 3717 | pop_scope(); |
| 3718 | bci = _state->caller_state()->bci(); |
| 3719 | _state = _state->caller_state()->copy_for_parsing(); |
| 3720 | } |
| 3721 | } |
| 3722 | |
| 3723 | // perform the throw as if at the call site
| 3724 | apush(exception); |
| 3725 | throw_op(bci); |
| 3726 | |
| 3727 | BlockEnd* end = last()->as_BlockEnd(); |
| 3728 | block()->set_end(end); |
| 3729 | |
| 3730 | _block = orig_block; |
| 3731 | _state = orig_state; |
| 3732 | _last = orig_last; |
| 3733 | } |
| 3734 | |
| 3735 | |
| 3736 | bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc, Value receiver) { |
| 3737 | assert(!callee->is_native(), "callee must not be native" ); |
| 3738 | if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) { |
| 3739 | INLINE_BAILOUT("inlining prohibited by policy" ); |
| 3740 | } |
| 3741 | // first perform tests of things it's not possible to inline |
| 3742 | if (callee->has_exception_handlers() && |
| 3743 | !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers" ); |
| 3744 | if (callee->is_synchronized() && |
| 3745 | !InlineSynchronizedMethods ) INLINE_BAILOUT("callee is synchronized" ); |
| 3746 | if (!callee->holder()->is_initialized()) INLINE_BAILOUT("callee's klass not initialized yet" ); |
| 3747 | if (!callee->has_balanced_monitors()) INLINE_BAILOUT("callee's monitors do not match" ); |
| 3748 | |
| 3749 | // Proper inlining of methods with jsrs requires a little more work. |
| 3750 | if (callee->has_jsrs() ) INLINE_BAILOUT("jsrs not handled properly by inliner yet" ); |
| 3751 | |
| 3752 | // When SSE2 is used on Intel, no special handling is needed
| 3753 | // for strictfp because the enum constant is fixed at compile time;
| 3754 | // the check for UseSSE < 2 is still needed here
| 3755 | if (strict_fp_requires_explicit_rounding && UseSSE < 2 && method()->is_strict() != callee->is_strict()) { |
| 3756 | INLINE_BAILOUT("caller and callee have different strict fp requirements" ); |
| 3757 | } |
| 3758 | |
| 3759 | if (is_profiling() && !callee->ensure_method_data()) { |
| 3760 | INLINE_BAILOUT("mdo allocation failed" ); |
| 3761 | } |
| 3762 | |
| 3763 | // now perform tests that are based on flag settings |
| 3764 | bool inlinee_by_directive = compilation()->directive()->should_inline(callee); |
| 3765 | if (callee->force_inline() || inlinee_by_directive) { |
| 3766 | if (inline_level() > MaxForceInlineLevel ) INLINE_BAILOUT("MaxForceInlineLevel" ); |
| 3767 | if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep" ); |
| 3768 | |
| 3769 | const char* msg = "" ; |
| 3770 | if (callee->force_inline()) msg = "force inline by annotation" ; |
| 3771 | if (inlinee_by_directive) msg = "force inline by CompileCommand" ; |
| 3772 | print_inlining(callee, msg); |
| 3773 | } else { |
| 3774 | // use heuristic controls on inlining |
| 3775 | if (inline_level() > MaxInlineLevel ) INLINE_BAILOUT("inlining too deep" ); |
| 3776 | if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("recursive inlining too deep" ); |
| 3777 | if (callee->code_size_for_inlining() > max_inline_size() ) INLINE_BAILOUT("callee is too large" ); |
| 3778 | |
| 3779 | // don't inline throwable methods unless the inlining tree is rooted in a throwable class |
| 3780 | if (callee->name() == ciSymbol::object_initializer_name() && |
| 3781 | callee->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) { |
| 3782 | // Throwable constructor call |
| 3783 | IRScope* top = scope(); |
| 3784 | while (top->caller() != NULL) { |
| 3785 | top = top->caller(); |
| 3786 | } |
| 3787 | if (!top->method()->holder()->is_subclass_of(ciEnv::current()->Throwable_klass())) { |
| 3788 | INLINE_BAILOUT("don't inline Throwable constructors" ); |
| 3789 | } |
| 3790 | } |
| 3791 | |
| 3792 | if (compilation()->env()->num_inlined_bytecodes() > DesiredMethodLimit) { |
| 3793 | INLINE_BAILOUT("total inlining greater than DesiredMethodLimit" ); |
| 3794 | } |
| 3795 | // printing |
| 3796 | print_inlining(callee); |
| 3797 | } |
| 3798 | |
| 3799 | // NOTE: Bailouts from this point on, which occur at the |
| 3800 | // GraphBuilder level, do not cause bailout just of the inlining but |
| 3801 | // in fact of the entire compilation. |
| 3802 | |
| 3803 | BlockBegin* orig_block = block(); |
| 3804 | |
| 3805 | const bool is_invokedynamic = bc == Bytecodes::_invokedynamic; |
| 3806 | const bool has_receiver = (bc != Bytecodes::_invokestatic && !is_invokedynamic); |
| 3807 | |
| 3808 | const int args_base = state()->stack_size() - callee->arg_size(); |
  assert(args_base >= 0, "stack underflow during inlining");
| 3810 | |
| 3811 | // Insert null check if necessary |
| 3812 | Value recv = NULL; |
| 3813 | if (has_receiver) { |
    // note: the null check must happen even if the callee's first
    // instruction performs an implicit null check, since the callee is in a
    // different scope and we must make sure exception handling does the
    // right thing
    assert(!callee->is_static(), "callee must not be static");
    assert(callee->arg_size() > 0, "must have at least a receiver");
| 3819 | recv = state()->stack_at(args_base); |
| 3820 | null_check(recv); |
| 3821 | } |
| 3822 | |
| 3823 | if (is_profiling()) { |
    // Note that we'd collect profile data in this method if we wanted it.
    // This may be redundant here...
| 3826 | compilation()->set_would_profile(true); |
| 3827 | |
| 3828 | if (profile_calls()) { |
| 3829 | int start = 0; |
| 3830 | Values* obj_args = args_list_for_profiling(callee, start, has_receiver); |
| 3831 | if (obj_args != NULL) { |
| 3832 | int s = obj_args->max_length(); |
| 3833 | // if called through method handle invoke, some arguments may have been popped |
| 3834 | for (int i = args_base+start, j = 0; j < obj_args->max_length() && i < state()->stack_size(); ) { |
| 3835 | Value v = state()->stack_at_inc(i); |
| 3836 | if (v->type()->is_object_kind()) { |
| 3837 | obj_args->push(v); |
| 3838 | j++; |
| 3839 | } |
| 3840 | } |
| 3841 | check_args_for_profiling(obj_args, s); |
| 3842 | } |
| 3843 | profile_call(callee, recv, holder_known ? callee->holder() : NULL, obj_args, true); |
| 3844 | } |
| 3845 | } |
| 3846 | |
| 3847 | // Introduce a new callee continuation point - if the callee has |
| 3848 | // more than one return instruction or the return does not allow |
| 3849 | // fall-through of control flow, all return instructions of the |
| 3850 | // callee will need to be replaced by Goto's pointing to this |
| 3851 | // continuation point. |
| 3852 | BlockBegin* cont = block_at(next_bci()); |
| 3853 | bool continuation_existed = true; |
| 3854 | if (cont == NULL) { |
| 3855 | cont = new BlockBegin(next_bci()); |
| 3856 | // low number so that continuation gets parsed as early as possible |
| 3857 | cont->set_depth_first_number(0); |
| 3858 | if (PrintInitialBlockList) { |
| 3859 | tty->print_cr("CFG: created block %d (bci %d) as continuation for inline at bci %d" , |
| 3860 | cont->block_id(), cont->bci(), bci()); |
| 3861 | } |
| 3862 | continuation_existed = false; |
| 3863 | } |
| 3864 | // Record number of predecessors of continuation block before |
| 3865 | // inlining, to detect if inlined method has edges to its |
| 3866 | // continuation after inlining. |
| 3867 | int continuation_preds = cont->number_of_preds(); |
| 3868 | |
| 3869 | // Push callee scope |
| 3870 | push_scope(callee, cont); |
| 3871 | |
| 3872 | // the BlockListBuilder for the callee could have bailed out |
| 3873 | if (bailed_out()) |
| 3874 | return false; |
| 3875 | |
| 3876 | // Temporarily set up bytecode stream so we can append instructions |
| 3877 | // (only using the bci of this stream) |
| 3878 | scope_data()->set_stream(scope_data()->parent()->stream()); |
| 3879 | |
| 3880 | // Pass parameters into callee state: add assignments |
| 3881 | // note: this will also ensure that all arguments are computed before being passed |
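  // note: stack_at_inc(i) returns the value at index i and advances i by the
  // size of that value's type, so long and double arguments consume two
  // stack slots per iteration (hence no explicit i++ in the loop header)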
| 3882 | ValueStack* callee_state = state(); |
| 3883 | ValueStack* caller_state = state()->caller_state(); |
| 3884 | for (int i = args_base; i < caller_state->stack_size(); ) { |
| 3885 | const int arg_no = i - args_base; |
| 3886 | Value arg = caller_state->stack_at_inc(i); |
| 3887 | store_local(callee_state, arg, arg_no); |
| 3888 | } |
| 3889 | |
| 3890 | // Remove args from stack. |
| 3891 | // Note that we preserve locals state in case we can use it later |
| 3892 | // (see use of pop_scope() below) |
| 3893 | caller_state->truncate_stack(args_base); |
  assert(callee_state->stack_size() == 0, "callee stack must be empty");
| 3895 | |
| 3896 | Value lock = NULL; |
| 3897 | BlockBegin* sync_handler = NULL; |
| 3898 | |
  // Inline the locking of the receiver if the callee is synchronized
  // (a static synchronized callee locks its holder's class mirror instead)
| 3900 | if (callee->is_synchronized()) { |
| 3901 | lock = callee->is_static() ? append(new Constant(new InstanceConstant(callee->holder()->java_mirror()))) |
| 3902 | : state()->local_at(0); |
| 3903 | sync_handler = new BlockBegin(SynchronizationEntryBCI); |
| 3904 | inline_sync_entry(lock, sync_handler); |
| 3905 | } |
| 3906 | |
| 3907 | if (compilation()->env()->dtrace_method_probes()) { |
| 3908 | Values* args = new Values(1); |
| 3909 | args->push(append(new Constant(new MethodConstant(method())))); |
    append(new RuntimeCall(voidType, "dtrace_method_entry", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), args));
| 3911 | } |
| 3912 | |
| 3913 | if (profile_inlined_calls()) { |
| 3914 | profile_invocation(callee, copy_state_before_with_bci(SynchronizationEntryBCI)); |
| 3915 | } |
| 3916 | |
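  // If the callee's entry (bci 0) is materialized as its own block, the
  // callee begins with a loop header -- e.g. a body of the illustrative form
  //   static int drain(int n) { while (n > 0) n--; return n; }
  // -- and we cannot simply fall through into it: an explicit Goto is
  // appended below and the callee state merged into that header.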
| 3917 | BlockBegin* callee_start_block = block_at(0); |
| 3918 | if (callee_start_block != NULL) { |
    assert(callee_start_block->is_set(BlockBegin::parser_loop_header_flag), "must be loop header");
| 3920 | Goto* goto_callee = new Goto(callee_start_block, false); |
| 3921 | // The state for this goto is in the scope of the callee, so use |
| 3922 | // the entry bci for the callee instead of the call site bci. |
| 3923 | append_with_bci(goto_callee, 0); |
| 3924 | _block->set_end(goto_callee); |
| 3925 | callee_start_block->merge(callee_state); |
| 3926 | |
| 3927 | _last = _block = callee_start_block; |
| 3928 | |
| 3929 | scope_data()->add_to_work_list(callee_start_block); |
| 3930 | } |
| 3931 | |
| 3932 | // Clear out bytecode stream |
| 3933 | scope_data()->set_stream(NULL); |
| 3934 | scope_data()->set_ignore_return(ignore_return); |
| 3935 | |
| 3936 | CompileLog* log = compilation()->log(); |
  if (log != NULL) log->head("parse method='%d'", log->identify(callee));
| 3938 | |
| 3939 | // Ready to resume parsing in callee (either in the same block we |
| 3940 | // were in before or in the callee's start block) |
| 3941 | iterate_all_blocks(callee_start_block == NULL); |
| 3942 | |
  if (log != NULL) log->done("parse");
| 3944 | |
| 3945 | // If we bailed out during parsing, return immediately (this is bad news) |
| 3946 | if (bailed_out()) |
| 3947 | return false; |
| 3948 | |
| 3949 | // iterate_all_blocks theoretically traverses in random order; in |
| 3950 | // practice, we have only traversed the continuation if we are |
| 3951 | // inlining into a subroutine |
| 3952 | assert(continuation_existed || |
| 3953 | !continuation()->is_set(BlockBegin::was_visited_flag), |
| 3954 | "continuation should not have been parsed yet if we created it" ); |
| 3955 | |
| 3956 | // At this point we are almost ready to return and resume parsing of |
| 3957 | // the caller back in the GraphBuilder. The only thing we want to do |
| 3958 | // first is an optimization: during parsing of the callee we |
| 3959 | // generated at least one Goto to the continuation block. If we |
| 3960 | // generated exactly one, and if the inlined method spanned exactly |
| 3961 | // one block (and we didn't have to Goto its entry), then we snip |
| 3962 | // off the Goto to the continuation, allowing control to fall |
| 3963 | // through back into the caller block and effectively performing |
| 3964 | // block merging. This allows load elimination and CSE to take place |
| 3965 | // across multiple callee scopes if they are relatively simple, and |
| 3966 | // is currently essential to making inlining profitable. |
| 3967 | if (num_returns() == 1 |
| 3968 | && block() == orig_block |
| 3969 | && block() == inline_cleanup_block()) { |
| 3970 | _last = inline_cleanup_return_prev(); |
| 3971 | _state = inline_cleanup_state(); |
| 3972 | } else if (continuation_preds == cont->number_of_preds()) { |
    // Inlining has made the instructions after the invoke in the caller
    // unreachable. So skip filling this block with instructions!
    assert(cont == continuation(), "");
    assert(_last && _last->as_BlockEnd(), "");
| 3978 | _skip_block = true; |
| 3979 | } else { |
| 3980 | // Resume parsing in continuation block unless it was already parsed. |
| 3981 | // Note that if we don't change _last here, iteration in |
| 3982 | // iterate_bytecodes_for_block will stop when we return. |
| 3983 | if (!continuation()->is_set(BlockBegin::was_visited_flag)) { |
| 3984 | // add continuation to work list instead of parsing it immediately |
      assert(_last && _last->as_BlockEnd(), "");
| 3986 | scope_data()->parent()->add_to_work_list(continuation()); |
| 3987 | _skip_block = true; |
| 3988 | } |
| 3989 | } |
| 3990 | |
| 3991 | // Fill the exception handler for synchronized methods with instructions |
| 3992 | if (callee->is_synchronized() && sync_handler->state() != NULL) { |
| 3993 | fill_sync_handler(lock, sync_handler); |
| 3994 | } else { |
| 3995 | pop_scope(); |
| 3996 | } |
| 3997 | |
| 3998 | compilation()->notice_inlined_method(callee); |
| 3999 | |
| 4000 | return true; |
| 4001 | } |
| 4002 | |
| 4003 | |
| 4004 | bool GraphBuilder::try_method_handle_inline(ciMethod* callee, bool ignore_return) { |
| 4005 | ValueStack* state_before = copy_state_before(); |
| 4006 | vmIntrinsics::ID iid = callee->intrinsic_id(); |
| 4007 | switch (iid) { |
| 4008 | case vmIntrinsics::_invokeBasic: |
| 4009 | { |
| 4010 | // get MethodHandle receiver |
| 4011 | const int args_base = state()->stack_size() - callee->arg_size(); |
| 4012 | ValueType* type = state()->stack_at(args_base)->type(); |
| 4013 | if (type->is_constant()) { |
| 4014 | ciMethod* target = type->as_ObjectType()->constant_value()->as_method_handle()->get_vmtarget(); |
| 4015 | // We don't do CHA here so only inline static and statically bindable methods. |
| 4016 | if (target->is_static() || target->can_be_statically_bound()) { |
| 4017 | if (ciMethod::is_consistent_info(callee, target)) { |
| 4018 | Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual; |
| 4019 | ignore_return = ignore_return || (callee->return_type()->is_void() && !target->return_type()->is_void()); |
| 4020 | if (try_inline(target, /*holder_known*/ true, ignore_return, bc)) { |
| 4021 | return true; |
| 4022 | } |
| 4023 | } else { |
| 4024 | print_inlining(target, "signatures mismatch" , /*success*/ false); |
| 4025 | } |
| 4026 | } else { |
| 4027 | print_inlining(target, "not static or statically bindable" , /*success*/ false); |
| 4028 | } |
| 4029 | } else { |
| 4030 | print_inlining(callee, "receiver not constant" , /*success*/ false); |
| 4031 | } |
| 4032 | } |
| 4033 | break; |
| 4034 | |
| 4035 | case vmIntrinsics::_linkToVirtual: |
| 4036 | case vmIntrinsics::_linkToStatic: |
| 4037 | case vmIntrinsics::_linkToSpecial: |
| 4038 | case vmIntrinsics::_linkToInterface: |
| 4039 | { |
| 4040 | // pop MemberName argument |
| 4041 | const int args_base = state()->stack_size() - callee->arg_size(); |
| 4042 | ValueType* type = apop()->type(); |
| 4043 | if (type->is_constant()) { |
| 4044 | ciMethod* target = type->as_ObjectType()->constant_value()->as_member_name()->get_vmtarget(); |
| 4045 | ignore_return = ignore_return || (callee->return_type()->is_void() && !target->return_type()->is_void()); |
| 4046 | // If the target is another method handle invoke, try to recursively get |
| 4047 | // a better target. |
| 4048 | if (target->is_method_handle_intrinsic()) { |
| 4049 | if (try_method_handle_inline(target, ignore_return)) { |
| 4050 | return true; |
| 4051 | } |
| 4052 | } else if (!ciMethod::is_consistent_info(callee, target)) { |
| 4053 | print_inlining(target, "signatures mismatch" , /*success*/ false); |
| 4054 | } else { |
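        // The MemberName's target may have a sharper signature than the call
        // site's declared one; the TypeCasts inserted below let the receiver
        // and reference arguments carry those sharper types.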
| 4055 | ciSignature* signature = target->signature(); |
| 4056 | const int receiver_skip = target->is_static() ? 0 : 1; |
| 4057 | // Cast receiver to its type. |
| 4058 | if (!target->is_static()) { |
| 4059 | ciKlass* tk = signature->accessing_klass(); |
| 4060 | Value obj = state()->stack_at(args_base); |
| 4061 | if (obj->exact_type() == NULL && |
| 4062 | obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) { |
| 4063 | TypeCast* c = new TypeCast(tk, obj, state_before); |
| 4064 | append(c); |
| 4065 | state()->stack_at_put(args_base, c); |
| 4066 | } |
| 4067 | } |
        // Cast reference arguments to their types.
| 4069 | for (int i = 0, j = 0; i < signature->count(); i++) { |
| 4070 | ciType* t = signature->type_at(i); |
| 4071 | if (t->is_klass()) { |
| 4072 | ciKlass* tk = t->as_klass(); |
| 4073 | Value obj = state()->stack_at(args_base + receiver_skip + j); |
| 4074 | if (obj->exact_type() == NULL && |
| 4075 | obj->declared_type() != tk && tk != compilation()->env()->Object_klass()) { |
| 4076 | TypeCast* c = new TypeCast(t, obj, state_before); |
| 4077 | append(c); |
| 4078 | state()->stack_at_put(args_base + receiver_skip + j, c); |
| 4079 | } |
| 4080 | } |
| 4081 | j += t->size(); // long and double take two slots |
| 4082 | } |
| 4083 | // We don't do CHA here so only inline static and statically bindable methods. |
| 4084 | if (target->is_static() || target->can_be_statically_bound()) { |
| 4085 | Bytecodes::Code bc = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokevirtual; |
| 4086 | if (try_inline(target, /*holder_known*/ true, ignore_return, bc)) { |
| 4087 | return true; |
| 4088 | } |
| 4089 | } else { |
| 4090 | print_inlining(target, "not static or statically bindable" , /*success*/ false); |
| 4091 | } |
| 4092 | } |
| 4093 | } else { |
| 4094 | print_inlining(callee, "MemberName not constant" , /*success*/ false); |
| 4095 | } |
| 4096 | } |
| 4097 | break; |
| 4098 | |
| 4099 | default: |
| 4100 | fatal("unexpected intrinsic %d: %s" , iid, vmIntrinsics::name_at(iid)); |
| 4101 | break; |
| 4102 | } |
| 4103 | set_state(state_before->copy_for_parsing()); |
| 4104 | return false; |
| 4105 | } |
| 4106 | |
| 4107 | |
| 4108 | void GraphBuilder::inline_bailout(const char* msg) { |
  assert(msg != NULL, "inline bailout msg must exist");
| 4110 | _inline_bailout_msg = msg; |
| 4111 | } |
| 4112 | |
| 4113 | |
| 4114 | void GraphBuilder::clear_inline_bailout() { |
| 4115 | _inline_bailout_msg = NULL; |
| 4116 | } |
| 4117 | |
| 4118 | |
| 4119 | void GraphBuilder::push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start) { |
| 4120 | ScopeData* data = new ScopeData(NULL); |
| 4121 | data->set_scope(scope); |
| 4122 | data->set_bci2block(bci2block); |
| 4123 | _scope_data = data; |
| 4124 | _block = start; |
| 4125 | } |
| 4126 | |
| 4127 | |
| 4128 | void GraphBuilder::push_scope(ciMethod* callee, BlockBegin* continuation) { |
| 4129 | IRScope* callee_scope = new IRScope(compilation(), scope(), bci(), callee, -1, false); |
| 4130 | scope()->add_callee(callee_scope); |
| 4131 | |
| 4132 | BlockListBuilder blb(compilation(), callee_scope, -1); |
| 4133 | CHECK_BAILOUT(); |
| 4134 | |
| 4135 | if (!blb.bci2block()->at(0)->is_set(BlockBegin::parser_loop_header_flag)) { |
| 4136 | // this scope can be inlined directly into the caller so remove |
| 4137 | // the block at bci 0. |
| 4138 | blb.bci2block()->at_put(0, NULL); |
| 4139 | } |
| 4140 | |
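  // The callee's ValueStack links to a copy of the caller's state
  // (ValueStack::CallerState) so the caller's frame at this bci can be
  // reconstructed later, e.g. for deoptimization.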
| 4141 | set_state(new ValueStack(callee_scope, state()->copy(ValueStack::CallerState, bci()))); |
| 4142 | |
| 4143 | ScopeData* data = new ScopeData(scope_data()); |
| 4144 | data->set_scope(callee_scope); |
| 4145 | data->set_bci2block(blb.bci2block()); |
| 4146 | data->set_continuation(continuation); |
| 4147 | _scope_data = data; |
| 4148 | } |
| 4149 | |
| 4150 | |
| 4151 | void GraphBuilder::push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci) { |
| 4152 | ScopeData* data = new ScopeData(scope_data()); |
| 4153 | data->set_parsing_jsr(); |
| 4154 | data->set_jsr_entry_bci(jsr_dest_bci); |
| 4155 | data->set_jsr_return_address_local(-1); |
| 4156 | // Must clone bci2block list as we will be mutating it in order to |
| 4157 | // properly clone all blocks in jsr region as well as exception |
| 4158 | // handlers containing rets |
| 4159 | BlockList* new_bci2block = new BlockList(bci2block()->length()); |
| 4160 | new_bci2block->appendAll(bci2block()); |
| 4161 | data->set_bci2block(new_bci2block); |
| 4162 | data->set_scope(scope()); |
| 4163 | data->setup_jsr_xhandlers(); |
| 4164 | data->set_continuation(continuation()); |
| 4165 | data->set_jsr_continuation(jsr_continuation); |
| 4166 | _scope_data = data; |
| 4167 | } |
| 4168 | |
| 4169 | |
| 4170 | void GraphBuilder::pop_scope() { |
| 4171 | int number_of_locks = scope()->number_of_locks(); |
| 4172 | _scope_data = scope_data()->parent(); |
| 4173 | // accumulate minimum number of monitor slots to be reserved |
| 4174 | scope()->set_min_number_of_locks(number_of_locks); |
| 4175 | } |
| 4176 | |
| 4177 | |
| 4178 | void GraphBuilder::pop_scope_for_jsr() { |
| 4179 | _scope_data = scope_data()->parent(); |
| 4180 | } |
| 4181 | |
| 4182 | void GraphBuilder::append_unsafe_get_obj(ciMethod* callee, BasicType t, bool is_volatile) { |
| 4183 | Values* args = state()->pop_arguments(callee->arg_size()); |
| 4184 | null_check(args->at(0)); |
| 4185 | Instruction* offset = args->at(2); |
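  // Unsafe passes the offset as a jlong; on 32-bit platforms narrow it to an
  // int first (the same conversion recurs in the helpers below).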
| 4186 | #ifndef _LP64 |
| 4187 | offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT))); |
| 4188 | #endif |
| 4189 | Instruction* op = append(new UnsafeGetObject(t, args->at(1), offset, is_volatile)); |
| 4190 | push(op->type(), op); |
| 4191 | compilation()->set_has_unsafe_access(true); |
| 4192 | } |
| 4193 | |
| 4194 | |
| 4195 | void GraphBuilder::append_unsafe_put_obj(ciMethod* callee, BasicType t, bool is_volatile) { |
| 4196 | Values* args = state()->pop_arguments(callee->arg_size()); |
| 4197 | null_check(args->at(0)); |
| 4198 | Instruction* offset = args->at(2); |
| 4199 | #ifndef _LP64 |
| 4200 | offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT))); |
| 4201 | #endif |
| 4202 | Value val = args->at(3); |
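  // Normalize boolean stores by masking to the lowest bit, matching the
  // canonical 0/1 in-memory representation of jboolean.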
| 4203 | if (t == T_BOOLEAN) { |
| 4204 | Value mask = append(new Constant(new IntConstant(1))); |
| 4205 | val = append(new LogicOp(Bytecodes::_iand, val, mask)); |
| 4206 | } |
| 4207 | Instruction* op = append(new UnsafePutObject(t, args->at(1), offset, val, is_volatile)); |
| 4208 | compilation()->set_has_unsafe_access(true); |
| 4209 | kill_all(); |
| 4210 | } |
| 4211 | |
| 4212 | |
| 4213 | void GraphBuilder::append_unsafe_get_raw(ciMethod* callee, BasicType t) { |
| 4214 | Values* args = state()->pop_arguments(callee->arg_size()); |
| 4215 | null_check(args->at(0)); |
| 4216 | Instruction* op = append(new UnsafeGetRaw(t, args->at(1), false)); |
| 4217 | push(op->type(), op); |
| 4218 | compilation()->set_has_unsafe_access(true); |
| 4219 | } |
| 4220 | |
| 4221 | |
| 4222 | void GraphBuilder::append_unsafe_put_raw(ciMethod* callee, BasicType t) { |
| 4223 | Values* args = state()->pop_arguments(callee->arg_size()); |
| 4224 | null_check(args->at(0)); |
| 4225 | Instruction* op = append(new UnsafePutRaw(t, args->at(1), args->at(2))); |
| 4226 | compilation()->set_has_unsafe_access(true); |
| 4227 | } |
| 4228 | |
| 4229 | |
| 4230 | void GraphBuilder::append_unsafe_CAS(ciMethod* callee) { |
| 4231 | ValueStack* state_before = copy_state_for_exception(); |
| 4232 | ValueType* result_type = as_ValueType(callee->return_type()); |
  assert(result_type->is_int(), "int result");
| 4234 | Values* args = state()->pop_arguments(callee->arg_size()); |
| 4235 | |
| 4236 | // Pop off some args to specially handle, then push back |
| 4237 | Value newval = args->pop(); |
| 4238 | Value cmpval = args->pop(); |
| 4239 | Value offset = args->pop(); |
| 4240 | Value src = args->pop(); |
| 4241 | Value unsafe_obj = args->pop(); |
| 4242 | |
| 4243 | // Separately handle the unsafe arg. It is not needed for code |
| 4244 | // generation, but must be null checked |
| 4245 | null_check(unsafe_obj); |
| 4246 | |
| 4247 | #ifndef _LP64 |
| 4248 | offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT))); |
| 4249 | #endif |
| 4250 | |
| 4251 | args->push(src); |
| 4252 | args->push(offset); |
| 4253 | args->push(cmpval); |
| 4254 | args->push(newval); |
| 4255 | |
  // An unsafe CAS can alias with other field accesses, but we don't
  // know which ones, so mark the state as not preserved. This will
  // cause CSE to invalidate memory across it.
| 4259 | bool preserves_state = false; |
| 4260 | Intrinsic* result = new Intrinsic(result_type, callee->intrinsic_id(), args, false, state_before, preserves_state); |
| 4261 | append_split(result); |
| 4262 | push(result_type, result); |
| 4263 | compilation()->set_has_unsafe_access(true); |
| 4264 | } |
| 4265 | |
| 4266 | void GraphBuilder::append_char_access(ciMethod* callee, bool is_store) { |
  // This intrinsic accesses a byte[] array as a char[] array. Computing the
  // offsets correctly requires matched array shapes.
  assert(arrayOopDesc::base_offset_in_bytes(T_CHAR) == arrayOopDesc::base_offset_in_bytes(T_BYTE),
         "sanity: byte[] and char[] bases agree");
  assert(type2aelembytes(T_CHAR) == type2aelembytes(T_BYTE)*2,
         "sanity: byte[] and char[] scales agree");
| 4273 | |
| 4274 | ValueStack* state_before = copy_state_indexed_access(); |
| 4275 | compilation()->set_has_access_indexed(true); |
| 4276 | Values* args = state()->pop_arguments(callee->arg_size()); |
| 4277 | Value array = args->at(0); |
| 4278 | Value index = args->at(1); |
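  // Range checks are elided on both paths below: the Java-level callers of
  // these char-access intrinsics (the JDK's compact-string machinery) are
  // assumed to have performed the bounds checks already.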
| 4279 | if (is_store) { |
| 4280 | Value value = args->at(2); |
| 4281 | Instruction* store = append(new StoreIndexed(array, index, NULL, T_CHAR, value, state_before, false, true)); |
| 4282 | store->set_flag(Instruction::NeedsRangeCheckFlag, false); |
| 4283 | _memory->store_value(value); |
| 4284 | } else { |
| 4285 | Instruction* load = append(new LoadIndexed(array, index, NULL, T_CHAR, state_before, true)); |
| 4286 | load->set_flag(Instruction::NeedsRangeCheckFlag, false); |
| 4287 | push(load->type(), load); |
| 4288 | } |
| 4289 | } |
| 4290 | |
| 4291 | static void post_inlining_event(EventCompilerInlining* event, |
| 4292 | int compile_id, |
| 4293 | const char* msg, |
| 4294 | bool success, |
| 4295 | int bci, |
| 4296 | ciMethod* caller, |
| 4297 | ciMethod* callee) { |
  assert(caller != NULL, "invariant");
  assert(callee != NULL, "invariant");
  assert(event != NULL, "invariant");
  assert(event->should_commit(), "invariant");
| 4302 | JfrStructCalleeMethod callee_struct; |
| 4303 | callee_struct.set_type(callee->holder()->name()->as_utf8()); |
| 4304 | callee_struct.set_name(callee->name()->as_utf8()); |
| 4305 | callee_struct.set_descriptor(callee->signature()->as_symbol()->as_utf8()); |
| 4306 | event->set_compileId(compile_id); |
| 4307 | event->set_message(msg); |
| 4308 | event->set_succeeded(success); |
| 4309 | event->set_bci(bci); |
| 4310 | event->set_caller(caller->get_Method()); |
| 4311 | event->set_callee(callee_struct); |
| 4312 | event->commit(); |
| 4313 | } |
| 4314 | |
| 4315 | void GraphBuilder::print_inlining(ciMethod* callee, const char* msg, bool success) { |
| 4316 | CompileLog* log = compilation()->log(); |
| 4317 | if (log != NULL) { |
| 4318 | if (success) { |
| 4319 | if (msg != NULL) |
| 4320 | log->inline_success(msg); |
| 4321 | else |
| 4322 | log->inline_success("receiver is statically known" ); |
| 4323 | } else { |
| 4324 | if (msg != NULL) |
| 4325 | log->inline_fail(msg); |
| 4326 | else |
| 4327 | log->inline_fail("reason unknown" ); |
| 4328 | } |
| 4329 | } |
| 4330 | EventCompilerInlining event; |
| 4331 | if (event.should_commit()) { |
| 4332 | post_inlining_event(&event, compilation()->env()->task()->compile_id(), msg, success, bci(), method(), callee); |
| 4333 | } |
| 4334 | |
| 4335 | CompileTask::print_inlining_ul(callee, scope()->level(), bci(), msg); |
| 4336 | |
| 4337 | if (!compilation()->directive()->PrintInliningOption) { |
| 4338 | return; |
| 4339 | } |
| 4340 | CompileTask::print_inlining_tty(callee, scope()->level(), bci(), msg); |
| 4341 | if (success && CIPrintMethodCodes) { |
| 4342 | callee->print_codes(); |
| 4343 | } |
| 4344 | } |
| 4345 | |
| 4346 | void GraphBuilder::append_unsafe_get_and_set_obj(ciMethod* callee, bool is_add) { |
| 4347 | Values* args = state()->pop_arguments(callee->arg_size()); |
| 4348 | BasicType t = callee->return_type()->basic_type(); |
| 4349 | null_check(args->at(0)); |
| 4350 | Instruction* offset = args->at(2); |
| 4351 | #ifndef _LP64 |
| 4352 | offset = append(new Convert(Bytecodes::_l2i, offset, as_ValueType(T_INT))); |
| 4353 | #endif |
| 4354 | Instruction* op = append(new UnsafeGetAndSetObject(t, args->at(1), offset, args->at(3), is_add)); |
| 4355 | compilation()->set_has_unsafe_access(true); |
| 4356 | kill_all(); |
| 4357 | push(op->type(), op); |
| 4358 | } |
| 4359 | |
| 4360 | #ifndef PRODUCT |
| 4361 | void GraphBuilder::print_stats() { |
| 4362 | vmap()->print(); |
| 4363 | } |
| 4364 | #endif // PRODUCT |
| 4365 | |
| 4366 | void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) { |
| 4367 | assert(known_holder == NULL || (known_holder->is_instance_klass() && |
| 4368 | (!known_holder->is_interface() || |
         ((ciInstanceKlass*)known_holder)->has_nonstatic_concrete_methods())), "should be non-static concrete method");
| 4370 | if (known_holder != NULL) { |
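    // If the holder is not already an exact type, ask CHA (cha_exact_type)
    // whether a unique concrete receiver type exists and profile against
    // that exact klass instead.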
| 4371 | if (known_holder->exact_klass() == NULL) { |
| 4372 | known_holder = compilation()->cha_exact_type(known_holder); |
| 4373 | } |
| 4374 | } |
| 4375 | |
| 4376 | append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined)); |
| 4377 | } |
| 4378 | |
| 4379 | void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m, int invoke_bci) { |
  assert((m == NULL) == (invoke_bci < 0), "invalid method and invalid bci together");
| 4381 | if (m == NULL) { |
| 4382 | m = method(); |
| 4383 | } |
| 4384 | if (invoke_bci < 0) { |
| 4385 | invoke_bci = bci(); |
| 4386 | } |
| 4387 | ciMethodData* md = m->method_data_or_null(); |
| 4388 | ciProfileData* data = md->bci_to_data(invoke_bci); |
| 4389 | if (data != NULL && (data->is_CallTypeData() || data->is_VirtualCallTypeData())) { |
| 4390 | bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return(); |
| 4391 | if (has_return) { |
      append(new ProfileReturnType(m, invoke_bci, callee, ret));
| 4393 | } |
| 4394 | } |
| 4395 | } |
| 4396 | |
| 4397 | void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) { |
| 4398 | append(new ProfileInvoke(callee, state)); |
| 4399 | } |
| 4400 | |