| 1 | /* |
| 2 | * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #include "precompiled.hpp" |
| 26 | #include "compiler/compileLog.hpp" |
| 27 | #include "ci/bcEscapeAnalyzer.hpp" |
| 28 | #include "compiler/oopMap.hpp" |
| 29 | #include "gc/shared/barrierSet.hpp" |
| 30 | #include "gc/shared/c2/barrierSetC2.hpp" |
| 31 | #include "interpreter/interpreter.hpp" |
| 32 | #include "opto/callGenerator.hpp" |
| 33 | #include "opto/callnode.hpp" |
| 34 | #include "opto/castnode.hpp" |
| 35 | #include "opto/convertnode.hpp" |
| 36 | #include "opto/escape.hpp" |
| 37 | #include "opto/locknode.hpp" |
| 38 | #include "opto/machnode.hpp" |
| 39 | #include "opto/matcher.hpp" |
| 40 | #include "opto/parse.hpp" |
| 41 | #include "opto/regalloc.hpp" |
| 42 | #include "opto/regmask.hpp" |
| 43 | #include "opto/rootnode.hpp" |
| 44 | #include "opto/runtime.hpp" |
| 45 | |
| 46 | // Portions of code courtesy of Clifford Click |
| 47 | |
| 48 | // Optimization - Graph Style |
| 49 | |
| 50 | //============================================================================= |
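| | // StartNode is the entry of the ideal graph; _domain is the TypeTuple describing the |
| | // incoming parameter types. |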
| 51 | uint StartNode::size_of() const { return sizeof(*this); } |
| 52 | bool StartNode::cmp( const Node &n ) const |
| 53 | { return _domain == ((StartNode&)n)._domain; } |
| 54 | const Type *StartNode::bottom_type() const { return _domain; } |
| 55 | const Type* StartNode::Value(PhaseGVN* phase) const { return _domain; } |
| 56 | #ifndef PRODUCT |
| 57 | void StartNode::dump_spec(outputStream *st) const { st->print(" #" ); _domain->dump_on(st);} |
| 58 | void StartNode::dump_compact_spec(outputStream *st) const { /* empty */ } |
| 59 | #endif |
| 60 | |
| 61 | //------------------------------Ideal------------------------------------------ |
| 62 | Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){ |
| 63 | return remove_dead_region(phase, can_reshape) ? this : NULL; |
| 64 | } |
| 65 | |
| 66 | //------------------------------calling_convention----------------------------- |
| 67 | void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { |
| 68 | Matcher::calling_convention( sig_bt, parm_regs, argcnt, false ); |
| 69 | } |
| 70 | |
| 71 | //------------------------------Registers-------------------------------------- |
| 72 | const RegMask &StartNode::in_RegMask(uint) const { |
| 73 | return RegMask::Empty; |
| 74 | } |
| 75 | |
| 76 | //------------------------------match------------------------------------------ |
| 77 | // Construct projections for incoming parameters, and their RegMask info |
| 78 | Node *StartNode::match( const ProjNode *proj, const Matcher *match ) { |
| 79 | switch (proj->_con) { |
| 80 | case TypeFunc::Control: |
| 81 | case TypeFunc::I_O: |
| 82 | case TypeFunc::Memory: |
| 83 | return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); |
| 84 | case TypeFunc::FramePtr: |
| 85 | return new MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP); |
| 86 | case TypeFunc::ReturnAdr: |
| 87 | return new MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP); |
| 88 | case TypeFunc::Parms: |
| 89 | default: { |
| 90 | uint parm_num = proj->_con - TypeFunc::Parms; |
| 91 | const Type *t = _domain->field_at(proj->_con); |
| 92 | if (t->base() == Type::Half) // 2nd half of Longs and Doubles |
| 93 | return new ConNode(Type::TOP); |
| 94 | uint ideal_reg = t->ideal_reg(); |
| 95 | RegMask &rm = match->_calling_convention_mask[parm_num]; |
| 96 | return new MachProjNode(this,proj->_con,rm,ideal_reg); |
| 97 | } |
| 98 | } |
| 99 | return NULL; |
| 100 | } |
| 101 | |
| 102 | //------------------------------StartOSRNode---------------------------------- |
| 103 | // The method start node for an on-stack replacement adapter |
| 104 | |
| 105 | //------------------------------osr_domain----------------------------- |
| 106 | const TypeTuple *StartOSRNode::osr_domain() { |
| 107 | const Type **fields = TypeTuple::fields(2); |
| 108 | fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // address of osr buffer |
| 109 | |
| 110 | return TypeTuple::make(TypeFunc::Parms+1, fields); |
| 111 | } |
| 112 | |
| 113 | //============================================================================= |
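| | // ParmNode projections hang off the StartNode. Indices below TypeFunc::Parms are the fixed |
| | // control/I_O/memory/frame-pointer/return-address edges; names[] labels them for debug dumps. |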
| 114 | const char * const ParmNode::names[TypeFunc::Parms+1] = { |
| 115 | "Control" , "I_O" , "Memory" , "FramePtr" , "ReturnAdr" , "Parms" |
| 116 | }; |
| 117 | |
| 118 | #ifndef PRODUCT |
| 119 | void ParmNode::dump_spec(outputStream *st) const { |
| 120 | if( _con < TypeFunc::Parms ) { |
| 121 | st->print("%s" , names[_con]); |
| 122 | } else { |
| 123 | st->print("Parm%d: " ,_con-TypeFunc::Parms); |
| 124 | // Verbose and WizardMode dump bottom_type for all nodes |
| 125 | if( !Verbose && !WizardMode ) bottom_type()->dump_on(st); |
| 126 | } |
| 127 | } |
| 128 | |
| 129 | void ParmNode::dump_compact_spec(outputStream *st) const { |
| 130 | if (_con < TypeFunc::Parms) { |
| 131 | st->print("%s" , names[_con]); |
| 132 | } else { |
| 133 | st->print("%d:" , _con-TypeFunc::Parms); |
| 134 | // unconditionally dump bottom_type |
| 135 | bottom_type()->dump_on(st); |
| 136 | } |
| 137 | } |
| 138 | |
| 139 | // For a ParmNode, all immediate inputs and outputs are considered relevant |
| 140 | // both in compact and standard representation. |
| 141 | void ParmNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const { |
| 142 | this->collect_nodes(in_rel, 1, false, false); |
| 143 | this->collect_nodes(out_rel, -1, false, false); |
| 144 | } |
| 145 | #endif |
| 146 | |
| 147 | uint ParmNode::ideal_reg() const { |
| 148 | switch( _con ) { |
| 149 | case TypeFunc::Control : // fall through |
| 150 | case TypeFunc::I_O : // fall through |
| 151 | case TypeFunc::Memory : return 0; |
| 152 | case TypeFunc::FramePtr : // fall through |
| 153 | case TypeFunc::ReturnAdr: return Op_RegP; |
| 154 | default : assert( _con > TypeFunc::Parms, "" ); |
| 155 | // fall through |
| 156 | case TypeFunc::Parms : { |
| 157 | // Type of argument being passed |
| 158 | const Type *t = in(0)->as_Start()->_domain->field_at(_con); |
| 159 | return t->ideal_reg(); |
| 160 | } |
| 161 | } |
| 162 | ShouldNotReachHere(); |
| 163 | return 0; |
| 164 | } |
| 165 | |
| 166 | //============================================================================= |
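| | // A ReturnNode marks a normal method exit. The fixed control/I_O/memory/frame-pointer/ |
| | // return-address inputs are set here; a return value, if any, is appended as an extra edge |
| | // by the code that builds the node. |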
| 167 | ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) { |
| 168 | init_req(TypeFunc::Control,cntrl); |
| 169 | init_req(TypeFunc::I_O,i_o); |
| 170 | init_req(TypeFunc::Memory,memory); |
| 171 | init_req(TypeFunc::FramePtr,frameptr); |
| 172 | init_req(TypeFunc::ReturnAdr,retadr); |
| 173 | } |
| 174 | |
| 175 | Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){ |
| 176 | return remove_dead_region(phase, can_reshape) ? this : NULL; |
| 177 | } |
| 178 | |
| 179 | const Type* ReturnNode::Value(PhaseGVN* phase) const { |
| 180 | return ( phase->type(in(TypeFunc::Control)) == Type::TOP) |
| 181 | ? Type::TOP |
| 182 | : Type::BOTTOM; |
| 183 | } |
| 184 | |
| 185 | // Do we Match on this edge index or not? No edges on return nodes |
| 186 | uint ReturnNode::match_edge(uint idx) const { |
| 187 | return 0; |
| 188 | } |
| 189 | |
| 190 | |
| 191 | #ifndef PRODUCT |
| 192 | void ReturnNode::dump_req(outputStream *st) const { |
| 193 | // Dump the required inputs; prints "returns" before the result inputs |
| 194 | uint i; // Exit value of loop |
| 195 | for (i = 0; i < req(); i++) { // For all required inputs |
| 196 | if (i == TypeFunc::Parms) st->print("returns" ); |
| 197 | if (in(i)) st->print("%c%d " , Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx); |
| 198 | else st->print("_ " ); |
| 199 | } |
| 200 | } |
| 201 | #endif |
| 202 | |
| 203 | //============================================================================= |
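| | // A RethrowNode ends the method by rethrowing a pending exception to the caller frame; |
| | // the exception oop occupies the TypeFunc::Parms slot. |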
| 204 | RethrowNode::RethrowNode( |
| 205 | Node* cntrl, |
| 206 | Node* i_o, |
| 207 | Node* memory, |
| 208 | Node* frameptr, |
| 209 | Node* ret_adr, |
| 210 | Node* exception |
| 211 | ) : Node(TypeFunc::Parms + 1) { |
| 212 | init_req(TypeFunc::Control , cntrl ); |
| 213 | init_req(TypeFunc::I_O , i_o ); |
| 214 | init_req(TypeFunc::Memory , memory ); |
| 215 | init_req(TypeFunc::FramePtr , frameptr ); |
| 216 | init_req(TypeFunc::ReturnAdr, ret_adr); |
| 217 | init_req(TypeFunc::Parms , exception); |
| 218 | } |
| 219 | |
| 220 | Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){ |
| 221 | return remove_dead_region(phase, can_reshape) ? this : NULL; |
| 222 | } |
| 223 | |
| 224 | const Type* RethrowNode::Value(PhaseGVN* phase) const { |
| 225 | return (phase->type(in(TypeFunc::Control)) == Type::TOP) |
| 226 | ? Type::TOP |
| 227 | : Type::BOTTOM; |
| 228 | } |
| 229 | |
| 230 | uint RethrowNode::match_edge(uint idx) const { |
| 231 | return 0; |
| 232 | } |
| 233 | |
| 234 | #ifndef PRODUCT |
| 235 | void RethrowNode::dump_req(outputStream *st) const { |
| 236 | // Dump the required inputs; prints "exception" before the exception input |
| 237 | uint i; // Exit value of loop |
| 238 | for (i = 0; i < req(); i++) { // For all required inputs |
| 239 | if (i == TypeFunc::Parms) st->print("exception" ); |
| 240 | if (in(i)) st->print("%c%d " , Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx); |
| 241 | else st->print("_ " ); |
| 242 | } |
| 243 | } |
| 244 | #endif |
| 245 | |
| 246 | //============================================================================= |
| 247 | // Do we Match on this edge index or not? Match only target address & method |
| 248 | uint TailCallNode::match_edge(uint idx) const { |
| 249 | return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1; |
| 250 | } |
| 251 | |
| 252 | //============================================================================= |
| 253 | // Do we Match on this edge index or not? Match only target address & oop |
| 254 | uint TailJumpNode::match_edge(uint idx) const { |
| 255 | return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1; |
| 256 | } |
| 257 | |
| 258 | //============================================================================= |
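| | // JVMState captures the interpreter state (locals, expression stack, monitors, scalar-replaced |
| | // objects) at a safepoint. The offsets below index into the owning SafePointNode's inputs; |
| | // a NULL _method denotes a runtime stub frame. |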
| 259 | JVMState::JVMState(ciMethod* method, JVMState* caller) : |
| 260 | _method(method) { |
| 261 | assert(method != NULL, "must be valid call site" ); |
| 262 | _bci = InvocationEntryBci; |
| 263 | _reexecute = Reexecute_Undefined; |
| 264 | debug_only(_bci = -99); // random garbage value |
| 265 | debug_only(_map = (SafePointNode*)-1); |
| 266 | _caller = caller; |
| 267 | _depth = 1 + (caller == NULL ? 0 : caller->depth()); |
| 268 | _locoff = TypeFunc::Parms; |
| 269 | _stkoff = _locoff + _method->max_locals(); |
| 270 | _monoff = _stkoff + _method->max_stack(); |
| 271 | _scloff = _monoff; |
| 272 | _endoff = _monoff; |
| 273 | _sp = 0; |
| 274 | } |
| 275 | JVMState::JVMState(int stack_size) : |
| 276 | _method(NULL) { |
| 277 | _bci = InvocationEntryBci; |
| 278 | _reexecute = Reexecute_Undefined; |
| 279 | debug_only(_map = (SafePointNode*)-1); |
| 280 | _caller = NULL; |
| 281 | _depth = 1; |
| 282 | _locoff = TypeFunc::Parms; |
| 283 | _stkoff = _locoff; |
| 284 | _monoff = _stkoff + stack_size; |
| 285 | _scloff = _monoff; |
| 286 | _endoff = _monoff; |
| 287 | _sp = 0; |
| 288 | } |
| 289 | |
| 290 | //--------------------------------of_depth------------------------------------- |
| 291 | JVMState* JVMState::of_depth(int d) const { |
| 292 | const JVMState* jvmp = this; |
| 293 | assert(0 < d && (uint)d <= depth(), "oob" ); |
| 294 | for (int skip = depth() - d; skip > 0; skip--) { |
| 295 | jvmp = jvmp->caller(); |
| 296 | } |
| 297 | assert(jvmp->depth() == (uint)d, "found the right one" ); |
| 298 | return (JVMState*)jvmp; |
| 299 | } |
| 300 | |
| 301 | //-----------------------------same_calls_as----------------------------------- |
| 302 | bool JVMState::same_calls_as(const JVMState* that) const { |
| 303 | if (this == that) return true; |
| 304 | if (this->depth() != that->depth()) return false; |
| 305 | const JVMState* p = this; |
| 306 | const JVMState* q = that; |
| 307 | for (;;) { |
| 308 | if (p->_method != q->_method) return false; |
| 309 | if (p->_method == NULL) return true; // bci is irrelevant |
| 310 | if (p->_bci != q->_bci) return false; |
| 311 | if (p->_reexecute != q->_reexecute) return false; |
| 312 | p = p->caller(); |
| 313 | q = q->caller(); |
| 314 | if (p == q) return true; |
| 315 | assert(p != NULL && q != NULL, "depth check ensures we don't run off end" ); |
| 316 | } |
| 317 | } |
| 318 | |
| 319 | //------------------------------debug_start------------------------------------ |
| 320 | uint JVMState::debug_start() const { |
| 321 | debug_only(JVMState* jvmroot = of_depth(1)); |
| 322 | assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last" ); |
| 323 | return of_depth(1)->locoff(); |
| 324 | } |
| 325 | |
| 326 | //-------------------------------debug_end------------------------------------- |
| 327 | uint JVMState::debug_end() const { |
| 328 | debug_only(JVMState* jvmroot = of_depth(1)); |
| 329 | assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last" ); |
| 330 | return endoff(); |
| 331 | } |
| 332 | |
| 333 | //------------------------------debug_depth------------------------------------ |
| 334 | uint JVMState::debug_depth() const { |
| 335 | uint total = 0; |
| 336 | for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) { |
| 337 | total += jvmp->debug_size(); |
| 338 | } |
| 339 | return total; |
| 340 | } |
| 341 | |
| 342 | #ifndef PRODUCT |
| 343 | |
| 344 | //------------------------------format_helper---------------------------------- |
| 345 | // Given a register allocation (a Chaitin object) and a Node, decide if the Node carries |
| 346 | // any defined value or not. If it does, print out the register or constant. |
| 347 | static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) { |
| 348 | if (n == NULL) { st->print(" NULL" ); return; } |
| 349 | if (n->is_SafePointScalarObject()) { |
| 350 | // Scalar replacement. |
| 351 | SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject(); |
| 352 | scobjs->append_if_missing(spobj); |
| 353 | int sco_n = scobjs->find(spobj); |
| 354 | assert(sco_n >= 0, "" ); |
| 355 | st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n); |
| 356 | return; |
| 357 | } |
| 358 | if (regalloc->node_regs_max_index() > 0 && |
| 359 | OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined |
| 360 | char buf[50]; |
| 361 | regalloc->dump_register(n,buf); |
| 362 | st->print(" %s%d]=%s" ,msg,i,buf); |
| 363 | } else { // No register, but might be constant |
| 364 | const Type *t = n->bottom_type(); |
| 365 | switch (t->base()) { |
| 366 | case Type::Int: |
| 367 | st->print(" %s%d]=#" INT32_FORMAT,msg,i,t->is_int()->get_con()); |
| 368 | break; |
| 369 | case Type::AnyPtr: |
| 370 | assert( t == TypePtr::NULL_PTR || n->in_dump(), "" ); |
| 371 | st->print(" %s%d]=#NULL" ,msg,i); |
| 372 | break; |
| 373 | case Type::AryPtr: |
| 374 | case Type::InstPtr: |
| 375 | st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->isa_oopptr()->const_oop())); |
| 376 | break; |
| 377 | case Type::KlassPtr: |
| 378 | st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_klassptr()->klass())); |
| 379 | break; |
| 380 | case Type::MetadataPtr: |
| 381 | st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_metadataptr()->metadata())); |
| 382 | break; |
| 383 | case Type::NarrowOop: |
| 384 | st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_oopptr()->const_oop())); |
| 385 | break; |
| 386 | case Type::RawPtr: |
| 387 | st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,p2i(t->is_rawptr())); |
| 388 | break; |
| 389 | case Type::DoubleCon: |
| 390 | st->print(" %s%d]=#%fD" ,msg,i,t->is_double_constant()->_d); |
| 391 | break; |
| 392 | case Type::FloatCon: |
| 393 | st->print(" %s%d]=#%fF" ,msg,i,t->is_float_constant()->_f); |
| 394 | break; |
| 395 | case Type::Long: |
| 396 | st->print(" %s%d]=#" INT64_FORMAT,msg,i,(int64_t)(t->is_long()->get_con())); |
| 397 | break; |
| 398 | case Type::Half: |
| 399 | case Type::Top: |
| 400 | st->print(" %s%d]=_" ,msg,i); |
| 401 | break; |
| 402 | default: ShouldNotReachHere(); |
| 403 | } |
| 404 | } |
| 405 | } |
| 406 | |
| 407 | //------------------------------format----------------------------------------- |
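| | // Print this frame's debug info (locals, expression stack, monitors and scalar-replaced |
| | // objects) for node n, then recurse into the caller's JVMState. |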
| 408 | void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const { |
| 409 | st->print(" #" ); |
| 410 | if (_method) { |
| 411 | _method->print_short_name(st); |
| 412 | st->print(" @ bci:%d " ,_bci); |
| 413 | } else { |
| 414 | st->print_cr(" runtime stub " ); |
| 415 | return; |
| 416 | } |
| 417 | if (n->is_MachSafePoint()) { |
| 418 | GrowableArray<SafePointScalarObjectNode*> scobjs; |
| 419 | MachSafePointNode *mcall = n->as_MachSafePoint(); |
| 420 | uint i; |
| 421 | // Print locals |
| 422 | for (i = 0; i < (uint)loc_size(); i++) |
| 423 | format_helper(regalloc, st, mcall->local(this, i), "L[" , i, &scobjs); |
| 424 | // Print stack |
| 425 | for (i = 0; i < (uint)stk_size(); i++) { |
| 426 | if ((uint)(_stkoff + i) >= mcall->len()) |
| 427 | st->print(" oob " ); |
| 428 | else |
| 429 | format_helper(regalloc, st, mcall->stack(this, i), "STK[" , i, &scobjs); |
| 430 | } |
| 431 | for (i = 0; (int)i < nof_monitors(); i++) { |
| 432 | Node *box = mcall->monitor_box(this, i); |
| 433 | Node *obj = mcall->monitor_obj(this, i); |
| 434 | if (regalloc->node_regs_max_index() > 0 && |
| 435 | OptoReg::is_valid(regalloc->get_reg_first(box))) { |
| 436 | box = BoxLockNode::box_node(box); |
| 437 | format_helper(regalloc, st, box, "MON-BOX[" , i, &scobjs); |
| 438 | } else { |
| 439 | OptoReg::Name box_reg = BoxLockNode::reg(box); |
| 440 | st->print(" MON-BOX%d=%s+%d" , |
| 441 | i, |
| 442 | OptoReg::regname(OptoReg::c_frame_pointer), |
| 443 | regalloc->reg2offset(box_reg)); |
| 444 | } |
| 445 | const char* obj_msg = "MON-OBJ[" ; |
| 446 | if (EliminateLocks) { |
| 447 | if (BoxLockNode::box_node(box)->is_eliminated()) |
| 448 | obj_msg = "MON-OBJ(LOCK ELIMINATED)[" ; |
| 449 | } |
| 450 | format_helper(regalloc, st, obj, obj_msg, i, &scobjs); |
| 451 | } |
| 452 | |
| 453 | for (i = 0; i < (uint)scobjs.length(); i++) { |
| 454 | // Scalar replaced objects. |
| 455 | st->cr(); |
| 456 | st->print(" # ScObj" INT32_FORMAT " " , i); |
| 457 | SafePointScalarObjectNode* spobj = scobjs.at(i); |
| 458 | ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass(); |
| 459 | assert(cik->is_instance_klass() || |
| 460 | cik->is_array_klass(), "Not supported allocation." ); |
| 461 | ciInstanceKlass *iklass = NULL; |
| 462 | if (cik->is_instance_klass()) { |
| 463 | cik->print_name_on(st); |
| 464 | iklass = cik->as_instance_klass(); |
| 465 | } else if (cik->is_type_array_klass()) { |
| 466 | cik->as_array_klass()->base_element_type()->print_name_on(st); |
| 467 | st->print("[%d]" , spobj->n_fields()); |
| 468 | } else if (cik->is_obj_array_klass()) { |
| 469 | ciKlass* cie = cik->as_obj_array_klass()->base_element_klass(); |
| 470 | if (cie->is_instance_klass()) { |
| 471 | cie->print_name_on(st); |
| 472 | } else if (cie->is_type_array_klass()) { |
| 473 | cie->as_array_klass()->base_element_type()->print_name_on(st); |
| 474 | } else { |
| 475 | ShouldNotReachHere(); |
| 476 | } |
| 477 | st->print("[%d]" , spobj->n_fields()); |
| 478 | int ndim = cik->as_array_klass()->dimension() - 1; |
| 479 | while (ndim-- > 0) { |
| 480 | st->print("[]" ); |
| 481 | } |
| 482 | } |
| 483 | st->print("={" ); |
| 484 | uint nf = spobj->n_fields(); |
| 485 | if (nf > 0) { |
| 486 | uint first_ind = spobj->first_index(mcall->jvms()); |
| 487 | Node* fld_node = mcall->in(first_ind); |
| 488 | ciField* cifield; |
| 489 | if (iklass != NULL) { |
| 490 | st->print(" [" ); |
| 491 | cifield = iklass->nonstatic_field_at(0); |
| 492 | cifield->print_name_on(st); |
| 493 | format_helper(regalloc, st, fld_node, ":" , 0, &scobjs); |
| 494 | } else { |
| 495 | format_helper(regalloc, st, fld_node, "[" , 0, &scobjs); |
| 496 | } |
| 497 | for (uint j = 1; j < nf; j++) { |
| 498 | fld_node = mcall->in(first_ind+j); |
| 499 | if (iklass != NULL) { |
| 500 | st->print(", [" ); |
| 501 | cifield = iklass->nonstatic_field_at(j); |
| 502 | cifield->print_name_on(st); |
| 503 | format_helper(regalloc, st, fld_node, ":" , j, &scobjs); |
| 504 | } else { |
| 505 | format_helper(regalloc, st, fld_node, ", [" , j, &scobjs); |
| 506 | } |
| 507 | } |
| 508 | } |
| 509 | st->print(" }" ); |
| 510 | } |
| 511 | } |
| 512 | st->cr(); |
| 513 | if (caller() != NULL) caller()->format(regalloc, n, st); |
| 514 | } |
| 515 | |
| 516 | |
| 517 | void JVMState::dump_spec(outputStream *st) const { |
| 518 | if (_method != NULL) { |
| 519 | bool printed = false; |
| 520 | if (!Verbose) { |
| 521 | // The JVMS dumps make really, really long lines. |
| 522 | // Take out the most boring parts, which are the package prefixes. |
| 523 | char buf[500]; |
| 524 | stringStream namest(buf, sizeof(buf)); |
| 525 | _method->print_short_name(&namest); |
| 526 | if (namest.count() < sizeof(buf)) { |
| 527 | const char* name = namest.base(); |
| 528 | if (name[0] == ' ') ++name; |
| 529 | const char* endcn = strchr(name, ':'); // end of class name |
| 530 | if (endcn == NULL) endcn = strchr(name, '('); |
| 531 | if (endcn == NULL) endcn = name + strlen(name); |
| 532 | while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/') |
| 533 | --endcn; |
| 534 | st->print(" %s" , endcn); |
| 535 | printed = true; |
| 536 | } |
| 537 | } |
| 538 | if (!printed) |
| 539 | _method->print_short_name(st); |
| 540 | st->print(" @ bci:%d" ,_bci); |
| 541 | if(_reexecute == Reexecute_True) |
| 542 | st->print(" reexecute" ); |
| 543 | } else { |
| 544 | st->print(" runtime stub" ); |
| 545 | } |
| 546 | if (caller() != NULL) caller()->dump_spec(st); |
| 547 | } |
| 548 | |
| 549 | |
| 550 | void JVMState::dump_on(outputStream* st) const { |
| 551 | bool print_map = _map && !((uintptr_t)_map & 1) && |
| 552 | ((caller() == NULL) || (caller()->map() != _map)); |
| 553 | if (print_map) { |
| 554 | if (_map->len() > _map->req()) { // _map->has_exceptions() |
| 555 | Node* ex = _map->in(_map->req()); // _map->next_exception() |
| 556 | // skip the first one; it's already being printed |
| 557 | while (ex != NULL && ex->len() > ex->req()) { |
| 558 | ex = ex->in(ex->req()); // ex->next_exception() |
| 559 | ex->dump(1); |
| 560 | } |
| 561 | } |
| 562 | _map->dump(Verbose ? 2 : 1); |
| 563 | } |
| 564 | if (caller() != NULL) { |
| 565 | caller()->dump_on(st); |
| 566 | } |
| 567 | st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=" , |
| 568 | depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true" :"false" ); |
| 569 | if (_method == NULL) { |
| 570 | st->print_cr("(none)" ); |
| 571 | } else { |
| 572 | _method->print_name(st); |
| 573 | st->cr(); |
| 574 | if (bci() >= 0 && bci() < _method->code_size()) { |
| 575 | st->print(" bc: " ); |
| 576 | _method->print_codes_on(bci(), bci()+1, st); |
| 577 | } |
| 578 | } |
| 579 | } |
| 580 | |
| 581 | // Extra way to dump a jvms from the debugger, |
| 582 | // to avoid a bug with C++ member function calls. |
| 583 | void dump_jvms(JVMState* jvms) { |
| 584 | jvms->dump(); |
| 585 | } |
| 586 | #endif |
| 587 | |
| 588 | //--------------------------clone_shallow-------------------------------------- |
| 589 | JVMState* JVMState::clone_shallow(Compile* C) const { |
| 590 | JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0); |
| 591 | n->set_bci(_bci); |
| 592 | n->_reexecute = _reexecute; |
| 593 | n->set_locoff(_locoff); |
| 594 | n->set_stkoff(_stkoff); |
| 595 | n->set_monoff(_monoff); |
| 596 | n->set_scloff(_scloff); |
| 597 | n->set_endoff(_endoff); |
| 598 | n->set_sp(_sp); |
| 599 | n->set_map(_map); |
| 600 | return n; |
| 601 | } |
| 602 | |
| 603 | //---------------------------clone_deep---------------------------------------- |
| 604 | JVMState* JVMState::clone_deep(Compile* C) const { |
| 605 | JVMState* n = clone_shallow(C); |
| 606 | for (JVMState* p = n; p->_caller != NULL; p = p->_caller) { |
| 607 | p->_caller = p->_caller->clone_shallow(C); |
| 608 | } |
| 609 | assert(n->depth() == depth(), "sanity" ); |
| 610 | assert(n->debug_depth() == debug_depth(), "sanity" ); |
| 611 | return n; |
| 612 | } |
| 613 | |
| 614 | /** |
| 615 | * Reset map for all callers |
| 616 | */ |
| 617 | void JVMState::set_map_deep(SafePointNode* map) { |
| 618 | for (JVMState* p = this; p->_caller != NULL; p = p->_caller) { |
| 619 | p->set_map(map); |
| 620 | } |
| 621 | } |
| 622 | |
| 623 | // Adapt offsets in the input array after adding or removing an edge. |
| 624 | // Prerequisite is that the JVMState is used by only one node. |
| 625 | void JVMState::adapt_position(int delta) { |
| 626 | for (JVMState* jvms = this; jvms != NULL; jvms = jvms->caller()) { |
| 627 | jvms->set_locoff(jvms->locoff() + delta); |
| 628 | jvms->set_stkoff(jvms->stkoff() + delta); |
| 629 | jvms->set_monoff(jvms->monoff() + delta); |
| 630 | jvms->set_scloff(jvms->scloff() + delta); |
| 631 | jvms->set_endoff(jvms->endoff() + delta); |
| 632 | } |
| 633 | } |
| 634 | |
| 635 | // Mirror the stack size calculation in the deopt code |
| 636 | // How much stack space would we need at this point in the program in |
| 637 | // case of deoptimization? |
| 638 | int JVMState::interpreter_frame_size() const { |
| 639 | const JVMState* jvms = this; |
| 640 | int size = 0; |
| 641 | int callee_parameters = 0; |
| 642 | int callee_locals = 0; |
| 643 | int extra_args = method()->max_stack() - stk_size(); |
| 644 | |
| 645 | while (jvms != NULL) { |
| 646 | int locks = jvms->nof_monitors(); |
| 647 | int temps = jvms->stk_size(); |
| 648 | bool is_top_frame = (jvms == this); |
| 649 | ciMethod* method = jvms->method(); |
| 650 | |
| 651 | int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(), |
| 652 | temps + callee_parameters, |
| 653 | extra_args, |
| 654 | locks, |
| 655 | callee_parameters, |
| 656 | callee_locals, |
| 657 | is_top_frame); |
| 658 | size += frame_size; |
| 659 | |
| 660 | callee_parameters = method->size_of_parameters(); |
| 661 | callee_locals = method->max_locals(); |
| 662 | extra_args = 0; |
| 663 | jvms = jvms->caller(); |
| 664 | } |
| 665 | return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord; |
| 666 | } |
| 667 | |
| 668 | //============================================================================= |
| 669 | bool CallNode::cmp( const Node &n ) const |
| 670 | { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; } |
| 671 | #ifndef PRODUCT |
| 672 | void CallNode::dump_req(outputStream *st) const { |
| 673 | // Dump the required inputs, enclosed in '(' and ')' |
| 674 | uint i; // Exit value of loop |
| 675 | for (i = 0; i < req(); i++) { // For all required inputs |
| 676 | if (i == TypeFunc::Parms) st->print("(" ); |
| 677 | if (in(i)) st->print("%c%d " , Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx); |
| 678 | else st->print("_ " ); |
| 679 | } |
| 680 | st->print(")" ); |
| 681 | } |
| 682 | |
| 683 | void CallNode::dump_spec(outputStream *st) const { |
| 684 | st->print(" " ); |
| 685 | if (tf() != NULL) tf()->dump_on(st); |
| 686 | if (_cnt != COUNT_UNKNOWN) st->print(" C=%f" ,_cnt); |
| 687 | if (jvms() != NULL) jvms()->dump_spec(st); |
| 688 | } |
| 689 | #endif |
| 690 | |
| 691 | const Type *CallNode::bottom_type() const { return tf()->range(); } |
| 692 | const Type* CallNode::Value(PhaseGVN* phase) const { |
| 693 | if (phase->type(in(0)) == Type::TOP) return Type::TOP; |
| 694 | return tf()->range(); |
| 695 | } |
| 696 | |
| 697 | //------------------------------calling_convention----------------------------- |
| 698 | void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { |
| 699 | // Use the standard compiler calling convention |
| 700 | Matcher::calling_convention( sig_bt, parm_regs, argcnt, true ); |
| 701 | } |
| 702 | |
| 703 | |
| 704 | //------------------------------match------------------------------------------ |
| 705 | // Construct projections for control, I/O, memory-fields, ..., and |
| 706 | // return result(s) along with their RegMask info |
| 707 | Node *CallNode::match( const ProjNode *proj, const Matcher *match ) { |
| 708 | switch (proj->_con) { |
| 709 | case TypeFunc::Control: |
| 710 | case TypeFunc::I_O: |
| 711 | case TypeFunc::Memory: |
| 712 | return new MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); |
| 713 | |
| 714 | case TypeFunc::Parms+1: // For LONG & DOUBLE returns |
| 715 | assert(tf()->range()->field_at(TypeFunc::Parms+1) == Type::HALF, "" ); |
| 716 | // 2nd half of doubles and longs |
| 717 | return new MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad); |
| 718 | |
| 719 | case TypeFunc::Parms: { // Normal returns |
| 720 | uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg(); |
| 721 | OptoRegPair regs = is_CallRuntime() |
| 722 | ? match->c_return_value(ideal_reg,true) // Calls into C runtime |
| 723 | : match-> return_value(ideal_reg,true); // Calls into compiled Java code |
| 724 | RegMask rm = RegMask(regs.first()); |
| 725 | if( OptoReg::is_valid(regs.second()) ) |
| 726 | rm.Insert( regs.second() ); |
| 727 | return new MachProjNode(this,proj->_con,rm,ideal_reg); |
| 728 | } |
| 729 | |
| 730 | case TypeFunc::ReturnAdr: |
| 731 | case TypeFunc::FramePtr: |
| 732 | default: |
| 733 | ShouldNotReachHere(); |
| 734 | } |
| 735 | return NULL; |
| 736 | } |
| 737 | |
| 738 | // Do we Match on this edge index or not? Match no edges |
| 739 | uint CallNode::match_edge(uint idx) const { |
| 740 | return 0; |
| 741 | } |
| 742 | |
| 743 | // |
| 744 | // Determine whether the call could modify the field of the specified |
| 745 | // instance at the specified offset. |
| 746 | // |
| 747 | bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { |
| 748 | assert((t_oop != NULL), "sanity" ); |
| 749 | if (is_call_to_arraycopystub() && strcmp(_name, "unsafe_arraycopy" ) != 0) { |
| 750 | const TypeTuple* args = _tf->domain(); |
| 751 | Node* dest = NULL; |
| 752 | // Stubs that can be called once an ArrayCopyNode is expanded have |
| 753 | // different signatures. Look for the second pointer argument, |
| 754 | // that is the destination of the copy. |
| 755 | for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) { |
| 756 | if (args->field_at(i)->isa_ptr()) { |
| 757 | j++; |
| 758 | if (j == 2) { |
| 759 | dest = in(i); |
| 760 | break; |
| 761 | } |
| 762 | } |
| 763 | } |
| 764 | guarantee(dest != NULL, "Call had only one ptr in, broken IR!" ); |
| 765 | if (!dest->is_top() && may_modify_arraycopy_helper(phase->type(dest)->is_oopptr(), t_oop, phase)) { |
| 766 | return true; |
| 767 | } |
| 768 | return false; |
| 769 | } |
| 770 | if (t_oop->is_known_instance()) { |
| 771 | // The instance_id is set only for scalar-replaceable allocations which |
| 772 | // are not passed as arguments according to Escape Analysis. |
| 773 | return false; |
| 774 | } |
| 775 | if (t_oop->is_ptr_to_boxed_value()) { |
| 776 | ciKlass* boxing_klass = t_oop->klass(); |
| 777 | if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) { |
| 778 | // Skip unrelated boxing methods. |
| 779 | Node* proj = proj_out_or_null(TypeFunc::Parms); |
| 780 | if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) { |
| 781 | return false; |
| 782 | } |
| 783 | } |
| 784 | if (is_CallJava() && as_CallJava()->method() != NULL) { |
| 785 | ciMethod* meth = as_CallJava()->method(); |
| 786 | if (meth->is_getter()) { |
| 787 | return false; |
| 788 | } |
| 789 | // May modify (by reflection) if a boxing object is passed |
| 790 | // as an argument or returned. |
| 791 | Node* proj = returns_pointer() ? proj_out_or_null(TypeFunc::Parms) : NULL; |
| 792 | if (proj != NULL) { |
| 793 | const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr(); |
| 794 | if ((inst_t != NULL) && (!inst_t->klass_is_exact() || |
| 795 | (inst_t->klass() == boxing_klass))) { |
| 796 | return true; |
| 797 | } |
| 798 | } |
| 799 | const TypeTuple* d = tf()->domain(); |
| 800 | for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { |
| 801 | const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr(); |
| 802 | if ((inst_t != NULL) && (!inst_t->klass_is_exact() || |
| 803 | (inst_t->klass() == boxing_klass))) { |
| 804 | return true; |
| 805 | } |
| 806 | } |
| 807 | return false; |
| 808 | } |
| 809 | } |
| 810 | return true; |
| 811 | } |
| 812 | |
| 813 | // Does this call have a direct reference to n other than debug information? |
| 814 | bool CallNode::has_non_debug_use(Node *n) { |
| 815 | const TypeTuple * d = tf()->domain(); |
| 816 | for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { |
| 817 | Node *arg = in(i); |
| 818 | if (arg == n) { |
| 819 | return true; |
| 820 | } |
| 821 | } |
| 822 | return false; |
| 823 | } |
| 824 | |
| 825 | // Returns the unique CheckCastPP of a call |
| 826 | // or 'this' if there are several CheckCastPPs or unexpected uses, |
| 827 | // or NULL if there is none. |
| 828 | Node *CallNode::result_cast() { |
| 829 | Node *cast = NULL; |
| 830 | |
| 831 | Node *p = proj_out_or_null(TypeFunc::Parms); |
| 832 | if (p == NULL) |
| 833 | return NULL; |
| 834 | |
| 835 | for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) { |
| 836 | Node *use = p->fast_out(i); |
| 837 | if (use->is_CheckCastPP()) { |
| 838 | if (cast != NULL) { |
| 839 | return this; // more than 1 CheckCastPP |
| 840 | } |
| 841 | cast = use; |
| 842 | } else if (!use->is_Initialize() && |
| 843 | !use->is_AddP() && |
| 844 | use->Opcode() != Op_MemBarStoreStore) { |
| 845 | // Expected uses are restricted to a CheckCastPP, an Initialize |
| 846 | // node, a MemBarStoreStore (clone) and AddP nodes. If we |
| 847 | // encounter any other use (a Phi node can be seen in rare |
| 848 | // cases) return this to prevent incorrect optimizations. |
| 849 | return this; |
| 850 | } |
| 851 | } |
| 852 | return cast; |
| 853 | } |
| 854 | |
| 855 | |
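| | // Collect this call's projections (fall-through and catch-all control, I/O and memory |
| | // projections, plus the result projection and exception oop, if present) into 'projs'. |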
| 856 | void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj, bool do_asserts) { |
| 857 | projs->fallthrough_proj = NULL; |
| 858 | projs->fallthrough_catchproj = NULL; |
| 859 | projs->fallthrough_ioproj = NULL; |
| 860 | projs->catchall_ioproj = NULL; |
| 861 | projs->catchall_catchproj = NULL; |
| 862 | projs->fallthrough_memproj = NULL; |
| 863 | projs->catchall_memproj = NULL; |
| 864 | projs->resproj = NULL; |
| 865 | projs->exobj = NULL; |
| 866 | |
| 867 | for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { |
| 868 | ProjNode *pn = fast_out(i)->as_Proj(); |
| 869 | if (pn->outcnt() == 0) continue; |
| 870 | switch (pn->_con) { |
| 871 | case TypeFunc::Control: |
| 872 | { |
| 873 | // For Control (fallthrough) and I_O (catch_all_index) the chain is Proj -> Catch -> CatchProj |
| 874 | projs->fallthrough_proj = pn; |
| 875 | DUIterator_Fast jmax, j = pn->fast_outs(jmax); |
| 876 | const Node *cn = pn->fast_out(j); |
| 877 | if (cn->is_Catch()) { |
| 878 | ProjNode *cpn = NULL; |
| 879 | for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) { |
| 880 | cpn = cn->fast_out(k)->as_Proj(); |
| 881 | assert(cpn->is_CatchProj(), "must be a CatchProjNode" ); |
| 882 | if (cpn->_con == CatchProjNode::fall_through_index) |
| 883 | projs->fallthrough_catchproj = cpn; |
| 884 | else { |
| 885 | assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index." ); |
| 886 | projs->catchall_catchproj = cpn; |
| 887 | } |
| 888 | } |
| 889 | } |
| 890 | break; |
| 891 | } |
| 892 | case TypeFunc::I_O: |
| 893 | if (pn->_is_io_use) |
| 894 | projs->catchall_ioproj = pn; |
| 895 | else |
| 896 | projs->fallthrough_ioproj = pn; |
| 897 | for (DUIterator j = pn->outs(); pn->has_out(j); j++) { |
| 898 | Node* e = pn->out(j); |
| 899 | if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) { |
| 900 | assert(projs->exobj == NULL, "only one" ); |
| 901 | projs->exobj = e; |
| 902 | } |
| 903 | } |
| 904 | break; |
| 905 | case TypeFunc::Memory: |
| 906 | if (pn->_is_io_use) |
| 907 | projs->catchall_memproj = pn; |
| 908 | else |
| 909 | projs->fallthrough_memproj = pn; |
| 910 | break; |
| 911 | case TypeFunc::Parms: |
| 912 | projs->resproj = pn; |
| 913 | break; |
| 914 | default: |
| 915 | assert(false, "unexpected projection from allocation node." ); |
| 916 | } |
| 917 | } |
| 918 | |
| 919 | // The resproj may not exist because the result could be ignored |
| 920 | // and the exception object may not exist if an exception handler |
| 921 | // swallows the exception, but all the others must exist and be found. |
| 922 | assert(projs->fallthrough_proj != NULL, "must be found" ); |
| 923 | do_asserts = do_asserts && !Compile::current()->inlining_incrementally(); |
| 924 | assert(!do_asserts || projs->fallthrough_catchproj != NULL, "must be found" ); |
| 925 | assert(!do_asserts || projs->fallthrough_memproj != NULL, "must be found" ); |
| 926 | assert(!do_asserts || projs->fallthrough_ioproj != NULL, "must be found" ); |
| 927 | assert(!do_asserts || projs->catchall_catchproj != NULL, "must be found" ); |
| 928 | if (separate_io_proj) { |
| 929 | assert(!do_asserts || projs->catchall_memproj != NULL, "must be found" ); |
| 930 | assert(!do_asserts || projs->catchall_ioproj != NULL, "must be found" ); |
| 931 | } |
| 932 | } |
| 933 | |
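| | // If a late-inline method handle call sees its target become a constant during IGVN, |
| | // queue its call generator for another incremental inlining attempt. |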
| 934 | Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
| 935 | CallGenerator* cg = generator(); |
| 936 | if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) { |
| 937 | // Check whether this method handle call becomes a candidate for inlining |
| 938 | ciMethod* callee = cg->method(); |
| 939 | vmIntrinsics::ID iid = callee->intrinsic_id(); |
| 940 | if (iid == vmIntrinsics::_invokeBasic) { |
| 941 | if (in(TypeFunc::Parms)->Opcode() == Op_ConP) { |
| 942 | phase->C->prepend_late_inline(cg); |
| 943 | set_generator(NULL); |
| 944 | } |
| 945 | } else { |
| 946 | assert(callee->has_member_arg(), "wrong type of call?" ); |
| 947 | if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) { |
| 948 | phase->C->prepend_late_inline(cg); |
| 949 | set_generator(NULL); |
| 950 | } |
| 951 | } |
| 952 | } |
| 953 | return SafePointNode::Ideal(phase, can_reshape); |
| 954 | } |
| 955 | |
| 956 | bool CallNode::is_call_to_arraycopystub() const { |
| 957 | if (_name != NULL && strstr(_name, "arraycopy" ) != 0) { |
| 958 | return true; |
| 959 | } |
| 960 | return false; |
| 961 | } |
| 962 | |
| 963 | //============================================================================= |
| 964 | uint CallJavaNode::size_of() const { return sizeof(*this); } |
| 965 | bool CallJavaNode::cmp( const Node &n ) const { |
| 966 | CallJavaNode &call = (CallJavaNode&)n; |
| 967 | return CallNode::cmp(call) && _method == call._method && |
| 968 | _override_symbolic_info == call._override_symbolic_info; |
| 969 | } |
| 970 | #ifdef ASSERT |
| 971 | bool CallJavaNode::validate_symbolic_info() const { |
| 972 | if (method() == NULL) { |
| 973 | return true; // call into runtime or uncommon trap |
| 974 | } |
| 975 | ciMethod* symbolic_info = jvms()->method()->get_method_at_bci(_bci); |
| 976 | ciMethod* callee = method(); |
| 977 | if (symbolic_info->is_method_handle_intrinsic() && !callee->is_method_handle_intrinsic()) { |
| 978 | assert(override_symbolic_info(), "should be set" ); |
| 979 | } |
| 980 | assert(ciMethod::is_consistent_info(symbolic_info, callee), "inconsistent info" ); |
| 981 | return true; |
| 982 | } |
| 983 | #endif |
| 984 | |
| 985 | #ifndef PRODUCT |
| 986 | void CallJavaNode::dump_spec(outputStream *st) const { |
| 987 | if( _method ) _method->print_short_name(st); |
| 988 | CallNode::dump_spec(st); |
| 989 | } |
| 990 | |
| 991 | void CallJavaNode::dump_compact_spec(outputStream* st) const { |
| 992 | if (_method) { |
| 993 | _method->print_short_name(st); |
| 994 | } else { |
| 995 | st->print("<?>" ); |
| 996 | } |
| 997 | } |
| 998 | #endif |
| 999 | |
| 1000 | //============================================================================= |
| 1001 | uint CallStaticJavaNode::size_of() const { return sizeof(*this); } |
| 1002 | bool CallStaticJavaNode::cmp( const Node &n ) const { |
| 1003 | CallStaticJavaNode &call = (CallStaticJavaNode&)n; |
| 1004 | return CallJavaNode::cmp(call); |
| 1005 | } |
| 1006 | |
| 1007 | //----------------------------uncommon_trap_request---------------------------- |
| 1008 | // If this is an uncommon trap, return the request code, else zero. |
| 1009 | int CallStaticJavaNode::uncommon_trap_request() const { |
| 1010 | if (_name != NULL && !strcmp(_name, "uncommon_trap" )) { |
| 1011 | return extract_uncommon_trap_request(this); |
| 1012 | } |
| 1013 | return 0; |
| 1014 | } |
| 1015 | int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) { |
| 1016 | #ifndef PRODUCT |
| 1017 | if (!(call->req() > TypeFunc::Parms && |
| 1018 | call->in(TypeFunc::Parms) != NULL && |
| 1019 | call->in(TypeFunc::Parms)->is_Con() && |
| 1020 | call->in(TypeFunc::Parms)->bottom_type()->isa_int())) { |
| 1021 | assert(in_dump() != 0, "OK if dumping" ); |
| 1022 | tty->print("[bad uncommon trap]" ); |
| 1023 | return 0; |
| 1024 | } |
| 1025 | #endif |
| 1026 | return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con(); |
| 1027 | } |
| 1028 | |
| 1029 | #ifndef PRODUCT |
| 1030 | void CallStaticJavaNode::dump_spec(outputStream *st) const { |
| 1031 | st->print("# Static " ); |
| 1032 | if (_name != NULL) { |
| 1033 | st->print("%s" , _name); |
| 1034 | int trap_req = uncommon_trap_request(); |
| 1035 | if (trap_req != 0) { |
| 1036 | char buf[100]; |
| 1037 | st->print("(%s)" , |
| 1038 | Deoptimization::format_trap_request(buf, sizeof(buf), |
| 1039 | trap_req)); |
| 1040 | } |
| 1041 | st->print(" " ); |
| 1042 | } |
| 1043 | CallJavaNode::dump_spec(st); |
| 1044 | } |
| 1045 | |
| 1046 | void CallStaticJavaNode::dump_compact_spec(outputStream* st) const { |
| 1047 | if (_method) { |
| 1048 | _method->print_short_name(st); |
| 1049 | } else if (_name) { |
| 1050 | st->print("%s" , _name); |
| 1051 | } else { |
| 1052 | st->print("<?>" ); |
| 1053 | } |
| 1054 | } |
| 1055 | #endif |
| 1056 | |
| 1057 | //============================================================================= |
| 1058 | uint CallDynamicJavaNode::size_of() const { return sizeof(*this); } |
| 1059 | bool CallDynamicJavaNode::cmp( const Node &n ) const { |
| 1060 | CallDynamicJavaNode &call = (CallDynamicJavaNode&)n; |
| 1061 | return CallJavaNode::cmp(call); |
| 1062 | } |
| 1063 | #ifndef PRODUCT |
| 1064 | void CallDynamicJavaNode::dump_spec(outputStream *st) const { |
| 1065 | st->print("# Dynamic " ); |
| 1066 | CallJavaNode::dump_spec(st); |
| 1067 | } |
| 1068 | #endif |
| 1069 | |
| 1070 | //============================================================================= |
| 1071 | uint CallRuntimeNode::size_of() const { return sizeof(*this); } |
| 1072 | bool CallRuntimeNode::cmp( const Node &n ) const { |
| 1073 | CallRuntimeNode &call = (CallRuntimeNode&)n; |
| 1074 | return CallNode::cmp(call) && !strcmp(_name,call._name); |
| 1075 | } |
| 1076 | #ifndef PRODUCT |
| 1077 | void CallRuntimeNode::dump_spec(outputStream *st) const { |
| 1078 | st->print("# " ); |
| 1079 | st->print("%s" , _name); |
| 1080 | CallNode::dump_spec(st); |
| 1081 | } |
| 1082 | #endif |
| 1083 | |
| 1084 | //------------------------------calling_convention----------------------------- |
| 1085 | void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { |
| 1086 | Matcher::c_calling_convention( sig_bt, parm_regs, argcnt ); |
| 1087 | } |
| 1088 | |
| 1089 | //============================================================================= |
| 1090 | //------------------------------calling_convention----------------------------- |
| 1091 | |
| 1092 | |
| 1093 | //============================================================================= |
| 1094 | #ifndef PRODUCT |
| 1095 | void CallLeafNode::dump_spec(outputStream *st) const { |
| 1096 | st->print("# " ); |
| 1097 | st->print("%s" , _name); |
| 1098 | CallNode::dump_spec(st); |
| 1099 | } |
| 1100 | #endif |
| 1101 | |
| 1102 | //============================================================================= |
| 1103 | |
| 1104 | void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) { |
| 1105 | assert(verify_jvms(jvms), "jvms must match" ); |
| 1106 | int loc = jvms->locoff() + idx; |
| 1107 | if (in(loc)->is_top() && idx > 0 && !c->is_top() ) { |
| 1108 | // If current local idx is top then local idx - 1 could |
| 1109 | // be a long/double that needs to be killed since top could |
| 1110 | // represent the 2nd half of the long/double. |
| 1111 | uint ideal = in(loc -1)->ideal_reg(); |
| 1112 | if (ideal == Op_RegD || ideal == Op_RegL) { |
| 1113 | // set other (low index) half to top |
| 1114 | set_req(loc - 1, in(loc)); |
| 1115 | } |
| 1116 | } |
| 1117 | set_req(loc, c); |
| 1118 | } |
| 1119 | |
| 1120 | uint SafePointNode::size_of() const { return sizeof(*this); } |
| 1121 | bool SafePointNode::cmp( const Node &n ) const { |
| 1122 | return (&n == this); // Always fail except on self |
| 1123 | } |
| 1124 | |
| 1125 | //-------------------------set_next_exception---------------------------------- |
| 1126 | void SafePointNode::set_next_exception(SafePointNode* n) { |
| 1127 | assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception" ); |
| 1128 | if (len() == req()) { |
| 1129 | if (n != NULL) add_prec(n); |
| 1130 | } else { |
| 1131 | set_prec(req(), n); |
| 1132 | } |
| 1133 | } |
| 1134 | |
| 1135 | |
| 1136 | //----------------------------next_exception----------------------------------- |
| 1137 | SafePointNode* SafePointNode::next_exception() const { |
| 1138 | if (len() == req()) { |
| 1139 | return NULL; |
| 1140 | } else { |
| 1141 | Node* n = in(req()); |
| 1142 | assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges" ); |
| 1143 | return (SafePointNode*) n; |
| 1144 | } |
| 1145 | } |
| 1146 | |
| 1147 | |
| 1148 | //------------------------------Ideal------------------------------------------ |
| 1149 | // Skip over any collapsed Regions |
| 1150 | Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
| 1151 | return remove_dead_region(phase, can_reshape) ? this : NULL; |
| 1152 | } |
| 1153 | |
| 1154 | //------------------------------Identity--------------------------------------- |
| 1155 | // Remove obviously duplicate safepoints |
| 1156 | Node* SafePointNode::Identity(PhaseGVN* phase) { |
| 1157 | |
| 1158 | // If you have back to back safepoints, remove one |
| 1159 | if( in(TypeFunc::Control)->is_SafePoint() ) |
| 1160 | return in(TypeFunc::Control); |
| 1161 | |
| 1162 | if( in(0)->is_Proj() ) { |
| 1163 | Node *n0 = in(0)->in(0); |
| 1164 | // Check if it is a call projection (except Leaf Call) |
| 1165 | if( n0->is_Catch() ) { |
| 1166 | n0 = n0->in(0)->in(0); |
| 1167 | assert( n0->is_Call(), "expect a call here" ); |
| 1168 | } |
| 1169 | if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) { |
| 1170 | // Don't remove a safepoint belonging to an OuterStripMinedLoopEndNode. |
| 1171 | // If the loop dies, they will be removed together. |
| 1172 | if (has_out_with(Op_OuterStripMinedLoopEnd)) { |
| 1173 | return this; |
| 1174 | } |
| 1175 | // Useless Safepoint, so remove it |
| 1176 | return in(TypeFunc::Control); |
| 1177 | } |
| 1178 | } |
| 1179 | |
| 1180 | return this; |
| 1181 | } |
| 1182 | |
| 1183 | //------------------------------Value------------------------------------------ |
| 1184 | const Type* SafePointNode::Value(PhaseGVN* phase) const { |
| 1185 | if( phase->type(in(0)) == Type::TOP ) return Type::TOP; |
| 1186 | if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop |
| 1187 | return Type::CONTROL; |
| 1188 | } |
| 1189 | |
| 1190 | #ifndef PRODUCT |
| 1191 | void SafePointNode::dump_spec(outputStream *st) const { |
| 1192 | st->print(" SafePoint " ); |
| 1193 | _replaced_nodes.dump(st); |
| 1194 | } |
| 1195 | |
| 1196 | // The related nodes of a SafepointNode are all data inputs, excluding the |
| 1197 | // control boundary, as well as all outputs till level 2 (to include projection |
| 1198 | // nodes and targets). In compact mode, just include inputs till level 1 and |
| 1199 | // outputs as before. |
| 1200 | void SafePointNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const { |
| 1201 | if (compact) { |
| 1202 | this->collect_nodes(in_rel, 1, false, false); |
| 1203 | } else { |
| 1204 | this->collect_nodes_in_all_data(in_rel, false); |
| 1205 | } |
| 1206 | this->collect_nodes(out_rel, -2, false, false); |
| 1207 | } |
| 1208 | #endif |
| 1209 | |
| 1210 | const RegMask &SafePointNode::in_RegMask(uint idx) const { |
| 1211 | if( idx < TypeFunc::Parms ) return RegMask::Empty; |
| 1212 | // Values outside the domain represent debug info |
| 1213 | return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]); |
| 1214 | } |
| 1215 | const RegMask &SafePointNode::out_RegMask() const { |
| 1216 | return RegMask::Empty; |
| 1217 | } |
| 1218 | |
| 1219 | |
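| | // Grow the expression stack area of this safepoint by 'grow_by' slots: insert top values |
| | // in front of the monitor section and shift the monitor, scalar-object and end offsets. |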
| 1220 | void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) { |
| 1221 | assert((int)grow_by > 0, "sanity" ); |
| 1222 | int monoff = jvms->monoff(); |
| 1223 | int scloff = jvms->scloff(); |
| 1224 | int endoff = jvms->endoff(); |
| 1225 | assert(endoff == (int)req(), "no other states or debug info after me" ); |
| 1226 | Node* top = Compile::current()->top(); |
| 1227 | for (uint i = 0; i < grow_by; i++) { |
| 1228 | ins_req(monoff, top); |
| 1229 | } |
| 1230 | jvms->set_monoff(monoff + grow_by); |
| 1231 | jvms->set_scloff(scloff + grow_by); |
| 1232 | jvms->set_endoff(endoff + grow_by); |
| 1233 | } |
| 1234 | |
| 1235 | void SafePointNode::push_monitor(const FastLockNode *lock) { |
| 1236 | // Add a LockNode, which points to both the original BoxLockNode (the |
| 1237 | // stack space for the monitor) and the Object being locked. |
| 1238 | const int MonitorEdges = 2; |
| 1239 | assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges" ); |
| 1240 | assert(req() == jvms()->endoff(), "correct sizing" ); |
| 1241 | int nextmon = jvms()->scloff(); |
| 1242 | if (GenerateSynchronizationCode) { |
| 1243 | ins_req(nextmon, lock->box_node()); |
| 1244 | ins_req(nextmon+1, lock->obj_node()); |
| 1245 | } else { |
| 1246 | Node* top = Compile::current()->top(); |
| 1247 | ins_req(nextmon, top); |
| 1248 | ins_req(nextmon, top); |
| 1249 | } |
| 1250 | jvms()->set_scloff(nextmon + MonitorEdges); |
| 1251 | jvms()->set_endoff(req()); |
| 1252 | } |
| 1253 | |
| 1254 | void SafePointNode::pop_monitor() { |
| 1255 | // Delete last monitor from debug info |
| 1256 | debug_only(int num_before_pop = jvms()->nof_monitors()); |
| 1257 | const int MonitorEdges = 2; |
| 1258 | assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges" ); |
| 1259 | int scloff = jvms()->scloff(); |
| 1260 | int endoff = jvms()->endoff(); |
| 1261 | int new_scloff = scloff - MonitorEdges; |
| 1262 | int new_endoff = endoff - MonitorEdges; |
| 1263 | jvms()->set_scloff(new_scloff); |
| 1264 | jvms()->set_endoff(new_endoff); |
| 1265 | while (scloff > new_scloff) del_req_ordered(--scloff); |
| 1266 | assert(jvms()->nof_monitors() == num_before_pop-1, "" ); |
| 1267 | } |
| 1268 | |
| 1269 | Node *SafePointNode::peek_monitor_box() const { |
| 1270 | int mon = jvms()->nof_monitors() - 1; |
| 1271 | assert(mon >= 0, "must have a monitor" ); |
| 1272 | return monitor_box(jvms(), mon); |
| 1273 | } |
| 1274 | |
| 1275 | Node *SafePointNode::peek_monitor_obj() const { |
| 1276 | int mon = jvms()->nof_monitors() - 1; |
| 1277 | assert(mon >= 0, "must have a monitor" ); |
| 1278 | return monitor_obj(jvms(), mon); |
| 1279 | } |
| 1280 | |
| 1281 | // Do we Match on this edge index or not? Match no edges |
| 1282 | uint SafePointNode::match_edge(uint idx) const { |
| 1283 | if( !needs_polling_address_input() ) |
| 1284 | return 0; |
| 1285 | |
| 1286 | return (TypeFunc::Parms == idx); |
| 1287 | } |
| 1288 | |
| 1289 | void SafePointNode::disconnect_from_root(PhaseIterGVN *igvn) { |
| 1290 | assert(Opcode() == Op_SafePoint, "only value for safepoint in loops" ); |
| 1291 | int nb = igvn->C->root()->find_prec_edge(this); |
| 1292 | if (nb != -1) { |
| 1293 | igvn->C->root()->rm_prec(nb); |
| 1294 | } |
| 1295 | } |
| 1296 | |
| 1297 | //============== SafePointScalarObjectNode ============== |
| 1298 | |
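| | // A SafePointScalarObjectNode describes a scalar-replaced (eliminated) allocation in a |
| | // safepoint's debug info: _first_index is the index of its first field value among the |
| | // SafePointNode's inputs and _n_fields is the number of recorded fields. |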
| 1299 | SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, |
| 1300 | #ifdef ASSERT |
| 1301 | AllocateNode* alloc, |
| 1302 | #endif |
| 1303 | uint first_index, |
| 1304 | uint n_fields) : |
| 1305 | TypeNode(tp, 1), // 1 control input -- seems required. Get from root. |
| 1306 | _first_index(first_index), |
| 1307 | _n_fields(n_fields) |
| 1308 | #ifdef ASSERT |
| 1309 | , _alloc(alloc) |
| 1310 | #endif |
| 1311 | { |
| 1312 | init_class_id(Class_SafePointScalarObject); |
| 1313 | } |
| 1314 | |
| 1315 | // Do not allow value-numbering for SafePointScalarObject node. |
| 1316 | uint SafePointScalarObjectNode::hash() const { return NO_HASH; } |
| 1317 | bool SafePointScalarObjectNode::cmp( const Node &n ) const { |
| 1318 | return (&n == this); // Always fail except on self |
| 1319 | } |
| 1320 | |
| 1321 | uint SafePointScalarObjectNode::ideal_reg() const { |
| 1322 | return 0; // No matching to machine instruction |
| 1323 | } |
| 1324 | |
| 1325 | const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const { |
| 1326 | return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]); |
| 1327 | } |
| 1328 | |
| 1329 | const RegMask &SafePointScalarObjectNode::out_RegMask() const { |
| 1330 | return RegMask::Empty; |
| 1331 | } |
| 1332 | |
| 1333 | uint SafePointScalarObjectNode::match_edge(uint idx) const { |
| 1334 | return 0; |
| 1335 | } |
| 1336 | |
| 1337 | SafePointScalarObjectNode* |
| 1338 | SafePointScalarObjectNode::clone(Dict* sosn_map) const { |
| 1339 | void* cached = (*sosn_map)[(void*)this]; |
| 1340 | if (cached != NULL) { |
| 1341 | return (SafePointScalarObjectNode*)cached; |
| 1342 | } |
| 1343 | SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone(); |
| 1344 | sosn_map->Insert((void*)this, (void*)res); |
| 1345 | return res; |
| 1346 | } |
| 1347 | |
| 1348 | |
| 1349 | #ifndef PRODUCT |
| 1350 | void SafePointScalarObjectNode::dump_spec(outputStream *st) const { |
| 1351 | st->print(" # fields@[%d..%d]" , first_index(), |
| 1352 | first_index() + n_fields() - 1); |
| 1353 | } |
| 1354 | |
| 1355 | #endif |
| 1356 | |
| 1357 | //============================================================================= |
| 1358 | uint AllocateNode::size_of() const { return sizeof(*this); } |
| 1359 | |
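| | // AllocateNode is a macro node: it carries the allocation size, klass and initial-test |
| | // inputs and is expanded into fast/slow allocation paths during macro expansion. |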
| 1360 | AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype, |
| 1361 | Node *ctrl, Node *mem, Node *abio, |
| 1362 | Node *size, Node *klass_node, Node *initial_test) |
| 1363 | : CallNode(atype, NULL, TypeRawPtr::BOTTOM) |
| 1364 | { |
| 1365 | init_class_id(Class_Allocate); |
| 1366 | init_flags(Flag_is_macro); |
| 1367 | _is_scalar_replaceable = false; |
| 1368 | _is_non_escaping = false; |
| 1369 | _is_allocation_MemBar_redundant = false; |
| 1370 | Node *topnode = C->top(); |
| 1371 | |
| 1372 | init_req( TypeFunc::Control , ctrl ); |
| 1373 | init_req( TypeFunc::I_O , abio ); |
| 1374 | init_req( TypeFunc::Memory , mem ); |
| 1375 | init_req( TypeFunc::ReturnAdr, topnode ); |
| 1376 | init_req( TypeFunc::FramePtr , topnode ); |
| 1377 | init_req( AllocSize , size); |
| 1378 | init_req( KlassNode , klass_node); |
| 1379 | init_req( InitialTest , initial_test); |
| 1380 | init_req( ALength , topnode); |
| 1381 | C->add_macro_node(this); |
| 1382 | } |
| 1383 | |
| 1384 | void AllocateNode::compute_MemBar_redundancy(ciMethod* initializer) |
| 1385 | { |
| 1386 | assert(initializer != NULL && |
| 1387 | initializer->is_initializer() && |
| 1388 | !initializer->is_static(), |
| 1389 | "unexpected initializer method" ); |
| 1390 | BCEscapeAnalyzer* analyzer = initializer->get_bcea(); |
| 1391 | if (analyzer == NULL) { |
| 1392 | return; |
| 1393 | } |
| 1394 | |
| 1395 | // The allocated object is the first parameter (receiver) of its initializer
| 1396 | if (analyzer->is_arg_stack(0) || analyzer->is_arg_local(0)) { |
| 1397 | _is_allocation_MemBar_redundant = true; |
| 1398 | } |
| 1399 | } |
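|      | 
|      | // A rough illustration of the test above (the class is made up for this
|      | // comment): given
|      | //
|      | //   class Point {
|      | //     int x, y;
|      | //     Point(int px, int py) { x = px; y = py; }   // 'this' never escapes
|      | //   }
|      | //
|      | // the escape analyzer reports the receiver (argument 0) as stack/local, so
|      | // the memory barrier emitted after initializing a "new Point(...)" is
|      | // redundant and can be elided.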
| 1400 | |
| 1401 | //============================================================================= |
| 1402 | Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
| 1403 | if (remove_dead_region(phase, can_reshape)) return this; |
| 1404 | // Don't bother trying to transform a dead node |
| 1405 | if (in(0) && in(0)->is_top()) return NULL; |
| 1406 | |
| 1407 | const Type* type = phase->type(Ideal_length()); |
| 1408 | if (type->isa_int() && type->is_int()->_hi < 0) { |
| 1409 | if (can_reshape) { |
| 1410 | PhaseIterGVN *igvn = phase->is_IterGVN(); |
| 1411 | // Unreachable fall-through path (negative array length):
| 1412 | // the allocation can only throw, so disconnect it.
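|      | // (For example, "new int[n]" with a provably negative n always throws
|      | // NegativeArraySizeException, so the normal fall-through projection of the
|      | // allocation's Catch is unreachable.)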
| 1413 | Node* proj = proj_out_or_null(TypeFunc::Control); |
| 1414 | Node* catchproj = NULL; |
| 1415 | if (proj != NULL) { |
| 1416 | for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) { |
| 1417 | Node *cn = proj->fast_out(i); |
| 1418 | if (cn->is_Catch()) { |
| 1419 | catchproj = cn->as_Multi()->proj_out_or_null(CatchProjNode::fall_through_index); |
| 1420 | break; |
| 1421 | } |
| 1422 | } |
| 1423 | } |
| 1424 | if (catchproj != NULL && catchproj->outcnt() > 0 && |
| 1425 | (catchproj->outcnt() > 1 || |
| 1426 | catchproj->unique_out()->Opcode() != Op_Halt)) { |
| 1427 | assert(catchproj->is_CatchProj(), "must be a CatchProjNode" ); |
| 1428 | Node* nproj = catchproj->clone(); |
| 1429 | igvn->register_new_node_with_optimizer(nproj); |
| 1430 | |
| 1431 | Node *frame = new ParmNode( phase->C->start(), TypeFunc::FramePtr ); |
| 1432 | frame = phase->transform(frame); |
| 1433 | // Halt & Catch Fire |
| 1434 | Node *halt = new HaltNode( nproj, frame ); |
| 1435 | phase->C->root()->add_req(halt); |
| 1436 | phase->transform(halt); |
| 1437 | |
| 1438 | igvn->replace_node(catchproj, phase->C->top()); |
| 1439 | return this; |
| 1440 | } |
| 1441 | } else { |
| 1442 | // Can't correct it during regular GVN so register for IGVN |
| 1443 | phase->C->record_for_igvn(this); |
| 1444 | } |
| 1445 | } |
| 1446 | return NULL; |
| 1447 | } |
| 1448 | |
| 1449 | // Retrieve the length from the AllocateArrayNode. Narrow the type with a |
| 1450 | // CastII, if appropriate. If we are not allowed to create new nodes, and |
| 1451 | // a CastII is appropriate, return NULL. |
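|      | // For example (informal sketch): for "new byte[n]" the raw type of n may be
|      | // the full int range, but any successfully initialized array has a length in
|      | // [0, maximum array length], so narrow_size_type() can tighten the type and
|      | // the CastII pins that narrowed type below the initialization control.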
| 1452 | Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) { |
| 1453 | Node *length = in(AllocateNode::ALength); |
| 1454 | assert(length != NULL, "length is not null" ); |
| 1455 | |
| 1456 | const TypeInt* length_type = phase->find_int_type(length); |
| 1457 | const TypeAryPtr* ary_type = oop_type->isa_aryptr(); |
| 1458 | |
| 1459 | if (ary_type != NULL && length_type != NULL) { |
| 1460 | const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type); |
| 1461 | if (narrow_length_type != length_type) { |
| 1462 | // Assert one of: |
| 1463 | // - the narrow_length is 0 |
| 1464 | // - the narrow_length is not wider than length |
| 1465 | assert(narrow_length_type == TypeInt::ZERO || |
| 1466 | length_type->is_con() && narrow_length_type->is_con() && |
| 1467 | (narrow_length_type->_hi <= length_type->_lo) || |
| 1468 | (narrow_length_type->_hi <= length_type->_hi && |
| 1469 | narrow_length_type->_lo >= length_type->_lo), |
| 1470 | "narrow type must be narrower than length type" ); |
| 1471 | |
| 1472 | // Return NULL if new nodes are not allowed |
| 1473 | if (!allow_new_nodes) return NULL; |
| 1474 | // Create a cast which is control dependent on the initialization to |
| 1475 | // propagate the fact that the array length must be non-negative.
| 1476 | InitializeNode* init = initialization(); |
| 1477 | assert(init != NULL, "initialization not found" ); |
| 1478 | length = new CastIINode(length, narrow_length_type); |
| 1479 | length->set_req(0, init->proj_out_or_null(0)); |
| 1480 | } |
| 1481 | } |
| 1482 | |
| 1483 | return length; |
| 1484 | } |
| 1485 | |
| 1486 | //============================================================================= |
| 1487 | uint LockNode::size_of() const { return sizeof(*this); } |
| 1488 | |
| 1489 | // Redundant lock elimination |
| 1490 | // |
| 1491 | // There are various patterns of locking where we release and |
| 1492 | // immediately reacquire a lock in a piece of code where no operations |
| 1493 | // occur in between that would be observable. In those cases we can |
| 1494 | // skip releasing and reacquiring the lock without violating any |
| 1495 | // fairness requirements. Doing this around a loop could cause a lock |
| 1496 | // to be held for a very long time so we concentrate on non-looping |
| 1497 | // control flow. We also require that the operations are fully |
| 1498 | // redundant, meaning that we don't introduce new lock operations on
| 1499 | // some paths just to be able to eliminate them on others, a la PRE. This
| 1500 | // would probably require some more extensive graph manipulation to |
| 1501 | // guarantee that the memory edges were all handled correctly. |
| 1502 | // |
| 1503 | // Assuming p is a simple predicate which can't trap in any way and s |
| 1504 | // is a synchronized method, consider this code:
| 1505 | // |
| 1506 | // s(); |
| 1507 | // if (p) |
| 1508 | // s(); |
| 1509 | // else |
| 1510 | // s(); |
| 1511 | // s(); |
| 1512 | // |
| 1513 | // 1. The unlocks of the first call to s can be eliminated if the |
| 1514 | // locks inside the then and else branches are eliminated. |
| 1515 | // |
| 1516 | // 2. The unlocks of the then and else branches can be eliminated if |
| 1517 | // the lock of the final call to s is eliminated. |
| 1518 | // |
| 1519 | // Either of these cases subsumes the simple case of sequential control flow.
| 1520 | //
| 1521 | // Additionally we can eliminate versions without the else case:
| 1522 | // |
| 1523 | // s(); |
| 1524 | // if (p) |
| 1525 | // s(); |
| 1526 | // s(); |
| 1527 | // |
| 1528 | // 3. In this case we eliminate the unlock of the first s, the lock |
| 1529 | // and unlock in the then case and the lock in the final s. |
| 1530 | // |
| 1531 | // Note also that in all these cases the then/else pieces don't have |
| 1532 | // to be trivial as long as they begin and end with synchronization |
| 1533 | // operations. |
| 1534 | // |
| 1535 | // s(); |
| 1536 | // if (p) |
| 1537 | // s(); |
| 1538 | // f(); |
| 1539 | // s(); |
| 1540 | // s(); |
| 1541 | // |
| 1542 | // The code will work properly for this case, leaving in the unlock |
| 1543 | // before the call to f and the relock after it. |
| 1544 | // |
| 1545 | // A potentially interesting case which isn't handled here is when the |
| 1546 | // locking is partially redundant. |
| 1547 | // |
| 1548 | // s(); |
| 1549 | // if (p) |
| 1550 | // s(); |
| 1551 | // |
| 1552 | // This could be eliminated by putting an unlock on the else path and
| 1553 | // eliminating the first unlock and the lock on the then side.
| 1554 | // Alternatively, the unlock could be moved out of the then side so that
| 1555 | // it comes after the merge, and then the first unlock and the second
| 1556 | // lock could be eliminated. This might require less manipulation of the
| 1557 | // memory state to get correct.
| 1558 | // |
| 1559 | // Additionally we might allow work between an unlock and a lock before
| 1560 | // giving up eliminating the locks. The current code disallows any |
| 1561 | // conditional control flow between these operations. A formulation |
| 1562 | // similar to partial redundancy elimination computing the |
| 1563 | // availability of unlocking and the anticipatability of locking at a |
| 1564 | // program point would allow detection of fully redundant locking with |
| 1565 | // some amount of work in between. I'm not sure how often I really |
| 1566 | // think that would occur though. Most of the cases I've seen |
| 1567 | // indicate it's likely non-trivial work would occur in between. |
| 1568 | // There may be other more complicated constructs where we could |
| 1569 | // eliminate locking but I haven't seen any others appear as hot or |
| 1570 | // interesting. |
| 1571 | // |
| 1572 | // Locking and unlocking have a canonical form in ideal that looks |
| 1573 | // roughly like this: |
| 1574 | // |
| 1575 | //       <obj>
| 1576 | //         | \\------+
| 1577 | //         |  \       \
| 1578 | //         |   BoxLock  \
| 1579 | //         |  |   |      \
| 1580 | //         |  |    \      \
| 1581 | //         |  |   FastLock
| 1582 | //         |  |   /
| 1583 | //         |  |  /
| 1584 | //         |  |  |
| 1585 | //
| 1586 | //       Lock
| 1587 | //         |
| 1588 | //       Proj #0
| 1589 | //         |
| 1590 | //       MembarAcquire
| 1591 | //         |
| 1592 | //       Proj #0
| 1593 | //
| 1594 | //       MembarRelease
| 1595 | //         |
| 1596 | //       Proj #0
| 1597 | //         |
| 1598 | //       Unlock
| 1599 | //         |
| 1600 | //       Proj #0
| 1601 | // |
| 1602 | // |
| 1603 | // This code proceeds by processing Lock nodes during PhaseIterGVN
| 1604 | // and searching back through their control edges for the proper code
| 1605 | // patterns. Once it finds a set of lock and unlock operations to
| 1606 | // eliminate, they are marked as eliminatable, which causes the
| 1607 | // expansion of the Lock and Unlock macro nodes to turn the operations into NOPs.
| 1608 | // |
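|      | // As a source-level intuition only (the optimization operates on the ideal
|      | // graph, not on Java source), the sequential case above behaves as if
|      | //
|      | //   synchronized (obj) { a(); }
|      | //   synchronized (obj) { b(); }
|      | //
|      | // were coarsened into
|      | //
|      | //   synchronized (obj) { a(); b(); }
|      | //
|      | // i.e. the unlock ending the first region and the lock beginning the second
|      | // are the pair that gets marked as eliminated.
|      | //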
| 1609 | //============================================================================= |
| 1610 | |
| 1611 | // |
| 1612 | // Utility function to skip over uninteresting control nodes. Nodes skipped are: |
| 1613 | // - copy regions. (These may not have been optimized away yet.) |
| 1614 | // - eliminated locking nodes |
| 1615 | // |
| 1616 | static Node *next_control(Node *ctrl) { |
| 1617 | if (ctrl == NULL) |
| 1618 | return NULL; |
| 1619 | while (1) { |
| 1620 | if (ctrl->is_Region()) { |
| 1621 | RegionNode *r = ctrl->as_Region(); |
| 1622 | Node *n = r->is_copy(); |
| 1623 | if (n == NULL) |
| 1624 | break; // hit a region, return it |
| 1625 | else |
| 1626 | ctrl = n; |
| 1627 | } else if (ctrl->is_Proj()) { |
| 1628 | Node *in0 = ctrl->in(0); |
| 1629 | if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) { |
| 1630 | ctrl = in0->in(0); |
| 1631 | } else { |
| 1632 | break; |
| 1633 | } |
| 1634 | } else { |
| 1635 | break; // found an interesting control |
| 1636 | } |
| 1637 | } |
| 1638 | return ctrl; |
| 1639 | } |
| 1640 | // |
| 1641 | // Given a control, see if it's the control projection of an Unlock which
| 1642 | // is operating on the same object as lock.
| 1643 | // |
| 1644 | bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock, |
| 1645 | GrowableArray<AbstractLockNode*> &lock_ops) { |
| 1646 | ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL; |
| 1647 | if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) { |
| 1648 | Node *n = ctrl_proj->in(0); |
| 1649 | if (n != NULL && n->is_Unlock()) { |
| 1650 | UnlockNode *unlock = n->as_Unlock(); |
| 1651 | BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); |
| 1652 | Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node()); |
| 1653 | Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node()); |
| 1654 | if (lock_obj->eqv_uncast(unlock_obj) && |
| 1655 | BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) && |
| 1656 | !unlock->is_eliminated()) { |
| 1657 | lock_ops.append(unlock); |
| 1658 | return true; |
| 1659 | } |
| 1660 | } |
| 1661 | } |
| 1662 | return false; |
| 1663 | } |
| 1664 | |
| 1665 | // |
| 1666 | // Find the lock matching an unlock. Returns null if a safepoint |
| 1667 | // or complicated control is encountered first. |
| 1668 | LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) { |
| 1669 | LockNode *lock_result = NULL; |
| 1670 | // find the matching lock, or an intervening safepoint |
| 1671 | Node *ctrl = next_control(unlock->in(0)); |
| 1672 | while (1) { |
| 1673 | assert(ctrl != NULL, "invalid control graph" ); |
| 1674 | assert(!ctrl->is_Start(), "missing lock for unlock" ); |
| 1675 | if (ctrl->is_top()) break; // dead control path |
| 1676 | if (ctrl->is_Proj()) ctrl = ctrl->in(0); |
| 1677 | if (ctrl->is_SafePoint()) { |
| 1678 | break; // found a safepoint (may be the lock we are searching for) |
| 1679 | } else if (ctrl->is_Region()) { |
| 1680 | // Check for a simple diamond pattern. Punt on anything more complicated |
| 1681 | if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) { |
| 1682 | Node *in1 = next_control(ctrl->in(1)); |
| 1683 | Node *in2 = next_control(ctrl->in(2)); |
| 1684 | if (((in1->is_IfTrue() && in2->is_IfFalse()) || |
| 1685 | (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) { |
| 1686 | ctrl = next_control(in1->in(0)->in(0)); |
| 1687 | } else { |
| 1688 | break; |
| 1689 | } |
| 1690 | } else { |
| 1691 | break; |
| 1692 | } |
| 1693 | } else { |
| 1694 | ctrl = next_control(ctrl->in(0)); // keep searching |
| 1695 | } |
| 1696 | } |
| 1697 | if (ctrl->is_Lock()) { |
| 1698 | LockNode *lock = ctrl->as_Lock(); |
| 1699 | BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); |
| 1700 | Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node()); |
| 1701 | Node* unlock_obj = bs->step_over_gc_barrier(unlock->obj_node()); |
| 1702 | if (lock_obj->eqv_uncast(unlock_obj) && |
| 1703 | BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) { |
| 1704 | lock_result = lock; |
| 1705 | } |
| 1706 | } |
| 1707 | return lock_result; |
| 1708 | } |
| 1709 | |
| 1710 | // This code corresponds to case 3 above. |
| 1711 | |
| 1712 | bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock, |
| 1713 | GrowableArray<AbstractLockNode*> &lock_ops) { |
| 1714 | Node* if_node = node->in(0); |
| 1715 | bool if_true = node->is_IfTrue(); |
| 1716 | |
| 1717 | if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) { |
| 1718 | Node *lock_ctrl = next_control(if_node->in(0)); |
| 1719 | if (find_matching_unlock(lock_ctrl, lock, lock_ops)) { |
| 1720 | Node* lock1_node = NULL; |
| 1721 | ProjNode* proj = if_node->as_If()->proj_out(!if_true); |
| 1722 | if (if_true) { |
| 1723 | if (proj->is_IfFalse() && proj->outcnt() == 1) { |
| 1724 | lock1_node = proj->unique_out(); |
| 1725 | } |
| 1726 | } else { |
| 1727 | if (proj->is_IfTrue() && proj->outcnt() == 1) { |
| 1728 | lock1_node = proj->unique_out(); |
| 1729 | } |
| 1730 | } |
| 1731 | if (lock1_node != NULL && lock1_node->is_Lock()) { |
| 1732 | LockNode *lock1 = lock1_node->as_Lock(); |
| 1733 | BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); |
| 1734 | Node* lock_obj = bs->step_over_gc_barrier(lock->obj_node()); |
| 1735 | Node* lock1_obj = bs->step_over_gc_barrier(lock1->obj_node()); |
| 1736 | if (lock_obj->eqv_uncast(lock1_obj) && |
| 1737 | BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) && |
| 1738 | !lock1->is_eliminated()) { |
| 1739 | lock_ops.append(lock1); |
| 1740 | return true; |
| 1741 | } |
| 1742 | } |
| 1743 | } |
| 1744 | } |
| 1745 | |
| 1746 | lock_ops.trunc_to(0); |
| 1747 | return false; |
| 1748 | } |
| 1749 | |
| 1750 | bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock, |
| 1751 | GrowableArray<AbstractLockNode*> &lock_ops) { |
| 1752 | // check each control merging at this point for a matching unlock. |
| 1753 | // in(0) should be self edge so skip it. |
| 1754 | for (int i = 1; i < (int)region->req(); i++) { |
| 1755 | Node *in_node = next_control(region->in(i)); |
| 1756 | if (in_node != NULL) { |
| 1757 | if (find_matching_unlock(in_node, lock, lock_ops)) { |
| 1758 | // found a match so keep on checking. |
| 1759 | continue; |
| 1760 | } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) { |
| 1761 | continue; |
| 1762 | } |
| 1763 | |
| 1764 | // If we fall through to here then it was some kind of node we |
| 1765 | // don't understand or there wasn't a matching unlock, so give |
| 1766 | // up trying to merge locks. |
| 1767 | lock_ops.trunc_to(0); |
| 1768 | return false; |
| 1769 | } |
| 1770 | } |
| 1771 | return true; |
| 1772 | |
| 1773 | } |
| 1774 | |
| 1775 | #ifndef PRODUCT |
| 1776 | // |
| 1777 | // Create a counter which counts the number of times this lock is acquired |
| 1778 | // |
| 1779 | void AbstractLockNode::create_lock_counter(JVMState* state) { |
| 1780 | _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter); |
| 1781 | } |
| 1782 | |
| 1783 | void AbstractLockNode::set_eliminated_lock_counter() { |
| 1784 | if (_counter) { |
| 1785 | // Update the counter to indicate that this lock was eliminated. |
| 1786 | // The counter update code will stay around even though the |
| 1787 | // optimizer will eliminate the lock operation itself. |
| 1788 | _counter->set_tag(NamedCounter::EliminatedLockCounter); |
| 1789 | } |
| 1790 | } |
| 1791 | |
| 1792 | const char* AbstractLockNode::_kind_names[] = {"Regular" , "NonEscObj" , "Coarsened" , "Nested" }; |
| 1793 | |
| 1794 | void AbstractLockNode::dump_spec(outputStream* st) const { |
| 1795 | st->print("%s " , _kind_names[_kind]); |
| 1796 | CallNode::dump_spec(st); |
| 1797 | } |
| 1798 | |
| 1799 | void AbstractLockNode::dump_compact_spec(outputStream* st) const { |
| 1800 | st->print("%s" , _kind_names[_kind]); |
| 1801 | } |
| 1802 | |
| 1803 | // The related set of lock nodes includes the control boundary. |
| 1804 | void AbstractLockNode::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const { |
| 1805 | if (compact) { |
| 1806 | this->collect_nodes(in_rel, 1, false, false); |
| 1807 | } else { |
| 1808 | this->collect_nodes_in_all_data(in_rel, true); |
| 1809 | } |
| 1810 | this->collect_nodes(out_rel, -2, false, false); |
| 1811 | } |
| 1812 | #endif |
| 1813 | |
| 1814 | //============================================================================= |
| 1815 | Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
| 1816 | |
| 1817 | // perform any generic optimizations first (returns 'this' or NULL) |
| 1818 | Node *result = SafePointNode::Ideal(phase, can_reshape); |
| 1819 | if (result != NULL) return result; |
| 1820 | // Don't bother trying to transform a dead node |
| 1821 | if (in(0) && in(0)->is_top()) return NULL; |
| 1822 | |
| 1823 | // Now see if we can optimize away this lock. We don't actually |
| 1824 | // remove the locking here, we simply set the _eliminate flag which |
| 1825 | // prevents macro expansion from expanding the lock. Since we don't |
| 1826 | // modify the graph, the value returned from this function is the |
| 1827 | // one computed above. |
| 1828 | if (can_reshape && EliminateLocks && !is_non_esc_obj()) { |
| 1829 | // |
| 1830 | // If we are locking an unescaped object, the lock/unlock is unnecessary |
| 1831 | // |
| 1832 | ConnectionGraph *cgr = phase->C->congraph(); |
| 1833 | if (cgr != NULL && cgr->not_global_escape(obj_node())) { |
| 1834 | assert(!is_eliminated() || is_coarsened(), "sanity" ); |
| 1835 | // The lock could be marked eliminated by lock coarsening
| 1836 | // code during the first IGVN before EA. Replace the coarsened flag
| 1837 | // with non-escaping so that all associated locks/unlocks are eliminated.
| 1838 | #ifdef ASSERT |
| 1839 | this->log_lock_optimization(phase->C,"eliminate_lock_set_non_esc1" ); |
| 1840 | #endif |
| 1841 | this->set_non_esc_obj(); |
| 1842 | return result; |
| 1843 | } |
| 1844 | |
| 1845 | // |
| 1846 | // Try lock coarsening |
| 1847 | // |
| 1848 | PhaseIterGVN* iter = phase->is_IterGVN(); |
| 1849 | if (iter != NULL && !is_eliminated()) { |
| 1850 | |
| 1851 | GrowableArray<AbstractLockNode*> lock_ops; |
| 1852 | |
| 1853 | Node *ctrl = next_control(in(0)); |
| 1854 | |
| 1855 | // now search back for a matching Unlock |
| 1856 | if (find_matching_unlock(ctrl, this, lock_ops)) { |
| 1857 | // found an unlock directly preceding this lock. This is the
| 1858 | // case of a single lock directly control dependent on a
| 1859 | // single unlock, which is the trivial version of case 1 or 2.
| 1860 | } else if (ctrl->is_Region() ) { |
| 1861 | if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) { |
| 1862 | // found lock preceded by multiple unlocks along all paths |
| 1863 | // joining at this point which is case 3 in description above. |
| 1864 | } |
| 1865 | } else { |
| 1866 | // see if this lock comes from either half of an if, the
| 1867 | // predecessor of the if performs a matching unlock, and the
| 1868 | // other half of the if performs a lock.
| 1869 | if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) { |
| 1870 | // found unlock splitting to an if with locks on both branches. |
| 1871 | } |
| 1872 | } |
| 1873 | |
| 1874 | if (lock_ops.length() > 0) { |
| 1875 | // add ourselves to the list of locks to be eliminated. |
| 1876 | lock_ops.append(this); |
| 1877 | |
| 1878 | #ifndef PRODUCT |
| 1879 | if (PrintEliminateLocks) { |
| 1880 | int locks = 0; |
| 1881 | int unlocks = 0; |
| 1882 | for (int i = 0; i < lock_ops.length(); i++) { |
| 1883 | AbstractLockNode* lock = lock_ops.at(i); |
| 1884 | if (lock->Opcode() == Op_Lock) |
| 1885 | locks++; |
| 1886 | else |
| 1887 | unlocks++; |
| 1888 | if (Verbose) { |
| 1889 | lock->dump(1); |
| 1890 | } |
| 1891 | } |
| 1892 | tty->print_cr("***Eliminated %d unlocks and %d locks" , unlocks, locks); |
| 1893 | } |
| 1894 | #endif |
| 1895 | |
| 1896 | // for each of the identified locks, mark them |
| 1897 | // as eliminatable |
| 1898 | for (int i = 0; i < lock_ops.length(); i++) { |
| 1899 | AbstractLockNode* lock = lock_ops.at(i); |
| 1900 | |
| 1901 | // Mark it eliminated by coarsening and update any counters |
| 1902 | #ifdef ASSERT |
| 1903 | lock->log_lock_optimization(phase->C, "eliminate_lock_set_coarsened" ); |
| 1904 | #endif |
| 1905 | lock->set_coarsened(); |
| 1906 | } |
| 1907 | } else if (ctrl->is_Region() && |
| 1908 | iter->_worklist.member(ctrl)) { |
| 1909 | // We weren't able to find any opportunities, but the region this
| 1910 | // lock is control dependent on hasn't been processed yet, so put
| 1911 | // this lock back on the worklist so we can check again once any |
| 1912 | // region simplification has occurred. |
| 1913 | iter->_worklist.push(this); |
| 1914 | } |
| 1915 | } |
| 1916 | } |
| 1917 | |
| 1918 | return result; |
| 1919 | } |
| 1920 | |
| 1921 | //============================================================================= |
| 1922 | bool LockNode::is_nested_lock_region() { |
| 1923 | return is_nested_lock_region(NULL); |
| 1924 | } |
| 1925 | |
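|      | // Informally, a nested lock is one that is provably protected by an
|      | // enclosing lock on the same object that is still held, e.g. (illustrative
|      | // Java):
|      | //
|      | //   synchronized (obj) {          // outer lock, shallower stack slot
|      | //     ...
|      | //     synchronized (obj) { ... }  // nested lock: can be eliminated
|      | //   }
|      | //
|      | // The loop below looks for such an enclosing monitor on the same object at
|      | // a smaller stack slot in the current JVM state.
|      | //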
| 1926 | // 'c' is used for access to the compilation log; no logging if NULL
| 1927 | bool LockNode::is_nested_lock_region(Compile * c) { |
| 1928 | BoxLockNode* box = box_node()->as_BoxLock(); |
| 1929 | int stk_slot = box->stack_slot(); |
| 1930 | if (stk_slot <= 0) { |
| 1931 | #ifdef ASSERT |
| 1932 | this->log_lock_optimization(c, "eliminate_lock_INLR_1" ); |
| 1933 | #endif |
| 1934 | return false; // External lock, or it is not a Box (Phi node).
| 1935 | } |
| 1936 | |
| 1937 | // Ignore complex cases: merged locks or multiple locks. |
| 1938 | Node* obj = obj_node(); |
| 1939 | LockNode* unique_lock = NULL; |
| 1940 | if (!box->is_simple_lock_region(&unique_lock, obj)) { |
| 1941 | #ifdef ASSERT |
| 1942 | this->log_lock_optimization(c, "eliminate_lock_INLR_2a" ); |
| 1943 | #endif |
| 1944 | return false; |
| 1945 | } |
| 1946 | if (unique_lock != this) { |
| 1947 | #ifdef ASSERT |
| 1948 | this->log_lock_optimization(c, "eliminate_lock_INLR_2b" ); |
| 1949 | #endif |
| 1950 | return false; |
| 1951 | } |
| 1952 | |
| 1953 | BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); |
| 1954 | obj = bs->step_over_gc_barrier(obj); |
| 1955 | // Look for external lock for the same object. |
| 1956 | SafePointNode* sfn = this->as_SafePoint(); |
| 1957 | JVMState* youngest_jvms = sfn->jvms(); |
| 1958 | int max_depth = youngest_jvms->depth(); |
| 1959 | for (int depth = 1; depth <= max_depth; depth++) { |
| 1960 | JVMState* jvms = youngest_jvms->of_depth(depth); |
| 1961 | int num_mon = jvms->nof_monitors(); |
| 1962 | // Loop over monitors |
| 1963 | for (int idx = 0; idx < num_mon; idx++) { |
| 1964 | Node* obj_node = sfn->monitor_obj(jvms, idx); |
| 1965 | obj_node = bs->step_over_gc_barrier(obj_node); |
| 1966 | BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock(); |
| 1967 | if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) { |
| 1968 | return true; |
| 1969 | } |
| 1970 | } |
| 1971 | } |
| 1972 | #ifdef ASSERT |
| 1973 | this->log_lock_optimization(c, "eliminate_lock_INLR_3" ); |
| 1974 | #endif |
| 1975 | return false; |
| 1976 | } |
| 1977 | |
| 1978 | //============================================================================= |
| 1979 | uint UnlockNode::size_of() const { return sizeof(*this); } |
| 1980 | |
| 1981 | //============================================================================= |
| 1982 | Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
| 1983 | |
| 1984 | // perform any generic optimizations first (returns 'this' or NULL) |
| 1985 | Node *result = SafePointNode::Ideal(phase, can_reshape); |
| 1986 | if (result != NULL) return result; |
| 1987 | // Don't bother trying to transform a dead node |
| 1988 | if (in(0) && in(0)->is_top()) return NULL; |
| 1989 | |
| 1990 | // Now see if we can optimize away this unlock. We don't actually |
| 1991 | // remove the unlocking here, we simply set the _eliminate flag which |
| 1992 | // prevents macro expansion from expanding the unlock. Since we don't |
| 1993 | // modify the graph, the value returned from this function is the |
| 1994 | // one computed above. |
| 1995 | // Escape state is defined after Parse phase. |
| 1996 | if (can_reshape && EliminateLocks && !is_non_esc_obj()) { |
| 1997 | // |
| 1998 | // If we are unlocking an unescaped object, the lock/unlock is unnecessary. |
| 1999 | // |
| 2000 | ConnectionGraph *cgr = phase->C->congraph(); |
| 2001 | if (cgr != NULL && cgr->not_global_escape(obj_node())) { |
| 2002 | assert(!is_eliminated() || is_coarsened(), "sanity" ); |
| 2003 | // The lock could be marked eliminated by lock coarsening
| 2004 | // code during the first IGVN before EA. Replace the coarsened flag
| 2005 | // with non-escaping so that all associated locks/unlocks are eliminated.
| 2006 | #ifdef ASSERT |
| 2007 | this->log_lock_optimization(phase->C, "eliminate_lock_set_non_esc2" ); |
| 2008 | #endif |
| 2009 | this->set_non_esc_obj(); |
| 2010 | } |
| 2011 | } |
| 2012 | return result; |
| 2013 | } |
| 2014 | |
| 2015 | const char * AbstractLockNode::kind_as_string() const { |
| 2016 | return is_coarsened() ? "coarsened" : |
| 2017 | is_nested() ? "nested" : |
| 2018 | is_non_esc_obj() ? "non_escaping" : |
| 2019 | "?" ; |
| 2020 | } |
| 2021 | |
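|      | // The log element written below has roughly the following shape (attribute
|      | // values are illustrative only):
|      | //
|      | //   <eliminate_lock_set_non_esc1 lock='1' compile_id='42' class_id='lock'
|      | //                                kind='non_escaping' stamp='1.234'>
|      | //     <jvms bci='13' method='825'/>
|      | //   </eliminate_lock_set_non_esc1>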
| 2022 | void AbstractLockNode::log_lock_optimization(Compile *C, const char * tag) const { |
| 2023 | if (C == NULL) { |
| 2024 | return; |
| 2025 | } |
| 2026 | CompileLog* log = C->log(); |
| 2027 | if (log != NULL) { |
| 2028 | log->begin_head("%s lock='%d' compile_id='%d' class_id='%s' kind='%s'" , |
| 2029 | tag, is_Lock(), C->compile_id(), |
| 2030 | is_Unlock() ? "unlock" : is_Lock() ? "lock" : "?" , |
| 2031 | kind_as_string()); |
| 2032 | log->stamp(); |
| 2033 | log->end_head(); |
| 2034 | JVMState* p = is_Unlock() ? (as_Unlock()->dbg_jvms()) : jvms(); |
| 2035 | while (p != NULL) { |
| 2036 | log->elem("jvms bci='%d' method='%d'" , p->bci(), log->identify(p->method())); |
| 2037 | p = p->caller(); |
| 2038 | } |
| 2039 | log->tail(tag); |
| 2040 | } |
| 2041 | } |
| 2042 | |
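|      | // Decide whether a call that writes through a destination of type 'dest_t'
|      | // (the destination of an arraycopy, or the object being cloned) may modify
|      | // memory in the slice described by 't_oop'. Returns a conservative 'true'
|      | // whenever the two types cannot be proven disjoint.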
| 2043 | bool CallNode::may_modify_arraycopy_helper(const TypeOopPtr* dest_t, const TypeOopPtr *t_oop, PhaseTransform *phase) { |
| 2044 | if (dest_t->is_known_instance() && t_oop->is_known_instance()) { |
| 2045 | return dest_t->instance_id() == t_oop->instance_id(); |
| 2046 | } |
| 2047 | |
| 2048 | if (dest_t->isa_instptr() && !dest_t->klass()->equals(phase->C->env()->Object_klass())) { |
| 2049 | // clone |
| 2050 | if (t_oop->isa_aryptr()) { |
| 2051 | return false; |
| 2052 | } |
| 2053 | if (!t_oop->isa_instptr()) { |
| 2054 | return true; |
| 2055 | } |
| 2056 | if (dest_t->klass()->is_subtype_of(t_oop->klass()) || t_oop->klass()->is_subtype_of(dest_t->klass())) { |
| 2057 | return true; |
| 2058 | } |
| 2059 | // unrelated |
| 2060 | return false; |
| 2061 | } |
| 2062 | |
| 2063 | if (dest_t->isa_aryptr()) { |
| 2064 | // arraycopy or array clone |
| 2065 | if (t_oop->isa_instptr()) { |
| 2066 | return false; |
| 2067 | } |
| 2068 | if (!t_oop->isa_aryptr()) { |
| 2069 | return true; |
| 2070 | } |
| 2071 | |
| 2072 | const Type* elem = dest_t->is_aryptr()->elem(); |
| 2073 | if (elem == Type::BOTTOM) { |
| 2074 | // An array, but we don't know what the elements are
| 2075 | return true; |
| 2076 | } |
| 2077 | |
| 2078 | dest_t = dest_t->add_offset(Type::OffsetBot)->is_oopptr(); |
| 2079 | uint dest_alias = phase->C->get_alias_index(dest_t); |
| 2080 | uint t_oop_alias = phase->C->get_alias_index(t_oop); |
| 2081 | |
| 2082 | return dest_alias == t_oop_alias; |
| 2083 | } |
| 2084 | |
| 2085 | return true; |
| 2086 | } |
| 2087 | |