/*
 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

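// Overview (summary): the Connection Graph models pointer flow with four
// kinds of PointsTo nodes: JavaObject nodes for allocation sites and other
// object sources, LocalVar nodes for pointer-valued locals, Field nodes for
// object fields and array elements, and Arraycopy nodes for copies between
// objects. Escape states are ordered NoEscape < ArgEscape < GlobalEscape,
// and propagation only ever raises a node's state. phantom_obj is the
// predefined "unknown object" that conservatively stands for anything the
// analysis cannot track.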
ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn) :
  _nodes(C->comp_arena(), C->unique(), C->unique(), NULL),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  // Add ConP(#NULL) and ConN(#NULL) nodes.
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
  _pcmp_neq = NULL; // Should be initialized
  _pcmp_eq  = NULL;
}

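// EA pays off only for methods that allocate, lock on objects that might be
// local, or call boxing methods. An illustrative (hypothetical) Java source
// shape that would make all three checks below fire:
//
//   Point p = new Point(x, y);    // Allocate node: scalar replacement candidate
//   synchronized (localLock) { }  // Lock node on a non-Parm/non-Con object
//   Integer boxed = value;        // call to a boxing method
//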
bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate())
      return true;
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con()))
        return true;
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0)
    igvn->hash_delete(oop_null);
  if (noop_null->outcnt() == 0)
    igvn->hash_delete(noop_null);
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<Node*> storestore_worklist;
  GrowableArray<ArrayCopyNode*> arraycopy_worklist;
  GrowableArray<PointsToNode*>   ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_worklist;
  GrowableArray<FieldNode*>      oop_fields_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), NULL);  // preallocate space
  // Initialize worklist
  if (C->root() != NULL) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != NULL && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and java static calls are interesting.
          non_escaped_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    if (n->is_MergeMem()) {
      // Collect all MergeMem nodes to add memory slices for
      // scalar replaceable objects in split_unique_types().
      _mergemem_worklist.append(n->as_MergeMem());
    } else if (OptimizePtrCompare && n->is_Cmp() &&
               (n->Opcode() == Op_CmpP || n->Opcode() == Op_CmpN)) {
      // Collect compare pointers nodes.
      ptr_cmp_worklist.append(n);
    } else if (n->is_MemBarStoreStore()) {
      // Collect all MemBarStoreStore nodes so that depending on the
      // escape status of the associated Allocate node some of them
      // may be eliminated.
      storestore_worklist.append(n);
    } else if (n->is_MemBar() && (n->Opcode() == Op_MemBarRelease) &&
               (n->req() > MemBarNode::Precedent)) {
      record_for_optimizer(n);
#ifdef ASSERT
    } else if (n->is_AddP()) {
      // Collect address nodes for graph verification.
      addp_worklist.append(n);
#endif
    } else if (n->is_ArrayCopy()) {
      // Keep a list of ArrayCopy nodes so that if one of its inputs is non
      // escaping, we can record a unique type.
      arraycopy_worklist.append(n->as_ArrayCopy());
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
  }
  if (non_escaped_worklist.length() == 0) {
    _collecting = false;
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }
  int ptnodes_length = ptnodes_worklist.length();

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // Bytecode analyzer BCEscapeAnalyzer, used for Call nodes
  // processing, calls to CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution VM may throw
  // an exception which CI cleans and converts to compilation failure.
  if (C->failing()) return false;

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped, or the time or iteration limits were hit.
    _collecting = false;
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  int non_escaped_length = non_escaped_worklist.length();
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (n->is_CallStaticJava()) {
      n->as_CallStaticJava()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn);
      if (ptn->scalar_replaceable()) {
        alloc_worklist.append(ptn->ideal_node());
      }
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase tp("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  if (has_scalar_replaceable_candidates &&
      C->AliasLevel() >= 3 && EliminateAllocations) {
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist);
    if (C->failing()) return false;
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    } else if (C->AliasLevel() < 3) {
      tty->print(" since AliasLevel < 3 ===");
    }
    tty->cr();
#endif
  }
  return has_non_escaping_obj;
}
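
// How the results computed above are consumed (summary, not exhaustive):
// split_unique_types() gives each scalar replaceable allocation its own
// alias class so macro expansion can replace the object with its fields,
// and the Allocate and Lock/Unlock nodes recorded via record_for_optimizer()
// can be simplified by the following IGVN pass while escape information is
// still available.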

// Utility function for nodes that load an object
void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
  // ThreadLocal has RawPtr type.
  const Type* t = _igvn->type(n);
  if (t->make_ptr() != NULL) {
    Node* adr = n->in(MemNode::Address);
#ifdef ASSERT
    if (!adr->is_AddP()) {
      assert(_igvn->type(adr)->isa_rawptr(), "sanity");
    } else {
      assert((ptnode_adr(adr->_idx) == NULL ||
              ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
    }
#endif
    add_local_var_and_edge(n, PointsToNode::NoEscape,
                           adr, delayed_worklist);
  }
}

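// Graph construction is two-phase: add_node_to_connection_graph() below
// creates PointsTo nodes and only those edges whose inputs are already
// registered. Nodes such as Phi and CMoveP, whose inputs may not have
// PointsTo nodes yet on the first pass, are pushed on delayed_worklist
// and receive their remaining edges later in add_final_edges().
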
// Populate Connection Graph with PointsTo nodes and create simple
// connection graph edges.
void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
  assert(!_verify, "this method should not be called for verification");
  PhaseGVN* igvn = _igvn;
  uint n_idx = n->_idx;
  PointsToNode* n_ptn = ptnode_adr(n_idx);
  if (n_ptn != NULL)
    return; // No need to redefine PointsTo node during first iteration.

  if (n->is_Call()) {
    // Arguments to allocation and locking don't escape.
    if (n->is_AbstractLock()) {
      // Put Lock and Unlock nodes on IGVN worklist to process them during
      // first IGVN optimization when escape information is still available.
      record_for_optimizer(n);
    } else if (n->is_Allocate()) {
      add_call_node(n->as_Call());
      record_for_optimizer(n);
    } else {
      if (n->is_CallStaticJava()) {
        const char* name = n->as_CallStaticJava()->_name;
        if (name != NULL && strcmp(name, "uncommon_trap") == 0)
          return; // Skip uncommon traps
      }
      // Don't mark as processed since call's arguments have to be processed.
      delayed_worklist->push(n);
      // Check if a call returns an object.
      if ((n->as_Call()->returns_pointer() &&
           n->as_Call()->proj_out_or_null(TypeFunc::Parms) != NULL) ||
          (n->is_CallStaticJava() &&
           n->as_CallStaticJava()->is_boxing_method())) {
        add_call_node(n->as_Call());
      }
    }
    return;
  }
  // Put this check here to process call arguments since some call nodes
  // point to phantom_obj.
  if (n_ptn == phantom_obj || n_ptn == null_obj)
    return; // Skip predefined nodes.

  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      // Field nodes are created for all field types. They are used in
      // adjust_scalar_replaceable_state() and split_unique_types().
      // Note, non-oop fields will have only base edges in Connection
      // Graph because such fields are not used for oop loads and stores.
      int offset = address_offset(n, igvn);
      add_field(n, PointsToNode::NoEscape, offset);
      if (ptn_base == NULL) {
        delayed_worklist->push(n); // Process it later.
      } else {
        n_ptn = ptnode_adr(n_idx);
        add_base(n_ptn->as_Field(), ptn_base);
      }
      break;
    }
    case Op_CastX2P: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), delayed_worklist);
      break;
    }
    case Op_CMoveP: {
      add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during first iteration because some could be
      // not defined yet.
      delayed_worklist->push(n);
      break;
    }
    case Op_ConP:
    case Op_ConN:
    case Op_ConNKlass: {
      // assume all oop constants globally escape except for null
      PointsToNode::EscapeState es;
      const Type* t = igvn->type(n);
      if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
        es = PointsToNode::NoEscape;
      } else {
        es = PointsToNode::GlobalEscape;
      }
      add_java_object(n, es);
      break;
    }
    case Op_CreateEx: {
      // assume that all exception objects globally escape
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadKlass:
    case Op_LoadNKlass: {
      // Unknown class is loaded
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      add_objload_to_connection_graph(n, delayed_worklist);
      break;
    }
    case Op_Parm: {
      map_ideal_node(n, phantom_obj);
      break;
    }
    case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP so
      // phantom_obj could be used.
      map_ideal_node(n, phantom_obj); // Result is unknown
      break;
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during first iteration because some could be
        // not defined yet.
        delayed_worklist->push(n);
      }
      break;
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape,
                               n->in(0), delayed_worklist);
      }
      break;
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), delayed_worklist);
      }
      break;
    }
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      add_objload_to_connection_graph(n, delayed_worklist);
      // fallthrough
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN: {
      add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
      break;
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      add_local_var(n, PointsToNode::ArgEscape);
      delayed_worklist->push(n); // Process it later.
      break;
    }
    case Op_ThreadLocal: {
      add_java_object(n, PointsToNode::ArgEscape);
      break;
    }
    default:
      ; // Do nothing for nodes not related to EA.
  }
  return;
}

#ifdef ASSERT
#define ELSE_FAIL(name)                               \
      /* Should not be called for non-pointer type. */ \
      n->dump(1);                                       \
      assert(false, name);                              \
      break;
#else
#define ELSE_FAIL(name) \
      break;
#endif

// Add final simple edges to graph.
void ConnectionGraph::add_final_edges(Node *n) {
  PointsToNode* n_ptn = ptnode_adr(n->_idx);
#ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject())
    return; // This method does not change graph for JavaObject.
#endif

  if (n->is_Call()) {
    process_call_arguments(n->as_Call());
    return;
  }
  assert(n->is_Store() || n->is_LoadStore() ||
         (n_ptn != NULL) && (n_ptn->ideal_node() != NULL),
         "node should be registered already");
  int opcode = n->Opcode();
  bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
  if (gc_handled) {
    return; // Ignore node if already handled by GC.
  }
  switch (opcode) {
    case Op_AddP: {
      Node* base = get_addp_base(n);
      PointsToNode* ptn_base = ptnode_adr(base->_idx);
      assert(ptn_base != NULL, "field's base should be registered");
      add_base(n_ptn->as_Field(), ptn_base);
      break;
    }
    case Op_CastPP:
    case Op_CheckCastPP:
    case Op_EncodeP:
    case Op_DecodeN:
    case Op_EncodePKlass:
    case Op_DecodeNKlass: {
      add_local_var_and_edge(n, PointsToNode::NoEscape,
                             n->in(1), NULL);
      break;
    }
    case Op_CMoveP: {
      for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in == NULL)
          continue;  // ignore NULL
        Node* uncast_in = in->uncast();
        if (uncast_in->is_top() || uncast_in == n)
          continue;  // ignore top or inputs which go back this node
        PointsToNode* ptn = ptnode_adr(in->_idx);
        assert(ptn != NULL, "node should be registered");
        add_edge(n_ptn, ptn);
      }
      break;
    }
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadPLocked: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = _igvn->type(n);
      if (t->make_ptr() != NULL) {
        Node* adr = n->in(MemNode::Address);
        add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
        break;
      }
      ELSE_FAIL("Op_LoadP");
    }
    case Op_Phi: {
      // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
      // ThreadLocal has RawPtr type.
      const Type* t = n->as_Phi()->type();
      if (t->make_ptr() != NULL) {
        for (uint i = 1; i < n->req(); i++) {
          Node* in = n->in(i);
          if (in == NULL)
            continue;  // ignore NULL
          Node* uncast_in = in->uncast();
          if (uncast_in->is_top() || uncast_in == n)
            continue;  // ignore top or inputs which go back this node
          PointsToNode* ptn = ptnode_adr(in->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
        break;
      }
      ELSE_FAIL("Op_Phi");
    }
    case Op_Proj: {
      // we are only interested in the oop result projection from a call
      if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
          n->in(0)->as_Call()->returns_pointer()) {
        add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), NULL);
        break;
      }
      ELSE_FAIL("Op_Proj");
    }
    case Op_Rethrow: // Exception object escapes
    case Op_Return: {
      if (n->req() > TypeFunc::Parms &&
          _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
        // Treat Return value as LocalVar with GlobalEscape escape state.
        add_local_var_and_edge(n, PointsToNode::GlobalEscape,
                               n->in(TypeFunc::Parms), NULL);
        break;
      }
      ELSE_FAIL("Op_Return");
    }
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
    case Op_StorePConditional:
    case Op_CompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_GetAndSetP:
    case Op_GetAndSetN: {
      if (add_final_edges_unsafe_access(n, opcode)) {
        break;
      }
      ELSE_FAIL("Op_StoreP");
    }
    case Op_AryEq:
    case Op_HasNegatives:
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsic do not escape but
      // they are not scalar replaceable. Adjust escape state for them.
      // Start from in(2) edge since in(1) is memory edge.
      for (uint i = 2; i < n->req(); i++) {
        Node* adr = n->in(i);
        const Type* at = _igvn->type(adr);
        if (!adr->is_top() && at->isa_ptr()) {
          assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
                 at->isa_ptr() != NULL, "expecting a pointer");
          if (adr->is_AddP()) {
            adr = get_addp_base(adr);
          }
          PointsToNode* ptn = ptnode_adr(adr->_idx);
          assert(ptn != NULL, "node should be registered");
          add_edge(n_ptn, ptn);
        }
      }
      break;
    }
    default: {
      // This method should be called only for EA specific nodes which may
      // miss some edges when they were created.
#ifdef ASSERT
      n->dump(1);
#endif
      guarantee(false, "unknown node");
    }
  }
  return;
}

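// The two helpers below classify stores and atomic accesses. Roughly: a
// store through an oop field, or a raw store into a just-allocated object
// (a store captured by its Initialize node), adds an edge from the field
// to the stored value; a StoreP through an unknown raw address makes the
// stored value GlobalEscape, since such an access (for example, an
// Unsafe-style store at an arbitrary offset) could publish it anywhere.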
void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
  Node* adr = n->in(MemNode::Address);
  const Type* adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
  if (adr_type == NULL) {
    return; // skip dead nodes
  }
  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && adr->in(AddPNode::Address)->is_Proj()
          && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
    delayed_worklist->push(n); // Process it later.
#ifdef ASSERT
    assert(adr->is_AddP(), "expecting an AddP");
    if (adr_type == TypeRawPtr::NOTNULL) {
      // Verify a raw address for a store captured by Initialize node.
      int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
      assert(offs != Type::OffsetBot, "offset must be a constant");
    }
#endif
  } else {
    // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
    if (adr->is_BoxLock()) {
      return;
    }
    // Stored value escapes in unsafe access.
    if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
      delayed_worklist->push(n); // Process unsafe access later.
      return;
    }
#ifdef ASSERT
    n->dump(1);
    assert(false, "not unsafe");
#endif
  }
}

bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
  Node* adr = n->in(MemNode::Address);
  const Type *adr_type = _igvn->type(adr);
  adr_type = adr_type->make_ptr();
#ifdef ASSERT
  if (adr_type == NULL) {
    n->dump(1);
    assert(adr_type != NULL, "dead node should not be on list");
    return true;
  }
#endif

  if (opcode == Op_GetAndSetP || opcode == Op_GetAndSetN ||
      opcode == Op_CompareAndExchangeN || opcode == Op_CompareAndExchangeP) {
    add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
  }

  if (adr_type->isa_oopptr()
      || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
          && adr_type == TypeRawPtr::NOTNULL
          && adr->in(AddPNode::Address)->is_Proj()
          && adr->in(AddPNode::Address)->in(0)->is_Allocate())) {
    // Point Address to Value
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != NULL &&
           adr_ptn->as_Field()->is_oop(), "node should be registered");
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != NULL, "node should be registered");
    add_edge(adr_ptn, ptn);
    return true;
  } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
    // Stored value escapes in unsafe access.
    Node* val = n->in(MemNode::ValueIn);
    PointsToNode* ptn = ptnode_adr(val->_idx);
    assert(ptn != NULL, "node should be registered");
    set_escape_state(ptn, PointsToNode::GlobalEscape);
    // Add edge to object for unsafe access with offset.
    PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
    assert(adr_ptn != NULL, "node should be registered");
    if (adr_ptn->is_Field()) {
      assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
      add_edge(adr_ptn, ptn);
    }
    return true;
  }
  return false;
}

void ConnectionGraph::add_call_node(CallNode* call) {
  assert(call->returns_pointer(), "only for call which returns pointer");
  uint call_idx = call->_idx;
  if (call->is_Allocate()) {
    Node* k = call->in(AllocateNode::KlassNode);
    const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != NULL, "TypeKlassPtr required.");
    ciKlass* cik = kt->klass();
    PointsToNode::EscapeState es = PointsToNode::NoEscape;
    bool scalar_replaceable = true;
    if (call->is_AllocateArray()) {
      if (!cik->is_array_klass()) { // StressReflectiveCode
        es = PointsToNode::GlobalEscape;
      } else {
        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
          // Not scalar replaceable if the length is not constant or too big.
          scalar_replaceable = false;
        }
      }
    } else {  // Allocate instance
      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
          cik->is_subclass_of(_compile->env()->Reference_klass()) ||
          !cik->is_instance_klass() || // StressReflectiveCode
          !cik->as_instance_klass()->can_be_instantiated() ||
          cik->as_instance_klass()->has_finalizer()) {
        es = PointsToNode::GlobalEscape;
      }
    }
    add_java_object(call, es);
    PointsToNode* ptn = ptnode_adr(call_idx);
    if (!scalar_replaceable && ptn->scalar_replaceable()) {
      ptn->set_scalar_replaceable(false);
    }
  } else if (call->is_CallStaticJava()) {
    // Call nodes could be different types:
    //
    // 1. CallDynamicJavaNode (what happened during call is unknown):
    //
    //    - mapped to GlobalEscape JavaObject node if oop is returned;
    //
    //    - all oop arguments are escaping globally;
    //
    // 2. CallStaticJavaNode (execute bytecode analysis if possible):
    //
    //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
    //
    //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
    //    - mapped to NoEscape JavaObject node if non-escaping object allocated
    //      during call is returned;
    //    - mapped to an ArgEscape LocalVar node pointing to object arguments
    //      which are returned and do not escape during the call;
    //
    //    - oop arguments escaping status is defined by bytecode analysis;
    //
    // For a static call, we know exactly what method is being called.
    // Use bytecode estimator to record whether the call's return value escapes.
    ciMethod* meth = call->as_CallJava()->method();
    if (meth == NULL) {
      const char* name = call->as_CallStaticJava()->_name;
      assert(strncmp(name, "_multianewarray", 15) == 0, "TODO: add failed case check");
      // Returns a newly allocated unescaped object.
      add_java_object(call, PointsToNode::NoEscape);
      ptnode_adr(call_idx)->set_scalar_replaceable(false);
    } else if (meth->is_boxing_method()) {
      // Returns boxing object
      PointsToNode::EscapeState es;
      vmIntrinsics::ID intr = meth->intrinsic_id();
      if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if object is always allocated.
        es = PointsToNode::NoEscape;
      } else {
        // It escapes globally if object could be loaded from cache.
        es = PointsToNode::GlobalEscape;
      }
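      // Illustration (Java library semantics, not derived from this file):
      // Integer.valueOf() and friends may return an object from the
      // autoboxing cache for small values, so the result can alias a
      // globally reachable object; Float.valueOf() and Double.valueOf()
      // always allocate, which is why they get NoEscape above.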
      add_java_object(call, es);
    } else {
      BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
      call_analyzer->copy_dependencies(_compile->dependencies());
      if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated unescaped object, simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
        add_java_object(call, PointsToNode::NoEscape);
        ptnode_adr(call_idx)->set_scalar_replaceable(false);
      } else {
        // Determine whether any arguments are returned.
        const TypeTuple* d = call->tf()->domain();
        bool ret_arg = false;
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          if (d->field_at(i)->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
            ret_arg = true;
            break;
          }
        }
        if (ret_arg) {
          add_local_var(call, PointsToNode::ArgEscape);
        } else {
          // Returns unknown object.
          map_ideal_node(call, phantom_obj);
        }
      }
    }
  } else {
    // Some other type of call, assume the worst case:
    // returned value is unknown and globally escapes.
    assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
    map_ideal_node(call, phantom_obj);
  }
}

void ConnectionGraph::process_call_arguments(CallNode *call) {
  bool is_arraycopy = false;
  switch (call->Opcode()) {
#ifdef ASSERT
    case Op_Allocate:
    case Op_AllocateArray:
    case Op_Lock:
    case Op_Unlock:
      assert(false, "should be done already");
      break;
#endif
    case Op_ArrayCopy:
    case Op_CallLeafNoFP:
      // Most array copies are ArrayCopy nodes at this point but there
      // are still a few direct calls to the copy subroutines (See
      // PhaseStringOpts::copy_string())
      is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
                     call->as_CallLeaf()->is_call_to_arraycopystub();
      // fall through
    case Op_CallLeaf: {
      // Stub calls: objects do not escape but they are not scalar replaceable.
      // Adjust escape state for outgoing arguments.
      const TypeTuple * d = call->tf()->domain();
      bool src_has_oops = false;
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        Node *arg = call->in(i);
        if (arg == NULL) {
          continue;
        }
        const Type *aat = _igvn->type(arg);
        if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr())
          continue;
        if (arg->is_AddP()) {
          //
          // The inline_native_clone() case when the arraycopy stub is called
          // after the allocation before Initialize and CheckCastPP nodes.
          // Or normal arraycopy for object arrays case.
          //
          // Set AddP's base (Allocate) as not scalar replaceable since
          // pointer to the base (with offset) is passed as argument.
          //
          arg = get_addp_base(arg);
        }
        PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
        assert(arg_ptn != NULL, "should be registered");
        PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
        if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != NULL, "expecting a pointer");
          bool arg_has_oops = aat->isa_oopptr() &&
                              (aat->isa_oopptr()->klass() == NULL || aat->isa_instptr() ||
                               (aat->isa_aryptr() && aat->isa_aryptr()->klass()->is_obj_array_klass()));
          if (i == TypeFunc::Parms) {
            src_has_oops = arg_has_oops;
          }
          //
          // src or dst could be j.l.Object when other is basic type array:
          //
          //   arraycopy(char[],0,Object*,0,size);
          //   arraycopy(Object*,0,char[],0,size);
          //
          // Don't add edges in such cases.
          //
          bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
                                       arg_has_oops && (i > TypeFunc::Parms);
#ifdef ASSERT
          if (!(is_arraycopy ||
                BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
                (call->as_CallLeaf()->_name != NULL &&
                 (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
                  strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0)
                 ))) {
            call->dump();
            fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
          }
#endif
          // Always process arraycopy's destination object since
          // we need to add all possible edges to references in
          // source object.
          if (arg_esc >= PointsToNode::ArgEscape &&
              !arg_is_arraycopy_dest) {
            continue;
          }
          PointsToNode::EscapeState es = PointsToNode::ArgEscape;
          if (call->is_ArrayCopy()) {
            ArrayCopyNode* ac = call->as_ArrayCopy();
            if (ac->is_clonebasic() ||
                ac->is_arraycopy_validated() ||
                ac->is_copyof_validated() ||
                ac->is_copyofrange_validated()) {
              es = PointsToNode::NoEscape;
            }
          }
          set_escape_state(arg_ptn, es);
          if (arg_is_arraycopy_dest) {
            Node* src = call->in(TypeFunc::Parms);
            if (src->is_AddP()) {
              src = get_addp_base(src);
            }
            PointsToNode* src_ptn = ptnode_adr(src->_idx);
            assert(src_ptn != NULL, "should be registered");
            if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as base since objects escape states are not related.
              // Only escape state of destination object's fields affects
              // escape state of fields in source object.
              add_arraycopy(call, es, src_ptn, arg_ptn);
            }
          }
        }
      }
      break;
    }
    case Op_CallStaticJava: {
      // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects.
#ifdef ASSERT
      const char* name = call->as_CallStaticJava()->_name;
      assert((name == NULL || strcmp(name, "uncommon_trap") != 0), "normal calls only");
#endif
      ciMethod* meth = call->as_CallJava()->method();
      if ((meth != NULL) && meth->is_boxing_method()) {
        break; // Boxing methods do not modify any oops.
      }
      BCEscapeAnalyzer* call_analyzer = (meth != NULL) ? meth->get_bcea() : NULL;
      // fall-through if not a Java method or no analyzer information
      if (call_analyzer != NULL) {
        PointsToNode* call_ptn = ptnode_adr(call->_idx);
        const TypeTuple* d = call->tf()->domain();
        for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
          const Type* at = d->field_at(i);
          int k = i - TypeFunc::Parms;
          Node* arg = call->in(i);
          PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
          if (at->isa_ptr() != NULL &&
              call_analyzer->is_arg_returned(k)) {
            // The call returns arguments.
            if (call_ptn != NULL) { // Is call's result used?
              assert(call_ptn->is_LocalVar(), "node should be registered");
              assert(arg_ptn != NULL, "node should be registered");
              add_edge(call_ptn, arg_ptn);
            }
          }
          if (at->isa_oopptr() != NULL &&
              arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
            if (!call_analyzer->is_arg_stack(k)) {
              // The argument global escapes
              set_escape_state(arg_ptn, PointsToNode::GlobalEscape);
            } else {
              set_escape_state(arg_ptn, PointsToNode::ArgEscape);
              if (!call_analyzer->is_arg_local(k)) {
                // The argument itself doesn't escape, but any fields might
                set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape);
              }
            }
          }
        }
        if (call_ptn != NULL && call_ptn->is_LocalVar()) {
          // The call returns arguments.
          assert(call_ptn->edge_count() > 0, "sanity");
          if (!call_analyzer->is_return_local()) {
            // Returns also unknown object.
            add_edge(call_ptn, phantom_obj);
          }
        }
        break;
      }
    }
    default: {
      // Fall-through here if not a Java method or no analyzer information
      // or some other type of call, assume the worst case: all arguments
      // globally escape.
      const TypeTuple* d = call->tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const Type* at = d->field_at(i);
        if (at->isa_oopptr() != NULL) {
          Node* arg = call->in(i);
          if (arg->is_AddP()) {
            arg = get_addp_base(arg);
          }
          assert(ptnode_adr(arg->_idx) != NULL, "should be defined already");
          set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape);
        }
      }
    }
  }
}

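// A note on convergence (informal): add_java_object_edges() only adds edges
// and escape states only move up the NoEscape -> ArgEscape -> GlobalEscape
// lattice, so the fixed-point loop below must terminate once a pass adds no
// new edges; the iteration and time limits are a safety net for
// pathological graphs.
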
|---|
| 1126 | // Finish Graph construction. | 
|---|
| 1127 | bool ConnectionGraph::complete_connection_graph( | 
|---|
| 1128 | GrowableArray<PointsToNode*>&   ptnodes_worklist, | 
|---|
| 1129 | GrowableArray<JavaObjectNode*>& non_escaped_worklist, | 
|---|
| 1130 | GrowableArray<JavaObjectNode*>& java_objects_worklist, | 
|---|
| 1131 | GrowableArray<FieldNode*>&      oop_fields_worklist) { | 
|---|
| 1132 | // Normally only 1-3 passes needed to build Connection Graph depending | 
|---|
| 1133 | // on graph complexity. Observed 8 passes in jvm2008 compiler.compiler. | 
|---|
| 1134 | // Set limit to 20 to catch situation when something did go wrong and | 
|---|
| 1135 | // bailout Escape Analysis. | 
|---|
| 1136 | // Also limit build time to 20 sec (60 in debug VM), EscapeAnalysisTimeout flag. | 
|---|
| 1137 | #define CG_BUILD_ITER_LIMIT 20 | 
|---|
| 1138 |  | 
|---|
| 1139 | // Propagate GlobalEscape and ArgEscape escape states and check that | 
|---|
| 1140 | // we still have non-escaping objects. The method pushs on _worklist | 
|---|
| 1141 | // Field nodes which reference phantom_object. | 
|---|
| 1142 | if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) { | 
|---|
| 1143 | return false; // Nothing to do. | 
|---|
| 1144 | } | 
|---|
| 1145 | // Now propagate references to all JavaObject nodes. | 
|---|
| 1146 | int java_objects_length = java_objects_worklist.length(); | 
|---|
| 1147 | elapsedTimer time; | 
|---|
| 1148 | bool timeout = false; | 
|---|
| 1149 | int new_edges = 1; | 
|---|
| 1150 | int iterations = 0; | 
|---|
| 1151 | do { | 
|---|
| 1152 | while ((new_edges > 0) && | 
|---|
| 1153 | (iterations++ < CG_BUILD_ITER_LIMIT)) { | 
|---|
| 1154 | double start_time = time.seconds(); | 
|---|
| 1155 | time.start(); | 
|---|
| 1156 | new_edges = 0; | 
|---|
| 1157 | // Propagate references to phantom_object for nodes pushed on _worklist | 
|---|
| 1158 | // by find_non_escaped_objects() and find_field_value(). | 
|---|
| 1159 | new_edges += add_java_object_edges(phantom_obj, false); | 
|---|
| 1160 | for (int next = 0; next < java_objects_length; ++next) { | 
|---|
| 1161 | JavaObjectNode* ptn = java_objects_worklist.at(next); | 
|---|
| 1162 | new_edges += add_java_object_edges(ptn, true); | 
|---|
| 1163 |  | 
|---|
| 1164 | #define SAMPLE_SIZE 4 | 
|---|
| 1165 | if ((next % SAMPLE_SIZE) == 0) { | 
|---|
| 1166 | // Every SAMPLE_SIZE iterations, estimate how much time it will take | 
|---|
| 1167 | // to complete graph construction. | 
|---|
| 1168 | time.stop(); | 
|---|
| 1169 | // Poll for requests from the shutdown mechanism to quiesce the compiler | 
|---|
| 1170 | // because Connection Graph construction may take a long time. | 
|---|
| 1171 | CompileBroker::maybe_block(); | 
|---|
| 1172 | double stop_time = time.seconds(); | 
|---|
| 1173 | double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE; | 
|---|
| 1174 | double time_until_end = time_per_iter * (double)(java_objects_length - next); | 
|---|
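|  | // Rough example of the estimate below: if the last SAMPLE_SIZE == 4 | 
|---|
|  | // objects took 0.02 sec, time_per_iter is 0.005 sec; with 10000 objects | 
|---|
|  | // left, time_until_end is 50 sec, which would exceed a 20 sec | 
|---|
|  | // EscapeAnalysisTimeout and trigger the timeout bailout. | 
|---|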
| 1175 | if ((start_time + time_until_end) >= EscapeAnalysisTimeout) { | 
|---|
| 1176 | timeout = true; | 
|---|
| 1177 | break; // Timeout | 
|---|
| 1178 | } | 
|---|
| 1179 | start_time = stop_time; | 
|---|
| 1180 | time.start(); | 
|---|
| 1181 | } | 
|---|
| 1182 | #undef SAMPLE_SIZE | 
|---|
| 1183 |  | 
|---|
| 1184 | } | 
|---|
| 1185 | if (timeout) break; | 
|---|
| 1186 | if (new_edges > 0) { | 
|---|
| 1187 | // Update escape states on each iteration if graph was updated. | 
|---|
| 1188 | if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist)) { | 
|---|
| 1189 | return false; // Nothing to do. | 
|---|
| 1190 | } | 
|---|
| 1191 | } | 
|---|
| 1192 | time.stop(); | 
|---|
| 1193 | if (time.seconds() >= EscapeAnalysisTimeout) { | 
|---|
| 1194 | timeout = true; | 
|---|
| 1195 | break; | 
|---|
| 1196 | } | 
|---|
| 1197 | } | 
|---|
| 1198 | if ((iterations < CG_BUILD_ITER_LIMIT) && !timeout) { | 
|---|
| 1199 | time.start(); | 
|---|
| 1200 | // Find fields which have unknown value. | 
|---|
| 1201 | int fields_length = oop_fields_worklist.length(); | 
|---|
| 1202 | for (int next = 0; next < fields_length; next++) { | 
|---|
| 1203 | FieldNode* field = oop_fields_worklist.at(next); | 
|---|
| 1204 | if (field->edge_count() == 0) { | 
|---|
| 1205 | new_edges += find_field_value(field); | 
|---|
| 1206 | // This call may have added new edges to phantom_object. | 
|---|
| 1207 | // Another cycle is needed to propagate references to phantom_object. | 
|---|
| 1208 | } | 
|---|
| 1209 | } | 
|---|
| 1210 | time.stop(); | 
|---|
| 1211 | if (time.seconds() >= EscapeAnalysisTimeout) { | 
|---|
| 1212 | timeout = true; | 
|---|
| 1213 | break; | 
|---|
| 1214 | } | 
|---|
| 1215 | } else { | 
|---|
| 1216 | new_edges = 0; // Bailout | 
|---|
| 1217 | } | 
|---|
| 1218 | } while (new_edges > 0); | 
|---|
| 1219 |  | 
|---|
| 1220 | // Bailout if passed limits. | 
|---|
| 1221 | if ((iterations >= CG_BUILD_ITER_LIMIT) || timeout) { | 
|---|
| 1222 | Compile* C = _compile; | 
|---|
| 1223 | if (C->log() != NULL) { | 
|---|
| 1224 | C->log()->begin_elem("connectionGraph_bailout reason='reached "); | 
|---|
| 1225 | C->log()->text("%s", timeout ? "time" : "iterations"); | 
|---|
| 1226 | C->log()->end_elem(" limit'"); | 
|---|
| 1227 | } | 
|---|
| 1228 | assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build (%f sec, %d iterations) with %d nodes and worklist size %d", | 
|---|
| 1229 | time.seconds(), iterations, nodes_size(), ptnodes_worklist.length()); | 
|---|
| 1230 | // Possible infinite build_connection_graph loop; | 
|---|
| 1231 | // bail out (no changes to the ideal graph were made). | 
|---|
| 1232 | return false; | 
|---|
| 1233 | } | 
|---|
| 1234 | #ifdef ASSERT | 
|---|
| 1235 | if (Verbose && PrintEscapeAnalysis) { | 
|---|
| 1236 | tty->print_cr("EA: %d iterations to build connection graph with %d nodes and worklist size %d", | 
|---|
| 1237 | iterations, nodes_size(), ptnodes_worklist.length()); | 
|---|
| 1238 | } | 
|---|
| 1239 | #endif | 
|---|
| 1240 |  | 
|---|
| 1241 | #undef CG_BUILD_ITER_LIMIT | 
|---|
| 1242 |  | 
|---|
| 1243 | // Find fields initialized by NULL for non-escaping Allocations. | 
|---|
| 1244 | int non_escaped_length = non_escaped_worklist.length(); | 
|---|
| 1245 | for (int next = 0; next < non_escaped_length; next++) { | 
|---|
| 1246 | JavaObjectNode* ptn = non_escaped_worklist.at(next); | 
|---|
| 1247 | PointsToNode::EscapeState es = ptn->escape_state(); | 
|---|
| 1248 | assert(es <= PointsToNode::ArgEscape, "sanity"); | 
|---|
| 1249 | if (es == PointsToNode::NoEscape) { | 
|---|
| 1250 | if (find_init_values(ptn, null_obj, _igvn) > 0) { | 
|---|
| 1251 | // Adding references to the NULL object does not change escape states | 
|---|
| 1252 | // since it does not escape. Also, no fields are added to the NULL object. | 
|---|
| 1253 | add_java_object_edges(null_obj, false); | 
|---|
| 1254 | } | 
|---|
| 1255 | } | 
|---|
| 1256 | Node* n = ptn->ideal_node(); | 
|---|
| 1257 | if (n->is_Allocate()) { | 
|---|
| 1258 | // The object allocated by this Allocate node will never be | 
|---|
| 1259 | // seen by another thread. Mark it so that when it is | 
|---|
| 1260 | // expanded no MemBarStoreStore is added. | 
|---|
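|  | // For example, in 'void m() { int[] a = new int[8]; a[0] = 1; }' the | 
|---|
|  | // array never becomes visible to another thread, so no MemBarStoreStore | 
|---|
|  | // is needed when this allocation is expanded. | 
|---|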
| 1261 | InitializeNode* ini = n->as_Allocate()->initialization(); | 
|---|
| 1262 | if (ini != NULL) | 
|---|
| 1263 | ini->set_does_not_escape(); | 
|---|
| 1264 | } | 
|---|
| 1265 | } | 
|---|
| 1266 | return true; // Finished graph construction. | 
|---|
| 1267 | } | 
|---|
| 1268 |  | 
|---|
| 1269 | // Propagate GlobalEscape and ArgEscape escape states to all nodes | 
|---|
| 1270 | // and check that we still have non-escaping java objects. | 
|---|
| 1271 | bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist, | 
|---|
| 1272 | GrowableArray<JavaObjectNode*>& non_escaped_worklist) { | 
|---|
| 1273 | GrowableArray<PointsToNode*> escape_worklist; | 
|---|
| 1274 | // First, put all nodes with GlobalEscape and ArgEscape states on worklist. | 
|---|
| 1275 | int ptnodes_length = ptnodes_worklist.length(); | 
|---|
| 1276 | for (int next = 0; next < ptnodes_length; ++next) { | 
|---|
| 1277 | PointsToNode* ptn = ptnodes_worklist.at(next); | 
|---|
| 1278 | if (ptn->escape_state() >= PointsToNode::ArgEscape || | 
|---|
| 1279 | ptn->fields_escape_state() >= PointsToNode::ArgEscape) { | 
|---|
| 1280 | escape_worklist.push(ptn); | 
|---|
| 1281 | } | 
|---|
| 1282 | } | 
|---|
| 1283 | // Set escape states to referenced nodes (edges list). | 
|---|
| 1284 | while (escape_worklist.length() > 0) { | 
|---|
| 1285 | PointsToNode* ptn = escape_worklist.pop(); | 
|---|
| 1286 | PointsToNode::EscapeState es  = ptn->escape_state(); | 
|---|
| 1287 | PointsToNode::EscapeState field_es = ptn->fields_escape_state(); | 
|---|
| 1288 | if (ptn->is_Field() && ptn->as_Field()->is_oop() && | 
|---|
| 1289 | es >= PointsToNode::ArgEscape) { | 
|---|
| 1290 | // A GlobalEscape or ArgEscape state of a field means its value is unknown. | 
|---|
| 1291 | if (add_edge(ptn, phantom_obj)) { | 
|---|
| 1292 | // New edge was added | 
|---|
| 1293 | add_field_uses_to_worklist(ptn->as_Field()); | 
|---|
| 1294 | } | 
|---|
| 1295 | } | 
|---|
| 1296 | for (EdgeIterator i(ptn); i.has_next(); i.next()) { | 
|---|
| 1297 | PointsToNode* e = i.get(); | 
|---|
| 1298 | if (e->is_Arraycopy()) { | 
|---|
| 1299 | assert(ptn->arraycopy_dst(), "sanity"); | 
|---|
| 1300 | // Propagate only fields escape state through arraycopy edge. | 
|---|
| 1301 | if (e->fields_escape_state() < field_es) { | 
|---|
| 1302 | set_fields_escape_state(e, field_es); | 
|---|
| 1303 | escape_worklist.push(e); | 
|---|
| 1304 | } | 
|---|
| 1305 | } else if (es >= field_es) { | 
|---|
| 1306 | // fields_escape_state is also set to 'es' if it is less than 'es'. | 
|---|
| 1307 | if (e->escape_state() < es) { | 
|---|
| 1308 | set_escape_state(e, es); | 
|---|
| 1309 | escape_worklist.push(e); | 
|---|
| 1310 | } | 
|---|
| 1311 | } else { | 
|---|
| 1312 | // Propagate field escape state. | 
|---|
| 1313 | bool es_changed = false; | 
|---|
| 1314 | if (e->fields_escape_state() < field_es) { | 
|---|
| 1315 | set_fields_escape_state(e, field_es); | 
|---|
| 1316 | es_changed = true; | 
|---|
| 1317 | } | 
|---|
| 1318 | if ((e->escape_state() < field_es) && | 
|---|
| 1319 | e->is_Field() && ptn->is_JavaObject() && | 
|---|
| 1320 | e->as_Field()->is_oop()) { | 
|---|
| 1321 | // Change escape state of referenced fields. | 
|---|
| 1322 | set_escape_state(e, field_es); | 
|---|
| 1323 | es_changed = true; | 
|---|
| 1324 | } else if (e->escape_state() < es) { | 
|---|
| 1325 | set_escape_state(e, es); | 
|---|
| 1326 | es_changed = true; | 
|---|
| 1327 | } | 
|---|
| 1328 | if (es_changed) { | 
|---|
| 1329 | escape_worklist.push(e); | 
|---|
| 1330 | } | 
|---|
| 1331 | } | 
|---|
| 1332 | } | 
|---|
| 1333 | } | 
|---|
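|  | // In short: escape states flow along points-to edges, except that an | 
|---|
|  | // Arraycopy edge propagates only the destination's fields escape state, | 
|---|
|  | // not the object's own escape state. | 
|---|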
| 1334 | // Remove escaped objects from non_escaped list. | 
|---|
| 1335 | for (int next = non_escaped_worklist.length()-1; next >= 0 ; --next) { | 
|---|
| 1336 | JavaObjectNode* ptn = non_escaped_worklist.at(next); | 
|---|
| 1337 | if (ptn->escape_state() >= PointsToNode::GlobalEscape) { | 
|---|
| 1338 | non_escaped_worklist.delete_at(next); | 
|---|
| 1339 | } | 
|---|
| 1340 | if (ptn->escape_state() == PointsToNode::NoEscape) { | 
|---|
| 1341 | // Find fields in non-escaped allocations which have unknown value. | 
|---|
| 1342 | find_init_values(ptn, phantom_obj, NULL); | 
|---|
| 1343 | } | 
|---|
| 1344 | } | 
|---|
| 1345 | return (non_escaped_worklist.length() > 0); | 
|---|
| 1346 | } | 
|---|
| 1347 |  | 
|---|
| 1348 | // Add all references to JavaObject node by walking over all uses. | 
|---|
| 1349 | int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) { | 
|---|
| 1350 | int new_edges = 0; | 
|---|
| 1351 | if (populate_worklist) { | 
|---|
| 1352 | // Populate _worklist with the uses of jobj's uses. | 
|---|
| 1353 | for (UseIterator i(jobj); i.has_next(); i.next()) { | 
|---|
| 1354 | PointsToNode* use = i.get(); | 
|---|
| 1355 | if (use->is_Arraycopy()) | 
|---|
| 1356 | continue; | 
|---|
| 1357 | add_uses_to_worklist(use); | 
|---|
| 1358 | if (use->is_Field() && use->as_Field()->is_oop()) { | 
|---|
| 1359 | // Put on worklist all field's uses (loads) and | 
|---|
| 1360 | // related field nodes (same base and offset). | 
|---|
| 1361 | add_field_uses_to_worklist(use->as_Field()); | 
|---|
| 1362 | } | 
|---|
| 1363 | } | 
|---|
| 1364 | } | 
|---|
| 1365 | for (int l = 0; l < _worklist.length(); l++) { | 
|---|
| 1366 | PointsToNode* use = _worklist.at(l); | 
|---|
| 1367 | if (PointsToNode::is_base_use(use)) { | 
|---|
| 1368 | // Add reference from jobj to field and from field to jobj (field's base). | 
|---|
| 1369 | use = PointsToNode::get_use_node(use)->as_Field(); | 
|---|
| 1370 | if (add_base(use->as_Field(), jobj)) { | 
|---|
| 1371 | new_edges++; | 
|---|
| 1372 | } | 
|---|
| 1373 | continue; | 
|---|
| 1374 | } | 
|---|
| 1375 | assert(!use->is_JavaObject(), "sanity"); | 
|---|
| 1376 | if (use->is_Arraycopy()) { | 
|---|
| 1377 | if (jobj == null_obj) // NULL object does not have field edges | 
|---|
| 1378 | continue; | 
|---|
| 1379 | // Add an edge from the Arraycopy node to the arraycopy's source java object | 
|---|
| 1380 | if (add_edge(use, jobj)) { | 
|---|
| 1381 | jobj->set_arraycopy_src(); | 
|---|
| 1382 | new_edges++; | 
|---|
| 1383 | } | 
|---|
| 1384 | // and stop here. | 
|---|
| 1385 | continue; | 
|---|
| 1386 | } | 
|---|
| 1387 | if (!add_edge(use, jobj)) | 
|---|
| 1388 | continue; // No new edge added, there was such edge already. | 
|---|
| 1389 | new_edges++; | 
|---|
| 1390 | if (use->is_LocalVar()) { | 
|---|
| 1391 | add_uses_to_worklist(use); | 
|---|
| 1392 | if (use->arraycopy_dst()) { | 
|---|
| 1393 | for (EdgeIterator i(use); i.has_next(); i.next()) { | 
|---|
| 1394 | PointsToNode* e = i.get(); | 
|---|
| 1395 | if (e->is_Arraycopy()) { | 
|---|
| 1396 | if (jobj == null_obj) // NULL object does not have field edges | 
|---|
| 1397 | continue; | 
|---|
| 1398 | // Add edge from arraycopy's destination java object to Arraycopy node. | 
|---|
| 1399 | if (add_edge(jobj, e)) { | 
|---|
| 1400 | new_edges++; | 
|---|
| 1401 | jobj->set_arraycopy_dst(); | 
|---|
| 1402 | } | 
|---|
| 1403 | } | 
|---|
| 1404 | } | 
|---|
| 1405 | } | 
|---|
| 1406 | } else { | 
|---|
| 1407 | // Added a new edge from a Field node: jobj is now among the stored values. | 
|---|
| 1408 | // Put on worklist all of the field's uses (loads) and | 
|---|
| 1409 | // related field nodes (same base and offset). | 
|---|
| 1410 | add_field_uses_to_worklist(use->as_Field()); | 
|---|
| 1411 | } | 
|---|
| 1412 | } | 
|---|
| 1413 | _worklist.clear(); | 
|---|
| 1414 | _in_worklist.Reset(); | 
|---|
| 1415 | return new_edges; | 
|---|
| 1416 | } | 
|---|
| 1417 |  | 
|---|
| 1418 | // Put on worklist all related field nodes. | 
|---|
| 1419 | void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) { | 
|---|
| 1420 | assert(field->is_oop(), "sanity"); | 
|---|
| 1421 | int offset = field->offset(); | 
|---|
| 1422 | add_uses_to_worklist(field); | 
|---|
| 1423 | // Loop over all bases of this field and push on worklist Field nodes | 
|---|
| 1424 | // with the same offset and base (since they may reference the same field). | 
|---|
| 1425 | for (BaseIterator i(field); i.has_next(); i.next()) { | 
|---|
| 1426 | PointsToNode* base = i.get(); | 
|---|
| 1427 | add_fields_to_worklist(field, base); | 
|---|
| 1428 | // Check if the base was the source object of an arraycopy and go over the | 
|---|
| 1429 | // arraycopy's destination objects since values stored to a field of the | 
|---|
| 1430 | // source object are accessible through uses (loads) of fields of the destination objects. | 
|---|
| 1431 | if (base->arraycopy_src()) { | 
|---|
| 1432 | for (UseIterator j(base); j.has_next(); j.next()) { | 
|---|
| 1433 | PointsToNode* arycp = j.get(); | 
|---|
| 1434 | if (arycp->is_Arraycopy()) { | 
|---|
| 1435 | for (UseIterator k(arycp); k.has_next(); k.next()) { | 
|---|
| 1436 | PointsToNode* abase = k.get(); | 
|---|
| 1437 | if (abase->arraycopy_dst() && abase != base) { | 
|---|
| 1438 | // Look for the same arraycopy reference. | 
|---|
| 1439 | add_fields_to_worklist(field, abase); | 
|---|
| 1440 | } | 
|---|
| 1441 | } | 
|---|
| 1442 | } | 
|---|
| 1443 | } | 
|---|
| 1444 | } | 
|---|
| 1445 | } | 
|---|
| 1446 | } | 
|---|
| 1447 |  | 
|---|
| 1448 | // Put on worklist all related field nodes. | 
|---|
| 1449 | void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) { | 
|---|
| 1450 | int offset = field->offset(); | 
|---|
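|  | // Two fields are related when their offsets match exactly or when either | 
|---|
|  | // offset is Type::OffsetBot, i.e. an unknown array element which may | 
|---|
|  | // alias any index. | 
|---|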
| 1451 | if (base->is_LocalVar()) { | 
|---|
| 1452 | for (UseIterator j(base); j.has_next(); j.next()) { | 
|---|
| 1453 | PointsToNode* f = j.get(); | 
|---|
| 1454 | if (PointsToNode::is_base_use(f)) { // Field | 
|---|
| 1455 | f = PointsToNode::get_use_node(f); | 
|---|
| 1456 | if (f == field || !f->as_Field()->is_oop()) | 
|---|
| 1457 | continue; | 
|---|
| 1458 | int offs = f->as_Field()->offset(); | 
|---|
| 1459 | if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { | 
|---|
| 1460 | add_to_worklist(f); | 
|---|
| 1461 | } | 
|---|
| 1462 | } | 
|---|
| 1463 | } | 
|---|
| 1464 | } else { | 
|---|
| 1465 | assert(base->is_JavaObject(), "sanity"); | 
|---|
| 1466 | if (// Skip phantom_object since it is only used to indicate that | 
|---|
| 1467 | // this field's content globally escapes. | 
|---|
| 1468 | (base != phantom_obj) && | 
|---|
| 1469 | // NULL object node does not have fields. | 
|---|
| 1470 | (base != null_obj)) { | 
|---|
| 1471 | for (EdgeIterator i(base); i.has_next(); i.next()) { | 
|---|
| 1472 | PointsToNode* f = i.get(); | 
|---|
| 1473 | // Skip arraycopy edge since store to destination object field | 
|---|
| 1474 | // does not update value in source object field. | 
|---|
| 1475 | if (f->is_Arraycopy()) { | 
|---|
| 1476 | assert(base->arraycopy_dst(), "sanity"); | 
|---|
| 1477 | continue; | 
|---|
| 1478 | } | 
|---|
| 1479 | if (f == field || !f->as_Field()->is_oop()) | 
|---|
| 1480 | continue; | 
|---|
| 1481 | int offs = f->as_Field()->offset(); | 
|---|
| 1482 | if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) { | 
|---|
| 1483 | add_to_worklist(f); | 
|---|
| 1484 | } | 
|---|
| 1485 | } | 
|---|
| 1486 | } | 
|---|
| 1487 | } | 
|---|
| 1488 | } | 
|---|
| 1489 |  | 
|---|
| 1490 | // Find fields which have unknown value. | 
|---|
| 1491 | int ConnectionGraph::find_field_value(FieldNode* field) { | 
|---|
| 1492 | // Escaped fields should have init value already. | 
|---|
| 1493 | assert(field->escape_state() == PointsToNode::NoEscape, "sanity"); | 
|---|
| 1494 | int new_edges = 0; | 
|---|
| 1495 | for (BaseIterator i(field); i.has_next(); i.next()) { | 
|---|
| 1496 | PointsToNode* base = i.get(); | 
|---|
| 1497 | if (base->is_JavaObject()) { | 
|---|
| 1498 | // Skip Allocate's fields which will be processed later. | 
|---|
| 1499 | if (base->ideal_node()->is_Allocate()) | 
|---|
| 1500 | return 0; | 
|---|
| 1501 | assert(base == null_obj, "only NULL ptr base expected here"); | 
|---|
| 1502 | } | 
|---|
| 1503 | } | 
|---|
| 1504 | if (add_edge(field, phantom_obj)) { | 
|---|
| 1505 | // New edge was added | 
|---|
| 1506 | new_edges++; | 
|---|
| 1507 | add_field_uses_to_worklist(field); | 
|---|
| 1508 | } | 
|---|
| 1509 | return new_edges; | 
|---|
| 1510 | } | 
|---|
| 1511 |  | 
|---|
| 1512 | // Find initializing values for fields of allocations. | 
|---|
| 1513 | int ConnectionGraph::find_init_values(JavaObjectNode* pta, PointsToNode* init_val, PhaseTransform* phase) { | 
|---|
| 1514 | assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only"); | 
|---|
| 1515 | int new_edges = 0; | 
|---|
| 1516 | Node* alloc = pta->ideal_node(); | 
|---|
| 1517 | if (init_val == phantom_obj) { | 
|---|
| 1518 | // Do nothing for Allocate nodes since their field values are | 
|---|
| 1519 | // "known" unless they are initialized by arraycopy/clone. | 
|---|
| 1520 | if (alloc->is_Allocate() && !pta->arraycopy_dst()) | 
|---|
| 1521 | return 0; | 
|---|
| 1522 | assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity"); | 
|---|
| 1523 | #ifdef ASSERT | 
|---|
| 1524 | if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == NULL) { | 
|---|
| 1525 | const char* name = alloc->as_CallStaticJava()->_name; | 
|---|
| 1526 | assert(strncmp(name, "_multianewarray", 15) == 0, "sanity"); | 
|---|
| 1527 | } | 
|---|
| 1528 | #endif | 
|---|
| 1529 | // A non-escaped allocation returned from a Java or runtime call has | 
|---|
| 1530 | // unknown values in its fields. | 
|---|
| 1531 | for (EdgeIterator i(pta); i.has_next(); i.next()) { | 
|---|
| 1532 | PointsToNode* field = i.get(); | 
|---|
| 1533 | if (field->is_Field() && field->as_Field()->is_oop()) { | 
|---|
| 1534 | if (add_edge(field, phantom_obj)) { | 
|---|
| 1535 | // New edge was added | 
|---|
| 1536 | new_edges++; | 
|---|
| 1537 | add_field_uses_to_worklist(field->as_Field()); | 
|---|
| 1538 | } | 
|---|
| 1539 | } | 
|---|
| 1540 | } | 
|---|
| 1541 | return new_edges; | 
|---|
| 1542 | } | 
|---|
| 1543 | assert(init_val == null_obj, "sanity"); | 
|---|
| 1544 | // Do nothing for Call nodes since their field values are unknown. | 
|---|
| 1545 | if (!alloc->is_Allocate()) | 
|---|
| 1546 | return 0; | 
|---|
| 1547 |  | 
|---|
| 1548 | InitializeNode* ini = alloc->as_Allocate()->initialization(); | 
|---|
| 1549 | bool visited_bottom_offset = false; | 
|---|
| 1550 | GrowableArray<int> offsets_worklist; | 
|---|
| 1551 |  | 
|---|
| 1552 | // Check if an oop field's initializing value is recorded and add | 
|---|
| 1553 | // a corresponding NULL value if it is not recorded. | 
|---|
| 1554 | // The Connection Graph does not record a default initialization by NULL | 
|---|
| 1555 | // captured by the Initialize node. | 
|---|
| 1556 | // | 
|---|
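|  | // For example, for 'Point p = new Point();' with no captured store to | 
|---|
|  | // p.next, an edge from the p.next field to null_obj models the default | 
|---|
|  | // NULL value so that the pointer compare optimization stays correct. | 
|---|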
| 1557 | for (EdgeIterator i(pta); i.has_next(); i.next()) { | 
|---|
| 1558 | PointsToNode* field = i.get(); // Field (AddP) | 
|---|
| 1559 | if (!field->is_Field() || !field->as_Field()->is_oop()) | 
|---|
| 1560 | continue; // Not oop field | 
|---|
| 1561 | int offset = field->as_Field()->offset(); | 
|---|
| 1562 | if (offset == Type::OffsetBot) { | 
|---|
| 1563 | if (!visited_bottom_offset) { | 
|---|
| 1564 | // OffsetBot is used to reference an array's element; | 
|---|
| 1565 | // always add a reference to NULL to all Field nodes since we don't | 
|---|
| 1566 | // know which element is referenced. | 
|---|
| 1567 | if (add_edge(field, null_obj)) { | 
|---|
| 1568 | // New edge was added | 
|---|
| 1569 | new_edges++; | 
|---|
| 1570 | add_field_uses_to_worklist(field->as_Field()); | 
|---|
| 1571 | visited_bottom_offset = true; | 
|---|
| 1572 | } | 
|---|
| 1573 | } | 
|---|
| 1574 | } else { | 
|---|
| 1575 | // Check only oop fields. | 
|---|
| 1576 | const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type(); | 
|---|
| 1577 | if (adr_type->isa_rawptr()) { | 
|---|
| 1578 | #ifdef ASSERT | 
|---|
| 1579 | // Raw pointers are used for initializing stores, so skip this field | 
|---|
| 1580 | // since it should be recorded already. | 
|---|
| 1581 | Node* base = get_addp_base(field->ideal_node()); | 
|---|
| 1582 | assert(adr_type->isa_rawptr() && base->is_Proj() && | 
|---|
| 1583 | (base->in(0) == alloc), "unexpected pointer type"); | 
|---|
| 1584 | #endif | 
|---|
| 1585 | continue; | 
|---|
| 1586 | } | 
|---|
| 1587 | if (!offsets_worklist.contains(offset)) { | 
|---|
| 1588 | offsets_worklist.append(offset); | 
|---|
| 1589 | Node* value = NULL; | 
|---|
| 1590 | if (ini != NULL) { | 
|---|
| 1591 | // StoreP::memory_type() == T_ADDRESS | 
|---|
| 1592 | BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS; | 
|---|
| 1593 | Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase); | 
|---|
| 1594 | // Make sure the initializing store has the same type as this AddP. | 
|---|
| 1595 | // This AddP may reference a non-existing field because it is on a | 
|---|
| 1596 | // dead branch of a bimorphic call which has not been eliminated yet. | 
|---|
| 1597 | if (store != NULL && store->is_Store() && | 
|---|
| 1598 | store->as_Store()->memory_type() == ft) { | 
|---|
| 1599 | value = store->in(MemNode::ValueIn); | 
|---|
| 1600 | #ifdef ASSERT | 
|---|
| 1601 | if (VerifyConnectionGraph) { | 
|---|
| 1602 | // Verify that AddP already points to all objects the value points to. | 
|---|
| 1603 | PointsToNode* val = ptnode_adr(value->_idx); | 
|---|
| 1604 | assert((val != NULL), "should be processed already"); | 
|---|
| 1605 | PointsToNode* missed_obj = NULL; | 
|---|
| 1606 | if (val->is_JavaObject()) { | 
|---|
| 1607 | if (!field->points_to(val->as_JavaObject())) { | 
|---|
| 1608 | missed_obj = val; | 
|---|
| 1609 | } | 
|---|
| 1610 | } else { | 
|---|
| 1611 | if (!val->is_LocalVar() || (val->edge_count() == 0)) { | 
|---|
| 1612 | tty->print_cr("----------init store has invalid value -----"); | 
|---|
| 1613 | store->dump(); | 
|---|
| 1614 | val->dump(); | 
|---|
| 1615 | assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already"); | 
|---|
| 1616 | } | 
|---|
| 1617 | for (EdgeIterator j(val); j.has_next(); j.next()) { | 
|---|
| 1618 | PointsToNode* obj = j.get(); | 
|---|
| 1619 | if (obj->is_JavaObject()) { | 
|---|
| 1620 | if (!field->points_to(obj->as_JavaObject())) { | 
|---|
| 1621 | missed_obj = obj; | 
|---|
| 1622 | break; | 
|---|
| 1623 | } | 
|---|
| 1624 | } | 
|---|
| 1625 | } | 
|---|
| 1626 | } | 
|---|
| 1627 | if (missed_obj != NULL) { | 
|---|
| 1628 | tty->print_cr("----------field---------------------------------"); | 
|---|
| 1629 | field->dump(); | 
|---|
| 1630 | tty->print_cr("----------missed reference to object-----------"); | 
|---|
| 1631 | missed_obj->dump(); | 
|---|
| 1632 | tty->print_cr("----------object referenced by init store -----"); | 
|---|
| 1633 | store->dump(); | 
|---|
| 1634 | val->dump(); | 
|---|
| 1635 | assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference"); | 
|---|
| 1636 | } | 
|---|
| 1637 | } | 
|---|
| 1638 | #endif | 
|---|
| 1639 | } else { | 
|---|
| 1640 | // There could be initializing stores which follow the allocation. | 
|---|
| 1641 | // For example, a volatile field store is not collected | 
|---|
| 1642 | // by the Initialize node. | 
|---|
| 1643 | // | 
|---|
| 1644 | // We would need to check for dependent loads to separate such stores from | 
|---|
| 1645 | // stores which follow loads. For now, add the initial value NULL so | 
|---|
| 1646 | // that the compare-pointers optimization works correctly. | 
|---|
| 1647 | } | 
|---|
| 1648 | } | 
|---|
| 1649 | if (value == NULL) { | 
|---|
| 1650 | // A field's initializing value was not recorded. Add NULL. | 
|---|
| 1651 | if (add_edge(field, null_obj)) { | 
|---|
| 1652 | // New edge was added | 
|---|
| 1653 | new_edges++; | 
|---|
| 1654 | add_field_uses_to_worklist(field->as_Field()); | 
|---|
| 1655 | } | 
|---|
| 1656 | } | 
|---|
| 1657 | } | 
|---|
| 1658 | } | 
|---|
| 1659 | } | 
|---|
| 1660 | return new_edges; | 
|---|
| 1661 | } | 
|---|
| 1662 |  | 
|---|
| 1663 | // Adjust scalar_replaceable state after Connection Graph is built. | 
|---|
| 1664 | void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj) { | 
|---|
| 1665 | // Search for non-escaping objects which are not scalar replaceable | 
|---|
| 1666 | // and mark them to propagate the state to referenced objects. | 
|---|
| 1667 |  | 
|---|
| 1668 | // 1. An object is not scalar replaceable if the field into which it is | 
|---|
| 1669 | // stored has an unknown offset (stored into an unknown element of an array). | 
|---|
| 1670 | // | 
|---|
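|  | // For example, in 'a[i] = new Point();' with a non-constant index i, the | 
|---|
|  | // store goes through a Field node with OffsetBot, so the new Point | 
|---|
|  | // cannot be scalar replaced. | 
|---|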
| 1671 | for (UseIterator i(jobj); i.has_next(); i.next()) { | 
|---|
| 1672 | PointsToNode* use = i.get(); | 
|---|
| 1673 | if (use->is_Arraycopy()) { | 
|---|
| 1674 | continue; | 
|---|
| 1675 | } | 
|---|
| 1676 | if (use->is_Field()) { | 
|---|
| 1677 | FieldNode* field = use->as_Field(); | 
|---|
| 1678 | assert(field->is_oop() && field->scalar_replaceable(), "sanity"); | 
|---|
| 1679 | if (field->offset() == Type::OffsetBot) { | 
|---|
| 1680 | jobj->set_scalar_replaceable(false); | 
|---|
| 1681 | return; | 
|---|
| 1682 | } | 
|---|
| 1683 | // 2. An object is not scalar replaceable if the field into which it is | 
|---|
| 1684 | // stored has multiple bases, one of which is null. | 
|---|
| 1685 | if (field->base_count() > 1) { | 
|---|
| 1686 | for (BaseIterator i(field); i.has_next(); i.next()) { | 
|---|
| 1687 | PointsToNode* base = i.get(); | 
|---|
| 1688 | if (base == null_obj) { | 
|---|
| 1689 | jobj->set_scalar_replaceable(false); | 
|---|
| 1690 | return; | 
|---|
| 1691 | } | 
|---|
| 1692 | } | 
|---|
| 1693 | } | 
|---|
| 1694 | } | 
|---|
| 1695 | assert(use->is_Field() || use->is_LocalVar(), "sanity"); | 
|---|
| 1696 | // 3. An object is not scalar replaceable if it is merged with other objects. | 
|---|
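|  | // For example, 'Point p = cond ? new Point() : other;' merges two | 
|---|
|  | // objects in one LocalVar (Phi), so neither object can be scalar | 
|---|
|  | // replaced. | 
|---|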
| 1697 | for (EdgeIterator j(use); j.has_next(); j.next()) { | 
|---|
| 1698 | PointsToNode* ptn = j.get(); | 
|---|
| 1699 | if (ptn->is_JavaObject() && ptn != jobj) { | 
|---|
| 1700 | // Mark all objects. | 
|---|
| 1701 | jobj->set_scalar_replaceable(false); | 
|---|
| 1702 | ptn->set_scalar_replaceable(false); | 
|---|
| 1703 | } | 
|---|
| 1704 | } | 
|---|
| 1705 | if (!jobj->scalar_replaceable()) { | 
|---|
| 1706 | return; | 
|---|
| 1707 | } | 
|---|
| 1708 | } | 
|---|
| 1709 |  | 
|---|
| 1710 | for (EdgeIterator j(jobj); j.has_next(); j.next()) { | 
|---|
| 1711 | if (j.get()->is_Arraycopy()) { | 
|---|
| 1712 | continue; | 
|---|
| 1713 | } | 
|---|
| 1714 |  | 
|---|
| 1715 | // Non-escaping object node should point only to field nodes. | 
|---|
| 1716 | FieldNode* field = j.get()->as_Field(); | 
|---|
| 1717 | int offset = field->as_Field()->offset(); | 
|---|
| 1718 |  | 
|---|
| 1719 | // 4. An object is not scalar replaceable if it has a field with unknown | 
|---|
| 1720 | // offset (an array's element is accessed in a loop). | 
|---|
| 1721 | if (offset == Type::OffsetBot) { | 
|---|
| 1722 | jobj->set_scalar_replaceable(false); | 
|---|
| 1723 | return; | 
|---|
| 1724 | } | 
|---|
| 1725 | // 5. Currently an object is not scalar replaceable if a LoadStore node | 
|---|
| 1726 | // accesses its field since the field value is unknown after it. | 
|---|
| 1727 | // | 
|---|
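|  | // For example, a compareAndSwap intrinsic on one of the object's fields | 
|---|
|  | // becomes a LoadStore node, after which the flow-insensitive analysis | 
|---|
|  | // cannot track the field's value. | 
|---|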
| 1728 | Node* n = field->ideal_node(); | 
|---|
| 1729 |  | 
|---|
| 1730 | // Test for an unsafe access that was parsed as maybe off heap | 
|---|
| 1731 | // (with a CheckCastPP to raw memory). | 
|---|
| 1732 | assert(n->is_AddP(), "expect an address computation"); | 
|---|
| 1733 | if (n->in(AddPNode::Base)->is_top() && | 
|---|
| 1734 | n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) { | 
|---|
| 1735 | assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected"); | 
|---|
| 1736 | assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected"); | 
|---|
| 1737 | jobj->set_scalar_replaceable(false); | 
|---|
| 1738 | return; | 
|---|
| 1739 | } | 
|---|
| 1740 |  | 
|---|
| 1741 | for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { | 
|---|
| 1742 | Node* u = n->fast_out(i); | 
|---|
| 1743 | if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) { | 
|---|
| 1744 | jobj->set_scalar_replaceable(false); | 
|---|
| 1745 | return; | 
|---|
| 1746 | } | 
|---|
| 1747 | } | 
|---|
| 1748 |  | 
|---|
| 1749 | // 6. Or the address may point to more than one object. This may produce | 
|---|
| 1750 | // a false positive result (set not scalar replaceable) | 
|---|
| 1751 | // since the flow-insensitive escape analysis can't separate | 
|---|
| 1752 | // the case when stores overwrite the field's value from the case | 
|---|
| 1753 | // when stores happened on different control branches. | 
|---|
| 1754 | // | 
|---|
| 1755 | // Note: it will disable scalar replacement in some cases: | 
|---|
| 1756 | // | 
|---|
| 1757 | //    Point p[] = new Point[1]; | 
|---|
| 1758 | //    p[0] = new Point(); // Will not be scalar replaced | 
|---|
| 1759 | // | 
|---|
| 1760 | // but it will save us from incorrect optimizations in cases like: | 
|---|
| 1761 | // | 
|---|
| 1762 | //    Point p[] = new Point[1]; | 
|---|
| 1763 | //    if ( x ) p[0] = new Point(); // Will not be scalar replaced | 
|---|
| 1764 | // | 
|---|
| 1765 | if (field->base_count() > 1) { | 
|---|
| 1766 | for (BaseIterator i(field); i.has_next(); i.next()) { | 
|---|
| 1767 | PointsToNode* base = i.get(); | 
|---|
| 1768 | // Don't take into account LocalVar nodes which | 
|---|
| 1769 | // may point to only one object, which should also be | 
|---|
| 1770 | // this field's base by now. | 
|---|
| 1771 | if (base->is_JavaObject() && base != jobj) { | 
|---|
| 1772 | // Mark all bases. | 
|---|
| 1773 | jobj->set_scalar_replaceable(false); | 
|---|
| 1774 | base->set_scalar_replaceable(false); | 
|---|
| 1775 | } | 
|---|
| 1776 | } | 
|---|
| 1777 | } | 
|---|
| 1778 | } | 
|---|
| 1779 | } | 
|---|
| 1780 |  | 
|---|
| 1781 | #ifdef ASSERT | 
|---|
| 1782 | void ConnectionGraph::verify_connection_graph( | 
|---|
| 1783 | GrowableArray<PointsToNode*>&   ptnodes_worklist, | 
|---|
| 1784 | GrowableArray<JavaObjectNode*>& non_escaped_worklist, | 
|---|
| 1785 | GrowableArray<JavaObjectNode*>& java_objects_worklist, | 
|---|
| 1786 | GrowableArray<Node*>& addp_worklist) { | 
|---|
| 1787 | // Verify that graph is complete - no new edges could be added. | 
|---|
| 1788 | int java_objects_length = java_objects_worklist.length(); | 
|---|
| 1789 | int non_escaped_length  = non_escaped_worklist.length(); | 
|---|
| 1790 | int new_edges = 0; | 
|---|
| 1791 | for (int next = 0; next < java_objects_length; ++next) { | 
|---|
| 1792 | JavaObjectNode* ptn = java_objects_worklist.at(next); | 
|---|
| 1793 | new_edges += add_java_object_edges(ptn, true); | 
|---|
| 1794 | } | 
|---|
| 1795 | assert(new_edges == 0, "graph was not complete"); | 
|---|
| 1796 | // Verify that escape state is final. | 
|---|
| 1797 | int length = non_escaped_worklist.length(); | 
|---|
| 1798 | find_non_escaped_objects(ptnodes_worklist, non_escaped_worklist); | 
|---|
| 1799 | assert((non_escaped_length == non_escaped_worklist.length()) && | 
|---|
| 1800 | (non_escaped_length == length) && | 
|---|
| 1801 | (_worklist.length() == 0), "escape state was not final"); | 
|---|
| 1802 |  | 
|---|
| 1803 | // Verify fields information. | 
|---|
| 1804 | int addp_length = addp_worklist.length(); | 
|---|
| 1805 | for (int next = 0; next < addp_length; ++next ) { | 
|---|
| 1806 | Node* n = addp_worklist.at(next); | 
|---|
| 1807 | FieldNode* field = ptnode_adr(n->_idx)->as_Field(); | 
|---|
| 1808 | if (field->is_oop()) { | 
|---|
| 1809 | // Verify that field has all bases | 
|---|
| 1810 | Node* base = get_addp_base(n); | 
|---|
| 1811 | PointsToNode* ptn = ptnode_adr(base->_idx); | 
|---|
| 1812 | if (ptn->is_JavaObject()) { | 
|---|
| 1813 | assert(field->has_base(ptn->as_JavaObject()), "sanity"); | 
|---|
| 1814 | } else { | 
|---|
| 1815 | assert(ptn->is_LocalVar(), "sanity"); | 
|---|
| 1816 | for (EdgeIterator i(ptn); i.has_next(); i.next()) { | 
|---|
| 1817 | PointsToNode* e = i.get(); | 
|---|
| 1818 | if (e->is_JavaObject()) { | 
|---|
| 1819 | assert(field->has_base(e->as_JavaObject()), "sanity"); | 
|---|
| 1820 | } | 
|---|
| 1821 | } | 
|---|
| 1822 | } | 
|---|
| 1823 | // Verify that all fields have initializing values. | 
|---|
| 1824 | if (field->edge_count() == 0) { | 
|---|
| 1825 | tty->print_cr("----------field does not have references----------"); | 
|---|
| 1826 | field->dump(); | 
|---|
| 1827 | for (BaseIterator i(field); i.has_next(); i.next()) { | 
|---|
| 1828 | PointsToNode* base = i.get(); | 
|---|
| 1829 | tty->print_cr("----------field has next base---------------------"); | 
|---|
| 1830 | base->dump(); | 
|---|
| 1831 | if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) { | 
|---|
| 1832 | tty->print_cr("----------base has fields-------------------------"); | 
|---|
| 1833 | for (EdgeIterator j(base); j.has_next(); j.next()) { | 
|---|
| 1834 | j.get()->dump(); | 
|---|
| 1835 | } | 
|---|
| 1836 | tty->print_cr("----------base has references---------------------"); | 
|---|
| 1837 | for (UseIterator j(base); j.has_next(); j.next()) { | 
|---|
| 1838 | j.get()->dump(); | 
|---|
| 1839 | } | 
|---|
| 1840 | } | 
|---|
| 1841 | } | 
|---|
| 1842 | for (UseIterator i(field); i.has_next(); i.next()) { | 
|---|
| 1843 | i.get()->dump(); | 
|---|
| 1844 | } | 
|---|
| 1845 | assert(field->edge_count() > 0, "sanity"); | 
|---|
| 1846 | } | 
|---|
| 1847 | } | 
|---|
| 1848 | } | 
|---|
| 1849 | } | 
|---|
| 1850 | #endif | 
|---|
| 1851 |  | 
|---|
| 1852 | // Optimize ideal graph. | 
|---|
| 1853 | void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist, | 
|---|
| 1854 | GrowableArray<Node*>& storestore_worklist) { | 
|---|
| 1855 | Compile* C = _compile; | 
|---|
| 1856 | PhaseIterGVN* igvn = _igvn; | 
|---|
| 1857 | if (EliminateLocks) { | 
|---|
| 1858 | // Mark locks before changing ideal graph. | 
|---|
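|  | // For example, 'synchronized (obj) { ... }' on a non-escaping obj can | 
|---|
|  | // never be contended by another thread, so all of its Lock/Unlock nodes | 
|---|
|  | // can be eliminated later during macro expansion. | 
|---|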
| 1859 | int cnt = C->macro_count(); | 
|---|
| 1860 | for (int i = 0; i < cnt; i++) { | 
|---|
| 1861 | Node *n = C->macro_node(i); | 
|---|
| 1862 | if (n->is_AbstractLock()) { // Lock and Unlock nodes | 
|---|
| 1863 | AbstractLockNode* alock = n->as_AbstractLock(); | 
|---|
| 1864 | if (!alock->is_non_esc_obj()) { | 
|---|
| 1865 | if (not_global_escape(alock->obj_node())) { | 
|---|
| 1866 | assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity"); | 
|---|
| 1867 | // The lock could be marked eliminated by the lock coarsening | 
|---|
| 1868 | // code during the first IGVN before EA. Replace the coarsened flag | 
|---|
| 1869 | // to eliminate all associated locks/unlocks. | 
|---|
| 1870 | #ifdef ASSERT | 
|---|
| 1871 | alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3"); | 
|---|
| 1872 | #endif | 
|---|
| 1873 | alock->set_non_esc_obj(); | 
|---|
| 1874 | } | 
|---|
| 1875 | } | 
|---|
| 1876 | } | 
|---|
| 1877 | } | 
|---|
| 1878 | } | 
|---|
| 1879 |  | 
|---|
| 1880 | if (OptimizePtrCompare) { | 
|---|
| 1881 | // Add ConI(#CC_GT) and ConI(#CC_EQ). | 
|---|
| 1882 | _pcmp_neq = igvn->makecon(TypeInt::CC_GT); | 
|---|
| 1883 | _pcmp_eq = igvn->makecon(TypeInt::CC_EQ); | 
|---|
| 1884 | // Optimize objects compare. | 
|---|
| 1885 | while (ptr_cmp_worklist.length() != 0) { | 
|---|
| 1886 | Node *n = ptr_cmp_worklist.pop(); | 
|---|
| 1887 | Node *res = optimize_ptr_compare(n); | 
|---|
| 1888 | if (res != NULL) { | 
|---|
| 1889 | #ifndef PRODUCT | 
|---|
| 1890 | if (PrintOptimizePtrCompare) { | 
|---|
| 1891 | tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (res == _pcmp_eq ? "EQ" : "NotEQ")); | 
|---|
| 1892 | if (Verbose) { | 
|---|
| 1893 | n->dump(1); | 
|---|
| 1894 | } | 
|---|
| 1895 | } | 
|---|
| 1896 | #endif | 
|---|
| 1897 | igvn->replace_node(n, res); | 
|---|
| 1898 | } | 
|---|
| 1899 | } | 
|---|
| 1900 | // cleanup | 
|---|
| 1901 | if (_pcmp_neq->outcnt() == 0) | 
|---|
| 1902 | igvn->hash_delete(_pcmp_neq); | 
|---|
| 1903 | if (_pcmp_eq->outcnt()  == 0) | 
|---|
| 1904 | igvn->hash_delete(_pcmp_eq); | 
|---|
| 1905 | } | 
|---|
| 1906 |  | 
|---|
| 1907 | // For MemBarStoreStore nodes added in library_call.cpp, check | 
|---|
| 1908 | // escape status of associated AllocateNode and optimize out | 
|---|
| 1909 | // MemBarStoreStore node if the allocated object never escapes. | 
|---|
| 1910 | while (storestore_worklist.length() != 0) { | 
|---|
| 1911 | Node *n = storestore_worklist.pop(); | 
|---|
| 1912 | MemBarStoreStoreNode *storestore = n->as_MemBarStoreStore(); | 
|---|
| 1913 | Node *alloc = storestore->in(MemBarNode::Precedent)->in(0); | 
|---|
| 1914 | assert(alloc->is_Allocate(), "storestore should point to AllocateNode"); | 
|---|
| 1915 | if (not_global_escape(alloc)) { | 
|---|
| 1916 | MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot); | 
|---|
| 1917 | mb->init_req(TypeFunc::Memory, storestore->in(TypeFunc::Memory)); | 
|---|
| 1918 | mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control)); | 
|---|
| 1919 | igvn->register_new_node_with_optimizer(mb); | 
|---|
| 1920 | igvn->replace_node(storestore, mb); | 
|---|
| 1921 | } | 
|---|
| 1922 | } | 
|---|
| 1923 | } | 
|---|
| 1924 |  | 
|---|
| 1925 | // Optimize objects compare. | 
|---|
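|  | // Returns _pcmp_eq or _pcmp_neq when the compared pointers are provably | 
|---|
|  | // equal or provably different (this includes the nullness check), and | 
|---|
|  | // NULL when nothing can be proven. | 
|---|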
| 1926 | Node* ConnectionGraph::optimize_ptr_compare(Node* n) { | 
|---|
| 1927 | assert(OptimizePtrCompare, "sanity"); | 
|---|
| 1928 | PointsToNode* ptn1 = ptnode_adr(n->in(1)->_idx); | 
|---|
| 1929 | PointsToNode* ptn2 = ptnode_adr(n->in(2)->_idx); | 
|---|
| 1930 | JavaObjectNode* jobj1 = unique_java_object(n->in(1)); | 
|---|
| 1931 | JavaObjectNode* jobj2 = unique_java_object(n->in(2)); | 
|---|
| 1932 | assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity"); | 
|---|
| 1933 | assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity"); | 
|---|
| 1934 |  | 
|---|
| 1935 | // Check simple cases first. | 
|---|
| 1936 | if (jobj1 != NULL) { | 
|---|
| 1937 | if (jobj1->escape_state() == PointsToNode::NoEscape) { | 
|---|
| 1938 | if (jobj1 == jobj2) { | 
|---|
| 1939 | // Comparing the same non-escaping object. | 
|---|
| 1940 | return _pcmp_eq; | 
|---|
| 1941 | } | 
|---|
| 1942 | Node* obj = jobj1->ideal_node(); | 
|---|
| 1943 | // Comparing a non-escaping allocation. | 
|---|
| 1944 | if ((obj->is_Allocate() || obj->is_CallStaticJava()) && | 
|---|
| 1945 | !ptn2->points_to(jobj1)) { | 
|---|
| 1946 | return _pcmp_neq; // This includes nullness check. | 
|---|
| 1947 | } | 
|---|
| 1948 | } | 
|---|
| 1949 | } | 
|---|
| 1950 | if (jobj2 != NULL) { | 
|---|
| 1951 | if (jobj2->escape_state() == PointsToNode::NoEscape) { | 
|---|
| 1952 | Node* obj = jobj2->ideal_node(); | 
|---|
| 1953 | // Comparing a non-escaping allocation. | 
|---|
| 1954 | if ((obj->is_Allocate() || obj->is_CallStaticJava()) && | 
|---|
| 1955 | !ptn1->points_to(jobj2)) { | 
|---|
| 1956 | return _pcmp_neq; // This includes nullness check. | 
|---|
| 1957 | } | 
|---|
| 1958 | } | 
|---|
| 1959 | } | 
|---|
| 1960 | if (jobj1 != NULL && jobj1 != phantom_obj && | 
|---|
| 1961 | jobj2 != NULL && jobj2 != phantom_obj && | 
|---|
| 1962 | jobj1->ideal_node()->is_Con() && | 
|---|
| 1963 | jobj2->ideal_node()->is_Con()) { | 
|---|
| 1964 | // Comparing Klass or String constants. Need to be careful with | 
|---|
| 1965 | // compressed pointers: compare the types of ConN and ConP instead of the nodes. | 
|---|
| 1966 | const Type* t1 = jobj1->ideal_node()->get_ptr_type(); | 
|---|
| 1967 | const Type* t2 = jobj2->ideal_node()->get_ptr_type(); | 
|---|
| 1968 | if (t1->make_ptr() == t2->make_ptr()) { | 
|---|
| 1969 | return _pcmp_eq; | 
|---|
| 1970 | } else { | 
|---|
| 1971 | return _pcmp_neq; | 
|---|
| 1972 | } | 
|---|
| 1973 | } | 
|---|
| 1974 | if (ptn1->meet(ptn2)) { | 
|---|
| 1975 | return NULL; // Sets are not disjoint | 
|---|
| 1976 | } | 
|---|
| 1977 |  | 
|---|
| 1978 | // Sets are disjoint. | 
|---|
| 1979 | bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj); | 
|---|
| 1980 | bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj); | 
|---|
| 1981 | bool set1_has_null_ptr    = ptn1->points_to(null_obj); | 
|---|
| 1982 | bool set2_has_null_ptr    = ptn2->points_to(null_obj); | 
|---|
| 1983 | if ((set1_has_unknown_ptr && set2_has_null_ptr) || | 
|---|
| 1984 | (set2_has_unknown_ptr && set1_has_null_ptr)) { | 
|---|
| 1985 | // Check nullness of unknown object. | 
|---|
| 1986 | return NULL; | 
|---|
| 1987 | } | 
|---|
| 1988 |  | 
|---|
| 1989 | // Disjointness by itself is not sufficient since | 
|---|
| 1990 | // alias analysis is not complete for escaped objects. | 
|---|
| 1991 | // Disjoint sets are definitely unrelated only when | 
|---|
| 1992 | // at least one set contains only non-escaping allocations. | 
|---|
| 1993 | if (!set1_has_unknown_ptr && !set1_has_null_ptr) { | 
|---|
| 1994 | if (ptn1->non_escaping_allocation()) { | 
|---|
| 1995 | return _pcmp_neq; | 
|---|
| 1996 | } | 
|---|
| 1997 | } | 
|---|
| 1998 | if (!set2_has_unknown_ptr && !set2_has_null_ptr) { | 
|---|
| 1999 | if (ptn2->non_escaping_allocation()) { | 
|---|
| 2000 | return _pcmp_neq; | 
|---|
| 2001 | } | 
|---|
| 2002 | } | 
|---|
| 2003 | return NULL; | 
|---|
| 2004 | } | 
|---|
| 2005 |  | 
|---|
| 2006 | // Connection Graph construction functions. | 
|---|
| 2007 |  | 
|---|
| 2008 | void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) { | 
|---|
| 2009 | PointsToNode* ptadr = _nodes.at(n->_idx); | 
|---|
| 2010 | if (ptadr != NULL) { | 
|---|
| 2011 | assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity"); | 
|---|
| 2012 | return; | 
|---|
| 2013 | } | 
|---|
| 2014 | Compile* C = _compile; | 
|---|
| 2015 | ptadr = new (C->comp_arena()) LocalVarNode(this, n, es); | 
|---|
| 2016 | _nodes.at_put(n->_idx, ptadr); | 
|---|
| 2017 | } | 
|---|
| 2018 |  | 
|---|
| 2019 | void ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) { | 
|---|
| 2020 | PointsToNode* ptadr = _nodes.at(n->_idx); | 
|---|
| 2021 | if (ptadr != NULL) { | 
|---|
| 2022 | assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity"); | 
|---|
| 2023 | return; | 
|---|
| 2024 | } | 
|---|
| 2025 | Compile* C = _compile; | 
|---|
| 2026 | ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es); | 
|---|
| 2027 | _nodes.at_put(n->_idx, ptadr); | 
|---|
| 2028 | } | 
|---|
| 2029 |  | 
|---|
| 2030 | void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) { | 
|---|
| 2031 | PointsToNode* ptadr = _nodes.at(n->_idx); | 
|---|
| 2032 | if (ptadr != NULL) { | 
|---|
| 2033 | assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity"); | 
|---|
| 2034 | return; | 
|---|
| 2035 | } | 
|---|
| 2036 | bool unsafe = false; | 
|---|
| 2037 | bool is_oop = is_oop_field(n, offset, &unsafe); | 
|---|
| 2038 | if (unsafe) { | 
|---|
| 2039 | es = PointsToNode::GlobalEscape; | 
|---|
| 2040 | } | 
|---|
| 2041 | Compile* C = _compile; | 
|---|
| 2042 | FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop); | 
|---|
| 2043 | _nodes.at_put(n->_idx, field); | 
|---|
| 2044 | } | 
|---|
| 2045 |  | 
|---|
| 2046 | void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es, | 
|---|
| 2047 | PointsToNode* src, PointsToNode* dst) { | 
|---|
| 2048 | assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar"); | 
|---|
| 2049 | assert((src != null_obj) && (dst != null_obj), "not for ConP NULL"); | 
|---|
| 2050 | PointsToNode* ptadr = _nodes.at(n->_idx); | 
|---|
| 2051 | if (ptadr != NULL) { | 
|---|
| 2052 | assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity"); | 
|---|
| 2053 | return; | 
|---|
| 2054 | } | 
|---|
| 2055 | Compile* C = _compile; | 
|---|
| 2056 | ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es); | 
|---|
| 2057 | _nodes.at_put(n->_idx, ptadr); | 
|---|
| 2058 | // Add edge from arraycopy node to source object. | 
|---|
| 2059 | (void)add_edge(ptadr, src); | 
|---|
| 2060 | src->set_arraycopy_src(); | 
|---|
| 2061 | // Add edge from destination object to arraycopy node. | 
|---|
| 2062 | (void)add_edge(dst, ptadr); | 
|---|
| 2063 | dst->set_arraycopy_dst(); | 
|---|
| 2064 | } | 
|---|
| 2065 |  | 
|---|
| 2066 | bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) { | 
|---|
| 2067 | const Type* adr_type = n->as_AddP()->bottom_type(); | 
|---|
| 2068 | BasicType bt = T_INT; | 
|---|
| 2069 | if (offset == Type::OffsetBot) { | 
|---|
| 2070 | // Check only oop fields. | 
|---|
| 2071 | if (!adr_type->isa_aryptr() || | 
|---|
| 2072 | (adr_type->isa_aryptr()->klass() == NULL) || | 
|---|
| 2073 | adr_type->isa_aryptr()->klass()->is_obj_array_klass()) { | 
|---|
| 2074 | // OffsetBot is used to reference an array's element. Ignore the first AddP. | 
|---|
| 2075 | if (find_second_addp(n, n->in(AddPNode::Base)) == NULL) { | 
|---|
| 2076 | bt = T_OBJECT; | 
|---|
| 2077 | } | 
|---|
| 2078 | } | 
|---|
| 2079 | } else if (offset != oopDesc::klass_offset_in_bytes()) { | 
|---|
| 2080 | if (adr_type->isa_instptr()) { | 
|---|
| 2081 | ciField* field = _compile->alias_type(adr_type->isa_instptr())->field(); | 
|---|
| 2082 | if (field != NULL) { | 
|---|
| 2083 | bt = field->layout_type(); | 
|---|
| 2084 | } else { | 
|---|
| 2085 | // Check for unsafe oop field access | 
|---|
| 2086 | if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || | 
|---|
| 2087 | n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || | 
|---|
| 2088 | n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || | 
|---|
| 2089 | BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { | 
|---|
| 2090 | bt = T_OBJECT; | 
|---|
| 2091 | (*unsafe) = true; | 
|---|
| 2092 | } | 
|---|
| 2093 | } | 
|---|
| 2094 | } else if (adr_type->isa_aryptr()) { | 
|---|
| 2095 | if (offset == arrayOopDesc::length_offset_in_bytes()) { | 
|---|
| 2096 | // Ignore array length load. | 
|---|
| 2097 | } else if (find_second_addp(n, n->in(AddPNode::Base)) != NULL) { | 
|---|
| 2098 | // Ignore first AddP. | 
|---|
| 2099 | } else { | 
|---|
| 2100 | const Type* elemtype = adr_type->isa_aryptr()->elem(); | 
|---|
| 2101 | bt = elemtype->array_element_basic_type(); | 
|---|
| 2102 | } | 
|---|
| 2103 | } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) { | 
|---|
| 2104 | // Allocation initialization, ThreadLocal field access, unsafe access | 
|---|
| 2105 | if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) || | 
|---|
| 2106 | n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) || | 
|---|
| 2107 | n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) || | 
|---|
| 2108 | BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) { | 
|---|
| 2109 | bt = T_OBJECT; | 
|---|
| 2110 | } | 
|---|
| 2111 | } | 
|---|
| 2112 | } | 
|---|
| 2113 | return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY); | 
|---|
| 2114 | } | 
|---|
| 2115 |  | 
|---|
| 2116 | // Returns the unique java object this node points to, or NULL. | 
|---|
| 2117 | JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) { | 
|---|
| 2118 | assert(!_collecting, "should not call during graph construction"); | 
|---|
| 2119 | // If the node was created after the escape computation we can't answer. | 
|---|
| 2120 | uint idx = n->_idx; | 
|---|
| 2121 | if (idx >= nodes_size()) { | 
|---|
| 2122 | return NULL; | 
|---|
| 2123 | } | 
|---|
| 2124 | PointsToNode* ptn = ptnode_adr(idx); | 
|---|
| 2125 | if (ptn->is_JavaObject()) { | 
|---|
| 2126 | return ptn->as_JavaObject(); | 
|---|
| 2127 | } | 
|---|
| 2128 | assert(ptn->is_LocalVar(), "sanity"); | 
|---|
| 2129 | // Check all java objects it points to. | 
|---|
| 2130 | JavaObjectNode* jobj = NULL; | 
|---|
| 2131 | for (EdgeIterator i(ptn); i.has_next(); i.next()) { | 
|---|
| 2132 | PointsToNode* e = i.get(); | 
|---|
| 2133 | if (e->is_JavaObject()) { | 
|---|
| 2134 | if (jobj == NULL) { | 
|---|
| 2135 | jobj = e->as_JavaObject(); | 
|---|
| 2136 | } else if (jobj != e) { | 
|---|
| 2137 | return NULL; | 
|---|
| 2138 | } | 
|---|
| 2139 | } | 
|---|
| 2140 | } | 
|---|
| 2141 | return jobj; | 
|---|
| 2142 | } | 
|---|
| 2143 |  | 
|---|
| 2144 | // Return true if this node points only to non-escaping allocations. | 
|---|
| 2145 | bool PointsToNode::non_escaping_allocation() { | 
|---|
| 2146 | if (is_JavaObject()) { | 
|---|
| 2147 | Node* n = ideal_node(); | 
|---|
| 2148 | if (n->is_Allocate() || n->is_CallStaticJava()) { | 
|---|
| 2149 | return (escape_state() == PointsToNode::NoEscape); | 
|---|
| 2150 | } else { | 
|---|
| 2151 | return false; | 
|---|
| 2152 | } | 
|---|
| 2153 | } | 
|---|
| 2154 | assert(is_LocalVar(), "sanity"); | 
|---|
| 2155 | // Check all java objects it points to. | 
|---|
| 2156 | for (EdgeIterator i(this); i.has_next(); i.next()) { | 
|---|
| 2157 | PointsToNode* e = i.get(); | 
|---|
| 2158 | if (e->is_JavaObject()) { | 
|---|
| 2159 | Node* n = e->ideal_node(); | 
|---|
| 2160 | if ((e->escape_state() != PointsToNode::NoEscape) || | 
|---|
| 2161 | !(n->is_Allocate() || n->is_CallStaticJava())) { | 
|---|
| 2162 | return false; | 
|---|
| 2163 | } | 
|---|
| 2164 | } | 
|---|
| 2165 | } | 
|---|
| 2166 | return true; | 
|---|
| 2167 | } | 
|---|
| 2168 |  | 
|---|
| 2169 | // Return true if we know the node does not escape globally. | 
|---|
| 2170 | bool ConnectionGraph::not_global_escape(Node *n) { | 
|---|
| 2171 | assert(!_collecting, "should not call during graph construction"); | 
|---|
| 2172 | // If the node was created after the escape computation we can't answer. | 
|---|
| 2173 | uint idx = n->_idx; | 
|---|
| 2174 | if (idx >= nodes_size()) { | 
|---|
| 2175 | return false; | 
|---|
| 2176 | } | 
|---|
| 2177 | PointsToNode* ptn = ptnode_adr(idx); | 
|---|
| 2178 | PointsToNode::EscapeState es = ptn->escape_state(); | 
|---|
| 2179 | // If we have already computed a value, return it. | 
|---|
| 2180 | if (es >= PointsToNode::GlobalEscape) | 
|---|
| 2181 | return false; | 
|---|
| 2182 | if (ptn->is_JavaObject()) { | 
|---|
| 2183 | return true; // (es < PointsToNode::GlobalEscape); | 
|---|
| 2184 | } | 
|---|
| 2185 | assert(ptn->is_LocalVar(), "sanity"); | 
|---|
| 2186 | // Check all java objects it points to. | 
|---|
| 2187 | for (EdgeIterator i(ptn); i.has_next(); i.next()) { | 
|---|
| 2188 | if (i.get()->escape_state() >= PointsToNode::GlobalEscape) | 
|---|
| 2189 | return false; | 
|---|
| 2190 | } | 
|---|
| 2191 | return true; | 
|---|
| 2192 | } | 
|---|
| 2193 |  | 
|---|
| 2194 |  | 
|---|
| 2195 | // Helper functions | 
|---|
| 2196 |  | 
|---|
| 2197 | // Return true if this node is the specified java object or points to it. | 
|---|
| 2198 | bool PointsToNode::points_to(JavaObjectNode* ptn) const { | 
|---|
| 2199 | if (is_JavaObject()) { | 
|---|
| 2200 | return (this == ptn); | 
|---|
| 2201 | } | 
|---|
| 2202 | assert(is_LocalVar() || is_Field(), "sanity"); | 
|---|
| 2203 | for (EdgeIterator i(this); i.has_next(); i.next()) { | 
|---|
| 2204 | if (i.get() == ptn) | 
|---|
| 2205 | return true; | 
|---|
| 2206 | } | 
|---|
| 2207 | return false; | 
|---|
| 2208 | } | 
|---|
| 2209 |  | 
|---|
| 2210 | // Return true if one node points to the other or their points-to sets intersect. | 
|---|
| 2211 | bool PointsToNode::meet(PointsToNode* ptn) { | 
|---|
| 2212 | if (this == ptn) { | 
|---|
| 2213 | return true; | 
|---|
| 2214 | } else if (ptn->is_JavaObject()) { | 
|---|
| 2215 | return this->points_to(ptn->as_JavaObject()); | 
|---|
| 2216 | } else if (this->is_JavaObject()) { | 
|---|
| 2217 | return ptn->points_to(this->as_JavaObject()); | 
|---|
| 2218 | } | 
|---|
| 2219 | assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity"); | 
|---|
| 2220 | int ptn_count =  ptn->edge_count(); | 
|---|
| 2221 | for (EdgeIterator i(this); i.has_next(); i.next()) { | 
|---|
| 2222 | PointsToNode* this_e = i.get(); | 
|---|
| 2223 | for (int j = 0; j < ptn_count; j++) { | 
|---|
| 2224 | if (this_e == ptn->edge(j)) | 
|---|
| 2225 | return true; | 
|---|
| 2226 | } | 
|---|
| 2227 | } | 
|---|
| 2228 | return false; | 
|---|
| 2229 | } | 
|---|
| 2230 |  | 
|---|
| 2231 | #ifdef ASSERT | 
|---|
| 2232 | // Return true if bases point to this java object. | 
|---|
| 2233 | bool FieldNode::has_base(JavaObjectNode* jobj) const { | 
|---|
| 2234 | for (BaseIterator i(this); i.has_next(); i.next()) { | 
|---|
| 2235 | if (i.get() == jobj) | 
|---|
| 2236 | return true; | 
|---|
| 2237 | } | 
|---|
| 2238 | return false; | 
|---|
| 2239 | } | 
|---|
| 2240 | #endif | 
|---|
| 2241 |  | 
|---|
| 2242 | int ConnectionGraph::address_offset(Node* adr, PhaseTransform *phase) { | 
|---|
| 2243 | const Type *adr_type = phase->type(adr); | 
|---|
| 2244 | if (adr->is_AddP() && adr_type->isa_oopptr() == NULL && | 
|---|
| 2245 | adr->in(AddPNode::Address)->is_Proj() && | 
|---|
| 2246 | adr->in(AddPNode::Address)->in(0)->is_Allocate()) { | 
|---|
| 2247 | // We are computing a raw address for a store captured by an Initialize; | 
|---|
| 2248 | // compute an appropriate address type. AddP cases #3 and #5 (see below). | 
|---|
| 2249 | int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot); | 
|---|
| 2250 | assert(offs != Type::OffsetBot || | 
|---|
| 2251 | adr->in(AddPNode::Address)->in(0)->is_AllocateArray(), | 
|---|
| 2252 | "offset must be a constant or it is initialization of array"); | 
|---|
| 2253 | return offs; | 
|---|
| 2254 | } | 
|---|
| 2255 | const TypePtr *t_ptr = adr_type->isa_ptr(); | 
|---|
| 2256 | assert(t_ptr != NULL, "must be a pointer type"); | 
|---|
| 2257 | return t_ptr->offset(); | 
|---|
| 2258 | } | 
|---|
| 2259 |  | 
|---|
| 2260 | Node* ConnectionGraph::get_addp_base(Node *addp) { | 
|---|
| 2261 | assert(addp->is_AddP(), "must be AddP"); | 
|---|
| 2262 | // | 
|---|
| 2263 | // AddP cases for Base and Address inputs: | 
|---|
| 2264 | // case #1. Direct object's field reference: | 
|---|
| 2265 | //     Allocate | 
|---|
| 2266 | //       | | 
|---|
| 2267 | //     Proj #5 ( oop result ) | 
|---|
| 2268 | //       | | 
|---|
| 2269 | //     CheckCastPP (cast to instance type) | 
|---|
| 2270 | //      | | | 
|---|
| 2271 | //     AddP  ( base == address ) | 
|---|
| 2272 | // | 
|---|
| 2273 | // case #2. Indirect object's field reference: | 
|---|
| 2274 | //      Phi | 
|---|
| 2275 | //       | | 
|---|
| 2276 | //     CastPP (cast to instance type) | 
|---|
| 2277 | //      | | | 
|---|
| 2278 | //     AddP  ( base == address ) | 
|---|
| 2279 | // | 
|---|
| 2280 | // case #3. Raw object's field reference for Initialize node: | 
|---|
| 2281 | //      Allocate | 
|---|
| 2282 | //        | | 
|---|
| 2283 | //      Proj #5 ( oop result ) | 
|---|
| 2284 | //  top   | | 
|---|
| 2285 | //     \  | | 
|---|
| 2286 | //     AddP  ( base == top ) | 
|---|
| 2287 | // | 
|---|
| 2288 | // case #4. Array's element reference: | 
|---|
| 2289 | //   {CheckCastPP | CastPP} | 
|---|
| 2290 | //     |  | | | 
|---|
| 2291 | //     |  AddP ( array's element offset ) | 
|---|
| 2292 | //     |  | | 
|---|
| 2293 | //     AddP ( array's offset ) | 
|---|
| 2294 | // | 
|---|
| 2295 | // case #5. Raw object's field reference for arraycopy stub call: | 
|---|
| 2296 | //          The inline_native_clone() case when the arraycopy stub is called | 
|---|
| 2297 | //          after the allocation before Initialize and CheckCastPP nodes. | 
|---|
| 2298 | //      Allocate | 
|---|
| 2299 | //        | | 
|---|
| 2300 | //      Proj #5 ( oop result ) | 
|---|
| 2301 | //       | | | 
|---|
| 2302 | //       AddP  ( base == address ) | 
|---|
| 2303 | // | 
|---|
| 2304 | // case #6. Constant Pool, ThreadLocal, CastX2P or | 
|---|
| 2305 | //          Raw object's field reference: | 
|---|
| 2306 | //      {ConP, ThreadLocal, CastX2P, raw Load} | 
|---|
| 2307 | //  top   | | 
|---|
| 2308 | //     \  | | 
|---|
| 2309 | //     AddP  ( base == top ) | 
|---|
| 2310 | // | 
|---|
| 2311 | // case #7. Klass's field reference. | 
|---|
| 2312 | //      LoadKlass | 
|---|
| 2313 | //       | | | 
|---|
| 2314 | //       AddP  ( base == address ) | 
|---|
| 2315 | // | 
|---|
| 2316 | // case #8. narrow Klass's field reference. | 
|---|
| 2317 | //      LoadNKlass | 
|---|
| 2318 | //       | | 
|---|
| 2319 | //      DecodeN | 
|---|
| 2320 | //       | | | 
|---|
| 2321 | //       AddP  ( base == address ) | 
|---|
| 2322 | // | 
|---|
| 2323 | // case #9. Mixed unsafe access | 
|---|
| 2324 | //    {instance} | 
|---|
| 2325 | //        | | 
|---|
| 2326 | //      CheckCastPP (raw) | 
|---|
| 2327 | //  top   | | 
|---|
| 2328 | //     \  | | 
|---|
| 2329 | //     AddP  ( base == top ) | 
|---|
| 2330 | // | 
|---|
| 2331 | Node *base = addp->in(AddPNode::Base); | 
|---|
| 2332 | if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9. | 
|---|
| 2333 | base = addp->in(AddPNode::Address); | 
|---|
| 2334 | while (base->is_AddP()) { | 
|---|
| 2335 | // Case #6 (unsafe access) may have several chained AddP nodes. | 
|---|
| 2336 | assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only"); | 
|---|
| 2337 | base = base->in(AddPNode::Address); | 
|---|
| 2338 | } | 
|---|
| 2339 | if (base->Opcode() == Op_CheckCastPP && | 
|---|
| 2340 | base->bottom_type()->isa_rawptr() && | 
|---|
| 2341 | _igvn->type(base->in(1))->isa_oopptr()) { | 
|---|
| 2342 | base = base->in(1); // Case #9 | 
|---|
| 2343 | } else { | 
|---|
| 2344 | Node* uncast_base = base->uncast(); | 
|---|
| 2345 | int opcode = uncast_base->Opcode(); | 
|---|
| 2346 | assert(opcode == Op_ConP || opcode == Op_ThreadLocal || | 
|---|
| 2347 | opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() || | 
|---|
| 2348 | (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) || | 
|---|
| 2349 | (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()) || | 
|---|
| 2350 | BarrierSet::barrier_set()->barrier_set_c2()->escape_is_barrier_node(uncast_base), "sanity"); | 
|---|
| 2351 | } | 
|---|
| 2352 | } | 
|---|
| 2353 | return base; | 
|---|
| 2354 | } | 
|---|
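|  | // For reference, a minimal sketch (with a hypothetical helper name) of the | 
|---|
|  | // chained-AddP walk used for case #6 above: repeated unsafe address | 
|---|
|  | // arithmetic builds AddP(AddP(base, off1), off2), and peeling the Address | 
|---|
|  | // input strips the offsets back to the underlying base: | 
|---|
|  | // | 
|---|
|  | //   Node* peel_addp_chain(Node* base) { | 
|---|
|  | //     while (base->is_AddP()) { | 
|---|
|  | //       base = base->in(AddPNode::Address); // drop one offset level | 
|---|
|  | //     } | 
|---|
|  | //     return base; | 
|---|
|  | //   } | 
|---|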
| 2355 |  | 
|---|
| 2356 | Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) { | 
|---|
| 2357 | assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes"); | 
|---|
| 2358 | Node* addp2 = addp->raw_out(0); | 
|---|
| 2359 | if (addp->outcnt() == 1 && addp2->is_AddP() && | 
|---|
| 2360 | addp2->in(AddPNode::Base) == n && | 
|---|
| 2361 | addp2->in(AddPNode::Address) == addp) { | 
|---|
| 2362 | assert(addp->in(AddPNode::Base) == n, "expecting the same base"); | 
|---|
| 2363 | // | 
|---|
| 2364 | // Find the array's offset to push it on the worklist first so that, | 
|---|
| 2365 | // as a result, the array's element offset is processed first (pushed | 
|---|
| 2366 | // second); this avoids a CastPP for the array's offset. | 
|---|
| 2367 | // Otherwise the inserted CastPP (LocalVar) would point to what | 
|---|
| 2368 | // the AddP (Field) points to, which would be wrong since | 
|---|
| 2369 | // the algorithm expects the CastPP to have the same points-to set | 
|---|
| 2370 | // as AddP's base CheckCastPP (LocalVar). | 
|---|
| 2371 | // | 
|---|
| 2372 | //    ArrayAllocation | 
|---|
| 2373 | //     | | 
|---|
| 2374 | //    CheckCastPP | 
|---|
| 2375 | //     | | 
|---|
| 2376 | //    memProj (from ArrayAllocation CheckCastPP) | 
|---|
| 2377 | //     |  || | 
|---|
| 2378 | //     |  ||   Int (element index) | 
|---|
| 2379 | //     |  ||    |   ConI (log(element size)) | 
|---|
| 2380 | //     |  ||    |   / | 
|---|
| 2381 | //     |  ||   LShift | 
|---|
| 2382 | //     |  ||  / | 
|---|
| 2383 | //     |  AddP (array's element offset) | 
|---|
| 2384 | //     |  | | 
|---|
| 2385 | //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits)) | 
|---|
| 2386 | //     | / / | 
|---|
| 2387 | //     AddP (array's offset) | 
|---|
| 2388 | //      | | 
|---|
| 2389 | //     Load/Store (memory operation on array's element) | 
|---|
| 2390 | // | 
|---|
| 2391 | return addp2; | 
|---|
| 2392 | } | 
|---|
| 2393 | return NULL; | 
|---|
| 2394 | } | 
|---|
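|  | // Ordering sketch (hypothetical worklist use, mirroring the callers below): | 
|---|
|  | // the outer AddP (array's offset) is pushed first and the inner AddP | 
|---|
|  | // (element offset) second, so with a LIFO pop the element offset is split | 
|---|
|  | // first, as the comment above requires: | 
|---|
|  | // | 
|---|
|  | //   Node* addp2 = find_second_addp(use, base); | 
|---|
|  | //   if (addp2 != NULL) { | 
|---|
|  | //     worklist.append_if_missing(addp2); // array's offset, popped second | 
|---|
|  | //   } | 
|---|
|  | //   worklist.append_if_missing(use);     // element offset, popped first | 
|---|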
| 2395 |  | 
|---|
| 2396 | // | 
|---|
| 2397 | // Adjust the type and inputs of an AddP which computes the | 
|---|
| 2398 | // address of a field of an instance | 
|---|
| 2399 | // | 
|---|
| 2400 | bool ConnectionGraph::split_AddP(Node *addp, Node *base) { | 
|---|
| 2401 | PhaseGVN* igvn = _igvn; | 
|---|
| 2402 | const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr(); | 
|---|
| 2403 | assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr"); | 
|---|
| 2404 | const TypeOopPtr *t = igvn->type(addp)->isa_oopptr(); | 
|---|
| 2405 | if (t == NULL) { | 
|---|
| 2406 | // We are computing a raw address for a store captured by an Initialize; | 
|---|
| 2407 | // compute an appropriate address type (cases #3 and #5). | 
|---|
| 2408 | assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer"); | 
|---|
| 2409 | assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation"); | 
|---|
| 2410 | intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot); | 
|---|
| 2411 | assert(offs != Type::OffsetBot, "offset must be a constant"); | 
|---|
| 2412 | t = base_t->add_offset(offs)->is_oopptr(); | 
|---|
| 2413 | } | 
|---|
| 2414 | int inst_id =  base_t->instance_id(); | 
|---|
| 2415 | assert(!t->is_known_instance() || t->instance_id() == inst_id, | 
|---|
| 2416 | "old type must be non-instance or match new type"); | 
|---|
| 2417 |  | 
|---|
| 2418 | // The type 't' could be a subclass of 'base_t'. | 
|---|
| 2419 | // As a result t->offset() could be larger than base_t's size and it will | 
|---|
| 2420 | // cause a failure in add_offset() with narrow oops since the TypeOopPtr() | 
|---|
| 2421 | // constructor verifies the correctness of the offset. | 
|---|
| 2422 | // | 
|---|
| 2423 | // It could happen on a subclass's branch (from the type profiling | 
|---|
| 2424 | // inlining) which was not eliminated during parsing since the exactness | 
|---|
| 2425 | // of the allocation type was not propagated to the subclass type check. | 
|---|
| 2426 | // | 
|---|
| 2427 | // Or the type 't' could be unrelated to 'base_t' at all. | 
|---|
| 2428 | // It could happen when the CHA type is different from the MDO type on a dead path | 
|---|
| 2429 | // (for example, from instanceof check) which is not collapsed during parsing. | 
|---|
| 2430 | // | 
|---|
| 2431 | // Do nothing for such an AddP node and don't process its users since | 
|---|
| 2432 | // this code branch will go away. | 
|---|
| 2433 | // | 
|---|
| 2434 | if (!t->is_known_instance() && | 
|---|
| 2435 | !base_t->klass()->is_subtype_of(t->klass())) { | 
|---|
| 2436 | return false; // bail out | 
|---|
| 2437 | } | 
|---|
| 2438 | const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr(); | 
|---|
| 2439 | // Do NOT remove the next line: ensure a new alias index is allocated | 
|---|
| 2440 | // for the instance type. Note: C++ will not remove it since the call | 
|---|
| 2441 | // has a side effect. | 
|---|
| 2442 | int alias_idx = _compile->get_alias_index(tinst); | 
|---|
| 2443 | igvn->set_type(addp, tinst); | 
|---|
| 2444 | // record the allocation in the node map | 
|---|
| 2445 | set_map(addp, get_map(base->_idx)); | 
|---|
| 2446 | // Set addp's Base and Address to 'base'. | 
|---|
| 2447 | Node *abase = addp->in(AddPNode::Base); | 
|---|
| 2448 | Node *adr   = addp->in(AddPNode::Address); | 
|---|
| 2449 | if (adr->is_Proj() && adr->in(0)->is_Allocate() && | 
|---|
| 2450 | adr->in(0)->_idx == (uint)inst_id) { | 
|---|
| 2451 | // Skip AddP cases #3 and #5. | 
|---|
| 2452 | } else { | 
|---|
| 2453 | assert(!abase->is_top(), "sanity"); // AddP case #3 | 
|---|
| 2454 | if (abase != base) { | 
|---|
| 2455 | igvn->hash_delete(addp); | 
|---|
| 2456 | addp->set_req(AddPNode::Base, base); | 
|---|
| 2457 | if (abase == adr) { | 
|---|
| 2458 | addp->set_req(AddPNode::Address, base); | 
|---|
| 2459 | } else { | 
|---|
| 2460 | // AddP case #4 (adr is array's element offset AddP node) | 
|---|
| 2461 | #ifdef ASSERT | 
|---|
| 2462 | const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr(); | 
|---|
| 2463 | assert(adr->is_AddP() && atype != NULL && | 
|---|
| 2464 | atype->instance_id() == inst_id, "array's element offset should be processed first"); | 
|---|
| 2465 | #endif | 
|---|
| 2466 | } | 
|---|
| 2467 | igvn->hash_insert(addp); | 
|---|
| 2468 | } | 
|---|
| 2469 | } | 
|---|
| 2470 | // Put on IGVN worklist since at least addp's type was changed above. | 
|---|
| 2471 | record_for_optimizer(addp); | 
|---|
| 2472 | return true; | 
|---|
| 2473 | } | 
|---|
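|  | // Side-effect note, as a sketch: get_alias_index() both returns an index and | 
|---|
|  | // registers the type in the compile-wide alias table, which is why the call | 
|---|
|  | // above is kept even when its result is otherwise unused. Schematically: | 
|---|
|  | // | 
|---|
|  | //   (void)_compile->get_alias_index(tinst); // allocates the alias slot | 
|---|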
| 2474 |  | 
|---|
| 2475 | // | 
|---|
| 2476 | // Create a new version of orig_phi if necessary. Returns either the newly | 
|---|
| 2477 | // created phi or an existing phi.  Sets new_created to indicate whether a new | 
|---|
| 2478 | // phi was created.  Cache the last newly created phi in the node map. | 
|---|
| 2479 | // | 
|---|
| 2480 | PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, bool &new_created) { | 
|---|
| 2481 | Compile *C = _compile; | 
|---|
| 2482 | PhaseGVN* igvn = _igvn; | 
|---|
| 2483 | new_created = false; | 
|---|
| 2484 | int phi_alias_idx = C->get_alias_index(orig_phi->adr_type()); | 
|---|
| 2485 | // nothing to do if orig_phi is bottom memory or matches alias_idx | 
|---|
| 2486 | if (phi_alias_idx == alias_idx) { | 
|---|
| 2487 | return orig_phi; | 
|---|
| 2488 | } | 
|---|
| 2489 | // Have we recently created a Phi for this alias index? | 
|---|
| 2490 | PhiNode *result = get_map_phi(orig_phi->_idx); | 
|---|
| 2491 | if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) { | 
|---|
| 2492 | return result; | 
|---|
| 2493 | } | 
|---|
| 2494 | // Previous check may fail when the same wide memory Phi was split into Phis | 
|---|
| 2495 | // for different memory slices. Search all Phis for this region. | 
|---|
| 2496 | if (result != NULL) { | 
|---|
| 2497 | Node* region = orig_phi->in(0); | 
|---|
| 2498 | for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { | 
|---|
| 2499 | Node* phi = region->fast_out(i); | 
|---|
| 2500 | if (phi->is_Phi() && | 
|---|
| 2501 | C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) { | 
|---|
| 2502 | assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice"); | 
|---|
| 2503 | return phi->as_Phi(); | 
|---|
| 2504 | } | 
|---|
| 2505 | } | 
|---|
| 2506 | } | 
|---|
| 2507 | if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) { | 
|---|
| 2508 | if (C->do_escape_analysis() == true && !C->failing()) { | 
|---|
| 2509 | // Retry compilation without escape analysis. | 
|---|
| 2510 | // If this is the first failure, the sentinel string will "stick" | 
|---|
| 2511 | // to the Compile object, and the C2Compiler will see it and retry. | 
|---|
| 2512 | C->record_failure(C2Compiler::retry_no_escape_analysis()); | 
|---|
| 2513 | } | 
|---|
| 2514 | return NULL; | 
|---|
| 2515 | } | 
|---|
| 2516 | orig_phi_worklist.append_if_missing(orig_phi); | 
|---|
| 2517 | const TypePtr *atype = C->get_adr_type(alias_idx); | 
|---|
| 2518 | result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype); | 
|---|
| 2519 | C->copy_node_notes_to(result, orig_phi); | 
|---|
| 2520 | igvn->set_type(result, result->bottom_type()); | 
|---|
| 2521 | record_for_optimizer(result); | 
|---|
| 2522 | set_map(orig_phi, result); | 
|---|
| 2523 | new_created = true; | 
|---|
| 2524 | return result; | 
|---|
| 2525 | } | 
|---|
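|  | // Usage sketch (hypothetical caller): the out-parameter tells the caller | 
|---|
|  | // whether the returned phi still needs its inputs filled in: | 
|---|
|  | // | 
|---|
|  | //   bool new_created; | 
|---|
|  | //   PhiNode* phi = create_split_phi(orig, alias_idx, worklist, new_created); | 
|---|
|  | //   if (new_created) { | 
|---|
|  | //     // inputs of 'phi' are still empty; see split_memory_phi() below | 
|---|
|  | //   } | 
|---|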
| 2526 |  | 
|---|
| 2527 | // | 
|---|
| 2528 | // Return a new version of Memory Phi "orig_phi" with the inputs having the | 
|---|
| 2529 | // specified alias index. | 
|---|
| 2530 | // | 
|---|
| 2531 | PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist) { | 
|---|
| 2532 | assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory"); | 
|---|
| 2533 | Compile *C = _compile; | 
|---|
| 2534 | PhaseGVN* igvn = _igvn; | 
|---|
| 2535 | bool new_phi_created; | 
|---|
| 2536 | PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created); | 
|---|
| 2537 | if (!new_phi_created) { | 
|---|
| 2538 | return result; | 
|---|
| 2539 | } | 
|---|
| 2540 | GrowableArray<PhiNode *>  phi_list; | 
|---|
| 2541 | GrowableArray<uint>  cur_input; | 
|---|
| 2542 | PhiNode *phi = orig_phi; | 
|---|
| 2543 | uint idx = 1; | 
|---|
| 2544 | bool finished = false; | 
|---|
| 2545 | while(!finished) { | 
|---|
| 2546 | while (idx < phi->req()) { | 
|---|
| 2547 | Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist); | 
|---|
| 2548 | if (mem != NULL && mem->is_Phi()) { | 
|---|
| 2549 | PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created); | 
|---|
| 2550 | if (new_phi_created) { | 
|---|
| 2551 | // found a phi for which we created a new split; push the current one on the worklist | 
|---|
| 2552 | // and begin processing the new one | 
|---|
| 2553 | phi_list.push(phi); | 
|---|
| 2554 | cur_input.push(idx); | 
|---|
| 2555 | phi = mem->as_Phi(); | 
|---|
| 2556 | result = newphi; | 
|---|
| 2557 | idx = 1; | 
|---|
| 2558 | continue; | 
|---|
| 2559 | } else { | 
|---|
| 2560 | mem = newphi; | 
|---|
| 2561 | } | 
|---|
| 2562 | } | 
|---|
| 2563 | if (C->failing()) { | 
|---|
| 2564 | return NULL; | 
|---|
| 2565 | } | 
|---|
| 2566 | result->set_req(idx++, mem); | 
|---|
| 2567 | } | 
|---|
| 2568 | #ifdef ASSERT | 
|---|
| 2569 | // verify that the new Phi has an input for each input of the original | 
|---|
| 2570 | assert( phi->req() == result->req(), "must have same number of inputs."); | 
|---|
| 2571 | assert( result->in(0) != NULL && result->in(0) == phi->in(0), "regions must match"); | 
|---|
| 2572 | #endif | 
|---|
| 2573 | // Check if all of the new phi's inputs have the specified alias index. | 
|---|
| 2574 | // Otherwise use the old phi. | 
|---|
| 2575 | for (uint i = 1; i < phi->req(); i++) { | 
|---|
| 2576 | Node* in = result->in(i); | 
|---|
| 2577 | assert((phi->in(i) == NULL) == (in == NULL), "inputs must correspond."); | 
|---|
| 2578 | } | 
|---|
| 2579 | // we have finished processing a Phi, see if there are any more to do | 
|---|
| 2580 | finished = (phi_list.length() == 0 ); | 
|---|
| 2581 | if (!finished) { | 
|---|
| 2582 | phi = phi_list.pop(); | 
|---|
| 2583 | idx = cur_input.pop(); | 
|---|
| 2584 | PhiNode *prev_result = get_map_phi(phi->_idx); | 
|---|
| 2585 | prev_result->set_req(idx++, result); | 
|---|
| 2586 | result = prev_result; | 
|---|
| 2587 | } | 
|---|
| 2588 | } | 
|---|
| 2589 | return result; | 
|---|
| 2590 | } | 
|---|
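|  | // The loop above is an iterative depth-first walk: phi_list and cur_input | 
|---|
|  | // together act as an explicit stack of (phi, resume-index) frames in place | 
|---|
|  | // of native recursion. A minimal sketch of the same pattern, with a | 
|---|
|  | // hypothetical Frame type: | 
|---|
|  | // | 
|---|
|  | //   struct Frame { PhiNode* phi; uint idx; }; | 
|---|
|  | //   // push {phi, idx} when a new split phi is created; | 
|---|
|  | //   // pop and resume filling inputs at 'idx' when the inner phi is done. | 
|---|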
| 2591 |  | 
|---|
| 2592 | // | 
|---|
| 2593 | // The next methods are derived from methods in MemNode. | 
|---|
| 2594 | // | 
|---|
| 2595 | Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) { | 
|---|
| 2596 | Node *mem = mmem; | 
|---|
| 2597 | // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally | 
|---|
| 2598 | // means an array I have not precisely typed yet.  Do not do any | 
|---|
| 2599 | // alias stuff with it any time soon. | 
|---|
| 2600 | if (toop->base() != Type::AnyPtr && | 
|---|
| 2601 | !(toop->klass() != NULL && | 
|---|
| 2602 | toop->klass()->is_java_lang_Object() && | 
|---|
| 2603 | toop->offset() == Type::OffsetBot)) { | 
|---|
| 2604 | mem = mmem->memory_at(alias_idx); | 
|---|
| 2605 | // Update input if it is progress over what we have now | 
|---|
| 2606 | } | 
|---|
| 2607 | return mem; | 
|---|
| 2608 | } | 
|---|
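|  | // Usage sketch: callers compare the returned memory against the MergeMem's | 
|---|
|  | // base memory to detect that no per-slice input was found: | 
|---|
|  | // | 
|---|
|  | //   Node* mem = step_through_mergemem(mmem, alias_idx, toop); | 
|---|
|  | //   if (mem == mmem->base_memory()) { | 
|---|
|  | //     // no instance slice yet; fall back to the general slice | 
|---|
|  | //   } | 
|---|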
| 2609 |  | 
|---|
| 2610 | // | 
|---|
| 2611 | // Move memory users to their memory slices. | 
|---|
| 2612 | // | 
|---|
| 2613 | void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis) { | 
|---|
| 2614 | Compile* C = _compile; | 
|---|
| 2615 | PhaseGVN* igvn = _igvn; | 
|---|
| 2616 | const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr(); | 
|---|
| 2617 | assert(tp != NULL, "ptr type"); | 
|---|
| 2618 | int alias_idx = C->get_alias_index(tp); | 
|---|
| 2619 | int general_idx = C->get_general_index(alias_idx); | 
|---|
| 2620 |  | 
|---|
| 2621 | // Move users first | 
|---|
| 2622 | for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { | 
|---|
| 2623 | Node* use = n->fast_out(i); | 
|---|
| 2624 | if (use->is_MergeMem()) { | 
|---|
| 2625 | MergeMemNode* mmem = use->as_MergeMem(); | 
|---|
| 2626 | assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice"); | 
|---|
| 2627 | if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) { | 
|---|
| 2628 | continue; // Nothing to do | 
|---|
| 2629 | } | 
|---|
| 2630 | // Replace previous general reference to mem node. | 
|---|
| 2631 | uint orig_uniq = C->unique(); | 
|---|
| 2632 | Node* m = find_inst_mem(n, general_idx, orig_phis); | 
|---|
| 2633 | assert(orig_uniq == C->unique(), "no new nodes"); | 
|---|
| 2634 | mmem->set_memory_at(general_idx, m); | 
|---|
| 2635 | --imax; | 
|---|
| 2636 | --i; | 
|---|
| 2637 | } else if (use->is_MemBar()) { | 
|---|
| 2638 | assert(!use->is_Initialize(), "initializing stores should not be moved"); | 
|---|
| 2639 | if (use->req() > MemBarNode::Precedent && | 
|---|
| 2640 | use->in(MemBarNode::Precedent) == n) { | 
|---|
| 2641 | // Don't move related membars. | 
|---|
| 2642 | record_for_optimizer(use); | 
|---|
| 2643 | continue; | 
|---|
| 2644 | } | 
|---|
| 2645 | tp = use->as_MemBar()->adr_type()->isa_ptr(); | 
|---|
| 2646 | if ((tp != NULL && C->get_alias_index(tp) == alias_idx) || | 
|---|
| 2647 | alias_idx == general_idx) { | 
|---|
| 2648 | continue; // Nothing to do | 
|---|
| 2649 | } | 
|---|
| 2650 | // Move to general memory slice. | 
|---|
| 2651 | uint orig_uniq = C->unique(); | 
|---|
| 2652 | Node* m = find_inst_mem(n, general_idx, orig_phis); | 
|---|
| 2653 | assert(orig_uniq == C->unique(), "no new nodes"); | 
|---|
| 2654 | igvn->hash_delete(use); | 
|---|
| 2655 | imax -= use->replace_edge(n, m); | 
|---|
| 2656 | igvn->hash_insert(use); | 
|---|
| 2657 | record_for_optimizer(use); | 
|---|
| 2658 | --i; | 
|---|
| 2659 | #ifdef ASSERT | 
|---|
| 2660 | } else if (use->is_Mem()) { | 
|---|
| 2661 | if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) { | 
|---|
| 2662 | // Don't move related cardmark. | 
|---|
| 2663 | continue; | 
|---|
| 2664 | } | 
|---|
| 2665 | // Memory nodes should have new memory input. | 
|---|
| 2666 | tp = igvn->type(use->in(MemNode::Address))->isa_ptr(); | 
|---|
| 2667 | assert(tp != NULL, "ptr type"); | 
|---|
| 2668 | int idx = C->get_alias_index(tp); | 
|---|
| 2669 | assert(get_map(use->_idx) != NULL || idx == alias_idx, | 
|---|
| 2670 | "Following memory nodes should have new memory input or be on the same memory slice"); | 
|---|
| 2671 | } else if (use->is_Phi()) { | 
|---|
| 2672 | // Phi nodes should be split and moved already. | 
|---|
| 2673 | tp = use->as_Phi()->adr_type()->isa_ptr(); | 
|---|
| 2674 | assert(tp != NULL, "ptr type"); | 
|---|
| 2675 | int idx = C->get_alias_index(tp); | 
|---|
| 2676 | assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice"); | 
|---|
| 2677 | } else { | 
|---|
| 2678 | use->dump(); | 
|---|
| 2679 | assert(false, "should not be here"); | 
|---|
| 2680 | #endif | 
|---|
| 2681 | } | 
|---|
| 2682 | } | 
|---|
| 2683 | } | 
|---|
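|  | // Iterator-maintenance sketch: the loop above edits out-edges while walking | 
|---|
|  | // them with DUIterator_Fast, so the index and bound are re-adjusted to avoid | 
|---|
|  | // skipping users. replace_edge() returns the number of edges it replaced: | 
|---|
|  | // | 
|---|
|  | //   imax -= use->replace_edge(n, m); // fewer outs of n remain | 
|---|
|  | //   --i;                             // revisit the current slot | 
|---|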
| 2684 |  | 
|---|
| 2685 | // | 
|---|
| 2686 | // Search memory chain of "mem" to find a MemNode whose address | 
|---|
| 2687 | // is the specified alias index. | 
|---|
| 2688 | // | 
|---|
| 2689 | Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis) { | 
|---|
| 2690 | if (orig_mem == NULL) | 
|---|
| 2691 | return orig_mem; | 
|---|
| 2692 | Compile* C = _compile; | 
|---|
| 2693 | PhaseGVN* igvn = _igvn; | 
|---|
| 2694 | const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr(); | 
|---|
| 2695 | bool is_instance = (toop != NULL) && toop->is_known_instance(); | 
|---|
| 2696 | Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory); | 
|---|
| 2697 | Node *prev = NULL; | 
|---|
| 2698 | Node *result = orig_mem; | 
|---|
| 2699 | while (prev != result) { | 
|---|
| 2700 | prev = result; | 
|---|
| 2701 | if (result == start_mem) | 
|---|
| 2702 | break;  // hit one of our sentinels | 
|---|
| 2703 | if (result->is_Mem()) { | 
|---|
| 2704 | const Type *at = igvn->type(result->in(MemNode::Address)); | 
|---|
| 2705 | if (at == Type::TOP) | 
|---|
| 2706 | break; // Dead | 
|---|
| 2707 | assert (at->isa_ptr() != NULL, "pointer type required."); | 
|---|
| 2708 | int idx = C->get_alias_index(at->is_ptr()); | 
|---|
| 2709 | if (idx == alias_idx) | 
|---|
| 2710 | break; // Found | 
|---|
| 2711 | if (!is_instance && (at->isa_oopptr() == NULL || | 
|---|
| 2712 | !at->is_oopptr()->is_known_instance())) { | 
|---|
| 2713 | break; // Do not skip store to general memory slice. | 
|---|
| 2714 | } | 
|---|
| 2715 | result = result->in(MemNode::Memory); | 
|---|
| 2716 | } | 
|---|
| 2717 | if (!is_instance) | 
|---|
| 2718 | continue;  // don't search further for non-instance types | 
|---|
| 2719 | // skip over a call which does not affect this memory slice | 
|---|
| 2720 | if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { | 
|---|
| 2721 | Node *proj_in = result->in(0); | 
|---|
| 2722 | if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) { | 
|---|
| 2723 | break;  // hit one of our sentinels | 
|---|
| 2724 | } else if (proj_in->is_Call()) { | 
|---|
| 2725 | // ArrayCopy node processed here as well | 
|---|
| 2726 | CallNode *call = proj_in->as_Call(); | 
|---|
| 2727 | if (!call->may_modify(toop, igvn)) { | 
|---|
| 2728 | result = call->in(TypeFunc::Memory); | 
|---|
| 2729 | } | 
|---|
| 2730 | } else if (proj_in->is_Initialize()) { | 
|---|
| 2731 | AllocateNode* alloc = proj_in->as_Initialize()->allocation(); | 
|---|
| 2732 | // Stop if this is the initialization for the object instance | 
|---|
| 2733 | // which contains this memory slice, otherwise skip over it. | 
|---|
| 2734 | if (alloc == NULL || alloc->_idx != (uint)toop->instance_id()) { | 
|---|
| 2735 | result = proj_in->in(TypeFunc::Memory); | 
|---|
| 2736 | } | 
|---|
| 2737 | } else if (proj_in->is_MemBar()) { | 
|---|
| 2738 | if (proj_in->in(TypeFunc::Memory)->is_MergeMem() && | 
|---|
| 2739 | proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->is_Proj() && | 
|---|
| 2740 | proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->is_ArrayCopy()) { | 
|---|
| 2741 | // clone | 
|---|
| 2742 | ArrayCopyNode* ac = proj_in->in(TypeFunc::Memory)->as_MergeMem()->in(Compile::AliasIdxRaw)->in(0)->as_ArrayCopy(); | 
|---|
| 2743 | if (ac->may_modify(toop, igvn)) { | 
|---|
| 2744 | break; | 
|---|
| 2745 | } | 
|---|
| 2746 | } | 
|---|
| 2747 | result = proj_in->in(TypeFunc::Memory); | 
|---|
| 2748 | } | 
|---|
| 2749 | } else if (result->is_MergeMem()) { | 
|---|
| 2750 | MergeMemNode *mmem = result->as_MergeMem(); | 
|---|
| 2751 | result = step_through_mergemem(mmem, alias_idx, toop); | 
|---|
| 2752 | if (result == mmem->base_memory()) { | 
|---|
| 2753 | // Didn't find instance memory, search through general slice recursively. | 
|---|
| 2754 | result = mmem->memory_at(C->get_general_index(alias_idx)); | 
|---|
| 2755 | result = find_inst_mem(result, alias_idx, orig_phis); | 
|---|
| 2756 | if (C->failing()) { | 
|---|
| 2757 | return NULL; | 
|---|
| 2758 | } | 
|---|
| 2759 | mmem->set_memory_at(alias_idx, result); | 
|---|
| 2760 | } | 
|---|
| 2761 | } else if (result->is_Phi() && | 
|---|
| 2762 | C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) { | 
|---|
| 2763 | Node *un = result->as_Phi()->unique_input(igvn); | 
|---|
| 2764 | if (un != NULL) { | 
|---|
| 2765 | orig_phis.append_if_missing(result->as_Phi()); | 
|---|
| 2766 | result = un; | 
|---|
| 2767 | } else { | 
|---|
| 2768 | break; | 
|---|
| 2769 | } | 
|---|
| 2770 | } else if (result->is_ClearArray()) { | 
|---|
| 2771 | if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) { | 
|---|
| 2772 | // Cannot bypass the initialization of the instance | 
|---|
| 2773 | // we are looking for. | 
|---|
| 2774 | break; | 
|---|
| 2775 | } | 
|---|
| 2776 | // Otherwise skip it (the call updated 'result' value). | 
|---|
| 2777 | } else if (result->Opcode() == Op_SCMemProj) { | 
|---|
| 2778 | Node* mem = result->in(0); | 
|---|
| 2779 | Node* adr = NULL; | 
|---|
| 2780 | if (mem->is_LoadStore()) { | 
|---|
| 2781 | adr = mem->in(MemNode::Address); | 
|---|
| 2782 | } else { | 
|---|
| 2783 | assert(mem->Opcode() == Op_EncodeISOArray || | 
|---|
| 2784 | mem->Opcode() == Op_StrCompressedCopy, "sanity"); | 
|---|
| 2785 | adr = mem->in(3); // Memory edge corresponds to destination array | 
|---|
| 2786 | } | 
|---|
| 2787 | const Type *at = igvn->type(adr); | 
|---|
| 2788 | if (at != Type::TOP) { | 
|---|
| 2789 | assert(at->isa_ptr() != NULL, "pointer type required."); | 
|---|
| 2790 | int idx = C->get_alias_index(at->is_ptr()); | 
|---|
| 2791 | if (idx == alias_idx) { | 
|---|
| 2792 | // Assert in debug mode | 
|---|
| 2793 | assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); | 
|---|
| 2794 | break; // In product mode return SCMemProj node | 
|---|
| 2795 | } | 
|---|
| 2796 | } | 
|---|
| 2797 | result = mem->in(MemNode::Memory); | 
|---|
| 2798 | } else if (result->Opcode() == Op_StrInflatedCopy) { | 
|---|
| 2799 | Node* adr = result->in(3); // Memory edge corresponds to destination array | 
|---|
| 2800 | const Type *at = igvn->type(adr); | 
|---|
| 2801 | if (at != Type::TOP) { | 
|---|
| 2802 | assert(at->isa_ptr() != NULL, "pointer type required."); | 
|---|
| 2803 | int idx = C->get_alias_index(at->is_ptr()); | 
|---|
| 2804 | if (idx == alias_idx) { | 
|---|
| 2805 | // Assert in debug mode | 
|---|
| 2806 | assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field"); | 
|---|
| 2807 | break; // In product mode return SCMemProj node | 
|---|
| 2808 | } | 
|---|
| 2809 | } | 
|---|
| 2810 | result = result->in(MemNode::Memory); | 
|---|
| 2811 | } | 
|---|
| 2812 | } | 
|---|
| 2813 | if (result->is_Phi()) { | 
|---|
| 2814 | PhiNode *mphi = result->as_Phi(); | 
|---|
| 2815 | assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); | 
|---|
| 2816 | const TypePtr *t = mphi->adr_type(); | 
|---|
| 2817 | if (!is_instance) { | 
|---|
| 2818 | // Push all non-instance Phis on the orig_phis worklist to update inputs | 
|---|
| 2819 | // during Phase 4 if needed. | 
|---|
| 2820 | orig_phis.append_if_missing(mphi); | 
|---|
| 2821 | } else if (C->get_alias_index(t) != alias_idx) { | 
|---|
| 2822 | // Create a new Phi with the specified alias index type. | 
|---|
| 2823 | result = split_memory_phi(mphi, alias_idx, orig_phis); | 
|---|
| 2824 | } | 
|---|
| 2825 | } | 
|---|
| 2826 | // The result is either a MemNode, a PhiNode or an InitializeNode. | 
|---|
| 2827 | return result; | 
|---|
| 2828 | } | 
|---|
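|  | // Shape of the walk above, as a sketch with hypothetical helpers: advance | 
|---|
|  | // 'result' up the memory chain until it stops changing or a sentinel | 
|---|
|  | // (start memory, matching store, owning allocation) is reached: | 
|---|
|  | // | 
|---|
|  | //   Node* prev = NULL; | 
|---|
|  | //   while (prev != result) { | 
|---|
|  | //     prev = result; | 
|---|
|  | //     if (is_sentinel(result))  break;      // hypothetical predicate | 
|---|
|  | //     result = step_past_unrelated(result); // calls, membars, other slices | 
|---|
|  | //   } | 
|---|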
| 2829 |  | 
|---|
| 2830 | // | 
|---|
| 2831 | //  Convert the types of unescaped objects to instance types where possible, | 
|---|
| 2832 | //  propagate the new type information through the graph, and update memory | 
|---|
| 2833 | //  edges and MergeMem inputs to reflect the new type. | 
|---|
| 2834 | // | 
|---|
| 2835 | //  We start with allocations (and calls which may be allocations)  on alloc_worklist. | 
|---|
| 2836 | //  The processing is done in 4 phases: | 
|---|
| 2837 | // | 
|---|
| 2838 | //  Phase 1:  Process possible allocations from alloc_worklist.  Create instance | 
|---|
| 2839 | //            types for the CheckCastPP for allocations where possible. | 
|---|
| 2840 | //            Propagate the new types through users as follows: | 
|---|
| 2841 | //               casts and Phi:  push users on alloc_worklist | 
|---|
| 2842 | //               AddP:  cast Base and Address inputs to the instance type | 
|---|
| 2843 | //                      push any AddP users on alloc_worklist and push any memnode | 
|---|
| 2844 | //                      users onto memnode_worklist. | 
|---|
| 2845 | //  Phase 2:  Process MemNodes from memnode_worklist. Compute the new address type and | 
|---|
| 2846 | //            search the Memory chain for a store with the appropriate | 
|---|
| 2847 | //            address type.  If a Phi is found, create a new version with | 
|---|
| 2848 | //            the appropriate memory slices from each of the Phi inputs. | 
|---|
| 2849 | //            For stores, process the users as follows: | 
|---|
| 2850 | //               MemNode:  push on memnode_worklist | 
|---|
| 2851 | //               MergeMem: push on mergemem_worklist | 
|---|
| 2852 | //  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory slice | 
|---|
| 2853 | //            moving the first node encountered of each instance type to | 
|---|
| 2854 | //            the input corresponding to its alias index, i.e. to the | 
|---|
| 2855 | //            appropriate memory slice. | 
|---|
| 2856 | //  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes. | 
|---|
| 2857 | // | 
|---|
| 2858 | // In the following example, the CheckCastPP nodes are the cast of allocation | 
|---|
| 2859 | // results and the allocation of node 29 is unescaped and eligible to be an | 
|---|
| 2860 | // instance type. | 
|---|
| 2861 | // | 
|---|
| 2862 | // We start with: | 
|---|
| 2863 | // | 
|---|
| 2864 | //     7 Parm #memory | 
|---|
| 2865 | //    10  ConI  "12" | 
|---|
| 2866 | //    19  CheckCastPP   "Foo" | 
|---|
| 2867 | //    20  AddP  _ 19 19 10  Foo+12  alias_index=4 | 
|---|
| 2868 | //    29  CheckCastPP   "Foo" | 
|---|
| 2869 | //    30  AddP  _ 29 29 10  Foo+12  alias_index=4 | 
|---|
| 2870 | // | 
|---|
| 2871 | //    40  StoreP  25   7  20   ... alias_index=4 | 
|---|
| 2872 | //    50  StoreP  35  40  30   ... alias_index=4 | 
|---|
| 2873 | //    60  StoreP  45  50  20   ... alias_index=4 | 
|---|
| 2874 | //    70  LoadP    _  60  30   ... alias_index=4 | 
|---|
| 2875 | //    80  Phi     75  50  60   Memory alias_index=4 | 
|---|
| 2876 | //    90  LoadP    _  80  30   ... alias_index=4 | 
|---|
| 2877 | //   100  LoadP    _  80  20   ... alias_index=4 | 
|---|
| 2878 | // | 
|---|
| 2879 | // | 
|---|
| 2880 | // Phase 1 creates an instance type for node 29 assigning it an instance id of 24 | 
|---|
| 2881 | // and creating a new alias index for node 30.  This gives: | 
|---|
| 2882 | // | 
|---|
| 2883 | //     7 Parm #memory | 
|---|
| 2884 | //    10  ConI  "12" | 
|---|
| 2885 | //    19  CheckCastPP   "Foo" | 
|---|
| 2886 | //    20  AddP  _ 19 19 10  Foo+12  alias_index=4 | 
|---|
| 2887 | //    29  CheckCastPP   "Foo"  iid=24 | 
|---|
| 2888 | //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24 | 
|---|
| 2889 | // | 
|---|
| 2890 | //    40  StoreP  25   7  20   ... alias_index=4 | 
|---|
| 2891 | //    50  StoreP  35  40  30   ... alias_index=6 | 
|---|
| 2892 | //    60  StoreP  45  50  20   ... alias_index=4 | 
|---|
| 2893 | //    70  LoadP    _  60  30   ... alias_index=6 | 
|---|
| 2894 | //    80  Phi     75  50  60   Memory alias_index=4 | 
|---|
| 2895 | //    90  LoadP    _  80  30   ... alias_index=6 | 
|---|
| 2896 | //   100  LoadP    _  80  20   ... alias_index=4 | 
|---|
| 2897 | // | 
|---|
| 2898 | // In phase 2, new memory inputs are computed for the loads and stores, | 
|---|
| 2899 | // And a new version of the phi is created.  In phase 4, the inputs to | 
|---|
| 2900 | // node 80 are updated and then the memory nodes are updated with the | 
|---|
| 2901 | // values computed in phase 2.  This results in: | 
|---|
| 2902 | // | 
|---|
| 2903 | //     7 Parm #memory | 
|---|
| 2904 | //    10  ConI  "12" | 
|---|
| 2905 | //    19  CheckCastPP   "Foo" | 
|---|
| 2906 | //    20  AddP  _ 19 19 10  Foo+12  alias_index=4 | 
|---|
| 2907 | //    29  CheckCastPP   "Foo"  iid=24 | 
|---|
| 2908 | //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24 | 
|---|
| 2909 | // | 
|---|
| 2910 | //    40  StoreP  25  7   20   ... alias_index=4 | 
|---|
| 2911 | //    50  StoreP  35  7   30   ... alias_index=6 | 
|---|
| 2912 | //    60  StoreP  45  40  20   ... alias_index=4 | 
|---|
| 2913 | //    70  LoadP    _  50  30   ... alias_index=6 | 
|---|
| 2914 | //    80  Phi     75  40  60   Memory alias_index=4 | 
|---|
| 2915 | //   120  Phi     75  50  50   Memory alias_index=6 | 
|---|
| 2916 | //    90  LoadP    _ 120  30   ... alias_index=6 | 
|---|
| 2917 | //   100  LoadP    _  80  20   ... alias_index=4 | 
|---|
| 2918 | // | 
|---|
| 2919 | void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist, GrowableArray<ArrayCopyNode*> &arraycopy_worklist) { | 
|---|
| 2920 | GrowableArray<Node *>  memnode_worklist; | 
|---|
| 2921 | GrowableArray<PhiNode *>  orig_phis; | 
|---|
| 2922 | PhaseIterGVN  *igvn = _igvn; | 
|---|
| 2923 | uint new_index_start = (uint) _compile->num_alias_types(); | 
|---|
| 2924 | Arena* arena = Thread::current()->resource_area(); | 
|---|
| 2925 | VectorSet visited(arena); | 
|---|
| 2926 | ideal_nodes.clear(); // Reset for use with set_map/get_map. | 
|---|
| 2927 | uint unique_old = _compile->unique(); | 
|---|
| 2928 |  | 
|---|
| 2929 | //  Phase 1:  Process possible allocations from alloc_worklist. | 
|---|
| 2930 | //  Create instance types for the CheckCastPP for allocations where possible. | 
|---|
| 2931 | // | 
|---|
| 2932 | // (Note: don't forget to change the order of the second AddP node on | 
|---|
| 2933 | //  the alloc_worklist if the order of the worklist processing is changed, | 
|---|
| 2934 | //  see the comment in find_second_addp().) | 
|---|
| 2935 | // | 
|---|
| 2936 | while (alloc_worklist.length() != 0) { | 
|---|
| 2937 | Node *n = alloc_worklist.pop(); | 
|---|
| 2938 | uint ni = n->_idx; | 
|---|
| 2939 | if (n->is_Call()) { | 
|---|
| 2940 | CallNode *alloc = n->as_Call(); | 
|---|
| 2941 | // copy escape information to call node | 
|---|
| 2942 | PointsToNode* ptn = ptnode_adr(alloc->_idx); | 
|---|
| 2943 | PointsToNode::EscapeState es = ptn->escape_state(); | 
|---|
| 2944 | // We have an allocation or call which returns a Java object, | 
|---|
| 2945 | // see if it is unescaped. | 
|---|
| 2946 | if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) | 
|---|
| 2947 | continue; | 
|---|
| 2948 | // Find CheckCastPP for the allocate or for the return value of a call | 
|---|
| 2949 | n = alloc->result_cast(); | 
|---|
| 2950 | if (n == NULL) {            // No uses except Initialize node | 
|---|
| 2951 | if (alloc->is_Allocate()) { | 
|---|
| 2952 | // Set the scalar_replaceable flag for allocation | 
|---|
| 2953 | // so it could be eliminated if it has no uses. | 
|---|
| 2954 | alloc->as_Allocate()->_is_scalar_replaceable = true; | 
|---|
| 2955 | } | 
|---|
| 2956 | if (alloc->is_CallStaticJava()) { | 
|---|
| 2957 | // Set the scalar_replaceable flag for boxing method | 
|---|
| 2958 | // so it could be eliminated if it has no uses. | 
|---|
| 2959 | alloc->as_CallStaticJava()->_is_scalar_replaceable = true; | 
|---|
| 2960 | } | 
|---|
| 2961 | continue; | 
|---|
| 2962 | } | 
|---|
| 2963 | if (!n->is_CheckCastPP()) { // not unique CheckCastPP. | 
|---|
| 2964 | assert(!alloc->is_Allocate(), "allocation should have unique type"); | 
|---|
| 2965 | continue; | 
|---|
| 2966 | } | 
|---|
| 2967 |  | 
|---|
| 2968 | // The inline code for Object.clone() casts the allocation result to | 
|---|
| 2969 | // java.lang.Object and then to the actual type of the allocated | 
|---|
| 2970 | // object. Detect this case and use the second cast. | 
|---|
| 2971 | // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when | 
|---|
| 2972 | // the allocation result is cast to java.lang.Object and then | 
|---|
| 2973 | // to the actual Array type. | 
|---|
| 2974 | if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL | 
|---|
| 2975 | && (alloc->is_AllocateArray() || | 
|---|
| 2976 | igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeKlassPtr::OBJECT)) { | 
|---|
| 2977 | Node *cast2 = NULL; | 
|---|
| 2978 | for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { | 
|---|
| 2979 | Node *use = n->fast_out(i); | 
|---|
| 2980 | if (use->is_CheckCastPP()) { | 
|---|
| 2981 | cast2 = use; | 
|---|
| 2982 | break; | 
|---|
| 2983 | } | 
|---|
| 2984 | } | 
|---|
| 2985 | if (cast2 != NULL) { | 
|---|
| 2986 | n = cast2; | 
|---|
| 2987 | } else { | 
|---|
| 2988 | // Non-scalar replaceable if the allocation type is unknown statically | 
|---|
| 2989 | // (reflection allocation); the object can't be restored during | 
|---|
| 2990 | // deoptimization without a precise type. | 
|---|
| 2991 | continue; | 
|---|
| 2992 | } | 
|---|
| 2993 | } | 
|---|
| 2994 |  | 
|---|
| 2995 | const TypeOopPtr *t = igvn->type(n)->isa_oopptr(); | 
|---|
| 2996 | if (t == NULL) | 
|---|
| 2997 | continue;  // not a TypeOopPtr | 
|---|
| 2998 | if (!t->klass_is_exact()) | 
|---|
| 2999 | continue; // not a unique type | 
|---|
| 3000 |  | 
|---|
| 3001 | if (alloc->is_Allocate()) { | 
|---|
| 3002 | // Set the scalar_replaceable flag for allocation | 
|---|
| 3003 | // so it could be eliminated. | 
|---|
| 3004 | alloc->as_Allocate()->_is_scalar_replaceable = true; | 
|---|
| 3005 | } | 
|---|
| 3006 | if (alloc->is_CallStaticJava()) { | 
|---|
| 3007 | // Set the scalar_replaceable flag for boxing method | 
|---|
| 3008 | // so it could be eliminated. | 
|---|
| 3009 | alloc->as_CallStaticJava()->_is_scalar_replaceable = true; | 
|---|
| 3010 | } | 
|---|
| 3011 | set_escape_state(ptnode_adr(n->_idx), es); // CheckCastPP escape state | 
|---|
| 3012 | // in order for an object to be scalar-replaceable, it must be: | 
|---|
| 3013 | //   - a direct allocation (not a call returning an object) | 
|---|
| 3014 | //   - non-escaping | 
|---|
| 3015 | //   - eligible to be a unique type | 
|---|
| 3016 | //   - not determined to be ineligible by escape analysis | 
|---|
| 3017 | set_map(alloc, n); | 
|---|
| 3018 | set_map(n, alloc); | 
|---|
| 3019 | const TypeOopPtr* tinst = t->cast_to_instance_id(ni); | 
|---|
| 3020 | igvn->hash_delete(n); | 
|---|
| 3021 | igvn->set_type(n,  tinst); | 
|---|
| 3022 | n->raise_bottom_type(tinst); | 
|---|
| 3023 | igvn->hash_insert(n); | 
|---|
| 3024 | record_for_optimizer(n); | 
|---|
| 3025 | // Allocate an alias index for the header fields. Accesses to | 
|---|
| 3026 | // the header emitted during macro expansion wouldn't have | 
|---|
| 3027 | // correct memory state otherwise. | 
|---|
| 3028 | _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes())); | 
|---|
| 3029 | _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes())); | 
|---|
| 3030 | if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) { | 
|---|
| 3031 |  | 
|---|
| 3032 | // First, put on the worklist all Field edges from the Connection Graph, | 
|---|
| 3033 | // which is more accurate than putting immediate users from the Ideal Graph. | 
|---|
| 3034 | for (EdgeIterator e(ptn); e.has_next(); e.next()) { | 
|---|
| 3035 | PointsToNode* tgt = e.get(); | 
|---|
| 3036 | if (tgt->is_Arraycopy()) { | 
|---|
| 3037 | continue; | 
|---|
| 3038 | } | 
|---|
| 3039 | Node* use = tgt->ideal_node(); | 
|---|
| 3040 | assert(tgt->is_Field() && use->is_AddP(), | 
|---|
| 3041 | "only AddP nodes are Field edges in CG"); | 
|---|
| 3042 | if (use->outcnt() > 0) { // Don't process dead nodes | 
|---|
| 3043 | Node* addp2 = find_second_addp(use, use->in(AddPNode::Base)); | 
|---|
| 3044 | if (addp2 != NULL) { | 
|---|
| 3045 | assert(alloc->is_AllocateArray(), "array allocation was expected"); | 
|---|
| 3046 | alloc_worklist.append_if_missing(addp2); | 
|---|
| 3047 | } | 
|---|
| 3048 | alloc_worklist.append_if_missing(use); | 
|---|
| 3049 | } | 
|---|
| 3050 | } | 
|---|
| 3051 |  | 
|---|
| 3052 | // An allocation may have an Initialize which has raw stores. Scan | 
|---|
| 3053 | // the users of the raw allocation result and push AddP users | 
|---|
| 3054 | // on alloc_worklist. | 
|---|
| 3055 | Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms); | 
|---|
| 3056 | assert (raw_result != NULL, "must have an allocation result"); | 
|---|
| 3057 | for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) { | 
|---|
| 3058 | Node *use = raw_result->fast_out(i); | 
|---|
| 3059 | if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes | 
|---|
| 3060 | Node* addp2 = find_second_addp(use, raw_result); | 
|---|
| 3061 | if (addp2 != NULL) { | 
|---|
| 3062 | assert(alloc->is_AllocateArray(), "array allocation was expected"); | 
|---|
| 3063 | alloc_worklist.append_if_missing(addp2); | 
|---|
| 3064 | } | 
|---|
| 3065 | alloc_worklist.append_if_missing(use); | 
|---|
| 3066 | } else if (use->is_MemBar()) { | 
|---|
| 3067 | memnode_worklist.append_if_missing(use); | 
|---|
| 3068 | } | 
|---|
| 3069 | } | 
|---|
| 3070 | } | 
|---|
| 3071 | } else if (n->is_AddP()) { | 
|---|
| 3072 | JavaObjectNode* jobj = unique_java_object(get_addp_base(n)); | 
|---|
| 3073 | if (jobj == NULL || jobj == phantom_obj) { | 
|---|
| 3074 | #ifdef ASSERT | 
|---|
| 3075 | ptnode_adr(get_addp_base(n)->_idx)->dump(); | 
|---|
| 3076 | ptnode_adr(n->_idx)->dump(); | 
|---|
| 3077 | assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); | 
|---|
| 3078 | #endif | 
|---|
| 3079 | _compile->record_failure(C2Compiler::retry_no_escape_analysis()); | 
|---|
| 3080 | return; | 
|---|
| 3081 | } | 
|---|
| 3082 | Node *base = get_map(jobj->idx());  // CheckCastPP node | 
|---|
| 3083 | if (!split_AddP(n, base)) continue; // wrong type from dead path | 
|---|
| 3084 | } else if (n->is_Phi() || | 
|---|
| 3085 | n->is_CheckCastPP() || | 
|---|
| 3086 | n->is_EncodeP() || | 
|---|
| 3087 | n->is_DecodeN() || | 
|---|
| 3088 | BarrierSet::barrier_set()->barrier_set_c2()->escape_is_barrier_node(n) || | 
|---|
| 3089 | (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) { | 
|---|
| 3090 | if (visited.test_set(n->_idx)) { | 
|---|
| 3091 | assert(n->is_Phi(), "loops only through Phi's"); | 
|---|
| 3092 | continue;  // already processed | 
|---|
| 3093 | } | 
|---|
| 3094 | JavaObjectNode* jobj = unique_java_object(n); | 
|---|
| 3095 | if (jobj == NULL || jobj == phantom_obj) { | 
|---|
| 3096 | #ifdef ASSERT | 
|---|
| 3097 | ptnode_adr(n->_idx)->dump(); | 
|---|
| 3098 | assert(jobj != NULL && jobj != phantom_obj, "escaped allocation"); | 
|---|
| 3099 | #endif | 
|---|
| 3100 | _compile->record_failure(C2Compiler::retry_no_escape_analysis()); | 
|---|
| 3101 | return; | 
|---|
| 3102 | } else { | 
|---|
| 3103 | Node *val = get_map(jobj->idx());   // CheckCastPP node | 
|---|
| 3104 | TypeNode *tn = n->as_Type(); | 
|---|
| 3105 | const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr(); | 
|---|
| 3106 | assert(tinst != NULL && tinst->is_known_instance() && | 
|---|
| 3107 | tinst->instance_id() == jobj->idx() , "instance type expected."); | 
|---|
| 3108 |  | 
|---|
| 3109 | const Type *tn_type = igvn->type(tn); | 
|---|
| 3110 | const TypeOopPtr *tn_t; | 
|---|
| 3111 | if (tn_type->isa_narrowoop()) { | 
|---|
| 3112 | tn_t = tn_type->make_ptr()->isa_oopptr(); | 
|---|
| 3113 | } else { | 
|---|
| 3114 | tn_t = tn_type->isa_oopptr(); | 
|---|
| 3115 | } | 
|---|
| 3116 | if (tn_t != NULL && tinst->klass()->is_subtype_of(tn_t->klass())) { | 
|---|
| 3117 | if (tn_type->isa_narrowoop()) { | 
|---|
| 3118 | tn_type = tinst->make_narrowoop(); | 
|---|
| 3119 | } else { | 
|---|
| 3120 | tn_type = tinst; | 
|---|
| 3121 | } | 
|---|
| 3122 | igvn->hash_delete(tn); | 
|---|
| 3123 | igvn->set_type(tn, tn_type); | 
|---|
| 3124 | tn->set_type(tn_type); | 
|---|
| 3125 | igvn->hash_insert(tn); | 
|---|
| 3126 | record_for_optimizer(n); | 
|---|
| 3127 | } else { | 
|---|
| 3128 | assert(tn_type == TypePtr::NULL_PTR || | 
|---|
| 3129 | tn_t != NULL && !tinst->klass()->is_subtype_of(tn_t->klass()), | 
|---|
| 3130 | "unexpected type"); | 
|---|
| 3131 | continue; // Skip dead path with different type | 
|---|
| 3132 | } | 
|---|
| 3133 | } | 
|---|
| 3134 | } else { | 
|---|
| 3135 | debug_only(n->dump();) | 
|---|
| 3136 | assert(false, "EA: unexpected node"); | 
|---|
| 3137 | continue; | 
|---|
| 3138 | } | 
|---|
| 3139 | // push allocation's users on appropriate worklist | 
|---|
| 3140 | for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { | 
|---|
| 3141 | Node *use = n->fast_out(i); | 
|---|
| 3142 | if(use->is_Mem() && use->in(MemNode::Address) == n) { | 
|---|
| 3143 | // Load/store to instance's field | 
|---|
| 3144 | memnode_worklist.append_if_missing(use); | 
|---|
| 3145 | } else if (use->is_MemBar()) { | 
|---|
| 3146 | if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge | 
|---|
| 3147 | memnode_worklist.append_if_missing(use); | 
|---|
| 3148 | } | 
|---|
| 3149 | } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes | 
|---|
| 3150 | Node* addp2 = find_second_addp(use, n); | 
|---|
| 3151 | if (addp2 != NULL) { | 
|---|
| 3152 | alloc_worklist.append_if_missing(addp2); | 
|---|
| 3153 | } | 
|---|
| 3154 | alloc_worklist.append_if_missing(use); | 
|---|
| 3155 | } else if (use->is_Phi() || | 
|---|
| 3156 | use->is_CheckCastPP() || | 
|---|
| 3157 | use->is_EncodeNarrowPtr() || | 
|---|
| 3158 | use->is_DecodeNarrowPtr() || | 
|---|
| 3159 | BarrierSet::barrier_set()->barrier_set_c2()->escape_is_barrier_node(use) || | 
|---|
| 3160 | (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) { | 
|---|
| 3161 | alloc_worklist.append_if_missing(use); | 
|---|
| 3162 | #ifdef ASSERT | 
|---|
| 3163 | } else if (use->is_Mem()) { | 
|---|
| 3164 | assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path"); | 
|---|
| 3165 | } else if (use->is_MergeMem()) { | 
|---|
| 3166 | assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); | 
|---|
| 3167 | } else if (use->is_SafePoint()) { | 
|---|
| 3168 | // Look for MergeMem nodes for calls which reference unique allocation | 
|---|
| 3169 | // (through CheckCastPP nodes) even for debug info. | 
|---|
| 3170 | Node* m = use->in(TypeFunc::Memory); | 
|---|
| 3171 | if (m->is_MergeMem()) { | 
|---|
| 3172 | assert(_mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist"); | 
|---|
| 3173 | } | 
|---|
| 3174 | } else if (use->Opcode() == Op_EncodeISOArray) { | 
|---|
| 3175 | if (use->in(MemNode::Memory) == n || use->in(3) == n) { | 
|---|
| 3176 | // EncodeISOArray overwrites destination array | 
|---|
| 3177 | memnode_worklist.append_if_missing(use); | 
|---|
| 3178 | } | 
|---|
| 3179 | } else { | 
|---|
| 3180 | uint op = use->Opcode(); | 
|---|
| 3181 | if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) && | 
|---|
| 3182 | (use->in(MemNode::Memory) == n)) { | 
|---|
| 3183 | // They overwrite the memory edge corresponding to the destination array. | 
|---|
| 3184 | memnode_worklist.append_if_missing(use); | 
|---|
| 3185 | } else if (!(op == Op_CmpP || op == Op_Conv2B || | 
|---|
| 3186 | op == Op_CastP2X || op == Op_StoreCM || | 
|---|
| 3187 | op == Op_FastLock || op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives || | 
|---|
| 3188 | op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || | 
|---|
| 3189 | op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || | 
|---|
| 3190 | BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) { | 
|---|
| 3191 | n->dump(); | 
|---|
| 3192 | use->dump(); | 
|---|
| 3193 | assert(false, "EA: missing allocation reference path"); | 
|---|
| 3194 | } | 
|---|
| 3195 | #endif | 
|---|
| 3196 | } | 
|---|
| 3197 | } | 
|---|
| 3198 |  | 
|---|
| 3199 | } | 
|---|
| 3200 |  | 
|---|
| 3201 | // Go over all ArrayCopy nodes and if one of the inputs has a unique | 
|---|
| 3202 | // type, record it in the ArrayCopy node so we know what memory this | 
|---|
| 3203 | // node uses/modifies. | 
|---|
| 3204 | for (int next = 0; next < arraycopy_worklist.length(); next++) { | 
|---|
| 3205 | ArrayCopyNode* ac = arraycopy_worklist.at(next); | 
|---|
| 3206 | Node* dest = ac->in(ArrayCopyNode::Dest); | 
|---|
| 3207 | if (dest->is_AddP()) { | 
|---|
| 3208 | dest = get_addp_base(dest); | 
|---|
| 3209 | } | 
|---|
| 3210 | JavaObjectNode* jobj = unique_java_object(dest); | 
|---|
| 3211 | if (jobj != NULL) { | 
|---|
| 3212 | Node *base = get_map(jobj->idx()); | 
|---|
| 3213 | if (base != NULL) { | 
|---|
| 3214 | const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr(); | 
|---|
| 3215 | ac->_dest_type = base_t; | 
|---|
| 3216 | } | 
|---|
| 3217 | } | 
|---|
| 3218 | Node* src = ac->in(ArrayCopyNode::Src); | 
|---|
| 3219 | if (src->is_AddP()) { | 
|---|
| 3220 | src = get_addp_base(src); | 
|---|
| 3221 | } | 
|---|
| 3222 | jobj = unique_java_object(src); | 
|---|
| 3223 | if (jobj != NULL) { | 
|---|
| 3224 | Node* base = get_map(jobj->idx()); | 
|---|
| 3225 | if (base != NULL) { | 
|---|
| 3226 | const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr(); | 
|---|
| 3227 | ac->_src_type = base_t; | 
|---|
| 3228 | } | 
|---|
| 3229 | } | 
|---|
| 3230 | } | 
|---|
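|  | // Recording sketch: with a unique, known-instance destination or source the | 
|---|
|  | // ArrayCopy node carries the precise type, which (we assume) lets later | 
|---|
|  | // may_modify() queries answer per memory slice instead of conservatively: | 
|---|
|  | // | 
|---|
|  | //   ac->_dest_type = base_t; // precise instance type of the copy target | 
|---|
|  | //   ac->_src_type  = base_t; // likewise for the copy source | 
|---|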
| 3231 |  | 
|---|
| 3232 | // New alias types were created in split_AddP(). | 
|---|
| 3233 | uint new_index_end = (uint) _compile->num_alias_types(); | 
|---|
| 3234 | assert(unique_old == _compile->unique(), "there should be no new ideal nodes after Phase 1"); | 
|---|
| 3235 |  | 
|---|
| 3236 | //  Phase 2:  Process MemNodes from memnode_worklist: compute the new address type and | 
|---|
| 3237 | //            new values for the Memory inputs  (the Memory inputs are not | 
|---|
| 3238 | //            actually updated until phase 4). | 
|---|
| 3239 | if (memnode_worklist.length() == 0) | 
|---|
| 3240 | return;  // nothing to do | 
|---|
| 3241 | while (memnode_worklist.length() != 0) { | 
|---|
| 3242 | Node *n = memnode_worklist.pop(); | 
|---|
| 3243 | if (visited.test_set(n->_idx)) | 
|---|
| 3244 | continue; | 
|---|
| 3245 | if (n->is_Phi() || n->is_ClearArray()) { | 
|---|
| 3246 | // we don't need to do anything, but the users must be pushed | 
|---|
| 3247 | } else if (n->is_MemBar()) { // Initialize, MemBar nodes | 
|---|
| 3248 | // we don't need to do anything, but the users must be pushed | 
|---|
| 3249 | n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory); | 
|---|
| 3250 | if (n == NULL) | 
|---|
| 3251 | continue; | 
|---|
| 3252 | } else if (n->Opcode() == Op_StrCompressedCopy || | 
|---|
| 3253 | n->Opcode() == Op_EncodeISOArray) { | 
|---|
| 3254 | // get the memory projection | 
|---|
| 3255 | n = n->find_out_with(Op_SCMemProj); | 
|---|
| 3256 | assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required"); | 
|---|
| 3257 | } else { | 
|---|
| 3258 | assert(n->is_Mem(), "memory node required."); | 
|---|
| 3259 | Node *addr = n->in(MemNode::Address); | 
|---|
| 3260 | const Type *addr_t = igvn->type(addr); | 
|---|
| 3261 | if (addr_t == Type::TOP) | 
|---|
| 3262 | continue; | 
|---|
| 3263 | assert (addr_t->isa_ptr() != NULL, "pointer type required."); | 
|---|
| 3264 | int alias_idx = _compile->get_alias_index(addr_t->is_ptr()); | 
|---|
| 3265 | assert ((uint)alias_idx < new_index_end, "wrong alias index"); | 
|---|
| 3266 | Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis); | 
|---|
| 3267 | if (_compile->failing()) { | 
|---|
| 3268 | return; | 
|---|
| 3269 | } | 
|---|
| 3270 | if (mem != n->in(MemNode::Memory)) { | 
|---|
| 3271 | // We delay the memory edge update since we need the old one in the | 
|---|
| 3272 | // MergeMem code below when instance memory slices are separated. | 
|---|
| 3273 | set_map(n, mem); | 
|---|
| 3274 | } | 
|---|
| 3275 | if (n->is_Load()) { | 
|---|
| 3276 | continue;  // don't push users | 
|---|
| 3277 | } else if (n->is_LoadStore()) { | 
|---|
| 3278 | // get the memory projection | 
|---|
| 3279 | n = n->find_out_with(Op_SCMemProj); | 
|---|
| 3280 | assert(n != NULL && n->Opcode() == Op_SCMemProj, "memory projection required"); | 
|---|
| 3281 | } | 
|---|
| 3282 | } | 
|---|
| 3283 | // push user on appropriate worklist | 
|---|
| 3284 | for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { | 
|---|
| 3285 | Node *use = n->fast_out(i); | 
|---|
| 3286 | if (use->is_Phi() || use->is_ClearArray()) { | 
|---|
| 3287 | memnode_worklist.append_if_missing(use); | 
|---|
| 3288 | } else if (use->is_Mem() && use->in(MemNode::Memory) == n) { | 
|---|
| 3289 | if (use->Opcode() == Op_StoreCM) // Ignore cardmark stores | 
|---|
| 3290 | continue; | 
|---|
| 3291 | memnode_worklist.append_if_missing(use); | 
|---|
| 3292 | } else if (use->is_MemBar()) { | 
|---|
| 3293 | if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge | 
|---|
| 3294 | memnode_worklist.append_if_missing(use); | 
|---|
| 3295 | } | 
|---|
| 3296 | #ifdef ASSERT | 
|---|
| 3297 | } else if(use->is_Mem()) { | 
|---|
| 3298 | assert(use->in(MemNode::Memory) != n, "EA: missing memory path"); | 
|---|
| 3299 | } else if (use->is_MergeMem()) { | 
|---|
| 3300 | assert(_mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist"); | 
|---|
| 3301 | } else if (use->Opcode() == Op_EncodeISOArray) { | 
|---|
| 3302 | if (use->in(MemNode::Memory) == n || use->in(3) == n) { | 
|---|
| 3303 | // EncodeISOArray overwrites destination array | 
|---|
| 3304 | memnode_worklist.append_if_missing(use); | 
|---|
| 3305 | } | 
|---|
| 3306 | } else { | 
|---|
| 3307 | uint op = use->Opcode(); | 
|---|
| 3308 | if ((use->in(MemNode::Memory) == n) && | 
|---|
| 3309 | (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) { | 
|---|
| 3310 | // They overwrite the memory edge corresponding to the destination array. | 
|---|
| 3311 | memnode_worklist.append_if_missing(use); | 
|---|
| 3312 | } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) || | 
|---|
| 3313 | op == Op_AryEq || op == Op_StrComp || op == Op_HasNegatives || | 
|---|
| 3314 | op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || | 
|---|
| 3315 | op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) { | 
|---|
| 3316 | n->dump(); | 
|---|
| 3317 | use->dump(); | 
|---|
| 3318 | assert(false, "EA: missing memory path"); | 
|---|
| 3319 | } | 
|---|
| 3320 | #endif | 
|---|
| 3321 | } | 
|---|
| 3322 | } | 
|---|
| 3323 | } | 
|---|
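|  | // Deferral sketch: Phase 2 only records the recomputed memory input in the | 
|---|
|  | // node map (set_map(n, mem) above); the actual Memory edges are rewritten | 
|---|
|  | // later, in Phase 4. Schematically: | 
|---|
|  | // | 
|---|
|  | //   Node* mem = get_map(n->_idx);          // value computed in Phase 2 | 
|---|
|  | //   if (mem != NULL) { | 
|---|
|  | //     n->set_req(MemNode::Memory, mem);    // applied in Phase 4 | 
|---|
|  | //   } | 
|---|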
| 3324 |  | 
|---|
| 3325 | //  Phase 3:  Process MergeMem nodes from mergemem_worklist. | 
|---|
| 3326 | //            Walk each memory slice moving the first node encountered of each | 
|---|
| 3327 | //            instance type to the input corresponding to its alias index. | 
|---|
| 3328 | uint length = _mergemem_worklist.length(); | 
|---|
| 3329 | for( uint next = 0; next < length; ++next ) { | 
|---|
| 3330 | MergeMemNode* nmm = _mergemem_worklist.at(next); | 
|---|
| 3331 | assert(!visited.test_set(nmm->_idx), "should not be visited before"); | 
|---|
| 3332 | // Note: we don't want to use MergeMemStream here because we only want to | 
|---|
| 3333 | // scan inputs which exist at the start, not ones we add during processing. | 
|---|
| 3334 | // Note 2: MergeMem may already contain instance memory slices added | 
|---|
| 3335 | // during find_inst_mem() call when memory nodes were processed above. | 
|---|
| 3336 | igvn->hash_delete(nmm); | 
|---|
| 3337 | uint nslices = MIN2(nmm->req(), new_index_start); | 
|---|
| 3338 | for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) { | 
|---|
| 3339 | Node* mem = nmm->in(i); | 
|---|
| 3340 | Node* cur = NULL; | 
|---|
| 3341 | if (mem == NULL || mem->is_top()) | 
|---|
| 3342 | continue; | 
|---|
| 3343 | // First, update mergemem by moving memory nodes to corresponding slices | 
|---|
| 3344 | // if their type became more precise since this mergemem was created. | 
|---|
| 3345 | while (mem->is_Mem()) { | 
|---|
| 3346 | const Type *at = igvn->type(mem->in(MemNode::Address)); | 
|---|
| 3347 | if (at != Type::TOP) { | 
|---|
| 3348 | assert(at->isa_ptr() != NULL, "pointer type required."); | 
|---|
| 3349 | uint idx = (uint)_compile->get_alias_index(at->is_ptr()); | 
|---|
| 3350 | if (idx == i) { | 
|---|
| 3351 | if (cur == NULL) | 
|---|
| 3352 | cur = mem; | 
|---|
| 3353 | } else { | 
|---|
| 3354 | if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) { | 
|---|
| 3355 | nmm->set_memory_at(idx, mem); | 
|---|
| 3356 | } | 
|---|
| 3357 | } | 
|---|
| 3358 | } | 
|---|
| 3359 | mem = mem->in(MemNode::Memory); | 
|---|
| 3360 | } | 
|---|
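|  | // cur is the first node on this chain whose alias index matches slice i; | 
|---|
|  | // if none was found, fall back to the tail of the walked chain. | 
|---|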
| 3361 | nmm->set_memory_at(i, (cur != NULL) ? cur : mem); | 
|---|
| 3362 | // Find any instance of the current type if we haven't already | 
|---|
| 3363 | // encountered a memory slice of the instance along the memory chain. | 
|---|
| 3364 | for (uint ni = new_index_start; ni < new_index_end; ni++) { | 
|---|
| 3365 | if ((uint)_compile->get_general_index(ni) == i) { | 
|---|
| 3366 | Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni); | 
|---|
| 3367 | if (nmm->is_empty_memory(m)) { | 
|---|
| 3368 | Node* result = find_inst_mem(mem, ni, orig_phis); | 
|---|
| 3369 | if (_compile->failing()) { | 
|---|
| 3370 | return; | 
|---|
| 3371 | } | 
|---|
| 3372 | nmm->set_memory_at(ni, result); | 
|---|
| 3373 | } | 
|---|
| 3374 | } | 
|---|
| 3375 | } | 
|---|
| 3376 | } | 
|---|
| 3377 | // Find the memory values for the rest of the instance slices | 
|---|
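|  | // If this MergeMem has no memory for the instance slice yet, | 
|---|
|  | // step_through_mergemem() answers with the base memory and the | 
|---|
|  | // general slice is searched recursively instead. | 
|---|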
| 3378 | for (uint ni = new_index_start; ni < new_index_end; ni++) { | 
|---|
| 3379 | const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr(); | 
|---|
| 3380 | Node* result = step_through_mergemem(nmm, ni, tinst); | 
|---|
| 3381 | if (result == nmm->base_memory()) { | 
|---|
| 3382 | // Didn't find instance memory, search through the general slice recursively. | 
|---|
| 3383 | result = nmm->memory_at(_compile->get_general_index(ni)); | 
|---|
| 3384 | result = find_inst_mem(result, ni, orig_phis); | 
|---|
| 3385 | if (_compile->failing()) { | 
|---|
| 3386 | return; | 
|---|
| 3387 | } | 
|---|
| 3388 | nmm->set_memory_at(ni, result); | 
|---|
| 3389 | } | 
|---|
| 3390 | } | 
|---|
| 3391 | igvn->hash_insert(nmm); | 
|---|
| 3392 | record_for_optimizer(nmm); | 
|---|
| 3393 | } | 
|---|
| 3394 |  | 
|---|
| 3395 | //  Phase 4:  Update the inputs of non-instance memory Phis and | 
|---|
| 3396 | //            the Memory input of memnodes | 
|---|
| 3397 | // First update the inputs of any non-instance Phi's from | 
|---|
| 3398 | // which we split out an instance Phi.  Note we don't have | 
|---|
| 3399 | // to recursively process Phi's encountered on the input memory | 
|---|
| 3400 | // chains as is done in split_memory_phi() since they will | 
|---|
| 3401 | // also be processed here. | 
|---|
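|  | // Note: find_inst_mem() appends original Phis it splits to orig_phis, | 
|---|
|  | // and orig_phis.length() is re-read each iteration, so Phis discovered | 
|---|
|  | // during this loop are processed by it as well. | 
|---|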
| 3402 | for (int j = 0; j < orig_phis.length(); j++) { | 
|---|
| 3403 | PhiNode *phi = orig_phis.at(j); | 
|---|
| 3404 | int alias_idx = _compile->get_alias_index(phi->adr_type()); | 
|---|
| 3405 | igvn->hash_delete(phi); | 
|---|
| 3406 | for (uint i = 1; i < phi->req(); i++) { | 
|---|
| 3407 | Node *mem = phi->in(i); | 
|---|
| 3408 | Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis); | 
|---|
| 3409 | if (_compile->failing()) { | 
|---|
| 3410 | return; | 
|---|
| 3411 | } | 
|---|
| 3412 | if (mem != new_mem) { | 
|---|
| 3413 | phi->set_req(i, new_mem); | 
|---|
| 3414 | } | 
|---|
| 3415 | } | 
|---|
| 3416 | igvn->hash_insert(phi); | 
|---|
| 3417 | record_for_optimizer(phi); | 
|---|
| 3418 | } | 
|---|
| 3419 |  | 
|---|
| 3420 | // Update the memory inputs of MemNodes with the value we computed | 
|---|
| 3421 | // in Phase 2 and move stores' memory users to the corresponding memory slices. | 
|---|
| 3422 | // Disable memory split verification code until the fix for 6984348. | 
|---|
| 3423 | // Currently it produces false negative results since it does not cover all cases. | 
|---|
| 3424 | #if 0 // ifdef ASSERT | 
|---|
| 3425 | visited.Reset(); | 
|---|
| 3426 | Node_Stack old_mems(arena, _compile->unique() >> 2); | 
|---|
| 3427 | #endif | 
|---|
| 3428 | for (uint i = 0; i < ideal_nodes.size(); i++) { | 
|---|
| 3429 | Node*    n = ideal_nodes.at(i); | 
|---|
| 3430 | Node* nmem = get_map(n->_idx); | 
|---|
| 3431 | assert(nmem != NULL, "sanity"); | 
|---|
| 3432 | if (n->is_Mem()) { | 
|---|
| 3433 | #if 0 // ifdef ASSERT | 
|---|
| 3434 | Node* old_mem = n->in(MemNode::Memory); | 
|---|
| 3435 | if (!visited.test_set(old_mem->_idx)) { | 
|---|
| 3436 | old_mems.push(old_mem, old_mem->outcnt()); | 
|---|
| 3437 | } | 
|---|
| 3438 | #endif | 
|---|
| 3439 | assert(n->in(MemNode::Memory) != nmem, "sanity"); | 
|---|
| 3440 | if (!n->is_Load()) { | 
|---|
| 3441 | // Move memory users of a store first. | 
|---|
| 3442 | move_inst_mem(n, orig_phis); | 
|---|
| 3443 | } | 
|---|
| 3444 | // Now update memory input | 
|---|
| 3445 | igvn->hash_delete(n); | 
|---|
| 3446 | n->set_req(MemNode::Memory, nmem); | 
|---|
| 3447 | igvn->hash_insert(n); | 
|---|
| 3448 | record_for_optimizer(n); | 
|---|
| 3449 | } else { | 
|---|
| 3450 | assert(n->is_Allocate() || n->is_CheckCastPP() || | 
|---|
| 3451 | n->is_AddP() || n->is_Phi(), "unknown node used for set_map()"); | 
|---|
| 3452 | } | 
|---|
| 3453 | } | 
|---|
| 3454 | #if 0 // ifdef ASSERT | 
|---|
| 3455 | // Verify that memory was split correctly | 
|---|
| 3456 | while (old_mems.is_nonempty()) { | 
|---|
| 3457 | Node* old_mem = old_mems.node(); | 
|---|
| 3458 | uint  old_cnt = old_mems.index(); | 
|---|
| 3459 | old_mems.pop(); | 
|---|
| 3460 | assert(old_cnt == old_mem->outcnt(), "old mem could be lost"); | 
|---|
| 3461 | } | 
|---|
| 3462 | #endif | 
|---|
| 3463 | } | 
|---|
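|  |  | 
|---|
|  | // An illustrative (hypothetical) example of what the phases above enable: | 
|---|
|  | // once escape analysis proves the allocation below NoEscape, its fields | 
|---|
|  | // get private memory slices, the loads fold to the stored values, and | 
|---|
|  | // scalar replacement can remove the allocation entirely: | 
|---|
|  | // | 
|---|
|  | //   int f() { | 
|---|
|  | //     Point p = new Point(1, 2);  // never escapes f() | 
|---|
|  | //     return p.x + p.y;           // folds to 3 | 
|---|
|  | //   } | 
|---|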
| 3464 |  | 
|---|
| 3465 | #ifndef PRODUCT | 
|---|
| 3466 | static const char *node_type_names[] = { | 
|---|
| 3467 | "UnknownType", | 
|---|
| 3468 | "JavaObject", | 
|---|
| 3469 | "LocalVar", | 
|---|
| 3470 | "Field", | 
|---|
| 3471 | "Arraycopy" | 
|---|
| 3472 | }; | 
|---|
| 3473 |  | 
|---|
| 3474 | static const char *esc_names[] = { | 
|---|
| 3475 | "UnknownEscape", | 
|---|
| 3476 | "NoEscape", | 
|---|
| 3477 | "ArgEscape", | 
|---|
| 3478 | "GlobalEscape" | 
|---|
| 3479 | }; | 
|---|
| 3480 |  | 
|---|
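|  | // Dump one connection-graph node. In the output, "P" marks a JavaObject, | 
|---|
|  | // "F" a Field, "cp" an Arraycopy, and "b" a base use; "NSR" flags a | 
|---|
|  | // JavaObject that is not scalar replaceable. | 
|---|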
| 3481 | void PointsToNode::dump(bool print_state) const { | 
|---|
| 3482 | NodeType nt = node_type(); | 
|---|
| 3483 | tty->print( "%s ", node_type_names[(int) nt]); | 
|---|
| 3484 | if (print_state) { | 
|---|
| 3485 | EscapeState es = escape_state(); | 
|---|
| 3486 | EscapeState fields_es = fields_escape_state(); | 
|---|
| 3487 | tty->print( "%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]); | 
|---|
| 3488 | if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) | 
|---|
| 3489 | tty->print( "NSR "); | 
|---|
| 3490 | } | 
|---|
| 3491 | if (is_Field()) { | 
|---|
| 3492 | FieldNode* f = (FieldNode*)this; | 
|---|
| 3493 | if (f->is_oop()) | 
|---|
| 3494 | tty->print( "oop "); | 
|---|
| 3495 | if (f->offset() > 0) | 
|---|
| 3496 | tty->print( "+%d ", f->offset()); | 
|---|
| 3497 | tty->print( "("); | 
|---|
| 3498 | for (BaseIterator i(f); i.has_next(); i.next()) { | 
|---|
| 3499 | PointsToNode* b = i.get(); | 
|---|
| 3500 | tty->print( " %d%s", b->idx(),(b->is_JavaObject() ? "P": "")); | 
|---|
| 3501 | } | 
|---|
| 3502 | tty->print( " )"); | 
|---|
| 3503 | } | 
|---|
| 3504 | tty->print( "["); | 
|---|
| 3505 | for (EdgeIterator i(this); i.has_next(); i.next()) { | 
|---|
| 3506 | PointsToNode* e = i.get(); | 
|---|
| 3507 | tty->print( " %d%s%s", e->idx(),(e->is_JavaObject() ? "P": (e->is_Field() ? "F": "")), e->is_Arraycopy() ? "cp": ""); | 
|---|
| 3508 | } | 
|---|
| 3509 | tty->print( " ["); | 
|---|
| 3510 | for (UseIterator i(this); i.has_next(); i.next()) { | 
|---|
| 3511 | PointsToNode* u = i.get(); | 
|---|
| 3512 | bool is_base = false; | 
|---|
| 3513 | if (PointsToNode::is_base_use(u)) { | 
|---|
| 3514 | is_base = true; | 
|---|
| 3515 | u = PointsToNode::get_use_node(u)->as_Field(); | 
|---|
| 3516 | } | 
|---|
| 3517 | tty->print( " %d%s%s", u->idx(), is_base ? "b": "", u->is_Arraycopy() ? "cp": ""); | 
|---|
| 3518 | } | 
|---|
| 3519 | tty->print( " ]]  "); | 
|---|
| 3520 | if (_node == NULL) | 
|---|
| 3521 | tty->print_cr( "<null>"); | 
|---|
| 3522 | else | 
|---|
| 3523 | _node->dump(); | 
|---|
| 3524 | } | 
|---|
| 3525 |  | 
|---|
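|  | // Dump the connection graph for each allocation (and boxing call) together | 
|---|
|  | // with the locals and fields that reference it. Escaping objects are | 
|---|
|  | // skipped unless Verbose is set. | 
|---|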
| 3526 | void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) { | 
|---|
| 3527 | bool first = true; | 
|---|
| 3528 | int ptnodes_length = ptnodes_worklist.length(); | 
|---|
| 3529 | for (int i = 0; i < ptnodes_length; i++) { | 
|---|
| 3530 | PointsToNode *ptn = ptnodes_worklist.at(i); | 
|---|
| 3531 | if (ptn == NULL || !ptn->is_JavaObject()) | 
|---|
| 3532 | continue; | 
|---|
| 3533 | PointsToNode::EscapeState es = ptn->escape_state(); | 
|---|
| 3534 | if ((es != PointsToNode::NoEscape) && !Verbose) { | 
|---|
| 3535 | continue; | 
|---|
| 3536 | } | 
|---|
| 3537 | Node* n = ptn->ideal_node(); | 
|---|
| 3538 | if (n->is_Allocate() || (n->is_CallStaticJava() && | 
|---|
| 3539 | n->as_CallStaticJava()->is_boxing_method())) { | 
|---|
| 3540 | if (first) { | 
|---|
| 3541 | tty->cr(); | 
|---|
| 3542 | tty->print( "======== Connection graph for "); | 
|---|
| 3543 | _compile->method()->print_short_name(); | 
|---|
| 3544 | tty->cr(); | 
|---|
| 3545 | first = false; | 
|---|
| 3546 | } | 
|---|
| 3547 | ptn->dump(); | 
|---|
| 3548 | // Print all locals and fields which reference this allocation | 
|---|
| 3549 | for (UseIterator j(ptn); j.has_next(); j.next()) { | 
|---|
| 3550 | PointsToNode* use = j.get(); | 
|---|
| 3551 | if (use->is_LocalVar()) { | 
|---|
| 3552 | use->dump(Verbose); | 
|---|
| 3553 | } else if (Verbose) { | 
|---|
| 3554 | use->dump(); | 
|---|
| 3555 | } | 
|---|
| 3556 | } | 
|---|
| 3557 | tty->cr(); | 
|---|
| 3558 | } | 
|---|
| 3559 | } | 
|---|
| 3560 | } | 
|---|
| 3561 | #endif | 
|---|
| 3562 |  | 
|---|
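|  | // Put the node and all of its users on the IGVN worklist so the optimizer | 
|---|
|  | // revisits them after the graph has been reshaped. | 
|---|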
| 3563 | void ConnectionGraph::record_for_optimizer(Node *n) { | 
|---|
| 3564 | _igvn->_worklist.push(n); | 
|---|
| 3565 | _igvn->add_users_to_worklist(n); | 
|---|
| 3566 | } | 
|---|
| 3567 |  | 
|---|