/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_NODE_HPP
#define SHARE_OPTO_NODE_HPP

#include "libadt/vectset.hpp"
#include "opto/compile.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style


class AbstractLockNode;
class AddNode;
class AddPNode;
class AliasInfo;
class AllocateArrayNode;
class AllocateNode;
class ArrayCopyNode;
class Block;
class BoolNode;
class BoxLockNode;
class CMoveNode;
class CallDynamicJavaNode;
class CallJavaNode;
class CallLeafNode;
class CallNode;
class CallRuntimeNode;
class CallStaticJavaNode;
class CastIINode;
class CatchNode;
class CatchProjNode;
class CheckCastPPNode;
class ClearArrayNode;
class CmpNode;
class CodeBuffer;
class ConstraintCastNode;
class ConNode;
class CompareAndSwapNode;
class CompareAndExchangeNode;
class CountedLoopNode;
class CountedLoopEndNode;
class DecodeNarrowPtrNode;
class DecodeNNode;
class DecodeNKlassNode;
class EncodeNarrowPtrNode;
class EncodePNode;
class EncodePKlassNode;
class FastLockNode;
class FastUnlockNode;
class IfNode;
class IfProjNode;
class IfFalseNode;
class IfTrueNode;
class InitializeNode;
class JVMState;
class JumpNode;
class JumpProjNode;
class LoadNode;
class LoadBarrierNode;
class LoadBarrierSlowRegNode;
class LoadStoreNode;
class LoadStoreConditionalNode;
class LockNode;
class LoopNode;
class MachBranchNode;
class MachCallDynamicJavaNode;
class MachCallJavaNode;
class MachCallLeafNode;
class MachCallNode;
class MachCallRuntimeNode;
class MachCallStaticJavaNode;
class MachConstantBaseNode;
class MachConstantNode;
class MachGotoNode;
class MachIfNode;
class MachJumpNode;
class MachNode;
class MachNullCheckNode;
class MachProjNode;
class MachReturnNode;
class MachSafePointNode;
class MachSpillCopyNode;
class MachTempNode;
class MachMergeNode;
class MachMemBarNode;
class Matcher;
class MemBarNode;
class MemBarStoreStoreNode;
class MemNode;
class MergeMemNode;
class MulNode;
class MultiNode;
class MultiBranchNode;
class NeverBranchNode;
class OuterStripMinedLoopNode;
class OuterStripMinedLoopEndNode;
class Node;
class Node_Array;
class Node_List;
class Node_Stack;
class NullCheckNode;
class OopMap;
class ParmNode;
class PCTableNode;
class PhaseCCP;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseTransform;
class PhaseValues;
class PhiNode;
class Pipeline;
class ProjNode;
class RangeCheckNode;
class RegMask;
class RegionNode;
class RootNode;
class SafePointNode;
class SafePointScalarObjectNode;
class StartNode;
class State;
class StoreNode;
class SubNode;
class Type;
class TypeNode;
class UnlockNode;
class VectorNode;
class LoadVectorNode;
class StoreVectorNode;
class VectorSet;
typedef void (*NFunc)(Node&,void*);
extern "C" {
  typedef int (*C_sort_func_t)(const void *, const void *);
}

// The type of all node counts and indexes.
// It must hold at least 16 bits, but must also be fast to load and store.
// This type, if less than 32 bits, could limit the number of possible nodes.
// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
typedef unsigned int node_idx_t;


#ifndef OPTO_DU_ITERATOR_ASSERT
#ifdef ASSERT
#define OPTO_DU_ITERATOR_ASSERT 1
#else
#define OPTO_DU_ITERATOR_ASSERT 0
#endif
#endif //OPTO_DU_ITERATOR_ASSERT

#if OPTO_DU_ITERATOR_ASSERT
class DUIterator;
class DUIterator_Fast;
class DUIterator_Last;
#else
typedef uint   DUIterator;
typedef Node** DUIterator_Fast;
typedef Node** DUIterator_Last;
#endif

// Node Sentinel
#define NodeSentinel (Node*)-1

// Unknown count frequency
#define COUNT_UNKNOWN (-1.0f)

//------------------------------Node-------------------------------------------
// Nodes define actions in the program.  They create values, which have types.
// They are both vertices in a directed graph and program primitives.  Nodes
// are labeled; the label is the "opcode", the primitive function in the lambda
// calculus sense that gives meaning to the Node.  Node inputs are ordered (so
// that "a-b" is different from "b-a").  The inputs to a Node are the inputs to
// the Node's function.  These inputs also define a Type equation for the Node.
// Solving these Type equations amounts to doing dataflow analysis.
// Control and data are uniformly represented in the graph.  Finally, Nodes
// have a unique dense integer index which is used to index into side arrays
// whenever I have phase-specific information.
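//
// As a small illustrative sketch (assuming the usual C2 convention that slot 0
// holds the optional control input and data inputs start at slot 1), the
// expression "a-b" becomes something like:
//
//   Node* diff = new SubINode(a, b);   // in(1) == a, in(2) == b, in(0) == NULL
//   Node* flip = new SubINode(b, a);   // a different function: computes "b-a"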

class Node {
  friend class VMStructs;

  // Lots of restrictions on cloning Nodes
  Node(const Node&);            // not defined; linker error to use these
  Node &operator=(const Node &rhs);

public:
  friend class Compile;
#if OPTO_DU_ITERATOR_ASSERT
  friend class DUIterator_Common;
  friend class DUIterator;
  friend class DUIterator_Fast;
  friend class DUIterator_Last;
#endif

  // Because Nodes come and go, I define an Arena of Node structures to pull
  // from.  This should allow fast access to node creation & deletion.  This
  // field is a local cache of a value defined in some "program fragment" of
  // which these Nodes are just a part.

  inline void* operator new(size_t x) throw() {
    Compile* C = Compile::current();
    Node* n = (Node*)C->node_arena()->Amalloc_D(x);
    return (void*)n;
  }

  // Delete is a NOP
  void operator delete( void *ptr ) {}
  // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
  void destruct();

  // Create a new Node.  Required is the number of inputs required for
  // semantic correctness.
  Node( uint required );

  // Create a new Node with given input edges.
  // This version requires use of the "edge-count" new.
  // E.g.  new (C,3) FooNode( C, NULL, left, right );
  Node( Node *n0 );
  Node( Node *n0, Node *n1 );
  Node( Node *n0, Node *n1, Node *n2 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3,
        Node *n4, Node *n5, Node *n6 );

  // Clone an inherited Node given only the base Node type.
  Node* clone() const;

  // Clone a Node, immediately supplying one or two new edges.
  // The first and second arguments, if non-null, replace in(1) and in(2),
  // respectively.
  Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
    Node* nn = clone();
    if (in1 != NULL)  nn->set_req(1, in1);
    if (in2 != NULL)  nn->set_req(2, in2);
    return nn;
  }

private:
  // Shared setup for the above constructors.
  // Handles all interactions with Compile::current.
  // Puts initial values in all Node fields except _idx.
  // Returns the initial value for _idx, which cannot
  // be initialized by assignment.
  inline int Init(int req);

//----------------- input edge handling
protected:
  friend class PhaseCFG;        // Access to address of _in array elements
  Node **_in;                   // Array of use-def references to Nodes
  Node **_out;                  // Array of def-use references to Nodes

  // Input edges are split into two categories.  Required edges are required
  // for semantic correctness; order is important and NULLs are allowed.
  // Precedence edges are used to help determine execution order and are
  // added, e.g., for scheduling purposes.  They are unordered and not
  // duplicated; they have no embedded NULLs.  Edges from 0 to _cnt-1
  // are required, from _cnt to _max-1 are precedence edges.
  node_idx_t _cnt;              // Total number of required Node inputs.

  node_idx_t _max;              // Actual length of input array.
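
  // A brief usage sketch (informal, simply restating the layout described
  // above): required inputs live in slots [0, req()) and may contain NULLs,
  // while precedence edges live in slots [req(), len()) packed without gaps,
  // so a walk over both kinds of input typically looks like:
  //
  //   for (uint i = 0; i < n->req(); i++) { Node* def = n->in(i); /* may be NULL */ }
  //   for (uint i = n->req(); i < n->len(); i++) {
  //     Node* prec = n->in(i);
  //     if (prec == NULL) break;       // remaining precedence slots are empty
  //   }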

  // Output edges are an unordered list of def-use edges which exactly
  // correspond to required input edges which point from other nodes
  // to this one.  Thus the count of the output edges is the number of
  // users of this node.
  node_idx_t _outcnt;           // Total number of Node outputs.

  node_idx_t _outmax;           // Actual length of output array.

  // Grow the actual input array to the next larger power-of-2 bigger than len.
  void grow( uint len );
  // Grow the output array to the next larger power-of-2 bigger than len.
  void out_grow( uint len );

public:
  // Each Node is assigned a unique small/dense number.  This number is used
  // to index into auxiliary arrays of data and bit vectors.
  // The field _idx is declared constant to defend against inadvertent assignments,
  // since it is used by clients as a naked field. However, the field's value can be
  // changed using the set_idx() method.
  //
  // The PhaseRenumberLive phase renumbers nodes based on liveness information.
  // Therefore, it updates the value of the _idx field. The parse-time _idx is
  // preserved in _parse_idx.
  const node_idx_t _idx;
  DEBUG_ONLY(const node_idx_t _parse_idx;)

  // Get the (read-only) number of input edges
  uint req() const { return _cnt; }
  uint len() const { return _max; }
  // Get the (read-only) number of output edges
  uint outcnt() const { return _outcnt; }

#if OPTO_DU_ITERATOR_ASSERT
  // Iterate over the out-edges of this node.  Deletions are illegal.
  inline DUIterator outs() const;
  // Use this when the out array might have changed to suppress asserts.
  inline DUIterator& refresh_out_pos(DUIterator& i) const;
  // Does the node have an out at this position?  (Used for iteration.)
  inline bool has_out(DUIterator& i) const;
  inline Node*    out(DUIterator& i) const;
  // Iterate over the out-edges of this node.  All changes are illegal.
  inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
  inline Node*    fast_out(DUIterator_Fast& i) const;
  // Iterate over the out-edges of this node, deleting one at a time.
  inline DUIterator_Last last_outs(DUIterator_Last& min) const;
  inline Node*    last_out(DUIterator_Last& i) const;
  // The inline bodies of all these methods are after the iterator definitions.
#else
  // Iterate over the out-edges of this node.  Deletions are illegal.
  // This iteration uses integral indexes, to decouple from array reallocations.
  DUIterator outs() const  { return 0; }
  // Use this when the out array might have changed to suppress asserts.
  DUIterator refresh_out_pos(DUIterator i) const { return i; }

  // Reference to the i'th output Node.  Error if out of bounds.
  Node*    out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
  // Does the node have an out at this position?  (Used for iteration.)
  bool has_out(DUIterator i) const { return i < _outcnt; }

  // Iterate over the out-edges of this node.  All changes are illegal.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    max = out + (ptrdiff_t)_outcnt;
    // Return the base pointer:
    return out;
  }
  Node*    fast_out(DUIterator_Fast i) const  { return *i; }
  // Iterate over the out-edges of this node, deleting one at a time.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Last last_outs(DUIterator_Last& min) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    min = out;
    // Return the pointer to the start of the iteration:
    return out + (ptrdiff_t)_outcnt - 1;
  }
  Node*    last_out(DUIterator_Last i) const  { return *i; }
#endif
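
  // Typical iteration idioms over the def-use (out) edges, shown only as an
  // informal sketch of how the iterator flavors above are meant to be used
  // (the DUIterator definitions that follow the class give the full checked
  // contracts):
  //
  //   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  //     Node* use = n->fast_out(i);    // graph must not change during the loop
  //   }
  //   for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; i--) {
  //     Node* use = n->last_out(i);    // may delete the current out-edge
  //   }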

  // Reference to the i'th input Node.  Error if out of bounds.
  Node* in(uint i) const { assert(i < _max, "oob: i=%d, _max=%d", i, _max); return _in[i]; }
  // Reference to the i'th input Node.  NULL if out of bounds.
  Node* lookup(uint i) const { return ((i < _max) ? _in[i] : NULL); }
  // Reference to the i'th output Node.  Error if out of bounds.
  // Use this accessor sparingly.  We are trying to use iterators instead.
  Node* raw_out(uint i) const { assert(i < _outcnt, "oob"); return _out[i]; }
  // Return the unique out edge.
  Node* unique_out() const { assert(_outcnt==1, "not unique"); return _out[0]; }
  // Delete out edge at position 'i' by moving last out edge to position 'i'
  void  raw_del_out(uint i) {
    assert(i < _outcnt, "oob");
    assert(_outcnt > 0, "oob");
#if OPTO_DU_ITERATOR_ASSERT
    // Record that a change happened here.
    debug_only(_last_del = _out[i]; ++_del_tick);
#endif
    _out[i] = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
  }

#ifdef ASSERT
  bool is_dead() const;
#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
#endif
  // Check whether node has become unreachable
  bool is_unreachable(PhaseIterGVN &igvn) const;

  // Set a required input edge, also updates corresponding output edge
  void add_req( Node *n ); // Append a NEW required input
  void add_req( Node *n0, Node *n1 ) {
    add_req(n0); add_req(n1); }
  void add_req( Node *n0, Node *n1, Node *n2 ) {
    add_req(n0); add_req(n1); add_req(n2); }
  void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
  void del_req( uint idx ); // Delete required edge & compact
  void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
  void ins_req( uint i, Node *n ); // Insert a NEW required input
  void set_req( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob: i=%d, _cnt=%d", i, _cnt);
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    Node** p = &_in[i];    // cache this._in, across the del_out call
    if (*p != NULL)  (*p)->del_out((Node *)this);
    (*p) = n;
    if (n != NULL)      n->add_out((Node *)this);
    Compile::current()->record_modified_node(this);
  }
  // Light version of set_req() to init inputs after node creation.
  void init_req( uint i, Node *n ) {
    assert( i == 0 && this == n ||
            is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob");
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    assert( _in[i] == NULL, "sanity");
    _in[i] = n;
    if (n != NULL)      n->add_out((Node *)this);
    Compile::current()->record_modified_node(this);
  }
  // Find first occurrence of n among my edges:
  int find_edge(Node* n);
  int find_prec_edge(Node* n) {
    for (uint i = req(); i < len(); i++) {
      if (_in[i] == n) return i;
      if (_in[i] == NULL) {
        DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == NULL, "Gap in prec edges!"); )
        break;
      }
    }
    return -1;
  }
  int replace_edge(Node* old, Node* neww);
  int replace_edges_in_range(Node* old, Node* neww, int start, int end);
  // NULL out all inputs to eliminate incoming Def-Use edges.
  // Return the number of edges between 'n' and 'this'
  int  disconnect_inputs(Node *n, Compile *c);

  // Quickly, return true if and only if I am Compile::current()->top().
  bool is_top() const {
    assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
    return (_out == NULL);
  }
  // Reaffirm invariants for is_top.  (Only from Compile::set_cached_top_node.)
  void setup_is_top();

  // Strip away casting.  (It is depth-limited.)
  Node* uncast(bool keep_deps = false) const;
  // Return whether two Nodes are equivalent, after stripping casting.
  bool eqv_uncast(const Node* n, bool keep_deps = false) const {
    return (this->uncast(keep_deps) == n->uncast(keep_deps));
  }

  // Find an out (use) of the current node that matches the given opcode.
  Node* find_out_with(int opcode);
  // Return true if the current node has an out that matches opcode.
  bool has_out_with(int opcode);
  // Return true if the current node has an out that matches any of the opcodes.
  bool has_out_with(int opcode1, int opcode2, int opcode3, int opcode4);

private:
  static Node* uncast_helper(const Node* n, bool keep_deps);

  // Add an output edge to the end of the list
  void add_out( Node *n ) {
    if (is_top())  return;
    if( _outcnt == _outmax ) out_grow(_outcnt);
    _out[_outcnt++] = n;
  }
  // Delete an output edge
  void del_out( Node *n ) {
    if (is_top())  return;
    Node** outp = &_out[_outcnt];
    // Find and remove n
    do {
      assert(outp > _out, "Missing Def-Use edge");
    } while (*--outp != n);
    *outp = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
    // Record that a change happened here.
#if OPTO_DU_ITERATOR_ASSERT
    debug_only(_last_del = n; ++_del_tick);
#endif
  }
  // Close gap after removing edge.
  void close_prec_gap_at(uint gap) {
    assert(_cnt <= gap && gap < _max, "no valid prec edge");
    uint i = gap;
    Node *last = NULL;
    for (; i < _max-1; ++i) {
      Node *next = _in[i+1];
      if (next == NULL) break;
      last = next;
    }
    _in[gap] = last; // Move last slot to empty one.
    _in[i] = NULL;   // NULL out last slot.
  }

public:
  // Globally replace this node by a given new node, updating all uses.
  void replace_by(Node* new_node);
  // Globally replace this node by a given new node, updating all uses
  // and cutting input edges of old node.
  void subsume_by(Node* new_node, Compile* c) {
    replace_by(new_node);
    disconnect_inputs(NULL, c);
  }
  void set_req_X( uint i, Node *n, PhaseIterGVN *igvn );
  // Find the one non-null required input.  RegionNode only
  Node *nonnull_req() const;
  // Add or remove precedence edges
  void add_prec( Node *n );
  void rm_prec( uint i );
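  // (Usage sketch, not a normative example: a scheduling-only constraint such
  // as "keep this node after that one" is typically added with
  // n->add_prec(other), which never changes req() and so never changes the
  // semantics of n; rm_prec(i) drops the precedence edge in slot i again.)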

  // Note: prec(i) will not necessarily point to n if edge already exists.
  void set_prec( uint i, Node *n ) {
    assert(i < _max, "oob: i=%d, _max=%d", i, _max);
    assert(is_not_dead(n), "can not use dead node");
    assert(i >= _cnt, "not a precedence edge");
    // Avoid spec violation: duplicated prec edge.
    if (_in[i] == n) return;
    if (n == NULL || find_prec_edge(n) != -1) {
      rm_prec(i);
      return;
    }
    if (_in[i] != NULL) _in[i]->del_out((Node *)this);
    _in[i] = n;
    if (n != NULL) n->add_out((Node *)this);
  }

  // Set this node's index, used by cisc_version to replace current node
  void set_idx(uint new_idx) {
    const node_idx_t* ref = &_idx;
    *(node_idx_t*)ref = new_idx;
  }
  // Swap input edge order.  (Edge indexes i1 and i2 are usually 1 and 2.)
  void swap_edges(uint i1, uint i2) {
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    // Def-Use info is unchanged
    Node* n1 = in(i1);
    Node* n2 = in(i2);
    _in[i1] = n2;
    _in[i2] = n1;
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
  }

  // Iterators over input Nodes for a Node X are written as:
  // for( i = 0; i < X.req(); i++ ) ... X[i] ...
  // NOTE: Required edges can contain embedded NULL pointers.

//----------------- Other Node Properties

  // Generate class IDs for (some) ideal nodes so that it is possible to determine
  // the type of a node using a non-virtual method call (the method is_<Node>() below).
  //
  // A class ID of an ideal node is a set of bits. In a class ID, a single bit determines
  // the type of the node the ID represents; another subset of an ID's bits are reserved
  // for the superclasses of the node represented by the ID.
  //
  // By design, if A is a supertype of B, B.is_A() returns true and A.is_B()
  // returns false. A.is_A() returns true.
  //
  // If two classes, A and B, have the same superclass, a different bit of A's class id
  // is reserved for A's type than for B's type. That bit is specified by the third
  // parameter in the macro DEFINE_CLASS_ID.
  //
  // By convention, classes with deeper hierarchy are declared first. Moreover,
  // classes with the same hierarchy depth are sorted by usage frequency.
  //
  // The query method masks the bits to cut off bits of subclasses and then compares
  // the result with the class id (see the macro DEFINE_CLASS_QUERY below).
  //
  //  Class_MachCall=30, ClassMask_MachCall=31
  // 12               8               4               0
  //  0   0   0   0   0   0   0   0   1   1   1   1   0
  //                                  |   |   |   |
  //                                  |   |   |   Bit_Mach=2
  //                                  |   |   Bit_MachReturn=4
  //                                  |   Bit_MachSafePoint=8
  //                                  Bit_MachCall=16
  //
  //  Class_CountedLoop=56, ClassMask_CountedLoop=63
  // 12               8               4               0
  //  0   0   0   0   0   0   0   1   1   1   0   0   0
  //                              |   |   |
  //                              |   |   Bit_Region=8
  //                              |   Bit_Loop=16
  //                              Bit_CountedLoop=32
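  //
  // Worked example (a restatement of the first diagram above, not new
  // machinery): a MachCall node has _class_id == Class_MachCall == 30, so
  // is_MachSafePoint() computes 30 & ClassMask_MachSafePoint(=15) == 14 ==
  // Class_MachSafePoint and answers true, while a plain Mach node with
  // _class_id == Class_Mach == 2 fails the same test because 2 & 15 == 2.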

#define DEFINE_CLASS_ID(cl, supcl, subn) \
  Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
  Class_##cl = Class_##supcl + Bit_##cl , \
  ClassMask_##cl = ((Bit_##cl << 1) - 1) ,

  // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
  // so that its values fit into 16 bits.
  enum NodeClasses {
    Bit_Node   = 0x0000,
    Class_Node = 0x0000,
    ClassMask_Node = 0xFFFF,

    DEFINE_CLASS_ID(Multi, Node, 0)
      DEFINE_CLASS_ID(SafePoint, Multi, 0)
        DEFINE_CLASS_ID(Call,      SafePoint, 0)
          DEFINE_CLASS_ID(CallJava,         Call, 0)
            DEFINE_CLASS_ID(CallStaticJava,   CallJava, 0)
            DEFINE_CLASS_ID(CallDynamicJava,  CallJava, 1)
          DEFINE_CLASS_ID(CallRuntime,      Call, 1)
            DEFINE_CLASS_ID(CallLeaf,         CallRuntime, 0)
          DEFINE_CLASS_ID(Allocate,         Call, 2)
            DEFINE_CLASS_ID(AllocateArray,    Allocate, 0)
          DEFINE_CLASS_ID(AbstractLock,     Call, 3)
            DEFINE_CLASS_ID(Lock,             AbstractLock, 0)
            DEFINE_CLASS_ID(Unlock,           AbstractLock, 1)
          DEFINE_CLASS_ID(ArrayCopy,        Call, 4)
      DEFINE_CLASS_ID(MultiBranch, Multi, 1)
        DEFINE_CLASS_ID(PCTable,     MultiBranch, 0)
          DEFINE_CLASS_ID(Catch,       PCTable, 0)
          DEFINE_CLASS_ID(Jump,        PCTable, 1)
        DEFINE_CLASS_ID(If,          MultiBranch, 1)
          DEFINE_CLASS_ID(CountedLoopEnd,         If, 0)
          DEFINE_CLASS_ID(RangeCheck,             If, 1)
          DEFINE_CLASS_ID(OuterStripMinedLoopEnd, If, 2)
        DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
      DEFINE_CLASS_ID(Start,       Multi, 2)
      DEFINE_CLASS_ID(MemBar,      Multi, 3)
        DEFINE_CLASS_ID(Initialize,       MemBar, 0)
        DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)
      DEFINE_CLASS_ID(LoadBarrier, Multi, 4)

    DEFINE_CLASS_ID(Mach,  Node, 1)
      DEFINE_CLASS_ID(MachReturn, Mach, 0)
        DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
          DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
            DEFINE_CLASS_ID(MachCallJava,         MachCall, 0)
              DEFINE_CLASS_ID(MachCallStaticJava,   MachCallJava, 0)
              DEFINE_CLASS_ID(MachCallDynamicJava,  MachCallJava, 1)
            DEFINE_CLASS_ID(MachCallRuntime,      MachCall, 1)
              DEFINE_CLASS_ID(MachCallLeaf,         MachCallRuntime, 0)
      DEFINE_CLASS_ID(MachBranch, Mach, 1)
        DEFINE_CLASS_ID(MachIf,         MachBranch, 0)
        DEFINE_CLASS_ID(MachGoto,       MachBranch, 1)
        DEFINE_CLASS_ID(MachNullCheck,  MachBranch, 2)
      DEFINE_CLASS_ID(MachSpillCopy,    Mach, 2)
      DEFINE_CLASS_ID(MachTemp,         Mach, 3)
      DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
      DEFINE_CLASS_ID(MachConstant,     Mach, 5)
        DEFINE_CLASS_ID(MachJump,       MachConstant, 0)
      DEFINE_CLASS_ID(MachMerge,        Mach, 6)
      DEFINE_CLASS_ID(MachMemBar,       Mach, 7)

    DEFINE_CLASS_ID(Type,  Node, 2)
      DEFINE_CLASS_ID(Phi,   Type, 0)
      DEFINE_CLASS_ID(ConstraintCast, Type, 1)
        DEFINE_CLASS_ID(CastII, ConstraintCast, 0)
        DEFINE_CLASS_ID(CheckCastPP, ConstraintCast, 1)
      DEFINE_CLASS_ID(CMove, Type, 3)
      DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
      DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5)
        DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0)
        DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1)
      DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
        DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
        DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
      DEFINE_CLASS_ID(LoadBarrierSlowReg, Type, 7)

    DEFINE_CLASS_ID(Proj,  Node, 3)
      DEFINE_CLASS_ID(CatchProj, Proj, 0)
      DEFINE_CLASS_ID(JumpProj,  Proj, 1)
      DEFINE_CLASS_ID(IfProj,    Proj, 2)
        DEFINE_CLASS_ID(IfTrue,    IfProj, 0)
        DEFINE_CLASS_ID(IfFalse,   IfProj, 1)
      DEFINE_CLASS_ID(Parm,      Proj, 4)
      DEFINE_CLASS_ID(MachProj,  Proj, 5)

    DEFINE_CLASS_ID(Mem,   Node, 4)
      DEFINE_CLASS_ID(Load,  Mem, 0)
        DEFINE_CLASS_ID(LoadVector,  Load, 0)
      DEFINE_CLASS_ID(Store, Mem, 1)
        DEFINE_CLASS_ID(StoreVector, Store, 0)
      DEFINE_CLASS_ID(LoadStore, Mem, 2)
        DEFINE_CLASS_ID(LoadStoreConditional, LoadStore, 0)
          DEFINE_CLASS_ID(CompareAndSwap, LoadStoreConditional, 0)
        DEFINE_CLASS_ID(CompareAndExchangeNode, LoadStore, 1)

    DEFINE_CLASS_ID(Region, Node, 5)
      DEFINE_CLASS_ID(Loop, Region, 0)
        DEFINE_CLASS_ID(Root,                Loop, 0)
        DEFINE_CLASS_ID(CountedLoop,         Loop, 1)
        DEFINE_CLASS_ID(OuterStripMinedLoop, Loop, 2)

    DEFINE_CLASS_ID(Sub,   Node, 6)
      DEFINE_CLASS_ID(Cmp,   Sub, 0)
        DEFINE_CLASS_ID(FastLock,   Cmp, 0)
        DEFINE_CLASS_ID(FastUnlock, Cmp, 1)

    DEFINE_CLASS_ID(MergeMem, Node, 7)
    DEFINE_CLASS_ID(Bool,     Node, 8)
    DEFINE_CLASS_ID(AddP,     Node, 9)
    DEFINE_CLASS_ID(BoxLock,  Node, 10)
    DEFINE_CLASS_ID(Add,      Node, 11)
    DEFINE_CLASS_ID(Mul,      Node, 12)
    DEFINE_CLASS_ID(Vector,   Node, 13)
    DEFINE_CLASS_ID(ClearArray, Node, 14)

    _max_classes  = ClassMask_ClearArray
  };
#undef DEFINE_CLASS_ID

  // Flags are sorted by usage frequency.
  enum NodeFlags {
    Flag_is_Copy                     = 0x01, // should be first bit to avoid shift
    Flag_rematerialize               = Flag_is_Copy << 1,
    Flag_needs_anti_dependence_check = Flag_rematerialize << 1,
    Flag_is_macro                    = Flag_needs_anti_dependence_check << 1,
    Flag_is_Con                      = Flag_is_macro << 1,
    Flag_is_cisc_alternate           = Flag_is_Con << 1,
    Flag_is_dead_loop_safe           = Flag_is_cisc_alternate << 1,
    Flag_may_be_short_branch         = Flag_is_dead_loop_safe << 1,
    Flag_avoid_back_to_back_before   = Flag_may_be_short_branch << 1,
    Flag_avoid_back_to_back_after    = Flag_avoid_back_to_back_before << 1,
    Flag_has_call                    = Flag_avoid_back_to_back_after << 1,
    Flag_is_reduction                = Flag_has_call << 1,
    Flag_is_scheduled                = Flag_is_reduction << 1,
    Flag_has_vector_mask_set         = Flag_is_scheduled << 1,
    Flag_is_expensive                = Flag_has_vector_mask_set << 1,
    _max_flags = (Flag_is_expensive << 1) - 1 // allow flags combination
  };

private:
  jushort _class_id;
  jushort _flags;

protected:
  // These methods should be called from constructors only.
  void init_class_id(jushort c) {
    assert(c <= _max_classes, "invalid node class");
    _class_id = c; // cast out const
  }
  void init_flags(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags |= fl;
  }
  void clear_flag(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags &= ~fl;
  }

public:
  const jushort class_id() const { return _class_id; }

  const jushort flags() const { return _flags; }

  void add_flag(jushort fl) { init_flags(fl); }

  void remove_flag(jushort fl) { clear_flag(fl); }

  // Return a dense integer opcode number
  virtual int Opcode() const;

  // Virtual inherited Node size
  virtual uint size_of() const;

  // Other interesting Node properties
#define DEFINE_CLASS_QUERY(type)                             \
  bool is_##type() const {                                   \
    return ((_class_id & ClassMask_##type) == Class_##type); \
  }                                                          \
  type##Node *as_##type() const {                            \
    assert(is_##type(), "invalid node class");               \
    return (type##Node*)this;                                \
  }                                                          \
  type##Node* isa_##type() const {                           \
    return (is_##type()) ? as_##type() : NULL;               \
  }
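
  // Illustrative use of the three query flavors generated above (a sketch of
  // the calling convention only, with LoadNode chosen arbitrarily):
  //
  //   if (n->is_Load()) { LoadNode* ld = n->as_Load(); /* assert-checked cast */ }
  //   LoadNode* maybe_ld = n->isa_Load();              // NULL if n is not a Load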

  DEFINE_CLASS_QUERY(AbstractLock)
  DEFINE_CLASS_QUERY(Add)
  DEFINE_CLASS_QUERY(AddP)
  DEFINE_CLASS_QUERY(Allocate)
  DEFINE_CLASS_QUERY(AllocateArray)
  DEFINE_CLASS_QUERY(ArrayCopy)
  DEFINE_CLASS_QUERY(Bool)
  DEFINE_CLASS_QUERY(BoxLock)
  DEFINE_CLASS_QUERY(Call)
  DEFINE_CLASS_QUERY(CallDynamicJava)
  DEFINE_CLASS_QUERY(CallJava)
  DEFINE_CLASS_QUERY(CallLeaf)
  DEFINE_CLASS_QUERY(CallRuntime)
  DEFINE_CLASS_QUERY(CallStaticJava)
  DEFINE_CLASS_QUERY(Catch)
  DEFINE_CLASS_QUERY(CatchProj)
  DEFINE_CLASS_QUERY(CheckCastPP)
  DEFINE_CLASS_QUERY(CastII)
  DEFINE_CLASS_QUERY(ConstraintCast)
  DEFINE_CLASS_QUERY(ClearArray)
  DEFINE_CLASS_QUERY(CMove)
  DEFINE_CLASS_QUERY(Cmp)
  DEFINE_CLASS_QUERY(CountedLoop)
  DEFINE_CLASS_QUERY(CountedLoopEnd)
  DEFINE_CLASS_QUERY(DecodeNarrowPtr)
  DEFINE_CLASS_QUERY(DecodeN)
  DEFINE_CLASS_QUERY(DecodeNKlass)
  DEFINE_CLASS_QUERY(EncodeNarrowPtr)
  DEFINE_CLASS_QUERY(EncodeP)
  DEFINE_CLASS_QUERY(EncodePKlass)
  DEFINE_CLASS_QUERY(FastLock)
  DEFINE_CLASS_QUERY(FastUnlock)
  DEFINE_CLASS_QUERY(If)
  DEFINE_CLASS_QUERY(RangeCheck)
  DEFINE_CLASS_QUERY(IfProj)
  DEFINE_CLASS_QUERY(IfFalse)
  DEFINE_CLASS_QUERY(IfTrue)
  DEFINE_CLASS_QUERY(Initialize)
  DEFINE_CLASS_QUERY(Jump)
  DEFINE_CLASS_QUERY(JumpProj)
  DEFINE_CLASS_QUERY(Load)
  DEFINE_CLASS_QUERY(LoadStore)
  DEFINE_CLASS_QUERY(LoadStoreConditional)
  DEFINE_CLASS_QUERY(LoadBarrier)
  DEFINE_CLASS_QUERY(LoadBarrierSlowReg)
  DEFINE_CLASS_QUERY(Lock)
  DEFINE_CLASS_QUERY(Loop)
  DEFINE_CLASS_QUERY(Mach)
  DEFINE_CLASS_QUERY(MachBranch)
  DEFINE_CLASS_QUERY(MachCall)
  DEFINE_CLASS_QUERY(MachCallDynamicJava)
  DEFINE_CLASS_QUERY(MachCallJava)
  DEFINE_CLASS_QUERY(MachCallLeaf)
  DEFINE_CLASS_QUERY(MachCallRuntime)
  DEFINE_CLASS_QUERY(MachCallStaticJava)
  DEFINE_CLASS_QUERY(MachConstantBase)
  DEFINE_CLASS_QUERY(MachConstant)
  DEFINE_CLASS_QUERY(MachGoto)
  DEFINE_CLASS_QUERY(MachIf)
  DEFINE_CLASS_QUERY(MachJump)
  DEFINE_CLASS_QUERY(MachNullCheck)
  DEFINE_CLASS_QUERY(MachProj)
  DEFINE_CLASS_QUERY(MachReturn)
  DEFINE_CLASS_QUERY(MachSafePoint)
  DEFINE_CLASS_QUERY(MachSpillCopy)
  DEFINE_CLASS_QUERY(MachTemp)
  DEFINE_CLASS_QUERY(MachMemBar)
  DEFINE_CLASS_QUERY(MachMerge)
  DEFINE_CLASS_QUERY(Mem)
  DEFINE_CLASS_QUERY(MemBar)
  DEFINE_CLASS_QUERY(MemBarStoreStore)
  DEFINE_CLASS_QUERY(MergeMem)
  DEFINE_CLASS_QUERY(Mul)
  DEFINE_CLASS_QUERY(Multi)
  DEFINE_CLASS_QUERY(MultiBranch)
  DEFINE_CLASS_QUERY(OuterStripMinedLoop)
  DEFINE_CLASS_QUERY(OuterStripMinedLoopEnd)
  DEFINE_CLASS_QUERY(Parm)
  DEFINE_CLASS_QUERY(PCTable)
  DEFINE_CLASS_QUERY(Phi)
  DEFINE_CLASS_QUERY(Proj)
  DEFINE_CLASS_QUERY(Region)
  DEFINE_CLASS_QUERY(Root)
  DEFINE_CLASS_QUERY(SafePoint)
  DEFINE_CLASS_QUERY(SafePointScalarObject)
  DEFINE_CLASS_QUERY(Start)
  DEFINE_CLASS_QUERY(Store)
  DEFINE_CLASS_QUERY(Sub)
  DEFINE_CLASS_QUERY(Type)
  DEFINE_CLASS_QUERY(Vector)
  DEFINE_CLASS_QUERY(LoadVector)
  DEFINE_CLASS_QUERY(StoreVector)
  DEFINE_CLASS_QUERY(Unlock)

#undef DEFINE_CLASS_QUERY

  // duplicate of is_MachSpillCopy()
  bool is_SpillCopy () const {
    return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
  }

  bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
  // A data node which is safe to leave in a dead loop during IGVN optimization.
  bool is_dead_loop_safe() const {
    return is_Phi() || (is_Proj() && in(0) == NULL) ||
           ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0 &&
            (!is_Proj() || !in(0)->is_Allocate()));
  }

  // is_Copy() returns copied edge index (0 or 1)
  uint is_Copy() const { return (_flags & Flag_is_Copy); }

  virtual bool is_CFG() const { return false; }

  // If this node is control-dependent on a test, can it be
  // rerouted to a dominating equivalent test?  This is usually
  // true of non-CFG nodes, but can be false for operations which
  // depend for their correct sequencing on more than one test.
  // (In that case, hoisting to a dominating test may silently
  // skip some other important test.)
  virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };

  // When building basic blocks, I need to have a notion of block beginning
  // Nodes, next block selector Nodes (block enders), and next block
  // projections.  These calls need to work on their machine equivalents.  The
  // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
  bool is_block_start() const {
    if ( is_Region() )
      return this == (const Node*)in(0);
    else
      return is_Start();
  }

  // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
  // Goto and Return.  This call also returns the block ending Node.
  virtual const Node *is_block_proj() const;

  // The node is a "macro" node which needs to be expanded before matching
  bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
  // The node is expensive: the best control is set during loop opts
  bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; }

  // An arithmetic node which accumulates data in a loop.
  // It must have the loop's phi as an input and provide a def to the phi.
  bool is_reduction() const { return (_flags & Flag_is_reduction) != 0; }

  // The node is a CountedLoopEnd with a mask annotation so as to emit a restore context
  bool has_vector_mask_set() const { return (_flags & Flag_has_vector_mask_set) != 0; }

  // Used in lcm to mark nodes that have been scheduled
  bool is_scheduled() const { return (_flags & Flag_is_scheduled) != 0; }

  //----------------- Optimization

  // Get the worst-case Type output for this Node.
  virtual const class Type *bottom_type() const;

  // If we find a better type for a node, try to record it permanently.
  // Return true if this node actually changed.
  // Be sure to do the hash_delete game in the "rehash" variant.
  void raise_bottom_type(const Type* new_type);

  // Get the address type with which this node uses and/or defs memory,
  // or NULL if none.  The address type is conservatively wide.
  // Returns non-null for calls, membars, loads, stores, etc.
  // Returns TypePtr::BOTTOM if the node touches memory "broadly".
  virtual const class TypePtr *adr_type() const { return NULL; }

  // Return an existing node which computes the same function as this node.
  // The optimistic combined algorithm requires this to return a Node which
  // is a small number of steps away (e.g., one of my inputs).
  virtual Node* Identity(PhaseGVN* phase);

  // Return the set of values this Node can take on at runtime.
  virtual const Type* Value(PhaseGVN* phase) const;

  // Return a node which is more "ideal" than the current node.
  // The invariants on this call are subtle.  If in doubt, read the
  // treatise in node.cpp above the default implementation AND TEST WITH
  // +VerifyIterativeGVN!
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Some nodes have specific Ideal subgraph transformations only if they are
  // unique users of specific nodes. Such nodes should be put on IGVN worklist
  // for the transformations to happen.
  bool has_special_unique_user() const;

  // Skip Proj and CatchProj nodes chains. Check for Null and Top.
  Node* find_exact_control(Node* ctrl);

  // Check if 'this' node dominates or is equal to 'sub'.
  bool dominates(Node* sub, Node_List &nlist);

protected:
  bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
public:

  // See if there is valid pipeline info
  static  const Pipeline *pipeline_class();
  virtual const Pipeline *pipeline() const;

  // Compute the latency from the def to this instruction of the ith input node
  uint latency(uint i);

  // Hash & compare functions, for pessimistic value numbering

  // If the hash function returns the special sentinel value NO_HASH,
  // the node is guaranteed never to compare equal to any other node.
  // If we accidentally generate a hash with value NO_HASH the node
  // won't go into the table and we'll lose a little optimization.
  static const uint NO_HASH = 0;
  virtual uint hash() const;
  virtual bool cmp( const Node &n ) const;

  // Operation appears to be iteratively computed (such as an induction variable)
  // It is possible for this operation to return false for a loop-varying
  // value, if it appears (by local graph inspection) to be computed by a simple conditional.
  bool is_iteratively_computed();

  // Determine if a node is a counted loop induction variable.
  // NOTE: The method is defined in "loopnode.cpp".
  bool is_cloop_ind_var() const;

  // Return a node with opcode "opc" and same inputs as "this" if one can
  // be found; otherwise return NULL.
  Node* find_similar(int opc);

  // Return the unique control out if only one. Null if none or more than one.
  Node* unique_ctrl_out() const;

  // Set control or add control as precedence edge
  void ensure_control_or_add_prec(Node* c);

  //----------------- Code Generation

  // Ideal register class for Matching.  Zero means unmatched instruction
  // (these are cloned instead of converted to machine nodes).
  virtual uint ideal_reg() const;

  static const uint NotAMachineReg;   // must be > max. machine register

  // Do we Match on this edge index or not?  Generally false for Control
  // and true for everything else.  Weird for calls & returns.
  virtual uint match_edge(uint idx) const;

  // Register class output is returned in
  virtual const RegMask &out_RegMask() const;
  // Register class input is expected in
  virtual const RegMask &in_RegMask(uint) const;
  // Should we clone rather than spill this instruction?
  bool rematerialize() const;

  // Return JVM State Object if this Node carries debug info, or NULL otherwise
  virtual JVMState* jvms() const;

  // Print as assembly
  virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
  // Emit bytes starting at parameter 'ptr'
  // Bump 'ptr' by the number of output bytes
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  // Size of instruction in bytes
  virtual uint size(PhaseRegAlloc *ra_) const;

  // Convenience function to extract an integer constant from a node.
  // If it is not an integer constant (either Con, CastII, or Mach),
  // return value_if_unknown.
  jint find_int_con(jint value_if_unknown) const {
    const TypeInt* t = find_int_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  // Return the constant, knowing it is an integer constant already
  jint get_int() const {
    const TypeInt* t = find_int_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  // Here's where the work is done.  Can produce non-constant int types too.
  const TypeInt* find_int_type() const;
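  // (Typical call pattern, shown only as a sketch: when an input may or may
  // not be a compile-time constant,
  //   jint shift = n->in(2)->find_int_con(-1);
  // yields the constant value, or the caller-chosen default -1 when in(2)
  // does not have a constant int type.)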
|---|
| 1068 |  | 
|---|
| 1069 | // Same thing for long (and intptr_t, via type.hpp): | 
|---|
| 1070 | jlong get_long() const { | 
|---|
| 1071 | const TypeLong* t = find_long_type(); | 
|---|
| 1072 | guarantee(t != NULL, "must be con"); | 
|---|
| 1073 | return t->get_con(); | 
|---|
| 1074 | } | 
|---|
| 1075 | jlong find_long_con(jint value_if_unknown) const { | 
|---|
| 1076 | const TypeLong* t = find_long_type(); | 
|---|
| 1077 | return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; | 
|---|
| 1078 | } | 
|---|
| 1079 | const TypeLong* find_long_type() const; | 
|---|
| 1080 |  | 
|---|
| 1081 | const TypePtr* get_ptr_type() const; | 
|---|
| 1082 |  | 
|---|
| 1083 | // These guys are called by code generated by ADLC: | 
|---|
| 1084 | intptr_t get_ptr() const; | 
|---|
| 1085 | intptr_t get_narrowcon() const; | 
|---|
| 1086 | jdouble getd() const; | 
|---|
| 1087 | jfloat getf() const; | 
|---|
| 1088 |  | 
|---|
| 1089 | // Nodes which are pinned into basic blocks | 
|---|
| 1090 | virtual bool pinned() const { return false; } | 
|---|
| 1091 |  | 
|---|
| 1092 | // Nodes which use memory without consuming it, hence need antidependences | 
|---|
| 1093 | // More specifically, needs_anti_dependence_check returns true iff the node | 
|---|
| 1094 | // (a) does a load, and (b) does not perform a store (except perhaps to a | 
|---|
| 1095 | // stack slot or some other unaliased location). | 
|---|
| 1096 | bool needs_anti_dependence_check() const; | 
|---|
| 1097 |  | 
|---|
| 1098 | // Return which operand this instruction may cisc-spill. In other words, | 
|---|
| 1099 | // return the operand position that can be converted from a register access to a memory access | 
|---|
| 1100 | virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; } | 
|---|
| 1101 | bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; } | 
|---|
| 1102 |  | 
|---|
| 1103 | //----------------- Graph walking | 
|---|
| 1104 | public: | 
|---|
| 1105 | // Walk and apply member functions recursively. | 
|---|
| 1106 | // Supplied (this) pointer is root. | 
|---|
| 1107 | void walk(NFunc pre, NFunc post, void *env); | 
|---|
| 1108 | static void nop(Node &, void*); // Dummy empty function | 
|---|
| 1109 | static void packregion( Node &n, void* ); | 
|---|
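|  | // A hedged sketch of the walk() API above; 'count_node' is a hypothetical NFunc | 
|---|
|  | // written only for illustration, and Node::nop serves as the do-nothing callback. | 
|---|
|  | //   static void count_node(Node& n, void* env) { ++*(uint*)env; } | 
|---|
|  | //   uint cnt = 0; | 
|---|
|  | //   root->walk(count_node, Node::nop, &cnt);   // each node is visited at most once | 
|---|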
| 1110 | private: | 
|---|
| 1111 | void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited); | 
|---|
| 1112 |  | 
|---|
| 1113 | //----------------- Printing, etc | 
|---|
| 1114 | public: | 
|---|
| 1115 | #ifndef PRODUCT | 
|---|
| 1116 | Node* find(int idx) const;         // Search the graph for the given idx. | 
|---|
| 1117 | Node* find_ctrl(int idx) const;    // Search control ancestors for the given idx. | 
|---|
| 1118 | void dump() const { dump( "\n"); }  // Print this node. | 
|---|
| 1119 | void dump(const char* suffix, bool mark = false, outputStream *st = tty) const; // Print this node. | 
|---|
| 1120 | void dump(int depth) const;        // Print this node recursively, down to the given depth | 
|---|
| 1121 | void dump_ctrl(int depth) const;   // Print control nodes recursively, down to the given depth | 
|---|
| 1122 | void dump_comp() const;            // Print this node in compact representation. | 
|---|
| 1123 | // Print this node in compact representation. | 
|---|
| 1124 | void dump_comp(const char* suffix, outputStream *st = tty) const; | 
|---|
| 1125 | virtual void dump_req(outputStream *st = tty) const;    // Print required-edge info | 
|---|
| 1126 | virtual void dump_prec(outputStream *st = tty) const;   // Print precedence-edge info | 
|---|
| 1127 | virtual void dump_out(outputStream *st = tty) const;    // Print the output edge info | 
|---|
| 1128 | virtual void dump_spec(outputStream *st) const {};      // Print per-node info | 
|---|
| 1129 | // Print compact per-node info | 
|---|
| 1130 | virtual void dump_compact_spec(outputStream *st) const { dump_spec(st); } | 
|---|
| 1131 | void dump_related() const;             // Print related nodes (depends on node at hand). | 
|---|
| 1132 | // Print related nodes up to given depths for input and output nodes. | 
|---|
| 1133 | void dump_related(uint d_in, uint d_out) const; | 
|---|
| 1134 | void dump_related_compact() const;     // Print related nodes in compact representation. | 
|---|
| 1135 | // Collect related nodes. | 
|---|
| 1136 | virtual void related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const; | 
|---|
| 1137 | // Collect nodes starting from this node, explicitly including/excluding control and data links. | 
|---|
| 1138 | void collect_nodes(GrowableArray<Node*> *ns, int d, bool ctrl, bool data) const; | 
|---|
| 1139 |  | 
|---|
| 1140 | // Node collectors, to be used in implementations of Node::related(). | 
|---|
| 1141 | // Collect the entire data input graph. Include control inputs if requested. | 
|---|
| 1142 | void collect_nodes_in_all_data(GrowableArray<Node*> *ns, bool ctrl) const; | 
|---|
| 1143 | // Collect the entire control input graph. Include data inputs if requested. | 
|---|
| 1144 | void collect_nodes_in_all_ctrl(GrowableArray<Node*> *ns, bool data) const; | 
|---|
| 1145 | // Collect the entire output graph until hitting and including control nodes. | 
|---|
| 1146 | void collect_nodes_out_all_ctrl_boundary(GrowableArray<Node*> *ns) const; | 
|---|
| 1147 |  | 
|---|
| 1148 | void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges | 
|---|
| 1149 | void verify() const;               // Check Def-Use info for my subgraph | 
|---|
| 1150 | static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space); | 
|---|
| 1151 |  | 
|---|
| 1152 | // This call defines a class-unique string used to identify class instances | 
|---|
| 1153 | virtual const char *Name() const; | 
|---|
| 1154 |  | 
|---|
| 1155 | void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...) | 
|---|
| 1156 | // RegMask Print Functions | 
|---|
| 1157 | void dump_in_regmask(int idx) { in_RegMask(idx).dump(); } | 
|---|
| 1158 | void dump_out_regmask() { out_RegMask().dump(); } | 
|---|
| 1159 | static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; } | 
|---|
| 1160 | void fast_dump() const { | 
|---|
| 1161 | tty->print( "%4d: %-17s", _idx, Name()); | 
|---|
| 1162 | for (uint i = 0; i < len(); i++) | 
|---|
| 1163 | if (in(i)) | 
|---|
| 1164 | tty->print( " %4d", in(i)->_idx); | 
|---|
| 1165 | else | 
|---|
| 1166 | tty->print( " NULL"); | 
|---|
| 1167 | tty->print( "\n"); | 
|---|
| 1168 | } | 
|---|
| 1169 | #endif | 
|---|
| 1170 | #ifdef ASSERT | 
|---|
| 1171 | void verify_construction(); | 
|---|
| 1172 | bool verify_jvms(const JVMState* jvms) const; | 
|---|
| 1173 | int  _debug_idx;                     // Unique value assigned to every node. | 
|---|
| 1174 | int   debug_idx() const              { return _debug_idx; } | 
|---|
| 1175 | void  set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; } | 
|---|
| 1176 |  | 
|---|
| 1177 | Node* _debug_orig;                   // Original version of this, if any. | 
|---|
| 1178 | Node*  debug_orig() const            { return _debug_orig; } | 
|---|
| 1179 | void   set_debug_orig(Node* orig);   // _debug_orig = orig | 
|---|
| 1180 |  | 
|---|
| 1181 | int        _hash_lock;               // Barrier to modifications of nodes in the hash table | 
|---|
| 1182 | void  enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); } | 
|---|
| 1183 | void   exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); } | 
|---|
| 1184 |  | 
|---|
| 1185 | static void init_NodeProperty(); | 
|---|
| 1186 |  | 
|---|
| 1187 | #if OPTO_DU_ITERATOR_ASSERT | 
|---|
| 1188 | const Node* _last_del;               // The last deleted node. | 
|---|
| 1189 | uint        _del_tick;               // Bumped when a deletion happens. | 
|---|
| 1190 | #endif | 
|---|
| 1191 | #endif | 
|---|
| 1192 | }; | 
|---|
| 1193 |  | 
|---|
| 1194 |  | 
|---|
| 1195 | #ifndef PRODUCT | 
|---|
| 1196 |  | 
|---|
| 1197 | // Used in debugging code to avoid walking across dead or uninitialized edges. | 
|---|
| 1198 | inline bool NotANode(const Node* n) { | 
|---|
| 1199 | if (n == NULL)                   return true; | 
|---|
| 1200 | if (((intptr_t)n & 1) != 0)      return true;  // uninitialized, etc. | 
|---|
| 1201 | if (*(address*)n == badAddress)  return true;  // killed by Node::destruct | 
|---|
| 1202 | return false; | 
|---|
| 1203 | } | 
|---|
| 1204 |  | 
|---|
| 1205 | #endif | 
|---|
| 1206 |  | 
|---|
| 1207 |  | 
|---|
| 1208 | //----------------------------------------------------------------------------- | 
|---|
| 1209 | // Iterators over DU info, and associated Node functions. | 
|---|
| 1210 |  | 
|---|
| 1211 | #if OPTO_DU_ITERATOR_ASSERT | 
|---|
| 1212 |  | 
|---|
| 1213 | // Common code for assertion checking on DU iterators. | 
|---|
| 1214 | class DUIterator_Common { | 
|---|
| 1215 | #ifdef ASSERT | 
|---|
| 1216 | protected: | 
|---|
| 1217 | bool         _vdui;               // cached value of VerifyDUIterators | 
|---|
| 1218 | const Node*  _node;               // the node containing the _out array | 
|---|
| 1219 | uint         _outcnt;             // cached node->_outcnt | 
|---|
| 1220 | uint         _del_tick;           // cached node->_del_tick | 
|---|
| 1221 | Node*        _last;               // last value produced by the iterator | 
|---|
| 1222 |  | 
|---|
| 1223 | void sample(const Node* node);    // used by c'tor to set up for verifies | 
|---|
| 1224 | void verify(const Node* node, bool at_end_ok = false); | 
|---|
| 1225 | void verify_resync(); | 
|---|
| 1226 | void reset(const DUIterator_Common& that); | 
|---|
| 1227 |  | 
|---|
| 1228 | // The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators | 
|---|
| 1229 | #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } } | 
|---|
| 1230 | #else | 
|---|
| 1231 | #define I_VDUI_ONLY(i,x) { } | 
|---|
| 1232 | #endif //ASSERT | 
|---|
| 1233 | }; | 
|---|
| 1234 |  | 
|---|
| 1235 | #define VDUI_ONLY(x)     I_VDUI_ONLY(*this, x) | 
|---|
| 1236 |  | 
|---|
| 1237 | // Default DU iterator.  Allows appends onto the out array. | 
|---|
| 1238 | // Allows deletion from the out array only at the current point. | 
|---|
| 1239 | // Usage: | 
|---|
| 1240 | //  for (DUIterator i = x->outs(); x->has_out(i); i++) { | 
|---|
| 1241 | //    Node* y = x->out(i); | 
|---|
| 1242 | //    ... | 
|---|
| 1243 | //  } | 
|---|
| 1244 | // Compiles in product mode to an unsigned integer index, which indexes | 
|---|
| 1245 | // onto a repeatedly reloaded base pointer of x->_out.  The loop predicate | 
|---|
| 1246 | // also reloads x->_outcnt.  If you delete, you must perform "--i" just | 
|---|
| 1247 | // before continuing the loop.  You must delete only the last-produced | 
|---|
| 1248 | // edge.  You must delete only a single copy of the last-produced edge, | 
|---|
| 1249 | // or else you must delete all copies at once (the first time the edge | 
|---|
| 1250 | // is produced by the iterator). | 
|---|
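|  | // A sketch of the deletion discipline described above (illustrative only; | 
|---|
|  | // 'should_drop' is a hypothetical predicate, and the actual edge removal | 
|---|
|  | // depends on the caller): | 
|---|
|  | //   for (DUIterator i = x->outs(); x->has_out(i); i++) { | 
|---|
|  | //     Node* y = x->out(i); | 
|---|
|  | //     if (should_drop(y)) { | 
|---|
|  | //       // ... remove the single x->y edge that was just produced ... | 
|---|
|  | //       --i;                  // back up before continuing the loop | 
|---|
|  | //     } | 
|---|
|  | //   } | 
|---|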
| 1251 | class DUIterator : public DUIterator_Common { | 
|---|
| 1252 | friend class Node; | 
|---|
| 1253 |  | 
|---|
| 1254 | // This is the index which provides the product-mode behavior. | 
|---|
| 1255 | // Whatever the product-mode version of the system does to the | 
|---|
| 1256 | // DUI index is done to this index.  All other fields in | 
|---|
| 1257 | // this class are used only for assertion checking. | 
|---|
| 1258 | uint         _idx; | 
|---|
| 1259 |  | 
|---|
| 1260 | #ifdef ASSERT | 
|---|
| 1261 | uint         _refresh_tick;    // Records the refresh activity. | 
|---|
| 1262 |  | 
|---|
| 1263 | void sample(const Node* node); // Initialize _refresh_tick etc. | 
|---|
| 1264 | void verify(const Node* node, bool at_end_ok = false); | 
|---|
| 1265 | void verify_increment();       // Verify an increment operation. | 
|---|
| 1266 | void verify_resync();          // Verify that we can back up over a deletion. | 
|---|
| 1267 | void verify_finish();          // Verify that the loop terminated properly. | 
|---|
| 1268 | void refresh();                // Resample verification info. | 
|---|
| 1269 | void reset(const DUIterator& that);  // Resample after assignment. | 
|---|
| 1270 | #endif | 
|---|
| 1271 |  | 
|---|
| 1272 | DUIterator(const Node* node, int dummy_to_avoid_conversion) | 
|---|
| 1273 | { _idx = 0;                         debug_only(sample(node)); } | 
|---|
| 1274 |  | 
|---|
| 1275 | public: | 
|---|
| 1276 | // initialize to garbage; clear _vdui to disable asserts | 
|---|
| 1277 | DUIterator() | 
|---|
| 1278 | { /*initialize to garbage*/         debug_only(_vdui = false); } | 
|---|
| 1279 |  | 
|---|
| 1280 | void operator++(int dummy_to_specify_postfix_op) | 
|---|
| 1281 | { _idx++;                           VDUI_ONLY(verify_increment()); } | 
|---|
| 1282 |  | 
|---|
| 1283 | void operator--() | 
|---|
| 1284 | { VDUI_ONLY(verify_resync());       --_idx; } | 
|---|
| 1285 |  | 
|---|
| 1286 | ~DUIterator() | 
|---|
| 1287 | { VDUI_ONLY(verify_finish()); } | 
|---|
| 1288 |  | 
|---|
| 1289 | void operator=(const DUIterator& that) | 
|---|
| 1290 | { _idx = that._idx;                 debug_only(reset(that)); } | 
|---|
| 1291 | }; | 
|---|
| 1292 |  | 
|---|
| 1293 | DUIterator Node::outs() const | 
|---|
| 1294 | { return DUIterator(this, 0); } | 
|---|
| 1295 | DUIterator& Node::refresh_out_pos(DUIterator& i) const | 
|---|
| 1296 | { I_VDUI_ONLY(i, i.refresh());        return i; } | 
|---|
| 1297 | bool Node::has_out(DUIterator& i) const | 
|---|
| 1298 | { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; } | 
|---|
| 1299 | Node*    Node::out(DUIterator& i) const | 
|---|
| 1300 | { I_VDUI_ONLY(i, i.verify(this));     return debug_only(i._last=) _out[i._idx]; } | 
|---|
| 1301 |  | 
|---|
| 1302 |  | 
|---|
| 1303 | // Faster DU iterator.  Disallows insertions into the out array. | 
|---|
| 1304 | // Allows deletion from the out array only at the current point. | 
|---|
| 1305 | // Usage: | 
|---|
| 1306 | //  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) { | 
|---|
| 1307 | //    Node* y = x->fast_out(i); | 
|---|
| 1308 | //    ... | 
|---|
| 1309 | //  } | 
|---|
| 1310 | // Compiles in product mode to raw Node** pointer arithmetic, with | 
|---|
| 1311 | // no reloading of pointers from the original node x.  If you delete, | 
|---|
| 1312 | // you must perform "--i; --imax" just before continuing the loop. | 
|---|
| 1313 | // If you delete multiple copies of the same edge, you must decrement | 
|---|
| 1314 | // imax, but not i, multiple times:  "--i, imax -= num_edges". | 
|---|
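|  | // A sketch of the corresponding deletion discipline for the fast iterator | 
|---|
|  | // (illustrative only; 'should_drop' is a hypothetical predicate): | 
|---|
|  | //   for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) { | 
|---|
|  | //     Node* y = x->fast_out(i); | 
|---|
|  | //     if (should_drop(y)) { | 
|---|
|  | //       // ... remove the single x->y edge that was just produced ... | 
|---|
|  | //       --i; --imax;          // compensate for the one deleted out edge | 
|---|
|  | //     } | 
|---|
|  | //   } | 
|---|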
| 1315 | class DUIterator_Fast : public DUIterator_Common { | 
|---|
| 1316 | friend class Node; | 
|---|
| 1317 | friend class DUIterator_Last; | 
|---|
| 1318 |  | 
|---|
| 1319 | // This is the pointer which provides the product-mode behavior. | 
|---|
| 1320 | // Whatever the product-mode version of the system does to the | 
|---|
| 1321 | // DUI pointer is done to this pointer.  All other fields in | 
|---|
| 1322 | // this class are used only for assertion checking. | 
|---|
| 1323 | Node**       _outp; | 
|---|
| 1324 |  | 
|---|
| 1325 | #ifdef ASSERT | 
|---|
| 1326 | void verify(const Node* node, bool at_end_ok = false); | 
|---|
| 1327 | void verify_limit(); | 
|---|
| 1328 | void verify_resync(); | 
|---|
| 1329 | void verify_relimit(uint n); | 
|---|
| 1330 | void reset(const DUIterator_Fast& that); | 
|---|
| 1331 | #endif | 
|---|
| 1332 |  | 
|---|
| 1333 | // Note:  offset must be signed, since -1 is sometimes passed | 
|---|
| 1334 | DUIterator_Fast(const Node* node, ptrdiff_t offset) | 
|---|
| 1335 | { _outp = node->_out + offset;      debug_only(sample(node)); } | 
|---|
| 1336 |  | 
|---|
| 1337 | public: | 
|---|
| 1338 | // initialize to garbage; clear _vdui to disable asserts | 
|---|
| 1339 | DUIterator_Fast() | 
|---|
| 1340 | { /*initialize to garbage*/         debug_only(_vdui = false); } | 
|---|
| 1341 |  | 
|---|
| 1342 | void operator++(int dummy_to_specify_postfix_op) | 
|---|
| 1343 | { _outp++;                          VDUI_ONLY(verify(_node, true)); } | 
|---|
| 1344 |  | 
|---|
| 1345 | void operator--() | 
|---|
| 1346 | { VDUI_ONLY(verify_resync());       --_outp; } | 
|---|
| 1347 |  | 
|---|
| 1348 | void operator-=(uint n)   // applied to the limit only | 
|---|
| 1349 | { _outp -= n;           VDUI_ONLY(verify_relimit(n));  } | 
|---|
| 1350 |  | 
|---|
| 1351 | bool operator<(DUIterator_Fast& limit) { | 
|---|
| 1352 | I_VDUI_ONLY(*this, this->verify(_node, true)); | 
|---|
| 1353 | I_VDUI_ONLY(limit, limit.verify_limit()); | 
|---|
| 1354 | return _outp < limit._outp; | 
|---|
| 1355 | } | 
|---|
| 1356 |  | 
|---|
| 1357 | void operator=(const DUIterator_Fast& that) | 
|---|
| 1358 | { _outp = that._outp;               debug_only(reset(that)); } | 
|---|
| 1359 | }; | 
|---|
| 1360 |  | 
|---|
| 1361 | DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const { | 
|---|
| 1362 | // Assign a limit pointer to the reference argument: | 
|---|
| 1363 | imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt); | 
|---|
| 1364 | // Return the base pointer: | 
|---|
| 1365 | return DUIterator_Fast(this, 0); | 
|---|
| 1366 | } | 
|---|
| 1367 | Node* Node::fast_out(DUIterator_Fast& i) const { | 
|---|
| 1368 | I_VDUI_ONLY(i, i.verify(this)); | 
|---|
| 1369 | return debug_only(i._last=) *i._outp; | 
|---|
| 1370 | } | 
|---|
| 1371 |  | 
|---|
| 1372 |  | 
|---|
| 1373 | // Faster DU iterator.  Requires each successive edge to be removed. | 
|---|
| 1374 | // Does not allow insertion of any edges. | 
|---|
| 1375 | // Usage: | 
|---|
| 1376 | //  for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) { | 
|---|
| 1377 | //    Node* y = x->last_out(i); | 
|---|
| 1378 | //    ... | 
|---|
| 1379 | //  } | 
|---|
| 1380 | // Compiles in product mode to raw Node** pointer arithmetic, with | 
|---|
| 1381 | // no reloading of pointers from the original node x. | 
|---|
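|  | // A hedged sketch of the usual "rewire every use" pattern this iterator serves, | 
|---|
|  | // assuming Node::replace_edge(old, new) (declared earlier in this class) returns | 
|---|
|  | // the number of input edges it rewired: | 
|---|
|  | //   for (DUIterator_Last imin, i = def->last_outs(imin); i >= imin; ) { | 
|---|
|  | //     Node* use = def->last_out(i); | 
|---|
|  | //     int num_edges = use->replace_edge(def, new_def); | 
|---|
|  | //     i -= num_edges;         // every produced edge must have been removed | 
|---|
|  | //   } | 
|---|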
| 1382 | class DUIterator_Last : private DUIterator_Fast { | 
|---|
| 1383 | friend class Node; | 
|---|
| 1384 |  | 
|---|
| 1385 | #ifdef ASSERT | 
|---|
| 1386 | void verify(const Node* node, bool at_end_ok = false); | 
|---|
| 1387 | void verify_limit(); | 
|---|
| 1388 | void verify_step(uint num_edges); | 
|---|
| 1389 | #endif | 
|---|
| 1390 |  | 
|---|
| 1391 | // Note:  offset must be signed, since -1 is sometimes passed | 
|---|
| 1392 | DUIterator_Last(const Node* node, ptrdiff_t offset) | 
|---|
| 1393 | : DUIterator_Fast(node, offset) { } | 
|---|
| 1394 |  | 
|---|
| 1395 | void operator++(int dummy_to_specify_postfix_op) {} // do not use | 
|---|
| 1396 | void operator<(int)                              {} // do not use | 
|---|
| 1397 |  | 
|---|
| 1398 | public: | 
|---|
| 1399 | // initialize to garbage | 
|---|
| 1400 | DUIterator_Last() { } | 
|---|
| 1401 |  | 
|---|
| 1402 | void operator--() | 
|---|
| 1403 | { _outp--;              VDUI_ONLY(verify_step(1));  } | 
|---|
| 1404 |  | 
|---|
| 1405 | void operator-=(uint n) | 
|---|
| 1406 | { _outp -= n;           VDUI_ONLY(verify_step(n));  } | 
|---|
| 1407 |  | 
|---|
| 1408 | bool operator>=(DUIterator_Last& limit) { | 
|---|
| 1409 | I_VDUI_ONLY(*this, this->verify(_node, true)); | 
|---|
| 1410 | I_VDUI_ONLY(limit, limit.verify_limit()); | 
|---|
| 1411 | return _outp >= limit._outp; | 
|---|
| 1412 | } | 
|---|
| 1413 |  | 
|---|
| 1414 | void operator=(const DUIterator_Last& that) | 
|---|
| 1415 | { DUIterator_Fast::operator=(that); } | 
|---|
| 1416 | }; | 
|---|
| 1417 |  | 
|---|
| 1418 | DUIterator_Last Node::last_outs(DUIterator_Last& imin) const { | 
|---|
| 1419 | // Assign a limit pointer to the reference argument: | 
|---|
| 1420 | imin = DUIterator_Last(this, 0); | 
|---|
| 1421 | // Return the initial pointer: | 
|---|
| 1422 | return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1); | 
|---|
| 1423 | } | 
|---|
| 1424 | Node* Node::last_out(DUIterator_Last& i) const { | 
|---|
| 1425 | I_VDUI_ONLY(i, i.verify(this)); | 
|---|
| 1426 | return debug_only(i._last=) *i._outp; | 
|---|
| 1427 | } | 
|---|
| 1428 |  | 
|---|
| 1429 | #endif //OPTO_DU_ITERATOR_ASSERT | 
|---|
| 1430 |  | 
|---|
| 1431 | #undef I_VDUI_ONLY | 
|---|
| 1432 | #undef VDUI_ONLY | 
|---|
| 1433 |  | 
|---|
| 1434 | // An Iterator that truly follows the iterator pattern.  Doesn't | 
|---|
| 1435 | // support deletion but could be made to. | 
|---|
| 1436 | // | 
|---|
| 1437 | //   for (SimpleDUIterator i(n); i.has_next(); i.next()) { | 
|---|
| 1438 | //     Node* m = i.get(); | 
|---|
| 1439 | //     ... | 
|---|
|  | //   } | 
|---|
| 1440 | class SimpleDUIterator : public StackObj { | 
|---|
| 1441 | private: | 
|---|
| 1442 | Node* node; | 
|---|
| 1443 | DUIterator_Fast i; | 
|---|
| 1444 | DUIterator_Fast imax; | 
|---|
| 1445 | public: | 
|---|
| 1446 | SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {} | 
|---|
| 1447 | bool has_next() { return i < imax; } | 
|---|
| 1448 | void next() { i++; } | 
|---|
| 1449 | Node* get() { return node->fast_out(i); } | 
|---|
| 1450 | }; | 
|---|
| 1451 |  | 
|---|
| 1452 |  | 
|---|
| 1453 | //----------------------------------------------------------------------------- | 
|---|
| 1454 | // Map dense integer indices to Nodes.  Uses classic doubling-array trick. | 
|---|
| 1455 | // Abstractly provides an infinite array of Node*'s, initialized to NULL. | 
|---|
| 1456 | // Note that the constructor just zeros things, and since I use Arena | 
|---|
| 1457 | // allocation I do not need a destructor to reclaim storage. | 
|---|
| 1458 | class Node_Array : public ResourceObj { | 
|---|
| 1459 | friend class VMStructs; | 
|---|
| 1460 | protected: | 
|---|
| 1461 | Arena *_a;                    // Arena to allocate in | 
|---|
| 1462 | uint   _max; | 
|---|
| 1463 | Node **_nodes; | 
|---|
| 1464 | void   grow( uint i );        // Grow the array to accommodate index i | 
|---|
| 1465 | public: | 
|---|
| 1466 | Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) { | 
|---|
| 1467 | _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize ); | 
|---|
| 1468 | for( int i = 0; i < OptoNodeListSize; i++ ) { | 
|---|
| 1469 | _nodes[i] = NULL; | 
|---|
| 1470 | } | 
|---|
| 1471 | } | 
|---|
| 1472 |  | 
|---|
| 1473 | Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {} | 
|---|
| 1474 | Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped | 
|---|
| 1475 | { return (i<_max) ? _nodes[i] : (Node*)NULL; } | 
|---|
| 1476 | Node *at( uint i ) const { assert(i<_max, "oob"); return _nodes[i]; } | 
|---|
| 1477 | Node **adr() { return _nodes; } | 
|---|
| 1478 | // Extend the mapping: index i maps to Node *n. | 
|---|
| 1479 | void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; } | 
|---|
| 1480 | void insert( uint i, Node *n ); | 
|---|
| 1481 | void remove( uint i );        // Remove, preserving order | 
|---|
| 1482 | void sort( C_sort_func_t func); | 
|---|
| 1483 | void reset( Arena *new_a );   // Zap mapping to empty; reclaim storage | 
|---|
| 1484 | void clear();                 // Set all entries to NULL, keep storage | 
|---|
| 1485 | uint Size() const { return _max; } | 
|---|
| 1486 | void dump() const; | 
|---|
| 1487 | }; | 
|---|
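|  | // A minimal usage sketch (illustrative only): Node_Array as a growable idx -> Node | 
|---|
|  | // map living in a resource arena. | 
|---|
|  | //   Node_Array map(Thread::current()->resource_area()); | 
|---|
|  | //   map.map(n->_idx, n);            // map() grows the backing array as needed | 
|---|
|  | //   Node* same = map[n->_idx];      // operator[] yields NULL for unmapped indices | 
|---|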
| 1488 |  | 
|---|
| 1489 | class Node_List : public Node_Array { | 
|---|
| 1490 | friend class VMStructs; | 
|---|
| 1491 | uint _cnt; | 
|---|
| 1492 | public: | 
|---|
| 1493 | Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {} | 
|---|
| 1494 | Node_List(Arena *a) : Node_Array(a), _cnt(0) {} | 
|---|
| 1495 | bool contains(const Node* n) const { | 
|---|
| 1496 | for (uint e = 0; e < size(); e++) { | 
|---|
| 1497 | if (at(e) == n) return true; | 
|---|
| 1498 | } | 
|---|
| 1499 | return false; | 
|---|
| 1500 | } | 
|---|
| 1501 | void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; } | 
|---|
| 1502 | void remove( uint i ) { Node_Array::remove(i); _cnt--; } | 
|---|
| 1503 | void push( Node *b ) { map(_cnt++,b); } | 
|---|
| 1504 | void yank( Node *n );         // Find and remove | 
|---|
| 1505 | Node *pop() { return _nodes[--_cnt]; } | 
|---|
| 1506 | Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;} | 
|---|
| 1507 | void clear() { _cnt = 0; Node_Array::clear(); } // retain storage | 
|---|
| 1508 | uint size() const { return _cnt; } | 
|---|
| 1509 | void dump() const; | 
|---|
| 1510 | void dump_simple() const; | 
|---|
| 1511 | }; | 
|---|
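|  | // A small sketch (illustrative): Node_List used as a LIFO work stack. | 
|---|
|  | //   Node_List stack;                // default ctor uses the thread's resource area | 
|---|
|  | //   stack.push(root); | 
|---|
|  | //   while (stack.size() > 0) { | 
|---|
|  | //     Node* n = stack.pop(); | 
|---|
|  | //     // ... process n, possibly pushing more nodes ... | 
|---|
|  | //   } | 
|---|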
| 1512 |  | 
|---|
| 1513 | //------------------------------Unique_Node_List------------------------------- | 
|---|
| 1514 | class Unique_Node_List : public Node_List { | 
|---|
| 1515 | friend class VMStructs; | 
|---|
| 1516 | VectorSet _in_worklist; | 
|---|
| 1517 | uint _clock_index;            // Index in list where to pop from next | 
|---|
| 1518 | public: | 
|---|
| 1519 | Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {} | 
|---|
| 1520 | Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {} | 
|---|
| 1521 |  | 
|---|
| 1522 | void remove( Node *n ); | 
|---|
| 1523 | bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; } | 
|---|
| 1524 | VectorSet &member_set(){ return _in_worklist; } | 
|---|
| 1525 |  | 
|---|
| 1526 | void push( Node *b ) { | 
|---|
| 1527 | if( !_in_worklist.test_set(b->_idx) ) | 
|---|
| 1528 | Node_List::push(b); | 
|---|
| 1529 | } | 
|---|
| 1530 | Node *pop() { | 
|---|
| 1531 | if( _clock_index >= size() ) _clock_index = 0; | 
|---|
| 1532 | Node *b = at(_clock_index); | 
|---|
| 1533 | map( _clock_index, Node_List::pop()); | 
|---|
| 1534 | if (size() != 0) _clock_index++; // Always start from 0 | 
|---|
| 1535 | _in_worklist >>= b->_idx; | 
|---|
| 1536 | return b; | 
|---|
| 1537 | } | 
|---|
| 1538 | Node *remove( uint i ) { | 
|---|
| 1539 | Node *b = Node_List::at(i); | 
|---|
| 1540 | _in_worklist >>= b->_idx; | 
|---|
| 1541 | map(i,Node_List::pop()); | 
|---|
| 1542 | return b; | 
|---|
| 1543 | } | 
|---|
| 1544 | void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); } | 
|---|
| 1545 | void  clear() { | 
|---|
| 1546 | _in_worklist.Clear();        // Discards storage but grows automatically | 
|---|
| 1547 | Node_List::clear(); | 
|---|
| 1548 | _clock_index = 0; | 
|---|
| 1549 | } | 
|---|
| 1550 |  | 
|---|
| 1551 | // Used after parsing to remove useless nodes before Iterative GVN | 
|---|
| 1552 | void remove_useless_nodes(VectorSet &useful); | 
|---|
| 1553 |  | 
|---|
| 1554 | #ifndef PRODUCT | 
|---|
| 1555 | void print_set() const { _in_worklist.print(); } | 
|---|
| 1556 | #endif | 
|---|
| 1557 | }; | 
|---|
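|  | // A sketch of the classic worklist idiom this class supports: push() filters | 
|---|
|  | // duplicates, so each node enters the list at most once (illustrative only). | 
|---|
|  | //   Unique_Node_List worklist; | 
|---|
|  | //   worklist.push(root); | 
|---|
|  | //   for (uint i = 0; i < worklist.size(); i++) {       // size() grows as we push | 
|---|
|  | //     Node* n = worklist.at(i); | 
|---|
|  | //     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { | 
|---|
|  | //       worklist.push(n->fast_out(j));                 // duplicates are ignored | 
|---|
|  | //     } | 
|---|
|  | //   } | 
|---|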
| 1558 |  | 
|---|
| 1559 | // Inline definition of Compile::record_for_igvn must be deferred to this point. | 
|---|
| 1560 | inline void Compile::record_for_igvn(Node* n) { | 
|---|
| 1561 | _for_igvn->push(n); | 
|---|
| 1562 | } | 
|---|
| 1563 |  | 
|---|
| 1564 | //------------------------------Node_Stack------------------------------------- | 
|---|
| 1565 | class Node_Stack { | 
|---|
| 1566 | friend class VMStructs; | 
|---|
| 1567 | protected: | 
|---|
| 1568 | struct INode { | 
|---|
| 1569 | Node *node; // Processed node | 
|---|
| 1570 | uint  indx; // Index of next node's child | 
|---|
| 1571 | }; | 
|---|
| 1572 | INode *_inode_top; // tos, stack grows up | 
|---|
| 1573 | INode *_inode_max; // End of _inodes == _inodes + _max | 
|---|
| 1574 | INode *_inodes;    // Array storage for the stack | 
|---|
| 1575 | Arena *_a;         // Arena to allocate in | 
|---|
| 1576 | void grow(); | 
|---|
| 1577 | public: | 
|---|
| 1578 | Node_Stack(int size) { | 
|---|
| 1579 | size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize; | 
|---|
| 1580 | _a = Thread::current()->resource_area(); | 
|---|
| 1581 | _inodes = NEW_ARENA_ARRAY( _a, INode, max ); | 
|---|
| 1582 | _inode_max = _inodes + max; | 
|---|
| 1583 | _inode_top = _inodes - 1; // stack is empty | 
|---|
| 1584 | } | 
|---|
| 1585 |  | 
|---|
| 1586 | Node_Stack(Arena *a, int size) : _a(a) { | 
|---|
| 1587 | size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize; | 
|---|
| 1588 | _inodes = NEW_ARENA_ARRAY( _a, INode, max ); | 
|---|
| 1589 | _inode_max = _inodes + max; | 
|---|
| 1590 | _inode_top = _inodes - 1; // stack is empty | 
|---|
| 1591 | } | 
|---|
| 1592 |  | 
|---|
| 1593 | void pop() { | 
|---|
| 1594 | assert(_inode_top >= _inodes, "node stack underflow"); | 
|---|
| 1595 | --_inode_top; | 
|---|
| 1596 | } | 
|---|
| 1597 | void push(Node *n, uint i) { | 
|---|
| 1598 | ++_inode_top; | 
|---|
| 1599 | if (_inode_top >= _inode_max) grow(); | 
|---|
| 1600 | INode *top = _inode_top; // optimization | 
|---|
| 1601 | top->node = n; | 
|---|
| 1602 | top->indx = i; | 
|---|
| 1603 | } | 
|---|
| 1604 | Node *node() const { | 
|---|
| 1605 | return _inode_top->node; | 
|---|
| 1606 | } | 
|---|
| 1607 | Node* node_at(uint i) const { | 
|---|
| 1608 | assert(_inodes + i <= _inode_top, "in range"); | 
|---|
| 1609 | return _inodes[i].node; | 
|---|
| 1610 | } | 
|---|
| 1611 | uint index() const { | 
|---|
| 1612 | return _inode_top->indx; | 
|---|
| 1613 | } | 
|---|
| 1614 | uint index_at(uint i) const { | 
|---|
| 1615 | assert(_inodes + i <= _inode_top, "in range"); | 
|---|
| 1616 | return _inodes[i].indx; | 
|---|
| 1617 | } | 
|---|
| 1618 | void set_node(Node *n) { | 
|---|
| 1619 | _inode_top->node = n; | 
|---|
| 1620 | } | 
|---|
| 1621 | void set_index(uint i) { | 
|---|
| 1622 | _inode_top->indx = i; | 
|---|
| 1623 | } | 
|---|
| 1624 | uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes,  sizeof(INode)); } // Max size | 
|---|
| 1625 | uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes,  sizeof(INode)); } // Current size | 
|---|
| 1626 | bool is_nonempty() const { return (_inode_top >= _inodes); } | 
|---|
| 1627 | bool is_empty() const { return (_inode_top < _inodes); } | 
|---|
| 1628 | void clear() { _inode_top = _inodes - 1; } // retain storage | 
|---|
| 1629 |  | 
|---|
| 1630 | // Node_Stack is also used to map nodes: find() searches the stack for a node with the given index. | 
|---|
| 1631 | Node* find(uint idx) const; | 
|---|
| 1632 | }; | 
|---|
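|  | // A hedged sketch of the intended use: an explicit (non-recursive) DFS in which | 
|---|
|  | // 'indx' remembers which input of the current node to visit next (illustrative; | 
|---|
|  | // a real traversal would also track visited nodes). | 
|---|
|  | //   Node_Stack stack(16); | 
|---|
|  | //   stack.push(root, 0); | 
|---|
|  | //   while (stack.is_nonempty()) { | 
|---|
|  | //     Node* n = stack.node(); | 
|---|
|  | //     uint  i = stack.index(); | 
|---|
|  | //     if (i < n->req()) { | 
|---|
|  | //       stack.set_index(i + 1);               // advance past input i | 
|---|
|  | //       if (n->in(i) != NULL) stack.push(n->in(i), 0); | 
|---|
|  | //     } else { | 
|---|
|  | //       stack.pop();                          // all inputs of n have been visited | 
|---|
|  | //     } | 
|---|
|  | //   } | 
|---|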
| 1633 |  | 
|---|
| 1634 |  | 
|---|
| 1635 | //-----------------------------Node_Notes-------------------------------------- | 
|---|
| 1636 | // Debugging or profiling annotations loosely and sparsely associated | 
|---|
| 1637 | // with some nodes.  See Compile::node_notes_at for the accessor. | 
|---|
| 1638 | class Node_Notes { | 
|---|
| 1639 | friend class VMStructs; | 
|---|
| 1640 | JVMState* _jvms; | 
|---|
| 1641 |  | 
|---|
| 1642 | public: | 
|---|
| 1643 | Node_Notes(JVMState* jvms = NULL) { | 
|---|
| 1644 | _jvms = jvms; | 
|---|
| 1645 | } | 
|---|
| 1646 |  | 
|---|
| 1647 | JVMState* jvms()            { return _jvms; } | 
|---|
| 1648 | void  set_jvms(JVMState* x) {        _jvms = x; } | 
|---|
| 1649 |  | 
|---|
| 1650 | // True if there is nothing here. | 
|---|
| 1651 | bool is_clear() { | 
|---|
| 1652 | return (_jvms == NULL); | 
|---|
| 1653 | } | 
|---|
| 1654 |  | 
|---|
| 1655 | // Make there be nothing here. | 
|---|
| 1656 | void clear() { | 
|---|
| 1657 | _jvms = NULL; | 
|---|
| 1658 | } | 
|---|
| 1659 |  | 
|---|
| 1660 | // Make a new, clean node notes. | 
|---|
| 1661 | static Node_Notes* make(Compile* C) { | 
|---|
| 1662 | Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); | 
|---|
| 1663 | nn->clear(); | 
|---|
| 1664 | return nn; | 
|---|
| 1665 | } | 
|---|
| 1666 |  | 
|---|
| 1667 | Node_Notes* clone(Compile* C) { | 
|---|
| 1668 | Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); | 
|---|
| 1669 | (*nn) = (*this); | 
|---|
| 1670 | return nn; | 
|---|
| 1671 | } | 
|---|
| 1672 |  | 
|---|
| 1673 | // Absorb any information from source. | 
|---|
| 1674 | bool update_from(Node_Notes* source) { | 
|---|
| 1675 | bool changed = false; | 
|---|
| 1676 | if (source != NULL) { | 
|---|
| 1677 | if (source->jvms() != NULL) { | 
|---|
| 1678 | set_jvms(source->jvms()); | 
|---|
| 1679 | changed = true; | 
|---|
| 1680 | } | 
|---|
| 1681 | } | 
|---|
| 1682 | return changed; | 
|---|
| 1683 | } | 
|---|
| 1684 | }; | 
|---|
| 1685 |  | 
|---|
| 1686 | // Inlined accessors for Compile::node_notes that require the preceding class: | 
|---|
| 1687 | inline Node_Notes* | 
|---|
| 1688 | Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr, | 
|---|
| 1689 | int idx, bool can_grow) { | 
|---|
| 1690 | assert(idx >= 0, "oob"); | 
|---|
| 1691 | int block_idx = (idx >> _log2_node_notes_block_size); | 
|---|
| 1692 | int grow_by = (block_idx - (arr == NULL? 0: arr->length())); | 
|---|
| 1693 | if (grow_by >= 0) { | 
|---|
| 1694 | if (!can_grow) return NULL; | 
|---|
| 1695 | grow_node_notes(arr, grow_by + 1); | 
|---|
| 1696 | } | 
|---|
| 1697 | if (arr == NULL) return NULL; | 
|---|
| 1698 | // (Every element of arr is a sub-array of length _node_notes_block_size.) | 
|---|
| 1699 | return arr->at(block_idx) + (idx & (_node_notes_block_size-1)); | 
|---|
| 1700 | } | 
|---|
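|  | // Worked example of the block arithmetic above, with values assumed purely for | 
|---|
|  | // illustration: if _node_notes_block_size were 256 (log2 == 8), then idx == 1000 | 
|---|
|  | // would land in block 1000 >> 8 == 3, at offset 1000 & 255 == 232 within it. | 
|---|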
| 1701 |  | 
|---|
| 1702 | inline bool | 
|---|
| 1703 | Compile::set_node_notes_at(int idx, Node_Notes* value) { | 
|---|
| 1704 | if (value == NULL || value->is_clear()) | 
|---|
| 1705 | return false;  // nothing to write => write nothing | 
|---|
| 1706 | Node_Notes* loc = locate_node_notes(_node_note_array, idx, true); | 
|---|
| 1707 | assert(loc != NULL, ""); | 
|---|
| 1708 | return loc->update_from(value); | 
|---|
| 1709 | } | 
|---|
| 1710 |  | 
|---|
| 1711 |  | 
|---|
| 1712 | //------------------------------TypeNode--------------------------------------- | 
|---|
| 1713 | // Node with a Type constant. | 
|---|
| 1714 | class TypeNode : public Node { | 
|---|
| 1715 | protected: | 
|---|
| 1716 | virtual uint hash() const;    // Check the type | 
|---|
| 1717 | virtual bool cmp( const Node &n ) const; | 
|---|
| 1718 | virtual uint size_of() const; // Size is bigger | 
|---|
| 1719 | const Type* const _type; | 
|---|
| 1720 | public: | 
|---|
| 1721 | void set_type(const Type* t) { | 
|---|
| 1722 | assert(t != NULL, "sanity"); | 
|---|
| 1723 | debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); | 
|---|
| 1724 | *(const Type**)&_type = t;   // cast away const-ness | 
|---|
| 1725 | // If this node is in the hash table, make sure it doesn't need a rehash. | 
|---|
| 1726 | assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code"); | 
|---|
| 1727 | } | 
|---|
| 1728 | const Type* type() const { assert(_type != NULL, "sanity"); return _type; } | 
|---|
| 1729 | TypeNode( const Type *t, uint required ) : Node(required), _type(t) { | 
|---|
| 1730 | init_class_id(Class_Type); | 
|---|
| 1731 | } | 
|---|
| 1732 | virtual const Type* Value(PhaseGVN* phase) const; | 
|---|
| 1733 | virtual const Type *bottom_type() const; | 
|---|
| 1734 | virtual       uint  ideal_reg() const; | 
|---|
| 1735 | #ifndef PRODUCT | 
|---|
| 1736 | virtual void dump_spec(outputStream *st) const; | 
|---|
| 1737 | virtual void dump_compact_spec(outputStream *st) const; | 
|---|
| 1738 | #endif | 
|---|
| 1739 | }; | 
|---|
| 1740 |  | 
|---|
| 1741 | #endif // SHARE_OPTO_NODE_HPP | 
|---|
| 1742 |  | 
|---|