1/*
2 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "gc/shared/barrierSet.hpp"
27#include "gc/shared/c2/barrierSetC2.hpp"
28#include "libadt/vectset.hpp"
29#include "memory/allocation.inline.hpp"
30#include "memory/resourceArea.hpp"
31#include "opto/castnode.hpp"
32#include "opto/cfgnode.hpp"
33#include "opto/connode.hpp"
34#include "opto/loopnode.hpp"
35#include "opto/machnode.hpp"
36#include "opto/matcher.hpp"
37#include "opto/node.hpp"
38#include "opto/opcodes.hpp"
39#include "opto/regmask.hpp"
40#include "opto/rootnode.hpp"
41#include "opto/type.hpp"
42#include "utilities/copy.hpp"
43#include "utilities/macros.hpp"
44
45class RegMask;
46// #include "phase.hpp"
47class PhaseTransform;
48class PhaseGVN;
49
// Sentinel "register number" that is not any real machine register.
51const uint Node::NotAMachineReg = 0xffff0000;
52
53#ifndef PRODUCT
54extern int nodes_created;
55#endif
56#ifdef __clang__
57#pragma clang diagnostic push
58#pragma GCC diagnostic ignored "-Wuninitialized"
59#endif
60
61#ifdef ASSERT
62
//--------------------------verify_construction--------------------------------
64// Set a breakpoint here to identify where a particular node index is built.
65void Node::verify_construction() {
66 _debug_orig = NULL;
67 int old_debug_idx = Compile::debug_idx();
68 int new_debug_idx = old_debug_idx+1;
69 if (new_debug_idx > 0) {
70 // Arrange that the lowest five decimal digits of _debug_idx
71 // will repeat those of _idx. In case this is somehow pathological,
72 // we continue to assign negative numbers (!) consecutively.
73 const int mod = 100000;
74 int bump = (int)(_idx - new_debug_idx) % mod;
75 if (bump < 0) bump += mod;
76 assert(bump >= 0 && bump < mod, "");
77 new_debug_idx += bump;
78 }
79 Compile::set_debug_idx(new_debug_idx);
80 set_debug_idx( new_debug_idx );
81 assert(Compile::current()->unique() < (INT_MAX - 1), "Node limit exceeded INT_MAX");
  assert(Compile::current()->live_nodes() < Compile::current()->max_node_limit(), "Live Node limit exceeded");
83 if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
84 tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
85 BREAKPOINT;
86 }
87#if OPTO_DU_ITERATOR_ASSERT
88 _last_del = NULL;
89 _del_tick = 0;
90#endif
91 _hash_lock = 0;
92}
93
94
95// #ifdef ASSERT ...
96
97#if OPTO_DU_ITERATOR_ASSERT
98void DUIterator_Common::sample(const Node* node) {
99 _vdui = VerifyDUIterators;
100 _node = node;
101 _outcnt = node->_outcnt;
102 _del_tick = node->_del_tick;
103 _last = NULL;
104}
105
106void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
107 assert(_node == node, "consistent iterator source");
108 assert(_del_tick == node->_del_tick, "no unexpected deletions allowed");
109}
110
111void DUIterator_Common::verify_resync() {
112 // Ensure that the loop body has just deleted the last guy produced.
113 const Node* node = _node;
114 // Ensure that at least one copy of the last-seen edge was deleted.
115 // Note: It is OK to delete multiple copies of the last-seen edge.
116 // Unfortunately, we have no way to verify that all the deletions delete
117 // that same edge. On this point we must use the Honor System.
118 assert(node->_del_tick >= _del_tick+1, "must have deleted an edge");
119 assert(node->_last_del == _last, "must have deleted the edge just produced");
120 // We liked this deletion, so accept the resulting outcnt and tick.
121 _outcnt = node->_outcnt;
122 _del_tick = node->_del_tick;
123}
124
125void DUIterator_Common::reset(const DUIterator_Common& that) {
126 if (this == &that) return; // ignore assignment to self
127 if (!_vdui) {
128 // We need to initialize everything, overwriting garbage values.
129 _last = that._last;
130 _vdui = that._vdui;
131 }
132 // Note: It is legal (though odd) for an iterator over some node x
133 // to be reassigned to iterate over another node y. Some doubly-nested
134 // progress loops depend on being able to do this.
135 const Node* node = that._node;
136 // Re-initialize everything, except _last.
137 _node = node;
138 _outcnt = node->_outcnt;
139 _del_tick = node->_del_tick;
140}
141
142void DUIterator::sample(const Node* node) {
143 DUIterator_Common::sample(node); // Initialize the assertion data.
144 _refresh_tick = 0; // No refreshes have happened, as yet.
145}
146
147void DUIterator::verify(const Node* node, bool at_end_ok) {
148 DUIterator_Common::verify(node, at_end_ok);
149 assert(_idx < node->_outcnt + (uint)at_end_ok, "idx in range");
150}
151
152void DUIterator::verify_increment() {
153 if (_refresh_tick & 1) {
154 // We have refreshed the index during this loop.
155 // Fix up _idx to meet asserts.
156 if (_idx > _outcnt) _idx = _outcnt;
157 }
158 verify(_node, true);
159}
160
161void DUIterator::verify_resync() {
162 // Note: We do not assert on _outcnt, because insertions are OK here.
163 DUIterator_Common::verify_resync();
164 // Make sure we are still in sync, possibly with no more out-edges:
165 verify(_node, true);
166}
167
168void DUIterator::reset(const DUIterator& that) {
169 if (this == &that) return; // self assignment is always a no-op
170 assert(that._refresh_tick == 0, "assign only the result of Node::outs()");
171 assert(that._idx == 0, "assign only the result of Node::outs()");
172 assert(_idx == that._idx, "already assigned _idx");
173 if (!_vdui) {
174 // We need to initialize everything, overwriting garbage values.
175 sample(that._node);
176 } else {
177 DUIterator_Common::reset(that);
178 if (_refresh_tick & 1) {
179 _refresh_tick++; // Clear the "was refreshed" flag.
180 }
181 assert(_refresh_tick < 2*100000, "DU iteration must converge quickly");
182 }
183}
184
185void DUIterator::refresh() {
186 DUIterator_Common::sample(_node); // Re-fetch assertion data.
187 _refresh_tick |= 1; // Set the "was refreshed" flag.
188}
189
190void DUIterator::verify_finish() {
191 // If the loop has killed the node, do not require it to re-run.
192 if (_node->_outcnt == 0) _refresh_tick &= ~1;
193 // If this assert triggers, it means that a loop used refresh_out_pos
194 // to re-synch an iteration index, but the loop did not correctly
195 // re-run itself, using a "while (progress)" construct.
196 // This iterator enforces the rule that you must keep trying the loop
197 // until it "runs clean" without any need for refreshing.
198 assert(!(_refresh_tick & 1), "the loop must run once with no refreshing");
199}
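
// Illustrative sketch (not part of the compiler): the re-run idiom that
// verify_finish() enforces.  'n' and 'mutate_use' are placeholders for
// whatever node and edge-deleting loop body the caller has:
//
//   bool progress = true;
//   while (progress) {                 // keep re-running the scan...
//     progress = false;
//     for (DUIterator i = n->outs(); n->has_out(i); i++) {
//       Node* use = n->out(i);
//       if (mutate_use(n, use)) {      // an out-edge of n was deleted
//         n->refresh_out_pos(i);       // resync the iterator
//         progress = true;             // ...until a pass runs clean
//       }
//     }
//   }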
200
201
202void DUIterator_Fast::verify(const Node* node, bool at_end_ok) {
203 DUIterator_Common::verify(node, at_end_ok);
204 Node** out = node->_out;
205 uint cnt = node->_outcnt;
206 assert(cnt == _outcnt, "no insertions allowed");
207 assert(_outp >= out && _outp <= out + cnt - !at_end_ok, "outp in range");
208 // This last check is carefully designed to work for NO_OUT_ARRAY.
209}
210
211void DUIterator_Fast::verify_limit() {
212 const Node* node = _node;
213 verify(node, true);
214 assert(_outp == node->_out + node->_outcnt, "limit still correct");
215}
216
217void DUIterator_Fast::verify_resync() {
218 const Node* node = _node;
219 if (_outp == node->_out + _outcnt) {
220 // Note that the limit imax, not the pointer i, gets updated with the
221 // exact count of deletions. (For the pointer it's always "--i".)
222 assert(node->_outcnt+node->_del_tick == _outcnt+_del_tick, "no insertions allowed with deletion(s)");
223 // This is a limit pointer, with a name like "imax".
224 // Fudge the _last field so that the common assert will be happy.
225 _last = (Node*) node->_last_del;
226 DUIterator_Common::verify_resync();
227 } else {
228 assert(node->_outcnt < _outcnt, "no insertions allowed with deletion(s)");
229 // A normal internal pointer.
230 DUIterator_Common::verify_resync();
231 // Make sure we are still in sync, possibly with no more out-edges:
232 verify(node, true);
233 }
234}
235
236void DUIterator_Fast::verify_relimit(uint n) {
237 const Node* node = _node;
238 assert((int)n > 0, "use imax -= n only with a positive count");
239 // This must be a limit pointer, with a name like "imax".
240 assert(_outp == node->_out + node->_outcnt, "apply -= only to a limit (imax)");
241 // The reported number of deletions must match what the node saw.
242 assert(node->_del_tick == _del_tick + n, "must have deleted n edges");
243 // Fudge the _last field so that the common assert will be happy.
244 _last = (Node*) node->_last_del;
245 DUIterator_Common::verify_resync();
246}
247
248void DUIterator_Fast::reset(const DUIterator_Fast& that) {
249 assert(_outp == that._outp, "already assigned _outp");
250 DUIterator_Common::reset(that);
251}
252
253void DUIterator_Last::verify(const Node* node, bool at_end_ok) {
254 // at_end_ok means the _outp is allowed to underflow by 1
255 _outp += at_end_ok;
256 DUIterator_Fast::verify(node, at_end_ok); // check _del_tick, etc.
257 _outp -= at_end_ok;
258 assert(_outp == (node->_out + node->_outcnt) - 1, "pointer must point to end of nodes");
259}
260
261void DUIterator_Last::verify_limit() {
262 // Do not require the limit address to be resynched.
263 //verify(node, true);
264 assert(_outp == _node->_out, "limit still correct");
265}
266
267void DUIterator_Last::verify_step(uint num_edges) {
268 assert((int)num_edges > 0, "need non-zero edge count for loop progress");
269 _outcnt -= num_edges;
270 _del_tick += num_edges;
271 // Make sure we are still in sync, possibly with no more out-edges:
272 const Node* node = _node;
273 verify(node, true);
274 assert(node->_last_del == _last, "must have deleted the edge just produced");
275}
276
277#endif //OPTO_DU_ITERATOR_ASSERT
278
279
280#endif //ASSERT
281
282
283// This constant used to initialize _out may be any non-null value.
284// The value NULL is reserved for the top node only.
285#define NO_OUT_ARRAY ((Node**)-1)
286
287// Out-of-line code from node constructors.
288// Executed only when extra debug info. is being passed around.
289static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
290 C->set_node_notes_at(idx, nn);
291}
292
293// Shared initialization code.
294inline int Node::Init(int req) {
295 Compile* C = Compile::current();
296 int idx = C->next_unique();
297
298 // Allocate memory for the necessary number of edges.
299 if (req > 0) {
300 // Allocate space for _in array to have double alignment.
301 _in = (Node **) ((char *) (C->node_arena()->Amalloc_D(req * sizeof(void*))));
302 }
303 // If there are default notes floating around, capture them:
304 Node_Notes* nn = C->default_node_notes();
305 if (nn != NULL) init_node_notes(C, idx, nn);
306
307 // Note: At this point, C is dead,
308 // and we begin to initialize the new Node.
309
310 _cnt = _max = req;
311 _outcnt = _outmax = 0;
312 _class_id = Class_Node;
313 _flags = 0;
314 _out = NO_OUT_ARRAY;
315 return idx;
316}
317
318//------------------------------Node-------------------------------------------
319// Create a Node, with a given number of required edges.
320Node::Node(uint req)
321 : _idx(Init(req))
322#ifdef ASSERT
323 , _parse_idx(_idx)
324#endif
325{
326 assert( req < Compile::current()->max_node_limit() - NodeLimitFudgeFactor, "Input limit exceeded" );
327 debug_only( verify_construction() );
328 NOT_PRODUCT(nodes_created++);
329 if (req == 0) {
330 _in = NULL;
331 } else {
332 Node** to = _in;
333 for(uint i = 0; i < req; i++) {
334 to[i] = NULL;
335 }
336 }
337}
338
339//------------------------------Node-------------------------------------------
340Node::Node(Node *n0)
341 : _idx(Init(1))
342#ifdef ASSERT
343 , _parse_idx(_idx)
344#endif
345{
346 debug_only( verify_construction() );
347 NOT_PRODUCT(nodes_created++);
348 assert( is_not_dead(n0), "can not use dead node");
349 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
350}
351
352//------------------------------Node-------------------------------------------
353Node::Node(Node *n0, Node *n1)
354 : _idx(Init(2))
355#ifdef ASSERT
356 , _parse_idx(_idx)
357#endif
358{
359 debug_only( verify_construction() );
360 NOT_PRODUCT(nodes_created++);
361 assert( is_not_dead(n0), "can not use dead node");
362 assert( is_not_dead(n1), "can not use dead node");
363 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
364 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
365}
366
367//------------------------------Node-------------------------------------------
368Node::Node(Node *n0, Node *n1, Node *n2)
369 : _idx(Init(3))
370#ifdef ASSERT
371 , _parse_idx(_idx)
372#endif
373{
374 debug_only( verify_construction() );
375 NOT_PRODUCT(nodes_created++);
376 assert( is_not_dead(n0), "can not use dead node");
377 assert( is_not_dead(n1), "can not use dead node");
378 assert( is_not_dead(n2), "can not use dead node");
379 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
380 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
381 _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
382}
383
384//------------------------------Node-------------------------------------------
385Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
386 : _idx(Init(4))
387#ifdef ASSERT
388 , _parse_idx(_idx)
389#endif
390{
391 debug_only( verify_construction() );
392 NOT_PRODUCT(nodes_created++);
393 assert( is_not_dead(n0), "can not use dead node");
394 assert( is_not_dead(n1), "can not use dead node");
395 assert( is_not_dead(n2), "can not use dead node");
396 assert( is_not_dead(n3), "can not use dead node");
397 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
398 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
399 _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
400 _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
401}
402
403//------------------------------Node-------------------------------------------
404Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
405 : _idx(Init(5))
406#ifdef ASSERT
407 , _parse_idx(_idx)
408#endif
409{
410 debug_only( verify_construction() );
411 NOT_PRODUCT(nodes_created++);
412 assert( is_not_dead(n0), "can not use dead node");
413 assert( is_not_dead(n1), "can not use dead node");
414 assert( is_not_dead(n2), "can not use dead node");
415 assert( is_not_dead(n3), "can not use dead node");
416 assert( is_not_dead(n4), "can not use dead node");
417 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
418 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
419 _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
420 _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
421 _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
422}
423
424//------------------------------Node-------------------------------------------
425Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
426 Node *n4, Node *n5)
427 : _idx(Init(6))
428#ifdef ASSERT
429 , _parse_idx(_idx)
430#endif
431{
432 debug_only( verify_construction() );
433 NOT_PRODUCT(nodes_created++);
434 assert( is_not_dead(n0), "can not use dead node");
435 assert( is_not_dead(n1), "can not use dead node");
436 assert( is_not_dead(n2), "can not use dead node");
437 assert( is_not_dead(n3), "can not use dead node");
438 assert( is_not_dead(n4), "can not use dead node");
439 assert( is_not_dead(n5), "can not use dead node");
440 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
441 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
442 _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
443 _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
444 _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
445 _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
446}
447
448//------------------------------Node-------------------------------------------
449Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
450 Node *n4, Node *n5, Node *n6)
451 : _idx(Init(7))
452#ifdef ASSERT
453 , _parse_idx(_idx)
454#endif
455{
456 debug_only( verify_construction() );
457 NOT_PRODUCT(nodes_created++);
458 assert( is_not_dead(n0), "can not use dead node");
459 assert( is_not_dead(n1), "can not use dead node");
460 assert( is_not_dead(n2), "can not use dead node");
461 assert( is_not_dead(n3), "can not use dead node");
462 assert( is_not_dead(n4), "can not use dead node");
463 assert( is_not_dead(n5), "can not use dead node");
464 assert( is_not_dead(n6), "can not use dead node");
465 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
466 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
467 _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
468 _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
469 _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
470 _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
471 _in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this);
472}
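
// Illustrative sketch: concrete node classes funnel into the constructors
// above.  For example, a two-operand arithmetic node created during parsing
// looks roughly like
//   Node* add = new AddINode(a, b);   // AddNode's ctor invokes Node(0, a, b)
//   add = gvn.transform(add);         // hash-cons / idealize the fresh node
// where 'gvn', 'a' and 'b' stand for whatever PhaseGVN and operands the
// caller has at hand.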
473
474#ifdef __clang__
475#pragma clang diagnostic pop
476#endif
477
478
479//------------------------------clone------------------------------------------
480// Clone a Node.
481Node *Node::clone() const {
482 Compile* C = Compile::current();
483 uint s = size_of(); // Size of inherited Node
484 Node *n = (Node*)C->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
485 Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
486 // Set the new input pointer array
487 n->_in = (Node**)(((char*)n)+s);
488 // Cannot share the old output pointer array, so kill it
489 n->_out = NO_OUT_ARRAY;
490 // And reset the counters to 0
491 n->_outcnt = 0;
492 n->_outmax = 0;
493 // Unlock this guy, since he is not in any hash table.
494 debug_only(n->_hash_lock = 0);
495 // Walk the old node's input list to duplicate its edges
496 uint i;
497 for( i = 0; i < len(); i++ ) {
498 Node *x = in(i);
499 n->_in[i] = x;
500 if (x != NULL) x->add_out(n);
501 }
502 if (is_macro())
503 C->add_macro_node(n);
504 if (is_expensive())
505 C->add_expensive_node(n);
506 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
507 bs->register_potential_barrier_node(n);
508 // If the cloned node is a range check dependent CastII, add it to the list.
509 CastIINode* cast = n->isa_CastII();
510 if (cast != NULL && cast->has_range_check()) {
511 C->add_range_check_cast(cast);
512 }
513 if (n->Opcode() == Op_Opaque4) {
514 C->add_opaque4_node(n);
515 }
516
517 n->set_idx(C->next_unique()); // Get new unique index as well
518 debug_only( n->verify_construction() );
519 NOT_PRODUCT(nodes_created++);
520 // Do not patch over the debug_idx of a clone, because it makes it
521 // impossible to break on the clone's moment of creation.
522 //debug_only( n->set_debug_idx( debug_idx() ) );
523
524 C->copy_node_notes_to(n, (Node*) this);
525
526 // MachNode clone
527 uint nopnds;
528 if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
529 MachNode *mach = n->as_Mach();
530 MachNode *mthis = this->as_Mach();
531 // Get address of _opnd_array.
532 // It should be the same offset since it is the clone of this node.
533 MachOper **from = mthis->_opnds;
534 MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
535 pointer_delta((const void*)from,
536 (const void*)(&mthis->_opnds), 1));
537 mach->_opnds = to;
538 for ( uint i = 0; i < nopnds; ++i ) {
539 to[i] = from[i]->clone();
540 }
541 }
542 // cloning CallNode may need to clone JVMState
543 if (n->is_Call()) {
544 n->as_Call()->clone_jvms(C);
545 }
546 if (n->is_SafePoint()) {
547 n->as_SafePoint()->clone_replaced_nodes();
548 }
549 if (n->is_Load()) {
550 n->as_Load()->copy_barrier_info(this);
551 }
552 return n; // Return the clone
553}
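
// Illustrative sketch: clone() is the supported way to get "another node just
// like this one" during a transformation, e.g. inside an Ideal() routine:
//   Node* nn = this->clone();            // same opcode and inputs, fresh _idx
//   nn->set_req(2, phase->intcon(7));    // then retarget whichever inputs differ
//   return nn;                           // the caller (GVN) transforms the new root
// (The input index and constant above are purely illustrative.)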
554
555//---------------------------setup_is_top--------------------------------------
556// Call this when changing the top node, to reassert the invariants
557// required by Node::is_top. See Compile::set_cached_top_node.
558void Node::setup_is_top() {
559 if (this == (Node*)Compile::current()->top()) {
560 // This node has just become top. Kill its out array.
561 _outcnt = _outmax = 0;
562 _out = NULL; // marker value for top
563 assert(is_top(), "must be top");
564 } else {
565 if (_out == NULL) _out = NO_OUT_ARRAY;
566 assert(!is_top(), "must not be top");
567 }
568}
569
570//------------------------------~Node------------------------------------------
571// Fancy destructor; eagerly attempt to reclaim Node numberings and storage
572void Node::destruct() {
573 // Eagerly reclaim unique Node numberings
574 Compile* compile = Compile::current();
575 if ((uint)_idx+1 == compile->unique()) {
576 compile->set_unique(compile->unique()-1);
577 }
578 // Clear debug info:
579 Node_Notes* nn = compile->node_notes_at(_idx);
580 if (nn != NULL) nn->clear();
581 // Walk the input array, freeing the corresponding output edges
582 _cnt = _max; // forget req/prec distinction
583 uint i;
584 for( i = 0; i < _max; i++ ) {
585 set_req(i, NULL);
586 //assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim");
587 }
588 assert(outcnt() == 0, "deleting a node must not leave a dangling use");
589 // See if the input array was allocated just prior to the object
590 int edge_size = _max*sizeof(void*);
591 int out_edge_size = _outmax*sizeof(void*);
592 char *edge_end = ((char*)_in) + edge_size;
593 char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
594 int node_size = size_of();
595
596 // Free the output edge array
597 if (out_edge_size > 0) {
598 compile->node_arena()->Afree(out_array, out_edge_size);
599 }
600
601 // Free the input edge array and the node itself
602 if( edge_end == (char*)this ) {
603 // It was; free the input array and object all in one hit
604#ifndef ASSERT
605 compile->node_arena()->Afree(_in,edge_size+node_size);
606#endif
607 } else {
608 // Free just the input array
609 compile->node_arena()->Afree(_in,edge_size);
610
611 // Free just the object
612#ifndef ASSERT
613 compile->node_arena()->Afree(this,node_size);
614#endif
615 }
616 if (is_macro()) {
617 compile->remove_macro_node(this);
618 }
619 if (is_expensive()) {
620 compile->remove_expensive_node(this);
621 }
622 CastIINode* cast = isa_CastII();
623 if (cast != NULL && cast->has_range_check()) {
624 compile->remove_range_check_cast(cast);
625 }
626 if (Opcode() == Op_Opaque4) {
627 compile->remove_opaque4_node(this);
628 }
629
630 if (is_SafePoint()) {
631 as_SafePoint()->delete_replaced_nodes();
632 }
633 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
634 bs->unregister_potential_barrier_node(this);
635#ifdef ASSERT
636 // We will not actually delete the storage, but we'll make the node unusable.
637 *(address*)this = badAddress; // smash the C++ vtbl, probably
638 _in = _out = (Node**) badAddress;
639 _max = _cnt = _outmax = _outcnt = 0;
640 compile->remove_modified_node(this);
641#endif
642}
643
644//------------------------------grow-------------------------------------------
645// Grow the input array, making space for more edges
646void Node::grow( uint len ) {
647 Arena* arena = Compile::current()->node_arena();
648 uint new_max = _max;
649 if( new_max == 0 ) {
650 _max = 4;
651 _in = (Node**)arena->Amalloc(4*sizeof(Node*));
652 Node** to = _in;
653 to[0] = NULL;
654 to[1] = NULL;
655 to[2] = NULL;
656 to[3] = NULL;
657 return;
658 }
659 while( new_max <= len ) new_max <<= 1; // Find next power-of-2
660 // Trimming to limit allows a uint8 to handle up to 255 edges.
661 // Previously I was using only powers-of-2 which peaked at 128 edges.
662 //if( new_max >= limit ) new_max = limit-1;
663 _in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*));
664 Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space
665 _max = new_max; // Record new max length
666 // This assertion makes sure that Node::_max is wide enough to
667 // represent the numerical value of new_max.
668 assert(_max == new_max && _max > len, "int width of _max is too small");
669}
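
// Worked example of the growth policy above: a node that starts with
// _max == 4 and keeps gaining edges is reallocated to 8, 16, 32, ... slots.
// For instance, add_req() on a full 4-slot array calls grow(5); the loop
// doubles new_max from 4 to 8, and slots 4..7 are zero-filled by
// Copy::zero_to_bytes() before _max is updated.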
670
671//-----------------------------out_grow----------------------------------------
// Grow the output array, making space for more def-use edges
673void Node::out_grow( uint len ) {
674 assert(!is_top(), "cannot grow a top node's out array");
675 Arena* arena = Compile::current()->node_arena();
676 uint new_max = _outmax;
677 if( new_max == 0 ) {
678 _outmax = 4;
679 _out = (Node **)arena->Amalloc(4*sizeof(Node*));
680 return;
681 }
682 while( new_max <= len ) new_max <<= 1; // Find next power-of-2
683 // Trimming to limit allows a uint8 to handle up to 255 edges.
684 // Previously I was using only powers-of-2 which peaked at 128 edges.
685 //if( new_max >= limit ) new_max = limit-1;
686 assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value");
687 _out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*));
688 //Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // NULL all new space
689 _outmax = new_max; // Record new max length
  // This assertion makes sure that Node::_outmax is wide enough to
  // represent the numerical value of new_max.
692 assert(_outmax == new_max && _outmax > len, "int width of _outmax is too small");
693}
694
695#ifdef ASSERT
696//------------------------------is_dead----------------------------------------
697bool Node::is_dead() const {
  // Mach and pinch-point nodes may look dead, but are not treated as such.
699 if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
700 return false;
701 for( uint i = 0; i < _max; i++ )
702 if( _in[i] != NULL )
703 return false;
704 dump();
705 return true;
706}
707#endif
708
709
710//------------------------------is_unreachable---------------------------------
711bool Node::is_unreachable(PhaseIterGVN &igvn) const {
712 assert(!is_Mach(), "doesn't work with MachNodes");
713 return outcnt() == 0 || igvn.type(this) == Type::TOP || (in(0) != NULL && in(0)->is_top());
714}
715
716//------------------------------add_req----------------------------------------
717// Add a new required input at the end
718void Node::add_req( Node *n ) {
719 assert( is_not_dead(n), "can not use dead node");
720
721 // Look to see if I can move precedence down one without reallocating
722 if( (_cnt >= _max) || (in(_max-1) != NULL) )
723 grow( _max+1 );
724
725 // Find a precedence edge to move
726 if( in(_cnt) != NULL ) { // Next precedence edge is busy?
727 uint i;
728 for( i=_cnt; i<_max; i++ )
729 if( in(i) == NULL ) // Find the NULL at end of prec edge list
730 break; // There must be one, since we grew the array
731 _in[i] = in(_cnt); // Move prec over, making space for req edge
732 }
733 _in[_cnt++] = n; // Stuff over old prec edge
734 if (n != NULL) n->add_out((Node *)this);
735}
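
// Layout sketch of the _in array that add_req()/add_prec() maintain
// (contents are illustrative):
//
//   _in:  [ req0 | req1 | ... | req(_cnt-1) | prec | prec | NULL | ... NULL ]
//          |<------ required edges ------->|<- prec edges ->|<--- unused --->|
//          0                         _cnt-1                            _max-1
//
// add_req() grows into the precedence area by sliding one prec edge onto the
// first trailing NULL, while add_prec() appends after the existing prec edges.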
736
737//---------------------------add_req_batch-------------------------------------
// Add 'm' copies of a new required input 'n' at the end
739void Node::add_req_batch( Node *n, uint m ) {
740 assert( is_not_dead(n), "can not use dead node");
741 // check various edge cases
742 if ((int)m <= 1) {
743 assert((int)m >= 0, "oob");
744 if (m != 0) add_req(n);
745 return;
746 }
747
748 // Look to see if I can move precedence down one without reallocating
749 if( (_cnt+m) > _max || _in[_max-m] )
750 grow( _max+m );
751
752 // Find a precedence edge to move
753 if( _in[_cnt] != NULL ) { // Next precedence edge is busy?
754 uint i;
755 for( i=_cnt; i<_max; i++ )
756 if( _in[i] == NULL ) // Find the NULL at end of prec edge list
757 break; // There must be one, since we grew the array
758 // Slide all the precs over by m positions (assume #prec << m).
759 Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*)));
760 }
761
762 // Stuff over the old prec edges
763 for(uint i=0; i<m; i++ ) {
764 _in[_cnt++] = n;
765 }
766
767 // Insert multiple out edges on the node.
768 if (n != NULL && !n->is_top()) {
769 for(uint i=0; i<m; i++ ) {
770 n->add_out((Node *)this);
771 }
772 }
773}
774
775//------------------------------del_req----------------------------------------
776// Delete the required edge and compact the edge array
777void Node::del_req( uint idx ) {
778 assert( idx < _cnt, "oob");
779 assert( !VerifyHashTableKeys || _hash_lock == 0,
780 "remove node from hash table before modifying it");
781 // First remove corresponding def-use edge
782 Node *n = in(idx);
783 if (n != NULL) n->del_out((Node *)this);
784 _in[idx] = in(--_cnt); // Compact the array
785 // Avoid spec violation: Gap in prec edges.
786 close_prec_gap_at(_cnt);
787 Compile::current()->record_modified_node(this);
788}
789
790//------------------------------del_req_ordered--------------------------------
791// Delete the required edge and compact the edge array with preserved order
792void Node::del_req_ordered( uint idx ) {
793 assert( idx < _cnt, "oob");
794 assert( !VerifyHashTableKeys || _hash_lock == 0,
795 "remove node from hash table before modifying it");
796 // First remove corresponding def-use edge
797 Node *n = in(idx);
798 if (n != NULL) n->del_out((Node *)this);
799 if (idx < --_cnt) { // Not last edge ?
800 Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx)*sizeof(Node*)));
801 }
802 // Avoid spec violation: Gap in prec edges.
803 close_prec_gap_at(_cnt);
804 Compile::current()->record_modified_node(this);
805}
806
807//------------------------------ins_req----------------------------------------
// Insert a new required input 'n' at position 'idx', sliding later inputs up
809void Node::ins_req( uint idx, Node *n ) {
810 assert( is_not_dead(n), "can not use dead node");
811 add_req(NULL); // Make space
812 assert( idx < _max, "Must have allocated enough space");
813 // Slide over
814 if(_cnt-idx-1 > 0) {
815 Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*)));
816 }
817 _in[idx] = n; // Stuff over old required edge
818 if (n != NULL) n->add_out((Node *)this); // Add reciprocal def-use edge
819}
820
821//-----------------------------find_edge---------------------------------------
822int Node::find_edge(Node* n) {
823 for (uint i = 0; i < len(); i++) {
824 if (_in[i] == n) return i;
825 }
826 return -1;
827}
828
829//----------------------------replace_edge-------------------------------------
830int Node::replace_edge(Node* old, Node* neww) {
831 if (old == neww) return 0; // nothing to do
832 uint nrep = 0;
833 for (uint i = 0; i < len(); i++) {
834 if (in(i) == old) {
835 if (i < req()) {
836 set_req(i, neww);
837 } else {
838 assert(find_prec_edge(neww) == -1, "spec violation: duplicated prec edge (node %d -> %d)", _idx, neww->_idx);
839 set_prec(i, neww);
840 }
841 nrep++;
842 }
843 }
844 return nrep;
845}
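
// Illustrative sketch of a typical call site: reroute every edge of one user
// from 'old' to 'neww' and keep IGVN informed:
//   int changed = user->replace_edge(old, neww);
//   if (changed > 0)  igvn->_worklist.push(user);
// ('user', 'old', 'neww' and 'igvn' stand for whatever the caller has in hand.)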
846
847/**
 * Replace input edges in the range [start, end) that point to the 'old' node.
849 */
850int Node::replace_edges_in_range(Node* old, Node* neww, int start, int end) {
851 if (old == neww) return 0; // nothing to do
852 uint nrep = 0;
853 for (int i = start; i < end; i++) {
854 if (in(i) == old) {
855 set_req(i, neww);
856 nrep++;
857 }
858 }
859 return nrep;
860}
861
862//-------------------------disconnect_inputs-----------------------------------
863// NULL out all inputs to eliminate incoming Def-Use edges.
864// Return the number of edges between 'n' and 'this'
865int Node::disconnect_inputs(Node *n, Compile* C) {
866 int edges_to_n = 0;
867
868 uint cnt = req();
869 for( uint i = 0; i < cnt; ++i ) {
870 if( in(i) == 0 ) continue;
871 if( in(i) == n ) ++edges_to_n;
872 set_req(i, NULL);
873 }
874 // Remove precedence edges if any exist
875 // Note: Safepoints may have precedence edges, even during parsing
876 if( (req() != len()) && (in(req()) != NULL) ) {
877 uint max = len();
878 for( uint i = 0; i < max; ++i ) {
879 if( in(i) == 0 ) continue;
880 if( in(i) == n ) ++edges_to_n;
881 set_prec(i, NULL);
882 }
883 }
884
885 // Node::destruct requires all out edges be deleted first
886 // debug_only(destruct();) // no reuse benefit expected
887 if (edges_to_n == 0) {
888 C->record_dead_node(_idx);
889 }
890 return edges_to_n;
891}
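
// Illustrative sketch: a call site that abandons a node typically severs it
// first, e.g.
//   call->disconnect_inputs(NULL, C);
// which NULLs every input and precedence edge; when no input matched the
// passed-in node, the _idx is also recorded as dead ('call' is a placeholder).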
892
893//-----------------------------uncast---------------------------------------
894// %%% Temporary, until we sort out CheckCastPP vs. CastPP.
895// Strip away casting. (It is depth-limited.)
896// Optionally, keep casts with dependencies.
897Node* Node::uncast(bool keep_deps) const {
898 // Should be inline:
899 //return is_ConstraintCast() ? uncast_helper(this) : (Node*) this;
900 if (is_ConstraintCast()) {
901 return uncast_helper(this, keep_deps);
902 } else {
903 return (Node*) this;
904 }
905}
906
// Find a use (out-edge) of the current node that matches the given opcode.
908Node* Node::find_out_with(int opcode) {
909 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
910 Node* use = fast_out(i);
911 if (use->Opcode() == opcode) {
912 return use;
913 }
914 }
915 return NULL;
916}
917
918// Return true if the current node has an out that matches opcode.
919bool Node::has_out_with(int opcode) {
920 return (find_out_with(opcode) != NULL);
921}
922
923// Return true if the current node has an out that matches any of the opcodes.
924bool Node::has_out_with(int opcode1, int opcode2, int opcode3, int opcode4) {
925 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
926 int opcode = fast_out(i)->Opcode();
927 if (opcode == opcode1 || opcode == opcode2 || opcode == opcode3 || opcode == opcode4) {
928 return true;
929 }
930 }
931 return false;
932}
933
934
935//---------------------------uncast_helper-------------------------------------
936Node* Node::uncast_helper(const Node* p, bool keep_deps) {
937#ifdef ASSERT
938 uint depth_count = 0;
939 const Node* orig_p = p;
940#endif
941
942 while (true) {
943#ifdef ASSERT
944 if (depth_count >= K) {
945 orig_p->dump(4);
946 if (p != orig_p)
947 p->dump(1);
948 }
949 assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
950#endif
951 if (p == NULL || p->req() != 2) {
952 break;
953 } else if (p->is_ConstraintCast()) {
954 if (keep_deps && p->as_ConstraintCast()->carry_dependency()) {
955 break; // stop at casts with dependencies
956 }
957 p = p->in(1);
958 } else {
959 break;
960 }
961 }
962 return (Node*) p;
963}
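
// Illustrative sketch: stripping casts is the usual way to compare values that
// may be wrapped in CastPP/CheckCastPP/CastII nodes, e.g.
//   if (a->uncast() == b->uncast()) {
//     // 'a' and 'b' are the same underlying value
//   }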
964
965//------------------------------add_prec---------------------------------------
966// Add a new precedence input. Precedence inputs are unordered, with
967// duplicates removed and NULLs packed down at the end.
968void Node::add_prec( Node *n ) {
969 assert( is_not_dead(n), "can not use dead node");
970
971 // Check for NULL at end
972 if( _cnt >= _max || in(_max-1) )
973 grow( _max+1 );
974
975 // Find a precedence edge to move
976 uint i = _cnt;
977 while( in(i) != NULL ) {
978 if (in(i) == n) return; // Avoid spec violation: duplicated prec edge.
979 i++;
980 }
981 _in[i] = n; // Stuff prec edge over NULL
982 if ( n != NULL) n->add_out((Node *)this); // Add mirror edge
983
984#ifdef ASSERT
985 while ((++i)<_max) { assert(_in[i] == NULL, "spec violation: Gap in prec edges (node %d)", _idx); }
986#endif
987}
988
989//------------------------------rm_prec----------------------------------------
990// Remove a precedence input. Precedence inputs are unordered, with
991// duplicates removed and NULLs packed down at the end.
992void Node::rm_prec( uint j ) {
993 assert(j < _max, "oob: i=%d, _max=%d", j, _max);
994 assert(j >= _cnt, "not a precedence edge");
995 if (_in[j] == NULL) return; // Avoid spec violation: Gap in prec edges.
996 _in[j]->del_out((Node *)this);
997 close_prec_gap_at(j);
998}
999
1000//------------------------------size_of----------------------------------------
1001uint Node::size_of() const { return sizeof(*this); }
1002
1003//------------------------------ideal_reg--------------------------------------
1004uint Node::ideal_reg() const { return 0; }
1005
1006//------------------------------jvms-------------------------------------------
1007JVMState* Node::jvms() const { return NULL; }
1008
1009#ifdef ASSERT
//------------------------------verify_jvms------------------------------------
1011bool Node::verify_jvms(const JVMState* using_jvms) const {
1012 for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
1013 if (jvms == using_jvms) return true;
1014 }
1015 return false;
1016}
1017
1018//------------------------------init_NodeProperty------------------------------
1019void Node::init_NodeProperty() {
1020 assert(_max_classes <= max_jushort, "too many NodeProperty classes");
1021 assert(_max_flags <= max_jushort, "too many NodeProperty flags");
1022}
1023#endif
1024
1025//------------------------------format-----------------------------------------
1026// Print as assembly
1027void Node::format( PhaseRegAlloc *, outputStream *st ) const {}
1028//------------------------------emit-------------------------------------------
1029// Emit bytes starting at parameter 'ptr'.
1030void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
1031//------------------------------size-------------------------------------------
1032// Size of instruction in bytes
1033uint Node::size(PhaseRegAlloc *ra_) const { return 0; }
1034
1035//------------------------------CFG Construction-------------------------------
1036// Nodes that end basic blocks, e.g. IfTrue/IfFalse, JumpProjNode, Root,
1037// Goto and Return.
1038const Node *Node::is_block_proj() const { return 0; }
1039
1040// Minimum guaranteed type
1041const Type *Node::bottom_type() const { return Type::BOTTOM; }
1042
1043
1044//------------------------------raise_bottom_type------------------------------
1045// Get the worst-case Type output for this Node.
1046void Node::raise_bottom_type(const Type* new_type) {
1047 if (is_Type()) {
1048 TypeNode *n = this->as_Type();
1049 if (VerifyAliases) {
1050 assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
1051 }
1052 n->set_type(new_type);
1053 } else if (is_Load()) {
1054 LoadNode *n = this->as_Load();
1055 if (VerifyAliases) {
1056 assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
1057 }
1058 n->set_type(new_type);
1059 }
1060}
1061
1062//------------------------------Identity---------------------------------------
1063// Return a node that the given node is equivalent to.
1064Node* Node::Identity(PhaseGVN* phase) {
1065 return this; // Default to no identities
1066}
1067
1068//------------------------------Value------------------------------------------
1069// Compute a new Type for a node using the Type of the inputs.
1070const Type* Node::Value(PhaseGVN* phase) const {
1071 return bottom_type(); // Default to worst-case Type
1072}
1073
1074//------------------------------Ideal------------------------------------------
1075//
1076// 'Idealize' the graph rooted at this Node.
1077//
1078// In order to be efficient and flexible there are some subtle invariants
1079// these Ideal calls need to hold. Running with '+VerifyIterativeGVN' checks
// these invariants, although it is too slow to have on by default. If you are
1081// hacking an Ideal call, be sure to test with +VerifyIterativeGVN!
1082//
// The Ideal call may almost arbitrarily reshape the graph rooted at the 'this'
1084// pointer. If ANY change is made, it must return the root of the reshaped
1085// graph - even if the root is the same Node. Example: swapping the inputs
1086// to an AddINode gives the same answer and same root, but you still have to
1087// return the 'this' pointer instead of NULL.
1088//
1089// You cannot return an OLD Node, except for the 'this' pointer. Use the
// Identity call to return an old Node; basically, if Identity can find
// another Node, have the Ideal call make no change and return NULL.
1092// Example: AddINode::Ideal must check for add of zero; in this case it
1093// returns NULL instead of doing any graph reshaping.
1094//
1095// You cannot modify any old Nodes except for the 'this' pointer. Due to
1096// sharing there may be other users of the old Nodes relying on their current
1097// semantics. Modifying them will break the other users.
// Example: when reshaping "(X+3)+4" into "X+7" you must leave the Node for
1099// "X+3" unchanged in case it is shared.
1100//
1101// If you modify the 'this' pointer's inputs, you should use
1102// 'set_req'. If you are making a new Node (either as the new root or
1103// some new internal piece) you may use 'init_req' to set the initial
1104// value. You can make a new Node with either 'new' or 'clone'. In
1105// either case, def-use info is correctly maintained.
1106//
1107// Example: reshape "(X+3)+4" into "X+7":
1108// set_req(1, in(1)->in(1));
1109// set_req(2, phase->intcon(7));
1110// return this;
1111// Example: reshape "X*4" into "X<<2"
1112// return new LShiftINode(in(1), phase->intcon(2));
1113//
1114// You must call 'phase->transform(X)' on any new Nodes X you make, except
// for the returned root node. Example: reshape "X*31" into "(X<<5)-X".
// Node *shift=phase->transform(new LShiftINode(in(1),phase->intcon(5)));
// return new SubINode(shift, in(1));
1118//
1119// When making a Node for a constant use 'phase->makecon' or 'phase->intcon'.
1120// These forms are faster than 'phase->transform(new ConNode())' and Do
1121// The Right Thing with def-use info.
1122//
1123// You cannot bury the 'this' Node inside of a graph reshape. If the reshaped
1124// graph uses the 'this' Node it must be the root. If you want a Node with
1125// the same Opcode as the 'this' pointer use 'clone'.
1126//
1127Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) {
1128 return NULL; // Default to being Ideal already
1129}
1130
1131// Some nodes have specific Ideal subgraph transformations only if they are
1132// unique users of specific nodes. Such nodes should be put on IGVN worklist
1133// for the transformations to happen.
1134bool Node::has_special_unique_user() const {
1135 assert(outcnt() == 1, "match only for unique out");
1136 Node* n = unique_out();
1137 int op = Opcode();
1138 if (this->is_Store()) {
1139 // Condition for back-to-back stores folding.
1140 return n->Opcode() == op && n->in(MemNode::Memory) == this;
1141 } else if (this->is_Load() || this->is_DecodeN() || this->is_Phi()) {
1142 // Condition for removing an unused LoadNode or DecodeNNode from the MemBarAcquire precedence input
1143 return n->Opcode() == Op_MemBarAcquire;
1144 } else if (op == Op_AddL) {
1145 // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
1146 return n->Opcode() == Op_ConvL2I && n->in(1) == this;
1147 } else if (op == Op_SubI || op == Op_SubL) {
1148 // Condition for subI(x,subI(y,z)) ==> subI(addI(x,z),y)
1149 return n->Opcode() == op && n->in(2) == this;
1150 } else if (is_If() && (n->is_IfFalse() || n->is_IfTrue())) {
1151 // See IfProjNode::Identity()
1152 return true;
1153 } else {
1154 return BarrierSet::barrier_set()->barrier_set_c2()->has_special_unique_user(this);
1155 }
1156};
1157
1158//--------------------------find_exact_control---------------------------------
// Skip chains of Proj and CatchProj nodes. Check for NULL and top.
1160Node* Node::find_exact_control(Node* ctrl) {
1161 if (ctrl == NULL && this->is_Region())
1162 ctrl = this->as_Region()->is_copy();
1163
1164 if (ctrl != NULL && ctrl->is_CatchProj()) {
1165 if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index)
1166 ctrl = ctrl->in(0);
1167 if (ctrl != NULL && !ctrl->is_top())
1168 ctrl = ctrl->in(0);
1169 }
1170
1171 if (ctrl != NULL && ctrl->is_Proj())
1172 ctrl = ctrl->in(0);
1173
1174 return ctrl;
1175}
1176
1177//--------------------------dominates------------------------------------------
1178// Helper function for MemNode::all_controls_dominate().
// Check whether 'this' control node dominates or is equal to the 'sub' control node.
// We already know that if any path back to Root or Start reaches 'this',
// then all paths do, so this is a simple search for one example,
1182// not an exhaustive search for a counterexample.
1183bool Node::dominates(Node* sub, Node_List &nlist) {
1184 assert(this->is_CFG(), "expecting control");
1185 assert(sub != NULL && sub->is_CFG(), "expecting control");
1186
1187 // detect dead cycle without regions
1188 int iterations_without_region_limit = DominatorSearchLimit;
1189
1190 Node* orig_sub = sub;
1191 Node* dom = this;
1192 bool met_dom = false;
1193 nlist.clear();
1194
1195 // Walk 'sub' backward up the chain to 'dom', watching for regions.
1196 // After seeing 'dom', continue up to Root or Start.
1197 // If we hit a region (backward split point), it may be a loop head.
1198 // Keep going through one of the region's inputs. If we reach the
1199 // same region again, go through a different input. Eventually we
1200 // will either exit through the loop head, or give up.
1201 // (If we get confused, break out and return a conservative 'false'.)
1202 while (sub != NULL) {
1203 if (sub->is_top()) break; // Conservative answer for dead code.
1204 if (sub == dom) {
1205 if (nlist.size() == 0) {
1206 // No Region nodes except loops were visited before and the EntryControl
1207 // path was taken for loops: it did not walk in a cycle.
1208 return true;
1209 } else if (met_dom) {
1210 break; // already met before: walk in a cycle
1211 } else {
1212 // Region nodes were visited. Continue walk up to Start or Root
1213 // to make sure that it did not walk in a cycle.
1214 met_dom = true; // first time meet
1215 iterations_without_region_limit = DominatorSearchLimit; // Reset
1216 }
1217 }
1218 if (sub->is_Start() || sub->is_Root()) {
1219 // Success if we met 'dom' along a path to Start or Root.
1220 // We assume there are no alternative paths that avoid 'dom'.
1221 // (This assumption is up to the caller to ensure!)
1222 return met_dom;
1223 }
1224 Node* up = sub->in(0);
1225 // Normalize simple pass-through regions and projections:
1226 up = sub->find_exact_control(up);
1227 // If sub == up, we found a self-loop. Try to push past it.
1228 if (sub == up && sub->is_Loop()) {
1229 // Take loop entry path on the way up to 'dom'.
1230 up = sub->in(1); // in(LoopNode::EntryControl);
1231 } else if (sub == up && sub->is_Region() && sub->req() != 3) {
1232 // Always take in(1) path on the way up to 'dom' for clone regions
1233 // (with only one input) or regions which merge > 2 paths
1234 // (usually used to merge fast/slow paths).
1235 up = sub->in(1);
1236 } else if (sub == up && sub->is_Region()) {
1237 // Try both paths for Regions with 2 input paths (it may be a loop head).
1238 // It could give conservative 'false' answer without information
1239 // which region's input is the entry path.
1240 iterations_without_region_limit = DominatorSearchLimit; // Reset
1241
1242 bool region_was_visited_before = false;
1243 // Was this Region node visited before?
1244 // If so, we have reached it because we accidentally took a
1245 // loop-back edge from 'sub' back into the body of the loop,
1246 // and worked our way up again to the loop header 'sub'.
1247 // So, take the first unexplored path on the way up to 'dom'.
1248 for (int j = nlist.size() - 1; j >= 0; j--) {
1249 intptr_t ni = (intptr_t)nlist.at(j);
1250 Node* visited = (Node*)(ni & ~1);
1251 bool visited_twice_already = ((ni & 1) != 0);
1252 if (visited == sub) {
1253 if (visited_twice_already) {
1254 // Visited 2 paths, but still stuck in loop body. Give up.
1255 return false;
1256 }
1257 // The Region node was visited before only once.
1258 // (We will repush with the low bit set, below.)
1259 nlist.remove(j);
1260 // We will find a new edge and re-insert.
1261 region_was_visited_before = true;
1262 break;
1263 }
1264 }
1265
1266 // Find an incoming edge which has not been seen yet; walk through it.
1267 assert(up == sub, "");
1268 uint skip = region_was_visited_before ? 1 : 0;
1269 for (uint i = 1; i < sub->req(); i++) {
1270 Node* in = sub->in(i);
1271 if (in != NULL && !in->is_top() && in != sub) {
1272 if (skip == 0) {
1273 up = in;
1274 break;
1275 }
1276 --skip; // skip this nontrivial input
1277 }
1278 }
1279
1280 // Set 0 bit to indicate that both paths were taken.
1281 nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 1 : 0)));
1282 }
1283
1284 if (up == sub) {
1285 break; // some kind of tight cycle
1286 }
1287 if (up == orig_sub && met_dom) {
1288 // returned back after visiting 'dom'
1289 break; // some kind of cycle
1290 }
1291 if (--iterations_without_region_limit < 0) {
1292 break; // dead cycle
1293 }
1294 sub = up;
1295 }
1296
1297 // Did not meet Root or Start node in pred. chain.
1298 // Conservative answer for dead code.
1299 return false;
1300}
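
// Illustrative sketch of the kind of query MemNode::all_controls_dominate()
// issues (names are placeholders):
//   ResourceMark rm;
//   Node_List nlist;
//   if (dom_ctrl->dominates(use_ctrl, nlist)) {
//     // dom_ctrl was met on the walk from use_ctrl back toward Root/Start
//   }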
1301
//------------------------------kill_dead_code---------------------------------
1303// This control node is dead. Follow the subgraph below it making everything
1304// using it dead as well. This will happen normally via the usual IterGVN
1305// worklist but this call is more efficient. Do not update use-def info
1306// inside the dead region, just at the borders.
1307static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
1308 // Con's are a popular node to re-hit in the hash table again.
1309 if( dead->is_Con() ) return;
1310
1311 // Can't put ResourceMark here since igvn->_worklist uses the same arena
1312 // for verify pass with +VerifyOpto and we add/remove elements in it here.
1313 Node_List nstack(Thread::current()->resource_area());
1314
1315 Node *top = igvn->C->top();
1316 nstack.push(dead);
1317 bool has_irreducible_loop = igvn->C->has_irreducible_loop();
1318
1319 while (nstack.size() > 0) {
1320 dead = nstack.pop();
1321 if (dead->Opcode() == Op_SafePoint) {
1322 dead->as_SafePoint()->disconnect_from_root(igvn);
1323 }
1324 if (dead->outcnt() > 0) {
1325 // Keep dead node on stack until all uses are processed.
1326 nstack.push(dead);
1327 // For all Users of the Dead... ;-)
1328 for (DUIterator_Last kmin, k = dead->last_outs(kmin); k >= kmin; ) {
1329 Node* use = dead->last_out(k);
1330 igvn->hash_delete(use); // Yank from hash table prior to mod
1331 if (use->in(0) == dead) { // Found another dead node
1332 assert (!use->is_Con(), "Control for Con node should be Root node.");
1333 use->set_req(0, top); // Cut dead edge to prevent processing
1334 nstack.push(use); // the dead node again.
1335 } else if (!has_irreducible_loop && // Backedge could be alive in irreducible loop
1336 use->is_Loop() && !use->is_Root() && // Don't kill Root (RootNode extends LoopNode)
1337 use->in(LoopNode::EntryControl) == dead) { // Dead loop if its entry is dead
1338 use->set_req(LoopNode::EntryControl, top); // Cut dead edge to prevent processing
1339 use->set_req(0, top); // Cut self edge
1340 nstack.push(use);
1341 } else { // Else found a not-dead user
1342 // Dead if all inputs are top or null
1343 bool dead_use = !use->is_Root(); // Keep empty graph alive
1344 for (uint j = 1; j < use->req(); j++) {
1345 Node* in = use->in(j);
1346 if (in == dead) { // Turn all dead inputs into TOP
1347 use->set_req(j, top);
1348 } else if (in != NULL && !in->is_top()) {
1349 dead_use = false;
1350 }
1351 }
1352 if (dead_use) {
1353 if (use->is_Region()) {
1354 use->set_req(0, top); // Cut self edge
1355 }
1356 nstack.push(use);
1357 } else {
1358 igvn->_worklist.push(use);
1359 }
1360 }
1361 // Refresh the iterator, since any number of kills might have happened.
1362 k = dead->last_outs(kmin);
1363 }
1364 } else { // (dead->outcnt() == 0)
1365 // Done with outputs.
1366 igvn->hash_delete(dead);
1367 igvn->_worklist.remove(dead);
1368 igvn->C->remove_modified_node(dead);
1369 igvn->set_type(dead, Type::TOP);
1370 if (dead->is_macro()) {
1371 igvn->C->remove_macro_node(dead);
1372 }
1373 if (dead->is_expensive()) {
1374 igvn->C->remove_expensive_node(dead);
1375 }
1376 CastIINode* cast = dead->isa_CastII();
1377 if (cast != NULL && cast->has_range_check()) {
1378 igvn->C->remove_range_check_cast(cast);
1379 }
1380 if (dead->Opcode() == Op_Opaque4) {
1381 igvn->C->remove_opaque4_node(dead);
1382 }
1383 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1384 bs->unregister_potential_barrier_node(dead);
1385 igvn->C->record_dead_node(dead->_idx);
1386 // Kill all inputs to the dead guy
1387 for (uint i=0; i < dead->req(); i++) {
1388 Node *n = dead->in(i); // Get input to dead guy
1389 if (n != NULL && !n->is_top()) { // Input is valid?
1390 dead->set_req(i, top); // Smash input away
1391 if (n->outcnt() == 0) { // Input also goes dead?
1392 if (!n->is_Con())
1393 nstack.push(n); // Clear it out as well
1394 } else if (n->outcnt() == 1 &&
1395 n->has_special_unique_user()) {
1396 igvn->add_users_to_worklist( n );
1397 } else if (n->outcnt() <= 2 && n->is_Store()) {
1398 // Push store's uses on worklist to enable folding optimization for
1399 // store/store and store/load to the same address.
1400 // The restriction (outcnt() <= 2) is the same as in set_req_X()
1401 // and remove_globally_dead_node().
1402 igvn->add_users_to_worklist( n );
1403 } else {
1404 BarrierSet::barrier_set()->barrier_set_c2()->enqueue_useful_gc_barrier(igvn, n);
1405 }
1406 }
1407 }
1408 } // (dead->outcnt() == 0)
1409 } // while (nstack.size() > 0) for outputs
1410 return;
1411}
1412
1413//------------------------------remove_dead_region-----------------------------
1414bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) {
1415 Node *n = in(0);
1416 if( !n ) return false;
1417 // Lost control into this guy? I.e., it became unreachable?
1418 // Aggressively kill all unreachable code.
1419 if (can_reshape && n->is_top()) {
1420 kill_dead_code(this, phase->is_IterGVN());
1421 return false; // Node is dead.
1422 }
1423
1424 if( n->is_Region() && n->as_Region()->is_copy() ) {
1425 Node *m = n->nonnull_req();
1426 set_req(0, m);
1427 return true;
1428 }
1429 return false;
1430}
1431
1432//------------------------------hash-------------------------------------------
1433// Hash function over Nodes.
1434uint Node::hash() const {
1435 uint sum = 0;
1436 for( uint i=0; i<_cnt; i++ ) // Add in all inputs
1437 sum = (sum<<1)-(uintptr_t)in(i); // Ignore embedded NULLs
1438 return (sum>>2) + _cnt + Opcode();
1439}
1440
1441//------------------------------cmp--------------------------------------------
1442// Compare special parts of simple Nodes
1443bool Node::cmp( const Node &n ) const {
1444 return true; // Must be same
1445}
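
// Illustrative sketch: GVN value-numbers two nodes together when they have the
// same Opcode(), the same inputs (folded into hash() above), and cmp() says
// their node-local state matches.  A subclass carrying extra state overrides
// both, along these (hypothetical) lines:
//   uint MyNode::hash() const { return Node::hash() + _my_field; }
//   bool MyNode::cmp(const Node &n) const {
//     return _my_field == ((MyNode&)n)._my_field;
//   }
// (TypeNode and the constant nodes do the real equivalent with their _type.)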
1446
1447//------------------------------rematerialize-----------------------------------
1448// Should we clone rather than spill this instruction?
1449bool Node::rematerialize() const {
1450 if ( is_Mach() )
1451 return this->as_Mach()->rematerialize();
1452 else
1453 return (_flags & Flag_rematerialize) != 0;
1454}
1455
1456//------------------------------needs_anti_dependence_check---------------------
1457// Nodes which use memory without consuming it, hence need antidependences.
1458bool Node::needs_anti_dependence_check() const {
1459 if (req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0) {
1460 return false;
1461 }
1462 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
1463 if (!bs->needs_anti_dependence_check(this)) {
1464 return false;
1465 }
1466 return in(1)->bottom_type()->has_memory();
1467}
1468
1469// Get an integer constant from a ConNode (or CastIINode).
1470// Return a default value if there is no apparent constant here.
1471const TypeInt* Node::find_int_type() const {
1472 if (this->is_Type()) {
1473 return this->as_Type()->type()->isa_int();
1474 } else if (this->is_Con()) {
1475 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
1476 return this->bottom_type()->isa_int();
1477 }
1478 return NULL;
1479}
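
// Illustrative sketch of the usual "is this input a known int constant?" test:
//   const TypeInt* t = n->in(2)->find_int_type();
//   if (t != NULL && t->is_con()) {
//     jint con = t->get_con();
//     // ... fold using 'con' ...
//   }
// ('n' and the input index are placeholders.)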
1480
1481// Get a pointer constant from a ConstNode.
1482// Returns the constant if it is a pointer ConstNode
1483intptr_t Node::get_ptr() const {
1484 assert( Opcode() == Op_ConP, "" );
1485 return ((ConPNode*)this)->type()->is_ptr()->get_con();
1486}
1487
1488// Get a narrow oop constant from a ConNNode.
1489intptr_t Node::get_narrowcon() const {
1490 assert( Opcode() == Op_ConN, "" );
1491 return ((ConNNode*)this)->type()->is_narrowoop()->get_con();
1492}
1493
1494// Get a long constant from a ConNode.
1495// Return a default value if there is no apparent constant here.
1496const TypeLong* Node::find_long_type() const {
1497 if (this->is_Type()) {
1498 return this->as_Type()->type()->isa_long();
1499 } else if (this->is_Con()) {
1500 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
1501 return this->bottom_type()->isa_long();
1502 }
1503 return NULL;
1504}
1505
1506
1507/**
1508 * Return a ptr type for nodes which should have it.
1509 */
1510const TypePtr* Node::get_ptr_type() const {
1511 const TypePtr* tp = this->bottom_type()->make_ptr();
1512#ifdef ASSERT
1513 if (tp == NULL) {
1514 this->dump(1);
1515 assert((tp != NULL), "unexpected node type");
1516 }
1517#endif
1518 return tp;
1519}
1520
1521// Get a double constant from a ConstNode.
1522// Returns the constant if it is a double ConstNode
1523jdouble Node::getd() const {
1524 assert( Opcode() == Op_ConD, "" );
1525 return ((ConDNode*)this)->type()->is_double_constant()->getd();
1526}
1527
1528// Get a float constant from a ConstNode.
1529// Returns the constant if it is a float ConstNode
1530jfloat Node::getf() const {
1531 assert( Opcode() == Op_ConF, "" );
1532 return ((ConFNode*)this)->type()->is_float_constant()->getf();
1533}
1534
1535#ifndef PRODUCT
1536
1537//------------------------------find------------------------------------------
1538// Find a neighbor of this Node with the given _idx
1539// If idx is negative, find its absolute value, following both _in and _out.
1540static void find_recur(Compile* C, Node* &result, Node *n, int idx, bool only_ctrl,
1541 VectorSet* old_space, VectorSet* new_space ) {
1542 int node_idx = (idx >= 0) ? idx : -idx;
1543 if (NotANode(n)) return; // Gracefully handle NULL, -1, 0xabababab, etc.
1544 // Contained in new_space or old_space? Check old_arena first since it's mostly empty.
1545 VectorSet *v = C->old_arena()->contains(n) ? old_space : new_space;
1546 if( v->test(n->_idx) ) return;
1547 if( (int)n->_idx == node_idx
1548 debug_only(|| n->debug_idx() == node_idx) ) {
1549 if (result != NULL)
1550 tty->print("find: " INTPTR_FORMAT " and " INTPTR_FORMAT " both have idx==%d\n",
1551 (uintptr_t)result, (uintptr_t)n, node_idx);
1552 result = n;
1553 }
1554 v->set(n->_idx);
1555 for( uint i=0; i<n->len(); i++ ) {
1556 if( only_ctrl && !(n->is_Region()) && (n->Opcode() != Op_Root) && (i != TypeFunc::Control) ) continue;
1557 find_recur(C, result, n->in(i), idx, only_ctrl, old_space, new_space );
1558 }
1559 // Search along forward edges also:
1560 if (idx < 0 && !only_ctrl) {
1561 for( uint j=0; j<n->outcnt(); j++ ) {
1562 find_recur(C, result, n->raw_out(j), idx, only_ctrl, old_space, new_space );
1563 }
1564 }
1565#ifdef ASSERT
1566 // Search along debug_orig edges last, checking for cycles
1567 Node* orig = n->debug_orig();
1568 if (orig != NULL) {
1569 do {
1570 if (NotANode(orig)) break;
1571 find_recur(C, result, orig, idx, only_ctrl, old_space, new_space );
1572 orig = orig->debug_orig();
1573 } while (orig != NULL && orig != n->debug_orig());
1574 }
1575#endif //ASSERT
1576}
1577
1578// call this from debugger:
1579Node* find_node(Node* n, int idx) {
1580 return n->find(idx);
1581}
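// Usage sketch (debugger only; the node pointer and index are hypothetical):
//   (gdb) call find_node(n, 42)    // search n's transitive inputs for _idx == 42
//   (gdb) call find_node(n, -42)   // same index, but follow output edges as well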
1582
1583//------------------------------find-------------------------------------------
1584Node* Node::find(int idx) const {
1585 ResourceArea *area = Thread::current()->resource_area();
1586 VectorSet old_space(area), new_space(area);
1587 Node* result = NULL;
1588 find_recur(Compile::current(), result, (Node*) this, idx, false, &old_space, &new_space );
1589 return result;
1590}
1591
1592//------------------------------find_ctrl--------------------------------------
1593// Find an ancestor to this node in the control history with given _idx
1594Node* Node::find_ctrl(int idx) const {
1595 ResourceArea *area = Thread::current()->resource_area();
1596 VectorSet old_space(area), new_space(area);
1597 Node* result = NULL;
1598 find_recur(Compile::current(), result, (Node*) this, idx, true, &old_space, &new_space );
1599 return result;
1600}
1601#endif
1602
1603
1604
1605#ifndef PRODUCT
1606
1607// -----------------------------Name-------------------------------------------
1608extern const char *NodeClassNames[];
1609const char *Node::Name() const { return NodeClassNames[Opcode()]; }
1610
1611static bool is_disconnected(const Node* n) {
1612 for (uint i = 0; i < n->req(); i++) {
1613 if (n->in(i) != NULL) return false;
1614 }
1615 return true;
1616}
1617
1618#ifdef ASSERT
1619static void dump_orig(Node* orig, outputStream *st) {
1620 Compile* C = Compile::current();
1621 if (NotANode(orig)) orig = NULL;
1622 if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
1623 if (orig == NULL) return;
1624 st->print(" !orig=");
1625 Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
1626 if (NotANode(fast)) fast = NULL;
1627 while (orig != NULL) {
1628 bool discon = is_disconnected(orig); // if discon, print [123] else 123
1629 if (discon) st->print("[");
1630 if (!Compile::current()->node_arena()->contains(orig))
1631 st->print("o");
1632 st->print("%d", orig->_idx);
1633 if (discon) st->print("]");
1634 orig = orig->debug_orig();
1635 if (NotANode(orig)) orig = NULL;
1636 if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
1637 if (orig != NULL) st->print(",");
1638 if (fast != NULL) {
1639 // Step fast twice for each single step of orig:
1640 fast = fast->debug_orig();
1641 if (NotANode(fast)) fast = NULL;
1642 if (fast != NULL && fast != orig) {
1643 fast = fast->debug_orig();
1644 if (NotANode(fast)) fast = NULL;
1645 }
1646 if (fast == orig) {
1647 st->print("...");
1648 break;
1649 }
1650 }
1651 }
1652}
1653
1654void Node::set_debug_orig(Node* orig) {
1655 _debug_orig = orig;
1656 if (BreakAtNode == 0) return;
1657 if (NotANode(orig)) orig = NULL;
1658 int trip = 10;
1659 while (orig != NULL) {
1660 if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) {
1661 tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d",
1662 this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx());
1663 BREAKPOINT;
1664 }
1665 orig = orig->debug_orig();
1666 if (NotANode(orig)) orig = NULL;
1667 if (trip-- <= 0) break;
1668 }
1669}
1670#endif //ASSERT
1671
1672//------------------------------dump------------------------------------------
1673// Dump a Node
1674void Node::dump(const char* suffix, bool mark, outputStream *st) const {
1675 Compile* C = Compile::current();
1676 bool is_new = C->node_arena()->contains(this);
1677 C->_in_dump_cnt++;
1678 st->print("%c%d%s\t%s\t=== ", is_new ? ' ' : 'o', _idx, mark ? " >" : "", Name());
1679
1680 // Dump the required and precedence inputs
1681 dump_req(st);
1682 dump_prec(st);
1683 // Dump the outputs
1684 dump_out(st);
1685
1686 if (is_disconnected(this)) {
1687#ifdef ASSERT
1688 st->print(" [%d]",debug_idx());
1689 dump_orig(debug_orig(), st);
1690#endif
1691 st->cr();
1692 C->_in_dump_cnt--;
1693 return; // don't process dead nodes
1694 }
1695
1696 if (C->clone_map().value(_idx) != 0) {
1697 C->clone_map().dump(_idx);
1698 }
1699 // Dump node-specific info
1700 dump_spec(st);
1701#ifdef ASSERT
1702 // Dump the non-reset _debug_idx
1703 if (Verbose && WizardMode) {
1704 st->print(" [%d]",debug_idx());
1705 }
1706#endif
1707
1708 const Type *t = bottom_type();
1709
1710 if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
1711 const TypeInstPtr *toop = t->isa_instptr();
1712 const TypeKlassPtr *tkls = t->isa_klassptr();
1713 ciKlass* klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
1714 if (klass && klass->is_loaded() && klass->is_interface()) {
1715 st->print(" Interface:");
1716 } else if (toop) {
1717 st->print(" Oop:");
1718 } else if (tkls) {
1719 st->print(" Klass:");
1720 }
1721 t->dump_on(st);
1722 } else if (t == Type::MEMORY) {
1723 st->print(" Memory:");
1724 MemNode::dump_adr_type(this, adr_type(), st);
1725 } else if (Verbose || WizardMode) {
1726 st->print(" Type:");
1727 if (t) {
1728 t->dump_on(st);
1729 } else {
1730 st->print("no type");
1731 }
  } else if (t != NULL && t->isa_vect() && this->is_MachSpillCopy()) {
1733 // Dump MachSpillcopy vector type.
1734 t->dump_on(st);
1735 }
1736 if (is_new) {
1737 debug_only(dump_orig(debug_orig(), st));
1738 Node_Notes* nn = C->node_notes_at(_idx);
1739 if (nn != NULL && !nn->is_clear()) {
1740 if (nn->jvms() != NULL) {
1741 st->print(" !jvms:");
1742 nn->jvms()->dump_spec(st);
1743 }
1744 }
1745 }
1746 if (suffix) st->print("%s", suffix);
1747 C->_in_dump_cnt--;
1748}
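// Rough shape of a dumped line (illustrative only; index, inputs and outputs
// are made up, spacing is approximate):
//   123  AddI  === _ 88 99  [[ 130 ]]
// Type info, !orig and !jvms annotations may follow, depending on the node and
// on Verbose/WizardMode.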
1749
1750//------------------------------dump_req--------------------------------------
1751void Node::dump_req(outputStream *st) const {
1752 // Dump the required input edges
1753 for (uint i = 0; i < req(); i++) { // For all required inputs
1754 Node* d = in(i);
1755 if (d == NULL) {
1756 st->print("_ ");
1757 } else if (NotANode(d)) {
1758 st->print("NotANode "); // uninitialized, sentinel, garbage, etc.
1759 } else {
1760 st->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
1761 }
1762 }
1763}
1764
1765
1766//------------------------------dump_prec-------------------------------------
1767void Node::dump_prec(outputStream *st) const {
1768 // Dump the precedence edges
1769 int any_prec = 0;
1770 for (uint i = req(); i < len(); i++) { // For all precedence inputs
1771 Node* p = in(i);
1772 if (p != NULL) {
1773 if (!any_prec++) st->print(" |");
1774 if (NotANode(p)) { st->print("NotANode "); continue; }
      st->print("%c%d ", Compile::current()->node_arena()->contains(p) ? ' ' : 'o', p->_idx);
1776 }
1777 }
1778}
1779
1780//------------------------------dump_out--------------------------------------
1781void Node::dump_out(outputStream *st) const {
1782 // Delimit the output edges
1783 st->print(" [[");
1784 // Dump the output edges
1785 for (uint i = 0; i < _outcnt; i++) { // For all outputs
1786 Node* u = _out[i];
1787 if (u == NULL) {
1788 st->print("_ ");
1789 } else if (NotANode(u)) {
1790 st->print("NotANode ");
1791 } else {
1792 st->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
1793 }
1794 }
1795 st->print("]] ");
1796}
1797
1798//----------------------------collect_nodes_i----------------------------------
1799// Collects nodes from an Ideal graph, starting from a given start node and
1800// moving in a given direction until a certain depth (distance from the start
1801// node) is reached. Duplicates are ignored.
1802// Arguments:
1803// nstack: the nodes are collected into this array.
1804// start: the node at which to start collecting.
1805// direction: if this is a positive number, collect input nodes; if it is
1806// a negative number, collect output nodes.
1807// depth: collect nodes up to this distance from the start node.
1808// include_start: whether to include the start node in the result collection.
1809// only_ctrl: whether to regard control edges only during traversal.
1810// only_data: whether to regard data edges only during traversal.
1811static void collect_nodes_i(GrowableArray<Node*> *nstack, const Node* start, int direction, uint depth, bool include_start, bool only_ctrl, bool only_data) {
1812 Node* s = (Node*) start; // remove const
1813 nstack->append(s);
1814 int begin = 0;
1815 int end = 0;
1816 for(uint i = 0; i < depth; i++) {
1817 end = nstack->length();
1818 for(int j = begin; j < end; j++) {
1819 Node* tp = nstack->at(j);
1820 uint limit = direction > 0 ? tp->len() : tp->outcnt();
1821 for(uint k = 0; k < limit; k++) {
1822 Node* n = direction > 0 ? tp->in(k) : tp->raw_out(k);
1823
1824 if (NotANode(n)) continue;
1825 // do not recurse through top or the root (would reach unrelated stuff)
1826 if (n->is_Root() || n->is_top()) continue;
1827 if (only_ctrl && !n->is_CFG()) continue;
1828 if (only_data && n->is_CFG()) continue;
1829
1830 bool on_stack = nstack->contains(n);
1831 if (!on_stack) {
1832 nstack->append(n);
1833 }
1834 }
1835 }
1836 begin = end;
1837 }
1838 if (!include_start) {
1839 nstack->remove(s);
1840 }
1841}
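// Worked illustration (hypothetical graph): with direction > 0 and depth == 2,
// the first round appends the start node's direct inputs and the second round
// appends their inputs, so nstack ends up holding everything within two input
// hops of the start node (the start node itself is removed again at the end
// unless include_start is true).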
1842
1843//------------------------------dump_nodes-------------------------------------
1844static void dump_nodes(const Node* start, int d, bool only_ctrl) {
1845 if (NotANode(start)) return;
1846
1847 GrowableArray <Node *> nstack(Compile::current()->live_nodes());
1848 collect_nodes_i(&nstack, start, d, (uint) ABS(d), true, only_ctrl, false);
1849
1850 int end = nstack.length();
1851 if (d > 0) {
1852 for(int j = end-1; j >= 0; j--) {
1853 nstack.at(j)->dump();
1854 }
1855 } else {
1856 for(int j = 0; j < end; j++) {
1857 nstack.at(j)->dump();
1858 }
1859 }
1860}
1861
1862//------------------------------dump-------------------------------------------
1863void Node::dump(int d) const {
1864 dump_nodes(this, d, false);
1865}
1866
1867//------------------------------dump_ctrl--------------------------------------
1868// Dump a Node's control history to depth
1869void Node::dump_ctrl(int d) const {
1870 dump_nodes(this, d, true);
1871}
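// Usage sketch (debugger convenience; depths are illustrative):
//   n->dump(2)       // n plus its transitive inputs up to depth 2
//   n->dump(-3)      // n plus its transitive outputs up to depth 3
//   n->dump_ctrl(4)  // only the CFG ancestry of n, up to depth 4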
1872
1873//-----------------------------dump_compact------------------------------------
1874void Node::dump_comp() const {
1875 this->dump_comp("\n");
1876}
1877
1878//-----------------------------dump_compact------------------------------------
1879// Dump a Node in compact representation, i.e., just print its name and index.
1880// Nodes can specify additional specifics to print in compact representation by
1881// implementing dump_compact_spec.
1882void Node::dump_comp(const char* suffix, outputStream *st) const {
1883 Compile* C = Compile::current();
1884 C->_in_dump_cnt++;
1885 st->print("%s(%d)", Name(), _idx);
1886 this->dump_compact_spec(st);
1887 if (suffix) {
1888 st->print("%s", suffix);
1889 }
1890 C->_in_dump_cnt--;
1891}
1892
1893//----------------------------dump_related-------------------------------------
1894// Dump a Node's related nodes - the notion of "related" depends on the Node at
1895// hand and is determined by the implementation of the virtual method rel.
1896void Node::dump_related() const {
1897 Compile* C = Compile::current();
1898 GrowableArray <Node *> in_rel(C->unique());
1899 GrowableArray <Node *> out_rel(C->unique());
1900 this->related(&in_rel, &out_rel, false);
1901 for (int i = in_rel.length() - 1; i >= 0; i--) {
1902 in_rel.at(i)->dump();
1903 }
1904 this->dump("\n", true);
1905 for (int i = 0; i < out_rel.length(); i++) {
1906 out_rel.at(i)->dump();
1907 }
1908}
1909
1910//----------------------------dump_related-------------------------------------
1911// Dump a Node's related nodes up to a given depth (distance from the start
1912// node).
1913// Arguments:
1914// d_in: depth for input nodes.
//   d_out: depth for output nodes (note: this is also a positive number).
1916void Node::dump_related(uint d_in, uint d_out) const {
1917 Compile* C = Compile::current();
1918 GrowableArray <Node *> in_rel(C->unique());
1919 GrowableArray <Node *> out_rel(C->unique());
1920
1921 // call collect_nodes_i directly
1922 collect_nodes_i(&in_rel, this, 1, d_in, false, false, false);
1923 collect_nodes_i(&out_rel, this, -1, d_out, false, false, false);
1924
1925 for (int i = in_rel.length() - 1; i >= 0; i--) {
1926 in_rel.at(i)->dump();
1927 }
1928 this->dump("\n", true);
1929 for (int i = 0; i < out_rel.length(); i++) {
1930 out_rel.at(i)->dump();
1931 }
1932}
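// Usage sketch (illustrative): n->dump_related(2, 1) prints n's inputs up to
// depth 2 (deepest first), then n itself marked with " >", then its outputs up
// to depth 1.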
1933
1934//------------------------dump_related_compact---------------------------------
1935// Dump a Node's related nodes in compact representation. The notion of
1936// "related" depends on the Node at hand and is determined by the implementation
1937// of the virtual method rel.
1938void Node::dump_related_compact() const {
1939 Compile* C = Compile::current();
1940 GrowableArray <Node *> in_rel(C->unique());
1941 GrowableArray <Node *> out_rel(C->unique());
1942 this->related(&in_rel, &out_rel, true);
1943 int n_in = in_rel.length();
1944 int n_out = out_rel.length();
1945
1946 this->dump_comp(n_in == 0 ? "\n" : " ");
1947 for (int i = 0; i < n_in; i++) {
1948 in_rel.at(i)->dump_comp(i == n_in - 1 ? "\n" : " ");
1949 }
1950 for (int i = 0; i < n_out; i++) {
1951 out_rel.at(i)->dump_comp(i == n_out - 1 ? "\n" : " ");
1952 }
1953}
1954
1955//------------------------------related----------------------------------------
// Collect a Node's related nodes. The default behaviour just collects the
// inputs and outputs at depth 1, including both control and data flow edges,
// regardless of whether the presentation is compact or not. For data nodes,
// the default is to collect the entire data input graph (only up to depth 1
// if compact), and outputs up to depth 1.
1961void Node::related(GrowableArray<Node*> *in_rel, GrowableArray<Node*> *out_rel, bool compact) const {
1962 if (this->is_CFG()) {
1963 collect_nodes_i(in_rel, this, 1, 1, false, false, false);
1964 collect_nodes_i(out_rel, this, -1, 1, false, false, false);
1965 } else {
1966 if (compact) {
1967 this->collect_nodes(in_rel, 1, false, true);
1968 } else {
1969 this->collect_nodes_in_all_data(in_rel, false);
1970 }
1971 this->collect_nodes(out_rel, -1, false, false);
1972 }
1973}
1974
1975//---------------------------collect_nodes-------------------------------------
1976// An entry point to the low-level node collection facility, to start from a
1977// given node in the graph. The start node is by default not included in the
1978// result.
1979// Arguments:
1980// ns: collect the nodes into this data structure.
1981// d: the depth (distance from start node) to which nodes should be
1982// collected. A value >0 indicates input nodes, a value <0, output
1983// nodes.
1984// ctrl: include only control nodes.
1985// data: include only data nodes.
1986void Node::collect_nodes(GrowableArray<Node*> *ns, int d, bool ctrl, bool data) const {
1987 if (ctrl && data) {
1988 // ignore nonsensical combination
1989 return;
1990 }
1991 collect_nodes_i(ns, this, d, (uint) ABS(d), false, ctrl, data);
1992}
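// Usage sketch (illustrative): this->collect_nodes(&list, 3, true, false)
// gathers the control inputs within three hops of this node, while
// this->collect_nodes(&list, -2, false, true) gathers the data outputs within
// two hops. Passing ctrl and data as true together is rejected above.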
1993
1994//--------------------------collect_nodes_in-----------------------------------
1995static void collect_nodes_in(Node* start, GrowableArray<Node*> *ns, bool primary_is_data, bool collect_secondary) {
1996 // The maximum depth is determined using a BFS that visits all primary (data
1997 // or control) inputs and increments the depth at each level.
1998 uint d_in = 0;
1999 GrowableArray<Node*> nodes(Compile::current()->unique());
2000 nodes.push(start);
2001 int nodes_at_current_level = 1;
2002 int n_idx = 0;
2003 while (nodes_at_current_level > 0) {
2004 // Add all primary inputs reachable from the current level to the list, and
2005 // increase the depth if there were any.
2006 int nodes_at_next_level = 0;
2007 bool nodes_added = false;
2008 while (nodes_at_current_level > 0) {
2009 nodes_at_current_level--;
2010 Node* current = nodes.at(n_idx++);
2011 for (uint i = 0; i < current->len(); i++) {
2012 Node* n = current->in(i);
2013 if (NotANode(n)) {
2014 continue;
2015 }
2016 if ((primary_is_data && n->is_CFG()) || (!primary_is_data && !n->is_CFG())) {
2017 continue;
2018 }
2019 if (!nodes.contains(n)) {
2020 nodes.push(n);
2021 nodes_added = true;
2022 nodes_at_next_level++;
2023 }
2024 }
2025 }
2026 if (nodes_added) {
2027 d_in++;
2028 }
2029 nodes_at_current_level = nodes_at_next_level;
2030 }
2031 start->collect_nodes(ns, d_in, !primary_is_data, primary_is_data);
2032 if (collect_secondary) {
2033 // Now, iterate over the secondary nodes in ns and add the respective
2034 // boundary reachable from them.
2035 GrowableArray<Node*> sns(Compile::current()->unique());
2036 for (GrowableArrayIterator<Node*> it = ns->begin(); it != ns->end(); ++it) {
2037 Node* n = *it;
2038 n->collect_nodes(&sns, 1, primary_is_data, !primary_is_data);
2039 for (GrowableArrayIterator<Node*> d = sns.begin(); d != sns.end(); ++d) {
2040 ns->append_if_missing(*d);
2041 }
2042 sns.clear();
2043 }
2044 }
2045}
2046
2047//---------------------collect_nodes_in_all_data-------------------------------
2048// Collect the entire data input graph. Include the control boundary if
2049// requested.
2050// Arguments:
2051// ns: collect the nodes into this data structure.
2052// ctrl: if true, include the control boundary.
2053void Node::collect_nodes_in_all_data(GrowableArray<Node*> *ns, bool ctrl) const {
2054 collect_nodes_in((Node*) this, ns, true, ctrl);
2055}
2056
2057//--------------------------collect_nodes_in_all_ctrl--------------------------
2058// Collect the entire control input graph. Include the data boundary if
2059// requested.
2060// ns: collect the nodes into this data structure.
//   data: if true, include the data boundary.
2062void Node::collect_nodes_in_all_ctrl(GrowableArray<Node*> *ns, bool data) const {
2063 collect_nodes_in((Node*) this, ns, false, data);
2064}
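// Usage sketch (illustrative): for a data node n,
//   n->collect_nodes_in_all_data(&list, true)
// gathers every data input reachable from n plus the CFG nodes bounding that
// data subgraph, while collect_nodes_in_all_ctrl() is the mirror image over
// control inputs with an optional data boundary.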
2065
2066//------------------collect_nodes_out_all_ctrl_boundary------------------------
2067// Collect the entire output graph until hitting control node boundaries, and
2068// include those.
2069void Node::collect_nodes_out_all_ctrl_boundary(GrowableArray<Node*> *ns) const {
2070 // Perform a BFS and stop at control nodes.
2071 GrowableArray<Node*> nodes(Compile::current()->unique());
2072 nodes.push((Node*) this);
2073 while (nodes.length() > 0) {
2074 Node* current = nodes.pop();
2075 if (NotANode(current)) {
2076 continue;
2077 }
2078 ns->append_if_missing(current);
2079 if (!current->is_CFG()) {
2080 for (DUIterator i = current->outs(); current->has_out(i); i++) {
2081 nodes.push(current->out(i));
2082 }
2083 }
2084 }
2085 ns->remove((Node*) this);
2086}
2087
2088// VERIFICATION CODE
// For each input edge to a node (i.e., for each Use-Def edge), verify that
2090// there is a corresponding Def-Use edge.
2091//------------------------------verify_edges-----------------------------------
2092void Node::verify_edges(Unique_Node_List &visited) {
2093 uint i, j, idx;
2094 int cnt;
2095 Node *n;
2096
2097 // Recursive termination test
2098 if (visited.member(this)) return;
2099 visited.push(this);
2100
2101 // Walk over all input edges, checking for correspondence
2102 for( i = 0; i < len(); i++ ) {
2103 n = in(i);
2104 if (n != NULL && !n->is_top()) {
2105 // Count instances of (Node *)this
2106 cnt = 0;
2107 for (idx = 0; idx < n->_outcnt; idx++ ) {
2108 if (n->_out[idx] == (Node *)this) cnt++;
2109 }
2110 assert( cnt > 0,"Failed to find Def-Use edge." );
2111 // Check for duplicate edges
2112 // walk the input array downcounting the input edges to n
2113 for( j = 0; j < len(); j++ ) {
2114 if( in(j) == n ) cnt--;
2115 }
2116 assert( cnt == 0,"Mismatched edge count.");
2117 } else if (n == NULL) {
2118 assert(i >= req() || i == 0 || is_Region() || is_Phi(), "only regions or phis have null data edges");
2119 } else {
2120 assert(n->is_top(), "sanity");
2121 // Nothing to check.
2122 }
2123 }
2124 // Recursive walk over all input edges
2125 for( i = 0; i < len(); i++ ) {
2126 n = in(i);
2127 if( n != NULL )
2128 in(i)->verify_edges(visited);
2129 }
2130}
2131
2132//------------------------------verify_recur-----------------------------------
2133static const Node *unique_top = NULL;
2134
2135void Node::verify_recur(const Node *n, int verify_depth,
2136 VectorSet &old_space, VectorSet &new_space) {
2137 if ( verify_depth == 0 ) return;
2138 if (verify_depth > 0) --verify_depth;
2139
2140 Compile* C = Compile::current();
2141
2142 // Contained in new_space or old_space?
2143 VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space;
2144 // Check for visited in the proper space. Numberings are not unique
2145 // across spaces so we need a separate VectorSet for each space.
2146 if( v->test_set(n->_idx) ) return;
2147
2148 if (n->is_Con() && n->bottom_type() == Type::TOP) {
2149 if (C->cached_top_node() == NULL)
2150 C->set_cached_top_node((Node*)n);
2151 assert(C->cached_top_node() == n, "TOP node must be unique");
2152 }
2153
2154 for( uint i = 0; i < n->len(); i++ ) {
2155 Node *x = n->in(i);
2156 if (!x || x->is_top()) continue;
2157
2158 // Verify my input has a def-use edge to me
2159 if (true /*VerifyDefUse*/) {
2160 // Count use-def edges from n to x
2161 int cnt = 0;
2162 for( uint j = 0; j < n->len(); j++ )
2163 if( n->in(j) == x )
2164 cnt++;
2165 // Count def-use edges from x to n
2166 uint max = x->_outcnt;
2167 for( uint k = 0; k < max; k++ )
2168 if (x->_out[k] == n)
2169 cnt--;
2170 assert( cnt == 0, "mismatched def-use edge counts" );
2171 }
2172
2173 verify_recur(x, verify_depth, old_space, new_space);
2174 }
2175
2176}
2177
2178//------------------------------verify-----------------------------------------
2179// Check Def-Use info for my subgraph
2180void Node::verify() const {
2181 Compile* C = Compile::current();
2182 Node* old_top = C->cached_top_node();
2183 ResourceMark rm;
2184 ResourceArea *area = Thread::current()->resource_area();
2185 VectorSet old_space(area), new_space(area);
2186 verify_recur(this, -1, old_space, new_space);
2187 C->set_cached_top_node(old_top);
2188}
2189#endif
2190
2191
2192//------------------------------walk-------------------------------------------
2193// Graph walk, with both pre-order and post-order functions
2194void Node::walk(NFunc pre, NFunc post, void *env) {
2195 VectorSet visited(Thread::current()->resource_area()); // Setup for local walk
2196 walk_(pre, post, env, visited);
2197}
2198
2199void Node::walk_(NFunc pre, NFunc post, void *env, VectorSet &visited) {
2200 if( visited.test_set(_idx) ) return;
2201 pre(*this,env); // Call the pre-order walk function
2202 for( uint i=0; i<_max; i++ )
2203 if( in(i) ) // Input exists and is not walked?
2204 in(i)->walk_(pre,post,env,visited); // Walk it with pre & post functions
2205 post(*this,env); // Call the post-order walk function
2206}
2207
2208void Node::nop(Node &, void*) {}
2209
2210//------------------------------Registers--------------------------------------
2211// Do we Match on this edge index or not? Generally false for Control
2212// and true for everything else. Weird for calls & returns.
2213uint Node::match_edge(uint idx) const {
2214 return idx; // True for other than index 0 (control)
2215}
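// Illustrative consequence (descriptive only): for a typical two-input
// arithmetic node the matcher folds inputs 1 and 2 into the selected
// instruction, while input 0 (control) is never matched; calls and returns,
// per the "Weird for calls & returns" note above, override match_edge to
// exclude further edges.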
2216
2217static RegMask _not_used_at_all;
2218// Register classes are defined for specific machines
2219const RegMask &Node::out_RegMask() const {
2220 ShouldNotCallThis();
2221 return _not_used_at_all;
2222}
2223
2224const RegMask &Node::in_RegMask(uint) const {
2225 ShouldNotCallThis();
2226 return _not_used_at_all;
2227}
2228
2229//=============================================================================
2230//-----------------------------------------------------------------------------
2231void Node_Array::reset( Arena *new_arena ) {
2232 _a->Afree(_nodes,_max*sizeof(Node*));
2233 _max = 0;
2234 _nodes = NULL;
2235 _a = new_arena;
2236}
2237
2238//------------------------------clear------------------------------------------
2239// Clear all entries in _nodes to NULL but keep storage
2240void Node_Array::clear() {
2241 Copy::zero_to_bytes( _nodes, _max*sizeof(Node*) );
2242}
2243
2244//-----------------------------------------------------------------------------
2245void Node_Array::grow( uint i ) {
2246 if( !_max ) {
2247 _max = 1;
2248 _nodes = (Node**)_a->Amalloc( _max * sizeof(Node*) );
2249 _nodes[0] = NULL;
2250 }
2251 uint old = _max;
2252 while( i >= _max ) _max <<= 1; // Double to fit
2253 _nodes = (Node**)_a->Arealloc( _nodes, old*sizeof(Node*),_max*sizeof(Node*));
2254 Copy::zero_to_bytes( &_nodes[old], (_max-old)*sizeof(Node*) );
2255}
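// Worked example (numbers are illustrative): with _max == 4, grow(9) doubles
// _max to 8 and then 16, reallocates the backing array, and zero-fills the new
// slots 4..15 so that lookups of untouched indices still yield NULL.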
2256
2257//-----------------------------------------------------------------------------
2258void Node_Array::insert( uint i, Node *n ) {
2259 if( _nodes[_max-1] ) grow(_max); // Get more space if full
2260 Copy::conjoint_words_to_higher((HeapWord*)&_nodes[i], (HeapWord*)&_nodes[i+1], ((_max-i-1)*sizeof(Node*)));
2261 _nodes[i] = n;
2262}
2263
2264//-----------------------------------------------------------------------------
2265void Node_Array::remove( uint i ) {
2266 Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i+1], (HeapWord*)&_nodes[i], ((_max-i-1)*sizeof(Node*)));
2267 _nodes[_max-1] = NULL;
2268}
2269
2270//-----------------------------------------------------------------------------
2271void Node_Array::sort( C_sort_func_t func) {
2272 qsort( _nodes, _max, sizeof( Node* ), func );
2273}
2274
2275//-----------------------------------------------------------------------------
2276void Node_Array::dump() const {
2277#ifndef PRODUCT
2278 for( uint i = 0; i < _max; i++ ) {
2279 Node *nn = _nodes[i];
2280 if( nn != NULL ) {
2281 tty->print("%5d--> ",i); nn->dump();
2282 }
2283 }
2284#endif
2285}
2286
2287//--------------------------is_iteratively_computed------------------------------
// Operation appears to be iteratively computed (such as an induction variable).
2289// It is possible for this operation to return false for a loop-varying
2290// value, if it appears (by local graph inspection) to be computed by a simple conditional.
2291bool Node::is_iteratively_computed() {
2292 if (ideal_reg()) { // does operation have a result register?
2293 for (uint i = 1; i < req(); i++) {
2294 Node* n = in(i);
2295 if (n != NULL && n->is_Phi()) {
2296 for (uint j = 1; j < n->req(); j++) {
2297 if (n->in(j) == this) {
2298 return true;
2299 }
2300 }
2301 }
2302 }
2303 }
2304 return false;
2305}
2306
2307//--------------------------find_similar------------------------------
// Return a node with opcode "opc" and the same inputs as "this" if one can
// be found; otherwise return NULL.
2310Node* Node::find_similar(int opc) {
2311 if (req() >= 2) {
2312 Node* def = in(1);
2313 if (def && def->outcnt() >= 2) {
2314 for (DUIterator_Fast dmax, i = def->fast_outs(dmax); i < dmax; i++) {
2315 Node* use = def->fast_out(i);
2316 if (use != this &&
2317 use->Opcode() == opc &&
2318 use->req() == req()) {
2319 uint j;
2320 for (j = 0; j < use->req(); j++) {
2321 if (use->in(j) != in(j)) {
2322 break;
2323 }
2324 }
2325 if (j == use->req()) {
2326 return use;
2327 }
2328 }
2329 }
2330 }
2331 }
2332 return NULL;
2333}
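// Usage sketch (illustrative): if this node is AddI(a, b) and another AddI
// with exactly the same inputs already hangs off a's output list,
// find_similar(Op_AddI) returns that existing node so the caller can reuse it
// instead of keeping a duplicate.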
2334
2335
2336//--------------------------unique_ctrl_out------------------------------
2337// Return the unique control out if only one. Null if none or more than one.
2338Node* Node::unique_ctrl_out() const {
2339 Node* found = NULL;
2340 for (uint i = 0; i < outcnt(); i++) {
2341 Node* use = raw_out(i);
2342 if (use->is_CFG() && use != this) {
2343 if (found != NULL) return NULL;
2344 found = use;
2345 }
2346 }
2347 return found;
2348}
2349
2350void Node::ensure_control_or_add_prec(Node* c) {
2351 if (in(0) == NULL) {
2352 set_req(0, c);
2353 } else if (in(0) != c) {
2354 add_prec(c);
2355 }
2356}
2357
2358//=============================================================================
2359//------------------------------yank-------------------------------------------
2360// Find and remove
2361void Node_List::yank( Node *n ) {
2362 uint i;
2363 for( i = 0; i < _cnt; i++ )
2364 if( _nodes[i] == n )
2365 break;
2366
2367 if( i < _cnt )
2368 _nodes[i] = _nodes[--_cnt];
2369}
2370
2371//------------------------------dump-------------------------------------------
2372void Node_List::dump() const {
2373#ifndef PRODUCT
2374 for( uint i = 0; i < _cnt; i++ )
2375 if( _nodes[i] ) {
2376 tty->print("%5d--> ",i);
2377 _nodes[i]->dump();
2378 }
2379#endif
2380}
2381
2382void Node_List::dump_simple() const {
2383#ifndef PRODUCT
2384 for( uint i = 0; i < _cnt; i++ )
2385 if( _nodes[i] ) {
2386 tty->print(" %d", _nodes[i]->_idx);
2387 } else {
2388 tty->print(" NULL");
2389 }
2390#endif
2391}
2392
2393//=============================================================================
2394//------------------------------remove-----------------------------------------
2395void Unique_Node_List::remove( Node *n ) {
2396 if( _in_worklist[n->_idx] ) {
2397 for( uint i = 0; i < size(); i++ )
2398 if( _nodes[i] == n ) {
2399 map(i,Node_List::pop());
2400 _in_worklist >>= n->_idx;
2401 return;
2402 }
2403 ShouldNotReachHere();
2404 }
2405}
2406
2407//-----------------------remove_useless_nodes----------------------------------
2408// Remove useless nodes from worklist
2409void Unique_Node_List::remove_useless_nodes(VectorSet &useful) {
2410
2411 for( uint i = 0; i < size(); ++i ) {
2412 Node *n = at(i);
2413 assert( n != NULL, "Did not expect null entries in worklist");
2414 if( ! useful.test(n->_idx) ) {
2415 _in_worklist >>= n->_idx;
2416 map(i,Node_List::pop());
2417 // Node *replacement = Node_List::pop();
2418 // if( i != size() ) { // Check if removing last entry
2419 // _nodes[i] = replacement;
2420 // }
2421 --i; // Visit popped node
2422 // If it was last entry, loop terminates since size() was also reduced
2423 }
2424 }
2425}
2426
2427//=============================================================================
2428void Node_Stack::grow() {
2429 size_t old_top = pointer_delta(_inode_top,_inodes,sizeof(INode)); // save _top
2430 size_t old_max = pointer_delta(_inode_max,_inodes,sizeof(INode));
2431 size_t max = old_max << 1; // max * 2
2432 _inodes = REALLOC_ARENA_ARRAY(_a, INode, _inodes, old_max, max);
2433 _inode_max = _inodes + max;
2434 _inode_top = _inodes + old_top; // restore _top
2435}
2436
// Node_Stack can also be used as a small map: find(idx) returns the node
// that was pushed with the given index, or NULL if there is none.
2438Node* Node_Stack::find(uint idx) const {
2439 uint sz = size();
2440 for (uint i=0; i < sz; i++) {
2441 if (idx == index_at(i) )
2442 return node_at(i);
2443 }
2444 return NULL;
2445}
2446
2447//=============================================================================
2448uint TypeNode::size_of() const { return sizeof(*this); }
2449#ifndef PRODUCT
2450void TypeNode::dump_spec(outputStream *st) const {
2451 if( !Verbose && !WizardMode ) {
2452 // standard dump does this in Verbose and WizardMode
2453 st->print(" #"); _type->dump_on(st);
2454 }
2455}
2456
2457void TypeNode::dump_compact_spec(outputStream *st) const {
2458 st->print("#");
2459 _type->dump_on(st);
2460}
2461#endif
2462uint TypeNode::hash() const {
2463 return Node::hash() + _type->hash();
2464}
2465bool TypeNode::cmp( const Node &n ) const
2466{ return !Type::cmp( _type, ((TypeNode&)n)._type ); }
2467const Type *TypeNode::bottom_type() const { return _type; }
2468const Type* TypeNode::Value(PhaseGVN* phase) const { return _type; }
2469
2470//------------------------------ideal_reg--------------------------------------
2471uint TypeNode::ideal_reg() const {
2472 return _type->ideal_reg();
2473}
2474