1/*
2 * Copyright (c) 2015, 2019, Red Hat, Inc. All rights reserved.
3 *
4 * This code is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 only, as
6 * published by the Free Software Foundation.
7 *
8 * This code is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * version 2 for more details (a copy is included in the LICENSE file that
12 * accompanied this code).
13 *
14 * You should have received a copy of the GNU General Public License version
15 * 2 along with this work; if not, write to the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
19 * or visit www.oracle.com if you need additional information or have any
20 * questions.
21 *
22 */
23
24#include "precompiled.hpp"
25
26#include "gc/shenandoah/c2/shenandoahSupport.hpp"
27#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
28#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
29#include "gc/shenandoah/shenandoahForwarding.hpp"
30#include "gc/shenandoah/shenandoahHeap.hpp"
31#include "gc/shenandoah/shenandoahHeapRegion.hpp"
32#include "gc/shenandoah/shenandoahRuntime.hpp"
33#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
34#include "opto/arraycopynode.hpp"
35#include "opto/block.hpp"
36#include "opto/callnode.hpp"
37#include "opto/castnode.hpp"
38#include "opto/movenode.hpp"
39#include "opto/phaseX.hpp"
40#include "opto/rootnode.hpp"
41#include "opto/runtime.hpp"
42#include "opto/subnode.hpp"
43
// Expand all queued Shenandoah enqueue/load-reference barrier nodes into
// explicit control flow. Returns false if the compilation failed during
// expansion or during the optional follow-up round of loop optimizations.
bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  if ((state->enqueue_barriers_count() +
       state->load_reference_barriers_count()) > 0) {
    bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion;
    C->clear_major_progress();
    // Barrier expansion is implemented as a dedicated loop-opts pass.
    PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand);
    if (C->failing()) return false;
    PhaseIdealLoop::verify(igvn);
    DEBUG_ONLY(verify_raw_mem(C->root());)
    if (attempt_more_loopopts) {
      // Pretend progress was made so another round of loop optimizations
      // runs over the newly expanded barrier control flow.
      C->set_major_progress();
      if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
        return false;
      }
      C->clear_major_progress();
    }
  }
  return true;
}
64
65bool ShenandoahBarrierC2Support::is_heap_state_test(Node* iff, int mask) {
66 if (!UseShenandoahGC) {
67 return false;
68 }
69 assert(iff->is_If(), "bad input");
70 if (iff->Opcode() != Op_If) {
71 return false;
72 }
73 Node* bol = iff->in(1);
74 if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
75 return false;
76 }
77 Node* cmp = bol->in(1);
78 if (cmp->Opcode() != Op_CmpI) {
79 return false;
80 }
81 Node* in1 = cmp->in(1);
82 Node* in2 = cmp->in(2);
83 if (in2->find_int_con(-1) != 0) {
84 return false;
85 }
86 if (in1->Opcode() != Op_AndI) {
87 return false;
88 }
89 in2 = in1->in(2);
90 if (in2->find_int_con(-1) != mask) {
91 return false;
92 }
93 in1 = in1->in(1);
94
95 return is_gc_state_load(in1);
96}
97
// Returns true if 'iff' tests the HAS_FORWARDED bit of the thread-local
// gc-state byte, i.e. the "is the heap stable?" fast-path check.
bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
  return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
}
101
102bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
103 if (!UseShenandoahGC) {
104 return false;
105 }
106 if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
107 return false;
108 }
109 Node* addp = n->in(MemNode::Address);
110 if (!addp->is_AddP()) {
111 return false;
112 }
113 Node* base = addp->in(AddPNode::Address);
114 Node* off = addp->in(AddPNode::Offset);
115 if (base->Opcode() != Op_ThreadLocal) {
116 return false;
117 }
118 if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
119 return false;
120 }
121 return true;
122}
123
124bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
125 assert(phase->is_dominator(stop, start), "bad inputs");
126 ResourceMark rm;
127 Unique_Node_List wq;
128 wq.push(start);
129 for (uint next = 0; next < wq.size(); next++) {
130 Node *m = wq.at(next);
131 if (m == stop) {
132 continue;
133 }
134 if (m->is_SafePoint() && !m->is_CallLeaf()) {
135 return true;
136 }
137 if (m->is_Region()) {
138 for (uint i = 1; i < m->req(); i++) {
139 wq.push(m->in(i));
140 }
141 } else {
142 wq.push(m->in(0));
143 }
144 }
145 return false;
146}
147
// Try to common the gc-state load 'n' with an equivalent load of the same
// thread-local address at a dominating control, provided no safepoint
// (where the gc state could change) lies between the two controls.
// Returns true if 'n' was replaced by the dominating load.
bool ShenandoahBarrierC2Support::try_common_gc_state_load(Node *n, PhaseIdealLoop *phase) {
  assert(is_gc_state_load(n), "inconsistent");
  Node* addp = n->in(MemNode::Address);
  Node* dominator = NULL;
  for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
    Node* u = addp->fast_out(i);
    assert(is_gc_state_load(u), "inconsistent");
    if (u != n && phase->is_dominator(u->in(0), n->in(0))) {
      // Prefer the candidate whose control is highest in the dominator tree.
      if (dominator == NULL) {
        dominator = u;
      } else {
        if (phase->dom_depth(u->in(0)) < phase->dom_depth(dominator->in(0))) {
          dominator = u;
        }
      }
    }
  }
  if (dominator == NULL || has_safepoint_between(n->in(0), dominator->in(0), phase)) {
    return false;
  }
  phase->igvn().replace_node(n, dominator);

  return true;
}
172
173#ifdef ASSERT
// Debug-only helper: follow the def chain of 'in' (through casts, AddPs,
// phis, CMoves, encode/decode) and check that every reaching definition is
// either trivially safe (null, constant, parameter, fresh allocation, ...)
// or covered by the barrier kind required by 't'. Uses 'phis' as an explicit
// work stack to enumerate all phi/CMove inputs, and 'visited' to avoid
// revisiting them. Barriers encountered are recorded in 'barriers_used'.
// Returns false if some input is not covered.
bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
  assert(phis.size() == 0, "");

  while (true) {
    if (in->bottom_type() == TypePtr::NULL_PTR) {
      if (trace) {tty->print_cr("NULL");}
    } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
      if (trace) {tty->print_cr("Non oop");}
    } else if (t == ShenandoahLoad && ShenandoahOptimizeStableFinals &&
               in->bottom_type()->make_ptr()->isa_aryptr() &&
               in->bottom_type()->make_ptr()->is_aryptr()->is_stable()) {
      // Loads from stable arrays need no barrier when that optimization is on.
      if (trace) {tty->print_cr("Stable array load");}
    } else {
      if (in->is_ConstraintCast()) {
        in = in->in(1);
        continue;
      } else if (in->is_AddP()) {
        assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
        in = in->in(AddPNode::Address);
        continue;
      } else if (in->is_Con()) {
        if (trace) {
          tty->print("Found constant");
          in->dump();
        }
      } else if (in->Opcode() == Op_Parm) {
        if (trace) {
          tty->print("Found argument");
        }
      } else if (in->Opcode() == Op_CreateEx) {
        if (trace) {
          tty->print("Found create-exception");
        }
      } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
        if (trace) {
          tty->print("Found raw LoadP (OSR argument?)");
        }
      } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
        if (t == ShenandoahOopStore) {
          // An oop store additionally requires an enqueue barrier somewhere
          // on the path that was followed to get here.
          uint i = 0;
          for (; i < phis.size(); i++) {
            Node* n = phis.node_at(i);
            if (n->Opcode() == Op_ShenandoahEnqueueBarrier) {
              break;
            }
          }
          if (i == phis.size()) {
            return false;
          }
        }
        barriers_used.push(in);
        if (trace) {tty->print("Found barrier"); in->dump();}
      } else if (in->Opcode() == Op_ShenandoahEnqueueBarrier) {
        if (t != ShenandoahOopStore) {
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Found enqueue barrier"); in->dump();}
        // Remember the enqueue barrier on the stack so the check above
        // can find it, then keep following the input.
        phis.push(in, in->req());
        in = in->in(1);
        continue;
      } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
        if (trace) {
          tty->print("Found alloc");
          in->in(0)->dump();
        }
      } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
        if (trace) {
          tty->print("Found Java call");
        }
      } else if (in->is_Phi()) {
        if (!visited.test_set(in->_idx)) {
          // First visit: enumerate all phi inputs via the work stack.
          if (trace) {tty->print("Pushed phi:"); in->dump();}
          phis.push(in, 2);
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Already seen phi:"); in->dump();}
      } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
        if (!visited.test_set(in->_idx)) {
          // CMove is handled like a two-input phi (IfFalse then IfTrue).
          if (trace) {tty->print("Pushed cmovep:"); in->dump();}
          phis.push(in, CMoveNode::IfTrue);
          in = in->in(CMoveNode::IfFalse);
          continue;
        }
        if (trace) {tty->print("Already seen cmovep:"); in->dump();}
      } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
        in = in->in(1);
        continue;
      } else {
        return false;
      }
    }
    // Current input is covered: resume with the next pending phi/CMove input.
    bool cont = false;
    while (phis.is_nonempty()) {
      uint idx = phis.index();
      Node* phi = phis.node();
      if (idx >= phi->req()) {
        if (trace) {tty->print("Popped phi:"); phi->dump();}
        phis.pop();
        continue;
      }
      if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
      in = phi->in(idx);
      phis.set_index(idx+1);
      cont = true;
      break;
    }
    if (!cont) {
      break;
    }
  }
  return true;
}
288
289void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
290 if (n1 != NULL) {
291 n1->dump(+10);
292 }
293 if (n2 != NULL) {
294 n2->dump(+10);
295 }
296 fatal("%s", msg);
297}
298
// Debug-only whole-graph verification: walk the ideal graph from the root
// and check that every oop access which requires a Shenandoah barrier
// (loads, stores, compares, atomics, known runtime/intrinsic calls) actually
// has one on its input chain, and that every oop input of other node kinds
// is accounted for. Calls report_verify_failure() (fatal) on any violation.
void ShenandoahBarrierC2Support::verify(RootNode* root) {
  ResourceMark rm;
  Unique_Node_List wq;
  GrowableArray<Node*> barriers;
  Unique_Node_List barriers_used;
  Node_Stack phis(0);
  VectorSet visited(Thread::current()->resource_area());
  const bool trace = false;
  const bool verify_no_useless_barrier = false;

  wq.push(root);
  for (uint next = 0; next < wq.size(); next++) {
    Node *n = wq.at(next);
    if (n->is_Load()) {
      const bool trace = false;
      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
        // Metadata loads never need barriers.
        if (trace) {tty->print_cr("Load range/klass");}
      } else {
        const TypePtr* adr_type = n->as_Load()->adr_type();

        if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
          if (trace) {tty->print_cr("Mark load");}
        } else if (adr_type->isa_instptr() &&
                   adr_type->is_instptr()->klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
                   adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset) {
          // Reference.referent is handled specially by the GC.
          if (trace) {tty->print_cr("Reference.get()");}
        } else {
          bool verify = true;
          if (adr_type->isa_instptr()) {
            const TypeInstPtr* tinst = adr_type->is_instptr();
            ciKlass* k = tinst->klass();
            assert(k->is_instance_klass(), "");
            ciInstanceKlass* ik = (ciInstanceKlass*)k;
            int offset = adr_type->offset();

            // Final/stable fields may be exempt from barriers under the
            // corresponding optimization flags.
            if ((ik->debug_final_field_at(offset) && ShenandoahOptimizeInstanceFinals) ||
                (ik->debug_stable_field_at(offset) && ShenandoahOptimizeStableFinals)) {
              if (trace) {tty->print_cr("Final/stable");}
              verify = false;
            } else if (k == ciEnv::current()->Class_klass() &&
                       tinst->const_oop() != NULL &&
                       tinst->offset() >= (ik->size_helper() * wordSize)) {
              // Static field access through a constant java.lang.Class mirror.
              ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
              ciField* field = k->get_field_by_offset(tinst->offset(), true);
              if ((ShenandoahOptimizeStaticFinals && field->is_final()) ||
                  (ShenandoahOptimizeStableFinals && field->is_stable())) {
                verify = false;
              }
            }
          }

          if (verify && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: Load should have barriers", n);
          }
        }
      }
    } else if (n->is_Store()) {
      const bool trace = false;

      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
        Node* adr = n->in(MemNode::Address);
        bool verify = true;

        if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
          adr = adr->in(AddPNode::Address);
          if (adr->is_AddP()) {
            assert(adr->in(AddPNode::Base)->is_top(), "");
            adr = adr->in(AddPNode::Address);
            // Recognize stores into the thread-local SATB queue buffer:
            // those are part of the pre-barrier itself and need no barrier.
            if (adr->Opcode() == Op_LoadP &&
                adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
                adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
                adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
              if (trace) {tty->print_cr("SATB prebarrier");}
              verify = false;
            }
          }
        }

        if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Store should have barriers", n);
        }
      }
      if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CmpP) {
      const bool trace = false;

      Node* in1 = n->in(1);
      Node* in2 = n->in(2);
      if (in1->bottom_type()->isa_oopptr()) {
        if (trace) {tty->print("Verifying"); n->dump();}

        bool mark_inputs = false;
        // Comparisons against constants or freshly allocated objects need
        // no barriers; anything else must compare to-space copies.
        if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
            (in1->is_Con() || in2->is_Con())) {
          if (trace) {tty->print_cr("Comparison against a constant");}
          mark_inputs = true;
        } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
                   (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
          if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
          mark_inputs = true;
        } else {
          assert(in2->bottom_type()->isa_oopptr(), "");

          if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
              !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
          }
        }
        if (verify_no_useless_barrier &&
            mark_inputs &&
            (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
             !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
          phis.clear();
          visited.Reset();
        }
      }
    } else if (n->is_LoadStore()) {
      // Atomic operations: both the stored value and the address must be
      // covered by the appropriate barriers.
      if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
          !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
      }

      if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
      CallNode* call = n->as_Call();

      // Table of known leaf runtime/stub calls and, for each, the oop
      // arguments with the barrier kind each one requires. A pos of -1
      // terminates an entry's argument list.
      static struct {
        const char* name;
        struct {
          int pos;
          verify_type t;
        } args[6];
      } calls[] = {
        "aescrypt_encryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "aescrypt_decryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "multiplyToLen",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { TypeFunc::Parms+4, ShenandoahStore },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "squareToLen",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "montgomery_multiply",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+6, ShenandoahStore }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "montgomery_square",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+5, ShenandoahStore },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "mulAdd",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "vectorizedMismatch",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesCRC32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesAdler32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "updateBytesCRC32C",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "counterMode_AESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
        "cipherBlockChaining_encryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "cipherBlockChaining_decryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "shenandoah_clone_barrier",
        { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone}, { -1, ShenandoahNone},
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "ghash_processBlocks",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+2, ShenandoahLoad },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha1_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha256_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha512_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha1_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha256_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "sha512_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+1, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
        "encodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahStore }, { -1, ShenandoahNone },
          { -1, ShenandoahNone}, { -1, ShenandoahNone}, { -1, ShenandoahNone} },
      };

      if (call->is_call_to_arraycopystub()) {
        Node* dest = NULL;
        // The destination is the second pointer argument of the stub.
        const TypeTuple* args = n->as_Call()->_tf->domain();
        for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
          if (args->field_at(i)->isa_ptr()) {
            j++;
            if (j == 2) {
              dest = n->in(i);
              break;
            }
          }
        }
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
            !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
        }
      } else if (strlen(call->_name) > 5 &&
                 !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: _fill should have barriers", n);
        }
      } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
        // skip
      } else {
        const int calls_len = sizeof(calls) / sizeof(calls[0]);
        int i = 0;
        for (; i < calls_len; i++) {
          if (!strcmp(calls[i].name, call->_name)) {
            break;
          }
        }
        if (i != calls_len) {
          // Known call: verify every listed argument, then make sure no oop
          // argument was left out of the table.
          const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
          for (uint j = 0; j < args_len; j++) {
            int pos = calls[i].args[j].pos;
            if (pos == -1) {
              break;
            }
            if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
              report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
            }
          }
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              uint k = 0;
              for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
              if (k == args_len) {
                fatal("arg %d for call %s not covered", j, call->_name);
              }
            }
          }
        } else {
          // Unknown call: it must not take any oop arguments at all.
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              fatal("%s not covered", call->_name);
            }
          }
        }
      }
    } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
      // skip
    } else if (n->is_AddP()
               || n->is_Phi()
               || n->is_ConstraintCast()
               || n->Opcode() == Op_Return
               || n->Opcode() == Op_CMoveP
               || n->Opcode() == Op_CMoveN
               || n->Opcode() == Op_Rethrow
               || n->is_MemBar()
               || n->Opcode() == Op_Conv2B
               || n->Opcode() == Op_SafePoint
               || n->is_CallJava()
               || n->Opcode() == Op_Unlock
               || n->Opcode() == Op_EncodeP
               || n->Opcode() == Op_DecodeN) {
      // nothing to do
    } else {
      // Table of other node opcodes with oop inputs and the barrier kind
      // each input requires. A pos of -1 terminates an entry's input list.
      static struct {
        int opcode;
        struct {
          int pos;
          verify_type t;
        } inputs[2];
      } others[] = {
        Op_FastLock,
        { { 1, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_Lock,
        { { TypeFunc::Parms, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_ArrayCopy,
        { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
        Op_StrCompressedCopy,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_StrInflatedCopy,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_AryEq,
        { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } },
        Op_StrIndexOf,
        { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } },
        Op_StrComp,
        { { 2, ShenandoahLoad }, { 4, ShenandoahLoad } },
        Op_StrEquals,
        { { 2, ShenandoahLoad }, { 3, ShenandoahLoad } },
        Op_EncodeISOArray,
        { { 2, ShenandoahLoad }, { 3, ShenandoahStore } },
        Op_HasNegatives,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_CastP2X,
        { { 1, ShenandoahLoad }, { -1, ShenandoahNone} },
        Op_StrIndexOfChar,
        { { 2, ShenandoahLoad }, { -1, ShenandoahNone } },
      };

      const int others_len = sizeof(others) / sizeof(others[0]);
      int i = 0;
      for (; i < others_len; i++) {
        if (others[i].opcode == n->Opcode()) {
          break;
        }
      }
      uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
      if (i != others_len) {
        const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
        for (uint j = 0; j < inputs_len; j++) {
          int pos = others[i].inputs[j].pos;
          if (pos == -1) {
            break;
          }
          if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
          }
        }
        // Make sure no oop input of this node was left out of the table.
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            uint k = 0;
            for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
            if (k == inputs_len) {
              fatal("arg %d for node %s not covered", j, n->Name());
            }
          }
        }
      } else {
        // Unknown node kind: it must not have any oop inputs.
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != NULL && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            fatal("%s not covered", n->Name());
          }
        }
      }
    }

    if (n->is_SafePoint()) {
      SafePointNode* sfpt = n->as_SafePoint();
      if (verify_no_useless_barrier && sfpt->jvms() != NULL) {
        for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
          if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
            phis.clear();
            visited.Reset();
          }
        }
      }
    }
    for( uint i = 0; i < n->len(); ++i ) {
      Node *m = n->in(i);
      if (m == NULL) continue;

      // In most cases, inputs should be known to be non null. If it's
      // not the case, it could be a missing cast_not_null() in an
      // intrinsic or support might be needed in AddPNode::Ideal() to
      // avoid a NULL+offset input.
      if (!(n->is_Phi() ||
            (n->is_SafePoint() && (!n->is_CallRuntime() || !strcmp(n->as_Call()->_name, "shenandoah_wb_pre") || !strcmp(n->as_Call()->_name, "unsafe_arraycopy"))) ||
            n->Opcode() == Op_CmpP ||
            n->Opcode() == Op_CmpN ||
            (n->Opcode() == Op_StoreP && i == StoreNode::ValueIn) ||
            (n->Opcode() == Op_StoreN && i == StoreNode::ValueIn) ||
            n->is_ConstraintCast() ||
            n->Opcode() == Op_Return ||
            n->Opcode() == Op_Conv2B ||
            n->is_AddP() ||
            n->Opcode() == Op_CMoveP ||
            n->Opcode() == Op_CMoveN ||
            n->Opcode() == Op_Rethrow ||
            n->is_MemBar() ||
            n->is_Mem() ||
            n->Opcode() == Op_AryEq ||
            n->Opcode() == Op_SCMemProj ||
            n->Opcode() == Op_EncodeP ||
            n->Opcode() == Op_DecodeN ||
            n->Opcode() == Op_ShenandoahEnqueueBarrier ||
            n->Opcode() == Op_ShenandoahLoadReferenceBarrier)) {
        if (m->bottom_type()->make_oopptr() && m->bottom_type()->make_oopptr()->meet(TypePtr::NULL_PTR) == m->bottom_type()) {
          report_verify_failure("Shenandoah verification: null input", n, m);
        }
      }

      wq.push(m);
    }
  }

  if (verify_no_useless_barrier) {
    for (int i = 0; i < barriers.length(); i++) {
      Node* n = barriers.at(i);
      if (!barriers_used.member(n)) {
        tty->print("XXX useless barrier"); n->dump(-2);
        ShouldNotReachHere();
      }
    }
  }
}
721#endif
722
723bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
724 // That both nodes have the same control is not sufficient to prove
725 // domination, verify that there's no path from d to n
726 ResourceMark rm;
727 Unique_Node_List wq;
728 wq.push(d);
729 for (uint next = 0; next < wq.size(); next++) {
730 Node *m = wq.at(next);
731 if (m == n) {
732 return false;
733 }
734 if (m->is_Phi() && m->in(0)->is_Loop()) {
735 assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
736 } else {
737 for (uint i = 0; i < m->req(); i++) {
738 if (m->in(i) != NULL && phase->ctrl_or_self(m->in(i)) == c) {
739 wq.push(m->in(i));
740 }
741 }
742 }
743 }
744 return true;
745}
746
747bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
748 if (d_c != n_c) {
749 return phase->is_dominator(d_c, n_c);
750 }
751 return is_dominator_same_ctrl(d_c, d, n, phase);
752}
753
754Node* next_mem(Node* mem, int alias) {
755 Node* res = NULL;
756 if (mem->is_Proj()) {
757 res = mem->in(0);
758 } else if (mem->is_SafePoint() || mem->is_MemBar()) {
759 res = mem->in(TypeFunc::Memory);
760 } else if (mem->is_Phi()) {
761 res = mem->in(1);
762 } else if (mem->is_MergeMem()) {
763 res = mem->as_MergeMem()->memory_at(alias);
764 } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
765 assert(alias = Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
766 res = mem->in(MemNode::Memory);
767 } else {
768#ifdef ASSERT
769 mem->dump();
770#endif
771 ShouldNotReachHere();
772 }
773 return res;
774}
775
// Walk the idom chain from 'c' up to 'dom' and classify the control flow on
// the path. Returns:
//  - NULL if the path has no branches (uncommon-trap projections excepted),
//  - the single If projection when allow_one_proj is true and exactly one
//    non-trap projection was found,
//  - NodeSentinel if an unsupported or additional branching shape was seen.
Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
  Node* iffproj = NULL;
  while (c != dom) {
    Node* next = phase->idom(c);
    assert(next->unique_ctrl_out() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
    if (c->is_Region()) {
      // Collect the whole control subgraph merged by this region (stopping
      // at the idom), then make sure none of it branches out except through
      // uncommon-trap projections.
      ResourceMark rm;
      Unique_Node_List wq;
      wq.push(c);
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        if (n == next) {
          continue;
        }
        if (n->is_Region()) {
          for (uint j = 1; j < n->req(); j++) {
            wq.push(n->in(j));
          }
        } else {
          wq.push(n->in(0));
        }
      }
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        assert(n->is_CFG(), "");
        if (n->is_Multi()) {
          for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
            Node* u = n->fast_out(j);
            if (u->is_CFG()) {
              if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
                return NodeSentinel;
              }
            }
          }
        }
      }
    } else if (c->is_Proj()) {
      if (c->is_IfProj()) {
        if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != NULL) {
          // Uncommon-trap branch: tolerated, keep walking.
          // continue;
        } else {
          if (!allow_one_proj) {
            return NodeSentinel;
          }
          if (iffproj == NULL) {
            iffproj = c;
          } else {
            // More than one real If projection on the path.
            return NodeSentinel;
          }
        }
      } else if (c->Opcode() == Op_JumpProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CatchProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CProj && next->Opcode() == Op_NeverBranch) {
        return NodeSentinel; // unsupported
      } else {
        assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
      }
    }
    c = next;
  }
  return iffproj;
}
840
// Walk up the memory graph from 'mem' (for slice 'alias') until a memory
// state is found whose control strictly dominates 'ctrl'. On success, 'mem'
// is returned and 'mem_ctrl' is set to its control; returns NULL if the walk
// revisits a node (cycle) before finding a dominating state.
Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
  ResourceMark rm;
  VectorSet wq(Thread::current()->resource_area());
  wq.set(mem->_idx);
  mem_ctrl = phase->ctrl_or_self(mem);
  // Keep stepping while mem's control does not dominate ctrl, or equals it
  // (we want a strictly dominating state).
  while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
    mem = next_mem(mem, alias);
    if (wq.test_set(mem->_idx)) {
      return NULL;
    }
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  if (mem->is_MergeMem()) {
    // Narrow a merge to the slice we care about.
    mem = mem->as_MergeMem()->memory_at(alias);
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  return mem;
}
859
860Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
861 Node* mem = NULL;
862 Node* c = ctrl;
863 do {
864 if (c->is_Region()) {
865 Node* phi_bottom = NULL;
866 for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == NULL; i++) {
867 Node* u = c->fast_out(i);
868 if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
869 if (u->adr_type() == TypePtr::BOTTOM) {
870 mem = u;
871 }
872 }
873 }
874 } else {
875 if (c->is_Call() && c->as_Call()->adr_type() != NULL) {
876 CallProjections projs;
877 c->as_Call()->extract_projections(&projs, true, false);
878 if (projs.fallthrough_memproj != NULL) {
879 if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
880 if (projs.catchall_memproj == NULL) {
881 mem = projs.fallthrough_memproj;
882 } else {
883 if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
884 mem = projs.fallthrough_memproj;
885 } else {
886 assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
887 mem = projs.catchall_memproj;
888 }
889 }
890 }
891 } else {
892 Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
893 if (proj != NULL &&
894 proj->adr_type() == TypePtr::BOTTOM) {
895 mem = proj;
896 }
897 }
898 } else {
899 for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
900 Node* u = c->fast_out(i);
901 if (u->is_Proj() &&
902 u->bottom_type() == Type::MEMORY &&
903 u->adr_type() == TypePtr::BOTTOM) {
904 assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
905 assert(mem == NULL, "only one proj");
906 mem = u;
907 }
908 }
909 assert(!c->is_Call() || c->as_Call()->adr_type() != NULL || mem == NULL, "no mem projection expected");
910 }
911 }
912 c = phase->idom(c);
913 } while (mem == NULL);
914 return mem;
915}
916
917void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
918 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
919 Node* u = n->fast_out(i);
920 if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
921 uses.push(u);
922 }
923 }
924}
925
// Turn a strip-mined loop nest (OuterStripMinedLoop head plus inner
// CountedLoop) into a plain loop nest: replace the outer head and its
// loop-end with generic LoopNode/IfNode clones and clear the inner loop's
// strip-mined flag. Used before barrier expansion so the nest no longer
// looks strip mined to loop verification.
static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
  OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
  // Generic LoopNode standing in for the strip-mined outer loop head.
  Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
  phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
  // Generic IfNode standing in for the strip-mined loop end.
  Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
  phase->register_control(new_le, phase->get_loop(le), le->in(0));
  // lazy_replace swaps the specialized nodes for the generic clones.
  phase->lazy_replace(outer, new_outer);
  phase->lazy_replace(le, new_le);
  inner->clear_strip_mined();
}
936
// Emit the "heap stable?" test at ctrl: load the per-thread gc-state byte
// and test the HAS_FORWARDED bit. On return, ctrl is the projection taken
// when the bit is set (heap unstable, barrier work needed) and
// heap_stable_ctrl the projection taken when it is clear.
void ShenandoahBarrierC2Support::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl,
                                                  PhaseIdealLoop* phase) {
  IdealLoopTree* loop = phase->get_loop(ctrl);
  Node* thread = new ThreadLocalNode();
  phase->register_new_node(thread, ctrl);
  Node* offset = phase->igvn().MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  // Constant offset: pin at the root so it can be commoned across barriers.
  phase->set_ctrl(offset, phase->C->root());
  Node* gc_state_addr = new AddPNode(phase->C->top(), thread, offset);
  phase->register_new_node(gc_state_addr, ctrl);
  uint gc_state_idx = Compile::AliasIdxRaw;
  const TypePtr* gc_state_adr_type = NULL; // debug-mode-only argument
  debug_only(gc_state_adr_type = phase->C->get_adr_type(gc_state_idx));

  Node* gc_state = new LoadBNode(ctrl, raw_mem, gc_state_addr, gc_state_adr_type, TypeInt::BYTE, MemNode::unordered);
  phase->register_new_node(gc_state, ctrl);
  // (gc_state & HAS_FORWARDED) != 0  <=>  heap is not stable.
  Node* heap_stable_and = new AndINode(gc_state, phase->igvn().intcon(ShenandoahHeap::HAS_FORWARDED));
  phase->register_new_node(heap_stable_and, ctrl);
  Node* heap_stable_cmp = new CmpINode(heap_stable_and, phase->igvn().zerocon(T_INT));
  phase->register_new_node(heap_stable_cmp, ctrl);
  Node* heap_stable_test = new BoolNode(heap_stable_cmp, BoolTest::ne);
  phase->register_new_node(heap_stable_test, ctrl);
  IfNode* heap_stable_iff = new IfNode(ctrl, heap_stable_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  phase->register_control(heap_stable_iff, loop, ctrl);

  // False branch: bit clear, heap stable.
  heap_stable_ctrl = new IfFalseNode(heap_stable_iff);
  phase->register_control(heap_stable_ctrl, loop, heap_stable_iff);
  // True branch: bit set, continue with the barrier.
  ctrl = new IfTrueNode(heap_stable_iff);
  phase->register_control(ctrl, loop, heap_stable_iff);

  assert(is_heap_stable_test(heap_stable_iff), "Should match the shape");
}
968
969void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
970 const Type* val_t = phase->igvn().type(val);
971 if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
972 IdealLoopTree* loop = phase->get_loop(ctrl);
973 Node* null_cmp = new CmpPNode(val, phase->igvn().zerocon(T_OBJECT));
974 phase->register_new_node(null_cmp, ctrl);
975 Node* null_test = new BoolNode(null_cmp, BoolTest::ne);
976 phase->register_new_node(null_test, ctrl);
977 IfNode* null_iff = new IfNode(ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
978 phase->register_control(null_iff, loop, ctrl);
979 ctrl = new IfTrueNode(null_iff);
980 phase->register_control(ctrl, loop, null_iff);
981 null_ctrl = new IfFalseNode(null_iff);
982 phase->register_control(null_ctrl, loop, null_iff);
983 }
984}
985
// Clone the If that guards the uncommon-trap projection unc_ctrl so the
// same null test is performed at c. On return c is the cloned not-null
// (IfTrue) projection; returns a fresh CastPP of the uncasted value, pinned
// on that projection.
Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) {
  IdealLoopTree *loop = phase->get_loop(c);
  Node* iff = unc_ctrl->in(0);
  assert(iff->is_If(), "broken");
  // Clone the test and rewire its control input to c.
  Node* new_iff = iff->clone();
  new_iff->set_req(0, c);
  phase->register_control(new_iff, loop, c);
  Node* iffalse = new IfFalseNode(new_iff->as_If());
  phase->register_control(iffalse, loop, new_iff);
  Node* iftrue = new IfTrueNode(new_iff->as_If());
  phase->register_control(iftrue, loop, new_iff);
  c = iftrue;
  const Type *t = phase->igvn().type(val);
  assert(val->Opcode() == Op_CastPP, "expect cast to non null here");
  Node* uncasted_val = val->in(1);
  // Re-cast the raw value, control dependent on the cloned check.
  val = new CastPPNode(uncasted_val, t);
  val->init_req(0, c);
  phase->register_new_node(val, c);
  return val;
}
1006
// After a null check has been cloned (see clone_null_check), the uncommon
// trap unc must be reached through new_unc_ctrl instead of the old If's
// other projection. Transitively move every node pinned on that old
// projection to new_unc_ctrl, then redirect the control use itself.
void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl,
                                                Unique_Node_List& uses, PhaseIdealLoop* phase) {
  IfNode* iff = unc_ctrl->in(0)->as_If();
  // proj is the If's other projection: the one leading to the trap.
  Node* proj = iff->proj_out(0);
  assert(proj != unc_ctrl, "bad projection");
  Node* use = proj->unique_ctrl_out();

  assert(use == unc || use->is_Region(), "what else?");

  uses.clear();
  if (use == unc) {
    // Trap call hangs directly off the projection: seed the worklist with
    // its inputs that are pinned there.
    phase->set_idom(use, new_unc_ctrl, phase->dom_depth(use));
    for (uint i = 1; i < unc->req(); i++) {
      Node* n = unc->in(i);
      if (phase->has_ctrl(n) && phase->get_ctrl(n) == proj) {
        uses.push(n);
      }
    }
  } else {
    // Trap reached through a Region: seed the worklist with the Phi inputs
    // flowing in through the projection's edge.
    assert(use->is_Region(), "what else?");
    uint idx = 1;
    for (; use->in(idx) != proj; idx++);
    for (DUIterator_Fast imax, i = use->fast_outs(imax); i < imax; i++) {
      Node* u = use->fast_out(i);
      if (u->is_Phi() && phase->get_ctrl(u->in(idx)) == proj) {
        uses.push(u->in(idx));
      }
    }
  }
  // Transitive closure: move everything pinned at proj to new_unc_ctrl.
  for(uint next = 0; next < uses.size(); next++ ) {
    Node *n = uses.at(next);
    assert(phase->get_ctrl(n) == proj, "bad control");
    phase->set_ctrl_and_loop(n, new_unc_ctrl);
    if (n->in(0) == proj) {
      phase->igvn().replace_input_of(n, 0, new_unc_ctrl);
    }
    for (uint i = 0; i < n->req(); i++) {
      Node* m = n->in(i);
      if (m != NULL && phase->has_ctrl(m) && phase->get_ctrl(m) == proj) {
        uses.push(m);
      }
    }
  }

  // Finally switch the control successor over to the new projection.
  phase->igvn().rehash_node_delayed(use);
  int nb = use->replace_edge(proj, new_unc_ctrl);
  assert(nb == 1, "only use expected");
}
1055
// Emit the collection-set membership test at ctrl: index the in-cset
// fast-test table with (address >> region_size_bytes_shift) and test the
// loaded byte against zero. On return, ctrl is the in-cset (byte != 0)
// projection and not_cset_ctrl the not-in-cset (byte == 0) projection.
void ShenandoahBarrierC2Support::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
  IdealLoopTree *loop = phase->get_loop(ctrl);
  // Convert the oop to a raw machine word for address arithmetic.
  Node* raw_rbtrue = new CastP2XNode(ctrl, val);
  phase->register_new_node(raw_rbtrue, ctrl);
  // Table index = object address >> region size shift.
  Node* cset_offset = new URShiftXNode(raw_rbtrue, phase->igvn().intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));
  phase->register_new_node(cset_offset, ctrl);
  Node* in_cset_fast_test_base_addr = phase->igvn().makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
  // Constant table base: pin at the root so it can be commoned.
  phase->set_ctrl(in_cset_fast_test_base_addr, phase->C->root());
  Node* in_cset_fast_test_adr = new AddPNode(phase->C->top(), in_cset_fast_test_base_addr, cset_offset);
  phase->register_new_node(in_cset_fast_test_adr, ctrl);
  uint in_cset_fast_test_idx = Compile::AliasIdxRaw;
  const TypePtr* in_cset_fast_test_adr_type = NULL; // debug-mode-only argument
  debug_only(in_cset_fast_test_adr_type = phase->C->get_adr_type(in_cset_fast_test_idx));
  Node* in_cset_fast_test_load = new LoadBNode(ctrl, raw_mem, in_cset_fast_test_adr, in_cset_fast_test_adr_type, TypeInt::BYTE, MemNode::unordered);
  phase->register_new_node(in_cset_fast_test_load, ctrl);
  // Byte == 0 means the region is not in the collection set.
  Node* in_cset_fast_test_cmp = new CmpINode(in_cset_fast_test_load, phase->igvn().zerocon(T_INT));
  phase->register_new_node(in_cset_fast_test_cmp, ctrl);
  Node* in_cset_fast_test_test = new BoolNode(in_cset_fast_test_cmp, BoolTest::eq);
  phase->register_new_node(in_cset_fast_test_test, ctrl);
  IfNode* in_cset_fast_test_iff = new IfNode(ctrl, in_cset_fast_test_test, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  phase->register_control(in_cset_fast_test_iff, loop, ctrl);

  // True branch: not in cset, barrier can be skipped.
  not_cset_ctrl = new IfTrueNode(in_cset_fast_test_iff);
  phase->register_control(not_cset_ctrl, loop, in_cset_fast_test_iff);

  // False branch: in cset, continue with the barrier.
  ctrl = new IfFalseNode(in_cset_fast_test_iff);
  phase->register_control(ctrl, loop, in_cset_fast_test_iff);
}
1084
// Emit the slow-path runtime call to the load-reference-barrier stub at
// ctrl. On return: ctrl is the call's control projection, val the returned
// oop re-cast to the input's (non-constant) oop type, and result_mem the
// call's memory projection.
void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node*& result_mem, Node* raw_mem, PhaseIdealLoop* phase) {
  IdealLoopTree*loop = phase->get_loop(ctrl);
  const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr()->cast_to_nonconst();

  // The slow path stub consumes and produces raw memory in addition
  // to the existing memory edges
  Node* base = find_bottom_mem(ctrl, phase);
  MergeMemNode* mm = MergeMemNode::make(base);
  mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
  phase->register_new_node(mm, ctrl);

  Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT), "shenandoah_load_reference_barrier", TypeRawPtr::BOTTOM);
  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O, phase->C->top());
  call->init_req(TypeFunc::Memory, mm);
  call->init_req(TypeFunc::FramePtr, phase->C->top());
  call->init_req(TypeFunc::ReturnAdr, phase->C->top());
  call->init_req(TypeFunc::Parms, val);
  phase->register_control(call, loop, ctrl);
  // Hook up the call's projections and hand them back to the caller.
  ctrl = new ProjNode(call, TypeFunc::Control);
  phase->register_control(ctrl, loop, call);
  result_mem = new ProjNode(call, TypeFunc::Memory);
  phase->register_new_node(result_mem, call);
  val = new ProjNode(call, TypeFunc::Parms);
  phase->register_new_node(val, call);
  // Restore the precise oop type lost through the stub's raw signature.
  val = new CheckCastPPNode(ctrl, val, obj_type);
  phase->register_new_node(val, ctrl);
}
1113
// After a barrier has been expanded into the control-flow region `region`,
// rewire the nodes that were hanging off the barrier's original control so
// they sit below the expanded barrier instead. The raw memory feeding the
// barrier (and anything it transitively depends on at the same control)
// must stay above, so it is collected in uses_to_ignore first. Nodes with
// _idx >= last were created by the expansion itself and are skipped.
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
  Node* ctrl = phase->get_ctrl(barrier);
  Node* init_raw_mem = fixer.find_mem(ctrl, barrier);

  // Update the control of all nodes that should be after the
  // barrier control flow
  uses.clear();
  // Every node that is control dependent on the barrier's input
  // control will be after the expanded barrier. The raw memory (if
  // its memory is control dependent on the barrier's input control)
  // must stay above the barrier.
  uses_to_ignore.clear();
  if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
    uses_to_ignore.push(init_raw_mem);
  }
  // Transitive closure over init_raw_mem's inputs at the same control.
  for (uint next = 0; next < uses_to_ignore.size(); next++) {
    Node *n = uses_to_ignore.at(next);
    for (uint i = 0; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in != NULL && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
        uses_to_ignore.push(in);
      }
    }
  }
  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
    Node* u = ctrl->fast_out(i);
    // Skip: expansion-created nodes, the barrier itself, the raw-mem chain,
    // Region/Phi merges at ctrl, and CreateEx pinned at a CatchProj.
    if (u->_idx < last &&
        u != barrier &&
        !uses_to_ignore.member(u) &&
        (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
        (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
      Node* old_c = phase->ctrl_or_self(u);
      Node* c = old_c;
      if (c != ctrl ||
          is_dominator_same_ctrl(old_c, barrier, u, phase) ||
          ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
        // Move this use's control edge from ctrl to the barrier's region.
        phase->igvn().rehash_node_delayed(u);
        int nb = u->replace_edge(ctrl, region);
        if (u->is_CFG()) {
          if (phase->idom(u) == ctrl) {
            phase->set_idom(u, region, phase->dom_depth(region));
          }
        } else if (phase->get_ctrl(u) == ctrl) {
          assert(u != init_raw_mem, "should leave input raw mem above the barrier");
          uses.push(u);
        }
        assert(nb == 1, "more than 1 ctrl input?");
        // Edge removed: back up the DU iterator accordingly.
        --i, imax -= nb;
      }
    }
  }
}
1166
// A node n was cloned as n_clone along the catchall (exceptional) path of a
// call whose projections are in projs. For a use at control c that merges
// both paths, build a Phi (recursively, one per intermediate Region between
// c and ctrl) selecting n on inputs dominated by the fallthrough catch
// projection and n_clone on inputs dominated by the catchall one.
static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
  Node* region = NULL;
  // Find the last Region on the idom chain from c up to ctrl.
  while (c != ctrl) {
    if (c->is_Region()) {
      region = c;
    }
    c = phase->idom(c);
  }
  assert(region != NULL, "");
  Node* phi = new PhiNode(region, n->bottom_type());
  for (uint j = 1; j < region->req(); j++) {
    Node* in = region->in(j);
    if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
      // Normal path: original node.
      phi->init_req(j, n);
    } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
      // Exceptional path: the clone.
      phi->init_req(j, n_clone);
    } else {
      // Mixed path: recurse to build a Phi for the nested merge.
      phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
    }
  }
  phase->register_new_node(phi, region);
  return phi;
}
1190
1191void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
1192 ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
1193
1194 Unique_Node_List uses;
1195 for (int i = 0; i < state->enqueue_barriers_count(); i++) {
1196 Node* barrier = state->enqueue_barrier(i);
1197 Node* ctrl = phase->get_ctrl(barrier);
1198 IdealLoopTree* loop = phase->get_loop(ctrl);
1199 if (loop->_head->is_OuterStripMinedLoop()) {
1200 // Expanding a barrier here will break loop strip mining
1201 // verification. Transform the loop so the loop nest doesn't
1202 // appear as strip mined.
1203 OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1204 hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1205 }
1206 }
1207
1208 Node_Stack stack(0);
1209 Node_List clones;
1210 for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1211 ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1212 if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1213 continue;
1214 }
1215
1216 Node* ctrl = phase->get_ctrl(lrb);
1217 Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1218
1219 CallStaticJavaNode* unc = NULL;
1220 Node* unc_ctrl = NULL;
1221 Node* uncasted_val = val;
1222
1223 for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1224 Node* u = lrb->fast_out(i);
1225 if (u->Opcode() == Op_CastPP &&
1226 u->in(0) != NULL &&
1227 phase->is_dominator(u->in(0), ctrl)) {
1228 const Type* u_t = phase->igvn().type(u);
1229
1230 if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
1231 u->in(0)->Opcode() == Op_IfTrue &&
1232 u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
1233 u->in(0)->in(0)->is_If() &&
1234 u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
1235 u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
1236 u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
1237 u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
1238 u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
1239 IdealLoopTree* loop = phase->get_loop(ctrl);
1240 IdealLoopTree* unc_loop = phase->get_loop(u->in(0));
1241
1242 if (!unc_loop->is_member(loop)) {
1243 continue;
1244 }
1245
1246 Node* branch = no_branches(ctrl, u->in(0), false, phase);
1247 assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch");
1248 if (branch == NodeSentinel) {
1249 continue;
1250 }
1251
1252 phase->igvn().replace_input_of(u, 1, val);
1253 phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u);
1254 phase->set_ctrl(u, u->in(0));
1255 phase->set_ctrl(lrb, u->in(0));
1256 unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
1257 unc_ctrl = u->in(0);
1258 val = u;
1259
1260 for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
1261 Node* u = val->fast_out(j);
1262 if (u == lrb) continue;
1263 phase->igvn().rehash_node_delayed(u);
1264 int nb = u->replace_edge(val, lrb);
1265 --j; jmax -= nb;
1266 }
1267
1268 RegionNode* r = new RegionNode(3);
1269 IfNode* iff = unc_ctrl->in(0)->as_If();
1270
1271 Node* ctrl_use = unc_ctrl->unique_ctrl_out();
1272 Node* unc_ctrl_clone = unc_ctrl->clone();
1273 phase->register_control(unc_ctrl_clone, loop, iff);
1274 Node* c = unc_ctrl_clone;
1275 Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase);
1276 r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0));
1277
1278 phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0));
1279 phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl));
1280 phase->lazy_replace(c, unc_ctrl);
1281 c = NULL;;
1282 phase->igvn().replace_input_of(val, 0, unc_ctrl_clone);
1283 phase->set_ctrl(val, unc_ctrl_clone);
1284
1285 IfNode* new_iff = new_cast->in(0)->in(0)->as_If();
1286 fix_null_check(unc, unc_ctrl_clone, r, uses, phase);
1287 Node* iff_proj = iff->proj_out(0);
1288 r->init_req(2, iff_proj);
1289 phase->register_control(r, phase->ltree_root(), iff);
1290
1291 Node* new_bol = new_iff->in(1)->clone();
1292 Node* new_cmp = new_bol->in(1)->clone();
1293 assert(new_cmp->Opcode() == Op_CmpP, "broken");
1294 assert(new_cmp->in(1) == val->in(1), "broken");
1295 new_bol->set_req(1, new_cmp);
1296 new_cmp->set_req(1, lrb);
1297 phase->register_new_node(new_bol, new_iff->in(0));
1298 phase->register_new_node(new_cmp, new_iff->in(0));
1299 phase->igvn().replace_input_of(new_iff, 1, new_bol);
1300 phase->igvn().replace_input_of(new_cast, 1, lrb);
1301
1302 for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
1303 Node* u = lrb->fast_out(i);
1304 if (u == new_cast || u == new_cmp) {
1305 continue;
1306 }
1307 phase->igvn().rehash_node_delayed(u);
1308 int nb = u->replace_edge(lrb, new_cast);
1309 assert(nb > 0, "no update?");
1310 --i; imax -= nb;
1311 }
1312
1313 for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1314 Node* u = val->fast_out(i);
1315 if (u == lrb) {
1316 continue;
1317 }
1318 phase->igvn().rehash_node_delayed(u);
1319 int nb = u->replace_edge(val, new_cast);
1320 assert(nb > 0, "no update?");
1321 --i; imax -= nb;
1322 }
1323
1324 ctrl = unc_ctrl_clone;
1325 phase->set_ctrl_and_loop(lrb, ctrl);
1326 break;
1327 }
1328 }
1329 }
1330 if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
1331 CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
1332 CallProjections projs;
1333 call->extract_projections(&projs, false, false);
1334
1335 Node* lrb_clone = lrb->clone();
1336 phase->register_new_node(lrb_clone, projs.catchall_catchproj);
1337 phase->set_ctrl(lrb, projs.fallthrough_catchproj);
1338
1339 stack.push(lrb, 0);
1340 clones.push(lrb_clone);
1341
1342 do {
1343 assert(stack.size() == clones.size(), "");
1344 Node* n = stack.node();
1345#ifdef ASSERT
1346 if (n->is_Load()) {
1347 Node* mem = n->in(MemNode::Memory);
1348 for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
1349 Node* u = mem->fast_out(j);
1350 assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
1351 }
1352 }
1353#endif
1354 uint idx = stack.index();
1355 Node* n_clone = clones.at(clones.size()-1);
1356 if (idx < n->outcnt()) {
1357 Node* u = n->raw_out(idx);
1358 Node* c = phase->ctrl_or_self(u);
1359 if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
1360 stack.set_index(idx+1);
1361 assert(!u->is_CFG(), "");
1362 stack.push(u, 0);
1363 Node* u_clone = u->clone();
1364 int nb = u_clone->replace_edge(n, n_clone);
1365 assert(nb > 0, "should have replaced some uses");
1366 phase->register_new_node(u_clone, projs.catchall_catchproj);
1367 clones.push(u_clone);
1368 phase->set_ctrl(u, projs.fallthrough_catchproj);
1369 } else {
1370 bool replaced = false;
1371 if (u->is_Phi()) {
1372 for (uint k = 1; k < u->req(); k++) {
1373 if (u->in(k) == n) {
1374 if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
1375 phase->igvn().replace_input_of(u, k, n_clone);
1376 replaced = true;
1377 } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
1378 phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
1379 replaced = true;
1380 }
1381 }
1382 }
1383 } else {
1384 if (phase->is_dominator(projs.catchall_catchproj, c)) {
1385 phase->igvn().rehash_node_delayed(u);
1386 int nb = u->replace_edge(n, n_clone);
1387 assert(nb > 0, "should have replaced some uses");
1388 replaced = true;
1389 } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
1390 phase->igvn().rehash_node_delayed(u);
1391 int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase));
1392 assert(nb > 0, "should have replaced some uses");
1393 replaced = true;
1394 }
1395 }
1396 if (!replaced) {
1397 stack.set_index(idx+1);
1398 }
1399 }
1400 } else {
1401 stack.pop();
1402 clones.pop();
1403 }
1404 } while (stack.size() > 0);
1405 assert(stack.size() == 0 && clones.size() == 0, "");
1406 }
1407 }
1408
1409 for (int i = 0; i < state->load_reference_barriers_count(); i++) {
1410 ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1411 if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1412 continue;
1413 }
1414 Node* ctrl = phase->get_ctrl(lrb);
1415 IdealLoopTree* loop = phase->get_loop(ctrl);
1416 if (loop->_head->is_OuterStripMinedLoop()) {
1417 // Expanding a barrier here will break loop strip mining
1418 // verification. Transform the loop so the loop nest doesn't
1419 // appear as strip mined.
1420 OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop();
1421 hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1422 }
1423 }
1424
1425 // Expand load-reference-barriers
1426 MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1427 Unique_Node_List uses_to_ignore;
1428 for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1429 ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1430 if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) {
1431 phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1432 continue;
1433 }
1434 uint last = phase->C->unique();
1435 Node* ctrl = phase->get_ctrl(lrb);
1436 Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1437
1438
1439 Node* orig_ctrl = ctrl;
1440
1441 Node* raw_mem = fixer.find_mem(ctrl, lrb);
1442 Node* init_raw_mem = raw_mem;
1443 Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1444
1445 IdealLoopTree *loop = phase->get_loop(ctrl);
1446 CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn());
1447 Node* unc_ctrl = NULL;
1448 if (unc != NULL) {
1449 if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) {
1450 unc = NULL;
1451 } else {
1452 unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control);
1453 }
1454 }
1455
1456 Node* uncasted_val = val;
1457 if (unc != NULL) {
1458 uncasted_val = val->in(1);
1459 }
1460
1461 Node* heap_stable_ctrl = NULL;
1462 Node* null_ctrl = NULL;
1463
1464 assert(val->bottom_type()->make_oopptr(), "need oop");
1465 assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant");
1466
1467 enum { _heap_stable = 1, _not_cset, _fwded, _evac_path, _null_path, PATH_LIMIT };
1468 Node* region = new RegionNode(PATH_LIMIT);
1469 Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr());
1470 Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1471
1472 // Stable path.
1473 test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
1474 IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1475
1476 // Heap stable case
1477 region->init_req(_heap_stable, heap_stable_ctrl);
1478 val_phi->init_req(_heap_stable, uncasted_val);
1479 raw_mem_phi->init_req(_heap_stable, raw_mem);
1480
1481 Node* reg2_ctrl = NULL;
1482 // Null case
1483 test_null(ctrl, val, null_ctrl, phase);
1484 if (null_ctrl != NULL) {
1485 reg2_ctrl = null_ctrl->in(0);
1486 region->init_req(_null_path, null_ctrl);
1487 val_phi->init_req(_null_path, uncasted_val);
1488 raw_mem_phi->init_req(_null_path, raw_mem);
1489 } else {
1490 region->del_req(_null_path);
1491 val_phi->del_req(_null_path);
1492 raw_mem_phi->del_req(_null_path);
1493 }
1494
1495 // Test for in-cset.
1496 // Wires !in_cset(obj) to slot 2 of region and phis
1497 Node* not_cset_ctrl = NULL;
1498 in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase);
1499 if (not_cset_ctrl != NULL) {
1500 if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0);
1501 region->init_req(_not_cset, not_cset_ctrl);
1502 val_phi->init_req(_not_cset, uncasted_val);
1503 raw_mem_phi->init_req(_not_cset, raw_mem);
1504 }
1505
1506 // Resolve object when orig-value is in cset.
1507 // Make the unconditional resolve for fwdptr.
1508 Node* new_val = uncasted_val;
1509 if (unc_ctrl != NULL) {
1510 // Clone the null check in this branch to allow implicit null check
1511 new_val = clone_null_check(ctrl, val, unc_ctrl, phase);
1512 fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase);
1513
1514 IfNode* iff = unc_ctrl->in(0)->as_If();
1515 phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1));
1516 }
1517 Node* addr = new AddPNode(new_val, uncasted_val, phase->igvn().MakeConX(oopDesc::mark_offset_in_bytes()));
1518 phase->register_new_node(addr, ctrl);
1519 assert(new_val->bottom_type()->isa_oopptr(), "what else?");
1520 Node* markword = new LoadXNode(ctrl, raw_mem, addr, TypeRawPtr::BOTTOM, TypeX_X, MemNode::unordered);
1521 phase->register_new_node(markword, ctrl);
1522
1523 // Test if object is forwarded. This is the case if lowest two bits are set.
1524 Node* masked = new AndXNode(markword, phase->igvn().MakeConX(markOopDesc::lock_mask_in_place));
1525 phase->register_new_node(masked, ctrl);
1526 Node* cmp = new CmpXNode(masked, phase->igvn().MakeConX(markOopDesc::marked_value));
1527 phase->register_new_node(cmp, ctrl);
1528
1529 // Only branch to LRB stub if object is not forwarded; otherwise reply with fwd ptr
1530 Node* bol = new BoolNode(cmp, BoolTest::eq); // Equals 3 means it's forwarded
1531 phase->register_new_node(bol, ctrl);
1532
1533 IfNode* iff = new IfNode(ctrl, bol, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1534 phase->register_control(iff, loop, ctrl);
1535 Node* if_fwd = new IfTrueNode(iff);
1536 phase->register_control(if_fwd, loop, iff);
1537 Node* if_not_fwd = new IfFalseNode(iff);
1538 phase->register_control(if_not_fwd, loop, iff);
1539
1540 // Decode forward pointer: since we already have the lowest bits, we can just subtract them
1541 // from the mark word without the need for large immediate mask.
1542 Node* masked2 = new SubXNode(markword, masked);
1543 phase->register_new_node(masked2, if_fwd);
1544 Node* fwdraw = new CastX2PNode(masked2);
1545 fwdraw->init_req(0, if_fwd);
1546 phase->register_new_node(fwdraw, if_fwd);
1547 Node* fwd = new CheckCastPPNode(NULL, fwdraw, val->bottom_type());
1548 phase->register_new_node(fwd, if_fwd);
1549
1550 // Wire up not-equal-path in slots 3.
1551 region->init_req(_fwded, if_fwd);
1552 val_phi->init_req(_fwded, fwd);
1553 raw_mem_phi->init_req(_fwded, raw_mem);
1554
1555 // Call lrb-stub and wire up that path in slots 4
1556 Node* result_mem = NULL;
1557 ctrl = if_not_fwd;
1558 fwd = new_val;
1559 call_lrb_stub(ctrl, fwd, result_mem, raw_mem, phase);
1560 region->init_req(_evac_path, ctrl);
1561 val_phi->init_req(_evac_path, fwd);
1562 raw_mem_phi->init_req(_evac_path, result_mem);
1563
1564 phase->register_control(region, loop, heap_stable_iff);
1565 Node* out_val = val_phi;
1566 phase->register_new_node(val_phi, region);
1567 phase->register_new_node(raw_mem_phi, region);
1568
1569 fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1570
1571 ctrl = orig_ctrl;
1572
1573 if (unc != NULL) {
1574 for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
1575 Node* u = val->fast_out(i);
1576 Node* c = phase->ctrl_or_self(u);
1577 if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) {
1578 phase->igvn().rehash_node_delayed(u);
1579 int nb = u->replace_edge(val, out_val);
1580 --i, imax -= nb;
1581 }
1582 }
1583 if (val->outcnt() == 0) {
1584 phase->igvn()._worklist.push(val);
1585 }
1586 }
1587 phase->igvn().replace_node(lrb, out_val);
1588
1589 follow_barrier_uses(out_val, ctrl, uses, phase);
1590
1591 for(uint next = 0; next < uses.size(); next++ ) {
1592 Node *n = uses.at(next);
1593 assert(phase->get_ctrl(n) == ctrl, "bad control");
1594 assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1595 phase->set_ctrl(n, region);
1596 follow_barrier_uses(n, ctrl, uses, phase);
1597 }
1598
1599 // The slow path call produces memory: hook the raw memory phi
1600 // from the expanded load reference barrier with the rest of the graph
1601 // which may require adding memory phis at every post dominated
1602 // region and at enclosing loop heads. Use the memory state
1603 // collected in memory_nodes to fix the memory graph. Update that
1604 // memory state as we go.
1605 fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
1606 }
1607 // Done expanding load-reference-barriers.
1608 assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1609
1610 for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) {
1611 Node* barrier = state->enqueue_barrier(i);
1612 Node* pre_val = barrier->in(1);
1613
1614 if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) {
1615 ShouldNotReachHere();
1616 continue;
1617 }
1618
1619 Node* ctrl = phase->get_ctrl(barrier);
1620
1621 if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) {
1622 assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0)->in(0), pre_val, ctrl->in(0), phase), "can't move");
1623 ctrl = ctrl->in(0)->in(0);
1624 phase->set_ctrl(barrier, ctrl);
1625 } else if (ctrl->is_CallRuntime()) {
1626 assert(is_dominator(phase->get_ctrl(pre_val), ctrl->in(0), pre_val, ctrl, phase), "can't move");
1627 ctrl = ctrl->in(0);
1628 phase->set_ctrl(barrier, ctrl);
1629 }
1630
1631 Node* init_ctrl = ctrl;
1632 IdealLoopTree* loop = phase->get_loop(ctrl);
1633 Node* raw_mem = fixer.find_mem(ctrl, barrier);
1634 Node* init_raw_mem = raw_mem;
1635 Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL);
1636 Node* heap_stable_ctrl = NULL;
1637 Node* null_ctrl = NULL;
1638 uint last = phase->C->unique();
1639
1640 enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT };
1641 Node* region = new RegionNode(PATH_LIMIT);
1642 Node* phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1643
1644 enum { _fast_path = 1, _slow_path, _null_path, PATH_LIMIT2 };
1645 Node* region2 = new RegionNode(PATH_LIMIT2);
1646 Node* phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM);
1647
1648 // Stable path.
1649 test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase);
1650 region->init_req(_heap_stable, heap_stable_ctrl);
1651 phi->init_req(_heap_stable, raw_mem);
1652
1653 // Null path
1654 Node* reg2_ctrl = NULL;
1655 test_null(ctrl, pre_val, null_ctrl, phase);
1656 if (null_ctrl != NULL) {
1657 reg2_ctrl = null_ctrl->in(0);
1658 region2->init_req(_null_path, null_ctrl);
1659 phi2->init_req(_null_path, raw_mem);
1660 } else {
1661 region2->del_req(_null_path);
1662 phi2->del_req(_null_path);
1663 }
1664
1665 const int index_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset());
1666 const int buffer_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset());
1667 Node* thread = new ThreadLocalNode();
1668 phase->register_new_node(thread, ctrl);
1669 Node* buffer_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(buffer_offset));
1670 phase->register_new_node(buffer_adr, ctrl);
1671 Node* index_adr = new AddPNode(phase->C->top(), thread, phase->igvn().MakeConX(index_offset));
1672 phase->register_new_node(index_adr, ctrl);
1673
1674 BasicType index_bt = TypeX_X->basic_type();
1675 assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
1676 const TypePtr* adr_type = TypeRawPtr::BOTTOM;
1677 Node* index = new LoadXNode(ctrl, raw_mem, index_adr, adr_type, TypeX_X, MemNode::unordered);
1678 phase->register_new_node(index, ctrl);
1679 Node* index_cmp = new CmpXNode(index, phase->igvn().MakeConX(0));
1680 phase->register_new_node(index_cmp, ctrl);
1681 Node* index_test = new BoolNode(index_cmp, BoolTest::ne);
1682 phase->register_new_node(index_test, ctrl);
1683 IfNode* queue_full_iff = new IfNode(ctrl, index_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
1684 if (reg2_ctrl == NULL) reg2_ctrl = queue_full_iff;
1685 phase->register_control(queue_full_iff, loop, ctrl);
1686 Node* not_full = new IfTrueNode(queue_full_iff);
1687 phase->register_control(not_full, loop, queue_full_iff);
1688 Node* full = new IfFalseNode(queue_full_iff);
1689 phase->register_control(full, loop, queue_full_iff);
1690
1691 ctrl = not_full;
1692
1693 Node* next_index = new SubXNode(index, phase->igvn().MakeConX(sizeof(intptr_t)));
1694 phase->register_new_node(next_index, ctrl);
1695
1696 Node* buffer = new LoadPNode(ctrl, raw_mem, buffer_adr, adr_type, TypeRawPtr::NOTNULL, MemNode::unordered);
1697 phase->register_new_node(buffer, ctrl);
1698 Node *log_addr = new AddPNode(phase->C->top(), buffer, next_index);
1699 phase->register_new_node(log_addr, ctrl);
1700 Node* log_store = new StorePNode(ctrl, raw_mem, log_addr, adr_type, pre_val, MemNode::unordered);
1701 phase->register_new_node(log_store, ctrl);
1702 // update the index
1703 Node* index_update = new StoreXNode(ctrl, log_store, index_adr, adr_type, next_index, MemNode::unordered);
1704 phase->register_new_node(index_update, ctrl);
1705
1706 // Fast-path case
1707 region2->init_req(_fast_path, ctrl);
1708 phi2->init_req(_fast_path, index_update);
1709
1710 ctrl = full;
1711
1712 Node* base = find_bottom_mem(ctrl, phase);
1713
1714 MergeMemNode* mm = MergeMemNode::make(base);
1715 mm->set_memory_at(Compile::AliasIdxRaw, raw_mem);
1716 phase->register_new_node(mm, ctrl);
1717
1718 Node* call = new CallLeafNode(ShenandoahBarrierSetC2::write_ref_field_pre_entry_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), "shenandoah_wb_pre", TypeRawPtr::BOTTOM);
1719 call->init_req(TypeFunc::Control, ctrl);
1720 call->init_req(TypeFunc::I_O, phase->C->top());
1721 call->init_req(TypeFunc::Memory, mm);
1722 call->init_req(TypeFunc::FramePtr, phase->C->top());
1723 call->init_req(TypeFunc::ReturnAdr, phase->C->top());
1724 call->init_req(TypeFunc::Parms, pre_val);
1725 call->init_req(TypeFunc::Parms+1, thread);
1726 phase->register_control(call, loop, ctrl);
1727
1728 Node* ctrl_proj = new ProjNode(call, TypeFunc::Control);
1729 phase->register_control(ctrl_proj, loop, call);
1730 Node* mem_proj = new ProjNode(call, TypeFunc::Memory);
1731 phase->register_new_node(mem_proj, call);
1732
1733 // Slow-path case
1734 region2->init_req(_slow_path, ctrl_proj);
1735 phi2->init_req(_slow_path, mem_proj);
1736
1737 phase->register_control(region2, loop, reg2_ctrl);
1738 phase->register_new_node(phi2, region2);
1739
1740 region->init_req(_heap_unstable, region2);
1741 phi->init_req(_heap_unstable, phi2);
1742
1743 phase->register_control(region, loop, heap_stable_ctrl->in(0));
1744 phase->register_new_node(phi, region);
1745
1746 fix_ctrl(barrier, region, fixer, uses, uses_to_ignore, last, phase);
1747 for(uint next = 0; next < uses.size(); next++ ) {
1748 Node *n = uses.at(next);
1749 assert(phase->get_ctrl(n) == init_ctrl, "bad control");
1750 assert(n != init_raw_mem, "should leave input raw mem above the barrier");
1751 phase->set_ctrl(n, region);
1752 follow_barrier_uses(n, init_ctrl, uses, phase);
1753 }
1754 fixer.fix_mem(init_ctrl, region, init_raw_mem, raw_mem_for_ctrl, phi, uses);
1755
1756 phase->igvn().replace_node(barrier, pre_val);
1757 }
1758 assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced");
1759
1760}
1761
1762void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1763 IdealLoopTree *loop = phase->get_loop(iff);
1764 Node* loop_head = loop->_head;
1765 Node* entry_c = loop_head->in(LoopNode::EntryControl);
1766
1767 Node* bol = iff->in(1);
1768 Node* cmp = bol->in(1);
1769 Node* andi = cmp->in(1);
1770 Node* load = andi->in(1);
1771
1772 assert(is_gc_state_load(load), "broken");
1773 if (!phase->is_dominator(load->in(0), entry_c)) {
1774 Node* mem_ctrl = NULL;
1775 Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1776 load = load->clone();
1777 load->set_req(MemNode::Memory, mem);
1778 load->set_req(0, entry_c);
1779 phase->register_new_node(load, entry_c);
1780 andi = andi->clone();
1781 andi->set_req(1, load);
1782 phase->register_new_node(andi, entry_c);
1783 cmp = cmp->clone();
1784 cmp->set_req(1, andi);
1785 phase->register_new_node(cmp, entry_c);
1786 bol = bol->clone();
1787 bol->set_req(1, cmp);
1788 phase->register_new_node(bol, entry_c);
1789
1790 Node* old_bol =iff->in(1);
1791 phase->igvn().replace_input_of(iff, 1, bol);
1792 }
1793}
1794
1795bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1796 if (!n->is_If() || n->is_CountedLoopEnd()) {
1797 return false;
1798 }
1799 Node* region = n->in(0);
1800
1801 if (!region->is_Region()) {
1802 return false;
1803 }
1804 Node* dom = phase->idom(region);
1805 if (!dom->is_If()) {
1806 return false;
1807 }
1808
1809 if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1810 return false;
1811 }
1812
1813 IfNode* dom_if = dom->as_If();
1814 Node* proj_true = dom_if->proj_out(1);
1815 Node* proj_false = dom_if->proj_out(0);
1816
1817 for (uint i = 1; i < region->req(); i++) {
1818 if (phase->is_dominator(proj_true, region->in(i))) {
1819 continue;
1820 }
1821 if (phase->is_dominator(proj_false, region->in(i))) {
1822 continue;
1823 }
1824 return false;
1825 }
1826
1827 return true;
1828}
1829
void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
  // Merge a heap-stable test with an identical dominating test: common the
  // gc-state loads, replace n's condition with a phi of constants derived
  // from the dominating test's outcome, and split the If through the region.
  assert(is_heap_stable_test(n), "no other tests");
  if (identical_backtoback_ifs(n, phase)) {
    Node* n_ctrl = n->in(0);
    if (phase->can_split_if(n_ctrl)) {
      IfNode* dom_if = phase->idom(n_ctrl)->as_If();
      if (is_heap_stable_test(n)) {
        // Common the gc-state loads of the two tests so they compare the
        // same value. Chain: If <- Bool <- Cmp <- And <- Load.
        Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
        assert(is_gc_state_load(gc_state_load), "broken");
        Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
        assert(is_gc_state_load(dom_gc_state_load), "broken");
        if (gc_state_load != dom_gc_state_load) {
          phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
        }
      }
      // Build a phi at the region whose value records, per path, which way
      // the dominating test went (1 = true projection, 0 = false).
      PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
      Node* proj_true = dom_if->proj_out(1);
      Node* proj_false = dom_if->proj_out(0);
      Node* con_true = phase->igvn().makecon(TypeInt::ONE);
      Node* con_false = phase->igvn().makecon(TypeInt::ZERO);

      for (uint i = 1; i < n_ctrl->req(); i++) {
        if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
          bolphi->init_req(i, con_true);
        } else {
          assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
          bolphi->init_req(i, con_false);
        }
      }
      phase->register_new_node(bolphi, n_ctrl);
      // With a constant phi as condition, splitting the If through the
      // region lets each predecessor branch directly, removing the retest.
      phase->igvn().replace_input_of(n, 1, bolphi);
      phase->do_split_if(n);
    }
  }
}
1865
IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
  // Find first invariant test that doesn't exit the loop
  LoopNode *head = loop->_head->as_Loop();
  IfNode* unswitch_iff = NULL;
  Node* n = head->in(LoopNode::LoopBackControl);
  // Lazily-computed flag: -1 = unknown, 0 = loop has no (non-leaf-call)
  // safepoints, 1 = it has at least one.
  int loop_has_sfpts = -1;
  // Walk the idom chain from the backedge up to the loop head, looking for
  // a heap-stable test whose region merges both branches inside the loop.
  while (n != head) {
    Node* n_dom = phase->idom(n);
    if (n->is_Region()) {
      if (n_dom->is_If()) {
        IfNode* iff = n_dom->as_If();
        if (iff->in(1)->is_Bool()) {
          BoolNode* bol = iff->in(1)->as_Bool();
          if (bol->in(1)->is_Cmp()) {
            // If condition is invariant and not a loop exit,
            // then found reason to unswitch.
            if (is_heap_stable_test(iff) &&
                (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
              assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
              // Scan the loop body once for safepoints (ignoring leaf
              // calls); the result is cached in loop_has_sfpts.
              if (loop_has_sfpts == -1) {
                for(uint i = 0; i < loop->_body.size(); i++) {
                  Node *m = loop->_body[i];
                  if (m->is_SafePoint() && !m->is_CallLeaf()) {
                    loop_has_sfpts = 1;
                    break;
                  }
                }
                if (loop_has_sfpts == -1) {
                  loop_has_sfpts = 0;
                }
              }
              // Only accept a candidate when the loop has no such
              // safepoints.
              if (!loop_has_sfpts) {
                unswitch_iff = iff;
              }
            }
          }
        }
      }
    }
    n = n_dom;
  }
  return unswitch_iff;
}
1909
1910
void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
  // Post-expansion cleanup: collect gc-state loads and heap-stable tests
  // from the whole graph, common the loads, merge back-to-back tests, and
  // finally try to unswitch innermost loops on the heap-stable condition.
  Node_List heap_stable_tests;
  Node_List gc_state_loads;
  stack.push(phase->C->start(), 0);
  // Iterative DFS over the graph (outputs), collecting candidates when a
  // node is popped (post-order).
  do {
    Node* n = stack.node();
    uint i = stack.index();

    if (i < n->outcnt()) {
      Node* u = n->raw_out(i);
      stack.set_index(i+1);
      if (!visited.test_set(u->_idx)) {
        stack.push(u, 0);
      }
    } else {
      stack.pop();
      if (ShenandoahCommonGCStateLoads && is_gc_state_load(n)) {
        gc_state_loads.push(n);
      }
      if (n->is_If() && is_heap_stable_test(n)) {
        heap_stable_tests.push(n);
      }
    }
  } while (stack.size() > 0);

  // Repeatedly try to common gc-state loads until a fixed point: commoning
  // one load can enable commoning another.
  bool progress;
  do {
    progress = false;
    for (uint i = 0; i < gc_state_loads.size(); i++) {
      Node* n = gc_state_loads.at(i);
      if (n->outcnt() != 0) {
        progress |= try_common_gc_state_load(n, phase);
      }
    }
  } while (progress);

  for (uint i = 0; i < heap_stable_tests.size(); i++) {
    Node* n = heap_stable_tests.at(i);
    assert(is_heap_stable_test(n), "only evacuation test");
    merge_back_to_back_tests(n, phase);
  }

  if (!phase->C->major_progress()) {
    VectorSet seen(Thread::current()->resource_area());
    for (uint i = 0; i < heap_stable_tests.size(); i++) {
      Node* n = heap_stable_tests.at(i);
      IdealLoopTree* loop = phase->get_loop(n);
      // Only consider innermost, reducible loops, and each loop head once.
      if (loop != phase->ltree_root() &&
          loop->_child == NULL &&
          !loop->_irreducible) {
        LoopNode* head = loop->_head->as_Loop();
        if ((!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
            !seen.test_set(head->_idx)) {
          IfNode* iff = find_unswitching_candidate(loop, phase);
          if (iff != NULL) {
            // Remember the original condition so we can restore it if the
            // unswitching policy rejects the loop.
            Node* bol = iff->in(1);
            if (head->is_strip_mined()) {
              head->verify_strip_mined(0);
            }
            move_heap_stable_test_out_of_loop(iff, phase);

            AutoNodeBudget node_budget(phase);

            if (loop->policy_unswitching(phase)) {
              if (head->is_strip_mined()) {
                OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
                hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
              }
              phase->do_unswitching(loop, old_new);
            } else {
              // Not proceeding with unswitching. Move load back in
              // the loop.
              phase->igvn().replace_input_of(iff, 1, bol);
            }
          }
        }
      }
    }
  }
}
1991
1992#ifdef ASSERT
void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
  // Debug-only verification: for each expanded load-reference-barrier
  // slow-path call, collect the CFG nodes and raw memory states reachable
  // from it, then check that every region either lies entirely below the
  // call or has a memory phi whose inputs mirror which control paths do.
  const bool trace = false;
  ResourceMark rm;
  Unique_Node_List nodes;
  Unique_Node_List controls;
  Unique_Node_List memories;

  nodes.push(root);
  for (uint next = 0; next < nodes.size(); next++) {
    Node *n = nodes.at(next);
    if (ShenandoahBarrierSetC2::is_shenandoah_lrb_call(n)) {
      controls.push(n);
      if (trace) { tty->print("XXXXXX verifying"); n->dump(); }
      // Transitively collect CFG nodes below the call, stopping at Root,
      // never-branch exit projections, and regions leading only to Halt.
      for (uint next2 = 0; next2 < controls.size(); next2++) {
        Node *m = controls.at(next2);
        for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
          Node* u = m->fast_out(i);
          if (u->is_CFG() && !u->is_Root() &&
              !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1) &&
              !(u->is_Region() && u->unique_ctrl_out()->Opcode() == Op_Halt)) {
            if (trace) { tty->print("XXXXXX pushing control"); u->dump(); }
            controls.push(u);
          }
        }
      }
      // Transitively collect the raw memory states downstream of the
      // call's memory projection.
      memories.push(n->as_Call()->proj_out(TypeFunc::Memory));
      for (uint next2 = 0; next2 < memories.size(); next2++) {
        Node *m = memories.at(next2);
        assert(m->bottom_type() == Type::MEMORY, "");
        for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
          Node* u = m->fast_out(i);
          if (u->bottom_type() == Type::MEMORY && (u->is_Mem() || u->is_ClearArray())) {
            if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
            memories.push(u);
          } else if (u->is_LoadStore()) {
            // LoadStore produces memory through its SCMemProj projection.
            if (trace) { tty->print("XXXXXX pushing memory"); u->find_out_with(Op_SCMemProj)->dump(); }
            memories.push(u->find_out_with(Op_SCMemProj));
          } else if (u->is_MergeMem() && u->as_MergeMem()->memory_at(Compile::AliasIdxRaw) == m) {
            if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
            memories.push(u);
          } else if (u->is_Phi()) {
            assert(u->bottom_type() == Type::MEMORY, "");
            // Only raw/bottom memory phis are part of the raw memory graph.
            if (u->adr_type() == TypeRawPtr::BOTTOM || u->adr_type() == TypePtr::BOTTOM) {
              assert(controls.member(u->in(0)), "");
              if (trace) { tty->print("XXXXXX pushing memory"); u->dump(); }
              memories.push(u);
            }
          } else if (u->is_SafePoint() || u->is_MemBar()) {
            for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
              Node* uu = u->fast_out(j);
              if (uu->bottom_type() == Type::MEMORY) {
                if (trace) { tty->print("XXXXXX pushing memory"); uu->dump(); }
                memories.push(uu);
              }
            }
          }
        }
      }
      // Check each collected region: either all its control inputs are in
      // the collected set ("all in"), or there is a memory phi whose input
      // membership in `memories` matches each path's membership in
      // `controls`.
      for (uint next2 = 0; next2 < controls.size(); next2++) {
        Node *m = controls.at(next2);
        if (m->is_Region()) {
          bool all_in = true;
          for (uint i = 1; i < m->req(); i++) {
            if (!controls.member(m->in(i))) {
              all_in = false;
              break;
            }
          }
          if (trace) { tty->print("XXX verifying %s", all_in ? "all in" : ""); m->dump(); }
          bool found_phi = false;
          for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax && !found_phi; j++) {
            Node* u = m->fast_out(j);
            if (u->is_Phi() && memories.member(u)) {
              found_phi = true;
              for (uint i = 1; i < u->req() && found_phi; i++) {
                Node* k = u->in(i);
                if (memories.member(k) != controls.member(m->in(i))) {
                  found_phi = false;
                }
              }
            }
          }
          assert(found_phi || all_in, "");
        }
      }
      // Reset the per-call worklists before examining the next call.
      controls.clear();
      memories.clear();
    }
    for( uint i = 0; i < n->len(); ++i ) {
      Node *m = n->in(i);
      if (m != NULL) {
        nodes.push(m);
      }
    }
  }
}
2089#endif
2090
// An enqueue barrier takes the value to record as input 1 (input 0, the
// control slot, is left NULL).
ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
  // Register with the compilation-wide barrier-set state so the expansion
  // pass (see ShenandoahBarrierC2Support::expand) can find this node later.
  ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this);
}
2094
2095const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
2096 if (in(1) == NULL || in(1)->is_top()) {
2097 return Type::TOP;
2098 }
2099 const Type* t = in(1)->bottom_type();
2100 if (t == TypePtr::NULL_PTR) {
2101 return t;
2102 }
2103 return t->is_oopptr()->cast_to_nonconst();
2104}
2105
2106const Type* ShenandoahEnqueueBarrierNode::Value(PhaseGVN* phase) const {
2107 if (in(1) == NULL) {
2108 return Type::TOP;
2109 }
2110 const Type* t = phase->type(in(1));
2111 if (t == Type::TOP) {
2112 return Type::TOP;
2113 }
2114 if (t == TypePtr::NULL_PTR) {
2115 return t;
2116 }
2117 return t->is_oopptr()->cast_to_nonconst();
2118}
2119
2120int ShenandoahEnqueueBarrierNode::needed(Node* n) {
2121 if (n == NULL ||
2122 n->is_Allocate() ||
2123 n->Opcode() == Op_ShenandoahEnqueueBarrier ||
2124 n->bottom_type() == TypePtr::NULL_PTR ||
2125 (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
2126 return NotNeeded;
2127 }
2128 if (n->is_Phi() ||
2129 n->is_CMove()) {
2130 return MaybeNeeded;
2131 }
2132 return Needed;
2133}
2134
2135Node* ShenandoahEnqueueBarrierNode::next(Node* n) {
2136 for (;;) {
2137 if (n == NULL) {
2138 return n;
2139 } else if (n->bottom_type() == TypePtr::NULL_PTR) {
2140 return n;
2141 } else if (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL) {
2142 return n;
2143 } else if (n->is_ConstraintCast() ||
2144 n->Opcode() == Op_DecodeN ||
2145 n->Opcode() == Op_EncodeP) {
2146 n = n->in(1);
2147 } else if (n->is_Proj()) {
2148 n = n->in(0);
2149 } else {
2150 return n;
2151 }
2152 }
2153 ShouldNotReachHere();
2154 return NULL;
2155}
2156
Node* ShenandoahEnqueueBarrierNode::Identity(PhaseGVN* phase) {
  // Decide whether this barrier is redundant: if no value flowing into it
  // can ever need an enqueue barrier, the node is the identity of its input
  // and GVN will remove it.
  PhaseIterGVN* igvn = phase->is_IterGVN();

  Node* n = next(in(1));

  int cont = needed(n);

  if (cont == NotNeeded) {
    // Input can never need a barrier: drop this node.
    return in(1);
  } else if (cont == MaybeNeeded) {
    if (igvn == NULL) {
      // Graph exploration is only done during IGVN; retry then.
      phase->record_for_igvn(this);
      return this;
    } else {
      // Breadth-first walk through Phi/CMove inputs. The barrier is
      // redundant only if no reachable input turns out to be Needed.
      ResourceMark rm;
      Unique_Node_List wq;
      uint wq_i = 0;

      for (;;) {
        // Expand the current merge node's inputs onto the worklist.
        if (n->is_Phi()) {
          for (uint i = 1; i < n->req(); i++) {
            Node* m = n->in(i);
            if (m != NULL) {
              wq.push(m);
            }
          }
        } else {
          assert(n->is_CMove(), "nothing else here");
          Node* m = n->in(CMoveNode::IfFalse);
          wq.push(m);
          m = n->in(CMoveNode::IfTrue);
          wq.push(m);
        }
        // Pull worklist entries until one is Needed (keep the barrier),
        // the list is exhausted (barrier redundant), or a fresh
        // MaybeNeeded merge is found (loop around and expand it).
        Node* orig_n = NULL;
        do {
          if (wq_i >= wq.size()) {
            return in(1);
          }
          n = wq.at(wq_i);
          wq_i++;
          orig_n = n;
          n = next(n);
          cont = needed(n);
          if (cont == Needed) {
            return this;
          }
          // Skip MaybeNeeded nodes already queued (wq.member) to avoid
          // revisiting cycles through phis.
        } while (cont != MaybeNeeded || (orig_n != n && wq.member(n)));
      }
    }
  }

  return this;
}
2210
2211#ifdef ASSERT
2212static bool has_never_branch(Node* root) {
2213 for (uint i = 1; i < root->req(); i++) {
2214 Node* in = root->in(i);
2215 if (in != NULL && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->Opcode() == Op_NeverBranch) {
2216 return true;
2217 }
2218 }
2219 return false;
2220}
2221#endif
2222
void MemoryGraphFixer::collect_memory_nodes() {
  Node_Stack stack(0);
  VectorSet visited(Thread::current()->resource_area());
  Node_List regions;

  // Walk the raw memory graph and create a mapping from CFG node to
  // memory node. Exclude phis for now.
  stack.push(_phase->C->root(), 1);
  do {
    Node* n = stack.node();
    int opc = n->Opcode();
    uint i = stack.index();
    if (i < n->req()) {
      Node* mem = NULL;
      if (opc == Op_Root) {
        // Memory reaches Root via Return/Rethrow/Halt; dig out the memory
        // input of whatever feeds this Root input.
        Node* in = n->in(i);
        int in_opc = in->Opcode();
        if (in_opc == Op_Return || in_opc == Op_Rethrow) {
          mem = in->in(TypeFunc::Memory);
        } else if (in_opc == Op_Halt) {
          if (!in->in(0)->is_Region()) {
            Node* proj = in->in(0);
            assert(proj->is_Proj(), "");
            Node* in = proj->in(0);
            assert(in->is_CallStaticJava() || in->Opcode() == Op_NeverBranch || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
            if (in->is_CallStaticJava()) {
              mem = in->in(TypeFunc::Memory);
            } else if (in->Opcode() == Op_Catch) {
              Node* call = in->in(0)->in(0);
              assert(call->is_Call(), "");
              mem = call->in(TypeFunc::Memory);
            } else if (in->Opcode() == Op_NeverBranch) {
              // Infinite loop: walk the loop's CFG to find its single
              // safepoint and use that safepoint's memory state.
              ResourceMark rm;
              Unique_Node_List wq;
              wq.push(in);
              wq.push(in->as_Multi()->proj_out(0));
              for (uint j = 1; j < wq.size(); j++) {
                Node* c = wq.at(j);
                assert(!c->is_Root(), "shouldn't leave loop");
                if (c->is_SafePoint()) {
                  assert(mem == NULL, "only one safepoint");
                  mem = c->in(TypeFunc::Memory);
                }
                for (DUIterator_Fast kmax, k = c->fast_outs(kmax); k < kmax; k++) {
                  Node* u = c->fast_out(k);
                  if (u->is_CFG()) {
                    wq.push(u);
                  }
                }
              }
              assert(mem != NULL, "should have found safepoint");
            }
          }
        } else {
#ifdef ASSERT
          n->dump();
          in->dump();
#endif
          ShouldNotReachHere();
        }
      } else {
        // n is a memory phi of our alias class; take its i-th input.
        assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
        assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
        mem = n->in(i);
      }
      i++;
      stack.set_index(i);
      if (mem == NULL) {
        continue;
      }
      // Walk up the memory chain from this state until hitting a node we
      // already visited or the Start node; push producers for later visit.
      for (;;) {
        if (visited.test_set(mem->_idx) || mem->is_Start()) {
          break;
        }
        if (mem->is_Phi()) {
          stack.push(mem, 2);
          mem = mem->in(1);
        } else if (mem->is_Proj()) {
          stack.push(mem, mem->req());
          mem = mem->in(0);
        } else if (mem->is_SafePoint() || mem->is_MemBar()) {
          mem = mem->in(TypeFunc::Memory);
        } else if (mem->is_MergeMem()) {
          MergeMemNode* mm = mem->as_MergeMem();
          mem = mm->memory_at(_alias);
        } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
          assert(_alias == Compile::AliasIdxRaw, "");
          stack.push(mem, mem->req());
          mem = mem->in(MemNode::Memory);
        } else {
#ifdef ASSERT
          mem->dump();
#endif
          ShouldNotReachHere();
        }
      }
    } else {
      if (n->is_Phi()) {
        // Nothing
      } else if (!n->is_Root()) {
        // Record this memory state at its controlling CFG node.
        Node* c = get_ctrl(n);
        _memory_nodes.map(c->_idx, n);
      }
      stack.pop();
    }
  } while(stack.is_nonempty());

  // Iterate over CFG nodes in rpo and propagate memory state to
  // compute memory state at regions, creating new phis if needed.
  Node_List rpo_list;
  visited.Clear();
  _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
  Node* root = rpo_list.pop();
  assert(root == _phase->C->root(), "");

  const bool trace = false;
#ifdef ASSERT
  if (trace) {
    for (int i = rpo_list.size() - 1; i >= 0; i--) {
      Node* c = rpo_list.at(i);
      if (_memory_nodes[c->_idx] != NULL) {
        tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump();
      }
    }
  }
#endif
  // Nodes created from here on have _idx >= last; used below to recognize
  // phis created by this pass.
  uint last = _phase->C->unique();

#ifdef ASSERT
  uint8_t max_depth = 0;
  for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
    IdealLoopTree* lpt = iter.current();
    max_depth = MAX2(max_depth, lpt->_nest);
  }
#endif

  // Fixed-point iteration: propagate memory states forward in RPO until
  // no region's computed state changes anymore.
  bool progress = true;
  int iteration = 0;
  Node_List dead_phis;
  while (progress) {
    progress = false;
    iteration++;
    // Convergence is expected within ~loop-nest-depth iterations unless the
    // graph has irreducible or infinite loops.
    assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
    if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
    IdealLoopTree* last_updated_ilt = NULL;
    for (int i = rpo_list.size() - 1; i >= 0; i--) {
      Node* c = rpo_list.at(i);

      Node* prev_mem = _memory_nodes[c->_idx];
      if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
        Node* prev_region = regions[c->_idx];
        Node* unique = NULL;
        // Check whether all incoming memory states agree; NodeSentinel
        // marks a conflict that requires a phi at this region.
        for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
          Node* m = _memory_nodes[c->in(j)->_idx];
          assert(m != NULL || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
          if (m != NULL) {
            if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
              assert(c->is_Loop() && j == LoopNode::LoopBackControl || _phase->C->has_irreducible_loop(), "");
              // continue
            } else if (unique == NULL) {
              unique = m;
            } else if (m == unique) {
              // continue
            } else {
              unique = NodeSentinel;
            }
          }
        }
        assert(unique != NULL, "empty phi???");
        if (unique != NodeSentinel) {
          // All inputs agree: no phi needed here; a phi created on an
          // earlier iteration becomes dead.
          if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c) {
            dead_phis.push(prev_region);
          }
          regions.map(c->_idx, unique);
        } else {
          // Inputs disagree: reuse an existing phi if possible, otherwise
          // create a new one for this alias class.
          Node* phi = NULL;
          if (prev_region != NULL && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
            // Reuse the phi this pass created earlier; refresh its inputs.
            phi = prev_region;
            for (uint k = 1; k < c->req(); k++) {
              Node* m = _memory_nodes[c->in(k)->_idx];
              assert(m != NULL, "expect memory state");
              phi->set_req(k, m);
            }
          } else {
            // Look for a pre-existing phi whose inputs already match.
            for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == NULL; j++) {
              Node* u = c->fast_out(j);
              if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
                  (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
                phi = u;
                for (uint k = 1; k < c->req() && phi != NULL; k++) {
                  Node* m = _memory_nodes[c->in(k)->_idx];
                  assert(m != NULL, "expect memory state");
                  if (u->in(k) != m) {
                    phi = NULL;
                  }
                }
              }
            }
            if (phi == NULL) {
              phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
              for (uint k = 1; k < c->req(); k++) {
                Node* m = _memory_nodes[c->in(k)->_idx];
                assert(m != NULL, "expect memory state");
                phi->init_req(k, m);
              }
            }
          }
          assert(phi != NULL, "");
          regions.map(c->_idx, phi);
        }
        Node* current_region = regions[c->_idx];
        if (current_region != prev_region) {
          progress = true;
          if (prev_region == prev_mem) {
            _memory_nodes.map(c->_idx, current_region);
          }
        }
      } else if (prev_mem == NULL || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
        // Non-region (or excluded strip-mined region): inherit the memory
        // state from the immediate dominator.
        Node* m = _memory_nodes[_phase->idom(c)->_idx];
        assert(m != NULL, "expect memory state");
        if (m != prev_mem) {
          _memory_nodes.map(c->_idx, m);
          progress = true;
        }
      }
#ifdef ASSERT
      if (trace) { tty->print("X %d", c->_idx); _memory_nodes[c->_idx]->dump(); }
#endif
    }
  }

  // Replace existing phi with computed memory state for that region
  // if different (could be a new phi or a dominating memory node if
  // that phi was found to be useless).
  while (dead_phis.size() > 0) {
    Node* n = dead_phis.pop();
    n->replace_by(_phase->C->top());
    n->destruct();
  }
  // Register the phis this pass created with the loop optimizer.
  for (int i = rpo_list.size() - 1; i >= 0; i--) {
    Node* c = rpo_list.at(i);
    if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
      Node* n = regions[c->_idx];
      if (n->is_Phi() && n->_idx >= last && n->in(0) == c) {
        _phase->register_new_node(n, c);
      }
    }
  }
  // Rewire or replace other memory phis at each region so they agree with
  // the computed memory state.
  for (int i = rpo_list.size() - 1; i >= 0; i--) {
    Node* c = rpo_list.at(i);
    if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
      Node* n = regions[c->_idx];
      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
        Node* u = c->fast_out(i);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
            u != n) {
          if (u->adr_type() == TypePtr::BOTTOM) {
            fix_memory_uses(u, n, n, c);
          } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
            _phase->lazy_replace(u, n);
            --i; --imax;
          }
        }
      }
    }
  }
}
2490
Node* MemoryGraphFixer::get_ctrl(Node* n) const {
  // Control for a node, refined for call memory projections: when a call
  // has both a fallthrough and a catchall memory projection, map each
  // memory projection to the matching control projection instead of the
  // call itself, so the two memory states get distinct controls.
  Node* c = _phase->get_ctrl(n);
  if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Call()) {
    assert(c == n->in(0), "");
    CallNode* call = c->as_Call();
    CallProjections projs;
    call->extract_projections(&projs, true, false);
    if (projs.catchall_memproj != NULL) {
      if (projs.fallthrough_memproj == n) {
        c = projs.fallthrough_catchproj;
      } else {
        assert(projs.catchall_memproj == n, "");
        c = projs.catchall_catchproj;
      }
    }
  }
  return c;
}
2509
2510Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2511 if (_phase->has_ctrl(n))
2512 return get_ctrl(n);
2513 else {
2514 assert (n->is_CFG(), "must be a CFG node");
2515 return n;
2516 }
2517}
2518
2519bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2520 return m != NULL && get_ctrl(m) == c;
2521}
2522
Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
  // Find the memory state valid at control node ctrl (for use by node n,
  // if given): walk up the dominator tree from ctrl until the recorded
  // memory state is pinned at the current control. The CatchProj clause
  // also accepts a state produced by the call feeding this catch path.
  assert(n == NULL || _phase->ctrl_or_self(n) == ctrl, "");
  Node* mem = _memory_nodes[ctrl->_idx];
  Node* c = ctrl;
  while (!mem_is_valid(mem, c) &&
         (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
    c = _phase->idom(c);
    mem = _memory_nodes[c->_idx];
  }
  if (n != NULL && mem_is_valid(mem, c)) {
    // When both mem and n sit at the same control, step down the memory
    // chain until the chosen state dominates n.
    while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
      mem = next_mem(mem, _alias);
    }
    if (mem->is_MergeMem()) {
      mem = mem->as_MergeMem()->memory_at(_alias);
    }
    if (!mem_is_valid(mem, c)) {
      // Stepping down left us with an invalid state: resume the dominator
      // walk from the next idom up.
      do {
        c = _phase->idom(c);
        mem = _memory_nodes[c->_idx];
      } while (!mem_is_valid(mem, c) &&
               (!c->is_CatchProj() || mem == NULL || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
    }
  }
  assert(mem->bottom_type() == Type::MEMORY, "");
  return mem;
}
2550
2551bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2552 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2553 Node* use = region->fast_out(i);
2554 if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2555 (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2556 return true;
2557 }
2558 }
2559 return false;
2560}
2561
// Rewire the memory graph on this fixer's alias slice after a new memory
// state (new_mem, produced at new_ctrl) has been introduced: memory uses
// that were fed by 'mem' and are dominated by the new state must be
// switched over to it. 'mem_for_ctrl' is the memory state previously
// recorded for 'ctrl'; 'uses' is scratch storage for the CFG walk.
void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
  assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
  const bool trace = false;
  DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
  DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
  GrowableArray<Node*> phis;
  if (mem_for_ctrl != mem) {
    // The state at ctrl differs from 'mem': walk the (raw-memory) def chain
    // from mem_for_ctrl back to 'mem' and splice new_mem in just above it.
    Node* old = mem_for_ctrl;
    Node* prev = NULL;
    while (old != mem) {
      prev = old;
      if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
        // Only the raw slice is expected to have such chains here.
        assert(_alias == Compile::AliasIdxRaw, "");
        old = old->in(MemNode::Memory);
      } else if (old->Opcode() == Op_SCMemProj) {
        assert(_alias == Compile::AliasIdxRaw, "");
        old = old->in(0);
      } else {
        ShouldNotReachHere();
      }
    }
    assert(prev != NULL, "");
    if (new_ctrl != ctrl) {
      // Keep the per-control memory-state table in sync with the splice.
      _memory_nodes.map(ctrl->_idx, mem);
      _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
    }
    uint input = (uint)MemNode::Memory;
    // prev's memory input was the first node above 'mem'; redirect it.
    _phase->igvn().replace_input_of(prev, input, new_mem);
  } else {
    // The new state supersedes 'mem' from new_ctrl onward: breadth-first
    // walk the CFG from new_ctrl, inserting memory Phis at regions where
    // the new and old states merge.
    uses.clear();
    _memory_nodes.map(new_ctrl->_idx, new_mem);
    uses.push(new_ctrl);
    for(uint next = 0; next < uses.size(); next++ ) {
      Node *n = uses.at(next);
      assert(n->is_CFG(), "");
      DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* u = n->fast_out(i);
        if (!u->is_Root() && u->is_CFG() && u != n) {
          Node* m = _memory_nodes[u->_idx];
          if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
              !has_mem_phi(u) &&
              u->unique_ctrl_out()->Opcode() != Op_Halt) {
            DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
            DEBUG_ONLY(if (trace && m != NULL) { tty->print("ZZZ mem"); m->dump(); });

            if (!mem_is_valid(m, u) || !m->is_Phi()) {
              bool push = true;
              bool create_phi = true;
              if (_phase->is_dominator(new_ctrl, u)) {
                // All paths into u see the new state; no merge needed.
                create_phi = false;
              } else if (!_phase->C->has_irreducible_loop()) {
                IdealLoopTree* loop = _phase->get_loop(ctrl);
                bool do_check = true;
                IdealLoopTree* l = loop;
                create_phi = false;
                // A Phi is required if u is the head of a loop enclosing
                // ctrl (the new state flows around the back edge).
                while (l != _phase->ltree_root()) {
                  Node* head = l->_head;
                  if (head->in(0) == NULL) {
                    head = _phase->get_ctrl(head);
                  }
                  if (_phase->is_dominator(head, u) && _phase->is_dominator(_phase->idom(u), head)) {
                    create_phi = true;
                    do_check = false;
                    break;
                  }
                  l = l->_parent;
                }

                if (do_check) {
                  assert(!create_phi, "");
                  IdealLoopTree* u_loop = _phase->get_loop(u);
                  if (u_loop != _phase->ltree_root() && u_loop->is_member(loop)) {
                    Node* c = ctrl;
                    while (!_phase->is_dominator(c, u_loop->tail())) {
                      c = _phase->idom(c);
                    }
                    if (!_phase->is_dominator(c, u)) {
                      do_check = false;
                    }
                  }
                }

                if (do_check && _phase->is_dominator(_phase->idom(u), new_ctrl)) {
                  create_phi = true;
                }
              }
              if (create_phi) {
                // New memory Phi on our slice; inputs are filled in below
                // once all states are known.
                Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
                _phase->register_new_node(phi, u);
                phis.push(phi);
                DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
                if (!mem_is_valid(m, u)) {
                  DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
                  _memory_nodes.map(u->_idx, phi);
                } else {
                  // A valid state already hangs off u: walk to the first
                  // memory node in u whose input leaves u, and make it
                  // consume the new Phi instead.
                  DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
                  for (;;) {
                    assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
                    Node* next = NULL;
                    if (m->is_Proj()) {
                      next = m->in(0);
                    } else {
                      assert(m->is_Mem() || m->is_LoadStore(), "");
                      assert(_alias == Compile::AliasIdxRaw, "");
                      next = m->in(MemNode::Memory);
                    }
                    if (_phase->get_ctrl(next) != u) {
                      break;
                    }
                    if (next->is_MergeMem()) {
                      assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
                      break;
                    }
                    if (next->is_Phi()) {
                      assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
                      break;
                    }
                    m = next;
                  }

                  DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
                  assert(m->is_Mem() || m->is_LoadStore(), "");
                  uint input = (uint)MemNode::Memory;
                  _phase->igvn().replace_input_of(m, input, phi);
                  push = false;
                }
              } else {
                DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
              }
              if (push) {
                uses.push(u);
              }
            }
          } else if (!mem_is_valid(m, u) &&
                     !(u->Opcode() == Op_CProj && u->in(0)->Opcode() == Op_NeverBranch && u->as_Proj()->_con == 1)) {
            // No valid state recorded at u yet: keep walking (NeverBranch
            // projection 1 is excluded).
            uses.push(u);
          }
        }
      }
    }
    // Second pass: now that every control has a state, fill in the inputs
    // of the Phis created above from their region's predecessors.
    for (int i = 0; i < phis.length(); i++) {
      Node* n = phis.at(i);
      Node* r = n->in(0);
      DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
      for (uint j = 1; j < n->req(); j++) {
        Node* m = find_mem(r->in(j), NULL);
        _phase->igvn().replace_input_of(n, j, m);
        DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
      }
    }
  }
  // Final pass: visit pre-existing uses of 'mem' (nodes created during this
  // fix, _idx >= last, are skipped) and redirect each to the state found at
  // its control point, wrapping in a MergeMem where a wide slice is needed.
  uint last = _phase->C->unique();
  MergeMemNode* mm = NULL;
  int alias = _alias;
  DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    if (u->_idx < last) {
      if (u->is_Mem()) {
        if (_phase->C->get_alias_index(u->adr_type()) == alias) {
          Node* m = find_mem(_phase->get_ctrl(u), u);
          if (m != mem) {
            DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
            _phase->igvn().replace_input_of(u, MemNode::Memory, m);
            --i; // input replacement removed a use; rewind the iterator
          }
        }
      } else if (u->is_MergeMem()) {
        MergeMemNode* u_mm = u->as_MergeMem();
        if (u_mm->memory_at(alias) == mem) {
          MergeMemNode* newmm = NULL;
          // Decide per use of the MergeMem: each user that should see the
          // new state gets a clone of the MergeMem with our slice updated.
          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
            Node* uu = u->fast_out(j);
            assert(!uu->is_MergeMem(), "chain of MergeMems?");
            if (uu->is_Phi()) {
              assert(uu->adr_type() == TypePtr::BOTTOM, "");
              Node* region = uu->in(0);
              int nb = 0;
              for (uint k = 1; k < uu->req(); k++) {
                if (uu->in(k) == u) {
                  Node* m = find_mem(region->in(k), NULL);
                  if (m != mem) {
                    DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
                    newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
                    if (newmm != u) {
                      _phase->igvn().replace_input_of(uu, k, newmm);
                      nb++;
                      --jmax;
                    }
                  }
                }
              }
              if (nb > 0) {
                --j;
              }
            } else {
              Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
              if (m != mem) {
                DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
                newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
                if (newmm != u) {
                  _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
                  --j, --jmax;
                }
              }
            }
          }
        }
      } else if (u->is_Phi()) {
        assert(u->bottom_type() == Type::MEMORY, "what else?");
        if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
          Node* region = u->in(0);
          bool replaced = false;
          for (uint j = 1; j < u->req(); j++) {
            if (u->in(j) == mem) {
              Node* m = find_mem(region->in(j), NULL);
              Node* nnew = m;
              if (m != mem) {
                if (u->adr_type() == TypePtr::BOTTOM) {
                  // Wide phi input needs a full memory state, not a single
                  // slice: wrap in a MergeMem.
                  mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
                  nnew = mm;
                }
                DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
                _phase->igvn().replace_input_of(u, j, nnew);
                replaced = true;
              }
            }
          }
          if (replaced) {
            --i;
          }
        }
      } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
                 u->adr_type() == NULL) {
        // Wide memory consumers (returns, safepoints, certain calls):
        // they also need a MergeMem carrying the new slice.
        assert(u->adr_type() != NULL ||
               u->Opcode() == Op_Rethrow ||
               u->Opcode() == Op_Return ||
               u->Opcode() == Op_SafePoint ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
               u->Opcode() == Op_CallLeaf, "");
        Node* m = find_mem(_phase->ctrl_or_self(u), u);
        if (m != mem) {
          mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
          _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
          --i;
        }
      } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
        Node* m = find_mem(_phase->ctrl_or_self(u), u);
        if (m != mem) {
          DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
          _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
          --i;
        }
      } else if (u->adr_type() != TypePtr::BOTTOM &&
                 _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
        Node* m = find_mem(_phase->ctrl_or_self(u), u);
        assert(m != mem, "");
        // u is on the wrong slice...
        assert(u->is_ClearArray(), "");
        DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
        _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
        --i;
      }
    }
  }
#ifdef ASSERT
  assert(new_mem->outcnt() > 0, "");
  for (int i = 0; i < phis.length(); i++) {
    Node* n = phis.at(i);
    assert(n->outcnt() > 0, "new phi must have uses now");
  }
#endif
}
2837
2838MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2839 MergeMemNode* mm = MergeMemNode::make(mem);
2840 mm->set_memory_at(_alias, rep_proj);
2841 _phase->register_new_node(mm, rep_ctrl);
2842 return mm;
2843}
2844
// Produce a MergeMem equal to 'u' but with this fixer's alias slice replaced
// by 'rep_proj'. If 'u' has a single use it is updated in place and returned;
// otherwise a fresh clone is built. 'i' is the caller's iterator over mem's
// uses and is rewound when an in-place input replacement disturbs it.
MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
  MergeMemNode* newmm = NULL;
  MergeMemNode* u_mm = u->as_MergeMem();
  Node* c = _phase->get_ctrl(u);
  // Place the result at whichever of u's control and rep_ctrl is dominated
  // by the other (i.e. the later of the two).
  if (_phase->is_dominator(c, rep_ctrl)) {
    c = rep_ctrl;
  } else {
    assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
  }
  if (u->outcnt() == 1) {
    // Single use: mutate u directly instead of cloning.
    if (u->req() > (uint)_alias && u->in(_alias) == mem) {
      // Replacing the input via igvn removes a use of 'mem'; rewind the
      // caller's iterator to compensate.
      _phase->igvn().replace_input_of(u, _alias, rep_proj);
      --i;
    } else {
      _phase->igvn().rehash_node_delayed(u);
      u_mm->set_memory_at(_alias, rep_proj);
    }
    newmm = u_mm;
    _phase->set_ctrl_and_loop(u, c);
  } else {
    // can't simply clone u and then change one of its input because
    // it adds and then removes an edge which messes with the
    // DUIterator
    newmm = MergeMemNode::make(u_mm->base_memory());
    // Copy u's inputs, substituting rep_proj on our slice, taking care to
    // grow the clone when u has more inputs than the fresh MergeMem.
    for (uint j = 0; j < u->req(); j++) {
      if (j < newmm->req()) {
        if (j == (uint)_alias) {
          newmm->set_req(j, rep_proj);
        } else if (newmm->in(j) != u->in(j)) {
          newmm->set_req(j, u->in(j));
        }
      } else if (j == (uint)_alias) {
        newmm->add_req(rep_proj);
      } else {
        newmm->add_req(u->in(j));
      }
    }
    if ((uint)_alias >= u->req()) {
      // u never materialized our slice; force it on the clone.
      newmm->set_memory_at(_alias, rep_proj);
    }
    _phase->register_new_node(newmm, c);
  }
  return newmm;
}
2889
2890bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2891 if (phi->adr_type() == TypePtr::BOTTOM) {
2892 Node* region = phi->in(0);
2893 for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2894 Node* uu = region->fast_out(j);
2895 if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2896 return false;
2897 }
2898 }
2899 return true;
2900 }
2901 return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2902}
2903
// Redirect uses of memory state 'mem' on this fixer's alias slice to
// 'rep_proj' (the memory output of 'replacement', which lives at control
// 'rep_ctrl') for all uses dominated by rep_ctrl. Only nodes that existed
// before this call (_idx < last) are considered.
void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
  uint last = _phase-> C->unique();
  MergeMemNode* mm = NULL;
  assert(mem->bottom_type() == Type::MEMORY, "");
  for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
    Node* u = mem->out(i);
    if (u != replacement && u->_idx < last) {
      if (u->is_MergeMem()) {
        MergeMemNode* u_mm = u->as_MergeMem();
        if (u_mm->memory_at(_alias) == mem) {
          MergeMemNode* newmm = NULL;
          // Examine each user of the MergeMem: those dominated by rep_ctrl
          // are switched to a clone carrying rep_proj on our slice.
          for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
            Node* uu = u->fast_out(j);
            assert(!uu->is_MergeMem(), "chain of MergeMems?");
            if (uu->is_Phi()) {
              if (should_process_phi(uu)) {
                Node* region = uu->in(0);
                int nb = 0;
                for (uint k = 1; k < uu->req(); k++) {
                  // Per-input dominance: only paths below rep_ctrl change.
                  if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
                    if (newmm == NULL) {
                      newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
                    }
                    if (newmm != u) {
                      _phase->igvn().replace_input_of(uu, k, newmm);
                      nb++;
                      --jmax;
                    }
                  }
                }
                if (nb > 0) {
                  // Iterator over u's outs was disturbed; rewind.
                  --j;
                }
              }
            } else {
              if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
                if (newmm == NULL) {
                  newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
                }
                if (newmm != u) {
                  _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
                  --j, --jmax;
                }
              }
            }
          }
        }
      } else if (u->is_Phi()) {
        assert(u->bottom_type() == Type::MEMORY, "what else?");
        Node* region = u->in(0);
        if (should_process_phi(u)) {
          bool replaced = false;
          for (uint j = 1; j < u->req(); j++) {
            if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
              Node* nnew = rep_proj;
              if (u->adr_type() == TypePtr::BOTTOM) {
                // Wide phi input needs a full state: lazily build one
                // MergeMem and reuse it for every such input.
                if (mm == NULL) {
                  mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
                }
                nnew = mm;
              }
              _phase->igvn().replace_input_of(u, j, nnew);
              replaced = true;
            }
          }
          if (replaced) {
            --i;
          }

        }
      } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
                 u->adr_type() == NULL) {
        // Wide memory consumers (returns, safepoints, conditional stores,
        // certain calls) take the whole state via a MergeMem.
        assert(u->adr_type() != NULL ||
               u->Opcode() == Op_Rethrow ||
               u->Opcode() == Op_Return ||
               u->Opcode() == Op_SafePoint ||
               u->Opcode() == Op_StoreIConditional ||
               u->Opcode() == Op_StoreLConditional ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
               (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
               u->Opcode() == Op_CallLeaf, "%s", u->Name());
        if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          if (mm == NULL) {
            mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
          }
          _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
          --i;
        }
      } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
        // Narrow use on our slice: redirect straight to rep_proj.
        if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
          --i;
        }
      }
    }
  }
}
3001
3002ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj)
3003: Node(ctrl, obj) {
3004 ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
3005}
3006
3007const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
3008 if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
3009 return Type::TOP;
3010 }
3011 const Type* t = in(ValueIn)->bottom_type();
3012 if (t == TypePtr::NULL_PTR) {
3013 return t;
3014 }
3015 return t->is_oopptr();
3016}
3017
3018const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
3019 // Either input is TOP ==> the result is TOP
3020 const Type *t2 = phase->type(in(ValueIn));
3021 if( t2 == Type::TOP ) return Type::TOP;
3022
3023 if (t2 == TypePtr::NULL_PTR) {
3024 return t2;
3025 }
3026
3027 const Type* type = t2->is_oopptr()/*->cast_to_nonconst()*/;
3028 return type;
3029}
3030
3031Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
3032 Node* value = in(ValueIn);
3033 if (!needs_barrier(phase, value)) {
3034 return value;
3035 }
3036 return this;
3037}
3038
3039bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
3040 Unique_Node_List visited;
3041 return needs_barrier_impl(phase, n, visited);
3042}
3043
// Recursively decide whether value 'n' may need a load-reference barrier.
// Returns false for values that are provably safe (fresh allocations, call
// results, constants, NULL, already-barriered values, incoming parameters);
// true for values that may reference from-space (e.g. heap loads).
// 'visited' bounds the recursion across Phi cycles.
bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
  if (n == NULL) return false;
  if (visited.member(n)) {
    return false; // Been there.
  }
  visited.push(n);

  if (n->is_Allocate()) {
    // tty->print_cr("optimize barrier on alloc");
    return false;
  }
  if (n->is_Call()) {
    // tty->print_cr("optimize barrier on call");
    return false;
  }

  const Type* type = phase->type(n);
  if (type == Type::TOP) {
    return false;
  }
  if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
    // tty->print_cr("optimize barrier on null");
    return false;
  }
  if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) {
    // tty->print_cr("optimize barrier on constant");
    return false;
  }

  switch (n->Opcode()) {
    case Op_AddP:
      return true; // TODO: Can refine?
    case Op_LoadP:
    // Atomic oop operations produce values loaded from the heap.
    case Op_ShenandoahCompareAndExchangeN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeP:
    case Op_GetAndSetN:
    case Op_GetAndSetP:
      return true;
    case Op_Phi: {
      // A Phi needs a barrier if any of its inputs does.
      for (uint i = 1; i < n->req(); i++) {
        if (needs_barrier_impl(phase, n->in(i), visited)) return true;
      }
      return false;
    }
    case Op_CheckCastPP:
    case Op_CastPP:
      // Casts are transparent: look through to the casted value.
      return needs_barrier_impl(phase, n->in(1), visited);
    case Op_Proj:
      return needs_barrier_impl(phase, n->in(0), visited);
    case Op_ShenandoahLoadReferenceBarrier:
      // tty->print_cr("optimize barrier on barrier");
      return false;
    case Op_Parm:
      // tty->print_cr("optimize barrier on input arg");
      return false;
    case Op_DecodeN:
    case Op_EncodeP:
      // Compressed-oop conversions are transparent too.
      return needs_barrier_impl(phase, n->in(1), visited);
    case Op_LoadN:
      return true;
    case Op_CMoveP:
      // Either selected value may flow out of the CMove.
      return needs_barrier_impl(phase, n->in(2), visited) ||
             needs_barrier_impl(phase, n->in(3), visited);
    case Op_ShenandoahEnqueueBarrier:
      return needs_barrier_impl(phase, n->in(1), visited);
    default:
      break;
  }
#ifdef ASSERT
  // Unhandled opcode in a debug build: dump context and fail loudly so the
  // case list above can be extended.
  tty->print("need barrier on?: ");
  tty->print_cr("ins:");
  n->dump(2);
  tty->print_cr("outs:");
  n->dump(-2);
  ShouldNotReachHere();
#endif
  // Product builds conservatively keep the barrier.
  return true;
}
3124
// Walk the uses of this barrier (transitively through address computation,
// casts, CMoves and Phis) and classify how strong a barrier they require:
// NONE (no use cares), WEAK (value is only read), or STRONG (value is
// stored, compared against non-NULL, passed to a call, etc.). The walk
// stops as soon as STRONG is established.
ShenandoahLoadReferenceBarrierNode::Strength ShenandoahLoadReferenceBarrierNode::get_barrier_strength() {
  Unique_Node_List visited;
  Node_Stack stack(0);
  stack.push(this, 0);
  Strength strength = NONE;
  while (strength != STRONG && stack.size() > 0) {
    Node* n = stack.node();
    if (visited.member(n)) {
      stack.pop();
      continue;
    }
    visited.push(n);
    bool visit_users = false;
    switch (n->Opcode()) {
      case Op_StoreN:
      case Op_StoreP: {
        // Storing the value publishes it: must be fully healed.
        strength = STRONG;
        break;
      }
      case Op_CmpP: {
        // Comparison against anything that may be non-NULL needs the
        // canonical (to-space) address; NULL-checks alone do not.
        if (!n->in(1)->bottom_type()->higher_equal(TypePtr::NULL_PTR) &&
            !n->in(2)->bottom_type()->higher_equal(TypePtr::NULL_PTR)) {
          strength = STRONG;
        }
        break;
      }
      case Op_CallStaticJava: {
        strength = STRONG;
        break;
      }
      // Calls, atomics, locks, stores of any kind, string intrinsics and
      // safepoints all require the strong barrier.
      case Op_CallDynamicJava:
      case Op_CallLeaf:
      case Op_CallLeafNoFP:
      case Op_CompareAndSwapL:
      case Op_CompareAndSwapI:
      case Op_CompareAndSwapB:
      case Op_CompareAndSwapS:
      case Op_CompareAndSwapN:
      case Op_CompareAndSwapP:
      case Op_CompareAndExchangeL:
      case Op_CompareAndExchangeI:
      case Op_CompareAndExchangeB:
      case Op_CompareAndExchangeS:
      case Op_CompareAndExchangeN:
      case Op_CompareAndExchangeP:
      case Op_WeakCompareAndSwapL:
      case Op_WeakCompareAndSwapI:
      case Op_WeakCompareAndSwapB:
      case Op_WeakCompareAndSwapS:
      case Op_WeakCompareAndSwapN:
      case Op_WeakCompareAndSwapP:
      case Op_ShenandoahCompareAndSwapN:
      case Op_ShenandoahCompareAndSwapP:
      case Op_ShenandoahWeakCompareAndSwapN:
      case Op_ShenandoahWeakCompareAndSwapP:
      case Op_ShenandoahCompareAndExchangeN:
      case Op_ShenandoahCompareAndExchangeP:
      case Op_GetAndSetL:
      case Op_GetAndSetI:
      case Op_GetAndSetB:
      case Op_GetAndSetS:
      case Op_GetAndSetP:
      case Op_GetAndSetN:
      case Op_GetAndAddL:
      case Op_GetAndAddI:
      case Op_GetAndAddB:
      case Op_GetAndAddS:
      case Op_ShenandoahEnqueueBarrier:
      case Op_FastLock:
      case Op_FastUnlock:
      case Op_Rethrow:
      case Op_Return:
      case Op_StoreB:
      case Op_StoreC:
      case Op_StoreD:
      case Op_StoreF:
      case Op_StoreL:
      case Op_StoreLConditional:
      case Op_StoreI:
      case Op_StoreIConditional:
      case Op_StoreVector:
      case Op_StrInflatedCopy:
      case Op_StrCompressedCopy:
      case Op_EncodeP:
      case Op_CastP2X:
      case Op_SafePoint:
      case Op_EncodeISOArray:
        strength = STRONG;
        break;
      case Op_LoadB:
      case Op_LoadUB:
      case Op_LoadUS:
      case Op_LoadD:
      case Op_LoadF:
      case Op_LoadL:
      case Op_LoadI:
      case Op_LoadS:
      case Op_LoadN:
      case Op_LoadP:
      case Op_LoadVector: {
        // Plain loads through the value are WEAK, except when the field is
        // final/stable and the corresponding optimization flag allows
        // keeping the current strength.
        const TypePtr* adr_type = n->adr_type();
        int alias_idx = Compile::current()->get_alias_index(adr_type);
        Compile::AliasType* alias_type = Compile::current()->alias_type(alias_idx);
        ciField* field = alias_type->field();
        bool is_static = field != NULL && field->is_static();
        bool is_final = field != NULL && field->is_final();
        bool is_stable = field != NULL && field->is_stable();
        if (ShenandoahOptimizeStaticFinals && is_static && is_final) {
          // Leave strength as is.
        } else if (ShenandoahOptimizeInstanceFinals && !is_static && is_final) {
          // Leave strength as is.
        } else if (ShenandoahOptimizeStableFinals && (is_stable || (adr_type->isa_aryptr() && adr_type->isa_aryptr()->is_stable()))) {
          // Leave strength as is.
        } else {
          strength = WEAK;
        }
        break;
      }
      case Op_AryEq: {
        // Array-equality intrinsic reads both arrays: WEAK unless both are
        // stable and the optimization is enabled.
        Node* n1 = n->in(2);
        Node* n2 = n->in(3);
        if (!ShenandoahOptimizeStableFinals ||
            !n1->bottom_type()->isa_aryptr() || !n1->bottom_type()->isa_aryptr()->is_stable() ||
            !n2->bottom_type()->isa_aryptr() || !n2->bottom_type()->isa_aryptr()->is_stable()) {
          strength = WEAK;
        }
        break;
      }
      case Op_StrEquals:
      case Op_StrComp:
      case Op_StrIndexOf:
      case Op_StrIndexOfChar:
        if (!ShenandoahOptimizeStableFinals) {
           strength = WEAK;
        }
        break;
      case Op_Conv2B:
      case Op_LoadRange:
      case Op_LoadKlass:
      case Op_LoadNKlass:
        // NONE, i.e. leave current strength as is
        break;
      case Op_AddP:
      case Op_CheckCastPP:
      case Op_CastPP:
      case Op_CMoveP:
      case Op_Phi:
      case Op_ShenandoahLoadReferenceBarrier:
        // Value flows through these: classify their users instead.
        visit_users = true;
        break;
      default: {
#ifdef ASSERT
        // Unknown use in a debug build: fail so the case list is extended.
        tty->print_cr("Unknown node in get_barrier_strength:");
        n->dump(1);
        ShouldNotReachHere();
#else
        // Product builds conservatively assume the strongest barrier.
        strength = STRONG;
#endif
      }
    }
#ifdef ASSERT
/*
    if (strength == STRONG) {
      tty->print("strengthening node: ");
      n->dump();
    }
    */
#endif
    stack.pop();
    if (visit_users) {
      for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
        Node* user = n->fast_out(i);
        if (user != NULL) {
          stack.push(user, 0);
        }
      }
    }
  }
  return strength;
}
3305
// Recognize the shape produced by an explicit null check that traps on NULL:
//   if (val != NULL) { ... CastPP(val) ... } else { uncommon_trap }
// where this barrier's input is the CastPP pinned on the IfTrue projection.
// If the pattern matches (and val's type does not include NULL), return the
// uncommon-trap call; otherwise NULL.
CallStaticJavaNode* ShenandoahLoadReferenceBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) {
  Node* val = in(ValueIn);

  const Type* val_t = igvn.type(val);

  // Chain from the CastPP down through IfTrue -> If -> Bool(ne) -> CmpP,
  // checking that the comparison is exactly "casted value vs NULL".
  if (val_t->meet(TypePtr::NULL_PTR) != val_t &&
      val->Opcode() == Op_CastPP &&
      val->in(0) != NULL &&
      val->in(0)->Opcode() == Op_IfTrue &&
      val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
      val->in(0)->in(0)->is_If() &&
      val->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
      val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
      val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
      val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) &&
      val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
    assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), "");
    CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none);
    return unc;
  }
  return NULL;
}
3328