/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "gc/shared/c2/cardTableBarrierSetC2.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"

ArrayCopyNode::ArrayCopyNode(Compile* C, bool alloc_tightly_coupled, bool has_negative_length_guard)
  : CallNode(arraycopy_type(), NULL, TypePtr::BOTTOM),
    _kind(None),
    _alloc_tightly_coupled(alloc_tightly_coupled),
    _has_negative_length_guard(has_negative_length_guard),
    _arguments_validated(false),
    _src_type(TypeOopPtr::BOTTOM),
    _dest_type(TypeOopPtr::BOTTOM) {
  init_class_id(Class_ArrayCopy);
  init_flags(Flag_is_macro);
  C->add_macro_node(this);
}

uint ArrayCopyNode::size_of() const { return sizeof(*this); }

ArrayCopyNode* ArrayCopyNode::make(GraphKit* kit, bool may_throw,
                                   Node* src, Node* src_offset,
                                   Node* dest, Node* dest_offset,
                                   Node* length,
                                   bool alloc_tightly_coupled,
                                   bool has_negative_length_guard,
                                   Node* src_klass, Node* dest_klass,
                                   Node* src_length, Node* dest_length) {

  ArrayCopyNode* ac = new ArrayCopyNode(kit->C, alloc_tightly_coupled, has_negative_length_guard);
  Node* prev_mem = kit->set_predefined_input_for_runtime_call(ac);

  ac->init_req(ArrayCopyNode::Src, src);
  ac->init_req(ArrayCopyNode::SrcPos, src_offset);
  ac->init_req(ArrayCopyNode::Dest, dest);
  ac->init_req(ArrayCopyNode::DestPos, dest_offset);
  ac->init_req(ArrayCopyNode::Length, length);
  ac->init_req(ArrayCopyNode::SrcLen, src_length);
  ac->init_req(ArrayCopyNode::DestLen, dest_length);
  ac->init_req(ArrayCopyNode::SrcKlass, src_klass);
  ac->init_req(ArrayCopyNode::DestKlass, dest_klass);

  if (may_throw) {
    ac->set_req(TypeFunc::I_O, kit->i_o());
    kit->add_safepoint_edges(ac, false);
  }

  return ac;
}

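// Wires the ArrayCopy call's control, i/o and memory projections into
// the GraphKit, including the exception state for a Throwable thrown
// by the call.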
void ArrayCopyNode::connect_outputs(GraphKit* kit) {
  kit->set_all_memory_call(this, true);
  kit->set_control(kit->gvn().transform(new ProjNode(this, TypeFunc::Control)));
  kit->set_i_o(kit->gvn().transform(new ProjNode(this, TypeFunc::I_O)));
  kit->make_slow_call_ex(this, kit->env()->Throwable_klass(), true);
  kit->set_all_memory_call(this);
}

#ifndef PRODUCT
const char* ArrayCopyNode::_kind_names[] = {"arraycopy", "arraycopy, validated arguments", "clone", "oop array clone", "CopyOf", "CopyOfRange"};

void ArrayCopyNode::dump_spec(outputStream *st) const {
  CallNode::dump_spec(st);
  st->print(" (%s%s)", _kind_names[_kind], _alloc_tightly_coupled ? ", tightly coupled allocation" : "");
}

void ArrayCopyNode::dump_compact_spec(outputStream* st) const {
  st->print("%s%s", _kind_names[_kind], _alloc_tightly_coupled ? ",tight" : "");
}
#endif

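// If the length input is a constant, return it; otherwise return -1.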
intptr_t ArrayCopyNode::get_length_if_constant(PhaseGVN *phase) const {
  // check that length is constant
  Node* length = in(ArrayCopyNode::Length);
  const Type* length_type = phase->type(length);

  if (length_type == Type::TOP) {
    return -1;
  }

  assert(is_clonebasic() || is_arraycopy() || is_copyof() || is_copyofrange(), "unexpected array copy type");

  return is_clonebasic() ? length->find_intptr_t_con(-1) : length->find_int_con(-1);
}

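// Returns the number of elements to copy (or, for a basic clone of an
// instance, the number of non-static fields), or -1 if it is unknown.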
int ArrayCopyNode::get_count(PhaseGVN *phase) const {
  Node* src = in(ArrayCopyNode::Src);
  const Type* src_type = phase->type(src);

  if (is_clonebasic()) {
    if (src_type->isa_instptr()) {
      const TypeInstPtr* inst_src = src_type->is_instptr();
      ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
      // ciInstanceKlass::nof_nonstatic_fields() doesn't take injected
      // fields into account. They are rare anyway, so it is easier to
      // simply skip instances with injected fields.
      if ((!inst_src->klass_is_exact() && (ik->is_interface() || ik->has_subklass())) || ik->has_injected_fields()) {
        return -1;
      }
      int nb_fields = ik->nof_nonstatic_fields();
      return nb_fields;
    } else {
      const TypeAryPtr* ary_src = src_type->isa_aryptr();
      assert(ary_src != NULL, "not an array or instance?");
      // Clone passes a length as a rounded number of longs. If we're
      // cloning an array, we'll do it element by element. If the
      // length input to ArrayCopyNode is constant, the length of the
      // input array must be too.
      assert((get_length_if_constant(phase) == -1) == !ary_src->size()->is_con() ||
             phase->is_IterGVN(), "inconsistent");

      if (ary_src->size()->is_con()) {
        return ary_src->size()->get_con();
      }
      return -1;
    }
  }

  return get_length_if_constant(phase);
}

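// Helpers that emit a single load or store through the current GC's
// BarrierSetC2, so that any GC barriers required for the inlined copy
// are applied.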
Node* ArrayCopyNode::load(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, const Type *type, BasicType bt) {
  DecoratorSet decorators = C2_READ_ACCESS | C2_CONTROL_DEPENDENT_LOAD | IN_HEAP | C2_ARRAY_COPY;
  C2AccessValuePtr addr(adr, adr_type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  Node* res = bs->load_at(access, type);
  ctl = access.ctl();
  return res;
}

void ArrayCopyNode::store(BarrierSetC2* bs, PhaseGVN *phase, Node*& ctl, MergeMemNode* mem, Node* adr, const TypePtr* adr_type, Node* val, const Type *type, BasicType bt) {
  DecoratorSet decorators = C2_WRITE_ACCESS | IN_HEAP | C2_ARRAY_COPY;
  if (is_alloc_tightly_coupled()) {
    decorators |= C2_TIGHTLY_COUPLED_ALLOC;
  }
  C2AccessValuePtr addr(adr, adr_type);
  C2AccessValue value(val, type);
  C2OptAccess access(*phase, ctl, mem, decorators, bt, adr->in(AddPNode::Base), addr);
  bs->store_at(access, value);
  ctl = access.ctl();
}

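// For a basic clone of an instance, expand the copy into a load/store
// pair per non-static field. Returns NULL if this isn't such a clone,
// NodeSentinel if the transformation failed, and the resulting memory
// state otherwise.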
Node* ArrayCopyNode::try_clone_instance(PhaseGVN *phase, bool can_reshape, int count) {
  if (!is_clonebasic()) {
    return NULL;
  }

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  Node* ctl = in(TypeFunc::Control);
  Node* in_mem = in(TypeFunc::Memory);

  const Type* src_type = phase->type(src);

  assert(src->is_AddP(), "should be base + off");
  assert(dest->is_AddP(), "should be base + off");
  Node* base_src = src->in(AddPNode::Base);
  Node* base_dest = dest->in(AddPNode::Base);

  MergeMemNode* mem = MergeMemNode::make(in_mem);

  const TypeInstPtr* inst_src = src_type->isa_instptr();

  if (inst_src == NULL) {
    return NULL;
  }

  if (!inst_src->klass_is_exact()) {
    ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
    assert(!ik->is_interface() && !ik->has_subklass(), "inconsistent klass hierarchy");
    phase->C->dependencies()->assert_leaf_type(ik);
  }

  ciInstanceKlass* ik = inst_src->klass()->as_instance_klass();
  assert(ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem, "too many fields");

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  for (int i = 0; i < count; i++) {
    ciField* field = ik->nonstatic_field_at(i);
    int fieldidx = phase->C->alias_type(field)->index();
    const TypePtr* adr_type = phase->C->alias_type(field)->adr_type();
    Node* off = phase->MakeConX(field->offset());
    Node* next_src = phase->transform(new AddPNode(base_src, base_src, off));
    Node* next_dest = phase->transform(new AddPNode(base_dest, base_dest, off));
    BasicType bt = field->layout_type();

    const Type *type;
    if (bt == T_OBJECT) {
      if (!field->type()->is_loaded()) {
        type = TypeInstPtr::BOTTOM;
      } else {
        ciType* field_klass = field->type();
        type = TypeOopPtr::make_from_klass(field_klass->as_klass());
      }
    } else {
      type = Type::get_const_basic_type(bt);
    }

    Node* v = load(bs, phase, ctl, mem, next_src, adr_type, type, bt);
    store(bs, phase, ctl, mem, next_dest, adr_type, v, type, bt);
  }

  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    // Return NodeSentinel to indicate that the transform failed
    return NodeSentinel;
  }

  return mem;
}

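// Computes the addresses and element type needed to expand this copy
// as explicit loads and stores. Returns false if the copy cannot be
// expanded that way (unknown array types, mismatched element types, or
// a GC that requires barriers this expansion cannot emit).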
bool ArrayCopyNode::prepare_array_copy(PhaseGVN *phase, bool can_reshape,
                                       Node*& adr_src,
                                       Node*& base_src,
                                       Node*& adr_dest,
                                       Node*& base_dest,
                                       BasicType& copy_type,
                                       const Type*& value_type,
                                       bool& disjoint_bases) {
  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const Type* src_type = phase->type(src);
  const TypeAryPtr* ary_src = src_type->isa_aryptr();

  if (is_arraycopy() || is_copyofrange() || is_copyof()) {
    const Type* dest_type = phase->type(dest);
    const TypeAryPtr* ary_dest = dest_type->isa_aryptr();
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);

    // A newly allocated object is guaranteed not to overlap with the source object
    disjoint_bases = is_alloc_tightly_coupled();

    if (ary_src == NULL || ary_src->klass() == NULL ||
        ary_dest == NULL || ary_dest->klass() == NULL) {
      // We don't know if arguments are arrays
      return false;
    }

    BasicType src_elem = ary_src->klass()->as_array_klass()->element_type()->basic_type();
    BasicType dest_elem = ary_dest->klass()->as_array_klass()->element_type()->basic_type();
    if (src_elem == T_ARRAY) src_elem = T_OBJECT;
    if (dest_elem == T_ARRAY) dest_elem = T_OBJECT;

    if (src_elem != dest_elem || dest_elem == T_VOID) {
      // We don't know if arguments are arrays of the same type
      return false;
    }

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (bs->array_copy_requires_gc_barriers(is_alloc_tightly_coupled(), dest_elem, false, BarrierSetC2::Optimization)) {
      // It's an object array copy but we can't emit the card marking
      // that is needed
      return false;
    }

    value_type = ary_src->elem();

    base_src = src;
    base_dest = dest;

    uint shift = exact_log2(type2aelembytes(dest_elem));
    uint header = arrayOopDesc::base_offset_in_bytes(dest_elem);

    adr_src = src;
    adr_dest = dest;

    src_offset = Compile::conv_I2X_index(phase, src_offset, ary_src->size());
    dest_offset = Compile::conv_I2X_index(phase, dest_offset, ary_dest->size());
    if (src_offset->is_top() || dest_offset->is_top()) {
      // Offset is out of bounds (the ArrayCopyNode will be removed)
      return false;
    }

    Node* src_scale = phase->transform(new LShiftXNode(src_offset, phase->intcon(shift)));
    Node* dest_scale = phase->transform(new LShiftXNode(dest_offset, phase->intcon(shift)));

    adr_src = phase->transform(new AddPNode(base_src, adr_src, src_scale));
    adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, dest_scale));

    adr_src = new AddPNode(base_src, adr_src, phase->MakeConX(header));
    adr_dest = new AddPNode(base_dest, adr_dest, phase->MakeConX(header));

    adr_src = phase->transform(adr_src);
    adr_dest = phase->transform(adr_dest);

    copy_type = dest_elem;
  } else {
    assert(ary_src != NULL, "should be a clone");
    assert(is_clonebasic(), "should be");

    disjoint_bases = true;
    assert(src->is_AddP(), "should be base + off");
    assert(dest->is_AddP(), "should be base + off");
    adr_src = src;
    base_src = src->in(AddPNode::Base);
    adr_dest = dest;
    base_dest = dest->in(AddPNode::Base);

    assert(phase->type(src->in(AddPNode::Offset))->is_intptr_t()->get_con() == phase->type(dest->in(AddPNode::Offset))->is_intptr_t()->get_con(), "same start offset?");
    BasicType elem = ary_src->klass()->as_array_klass()->element_type()->basic_type();
    if (elem == T_ARRAY) elem = T_OBJECT;

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    if (bs->array_copy_requires_gc_barriers(true, elem, true, BarrierSetC2::Optimization)) {
      return false;
    }

    int diff = arrayOopDesc::base_offset_in_bytes(elem) - phase->type(src->in(AddPNode::Offset))->is_intptr_t()->get_con();
    assert(diff >= 0, "clone should not start after 1st array element");
    if (diff > 0) {
      adr_src = phase->transform(new AddPNode(base_src, adr_src, phase->MakeConX(diff)));
      adr_dest = phase->transform(new AddPNode(base_dest, adr_dest, phase->MakeConX(diff)));
    }

    copy_type = elem;
    value_type = ary_src->elem();
  }
  return true;
}

const TypePtr* ArrayCopyNode::get_address_type(PhaseGVN *phase, Node* n) {
  const Type* at = phase->type(n);
  assert(at != Type::TOP, "unexpected type");
  const TypePtr* atp = at->isa_ptr();
  // adjust atp to be the correct array element address type
  atp = atp->add_offset(Type::OffsetBot);
  return atp;
}

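// If the source and destination may overlap, emit a runtime test on
// the copy positions that decides whether to copy forward (ascending
// addresses) or backward (descending addresses), and set the
// corresponding control paths.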
void ArrayCopyNode::array_copy_test_overlap(PhaseGVN *phase, bool can_reshape, bool disjoint_bases, int count, Node*& forward_ctl, Node*& backward_ctl) {
  Node* ctl = in(TypeFunc::Control);
  if (!disjoint_bases && count > 1) {
    Node* src_offset = in(ArrayCopyNode::SrcPos);
    Node* dest_offset = in(ArrayCopyNode::DestPos);
    assert(src_offset != NULL && dest_offset != NULL, "should be");
    Node* cmp = phase->transform(new CmpINode(src_offset, dest_offset));
    Node* bol = phase->transform(new BoolNode(cmp, BoolTest::lt));
    IfNode* iff = new IfNode(ctl, bol, PROB_FAIR, COUNT_UNKNOWN);

    phase->transform(iff);

    forward_ctl = phase->transform(new IfFalseNode(iff));
    backward_ctl = phase->transform(new IfTrueNode(iff));
  } else {
    forward_ctl = ctl;
  }
}

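// Emits the element-by-element copy in ascending order on the forward
// control path. Returns the resulting memory state, or top if the
// forward path is dead.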
Node* ArrayCopyNode::array_copy_forward(PhaseGVN *phase,
                                        bool can_reshape,
                                        Node*& forward_ctl,
                                        MergeMemNode* mm,
                                        const TypePtr* atp_src,
                                        const TypePtr* atp_dest,
                                        Node* adr_src,
                                        Node* base_src,
                                        Node* adr_dest,
                                        Node* base_dest,
                                        BasicType copy_type,
                                        const Type* value_type,
                                        int count) {
  if (!forward_ctl->is_top()) {
    // copy forward
    mm = mm->clone()->as_MergeMem();

    if (count > 0) {
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      Node* v = load(bs, phase, forward_ctl, mm, adr_src, atp_src, value_type, copy_type);
      store(bs, phase, forward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
      for (int i = 1; i < count; i++) {
        Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
        Node* next_src = phase->transform(new AddPNode(base_src, adr_src, off));
        Node* next_dest = phase->transform(new AddPNode(base_dest, adr_dest, off));
        v = load(bs, phase, forward_ctl, mm, next_src, atp_src, value_type, copy_type);
        store(bs, phase, forward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
      }
    } else if (can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->_worklist.push(adr_src);
      igvn->_worklist.push(adr_dest);
    }
    return mm;
  }
  return phase->C->top();
}

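// Emits the element-by-element copy in descending order on the
// backward control path, used when the destination may overlap a
// source that starts at a lower offset. Returns the resulting memory
// state, or top if the backward path is dead.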
Node* ArrayCopyNode::array_copy_backward(PhaseGVN *phase,
                                         bool can_reshape,
                                         Node*& backward_ctl,
                                         MergeMemNode* mm,
                                         const TypePtr* atp_src,
                                         const TypePtr* atp_dest,
                                         Node* adr_src,
                                         Node* base_src,
                                         Node* adr_dest,
                                         Node* base_dest,
                                         BasicType copy_type,
                                         const Type* value_type,
                                         int count) {
  if (!backward_ctl->is_top()) {
    // copy backward
    mm = mm->clone()->as_MergeMem();

    BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
    assert(copy_type != T_OBJECT || !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Optimization), "only tightly coupled allocations for object arrays");

    if (count > 0) {
      for (int i = count-1; i >= 1; i--) {
        Node* off = phase->MakeConX(type2aelembytes(copy_type) * i);
        Node* next_src = phase->transform(new AddPNode(base_src, adr_src, off));
        Node* next_dest = phase->transform(new AddPNode(base_dest, adr_dest, off));
        Node* v = load(bs, phase, backward_ctl, mm, next_src, atp_src, value_type, copy_type);
        store(bs, phase, backward_ctl, mm, next_dest, atp_dest, v, value_type, copy_type);
      }
      Node* v = load(bs, phase, backward_ctl, mm, adr_src, atp_src, value_type, copy_type);
      store(bs, phase, backward_ctl, mm, adr_dest, atp_dest, v, value_type, copy_type);
    } else if (can_reshape) {
      PhaseIterGVN* igvn = phase->is_IterGVN();
      igvn->_worklist.push(adr_src);
      igvn->_worklist.push(adr_dest);
    }
    return phase->transform(mm);
  }
  return phase->C->top();
}

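// Hooks the expanded copy's control and memory state into the graph in
// place of the ArrayCopyNode's fallthrough projections. Returns false
// if this cannot be done (at parse time when new control was created,
// or when a clone's memory graph doesn't have the expected shape).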
bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,
                                     Node* ctl, Node *mem) {
  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    igvn->set_delay_transform(false);
    if (is_clonebasic()) {
      Node* out_mem = proj_out(TypeFunc::Memory);

      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      if (out_mem->outcnt() != 1 || !out_mem->raw_out(0)->is_MergeMem() ||
          out_mem->raw_out(0)->outcnt() != 1 || !out_mem->raw_out(0)->raw_out(0)->is_MemBar()) {
        assert(bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Optimization), "can only happen with card marking");
        return false;
      }

      igvn->replace_node(out_mem->raw_out(0), mem);

      Node* out_ctl = proj_out(TypeFunc::Control);
      igvn->replace_node(out_ctl, ctl);
    } else {
      // Replace the fallthrough projections of the ArrayCopyNode with
      // the new memory, control and the input I/O.
      CallProjections callprojs;
      extract_projections(&callprojs, true, false);

      if (callprojs.fallthrough_ioproj != NULL) {
        igvn->replace_node(callprojs.fallthrough_ioproj, in(TypeFunc::I_O));
      }
      if (callprojs.fallthrough_memproj != NULL) {
        igvn->replace_node(callprojs.fallthrough_memproj, mem);
      }
      if (callprojs.fallthrough_catchproj != NULL) {
        igvn->replace_node(callprojs.fallthrough_catchproj, ctl);
      }

      // The ArrayCopyNode is not disconnected. It still has the
      // projections for the exception case. Replace the current
      // ArrayCopyNode with a dummy new one with a top() control so
      // that this part of the graph stays consistent but is
      // eventually removed.

      set_req(0, phase->C->top());
      remove_dead_region(phase, can_reshape);
    }
  } else {
    if (in(TypeFunc::Control) != ctl) {
      // we can't return new memory and control from Ideal at parse time
      assert(!is_clonebasic() || UseShenandoahGC, "added control for clone?");
      phase->record_for_igvn(this);
      return false;
    }
  }
  return true;
}

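// Idealization: if this is a small copy with a constant element count
// and no GC barrier requirements, expand it inline as a sequence of
// loads and stores instead of keeping the call.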
Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape)) return this;

  if (StressArrayCopyMacroNode && !can_reshape) {
    phase->record_for_igvn(this);
    return NULL;
  }

  // See if it's a small array copy that we can inline as loads/stores.
  // Here we can only handle:
  // - an arraycopy whose arguments were all validated before and that
  //   doesn't need card marking
  // - a clone that doesn't need card marking

  if (!is_clonebasic() && !is_arraycopy_validated() &&
      !is_copyofrange_validated() && !is_copyof_validated()) {
    return NULL;
  }

  assert(in(TypeFunc::Control) != NULL &&
         in(TypeFunc::Memory) != NULL &&
         in(ArrayCopyNode::Src) != NULL &&
         in(ArrayCopyNode::Dest) != NULL &&
         in(ArrayCopyNode::Length) != NULL &&
         ((in(ArrayCopyNode::SrcPos) != NULL && in(ArrayCopyNode::DestPos) != NULL) ||
          is_clonebasic()), "broken inputs");

  if (in(TypeFunc::Control)->is_top() ||
      in(TypeFunc::Memory)->is_top() ||
      phase->type(in(ArrayCopyNode::Src)) == Type::TOP ||
      phase->type(in(ArrayCopyNode::Dest)) == Type::TOP ||
      (in(ArrayCopyNode::SrcPos) != NULL && in(ArrayCopyNode::SrcPos)->is_top()) ||
      (in(ArrayCopyNode::DestPos) != NULL && in(ArrayCopyNode::DestPos)->is_top())) {
    return NULL;
  }

  int count = get_count(phase);

  if (count < 0 || count > ArrayCopyLoadStoreMaxElem) {
    return NULL;
  }

  Node* mem = try_clone_instance(phase, can_reshape, count);
  if (mem != NULL) {
    return (mem == NodeSentinel) ? NULL : mem;
  }

  Node* adr_src = NULL;
  Node* base_src = NULL;
  Node* adr_dest = NULL;
  Node* base_dest = NULL;
  BasicType copy_type = T_ILLEGAL;
  const Type* value_type = NULL;
  bool disjoint_bases = false;

  if (!prepare_array_copy(phase, can_reshape,
                          adr_src, base_src, adr_dest, base_dest,
                          copy_type, value_type, disjoint_bases)) {
    return NULL;
  }

  Node* src = in(ArrayCopyNode::Src);
  Node* dest = in(ArrayCopyNode::Dest);
  const TypePtr* atp_src = get_address_type(phase, src);
  const TypePtr* atp_dest = get_address_type(phase, dest);

  Node* in_mem = in(TypeFunc::Memory);
  if (!in_mem->is_MergeMem()) {
    in_mem = MergeMemNode::make(in_mem);
  }

  if (can_reshape) {
    assert(!phase->is_IterGVN()->delay_transform(), "cannot delay transforms");
    phase->is_IterGVN()->set_delay_transform(true);
  }

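  // Emit the copy on both the forward and the backward control paths
  // (one of them may be dead) and merge the resulting control and
  // memory states.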
  Node* backward_ctl = phase->C->top();
  Node* forward_ctl = phase->C->top();
  array_copy_test_overlap(phase, can_reshape, disjoint_bases, count, forward_ctl, backward_ctl);

  Node* forward_mem = array_copy_forward(phase, can_reshape, forward_ctl,
                                         in_mem->as_MergeMem(),
                                         atp_src, atp_dest,
                                         adr_src, base_src, adr_dest, base_dest,
                                         copy_type, value_type, count);

  Node* backward_mem = array_copy_backward(phase, can_reshape, backward_ctl,
                                           in_mem->as_MergeMem(),
                                           atp_src, atp_dest,
                                           adr_src, base_src, adr_dest, base_dest,
                                           copy_type, value_type, count);

  Node* ctl = NULL;
  if (!forward_ctl->is_top() && !backward_ctl->is_top()) {
    ctl = new RegionNode(3);
    ctl->init_req(1, forward_ctl);
    ctl->init_req(2, backward_ctl);
    ctl = phase->transform(ctl);
    MergeMemNode* forward_mm = forward_mem->as_MergeMem();
    MergeMemNode* backward_mm = backward_mem->as_MergeMem();
    for (MergeMemStream mms(forward_mm, backward_mm); mms.next_non_empty2(); ) {
      if (mms.memory() != mms.memory2()) {
        Node* phi = new PhiNode(ctl, Type::MEMORY, phase->C->get_adr_type(mms.alias_idx()));
        phi->init_req(1, mms.memory());
        phi->init_req(2, mms.memory2());
        phi = phase->transform(phi);
        mms.set_memory(phi);
      }
    }
    mem = forward_mem;
  } else if (!forward_ctl->is_top()) {
    ctl = forward_ctl;
    mem = forward_mem;
  } else {
    assert(!backward_ctl->is_top(), "no copy?");
    ctl = backward_ctl;
    mem = backward_mem;
  }

  if (can_reshape) {
    assert(phase->is_IterGVN()->delay_transform(), "should be delaying transforms");
    phase->is_IterGVN()->set_delay_transform(false);
  }

  if (!finish_transform(phase, can_reshape, ctl, mem)) {
    return NULL;
  }

  return mem;
}

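// Returns true if this array copy may modify memory described by t_oop.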
bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
  Node* dest = in(ArrayCopyNode::Dest);
  if (dest->is_top()) {
    return false;
  }
  const TypeOopPtr* dest_t = phase->type(dest)->is_oopptr();
  assert(!dest_t->is_known_instance() || _dest_type->is_known_instance(), "result of EA not recorded");
  assert(in(ArrayCopyNode::Src)->is_top() || !phase->type(in(ArrayCopyNode::Src))->is_oopptr()->is_known_instance() ||
         _src_type->is_known_instance(), "result of EA not recorded");

  if (_dest_type != TypeOopPtr::BOTTOM || t_oop->is_known_instance()) {
    assert(_dest_type == TypeOopPtr::BOTTOM || _dest_type->is_known_instance(), "result of EA is known instance");
    return t_oop->instance_id() == _dest_type->instance_id();
  }

  return CallNode::may_modify_arraycopy_helper(dest_t, t_oop, phase);
}

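// Returns true if n is a call (an ArrayCopy or a call to an arraycopy
// stub) that may modify memory described by t_oop; the matching call
// is returned through the call argument.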
bool ArrayCopyNode::may_modify_helper(const TypeOopPtr *t_oop, Node* n, PhaseTransform *phase, CallNode*& call) {
  if (n != NULL &&
      n->is_Call() &&
      n->as_Call()->may_modify(t_oop, phase) &&
      (n->as_Call()->is_ArrayCopy() || n->as_Call()->is_call_to_arraycopystub())) {
    call = n->as_Call();
    return true;
  }
  return false;
}

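// Looking up from the membar mb, returns true if an ArrayCopy or
// arraycopy stub call that may modify memory described by t_oop is
// found; if it is an ArrayCopyNode, it is returned through ac.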
bool ArrayCopyNode::may_modify(const TypeOopPtr *t_oop, MemBarNode* mb, PhaseTransform *phase, ArrayCopyNode*& ac) {

  Node* c = mb->in(0);

  BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  // step over a G1 GC barrier if we're at e.g. a clone with ReduceInitialCardMarks off
  c = bs->step_over_gc_barrier(c);

  CallNode* call = NULL;
  guarantee(c != NULL, "step_over_gc_barrier failed, there must be something to step to.");
  if (c->is_Region()) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != NULL) {
        Node* n = c->in(i)->in(0);
        if (may_modify_helper(t_oop, n, phase, call)) {
          ac = call->isa_ArrayCopy();
          assert(c == mb->in(0), "only for clone");
          return true;
        }
      }
    }
  } else if (may_modify_helper(t_oop, c->in(0), phase, call)) {
    ac = call->isa_ArrayCopy();
#ifdef ASSERT
    bool use_ReduceInitialCardMarks = BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
      static_cast<CardTableBarrierSetC2*>(bs)->use_ReduceInitialCardMarks();
    assert(c == mb->in(0) || (ac != NULL && ac->is_clonebasic() && !use_ReduceInitialCardMarks), "only for clone");
#endif
    return true;
  }

  return false;
}

// Does this array copy modify offsets between offset_lo and offset_hi
// in the destination array?
// If must_modify is false, return true if the copy could write
// between offset_lo and offset_hi.
// If must_modify is true, return true if the copy is guaranteed to
// write between offset_lo and offset_hi.
bool ArrayCopyNode::modifies(intptr_t offset_lo, intptr_t offset_hi, PhaseTransform* phase, bool must_modify) const {
  assert(_kind == ArrayCopy || _kind == CopyOf || _kind == CopyOfRange, "only for real array copies");

  Node* dest = in(Dest);
  Node* dest_pos = in(DestPos);
  Node* len = in(Length);

  const TypeInt* dest_pos_t = phase->type(dest_pos)->isa_int();
  const TypeInt* len_t = phase->type(len)->isa_int();
  const TypeAryPtr* ary_t = phase->type(dest)->isa_aryptr();

  if (dest_pos_t == NULL || len_t == NULL || ary_t == NULL) {
    return !must_modify;
  }

  BasicType ary_elem = ary_t->klass()->as_array_klass()->element_type()->basic_type();
  uint header = arrayOopDesc::base_offset_in_bytes(ary_elem);
  uint elemsize = type2aelembytes(ary_elem);

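  // Bounds (in bytes from the start of the destination array) of the
  // range written by the copy:
  // [header + dest_pos * elemsize, header + (dest_pos + len) * elemsize)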
  jlong dest_pos_plus_len_lo = (((jlong)dest_pos_t->_lo) + len_t->_lo) * elemsize + header;
  jlong dest_pos_plus_len_hi = (((jlong)dest_pos_t->_hi) + len_t->_hi) * elemsize + header;
  jlong dest_pos_lo = ((jlong)dest_pos_t->_lo) * elemsize + header;
  jlong dest_pos_hi = ((jlong)dest_pos_t->_hi) * elemsize + header;

  if (must_modify) {
    if (offset_lo >= dest_pos_hi && offset_hi < dest_pos_plus_len_lo) {
      return true;
    }
  } else {
    if (offset_hi >= dest_pos_lo && offset_lo < dest_pos_plus_len_hi) {
      return true;
    }
  }
  return false;
}