/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/c2/g1BarrierSetC2.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/macro.hpp"
#include "opto/rootnode.hpp"
#include "opto/type.hpp"
#include "utilities/macros.hpp"

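// Call type for the SATB pre-barrier slow path. The domain mirrors the
// runtime entry's C++ signature, roughly (see g1BarrierSetRuntime.hpp):
//
//   void G1BarrierSetRuntime::write_ref_field_pre_entry(oopDesc* orig,
//                                                       JavaThread* thread);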
const TypeFunc *G1BarrierSetC2::write_ref_field_pre_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

  return TypeFunc::make(domain, range);
}

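// Call type for the post-barrier (dirty card queue) slow path; the runtime
// entry takes the card address and the current thread, roughly:
//
//   void G1BarrierSetRuntime::write_ref_field_post_entry(volatile jbyte* card_addr,
//                                                        JavaThread* thread);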
const TypeFunc *G1BarrierSetC2::write_ref_field_post_entry_Type() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Card addr
  fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
  const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+2, fields);

  // create result type (range)
  fields = TypeTuple::fields(0);
  const TypeTuple *range = TypeTuple::make(TypeFunc::Parms, fields);

  return TypeFunc::make(domain, range);
}

#define __ ideal.
/*
 * Determine if the G1 pre-barrier can be removed. The pre-barrier is
 * required by SATB to make sure all objects live at the start of the
 * marking are kept alive: every reference update needs to log the previous
 * reference stored before writing.
 *
 * If the previous value is NULL there is no need to save the old value.
 * References that are NULL are filtered at runtime by the barrier
 * code to avoid unnecessary queuing.
 *
 * However, in the case of newly allocated objects it might be possible to
 * prove that the reference about to be overwritten is NULL at compile
 * time and avoid adding the barrier code completely.
 *
 * The compiler needs to determine that the object in which a field is about
 * to be written is newly allocated, and that no prior store to the same field
 * has happened since the allocation.
 *
 * Returns true if the pre-barrier can be removed.
 */
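/*
 * A rough sketch (illustrative pseudocode, not actual IR) of the shape this
 * routine tries to prove:
 *
 *   o = new Foo();   // AllocateNode reachable from the store address
 *   ...              // no intervening store to o.f on this memory slice
 *   o.f = q;         // previous value is provably NULL => no pre-barrier
 */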
bool G1BarrierSetC2::g1_can_remove_pre_barrier(GraphKit* kit,
                                               PhaseTransform* phase,
                                               Node* adr,
                                               BasicType bt,
                                               uint adr_idx) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  intptr_t size_in_bytes = type2aelembytes(bt);

  Node* mem = kit->memory(adr_idx); // start searching here...

  for (int cnt = 0; cnt < 50; cnt++) {

    if (mem->is_Store()) {

      Node* st_adr = mem->in(MemNode::Address);
      intptr_t st_offset = 0;
      Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset);

      if (st_base == NULL) {
        break; // inscrutable pointer
      }

      // We have found a store with the same base and offset as ours, so break.
      if (st_base == base && st_offset == offset) {
        break;
      }

      if (st_offset != offset && st_offset != Type::OffsetBot) {
        const int MAX_STORE = BytesPerLong;
        if (st_offset >= offset + size_in_bytes ||
            st_offset <= offset - MAX_STORE ||
            st_offset <= offset - mem->as_Store()->memory_size()) {
          // Success: The offsets are provably independent.
          // (You may ask, why not just test st_offset != offset and be done?
          // The answer is that stores of different sizes can co-exist
          // in the same sequence of RawMem effects. We sometimes initialize
          // a whole 'tile' of array elements with a single jint or jlong.)
          mem = mem->in(MemNode::Memory);
          continue; // advance through independent store memory
        }
      }

      if (st_base != base
          && MemNode::detect_ptr_independence(base, alloc, st_base,
                                              AllocateNode::Ideal_allocation(st_base, phase),
                                              phase)) {
        // Success: The bases are provably independent.
        mem = mem->in(MemNode::Memory);
        continue; // advance through independent store memory
      }
    } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

      InitializeNode* st_init = mem->in(0)->as_Initialize();
      AllocateNode* st_alloc = st_init->allocation();

      // Make sure that we are looking at the same allocation site.
      // The alloc variable is guaranteed not to be null here from the earlier check.
      if (alloc == st_alloc) {
        // Check that the initialization is storing NULL so that no previous store
        // has been moved up to directly write a reference.
        Node* captured_store = st_init->find_captured_store(offset,
                                                            type2aelembytes(T_OBJECT),
                                                            phase);
        if (captured_store == NULL || captured_store == st_init->zero_memory()) {
          return true;
        }
      }
    }

    // Unless there is an explicit 'continue', we must bail out here,
    // because 'mem' is an inscrutable memory state (e.g., a call).
    break;
  }

  return false;
}

// G1 pre/post barriers
void G1BarrierSetC2::pre_barrier(GraphKit* kit,
                                 bool do_load,
                                 Node* ctl,
                                 Node* obj,
                                 Node* adr,
                                 uint alias_idx,
                                 Node* val,
                                 const TypeOopPtr* val_type,
                                 Node* pre_val,
                                 BasicType bt) const {
  // Some sanity checks
  // Note: val is unused in this routine.

  if (do_load) {
    // We need to generate the load of the previous value
    assert(obj != NULL, "must have a base");
    assert(adr != NULL, "where are we loading from?");
    assert(pre_val == NULL, "loaded already?");
    assert(val_type != NULL, "need a type");

    if (use_ReduceInitialCardMarks()
        && g1_can_remove_pre_barrier(kit, &kit->gvn(), adr, bt, alias_idx)) {
      return;
    }

  } else {
    // In this case both val_type and alias_idx are unused.
    assert(pre_val != NULL, "must be loaded already");
    // Nothing to be done if pre_val is null.
    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
    assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
  }
  assert(bt == T_OBJECT, "or we shouldn't be here");

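  // The IdealKit code below emits IR corresponding roughly to the following
  // pseudocode (an illustrative sketch of the SATB fast path, not literal
  // source):
  //
  //   if (thread->satb_mark_queue_active()) {
  //     pre_val = do_load ? *adr : pre_val;
  //     if (pre_val != NULL) {
  //       if (index != 0) {              // queue has room
  //         buffer[--index] = pre_val;   // log the previous value
  //       } else {                       // queue full, take the slow path
  //         write_ref_field_pre_entry(pre_val, thread);
  //       }
  //     }
  //   }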
  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);

  float likely = PROB_LIKELY(0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  BasicType active_type = in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 ? T_INT : T_BYTE;
  assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 4 || in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "flag width");

  // Offsets into the thread
  const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  const int index_offset   = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  const int buffer_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Now the actual pointers into the thread
  Node* marking_adr = __ AddP(no_base, tls, __ ConX(marking_offset));
  Node* buffer_adr  = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr   = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some of the values
  Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);

  // if (marking != 0), i.e. SATB marking is active
  __ if_then(marking, BoolTest::ne, zero, unlikely); {
    BasicType index_bt = TypeX_X->basic_type();
    assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 SATBMarkQueue::_index with wrong size.");
    Node* index = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);

    if (do_load) {
      // load original value
      // alias_idx correct??
      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
    }

    // if (pre_val != NULL)
    __ if_then(pre_val, BoolTest::ne, kit->null()); {
      Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

      // is the queue for this thread full?
      __ if_then(index, BoolTest::ne, zeroX, likely); {

        // decrement the index
        Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));

        // Now get the buffer location we will log the previous value into and store it
        Node* log_addr = __ AddP(no_base, buffer, next_index);
        __ store(__ ctrl(), log_addr, pre_val, T_OBJECT, Compile::AliasIdxRaw, MemNode::unordered);
        // update the index
        __ store(__ ctrl(), index_adr, next_index, index_bt, Compile::AliasIdxRaw, MemNode::unordered);

      } __ else_(); {

        // logging buffer is full, call the runtime
        const TypeFunc *tf = write_ref_field_pre_entry_Type();
        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), "write_ref_field_pre_entry", pre_val, tls);
      } __ end_if(); // (index != 0)
    } __ end_if(); // (pre_val != NULL)
  } __ end_if(); // (marking != 0)

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

/*
 * G1, like any GC with a young generation, requires a way to keep track of
 * references from the old generation to the young generation to make sure
 * all live objects are found. G1 also needs to keep track of object
 * references between different regions to enable evacuation of old regions,
 * which is done as part of mixed collections. References are tracked in
 * remembered sets, which are continuously updated as references are written,
 * with the help of the post-barrier.
 *
 * To reduce the number of updates to the remembered sets, the post-barrier
 * filters out updates to fields in objects located in the young generation
 * or in the same region as the reference, stores of NULL, and writes to
 * cards already marked as dirty by an earlier write.
 *
 * Under certain circumstances it is possible to avoid generating the
 * post-barrier completely, if it can be proven at compile time that the
 * object is newly allocated and that no safepoint exists between the
 * allocation and the store.
 *
 * In the case of a slow allocation, the allocation code must handle the
 * barrier as part of the allocation if the allocated object is not located
 * in the nursery; this happens for humongous objects. This is similar to
 * how CMS is required to handle this case, see the comments for the method
 * CollectedHeap::new_deferred_store_barrier and OptoRuntime::new_deferred_store_barrier.
 * A deferred card mark is required for these objects and handled in the
 * above mentioned methods.
 *
 * Returns true if the post barrier can be removed.
 */
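/*
 * Illustrative sketch (pseudocode, not actual IR) of the case this routine
 * recognizes:
 *
 *   o = new Foo();   // Allocate feeding an InitializeNode
 *   o.f = q;         // store whose control is the Initialize projection,
 *                    //   so no safepoint can separate allocation and store
 */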
bool G1BarrierSetC2::g1_can_remove_post_barrier(GraphKit* kit,
                                                PhaseTransform* phase, Node* store,
                                                Node* adr) const {
  intptr_t offset = 0;
  Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset);
  AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase);

  if (offset == Type::OffsetBot) {
    return false; // cannot unalias unless there are precise offsets
  }

  if (alloc == NULL) {
    return false; // No allocation found
  }

  // Start search from Store node
  Node* mem = store->in(MemNode::Control);
  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {

    InitializeNode* st_init = mem->in(0)->as_Initialize();
    AllocateNode* st_alloc = st_init->allocation();

    // Make sure we are looking at the same allocation
    if (alloc == st_alloc) {
      return true;
    }
  }

  return false;
}

//
// Update the card table and add card address to the queue
//
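// Sketch of the emitted logic (illustrative pseudocode only; dirty_card_val()
// is zero, which is why zero is smashed into the card below):
//
//   *card_adr = dirty;                 // ordered w.r.t. the oop store
//   if (index != 0) {
//     buffer[--index] = card_adr;      // enqueue card for refinement
//   } else {
//     write_ref_field_post_entry(card_adr, thread);
//   }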
void G1BarrierSetC2::g1_mark_card(GraphKit* kit,
                                  IdealKit& ideal,
                                  Node* card_adr,
                                  Node* oop_store,
                                  uint oop_alias_idx,
                                  Node* index,
                                  Node* index_adr,
                                  Node* buffer,
                                  const TypeFunc* tf) const {
  Node* zero  = __ ConI(0);
  Node* zeroX = __ ConX(0);
  Node* no_base = __ top();
  BasicType card_bt = T_BYTE;
  // Smash zero into card. MUST BE ORDERED WRT THE OOP STORE.
  __ storeCM(__ ctrl(), card_adr, zero, oop_store, oop_alias_idx, card_bt, Compile::AliasIdxRaw);

  // Now do the queue work
  __ if_then(index, BoolTest::ne, zeroX); {

    Node* next_index = kit->gvn().transform(new SubXNode(index, __ ConX(sizeof(intptr_t))));
    Node* log_addr = __ AddP(no_base, buffer, next_index);

    // Order, see storeCM.
    __ store(__ ctrl(), log_addr, card_adr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered);
    __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);

  } __ else_(); {
    __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), "write_ref_field_post_entry", card_adr, __ thread());
  } __ end_if();

}

void G1BarrierSetC2::post_barrier(GraphKit* kit,
                                  Node* ctl,
                                  Node* oop_store,
                                  Node* obj,
                                  Node* adr,
                                  uint alias_idx,
                                  Node* val,
                                  BasicType bt,
                                  bool use_precise) const {
  // If we are writing a NULL then we need no post barrier.

  if (val != NULL && val->is_Con() && val->bottom_type() == TypePtr::NULL_PTR) {
    // Must be NULL
    const Type* t = val->bottom_type();
    assert(t == Type::TOP || t == TypePtr::NULL_PTR, "must be NULL");
    // No post barrier if writing NULL
    return;
  }

  if (use_ReduceInitialCardMarks() && obj == kit->just_allocated_object(kit->control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (use_ReduceInitialCardMarks()
      && g1_can_remove_post_barrier(kit, &kit->gvn(), oop_store, adr)) {
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != NULL, "");

  IdealKit ideal(kit, true);

  Node* tls = __ thread(); // ThreadLocalStorage

  Node* no_base = __ top();
  float unlikely = PROB_UNLIKELY(0.999);
  Node* young_card = __ ConI((jint)G1CardTable::g1_young_card_val());
  Node* dirty_card = __ ConI((jint)G1CardTable::dirty_card_val());
  Node* zeroX = __ ConX(0);

  const TypeFunc *tf = write_ref_field_post_entry_Type();

  // Offsets into the thread
  const int index_offset  = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
  const int buffer_offset = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());

  // Pointers into the thread

  Node* buffer_adr = __ AddP(no_base, tls, __ ConX(buffer_offset));
  Node* index_adr  = __ AddP(no_base, tls, __ ConX(index_offset));

  // Now some values
  // Use ctrl to avoid hoisting these values past a safepoint, which could
  // potentially reset these fields in the JavaThread.
  Node* index  = __ load(__ ctrl(), index_adr, TypeX_X, TypeX_X->basic_type(), Compile::AliasIdxRaw);
  Node* buffer = __ load(__ ctrl(), buffer_adr, TypeRawPtr::NOTNULL, T_ADDRESS, Compile::AliasIdxRaw);

  // Convert the store obj pointer to an int prior to doing math on it.
  // Must use ctrl to prevent an "integerized oop" from existing across a safepoint.
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide pointer by card size
  Node* card_offset = __ URShiftX(cast, __ ConI(CardTable::card_shift));

  // Combine card table base and card offset
  Node* card_adr = __ AddP(no_base, byte_map_base_node(kit), card_offset);

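  // The filter chain emitted below corresponds roughly to this pseudocode
  // (an illustrative sketch, not literal source):
  //
  //   if (((adr ^ val) >> LogOfHRGrainBytes) != 0    // crosses regions?
  //       && val != NULL
  //       && card != young_card) {
  //     StoreLoad;                                   // membar
  //     if (card != dirty_card) {
  //       g1_mark_card(...);                         // dirty the card, enqueue it
  //     }
  //   }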
  // If we know the value being stored, check whether the store crosses regions.

  if (val != NULL) {
    // Does the store cause us to cross regions?

    // Should be able to do an unsigned compare of region_size instead of
    // an extra shift. Do we have an unsigned compare??
    // Node* region_size = __ ConI(1 << HeapRegion::LogOfHRGrainBytes);
    Node* xor_res = __ URShiftX(__ XorX(cast, __ CastPX(__ ctrl(), val)), __ ConI(HeapRegion::LogOfHRGrainBytes));

    // if (xor_res == 0) same region so skip
    __ if_then(xor_res, BoolTest::ne, zeroX); {

      // No barrier if we are storing a NULL
      __ if_then(val, BoolTest::ne, kit->null(), unlikely); {

        // Ok must mark the card if not already dirty

        // load the original value of the card
        Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);

        __ if_then(card_val, BoolTest::ne, young_card); {
          kit->sync_kit(ideal);
          kit->insert_store_load_for_barrier();
          __ sync_kit(kit);

          Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
          __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
            g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
          } __ end_if();
        } __ end_if();
      } __ end_if();
    } __ end_if();
  } else {
    // The Object.clone() intrinsic uses this path if !ReduceInitialCardMarks.
    // We don't need a barrier here if the destination is a newly allocated object
    // in Eden. Otherwise, GC verification breaks because we assume that cards in Eden
    // are set to 'g1_young_gen' (see G1CardTable::verify_g1_young_region()).
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
    Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
    __ if_then(card_val, BoolTest::ne, young_card); {
      g1_mark_card(kit, ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
    } __ end_if();
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

// Helper that guards and inserts a pre-barrier.
void G1BarrierSetC2::insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset,
                                        Node* pre_val, bool need_mem_bar) const {
  // We could be accessing the referent field of a reference object. If so, when G1
  // is enabled, we need to log the value in the referent field in an SATB buffer.
  // This routine performs some compile time filters and generates suitable
  // runtime filters that guard the pre-barrier code.
  // Also add a memory barrier for non-volatile loads from the referent field
  // to prevent commoning of loads across safepoints.

  // Some compile time checks.

  // If offset is a constant, is it java_lang_ref_Reference::referent_offset?
  const TypeX* otype = offset->find_intptr_t_type();
  if (otype != NULL && otype->is_con() &&
      otype->get_con() != java_lang_ref_Reference::referent_offset) {
    // Constant offset but not the referent_offset so just return
    return;
  }

  // We only need to generate the runtime guards for instances.
  const TypeOopPtr* btype = base_oop->bottom_type()->isa_oopptr();
  if (btype != NULL) {
    if (btype->isa_aryptr()) {
      // Array type so nothing to do
      return;
    }

    const TypeInstPtr* itype = btype->isa_instptr();
    if (itype != NULL) {
      // Can the klass of base_oop be statically determined to be
      // _not_ a sub-class of Reference and _not_ Object?
      ciKlass* klass = itype->klass();
      if (klass->is_loaded() &&
          !klass->is_subtype_of(kit->env()->Reference_klass()) &&
          !kit->env()->Object_klass()->is_subtype_of(klass)) {
        return;
      }
    }
  }

  // The compile time filters did not reject base_oop/offset so
  // we need to generate the following runtime filters
  //
  // if (offset == java_lang_ref_Reference::referent_offset) {
  //   if (instance_of(base, java.lang.ref.Reference)) {
  //     pre_barrier(_, pre_val, ...);
  //   }
  // }

  float likely   = PROB_LIKELY(  0.999);
  float unlikely = PROB_UNLIKELY(0.999);

  IdealKit ideal(kit);

  Node* referent_off = __ ConX(java_lang_ref_Reference::referent_offset);

  __ if_then(offset, BoolTest::eq, referent_off, unlikely); {
    // Update graphKit memory and control from IdealKit.
    kit->sync_kit(ideal);

    Node* ref_klass_con = kit->makecon(TypeKlassPtr::make(kit->env()->Reference_klass()));
    Node* is_instof = kit->gen_instanceof(base_oop, ref_klass_con);

    // Update IdealKit memory and control from graphKit.
    __ sync_kit(kit);

    Node* one = __ ConI(1);
    // is_instof == 0 if base_oop == NULL
    __ if_then(is_instof, BoolTest::eq, one, unlikely); {

      // Update graphKit from IdealKit.
      kit->sync_kit(ideal);

      // Use the pre-barrier to record the value in the referent field
      pre_barrier(kit, false /* do_load */,
                  __ ctrl(),
                  NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                  pre_val /* pre_val */,
                  T_OBJECT);
      if (need_mem_bar) {
        // Add memory barrier to prevent commoning reads from this field
        // across safepoint since GC can change its value.
        kit->insert_mem_bar(Op_MemBarCPUOrder);
      }
      // Update IdealKit from graphKit.
      __ sync_kit(kit);

    } __ end_if(); // _ref_type != ref_none
  } __ end_if(); // offset == referent_offset

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}

#undef __

Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) const {
  DecoratorSet decorators = access.decorators();
  Node* adr = access.addr().node();
  Node* obj = access.base();

  bool mismatched = (decorators & C2_MISMATCHED) != 0;
  bool unknown = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_unordered = (decorators & MO_UNORDERED) != 0;
  bool need_cpu_mem_bar = !is_unordered || mismatched || !in_heap;

  Node* top = Compile::current()->top();
  Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
  Node* load = CardTableBarrierSetC2::load_at_resolved(access, val_type);

  // If we are reading the value of the referent field of a Reference
  // object (either by using Unsafe directly or through reflection)
  // then, if G1 is enabled, we need to record the referent in an
  // SATB log buffer using the pre-barrier mechanism.
  // We also need to add a memory barrier to prevent commoning reads
  // from this field across a safepoint, since GC can change its value.
  bool need_read_barrier = in_heap && (on_weak ||
                                       (unknown && offset != top && obj != top));

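  // Roughly, for the on_weak case (illustrative pseudocode only):
  //
  //   oop referent = *(ref + referent_offset);   // the load above
  //   satb_enqueue(referent);                    // pre-barrier keeps it alive
  //   MemBarCPUOrder;                            // no commoning across safepoints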
  if (!access.is_oop() || !need_read_barrier) {
    return load;
  }

  assert(access.is_parse_access(), "entry not supported at optimization time");
  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
  GraphKit* kit = parse_access.kit();

  if (on_weak) {
    // Use the pre-barrier to record the value in the referent field
    pre_barrier(kit, false /* do_load */,
                kit->control(),
                NULL /* obj */, NULL /* adr */, max_juint /* alias_idx */, NULL /* val */, NULL /* val_type */,
                load /* pre_val */, T_OBJECT);
    // Add memory barrier to prevent commoning reads from this field
    // across safepoint since GC can change its value.
    kit->insert_mem_bar(Op_MemBarCPUOrder);
  } else if (unknown) {
    // We do not require a mem bar inside pre_barrier if need_mem_bar
    // is set: the barriers would be emitted by us.
    insert_pre_barrier(kit, obj, offset, load, !need_cpu_mem_bar);
  }

  return load;
}

bool G1BarrierSetC2::is_gc_barrier_node(Node* node) const {
  if (CardTableBarrierSetC2::is_gc_barrier_node(node)) {
    return true;
  }
  if (node->Opcode() != Op_CallLeaf) {
    return false;
  }
  CallLeafNode* call = node->as_CallLeaf();
  if (call->_name == NULL) {
    return false;
  }

  return strcmp(call->_name, "write_ref_field_pre_entry") == 0 || strcmp(call->_name, "write_ref_field_post_entry") == 0;
}

void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  assert(node->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes");
  // There could be only one user, the URShift node, in the Object.clone()
  // intrinsic, but then the new allocation is passed to the arraycopy stub
  // and cannot be scalar replaced, so we don't check that case.

  // Another case of only one user (Xor) is when the value check for NULL
  // in the G1 post barrier is folded after CCP, so the code which used the
  // URShift is removed.

  // Take Region node before eliminating post barrier since it also
  // eliminates CastP2X node when it has only one user.
  Node* this_region = node->in(0);
  assert(this_region != NULL, "");

  // Remove G1 post barrier.

  // Search for the CastP2X->Xor->URShift->Cmp path which checks if the store
  // is done to a region different from the value's region, and replace the
  // Cmp with #0 (false) to collapse the G1 post barrier.
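  //
  // Shape being matched (an illustrative sketch of the node chain):
  //
  //   CastP2X -> XorX -> URShiftX -> CmpX -> Bool(ne) -> If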
  Node* xorx = node->find_out_with(Op_XorX);
  if (xorx != NULL) {
    Node* shift = xorx->unique_out();
    Node* cmpx = shift->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing region check in G1 post barrier");
    macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));

    // Remove G1 pre barrier.

    // Search "if (marking != 0)" check and set it to "false".
    // There is no G1 pre barrier if previous stored value is NULL
    // (for example, after initialization).
    if (this_region->is_Region() && this_region->req() == 3) {
      int ind = 1;
      if (!this_region->in(ind)->is_IfFalse()) {
        ind = 2;
      }
      if (this_region->in(ind)->is_IfFalse() &&
          this_region->in(ind)->in(0)->Opcode() == Op_If) {
        Node* bol = this_region->in(ind)->in(0)->in(1);
        assert(bol->is_Bool(), "");
        cmpx = bol->in(1);
        if (bol->as_Bool()->_test._test == BoolTest::ne &&
            cmpx->is_Cmp() && cmpx->in(2) == macro->intcon(0) &&
            cmpx->in(1)->is_Load()) {
          Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address);
          const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
          if (adr->is_AddP() && adr->in(AddPNode::Base) == macro->top() &&
              adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
              adr->in(AddPNode::Offset) == macro->MakeConX(marking_offset)) {
            macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
          }
        }
      }
    }
  } else {
    assert(!use_ReduceInitialCardMarks(), "can only happen with card marking");
    // This is a G1 post barrier emitted by the Object.clone() intrinsic.
    // Search for the CastP2X->URShiftX->AddP->LoadB->Cmp path which checks if the card
    // is marked as young_gen and replace the Cmp with 0 (false) to collapse the barrier.
    Node* shift = node->find_out_with(Op_URShiftX);
    assert(shift != NULL, "missing G1 post barrier");
    Node* addp = shift->unique_out();
    Node* load = addp->find_out_with(Op_LoadB);
    assert(load != NULL, "missing G1 post barrier");
    Node* cmpx = load->unique_out();
    assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() &&
           cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne,
           "missing card value check in G1 post barrier");
    macro->replace_node(cmpx, macro->makecon(TypeInt::CC_EQ));
    // There is no G1 pre barrier in this case
  }
  // Now CastP2X can be removed, since it is used only on a dead path
  // that stays alive only until IGVN optimizes it away.
  assert(node->outcnt() == 0 || node->unique_out()->Opcode() == Op_URShiftX, "");
  macro->replace_node(node, macro->top());
}

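// Step back over the control flow that the G1 post barrier emitted: when the
// nested Region shape reaching a CallLeaf(write_ref_field_post_entry) produced
// by post_barrier() is recognized, return the control node that preceded the
// barrier. (A descriptive note; the exact shape is asserted below.)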
Node* G1BarrierSetC2::step_over_gc_barrier(Node* c) const {
  if (!use_ReduceInitialCardMarks() &&
      c != NULL && c->is_Region() && c->req() == 3) {
    for (uint i = 1; i < c->req(); i++) {
      if (c->in(i) != NULL && c->in(i)->is_Region() &&
          c->in(i)->req() == 3) {
        Node* r = c->in(i);
        for (uint j = 1; j < r->req(); j++) {
          if (r->in(j) != NULL && r->in(j)->is_Proj() &&
              r->in(j)->in(0) != NULL &&
              r->in(j)->in(0)->Opcode() == Op_CallLeaf &&
              r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry)) {
            Node* call = r->in(j)->in(0);
            c = c->in(i == 1 ? 2 : 1);
            if (c != NULL) {
              c = c->in(0);
              if (c != NULL) {
                c = c->in(0);
                assert(call->in(0) == NULL ||
                       call->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0)->in(0) == NULL ||
                       call->in(0)->in(0)->in(0)->in(0)->in(0) == NULL ||
                       c == call->in(0)->in(0)->in(0)->in(0)->in(0), "bad barrier shape");
                return c;
              }
            }
          }
        }
      }
    }
  }
  return c;
}

#ifdef ASSERT
void G1BarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
  if (phase != BarrierSetC2::BeforeCodeGen) {
    return;
  }
  // Verify G1 pre-barriers
  const int marking_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());

  ResourceArea* area = Thread::current()->resource_area();
  Unique_Node_List visited(area);
  Node_List worklist(area);
  // We're going to walk control flow backwards starting from the Root
  worklist.push(compile->root());
  while (worklist.size() > 0) {
    Node* x = worklist.pop();
    if (x == NULL || x == compile->top()) continue;
    if (visited.member(x)) {
      continue;
    } else {
      visited.push(x);
    }

    if (x->is_Region()) {
      for (uint i = 1; i < x->req(); i++) {
        worklist.push(x->in(i));
      }
    } else {
      worklist.push(x->in(0));
      // We are looking for the pattern:
      //                            /->ThreadLocal
      // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset)
      //              \->ConI(0)
      // We want to verify that the If and the LoadB have the same control
      // See GraphKit::g1_write_barrier_pre()
      if (x->is_If()) {
        IfNode* iff = x->as_If();
        if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) {
          CmpNode* cmp = iff->in(1)->in(1)->as_Cmp();
          if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0
              && cmp->in(1)->is_Load()) {
            LoadNode* load = cmp->in(1)->as_Load();
            if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal
                && load->in(2)->in(3)->is_Con()
                && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) {

              Node* if_ctrl = iff->in(0);
              Node* load_ctrl = load->in(0);

              if (if_ctrl != load_ctrl) {
                // Skip possible CProj->NeverBranch in infinite loops
                if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj)
                    && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) {
                  if_ctrl = if_ctrl->in(0)->in(0);
                }
              }
              assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
            }
          }
        }
      }
    }
  }
}
#endif

bool G1BarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const {
  if (opcode == Op_StoreP) {
    Node* adr = n->in(MemNode::Address);
    const Type* adr_type = gvn->type(adr);
    // Pointer stores in G1 barriers look like unsafe accesses.
    // Ignore such stores so that non-escaping allocations can still be
    // scalar replaced.
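    // Shape being recognized (an illustrative sketch of the node chain): a
    // store through the SATB or dirty card queue buffer pointer that was
    // loaded from thread-local data:
    //
    //   StoreP(AddP(LoadP(AddP(ThreadLocal, buffer_offset)), index), oop)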
    if (adr_type->isa_rawptr() && adr->is_AddP()) {
      Node* base = conn_graph->get_addp_base(adr);
      if (base->Opcode() == Op_LoadP &&
          base->in(MemNode::Address)->is_AddP()) {
        adr = base->in(MemNode::Address);
        Node* tls = conn_graph->get_addp_base(adr);
        if (tls->Opcode() == Op_ThreadLocal) {
          int offs = (int) gvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
          const int buf_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
          if (offs == buf_offset) {
            return true; // G1 pre barrier previous oop value store.
          }
          if (offs == in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset())) {
            return true; // G1 post barrier card address store.
          }
        }
      }
    }
  }
  return false;
}