1/*
2 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "ci/ciUtilities.hpp"
27#include "gc/shared/cardTable.hpp"
28#include "gc/shared/cardTableBarrierSet.hpp"
29#include "gc/shared/c2/cardTableBarrierSetC2.hpp"
30#include "opto/arraycopynode.hpp"
31#include "opto/graphKit.hpp"
32#include "opto/idealKit.hpp"
33#include "opto/macro.hpp"
34#include "utilities/macros.hpp"
35
36#define __ ideal.
37
38Node* CardTableBarrierSetC2::byte_map_base_node(GraphKit* kit) const {
39 // Get base of card map
40 CardTable::CardValue* card_table_base = ci_card_table_address();
41 if (card_table_base != NULL) {
42 return kit->makecon(TypeRawPtr::make((address)card_table_base));
43 } else {
44 return kit->null();
45 }
46}
47
// vanilla/CMS post barrier
// Insert a write-barrier store. This is to let generational GC work; we have
// to flag all oop-stores before the next GC point.
//
// kit         - graph kit used to emit the barrier IR
// ctl         - control input (appears unused here; IdealKit tracks control)
// oop_store   - the oop store this barrier covers (memory edge for StoreCM)
// obj         - the object being stored into (card-mark base if !use_precise)
// adr         - precise address of the stored field/element
// adr_idx     - alias index of the covered store (passed on to StoreCM)
// val         - the stored value; NULL means "unknown, assume an oop"
// bt          - basic type of the store
// use_precise - mark the card of 'adr' (arrays) vs. the card of 'obj'
void CardTableBarrierSetC2::post_barrier(GraphKit* kit,
                                         Node* ctl,
                                         Node* oop_store,
                                         Node* obj,
                                         Node* adr,
                                         uint adr_idx,
                                         Node* val,
                                         BasicType bt,
                                         bool use_precise) const {
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
  CardTable* ct = ctbs->card_table();
  // No store check needed if we're storing a NULL or an old object
  // (latter case is probably a string constant). The concurrent
  // mark sweep garbage collector, however, needs to have all nonNull
  // oop updates flagged via card-marks.
  if (val != NULL && val->is_Con()) {
    // must be either an oop or NULL
    const Type* t = val->bottom_type();
    if (t == TypePtr::NULL_PTR || t == Type::TOP)
      // stores of null never (?) need barriers
      return;
  }

  if (use_ReduceInitialCardMarks()
      && obj == kit->just_allocated_object(kit->control())) {
    // We can skip marks on a freshly-allocated object in Eden.
    // Keep this code in sync with new_deferred_store_barrier() in runtime.cpp.
    // That routine informs GC to take appropriate compensating steps,
    // upon a slow-path allocation, so as to make this card-mark
    // elision safe.
    return;
  }

  if (!use_precise) {
    // All card marks for a (non-array) instance are in one place:
    adr = obj;
  }
  // (Else it's an array (or unknown), and we want more precise card marks.)
  assert(adr != NULL, "");

  // Switch to IdealKit for the rest: it manages control/memory state while
  // we build the (possibly conditional) card-mark subgraph.
  IdealKit ideal(kit, true);

  // Convert the pointer to an int prior to doing math on it
  Node* cast = __ CastPX(__ ctrl(), adr);

  // Divide by card size to get the index of the card covering 'adr'.
  Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );

  // Combine card table base and card offset.  The AddP base is top()
  // because this is raw (non-oop) pointer arithmetic into the card table.
  Node* card_adr = __ AddP(__ top(), byte_map_base_node(kit), card_offset );

  // Get the alias_index for raw card-mark memory
  int adr_type = Compile::AliasIdxRaw;
  Node* zero = __ ConI(0); // Dirty card value

  if (UseCondCardMark) {
    if (ct->scanned_concurrently()) {
      // A concurrently scanned table needs a StoreLoad fence before the
      // card is read back; emit it via GraphKit and resync IdealKit.
      kit->insert_store_load_for_barrier();
      __ sync_kit(kit);
    }
    // The classic GC reference write barrier is typically implemented
    // as a store into the global card mark table. Unfortunately
    // unconditional stores can result in false sharing and excessive
    // coherence traffic as well as false transactional aborts.
    // UseCondCardMark enables MP "polite" conditional card mark
    // stores. In theory we could relax the load from ctrl() to
    // no_ctrl, but that doesn't buy much latitude.
    Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, T_BYTE, adr_type);
    __ if_then(card_val, BoolTest::ne, zero);  // only store if not already dirty
  }

  // Smash zero into card
  if(!ct->scanned_concurrently()) {
    __ store(__ ctrl(), card_adr, zero, T_BYTE, adr_type, MemNode::unordered);
  } else {
    // Specialized path for CM store barrier: StoreCM keeps the card mark
    // ordered relative to the covered oop store (passed as a memory edge).
    __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, T_BYTE, adr_type);
  }

  if (UseCondCardMark) {
    __ end_if();  // close the "card not yet dirty" test opened above
  }

  // Final sync IdealKit and GraphKit.
  kit->final_sync(ideal);
}
137
138void CardTableBarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const {
139 BarrierSetC2::clone(kit, src, dst, size, is_array);
140 const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
141
142 // If necessary, emit some card marks afterwards. (Non-arrays only.)
143 bool card_mark = !is_array && !use_ReduceInitialCardMarks();
144 if (card_mark) {
145 assert(!is_array, "");
146 // Put in store barrier for any and all oops we are sticking
147 // into this object. (We could avoid this if we could prove
148 // that the object type contains no oop fields at all.)
149 Node* no_particular_value = NULL;
150 Node* no_particular_field = NULL;
151 int raw_adr_idx = Compile::AliasIdxRaw;
152 post_barrier(kit, kit->control(),
153 kit->memory(raw_adr_type),
154 dst,
155 no_particular_field,
156 raw_adr_idx,
157 no_particular_value,
158 T_OBJECT,
159 false);
160 }
161}
162
// Whether card marks on freshly allocated objects may be elided
// (see the early return in post_barrier and the guard in clone),
// as controlled by the ReduceInitialCardMarks flag.
bool CardTableBarrierSetC2::use_ReduceInitialCardMarks() const {
  return ReduceInitialCardMarks;
}
166
167bool CardTableBarrierSetC2::is_gc_barrier_node(Node* node) const {
168 return ModRefBarrierSetC2::is_gc_barrier_node(node) || node->Opcode() == Op_StoreCM;
169}
170
// Remove a dead card-mark subgraph rooted at the CastP2X emitted by
// post_barrier (presumably invoked when the guarded allocation is being
// eliminated during macro expansion — confirm against callers).
// Expected shape, matching post_barrier above:
//   CastP2X -> URShiftX (card_offset) -> AddP (card_adr) -> card stores,
// plus, under UseCondCardMark, a LoadB of the card value.
void CardTableBarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
  assert(node->Opcode() == Op_CastP2X, "ConvP2XNode required");
  Node *shift = node->unique_out();   // the URShiftX computing the card offset
  Node *addp = shift->unique_out();   // the AddP computing the card address
  // Walk all users of the card address backwards (replace_node edits the
  // out-array, so iterate from the last output down).
  for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
    Node *mem = addp->last_out(j);
    if (UseCondCardMark && mem->is_Load()) {
      assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
      // The load is checking if the card has been written so
      // replace it with zero to fold the test.
      macro->replace_node(mem, macro->intcon(0));
      continue;
    }
    // Splice the card-mark store out of the memory graph by replacing it
    // with its own memory input.
    assert(mem->is_Store(), "store required");
    macro->replace_node(mem, mem->in(MemNode::Memory));
  }
}
188
189bool CardTableBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const {
190 bool is_oop = type == T_OBJECT || type == T_ARRAY;
191 return is_oop && (!tightly_coupled_alloc || !use_ReduceInitialCardMarks());
192}
193