/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "gc/shared/c1/barrierSetC1.hpp"
#include "utilities/macros.hpp"

#ifndef PATCHED_ADDR
#define PATCHED_ADDR (max_jint)
#endif

#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

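// Builds the LIR address for an access. Array accesses go through
// emit_array_address so the array layout is taken into account; field
// accesses that need patching get a placeholder offset (PATCHED_ADDR) so the
// patching code can later find the instruction to fix up. When
// resolve_in_register is true, the address is additionally materialized into
// a fresh pointer register with leal, as the atomic accesses below require.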
LIR_Opr BarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) {
  DecoratorSet decorators = access.decorators();
  bool is_array = (decorators & IS_ARRAY) != 0;
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;

  LIRItem& base = access.base().item();
  LIR_Opr offset = access.offset().opr();
  LIRGenerator* gen = access.gen();

  LIR_Opr addr_opr;
  if (is_array) {
    addr_opr = LIR_OprFact::address(gen->emit_array_address(base.result(), offset, access.type()));
  } else if (needs_patching) {
    // We need to patch the offset in the instruction, so don't allow
    // generate_address to try to be smart about emitting -1.
    // Otherwise the patching code won't know how to find the
    // instruction to patch.
    addr_opr = LIR_OprFact::address(new LIR_Address(base.result(), PATCHED_ADDR, access.type()));
  } else {
    addr_opr = LIR_OprFact::address(gen->generate_address(base.result(), offset, 0, 0, access.type()));
  }

  if (resolve_in_register) {
    LIR_Opr resolved_addr = gen->new_pointer_register();
    if (needs_patching) {
      __ leal(addr_opr, resolved_addr, lir_patch_normal, access.patch_emit_info());
      access.clear_decorators(C1_NEEDS_PATCHING);
    } else {
      __ leal(addr_opr, resolved_addr);
    }
    return LIR_OprFact::address(new LIR_Address(resolved_addr, access.type()));
  } else {
    return addr_opr;
  }
}

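// Heap store entry point: resolve the address, record it on the access, and
// hand off to store_at_resolved, which barrier sets may override to wrap the
// store in pre/post barrier code.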
void BarrierSetC1::store_at(LIRAccess& access, LIR_Opr value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  LIR_Opr resolved = resolve_address(access, false);
  access.set_resolved_addr(resolved);
  store_at_resolved(access, value);
}

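// Heap load entry point: resolve the address, record it on the access, and
// hand off to load_at_resolved.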
void BarrierSetC1::load_at(LIRAccess& access, LIR_Opr result) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  LIR_Opr resolved = resolve_address(access, false);
  access.set_resolved_addr(resolved);
  load_at_resolved(access, result);
}

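// Load from outside the heap (e.g. native memory). The caller is expected to
// have set the resolved address on the access already.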
void BarrierSetC1::load(LIRAccess& access, LIR_Opr result) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(!in_heap, "consider using load_at");
  load_at_resolved(access, result);
}

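// Atomic compare-and-swap in the heap. The address is materialized into a
// pointer register (resolve_in_register == true) before delegating to
// atomic_cmpxchg_at_resolved.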
LIR_Opr BarrierSetC1::atomic_cmpxchg_at(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  access.load_address();

  LIR_Opr resolved = resolve_address(access, true);
  access.set_resolved_addr(resolved);
  return atomic_cmpxchg_at_resolved(access, cmp_value, new_value);
}

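// Atomic exchange in the heap; same address handling as atomic_cmpxchg_at.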
LIR_Opr BarrierSetC1::atomic_xchg_at(LIRAccess& access, LIRItem& value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  access.load_address();

  LIR_Opr resolved = resolve_address(access, true);
  access.set_resolved_addr(resolved);
  return atomic_xchg_at_resolved(access, value);
}

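// Atomic add in the heap; same address handling as atomic_cmpxchg_at.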
LIR_Opr BarrierSetC1::atomic_add_at(LIRAccess& access, LIRItem& value) {
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  assert(in_heap, "not supported yet");

  access.load_address();

  LIR_Opr resolved = resolve_address(access, true);
  access.set_resolved_addr(resolved);
  return atomic_add_at_resolved(access, value);
}

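// Emits the store proper once the address is resolved. Boolean values are
// masked first if requested. Volatile stores are bracketed by barriers: a
// releasing barrier before the store and, when IRIW is not already handled
// on the load side (support_IRIW_for_not_multiple_copy_atomic_cpu is false),
// a full barrier after it.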
void BarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) {
  DecoratorSet decorators = access.decorators();
  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses);
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
  LIRGenerator* gen = access.gen();

  if (mask_boolean) {
    value = gen->mask_boolean(access.base().opr(), value, access.access_emit_info());
  }

  if (is_volatile) {
    __ membar_release();
  }

  LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  if (is_volatile && !needs_patching) {
    gen->volatile_field_store(value, access.resolved_addr()->as_address_ptr(), access.access_emit_info());
  } else {
    __ store(value, access.resolved_addr()->as_address_ptr(), access.access_emit_info(), patch_code);
  }

  if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ membar();
  }
}

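// Emits the load proper once the address is resolved. On CPUs that are not
// multiple-copy atomic, a full barrier precedes a volatile load (IRIW), and
// every volatile load is followed by an acquiring barrier. Loads from native
// memory take the move_wide path and skip the volatile field handling.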
void BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();
  bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses);
  bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0;
  bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;

  if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile) {
    __ membar();
  }

  LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
  if (in_native) {
    __ move_wide(access.resolved_addr()->as_address_ptr(), result);
  } else if (is_volatile && !needs_patching) {
    gen->volatile_field_load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info());
  } else {
    __ load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info(), patch_code);
  }

  if (is_volatile) {
    __ membar_acquire();
  }

  /* Normalize the boolean value returned by an unsafe operation, i.e., result = (result != 0) ? 1 : 0. */
  if (mask_boolean) {
    LabelObj* equalZeroLabel = new LabelObj();
    __ cmp(lir_cond_equal, result, 0);
    __ branch(lir_cond_equal, T_BOOLEAN, equalZeroLabel->label());
    __ move(LIR_OprFact::intConst(1), result);
    __ branch_destination(equalZeroLabel->label());
  }
}

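// The default resolved atomic operations simply delegate to the
// LIRGenerator; barrier sets that need barrier code around atomics can
// override them.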
LIR_Opr BarrierSetC1::atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) {
  LIRGenerator* gen = access.gen();
  return gen->atomic_cmpxchg(access.type(), access.resolved_addr(), cmp_value, new_value);
}

LIR_Opr BarrierSetC1::atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value) {
  LIRGenerator* gen = access.gen();
  return gen->atomic_xchg(access.type(), access.resolved_addr(), value);
}

LIR_Opr BarrierSetC1::atomic_add_at_resolved(LIRAccess& access, LIRItem& value) {
  LIRGenerator* gen = access.gen();
  return gen->atomic_add(access.type(), access.resolved_addr(), value);
}

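// Emits the runtime checks that guard the pre-barrier for a potential
// Reference.referent load. Each emitted check branches to cont when the
// barrier can be skipped; control falls through when it must be applied.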
void BarrierSetC1::generate_referent_check(LIRAccess& access, LabelObj* cont) {
  // We might be reading the value of the referent field of a
  // Reference object in order to attach it back to the live
  // object graph. If G1 is enabled then we need to record
  // the value that is being returned in an SATB log buffer.
  //
  // We need to generate code similar to the following...
  //
  // if (offset == java_lang_ref_Reference::referent_offset) {
  //   if (src != NULL) {
  //     if (klass(src)->reference_type() != REF_NONE) {
  //       pre_barrier(..., value, ...);
  //     }
  //   }
  // }

  bool gen_pre_barrier = true;     // Assume we need to generate pre_barrier.
  bool gen_offset_check = true;    // Assume we need to generate the offset guard.
  bool gen_source_check = true;    // Assume we need to check the src object for null.
  bool gen_type_check = true;      // Assume we need to check the reference_type.

  LIRGenerator* gen = access.gen();

  LIRItem& base = access.base().item();
  LIR_Opr offset = access.offset().opr();

  if (offset->is_constant()) {
    LIR_Const* constant = offset->as_constant_ptr();
    jlong off_con = (constant->type() == T_INT ?
                     (jlong)constant->as_jint() :
                     constant->as_jlong());

    if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
      // The constant offset is something other than referent_offset.
      // We can skip generating/checking the remaining guards and
      // skip generation of the code stub.
      gen_pre_barrier = false;
    } else {
      // The constant offset is the same as referent_offset -
      // we do not need to generate a runtime offset check.
      gen_offset_check = false;
    }
  }

  // We don't need to generate the stub if the source object is an array.
  if (gen_pre_barrier && base.type()->is_array()) {
    gen_pre_barrier = false;
  }

  if (gen_pre_barrier) {
    // We still need to continue with the checks.
    if (base.is_constant()) {
      ciObject* src_con = base.get_jobject_constant();
      guarantee(src_con != NULL, "no source constant");

      if (src_con->is_null_object()) {
        // The constant src object is null - we can skip
        // generating the code stub.
        gen_pre_barrier = false;
      } else {
        // Non-null constant source object. We still have to generate
        // the slow stub - but we don't need to generate the runtime
        // null object check.
        gen_source_check = false;
      }
    }
  }
  if (gen_pre_barrier && !PatchALot) {
    // Can the klass of the object be statically determined to be
    // a sub-class of Reference?
    ciType* type = base.value()->declared_type();
    if ((type != NULL) && type->is_loaded()) {
      if (type->is_subtype_of(gen->compilation()->env()->Reference_klass())) {
        gen_type_check = false;
      } else if (type->is_klass() &&
                 !gen->compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
        // Not Reference and not Object klass.
        gen_pre_barrier = false;
      }
    }
  }

  if (gen_pre_barrier) {
    // We may have to generate more than one runtime check here. Let's
    // start with the offset check.
    // Allocate a temp register for the base and load it here; otherwise
    // the control flow below may confuse the register allocator.
    LIR_Opr base_reg = gen->new_register(T_OBJECT);
    __ move(base.result(), base_reg);
    if (gen_offset_check) {
      // if (offset != referent_offset) -> continue
      // If offset is an int then we can do the comparison with the
      // referent_offset constant; otherwise we need to move
      // referent_offset into a temporary register and generate
      // a reg-reg compare.

      LIR_Opr referent_off;

      if (offset->type() == T_INT) {
        referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
      } else {
        assert(offset->type() == T_LONG, "what else?");
        referent_off = gen->new_register(T_LONG);
        __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
      }
      __ cmp(lir_cond_notEqual, offset, referent_off);
      __ branch(lir_cond_notEqual, offset->type(), cont->label());
    }
    if (gen_source_check) {
      // offset is a const and equals referent_offset
      // if (source == null) -> continue
      __ cmp(lir_cond_equal, base_reg, LIR_OprFact::oopConst(NULL));
      __ branch(lir_cond_equal, T_OBJECT, cont->label());
    }
    if (gen_type_check) {
      // We have determined that offset == referent_offset && src != null.
      // if (src->_klass->_reference_type == REF_NONE) -> continue
      LIR_Opr src_klass = gen->new_register(T_METADATA);
      __ move(new LIR_Address(base_reg, oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
      LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
      LIR_Opr reference_type = gen->new_register(T_INT);
      __ move(reference_type_addr, reference_type);
      __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
      __ branch(lir_cond_equal, T_INT, cont->label());
    }
  }
}

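// By default, resolving an object is the identity operation; barrier sets
// that must resolve objects before use (e.g. to preserve a to-space
// invariant) can override this.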
LIR_Opr BarrierSetC1::resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj) {
  return obj;
}