/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "utilities/macros.hpp"

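// The '__' shorthand appends LIR instructions to the current generator's LIR
// list; in debug builds it also records the C++ source location that emitted
// each instruction, which helps when reading LIR dumps.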
#ifdef ASSERT
#define __ gen->lir(__FILE__, __LINE__)->
#else
#define __ gen->lir()->
#endif

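// The barrier code stubs themselves are platform-independent; emitting the
// actual slow-path code is delegated to the platform-specific
// G1BarrierSetAssembler.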
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_pre_barrier_stub(ce, this);
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
  bs->gen_post_barrier_stub(ce, this);
}

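// Generate the C1 SATB (snapshot-at-the-beginning) pre-write barrier. While
// concurrent marking is active, the value a reference field held before the
// store (pre_val) must be recorded in the thread-local SATB queue so that the
// marking snapshot stays complete. The inline fast path only tests the
// per-thread marking-active flag; loading pre_val (when do_load is true) and
// the enqueue itself happen out of line in G1PreBarrierStub.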
void G1BarrierSetC1::pre_barrier(LIRAccess& access, LIR_Opr addr_opr,
                                 LIR_Opr pre_val, CodeEmitInfo* info) {
  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();

  // First we test whether marking is in progress.
  BasicType flag_type;
  bool patch = (decorators & C1_NEEDS_PATCHING) != 0;
  bool do_load = pre_val == LIR_OprFact::illegalOpr;
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
              "Assumption");
    // Use the unsigned type T_BOOLEAN here rather than the signed T_BYTE since some
    // platforms, e.g. ARM, need unsigned instructions to handle the large offset
    // used to load the satb_mark_queue flag.
    flag_type = T_BOOLEAN;
  }
  LIR_Opr thrd = gen->getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = gen->new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));

  LIR_PatchCode pre_val_patch_code = lir_patch_none;

  CodeStub* slow;

  if (do_load) {
    assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
    assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");

    if (patch) {
      pre_val_patch_code = lir_patch_normal;
    }

    pre_val = gen->new_register(T_OBJECT);

    if (!addr_opr->is_address()) {
      assert(addr_opr->is_register(), "must be");
      addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
    }
    slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
  } else {
    assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
    assert(pre_val->is_register(), "must be");
    assert(pre_val->type() == T_OBJECT, "must be an object");
    assert(info == NULL, "sanity");

    slow = new G1PreBarrierStub(pre_val);
  }

  __ branch(lir_cond_notEqual, T_INT, slow);
  __ branch_destination(slow->continuation());
}

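// Generate the C1 post-write barrier. G1 tracks pointers that cross heap
// region boundaries in per-region remembered sets. The inline fast path
// filters out stores of constant NULL and stores where the field and the new
// value lie in the same region; everything else branches to G1PostBarrierStub,
// which performs the card dirtying and enqueueing.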
void G1BarrierSetC1::post_barrier(LIRAccess& access, LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  LIRGenerator* gen = access.gen();
  DecoratorSet decorators = access.decorators();
  bool in_heap = (decorators & IN_HEAP) != 0;
  if (!in_heap) {
    return;
  }

  // If the "new_val" is a constant NULL, no barrier is necessary.
  if (new_val->is_constant() &&
      new_val->as_constant_ptr()->as_jobject() == NULL) return;

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = gen->new_register(T_OBJECT);
    if (new_val->is_constant()) {
      __ move(new_val, new_val_reg);
    } else {
      __ leal(new_val, new_val_reg);
    }
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  if (addr->is_address()) {
    LIR_Address* address = addr->as_address_ptr();
    LIR_Opr ptr = gen->new_pointer_register();
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");

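  // Region-crossing filter: (addr ^ new_val) >> LogOfHRGrainBytes is zero
  // exactly when addr and new_val agree in every bit above the region-size
  // bits, i.e. when the field and the stored object lie in the same heap
  // region and no remembered set entry is needed.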
  LIR_Opr xor_res = gen->new_pointer_register();
  LIR_Opr xor_shift_res = gen->new_pointer_register();
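  // In two-operand LIR form (e.g. x86) the destination of an operation must
  // also be its first source operand, hence the extra moves around the xor
  // and the shift.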
  if (TwoOperandLIRForm) {
    __ move(addr, xor_res);
    __ logical_xor(xor_res, new_val, xor_res);
    __ move(xor_res, xor_shift_res);
    __ unsigned_shift_right(xor_shift_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  } else {
    __ logical_xor(addr, new_val, xor_res);
    __ unsigned_shift_right(xor_res,
                            LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
                            xor_shift_res,
                            LIR_OprDesc::illegalOpr());
  }

  if (!new_val->is_register()) {
    LIR_Opr new_val_reg = gen->new_register(T_OBJECT);
    __ leal(new_val, new_val_reg);
    new_val = new_val_reg;
  }
  assert(new_val->is_register(), "must be a register at this point");

  __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));

  CodeStub* slow = new G1PostBarrierStub(addr, new_val);
  __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
  __ branch_destination(slow->continuation());
}

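// Loads through weak, phantom, or unknown oop references must register the
// loaded referent with SATB marking, otherwise concurrent marking could miss
// an object to which the mutator has just acquired a strong reference (e.g.
// via java.lang.ref.Reference.get()). For ON_UNKNOWN_OOP_REF accesses the
// reference strength is only known at runtime, so generate_referent_check
// branches past the pre-barrier unless the access really targets a
// Reference.referent field.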
void G1BarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) {
  DecoratorSet decorators = access.decorators();
  bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  LIRGenerator *gen = access.gen();

  BarrierSetC1::load_at_resolved(access, result);

  if (access.is_oop() && (is_weak || is_phantom || is_anonymous)) {
    // Register the value in the referent field with the pre-barrier.
    LabelObj *Lcont_anonymous;
    if (is_anonymous) {
      Lcont_anonymous = new LabelObj();
      generate_referent_check(access, Lcont_anonymous);
    }
    pre_barrier(access, LIR_OprFact::illegalOpr /* addr_opr */,
                result /* pre_val */, access.patch_emit_info() /* info */);
    if (is_anonymous) {
      __ branch_destination(Lcont_anonymous->label());
    }
  }
}

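// Closures handed to Runtime1::generate_blob() below; each emits the body of
// a shared slow-path runtime stub by delegating to the platform-specific
// G1BarrierSetAssembler.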
class C1G1PreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_pre_barrier_runtime_stub(sasm);
    return NULL;
  }
};

class C1G1PostBarrierCodeGenClosure : public StubAssemblerCodeGenClosure {
  virtual OopMapSet* generate_code(StubAssembler* sasm) {
    G1BarrierSetAssembler* bs = (G1BarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
    bs->generate_c1_post_barrier_runtime_stub(sasm);
    return NULL;
  }
};

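// Generate the two shared runtime stubs once at startup; the per-call-site
// barrier stubs above call into these blobs for their slow paths.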
void G1BarrierSetC1::generate_c1_runtime_stubs(BufferBlob* buffer_blob) {
  C1G1PreBarrierCodeGenClosure pre_code_gen_cl;
  C1G1PostBarrierCodeGenClosure post_code_gen_cl;
  _pre_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1, "g1_pre_barrier_slow",
                                                              false, &pre_code_gen_cl);
  _post_barrier_c1_runtime_code_blob = Runtime1::generate_blob(buffer_blob, -1, "g1_post_barrier_slow",
                                                               false, &post_code_gen_cl);
}