/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_C2_BARRIERSETC2_HPP
#define SHARE_GC_SHARED_C2_BARRIERSETC2_HPP

#include "memory/allocation.hpp"
#include "oops/accessDecorators.hpp"
#include "opto/loopnode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "utilities/globalDefinitions.hpp"

// This denotes a mismatched access: the type of the value being accessed is
// not the same as the type pointed to by the address.
const DecoratorSet C2_MISMATCHED             = DECORATOR_LAST << 1;
// The access may not be aligned to its natural size.
const DecoratorSet C2_UNALIGNED              = DECORATOR_LAST << 2;
// The atomic cmpxchg is weak, meaning that spurious false negatives are allowed,
// but never false positives.
const DecoratorSet C2_WEAK_CMPXCHG           = DECORATOR_LAST << 3;
// This denotes that a load has a control dependency.
const DecoratorSet C2_CONTROL_DEPENDENT_LOAD = DECORATOR_LAST << 4;
// This denotes a load that must be pinned.
const DecoratorSet C2_PINNED_LOAD            = DECORATOR_LAST << 5;
// This denotes that the access is produced by the sun.misc.Unsafe intrinsics.
const DecoratorSet C2_UNSAFE_ACCESS          = DECORATOR_LAST << 6;
// This denotes that the access mutates state.
const DecoratorSet C2_WRITE_ACCESS           = DECORATOR_LAST << 7;
// This denotes that the access reads state.
const DecoratorSet C2_READ_ACCESS            = DECORATOR_LAST << 8;
// This denotes that the access is tightly coupled with an allocation,
// i.e. the accessed object has just been allocated nearby.
const DecoratorSet C2_TIGHTLY_COUPLED_ALLOC  = DECORATOR_LAST << 9;
// This denotes loads and stores that originate from an arraycopy being optimized.
const DecoratorSet C2_ARRAY_COPY             = DECORATOR_LAST << 10;
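
// Illustrative sketch, not part of this interface: a GC backend typically
// inspects these decorators to decide how conservative an access must be.
// The local names 'access' and 'd' below are hypothetical.
//
//   const DecoratorSet d = access.decorators();
//   bool is_unsafe     = (d & C2_UNSAFE_ACCESS) != 0;
//   bool is_mismatched = (d & C2_MISMATCHED) != 0;
//   bool is_oop_write  = access.is_oop() && (d & C2_WRITE_ACCESS) != 0;
//   // e.g. emit pre/post write barriers only when is_oop_write is true.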

class Compile;
class ConnectionGraph;
class GraphKit;
class IdealKit;
class Node;
class PhaseGVN;
class PhaseIdealLoop;
class PhaseMacroExpand;
class Type;
class TypePtr;
class Unique_Node_List;

// This class wraps a node and a type.
class C2AccessValue: public StackObj {
protected:
  Node* _node;
  const Type* _type;

public:
  C2AccessValue(Node* node, const Type* type) :
    _node(node),
    _type(type) {}

  Node* node() const { return _node; }
  const Type* type() const { return _type; }

  void set_node(Node* node) { _node = node; }
};

// This class wraps a node and a pointer type.
class C2AccessValuePtr: public C2AccessValue {

public:
  C2AccessValuePtr(Node* node, const TypePtr* type) :
    C2AccessValue(node, reinterpret_cast<const Type*>(type)) {}

  const TypePtr* type() const { return reinterpret_cast<const TypePtr*>(_type); }
};

// This class wraps a bunch of context parameters that are passed around in the
// BarrierSetC2 backend hierarchy, for loads and stores, to reduce boilerplate.
class C2Access: public StackObj {
protected:
  DecoratorSet _decorators;
  BasicType _type;
  Node* _base;
  C2AccessValuePtr& _addr;
  Node* _raw_access;

  void fixup_decorators();

public:
  C2Access(DecoratorSet decorators,
           BasicType type, Node* base, C2AccessValuePtr& addr) :
    _decorators(decorators),
    _type(type),
    _base(base),
    _addr(addr),
    _raw_access(NULL)
  {}

  DecoratorSet decorators() const { return _decorators; }
  Node* base() const { return _base; }
  C2AccessValuePtr& addr() const { return _addr; }
  BasicType type() const { return _type; }
  bool is_oop() const { return _type == T_OBJECT || _type == T_ARRAY; }
  bool is_raw() const { return (_decorators & AS_RAW) != 0; }
  Node* raw_access() const { return _raw_access; }

  void set_raw_access(Node* raw_access) { _raw_access = raw_access; }
  // A no-op for normal accesses; overridden for atomic accesses.
  virtual void set_memory() {}

  MemNode::MemOrd mem_node_mo() const;
  bool needs_cpu_membar() const;

  virtual PhaseGVN& gvn() const = 0;
  virtual bool is_parse_access() const { return false; }
  virtual bool is_opt_access() const { return false; }
};

// C2Access for parse time calls to the BarrierSetC2 backend.
class C2ParseAccess: public C2Access {
protected:
  GraphKit* _kit;

  void* barrier_set_state() const;

public:
  C2ParseAccess(GraphKit* kit, DecoratorSet decorators,
                BasicType type, Node* base, C2AccessValuePtr& addr) :
    C2Access(decorators, type, base, addr),
    _kit(kit) {
    fixup_decorators();
  }

  GraphKit* kit() const { return _kit; }

  template <typename T>
  T barrier_set_state_as() const {
    return reinterpret_cast<T>(barrier_set_state());
  }

  virtual PhaseGVN& gvn() const;
  virtual bool is_parse_access() const { return true; }
};
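
// Illustrative sketch of a parse-time store through this interface, roughly
// mirroring how GraphKit-style callers drive it; the locals 'kit', 'obj',
// 'adr', 'adr_type', 'val', 'val_type', 'bt' and 'decorators' are hypothetical:
//
//   C2AccessValuePtr addr(adr, adr_type);
//   C2ParseAccess access(kit, decorators | C2_WRITE_ACCESS, bt, obj, addr);
//   C2AccessValue value(val, val_type);
//   Node* store = BarrierSet::barrier_set()->barrier_set_c2()->store_at(access, value);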

// This class wraps a bunch of context parameters that are passed around in the
// BarrierSetC2 backend hierarchy, for atomic accesses, to reduce boilerplate.
class C2AtomicParseAccess: public C2ParseAccess {
  Node* _memory;
  uint _alias_idx;
  bool _needs_pinning;

public:
  C2AtomicParseAccess(GraphKit* kit, DecoratorSet decorators, BasicType type,
                      Node* base, C2AccessValuePtr& addr, uint alias_idx) :
    C2ParseAccess(kit, decorators, type, base, addr),
    _memory(NULL),
    _alias_idx(alias_idx),
    _needs_pinning(true) {}

  // Set the memory node based on the current memory slice.
  virtual void set_memory();

  Node* memory() const { return _memory; }
  uint alias_idx() const { return _alias_idx; }
  bool needs_pinning() const { return _needs_pinning; }

  void set_needs_pinning(bool value) { _needs_pinning = value; }
};
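
// Illustrative sketch of a parse-time compare-and-exchange; the locals 'kit',
// 'obj', 'adr', 'adr_type', 'alias_idx', 'expected', 'new_val', 'value_type',
// 'bt' and 'decorators' are hypothetical:
//
//   C2AccessValuePtr addr(adr, adr_type);
//   C2AtomicParseAccess access(kit, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS,
//                              bt, obj, addr, alias_idx);
//   Node* result = BarrierSet::barrier_set()->barrier_set_c2()->
//       atomic_cmpxchg_val_at(access, expected, new_val, value_type);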

// C2Access for optimization time calls to the BarrierSetC2 backend.
class C2OptAccess: public C2Access {
  PhaseGVN& _gvn;
  MergeMemNode* _mem;
  Node* _ctl;

public:
  C2OptAccess(PhaseGVN& gvn, Node* ctl, MergeMemNode* mem, DecoratorSet decorators,
              BasicType type, Node* base, C2AccessValuePtr& addr) :
    C2Access(decorators, type, base, addr),
    _gvn(gvn), _mem(mem), _ctl(ctl) {
    fixup_decorators();
  }

  MergeMemNode* mem() const { return _mem; }
  Node* ctl() const { return _ctl; }
  // void set_mem(Node* mem) { _mem = mem; }
  void set_ctl(Node* ctl) { _ctl = ctl; }

  virtual PhaseGVN& gvn() const { return _gvn; }
  virtual bool is_opt_access() const { return true; }
};
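
// Illustrative sketch of an optimization-time load, e.g. from an arraycopy
// transformation outside of parsing; the locals 'gvn', 'ctl', 'mergemem',
// 'base', 'adr', 'adr_type', 'val_type', 'bt' and 'decorators' are hypothetical:
//
//   C2AccessValuePtr addr(adr, adr_type);
//   C2OptAccess access(gvn, ctl, mergemem, decorators | C2_READ_ACCESS, bt, base, addr);
//   Node* load = BarrierSet::barrier_set()->barrier_set_c2()->load_at(access, val_type);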

// This is the top-level class for the backend of the Access API in C2.
// The top-level class is responsible for performing raw accesses. The
// various GC barrier sets inherit from the BarrierSetC2 class to sprinkle
// barriers into the accesses.
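//
// Illustrative sketch of how a GC-specific backend hooks in; 'MyGCBarrierSetC2'
// is a hypothetical subclass, not a real HotSpot GC, and only the override
// pattern is shown:
//
//   class MyGCBarrierSetC2: public BarrierSetC2 {
//   protected:
//     virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const {
//       Node* load = BarrierSetC2::load_at_resolved(access, val_type);
//       // ... wrap 'load' in a GC-specific barrier node here ...
//       return load;
//     }
//   };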
class BarrierSetC2: public CHeapObj<mtGC> {
protected:
  virtual void resolve_address(C2Access& access) const;
  virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const;
  virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;

  virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                               Node* new_val, const Type* val_type) const;
  virtual Node* atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                Node* new_val, const Type* value_type) const;
  virtual Node* atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
  virtual Node* atomic_add_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* val_type) const;
  void pin_atomic_op(C2AtomicParseAccess& access) const;

public:
  // These are the entry-points for the backend to perform accesses through the Access API.
  virtual Node* store_at(C2Access& access, C2AccessValue& val) const;
  virtual Node* load_at(C2Access& access, const Type* val_type) const;

  virtual Node* atomic_cmpxchg_val_at(C2AtomicParseAccess& access, Node* expected_val,
                                      Node* new_val, const Type* val_type) const;
  virtual Node* atomic_cmpxchg_bool_at(C2AtomicParseAccess& access, Node* expected_val,
                                       Node* new_val, const Type* val_type) const;
  virtual Node* atomic_xchg_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;
  virtual Node* atomic_add_at(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const;

  virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const;

  virtual Node* resolve(GraphKit* kit, Node* n, DecoratorSet decorators) const { return n; }

  virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes,
                             Node*& i_o, Node*& needgc_ctrl,
                             Node*& fast_oop_ctrl, Node*& fast_oop_rawmem,
                             intx prefetch_lines) const;

  virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const { return NULL; }
  virtual Node* identity_node(PhaseGVN* phase, Node* n) const { return n; }

  // These are general helper methods used by C2
  enum ArrayCopyPhase {
    Parsing,
    Optimization,
    Expansion
  };

  virtual bool array_copy_requires_gc_barriers(bool tightly_coupled_alloc, BasicType type, bool is_clone, ArrayCopyPhase phase) const { return false; }
  virtual void clone_barrier_at_expansion(ArrayCopyNode* ac, Node* call, PhaseIterGVN& igvn) const;

  // Support for GC barriers emitted during parsing
  virtual bool has_load_barriers() const { return false; }
  virtual bool is_gc_barrier_node(Node* node) const { return false; }
  virtual Node* step_over_gc_barrier(Node* c) const { return c; }
  virtual Node* step_over_gc_barrier_ctrl(Node* c) const { return c; }

  // Support for macro expanded GC barriers
  virtual void register_potential_barrier_node(Node* node) const { }
  virtual void unregister_potential_barrier_node(Node* node) const { }
  virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const { }
  virtual void enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) const { }
  virtual void eliminate_useless_gc_barriers(Unique_Node_List &useful, Compile* C) const { }

  // Allow barrier sets to have shared state that is preserved across a compilation unit.
  // This could, for example, comprise macro nodes to be expanded during macro expansion.
  virtual void* create_barrier_state(Arena* comp_arena) const { return NULL; }
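  // An overriding barrier set would typically allocate this state in the
  // compilation arena, e.g. (illustrative sketch; 'MyBarrierSetC2State' is a
  // hypothetical type):
  //
  //   virtual void* create_barrier_state(Arena* comp_arena) const {
  //     return new (comp_arena) MyBarrierSetC2State(comp_arena);
  //   }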
  // If the BarrierSetC2 state has barrier nodes in its compilation
  // unit state to be expanded later, then now is the time to do so.
  virtual bool expand_barriers(Compile* C, PhaseIterGVN& igvn) const { return false; }
  virtual bool optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const { return false; }
  virtual bool strip_mined_loops_expanded(LoopOptsMode mode) const { return false; }
  virtual bool is_gc_specific_loop_opts_pass(LoopOptsMode mode) const { return false; }

  virtual bool has_special_unique_user(const Node* node) const { return false; }
  virtual bool needs_anti_dependence_check(const Node* node) const { return true; }

  virtual void barrier_insertion_phase(Compile* C, PhaseIterGVN &igvn) const { }

  enum CompilePhase {
    BeforeOptimize,
    BeforeLateInsertion,
    BeforeMacroExpand,
    BeforeCodeGen
  };

  virtual bool flatten_gc_alias_type(const TypePtr*& adr_type) const { return false; }
#ifdef ASSERT
  virtual bool verify_gc_alias_type(const TypePtr* adr_type, int offset) const { return false; }
  virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const { }
#endif

  virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode) const { return false; }

  virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const { return false; }
  virtual bool escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const { return false; }
  virtual bool escape_has_out_with_unsafe_object(Node* n) const { return false; }
  virtual bool escape_is_barrier_node(Node* n) const { return false; }

  virtual bool matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const { return false; }
  virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const { return false; }
  virtual bool matcher_is_store_load_barrier(Node* x, uint xop) const { return false; }

  virtual void igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const { }
  virtual void ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const { }

  virtual Node* split_if_pre(PhaseIdealLoop* phase, Node* n) const { return NULL; }
  virtual bool build_loop_late_post(PhaseIdealLoop* phase, Node* n) const { return false; }
  virtual bool sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const { return false; }
};

#endif // SHARE_GC_SHARED_C2_BARRIERSETC2_HPP