/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/narrowptrnode.hpp"
#include "gc/shared/c2/modRefBarrierSetC2.hpp"
#include "utilities/macros.hpp"

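// ModRefBarrierSetC2 provides the shared C2 expansion for barrier sets that
// track reference modifications: it brackets oop stores and atomic oop
// updates with the pre_barrier()/post_barrier() hooks supplied by the
// concrete barrier set (e.g. SATB marking or card marking).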
Node* ModRefBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const {
  DecoratorSet decorators = access.decorators();

  const TypePtr* adr_type = access.addr().type();
  Node* adr = access.addr().node();

  bool is_array = (decorators & IS_ARRAY) != 0;
  bool anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool use_precise = is_array || anonymous;
  bool tightly_coupled_alloc = (decorators & C2_TIGHTLY_COUPLED_ALLOC) != 0;

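  // No barriers are needed if the stored value is not an oop, if the store
  // initializes a tightly coupled (freshly made) allocation, or if the store
  // is off-heap and not to a reference of unknown strength.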
  if (!access.is_oop() || tightly_coupled_alloc || (!in_heap && !anonymous)) {
    return BarrierSetC2::store_at_resolved(access, val);
  }

  assert(access.is_parse_access(), "entry not supported at optimization time");
  C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
  GraphKit* kit = parse_access.kit();

  uint adr_idx = kit->C->get_alias_index(adr_type);
  assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");

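  // Bracket the raw store with the GC barriers: the pre-barrier loads the old
  // value at the address (do_load == true) and the post-barrier records the
  // newly stored reference. use_precise asks the post-barrier to mark the
  // exact slot rather than just the object start, which is needed for array
  // and unknown-reference stores.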
  pre_barrier(kit, true /* do_load */, kit->control(), access.base(), adr, adr_idx, val.node(),
              static_cast<const TypeOopPtr*>(val.type()), NULL /* pre_val */, access.type());
  Node* store = BarrierSetC2::store_at_resolved(access, val);
  post_barrier(kit, kit->control(), access.raw_access(), access.base(), adr, adr_idx, val.node(),
               access.type(), use_precise);

  return store;
}

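// For a compare-and-exchange of an oop, the expected value is the only value
// the swap can overwrite, so it is passed directly as pre_val and no extra
// load is emitted (do_load == false).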
Node* ModRefBarrierSetC2::atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                         Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();

  if (!access.is_oop()) {
    return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);
  }

  pre_barrier(kit, false /* do_load */,
              kit->control(), NULL, NULL, max_juint, NULL, NULL,
              expected_val /* pre_val */, T_OBJECT);

  Node* result = BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type);

  post_barrier(kit, kit->control(), access.raw_access(), access.base(),
               access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);

  return result;
}

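// Same as above, but the boolean result makes it possible to skip the post
// barrier entirely when the compare-and-swap did not store anything.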
Node* ModRefBarrierSetC2::atomic_cmpxchg_bool_at_resolved(C2AtomicParseAccess& access, Node* expected_val,
                                                          Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();

  if (!access.is_oop()) {
    return BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);
  }

  pre_barrier(kit, false /* do_load */,
              kit->control(), NULL, NULL, max_juint, NULL, NULL,
              expected_val /* pre_val */, T_OBJECT);

  Node* load_store = BarrierSetC2::atomic_cmpxchg_bool_at_resolved(access, expected_val, new_val, value_type);

  // Emit the post barrier only when the store actually happened. This check
  // only makes sense for the LS_cmp_* operations, which can fail to set the
  // value. LS_cmp_exchange does not produce any branches by default, so there
  // is no boolean result to piggyback on. TODO: When we merge CompareAndSwap
  // with CompareAndExchange and move branches here, it would make sense to
  // conditionalize post_barriers for LS_cmp_exchange as well.
  //
  // The CAS success path is marked as more likely, since we anticipate it is
  // the performance-critical path; the CAS failure path can absorb the penalty
  // of taking the unlikely branch as a form of backoff, which is still cheaper
  // than emitting an unconditional store barrier there.
  IdealKit ideal(kit);
  ideal.if_then(load_store, BoolTest::ne, ideal.ConI(0), PROB_STATIC_FREQUENT); {
    kit->sync_kit(ideal);
    post_barrier(kit, ideal.ctrl(), access.raw_access(), access.base(),
                 access.addr().node(), access.alias_idx(), new_val, T_OBJECT, true);
    ideal.sync_kit(kit);
  } ideal.end_if();
  kit->final_sync(ideal);

  return load_store;
}

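// An unconditional oop exchange always stores, so both barriers are emitted
// unconditionally; the swapped-out value returned by the exchange doubles as
// the pre-barrier's pre_val.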
Node* ModRefBarrierSetC2::atomic_xchg_at_resolved(C2AtomicParseAccess& access, Node* new_val, const Type* value_type) const {
  GraphKit* kit = access.kit();

  Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, new_val, value_type);
  if (!access.is_oop()) {
    return result;
  }

  // Don't need to load pre_val: the old value is returned by the exchange
  // itself. The pre-barrier can execute after the xchg as long as no
  // safepoint gets inserted between them.
  pre_barrier(kit, false /* do_load */,
              kit->control(), NULL, NULL, max_juint, NULL, NULL,
              result /* pre_val */, T_OBJECT);
  post_barrier(kit, kit->control(), access.raw_access(), access.base(), access.addr().node(),
               access.alias_idx(), new_val, T_OBJECT, true);

  return result;
}