/*
 * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_GC_Z_ZMARKSTACK_INLINE_HPP
#define SHARE_GC_Z_ZMARKSTACK_INLINE_HPP

#include "gc/z/zMarkStack.hpp"
#include "runtime/atomic.hpp"
#include "utilities/debug.hpp"

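// ZStack is a fixed-capacity stack of S slots. It is not thread-safe on its
// own; a stack is only operated on by the thread that currently owns it.
// push() and pop() report failure instead of growing or blocking, which lets
// the caller react by publishing a full stack or fetching a fresh one.
//
// Illustrative use only (the names below are hypothetical, not from this file):
//
//   ZStack<ZMarkStackEntry, SlotCount> stack;
//   if (!stack.push(entry)) {
//     // Stack is full, hand it off and retry on a new stack
//   }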
template <typename T, size_t S>
inline ZStack<T, S>::ZStack() :
    _top(0),
    _next(NULL) {}

template <typename T, size_t S>
inline bool ZStack<T, S>::is_empty() const {
  return _top == 0;
}

template <typename T, size_t S>
inline bool ZStack<T, S>::is_full() const {
  return _top == S;
}

template <typename T, size_t S>
inline bool ZStack<T, S>::push(T value) {
  if (is_full()) {
    return false;
  }

  _slots[_top++] = value;
  return true;
}

template <typename T, size_t S>
inline bool ZStack<T, S>::pop(T& value) {
  if (is_empty()) {
    return false;
  }

  value = _slots[--_top];
  return true;
}

template <typename T, size_t S>
inline ZStack<T, S>* ZStack<T, S>::next() const {
  return _next;
}

template <typename T, size_t S>
inline ZStack<T, S>** ZStack<T, S>::next_addr() {
  return &_next;
}

template <typename T>
inline ZStackList<T>::ZStackList() :
    _head(encode_versioned_pointer(NULL, 0)) {}

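// The list head is a versioned pointer packed into a single word: the high
// 32 bits hold the stack's index in the mark stack space (computed from
// ZMarkStackSpaceStart and ZMarkStackSizeShift, with all ones meaning NULL),
// and the low 32 bits hold a version counter. The version is bumped on every
// successful update of the head, which guards the CAS-based push/pop below
// against ABA problems.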
template <typename T>
inline T* ZStackList<T>::encode_versioned_pointer(const T* stack, uint32_t version) const {
  uint64_t addr;

  if (stack == NULL) {
    addr = (uint32_t)-1;
  } else {
    addr = ((uint64_t)stack - ZMarkStackSpaceStart) >> ZMarkStackSizeShift;
  }

  return (T*)((addr << 32) | (uint64_t)version);
}

template <typename T>
inline void ZStackList<T>::decode_versioned_pointer(const T* vstack, T** stack, uint32_t* version) const {
  const uint64_t addr = (uint64_t)vstack >> 32;

  if (addr == (uint32_t)-1) {
    *stack = NULL;
  } else {
    *stack = (T*)((addr << ZMarkStackSizeShift) + ZMarkStackSpaceStart);
  }

  *version = (uint32_t)(uint64_t)vstack;
}

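// Note that this is only a snapshot of the list head; the answer can be
// stale by the time the caller acts on it.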
template <typename T>
inline bool ZStackList<T>::is_empty() const {
  const T* vstack = _head;
  T* stack = NULL;
  uint32_t version = 0;

  decode_versioned_pointer(vstack, &stack, &version);
  return stack == NULL;
}

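// Lock-free push. The new stack is linked in front of the current head and
// the head is swung over with a CAS. Each successful update also bumps the
// version counter, so a stale head value cannot be reinstalled by a CAS that
// happens to see the same pointer again (ABA).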
template <typename T>
inline void ZStackList<T>::push_atomic(T* stack) {
  T* vstack = _head;
  uint32_t version = 0;

  for (;;) {
    decode_versioned_pointer(vstack, stack->next_addr(), &version);
    T* const new_vstack = encode_versioned_pointer(stack, version + 1);
    T* const prev_vstack = Atomic::cmpxchg(new_vstack, &_head, vstack);
    if (prev_vstack == vstack) {
      // Success
      break;
    }

    // Retry
    vstack = prev_vstack;
  }
}

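// Lock-free pop. Returns NULL if the list is empty. The CAS replaces the
// head with its successor and a bumped version; if another thread won the
// race, the freshly observed head is used for the next attempt.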
template <typename T>
inline T* ZStackList<T>::pop_atomic() {
  T* vstack = _head;
  T* stack = NULL;
  uint32_t version = 0;

  for (;;) {
    decode_versioned_pointer(vstack, &stack, &version);
    if (stack == NULL) {
      return NULL;
    }

    T* const new_vstack = encode_versioned_pointer(stack->next(), version + 1);
    T* const prev_vstack = Atomic::cmpxchg(new_vstack, &_head, vstack);
    if (prev_vstack == vstack) {
      // Success
      return stack;
    }

    // Retry
    vstack = prev_vstack;
  }
}

inline bool ZMarkStripe::is_empty() const {
  return _published.is_empty() && _overflowed.is_empty();
}

inline void ZMarkStripe::publish_stack(ZMarkStack* stack, bool publish) {
  // A stack is published either on the published list or the overflowed
  // list. The published list is used by mutators publishing stacks for GC
  // workers to work on, while the overflowed list is used by GC workers
  // to publish stacks that overflowed. The intention here is to avoid
  // contention between mutators and GC workers as much as possible, while
  // still allowing GC workers to help out and steal work from each other.
  if (publish) {
    _published.push_atomic(stack);
  } else {
    _overflowed.push_atomic(stack);
  }
}

inline ZMarkStack* ZMarkStripe::steal_stack() {
  // Steal overflowed stacks first, then published stacks
  ZMarkStack* const stack = _overflowed.pop_atomic();
  if (stack != NULL) {
    return stack;
  }

  return _published.pop_atomic();
}

inline size_t ZMarkStripeSet::nstripes() const {
  return _nstripes;
}

inline size_t ZMarkStripeSet::stripe_id(const ZMarkStripe* stripe) const {
  const size_t index = ((uintptr_t)stripe - (uintptr_t)_stripes) / sizeof(ZMarkStripe);
  assert(index < _nstripes, "Invalid index");
  return index;
}

inline ZMarkStripe* ZMarkStripeSet::stripe_at(size_t index) {
  assert(index < _nstripes, "Invalid index");
  return &_stripes[index];
}

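// Advances to the next stripe in round-robin order, wrapping around using
// the stripe mask. The mask-based wrap relies on the number of stripes
// being a power of two.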
inline ZMarkStripe* ZMarkStripeSet::stripe_next(ZMarkStripe* stripe) {
  const size_t index = (stripe_id(stripe) + 1) & _nstripes_mask;
  assert(index < _nstripes, "Invalid index");
  return &_stripes[index];
}

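// Maps an address to a stripe by discarding the low ZMarkStripeShift bits
// and masking with the stripe mask, so that fixed-size address ranges are
// distributed across the stripes.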
inline ZMarkStripe* ZMarkStripeSet::stripe_for_addr(uintptr_t addr) {
  const size_t index = (addr >> ZMarkStripeShift) & _nstripes_mask;
  assert(index < _nstripes, "Invalid index");
  return &_stripes[index];
}

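// Installs a stack as the current thread-local stack for the given stripe.
// The slot for that stripe is expected to be empty.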
inline void ZMarkThreadLocalStacks::install(ZMarkStripeSet* stripes,
                                            ZMarkStripe* stripe,
                                            ZMarkStack* stack) {
  ZMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)];
  assert(*stackp == NULL, "Should be empty");
  *stackp = stack;
}

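// Fast path: push the entry onto the thread-local stack cached for this
// stripe. The out-of-line slow path covers the cases where no stack is
// cached or the cached stack is full, forwarding the allocator and the
// publish hint.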
inline bool ZMarkThreadLocalStacks::push(ZMarkStackAllocator* allocator,
                                         ZMarkStripeSet* stripes,
                                         ZMarkStripe* stripe,
                                         ZMarkStackEntry entry,
                                         bool publish) {
  ZMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)];
  ZMarkStack* const stack = *stackp;
  if (stack != NULL && stack->push(entry)) {
    return true;
  }

  return push_slow(allocator, stripe, stackp, entry, publish);
}

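// Fast path: pop an entry from the thread-local stack cached for this
// stripe. The out-of-line slow path covers the cases where no stack is
// cached or the cached stack is empty.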
inline bool ZMarkThreadLocalStacks::pop(ZMarkStackAllocator* allocator,
                                        ZMarkStripeSet* stripes,
                                        ZMarkStripe* stripe,
                                        ZMarkStackEntry& entry) {
  ZMarkStack** const stackp = &_stacks[stripes->stripe_id(stripe)];
  ZMarkStack* const stack = *stackp;
  if (stack != NULL && stack->pop(entry)) {
    return true;
  }

  return pop_slow(allocator, stripe, stackp, entry);
}

#endif // SHARE_GC_Z_ZMARKSTACK_INLINE_HPP