/*
 * Copyright (c) 2009, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_UTILITIES_STACK_INLINE_HPP
#define SHARE_UTILITIES_STACK_INLINE_HPP

#include "memory/allocation.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/stack.hpp"
#include "utilities/copy.hpp"

template <MEMFLAGS F> StackBase<F>::StackBase(size_t segment_size, size_t max_cache_size,
                                              size_t max_size):
  _seg_size(segment_size),
  _max_size(adjust_max_size(max_size, segment_size)),
  _max_cache_size(max_cache_size)
{
  assert(_max_size % _seg_size == 0, "not a multiple");
}

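// Round max_size up to a multiple of the segment size. A max_size of 0 means
// "unbounded", and over-large values are clamped so the rounding cannot
// overflow.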
template <MEMFLAGS F> size_t StackBase<F>::adjust_max_size(size_t max_size, size_t seg_size)
{
  assert(seg_size > 0, "cannot be 0");
  assert(max_size >= seg_size || max_size == 0, "max_size too small");
  const size_t limit = max_uintx - (seg_size - 1);
  if (max_size == 0 || max_size > limit) {
    max_size = limit;
  }
  return (max_size + seg_size - 1) / seg_size * seg_size;
}

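// Typical use (a minimal sketch; the public interface, including the defaulted
// constructor arguments, is declared in utilities/stack.hpp):
//
//   Stack<oop, mtGC> stack;
//   stack.push(obj);
//   while (!stack.is_empty()) {
//     oop top = stack.pop();
//     // ... process top ...
//   }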
template <class E, MEMFLAGS F>
Stack<E, F>::Stack(size_t segment_size, size_t max_cache_size, size_t max_size):
  StackBase<F>(adjust_segment_size(segment_size), max_cache_size, max_size)
{
  reset(true);
}

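// Push item, starting a new segment (from the cache if possible) when the
// current segment is full.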
template <class E, MEMFLAGS F>
void Stack<E, F>::push(E item)
{
  assert(!is_full(), "pushing onto a full stack");
  if (this->_cur_seg_size == this->_seg_size) {
    push_segment();
  }
  this->_cur_seg[this->_cur_seg_size] = item;
  ++this->_cur_seg_size;
}

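// Pop the top item; when this empties the current segment, the segment is
// returned to the cache or freed.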
template <class E, MEMFLAGS F>
E Stack<E, F>::pop()
{
  assert(!is_empty(), "popping from an empty stack");
  if (this->_cur_seg_size == 1) {
    E tmp = _cur_seg[--this->_cur_seg_size];
    pop_segment();
    return tmp;
  }
  return this->_cur_seg[--this->_cur_seg_size];
}

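// Discard all elements; the segment cache is also released when clear_cache
// is true.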
template <class E, MEMFLAGS F>
void Stack<E, F>::clear(bool clear_cache)
{
  free_segments(_cur_seg);
  if (clear_cache) free_segments(_cache);
  reset(clear_cache);
}

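// For element types smaller than a pointer, round the element count up so the
// element array fills whole pointer-sized words; otherwise those bytes would
// be lost to padding before the link field.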
template <class E, MEMFLAGS F>
size_t Stack<E, F>::adjust_segment_size(size_t seg_size)
{
  const size_t elem_sz = sizeof(E);
  const size_t ptr_sz = sizeof(E*);
  assert(elem_sz % ptr_sz == 0 || ptr_sz % elem_sz == 0, "bad element size");
  if (elem_sz < ptr_sz) {
    return align_up(seg_size * elem_sz, ptr_sz) / elem_sz;
  }
  return seg_size;
}

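// Each segment is laid out as _seg_size elements of type E followed by a
// link (E*) to the previous segment; the link lives at the first
// pointer-aligned offset past the elements.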
template <class E, MEMFLAGS F>
size_t Stack<E, F>::link_offset() const
{
  return align_up(this->_seg_size * sizeof(E), sizeof(E*));
}

template <class E, MEMFLAGS F>
size_t Stack<E, F>::segment_bytes() const
{
  return link_offset() + sizeof(E*);
}

template <class E, MEMFLAGS F>
E** Stack<E, F>::link_addr(E* seg) const
{
  return (E**) ((char*)seg + link_offset());
}

template <class E, MEMFLAGS F>
E* Stack<E, F>::get_link(E* seg) const
{
  return *link_addr(seg);
}

template <class E, MEMFLAGS F>
E* Stack<E, F>::set_link(E* new_seg, E* old_seg)
{
  *link_addr(new_seg) = old_seg;
  return new_seg;
}

template <class E, MEMFLAGS F>
E* Stack<E, F>::alloc(size_t bytes)
{
  return (E*) NEW_C_HEAP_ARRAY(char, bytes, F);
}

template <class E, MEMFLAGS F>
void Stack<E, F>::free(E* addr, size_t bytes)
{
  FREE_C_HEAP_ARRAY(char, (char*) addr);
}

// Stack is used by the GC code and in some hot paths a lot of the Stack
// code gets inlined. This is generally good, but when too much code has
// been inlined, no further inlining is allowed by GCC. Therefore we need
// to prevent parts of the slow path in Stack from being inlined so that
// other code can be.
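//
// push_segment installs a new current segment, reusing one from the cache
// when available and allocating a fresh one otherwise.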
template <class E, MEMFLAGS F>
NOINLINE void Stack<E, F>::push_segment()
{
  assert(this->_cur_seg_size == this->_seg_size, "current segment is not full");
  E* next;
  if (this->_cache_size > 0) {
    // Use a cached segment.
    next = _cache;
    _cache = get_link(_cache);
    --this->_cache_size;
  } else {
    next = alloc(segment_bytes());
    DEBUG_ONLY(zap_segment(next, true);)
  }
  const bool at_empty_transition = is_empty();
  this->_cur_seg = set_link(next, _cur_seg);
  this->_cur_seg_size = 0;
  this->_full_seg_size += at_empty_transition ? 0 : this->_seg_size;
  DEBUG_ONLY(verify(at_empty_transition);)
}

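// Retire the (now empty) current segment, either caching it or freeing it,
// and make the previous segment current again.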
template <class E, MEMFLAGS F>
void Stack<E, F>::pop_segment()
{
  assert(this->_cur_seg_size == 0, "current segment is not empty");
  E* const prev = get_link(_cur_seg);
  if (this->_cache_size < this->_max_cache_size) {
    // Add the current segment to the cache.
    DEBUG_ONLY(zap_segment(_cur_seg, false);)
    _cache = set_link(_cur_seg, _cache);
    ++this->_cache_size;
  } else {
    DEBUG_ONLY(zap_segment(_cur_seg, true);)
    free(_cur_seg, segment_bytes());
  }
  const bool at_empty_transition = prev == NULL;
  this->_cur_seg = prev;
  this->_cur_seg_size = this->_seg_size;
  this->_full_seg_size -= at_empty_transition ? 0 : this->_seg_size;
  DEBUG_ONLY(verify(at_empty_transition);)
}

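// Free every segment on the list headed by seg.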
template <class E, MEMFLAGS F>
void Stack<E, F>::free_segments(E* seg)
{
  const size_t bytes = segment_bytes();
  while (seg != NULL) {
    E* const prev = get_link(seg);
    free(seg, bytes);
    seg = prev;
  }
}

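// Return the stack to the empty state, optionally dropping the segment cache
// as well.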
template <class E, MEMFLAGS F>
void Stack<E, F>::reset(bool reset_cache)
{
  this->_cur_seg_size = this->_seg_size; // So push() will alloc a new segment.
  this->_full_seg_size = 0;
  _cur_seg = NULL;
  if (reset_cache) {
    this->_cache_size = 0;
    _cache = NULL;
  }
}

#ifdef ASSERT
template <class E, MEMFLAGS F>
void Stack<E, F>::verify(bool at_empty_transition) const
{
  assert(size() <= this->max_size(), "stack exceeded bounds");
  assert(this->cache_size() <= this->max_cache_size(), "cache exceeded bounds");
  assert(this->_cur_seg_size <= this->segment_size(), "segment index exceeded bounds");

  assert(this->_full_seg_size % this->_seg_size == 0, "not a multiple");
  assert(at_empty_transition || is_empty() == (size() == 0), "mismatch");
  assert((_cache == NULL) == (this->cache_size() == 0), "mismatch");

  if (is_empty()) {
    assert(this->_cur_seg_size == this->segment_size(), "sanity");
  }
}

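// Fill the segment with badStackSegVal; the trailing link field is only
// included when zap_link_field is true.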
template <class E, MEMFLAGS F>
void Stack<E, F>::zap_segment(E* seg, bool zap_link_field) const
{
  if (!ZapStackSegments) return;
  const size_t zap_bytes = segment_bytes() - (zap_link_field ? 0 : sizeof(E*));
  Copy::fill_to_bytes(seg, zap_bytes, badStackSegVal);
}
#endif

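// ResourceStack segments come from the current thread's resource area rather
// than the C heap.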
template <class E, MEMFLAGS F>
E* ResourceStack<E, F>::alloc(size_t bytes)
{
  return (E*) resource_allocate_bytes(bytes);
}

template <class E, MEMFLAGS F>
void ResourceStack<E, F>::free(E* addr, size_t bytes)
{
  resource_free_bytes((char*) addr, bytes);
}

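// Re-synchronize the iterator with the current state of the stack.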
template <class E, MEMFLAGS F>
void StackIterator<E, F>::sync()
{
  _full_seg_size = _stack._full_seg_size;
  _cur_seg_size = _stack._cur_seg_size;
  _cur_seg = _stack._cur_seg;
}

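// Return the address of the next element, walking from the top of the stack
// toward the bottom and following segment links as each segment is exhausted.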
template <class E, MEMFLAGS F>
E* StackIterator<E, F>::next_addr()
{
  assert(!is_empty(), "no items left");
  if (_cur_seg_size == 1) {
    E* addr = _cur_seg;
    _cur_seg = _stack.get_link(_cur_seg);
    _cur_seg_size = _stack.segment_size();
    _full_seg_size -= _stack.segment_size();
    return addr;
  }
  return _cur_seg + --_cur_seg_size;
}

#endif // SHARE_UTILITIES_STACK_INLINE_HPP