/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/stubs.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/align.hpp"


// Implementation of StubQueue
//
// Standard wrap-around queue implementation; the queue dimensions
// are specified by the _queue_begin & _queue_end indices. The queue
// can be in two states (transparent to the outside):
//
// a) contiguous state: all queue entries in one block (or empty)
//
// Queue: |...|XXXXXXX|...............|
//        ^0  ^begin  ^end            ^size = limit
//            |_______|
//            one block
//
// b) non-contiguous state: queue entries in two blocks
//
// Queue: |XXX|.......|XXXXXXX|.......|
//        ^0  ^end    ^begin  ^limit  ^size
//        |___|       |_______|
//      1st block     2nd block
//
// In the non-contiguous state, the wrap-around point is
// indicated via the _buffer_limit index since the last
// queue entry may not fill up the queue completely in
// which case we need to know where the 2nd block's end
// is to do the proper wrap-around. When removing the
// last entry of the 2nd block, _buffer_limit is reset
// to _buffer_size.
//
// CAUTION: DO NOT MESS WITH THIS CODE IF YOU CANNOT PROVE
// ITS CORRECTNESS! THIS CODE IS MORE SUBTLE THAN IT LOOKS!
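
// Typical producer-side use, as a sketch (the queue, interface, lock, and
// size names below are illustrative only; they are not defined in this file):
//
//   StubQueue* queue = new StubQueue(my_interface, buffer_size, my_lock, "MyStubs");
//   Stub* stub = queue->request(code_size);   // reserves space; queue stays locked
//   if (stub != NULL) {
//     // ... emit at most code_size bytes of stub code ...
//     CodeStrings strings;
//     queue->commit(code_size, strings);      // trims to the emitted size and unlocks
//   }
//
// If no sufficiently large free block exists, request() unlocks the queue and
// returns NULL.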


StubQueue::StubQueue(StubInterface* stub_interface, int buffer_size,
                     Mutex* lock, const char* name) : _mutex(lock) {
  intptr_t size = align_up(buffer_size, 2*BytesPerWord);
  BufferBlob* blob = BufferBlob::create(name, size);
  if (blob == NULL) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", name);
  }
  _stub_interface  = stub_interface;
  _buffer_size     = blob->content_size();
  _buffer_limit    = blob->content_size();
  _stub_buffer     = blob->content_begin();
  _queue_begin     = 0;
  _queue_end       = 0;
  _number_of_stubs = 0;
}


StubQueue::~StubQueue() {
  // Note: Currently StubQueues are never destroyed so nothing needs to be done here.
  //       If we want to implement the destructor, we need to release the BufferBlob
  //       allocated in the constructor (i.e., we need to keep it around or look it
  //       up via CodeCache::find_blob(...)).
  Unimplemented();
}

void StubQueue::deallocate_unused_tail() {
  CodeBlob* blob = CodeCache::find_blob((void*)_stub_buffer);
  CodeCache::free_unused_tail(blob, used_space());
  // Update the limits to the new, trimmed CodeBlob size
  _buffer_size  = blob->content_size();
  _buffer_limit = blob->content_size();
}

Stub* StubQueue::stub_containing(address pc) const {
  if (contains(pc)) {
    for (Stub* s = first(); s != NULL; s = next(s)) {
      if (stub_contains(s, pc)) return s;
    }
  }
  return NULL;
}


Stub* StubQueue::request_committed(int code_size) {
  Stub* s = request(code_size);
  CodeStrings strings;
  if (s != NULL) commit(code_size, strings);
  return s;
}


Stub* StubQueue::request(int requested_code_size) {
  assert(requested_code_size > 0, "requested_code_size must be > 0");
  if (_mutex != NULL) _mutex->lock_without_safepoint_check();
  Stub* s = current_stub();
  int requested_size = align_up(stub_code_size_to_size(requested_code_size), CodeEntryAlignment);
  if (requested_size <= available_space()) {
    if (is_contiguous()) {
      // Queue: |...|XXXXXXX|.............|
      //        ^0  ^begin  ^end          ^size = limit
      assert(_buffer_limit == _buffer_size, "buffer must be fully usable");
      if (_queue_end + requested_size <= _buffer_size) {
        // code fits in at the end => nothing to do
        CodeStrings strings;
        stub_initialize(s, requested_size, strings);
        return s;
      } else {
        // stub doesn't fit in at the queue end
        // => reduce buffer limit & wrap around
        assert(!is_empty(), "just checkin'");
        _buffer_limit = _queue_end;
        _queue_end = 0;
      }
    }
  }
  if (requested_size <= available_space()) {
    assert(!is_contiguous(), "just checkin'");
    assert(_buffer_limit <= _buffer_size, "queue invariant broken");
    // Queue: |XXX|.......|XXXXXXX|.......|
    //        ^0  ^end    ^begin  ^limit  ^size
    s = current_stub();
    CodeStrings strings;
    stub_initialize(s, requested_size, strings);
    return s;
  }
  // Not enough space left
  if (_mutex != NULL) _mutex->unlock();
  return NULL;
}
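
// Worked example of the wrap-around above, as a sketch with made-up numbers:
// suppose _buffer_size = 1024 (= _buffer_limit), _queue_begin = 256,
// _queue_end = 960, and the aligned request is 128 bytes. The contiguous path
// fails because 960 + 128 > 1024, so _buffer_limit is reduced to 960 and
// _queue_end wraps to 0. The second check then sees the free block in front of
// _queue_begin (offsets 0..255), the 128 bytes fit, and the stub is carved out
// at offset 0, leaving the queue in the non-contiguous state.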


void StubQueue::commit(int committed_code_size, CodeStrings& strings) {
  assert(committed_code_size > 0, "committed_code_size must be > 0");
  int committed_size = align_up(stub_code_size_to_size(committed_code_size), CodeEntryAlignment);
  Stub* s = current_stub();
  assert(committed_size <= stub_size(s), "committed size must not exceed requested size");
  stub_initialize(s, committed_size, strings);
  _queue_end += committed_size;
  _number_of_stubs++;
  if (_mutex != NULL) _mutex->unlock();
  debug_only(stub_verify(s);)
}


void StubQueue::remove_first() {
  if (number_of_stubs() == 0) return;
  Stub* s = first();
  debug_only(stub_verify(s);)
  stub_finalize(s);
  _queue_begin += stub_size(s);
  assert(_queue_begin <= _buffer_limit, "sanity check");
  if (_queue_begin == _queue_end) {
    // buffer empty
    // => reset queue indices
    _queue_begin  = 0;
    _queue_end    = 0;
    _buffer_limit = _buffer_size;
  } else if (_queue_begin == _buffer_limit) {
    // buffer limit reached
    // => reset buffer limit & wrap around
    _buffer_limit = _buffer_size;
    _queue_begin  = 0;
  }
  _number_of_stubs--;
}
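
// Continuing the sketch above: removal advances _queue_begin through the 2nd
// block (offsets 256..959); once _queue_begin reaches _buffer_limit (960), the
// limit is reset to _buffer_size (1024) and _queue_begin wraps to 0, making
// the queue contiguous again. A consumer would typically drain stubs with a
// loop such as
//
//   while (queue->number_of_stubs() > 0) queue->remove_first();
//
// which is effectively what remove_all() below does via remove_first(int).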


void StubQueue::remove_first(int n) {
  int i = MIN2(n, number_of_stubs());
  while (i-- > 0) remove_first();
}


void StubQueue::remove_all() {
  debug_only(verify();)
  remove_first(number_of_stubs());
  assert(number_of_stubs() == 0, "sanity check");
}


void StubQueue::verify() {
  // verify only if initialized
  if (_stub_buffer == NULL) return;
  MutexLocker lock(_mutex, Mutex::_no_safepoint_check_flag);
  // verify index boundaries
  guarantee(0 <= _buffer_size, "_buffer_size must not be negative");
  guarantee(0 <= _buffer_limit && _buffer_limit <= _buffer_size, "_buffer_limit out of bounds");
  guarantee(0 <= _queue_begin && _queue_begin < _buffer_limit, "_queue_begin out of bounds");
  guarantee(0 <= _queue_end && _queue_end <= _buffer_limit, "_queue_end out of bounds");
  // verify alignment
  guarantee(_buffer_size  % CodeEntryAlignment == 0, "_buffer_size not aligned");
  guarantee(_buffer_limit % CodeEntryAlignment == 0, "_buffer_limit not aligned");
  guarantee(_queue_begin  % CodeEntryAlignment == 0, "_queue_begin not aligned");
  guarantee(_queue_end    % CodeEntryAlignment == 0, "_queue_end not aligned");
  // verify buffer limit/size relationship
  if (is_contiguous()) {
    guarantee(_buffer_limit == _buffer_size, "_buffer_limit must equal _buffer_size");
  }
  // verify contents
  int n = 0;
  for (Stub* s = first(); s != NULL; s = next(s)) {
    stub_verify(s);
    n++;
  }
  guarantee(n == number_of_stubs(), "number of stubs inconsistent");
  guarantee(_queue_begin != _queue_end || n == 0, "buffer indices may only be equal when the queue is empty");
}


void StubQueue::print() {
  MutexLocker lock(_mutex, Mutex::_no_safepoint_check_flag);
  for (Stub* s = first(); s != NULL; s = next(s)) {
    stub_print(s);
  }
}