1/*
2 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 */
23
24#include "precompiled.hpp"
25#include "code/relocInfo.hpp"
26#include "code/nmethod.hpp"
27#include "code/icBuffer.hpp"
28#include "gc/shared/barrierSet.hpp"
29#include "gc/shared/barrierSetNMethod.hpp"
30#include "gc/z/zGlobals.hpp"
31#include "gc/z/zHash.inline.hpp"
32#include "gc/z/zLock.inline.hpp"
33#include "gc/z/zNMethodData.hpp"
34#include "gc/z/zNMethodTable.hpp"
35#include "gc/z/zNMethodTableEntry.hpp"
36#include "gc/z/zNMethodTableIteration.hpp"
37#include "gc/z/zOopClosures.inline.hpp"
38#include "gc/z/zSafeDelete.inline.hpp"
39#include "gc/z/zTask.hpp"
40#include "gc/z/zWorkers.hpp"
41#include "logging/log.hpp"
42#include "memory/allocation.hpp"
43#include "memory/iterator.hpp"
44#include "memory/resourceArea.hpp"
45#include "runtime/atomic.hpp"
46#include "runtime/orderAccess.hpp"
47#include "utilities/debug.hpp"
48
// Open-addressed hash table (linear probing) mapping nmethod addresses to
// entries. Replaced wholesale by rebuild(); freed through _safe_delete.
ZNMethodTableEntry* ZNMethodTable::_table = NULL;
// Current table capacity in entries (zero before first use, otherwise a
// power of two — asserted by first_index()/next_index())
size_t ZNMethodTable::_size = 0;
// Number of registered (live) entries in the table
size_t ZNMethodTable::_nregistered = 0;
// Number of unregistered (tombstone) entries; reset to zero by rebuild()
size_t ZNMethodTable::_nunregistered = 0;
// Shared state for iterating over the table (see nmethods_do_begin/end)
ZNMethodTableIteration ZNMethodTable::_iteration;
// Defers deletion of a replaced table while an iteration is in progress
ZSafeDelete<ZNMethodTableEntry[]> ZNMethodTable::_safe_delete;
55
56size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) {
57 assert(is_power_of_2(size), "Invalid size");
58 const size_t mask = size - 1;
59 const size_t hash = ZHash::address_to_uint32((uintptr_t)nm);
60 return hash & mask;
61}
62
63size_t ZNMethodTable::next_index(size_t prev_index, size_t size) {
64 assert(is_power_of_2(size), "Invalid size");
65 const size_t mask = size - 1;
66 return (prev_index + 1) & mask;
67}
68
69bool ZNMethodTable::register_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
70 const ZNMethodTableEntry entry(nm);
71 size_t index = first_index(nm, size);
72
73 for (;;) {
74 const ZNMethodTableEntry table_entry = table[index];
75
76 if (!table_entry.registered() && !table_entry.unregistered()) {
77 // Insert new entry
78 table[index] = entry;
79 return true;
80 }
81
82 if (table_entry.registered() && table_entry.method() == nm) {
83 // Replace existing entry
84 table[index] = entry;
85 return false;
86 }
87
88 index = next_index(index, size);
89 }
90}
91
92void ZNMethodTable::unregister_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
93 size_t index = first_index(nm, size);
94
95 for (;;) {
96 const ZNMethodTableEntry table_entry = table[index];
97 assert(table_entry.registered() || table_entry.unregistered(), "Entry not found");
98
99 if (table_entry.registered() && table_entry.method() == nm) {
100 // Remove entry
101 table[index] = ZNMethodTableEntry(true /* unregistered */);
102 return;
103 }
104
105 index = next_index(index, size);
106 }
107}
108
// Allocate a table of new_size entries, re-insert all registered entries
// from the old table (dropping unregistered tombstones in the process),
// and install the new table. The old table is handed to _safe_delete,
// which defers the actual delete while an iteration is in progress.
void ZNMethodTable::rebuild(size_t new_size) {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");

  assert(is_power_of_2(new_size), "Invalid size");

  // The trailing 0.0 is the post-rebuild unregistered percentage: a
  // rebuild always drops all tombstones.
  log_debug(gc, nmethod)("Rebuilding NMethod Table: "
                         SIZE_FORMAT "->" SIZE_FORMAT " entries, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) registered, "
                         SIZE_FORMAT "(%.0lf%%->%.0lf%%) unregistered",
                         _size, new_size,
                         _nregistered, percent_of(_nregistered, _size), percent_of(_nregistered, new_size),
                         _nunregistered, percent_of(_nunregistered, _size), 0.0);

  // Allocate new table
  ZNMethodTableEntry* const new_table = new ZNMethodTableEntry[new_size];

  // Transfer all registered entries; tombstones are intentionally skipped
  for (size_t i = 0; i < _size; i++) {
    const ZNMethodTableEntry entry = _table[i];
    if (entry.registered()) {
      register_entry(new_table, new_size, entry.method());
    }
  }

  // Free old table (deferred if an iteration is using it)
  _safe_delete(_table);

  // Install new table
  _table = new_table;
  _size = new_size;
  _nunregistered = 0;
}
141
142void ZNMethodTable::rebuild_if_needed() {
143 // The hash table uses linear probing. To avoid wasting memory while
144 // at the same time maintaining good hash collision behavior we want
145 // to keep the table occupancy between 30% and 70%. The table always
146 // grows/shrinks by doubling/halving its size. Pruning of unregistered
147 // entries is done by rebuilding the table with or without resizing it.
148 const size_t min_size = 1024;
149 const size_t shrink_threshold = _size * 0.30;
150 const size_t prune_threshold = _size * 0.65;
151 const size_t grow_threshold = _size * 0.70;
152
153 if (_size == 0) {
154 // Initialize table
155 rebuild(min_size);
156 } else if (_nregistered < shrink_threshold && _size > min_size) {
157 // Shrink table
158 rebuild(_size / 2);
159 } else if (_nregistered + _nunregistered > grow_threshold) {
160 // Prune or grow table
161 if (_nregistered < prune_threshold) {
162 // Prune table
163 rebuild(_size);
164 } else {
165 // Grow table
166 rebuild(_size * 2);
167 }
168 }
169}
170
// Number of nmethods currently registered (live entries) in the table
size_t ZNMethodTable::registered_nmethods() {
  return _nregistered;
}
174
// Number of unregistered (tombstone) entries accumulated since the last
// rebuild of the table
size_t ZNMethodTable::unregistered_nmethods() {
  return _nunregistered;
}
178
179void ZNMethodTable::register_nmethod(nmethod* nm) {
180 assert(CodeCache_lock->owned_by_self(), "Lock must be held");
181
182 // Grow/Shrink/Prune table if needed
183 rebuild_if_needed();
184
185 // Insert new entry
186 if (register_entry(_table, _size, nm)) {
187 // New entry registered. When register_entry() instead returns
188 // false the nmethod was already in the table so we do not want
189 // to increase number of registered entries in that case.
190 _nregistered++;
191 }
192}
193
194void ZNMethodTable::wait_until_iteration_done() {
195 assert(CodeCache_lock->owned_by_self(), "Lock must be held");
196
197 while (_iteration.in_progress()) {
198 CodeCache_lock->wait_without_safepoint_check();
199 }
200}
201
202void ZNMethodTable::unregister_nmethod(nmethod* nm) {
203 assert(CodeCache_lock->owned_by_self(), "Lock must be held");
204
205 // Remove entry
206 unregister_entry(_table, _size, nm);
207 _nunregistered++;
208 _nregistered--;
209}
210
// Start an iteration over the table. Deferred deletion is enabled before
// the iteration is set up so that a concurrent rebuild() cannot free the
// table out from under the iterator.
void ZNMethodTable::nmethods_do_begin() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Do not allow the table to be deleted while iterating
  _safe_delete.enable_deferred_delete();

  // Prepare iteration over the current table and size
  _iteration.nmethods_do_begin(_table, _size);
}
220
// Finish an iteration over the table: tear down the iteration state,
// re-enable table deletion (releasing any delete deferred during the
// iteration), and wake threads blocked in wait_until_iteration_done().
void ZNMethodTable::nmethods_do_end() {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  // Finish iteration
  _iteration.nmethods_do_end();

  // Allow the table to be deleted
  _safe_delete.disable_deferred_delete();

  // Notify iteration done
  CodeCache_lock->notify_all();
}
233
// Apply the closure to nmethods in the table; delegates to the iteration
// state set up by nmethods_do_begin().
void ZNMethodTable::nmethods_do(NMethodClosure* cl) {
  _iteration.nmethods_do(cl);
}
237