/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/arena.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "runtime/task.hpp"
#include "runtime/threadCritical.hpp"
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"
// Allocate using malloc; unless the caller requests
// AllocFailStrategy::RETURN_NULL, the VM exits on failure.
char* AllocateHeap(size_t size,
                   MEMFLAGS flags,
                   const NativeCallStack& stack,
                   AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
  char* p = (char*) os::malloc(size, flags, stack);
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
  }
  return p;
}

char* AllocateHeap(size_t size,
                   MEMFLAGS flags,
                   AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
  return AllocateHeap(size, flags, CALLER_PC, alloc_failmode);
}
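
// Illustrative usage (a sketch, not code exercised in this file; mtInternal is
// one of the MEMFLAGS categories):
//   char* fatal_buf = AllocateHeap(1024, mtInternal);  // VM exits if malloc fails
//   char* maybe_buf = AllocateHeap(1024, mtInternal, AllocFailStrategy::RETURN_NULL);
//   if (maybe_buf == NULL) {
//     // caller handles the failure instead of the VM aborting
//   }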

char* ReallocateHeap(char *old,
                     size_t size,
                     MEMFLAGS flag,
                     AllocFailType alloc_failmode) {
  char* p = (char*) os::realloc(old, size, flag, CALLER_PC);
  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
    vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
  }
  return p;
}

void FreeHeap(void* p) {
  os::free(p);
}
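
// Illustrative lifecycle (a sketch): a heap block from AllocateHeap may be
// grown with ReallocateHeap and must eventually be released with FreeHeap:
//   char* buf = AllocateHeap(512, mtInternal);
//   buf = ReallocateHeap(buf, 1024, mtInternal, AllocFailStrategy::EXIT_OOM);
//   FreeHeap(buf);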

void* MetaspaceObj::_shared_metaspace_base = NULL;
void* MetaspaceObj::_shared_metaspace_top = NULL;

void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
void StackObj::operator delete(void* p) { ShouldNotCallThis(); }
void* StackObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
void StackObj::operator delete [](void* p) { ShouldNotCallThis(); }
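
// The poisoned operators above make heap allocation of StackObj subclasses a
// guaranteed assertion failure. Illustrative (a sketch, using ResourceMark as
// an example of a StackObj subclass):
//   ResourceMark rm;                          // OK: automatic (stack) storage
//   ResourceMark* rm2 = new ResourceMark();   // would hit ShouldNotCallThis()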

void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
                                 size_t word_size,
                                 MetaspaceObj::Type type, TRAPS) throw() {
  // Klass has its own operator new
  return Metaspace::allocate(loader_data, word_size, type, THREAD);
}
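
// Illustrative call shape (a sketch; Foo stands in for a hypothetical
// MetaspaceObj subclass and wsize for its size in words):
//   Foo* f = new (loader_data, wsize, MetaspaceObj::SymbolType, THREAD) Foo();
// The placement arguments route the allocation into the metaspace owned by
// loader_data rather than the C heap.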

bool MetaspaceObj::is_valid(const MetaspaceObj* p) {
  // Weed out obvious bogus values first without traversing metaspace
  if ((size_t)p < os::min_page_size()) {
    return false;
  } else if (!is_aligned((address)p, sizeof(MetaWord))) {
    return false;
  }
  return Metaspace::contains((void*)p);
}

void MetaspaceObj::print_address_on(outputStream* st) const {
  st->print(" {" INTPTR_FORMAT "}", p2i(this));
}

void* ResourceObj::operator new(size_t size, Arena *arena) throw() {
  address res = (address)arena->Amalloc(size);
  DEBUG_ONLY(set_allocation_type(res, ARENA);)
  return res;
}

void* ResourceObj::operator new [](size_t size, Arena *arena) throw() {
  address res = (address)arena->Amalloc(size);
  DEBUG_ONLY(set_allocation_type(res, ARENA);)
  return res;
}

void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
  address res = NULL;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC);
    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
  return (address) operator new(size, type, flags);
}
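
// Illustrative usage (a sketch; Foo and my_arena are hypothetical): by default
// a ResourceObj subclass lives in the current thread's resource area, and the
// placement arguments select other backing storage:
//   Foo* r = new Foo();                                     // resource area
//   Foo* a = new (my_arena) Foo();                          // caller's Arena
//   Foo* c = new (ResourceObj::C_HEAP, mtInternal) Foo();   // C heap
//   delete c;   // only legal for the C_HEAP variant (asserted in operator delete below)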

void* ResourceObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
                                allocation_type type, MEMFLAGS flags) throw() {
  // Should only be called with std::nothrow; use another operator new() otherwise.
  address res = NULL;
  switch (type) {
   case C_HEAP:
    res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
    DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
    break;
   case RESOURCE_AREA:
    // new(size) sets allocation type RESOURCE_AREA.
    res = (address)operator new(size, std::nothrow);
    break;
   default:
    ShouldNotReachHere();
  }
  return res;
}

void* ResourceObj::operator new [](size_t size, const std::nothrow_t& nothrow_constant,
                                   allocation_type type, MEMFLAGS flags) throw() {
  return (address)operator new(size, nothrow_constant, type, flags);
}
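
// Illustrative (a sketch; Foo is hypothetical): the nothrow variants let a
// caller handle allocation failure for C-heap-backed ResourceObj subclasses:
//   Foo* f = new (std::nothrow, ResourceObj::C_HEAP, mtInternal) Foo();
//   if (f == NULL) {
//     // recover instead of exiting the VM
//   }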

void ResourceObj::operator delete(void* p) {
  assert(((ResourceObj *)p)->allocated_on_C_heap(),
         "delete only allowed for C_HEAP objects");
  DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
  FreeHeap(p);
}

void ResourceObj::operator delete [](void* p) {
  operator delete(p);
}

#ifdef ASSERT
void ResourceObj::set_allocation_type(address res, allocation_type type) {
  // Set allocation type in the resource object
  uintptr_t allocation = (uintptr_t)res;
  assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least: " INTPTR_FORMAT, p2i(res));
  assert(type <= allocation_mask, "incorrect allocation type");
  ResourceObj* resobj = (ResourceObj *)res;
  resobj->_allocation_t[0] = ~(allocation + type);
  if (type != STACK_OR_EMBEDDED) {
    // Called from operator new(), set verification value.
    resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
  }
}
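
// Encoding sketch: for an object at aligned address A tagged with type T,
//   _allocation_t[0] == ~(A + T)
// so ~(_allocation_t[0] | allocation_mask) recovers A (the low bits of A are
// zero by the alignment assert above) and (~_allocation_t[0]) & allocation_mask
// recovers T, which is exactly what the accessors below check.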

ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
  assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
  return (allocation_type)((~_allocation_t[0]) & allocation_mask);
}

bool ResourceObj::is_type_set() const {
  allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
  return get_allocation_type() == type &&
         (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
}

// This whole business of passing information from ResourceObj::operator new
// to the ResourceObj constructor via fields in the "object" is technically UB.
// But it seems to work within the limitations of HotSpot usage (such as no
// multiple inheritance) with the compilers and compiler options we're using.
// And it gives some possibly useful checking for misuse of ResourceObj.
void ResourceObj::initialize_allocation_info() {
  if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
    // Operator new() is not called for allocations
    // on stack and for embedded objects.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
    // For some reason we got a value which resembles
    // an embedded or stack object (operator new() does not
    // set such a type). Keep it since it is a valid value
    // (even if it was garbage).
    // Ignore garbage in other fields.
  } else if (is_type_set()) {
    // Operator new() was called and type was set.
    assert(!allocated_on_stack(),
           "not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
           p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  } else {
    // Operator new() was not called.
    // Assume that it is an embedded or stack object.
    set_allocation_type((address)this, STACK_OR_EMBEDDED);
  }
  _allocation_t[1] = 0; // Zap verification value
}

ResourceObj::ResourceObj() {
  initialize_allocation_info();
}

ResourceObj::ResourceObj(const ResourceObj&) {
  // Initialize _allocation_t as a new object, ignoring the object being copied.
  initialize_allocation_info();
}

ResourceObj& ResourceObj::operator=(const ResourceObj& r) {
  assert(allocated_on_stack(),
         "copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
         p2i(this), get_allocation_type(), _allocation_t[0], _allocation_t[1]);
  // Keep the current _allocation_t value.
  return *this;
}

ResourceObj::~ResourceObj() {
  // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
  if (!allocated_on_C_heap()) { // ResourceObj::operator delete() will zap _allocation for C_heap.
    _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
  }
}
#endif // ASSERT

//--------------------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT
void AllocatedObj::print() const       { print_on(tty); }
void AllocatedObj::print_value() const { print_value_on(tty); }

void AllocatedObj::print_on(outputStream* st) const {
  st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
}

void AllocatedObj::print_value_on(outputStream* st) const {
  st->print("AllocatedObj(" INTPTR_FORMAT ")", p2i(this));
}

AllocStats::AllocStats() {
  start_mallocs      = os::num_mallocs;
  start_frees        = os::num_frees;
  start_malloc_bytes = os::alloc_bytes;
  start_mfree_bytes  = os::free_bytes;
  start_res_bytes    = Arena::_bytes_allocated;
}

julong AllocStats::num_mallocs()    { return os::num_mallocs - start_mallocs; }
julong AllocStats::alloc_bytes()    { return os::alloc_bytes - start_malloc_bytes; }
julong AllocStats::num_frees()      { return os::num_frees - start_frees; }
julong AllocStats::free_bytes()     { return os::free_bytes - start_mfree_bytes; }
julong AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; }
void AllocStats::print() {
  tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), "
                UINT64_FORMAT " frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc",
                num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M);
}
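
// Illustrative usage (a sketch; run_phase is hypothetical): bracket a phase of
// interest to report its native allocation churn:
//   AllocStats stats;   // snapshots the os:: counters at construction
//   run_phase();        // work whose allocations we want to measure
//   stats.print();      // prints malloc/free/resource-area deltas since the snapshot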

ReallocMark::ReallocMark() {
#ifdef ASSERT
  Thread *thread = Thread::current();
  _nesting = thread->resource_area()->nesting();
#endif
}

void ReallocMark::check() {
#ifdef ASSERT
  if (_nesting != Thread::current()->resource_area()->nesting()) {
    fatal("allocation bug: array could grow within nested ResourceMark");
  }
#endif
}
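
// Illustrative usage (a sketch): a resource-area-backed array can record a
// ReallocMark when created and call check() before growing, so that growth
// under a nested ResourceMark (whose destruction would free the newly grown
// storage) is caught in debug builds:
//   ReallocMark _nesting_check;   // member of the array object
//   ...
//   _nesting_check.check();       // before reallocating the backing memory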

#endif // Non-product