/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
#include "memory/universe.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
#include "services/lowMemoryDetector.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

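// RAII helper that brackets a single object allocation. The constructor
// verifies that the thread is in a valid state for allocating, and the
// destructor checks for out-of-memory conditions and, on success, performs
// the post-allocation verification and event notifications. The allocation
// paths record how the memory was obtained through the embedded flags.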
class MemAllocator::Allocation: StackObj {
  friend class MemAllocator;

  const MemAllocator& _allocator;
  Thread*             _thread;
  oop*                _obj_ptr;
  bool                _overhead_limit_exceeded;
  bool                _allocated_outside_tlab;
  size_t              _allocated_tlab_size;
  bool                _tlab_end_reset_for_sample;

  bool check_out_of_memory();
  void verify_before();
  void verify_after();
  void notify_allocation();
  void notify_allocation_jvmti_allocation_event();
  void notify_allocation_jvmti_sampler();
  void notify_allocation_low_memory_detector();
  void notify_allocation_jfr_sampler();
  void notify_allocation_dtrace_sampler();
  void check_for_bad_heap_word_value() const;
#ifdef ASSERT
  void check_for_valid_allocation_state() const;
#endif

  class PreserveObj;

public:
  Allocation(const MemAllocator& allocator, oop* obj_ptr)
    : _allocator(allocator),
      _thread(Thread::current()),
      _obj_ptr(obj_ptr),
      _overhead_limit_exceeded(false),
      _allocated_outside_tlab(false),
      _allocated_tlab_size(0),
      _tlab_end_reset_for_sample(false)
  {
    verify_before();
  }

  ~Allocation() {
    if (!check_out_of_memory()) {
      verify_after();
      notify_allocation();
    }
  }

  oop obj() const { return *_obj_ptr; }
};

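// Saves the allocated oop in a Handle and clears the raw oop slot while a
// callback that may safepoint (and hence move the object) is in progress.
// The destructor writes the possibly updated oop back into the slot.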
class MemAllocator::Allocation::PreserveObj: StackObj {
  HandleMark _handle_mark;
  Handle     _handle;
  oop* const _obj_ptr;

public:
  PreserveObj(Thread* thread, oop* obj_ptr)
    : _handle_mark(thread),
      _handle(thread, *obj_ptr),
      _obj_ptr(obj_ptr)
  {
    *obj_ptr = NULL;
  }

  ~PreserveObj() {
    *_obj_ptr = _handle();
  }

  oop operator()() const {
    return _handle();
  }
};

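// Returns false if the allocation succeeded. Otherwise throws the
// appropriate OutOfMemoryError and returns true, so the destructor can
// skip verification and notification for failed allocations.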
bool MemAllocator::Allocation::check_out_of_memory() {
  Thread* THREAD = _thread;
  assert(!HAS_PENDING_EXCEPTION, "Unexpected exception, will result in uninitialized storage");

  if (obj() != NULL) {
    return false;
  }

  const char* message = _overhead_limit_exceeded ? "GC overhead limit exceeded" : "Java heap space";
  if (!THREAD->in_retryable_allocation()) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory(message);

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        message);
    }
    oop exception = _overhead_limit_exceeded ?
      Universe::out_of_memory_error_gc_overhead_limit() :
      Universe::out_of_memory_error_java_heap();
    THROW_OOP_(exception, true);
  } else {
    THROW_OOP_(Universe::out_of_memory_error_retry(), true);
  }
}

void MemAllocator::Allocation::verify_before() {
  // Clear unhandled oops for memory allocation. Memory allocation might
  // not take out a lock when allocating from the TLAB, so clear here.
  Thread* THREAD = _thread;
  CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)
  assert(!HAS_PENDING_EXCEPTION, "Should not allocate with exception pending");
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
}

void MemAllocator::Allocation::verify_after() {
  NOT_PRODUCT(check_for_bad_heap_word_value();)
}

void MemAllocator::Allocation::check_for_bad_heap_word_value() const {
  MemRegion obj_range = _allocator.obj_memory_range(obj());
  HeapWord* addr = obj_range.start();
  size_t size = obj_range.word_size();
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

#ifdef ASSERT
void MemAllocator::Allocation::check_for_valid_allocation_state() const {
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!_thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(_thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument.
    _thread->check_for_valid_safepoint_state(true);
  }
}
#endif

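// Posts the JVMTI VMObjectAlloc event and, if heap sampling is enabled,
// feeds this allocation to the thread's heap sampler. The sampled-object
// callback is posted from the JvmtiSampledObjectAllocEventCollector
// destructor and can safepoint, so the new object is protected with a
// Handle (via PreserveObj) across that scope.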
void MemAllocator::Allocation::notify_allocation_jvmti_sampler() {
  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj());

  if (!JvmtiExport::should_post_sampled_object_alloc()) {
    // Sampling disabled
    return;
  }

  if (!_allocated_outside_tlab && _allocated_tlab_size == 0 && !_tlab_end_reset_for_sample) {
    // Only sample a non-TLAB allocation, or a TLAB allocation that either
    // refilled the TLAB or expanded it due to taking a sampler-induced slow
    // path. Fast-path TLAB allocations are never sampled.
    return;
  }

  // If we want to be sampling, protect the allocated object with a Handle
  // before doing the callback. The callback is done in the destructor of
  // the JvmtiSampledObjectAllocEventCollector.
  size_t bytes_since_last = 0;

  {
    PreserveObj obj_h(_thread, _obj_ptr);
    JvmtiSampledObjectAllocEventCollector collector;
    size_t size_in_bytes = _allocator._word_size * HeapWordSize;
    ThreadLocalAllocBuffer& tlab = _thread->tlab();

    if (!_allocated_outside_tlab) {
      bytes_since_last = tlab.bytes_since_last_sample_point();
    }

    _thread->heap_sampler().check_for_sampling(obj_h(), size_in_bytes, bytes_since_last);
  }

  if (_tlab_end_reset_for_sample || _allocated_tlab_size != 0) {
    // Tell the TLAB to forget bytes_since_last if we passed it to the heap sampler.
    _thread->tlab().set_sample_end(bytes_since_last != 0);
  }
}

void MemAllocator::Allocation::notify_allocation_low_memory_detector() {
  // support low memory notifications (no-op if not enabled)
  LowMemoryDetector::detect_low_memory_for_collected_pools();
}

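// Sends JFR allocation events: one for every allocation that went outside
// the TLAB, and one per TLAB refill. Fast-path TLAB allocations are not
// reported individually.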
void MemAllocator::Allocation::notify_allocation_jfr_sampler() {
  HeapWord* mem = (HeapWord*)obj();
  size_t size_in_bytes = _allocator._word_size * HeapWordSize;

  if (_allocated_outside_tlab) {
    AllocTracer::send_allocation_outside_tlab(_allocator._klass, mem, size_in_bytes, _thread);
  } else if (_allocated_tlab_size != 0) {
    // TLAB was refilled
    AllocTracer::send_allocation_in_new_tlab(_allocator._klass, mem, _allocated_tlab_size * HeapWordSize,
                                             size_in_bytes, _thread);
  }
}

void MemAllocator::Allocation::notify_allocation_dtrace_sampler() {
  if (DTraceAllocProbes) {
    // support for DTrace object alloc event (no-op most of the time)
    Klass* klass = _allocator._klass;
    size_t word_size = _allocator._word_size;
    if (klass != NULL && klass->name() != NULL) {
      SharedRuntime::dtrace_object_alloc(obj(), (int)word_size);
    }
  }
}

void MemAllocator::Allocation::notify_allocation() {
  notify_allocation_low_memory_detector();
  notify_allocation_jfr_sampler();
  notify_allocation_dtrace_sampler();
  notify_allocation_jvmti_sampler();
}

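// Allocates directly from the shared heap, bypassing the TLAB. Records the
// outcome in the Allocation so the notification code can distinguish this
// from TLAB allocations, and accounts the allocated bytes to the thread.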
HeapWord* MemAllocator::allocate_outside_tlab(Allocation& allocation) const {
  allocation._allocated_outside_tlab = true;
  HeapWord* mem = Universe::heap()->mem_allocate(_word_size, &allocation._overhead_limit_exceeded);
  if (mem == NULL) {
    return mem;
  }

  NOT_PRODUCT(Universe::heap()->check_for_non_bad_heap_word_value(mem, _word_size));
  size_t size_in_bytes = _word_size * HeapWordSize;
  _thread->incr_allocated_bytes(size_in_bytes);

  return mem;
}

HeapWord* MemAllocator::allocate_inside_tlab(Allocation& allocation) const {
  assert(UseTLAB, "should use UseTLAB");

  // Try allocating from an existing TLAB.
  HeapWord* mem = _thread->tlab().allocate(_word_size);
  if (mem != NULL) {
    return mem;
  }

  // Try refilling the TLAB and allocating the object in it.
  return allocate_inside_tlab_slow(allocation);
}

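// TLAB slow path: the current TLAB could not satisfy this allocation.
// Either retry inside the TLAB past a sampler-induced soft end, keep the
// TLAB and fall back to a shared-heap allocation (returning NULL to the
// caller), or retire the TLAB and refill a new one that the object is
// then bump-allocated into.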
HeapWord* MemAllocator::allocate_inside_tlab_slow(Allocation& allocation) const {
  HeapWord* mem = NULL;
  ThreadLocalAllocBuffer& tlab = _thread->tlab();

  if (JvmtiExport::should_post_sampled_object_alloc()) {
    tlab.set_back_allocation_end();
    mem = tlab.allocate(_word_size);

    // We set back the allocation sample point to try to allocate this, reset it
    // when done.
    allocation._tlab_end_reset_for_sample = true;

    if (mem != NULL) {
      return mem;
    }
  }

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (tlab.free() > tlab.refill_waste_limit()) {
    tlab.record_slow_allocation(_word_size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = tlab.compute_size(_word_size);

  tlab.retire_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB requesting new_tlab_size. Any size
  // between minimal and new_tlab_size is accepted.
  size_t min_tlab_size = ThreadLocalAllocBuffer::compute_min_size(_word_size);
  mem = Universe::heap()->allocate_new_tlab(min_tlab_size, new_tlab_size, &allocation._allocated_tlab_size);
  if (mem == NULL) {
    assert(allocation._allocated_tlab_size == 0,
           "Allocation failed, but actual size was updated. min: " SIZE_FORMAT
           ", desired: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
           min_tlab_size, new_tlab_size, allocation._allocated_tlab_size);
    return NULL;
  }
  assert(allocation._allocated_tlab_size != 0, "Allocation succeeded but actual size not updated. mem at: "
         PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
         p2i(mem), min_tlab_size, new_tlab_size);

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(mem, allocation._allocated_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(mem + hdr_size, allocation._allocated_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }

  tlab.fill(mem, mem + _word_size, allocation._allocated_tlab_size);
  return mem;
}

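// Top-level raw memory allocation: TLAB first when TLABs are enabled,
// falling back to a direct heap allocation.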
HeapWord* MemAllocator::mem_allocate(Allocation& allocation) const {
  if (UseTLAB) {
    HeapWord* result = allocate_inside_tlab(allocation);
    if (result != NULL) {
      return result;
    }
  }

  return allocate_outside_tlab(allocation);
}

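// Allocates and initializes a new object. The Allocation helper is scoped
// so that its destructor runs before obj is returned: on failure it throws
// the OutOfMemoryError, on success it verifies the result and posts the
// allocation notifications.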
oop MemAllocator::allocate() const {
  oop obj = NULL;
  {
    Allocation allocation(*this, &obj);
    HeapWord* mem = mem_allocate(allocation);
    if (mem != NULL) {
      obj = initialize(mem);
    }
  }
  return obj;
}

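// Zeroes the object body, leaving the header words to be set up by
// finish(). Also clears the klass gap used with compressed class pointers.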
void MemAllocator::mem_clear(HeapWord* mem) const {
  assert(mem != NULL, "cannot initialize NULL object");
  const size_t hs = oopDesc::header_size();
  assert(_word_size >= hs, "unexpected object size");
  oopDesc::set_klass_gap(mem, 0);
  Copy::fill_to_aligned_words(mem + hs, _word_size - hs);
}

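// Installs the object header. With biased locking the mark word starts out
// as the klass' prototype header (potentially biasable); otherwise it is
// the neutral markOop prototype.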
oop MemAllocator::finish(HeapWord* mem) const {
  assert(mem != NULL, "NULL object pointer");
  if (UseBiasedLocking) {
    oopDesc::set_mark_raw(mem, _klass->prototype_header());
  } else {
    // May be bootstrapping
    oopDesc::set_mark_raw(mem, markOopDesc::prototype());
  }
  // Need a release store to ensure array/class length, mark word, and
  // object zeroing are visible before setting the klass non-NULL, for
  // concurrent collectors.
  oopDesc::release_set_klass(mem, _klass);
  return oop(mem);
}

oop ObjAllocator::initialize(HeapWord* mem) const {
  mem_clear(mem);
  return finish(mem);
}

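// For arrays whose elements are deliberately left uninitialized (_do_zero
// is false), restrict the post-allocation zap check to the header words,
// since the unzeroed element area may still contain badHeapWordVal.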
MemRegion ObjArrayAllocator::obj_memory_range(oop obj) const {
  if (_do_zero) {
    return MemAllocator::obj_memory_range(obj);
  }
  ArrayKlass* array_klass = ArrayKlass::cast(_klass);
  const size_t hs = arrayOopDesc::header_size(array_klass->element_type());
  return MemRegion((HeapWord*)obj, hs);
}

oop ObjArrayAllocator::initialize(HeapWord* mem) const {
  // Set array length before setting the _klass field because a
  // non-NULL klass field indicates that the object is parsable by
  // concurrent GC.
  assert(_length >= 0, "length should be non-negative");
  if (_do_zero) {
    mem_clear(mem);
  }
  arrayOopDesc::set_length(mem, _length);
  return finish(mem);
}

oop ClassAllocator::initialize(HeapWord* mem) const {
  // Set oop_size field before setting the _klass field because a
  // non-NULL _klass field indicates that the object is parsable by
  // concurrent GC.
  assert(_word_size > 0, "oop_size must be positive.");
  mem_clear(mem);
  java_lang_Class::set_oop_size(mem, (int)_word_size);
  return finish(mem);
}