/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
|---|
| 24 |  | 
|---|
| 25 | #ifndef SHARE_GC_SHARED_THREADLOCALALLOCBUFFER_INLINE_HPP | 
|---|
| 26 | #define SHARE_GC_SHARED_THREADLOCALALLOCBUFFER_INLINE_HPP | 
|---|
| 27 |  | 
|---|
| 28 | #include "gc/shared/collectedHeap.hpp" | 
|---|
| 29 | #include "gc/shared/threadLocalAllocBuffer.hpp" | 
|---|
| 30 | #include "memory/universe.hpp" | 
|---|
| 31 | #include "logging/log.hpp" | 
|---|
| 32 | #include "runtime/thread.hpp" | 
|---|
| 33 | #include "utilities/copy.hpp" | 
|---|
| 34 |  | 
|---|
| 35 | inline HeapWord* ThreadLocalAllocBuffer::allocate(size_t size) { | 
|---|
| 36 | invariants(); | 
|---|
| 37 | HeapWord* obj = top(); | 
|---|
| 38 | if (pointer_delta(end(), obj) >= size) { | 
|---|
| 39 | // successful thread-local allocation | 
|---|
| 40 | #ifdef ASSERT | 
|---|
| 41 | // Skip mangling the space corresponding to the object header to | 
|---|
| 42 | // ensure that the returned space is not considered parsable by | 
|---|
| 43 | // any concurrent GC thread. | 
|---|
| 44 | size_t hdr_size = oopDesc::header_size(); | 
|---|
| 45 | Copy::fill_to_words(obj + hdr_size, size - hdr_size, badHeapWordVal); | 
|---|
| 46 | #endif // ASSERT | 
|---|
| 47 | // This addition is safe because we know that top is | 
|---|
| 48 | // at least size below end, so the add can't wrap. | 
|---|
| 49 | set_top(obj + size); | 
|---|
| 50 |  | 
|---|
| 51 | invariants(); | 
|---|
| 52 | return obj; | 
|---|
| 53 | } | 
|---|
| 54 | return NULL; | 
|---|
| 55 | } | 
|---|
| 56 |  | 
|---|
| 57 | inline size_t ThreadLocalAllocBuffer::compute_size(size_t obj_size) { | 
|---|
| 58 | // Compute the size for the new TLAB. | 
|---|
| 59 | // The "last" tlab may be smaller to reduce fragmentation. | 
|---|
| 60 | // unsafe_max_tlab_alloc is just a hint. | 
|---|
| 61 | const size_t available_size = Universe::heap()->unsafe_max_tlab_alloc(thread()) / HeapWordSize; | 
|---|
| 62 | size_t new_tlab_size = MIN3(available_size, desired_size() + align_object_size(obj_size), max_size()); | 
|---|
| 63 |  | 
|---|
| 64 | // Make sure there's enough room for object and filler int[]. | 
|---|
| 65 | if (new_tlab_size < compute_min_size(obj_size)) { | 
|---|
| 66 | // If there isn't enough room for the allocation, return failure. | 
|---|
| 67 | log_trace(gc, tlab)( "ThreadLocalAllocBuffer::compute_size("SIZE_FORMAT ") returns failure", | 
|---|
| 68 | obj_size); | 
|---|
| 69 | return 0; | 
|---|
| 70 | } | 
|---|
| 71 | log_trace(gc, tlab)( "ThreadLocalAllocBuffer::compute_size("SIZE_FORMAT ") returns "SIZE_FORMAT, | 
|---|
| 72 | obj_size, new_tlab_size); | 
|---|
| 73 | return new_tlab_size; | 
|---|
| 74 | } | 
|---|
| 75 |  | 
|---|
| 76 | inline size_t ThreadLocalAllocBuffer::compute_min_size(size_t obj_size) { | 
|---|
| 77 | const size_t aligned_obj_size = align_object_size(obj_size); | 
|---|
| 78 | const size_t size_with_reserve = aligned_obj_size + alignment_reserve(); | 
|---|
| 79 | return MAX2(size_with_reserve, heap_word_size(MinTLABSize)); | 
|---|
| 80 | } | 
|---|
| 81 |  | 
|---|
| 82 | void ThreadLocalAllocBuffer::record_slow_allocation(size_t obj_size) { | 
|---|
| 83 | // Raise size required to bypass TLAB next time. Why? Else there's | 
|---|
| 84 | // a risk that a thread that repeatedly allocates objects of one | 
|---|
| 85 | // size will get stuck on this slow path. | 
|---|
| 86 |  | 
|---|
| 87 | set_refill_waste_limit(refill_waste_limit() + refill_waste_limit_increment()); | 
|---|
| 88 |  | 
|---|
| 89 | _slow_allocations++; | 
|---|
| 90 |  | 
|---|
| 91 | log_develop_trace(gc, tlab)( "TLAB: %s thread: "INTPTR_FORMAT " [id: %2d]" | 
|---|
| 92 | " obj: "SIZE_FORMAT | 
|---|
| 93 | " free: "SIZE_FORMAT | 
|---|
| 94 | " waste: "SIZE_FORMAT, | 
|---|
| 95 | "slow", p2i(thread()), thread()->osthread()->thread_id(), | 
|---|
| 96 | obj_size, free(), refill_waste_limit()); | 
|---|
| 97 | } | 
|---|
| 98 |  | 
|---|
| 99 | #endif // SHARE_GC_SHARED_THREADLOCALALLOCBUFFER_INLINE_HPP | 
|---|
| 100 |  | 
|---|