/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"

void ObjectStartArray::initialize(MemRegion reserved_region) {
  // We're based on the assumption that we use the same
  // size blocks as the card table.
  assert((int)block_size == (int)CardTable::card_size, "Sanity");
  assert((int)block_size <= 512, "block_size must be less than or equal to 512");

  // Calculate how much space must be reserved
  _reserved_region = reserved_region;

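  // The start array keeps one jbyte entry per block, so the number of blocks
  // in the reserved region is also the number of bytes to reserve.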
  size_t bytes_to_reserve = reserved_region.word_size() / block_size_in_words;
  assert(bytes_to_reserve > 0, "Sanity");

  bytes_to_reserve =
    align_up(bytes_to_reserve, os::vm_allocation_granularity());

  // Do not use large-pages for the backing store. The one large page region
  // will be used for the heap proper.
  ReservedSpace backing_store(bytes_to_reserve);
  if (!backing_store.is_reserved()) {
    vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
  }
  MemTracker::record_virtual_memory_type((address)backing_store.base(), mtGC);

  // We do not commit any memory initially
  if (!_virtual_space.initialize(backing_store, 0)) {
    vm_exit_during_initialization("Could not commit space for ObjectStartArray");
  }

  _raw_base = (jbyte*)_virtual_space.low_boundary();

  if (_raw_base == NULL) {
    vm_exit_during_initialization("Could not get raw_base address");
  }

  MemTracker::record_virtual_memory_type((address)_raw_base, mtGC);


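  // Bias the base by the reserved region's start so that block_for_addr() can
  // index an entry directly as _offset_base[addr >> block_shift], without
  // subtracting the heap start on every lookup.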
  _offset_base = _raw_base - (size_t(reserved_region.start()) >> block_shift);

  _covered_region.set_start(reserved_region.start());
  _covered_region.set_word_size(0);

  _blocks_region.set_start((HeapWord*)_raw_base);
  _blocks_region.set_word_size(0);
}

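// Adjust the start array to cover mr: commit more of the backing store when
// the covered region grows and uncommit when it shrinks.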
void ObjectStartArray::set_covered_region(MemRegion mr) {
  assert(_reserved_region.contains(mr), "MemRegion outside of reserved space");
  assert(_reserved_region.start() == mr.start(), "Attempt to move covered region");

  HeapWord* low_bound  = mr.start();
  HeapWord* high_bound = mr.end();
  assert((uintptr_t(low_bound)  & (block_size - 1))  == 0, "heap must start at block boundary");
  assert((uintptr_t(high_bound) & (block_size - 1))  == 0, "heap must end at block boundary");

  size_t requested_blocks_size_in_bytes = mr.word_size() / block_size_in_words;

  // Only commit memory in page sized chunks
  requested_blocks_size_in_bytes =
    align_up(requested_blocks_size_in_bytes, os::vm_page_size());

  _covered_region = mr;

  size_t current_blocks_size_in_bytes = _blocks_region.byte_size();

  if (requested_blocks_size_in_bytes > current_blocks_size_in_bytes) {
    // Expand
    size_t expand_by = requested_blocks_size_in_bytes - current_blocks_size_in_bytes;
    if (!_virtual_space.expand_by(expand_by)) {
      vm_exit_out_of_memory(expand_by, OOM_MMAP_ERROR, "object start array expansion");
    }
    // Clear *only* the newly allocated region
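    // _blocks_region has not been resized yet, so its end() still marks the
    // first byte of the newly committed range.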
    memset(_blocks_region.end(), clean_block, expand_by);
  }

  if (requested_blocks_size_in_bytes < current_blocks_size_in_bytes) {
    // Shrink
    size_t shrink_by = current_blocks_size_in_bytes - requested_blocks_size_in_bytes;
    _virtual_space.shrink_by(shrink_by);
  }

  _blocks_region.set_word_size(requested_blocks_size_in_bytes / sizeof(HeapWord));

  assert(requested_blocks_size_in_bytes % sizeof(HeapWord) == 0, "Block table not expanded in word sized increment");
  assert(requested_blocks_size_in_bytes == _blocks_region.byte_size(), "Sanity");
  assert(block_for_addr(low_bound) == &_raw_base[0], "Checking start of map");
  assert(block_for_addr(high_bound-1) <= &_raw_base[_blocks_region.byte_size()-1], "Checking end of map");
}

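// Reset every entry to clean_block, i.e. no object start recorded.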
void ObjectStartArray::reset() {
  memset(_blocks_region.start(), clean_block, _blocks_region.byte_size());
}

bool ObjectStartArray::object_starts_in_range(HeapWord* start_addr,
                                              HeapWord* end_addr) const {
  assert(start_addr <= end_addr,
         "Range is wrong. start_addr (" PTR_FORMAT ") is after end_addr (" PTR_FORMAT ")",
         p2i(start_addr), p2i(end_addr));
  if (start_addr > end_addr) {
    return false;
  }

  jbyte* start_block = block_for_addr(start_addr);
  jbyte* end_block = block_for_addr(end_addr);

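  // An entry that is not clean_block marks a block in which at least one
  // object starts.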
  for (jbyte* block = start_block; block <= end_block; block++) {
    if (*block != clean_block) {
      return true;
    }
  }

  return false;
}