/*
 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
| 24 | |
| 25 | #include "precompiled.hpp" |
| 26 | #include "gc/parallel/parMarkBitMap.inline.hpp" |
| 27 | #include "gc/parallel/psCompactionManager.inline.hpp" |
| 28 | #include "gc/parallel/psParallelCompact.inline.hpp" |
| 29 | #include "oops/oop.inline.hpp" |
| 30 | #include "runtime/atomic.hpp" |
| 31 | #include "runtime/os.hpp" |
| 32 | #include "services/memTracker.hpp" |
| 33 | #include "utilities/align.hpp" |
| 34 | #include "utilities/bitMap.inline.hpp" |
| 35 | |
bool
ParMarkBitMap::initialize(MemRegion covered_region)
{
  // Reserve and commit backing storage for two parallel bitmaps ("begin"
  // bits and "end" bits) covering covered_region, splitting the storage
  // evenly between _beg_bits and _end_bits.  Returns false (with all fields
  // reset and the reservation released) if the space cannot be expanded.
  const idx_t bits = bits_required(covered_region);
  // The bits will be divided evenly between two bitmaps; each of them should be
  // an integral number of words.
  assert(bits % (BitsPerWord * 2) == 0, "region size unaligned");

  const size_t words = bits / BitsPerWord;
  const size_t raw_bytes = words * sizeof(idx_t);
  // NOTE(review): 10 is presumably the minimum page count the region must
  // span when selecting a (possibly large) page size -- confirm against
  // os::page_size_for_region_aligned.
  const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
  const size_t granularity = os::vm_allocation_granularity();
  _reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));

  // Request explicit alignment only when a page size larger than the default
  // was chosen; otherwise let the reservation use the default alignment.
  const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
    MAX2(page_sz, granularity);
  ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
  os::trace_page_sizes("Mark Bitmap", raw_bytes, raw_bytes, page_sz,
                       rs.base(), rs.size());

  // Tag the reservation as GC memory for Native Memory Tracking.
  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);

  _virtual_space = new PSVirtualSpace(rs, page_sz);
  if (_virtual_space != NULL && _virtual_space->expand_by(_reserved_byte_size)) {
    _region_start = covered_region.start();
    _region_size = covered_region.word_size();
    BitMap::bm_word_t* map = (BitMap::bm_word_t*)_virtual_space->reserved_low_addr();
    // First half of the storage holds the begin bits, second half the end bits.
    _beg_bits = BitMapView(map, bits / 2);
    _end_bits = BitMapView(map + words / 2, bits / 2);
    return true;
  }

  // Expansion failed: reset fields so the bitmap is observably uninitialized.
  _region_start = 0;
  _region_size = 0;
  if (_virtual_space != NULL) {
    delete _virtual_space;
    _virtual_space = NULL;
    // Release memory reserved in the space.
    rs.release();
  }
  return false;
}
| 78 | |
#ifdef ASSERT
// Debug-only counters, defined elsewhere: the number of objects marked via
// mark_obj() and the total size (in words) of those objects.
extern size_t mark_bitmap_count;
extern size_t mark_bitmap_size;
#endif  // #ifdef ASSERT
| 83 | |
| 84 | bool |
| 85 | ParMarkBitMap::mark_obj(HeapWord* addr, size_t size) |
| 86 | { |
| 87 | const idx_t beg_bit = addr_to_bit(addr); |
| 88 | if (_beg_bits.par_set_bit(beg_bit)) { |
| 89 | const idx_t end_bit = addr_to_bit(addr + size - 1); |
| 90 | bool end_bit_ok = _end_bits.par_set_bit(end_bit); |
| 91 | assert(end_bit_ok, "concurrency problem" ); |
| 92 | DEBUG_ONLY(Atomic::inc(&mark_bitmap_count)); |
| 93 | DEBUG_ONLY(Atomic::add(size, &mark_bitmap_size)); |
| 94 | return true; |
| 95 | } |
| 96 | return false; |
| 97 | } |
| 98 | |
| 99 | inline bool |
| 100 | ParMarkBitMap::is_live_words_in_range_in_cache(ParCompactionManager* cm, HeapWord* beg_addr) const { |
| 101 | return cm->last_query_begin() == beg_addr; |
| 102 | } |
| 103 | |
| 104 | inline void |
| 105 | ParMarkBitMap::update_live_words_in_range_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_obj, size_t result) const { |
| 106 | cm->set_last_query_begin(beg_addr); |
| 107 | cm->set_last_query_object(end_obj); |
| 108 | cm->set_last_query_return(result); |
| 109 | } |
| 110 | |
size_t
ParMarkBitMap::live_words_in_range_helper(HeapWord* beg_addr, oop end_obj) const
{
  // Count the live words in [beg_addr, end_obj): the summed sizes of all
  // marked objects whose begin bit lies before end_obj's begin bit.  end_obj
  // itself must be marked but its own words are not included in the count.
  assert(beg_addr <= (HeapWord*)end_obj, "bad range");
  assert(is_marked(end_obj), "end_obj must be live");

  idx_t live_bits = 0;

  // The bitmap routines require the right boundary to be word-aligned.
  const idx_t end_bit = addr_to_bit((HeapWord*)end_obj);
  const idx_t range_end = BitMap::word_align_up(end_bit);

  // Walk the range object by object: find each begin bit, then the matching
  // end bit, and accumulate the object's size in bits (both bits inclusive).
  idx_t beg_bit = find_obj_beg(addr_to_bit(beg_addr), range_end);
  while (beg_bit < end_bit) {
    idx_t tmp_end = find_obj_end(beg_bit, range_end);
    // Every object starting before end_obj must also end before it.
    assert(tmp_end < end_bit, "missing end bit");
    live_bits += tmp_end - beg_bit + 1;
    beg_bit = find_obj_beg(tmp_end + 1, range_end);
  }
  return bits_to_words(live_bits);
}
| 132 | |
size_t
ParMarkBitMap::live_words_in_range_use_cache(ParCompactionManager* cm, HeapWord* beg_addr, oop end_oop) const
{
  // Answer live_words_in_range(beg_addr, end_oop) incrementally, starting
  // from the result cached in cm for a previous query with the same
  // begin address.
  HeapWord* last_beg = cm->last_query_begin();
  HeapWord* last_obj = (HeapWord*)cm->last_query_object();
  HeapWord* end_obj = (HeapWord*)end_oop;

  size_t last_ret = cm->last_query_return();
  if (end_obj > last_obj) {
    // Moved forward: add the live words between the cached object and the
    // new end object.
    last_ret = last_ret + live_words_in_range_helper(last_obj, end_oop);
    last_obj = end_obj;
  } else if (end_obj < last_obj) {
    // The new end_obj is to the left (lower address) of the cached object.
    // Either subtract back from the cached value or recompute from scratch,
    // whichever scans the shorter distance.
    if (pointer_delta(end_obj, beg_addr) > pointer_delta(last_obj, end_obj)) {
      // end_obj is closer to the cached object: subtract the live words in
      // [end_obj, last_obj).
      last_ret = last_ret - live_words_in_range_helper(end_obj, (oop)last_obj);
    } else {
      // end_obj is closer to beg_addr: a full recount is cheaper.
      last_ret = live_words_in_range_helper(beg_addr, end_oop);
    }
    last_obj = end_obj;
  }
  // end_obj == last_obj: the cached result is returned unchanged.

  update_live_words_in_range_cache(cm, last_beg, (oop)last_obj, last_ret);
  return last_ret;
}
| 158 | |
| 159 | size_t |
| 160 | ParMarkBitMap::live_words_in_range(ParCompactionManager* cm, HeapWord* beg_addr, oop end_obj) const |
| 161 | { |
| 162 | // Try to reuse result from ParCompactionManager cache first. |
| 163 | if (is_live_words_in_range_in_cache(cm, beg_addr)) { |
| 164 | return live_words_in_range_use_cache(cm, beg_addr, end_obj); |
| 165 | } |
| 166 | size_t ret = live_words_in_range_helper(beg_addr, end_obj); |
| 167 | update_live_words_in_range_cache(cm, beg_addr, end_obj, ret); |
| 168 | return ret; |
| 169 | } |
| 170 | |
ParMarkBitMap::IterationStatus
ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure,
                       idx_t range_beg, idx_t range_end) const
{
  // Apply live_closure to every live object that begins in
  // [range_beg, range_end).  Returns:
  //  - incomplete, if an object extends past range_end (the stopping point is
  //    recorded via live_closure->set_source() so iteration can resume);
  //  - would_overflow or full, propagated from the closure when it could not
  //    process an object;
  //  - complete, if every object in the range was processed.
  DEBUG_ONLY(verify_bit(range_beg);)
  DEBUG_ONLY(verify_bit(range_end);)
  assert(range_beg <= range_end, "live range invalid");

  // The bitmap routines require the right boundary to be word-aligned.
  const idx_t search_end = BitMap::word_align_up(range_end);

  idx_t cur_beg = find_obj_beg(range_beg, search_end);
  while (cur_beg < range_end) {
    const idx_t cur_end = find_obj_end(cur_beg, search_end);
    if (cur_end >= range_end) {
      // The obj ends outside the range.  Record where we stopped.
      live_closure->set_source(bit_to_addr(cur_beg));
      return incomplete;
    }

    const size_t size = obj_size(cur_beg, cur_end);
    IterationStatus status = live_closure->do_addr(bit_to_addr(cur_beg), size);
    if (status != incomplete) {
      // The closure could not process this object; pass its status up.
      assert(status == would_overflow || status == full, "sanity");
      return status;
    }

    // Successfully processed the object; look for the next object.
    cur_beg = find_obj_beg(cur_end + 1, search_end);
  }

  live_closure->set_source(bit_to_addr(range_end));
  return complete;
}
| 205 | |
ParMarkBitMap::IterationStatus
ParMarkBitMap::iterate(ParMarkBitMapClosure* live_closure,
                       ParMarkBitMapClosure* dead_closure,
                       idx_t range_beg, idx_t range_end,
                       idx_t dead_range_end) const
{
  // Like the two-argument iterate(), but additionally applies dead_closure to
  // each run of dead space between live objects.  Dead space may extend past
  // range_end, up to (but not beyond) dead_range_end.  Return values are the
  // same as for the two-argument iterate().
  DEBUG_ONLY(verify_bit(range_beg);)
  DEBUG_ONLY(verify_bit(range_end);)
  DEBUG_ONLY(verify_bit(dead_range_end);)
  assert(range_beg <= range_end, "live range invalid");
  assert(range_end <= dead_range_end, "dead range invalid");

  // The bitmap routines require the right boundary to be word-aligned.
  const idx_t live_search_end = BitMap::word_align_up(range_end);
  const idx_t dead_search_end = BitMap::word_align_up(dead_range_end);

  idx_t cur_beg = range_beg;
  if (range_beg < range_end && is_unmarked(range_beg)) {
    // The range starts with dead space.  Look for the next object, then fill.
    cur_beg = find_obj_beg(range_beg + 1, dead_search_end);
    // Dead space runs to just before the next object, clamped to the dead range.
    const idx_t dead_space_end = MIN2(cur_beg - 1, dead_range_end - 1);
    const size_t size = obj_size(range_beg, dead_space_end);
    dead_closure->do_addr(bit_to_addr(range_beg), size);
  }

  while (cur_beg < range_end) {
    const idx_t cur_end = find_obj_end(cur_beg, live_search_end);
    if (cur_end >= range_end) {
      // The obj ends outside the range.  Record where we stopped.
      live_closure->set_source(bit_to_addr(cur_beg));
      return incomplete;
    }

    const size_t size = obj_size(cur_beg, cur_end);
    IterationStatus status = live_closure->do_addr(bit_to_addr(cur_beg), size);
    if (status != incomplete) {
      // The closure could not process this object; pass its status up.
      assert(status == would_overflow || status == full, "sanity");
      return status;
    }

    // Look for the start of the next object.
    const idx_t dead_space_beg = cur_end + 1;
    cur_beg = find_obj_beg(dead_space_beg, dead_search_end);
    if (cur_beg > dead_space_beg) {
      // Found dead space; compute the size and invoke the dead closure.
      const idx_t dead_space_end = MIN2(cur_beg - 1, dead_range_end - 1);
      const size_t size = obj_size(dead_space_beg, dead_space_end);
      dead_closure->do_addr(bit_to_addr(dead_space_beg), size);
    }
  }

  live_closure->set_source(bit_to_addr(range_end));
  return complete;
}
| 260 | |
#ifdef ASSERT
// Debug-only check that every committed word of the bitmap storage is zero.
void ParMarkBitMap::verify_clear() const
{
  const idx_t* cur = (const idx_t*)_virtual_space->committed_low_addr();
  const idx_t* const limit = (const idx_t*)_virtual_space->committed_high_addr();
  while (cur < limit) {
    assert(*cur == 0, "bitmap not clear");
    ++cur;
  }
}
#endif  // #ifdef ASSERT
| 271 | |