/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/heap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"

size_t CodeHeap::header_size() {
  return sizeof(HeapBlock);
}


// Implementation of Heap

CodeHeap::CodeHeap(const char* name, const int code_blob_type)
  : _code_blob_type(code_blob_type) {
  _name                         = name;
  _number_of_committed_segments = 0;
  _number_of_reserved_segments  = 0;
  _segment_size                 = 0;
  _log2_segment_size            = 0;
  _next_segment                 = 0;
  _freelist                     = NULL;
  _freelist_segments            = 0;
  _freelist_length              = 0;
  _max_allocated_capacity       = 0;
  _blob_count                   = 0;
  _nmethod_count                = 0;
  _adapter_count                = 0;
  _full_count                   = 0;
}


// The segmap is marked free for that part of the heap
// which has not been allocated yet (beyond _next_segment).
// "Allocated" space in this context means there exists a
// HeapBlock or a FreeBlock describing this space.
// This method takes segment map indices as range boundaries.
void CodeHeap::mark_segmap_as_free(size_t beg, size_t end) {
  assert(beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // Don't do unpredictable things in PRODUCT build
  if (beg < end) {
    // setup _segmap pointers for faster indexing
    address p = (address)_segmap.low() + beg;
    address q = (address)_segmap.low() + end;
    // initialize interval
    memset(p, free_sentinel, q-p);
  }
}

// Don't get confused here.
// All existing blocks, no matter if they are used() or free(),
// have their segmap marked as used. This makes it possible to find
// the block header (HeapBlock or FreeBlock) for any pointer
// within the allocated range (upper limit: _next_segment).
// This method takes segment map indices as range boundaries.
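// Illustration (a sketch, assuming free_sentinel == 0xFF, which matches
// the 0xFE -> 1 wrap-around shown in the find_start() diagram below):
// marking a block of 300 segments starting at segment s writes
//   segmap[s+0 .. s+254]   = 0, 1, 2, ..., 0xFE
//   segmap[s+255 .. s+299] = 1, 2, ..., 45
// so find_start() can hop backwards from any in-block index to the
// block header at s in steps of at most free_sentinel-1 segments.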
void CodeHeap::mark_segmap_as_used(size_t beg, size_t end) {
  assert(beg < _number_of_committed_segments, "interval begin out of bounds");
  assert(beg < end && end <= _number_of_committed_segments, "interval end out of bounds");
  // Don't do unpredictable things in PRODUCT build
  if (beg < end) {
    // setup _segmap pointers for faster indexing
    address p = (address)_segmap.low() + beg;
    address q = (address)_segmap.low() + end;
    // initialize interval
    int i = 0;
    while (p < q) {
      *p++ = i++;
      if (i == free_sentinel) i = 1;
    }
  }
}

void CodeHeap::invalidate(size_t beg, size_t end, size_t hdr_size) {
#ifndef PRODUCT
  // Fill the given range with some bad value.
  // length is expected to be in segment_size units.
  // This prevents inadvertent execution of code leftover from previous use.
  char* p = low_boundary() + segments_to_size(beg) + hdr_size;
  memset(p, badCodeHeapNewVal, segments_to_size(end-beg)-hdr_size);
#endif
}

void CodeHeap::clear(size_t beg, size_t end) {
  mark_segmap_as_free(beg, end);
  invalidate(beg, end, 0);
}

void CodeHeap::clear() {
  _next_segment = 0;
  clear(_next_segment, _number_of_committed_segments);
}


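// Rounds 'size' up to the next OS page boundary. For example, with a
// (typical) 4K page size: align_to_page_size(10000)
//   = (10000 + 4095) & ~4095 = 12288, i.e. three pages.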
static size_t align_to_page_size(size_t size) {
  const size_t alignment = (size_t)os::vm_page_size();
  assert(is_power_of_2(alignment), "no kidding ???");
  return (size + alignment - 1) & ~(alignment - 1);
}


void CodeHeap::on_code_mapping(char* base, size_t size) {
#ifdef LINUX
  extern void linux_wrap_code(char* base, size_t size);
  linux_wrap_code(base, size);
#endif
}


bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_size) {
  assert(rs.size() >= committed_size, "reserved < committed");
  assert(segment_size >= sizeof(FreeBlock), "segment size is too small");
  assert(is_power_of_2(segment_size), "segment_size must be a power of 2");

  _segment_size      = segment_size;
  _log2_segment_size = exact_log2(segment_size);

  // Reserve and initialize space for _memory.
  size_t page_size = os::vm_page_size();
  if (os::can_execute_large_page_memory()) {
    const size_t min_pages = 8;
    page_size = MIN2(os::page_size_for_region_aligned(committed_size, min_pages),
                     os::page_size_for_region_aligned(rs.size(), min_pages));
  }

  const size_t granularity = os::vm_allocation_granularity();
  const size_t c_size = align_up(committed_size, page_size);

  os::trace_page_sizes(_name, committed_size, rs.size(), page_size,
                       rs.base(), rs.size());
  if (!_memory.initialize(rs, c_size)) {
    return false;
  }

  on_code_mapping(_memory.low(), _memory.committed_size());
  _number_of_committed_segments = size_to_segments(_memory.committed_size());
  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
  assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
  const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
  const size_t reserved_segments_size = align_up(_number_of_reserved_segments, reserved_segments_alignment);
  const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);

  // reserve space for _segmap
  if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
    return false;
  }

  MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode);

  assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map");
  assert(_segmap.reserved_size()  >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map");
  assert(_segmap.reserved_size()  >= _segmap.committed_size()              , "just checking");

  // initialize remaining instance variables, heap memory and segmap
  clear();
  return true;
}


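// Grows the committed part of the heap by at least 'size' bytes (rounded
// to full pages, and capped by the remaining uncommitted space), then
// commits the matching portion of the segment map. E.g., with 4K pages,
// expand_by(10000) commits three more pages (12288 bytes) of heap,
// provided that much is still uncommitted.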
bool CodeHeap::expand_by(size_t size) {
  // expand _memory space
  size_t dm = align_to_page_size(_memory.committed_size() + size) - _memory.committed_size();
  if (dm > 0) {
    // Use at least the available uncommitted space if 'size' is larger
    if (_memory.uncommitted_size() != 0 && dm > _memory.uncommitted_size()) {
      dm = _memory.uncommitted_size();
    }
    char* base = _memory.low() + _memory.committed_size();
    if (!_memory.expand_by(dm)) return false;
    on_code_mapping(base, dm);
    size_t i = _number_of_committed_segments;
    _number_of_committed_segments = size_to_segments(_memory.committed_size());
    assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
    // expand _segmap space
    size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
    if ((ds > 0) && !_segmap.expand_by(ds)) {
      return false;
    }
    assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "just checking");
    // initialize additional space (heap memory and segmap)
    clear(i, _number_of_committed_segments);
  }
  return true;
}


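// Allocation is done in whole segments. A purely illustrative sizing
// example (the concrete numbers are hypothetical): with a segment size
// of 128 bytes and sizeof(HeapBlock) == 8, allocate(500) requests
// size_to_segments(500 + 8) = 4 segments, i.e. 512 bytes of which
// 504 are usable.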
void* CodeHeap::allocate(size_t instance_size) {
  size_t number_of_segments = size_to_segments(instance_size + header_size());
  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");

  // First check if we can satisfy request from freelist
  NOT_PRODUCT(verify());
  HeapBlock* block = search_freelist(number_of_segments);
  NOT_PRODUCT(verify());

  if (block != NULL) {
    assert(!block->free(), "must not be marked free");
    guarantee((char*) block >= _memory.low_boundary() && (char*) block < _memory.high(),
              "The newly allocated block " INTPTR_FORMAT " is not within the heap "
              "starting with " INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
              p2i(block), p2i(_memory.low_boundary()), p2i(_memory.high()));
    // Invalidate the additional space that FreeBlock occupies. The rest of the block should already be invalidated.
    // This is necessary due to a dubious assert in nmethod.cpp(PcDescCache::reset_to()).
    DEBUG_ONLY(memset((void*)block->allocated_space(), badCodeHeapNewVal, sizeof(FreeBlock) - sizeof(HeapBlock)));
    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
    _blob_count++;
    return block->allocated_space();
  }

  // Ensure minimum size for allocation to the heap.
  number_of_segments = MAX2((int)CodeCacheMinBlockLength, (int)number_of_segments);

  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
    HeapBlock* b = block_at(_next_segment);
    b->initialize(number_of_segments);
    _next_segment += number_of_segments;
    guarantee((char*) b >= _memory.low_boundary() && (char*) b < _memory.high(),
              "The newly allocated block " INTPTR_FORMAT " is not within the heap "
              "starting with " INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
              p2i(b), p2i(_memory.low_boundary()), p2i(_memory.high()));
    _max_allocated_capacity = MAX2(_max_allocated_capacity, allocated_capacity());
    _blob_count++;
    return b->allocated_space();
  } else {
    return NULL;
  }
}

// Split the given block into two at the given segment.
// This is helpful when a block was allocated too large,
// making it possible to trim off the unused space at the
// end (interpreter). It also helps with splitting a large
// free block during allocation.
// Usage state (used or free) must be set by the caller since
// we don't know if the resulting blocks will be used or free.
// split_at is the segment number (relative to segment_for(b))
// where the split happens. The segment with relative
// number split_at is the first segment of the split-off block.
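// Example: for a block b spanning 10 segments, split_block(b, 4)
// shrinks b to 4 segments and returns a new block header at relative
// segment 4 covering the remaining 6 segments; the caller then marks
// each part as used or free as appropriate.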
HeapBlock* CodeHeap::split_block(HeapBlock *b, size_t split_at) {
  if (b == NULL) return NULL;
  // After the split, both blocks must have a size of at least CodeCacheMinBlockLength
  assert((split_at >= CodeCacheMinBlockLength) && (split_at + CodeCacheMinBlockLength <= b->length()),
         "split position(%d) out of range [%d..%d]", (int)split_at, (int)CodeCacheMinBlockLength, (int)(b->length() - CodeCacheMinBlockLength));
  size_t split_segment = segment_for(b) + split_at;
  size_t b_size = b->length();
  size_t newb_size = b_size - split_at;

  HeapBlock* newb = block_at(split_segment);
  newb->set_length(newb_size);
  mark_segmap_as_used(segment_for(newb), segment_for(newb) + newb_size);
  b->set_length(split_at);
  return newb;
}

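// Returns the unused tail of a block to the freelist. Worked example:
// if the block at 'p' spans 20 segments but 'used_size' only needs 12
// of them (header included), the block is split at relative segment 12
// and the trailing 8 segments are handed to add_to_freelist().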
void CodeHeap::deallocate_tail(void* p, size_t used_size) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");

  size_t actual_number_of_segments = b->length();
  size_t used_number_of_segments   = size_to_segments(used_size + header_size());
  size_t unused_number_of_segments = actual_number_of_segments - used_number_of_segments;
  guarantee(used_number_of_segments <= actual_number_of_segments, "Must be!");

  HeapBlock* f = split_block(b, used_number_of_segments);
  add_to_freelist(f);
  NOT_PRODUCT(verify());
}

void CodeHeap::deallocate(void* p) {
  assert(p == find_start(p), "illegal deallocation");
  // Find start of HeapBlock
  HeapBlock* b = (((HeapBlock *)p) - 1);
  assert(b->allocated_space() == p, "sanity check");
  guarantee((char*) b >= _memory.low_boundary() && (char*) b < _memory.high(),
            "The block to be deallocated " INTPTR_FORMAT " is not within the heap "
            "starting with " INTPTR_FORMAT " and ending with " INTPTR_FORMAT,
            p2i(b), p2i(_memory.low_boundary()), p2i(_memory.high()));
  add_to_freelist(b);
  NOT_PRODUCT(verify());
}

/**
 * Uses the segment map to find the start (header) of an nmethod. This works as follows:
 * The memory of the code cache is divided into 'segments'. The size of a segment is
 * determined by -XX:CodeCacheSegmentSize=XX. Allocation in the code cache can only
 * happen at segment boundaries. A pointer in the code cache can be mapped to a segment
 * by calling segment_for(addr). Each time memory is requested from the code cache,
 * the segmap is updated accordingly. See the following example, which illustrates the
 * state of the code cache and the segment map: (seg -> segment, nm -> nmethod)
 *
 *          code cache          segmap
 *         -----------        ---------
 * seg 1   | nm 1    |   ->   | 0    |
 * seg 2   | nm 1    |   ->   | 1    |
 * ...     | nm 1    |   ->   | ..   |
 * seg m   | nm 2    |   ->   | 0    |
 * seg m+1 | nm 2    |   ->   | 1    |
 * ...     | nm 2    |   ->   | 2    |
 * ...     | nm 2    |   ->   | ..   |
 * ...     | nm 2    |   ->   | 0xFE |
 * seg m+n | nm 2    |   ->   | 1    |
 * ...     | nm 2    |   ->   |      |
 *
 * A value of '0' in the segmap indicates that the segment contains the beginning of
 * an nmethod. Let's walk through a simple example: if we want to find the start of
 * an nmethod that falls into seg 2, we read segmap[2]. The value is an offset that,
 * subtracted from the segment index, leads towards the segment containing the start
 * of the nmethod. Another example: if we want to get the start of nm 2 from a pointer
 * into seg m+n, we first read segmap[m+n], which returns '1'. We step back one
 * segment and read segmap[m+n-1] (here: 0xFE), and keep hopping backwards like this
 * until we reach a segment whose map value is '0', the header segment of nm 2.
 */
void* CodeHeap::find_start(void* p) const {
  if (!contains(p)) {
    return NULL;
  }
  size_t seg_idx = segment_for(p);
  address seg_map = (address)_segmap.low();
  if (is_segment_unused(seg_map[seg_idx])) {
    return NULL;
  }
  while (seg_map[seg_idx] > 0) {
    seg_idx -= (int)seg_map[seg_idx];
  }

  HeapBlock* h = block_at(seg_idx);
  if (h->free()) {
    return NULL;
  }
  return h->allocated_space();
}

CodeBlob* CodeHeap::find_blob_unsafe(void* start) const {
  CodeBlob* result = (CodeBlob*)CodeHeap::find_start(start);
  if (result != NULL && result->blob_contains((address)start)) {
    return result;
  }
  return NULL;
}

size_t CodeHeap::alignment_unit() const {
  // this will be a power of two
  return _segment_size;
}


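// For illustration (hypothetical sizes): if sizeof(HeapBlock) were 8 and
// the segment size 64, every pointer handed out by allocated_space()
// would be congruent to 8 (mod 64).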
size_t CodeHeap::alignment_offset() const {
  // The lowest address in any allocated block will be
  // equal to alignment_offset (mod alignment_unit).
  return sizeof(HeapBlock) & (_segment_size - 1);
}

// Returns the current block if available and used.
// If not, it returns the subsequent block (if available), NULL otherwise.
// Free blocks are merged, therefore there is at most one free block
// between two used ones. As a result, the subsequent block (if available) is
// guaranteed to be used.
void* CodeHeap::next_used(HeapBlock* b) const {
  if (b != NULL && b->free()) b = next_block(b);
  assert(b == NULL || !b->free(), "must be in use or at end of heap");
  return (b == NULL) ? NULL : b->allocated_space();
}

// Returns the first block of the heap (used or free), or NULL if the heap is empty
HeapBlock* CodeHeap::first_block() const {
  if (_next_segment > 0)
    return block_at(0);
  return NULL;
}

HeapBlock* CodeHeap::block_start(void* q) const {
  HeapBlock* b = (HeapBlock*)find_start(q);
  if (b == NULL) return NULL;
  return b - 1;
}

// Returns the heap block following b, or NULL if b is the last block
HeapBlock* CodeHeap::next_block(HeapBlock *b) const {
  if (b == NULL) return NULL;
  size_t i = segment_for(b) + b->length();
  if (i < _next_segment)
    return block_at(i);
  return NULL;
}


// Returns current capacity
size_t CodeHeap::capacity() const {
  return _memory.committed_size();
}

size_t CodeHeap::max_capacity() const {
  return _memory.reserved_size();
}

int CodeHeap::allocated_segments() const {
  return (int)_next_segment;
}

size_t CodeHeap::allocated_capacity() const {
  // size of used heap - size on freelist
  return segments_to_size(_next_segment - _freelist_segments);
}

// Returns the size of the reserved heap space that has never been allocated
size_t CodeHeap::heap_unallocated_capacity() const {
  // Total number of segments - number currently used
  return segments_to_size(_number_of_reserved_segments - _next_segment);
}

// Free list management

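// Returns the block that physically follows 'b' in the heap. For
// example, with a segment size of 64 bytes, a free block of length 5
// at address p is followed by the block starting at p + 320.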
FreeBlock* CodeHeap::following_block(FreeBlock *b) {
  return (FreeBlock*)(((address)b) + _segment_size * b->length());
}

// Inserts block b after a
void CodeHeap::insert_after(FreeBlock* a, FreeBlock* b) {
  assert(a != NULL && b != NULL, "must be real pointers");

  // Link b into the list after a
  b->set_link(a->link());
  a->set_link(b);

  // See if we can merge blocks
  merge_right(b); // Try to make b bigger
  merge_right(a); // Try to make a include b
}

// Try to merge this block with the following block
bool CodeHeap::merge_right(FreeBlock* a) {
  assert(a->free(), "must be a free block");
  if (following_block(a) == a->link()) {
    assert(a->link() != NULL && a->link()->free(), "must be free too");
    // Update block a to include the following block
    a->set_length(a->length() + a->link()->length());
    a->set_link(a->link()->link());
    // Update find_start map
    size_t beg = segment_for(a);
    mark_segmap_as_used(beg, beg + a->length());
    invalidate(beg, beg + a->length(), sizeof(FreeBlock));
    _freelist_length--;
    return true;
  }
  return false;
}


void CodeHeap::add_to_freelist(HeapBlock* a) {
  FreeBlock* b = (FreeBlock*)a;
  size_t bseg = segment_for(b);
  _freelist_length++;

  assert(b != _freelist, "cannot be added twice");

  // Mark as free and update free space count
  _freelist_segments += b->length();
  b->set_free();
  invalidate(bseg, bseg + b->length(), sizeof(FreeBlock));

  // First element in list?
  if (_freelist == NULL) {
    b->set_link(NULL);
    _freelist = b;
    return;
  }

  // Since the freelist is ordered (smaller addresses -> larger addresses), a new
  // element with a smaller address than the current first element can simply
  // become the new head of the list.
  if (b < _freelist) {
    // Insert first in list
    b->set_link(_freelist);
    _freelist = b;
    merge_right(_freelist);
    return;
  }

  // Scan for the right place to put into the list.
  // The list is sorted by increasing addresses.
  FreeBlock* prev = _freelist;
  FreeBlock* cur  = _freelist->link();
  while (cur != NULL && cur < b) {
    assert(prev < cur, "Freelist must be ordered");
    prev = cur;
    cur  = cur->link();
  }
  assert((prev < b) && (cur == NULL || b < cur), "free-list must be ordered");
  insert_after(prev, b);
}

/**
 * Search the freelist for the entry with the best fit.
 * @return NULL if none was found
 */
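// Worked example (assuming CodeCacheMinBlockLength == 4): with free
// blocks of 6 and 12 segments, a request for 5 segments best-fits the
// 6-segment block; since 6 - 5 = 1 < CodeCacheMinBlockLength, the whole
// block is returned rather than leaving a useless 1-segment stub. A
// request for 8 segments instead splits the 12-segment block, returning
// its trailing 8 segments and leaving 4 on the freelist.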
HeapBlock* CodeHeap::search_freelist(size_t length) {
  FreeBlock* found_block = NULL;
  FreeBlock* found_prev  = NULL;
  size_t     found_length = _next_segment; // max it out to begin with

  HeapBlock* res  = NULL;
  FreeBlock* prev = NULL;
  FreeBlock* cur  = _freelist;

  length = length < CodeCacheMinBlockLength ? CodeCacheMinBlockLength : length;

  // Search for best-fitting block
  while (cur != NULL) {
    size_t cur_length = cur->length();
    if (cur_length == length) {
      // We have a perfect fit
      found_block  = cur;
      found_prev   = prev;
      found_length = cur_length;
      break;
    } else if ((cur_length > length) && (cur_length < found_length)) {
      // This is a new, closer fit. Remember block, its previous element, and its length
      found_block  = cur;
      found_prev   = prev;
      found_length = cur_length;
    }
    // Next element in list
    prev = cur;
    cur  = cur->link();
  }

  if (found_block == NULL) {
    // None found
    return NULL;
  }

  // Exact (or at least good enough) fit. Remove from list.
  // Don't leave anything on the freelist smaller than CodeCacheMinBlockLength.
  if (found_length - length < CodeCacheMinBlockLength) {
    _freelist_length--;
    length = found_length;
    if (found_prev == NULL) {
      assert(_freelist == found_block, "sanity check");
      _freelist = _freelist->link();
    } else {
      assert((found_prev->link() == found_block), "sanity check");
      // Unlink element
      found_prev->set_link(found_block->link());
    }
    res = found_block;
  } else {
    // Truncate the free block and return the truncated part
    // as a new HeapBlock. The remaining free block does not
    // need to be updated, except for its length. Truncating
    // the segment map does not invalidate the leading part.
    res = split_block(found_block, found_length - length);
  }

  res->set_used();
  _freelist_segments -= length;
  return res;
}

//----------------------------------------------------------------------------
// Non-product code

#ifndef PRODUCT

void CodeHeap::print() {
  tty->print_cr("The Heap");
}

void CodeHeap::verify() {
  if (VerifyCodeCache) {
    size_t len = 0;
    int count = 0;
    for (FreeBlock* b = _freelist; b != NULL; b = b->link()) {
      len += b->length();
      count++;
      // Check if we have merged all free blocks
      assert(merge_right(b) == false, "Missed merging opportunity");
    }
    // Verify that the freelist contains the right amount of free space
    assert(len == _freelist_segments, "wrong freelist");

    for (HeapBlock* h = first_block(); h != NULL; h = next_block(h)) {
      if (h->free()) count--;
    }
    // Verify that the freelist contains the same number of blocks
    // as free blocks found on the full list.
    assert(count == 0, "missing free blocks");

    //---< all free block memory must have been invalidated >---
    for (FreeBlock* b = _freelist; b != NULL; b = b->link()) {
      for (char* c = (char*)b + sizeof(FreeBlock); c < (char*)b + segments_to_size(b->length()); c++) {
        assert(*c == (char)badCodeHeapNewVal, "FreeBlock@" PTR_FORMAT "(" PTR_FORMAT ") not invalidated @byte %d", p2i(b), b->length(), (int)(c - (char*)b));
      }
    }

    // Verify segment map marking.
    // All allocated segments, no matter if in a free or used block,
    // must be marked "in use".
    address seg_map = (address)_segmap.low();
    size_t  nseg    = 0;
    for (HeapBlock* b = first_block(); b != NULL; b = next_block(b)) {
      size_t seg1 = segment_for(b);
      size_t segn = seg1 + b->length();
      for (size_t i = seg1; i < segn; i++) {
        nseg++;
        assert(!is_segment_unused(seg_map[i]), "CodeHeap: unused segment. %d [%d..%d], %s block", (int)i, (int)seg1, (int)segn, b->free() ? "free" : "used");
      }
    }
    assert(nseg == _next_segment, "CodeHeap: segment count mismatch. found %d, expected %d.", (int)nseg, (int)_next_segment);

    // Verify that the number of free blocks is not out of hand.
    static int free_block_threshold = 10000;
    if (count > free_block_threshold) {
      warning("CodeHeap: # of free blocks > %d", free_block_threshold);
      // Double the warning limit
      free_block_threshold *= 2;
    }
  }
}

#endif