| 1 | /* |
| 2 | * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #include "precompiled.hpp" |
| 26 | #include "aot/aotLoader.hpp" |
| 27 | #include "code/codeBlob.hpp" |
| 28 | #include "code/codeCache.hpp" |
| 29 | #include "code/codeHeapState.hpp" |
| 30 | #include "code/compiledIC.hpp" |
| 31 | #include "code/dependencies.hpp" |
| 32 | #include "code/dependencyContext.hpp" |
| 33 | #include "code/icBuffer.hpp" |
| 34 | #include "code/nmethod.hpp" |
| 35 | #include "code/pcDesc.hpp" |
| 36 | #include "compiler/compileBroker.hpp" |
| 37 | #include "jfr/jfrEvents.hpp" |
| 38 | #include "logging/log.hpp" |
| 39 | #include "logging/logStream.hpp" |
| 40 | #include "memory/allocation.inline.hpp" |
| 41 | #include "memory/iterator.hpp" |
| 42 | #include "memory/resourceArea.hpp" |
| 43 | #include "memory/universe.hpp" |
| 44 | #include "oops/method.inline.hpp" |
| 45 | #include "oops/objArrayOop.hpp" |
| 46 | #include "oops/oop.inline.hpp" |
| 47 | #include "oops/verifyOopClosure.hpp" |
| 48 | #include "runtime/arguments.hpp" |
| 49 | #include "runtime/compilationPolicy.hpp" |
| 50 | #include "runtime/deoptimization.hpp" |
| 51 | #include "runtime/handles.inline.hpp" |
| 52 | #include "runtime/icache.hpp" |
| 53 | #include "runtime/java.hpp" |
| 54 | #include "runtime/mutexLocker.hpp" |
| 55 | #include "runtime/safepointVerifiers.hpp" |
| 56 | #include "runtime/sweeper.hpp" |
| 57 | #include "runtime/vmThread.hpp" |
| 58 | #include "services/memoryService.hpp" |
| 59 | #include "utilities/align.hpp" |
| 60 | #include "utilities/vmError.hpp" |
| 61 | #include "utilities/xmlstream.hpp" |
| 62 | #ifdef COMPILER1 |
| 63 | #include "c1/c1_Compilation.hpp" |
| 64 | #include "c1/c1_Compiler.hpp" |
| 65 | #endif |
| 66 | #ifdef COMPILER2 |
| 67 | #include "opto/c2compiler.hpp" |
| 68 | #include "opto/compile.hpp" |
| 69 | #include "opto/node.hpp" |
| 70 | #endif |
| 71 | |
| 72 | // Helper class for printing in CodeCache |
| 73 | class CodeBlob_sizes { |
| 74 | private: |
| 75 | int count; |
| 76 | int total_size; |
| 77 | int header_size; |
| 78 | int code_size; |
| 79 | int stub_size; |
| 80 | int relocation_size; |
| 81 | int scopes_oop_size; |
| 82 | int scopes_metadata_size; |
| 83 | int scopes_data_size; |
| 84 | int scopes_pcs_size; |
| 85 | |
| 86 | public: |
| 87 | CodeBlob_sizes() { |
| 88 | count = 0; |
| 89 | total_size = 0; |
| 90 | header_size = 0; |
| 91 | code_size = 0; |
| 92 | stub_size = 0; |
| 93 | relocation_size = 0; |
| 94 | scopes_oop_size = 0; |
| 95 | scopes_metadata_size = 0; |
| 96 | scopes_data_size = 0; |
| 97 | scopes_pcs_size = 0; |
| 98 | } |
| 99 | |
| 100 | int total() { return total_size; } |
| 101 | bool is_empty() { return count == 0; } |
| 102 | |
| 103 | void print(const char* title) { |
| 104 | tty->print_cr(" #%d %s = %dK (hdr %d%%, loc %d%%, code %d%%, stub %d%%, [oops %d%%, metadata %d%%, data %d%%, pcs %d%%])" , |
| 105 | count, |
| 106 | title, |
| 107 | (int)(total() / K), |
| 108 | header_size * 100 / total_size, |
| 109 | relocation_size * 100 / total_size, |
| 110 | code_size * 100 / total_size, |
| 111 | stub_size * 100 / total_size, |
| 112 | scopes_oop_size * 100 / total_size, |
| 113 | scopes_metadata_size * 100 / total_size, |
| 114 | scopes_data_size * 100 / total_size, |
| 115 | scopes_pcs_size * 100 / total_size); |
| 116 | } |
| 117 | |
| 118 | void add(CodeBlob* cb) { |
| 119 | count++; |
| 120 | total_size += cb->size(); |
| 121 | header_size += cb->header_size(); |
| 122 | relocation_size += cb->relocation_size(); |
| 123 | if (cb->is_nmethod()) { |
| 124 | nmethod* nm = cb->as_nmethod_or_null(); |
| 125 | code_size += nm->insts_size(); |
| 126 | stub_size += nm->stub_size(); |
| 127 | |
| 128 | scopes_oop_size += nm->oops_size(); |
| 129 | scopes_metadata_size += nm->metadata_size(); |
| 130 | scopes_data_size += nm->scopes_data_size(); |
| 131 | scopes_pcs_size += nm->scopes_pcs_size(); |
| 132 | } else { |
| 133 | code_size += cb->code_size(); |
| 134 | } |
| 135 | } |
| 136 | }; |
| 137 | |
| 138 | // Iterate over all CodeHeaps |
| 139 | #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap) |
| 140 | #define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap) |
| 141 | #define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap) |
| 142 | |
| 143 | // Iterate over all CodeBlobs (cb) on the given CodeHeap |
| 144 | #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb)) |
| 145 | |
| 146 | address CodeCache::_low_bound = 0; |
| 147 | address CodeCache::_high_bound = 0; |
| 148 | int CodeCache::_number_of_nmethods_with_dependencies = 0; |
| 149 | ExceptionCache* volatile CodeCache::_exception_cache_purge_list = NULL; |
| 150 | |
| 151 | // Initialize arrays of CodeHeap subsets |
| 152 | GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true); |
| 153 | GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true); |
| 154 | GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true); |
| 155 | GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true); |
| 156 | |
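| | // Verify that the individual code heap sizes are consistent with ReservedCodeCacheSize; exits with an error message if they are not. |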
| 157 | void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) { |
| 158 | size_t total_size = non_nmethod_size + profiled_size + non_profiled_size; |
| 159 | // Prepare error message |
| 160 | const char* error = "Invalid code heap sizes"; |
| 161 | err_msg message("NonNMethodCodeHeapSize (" SIZE_FORMAT "K) + ProfiledCodeHeapSize (" SIZE_FORMAT "K)" |
| 162 | " + NonProfiledCodeHeapSize (" SIZE_FORMAT "K) = " SIZE_FORMAT "K", |
| 163 | non_nmethod_size/K, profiled_size/K, non_profiled_size/K, total_size/K); |
| 164 | |
| 165 | if (total_size > cache_size) { |
| 166 | // Some code heap sizes were explicitly set: total_size must be <= cache_size |
| 167 | message.append(" is greater than ReservedCodeCacheSize (" SIZE_FORMAT "K)." , cache_size/K); |
| 168 | vm_exit_during_initialization(error, message); |
| 169 | } else if (all_set && total_size != cache_size) { |
| 170 | // All code heap sizes were explicitly set: total_size must equal cache_size |
| 171 | message.append(" is not equal to ReservedCodeCacheSize (" SIZE_FORMAT "K)." , cache_size/K); |
| 172 | vm_exit_during_initialization(error, message); |
| 173 | } |
| 174 | } |
| 175 | |
| 176 | void CodeCache::initialize_heaps() { |
| 177 | bool non_nmethod_set = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize); |
| 178 | bool profiled_set = FLAG_IS_CMDLINE(ProfiledCodeHeapSize); |
| 179 | bool non_profiled_set = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize); |
| 180 | size_t min_size = os::vm_page_size(); |
| 181 | size_t cache_size = ReservedCodeCacheSize; |
| 182 | size_t non_nmethod_size = NonNMethodCodeHeapSize; |
| 183 | size_t profiled_size = ProfiledCodeHeapSize; |
| 184 | size_t non_profiled_size = NonProfiledCodeHeapSize; |
| 185 | // Check if total size set via command line flags exceeds the reserved size |
| 186 | check_heap_sizes((non_nmethod_set ? non_nmethod_size : min_size), |
| 187 | (profiled_set ? profiled_size : min_size), |
| 188 | (non_profiled_set ? non_profiled_size : min_size), |
| 189 | cache_size, |
| 190 | non_nmethod_set && profiled_set && non_profiled_set); |
| 191 | |
| 192 | // Determine size of compiler buffers |
| 193 | size_t code_buffers_size = 0; |
| 194 | #ifdef COMPILER1 |
| 195 | // C1 temporary code buffers (see Compiler::init_buffer_blob()) |
| 196 | const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple); |
| 197 | code_buffers_size += c1_count * Compiler::code_buffer_size(); |
| 198 | #endif |
| 199 | #ifdef COMPILER2 |
| 200 | // C2 scratch buffers (see Compile::init_scratch_buffer_blob()) |
| 201 | const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization); |
| 202 | // Initial size of constant table (this may be increased if a compiled method needs more space) |
| 203 | code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size(); |
| 204 | #endif |
| 205 | |
| 206 | // Increase default non_nmethod_size to account for compiler buffers |
| 207 | if (!non_nmethod_set) { |
| 208 | non_nmethod_size += code_buffers_size; |
| 209 | } |
| 210 | // Calculate default CodeHeap sizes if not set by user |
| 211 | if (!non_nmethod_set && !profiled_set && !non_profiled_set) { |
| 212 | // Check if we have enough space for the non-nmethod code heap |
| 213 | if (cache_size > non_nmethod_size) { |
| 214 | // Use the default value for non_nmethod_size and one half of the |
| 215 | // remaining size for non-profiled and one half for profiled methods |
| 216 | size_t remaining_size = cache_size - non_nmethod_size; |
| 217 | profiled_size = remaining_size / 2; |
| 218 | non_profiled_size = remaining_size - profiled_size; |
| 219 | } else { |
| 220 | // Use all space for the non-nmethod heap and set other heaps to minimal size |
| 221 | non_nmethod_size = cache_size - 2 * min_size; |
| 222 | profiled_size = min_size; |
| 223 | non_profiled_size = min_size; |
| 224 | } |
| 225 | } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) { |
| 226 | // The user explicitly set some code heap sizes. Increase or decrease the (default) |
| 227 | // sizes of the other code heaps accordingly. First adapt non-profiled and profiled |
| 228 | // code heap sizes and then only change non-nmethod code heap size if still necessary. |
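| | // diff_size is the space still unassigned (positive) or overcommitted (negative) relative to the reserved cache size. |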
| 229 | intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size); |
| 230 | if (non_profiled_set) { |
| 231 | if (!profiled_set) { |
| 232 | // Adapt size of profiled code heap |
| 233 | if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) { |
| 234 | // Not enough space available, set to minimum size |
| 235 | diff_size += profiled_size - min_size; |
| 236 | profiled_size = min_size; |
| 237 | } else { |
| 238 | profiled_size += diff_size; |
| 239 | diff_size = 0; |
| 240 | } |
| 241 | } |
| 242 | } else if (profiled_set) { |
| 243 | // Adapt size of non-profiled code heap |
| 244 | if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) { |
| 245 | // Not enough space available, set to minimum size |
| 246 | diff_size += non_profiled_size - min_size; |
| 247 | non_profiled_size = min_size; |
| 248 | } else { |
| 249 | non_profiled_size += diff_size; |
| 250 | diff_size = 0; |
| 251 | } |
| 252 | } else if (non_nmethod_set) { |
| 253 | // Distribute remaining size between profiled and non-profiled code heaps |
| 254 | diff_size = cache_size - non_nmethod_size; |
| 255 | profiled_size = diff_size / 2; |
| 256 | non_profiled_size = diff_size - profiled_size; |
| 257 | diff_size = 0; |
| 258 | } |
| 259 | if (diff_size != 0) { |
| 260 | // Use non-nmethod code heap for remaining space requirements |
| 261 | assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity"); |
| 262 | non_nmethod_size += diff_size; |
| 263 | } |
| 264 | } |
| 265 | |
| 266 | // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap |
| 267 | if (!heap_available(CodeBlobType::MethodProfiled)) { |
| 268 | non_profiled_size += profiled_size; |
| 269 | profiled_size = 0; |
| 270 | } |
| 271 | // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap |
| 272 | if (!heap_available(CodeBlobType::MethodNonProfiled)) { |
| 273 | non_nmethod_size += non_profiled_size; |
| 274 | non_profiled_size = 0; |
| 275 | } |
| 276 | // Make sure we have enough space for VM internal code |
| 277 | uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); |
| 278 | if (non_nmethod_size < min_code_cache_size) { |
| 279 | vm_exit_during_initialization(err_msg( |
| 280 | "Not enough space in non-nmethod code heap to run VM: " SIZE_FORMAT "K < " SIZE_FORMAT "K" , |
| 281 | non_nmethod_size/K, min_code_cache_size/K)); |
| 282 | } |
| 283 | |
| 284 | // Verify sizes and update flag values |
| 285 | assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes" ); |
| 286 | FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod_size); |
| 287 | FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled_size); |
| 288 | FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled_size); |
| 289 | |
| 290 | // If large page support is enabled, align code heaps according to large |
| 291 | // page size to make sure that code cache is covered by large pages. |
| 292 | const size_t alignment = MAX2(page_size(false, 8), (size_t) os::vm_allocation_granularity()); |
| 293 | non_nmethod_size = align_up(non_nmethod_size, alignment); |
| 294 | profiled_size = align_down(profiled_size, alignment); |
| 295 | |
| 296 | // Reserve one continuous chunk of memory for CodeHeaps and split it into |
| 297 | // parts for the individual heaps. The memory layout looks like this: |
| 298 | // ---------- high ----------- |
| 299 | // Non-profiled nmethods |
| 300 | // Profiled nmethods |
| 301 | // Non-nmethods |
| 302 | // ---------- low ------------ |
| 303 | ReservedCodeSpace rs = reserve_heap_memory(cache_size); |
| 304 | ReservedSpace non_method_space = rs.first_part(non_nmethod_size); |
| 305 | ReservedSpace rest = rs.last_part(non_nmethod_size); |
| 306 | ReservedSpace profiled_space = rest.first_part(profiled_size); |
| 307 | ReservedSpace non_profiled_space = rest.last_part(profiled_size); |
| 308 | |
| 309 | // Non-nmethods (stubs, adapters, ...) |
| 310 | add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod); |
| 311 | // Tier 2 and tier 3 (profiled) methods |
| 312 | add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled); |
| 313 | // Tier 1 and tier 4 (non-profiled) methods and native methods |
| 314 | add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled); |
| 315 | } |
| 316 | |
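| | // Determine the page size to use for the code cache, preferring large pages when the OS can execute large-page memory. |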
| 317 | size_t CodeCache::page_size(bool aligned, size_t min_pages) { |
| 318 | if (os::can_execute_large_page_memory()) { |
| 319 | if (InitialCodeCacheSize < ReservedCodeCacheSize) { |
| 320 | // Make sure that the page size allows for an incremental commit of the reserved space |
| 321 | min_pages = MAX2(min_pages, (size_t)8); |
| 322 | } |
| 323 | return aligned ? os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) : |
| 324 | os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages); |
| 325 | } else { |
| 326 | return os::vm_page_size(); |
| 327 | } |
| 328 | } |
| 329 | |
| 330 | ReservedCodeSpace CodeCache::reserve_heap_memory(size_t size) { |
| 331 | // Align and reserve space for code cache |
| 332 | const size_t rs_ps = page_size(); |
| 333 | const size_t rs_align = MAX2(rs_ps, (size_t) os::vm_allocation_granularity()); |
| 334 | const size_t rs_size = align_up(size, rs_align); |
| 335 | ReservedCodeSpace rs(rs_size, rs_align, rs_ps > (size_t) os::vm_page_size()); |
| 336 | if (!rs.is_reserved()) { |
| 337 | vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (" SIZE_FORMAT "K)" , |
| 338 | rs_size/K)); |
| 339 | } |
| 340 | |
| 341 | // Initialize bounds |
| 342 | _low_bound = (address)rs.base(); |
| 343 | _high_bound = _low_bound + rs.size(); |
| 344 | return rs; |
| 345 | } |
| 346 | |
| 347 | // Heaps available for allocation |
| 348 | bool CodeCache::heap_available(int code_blob_type) { |
| 349 | if (!SegmentedCodeCache) { |
| 350 | // No segmentation: use a single code heap |
| 351 | return (code_blob_type == CodeBlobType::All); |
| 352 | } else if (Arguments::is_interpreter_only()) { |
| 353 | // Interpreter only: we don't need any method code heaps |
| 354 | return (code_blob_type == CodeBlobType::NonNMethod); |
| 355 | } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) { |
| 356 | // Tiered compilation: use all code heaps |
| 357 | return (code_blob_type < CodeBlobType::All); |
| 358 | } else { |
| 359 | // No TieredCompilation: we only need the non-nmethod and non-profiled code heap |
| 360 | return (code_blob_type == CodeBlobType::NonNMethod) || |
| 361 | (code_blob_type == CodeBlobType::MethodNonProfiled); |
| 362 | } |
| 363 | } |
| 364 | |
| 365 | const char* CodeCache::get_code_heap_flag_name(int code_blob_type) { |
| 366 | switch(code_blob_type) { |
| 367 | case CodeBlobType::NonNMethod: |
| 368 | return "NonNMethodCodeHeapSize" ; |
| 369 | break; |
| 370 | case CodeBlobType::MethodNonProfiled: |
| 371 | return "NonProfiledCodeHeapSize" ; |
| 372 | break; |
| 373 | case CodeBlobType::MethodProfiled: |
| 374 | return "ProfiledCodeHeapSize" ; |
| 375 | break; |
| 376 | } |
| 377 | ShouldNotReachHere(); |
| 378 | return NULL; |
| 379 | } |
| 380 | |
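| | // Order CodeHeaps by code blob type, then by address, so the heap lists stay sorted on insertion. |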
| 381 | int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) { |
| 382 | if (lhs->code_blob_type() == rhs->code_blob_type()) { |
| 383 | return (lhs > rhs) ? 1 : ((lhs < rhs) ? -1 : 0); |
| 384 | } else { |
| 385 | return lhs->code_blob_type() - rhs->code_blob_type(); |
| 386 | } |
| 387 | } |
| 388 | |
| 389 | void CodeCache::add_heap(CodeHeap* heap) { |
| 390 | assert(!Universe::is_fully_initialized(), "late heap addition?"); |
| 391 | |
| 392 | _heaps->insert_sorted<code_heap_compare>(heap); |
| 393 | |
| 394 | int type = heap->code_blob_type(); |
| 395 | if (code_blob_type_accepts_compiled(type)) { |
| 396 | _compiled_heaps->insert_sorted<code_heap_compare>(heap); |
| 397 | } |
| 398 | if (code_blob_type_accepts_nmethod(type)) { |
| 399 | _nmethod_heaps->insert_sorted<code_heap_compare>(heap); |
| 400 | } |
| 401 | if (code_blob_type_accepts_allocable(type)) { |
| 402 | _allocable_heaps->insert_sorted<code_heap_compare>(heap); |
| 403 | } |
| 404 | } |
| 405 | |
| 406 | void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) { |
| 407 | // Check if heap is needed |
| 408 | if (!heap_available(code_blob_type)) { |
| 409 | return; |
| 410 | } |
| 411 | |
| 412 | // Create CodeHeap |
| 413 | CodeHeap* heap = new CodeHeap(name, code_blob_type); |
| 414 | add_heap(heap); |
| 415 | |
| 416 | // Reserve Space |
| 417 | size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size()); |
| 418 | size_initial = align_up(size_initial, os::vm_page_size()); |
| 419 | if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) { |
| 420 | vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (" SIZE_FORMAT "K)" , |
| 421 | heap->name(), size_initial/K)); |
| 422 | } |
| 423 | |
| 424 | // Register the CodeHeap |
| 425 | MemoryService::add_code_heap_memory_pool(heap, name); |
| 426 | } |
| 427 | |
| 428 | CodeHeap* CodeCache::get_code_heap_containing(void* start) { |
| 429 | FOR_ALL_HEAPS(heap) { |
| 430 | if ((*heap)->contains(start)) { |
| 431 | return *heap; |
| 432 | } |
| 433 | } |
| 434 | return NULL; |
| 435 | } |
| 436 | |
| 437 | CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) { |
| 438 | assert(cb != NULL, "CodeBlob is null"); |
| 439 | FOR_ALL_HEAPS(heap) { |
| 440 | if ((*heap)->contains_blob(cb)) { |
| 441 | return *heap; |
| 442 | } |
| 443 | } |
| 444 | ShouldNotReachHere(); |
| 445 | return NULL; |
| 446 | } |
| 447 | |
| 448 | CodeHeap* CodeCache::get_code_heap(int code_blob_type) { |
| 449 | FOR_ALL_HEAPS(heap) { |
| 450 | if ((*heap)->accepts(code_blob_type)) { |
| 451 | return *heap; |
| 452 | } |
| 453 | } |
| 454 | return NULL; |
| 455 | } |
| 456 | |
| 457 | CodeBlob* CodeCache::first_blob(CodeHeap* heap) { |
| 458 | assert_locked_or_safepoint(CodeCache_lock); |
| 459 | assert(heap != NULL, "heap is null"); |
| 460 | return (CodeBlob*)heap->first(); |
| 461 | } |
| 462 | |
| 463 | CodeBlob* CodeCache::first_blob(int code_blob_type) { |
| 464 | if (heap_available(code_blob_type)) { |
| 465 | return first_blob(get_code_heap(code_blob_type)); |
| 466 | } else { |
| 467 | return NULL; |
| 468 | } |
| 469 | } |
| 470 | |
| 471 | CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) { |
| 472 | assert_locked_or_safepoint(CodeCache_lock); |
| 473 | assert(heap != NULL, "heap is null"); |
| 474 | return (CodeBlob*)heap->next(cb); |
| 475 | } |
| 476 | |
| 477 | /** |
| 478 | * Do not seize the CodeCache lock here--if the caller has not |
| 479 | * already done so, we are going to lose bigtime, since the code |
| 480 | * cache will contain a garbage CodeBlob until the caller can |
| 481 | * run the constructor for the CodeBlob subclass it is busy |
| 482 | * instantiating. |
| 483 | */ |
| 484 | CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) { |
| 485 | // Possibly wakes up the sweeper thread. |
| 486 | NMethodSweeper::notify(code_blob_type); |
| 487 | assert_locked_or_safepoint(CodeCache_lock); |
| 488 | assert(size > 0, "Code cache allocation request must be > 0 but is %d", size); |
| 489 | if (size <= 0) { |
| 490 | return NULL; |
| 491 | } |
| 492 | CodeBlob* cb = NULL; |
| 493 | |
| 494 | // Get CodeHeap for the given CodeBlobType |
| 495 | CodeHeap* heap = get_code_heap(code_blob_type); |
| 496 | assert(heap != NULL, "heap is null"); |
| 497 | |
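| | // Allocate from the selected heap; on failure, expand the heap and retry, falling back to another heap (or reporting a full code cache) below. |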
| 498 | while (true) { |
| 499 | cb = (CodeBlob*)heap->allocate(size); |
| 500 | if (cb != NULL) break; |
| 501 | if (!heap->expand_by(CodeCacheExpansionSize)) { |
| 502 | // Save original type for error reporting |
| 503 | if (orig_code_blob_type == CodeBlobType::All) { |
| 504 | orig_code_blob_type = code_blob_type; |
| 505 | } |
| 506 | // Expansion failed |
| 507 | if (SegmentedCodeCache) { |
| 508 | // Fallback solution: Try to store code in another code heap. |
| 509 | // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled) |
| 510 | // Note that in the sweeper, we check the reverse_free_ratio of the code heap |
| 511 | // and force stack scanning if less than 10% of the code heap are free. |
| 512 | int type = code_blob_type; |
| 513 | switch (type) { |
| 514 | case CodeBlobType::NonNMethod: |
| 515 | type = CodeBlobType::MethodNonProfiled; |
| 516 | break; |
| 517 | case CodeBlobType::MethodNonProfiled: |
| 518 | type = CodeBlobType::MethodProfiled; |
| 519 | break; |
| 520 | case CodeBlobType::MethodProfiled: |
| 521 | // Avoid loop if we already tried that code heap |
| 522 | if (type == orig_code_blob_type) { |
| 523 | type = CodeBlobType::MethodNonProfiled; |
| 524 | } |
| 525 | break; |
| 526 | } |
| 527 | if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) { |
| 528 | if (PrintCodeCacheExtension) { |
| 529 | tty->print_cr("Extension of %s failed. Trying to allocate in %s." , |
| 530 | heap->name(), get_code_heap(type)->name()); |
| 531 | } |
| 532 | return allocate(size, type, orig_code_blob_type); |
| 533 | } |
| 534 | } |
| 535 | MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); |
| 536 | CompileBroker::handle_full_code_cache(orig_code_blob_type); |
| 537 | return NULL; |
| 538 | } |
| 539 | if (PrintCodeCacheExtension) { |
| 540 | ResourceMark rm; |
| 541 | if (_nmethod_heaps->length() >= 1) { |
| 542 | tty->print("%s" , heap->name()); |
| 543 | } else { |
| 544 | tty->print("CodeCache" ); |
| 545 | } |
| 546 | tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (" SSIZE_FORMAT " bytes)" , |
| 547 | (intptr_t)heap->low_boundary(), (intptr_t)heap->high(), |
| 548 | (address)heap->high() - (address)heap->low_boundary()); |
| 549 | } |
| 550 | } |
| 551 | print_trace("allocation" , cb, size); |
| 552 | return cb; |
| 553 | } |
| 554 | |
| 555 | void CodeCache::free(CodeBlob* cb) { |
| 556 | assert_locked_or_safepoint(CodeCache_lock); |
| 557 | CodeHeap* heap = get_code_heap(cb); |
| 558 | print_trace("free" , cb); |
| 559 | if (cb->is_nmethod()) { |
| 560 | heap->set_nmethod_count(heap->nmethod_count() - 1); |
| 561 | if (((nmethod *)cb)->has_dependencies()) { |
| 562 | _number_of_nmethods_with_dependencies--; |
| 563 | } |
| 564 | } |
| 565 | if (cb->is_adapter_blob()) { |
| 566 | heap->set_adapter_count(heap->adapter_count() - 1); |
| 567 | } |
| 568 | |
| 569 | // Get heap for given CodeBlob and deallocate |
| 570 | get_code_heap(cb)->deallocate(cb); |
| 571 | |
| 572 | assert(heap->blob_count() >= 0, "sanity check"); |
| 573 | } |
| 574 | |
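| | // Return the unused tail of an interpreter buffer blob to its code heap and shrink the blob accordingly. |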
| 575 | void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) { |
| 576 | assert_locked_or_safepoint(CodeCache_lock); |
| 577 | guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!"); |
| 578 | print_trace("free_unused_tail", cb); |
| 579 | |
| 580 | // We also have to account for the extra space (i.e. header) used by the CodeBlob |
| 581 | // which provides the memory (see BufferBlob::create() in codeBlob.cpp). |
| 582 | used += CodeBlob::align_code_offset(cb->header_size()); |
| 583 | |
| 584 | // Get heap for given CodeBlob and deallocate its unused tail |
| 585 | get_code_heap(cb)->deallocate_tail(cb, used); |
| 586 | // Adjust the sizes of the CodeBlob |
| 587 | cb->adjust_size(used); |
| 588 | } |
| 589 | |
| 590 | void CodeCache::commit(CodeBlob* cb) { |
| 591 | // this is called by nmethod::nmethod, which must already own CodeCache_lock |
| 592 | assert_locked_or_safepoint(CodeCache_lock); |
| 593 | CodeHeap* heap = get_code_heap(cb); |
| 594 | if (cb->is_nmethod()) { |
| 595 | heap->set_nmethod_count(heap->nmethod_count() + 1); |
| 596 | if (((nmethod *)cb)->has_dependencies()) { |
| 597 | _number_of_nmethods_with_dependencies++; |
| 598 | } |
| 599 | } |
| 600 | if (cb->is_adapter_blob()) { |
| 601 | heap->set_adapter_count(heap->adapter_count() + 1); |
| 602 | } |
| 603 | |
| 604 | // flush the hardware I-cache |
| 605 | ICache::invalidate_range(cb->content_begin(), cb->content_size()); |
| 606 | } |
| 607 | |
| 608 | bool CodeCache::contains(void *p) { |
| 609 | // S390 uses contains() in current_frame(), which is used before |
| 610 | // code cache initialization if NativeMemoryTracking=detail is set. |
| 611 | S390_ONLY(if (_heaps == NULL) return false;) |
| 612 | // It should be ok to call contains without holding a lock. |
| 613 | FOR_ALL_HEAPS(heap) { |
| 614 | if ((*heap)->contains(p)) { |
| 615 | return true; |
| 616 | } |
| 617 | } |
| 618 | return false; |
| 619 | } |
| 620 | |
| 621 | bool CodeCache::contains(nmethod *nm) { |
| 622 | return contains((void *)nm); |
| 623 | } |
| 624 | |
| 625 | // This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not |
| 626 | // looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain |
| 627 | // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled. |
| 628 | CodeBlob* CodeCache::find_blob(void* start) { |
| 629 | CodeBlob* result = find_blob_unsafe(start); |
| 630 | // We could potentially look up non_entrant methods |
| 631 | guarantee(result == NULL || !result->is_zombie() || result->is_locked_by_vm() || VMError::is_error_reported(), "unsafe access to zombie method"); |
| 632 | return result; |
| 633 | } |
| 634 | |
| 635 | // Lookup that does not fail if you lookup a zombie method (if you call this, be sure to know |
| 636 | // what you are doing) |
| 637 | CodeBlob* CodeCache::find_blob_unsafe(void* start) { |
| 638 | // NMT can walk the stack before code cache is created |
| 639 | if (_heaps != NULL) { |
| 640 | CodeHeap* heap = get_code_heap_containing(start); |
| 641 | if (heap != NULL) { |
| 642 | return heap->find_blob_unsafe(start); |
| 643 | } |
| 644 | } |
| 645 | return NULL; |
| 646 | } |
| 647 | |
| 648 | nmethod* CodeCache::find_nmethod(void* start) { |
| 649 | CodeBlob* cb = find_blob(start); |
| 650 | assert(cb->is_nmethod(), "did not find an nmethod"); |
| 651 | return (nmethod*)cb; |
| 652 | } |
| 653 | |
| 654 | void CodeCache::blobs_do(void f(CodeBlob* nm)) { |
| 655 | assert_locked_or_safepoint(CodeCache_lock); |
| 656 | FOR_ALL_HEAPS(heap) { |
| 657 | FOR_ALL_BLOBS(cb, *heap) { |
| 658 | f(cb); |
| 659 | } |
| 660 | } |
| 661 | } |
| 662 | |
| 663 | void CodeCache::nmethods_do(void f(nmethod* nm)) { |
| 664 | assert_locked_or_safepoint(CodeCache_lock); |
| 665 | NMethodIterator iter(NMethodIterator::all_blobs); |
| 666 | while(iter.next()) { |
| 667 | f(iter.method()); |
| 668 | } |
| 669 | } |
| 670 | |
| 671 | void CodeCache::metadata_do(MetadataClosure* f) { |
| 672 | assert_locked_or_safepoint(CodeCache_lock); |
| 673 | NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading); |
| 674 | while(iter.next()) { |
| 675 | iter.method()->metadata_do(f); |
| 676 | } |
| 677 | AOTLoader::metadata_do(f); |
| 678 | } |
| 679 | |
| 680 | int CodeCache::alignment_unit() { |
| 681 | return (int)_heaps->first()->alignment_unit(); |
| 682 | } |
| 683 | |
| 684 | int CodeCache::alignment_offset() { |
| 685 | return (int)_heaps->first()->alignment_offset(); |
| 686 | } |
| 687 | |
| 688 | // Mark nmethods for unloading if they contain otherwise unreachable oops. |
| 689 | void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) { |
| 690 | assert_locked_or_safepoint(CodeCache_lock); |
| 691 | UnloadingScope scope(is_alive); |
| 692 | CompiledMethodIterator iter(CompiledMethodIterator::only_alive); |
| 693 | while(iter.next()) { |
| 694 | iter.method()->do_unloading(unloading_occurred); |
| 695 | } |
| 696 | } |
| 697 | |
| 698 | void CodeCache::blobs_do(CodeBlobClosure* f) { |
| 699 | assert_locked_or_safepoint(CodeCache_lock); |
| 700 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 701 | FOR_ALL_BLOBS(cb, *heap) { |
| 702 | if (cb->is_alive()) { |
| 703 | f->do_code_blob(cb); |
| 704 | #ifdef ASSERT |
| 705 | if (cb->is_nmethod()) { |
| 706 | Universe::heap()->verify_nmethod((nmethod*)cb); |
| 707 | } |
| 708 | #endif //ASSERT |
| 709 | } |
| 710 | } |
| 711 | } |
| 712 | } |
| 713 | |
| 714 | void CodeCache::verify_clean_inline_caches() { |
| 715 | #ifdef ASSERT |
| 716 | NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading); |
| 717 | while(iter.next()) { |
| 718 | nmethod* nm = iter.method(); |
| 719 | assert(!nm->is_unloaded(), "Tautology"); |
| 720 | nm->verify_clean_inline_caches(); |
| 721 | nm->verify(); |
| 722 | } |
| 723 | #endif |
| 724 | } |
| 725 | |
| 726 | void CodeCache::verify_icholder_relocations() { |
| 727 | #ifdef ASSERT |
| 728 | // make sure that we aren't leaking icholders |
| 729 | int count = 0; |
| 730 | FOR_ALL_HEAPS(heap) { |
| 731 | FOR_ALL_BLOBS(cb, *heap) { |
| 732 | CompiledMethod *nm = cb->as_compiled_method_or_null(); |
| 733 | if (nm != NULL) { |
| 734 | count += nm->verify_icholder_relocations(); |
| 735 | } |
| 736 | } |
| 737 | } |
| 738 | assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() == |
| 739 | CompiledICHolder::live_count(), "must agree"); |
| 740 | #endif |
| 741 | } |
| 742 | |
| 743 | // Defer freeing of concurrently cleaned ExceptionCache entries until |
| 744 | // after a global handshake operation. |
| 745 | void CodeCache::release_exception_cache(ExceptionCache* entry) { |
| 746 | if (SafepointSynchronize::is_at_safepoint()) { |
| 747 | delete entry; |
| 748 | } else { |
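| | // Lock-free push onto the purge list: retry the CAS if another thread changed the list head concurrently. |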
| 749 | for (;;) { |
| 750 | ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list); |
| 751 | entry->set_purge_list_next(purge_list_head); |
| 752 | if (Atomic::cmpxchg(entry, &_exception_cache_purge_list, purge_list_head) == purge_list_head) { |
| 753 | break; |
| 754 | } |
| 755 | } |
| 756 | } |
| 757 | } |
| 758 | |
| 759 | // Delete exception caches that have been concurrently unlinked, |
| 760 | // followed by a global handshake operation. |
| 761 | void CodeCache::purge_exception_caches() { |
| 762 | ExceptionCache* curr = _exception_cache_purge_list; |
| 763 | while (curr != NULL) { |
| 764 | ExceptionCache* next = curr->purge_list_next(); |
| 765 | delete curr; |
| 766 | curr = next; |
| 767 | } |
| 768 | _exception_cache_purge_list = NULL; |
| 769 | } |
| 770 | |
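| | // The unloading cycle alternates between 1 and 2. Compiled methods record the cycle in which their is_unloading() state was computed, so advancing the cycle forces the state to be recomputed during the next unloading pass. |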
| 771 | uint8_t CodeCache::_unloading_cycle = 1; |
| 772 | |
| 773 | void CodeCache::increment_unloading_cycle() { |
| 774 | if (_unloading_cycle == 1) { |
| 775 | _unloading_cycle = 2; |
| 776 | } else { |
| 777 | _unloading_cycle = 1; |
| 778 | } |
| 779 | } |
| 780 | |
| 781 | CodeCache::UnloadingScope::UnloadingScope(BoolObjectClosure* is_alive) |
| 782 | : _is_unloading_behaviour(is_alive) |
| 783 | { |
| 784 | _saved_behaviour = IsUnloadingBehaviour::current(); |
| 785 | IsUnloadingBehaviour::set_current(&_is_unloading_behaviour); |
| 786 | increment_unloading_cycle(); |
| 787 | DependencyContext::cleaning_start(); |
| 788 | } |
| 789 | |
| 790 | CodeCache::UnloadingScope::~UnloadingScope() { |
| 791 | IsUnloadingBehaviour::set_current(_saved_behaviour); |
| 792 | DependencyContext::cleaning_end(); |
| 793 | } |
| 794 | |
| 795 | void CodeCache::verify_oops() { |
| 796 | MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); |
| 797 | VerifyOopClosure voc; |
| 798 | NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading); |
| 799 | while(iter.next()) { |
| 800 | nmethod* nm = iter.method(); |
| 801 | nm->oops_do(&voc); |
| 802 | nm->verify_oop_relocations(); |
| 803 | } |
| 804 | } |
| 805 | |
| 806 | int CodeCache::blob_count(int code_blob_type) { |
| 807 | CodeHeap* heap = get_code_heap(code_blob_type); |
| 808 | return (heap != NULL) ? heap->blob_count() : 0; |
| 809 | } |
| 810 | |
| 811 | int CodeCache::blob_count() { |
| 812 | int count = 0; |
| 813 | FOR_ALL_HEAPS(heap) { |
| 814 | count += (*heap)->blob_count(); |
| 815 | } |
| 816 | return count; |
| 817 | } |
| 818 | |
| 819 | int CodeCache::nmethod_count(int code_blob_type) { |
| 820 | CodeHeap* heap = get_code_heap(code_blob_type); |
| 821 | return (heap != NULL) ? heap->nmethod_count() : 0; |
| 822 | } |
| 823 | |
| 824 | int CodeCache::nmethod_count() { |
| 825 | int count = 0; |
| 826 | FOR_ALL_NMETHOD_HEAPS(heap) { |
| 827 | count += (*heap)->nmethod_count(); |
| 828 | } |
| 829 | return count; |
| 830 | } |
| 831 | |
| 832 | int CodeCache::adapter_count(int code_blob_type) { |
| 833 | CodeHeap* heap = get_code_heap(code_blob_type); |
| 834 | return (heap != NULL) ? heap->adapter_count() : 0; |
| 835 | } |
| 836 | |
| 837 | int CodeCache::adapter_count() { |
| 838 | int count = 0; |
| 839 | FOR_ALL_HEAPS(heap) { |
| 840 | count += (*heap)->adapter_count(); |
| 841 | } |
| 842 | return count; |
| 843 | } |
| 844 | |
| 845 | address CodeCache::low_bound(int code_blob_type) { |
| 846 | CodeHeap* heap = get_code_heap(code_blob_type); |
| 847 | return (heap != NULL) ? (address)heap->low_boundary() : NULL; |
| 848 | } |
| 849 | |
| 850 | address CodeCache::high_bound(int code_blob_type) { |
| 851 | CodeHeap* heap = get_code_heap(code_blob_type); |
| 852 | return (heap != NULL) ? (address)heap->high_boundary() : NULL; |
| 853 | } |
| 854 | |
| 855 | size_t CodeCache::capacity() { |
| 856 | size_t cap = 0; |
| 857 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 858 | cap += (*heap)->capacity(); |
| 859 | } |
| 860 | return cap; |
| 861 | } |
| 862 | |
| 863 | size_t CodeCache::unallocated_capacity(int code_blob_type) { |
| 864 | CodeHeap* heap = get_code_heap(code_blob_type); |
| 865 | return (heap != NULL) ? heap->unallocated_capacity() : 0; |
| 866 | } |
| 867 | |
| 868 | size_t CodeCache::unallocated_capacity() { |
| 869 | size_t unallocated_cap = 0; |
| 870 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 871 | unallocated_cap += (*heap)->unallocated_capacity(); |
| 872 | } |
| 873 | return unallocated_cap; |
| 874 | } |
| 875 | |
| 876 | size_t CodeCache::max_capacity() { |
| 877 | size_t max_cap = 0; |
| 878 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 879 | max_cap += (*heap)->max_capacity(); |
| 880 | } |
| 881 | return max_cap; |
| 882 | } |
| 883 | |
| 884 | /** |
| 885 | * Returns the reverse free ratio. E.g., if 25% (1/4) of the code heap |
| 886 | * is free, reverse_free_ratio() returns 4. |
| 887 | */ |
| 888 | double CodeCache::reverse_free_ratio(int code_blob_type) { |
| 889 | CodeHeap* heap = get_code_heap(code_blob_type); |
| 890 | if (heap == NULL) { |
| 891 | return 0; |
| 892 | } |
| 893 | |
| 894 | double unallocated_capacity = MAX2((double)heap->unallocated_capacity(), 1.0); // Avoid division by 0; |
| 895 | double max_capacity = (double)heap->max_capacity(); |
| 896 | double result = max_capacity / unallocated_capacity; |
| 897 | assert (max_capacity >= unallocated_capacity, "Must be"); |
| 898 | assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result); |
| 899 | return result; |
| 900 | } |
| 901 | |
| 902 | size_t CodeCache::bytes_allocated_in_freelists() { |
| 903 | size_t allocated_bytes = 0; |
| 904 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 905 | allocated_bytes += (*heap)->allocated_in_freelist(); |
| 906 | } |
| 907 | return allocated_bytes; |
| 908 | } |
| 909 | |
| 910 | int CodeCache::allocated_segments() { |
| 911 | int number_of_segments = 0; |
| 912 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 913 | number_of_segments += (*heap)->allocated_segments(); |
| 914 | } |
| 915 | return number_of_segments; |
| 916 | } |
| 917 | |
| 918 | size_t CodeCache::freelists_length() { |
| 919 | size_t length = 0; |
| 920 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 921 | length += (*heap)->freelist_length(); |
| 922 | } |
| 923 | return length; |
| 924 | } |
| 925 | |
| 926 | void icache_init(); |
| 927 | |
| 928 | void CodeCache::initialize() { |
| 929 | assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points"); |
| 930 | #ifdef COMPILER2 |
| 931 | assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops"); |
| 932 | #endif |
| 933 | assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants"); |
| 934 | // This was originally just an alignment check that caused a failure; instead, round |
| 935 | // the expansion size up to the page size. In particular, Solaris is moving to a larger |
| 936 | // default page size. |
| 937 | CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size()); |
| 938 | |
| 939 | if (SegmentedCodeCache) { |
| 940 | // Use multiple code heaps |
| 941 | initialize_heaps(); |
| 942 | } else { |
| 943 | // Use a single code heap |
| 944 | FLAG_SET_ERGO(NonNMethodCodeHeapSize, 0); |
| 945 | FLAG_SET_ERGO(ProfiledCodeHeapSize, 0); |
| 946 | FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0); |
| 947 | ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize); |
| 948 | add_heap(rs, "CodeCache" , CodeBlobType::All); |
| 949 | } |
| 950 | |
| 951 | // Initialize ICache flush mechanism |
| 952 | // This service is needed for os::register_code_area |
| 953 | icache_init(); |
| 954 | |
| 955 | // Give OS a chance to register generated code area. |
| 956 | // This is used on Windows 64 bit platforms to register |
| 957 | // Structured Exception Handlers for our generated code. |
| 958 | os::register_code_area((char*)low_bound(), (char*)high_bound()); |
| 959 | } |
| 960 | |
| 961 | void codeCache_init() { |
| 962 | CodeCache::initialize(); |
| 963 | // Load AOT libraries and add AOT code heaps. |
| 964 | AOTLoader::initialize(); |
| 965 | } |
| 966 | |
| 967 | //------------------------------------------------------------------------------------------------ |
| 968 | |
| 969 | int CodeCache::number_of_nmethods_with_dependencies() { |
| 970 | return _number_of_nmethods_with_dependencies; |
| 971 | } |
| 972 | |
| 973 | void CodeCache::clear_inline_caches() { |
| 974 | assert_locked_or_safepoint(CodeCache_lock); |
| 975 | CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); |
| 976 | while(iter.next()) { |
| 977 | iter.method()->clear_inline_caches(); |
| 978 | } |
| 979 | } |
| 980 | |
| 981 | void CodeCache::cleanup_inline_caches() { |
| 982 | assert_locked_or_safepoint(CodeCache_lock); |
| 983 | NMethodIterator iter(NMethodIterator::only_alive_and_not_unloading); |
| 984 | while(iter.next()) { |
| 985 | iter.method()->cleanup_inline_caches(/*clean_all=*/true); |
| 986 | } |
| 987 | } |
| 988 | |
| 989 | // Keeps track of time spent for checking dependencies |
| 990 | NOT_PRODUCT(static elapsedTimer dependentCheckTime;) |
| 991 | |
| 992 | int CodeCache::mark_for_deoptimization(KlassDepChange& changes) { |
| 993 | MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); |
| 994 | int number_of_marked_CodeBlobs = 0; |
| 995 | |
| 996 | // search the hierarchy looking for nmethods which are affected by the loading of this class |
| 997 | |
| 998 | // then search the interfaces this class implements looking for nmethods |
| 999 | // which might be dependent on the fact that an interface only had one |
| 1000 | // implementor. |
| 1001 | // nmethod::check_all_dependencies only works correctly if no safepoint |
| 1002 | // can happen. |
| 1003 | NoSafepointVerifier nsv; |
| 1004 | for (DepChange::ContextStream str(changes, nsv); str.next(); ) { |
| 1005 | Klass* d = str.klass(); |
| 1006 | number_of_marked_CodeBlobs += InstanceKlass::cast(d)->mark_dependent_nmethods(changes); |
| 1007 | } |
| 1008 | |
| 1009 | #ifndef PRODUCT |
| 1010 | if (VerifyDependencies) { |
| 1011 | // Object pointers are used as unique identifiers for dependency arguments. This |
| 1012 | // is only possible if no safepoint, i.e., GC occurs during the verification code. |
| 1013 | dependentCheckTime.start(); |
| 1014 | nmethod::check_all_dependencies(changes); |
| 1015 | dependentCheckTime.stop(); |
| 1016 | } |
| 1017 | #endif |
| 1018 | |
| 1019 | return number_of_marked_CodeBlobs; |
| 1020 | } |
| 1021 | |
| 1022 | CompiledMethod* CodeCache::find_compiled(void* start) { |
| 1023 | CodeBlob *cb = find_blob(start); |
| 1024 | assert(cb == NULL || cb->is_compiled(), "did not find a compiled_method"); |
| 1025 | return (CompiledMethod*)cb; |
| 1026 | } |
| 1027 | |
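| | // With AOT code, call targets can lie outside the code cache; report whether a call from either end of the code cache would need a far branch to reach the target. |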
| 1028 | bool CodeCache::is_far_target(address target) { |
| 1029 | #if INCLUDE_AOT |
| 1030 | return NativeCall::is_far_call(_low_bound, target) || |
| 1031 | NativeCall::is_far_call(_high_bound, target); |
| 1032 | #else |
| 1033 | return false; |
| 1034 | #endif |
| 1035 | } |
| 1036 | |
| 1037 | #if INCLUDE_JVMTI |
| 1038 | // RedefineClasses support for unloading nmethods that are dependent on "old" methods. |
| 1039 | // We don't really expect this table to grow very large. If it does, it can become a hashtable. |
| 1040 | static GrowableArray<CompiledMethod*>* old_compiled_method_table = NULL; |
| 1041 | |
| 1042 | static void add_to_old_table(CompiledMethod* c) { |
| 1043 | if (old_compiled_method_table == NULL) { |
| 1044 | old_compiled_method_table = new (ResourceObj::C_HEAP, mtCode) GrowableArray<CompiledMethod*>(100, true); |
| 1045 | } |
| 1046 | old_compiled_method_table->push(c); |
| 1047 | } |
| 1048 | |
| 1049 | static void reset_old_method_table() { |
| 1050 | if (old_compiled_method_table != NULL) { |
| 1051 | delete old_compiled_method_table; |
| 1052 | old_compiled_method_table = NULL; |
| 1053 | } |
| 1054 | } |
| 1055 | |
| 1056 | // Remove this method when zombied or unloaded. |
| 1057 | void CodeCache::unregister_old_nmethod(CompiledMethod* c) { |
| 1058 | assert_lock_strong(CodeCache_lock); |
| 1059 | if (old_compiled_method_table != NULL) { |
| 1060 | int index = old_compiled_method_table->find(c); |
| 1061 | if (index != -1) { |
| 1062 | old_compiled_method_table->delete_at(index); |
| 1063 | } |
| 1064 | } |
| 1065 | } |
| 1066 | |
| 1067 | void CodeCache::old_nmethods_do(MetadataClosure* f) { |
| 1068 | // Walk old method table and mark those on stack. |
| 1069 | int length = 0; |
| 1070 | if (old_compiled_method_table != NULL) { |
| 1071 | length = old_compiled_method_table->length(); |
| 1072 | for (int i = 0; i < length; i++) { |
| 1073 | CompiledMethod* cm = old_compiled_method_table->at(i); |
| 1074 | // Only walk alive nmethods, the dead ones will get removed by the sweeper. |
| 1075 | if (cm->is_alive()) { |
| 1076 | old_compiled_method_table->at(i)->metadata_do(f); |
| 1077 | } |
| 1078 | } |
| 1079 | } |
| 1080 | log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length); |
| 1081 | } |
| 1082 | |
| 1083 | // Just marks the methods in this class as needing deoptimization |
| 1084 | void CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) { |
| 1085 | assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); |
| 1086 | |
| 1087 | // Mark dependent AOT nmethods, which are only found via the class redefined. |
| 1088 | // TODO: add dependencies to aotCompiledMethod's metadata section so this isn't |
| 1089 | // needed. |
| 1090 | AOTLoader::mark_evol_dependent_methods(dependee); |
| 1091 | } |
| 1092 | |
| 1093 | |
| 1094 | // Walk compiled methods and mark dependent methods for deoptimization. |
| 1095 | int CodeCache::mark_dependents_for_evol_deoptimization() { |
| 1096 | assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); |
| 1097 | // Each redefinition creates a new set of nmethods that have references to "old" Methods |
| 1098 | // So delete old method table and create a new one. |
| 1099 | reset_old_method_table(); |
| 1100 | |
| 1101 | int number_of_marked_CodeBlobs = 0; |
| 1102 | CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); |
| 1103 | while(iter.next()) { |
| 1104 | CompiledMethod* nm = iter.method(); |
| 1105 | // Walk all alive nmethods to check for old Methods. |
| 1106 | // This includes methods whose inline caches point to old methods, so |
| 1107 | // inline cache clearing is unnecessary. |
| 1108 | if (nm->has_evol_metadata()) { |
| 1109 | nm->mark_for_deoptimization(); |
| 1110 | add_to_old_table(nm); |
| 1111 | number_of_marked_CodeBlobs++; |
| 1112 | } |
| 1113 | } |
| 1114 | |
| 1115 | // return total count of nmethods marked for deoptimization, if zero the caller |
| 1116 | // can skip deoptimization |
| 1117 | return number_of_marked_CodeBlobs; |
| 1118 | } |
| 1119 | |
| 1120 | void CodeCache::mark_all_nmethods_for_evol_deoptimization() { |
| 1121 | assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); |
| 1122 | CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); |
| 1123 | while(iter.next()) { |
| 1124 | CompiledMethod* nm = iter.method(); |
| 1125 | if (!nm->method()->is_method_handle_intrinsic()) { |
| 1126 | nm->mark_for_deoptimization(); |
| 1127 | if (nm->has_evol_metadata()) { |
| 1128 | add_to_old_table(nm); |
| 1129 | } |
| 1130 | } |
| 1131 | } |
| 1132 | } |
| 1133 | |
| 1134 | // Flushes compiled methods dependent on redefined classes that have already been |
| 1135 | // marked for deoptimization. |
| 1136 | void CodeCache::flush_evol_dependents() { |
| 1137 | assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); |
| 1138 | |
| 1139 | // CodeCache can only be updated by a thread_in_VM and they will all be |
| 1140 | // stopped during the safepoint so CodeCache will be safe to update without |
| 1141 | // holding the CodeCache_lock. |
| 1142 | |
| 1143 | // At least one nmethod has been marked for deoptimization |
| 1144 | |
| 1145 | // All this already happens inside a VM_Operation, so we'll do all the work here. |
| 1146 | // Stuff copied from VM_Deoptimize and modified slightly. |
| 1147 | |
| 1148 | // We do not want any GCs to happen while we are in the middle of this VM operation |
| 1149 | ResourceMark rm; |
| 1150 | DeoptimizationMarker dm; |
| 1151 | |
| 1152 | // Deoptimize all activations depending on marked nmethods |
| 1153 | Deoptimization::deoptimize_dependents(); |
| 1154 | |
| 1155 | // Make the dependent methods not entrant |
| 1156 | make_marked_nmethods_not_entrant(); |
| 1157 | } |
| 1158 | #endif // INCLUDE_JVMTI |
| 1159 | |
| 1160 | // Deoptimize all methods |
| 1161 | void CodeCache::mark_all_nmethods_for_deoptimization() { |
| 1162 | MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); |
| 1163 | CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); |
| 1164 | while(iter.next()) { |
| 1165 | CompiledMethod* nm = iter.method(); |
| 1166 | if (!nm->method()->is_method_handle_intrinsic()) { |
| 1167 | nm->mark_for_deoptimization(); |
| 1168 | } |
| 1169 | } |
| 1170 | } |
| 1171 | |
| 1172 | int CodeCache::mark_for_deoptimization(Method* dependee) { |
| 1173 | MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); |
| 1174 | int number_of_marked_CodeBlobs = 0; |
| 1175 | |
| 1176 | CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); |
| 1177 | while(iter.next()) { |
| 1178 | CompiledMethod* nm = iter.method(); |
| 1179 | if (nm->is_dependent_on_method(dependee)) { |
| 1180 | ResourceMark rm; |
| 1181 | nm->mark_for_deoptimization(); |
| 1182 | number_of_marked_CodeBlobs++; |
| 1183 | } |
| 1184 | } |
| 1185 | |
| 1186 | return number_of_marked_CodeBlobs; |
| 1187 | } |
| 1188 | |
| 1189 | void CodeCache::make_marked_nmethods_not_entrant() { |
| 1190 | assert_locked_or_safepoint(CodeCache_lock); |
| 1191 | CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); |
| 1192 | while(iter.next()) { |
| 1193 | CompiledMethod* nm = iter.method(); |
| 1194 | if (nm->is_marked_for_deoptimization() && !nm->is_not_entrant()) { |
| 1195 | nm->make_not_entrant(); |
| 1196 | } |
| 1197 | } |
| 1198 | } |
| 1199 | |
| 1200 | // Flushes compiled methods dependent on dependee. |
| 1201 | void CodeCache::flush_dependents_on(InstanceKlass* dependee) { |
| 1202 | assert_lock_strong(Compile_lock); |
| 1203 | |
| 1204 | if (number_of_nmethods_with_dependencies() == 0) return; |
| 1205 | |
| 1206 | // CodeCache can only be updated by a thread_in_VM and they will all be |
| 1207 | // stopped during the safepoint so CodeCache will be safe to update without |
| 1208 | // holding the CodeCache_lock. |
| 1209 | |
| 1210 | KlassDepChange changes(dependee); |
| 1211 | |
| 1212 | // Compute the dependent nmethods |
| 1213 | if (mark_for_deoptimization(changes) > 0) { |
| 1214 | // At least one nmethod has been marked for deoptimization |
| 1215 | VM_Deoptimize op; |
| 1216 | VMThread::execute(&op); |
| 1217 | } |
| 1218 | } |
| 1219 | |
| 1220 | // Flushes compiled methods dependent on dependee |
| 1221 | void CodeCache::flush_dependents_on_method(const methodHandle& m_h) { |
| 1222 | // --- Compile_lock is not held. However we are at a safepoint. |
| 1223 | assert_locked_or_safepoint(Compile_lock); |
| 1224 | |
| 1225 | // CodeCache can only be updated by a thread_in_VM and they will all be |
| 1226 | // stopped during the safepoint so CodeCache will be safe to update without |
| 1227 | // holding the CodeCache_lock. |
| 1228 | |
| 1229 | // Compute the dependent nmethods |
| 1230 | if (mark_for_deoptimization(m_h()) > 0) { |
| 1231 | // At least one nmethod has been marked for deoptimization |
| 1232 | |
| 1233 | // All this already happens inside a VM_Operation, so we'll do all the work here. |
| 1234 | // Stuff copied from VM_Deoptimize and modified slightly. |
| 1235 | |
| 1236 | // We do not want any GCs to happen while we are in the middle of this VM operation |
| 1237 | ResourceMark rm; |
| 1238 | DeoptimizationMarker dm; |
| 1239 | |
| 1240 | // Deoptimize all activations depending on marked nmethods |
| 1241 | Deoptimization::deoptimize_dependents(); |
| 1242 | |
| 1243 | // Make the dependent methods not entrant |
| 1244 | make_marked_nmethods_not_entrant(); |
| 1245 | } |
| 1246 | } |
| 1247 | |
| 1248 | void CodeCache::verify() { |
| 1249 | assert_locked_or_safepoint(CodeCache_lock); |
| 1250 | FOR_ALL_HEAPS(heap) { |
| 1251 | (*heap)->verify(); |
| 1252 | FOR_ALL_BLOBS(cb, *heap) { |
| 1253 | if (cb->is_alive()) { |
| 1254 | cb->verify(); |
| 1255 | } |
| 1256 | } |
| 1257 | } |
| 1258 | } |
| 1259 | |
| 1260 | // A CodeHeap is full. Print out warning and report event. |
| 1261 | PRAGMA_DIAG_PUSH |
| 1262 | PRAGMA_FORMAT_NONLITERAL_IGNORED |
| 1263 | void CodeCache::report_codemem_full(int code_blob_type, bool print) { |
| 1264 | // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event |
| 1265 | CodeHeap* heap = get_code_heap(code_blob_type); |
| 1266 | assert(heap != NULL, "heap is null"); |
| 1267 | |
| 1268 | if ((heap->full_count() == 0) || print) { |
| 1269 | // Not yet reported for this heap, report |
| 1270 | if (SegmentedCodeCache) { |
| 1271 | ResourceMark rm; |
| 1272 | stringStream msg1_stream, msg2_stream; |
| 1273 | msg1_stream.print("%s is full. Compiler has been disabled." , |
| 1274 | get_code_heap_name(code_blob_type)); |
| 1275 | msg2_stream.print("Try increasing the code heap size using -XX:%s=" , |
| 1276 | get_code_heap_flag_name(code_blob_type)); |
| 1277 | const char *msg1 = msg1_stream.as_string(); |
| 1278 | const char *msg2 = msg2_stream.as_string(); |
| 1279 | |
| 1280 | log_warning(codecache)("%s" , msg1); |
| 1281 | log_warning(codecache)("%s" , msg2); |
| 1282 | warning("%s" , msg1); |
| 1283 | warning("%s" , msg2); |
| 1284 | } else { |
| 1285 | const char *msg1 = "CodeCache is full. Compiler has been disabled." ; |
| 1286 | const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize=" ; |
| 1287 | |
| 1288 | log_warning(codecache)("%s" , msg1); |
| 1289 | log_warning(codecache)("%s" , msg2); |
| 1290 | warning("%s" , msg1); |
| 1291 | warning("%s" , msg2); |
| 1292 | } |
| 1293 | ResourceMark rm; |
| 1294 | stringStream s; |
| 1295 | // Dump code cache into a buffer before locking the tty. |
| 1296 | { |
| 1297 | MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); |
| 1298 | print_summary(&s); |
| 1299 | } |
| 1300 | { |
| 1301 | ttyLocker ttyl; |
| 1302 | tty->print("%s" , s.as_string()); |
| 1303 | } |
| 1304 | |
| 1305 | if (heap->full_count() == 0) { |
| 1306 | if (PrintCodeHeapAnalytics) { |
| 1307 | CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot! |
| 1308 | } |
| 1309 | } |
| 1310 | } |
| 1311 | |
| 1312 | heap->report_full(); |
| 1313 | |
| 1314 | EventCodeCacheFull event; |
| 1315 | if (event.should_commit()) { |
| 1316 | event.set_codeBlobType((u1)code_blob_type); |
| 1317 | event.set_startAddress((u8)heap->low_boundary()); |
| 1318 | event.set_commitedTopAddress((u8)heap->high()); |
| 1319 | event.set_reservedTopAddress((u8)heap->high_boundary()); |
| 1320 | event.set_entryCount(heap->blob_count()); |
| 1321 | event.set_methodCount(heap->nmethod_count()); |
| 1322 | event.set_adaptorCount(heap->adapter_count()); |
| 1323 | event.set_unallocatedCapacity(heap->unallocated_capacity()); |
| 1324 | event.set_fullCount(heap->full_count()); |
| 1325 | event.commit(); |
| 1326 | } |
| 1327 | } |
| 1328 | PRAGMA_DIAG_POP |
| 1329 | |
| 1330 | void CodeCache::print_memory_overhead() { |
| 1331 | size_t wasted_bytes = 0; |
| 1332 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 1333 | CodeHeap* curr_heap = *heap; |
| 1334 | for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) { |
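| | // heap_block->length() is measured in code cache segments; the slack between the segment-rounded block and the blob itself counts as overhead. |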
| 1335 | HeapBlock* heap_block = ((HeapBlock*)cb) - 1; |
| 1336 | wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size(); |
| 1337 | } |
| 1338 | } |
| 1339 | // Print bytes that are allocated in the freelist |
| 1340 | ttyLocker ttl; |
| 1341 | tty->print_cr("Number of elements in freelist: " SSIZE_FORMAT, freelists_length()); |
| 1342 | tty->print_cr("Allocated in freelist: " SSIZE_FORMAT "kB" , bytes_allocated_in_freelists()/K); |
| 1343 | tty->print_cr("Unused bytes in CodeBlobs: " SSIZE_FORMAT "kB" , (wasted_bytes/K)); |
| 1344 | tty->print_cr("Segment map size: " SSIZE_FORMAT "kB" , allocated_segments()/K); // 1 byte per segment |
| 1345 | } |
| 1346 | |
| 1347 | //------------------------------------------------------------------------------------------------ |
| 1348 | // Non-product version |
| 1349 | |
| 1350 | #ifndef PRODUCT |
| 1351 | |
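// Trace a single code cache event (e.g. an allocation or a free) for one blob.
// Non-product only; gated by the PrintCodeCache2 develop flag.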
| 1352 | void CodeCache::print_trace(const char* event, CodeBlob* cb, int size) { |
| 1353 | if (PrintCodeCache2) { // Need to add a new flag |
| 1354 | ResourceMark rm; |
| 1355 | if (size == 0) size = cb->size(); |
| 1356 | tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x" , event, p2i(cb), size); |
| 1357 | } |
| 1358 | } |
| 1359 | |
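// Walk all allocable heaps, tally blobs by kind and nmethods by state (alive, not-entrant,
// zombie, unloaded, java vs. native), print the totals, then a size histogram of java
// nmethods in 512-byte buckets, followed by the memory overhead report.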
| 1360 | void CodeCache::print_internals() { |
| 1361 | int nmethodCount = 0; |
| 1362 | int runtimeStubCount = 0; |
| 1363 | int adapterCount = 0; |
| 1364 | int deoptimizationStubCount = 0; |
| 1365 | int uncommonTrapStubCount = 0; |
| 1366 | int bufferBlobCount = 0; |
| 1367 | int total = 0; |
| 1368 | int nmethodAlive = 0; |
| 1369 | int nmethodNotEntrant = 0; |
| 1370 | int nmethodZombie = 0; |
| 1371 | int nmethodUnloaded = 0; |
| 1372 | int nmethodJava = 0; |
| 1373 | int nmethodNative = 0; |
| 1374 | int max_nm_size = 0; |
| 1375 | ResourceMark rm; |
| 1376 | |
| 1377 | int i = 0; |
| 1378 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 1379 | if ((_nmethod_heaps->length() >= 1) && Verbose) { |
| 1380 | tty->print_cr("-- %s --" , (*heap)->name()); |
| 1381 | } |
| 1382 | FOR_ALL_BLOBS(cb, *heap) { |
| 1383 | total++; |
| 1384 | if (cb->is_nmethod()) { |
| 1385 | nmethod* nm = (nmethod*)cb; |
| 1386 | |
| 1387 | if (Verbose && nm->method() != NULL) { |
| 1388 | ResourceMark rm; |
| 1389 | char *method_name = nm->method()->name_and_sig_as_C_string(); |
| 1390 | tty->print("%s" , method_name); |
| 1391 | if(nm->is_alive()) { tty->print_cr(" alive" ); } |
| 1392 | if(nm->is_not_entrant()) { tty->print_cr(" not-entrant" ); } |
| 1393 | if(nm->is_zombie()) { tty->print_cr(" zombie" ); } |
| 1394 | } |
| 1395 | |
| 1396 | nmethodCount++; |
| 1397 | |
| 1398 | if(nm->is_alive()) { nmethodAlive++; } |
| 1399 | if(nm->is_not_entrant()) { nmethodNotEntrant++; } |
| 1400 | if(nm->is_zombie()) { nmethodZombie++; } |
| 1401 | if(nm->is_unloaded()) { nmethodUnloaded++; } |
| 1402 | if(nm->method() != NULL && nm->is_native_method()) { nmethodNative++; } |
| 1403 | |
| 1404 | if(nm->method() != NULL && nm->is_java_method()) { |
| 1405 | nmethodJava++; |
| 1406 | max_nm_size = MAX2(max_nm_size, nm->size()); |
| 1407 | } |
| 1408 | } else if (cb->is_runtime_stub()) { |
| 1409 | runtimeStubCount++; |
| 1410 | } else if (cb->is_deoptimization_stub()) { |
| 1411 | deoptimizationStubCount++; |
| 1412 | } else if (cb->is_uncommon_trap_stub()) { |
| 1413 | uncommonTrapStubCount++; |
| 1414 | } else if (cb->is_adapter_blob()) { |
| 1415 | adapterCount++; |
| 1416 | } else if (cb->is_buffer_blob()) { |
| 1417 | bufferBlobCount++; |
| 1418 | } |
| 1419 | } |
| 1420 | } |
| 1421 | |
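// Build the nmethod size histogram. The bucket index is simply size / bucketSize with a
// 512-byte bucket width; for example, a 1300-byte java nmethod lands in bucket 2 and is
// reported below as part of the "1024 - 1536 bytes" row.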
| 1422 | int bucketSize = 512; |
| 1423 | int bucketLimit = max_nm_size / bucketSize + 1; |
| 1424 | int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode); |
| 1425 | memset(buckets, 0, sizeof(int) * bucketLimit); |
| 1426 | |
| 1427 | NMethodIterator iter(NMethodIterator::all_blobs); |
| 1428 | while(iter.next()) { |
| 1429 | nmethod* nm = iter.method(); |
| 1430 | if(nm->method() != NULL && nm->is_java_method()) { |
| 1431 | buckets[nm->size() / bucketSize]++; |
| 1432 | } |
| 1433 | } |
| 1434 | |
| 1435 | tty->print_cr("Code Cache Entries (total of %d)" ,total); |
| 1436 | tty->print_cr("-------------------------------------------------" ); |
| 1437 | tty->print_cr("nmethods: %d" ,nmethodCount); |
| 1438 | tty->print_cr("\talive: %d" ,nmethodAlive); |
| 1439 | tty->print_cr("\tnot_entrant: %d" ,nmethodNotEntrant); |
| 1440 | tty->print_cr("\tzombie: %d" ,nmethodZombie); |
| 1441 | tty->print_cr("\tunloaded: %d" ,nmethodUnloaded); |
| 1442 | tty->print_cr("\tjava: %d" ,nmethodJava); |
| 1443 | tty->print_cr("\tnative: %d" ,nmethodNative); |
| 1444 | tty->print_cr("runtime_stubs: %d" ,runtimeStubCount); |
| 1445 | tty->print_cr("adapters: %d" ,adapterCount); |
| 1446 | tty->print_cr("buffer blobs: %d" ,bufferBlobCount); |
| 1447 | tty->print_cr("deoptimization_stubs: %d" ,deoptimizationStubCount); |
| 1448 | tty->print_cr("uncommon_traps: %d" ,uncommonTrapStubCount); |
| 1449 | tty->print_cr("\nnmethod size distribution (non-zombie java)" ); |
| 1450 | tty->print_cr("-------------------------------------------------" ); |
| 1451 | |
| 1452 | for(int i=0; i<bucketLimit; i++) { |
| 1453 | if(buckets[i] != 0) { |
| 1454 | tty->print("%d - %d bytes" ,i*bucketSize,(i+1)*bucketSize); |
| 1455 | tty->fill_to(40); |
| 1456 | tty->print_cr("%d" ,buckets[i]); |
| 1457 | } |
| 1458 | } |
| 1459 | |
| 1460 | FREE_C_HEAP_ARRAY(int, buckets); |
| 1461 | print_memory_overhead(); |
| 1462 | } |
| 1463 | |
| 1464 | #endif // !PRODUCT |
| 1465 | |
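// Print the code cache summary; in non-product builds with -XX:+Verbose, additionally
// aggregate live vs. dead blob statistics and, under WizardMode, report OopMap usage.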
| 1466 | void CodeCache::print() { |
| 1467 | print_summary(tty); |
| 1468 | |
| 1469 | #ifndef PRODUCT |
| 1470 | if (!Verbose) return; |
| 1471 | |
| 1472 | CodeBlob_sizes live; |
| 1473 | CodeBlob_sizes dead; |
| 1474 | |
| 1475 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 1476 | FOR_ALL_BLOBS(cb, *heap) { |
| 1477 | if (!cb->is_alive()) { |
| 1478 | dead.add(cb); |
| 1479 | } else { |
| 1480 | live.add(cb); |
| 1481 | } |
| 1482 | } |
| 1483 | } |
| 1484 | |
| 1485 | tty->print_cr("CodeCache:" ); |
| 1486 | tty->print_cr("nmethod dependency checking time %fs" , dependentCheckTime.seconds()); |
| 1487 | |
| 1488 | if (!live.is_empty()) { |
| 1489 | live.print("live");
| 1490 | } |
| 1491 | if (!dead.is_empty()) { |
| 1492 | dead.print("dead");
| 1493 | } |
| 1494 | |
| 1495 | if (WizardMode) { |
| 1496 | // print the oop_map usage |
| 1497 | int code_size = 0; |
| 1498 | int number_of_blobs = 0; |
| 1499 | int number_of_oop_maps = 0; |
| 1500 | int map_size = 0; |
| 1501 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 1502 | FOR_ALL_BLOBS(cb, *heap) { |
| 1503 | if (cb->is_alive()) { |
| 1504 | number_of_blobs++; |
| 1505 | code_size += cb->code_size(); |
| 1506 | ImmutableOopMapSet* set = cb->oop_maps(); |
| 1507 | if (set != NULL) { |
| 1508 | number_of_oop_maps += set->count(); |
| 1509 | map_size += set->nr_of_bytes(); |
| 1510 | } |
| 1511 | } |
| 1512 | } |
| 1513 | } |
| 1514 | tty->print_cr("OopMaps" ); |
| 1515 | tty->print_cr(" #blobs = %d" , number_of_blobs); |
| 1516 | tty->print_cr(" code size = %d" , code_size); |
| 1517 | tty->print_cr(" #oop_maps = %d" , number_of_oop_maps); |
| 1518 | tty->print_cr(" map size = %d" , map_size); |
| 1519 | } |
| 1520 | |
| 1521 | #endif // !PRODUCT |
| 1522 | } |
| 1523 | |
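// Per-heap summary: reserved size, used, max_used and free space in KB, where "size" is
// high_boundary() - low_boundary(). With 'detailed', also print the heap bounds, overall
// blob/nmethod/adapter counts, the compilation state and the cumulative full_count.
//
// A minimal usage sketch (mirroring report_codemem_full() above, which buffers the output
// before taking the tty lock):
//
//   stringStream s;
//   {
//     MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
//     CodeCache::print_summary(&s, /*detailed=*/ true);
//   }
//   tty->print("%s", s.as_string());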
| 1524 | void CodeCache::print_summary(outputStream* st, bool detailed) { |
| 1525 | int full_count = 0; |
| 1526 | FOR_ALL_HEAPS(heap_iterator) { |
| 1527 | CodeHeap* heap = (*heap_iterator); |
| 1528 | size_t total = (heap->high_boundary() - heap->low_boundary()); |
| 1529 | if (_heaps->length() >= 1) { |
| 1530 | st->print("%s:" , heap->name()); |
| 1531 | } else { |
| 1532 | st->print("CodeCache:" ); |
| 1533 | } |
| 1534 | st->print_cr(" size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT |
| 1535 | "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb" , |
| 1536 | total/K, (total - heap->unallocated_capacity())/K, |
| 1537 | heap->max_allocated_capacity()/K, heap->unallocated_capacity()/K); |
| 1538 | |
| 1539 | if (detailed) { |
| 1540 | st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]" , |
| 1541 | p2i(heap->low_boundary()), |
| 1542 | p2i(heap->high()), |
| 1543 | p2i(heap->high_boundary())); |
| 1544 | |
| 1545 | full_count += get_codemem_full_count(heap->code_blob_type()); |
| 1546 | } |
| 1547 | } |
| 1548 | |
| 1549 | if (detailed) { |
| 1550 | st->print_cr(" total_blobs=" UINT32_FORMAT " nmethods=" UINT32_FORMAT |
| 1551 | " adapters=" UINT32_FORMAT, |
| 1552 | blob_count(), nmethod_count(), adapter_count()); |
| 1553 | st->print_cr(" compilation: %s" , CompileBroker::should_compile_new_jobs() ? |
| 1554 | "enabled" : Arguments::mode() == Arguments::_int ? |
| 1555 | "disabled (interpreter mode)" : |
| 1556 | "disabled (not enough contiguous free space left)" ); |
| 1557 | st->print_cr(" stopped_count=%d, restarted_count=%d" , |
| 1558 | CompileBroker::get_total_compiler_stopped_count(), |
| 1559 | CompileBroker::get_total_compiler_restarted_count()); |
| 1560 | st->print_cr(" full_count=%d" , full_count); |
| 1561 | } |
| 1562 | } |
| 1563 | |
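// One line per alive, not-unloading compiled method: compile id, compilation level, state,
// method signature and the [header_begin, code_begin - code_end] addresses. This output
// presumably also backs the Compiler.codelist diagnostic command.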
| 1564 | void CodeCache::print_codelist(outputStream* st) { |
| 1565 | MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); |
| 1566 | |
| 1567 | CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); |
| 1568 | while (iter.next()) { |
| 1569 | CompiledMethod* cm = iter.method(); |
| 1570 | ResourceMark rm; |
| 1571 | char* method_name = cm->method()->name_and_sig_as_C_string(); |
| 1572 | st->print_cr("%d %d %d %s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]" , |
| 1573 | cm->compile_id(), cm->comp_level(), cm->get_state(), |
| 1574 | method_name, |
| 1575 | (intptr_t)cm->header_begin(), (intptr_t)cm->code_begin(), (intptr_t)cm->code_end()); |
| 1576 | } |
| 1577 | } |
| 1578 | |
| 1579 | void CodeCache::print_layout(outputStream* st) { |
| 1580 | MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); |
| 1581 | ResourceMark rm; |
| 1582 | print_summary(st, true); |
| 1583 | } |
| 1584 | |
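// Emit the code cache state as key='value' attributes (blob/nmethod/adapter counts and
// remaining free space), in a form suitable for the XML compilation log.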
| 1585 | void CodeCache::log_state(outputStream* st) { |
| 1586 | st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'" |
| 1587 | " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'" , |
| 1588 | blob_count(), nmethod_count(), adapter_count(), |
| 1589 | unallocated_capacity()); |
| 1590 | } |
| 1591 | |
| 1592 | //---< BEGIN >--- CodeHeap State Analytics. |
| 1593 | |
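// The wrappers below simply forward to CodeHeapState for every allocable heap. They back
// CompileBroker::print_heapinfo() (see the PrintCodeHeapAnalytics path above) and, by
// extension, the Compiler.CodeHeap_Analytics diagnostic command.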
| 1594 | void CodeCache::aggregate(outputStream *out, size_t granularity) { |
| 1595 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 1596 | CodeHeapState::aggregate(out, (*heap), granularity); |
| 1597 | } |
| 1598 | } |
| 1599 | |
| 1600 | void CodeCache::discard(outputStream *out) { |
| 1601 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 1602 | CodeHeapState::discard(out, (*heap)); |
| 1603 | } |
| 1604 | } |
| 1605 | |
| 1606 | void CodeCache::print_usedSpace(outputStream *out) { |
| 1607 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 1608 | CodeHeapState::print_usedSpace(out, (*heap)); |
| 1609 | } |
| 1610 | } |
| 1611 | |
| 1612 | void CodeCache::print_freeSpace(outputStream *out) { |
| 1613 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 1614 | CodeHeapState::print_freeSpace(out, (*heap)); |
| 1615 | } |
| 1616 | } |
| 1617 | |
| 1618 | void CodeCache::print_count(outputStream *out) { |
| 1619 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 1620 | CodeHeapState::print_count(out, (*heap)); |
| 1621 | } |
| 1622 | } |
| 1623 | |
| 1624 | void CodeCache::print_space(outputStream *out) { |
| 1625 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 1626 | CodeHeapState::print_space(out, (*heap)); |
| 1627 | } |
| 1628 | } |
| 1629 | |
| 1630 | void CodeCache::print_age(outputStream *out) { |
| 1631 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 1632 | CodeHeapState::print_age(out, (*heap)); |
| 1633 | } |
| 1634 | } |
| 1635 | |
| 1636 | void CodeCache::print_names(outputStream *out) { |
| 1637 | FOR_ALL_ALLOCABLE_HEAPS(heap) { |
| 1638 | CodeHeapState::print_names(out, (*heap)); |
| 1639 | } |
| 1640 | } |
| 1641 | //---< END >--- CodeHeap State Analytics. |
| 1642 | |