/*
 * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classListParser.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/placeholders.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/softRefPolicy.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "memory/dynamicArchive.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/signature.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;
MetaspaceSharedStats MetaspaceShared::_stats;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
bool MetaspaceShared::_remapped_readwrite = false;
address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
size_t MetaspaceShared::_core_spaces_size = 0;
void* MetaspaceShared::_shared_metaspace_static_top = NULL;

// The CDS archive is divided into the following regions:
//     mc  - misc code (the method entry trampolines)
//     rw  - read-write metadata
//     ro  - read-only metadata and read-only tables
//     md  - misc data (the c++ vtables)
//
//     ca0 - closed archive heap space #0
//     ca1 - closed archive heap space #1 (may be empty)
//     oa0 - open archive heap space #0
//     oa1 - open archive heap space #1 (may be empty)
//
// The mc, rw, ro, and md regions are linearly allocated, starting from
// SharedBaseAddress, in the order of mc->rw->ro->md. The sizes of these 4 regions
// are page-aligned, and there's no gap between any consecutive regions.
//
// These 4 regions are populated in the following steps:
// [1] All classes are loaded in MetaspaceShared::preload_classes(). All metadata are
//     temporarily allocated outside of the shared regions. Only the method entry
//     trampolines are written into the mc region.
// [2] ArchiveCompactor copies RW metadata into the rw region.
// [3] ArchiveCompactor copies RO metadata into the ro region.
// [4] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
//     are copied into the ro region as read-only tables.
// [5] C++ vtables are copied into the md region.
//
// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
// Their layout is independent of the other 4 regions.
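//
// For illustration only (hypothetical sizes and addresses): with
// SharedBaseAddress = 0x800000000, a 4 KB mc region, an 8 KB rw region and a
// 12 KB ro region would be laid out contiguously as
//     mc = [0x800000000, 0x800001000)
//     rw = [0x800001000, 0x800003000)
//     ro = [0x800003000, 0x800006000)
// with the md region starting at 0x800006000.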

char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    MetaspaceShared::report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }
  uintx delta;
  if (DynamicDumpSharedSpaces) {
    delta = DynamicArchive::object_delta_uintx(newtop);
  } else {
    delta = MetaspaceShared::object_delta_uintx(newtop);
  }
  if (delta > MAX_SHARED_DELTA) {
    // This is just a sanity check and should not appear in any real world usage. This
    // happens only if you allocate more than 2GB of shared objects and would require
    // millions of shared classes.
    vm_exit_during_initialization("Out of memory in the CDS archive",
                                  "Please reduce the number of shared classes.");
  }

  MetaspaceShared::commit_shared_space_to(newtop);
  _top = newtop;
  return _top;
}

char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
  char* p = (char*)align_up(_top, alignment);
  char* newtop = p + align_up(num_bytes, alignment);
  expand_top_to(newtop);
  memset(p, 0, newtop - p);
  return p;
}
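
// Usage sketch for DumpRegion::allocate (illustrative): callers request
// aligned storage and the region is committed and zeroed as needed, e.g.
//   char* p = _ro_region.allocate(sizeof(intptr_t) * 4, sizeof(intptr_t));
// returns a pointer-aligned, zero-filled block and advances _top past it.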

void DumpRegion::print(size_t total_bytes) const {
  tty->print_cr("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()), p2i(_base));
}

void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  tty->print("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
             _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    tty->print_cr(" required = %d", int(needed_bytes));
  } else {
    tty->cr();
  }
}

void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, Metaspace::reserve_alignment());
  _is_packed = true;
  if (next != NULL) {
    next->_base = next->_top = this->_end;
    next->_end = MetaspaceShared::shared_rs()->end();
  }
}

DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;

void MetaspaceShared::init_shared_dump_space(DumpRegion* first_space, address first_space_bottom) {
  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_shared_space_to().
  if (!_shared_vs.initialize(_shared_rs, 0)) {
    vm_exit_during_initialization("Unable to allocate memory for shared space");
  }
  first_space->init(&_shared_rs, (char*)first_space_bottom);
}

DumpRegion* MetaspaceShared::misc_code_dump_space() {
  return &_mc_region;
}

DumpRegion* MetaspaceShared::read_write_dump_space() {
  return &_rw_region;
}

DumpRegion* MetaspaceShared::read_only_dump_space() {
  return &_ro_region;
}

void MetaspaceShared::pack_dump_space(DumpRegion* current, DumpRegion* next,
                                      ReservedSpace* rs) {
  current->pack(next);
}

char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
  return _mc_region.allocate(num_bytes);
}

char* MetaspaceShared::read_only_space_alloc(size_t num_bytes) {
  return _ro_region.allocate(num_bytes);
}

void MetaspaceShared::initialize_runtime_shared_and_meta_spaces() {
  assert(UseSharedSpaces, "Must be called when UseSharedSpaces is enabled");

  // If using shared space, open the file that contains the shared space
  // and map in the memory before initializing the rest of metaspace (so
  // the addresses don't conflict)
  FileMapInfo* mapinfo = new FileMapInfo(true);

  // Open the shared archive file, read and validate the header. If
  // initialization fails, shared spaces [UseSharedSpaces] are
  // disabled and the file is closed.
  // The spaces are also mapped in now.
  if (mapinfo->initialize(true) && map_shared_spaces(mapinfo)) {
    size_t cds_total = core_spaces_size();
    address cds_address = (address)mapinfo->region_addr(0);
    char* cds_end = (char *)align_up(cds_address + cds_total,
                                     Metaspace::reserve_alignment());

    // Map the dynamic archive before allocating the class space.
    cds_end = initialize_dynamic_runtime_shared_spaces((char*)cds_address, cds_end);

#ifdef _LP64
    if (Metaspace::using_class_space()) {
      // If UseCompressedClassPointers is set then allocate the metaspace area
      // above the heap and above the CDS area (if it exists).
      Metaspace::allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
      // map_heap_regions() compares the current narrow oop and klass encodings
      // with the archived ones, so it must be done after all encodings are determined.
      mapinfo->map_heap_regions();
    }
    CompressedKlassPointers::set_range(CompressedClassSpaceSize);
#endif // _LP64
  } else {
    assert(!mapinfo->is_open() && !UseSharedSpaces,
           "archive file not closed or shared spaces not disabled.");
  }
}

char* MetaspaceShared::initialize_dynamic_runtime_shared_spaces(
        char* static_start, char* static_end) {
  assert(UseSharedSpaces, "must be runtime");
  char* cds_end = static_end;
  if (!DynamicDumpSharedSpaces) {
    address dynamic_top = DynamicArchive::map();
    if (dynamic_top != NULL) {
      assert(dynamic_top > (address)static_start, "Unexpected layout");
      MetaspaceObj::expand_shared_metaspace_range(dynamic_top);
      cds_end = (char *)align_up(dynamic_top, Metaspace::reserve_alignment());
    }
  }
  return cds_end;
}

ReservedSpace* MetaspaceShared::reserve_shared_rs(size_t size, size_t alignment,
                                                  bool large, char* requested_address) {
  if (requested_address != NULL) {
    _shared_rs = ReservedSpace(size, alignment, large, requested_address);
  } else {
    _shared_rs = ReservedSpace(size, alignment, large);
  }
  return &_shared_rs;
}

void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
  assert(DumpSharedSpaces, "should be called for dump time only");
  const size_t reserve_alignment = Metaspace::reserve_alignment();
  bool large_pages = false; // No large pages when dumping the CDS archive.
  char* shared_base = (char*)align_up((char*)SharedBaseAddress, reserve_alignment);

#ifdef _LP64
  // On 64-bit VM, the heap and class space layout will be the same as if
  // you're running in -Xshare:on mode:
  //
  //                              +-- SharedBaseAddress (default = 0x800000000)
  //                              v
  // +-..---------+---------+ ... +----+----+----+----+---------------+
  // |    Heap    | Archive |     | MC | RW | RO | MD | class space   |
  // +-..---------+---------+ ... +----+----+----+----+---------------+
  // |<--   MaxHeapSize  -->|     |<-- UnscaledClassSpaceMax = 4GB -->|
  //
  const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
#else
  // We don't support archives larger than 256MB on 32-bit due to limited virtual address space.
  size_t cds_total = align_down(256*M, reserve_alignment);
#endif

  // First try to reserve the space at the specified SharedBaseAddress.
  //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages, shared_base);
  reserve_shared_rs(cds_total, reserve_alignment, large_pages, shared_base);
  if (_shared_rs.is_reserved()) {
    assert(shared_base == 0 || _shared_rs.base() == shared_base, "should match");
  } else {
    // Get an mmap region anywhere if reserving at SharedBaseAddress fails.
    //_shared_rs = ReservedSpace(cds_total, reserve_alignment, large_pages);
    reserve_shared_rs(cds_total, reserve_alignment, large_pages, NULL);
  }
  if (!_shared_rs.is_reserved()) {
    vm_exit_during_initialization("Unable to reserve memory for shared space",
                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
  }

#ifdef _LP64
  // During dump time, we allocate 4GB (UnscaledClassSpaceMax) of space and split it up:
  // + The upper 1 GB is used as the "temporary compressed class space" -- preload_classes()
  //   will store Klasses into this space.
  // + The lower 3 GB is used for the archive -- when preload_classes() is done,
  //   ArchiveCompactor will copy the class metadata into this space, first the RW parts,
  //   then the RO parts.

  assert(UseCompressedOops && UseCompressedClassPointers,
      "UseCompressedOops and UseCompressedClassPointers must be set");

  size_t max_archive_size = align_down(cds_total * 3 / 4, reserve_alignment);
  ReservedSpace tmp_class_space = _shared_rs.last_part(max_archive_size);
  CompressedClassSpaceSize = align_down(tmp_class_space.size(), reserve_alignment);
  _shared_rs = _shared_rs.first_part(max_archive_size);

  // Set up compressed class pointers.
  CompressedKlassPointers::set_base((address)_shared_rs.base());
  // Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
  // with AOT.
  CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes);
  // Set the range of klass addresses to 4GB.
  CompressedKlassPointers::set_range(cds_total);

  Metaspace::initialize_class_space(tmp_class_space);
  log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
                p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());

  log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                CompressedClassSpaceSize, p2i(tmp_class_space.base()));
#endif

  init_shared_dump_space(&_mc_region);
  SharedBaseAddress = (size_t)_shared_rs.base();
  tty->print_cr("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
                _shared_rs.size(), p2i(_shared_rs.base()));
}

// Called by universe_post_init()
void MetaspaceShared::post_initialize(TRAPS) {
  if (UseSharedSpaces) {
    int size = FileMapInfo::get_number_of_shared_paths();
    if (size > 0) {
      SystemDictionaryShared::allocate_shared_data_arrays(size, THREAD);
      if (!DynamicDumpSharedSpaces) {
        FileMapHeader* header;
        if (FileMapInfo::dynamic_info() == NULL) {
          header = FileMapInfo::current_info()->header();
        } else {
          header = FileMapInfo::dynamic_info()->header();
        }
        ClassLoaderExt::init_paths_start_index(header->_app_class_paths_start_index);
        ClassLoaderExt::init_app_module_paths_start_index(header->_app_module_paths_start_index);
      }
    }
  }
}

static GrowableArray<Handle>* _extra_interned_strings = NULL;

void MetaspaceShared::read_extra_data(const char* filename, TRAPS) {
  _extra_interned_strings = new (ResourceObj::C_HEAP, mtInternal)GrowableArray<Handle>(10000, true);

  HashtableTextDump reader(filename);
  reader.check_version("VERSION: 1.0");

  while (reader.remain() > 0) {
    int utf8_length;
    int prefix_type = reader.scan_prefix(&utf8_length);
    ResourceMark rm(THREAD);
    if (utf8_length == 0x7fffffff) {
      // buf_len would overflow the 32-bit int range.
      vm_exit_during_initialization(err_msg("string length too large: %d", utf8_length));
    }
    int buf_len = utf8_length+1;
    char* utf8_buffer = NEW_RESOURCE_ARRAY(char, buf_len);
    reader.get_utf8(utf8_buffer, utf8_length);
    utf8_buffer[utf8_length] = '\0';

    if (prefix_type == HashtableTextDump::SymbolPrefix) {
      SymbolTable::new_permanent_symbol(utf8_buffer);
    } else {
      assert(prefix_type == HashtableTextDump::StringPrefix, "Sanity");
      oop s = StringTable::intern(utf8_buffer, THREAD);

      if (HAS_PENDING_EXCEPTION) {
        log_warning(cds, heap)("[line %d] extra interned string allocation failed; size too large: %d",
                               reader.last_line_no(), utf8_length);
        CLEAR_PENDING_EXCEPTION;
      } else {
#if INCLUDE_G1GC
        if (UseG1GC) {
          typeArrayOop body = java_lang_String::value(s);
          const HeapRegion* hr = G1CollectedHeap::heap()->heap_region_containing(body);
          if (hr->is_humongous()) {
            // Don't keep it alive, so it will be GC'ed before we dump the strings, in order
            // to maximize free heap space and minimize fragmentation.
            log_warning(cds, heap)("[line %d] extra interned string ignored; size too large: %d",
                                reader.last_line_no(), utf8_length);
            continue;
          }
        }
#endif
        // Interned strings are GC'ed if there are no references to them, so let's
        // add a reference to keep this string alive.
        assert(s != NULL, "must succeed");
        Handle h(THREAD, s);
        _extra_interned_strings->append(h);
      }
    }
  }
}

void MetaspaceShared::commit_shared_space_to(char* newtop) {
  assert(DumpSharedSpaces || DynamicDumpSharedSpaces, "dump-time only");
  char* base = _shared_rs.base();
  size_t need_committed_size = newtop - base;
  size_t has_committed_size = _shared_vs.committed_size();
  if (need_committed_size < has_committed_size) {
    return;
  }

  size_t min_bytes = need_committed_size - has_committed_size;
  size_t preferred_bytes = 1 * M;
  size_t uncommitted = _shared_vs.reserved_size() - has_committed_size;

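  // Commit at least min_bytes, but prefer 1 MB chunks to limit the number of
  // expand_by() calls. Worked example (hypothetical numbers): with 3 MB already
  // committed and need_committed_size == 3.2 MB, min_bytes is 0.2 MB, so we
  // commit MAX2(0.2 MB, 1 MB) = 1 MB, capped at the uncommitted remainder.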
  size_t commit = MAX2(min_bytes, preferred_bytes);
  commit = MIN2(commit, uncommitted);
  assert(commit <= uncommitted, "sanity");

  bool result = _shared_vs.expand_by(commit, false);
  if (!result) {
    vm_exit_during_initialization(err_msg("Failed to expand shared space to " SIZE_FORMAT " bytes",
                                          need_committed_size));
  }

  log_info(cds)("Expanding shared spaces by " SIZE_FORMAT_W(7) " bytes [total " SIZE_FORMAT_W(9) " bytes ending at %p]",
                commit, _shared_vs.actual_committed_size(), _shared_vs.high());
}

// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.

void MetaspaceShared::serialize(SerializeClosure* soc) {
  int tag = 0;
  soc->do_tag(--tag);

  // Verify the sizes of various metadata in the system.
  soc->do_tag(sizeof(Method));
  soc->do_tag(sizeof(ConstMethod));
  soc->do_tag(arrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(ConstantPool));
  soc->do_tag(sizeof(ConstantPoolCache));
  soc->do_tag(objArrayOopDesc::base_offset_in_bytes());
  soc->do_tag(typeArrayOopDesc::base_offset_in_bytes(T_BYTE));
  soc->do_tag(sizeof(Symbol));

  // Dump/restore miscellaneous metadata.
  Universe::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore references to commonly used names and signatures.
  vmSymbols::serialize(soc);
  soc->do_tag(--tag);

  // Dump/restore the symbol/string/subgraph_info tables
  SymbolTable::serialize_shared_table_header(soc);
  StringTable::serialize_shared_table_header(soc);
  HeapShared::serialize_subgraph_info_table_header(soc);
  SystemDictionaryShared::serialize_dictionary_headers(soc);

  JavaClasses::serialize_offsets(soc);
  InstanceMirrorKlass::serialize_offsets(soc);
  soc->do_tag(--tag);

  serialize_cloned_cpp_vtptrs(soc);
  soc->do_tag(--tag);

  soc->do_tag(666);
}

address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) {
  if (DumpSharedSpaces) {
    if (_cds_i2i_entry_code_buffers == NULL) {
      _cds_i2i_entry_code_buffers = (address)misc_code_space_alloc(total_size);
      _cds_i2i_entry_code_buffers_size = total_size;
    }
  } else if (UseSharedSpaces) {
    assert(_cds_i2i_entry_code_buffers != NULL, "must already be initialized");
  } else {
    return NULL;
  }

  assert(_cds_i2i_entry_code_buffers_size == total_size, "must not change");
  return _cds_i2i_entry_code_buffers;
}

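// Returns the offset of obj from SharedBaseAddress; this base-relative delta,
// rather than a raw pointer, is what gets written into the archive. Worked
// example (hypothetical addresses): with SharedBaseAddress == 0x800000000 and
// obj at 0x800001000, the returned delta is 0x1000.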
uintx MetaspaceShared::object_delta_uintx(void* obj) {
  assert(DumpSharedSpaces || DynamicDumpSharedSpaces,
         "supported only for dumping");
  if (DumpSharedSpaces) {
    assert(shared_rs()->contains(obj), "must be");
  } else {
    assert(is_in_shared_metaspace(obj) || DynamicArchive::is_in_target_space(obj), "must be");
  }
  address base_address = address(SharedBaseAddress);
  uintx deltax = address(obj) - base_address;
  return deltax;
}

// Global object for holding classes that have been loaded.  Since this
// is run at a safepoint just before exit, this is the entire set of classes.
static GrowableArray<Klass*>* _global_klass_objects;

GrowableArray<Klass*>* MetaspaceShared::collected_klasses() {
  return _global_klass_objects;
}

static void collect_array_classes(Klass* k) {
  _global_klass_objects->append_if_missing(k);
  if (k->is_array_klass()) {
    // Add in the array classes too
    ArrayKlass* ak = ArrayKlass::cast(k);
    Klass* h = ak->higher_dimension();
    if (h != NULL) {
      h->array_klasses_do(collect_array_classes);
    }
  }
}

class CollectClassesClosure : public KlassClosure {
  void do_klass(Klass* k) {
    if (k->is_instance_klass() &&
        SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k))) {
      // Don't add to the _global_klass_objects
    } else {
      _global_klass_objects->append_if_missing(k);
    }
    if (k->is_array_klass()) {
      // Add in the array classes too
      ArrayKlass* ak = ArrayKlass::cast(k);
      Klass* h = ak->higher_dimension();
      if (h != NULL) {
        h->array_klasses_do(collect_array_classes);
      }
    }
  }
};

static void remove_unshareable_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_unshareable_info();
    }
  }
}

static void remove_java_mirror_in_classes() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (!k->is_objArray_klass()) {
      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
      // on their array classes.
      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
      k->remove_java_mirror();
    }
  }
}

static void clear_basic_type_mirrors() {
  assert(!HeapShared::is_heap_object_archiving_allowed(), "Sanity");
  Universe::set_int_mirror(NULL);
  Universe::set_float_mirror(NULL);
  Universe::set_double_mirror(NULL);
  Universe::set_byte_mirror(NULL);
  Universe::set_bool_mirror(NULL);
  Universe::set_char_mirror(NULL);
  Universe::set_long_mirror(NULL);
  Universe::set_short_mirror(NULL);
  Universe::set_void_mirror(NULL);
}

static void rewrite_nofast_bytecode(Method* method) {
  BytecodeStream bcs(method);
  while (!bcs.is_last_bytecode()) {
    Bytecodes::Code opcode = bcs.next();
    switch (opcode) {
    case Bytecodes::_getfield:      *bcs.bcp() = Bytecodes::_nofast_getfield;      break;
    case Bytecodes::_putfield:      *bcs.bcp() = Bytecodes::_nofast_putfield;      break;
    case Bytecodes::_aload_0:       *bcs.bcp() = Bytecodes::_nofast_aload_0;       break;
    case Bytecodes::_iload: {
      if (!bcs.is_wide()) {
        *bcs.bcp() = Bytecodes::_nofast_iload;
      }
      break;
    }
    default: break;
    }
  }
}
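
// For example, rewrite_nofast_bytecode() overwrites a getfield in place with
// _nofast_getfield; only the opcode byte changes, so the operands and the
// length of the bytecode stream stay the same and no offsets need fixing up.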

// Walk all methods in the class list to ensure that they won't be modified at
// run time. This includes:
// [1] Rewrite all bytecodes as needed, so that the ConstMethod* will not be modified
//     at run time by RewriteBytecodes/RewriteFrequentPairs
// [2] Assign a fingerprint, so one doesn't need to be assigned at run-time.
static void rewrite_nofast_bytecodes_and_calculate_fingerprints() {
  for (int i = 0; i < _global_klass_objects->length(); i++) {
    Klass* k = _global_klass_objects->at(i);
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(ik);
    }
  }
}

void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(InstanceKlass* ik) {
  for (int i = 0; i < ik->methods()->length(); i++) {
    Method* m = ik->methods()->at(i);
    rewrite_nofast_bytecode(m);
    Fingerprinter fp(m);
    // The side effect of this call sets the method's fingerprint field.
    fp.fingerprint();
  }
}

// Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
// (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
//
// Addresses of the vtables and the methods may be different across JVM runs,
// if libjvm.so is dynamically loaded at a different base address.
//
// To ensure that the Metadata objects in the CDS archive always have the correct vtable:
//
// + at dump time:  we redirect the _vptr to point to our own vtables inside
//                  the CDS image
// + at run time:   we clone the actual contents of the vtables from libjvm.so
//                  into our own tables.
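//
// For example (illustrative): after CppVtableCloner<InstanceKlass>::patch(ik),
// the first word of ik points to the cloned vtable in the md region rather
// than into libjvm.so, so the archived object does not depend on where
// libjvm.so happens to be loaded.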

// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_PATCH_TYPES_DO(f) \
  f(ConstantPool) \
  f(InstanceKlass) \
  f(InstanceClassLoaderKlass) \
  f(InstanceMirrorKlass) \
  f(InstanceRefKlass) \
  f(Method) \
  f(ObjArrayKlass) \
  f(TypeArrayKlass)

class CppVtableInfo {
  intptr_t _vtable_size;
  intptr_t _cloned_vtable[1];
public:
  static int num_slots(int vtable_size) {
    return 1 + vtable_size; // Need to add the space occupied by _vtable_size.
  }
  int vtable_size()           { return int(uintx(_vtable_size)); }
  void set_vtable_size(int n) { _vtable_size = intptr_t(n); }
  intptr_t* cloned_vtable()   { return &_cloned_vtable[0]; }
  void zero()                 { memset(_cloned_vtable, 0, sizeof(intptr_t) * vtable_size()); }
  // Returns the size, in bytes, of a CppVtableInfo with the given vtable_size,
  // i.e., the offset at which the next CppVtableInfo can be placed.
  static size_t byte_size(int vtable_size) {
    CppVtableInfo i;
    return pointer_delta(&i._cloned_vtable[vtable_size], &i, sizeof(u1));
  }
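  // e.g., byte_size(n) == (n + 1) * sizeof(intptr_t), assuming the compiler
  // inserts no padding between _vtable_size and _cloned_vtable.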
};

template <class T> class CppVtableCloner : public T {
  static intptr_t* vtable_of(Metadata& m) {
    return *((intptr_t**)&m);
  }
  static CppVtableInfo* _info;

  static int get_vtable_length(const char* name);

public:
  // Allocate a CppVtableInfo in the md region and initialize its contents
  // from T's vtable.
  static intptr_t* allocate(const char* name);

  // Clone T's vtable into info->cloned_vtable(), returning the address just
  // past the copied entries.
  static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);

  static void zero_vtable_clone() {
    assert(DumpSharedSpaces, "dump-time only");
    _info->zero();
  }

  // Switch the vtable pointer to point to the cloned vtable.
  static void patch(Metadata* obj) {
    assert(DumpSharedSpaces, "dump-time only");
    *(void**)obj = (void*)(_info->cloned_vtable());
  }

  static bool is_valid_shared_object(const T* obj) {
    intptr_t* vptr = *(intptr_t**)obj;
    return vptr == _info->cloned_vtable();
  }
};

template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;

template <class T>
intptr_t* CppVtableCloner<T>::allocate(const char* name) {
  assert(is_aligned(_md_region.top(), sizeof(intptr_t)), "bad alignment");
  int n = get_vtable_length(name);
  _info = (CppVtableInfo*)_md_region.allocate(CppVtableInfo::byte_size(n), sizeof(intptr_t));
  _info->set_vtable_size(n);

  intptr_t* p = clone_vtable(name, _info);
  assert((char*)p == _md_region.top(), "must be");

  return _info->cloned_vtable();
}

template <class T>
intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
  if (!DumpSharedSpaces) {
    assert(_info == 0, "_info is initialized only at dump time");
    _info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
  }
  T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
  int n = info->vtable_size();
  intptr_t* srcvtable = vtable_of(tmp);
  intptr_t* dstvtable = info->cloned_vtable();

  // We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
  // safe to do memcpy.
  log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
  memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
  return dstvtable + n;
}

// To determine the size of the vtable for each type, we use the following
// trick by declaring 2 subclasses:
//
//   class CppVtableTesterA: public InstanceKlass {virtual void* last_virtual_method() {return NULL;} };
//   class CppVtableTesterB: public InstanceKlass {virtual int   last_virtual_method() {return 1;}    };
//
// CppVtableTesterA and CppVtableTesterB's vtables have the following properties:
// - Their size (N+1) is exactly one more than the size of InstanceKlass's vtable (N)
// - The first N entries are exactly the same as in InstanceKlass's vtable.
// - Their last entry is different.
//
// So to determine the value of N, we just walk CppVtableTesterA and CppVtableTesterB's tables
// and find the first entry that's different.
//
// This works on all C++ compilers supported by Oracle, but you may need to tweak it for more
// esoteric compilers.

template <class T> class CppVtableTesterB: public T {
public:
  virtual int last_virtual_method() {return 1;}
};

template <class T> class CppVtableTesterA : public T {
public:
  virtual void* last_virtual_method() {
    // Make this different than CppVtableTesterB::last_virtual_method so the C++
    // compiler/linker won't alias the two functions.
    return NULL;
  }
};

template <class T>
int CppVtableCloner<T>::get_vtable_length(const char* name) {
  CppVtableTesterA<T> a;
  CppVtableTesterB<T> b;

  intptr_t* avtable = vtable_of(a);
  intptr_t* bvtable = vtable_of(b);

  // Start at slot 1, because slot 0 may be RTTI (on Solaris/Sparc)
  int vtable_len = 1;
  for (; ; vtable_len++) {
    if (avtable[vtable_len] != bvtable[vtable_len]) {
      break;
    }
  }
  log_debug(cds, vtables)("Found   %3d vtable entries for %s", vtable_len, name);

  return vtable_len;
}

#define ALLOC_CPP_VTABLE_CLONE(c) \
  _cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c);

#define CLONE_CPP_VTABLE(c) \
  p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);

#define ZERO_CPP_VTABLE(c) \
  CppVtableCloner<c>::zero_vtable_clone();

//------------------------------ for DynamicDumpSharedSpaces - start
#define DECLARE_CLONED_VTABLE_KIND(c) c ## _Kind,

enum {
  CPP_VTABLE_PATCH_TYPES_DO(DECLARE_CLONED_VTABLE_KIND)
  _num_cloned_vtable_kinds
};

static intptr_t** _cloned_cpp_vtptrs = NULL;

void MetaspaceShared::serialize_cloned_cpp_vtptrs(SerializeClosure* soc) {
  soc->do_ptr((void**)&_cloned_cpp_vtptrs);
}

intptr_t* MetaspaceShared::fix_cpp_vtable_for_dynamic_archive(MetaspaceObj::Type msotype, address obj) {
  assert(DynamicDumpSharedSpaces, "must");
  int kind = -1;
  switch (msotype) {
  case MetaspaceObj::SymbolType:
  case MetaspaceObj::TypeArrayU1Type:
  case MetaspaceObj::TypeArrayU2Type:
  case MetaspaceObj::TypeArrayU4Type:
  case MetaspaceObj::TypeArrayU8Type:
  case MetaspaceObj::TypeArrayOtherType:
  case MetaspaceObj::ConstMethodType:
  case MetaspaceObj::ConstantPoolCacheType:
  case MetaspaceObj::AnnotationsType:
  case MetaspaceObj::MethodCountersType:
    // These have no vtables.
    break;
  case MetaspaceObj::ClassType:
    {
      Klass* k = (Klass*)obj;
      assert(k->is_klass(), "must be");
      if (k->is_instance_klass()) {
        kind = InstanceKlass_Kind;
      } else {
        assert(k->is_objArray_klass(),
               "We shouldn't archive any other klasses in DynamicDumpSharedSpaces");
        kind = ObjArrayKlass_Kind;
      }
    }
    break;

  case MetaspaceObj::MethodType:
    {
      Method* m = (Method*)obj;
      assert(m->is_method(), "must be");
      kind = Method_Kind;
    }
    break;

  case MetaspaceObj::MethodDataType:
    // We don't archive MethodData <-- should have been removed in remove_unshareable_info
    ShouldNotReachHere();
    break;

  case MetaspaceObj::ConstantPoolType:
    {
      ConstantPool *cp = (ConstantPool*)obj;
      assert(cp->is_constantPool(), "must be");
      kind = ConstantPool_Kind;
    }
    break;

  default:
    ShouldNotReachHere();
  }

  if (kind >= 0) {
    assert(kind < _num_cloned_vtable_kinds, "must be");
    return _cloned_cpp_vtptrs[kind];
  } else {
    return NULL;
  }
}

//------------------------------ for DynamicDumpSharedSpaces - end

// This can be called at both dump time and run time.
intptr_t* MetaspaceShared::clone_cpp_vtables(intptr_t* p) {
  assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
  CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
  return p;
}

void MetaspaceShared::zero_cpp_vtable_clones_for_writing() {
  assert(DumpSharedSpaces, "dump-time only");
  CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
}

// Allocate and initialize the C++ vtable clones in the md region.
void MetaspaceShared::allocate_cpp_vtable_clones() {
  assert(DumpSharedSpaces, "dump-time only");
  // Layout (each slot is an intptr_t):
  //   [number of slots in the first vtable = n1]
  //   [ <n1> slots for the first vtable]
  //   [number of slots in the second vtable = n2]
  //   [ <n2> slots for the second vtable]
  //   ...
  // The order of the vtables is the same as the CPP_VTABLE_PATCH_TYPES_DO macro.
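  // Worked example (hypothetical sizes): if ConstantPool's vtable has 12
  // entries and InstanceKlass's has 80, the md region would begin with
  // [12][12 slots][80][80 slots]..., continuing through every type listed in
  // CPP_VTABLE_PATCH_TYPES_DO, in declaration order.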
  CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);
}

// Switch the vtable pointer to point to the cloned vtable. We assume the
// vtable pointer is in the first slot of the object.
void MetaspaceShared::patch_cpp_vtable_pointers() {
  int n = _global_klass_objects->length();
  for (int i = 0; i < n; i++) {
    Klass* obj = _global_klass_objects->at(i);
    if (obj->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(obj);
      if (ik->is_class_loader_instance_klass()) {
        CppVtableCloner<InstanceClassLoaderKlass>::patch(ik);
      } else if (ik->is_reference_instance_klass()) {
        CppVtableCloner<InstanceRefKlass>::patch(ik);
      } else if (ik->is_mirror_instance_klass()) {
        CppVtableCloner<InstanceMirrorKlass>::patch(ik);
      } else {
        CppVtableCloner<InstanceKlass>::patch(ik);
      }
      ConstantPool* cp = ik->constants();
      CppVtableCloner<ConstantPool>::patch(cp);
      for (int j = 0; j < ik->methods()->length(); j++) {
        Method* m = ik->methods()->at(j);
        CppVtableCloner<Method>::patch(m);
        assert(CppVtableCloner<Method>::is_valid_shared_object(m), "must be");
      }
    } else if (obj->is_objArray_klass()) {
      CppVtableCloner<ObjArrayKlass>::patch(obj);
    } else {
      assert(obj->is_typeArray_klass(), "sanity");
      CppVtableCloner<TypeArrayKlass>::patch(obj);
    }
  }
}

bool MetaspaceShared::is_valid_shared_method(const Method* m) {
  assert(is_in_shared_metaspace(m), "must be");
  return CppVtableCloner<Method>::is_valid_shared_object(m);
}

void WriteClosure::do_oop(oop* o) {
  if (*o == NULL) {
    _dump_region->append_intptr_t(0);
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archiving heap object is not allowed");
    _dump_region->append_intptr_t(
      (intptr_t)CompressedOops::encode_not_null(*o));
  }
}

void WriteClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    _dump_region->append_intptr_t(*(intptr_t*)start);
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}

// This is for dumping detailed statistics for the allocations
// in the shared spaces.
class DumpAllocStats : public ResourceObj {
public:

  // Here's poor man's enum inheritance
#define SHAREDSPACE_OBJ_TYPES_DO(f) \
  METASPACE_OBJ_TYPES_DO(f) \
  f(SymbolHashentry) \
  f(SymbolBucket) \
  f(StringHashentry) \
  f(StringBucket) \
  f(Other)

  enum Type {
    // Types are MetaspaceObj::ClassType, MetaspaceObj::SymbolType, etc
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_DECLARE)
    _number_of_types
  };

  static const char * type_name(Type type) {
    switch(type) {
    SHAREDSPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
    default:
      ShouldNotReachHere();
      return NULL;
    }
  }

public:
  enum { RO = 0, RW = 1 };

  int _counts[2][_number_of_types];
  int _bytes [2][_number_of_types];

  DumpAllocStats() {
    memset(_counts, 0, sizeof(_counts));
    memset(_bytes,  0, sizeof(_bytes));
  };

  void record(MetaspaceObj::Type type, int byte_size, bool read_only) {
    assert(int(type) >= 0 && type < MetaspaceObj::_number_of_types, "sanity");
    int which = (read_only) ? RO : RW;
    _counts[which][type] ++;
    _bytes [which][type] += byte_size;
  }

  void record_other_type(int byte_size, bool read_only) {
    int which = (read_only) ? RO : RW;
    _bytes [which][OtherType] += byte_size;
  }
  void print_stats(int ro_all, int rw_all, int mc_all, int md_all);
};

void DumpAllocStats::print_stats(int ro_all, int rw_all, int mc_all, int md_all) {
  // Calculate size of data that was not allocated by Metaspace::allocate()
  MetaspaceSharedStats *stats = MetaspaceShared::stats();

  // symbols
  _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
  _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;

  _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
  _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;

  // strings
  _counts[RO][StringHashentryType] = stats->string.hashentry_count;
  _bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;

  _counts[RO][StringBucketType] = stats->string.bucket_count;
  _bytes [RO][StringBucketType] = stats->string.bucket_bytes;

  // TODO: count things like dictionary, vtable, etc
  _bytes[RW][OtherType] += mc_all + md_all;
  rw_all += mc_all + md_all; // mc/md are mapped Read/Write

  // prevent divide-by-zero
  if (ro_all < 1) {
    ro_all = 1;
  }
  if (rw_all < 1) {
    rw_all = 1;
  }

  int all_ro_count = 0;
  int all_ro_bytes = 0;
  int all_rw_count = 0;
  int all_rw_bytes = 0;

// To make fmt_stats be a syntactic constant (for format warnings), use #define.
#define fmt_stats "%-20s: %8d %10d %5.1f | %8d %10d %5.1f | %8d %10d %5.1f"
  const char *sep = "--------------------+---------------------------+---------------------------+--------------------------";
  const char *hdr = "                        ro_cnt   ro_bytes     % |   rw_cnt   rw_bytes     % |  all_cnt  all_bytes     %";

  LogMessage(cds) msg;

  msg.info("Detailed metadata info (excluding st regions; rw stats include md/mc regions):");
  msg.info("%s", hdr);
  msg.info("%s", sep);
  for (int type = 0; type < int(_number_of_types); type ++) {
    const char *name = type_name((Type)type);
    int ro_count = _counts[RO][type];
    int ro_bytes = _bytes [RO][type];
    int rw_count = _counts[RW][type];
    int rw_bytes = _bytes [RW][type];
    int count = ro_count + rw_count;
    int bytes = ro_bytes + rw_bytes;

    double ro_perc = percent_of(ro_bytes, ro_all);
    double rw_perc = percent_of(rw_bytes, rw_all);
    double perc    = percent_of(bytes, ro_all + rw_all);

    msg.info(fmt_stats, name,
                         ro_count, ro_bytes, ro_perc,
                         rw_count, rw_bytes, rw_perc,
                         count, bytes, perc);

    all_ro_count += ro_count;
    all_ro_bytes += ro_bytes;
    all_rw_count += rw_count;
    all_rw_bytes += rw_bytes;
  }

  int all_count = all_ro_count + all_rw_count;
  int all_bytes = all_ro_bytes + all_rw_bytes;

  double all_ro_perc = percent_of(all_ro_bytes, ro_all);
  double all_rw_perc = percent_of(all_rw_bytes, rw_all);
  double all_perc    = percent_of(all_bytes, ro_all + rw_all);

  msg.info("%s", sep);
  msg.info(fmt_stats, "Total",
                       all_ro_count, all_ro_bytes, all_ro_perc,
                       all_rw_count, all_rw_bytes, all_rw_perc,
                       all_count, all_bytes, all_perc);

  assert(all_ro_bytes == ro_all, "everything should have been counted");
  assert(all_rw_bytes == rw_all, "everything should have been counted");

#undef fmt_stats
}

// Populate the shared space.

class VM_PopulateDumpSharedSpace: public VM_Operation {
private:
  GrowableArray<MemRegion> *_closed_archive_heap_regions;
  GrowableArray<MemRegion> *_open_archive_heap_regions;

  GrowableArray<ArchiveHeapOopmapInfo> *_closed_archive_heap_oopmaps;
  GrowableArray<ArchiveHeapOopmapInfo> *_open_archive_heap_oopmaps;

  void dump_java_heap_objects() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps() NOT_CDS_JAVA_HEAP_RETURN;
  void dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions,
                                 GrowableArray<ArchiveHeapOopmapInfo>* oopmaps);
  void dump_symbols();
  char* dump_read_only_tables();
  void print_region_stats();
  void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                               const char *name, const size_t total_size);
public:

  VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
  void doit();   // outline because gdb sucks
  static void write_region(FileMapInfo* mapinfo, int region, DumpRegion* space, bool read_only, bool allow_exec);
  bool allow_nested_vm_operations() const { return true; }
}; // class VM_PopulateDumpSharedSpace

class SortedSymbolClosure: public SymbolClosure {
  GrowableArray<Symbol*> _symbols;
  virtual void do_symbol(Symbol** sym) {
    assert((*sym)->is_permanent(), "archived symbols must be permanent");
    _symbols.append(*sym);
  }
  static int compare_symbols_by_address(Symbol** a, Symbol** b) {
    if (a[0] < b[0]) {
      return -1;
    } else if (a[0] == b[0]) {
      return 0;
    } else {
      return 1;
    }
  }

public:
  SortedSymbolClosure() {
    SymbolTable::symbols_do(this);
    _symbols.sort(compare_symbols_by_address);
  }
  GrowableArray<Symbol*>* get_sorted_symbols() {
    return &_symbols;
  }
};

// ArchiveCompactor --
//
// This class is the central piece of shared archive compaction -- all metaspace data are
// initially allocated outside of the shared regions. ArchiveCompactor copies the
// metaspace data into their final location in the shared regions.
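//
// The copy happens in two passes over the metadata graph (see ShallowCopier
// below): first the read-write objects are copied into the rw region, then the
// read-only objects into the ro region, matching steps [2] and [3] of the
// region layout comment near the top of this file. Embedded pointers in the
// copies are fixed up afterwards by ShallowCopyEmbeddedRefRelocator.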
| 1175 |  | 
| 1176 | class ArchiveCompactor : AllStatic { | 
| 1177 |   static const int INITIAL_TABLE_SIZE = 8087; | 
| 1178 |   static const int MAX_TABLE_SIZE     = 1000000; | 
| 1179 |  | 
| 1180 |   static DumpAllocStats* _alloc_stats; | 
| 1181 |   static SortedSymbolClosure* _ssc; | 
| 1182 |  | 
| 1183 |   typedef KVHashtable<address, address, mtInternal> RelocationTable; | 
| 1184 |   static RelocationTable* _new_loc_table; | 
| 1185 |  | 
| 1186 | public: | 
| 1187 |   static void initialize() { | 
| 1188 |     _alloc_stats = new(ResourceObj::C_HEAP, mtInternal)DumpAllocStats; | 
| 1189 |     _new_loc_table = new RelocationTable(INITIAL_TABLE_SIZE); | 
| 1190 |   } | 
| 1191 |   static DumpAllocStats* alloc_stats() { | 
| 1192 |     return _alloc_stats; | 
| 1193 |   } | 
| 1194 |  | 
| 1195 |   // Use this when you allocate space with MetaspaceShare::read_only_space_alloc() | 
| 1196 |   // outside of ArchiveCompactor::allocate(). These are usually for misc tables | 
| 1197 |   // that are allocated in the RO space. | 
| 1198 |   class OtherROAllocMark { | 
| 1199 |     char* _oldtop; | 
| 1200 |   public: | 
| 1201 |     OtherROAllocMark() { | 
| 1202 |       _oldtop = _ro_region.top(); | 
| 1203 |     } | 
| 1204 |     ~OtherROAllocMark() { | 
| 1205 |       char* newtop = _ro_region.top(); | 
| 1206 |       ArchiveCompactor::alloc_stats()->record_other_type(int(newtop - _oldtop), true); | 
| 1207 |     } | 
| 1208 |   }; | 
| 1209 |  | 
| 1210 |   static void allocate(MetaspaceClosure::Ref* ref, bool read_only) { | 
| 1211 |     address obj = ref->obj(); | 
| 1212 |     int bytes = ref->size() * BytesPerWord; | 
| 1213 |     char* p; | 
| 1214 |     size_t alignment = BytesPerWord; | 
| 1215 |     char* oldtop; | 
| 1216 |     char* newtop; | 
| 1217 |  | 
| 1218 |     if (read_only) { | 
| 1219 |       oldtop = _ro_region.top(); | 
| 1220 |       p = _ro_region.allocate(bytes, alignment); | 
| 1221 |       newtop = _ro_region.top(); | 
| 1222 |     } else { | 
| 1223 |       oldtop = _rw_region.top(); | 
| 1224 |       if (ref->msotype() == MetaspaceObj::ClassType) { | 
        // Save a pointer immediately in front of an InstanceKlass, so
| 1226 |         // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo* | 
| 1227 |         // without building another hashtable. See RunTimeSharedClassInfo::get_for() | 
| 1228 |         // in systemDictionaryShared.cpp. | 
| 1229 |         Klass* klass = (Klass*)obj; | 
| 1230 |         if (klass->is_instance_klass()) { | 
| 1231 |           SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass)); | 
| 1232 |           _rw_region.allocate(sizeof(address), BytesPerWord); | 
| 1233 |         } | 
| 1234 |       } | 
| 1235 |       p = _rw_region.allocate(bytes, alignment); | 
| 1236 |       newtop = _rw_region.top(); | 
| 1237 |     } | 
| 1238 |     memcpy(p, obj, bytes); | 
    assert(_new_loc_table->lookup(obj) == NULL, "each object can be relocated at most once");
    _new_loc_table->add(obj, (address)p);
    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
    if (_new_loc_table->maybe_grow(MAX_TABLE_SIZE)) {
      log_info(cds, hashtables)("Expanded _new_loc_table to %d", _new_loc_table->table_size());
| 1244 |     } | 
| 1245 |     _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only); | 
| 1246 |   } | 
| 1247 |  | 
| 1248 |   static address get_new_loc(MetaspaceClosure::Ref* ref) { | 
| 1249 |     address* pp = _new_loc_table->lookup(ref->obj()); | 
    assert(pp != NULL, "must be");
| 1251 |     return *pp; | 
| 1252 |   } | 
| 1253 |  | 
| 1254 | private: | 
| 1255 |   // Makes a shallow copy of visited MetaspaceObj's | 
| 1256 |   class ShallowCopier: public UniqueMetaspaceClosure { | 
| 1257 |     bool _read_only; | 
| 1258 |   public: | 
| 1259 |     ShallowCopier(bool read_only) : _read_only(read_only) {} | 
| 1260 |  | 
| 1261 |     virtual bool do_unique_ref(Ref* ref, bool read_only) { | 
| 1262 |       if (read_only == _read_only) { | 
| 1263 |         allocate(ref, read_only); | 
| 1264 |       } | 
| 1265 |       return true; // recurse into ref.obj() | 
| 1266 |     } | 
| 1267 |   }; | 
| 1268 |  | 
| 1269 |   // Relocate embedded pointers within a MetaspaceObj's shallow copy | 
| 1270 |   class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure { | 
| 1271 |   public: | 
| 1272 |     virtual bool do_unique_ref(Ref* ref, bool read_only) { | 
| 1273 |       address new_loc = get_new_loc(ref); | 
| 1274 |       RefRelocator refer; | 
| 1275 |       ref->metaspace_pointers_do_at(&refer, new_loc); | 
| 1276 |       return true; // recurse into ref.obj() | 
| 1277 |     } | 
| 1278 |   }; | 
| 1279 |  | 
| 1280 |   // Relocate a reference to point to its shallow copy | 
| 1281 |   class RefRelocator: public MetaspaceClosure { | 
| 1282 |   public: | 
| 1283 |     virtual bool do_ref(Ref* ref, bool read_only) { | 
| 1284 |       if (ref->not_null()) { | 
| 1285 |         ref->update(get_new_loc(ref)); | 
| 1286 |       } | 
| 1287 |       return false; // Do not recurse. | 
| 1288 |     } | 
| 1289 |   }; | 
| 1290 |  | 
| 1291 | #ifdef ASSERT | 
| 1292 |   class IsRefInArchiveChecker: public MetaspaceClosure { | 
| 1293 |   public: | 
| 1294 |     virtual bool do_ref(Ref* ref, bool read_only) { | 
| 1295 |       if (ref->not_null()) { | 
| 1296 |         char* obj = (char*)ref->obj(); | 
      assert(_ro_region.contains(obj) || _rw_region.contains(obj),
             "must be relocated to point to CDS archive");
| 1299 |       } | 
| 1300 |       return false; // Do not recurse. | 
| 1301 |     } | 
| 1302 |   }; | 
| 1303 | #endif | 
| 1304 |  | 
| 1305 | public: | 
| 1306 |   static void copy_and_compact() { | 
| 1307 |     ResourceMark rm; | 
| 1308 |     SortedSymbolClosure the_ssc; // StackObj | 
| 1309 |     _ssc = &the_ssc; | 
| 1310 |  | 
| 1311 |     tty->print_cr("Scanning all metaspace objects ... " ); | 
| 1312 |     { | 
| 1313 |       // allocate and shallow-copy RW objects, immediately following the MC region | 
| 1314 |       tty->print_cr("Allocating RW objects ... " ); | 
| 1315 |       _mc_region.pack(&_rw_region); | 
| 1316 |  | 
| 1317 |       ResourceMark rm; | 
| 1318 |       ShallowCopier rw_copier(false); | 
| 1319 |       iterate_roots(&rw_copier); | 
| 1320 |     } | 
| 1321 |     { | 
      // allocate and shallow-copy RO objects, immediately following the RW region
      tty->print_cr("Allocating RO objects ... ");
| 1324 |       _rw_region.pack(&_ro_region); | 
| 1325 |  | 
| 1326 |       ResourceMark rm; | 
| 1327 |       ShallowCopier ro_copier(true); | 
| 1328 |       iterate_roots(&ro_copier); | 
| 1329 |     } | 
| 1330 |     { | 
| 1331 |       tty->print_cr("Relocating embedded pointers ... " ); | 
| 1332 |       ResourceMark rm; | 
| 1333 |       ShallowCopyEmbeddedRefRelocator emb_reloc; | 
| 1334 |       iterate_roots(&emb_reloc); | 
| 1335 |     } | 
| 1336 |     { | 
| 1337 |       tty->print_cr("Relocating external roots ... " ); | 
| 1338 |       ResourceMark rm; | 
| 1339 |       RefRelocator ext_reloc; | 
| 1340 |       iterate_roots(&ext_reloc); | 
| 1341 |     } | 
| 1342 |  | 
| 1343 | #ifdef ASSERT | 
| 1344 |     { | 
| 1345 |       tty->print_cr("Verifying external roots ... " ); | 
| 1346 |       ResourceMark rm; | 
| 1347 |       IsRefInArchiveChecker checker; | 
| 1348 |       iterate_roots(&checker); | 
| 1349 |     } | 
| 1350 | #endif | 
| 1351 |  | 
| 1352 |  | 
| 1353 |     // cleanup | 
| 1354 |     _ssc = NULL; | 
| 1355 |   } | 
| 1356 |  | 
  // We must relocate SystemDictionary::_well_known_klasses[] only after the java
  // objects have been copied by dump_java_heap_objects(): during the object copy,
  // we operate on old objects, which assert that their klass is the original klass.
| 1360 |   static void relocate_well_known_klasses() { | 
| 1361 |     { | 
| 1362 |       tty->print_cr("Relocating SystemDictionary::_well_known_klasses[] ... " ); | 
| 1363 |       ResourceMark rm; | 
| 1364 |       RefRelocator ext_reloc; | 
| 1365 |       SystemDictionary::well_known_klasses_do(&ext_reloc); | 
| 1366 |     } | 
| 1367 |     // NOTE: after this point, we shouldn't have any globals that can reach the old | 
| 1368 |     // objects. | 
| 1369 |  | 
| 1370 |     // We cannot use any of the objects in the heap anymore (except for the | 
| 1371 |     // shared strings) because their headers no longer point to valid Klasses. | 
| 1372 |   } | 
| 1373 |  | 
| 1374 |   static void iterate_roots(MetaspaceClosure* it) { | 
| 1375 |     GrowableArray<Symbol*>* symbols = _ssc->get_sorted_symbols(); | 
| 1376 |     for (int i=0; i<symbols->length(); i++) { | 
| 1377 |       it->push(symbols->adr_at(i)); | 
| 1378 |     } | 
| 1379 |     if (_global_klass_objects != NULL) { | 
| 1380 |       // Need to fix up the pointers | 
| 1381 |       for (int i = 0; i < _global_klass_objects->length(); i++) { | 
        // NOTE -- this requires that the vtable is NOT yet patched, or else the
        // relocation would be incorrect.
| 1383 |         it->push(_global_klass_objects->adr_at(i)); | 
| 1384 |       } | 
| 1385 |     } | 
| 1386 |     FileMapInfo::metaspace_pointers_do(it); | 
| 1387 |     SystemDictionaryShared::dumptime_classes_do(it); | 
| 1388 |     Universe::metaspace_pointers_do(it); | 
| 1389 |     SymbolTable::metaspace_pointers_do(it); | 
| 1390 |     vmSymbols::metaspace_pointers_do(it); | 
| 1391 |  | 
| 1392 |     it->finish(); | 
| 1393 |   } | 
| 1394 |  | 
| 1395 |   static Klass* get_relocated_klass(Klass* orig_klass) { | 
| 1396 |     assert(DumpSharedSpaces, "dump time only" ); | 
| 1397 |     address* pp = _new_loc_table->lookup((address)orig_klass); | 
| 1398 |     assert(pp != NULL, "must be" ); | 
| 1399 |     Klass* klass = (Klass*)(*pp); | 
| 1400 |     assert(klass->is_klass(), "must be" ); | 
| 1401 |     return klass; | 
| 1402 |   } | 
| 1403 | }; | 
| 1404 |  | 
| 1405 | DumpAllocStats* ArchiveCompactor::_alloc_stats; | 
| 1406 | SortedSymbolClosure* ArchiveCompactor::_ssc; | 
| 1407 | ArchiveCompactor::RelocationTable* ArchiveCompactor::_new_loc_table; | 
| 1408 |  | 
| 1409 | void VM_PopulateDumpSharedSpace::write_region(FileMapInfo* mapinfo, int region_idx, | 
                                              DumpRegion* dump_region, bool read_only, bool allow_exec) {
| 1411 |   mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec); | 
| 1412 | } | 
| 1413 |  | 
| 1414 | void VM_PopulateDumpSharedSpace::dump_symbols() { | 
| 1415 |   tty->print_cr("Dumping symbol table ..." ); | 
| 1416 |  | 
| 1417 |   NOT_PRODUCT(SymbolTable::verify()); | 
| 1418 |   SymbolTable::write_to_archive(); | 
| 1419 | } | 
| 1420 |  | 
| 1421 | char* VM_PopulateDumpSharedSpace::dump_read_only_tables() { | 
| 1422 |   ArchiveCompactor::OtherROAllocMark mark; | 
| 1423 |  | 
| 1424 |   tty->print("Removing java_mirror ... " ); | 
| 1425 |   if (!HeapShared::is_heap_object_archiving_allowed()) { | 
| 1426 |     clear_basic_type_mirrors(); | 
| 1427 |   } | 
| 1428 |   remove_java_mirror_in_classes(); | 
| 1429 |   tty->print_cr("done. " ); | 
| 1430 |  | 
| 1431 |   SystemDictionaryShared::write_to_archive(); | 
| 1432 |  | 
| 1433 |   char* start = _ro_region.top(); | 
| 1434 |  | 
| 1435 |   size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(intptr_t*); | 
| 1436 |   _cloned_cpp_vtptrs = (intptr_t**)_ro_region.allocate(vtptrs_bytes, sizeof(intptr_t*)); | 
| 1437 |  | 
| 1438 |   // Write the other data to the output array. | 
| 1439 |   WriteClosure wc(&_ro_region); | 
| 1440 |   MetaspaceShared::serialize(&wc); | 
| 1441 |  | 
| 1442 |   // Write the bitmaps for patching the archive heap regions | 
| 1443 |   dump_archive_heap_oopmaps(); | 
| 1444 |  | 
| 1445 |   return start; | 
| 1446 | } | 
| 1447 |  | 
| 1448 | void VM_PopulateDumpSharedSpace::doit() { | 
  // We should no longer allocate anything from the metaspace, because:
| 1450 |   // | 
| 1451 |   // (1) Metaspace::allocate might trigger GC if we have run out of | 
| 1452 |   //     committed metaspace, but we can't GC because we're running | 
| 1453 |   //     in the VM thread. | 
| 1454 |   // (2) ArchiveCompactor needs to work with a stable set of MetaspaceObjs. | 
| 1455 |   Metaspace::freeze(); | 
| 1456 |   DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm); | 
| 1457 |  | 
| 1458 |   Thread* THREAD = VMThread::vm_thread(); | 
| 1459 |  | 
| 1460 |   FileMapInfo::check_nonempty_dir_in_shared_path_table(); | 
| 1461 |  | 
| 1462 |   NOT_PRODUCT(SystemDictionary::verify();) | 
| 1463 |   // The following guarantee is meant to ensure that no loader constraints | 
| 1464 |   // exist yet, since the constraints table is not shared.  This becomes | 
| 1465 |   // more important now that we don't re-initialize vtables/itables for | 
| 1466 |   // shared classes at runtime, where constraints were previously created. | 
  guarantee(SystemDictionary::constraints()->number_of_entries() == 0,
            "loader constraints are not saved");
  guarantee(SystemDictionary::placeholders()->number_of_entries() == 0,
            "placeholders are not saved");
| 1471 |  | 
  // At this point, many classes have been loaded.
  // Gather the loaded classes into a global array and operate on that array,
  // so we don't have to walk the SystemDictionary again.
| 1475 |   SystemDictionaryShared::check_excluded_classes(); | 
| 1476 |   _global_klass_objects = new GrowableArray<Klass*>(1000); | 
| 1477 |   CollectClassesClosure collect_classes; | 
| 1478 |   ClassLoaderDataGraph::loaded_classes_do(&collect_classes); | 
| 1479 |  | 
| 1480 |   tty->print_cr("Number of classes %d" , _global_klass_objects->length()); | 
| 1481 |   { | 
| 1482 |     int num_type_array = 0, num_obj_array = 0, num_inst = 0; | 
| 1483 |     for (int i = 0; i < _global_klass_objects->length(); i++) { | 
| 1484 |       Klass* k = _global_klass_objects->at(i); | 
| 1485 |       if (k->is_instance_klass()) { | 
| 1486 |         num_inst ++; | 
| 1487 |       } else if (k->is_objArray_klass()) { | 
| 1488 |         num_obj_array ++; | 
| 1489 |       } else { | 
        assert(k->is_typeArray_klass(), "sanity");
| 1491 |         num_type_array ++; | 
| 1492 |       } | 
| 1493 |     } | 
| 1494 |     tty->print_cr("    instance classes   = %5d" , num_inst); | 
| 1495 |     tty->print_cr("    obj array classes  = %5d" , num_obj_array); | 
| 1496 |     tty->print_cr("    type array classes = %5d" , num_type_array); | 
| 1497 |   } | 
| 1498 |  | 
| 1499 |   // Ensure the ConstMethods won't be modified at run-time | 
| 1500 |   tty->print("Updating ConstMethods ... " ); | 
| 1501 |   rewrite_nofast_bytecodes_and_calculate_fingerprints(); | 
| 1502 |   tty->print_cr("done. " ); | 
| 1503 |  | 
| 1504 |   // Remove all references outside the metadata | 
| 1505 |   tty->print("Removing unshareable information ... " ); | 
| 1506 |   remove_unshareable_in_classes(); | 
| 1507 |   tty->print_cr("done. " ); | 
| 1508 |  | 
| 1509 |   ArchiveCompactor::initialize(); | 
| 1510 |   ArchiveCompactor::copy_and_compact(); | 
| 1511 |  | 
| 1512 |   dump_symbols(); | 
| 1513 |  | 
| 1514 |   // Dump supported java heap objects | 
| 1515 |   _closed_archive_heap_regions = NULL; | 
| 1516 |   _open_archive_heap_regions = NULL; | 
| 1517 |   dump_java_heap_objects(); | 
| 1518 |  | 
| 1519 |   ArchiveCompactor::relocate_well_known_klasses(); | 
| 1520 |  | 
| 1521 |   char* read_only_tables_start = dump_read_only_tables(); | 
| 1522 |   _ro_region.pack(&_md_region); | 
| 1523 |  | 
| 1524 |   char* vtbl_list = _md_region.top(); | 
| 1525 |   MetaspaceShared::allocate_cpp_vtable_clones(); | 
| 1526 |   _md_region.pack(); | 
| 1527 |  | 
  // The 4 core spaces are allocated consecutively mc->rw->ro->md, so their total
  // size is simply the distance between the two ends.
  size_t core_spaces_size = _md_region.end() - _mc_region.base();
  assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
         "should already be aligned");
| 1533 |  | 
| 1534 |   // During patching, some virtual methods may be called, so at this point | 
| 1535 |   // the vtables must contain valid methods (as filled in by CppVtableCloner::allocate). | 
| 1536 |   MetaspaceShared::patch_cpp_vtable_pointers(); | 
| 1537 |  | 
| 1538 |   // The vtable clones contain addresses of the current process. | 
| 1539 |   // We don't want to write these addresses into the archive. | 
| 1540 |   MetaspaceShared::zero_cpp_vtable_clones_for_writing(); | 
| 1541 |  | 
| 1542 |   // Create and write the archive file that maps the shared spaces. | 
| 1543 |  | 
| 1544 |   FileMapInfo* mapinfo = new FileMapInfo(true); | 
| 1545 |   mapinfo->populate_header(os::vm_allocation_granularity()); | 
| 1546 |   mapinfo->set_read_only_tables_start(read_only_tables_start); | 
| 1547 |   mapinfo->set_misc_data_patching_start(vtbl_list); | 
| 1548 |   mapinfo->set_cds_i2i_entry_code_buffers(MetaspaceShared::cds_i2i_entry_code_buffers()); | 
| 1549 |   mapinfo->set_cds_i2i_entry_code_buffers_size(MetaspaceShared::cds_i2i_entry_code_buffers_size()); | 
| 1550 |   mapinfo->set_core_spaces_size(core_spaces_size); | 
| 1551 |  | 
| 1552 |   for (int pass=1; pass<=2; pass++) { | 
| 1553 |     bool print_archive_log = (pass==1); | 
| 1554 |     if (pass == 1) { | 
| 1555 |       // The first pass doesn't actually write the data to disk. All it | 
| 1556 |       // does is to update the fields in the mapinfo->_header. | 
| 1557 |     } else { | 
      // After the first pass, the contents of mapinfo->_header are finalized,
      // so we can compute the header's CRC and write the contents of the header
      // and the regions to disk.
| 1561 |       mapinfo->open_for_write(); | 
| 1562 |       mapinfo->set_header_crc(mapinfo->compute_header_crc()); | 
| 1563 |     } | 
| 1564 |     mapinfo->write_header(); | 
| 1565 |  | 
| 1566 |     // NOTE: md contains the trampoline code for method entries, which are patched at run time, | 
| 1567 |     // so it needs to be read/write. | 
| 1568 |     write_region(mapinfo, MetaspaceShared::mc, &_mc_region, /*read_only=*/false,/*allow_exec=*/true); | 
| 1569 |     write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false); | 
| 1570 |     write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false); | 
| 1571 |     write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false); | 
| 1572 |  | 
| 1573 |     _total_closed_archive_region_size = mapinfo->write_archive_heap_regions( | 
| 1574 |                                         _closed_archive_heap_regions, | 
| 1575 |                                         _closed_archive_heap_oopmaps, | 
| 1576 |                                         MetaspaceShared::first_closed_archive_heap_region, | 
| 1577 |                                         MetaspaceShared::max_closed_archive_heap_region, | 
| 1578 |                                         print_archive_log); | 
| 1579 |     _total_open_archive_region_size = mapinfo->write_archive_heap_regions( | 
| 1580 |                                         _open_archive_heap_regions, | 
| 1581 |                                         _open_archive_heap_oopmaps, | 
| 1582 |                                         MetaspaceShared::first_open_archive_heap_region, | 
| 1583 |                                         MetaspaceShared::max_open_archive_heap_region, | 
| 1584 |                                         print_archive_log); | 
| 1585 |   } | 
| 1586 |  | 
| 1587 |   mapinfo->close(); | 
| 1588 |  | 
| 1589 |   // Restore the vtable in case we invoke any virtual methods. | 
| 1590 |   MetaspaceShared::clone_cpp_vtables((intptr_t*)vtbl_list); | 
| 1591 |  | 
| 1592 |   print_region_stats(); | 
| 1593 |  | 
| 1594 |   if (log_is_enabled(Info, cds)) { | 
| 1595 |     ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()), | 
| 1596 |                                                  int(_mc_region.used()), int(_md_region.used())); | 
| 1597 |   } | 
| 1598 |  | 
| 1599 |   if (PrintSystemDictionaryAtExit) { | 
| 1600 |     SystemDictionary::print(); | 
| 1601 |   } | 
| 1602 |  | 
| 1603 |   if (AllowArchivingWithJavaAgent) { | 
| 1604 |     warning("This archive was created with AllowArchivingWithJavaAgent. It should be used "  | 
| 1605 |             "for testing purposes only and should not be used in a production environment" ); | 
| 1606 |   } | 
| 1607 |  | 
  // There may be other pending VM operations that operate on the InstanceKlasses,
  // which will fail because InstanceKlass::remove_unshareable_info()
  // has been called. Forget these operations and exit the VM directly.
| 1611 |   vm_direct_exit(0); | 
| 1612 | } | 
| 1613 |  | 
| 1614 | void VM_PopulateDumpSharedSpace::print_region_stats() { | 
| 1615 |   // Print statistics of all the regions | 
| 1616 |   const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() + | 
| 1617 |                                 _mc_region.reserved()  + _md_region.reserved() + | 
| 1618 |                                 _total_closed_archive_region_size + | 
| 1619 |                                 _total_open_archive_region_size; | 
| 1620 |   const size_t total_bytes = _ro_region.used()  + _rw_region.used() + | 
| 1621 |                              _mc_region.used()  + _md_region.used() + | 
| 1622 |                              _total_closed_archive_region_size + | 
| 1623 |                              _total_open_archive_region_size; | 
| 1624 |   const double total_u_perc = percent_of(total_bytes, total_reserved); | 
| 1625 |  | 
| 1626 |   _mc_region.print(total_reserved); | 
| 1627 |   _rw_region.print(total_reserved); | 
| 1628 |   _ro_region.print(total_reserved); | 
| 1629 |   _md_region.print(total_reserved); | 
| 1630 |   print_heap_region_stats(_closed_archive_heap_regions, "ca" , total_reserved); | 
| 1631 |   print_heap_region_stats(_open_archive_heap_regions, "oa" , total_reserved); | 
| 1632 |  | 
| 1633 |   tty->print_cr("total    : "  SIZE_FORMAT_W(9) " [100.0%% of total] out of "  SIZE_FORMAT_W(9) " bytes [%5.1f%% used]" , | 
| 1634 |                  total_bytes, total_reserved, total_u_perc); | 
| 1635 | } | 
| 1636 |  | 
| 1637 | void VM_PopulateDumpSharedSpace::print_heap_region_stats(GrowableArray<MemRegion> *heap_mem, | 
| 1638 |                                                          const char *name, const size_t total_size) { | 
| 1639 |   int arr_len = heap_mem == NULL ? 0 : heap_mem->length(); | 
| 1640 |   for (int i = 0; i < arr_len; i++) { | 
| 1641 |       char* start = (char*)heap_mem->at(i).start(); | 
| 1642 |       size_t size = heap_mem->at(i).byte_size(); | 
| 1643 |       char* top = start + size; | 
| 1644 |       tty->print_cr("%s%d space: "  SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of "  SIZE_FORMAT_W(9) " bytes [100.0%% used] at "  INTPTR_FORMAT, | 
| 1645 |                     name, i, size, size/double(total_size)*100.0, size, p2i(start)); | 
| 1646 |  | 
| 1647 |   } | 
| 1648 | } | 
| 1649 |  | 
| 1650 | // Update a Java object to point its Klass* to the new location after | 
| 1651 | // shared archive has been compacted. | 
| 1652 | void MetaspaceShared::relocate_klass_ptr(oop o) { | 
| 1653 |   assert(DumpSharedSpaces, "sanity" ); | 
| 1654 |   Klass* k = ArchiveCompactor::get_relocated_klass(o->klass()); | 
| 1655 |   o->set_klass(k); | 
| 1656 | } | 
| 1657 |  | 
| 1658 | Klass* MetaspaceShared::get_relocated_klass(Klass *k) { | 
| 1659 |   assert(DumpSharedSpaces, "sanity" ); | 
| 1660 |   return ArchiveCompactor::get_relocated_klass(k); | 
| 1661 | } | 
| 1662 |  | 
| 1663 | class LinkSharedClassesClosure : public KlassClosure { | 
| 1664 |   Thread* THREAD; | 
| 1665 |   bool    _made_progress; | 
| 1666 |  public: | 
| 1667 |   LinkSharedClassesClosure(Thread* thread) : THREAD(thread), _made_progress(false) {} | 
| 1668 |  | 
| 1669 |   void reset()               { _made_progress = false; } | 
| 1670 |   bool made_progress() const { return _made_progress; } | 
| 1671 |  | 
| 1672 |   void do_klass(Klass* k) { | 
| 1673 |     if (k->is_instance_klass()) { | 
| 1674 |       InstanceKlass* ik = InstanceKlass::cast(k); | 
      // Link the class to cause the bytecodes to be rewritten and the
      // cpcache to be created. Class verification is done according
      // to the -Xverify setting.
| 1678 |       _made_progress |= MetaspaceShared::try_link_class(ik, THREAD); | 
      guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
| 1680 |  | 
| 1681 |       ik->constants()->resolve_class_constants(THREAD); | 
| 1682 |     } | 
| 1683 |   } | 
| 1684 | }; | 
| 1685 |  | 
| 1686 | class CheckSharedClassesClosure : public KlassClosure { | 
| 1687 |   bool    _made_progress; | 
| 1688 |  public: | 
| 1689 |   CheckSharedClassesClosure() : _made_progress(false) {} | 
| 1690 |  | 
| 1691 |   void reset()               { _made_progress = false; } | 
| 1692 |   bool made_progress() const { return _made_progress; } | 
| 1693 |   void do_klass(Klass* k) { | 
| 1694 |     if (k->is_instance_klass() && InstanceKlass::cast(k)->check_sharing_error_state()) { | 
| 1695 |       _made_progress = true; | 
| 1696 |     } | 
| 1697 |   } | 
| 1698 | }; | 
| 1699 |  | 
| 1700 | void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) { | 
| 1701 |   // We need to iterate because verification may cause additional classes | 
| 1702 |   // to be loaded. | 
| 1703 |   LinkSharedClassesClosure link_closure(THREAD); | 
| 1704 |   do { | 
| 1705 |     link_closure.reset(); | 
| 1706 |     ClassLoaderDataGraph::unlocked_loaded_classes_do(&link_closure); | 
    guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
| 1708 |   } while (link_closure.made_progress()); | 
| 1709 |  | 
| 1710 |   if (_has_error_classes) { | 
| 1711 |     // Mark all classes whose super class or interfaces failed verification. | 
| 1712 |     CheckSharedClassesClosure check_closure; | 
| 1713 |     do { | 
| 1714 |       // Not completely sure if we need to do this iteratively. Anyway, | 
| 1715 |       // we should come here only if there are unverifiable classes, which | 
| 1716 |       // shouldn't happen in normal cases. So better safe than sorry. | 
| 1717 |       check_closure.reset(); | 
| 1718 |       ClassLoaderDataGraph::unlocked_loaded_classes_do(&check_closure); | 
| 1719 |     } while (check_closure.made_progress()); | 
| 1720 |   } | 
| 1721 | } | 
| 1722 |  | 
| 1723 | void MetaspaceShared::prepare_for_dumping() { | 
| 1724 |   Arguments::check_unsupported_dumping_properties(); | 
| 1725 |   ClassLoader::initialize_shared_path(); | 
| 1726 | } | 
| 1727 |  | 
| 1728 | // Preload classes from a list, populate the shared spaces and dump to a | 
| 1729 | // file. | 
| 1730 | void MetaspaceShared::preload_and_dump(TRAPS) { | 
  { TraceTime timer("Dump Shared Spaces", TRACETIME_LOG(Info, startuptime));
| 1732 |     ResourceMark rm; | 
| 1733 |     char class_list_path_str[JVM_MAXPATHLEN]; | 
| 1734 |     // Preload classes to be shared. | 
| 1735 |     const char* class_list_path; | 
| 1736 |     if (SharedClassListFile == NULL) { | 
      // Construct the path to the class list (in the JDK's lib directory)
| 1738 |       // Walk up two directories from the location of the VM and | 
| 1739 |       // optionally tack on "lib" (depending on platform) | 
| 1740 |       os::jvm_path(class_list_path_str, sizeof(class_list_path_str)); | 
| 1741 |       for (int i = 0; i < 3; i++) { | 
| 1742 |         char *end = strrchr(class_list_path_str, *os::file_separator()); | 
| 1743 |         if (end != NULL) *end = '\0'; | 
| 1744 |       } | 
| 1745 |       int class_list_path_len = (int)strlen(class_list_path_str); | 
| 1746 |       if (class_list_path_len >= 3) { | 
        if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
| 1748 |           if (class_list_path_len < JVM_MAXPATHLEN - 4) { | 
| 1749 |             jio_snprintf(class_list_path_str + class_list_path_len, | 
| 1750 |                          sizeof(class_list_path_str) - class_list_path_len, | 
| 1751 |                          "%slib" , os::file_separator()); | 
| 1752 |             class_list_path_len += 4; | 
| 1753 |           } | 
| 1754 |         } | 
| 1755 |       } | 
| 1756 |       if (class_list_path_len < JVM_MAXPATHLEN - 10) { | 
| 1757 |         jio_snprintf(class_list_path_str + class_list_path_len, | 
| 1758 |                      sizeof(class_list_path_str) - class_list_path_len, | 
| 1759 |                      "%sclasslist" , os::file_separator()); | 
| 1760 |       } | 
| 1761 |       class_list_path = class_list_path_str; | 
| 1762 |     } else { | 
| 1763 |       class_list_path = SharedClassListFile; | 
| 1764 |     } | 
| 1765 |  | 
| 1766 |     tty->print_cr("Loading classes to share ..." ); | 
| 1767 |     _has_error_classes = false; | 
| 1768 |     int class_count = preload_classes(class_list_path, THREAD); | 
| 1769 |     if (ExtraSharedClassListFile) { | 
| 1770 |       class_count += preload_classes(ExtraSharedClassListFile, THREAD); | 
| 1771 |     } | 
| 1772 |     tty->print_cr("Loading classes to share: done." ); | 
| 1773 |  | 
| 1774 |     log_info(cds)("Shared spaces: preloaded %d classes" , class_count); | 
| 1775 |  | 
| 1776 |     if (SharedArchiveConfigFile) { | 
| 1777 |       tty->print_cr("Reading extra data from %s ..." , SharedArchiveConfigFile); | 
| 1778 |       read_extra_data(SharedArchiveConfigFile, THREAD); | 
| 1779 |     } | 
| 1780 |     tty->print_cr("Reading extra data: done." ); | 
| 1781 |  | 
| 1782 |     HeapShared::init_subgraph_entry_fields(THREAD); | 
| 1783 |  | 
| 1784 |     // Rewrite and link classes | 
| 1785 |     tty->print_cr("Rewriting and linking classes ..." ); | 
| 1786 |  | 
| 1787 |     // Link any classes which got missed. This would happen if we have loaded classes that | 
| 1788 |     // were not explicitly specified in the classlist. E.g., if an interface implemented by class K | 
| 1789 |     // fails verification, all other interfaces that were not specified in the classlist but | 
| 1790 |     // are implemented by K are not verified. | 
| 1791 |     link_and_cleanup_shared_classes(CATCH); | 
| 1792 |     tty->print_cr("Rewriting and linking classes: done" ); | 
| 1793 |  | 
| 1794 |     if (HeapShared::is_heap_object_archiving_allowed()) { | 
| 1795 |       // Avoid fragmentation while archiving heap objects. | 
| 1796 |       Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(true); | 
| 1797 |       Universe::heap()->collect(GCCause::_archive_time_gc); | 
| 1798 |       Universe::heap()->soft_ref_policy()->set_should_clear_all_soft_refs(false); | 
| 1799 |     } | 
| 1800 |  | 
| 1801 |     VM_PopulateDumpSharedSpace op; | 
| 1802 |     VMThread::execute(&op); | 
| 1803 |   } | 
| 1804 | } | 
| 1805 |  | 
| 1806 |  | 
| 1807 | int MetaspaceShared::preload_classes(const char* class_list_path, TRAPS) { | 
| 1808 |   ClassListParser parser(class_list_path); | 
| 1809 |   int class_count = 0; | 
| 1810 |  | 
| 1811 |   while (parser.parse_one_line()) { | 
| 1812 |     Klass* klass = parser.load_current_class(THREAD); | 
| 1813 |     if (HAS_PENDING_EXCEPTION) { | 
| 1814 |       if (klass == NULL && | 
| 1815 |           (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) { | 
| 1816 |         // print a warning only when the pending exception is class not found | 
| 1817 |         tty->print_cr("Preload Warning: Cannot find %s" , parser.current_class_name()); | 
| 1818 |       } | 
| 1819 |       CLEAR_PENDING_EXCEPTION; | 
| 1820 |     } | 
| 1821 |     if (klass != NULL) { | 
| 1822 |       if (log_is_enabled(Trace, cds)) { | 
| 1823 |         ResourceMark rm; | 
| 1824 |         log_trace(cds)("Shared spaces preloaded: %s" , klass->external_name()); | 
| 1825 |       } | 
| 1826 |  | 
| 1827 |       if (klass->is_instance_klass()) { | 
| 1828 |         InstanceKlass* ik = InstanceKlass::cast(klass); | 
| 1829 |  | 
| 1830 |         // Link the class to cause the bytecodes to be rewritten and the | 
| 1831 |         // cpcache to be created. The linking is done as soon as classes | 
        // are loaded, so that the related data structures (klass and
| 1833 |         // cpCache) are located together. | 
| 1834 |         try_link_class(ik, THREAD); | 
        guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
| 1836 |       } | 
| 1837 |  | 
| 1838 |       class_count++; | 
| 1839 |     } | 
| 1840 |   } | 
| 1841 |  | 
| 1842 |   return class_count; | 
| 1843 | } | 
| 1844 |  | 
| 1845 | // Returns true if the class's status has changed | 
| 1846 | bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) { | 
| 1847 |   assert(DumpSharedSpaces, "should only be called during dumping" ); | 
| 1848 |   if (ik->init_state() < InstanceKlass::linked) { | 
| 1849 |     bool saved = BytecodeVerificationLocal; | 
| 1850 |     if (ik->loader_type() == 0 && ik->class_loader() == NULL) { | 
| 1851 |       // The verification decision is based on BytecodeVerificationRemote | 
| 1852 |       // for non-system classes. Since we are using the NULL classloader | 
| 1853 |       // to load non-system classes for customized class loaders during dumping, | 
| 1854 |       // we need to temporarily change BytecodeVerificationLocal to be the same as | 
      // BytecodeVerificationRemote. Note this can cause the parent system
      // classes to also be verified. The extra overhead is acceptable during
| 1857 |       // dumping. | 
| 1858 |       BytecodeVerificationLocal = BytecodeVerificationRemote; | 
| 1859 |     } | 
| 1860 |     ik->link_class(THREAD); | 
| 1861 |     if (HAS_PENDING_EXCEPTION) { | 
| 1862 |       ResourceMark rm; | 
| 1863 |       tty->print_cr("Preload Warning: Verification failed for %s" , | 
| 1864 |                     ik->external_name()); | 
| 1865 |       CLEAR_PENDING_EXCEPTION; | 
| 1866 |       ik->set_in_error_state(); | 
| 1867 |       _has_error_classes = true; | 
| 1868 |     } | 
| 1869 |     BytecodeVerificationLocal = saved; | 
| 1870 |     return true; | 
| 1871 |   } else { | 
| 1872 |     return false; | 
| 1873 |   } | 
| 1874 | } | 
| 1875 |  | 
| 1876 | #if INCLUDE_CDS_JAVA_HEAP | 
| 1877 | void VM_PopulateDumpSharedSpace::dump_java_heap_objects() { | 
  // The closed and open archive heap spaces have at most two regions each.
| 1879 |   // See FileMapInfo::write_archive_heap_regions() for details. | 
| 1880 |   _closed_archive_heap_regions = new GrowableArray<MemRegion>(2); | 
| 1881 |   _open_archive_heap_regions = new GrowableArray<MemRegion>(2); | 
| 1882 |   HeapShared::archive_java_heap_objects(_closed_archive_heap_regions, | 
| 1883 |                                         _open_archive_heap_regions); | 
| 1884 |   ArchiveCompactor::OtherROAllocMark mark; | 
| 1885 |   HeapShared::write_subgraph_info_table(); | 
| 1886 | } | 
| 1887 |  | 
| 1888 | void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps() { | 
| 1889 |   if (HeapShared::is_heap_object_archiving_allowed()) { | 
| 1890 |     _closed_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2); | 
| 1891 |     dump_archive_heap_oopmaps(_closed_archive_heap_regions, _closed_archive_heap_oopmaps); | 
| 1892 |  | 
| 1893 |     _open_archive_heap_oopmaps = new GrowableArray<ArchiveHeapOopmapInfo>(2); | 
| 1894 |     dump_archive_heap_oopmaps(_open_archive_heap_regions, _open_archive_heap_oopmaps); | 
| 1895 |   } | 
| 1896 | } | 
| 1897 |  | 
| 1898 | void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegion>* regions, | 
| 1899 |                                                            GrowableArray<ArchiveHeapOopmapInfo>* oopmaps) { | 
| 1900 |   for (int i=0; i<regions->length(); i++) { | 
| 1901 |     ResourceBitMap oopmap = HeapShared::calculate_oopmap(regions->at(i)); | 
| 1902 |     size_t size_in_bits = oopmap.size(); | 
| 1903 |     size_t size_in_bytes = oopmap.size_in_bytes(); | 
| 1904 |     uintptr_t* buffer = (uintptr_t*)_ro_region.allocate(size_in_bytes, sizeof(intptr_t)); | 
| 1905 |     oopmap.write_to(buffer, size_in_bytes); | 
| 1906 |     log_info(cds)("Oopmap = "  INTPTR_FORMAT " ("  SIZE_FORMAT_W(6) " bytes) for heap region "  | 
| 1907 |                   INTPTR_FORMAT " ("  SIZE_FORMAT_W(8) " bytes)" , | 
| 1908 |                   p2i(buffer), size_in_bytes, | 
| 1909 |                   p2i(regions->at(i).start()), regions->at(i).byte_size()); | 
| 1910 |  | 
| 1911 |     ArchiveHeapOopmapInfo info; | 
| 1912 |     info._oopmap = (address)buffer; | 
| 1913 |     info._oopmap_size_in_bits = size_in_bits; | 
| 1914 |     oopmaps->append(info); | 
| 1915 |   } | 
| 1916 | } | 
| 1917 | #endif // INCLUDE_CDS_JAVA_HEAP | 
| 1918 |  | 
| 1919 | void ReadClosure::do_ptr(void** p) { | 
  assert(*p == NULL, "initializing previously initialized pointer.");
  intptr_t obj = nextPtr();
  assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
         "hit tag while initializing ptrs.");
| 1924 |   *p = (void*)obj; | 
| 1925 | } | 
| 1926 |  | 
| 1927 | void ReadClosure::do_u4(u4* p) { | 
| 1928 |   intptr_t obj = nextPtr(); | 
| 1929 |   *p = (u4)(uintx(obj)); | 
| 1930 | } | 
| 1931 |  | 
| 1932 | void ReadClosure::do_bool(bool* p) { | 
| 1933 |   intptr_t obj = nextPtr(); | 
| 1934 |   *p = (bool)(uintx(obj)); | 
| 1935 | } | 
| 1936 |  | 
| 1937 | void ReadClosure::do_tag(int tag) { | 
  int old_tag = (int)(intptr_t)nextPtr();
  assert(tag == old_tag, "old tag doesn't match");
| 1942 |   FileMapInfo::assert_mark(tag == old_tag); | 
| 1943 | } | 
| 1944 |  | 
| 1945 | void ReadClosure::do_oop(oop *p) { | 
| 1946 |   narrowOop o = (narrowOop)nextPtr(); | 
| 1947 |   if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) { | 
    *p = NULL;
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archived heap object is not allowed");
    assert(HeapShared::open_archive_heap_region_mapped(),
           "Open archive heap region is not mapped");
| 1954 |     *p = HeapShared::decode_from_archive(o); | 
| 1955 |   } | 
| 1956 | } | 
| 1957 |  | 
| 1958 | void ReadClosure::do_region(u_char* start, size_t size) { | 
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
| 1961 |   do_tag((int)size); | 
| 1962 |   while (size > 0) { | 
| 1963 |     *(intptr_t*)start = nextPtr(); | 
| 1964 |     start += sizeof(intptr_t); | 
| 1965 |     size -= sizeof(intptr_t); | 
| 1966 |   } | 
| 1967 | } | 
| 1968 |  | 
| 1969 | void MetaspaceShared::set_shared_metaspace_range(void* base, void* top) { | 
| 1970 |   _shared_metaspace_static_top = top; | 
| 1971 |   MetaspaceObj::set_shared_metaspace_range(base, top); | 
| 1972 | } | 
| 1973 |  | 
// Return true if the given address is within the shared region identified by idx.
| 1975 | bool MetaspaceShared::is_in_shared_region(const void* p, int idx) { | 
| 1976 |   return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_region(p, idx); | 
| 1977 | } | 
| 1978 |  | 
| 1979 | bool MetaspaceShared::is_in_trampoline_frame(address addr) { | 
| 1980 |   if (UseSharedSpaces && is_in_shared_region(addr, MetaspaceShared::mc)) { | 
| 1981 |     return true; | 
| 1982 |   } | 
| 1983 |   return false; | 
| 1984 | } | 
| 1985 |  | 
| 1986 | bool MetaspaceShared::is_shared_dynamic(void* p) { | 
| 1987 |   if ((p < MetaspaceObj::shared_metaspace_top()) && | 
| 1988 |       (p >= _shared_metaspace_static_top)) { | 
| 1989 |     return true; | 
| 1990 |   } else { | 
| 1991 |     return false; | 
| 1992 |   } | 
| 1993 | } | 
| 1994 |  | 
// Map the shared spaces at the requested addresses; returns true on success.
| 1996 | bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) { | 
| 1997 |   size_t image_alignment = mapinfo->alignment(); | 
| 1998 |  | 
| 1999 | #ifndef _WINDOWS | 
| 2000 |   // Map in the shared memory and then map the regions on top of it. | 
| 2001 |   // On Windows, don't map the memory here because it will cause the | 
| 2002 |   // mappings of the regions to fail. | 
| 2003 |   ReservedSpace shared_rs = mapinfo->reserve_shared_memory(); | 
| 2004 |   if (!shared_rs.is_reserved()) return false; | 
| 2005 | #endif | 
| 2006 |  | 
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
| 2008 |  | 
| 2009 |   // Map each shared region | 
| 2010 |   int regions[] = {mc, rw, ro, md}; | 
| 2011 |   size_t len = sizeof(regions)/sizeof(int); | 
| 2012 |   char* saved_base[] = {NULL, NULL, NULL, NULL}; | 
  char* top = mapinfo->map_regions(regions, saved_base, len);
| 2014 |  | 
| 2015 |   if (top != NULL && | 
| 2016 |       (image_alignment == (size_t)os::vm_allocation_granularity()) && | 
| 2017 |       mapinfo->validate_shared_path_table()) { | 
| 2018 |     // Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for | 
| 2019 |     // fast checking in MetaspaceShared::is_in_shared_metaspace() and | 
| 2020 |     // MetaspaceObj::is_shared(). | 
| 2021 |     _core_spaces_size = mapinfo->core_spaces_size(); | 
| 2022 |     set_shared_metaspace_range((void*)saved_base[0], (void*)top); | 
| 2023 |     return true; | 
| 2024 |   } else { | 
| 2025 |     mapinfo->unmap_regions(regions, saved_base, len); | 
| 2026 | #ifndef _WINDOWS | 
| 2027 |     // Release the entire mapped region | 
| 2028 |     shared_rs.release(); | 
| 2029 | #endif | 
| 2030 |     // If -Xshare:on is specified, print out the error message and exit VM, | 
| 2031 |     // otherwise, set UseSharedSpaces to false and continue. | 
| 2032 |     if (RequireSharedSpaces || PrintSharedArchiveAndExit) { | 
| 2033 |       vm_exit_during_initialization("Unable to use shared archive." , "Failed map_region for using -Xshare:on." ); | 
| 2034 |     } else { | 
| 2035 |       FLAG_SET_DEFAULT(UseSharedSpaces, false); | 
| 2036 |     } | 
| 2037 |     return false; | 
| 2038 |   } | 
| 2039 | } | 
| 2040 |  | 
// Read the miscellaneous data from the shared file, and
// deserialize it to its various destinations.
| 2043 |  | 
| 2044 | void MetaspaceShared::initialize_shared_spaces() { | 
| 2045 |   FileMapInfo *mapinfo = FileMapInfo::current_info(); | 
| 2046 |   _cds_i2i_entry_code_buffers = mapinfo->cds_i2i_entry_code_buffers(); | 
| 2047 |   _cds_i2i_entry_code_buffers_size = mapinfo->cds_i2i_entry_code_buffers_size(); | 
  // _core_spaces_size is loaded from the shared archive immediately after mapping
| 2049 |   assert(_core_spaces_size == mapinfo->core_spaces_size(), "sanity" ); | 
| 2050 |   char* buffer = mapinfo->misc_data_patching_start(); | 
| 2051 |   clone_cpp_vtables((intptr_t*)buffer); | 
| 2052 |  | 
| 2053 |   // The rest of the data is now stored in the RW region | 
| 2054 |   buffer = mapinfo->read_only_tables_start(); | 
| 2055 |  | 
  // Skip over _cloned_cpp_vtptrs
| 2057 |   buffer += _num_cloned_vtable_kinds * sizeof(intptr_t*); | 
| 2058 |  | 
| 2059 |   // Verify various attributes of the archive, plus initialize the | 
| 2060 |   // shared string/symbol tables | 
| 2061 |   intptr_t* array = (intptr_t*)buffer; | 
| 2062 |   ReadClosure rc(&array); | 
| 2063 |   serialize(&rc); | 
| 2064 |  | 
| 2065 |   // Initialize the run-time symbol table. | 
| 2066 |   SymbolTable::create_table(); | 
| 2067 |  | 
| 2068 |   mapinfo->patch_archived_heap_embedded_pointers(); | 
| 2069 |  | 
| 2070 |   // Close the mapinfo file | 
| 2071 |   mapinfo->close(); | 
| 2072 |  | 
| 2073 |   if (PrintSharedArchiveAndExit) { | 
| 2074 |     if (PrintSharedDictionary) { | 
| 2075 |       tty->print_cr("\nShared classes:\n" ); | 
| 2076 |       SystemDictionaryShared::print_on(tty); | 
| 2077 |     } | 
| 2078 |     if (_archive_loading_failed) { | 
| 2079 |       tty->print_cr("archive is invalid" ); | 
| 2080 |       vm_exit(1); | 
| 2081 |     } else { | 
| 2082 |       tty->print_cr("archive is valid" ); | 
| 2083 |       vm_exit(0); | 
| 2084 |     } | 
| 2085 |   } | 
| 2086 | } | 
| 2087 |  | 
| 2088 | // JVM/TI RedefineClasses() support: | 
| 2089 | bool MetaspaceShared::remap_shared_readonly_as_readwrite() { | 
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
| 2091 |  | 
| 2092 |   if (UseSharedSpaces) { | 
| 2093 |     // remap the shared readonly space to shared readwrite, private | 
| 2094 |     FileMapInfo* mapinfo = FileMapInfo::current_info(); | 
| 2095 |     if (!mapinfo->remap_shared_readonly_as_readwrite()) { | 
| 2096 |       return false; | 
| 2097 |     } | 
| 2098 |     if (FileMapInfo::dynamic_info() != NULL) { | 
| 2099 |       mapinfo = FileMapInfo::dynamic_info(); | 
| 2100 |       if (!mapinfo->remap_shared_readonly_as_readwrite()) { | 
| 2101 |         return false; | 
| 2102 |       } | 
| 2103 |     } | 
| 2104 |     _remapped_readwrite = true; | 
| 2105 |   } | 
| 2106 |   return true; | 
| 2107 | } | 
| 2108 |  | 
| 2109 | void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes) { | 
| 2110 |   // This is highly unlikely to happen on 64-bits because we have reserved a 4GB space. | 
| 2111 |   // On 32-bit we reserve only 256MB so you could run out of space with 100,000 classes | 
| 2112 |   // or so. | 
| 2113 |   _mc_region.print_out_of_space_msg(name, needed_bytes); | 
| 2114 |   _rw_region.print_out_of_space_msg(name, needed_bytes); | 
| 2115 |   _ro_region.print_out_of_space_msg(name, needed_bytes); | 
| 2116 |   _md_region.print_out_of_space_msg(name, needed_bytes); | 
| 2117 |  | 
  vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                "Please reduce the number of shared classes.");
| 2120 | } | 
| 2121 |  |