| 1 | /* |
| 2 | * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #include "precompiled.hpp" |
| 26 | #include "jvm.h" |
| 27 | #include "classfile/classLoaderData.inline.hpp" |
| 28 | #include "classfile/symbolTable.hpp" |
| 29 | #include "classfile/systemDictionary.hpp" |
| 30 | #include "classfile/systemDictionaryShared.hpp" |
| 31 | #include "logging/log.hpp" |
| 32 | #include "memory/metadataFactory.hpp" |
| 33 | #include "memory/metaspace.hpp" |
| 34 | #include "memory/metaspaceClosure.hpp" |
| 35 | #include "memory/metaspaceShared.hpp" |
| 36 | #include "memory/resourceArea.hpp" |
| 37 | #include "memory/dynamicArchive.hpp" |
| 38 | #include "oops/compressedOops.hpp" |
| 39 | #include "oops/objArrayKlass.hpp" |
| 40 | #include "prims/jvmtiRedefineClasses.hpp" |
| 41 | #include "runtime/handles.inline.hpp" |
| 42 | #include "runtime/os.inline.hpp" |
| 43 | #include "runtime/sharedRuntime.hpp" |
| 44 | #include "runtime/vmThread.hpp" |
| 45 | #include "runtime/vmOperations.hpp" |
| 46 | #include "utilities/bitMap.inline.hpp" |
| 47 | |
| 48 | #ifndef O_BINARY // if defined (Win32) use binary files. |
| 49 | #define O_BINARY 0 // otherwise do nothing. |
| 50 | #endif |
| 51 | |
| 52 | class DynamicArchiveBuilder : ResourceObj { |
| 53 | CHeapBitMap _ptrmap; |
| 54 | static unsigned my_hash(const address& a) { |
| 55 | return primitive_hash<address>(a); |
| 56 | } |
| 57 | static bool my_equals(const address& a0, const address& a1) { |
| 58 | return primitive_equals<address>(a0, a1); |
| 59 | } |
| 60 | typedef ResourceHashtable< |
| 61 | address, address, |
| 62 | DynamicArchiveBuilder::my_hash, // solaris compiler doesn't like: primitive_hash<address> |
| 63 | DynamicArchiveBuilder::my_equals, // solaris compiler doesn't like: primitive_equals<address> |
| 64 | 16384, ResourceObj::C_HEAP> RelocationTable; |
| 65 | RelocationTable _new_loc_table; |
| 66 | |
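// Delta added to an address inside the dump-time buffer to obtain the address at
// which the same object will appear at run time (the "target" space). See
// reserve_space_and_init_buffer_to_target_delta() and to_target() below.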
| 67 | intx _buffer_to_target_delta; |
| 68 | |
| 69 | DumpRegion* _current_dump_space; |
| 70 | |
| 71 | static size_t reserve_alignment() { |
| 72 | return Metaspace::reserve_alignment(); |
| 73 | } |
| 74 | |
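// The dynamic archive consists of three dump regions, populated in this order by
// doit(): rw (read-write metadata), ro (read-only metadata and the lookup tables),
// and mc (method entry trampolines).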
| 75 | static const int _total_dump_regions = 3; |
| 76 | int _num_dump_regions_used; |
| 77 | |
| 78 | public: |
| 79 | void mark_pointer(address* ptr_loc) { |
| 80 | if (is_in_buffer_space(ptr_loc)) { |
| 81 | size_t idx = pointer_delta(ptr_loc, _alloc_bottom, sizeof(address)); |
| 82 | _ptrmap.set_bit(idx); |
| 83 | } |
| 84 | } |
| 85 | |
| 86 | DumpRegion* current_dump_space() const { |
| 87 | return _current_dump_space; |
| 88 | } |
| 89 | |
| 90 | bool is_in_buffer_space(address p) const { |
| 91 | return (_alloc_bottom <= p && p < (address)current_dump_space()->top()); |
| 92 | } |
| 93 | |
| 94 | template <typename T> bool is_in_target_space(T target_obj) const { |
| 95 | address buff_obj = address(target_obj) - _buffer_to_target_delta; |
| 96 | return is_in_buffer_space(buff_obj); |
| 97 | } |
| 98 | |
| 99 | template <typename T> bool is_in_buffer_space(T obj) const { |
| 100 | return is_in_buffer_space(address(obj)); |
| 101 | } |
| 102 | |
| 103 | template <typename T> T to_target_no_check(T obj) const { |
| 104 | return (T)(address(obj) + _buffer_to_target_delta); |
| 105 | } |
| 106 | |
| 107 | template <typename T> T to_target(T obj) const { |
| 108 | assert(is_in_buffer_space(obj), "must be" ); |
| 109 | return (T)(address(obj) + _buffer_to_target_delta); |
| 110 | } |
| 111 | |
| 112 | template <typename T> T get_new_loc(T obj) { |
| 113 | address* pp = _new_loc_table.get((address)obj); |
| 114 | if (pp == NULL) { |
| 115 | // Excluded klasses are not copied |
| 116 | return NULL; |
| 117 | } else { |
| 118 | return (T)*pp; |
| 119 | } |
| 120 | } |
| 121 | |
| 122 | address get_new_loc(MetaspaceClosure::Ref* ref) { |
| 123 | return get_new_loc(ref->obj()); |
| 124 | } |
| 125 | |
| 126 | template <typename T> bool has_new_loc(T obj) { |
| 127 | address* pp = _new_loc_table.get((address)obj); |
| 128 | return pp != NULL; |
| 129 | } |
| 130 | |
| 131 | protected: |
| 132 | enum FollowMode { |
| 133 | make_a_copy, point_to_it, set_to_null |
| 134 | }; |
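// How follow_ref() decides to treat each reference:
//   make_a_copy -- the object is copied into the buffer space and archived.
//   point_to_it -- the object already lives in the base (static) archive, so we
//                  keep pointing at its original location.
//   set_to_null -- the object is not archived (MethodData, excluded classes,
//                  array klasses), so references to it are cleared.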
| 135 | |
| 136 | public: |
| 137 | void copy(MetaspaceClosure::Ref* ref, bool read_only) { |
| 138 | int bytes = ref->size() * BytesPerWord; |
| 139 | address old_obj = ref->obj(); |
| 140 | address new_obj = copy_impl(ref, read_only, bytes); |
| 141 | |
| 142 | assert(new_obj != NULL, "must be" ); |
| 143 | assert(new_obj != old_obj, "must be" ); |
| 144 | bool isnew = _new_loc_table.put(old_obj, new_obj); |
| 145 | assert(isnew, "must be" ); |
| 146 | } |
| 147 | |
| 148 | // Make a shallow copy of each eligible MetaspaceObj into the buffer. |
| 149 | class ShallowCopier: public UniqueMetaspaceClosure { |
| 150 | DynamicArchiveBuilder* _builder; |
| 151 | bool _read_only; |
| 152 | public: |
| 153 | ShallowCopier(DynamicArchiveBuilder* shuffler, bool read_only) |
| 154 | : _builder(shuffler), _read_only(read_only) {} |
| 155 | |
| 156 | virtual bool do_unique_ref(Ref* orig_obj, bool read_only) { |
| 157 | // This method gets called on each *original* object |
| 158 | // reachable from _builder->iterate_roots(). Each orig_obj is |
| 159 | // called exactly once. |
| 160 | FollowMode mode = _builder->follow_ref(orig_obj); |
| 161 | |
| 162 | if (mode == point_to_it) { |
| 163 | if (read_only == _read_only) { |
| 164 | log_debug(cds, dynamic)("ptr : " PTR_FORMAT " %s" , p2i(orig_obj->obj()), |
| 165 | MetaspaceObj::type_name(orig_obj->msotype())); |
| 166 | address p = orig_obj->obj(); |
| 167 | bool isnew = _builder->_new_loc_table.put(p, p); |
| 168 | assert(isnew, "must be" ); |
| 169 | } |
| 170 | return false; |
| 171 | } |
| 172 | |
| 173 | if (mode == set_to_null) { |
| 174 | log_debug(cds, dynamic)("nul : " PTR_FORMAT " %s" , p2i(orig_obj->obj()), |
| 175 | MetaspaceObj::type_name(orig_obj->msotype())); |
| 176 | return false; |
| 177 | } |
| 178 | |
| 179 | if (read_only == _read_only) { |
| 180 | // Make a shallow copy of orig_obj in a buffer (maintained |
| 181 | // by copy_impl in a subclass of DynamicArchiveBuilder). |
| 182 | _builder->copy(orig_obj, read_only); |
| 183 | } |
| 184 | return true; |
| 185 | } |
| 186 | }; |
| 187 | |
| 188 | // Relocate all embedded pointer fields within a MetaspaceObj's shallow copy |
| 189 | class ShallowCopyEmbeddedRefRelocator: public UniqueMetaspaceClosure { |
| 190 | DynamicArchiveBuilder* _builder; |
| 191 | public: |
| 192 | ShallowCopyEmbeddedRefRelocator(DynamicArchiveBuilder* shuffler) |
| 193 | : _builder(shuffler) {} |
| 194 | |
| 195 | // This method gets called on each *original* object reachable |
| 196 | // from _builder->iterate_roots(). Each orig_obj is |
| 197 | // called exactly once. |
| 198 | virtual bool do_unique_ref(Ref* orig_ref, bool read_only) { |
| 199 | FollowMode mode = _builder->follow_ref(orig_ref); |
| 200 | |
| 201 | if (mode == point_to_it) { |
| 202 | // We did not make a copy of this object |
| 203 | // and we have nothing to update |
| 204 | assert(_builder->get_new_loc(orig_ref) == NULL || |
| 205 | _builder->get_new_loc(orig_ref) == orig_ref->obj(), "must be" ); |
| 206 | return false; |
| 207 | } |
| 208 | |
| 209 | if (mode == set_to_null) { |
| 210 | // We did not make a copy of this object |
| 211 | // and we have nothing to update |
| 212 | assert(!_builder->has_new_loc(orig_ref->obj()), "must not be copied or pointed to" ); |
| 213 | return false; |
| 214 | } |
| 215 | |
| 216 | // - orig_obj points to the original object. |
| 217 | // - new_obj points to the shallow copy (created by ShallowCopier) |
| 218 | // of orig_obj. new_obj is NULL if the orig_obj is excluded |
| 219 | address orig_obj = orig_ref->obj(); |
| 220 | address new_obj = _builder->get_new_loc(orig_ref); |
| 221 | |
| 222 | assert(new_obj != orig_obj, "must be" ); |
| 223 | #ifdef ASSERT |
| 224 | if (new_obj == NULL) { |
| 225 | if (orig_ref->msotype() == MetaspaceObj::ClassType) { |
| 226 | Klass* k = (Klass*)orig_obj; |
| 227 | assert(k->is_instance_klass() && |
| 228 | SystemDictionaryShared::is_excluded_class(InstanceKlass::cast(k)), |
| 229 | "orig_obj must be excluded Class" ); |
| 230 | } |
| 231 | } |
| 232 | #endif |
| 233 | |
| 234 | log_debug(cds, dynamic)("Relocating " PTR_FORMAT " %s" , p2i(new_obj), |
| 235 | MetaspaceObj::type_name(orig_ref->msotype())); |
| 236 | if (new_obj != NULL) { |
| 237 | EmbeddedRefUpdater updater(_builder, orig_obj, new_obj); |
| 238 | orig_ref->metaspace_pointers_do(&updater); |
| 239 | } |
| 240 | |
| 241 | return true; // keep recursing until every object is visited exactly once. |
| 242 | } |
| 243 | }; |
| 244 | |
| 245 | class EmbeddedRefUpdater: public MetaspaceClosure { |
| 246 | DynamicArchiveBuilder* _builder; |
| 247 | address _orig_obj; |
| 248 | address _new_obj; |
| 249 | public: |
| 250 | EmbeddedRefUpdater(DynamicArchiveBuilder* shuffler, address orig_obj, address new_obj) : |
| 251 | _builder(shuffler), _orig_obj(orig_obj), _new_obj(new_obj) {} |
| 252 | |
| 253 | // This method gets called once for each pointer field F of orig_obj. |
| 254 | // We update new_obj->F to point to the new location of orig_obj->F. |
| 255 | // |
| 256 | // Example: Klass* 0x100 is copied to 0x400 |
| 257 | // Symbol* 0x200 is copied to 0x500 |
| 258 | // |
| 259 | // Let orig_obj == 0x100; and |
| 260 | // new_obj == 0x400; and |
| 261 | // ((Klass*)orig_obj)->_name == 0x200; |
| 262 | // Then this function effectively assigns |
| 263 | // ((Klass*)new_obj)->_name = 0x500; |
| 264 | virtual bool do_ref(Ref* ref, bool read_only) { |
| 265 | address new_pointee = NULL; |
| 266 | |
| 267 | if (ref->not_null()) { |
| 268 | address old_pointee = ref->obj(); |
| 269 | |
| 270 | FollowMode mode = _builder->follow_ref(ref); |
| 271 | if (mode == point_to_it) { |
| 272 | new_pointee = old_pointee; |
| 273 | } else if (mode == set_to_null) { |
| 274 | new_pointee = NULL; |
| 275 | } else { |
| 276 | new_pointee = _builder->get_new_loc(old_pointee); |
| 277 | } |
| 278 | } |
| 279 | |
| 280 | const char* kind = MetaspaceObj::type_name(ref->msotype()); |
| 281 | // offset of this field inside the original object |
| 282 | intx offset = (address)ref->addr() - _orig_obj; |
| 283 | _builder->update_pointer((address*)(_new_obj + offset), new_pointee, kind, offset); |
| 284 | |
| 285 | // We can't mark the pointer here, because DynamicArchiveBuilder::sort_methods |
| 286 | // may re-layout the [iv]tables, which would change the offset(s) in an InstanceKlass |
| 287 | // that would contain pointers. Therefore, we must mark the pointers after |
| 288 | // sort_methods(), using PointerMarker. |
| 289 | return false; // Do not recurse. |
| 290 | } |
| 291 | }; |
| 292 | |
| 293 | class ExternalRefUpdater: public MetaspaceClosure { |
| 294 | DynamicArchiveBuilder* _builder; |
| 295 | |
| 296 | public: |
| 297 | ExternalRefUpdater(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {} |
| 298 | |
| 299 | virtual bool do_ref(Ref* ref, bool read_only) { |
| 300 | // ref is a pointer that lives OUTSIDE of the buffer, but points to an object inside the buffer |
| 301 | if (ref->not_null()) { |
| 302 | address new_loc = _builder->get_new_loc(ref); |
| 303 | const char* kind = MetaspaceObj::type_name(ref->msotype()); |
| 304 | _builder->update_pointer(ref->addr(), new_loc, kind, 0); |
| 305 | _builder->mark_pointer(ref->addr()); |
| 306 | } |
| 307 | return false; // Do not recurse. |
| 308 | } |
| 309 | }; |
| 310 | |
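// PointerMarker and EmbeddedRefMarker record in _ptrmap the location of every
// word in the buffer space that holds a pointer to another buffered object.
// This marking runs only after sort_methods() (see the comment at the end of
// EmbeddedRefUpdater::do_ref()); the recorded bits drive the final
// RelocateBufferToTarget pass.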
| 311 | class PointerMarker: public UniqueMetaspaceClosure { |
| 312 | DynamicArchiveBuilder* _builder; |
| 313 | |
| 314 | public: |
| 315 | PointerMarker(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {} |
| 316 | |
| 317 | virtual bool do_unique_ref(Ref* ref, bool read_only) { |
| 318 | if (_builder->is_in_buffer_space(ref->obj())) { |
| 319 | EmbeddedRefMarker ref_marker(_builder); |
| 320 | ref->metaspace_pointers_do(&ref_marker); |
| 321 | return true; // keep recursing until every buffered object is visited exactly once. |
| 322 | } else { |
| 323 | return false; |
| 324 | } |
| 325 | } |
| 326 | }; |
| 327 | |
| 328 | class EmbeddedRefMarker: public MetaspaceClosure { |
| 329 | DynamicArchiveBuilder* _builder; |
| 330 | |
| 331 | public: |
| 332 | EmbeddedRefMarker(DynamicArchiveBuilder* shuffler) : _builder(shuffler) {} |
| 333 | virtual bool do_ref(Ref* ref, bool read_only) { |
| 334 | if (ref->not_null() && _builder->is_in_buffer_space(ref->obj())) { |
| 335 | _builder->mark_pointer(ref->addr()); |
| 336 | } |
| 337 | return false; // Do not recurse. |
| 338 | } |
| 339 | }; |
| 340 | |
| 341 | void update_pointer(address* addr, address value, const char* kind, uintx offset, bool is_mso_pointer=true) { |
| 342 | // Propagate the mask bits to the new value -- see comments above MetaspaceClosure::obj() |
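// For example (values are illustrative): if *addr currently holds
// 0x00007f0000001001 -- i.e., the low tag bit 0x1 is set -- and the new
// location is 0x00007f0000002000, the value stored below is
// 0x00007f0000002001, preserving the tag in the low two bits.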
| 343 | if (is_mso_pointer) { |
| 344 | const uintx FLAG_MASK = 0x03; |
| 345 | uintx mask_bits = uintx(*addr) & FLAG_MASK; |
| 346 | value = (address)(uintx(value) | mask_bits); |
| 347 | } |
| 348 | |
| 349 | if (*addr != value) { |
| 350 | log_debug(cds, dynamic)("Update (%18s*) %3d [" PTR_FORMAT "] " PTR_FORMAT " -> " PTR_FORMAT, |
| 351 | kind, int(offset), p2i(addr), p2i(*addr), p2i(value)); |
| 352 | *addr = value; |
| 353 | } |
| 354 | } |
| 355 | |
| 356 | private: |
| 357 | GrowableArray<Symbol*>* _symbols; // symbols to dump |
| 358 | GrowableArray<InstanceKlass*>* _klasses; // klasses to dump |
| 359 | |
| 360 | void append(InstanceKlass* k) { _klasses->append(k); } |
| 361 | void append(Symbol* s) { _symbols->append(s); } |
| 362 | |
| 363 | class GatherKlassesAndSymbols : public UniqueMetaspaceClosure { |
| 364 | DynamicArchiveBuilder* _builder; |
| 365 | bool _read_only; |
| 366 | |
| 367 | public: |
| 368 | GatherKlassesAndSymbols(DynamicArchiveBuilder* builder) |
| 369 | : _builder(builder) {} |
| 370 | |
| 371 | virtual bool do_unique_ref(Ref* ref, bool read_only) { |
| 372 | if (_builder->follow_ref(ref) != make_a_copy) { |
| 373 | return false; |
| 374 | } |
| 375 | if (ref->msotype() == MetaspaceObj::ClassType) { |
| 376 | Klass* klass = (Klass*)ref->obj(); |
| 377 | assert(klass->is_klass(), "must be" ); |
| 378 | if (klass->is_instance_klass()) { |
| 379 | InstanceKlass* ik = InstanceKlass::cast(klass); |
| 380 | assert(!SystemDictionaryShared::is_excluded_class(ik), "must be" ); |
| 381 | _builder->append(ik); |
| 382 | _builder->_estimated_metaspaceobj_bytes += BytesPerWord; // See RunTimeSharedClassInfo::get_for() |
| 383 | } |
| 384 | } else if (ref->msotype() == MetaspaceObj::SymbolType) { |
| 385 | _builder->append((Symbol*)ref->obj()); |
| 386 | } |
| 387 | |
| 388 | int bytes = ref->size() * BytesPerWord; |
| 389 | _builder->_estimated_metaspaceobj_bytes += bytes; |
| 390 | |
| 391 | return true; |
| 392 | } |
| 393 | }; |
| 394 | |
| 395 | FollowMode follow_ref(MetaspaceClosure::Ref *ref) { |
| 396 | address obj = ref->obj(); |
| 397 | if (MetaspaceShared::is_in_shared_metaspace(obj)) { |
| 398 | // Don't dump existing shared metadata again. |
| 399 | return point_to_it; |
| 400 | } else if (ref->msotype() == MetaspaceObj::MethodDataType) { |
| 401 | return set_to_null; |
| 402 | } else { |
| 403 | if (ref->msotype() == MetaspaceObj::ClassType) { |
| 404 | Klass* klass = (Klass*)ref->obj(); |
| 405 | assert(klass->is_klass(), "must be" ); |
| 406 | if (klass->is_instance_klass()) { |
| 407 | InstanceKlass* ik = InstanceKlass::cast(klass); |
| 408 | if (SystemDictionaryShared::is_excluded_class(ik)) { |
| 409 | ResourceMark rm; |
| 410 | log_debug(cds, dynamic)("Skipping class (excluded): %s" , klass->external_name()); |
| 411 | return set_to_null; |
| 412 | } |
| 413 | } else if (klass->is_array_klass()) { |
| 414 | // Don't support archiving of array klasses for now. |
| 415 | ResourceMark rm; |
| 416 | log_debug(cds, dynamic)("Skipping class (array): %s" , klass->external_name()); |
| 417 | return set_to_null; |
| 418 | } |
| 419 | } |
| 420 | |
| 421 | return make_a_copy; |
| 422 | } |
| 423 | } |
| 424 | |
| 425 | address copy_impl(MetaspaceClosure::Ref* ref, bool read_only, int bytes) { |
| 426 | if (ref->msotype() == MetaspaceObj::ClassType) { |
| 427 | // Save a pointer immediate in front of an InstanceKlass, so |
| 428 | // we can do a quick lookup from InstanceKlass* -> RunTimeSharedClassInfo* |
| 429 | // without building another hashtable. See RunTimeSharedClassInfo::get_for() |
| 430 | // in systemDictionaryShared.cpp. |
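// The resulting buffer layout is:
//   [ one word, later set to the klass's RunTimeSharedClassInfo* ][ InstanceKlass ... ]
// where the address returned by this function is that of the InstanceKlass copy.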
| 431 | address obj = ref->obj(); |
| 432 | Klass* klass = (Klass*)obj; |
| 433 | if (klass->is_instance_klass()) { |
| 434 | SystemDictionaryShared::validate_before_archiving(InstanceKlass::cast(klass)); |
| 435 | current_dump_space()->allocate(sizeof(address), BytesPerWord); |
| 436 | } |
| 437 | } |
| 438 | address p = (address)current_dump_space()->allocate(bytes); |
| 439 | address obj = ref->obj(); |
| 440 | log_debug(cds, dynamic)("COPY: " PTR_FORMAT " ==> " PTR_FORMAT " %5d %s" , |
| 441 | p2i(obj), p2i(p), bytes, |
| 442 | MetaspaceObj::type_name(ref->msotype())); |
| 443 | memcpy(p, obj, bytes); |
| 444 | |
| 445 | intptr_t* cloned_vtable = MetaspaceShared::fix_cpp_vtable_for_dynamic_archive(ref->msotype(), p); |
| 446 | if (cloned_vtable != NULL) { |
| 447 | update_pointer((address*)p, (address)cloned_vtable, "vtb" , 0, /*is_mso_pointer*/false); |
| 448 | } |
| 449 | |
| 450 | return (address)p; |
| 451 | } |
| 452 | |
| 453 | DynamicArchiveHeader* _header; |
| 454 | address _alloc_bottom; |
| 455 | address _last_verified_top; |
| 456 | size_t _other_region_used_bytes; |
| 457 | |
| 458 | // Conservative estimate for number of bytes needed for: |
| 459 | size_t _estimated_metaspaceobj_bytes; // all archived MetaspaceObj's. |
| 460 | size_t _estimated_hashtable_bytes; // symbol table and dictionaries |
| 461 | size_t _estimated_trampoline_bytes; // method entry trampolines |
| 462 | |
| 463 | size_t estimate_archive_size(); |
| 464 | size_t estimate_trampoline_size(); |
| 465 | size_t estimate_class_file_size(); |
| 466 | address reserve_space_and_init_buffer_to_target_delta(); |
| 467 | void init_header(address addr); |
| 468 | void make_trampolines(); |
| 469 | void make_klasses_shareable(); |
| 470 | void sort_methods(InstanceKlass* ik) const; |
| 471 | void set_symbols_permanent(); |
| 472 | void relocate_buffer_to_target(); |
| 473 | void write_archive(char* read_only_tables_start); |
| 474 | |
| 475 | void init_first_dump_space(address reserved_bottom) { |
| 476 | address first_space_base = reserved_bottom; |
| 477 | DumpRegion* rw_space = MetaspaceShared::read_write_dump_space(); |
| 478 | MetaspaceShared::init_shared_dump_space(rw_space, first_space_base); |
| 479 | _current_dump_space = rw_space; |
| 480 | _last_verified_top = first_space_base; |
| 481 | _num_dump_regions_used = 1; |
| 482 | } |
| 483 | |
| 484 | public: |
| 485 | DynamicArchiveBuilder() { |
| 486 | _klasses = new (ResourceObj::C_HEAP, mtClass) GrowableArray<InstanceKlass*>(100, true, mtInternal); |
| 487 | _symbols = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Symbol*>(1000, true, mtInternal); |
| 488 | |
| 489 | _estimated_metaspaceobj_bytes = 0; |
| 490 | _estimated_hashtable_bytes = 0; |
| 491 | _estimated_trampoline_bytes = 0; |
| 492 | |
| 493 | _num_dump_regions_used = 0; |
| 494 | } |
| 495 | |
| 496 | void start_dump_space(DumpRegion* next) { |
| 497 | address bottom = _last_verified_top; |
| 498 | address top = (address)(current_dump_space()->top()); |
| 499 | _other_region_used_bytes += size_t(top - bottom); |
| 500 | |
| 501 | MetaspaceShared::pack_dump_space(current_dump_space(), next, MetaspaceShared::shared_rs()); |
| 502 | _current_dump_space = next; |
| 503 | _num_dump_regions_used ++; |
| 504 | |
| 505 | _last_verified_top = (address)(current_dump_space()->top()); |
| 506 | } |
| 507 | |
| 508 | void verify_estimate_size(size_t estimate, const char* which) { |
| 509 | address bottom = _last_verified_top; |
| 510 | address top = (address)(current_dump_space()->top()); |
| 511 | size_t used = size_t(top - bottom) + _other_region_used_bytes; |
| 512 | int diff = int(estimate) - int(used); |
| 513 | |
| 514 | log_info(cds)("%s estimate = " SIZE_FORMAT " used = " SIZE_FORMAT "; diff = %d bytes" , which, estimate, used, diff); |
| 515 | assert(diff >= 0, "Estimate is too small" ); |
| 516 | |
| 517 | _last_verified_top = top; |
| 518 | _other_region_used_bytes = 0; |
| 519 | } |
| 520 | |
| 521 | // Do this before and after the archive dump to see if any corruption |
| 522 | // is caused by dynamic dumping. |
| 523 | void verify_universe(const char* info) { |
| 524 | if (VerifyBeforeExit) { |
| 525 | log_info(cds)("Verify %s" , info); |
| 526 | HandleMark hm; |
| 527 | // Among other things, this ensures that Eden top is correct. |
| 528 | Universe::heap()->prepare_for_verify(); |
| 529 | Universe::verify(info); |
| 530 | } |
| 531 | } |
| 532 | |
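// Top-level driver for dumping the dynamic archive. The overall flow is:
//   1. Gather the klasses and symbols to archive (GatherKlassesAndSymbols).
//   2. Reserve the output buffer and initialize the header.
//   3. Shallow-copy RW objects, then RO objects, into the buffer (ShallowCopier).
//   4. Relocate pointers embedded in the copies (ShallowCopyEmbeddedRefRelocator)
//      and external pointers into the buffer (ExternalRefUpdater).
//   5. Write the symbol table and dictionaries, then the method trampolines.
//   6. Mark every buffered pointer (PointerMarker) and relocate the buffer to its
//      target addresses (relocate_buffer_to_target).
//   7. Write the archive file.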
| 533 | void doit() { |
| 534 | verify_universe("Before CDS dynamic dump" ); |
| 535 | DEBUG_ONLY(SystemDictionaryShared::NoClassLoadingMark nclm); |
| 536 | SystemDictionaryShared::check_excluded_classes(); |
| 537 | |
| 538 | { |
| 539 | ResourceMark rm; |
| 540 | GatherKlassesAndSymbols gatherer(this); |
| 541 | |
| 542 | SystemDictionaryShared::dumptime_classes_do(&gatherer); |
| 543 | SymbolTable::metaspace_pointers_do(&gatherer); |
| 544 | FileMapInfo::metaspace_pointers_do(&gatherer); |
| 545 | |
| 546 | gatherer.finish(); |
| 547 | } |
| 548 | |
| 549 | // rw space starts ... |
| 550 | address reserved_bottom = reserve_space_and_init_buffer_to_target_delta(); |
| 551 | init_header(reserved_bottom); |
| 552 | |
| 553 | verify_estimate_size(sizeof(DynamicArchiveHeader), "header" ); |
| 554 | |
| 555 | log_info(cds, dynamic)("Copying %d klasses and %d symbols" , |
| 556 | _klasses->length(), _symbols->length()); |
| 557 | |
| 558 | { |
| 559 | assert(current_dump_space() == MetaspaceShared::read_write_dump_space(), |
| 560 | "Current dump space is not rw space" ); |
| 561 | // shallow-copy RW objects, if necessary |
| 562 | ResourceMark rm; |
| 563 | ShallowCopier rw_copier(this, false); |
| 564 | iterate_roots(&rw_copier); |
| 565 | } |
| 566 | |
| 567 | // ro space starts ... |
| 568 | DumpRegion* ro_space = MetaspaceShared::read_only_dump_space(); |
| 569 | { |
| 570 | start_dump_space(ro_space); |
| 571 | |
| 572 | // shallow-copy RO objects, if necessary |
| 573 | ResourceMark rm; |
| 574 | ShallowCopier ro_copier(this, true); |
| 575 | iterate_roots(&ro_copier); |
| 576 | } |
| 577 | |
| 578 | size_t bitmap_size = pointer_delta(current_dump_space()->top(), |
| 579 | _alloc_bottom, sizeof(address)); |
| 580 | _ptrmap.initialize(bitmap_size); |
| 581 | |
| 582 | { |
| 583 | log_info(cds)("Relocating embedded pointers ... " ); |
| 584 | ResourceMark rm; |
| 585 | ShallowCopyEmbeddedRefRelocator emb_reloc(this); |
| 586 | iterate_roots(&emb_reloc); |
| 587 | } |
| 588 | |
| 589 | { |
| 590 | log_info(cds)("Relocating external roots ... " ); |
| 591 | ResourceMark rm; |
| 592 | ExternalRefUpdater ext_reloc(this); |
| 593 | iterate_roots(&ext_reloc); |
| 594 | } |
| 595 | |
| 596 | verify_estimate_size(_estimated_metaspaceobj_bytes, "MetaspaceObjs"); |
| 597 | |
| 598 | char* read_only_tables_start; |
| 599 | { |
| 600 | set_symbols_permanent(); |
| 601 | |
| 602 | // Write the symbol table and system dictionaries to the RO space. |
| 603 | // Note that these tables still point to the *original* objects |
| 604 | // (because they were not processed by ExternalRefUpdater), so |
| 605 | // they would need to call DynamicArchive::original_to_target() to |
| 606 | // get the correct addresses. |
| 607 | assert(current_dump_space() == ro_space, "Must be RO space" ); |
| 608 | SymbolTable::write_to_archive(false); |
| 609 | SystemDictionaryShared::write_to_archive(false); |
| 610 | |
| 611 | read_only_tables_start = ro_space->top(); |
| 612 | WriteClosure wc(ro_space); |
| 613 | SymbolTable::serialize_shared_table_header(&wc, false); |
| 614 | SystemDictionaryShared::serialize_dictionary_headers(&wc, false); |
| 615 | } |
| 616 | |
| 617 | verify_estimate_size(_estimated_hashtable_bytes, "Hashtables" ); |
| 618 | |
| 619 | // mc space starts ... |
| 620 | { |
| 621 | start_dump_space(MetaspaceShared::misc_code_dump_space()); |
| 622 | make_trampolines(); |
| 623 | } |
| 624 | |
| 625 | verify_estimate_size(_estimated_trampoline_bytes, "Trampolines" ); |
| 626 | |
| 627 | make_klasses_shareable(); |
| 628 | |
| 629 | { |
| 630 | log_info(cds)("Final relocation of pointers ... " ); |
| 631 | ResourceMark rm; |
| 632 | PointerMarker marker(this); |
| 633 | iterate_roots(&marker); |
| 634 | relocate_buffer_to_target(); |
| 635 | } |
| 636 | |
| 637 | write_archive(read_only_tables_start); |
| 638 | |
| 639 | assert(_num_dump_regions_used == _total_dump_regions, "must be" ); |
| 640 | verify_universe("After CDS dynamic dump" ); |
| 641 | } |
| 642 | |
| 643 | void iterate_roots(MetaspaceClosure* it) { |
| 644 | int i; |
| 645 | int num_klasses = _klasses->length(); |
| 646 | for (i = 0; i < num_klasses; i++) { |
| 647 | it->push(&_klasses->at(i)); |
| 648 | } |
| 649 | |
| 650 | int num_symbols = _symbols->length(); |
| 651 | for (i = 0; i < num_symbols; i++) { |
| 652 | it->push(&_symbols->at(i)); |
| 653 | } |
| 654 | |
| 655 | _header->_shared_path_table.metaspace_pointers_do(it); |
| 656 | |
| 657 | // Do not call these again, as we have already collected all the classes and symbols |
| 658 | // that we want to archive. Also, these calls would corrupt the tables when |
| 659 | // ExternalRefUpdater is used. |
| 660 | // |
| 661 | // SystemDictionaryShared::dumptime_classes_do(it); |
| 662 | // SymbolTable::metaspace_pointers_do(it); |
| 663 | |
| 664 | it->finish(); |
| 665 | } |
| 666 | }; |
| 667 | |
| 668 | size_t DynamicArchiveBuilder::estimate_archive_size() { |
| 669 | // size of the symbol table and two dictionaries, plus the RunTimeSharedClassInfo's |
| 670 | _estimated_hashtable_bytes = 0; |
| 671 | _estimated_hashtable_bytes += SymbolTable::estimate_size_for_archive(); |
| 672 | _estimated_hashtable_bytes += SystemDictionaryShared::estimate_size_for_archive(); |
| 673 | |
| 674 | _estimated_trampoline_bytes = estimate_trampoline_size(); |
| 675 | |
| 676 | size_t total = 0; |
| 677 | |
| 678 | total += _estimated_metaspaceobj_bytes; |
| 679 | total += _estimated_hashtable_bytes; |
| 680 | total += _estimated_trampoline_bytes; |
| 681 | |
| 682 | // allow fragmentation at the end of each dump region |
| 683 | total += _total_dump_regions * reserve_alignment(); |
| 684 | |
| 685 | return align_up(total, reserve_alignment()); |
| 686 | } |
| 687 | |
| 688 | address DynamicArchiveBuilder::reserve_space_and_init_buffer_to_target_delta() { |
| 689 | size_t total = estimate_archive_size(); |
| 690 | bool large_pages = false; // No large pages when dumping the CDS archive. |
| 691 | size_t increment = align_up(1*G, reserve_alignment()); |
| 692 | char* addr = (char*)align_up(CompressedKlassPointers::base() + MetaspaceSize + increment, |
| 693 | reserve_alignment()); |
| 694 | |
| 695 | ReservedSpace* rs = MetaspaceShared::reserve_shared_rs( |
| 696 | total, reserve_alignment(), large_pages, addr); |
| 697 | while (!rs->is_reserved() && (addr + increment > addr)) { |
| 698 | addr += increment; |
| 699 | rs = MetaspaceShared::reserve_shared_rs( |
| 700 | total, reserve_alignment(), large_pages, addr); |
| 701 | } |
| 702 | if (!rs->is_reserved()) { |
| 703 | log_error(cds, dynamic)("Failed to reserve %d bytes of output buffer." , (int)total); |
| 704 | vm_direct_exit(0); |
| 705 | } |
| 706 | |
| 707 | address buffer_base = (address)rs->base(); |
| 708 | log_info(cds, dynamic)("Reserved output buffer space at : " PTR_FORMAT " [%d bytes]" , |
| 709 | p2i(buffer_base), (int)total); |
| 710 | |
| 711 | // At run time, we will mmap the dynamic archive at target_space_bottom. |
| 712 | // However, at dump time, we may not be able to write into the target_space, |
| 713 | // as it's occupied by dynamically loaded Klasses. So we allocate a buffer |
| 714 | // at an arbitrary location chosen by the OS. We will write all the dynamically |
| 715 | // archived classes into this buffer. At the final stage of dumping, we relocate |
| 716 | // all pointers that are inside the buffer_space to point to their (runtime) |
| 717 | // target location inside the target_space. |
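//
// For example (addresses are illustrative): if the OS places the buffer at
// 0x0000000900000000 and the base archive ends at 0x0000000801000000, then
// target_space_bottom == 0x0000000801000000 and _buffer_to_target_delta ==
// 0x0000000801000000 - 0x0000000900000000 (a negative value), so
// to_target(p) == p + _buffer_to_target_delta for any p inside the buffer.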
| 718 | address target_space_bottom = |
| 719 | (address)align_up(MetaspaceShared::shared_metaspace_top(), reserve_alignment()); |
| 720 | _buffer_to_target_delta = intx(target_space_bottom) - intx(buffer_base); |
| 721 | |
| 722 | log_info(cds, dynamic)("Target archive space at : " PTR_FORMAT, p2i(target_space_bottom)); |
| 723 | log_info(cds, dynamic)("Buffer-space to target-space delta : " PTR_FORMAT, p2i((address)_buffer_to_target_delta)); |
| 724 | |
| 725 | return buffer_base; |
| 726 | } |
| 727 | |
| 728 | void DynamicArchiveBuilder::init_header(address reserved_bottom) { |
| 729 | _alloc_bottom = reserved_bottom; |
| 730 | _last_verified_top = reserved_bottom; |
| 731 | _other_region_used_bytes = 0; |
| 732 | |
| 733 | init_first_dump_space(reserved_bottom); |
| 734 | |
| 735 | FileMapInfo* mapinfo = new FileMapInfo(false); |
| 736 | _header = (DynamicArchiveHeader*)mapinfo->_header; |
| 737 | |
| 738 | Thread* THREAD = Thread::current(); |
| 739 | FileMapInfo* base_info = FileMapInfo::current_info(); |
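// _base_archive_crc[0] records the base archive's header crc; the following
// MetaspaceShared::n_regions entries record the crc of each base archive region.
// DynamicArchive::validate() checks these against the base archive that is
// mapped at run time.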
| 740 | int* crc = _header->_base_archive_crc; |
| 741 | *crc++ = base_info->crc(); // base archive header crc |
| 742 | for (int i = 0; i < MetaspaceShared::n_regions; i++) { |
| 743 | *crc++ = base_info->space_crc(i); |
| 744 | } |
| 745 | _header->populate(base_info, os::vm_allocation_granularity()); |
| 746 | } |
| 747 | |
| 748 | size_t DynamicArchiveBuilder::estimate_trampoline_size() { |
| 749 | size_t total = 0; |
| 750 | size_t each_method_bytes = |
| 751 | align_up(SharedRuntime::trampoline_size(), BytesPerWord) + |
| 752 | align_up(sizeof(AdapterHandlerEntry*), BytesPerWord); |
| 753 | |
| 754 | for (int i = 0; i < _klasses->length(); i++) { |
| 755 | InstanceKlass* ik = _klasses->at(i); |
| 756 | Array<Method*>* methods = ik->methods(); |
| 757 | total += each_method_bytes * methods->length(); |
| 758 | } |
| 759 | if (total == 0) { |
| 760 | // We have nothing to archive, but let's avoid having an empty region. |
| 761 | total = SharedRuntime::trampoline_size(); |
| 762 | } |
| 763 | return total; |
| 764 | } |
| 765 | |
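// For every archived method, carve out space in the mc region for a c2i entry
// trampoline and an AdapterHandlerEntry* slot, and make the method's entry
// points refer to their target-space addresses. The actual trampoline code and
// adapter are filled in when the archive is used at run time.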
| 766 | void DynamicArchiveBuilder::make_trampolines() { |
| 767 | for (int i = 0; i < _klasses->length(); i++) { |
| 768 | InstanceKlass* ik = _klasses->at(i); |
| 769 | Array<Method*>* methods = ik->methods(); |
| 770 | for (int j = 0; j < methods->length(); j++) { |
| 771 | Method* m = methods->at(j); |
| 772 | address c2i_entry_trampoline = |
| 773 | (address)MetaspaceShared::misc_code_space_alloc(SharedRuntime::trampoline_size()); |
| 774 | m->set_from_compiled_entry(to_target(c2i_entry_trampoline)); |
| 775 | AdapterHandlerEntry** adapter_trampoline = |
| 776 | (AdapterHandlerEntry**)MetaspaceShared::misc_code_space_alloc(sizeof(AdapterHandlerEntry*)); |
| 777 | *adapter_trampoline = NULL; |
| 778 | m->set_adapter_trampoline(to_target(adapter_trampoline)); |
| 779 | } |
| 780 | } |
| 781 | |
| 782 | if (MetaspaceShared::misc_code_dump_space()->used() == 0) { |
| 783 | // We have nothing to archive, but let's avoid having an empty region. |
| 784 | MetaspaceShared::misc_code_space_alloc(SharedRuntime::trampoline_size()); |
| 785 | } |
| 786 | } |
| 787 | |
| 788 | void DynamicArchiveBuilder::make_klasses_shareable() { |
| 789 | int i, count = _klasses->length(); |
| 790 | |
| 791 | for (i = 0; i < count; i++) { |
| 792 | InstanceKlass* ik = _klasses->at(i); |
| 793 | sort_methods(ik); |
| 794 | } |
| 795 | |
| 796 | for (i = 0; i < count; i++) { |
| 797 | InstanceKlass* ik = _klasses->at(i); |
| 798 | ClassLoaderData *cld = ik->class_loader_data(); |
| 799 | if (cld->is_boot_class_loader_data()) { |
| 800 | ik->set_class_loader_type(ClassLoader::BOOT_LOADER); |
| 801 | } |
| 802 | else if (cld->is_platform_class_loader_data()) { |
| 803 | ik->set_class_loader_type(ClassLoader::PLATFORM_LOADER); |
| 804 | } |
| 805 | else if (cld->is_system_class_loader_data()) { |
| 806 | ik->set_class_loader_type(ClassLoader::APP_LOADER); |
| 807 | } |
| 808 | |
| 809 | MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(ik); |
| 810 | ik->remove_unshareable_info(); |
| 811 | |
| 812 | assert(ik->array_klasses() == NULL, "sanity" ); |
| 813 | |
| 814 | if (log_is_enabled(Debug, cds, dynamic)) { |
| 815 | ResourceMark rm; |
| 816 | log_debug(cds, dynamic)("klasses[%4i] = " PTR_FORMAT " %s" , i, p2i(to_target(ik)), ik->external_name()); |
| 817 | } |
| 818 | } |
| 819 | } |
| 820 | |
| 821 | // The address order of the copied Symbols may be different than when the original |
| 822 | // klasses were created. Re-sort all the tables. See Method::sort_methods(). |
| 823 | void DynamicArchiveBuilder::sort_methods(InstanceKlass* ik) const { |
| 824 | assert(ik != NULL, "DynamicArchiveBuilder currently doesn't support dumping the base archive" ); |
| 825 | if (MetaspaceShared::is_in_shared_metaspace(ik)) { |
| 826 | // We have reached a supertype that's already in the base archive |
| 827 | return; |
| 828 | } |
| 829 | |
| 830 | if (ik->java_mirror() == NULL) { |
| 831 | // NULL mirror means this class has already been visited and methods are already sorted |
| 832 | return; |
| 833 | } |
| 834 | ik->remove_java_mirror(); |
| 835 | |
| 836 | if (log_is_enabled(Debug, cds, dynamic)) { |
| 837 | ResourceMark rm; |
| 838 | log_debug(cds, dynamic)("sorting methods for " PTR_FORMAT " %s" , p2i(to_target(ik)), ik->external_name()); |
| 839 | } |
| 840 | |
| 841 | // Make sure all supertypes have been sorted |
| 842 | sort_methods(ik->java_super()); |
| 843 | Array<InstanceKlass*>* interfaces = ik->local_interfaces(); |
| 844 | int len = interfaces->length(); |
| 845 | for (int i = 0; i < len; i++) { |
| 846 | sort_methods(interfaces->at(i)); |
| 847 | } |
| 848 | |
| 849 | #ifdef ASSERT |
| 850 | { |
| 851 | for (int m = 0; m < ik->methods()->length(); m++) { |
| 852 | Symbol* name = ik->methods()->at(m)->name(); |
| 853 | assert(MetaspaceShared::is_in_shared_metaspace(name) || is_in_buffer_space(name), "must be" ); |
| 854 | } |
| 855 | } |
| 856 | #endif |
| 857 | |
| 858 | Thread* THREAD = Thread::current(); |
| 859 | Method::sort_methods(ik->methods()); |
| 860 | if (ik->default_methods() != NULL) { |
| 861 | Method::sort_methods(ik->default_methods(), /*set_idnums=*/false); |
| 862 | } |
| 863 | ik->vtable().initialize_vtable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail" ); |
| 864 | ik->itable().initialize_itable(true, THREAD); assert(!HAS_PENDING_EXCEPTION, "cannot fail" ); |
| 865 | } |
| 866 | |
| 867 | void DynamicArchiveBuilder::set_symbols_permanent() { |
| 868 | int count = _symbols->length(); |
| 869 | for (int i=0; i<count; i++) { |
| 870 | Symbol* s = _symbols->at(i); |
| 871 | s->set_permanent(); |
| 872 | |
| 873 | if (log_is_enabled(Trace, cds, dynamic)) { |
| 874 | ResourceMark rm; |
| 875 | log_trace(cds, dynamic)("symbols[%4i] = " PTR_FORMAT " %s" , i, p2i(to_target(s)), s->as_quoted_ascii()); |
| 876 | } |
| 877 | } |
| 878 | } |
| 879 | |
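// Final relocation pass: each bit set in the builder's _ptrmap marks a word in
// the buffer that holds a pointer to another buffered object. Adding
// _buffer_to_target_delta makes that pointer refer to the object's runtime
// (target) location once the archive is mapped.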
| 880 | class RelocateBufferToTarget: public BitMapClosure { |
| 881 | DynamicArchiveBuilder *_builder; |
| 882 | address* _buffer_bottom; |
| 883 | intx _buffer_to_target_delta; |
| 884 | public: |
| 885 | RelocateBufferToTarget(DynamicArchiveBuilder* builder, address* bottom, intx delta) : |
| 886 | _builder(builder), _buffer_bottom(bottom), _buffer_to_target_delta(delta) {} |
| 887 | |
| 888 | bool do_bit(size_t offset) { |
| 889 | address* p = _buffer_bottom + offset; |
| 890 | assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space" ); |
| 891 | |
| 892 | address old_ptr = *p; |
| 893 | if (_builder->is_in_buffer_space(old_ptr)) { |
| 894 | address new_ptr = old_ptr + _buffer_to_target_delta; |
| 895 | log_trace(cds, dynamic)("Final patch: @%6d [" PTR_FORMAT " -> " PTR_FORMAT "] " PTR_FORMAT " => " PTR_FORMAT, |
| 896 | (int)offset, p2i(p), p2i(_builder->to_target(p)), |
| 897 | p2i(old_ptr), p2i(new_ptr)); |
| 898 | *p = new_ptr; |
| 899 | } |
| 900 | |
| 901 | return true; // keep iterating |
| 902 | } |
| 903 | }; |
| 904 | |
| 905 | |
| 906 | void DynamicArchiveBuilder::relocate_buffer_to_target() { |
| 907 | RelocateBufferToTarget patcher(this, (address*)_alloc_bottom, _buffer_to_target_delta); |
| 908 | _ptrmap.iterate(&patcher); |
| 909 | |
| 910 | Array<u8>* table = _header->_shared_path_table.table(); |
| 911 | table = to_target(table); |
| 912 | _header->_shared_path_table.set_table(table); |
| 913 | } |
| 914 | |
| 915 | static void write_archive_info(FileMapInfo* dynamic_info, DynamicArchiveHeader* header) { |
| 916 | dynamic_info->write_header(); |
| 917 | dynamic_info->align_file_position(); |
| 918 | dynamic_info->write_region(MetaspaceShared::rw, |
| 919 | MetaspaceShared::read_write_dump_space()->base(), |
| 920 | MetaspaceShared::read_write_dump_space()->used(), |
| 921 | /*read_only=*/false,/*allow_exec=*/false); |
| 922 | dynamic_info->write_region(MetaspaceShared::ro, |
| 923 | MetaspaceShared::read_only_dump_space()->base(), |
| 924 | MetaspaceShared::read_only_dump_space()->used(), |
| 925 | /*read_only=*/true, /*allow_exec=*/false); |
| 926 | dynamic_info->write_region(MetaspaceShared::mc, |
| 927 | MetaspaceShared::misc_code_dump_space()->base(), |
| 928 | MetaspaceShared::misc_code_dump_space()->used(), |
| 929 | /*read_only=*/false,/*allow_exec=*/true); |
| 930 | } |
| 931 | |
| 932 | void DynamicArchiveBuilder::write_archive(char* read_only_tables_start) { |
| 933 | int num_klasses = _klasses->length(); |
| 934 | int num_symbols = _symbols->length(); |
| 935 | |
| 936 | _header->_read_only_tables_start = to_target(read_only_tables_start); |
| 937 | |
| 938 | FileMapInfo* dynamic_info = FileMapInfo::dynamic_info(); |
| 939 | assert(dynamic_info != NULL, "Sanity" ); |
| 940 | |
| 941 | // Populate the file offsets, region crcs, etc. No data is written out. |
| 942 | write_archive_info(dynamic_info, _header); |
| 943 | |
| 944 | // the header will no longer change. Compute its crc. |
| 945 | dynamic_info->set_header_crc(dynamic_info->compute_header_crc()); |
| 946 | |
| 947 | // Now write the archived data including the file offsets. |
| 948 | const char* archive_name = Arguments::GetSharedDynamicArchivePath(); |
| 949 | dynamic_info->open_for_write(archive_name); |
| 950 | write_archive_info(dynamic_info, _header); |
| 951 | dynamic_info->close(); |
| 952 | |
| 953 | |
| 954 | address base = to_target(_alloc_bottom); |
| 955 | address top = address(current_dump_space()->top()) + _buffer_to_target_delta; |
| 956 | int file_size = int(top - base); |
| 957 | |
| 958 | log_info(cds, dynamic)("Written dynamic archive " PTR_FORMAT " - " PTR_FORMAT " [%d bytes header, %d bytes total]" , |
| 959 | p2i(base), p2i(top), (int)_header->_header_size, file_size); |
| 960 | log_info(cds, dynamic)("%d klasses; %d symbols" , num_klasses, num_symbols); |
| 961 | } |
| 962 | |
| 963 | |
| 964 | class VM_PopulateDynamicDumpSharedSpace: public VM_Operation { |
| 965 | DynamicArchiveBuilder* _builder; |
| 966 | public: |
| 967 | VM_PopulateDynamicDumpSharedSpace(DynamicArchiveBuilder* builder) : _builder(builder) {} |
| 968 | VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; } |
| 969 | void doit() { |
| 970 | ResourceMark rm; |
| 971 | if (SystemDictionaryShared::empty_dumptime_table()) { |
| 972 | log_warning(cds, dynamic)("There is no class to be included in the dynamic archive." ); |
| 973 | return; |
| 974 | } |
| 975 | if (AllowArchivingWithJavaAgent) { |
| 976 | warning("This archive was created with AllowArchivingWithJavaAgent. It should be used " |
| 977 | "for testing purposes only and should not be used in a production environment" ); |
| 978 | } |
| 979 | FileMapInfo::check_nonempty_dir_in_shared_path_table(); |
| 980 | |
| 981 | _builder->doit(); |
| 982 | } |
| 983 | }; |
| 984 | |
| 985 | |
| 986 | void DynamicArchive::dump() { |
| 987 | if (Arguments::GetSharedDynamicArchivePath() == NULL) { |
| 988 | log_warning(cds, dynamic)("SharedDynamicArchivePath is not specified" ); |
| 989 | return; |
| 990 | } |
| 991 | |
| 992 | DynamicArchiveBuilder builder; |
| 993 | _builder = &builder; |
| 994 | VM_PopulateDynamicDumpSharedSpace op(&builder); |
| 995 | VMThread::execute(&op); |
| 996 | _builder = NULL; |
| 997 | } |
| 998 | |
| 999 | address DynamicArchive::original_to_buffer_impl(address orig_obj) { |
| 1000 | assert(DynamicDumpSharedSpaces, "must be" ); |
| 1001 | address buff_obj = _builder->get_new_loc(orig_obj); |
| 1002 | assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive" ); |
| 1003 | assert(buff_obj != orig_obj, "call this only when you know orig_obj must be copied and not just referenced" ); |
| 1004 | assert(_builder->is_in_buffer_space(buff_obj), "must be" ); |
| 1005 | return buff_obj; |
| 1006 | } |
| 1007 | |
| 1008 | address DynamicArchive::buffer_to_target_impl(address buff_obj) { |
| 1009 | assert(DynamicDumpSharedSpaces, "must be" ); |
| 1010 | assert(_builder->is_in_buffer_space(buff_obj), "must be" ); |
| 1011 | return _builder->to_target(buff_obj); |
| 1012 | } |
| 1013 | |
| 1014 | address DynamicArchive::original_to_target_impl(address orig_obj) { |
| 1015 | assert(DynamicDumpSharedSpaces, "must be" ); |
| 1016 | if (MetaspaceShared::is_in_shared_metaspace(orig_obj)) { |
| 1017 | // This happens when the top archive points to a Symbol* in the base archive. |
| 1018 | return orig_obj; |
| 1019 | } |
| 1020 | address buff_obj = _builder->get_new_loc(orig_obj); |
| 1021 | assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive" ); |
| 1022 | if (buff_obj == orig_obj) { |
| 1023 | // We are storing a pointer to an original object into the dynamic buffer. E.g., |
| 1024 | // a Symbol* that is used by both the base and top archives. |
| 1025 | assert(MetaspaceShared::is_in_shared_metaspace(orig_obj), "must be" ); |
| 1026 | return orig_obj; |
| 1027 | } else { |
| 1028 | return _builder->to_target(buff_obj); |
| 1029 | } |
| 1030 | } |
| 1031 | |
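// Returns the offset of buff_obj's runtime (target) address from
// SharedBaseAddress; archived data structures store pointers in this delta form.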
| 1032 | uintx DynamicArchive::object_delta_uintx(void* buff_obj) { |
| 1033 | assert(DynamicDumpSharedSpaces, "must be" ); |
| 1034 | address target_obj = _builder->to_target_no_check(address(buff_obj)); |
| 1035 | assert(uintx(target_obj) >= SharedBaseAddress, "must be" ); |
| 1036 | return uintx(target_obj) - SharedBaseAddress; |
| 1037 | } |
| 1038 | |
| 1039 | bool DynamicArchive::is_in_target_space(void *obj) { |
| 1040 | assert(DynamicDumpSharedSpaces, "must be" ); |
| 1041 | return _builder->is_in_target_space(obj); |
| 1042 | } |
| 1043 | |
| 1044 | |
| 1045 | static DynamicArchiveHeader* _dynamic_header = NULL; |
| 1046 | DynamicArchiveBuilder* DynamicArchive::_builder = NULL; |
| 1047 | |
| 1048 | void DynamicArchive::map_failed(FileMapInfo* mapinfo) { |
| 1049 | if (mapinfo->_header != NULL) { |
| 1050 | os::free(mapinfo->_header); |
| 1051 | } |
| 1052 | delete mapinfo; |
| 1053 | } |
| 1054 | |
| 1055 | // Returns the top of the mapped address space |
| 1056 | address DynamicArchive::map() { |
| 1057 | assert(UseSharedSpaces, "Sanity" ); |
| 1058 | |
| 1059 | // Create the dynamic archive map info |
| 1060 | FileMapInfo* mapinfo; |
| 1061 | const char* filename = Arguments::GetSharedDynamicArchivePath(); |
| 1062 | struct stat st; |
| 1063 | address result; |
| 1064 | if ((filename != NULL) && (os::stat(filename, &st) == 0)) { |
| 1065 | mapinfo = new FileMapInfo(false); |
| 1066 | result = NULL; |
| 1067 | if (mapinfo->open_for_read(filename)) { |
| 1068 | result = map_impl(mapinfo); |
| 1069 | } |
| 1070 | if (result == NULL) { |
| 1071 | map_failed(mapinfo); |
| 1072 | mapinfo->restore_shared_path_table(); |
| 1073 | } |
| 1074 | } else { |
| 1075 | if (filename != NULL) { |
| 1076 | log_warning(cds, dynamic)("specified dynamic archive doesn't exist: %s" , filename); |
| 1077 | } |
| 1078 | result = NULL; |
| 1079 | } |
| 1080 | return result; |
| 1081 | } |
| 1082 | |
| 1083 | address DynamicArchive::map_impl(FileMapInfo* mapinfo) { |
| 1086 | // Read header |
| 1087 | if (!mapinfo->initialize(false)) { |
| 1088 | return NULL; |
| 1089 | } |
| 1090 | |
| 1091 | _dynamic_header = (DynamicArchiveHeader*)mapinfo->header(); |
| 1092 | |
| 1093 | int regions[] = {MetaspaceShared::rw, |
| 1094 | MetaspaceShared::ro, |
| 1095 | MetaspaceShared::mc}; |
| 1096 | |
| 1097 | size_t len = sizeof(regions)/sizeof(int); |
| 1098 | char* saved_base[] = {NULL, NULL, NULL}; |
| 1099 | char* top = mapinfo->map_regions(regions, saved_base, len); |
| 1100 | if (top == NULL) { |
| 1101 | mapinfo->unmap_regions(regions, saved_base, len); |
| 1102 | FileMapInfo::fail_continue("Unable to use dynamic archive. Failed map_region for using -Xshare:on." ); |
| 1103 | return NULL; |
| 1104 | } |
| 1105 | |
| 1106 | if (!validate(mapinfo)) { |
| 1107 | return NULL; |
| 1108 | } |
| 1109 | |
| 1110 | if (_dynamic_header == NULL) { |
| 1111 | return NULL; |
| 1112 | } |
| 1113 | |
| 1114 | intptr_t* buffer = (intptr_t*)_dynamic_header->_read_only_tables_start; |
| 1115 | ReadClosure rc(&buffer); |
| 1116 | SymbolTable::serialize_shared_table_header(&rc, false); |
| 1117 | SystemDictionaryShared::serialize_dictionary_headers(&rc, false); |
| 1118 | |
| 1119 | return (address)top; |
| 1120 | } |
| 1121 | |
| 1122 | bool DynamicArchive::validate(FileMapInfo* dynamic_info) { |
| 1123 | // Check if the recorded base archive matches with the current one |
| 1124 | FileMapInfo* base_info = FileMapInfo::current_info(); |
| 1125 | DynamicArchiveHeader* dynamic_header = (DynamicArchiveHeader*)dynamic_info->header(); |
| 1126 | int* crc = dynamic_header->_base_archive_crc; |
| 1127 | |
| 1128 | // Check the header crc |
| 1129 | if (*crc++ != base_info->crc()) { |
| 1130 | FileMapInfo::fail_continue("Archive header checksum verification failed." ); |
| 1131 | return false; |
| 1132 | } |
| 1133 | |
| 1134 | // Check each space's crc |
| 1135 | for (int i = 0; i < MetaspaceShared::n_regions; i++) { |
| 1136 | if (*crc++ != base_info->space_crc(i)) { |
| 1137 | FileMapInfo::fail_continue("Archive region #%d checksum verification failed." , i); |
| 1138 | return false; |
| 1139 | } |
| 1140 | } |
| 1141 | |
| 1142 | // Validate the dynamic archived shared path table, and set the global |
| 1143 | // _shared_path_table to that. |
| 1144 | if (!dynamic_info->validate_shared_path_table()) { |
| 1145 | return false; |
| 1146 | } |
| 1147 | return true; |
| 1148 | } |
| 1149 | |
| 1150 | bool DynamicArchive::is_mapped() { |
| 1151 | return (_dynamic_header != NULL); |
| 1152 | } |
| 1153 | |
| 1154 | void DynamicArchive::disable() { |
| 1155 | _dynamic_header = NULL; |
| 1156 | } |
| 1157 | |