| 1 | /* |
| 2 | * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #include "precompiled.hpp" |
| 26 | #include "gc/shared/oopStorage.inline.hpp" |
| 27 | #include "logging/log.hpp" |
| 28 | #include "memory/iterator.hpp" |
| 29 | #include "memory/universe.hpp" |
| 30 | #include "oops/access.inline.hpp" |
| 31 | #include "oops/oop.inline.hpp" |
| 32 | #include "runtime/handles.inline.hpp" |
| 33 | #include "runtime/jniHandles.inline.hpp" |
| 34 | #include "runtime/mutexLocker.hpp" |
| 35 | #include "runtime/thread.inline.hpp" |
| 36 | #include "utilities/align.hpp" |
| 37 | #include "utilities/debug.hpp" |
| 38 | |
// Backing OopStorage instances for JNI strong-global and weak-global
// handles.  NULL until JNIHandles::initialize() runs during VM startup.
OopStorage* JNIHandles::_global_handles = NULL;
OopStorage* JNIHandles::_weak_global_handles = NULL;
| 41 | |
| 42 | OopStorage* JNIHandles::global_handles() { |
| 43 | assert(_global_handles != NULL, "Uninitialized JNI global handles" ); |
| 44 | return _global_handles; |
| 45 | } |
| 46 | |
| 47 | OopStorage* JNIHandles::weak_global_handles() { |
| 48 | assert(_weak_global_handles != NULL, "Uninitialized JNI weak global handles" ); |
| 49 | return _weak_global_handles; |
| 50 | } |
| 51 | |
| 52 | |
| 53 | jobject JNIHandles::make_local(oop obj) { |
| 54 | if (obj == NULL) { |
| 55 | return NULL; // ignore null handles |
| 56 | } else { |
| 57 | Thread* thread = Thread::current(); |
| 58 | assert(oopDesc::is_oop(obj), "not an oop" ); |
| 59 | assert(!current_thread_in_native(), "must not be in native" ); |
| 60 | return thread->active_handles()->allocate_handle(obj); |
| 61 | } |
| 62 | } |
| 63 | |
| 64 | |
| 65 | // optimized versions |
| 66 | |
| 67 | jobject JNIHandles::make_local(Thread* thread, oop obj) { |
| 68 | if (obj == NULL) { |
| 69 | return NULL; // ignore null handles |
| 70 | } else { |
| 71 | assert(oopDesc::is_oop(obj), "not an oop" ); |
| 72 | assert(thread->is_Java_thread(), "not a Java thread" ); |
| 73 | assert(!current_thread_in_native(), "must not be in native" ); |
| 74 | return thread->active_handles()->allocate_handle(obj); |
| 75 | } |
| 76 | } |
| 77 | |
| 78 | |
| 79 | jobject JNIHandles::make_local(JNIEnv* env, oop obj) { |
| 80 | if (obj == NULL) { |
| 81 | return NULL; // ignore null handles |
| 82 | } else { |
| 83 | JavaThread* thread = JavaThread::thread_from_jni_environment(env); |
| 84 | assert(oopDesc::is_oop(obj), "not an oop" ); |
| 85 | assert(!current_thread_in_native(), "must not be in native" ); |
| 86 | return thread->active_handles()->allocate_handle(obj); |
| 87 | } |
| 88 | } |
| 89 | |
| 90 | |
| 91 | static void report_handle_allocation_failure(AllocFailType alloc_failmode, |
| 92 | const char* handle_kind) { |
| 93 | if (alloc_failmode == AllocFailStrategy::EXIT_OOM) { |
| 94 | // Fake size value, since we don't know the min allocation size here. |
| 95 | vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR, |
| 96 | "Cannot create %s JNI handle" , handle_kind); |
| 97 | } else { |
| 98 | assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant" ); |
| 99 | } |
| 100 | } |
| 101 | |
| 102 | jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) { |
| 103 | assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC" ); |
| 104 | assert(!current_thread_in_native(), "must not be in native" ); |
| 105 | jobject res = NULL; |
| 106 | if (!obj.is_null()) { |
| 107 | // ignore null handles |
| 108 | assert(oopDesc::is_oop(obj()), "not an oop" ); |
| 109 | oop* ptr = global_handles()->allocate(); |
| 110 | // Return NULL on allocation failure. |
| 111 | if (ptr != NULL) { |
| 112 | assert(*ptr == NULL, "invariant" ); |
| 113 | NativeAccess<>::oop_store(ptr, obj()); |
| 114 | res = reinterpret_cast<jobject>(ptr); |
| 115 | } else { |
| 116 | report_handle_allocation_failure(alloc_failmode, "global" ); |
| 117 | } |
| 118 | } else { |
| 119 | CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); |
| 120 | } |
| 121 | |
| 122 | return res; |
| 123 | } |
| 124 | |
| 125 | |
| 126 | jobject JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) { |
| 127 | assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC" ); |
| 128 | assert(!current_thread_in_native(), "must not be in native" ); |
| 129 | jobject res = NULL; |
| 130 | if (!obj.is_null()) { |
| 131 | // ignore null handles |
| 132 | assert(oopDesc::is_oop(obj()), "not an oop" ); |
| 133 | oop* ptr = weak_global_handles()->allocate(); |
| 134 | // Return NULL on allocation failure. |
| 135 | if (ptr != NULL) { |
| 136 | assert(*ptr == NULL, "invariant" ); |
| 137 | NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(ptr, obj()); |
| 138 | char* tptr = reinterpret_cast<char*>(ptr) + weak_tag_value; |
| 139 | res = reinterpret_cast<jobject>(tptr); |
| 140 | } else { |
| 141 | report_handle_allocation_failure(alloc_failmode, "weak global" ); |
| 142 | } |
| 143 | } else { |
| 144 | CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops()); |
| 145 | } |
| 146 | return res; |
| 147 | } |
| 148 | |
| 149 | // Resolve some erroneous cases to NULL, rather than treating them as |
| 150 | // possibly unchecked errors. In particular, deleted handles are |
| 151 | // treated as NULL (though a deleted and later reallocated handle |
| 152 | // isn't detected). |
| 153 | oop JNIHandles::resolve_external_guard(jobject handle) { |
| 154 | oop result = NULL; |
| 155 | if (handle != NULL) { |
| 156 | result = resolve_impl<DECORATORS_NONE, true /* external_guard */>(handle); |
| 157 | } |
| 158 | return result; |
| 159 | } |
| 160 | |
| 161 | bool JNIHandles::is_global_weak_cleared(jweak handle) { |
| 162 | assert(handle != NULL, "precondition" ); |
| 163 | assert(is_jweak(handle), "not a weak handle" ); |
| 164 | oop* oop_ptr = jweak_ptr(handle); |
| 165 | oop value = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(oop_ptr); |
| 166 | return value == NULL; |
| 167 | } |
| 168 | |
| 169 | void JNIHandles::destroy_global(jobject handle) { |
| 170 | if (handle != NULL) { |
| 171 | assert(!is_jweak(handle), "wrong method for detroying jweak" ); |
| 172 | oop* oop_ptr = jobject_ptr(handle); |
| 173 | NativeAccess<>::oop_store(oop_ptr, (oop)NULL); |
| 174 | global_handles()->release(oop_ptr); |
| 175 | } |
| 176 | } |
| 177 | |
| 178 | |
| 179 | void JNIHandles::destroy_weak_global(jobject handle) { |
| 180 | if (handle != NULL) { |
| 181 | assert(is_jweak(handle), "JNI handle not jweak" ); |
| 182 | oop* oop_ptr = jweak_ptr(handle); |
| 183 | NativeAccess<ON_PHANTOM_OOP_REF>::oop_store(oop_ptr, (oop)NULL); |
| 184 | weak_global_handles()->release(oop_ptr); |
| 185 | } |
| 186 | } |
| 187 | |
| 188 | |
| 189 | void JNIHandles::oops_do(OopClosure* f) { |
| 190 | global_handles()->oops_do(f); |
| 191 | } |
| 192 | |
| 193 | |
| 194 | void JNIHandles::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) { |
| 195 | weak_global_handles()->weak_oops_do(is_alive, f); |
| 196 | } |
| 197 | |
| 198 | |
| 199 | void JNIHandles::weak_oops_do(OopClosure* f) { |
| 200 | weak_global_handles()->weak_oops_do(f); |
| 201 | } |
| 202 | |
| 203 | |
// Create the OopStorage backends for strong and weak global handles.
// Called once during VM startup via jni_handles_init().
void JNIHandles::initialize() {
  _global_handles = new OopStorage("JNI Global",
                                   JNIGlobalAlloc_lock,
                                   JNIGlobalActive_lock);
  _weak_global_handles = new OopStorage("JNI Weak",
                                        JNIWeakAlloc_lock,
                                        JNIWeakActive_lock);
}
| 212 | |
| 213 | |
| 214 | inline bool is_storage_handle(const OopStorage* storage, const oop* ptr) { |
| 215 | return storage->allocation_status(ptr) == OopStorage::ALLOCATED_ENTRY; |
| 216 | } |
| 217 | |
| 218 | |
// Classify a non-NULL handle as weak-global, global, local, or invalid.
// Backs jni_GetObjectRefType.  A weak-tagged pointer is only valid if it
// is a live entry of the weak storage; untagged pointers are checked
// against the global storage and then against the thread's local handles.
jobjectRefType JNIHandles::handle_type(Thread* thread, jobject handle) {
  assert(handle != NULL, "precondition");
  jobjectRefType result = JNIInvalidRefType;
  if (is_jweak(handle)) {
    if (is_storage_handle(weak_global_handles(), jweak_ptr(handle))) {
      result = JNIWeakGlobalRefType;
    }
  } else {
    switch (global_handles()->allocation_status(jobject_ptr(handle))) {
    case OopStorage::ALLOCATED_ENTRY:
      result = JNIGlobalRefType;
      break;

    case OopStorage::UNALLOCATED_ENTRY:
      break; // Invalid global handle

    case OopStorage::INVALID_ENTRY:
      // Not in global storage.  Might be a local handle, either in a
      // JNIHandleBlock or directly on the thread's stack.
      if (is_local_handle(thread, handle) ||
          (thread->is_Java_thread() &&
           is_frame_handle((JavaThread*)thread, handle))) {
        result = JNILocalRefType;
      }
      break;

    default:
      ShouldNotReachHere();
    }
  }
  return result;
}
| 250 | |
| 251 | |
| 252 | bool JNIHandles::is_local_handle(Thread* thread, jobject handle) { |
| 253 | assert(handle != NULL, "precondition" ); |
| 254 | JNIHandleBlock* block = thread->active_handles(); |
| 255 | |
| 256 | // Look back past possible native calls to jni_PushLocalFrame. |
| 257 | while (block != NULL) { |
| 258 | if (block->chain_contains(handle)) { |
| 259 | return true; |
| 260 | } |
| 261 | block = block->pop_frame_link(); |
| 262 | } |
| 263 | return false; |
| 264 | } |
| 265 | |
| 266 | |
| 267 | // Determine if the handle is somewhere in the current thread's stack. |
| 268 | // We easily can't isolate any particular stack frame the handle might |
| 269 | // come from, so we'll check the whole stack. |
| 270 | |
| 271 | bool JNIHandles::is_frame_handle(JavaThread* thr, jobject handle) { |
| 272 | assert(handle != NULL, "precondition" ); |
| 273 | // If there is no java frame, then this must be top level code, such |
| 274 | // as the java command executable, in which case, this type of handle |
| 275 | // is not permitted. |
| 276 | return (thr->has_last_Java_frame() && |
| 277 | (void*)handle < (void*)thr->stack_base() && |
| 278 | (void*)handle >= (void*)thr->last_Java_sp()); |
| 279 | } |
| 280 | |
| 281 | |
| 282 | bool JNIHandles::is_global_handle(jobject handle) { |
| 283 | assert(handle != NULL, "precondition" ); |
| 284 | return !is_jweak(handle) && is_storage_handle(global_handles(), jobject_ptr(handle)); |
| 285 | } |
| 286 | |
| 287 | |
| 288 | bool JNIHandles::is_weak_global_handle(jobject handle) { |
| 289 | assert(handle != NULL, "precondition" ); |
| 290 | return is_jweak(handle) && is_storage_handle(weak_global_handles(), jweak_ptr(handle)); |
| 291 | } |
| 292 | |
| 293 | size_t JNIHandles::global_handle_memory_usage() { |
| 294 | return global_handles()->total_memory_usage(); |
| 295 | } |
| 296 | |
| 297 | size_t JNIHandles::weak_global_handle_memory_usage() { |
| 298 | return weak_global_handles()->total_memory_usage(); |
| 299 | } |
| 300 | |
| 301 | |
// Print counts of strong and weak global handles to st.
// We assume this is called at a safepoint: no lock is needed.
void JNIHandles::print_on(outputStream* st) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");

  st->print_cr("JNI global refs: " SIZE_FORMAT ", weak refs: " SIZE_FORMAT,
               global_handles()->allocation_count(),
               weak_global_handles()->allocation_count());
  st->cr();
  st->flush();
}
| 312 | |
| 313 | void JNIHandles::print() { print_on(tty); } |
| 314 | |
// Closure used by JNIHandles::verify(): checks that each visited root
// holds a valid oop (or NULL).  Narrow roots are not expected here.
class VerifyJNIHandles: public OopClosure {
public:
  virtual void do_oop(oop* root) {
    guarantee(oopDesc::is_oop_or_null(RawAccess<>::oop_load(root)), "Invalid oop");
  }
  virtual void do_oop(narrowOop* root) { ShouldNotReachHere(); }
};
| 322 | |
| 323 | void JNIHandles::verify() { |
| 324 | VerifyJNIHandles verify_handle; |
| 325 | |
| 326 | oops_do(&verify_handle); |
| 327 | weak_oops_do(&verify_handle); |
| 328 | } |
| 329 | |
| 330 | // This method is implemented here to avoid circular includes between |
| 331 | // jniHandles.hpp and thread.hpp. |
| 332 | bool JNIHandles::current_thread_in_native() { |
| 333 | Thread* thread = Thread::current(); |
| 334 | return (thread->is_Java_thread() && |
| 335 | JavaThread::current()->thread_state() == _thread_in_native); |
| 336 | } |
| 337 | |
| 338 | |
// VM startup hook: set up the JNI handle OopStorage instances.
void jni_handles_init() {
  JNIHandles::initialize();
}
| 342 | |
| 343 | |
// Global JNIHandleBlock bookkeeping: total blocks ever allocated, the
// shared free list, and (non-PRODUCT only) a list of all allocated
// blocks for debug queries such as any_contains().
int JNIHandleBlock::_blocks_allocated = 0;
JNIHandleBlock* JNIHandleBlock::_block_free_list = NULL;
#ifndef PRODUCT
JNIHandleBlock* JNIHandleBlock::_block_list = NULL;
#endif
| 349 | |
| 350 | |
#ifdef ASSERT
// Debug-only: reset _top and clear every handle slot to NULL so stale
// oops can't be mistaken for live roots.
void JNIHandleBlock::zap() {
  // Zap block values
  _top = 0;
  for (int index = 0; index < block_size_in_oops; index++) {
    // NOT using Access here; just bare clobbering to NULL, since the
    // block no longer contains valid oops.
    _handles[index] = NULL;
  }
}
#endif // ASSERT
| 362 | |
// Obtain a fresh, reinitialized JNIHandleBlock.  Prefers the thread-local
// free list (lock-free); otherwise takes the global free list lock and
// either reuses a freed block or allocates a new one.  'thread' may be
// NULL, otherwise it must be the current thread.
JNIHandleBlock* JNIHandleBlock::allocate_block(Thread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* block;
  // Check the thread-local free list for a block so we don't
  // have to acquire a mutex.
  if (thread != NULL && thread->free_handle_block() != NULL) {
    block = thread->free_handle_block();
    thread->set_free_handle_block(block->_next);
  }
  else {
    // locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLocker ml(JNIHandleBlockFreeList_lock,
                   Mutex::_no_safepoint_check_flag);
    if (_block_free_list == NULL) {
      // Allocate new block
      block = new JNIHandleBlock();
      _blocks_allocated++;
      block->zap();
#ifndef PRODUCT
      // Link new block to list of all allocated blocks
      block->_block_list_link = _block_list;
      _block_list = block;
#endif
    } else {
      // Get block from free list
      block = _block_free_list;
      _block_free_list = _block_free_list->_next;
    }
  }
  // Reinitialize mutable per-use state before handing the block out.
  block->_top = 0;
  block->_next = NULL;
  block->_pop_frame_link = NULL;
  block->_planned_capacity = block_size_in_oops;
  // _last, _free_list & _allocate_before_rebuild initialized in allocate_handle
  debug_only(block->_last = NULL);
  debug_only(block->_free_list = NULL);
  debug_only(block->_allocate_before_rebuild = -1);
  return block;
}
| 405 | |
| 406 | |
// Return a chain of blocks either to the thread-local free list (when
// 'thread' is non-NULL) or to the global free list under lock.  Any chain
// hanging off the pop_frame_link is released recursively as a sanity
// fallback for unbalanced PopLocalFrame calls.
void JNIHandleBlock::release_block(JNIHandleBlock* block, Thread* thread) {
  assert(thread == NULL || thread == Thread::current(), "sanity check");
  JNIHandleBlock* pop_frame_link = block->pop_frame_link();
  // Put returned block at the beginning of the thread-local free list.
  // Note that if thread == NULL, we use it as an implicit argument that
  // we _don't_ want the block to be kept on the free_handle_block.
  // See for instance JavaThread::exit().
  if (thread != NULL ) {
    block->zap();
    JNIHandleBlock* freelist = thread->free_handle_block();
    block->_pop_frame_link = NULL;
    thread->set_free_handle_block(block);

    // Add original freelist to end of chain
    if ( freelist != NULL ) {
      while ( block->_next != NULL ) block = block->_next;
      block->_next = freelist;
    }
    block = NULL;  // handled; skip the global-freelist path below
  }
  if (block != NULL) {
    // Return blocks to free list
    // locking with safepoint checking introduces a potential deadlock:
    // - we would hold JNIHandleBlockFreeList_lock and then Threads_lock
    // - another would hold Threads_lock (jni_AttachCurrentThread) and then
    //   JNIHandleBlockFreeList_lock (JNIHandleBlock::allocate_block)
    MutexLocker ml(JNIHandleBlockFreeList_lock,
                   Mutex::_no_safepoint_check_flag);
    while (block != NULL) {
      block->zap();
      JNIHandleBlock* next = block->_next;
      block->_next = _block_free_list;
      _block_free_list = block;
      block = next;
    }
  }
  if (pop_frame_link != NULL) {
    // As a sanity check we release blocks pointed to by the pop_frame_link.
    // This should never happen (only if PopLocalFrame is not called the
    // correct number of times).
    release_block(pop_frame_link, thread);
  }
}
| 450 | |
| 451 | |
// Apply f to every live handle in this chain of blocks, including chains
// reachable through pop-frame links.  Only in-heap values are visited;
// NULLs and free-list link words stored in slots are skipped.
void JNIHandleBlock::oops_do(OopClosure* f) {
  JNIHandleBlock* current_chain = this;
  // Iterate over chain of blocks, followed by chains linked through the
  // pop frame links.
  while (current_chain != NULL) {
    for (JNIHandleBlock* current = current_chain; current != NULL;
         current = current->_next) {
      assert(current == current_chain || current->pop_frame_link() == NULL,
             "only blocks first in chain should have pop frame link set");
      for (int index = 0; index < current->_top; index++) {
        oop* root = &(current->_handles)[index];
        oop value = *root;
        // traverse heap pointers only, not deleted handles or free list
        // pointers
        if (value != NULL && Universe::heap()->is_in_reserved(value)) {
          f->do_oop(root);
        }
      }
      // the next handle block is valid only if current block is full
      if (current->_top < block_size_in_oops) {
        break;
      }
    }
    current_chain = current_chain->pop_frame_link();
  }
}
| 478 | |
| 479 | |
// Allocate a local handle slot for obj in this block chain.  Tries, in
// order: the current last block, the free list of deleted slots, any
// unused trailing block, and finally either a free-list rebuild or a
// newly appended block, then retries (recursively).
jobject JNIHandleBlock::allocate_handle(oop obj) {
  assert(Universe::heap()->is_in_reserved(obj), "sanity check");
  if (_top == 0) {
    // This is the first allocation or the initial block got zapped when
    // entering a native function. If we have any following blocks they are
    // not valid anymore.
    for (JNIHandleBlock* current = _next; current != NULL;
         current = current->_next) {
      assert(current->_last == NULL, "only first block should have _last set");
      assert(current->_free_list == NULL,
             "only first block should have _free_list set");
      if (current->_top == 0) {
        // All blocks after the first clear trailing block are already cleared.
#ifdef ASSERT
        for (current = current->_next; current != NULL; current = current->_next) {
          assert(current->_top == 0, "trailing blocks must already be cleared");
        }
#endif
        break;
      }
      current->_top = 0;
      current->zap();
    }
    // Clear initial block
    _free_list = NULL;
    _allocate_before_rebuild = 0;
    _last = this;
    zap();
  }

  // Try last block
  if (_last->_top < block_size_in_oops) {
    oop* handle = &(_last->_handles)[_last->_top++];
    NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
    return (jobject) handle;
  }

  // Try free list
  if (_free_list != NULL) {
    oop* handle = _free_list;
    // The free list threads its links through the empty slots themselves.
    _free_list = (oop*) *_free_list;
    NativeAccess<IS_DEST_UNINITIALIZED>::oop_store(handle, obj);
    return (jobject) handle;
  }
  // Check if unused block follow last
  if (_last->_next != NULL) {
    // update last and retry
    _last = _last->_next;
    return allocate_handle(obj);
  }

  // No space available, we have to rebuild free list or expand
  if (_allocate_before_rebuild == 0) {
    rebuild_free_list();        // updates _allocate_before_rebuild counter
  } else {
    // Append new block
    Thread* thread = Thread::current();
    Handle obj_handle(thread, obj);
    // This can block, so we need to preserve obj across call.
    _last->_next = JNIHandleBlock::allocate_block(thread);
    _last = _last->_next;
    _allocate_before_rebuild--;
    obj = obj_handle();
  }
  return allocate_handle(obj);  // retry
}
| 546 | |
| 547 | void JNIHandleBlock::rebuild_free_list() { |
| 548 | assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking" ); |
| 549 | int free = 0; |
| 550 | int blocks = 0; |
| 551 | for (JNIHandleBlock* current = this; current != NULL; current = current->_next) { |
| 552 | for (int index = 0; index < current->_top; index++) { |
| 553 | oop* handle = &(current->_handles)[index]; |
| 554 | if (*handle == NULL) { |
| 555 | // this handle was cleared out by a delete call, reuse it |
| 556 | *handle = (oop) _free_list; |
| 557 | _free_list = handle; |
| 558 | free++; |
| 559 | } |
| 560 | } |
| 561 | // we should not rebuild free list if there are unused handles at the end |
| 562 | assert(current->_top == block_size_in_oops, "just checking" ); |
| 563 | blocks++; |
| 564 | } |
| 565 | // Heuristic: if more than half of the handles are free we rebuild next time |
| 566 | // as well, otherwise we append a corresponding number of new blocks before |
| 567 | // attempting a free list rebuild again. |
| 568 | int total = blocks * block_size_in_oops; |
| 569 | int = total - 2*free; |
| 570 | if (extra > 0) { |
| 571 | // Not as many free handles as we would like - compute number of new blocks to append |
| 572 | _allocate_before_rebuild = (extra + block_size_in_oops - 1) / block_size_in_oops; |
| 573 | } |
| 574 | } |
| 575 | |
| 576 | |
| 577 | bool JNIHandleBlock::contains(jobject handle) const { |
| 578 | return ((jobject)&_handles[0] <= handle && handle<(jobject)&_handles[_top]); |
| 579 | } |
| 580 | |
| 581 | |
| 582 | bool JNIHandleBlock::chain_contains(jobject handle) const { |
| 583 | for (JNIHandleBlock* current = (JNIHandleBlock*) this; current != NULL; current = current->_next) { |
| 584 | if (current->contains(handle)) { |
| 585 | return true; |
| 586 | } |
| 587 | } |
| 588 | return false; |
| 589 | } |
| 590 | |
| 591 | |
| 592 | size_t JNIHandleBlock::length() const { |
| 593 | size_t result = 1; |
| 594 | for (JNIHandleBlock* current = _next; current != NULL; current = current->_next) { |
| 595 | result++; |
| 596 | } |
| 597 | return result; |
| 598 | } |
| 599 | |
// Closure that simply counts the oop roots it visits; used by
// get_number_of_live_handles().  Narrow roots are not expected.
class CountJNIHandleClosure: public OopClosure {
private:
  int _count;   // number of roots visited so far
public:
  CountJNIHandleClosure(): _count(0) {}
  virtual void do_oop(oop* ooph) { _count++; }
  virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
  int count() { return _count; }
};
| 609 | |
| 610 | const size_t JNIHandleBlock::get_number_of_live_handles() { |
| 611 | CountJNIHandleClosure counter; |
| 612 | oops_do(&counter); |
| 613 | return counter.count(); |
| 614 | } |
| 615 | |
| 616 | // This method is not thread-safe, i.e., must be called while holding a lock on the |
| 617 | // structure. |
| 618 | size_t JNIHandleBlock::memory_usage() const { |
| 619 | return length() * sizeof(JNIHandleBlock); |
| 620 | } |
| 621 | |
| 622 | |
| 623 | #ifndef PRODUCT |
| 624 | |
// Debug-only (non-PRODUCT): is handle inside ANY allocated handle block,
// regardless of owning thread?
bool JNIHandles::is_local_handle(jobject handle) {
  return JNIHandleBlock::any_contains(handle);
}
| 628 | |
| 629 | bool JNIHandleBlock::any_contains(jobject handle) { |
| 630 | assert(handle != NULL, "precondition" ); |
| 631 | for (JNIHandleBlock* current = _block_list; current != NULL; current = current->_block_list_link) { |
| 632 | if (current->contains(handle)) { |
| 633 | return true; |
| 634 | } |
| 635 | } |
| 636 | return false; |
| 637 | } |
| 638 | |
// Debug-only (non-PRODUCT): print block/handle usage statistics over the
// global list of all allocated blocks.  A block is "in use" if any of
// its slots have been handed out (_top > 0).
void JNIHandleBlock::print_statistics() {
  int used_blocks = 0;
  int free_blocks = 0;
  int used_handles = 0;
  int free_handles = 0;
  JNIHandleBlock* block = _block_list;
  while (block != NULL) {
    if (block->_top > 0) {
      used_blocks++;
    } else {
      free_blocks++;
    }
    used_handles += block->_top;
    free_handles += (block_size_in_oops - block->_top);
    block = block->_block_list_link;
  }
  tty->print_cr("JNIHandleBlocks statistics");
  tty->print_cr("- blocks allocated: %d", used_blocks + free_blocks);
  tty->print_cr("- blocks in use:    %d", used_blocks);
  tty->print_cr("- blocks free:      %d", free_blocks);
  tty->print_cr("- handles in use:   %d", used_handles);
  tty->print_cr("- handles free:     %d", free_handles);
}
| 662 | |
| 663 | #endif |
| 664 | |