/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.inline.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "code/icBuffer.hpp"
#include "code/scopeDesc.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/sharedRuntime.hpp"

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout,
                               int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps,
                               bool caller_must_gc_arguments)
  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size,
                               int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size,
                               OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb,
             frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
    _mark_for_deoptimization_status(not_marked),
    _method(method),
    _gc_data(NULL)
{
  init_defaults();
}

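// Shared by both constructors: reset the per-method flag bits to their defaults.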
void CompiledMethod::init_defaults() {
  _has_unsafe_access         = 0;
  _has_method_handle_invokes = 0;
  _lazy_critical_native      = 0;
  _has_wide_vectors          = 0;
}

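// Returns true if return_pc is the return address of a MethodHandle invoke
// recorded in this method's PcDescs.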
bool CompiledMethod::is_method_handle_return(address return_pc) {
  if (!has_method_handle_invokes()) return false;
  PcDesc* pd = pc_desc_at(return_pc);
  if (pd == NULL)
    return false;
  return pd->is_method_handle_invoke();
}

// Returns a string version of the method state.
const char* CompiledMethod::state() const {
  int state = get_state();
  switch (state) {
  case not_installed:
    return "not installed";
  case in_use:
    return "in use";
  case not_used:
    return "not_used";
  case not_entrant:
    return "not_entrant";
  case zombie:
    return "zombie";
  case unloaded:
    return "unloaded";
  default:
    fatal("unexpected method state: %d", state);
    return NULL;
  }
}

//-----------------------------------------------------------------------------

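// Acquire-loads the head of the exception cache list, so that a lock-free
// reader observes fully initialized entries published by writers.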
ExceptionCache* CompiledMethod::exception_cache_acquire() const {
  return OrderAccess::load_acquire(&_exception_cache);
}

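// Pushes a new entry onto the head of the lock-free exception cache list.
// Writers are serialized by the ExceptionCache_lock, but a CAS is still
// required because the GC may concurrently unlink stale head entries
// (see clean_exception_cache below).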
void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
  assert(ExceptionCache_lock->owned_by_self(), "Must hold the ExceptionCache_lock");
  assert(new_entry != NULL, "Must be non null");
  assert(new_entry->next() == NULL, "Must be null");

  for (;;) {
    ExceptionCache *ec = exception_cache();
    if (ec != NULL) {
      Klass* ex_klass = ec->exception_type();
      if (!ex_klass->is_loader_alive()) {
        // We must guarantee that entries are not inserted with new next pointer
        // edges to ExceptionCache entries with dead klasses, due to bad interactions
        // with concurrent ExceptionCache cleanup. Therefore, the inserts roll
        // the head pointer forward to the first live ExceptionCache, so that the new
        // next pointers always point at live ExceptionCaches that are not removed by
        // concurrent ExceptionCache cleanup.
        ExceptionCache* next = ec->next();
        if (Atomic::cmpxchg(next, &_exception_cache, ec) == ec) {
          CodeCache::release_exception_cache(ec);
        }
        continue;
      }
      ec = exception_cache();
      if (ec != NULL) {
        new_entry->set_next(ec);
      }
    }
    if (Atomic::cmpxchg(new_entry, &_exception_cache, ec) == ec) {
      return;
    }
  }
}

void CompiledMethod::clean_exception_cache() {
  // For each nmethod, only a single thread may call this cleanup function
  // at the same time, whether called in STW cleanup or concurrent cleanup.
  // Note that if the GC is processing exception cache cleaning in a concurrent phase,
  // then a single writer may contend with cleaning up the head pointer to the
  // first ExceptionCache node that has a Klass* that is alive. That is fine,
  // as long as there is no concurrent cleanup of next pointers from concurrent writers.
  // And the concurrent writers do not clean up next pointers, only the head.
  // Also note that concurrent readers will walk through Klass* pointers that are not
  // alive. That does not cause ABA problems, because Klass* is deleted after
  // a handshake with all threads, after all stale ExceptionCaches have been
  // unlinked. That is also when the CodeCache::exception_cache_purge_list()
  // is deleted, with all ExceptionCache entries that were cleaned concurrently.
  // That similarly implies that CAS operations on ExceptionCache entries do not
  // suffer from ABA problems, as unlinking and deletion are separated by a global
  // handshake operation.
  ExceptionCache* prev = NULL;
  ExceptionCache* curr = exception_cache_acquire();

  while (curr != NULL) {
    ExceptionCache* next = curr->next();

    if (!curr->exception_type()->is_loader_alive()) {
      if (prev == NULL) {
        // Try to clean head; this is contended by concurrent inserts, which
        // both lazily clean the head and insert entries at the head. If
        // the CAS fails, the operation is restarted.
        if (Atomic::cmpxchg(next, &_exception_cache, curr) != curr) {
          prev = NULL;
          curr = exception_cache_acquire();
          continue;
        }
      } else {
        // It is impossible, during cleanup, to connect the next pointer to
        // an ExceptionCache that was not already published before a safepoint
        // prior to the cleanup. Therefore, a releasing store is not required.
        prev->set_next(next);
      }
      // prev stays the same.

      CodeCache::release_exception_cache(curr);
    } else {
      prev = curr;
    }

    curr = next;
  }
}

// These are the public access methods for the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
  // We never grab a lock to read the exception cache, so we may
  // have false negatives. This is okay, as it can only happen during
  // the first few exception lookups for a given nmethod.
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    address ret_val;
    if ((ret_val = ec->match(exception, pc)) != NULL) {
      return ret_val;
    }
    ec = ec->next();
  }
  return NULL;
}

void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
  // There are potential race conditions during exception cache updates, so we
  // must own the ExceptionCache_lock before doing ANY modifications. Because
  // we don't lock during reads, it is possible to have several threads attempt
  // to update the cache with the same data. We need to check for already inserted
  // copies of the current data before adding it.

  MutexLocker ml(ExceptionCache_lock);
  ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);

  if (target_entry == NULL || !target_entry->add_address_and_handler(pc, handler)) {
    target_entry = new ExceptionCache(exception, pc, handler);
    add_exception_cache_entry(target_entry);
  }
}

// This method is private to the exception cache implementation and is used
// to manipulate the cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
  ExceptionCache* ec = exception_cache_acquire();
  while (ec != NULL) {
    if (ec->match_exception_with_space(exception)) {
      return ec;
    }
    ec = ec->next();
  }
  return NULL;
}

//-------------end of code for ExceptionCache--------------

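// Returns true if pc is a return-site safepoint poll in this method, as
// identified by a poll_return relocation at that address.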
bool CompiledMethod::is_at_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    if (iter.type() == relocInfo::poll_return_type)
      return true;
  }
  return false;
}


bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
  RelocIterator iter(this, pc, pc+1);
  while (iter.next()) {
    relocInfo::relocType t = iter.type();
    if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
      return true;
  }
  return false;
}

void CompiledMethod::verify_oop_relocations() {
  // Ensure that the code matches the current oop values
  RelocIterator iter(this, NULL, NULL);
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* reloc = iter.oop_reloc();
      if (!reloc->oop_is_immediate()) {
        reloc->verify_oop_relocation();
      }
    }
  }
}


ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
  PcDesc* pd = pc_desc_at(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

ScopeDesc* CompiledMethod::scope_desc_near(address pc) {
  PcDesc* pd = pc_desc_near(pc);
  guarantee(pd != NULL, "scope must be present");
  return new ScopeDesc(this, pd->scope_decode_offset(),
                       pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                       pd->return_oop());
}

address CompiledMethod::oops_reloc_begin() const {
  // If the method is not entrant or zombie then a JMP is plastered over the
  // first few bytes. If an oop in the old code was there, that oop
  // should not get GC'd. Skip the first few bytes of oops on
  // not-entrant methods.
  if (frame_complete_offset() != CodeOffsets::frame_never_safe &&
      code_begin() + frame_complete_offset() >
      verified_entry_point() + NativeJump::instruction_size)
  {
    // If we have a frame_complete_offset after the native jump, then there
    // is no point trying to look for oops before that. This is a requirement
    // for being allowed to scan oops concurrently.
    return code_begin() + frame_complete_offset();
  }

  // It is not safe to read oops concurrently using entry barriers, if their
  // location depends on whether the nmethod is entrant or not.
  assert(BarrierSet::barrier_set()->barrier_set_nmethod() == NULL, "Not safe oop scan");

  address low_boundary = verified_entry_point();
  if (!is_in_use() && is_nmethod()) {
    low_boundary += NativeJump::instruction_size;
    // %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
    // This means that the low_boundary is going to be a little too high.
    // This shouldn't matter, since oops of non-entrant methods are never used.
    // In fact, why are we bothering to look at oops in a non-entrant method??
  }
  return low_boundary;
}

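// Counts (and, with -XX:+TraceCompiledIC, prints) the virtual call sites in
// this method whose inline cache currently holds a CompiledICHolder.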
int CompiledMethod::verify_icholder_relocations() {
  ResourceMark rm;
  int count = 0;

  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) {
        CompiledIC *ic = CompiledIC_at(&iter);
        if (TraceCompiledIC) {
          tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
          ic->print();
        }
        assert(ic->cached_icholder() != NULL, "must be non-NULL");
        count++;
      }
    }
  }

  return count;
}

// Method that knows how to preserve outgoing arguments at a call. This method must be
// called with a frame corresponding to a Java invoke.
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  if (method() != NULL && !method()->is_native()) {
    address pc = fr.pc();
    SimpleScopeDesc ssd(this, pc);
    Bytecode_invoke call(ssd.method(), ssd.bci());
    bool has_receiver = call.has_receiver();
    bool has_appendix = call.has_appendix();
    Symbol* signature = call.signature();

    // The method attached by JIT-compilers should be used, if present.
    // The bytecode can be inaccurate in that case.
    Method* callee = attached_method_before_pc(pc);
    if (callee != NULL) {
      has_receiver = !(callee->access_flags().is_static());
      has_appendix = false;
      signature = callee->signature();
    }

    fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
  }
}

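// Returns the Method* that the JIT compiler attached to the call site at
// call_instr via its call relocation, or NULL if none was recorded.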
Method* CompiledMethod::attached_method(address call_instr) {
  assert(code_contains(call_instr), "not part of the nmethod");
  RelocIterator iter(this, call_instr, call_instr + 1);
  while (iter.next()) {
    if (iter.addr() == call_instr) {
      switch (iter.type()) {
      case relocInfo::static_call_type:      return iter.static_call_reloc()->method_value();
      case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
      case relocInfo::virtual_call_type:     return iter.virtual_call_reloc()->method_value();
      default:                               break;
      }
    }
  }
  return NULL; // not found
}

Method* CompiledMethod::attached_method_before_pc(address pc) {
  if (NativeCall::is_call_before(pc)) {
    NativeCall* ncall = nativeCall_before(pc);
    return attached_method(ncall->instruction_address());
  }
  return NULL; // not a call
}

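// Resets every inline cache in this method to the clean state. May only be
// called at a safepoint; zombie methods are skipped.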
void CompiledMethod::clear_inline_caches() {
  assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
  if (is_zombie()) {
    return;
  }

  RelocIterator iter(this);
  while (iter.next()) {
    iter.reloc()->clear_inline_cache();
  }
}

// Clear IC callsites, releasing ICStubs of all compiled ICs
// as well as any associated CompiledICHolders.
void CompiledMethod::clear_ic_callsites() {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;
  RelocIterator iter(this);
  while (iter.next()) {
    if (iter.type() == relocInfo::virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(&iter);
      ic->set_to_clean(false);
    }
  }
}

#ifdef ASSERT
// Check class_loader is alive for this bit of metadata.
class CheckClass : public MetadataClosure {
  void do_metadata(Metadata* md) {
    Klass* klass = NULL;
    if (md->is_klass()) {
      klass = ((Klass*)md);
    } else if (md->is_method()) {
      klass = ((Method*)md)->method_holder();
    } else if (md->is_methodData()) {
      klass = ((MethodData*)md)->method()->method_holder();
    } else {
      md->print();
      ShouldNotReachHere();
    }
    assert(klass->is_loader_alive(), "must be alive");
  }
};
#endif // ASSERT


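// Clean an inline cache whose cached metadata belongs to an unloaded class
// loader. Returns true if the IC is clean or its metadata is still alive;
// returns false if cleaning failed because no transitional ICStub was
// available, in which case the caller refills the stubs and retries
// (see cleanup_inline_caches).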
bool CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic) {
  if (ic->is_clean()) {
    return true;
  }
  if (ic->is_icholder_call()) {
    // The only exception is CompiledICHolder metadata which may
    // yet be marked below. (We check this further below.)
    CompiledICHolder* cichk_metadata = ic->cached_icholder();

    if (cichk_metadata->is_loader_alive()) {
      return true;
    }
  } else {
    Metadata* ic_metadata = ic->cached_metadata();
    if (ic_metadata != NULL) {
      if (ic_metadata->is_klass()) {
        if (((Klass*)ic_metadata)->is_loader_alive()) {
          return true;
        }
      } else if (ic_metadata->is_method()) {
        Method* method = (Method*)ic_metadata;
        assert(!method->is_old(), "old method should have been cleaned");
        if (method->method_holder()->is_loader_alive()) {
          return true;
        }
      } else {
        ShouldNotReachHere();
      }
    }
  }

  return ic->set_to_clean();
}

// Clean references to unloaded nmethods at addr from this one, which is not unloaded.
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
                                         bool clean_all) {
  // Ok to look up references to zombies here
  CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
  CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
  if (nm != NULL) {
    // Clean inline caches pointing to both zombie and not_entrant methods
    if (clean_all || !nm->is_in_use() || nm->is_unloading() || (nm->method()->code() != nm)) {
      if (!ic->set_to_clean(from->is_alive())) {
        return false;
      }
      assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
    }
  }
  return true;
}

static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, clean_all);
}

static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
                                         bool clean_all) {
  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, clean_all);
}

// Cleans caches in nmethods that point to either classes that are unloaded
// or nmethods that are unloaded.
//
// Can be called either in parallel by G1 currently or after all
// nmethods are unloaded. Returns false if an inline cache could not be
// cleaned because no transitional ICStub was available, in which case the
// caller must refill the ICStubs and retry.
bool CompiledMethod::unload_nmethod_caches(bool unloading_occurred) {
  ResourceMark rm;

  // The exception cache only needs to be cleaned if class unloading occurred
  if (unloading_occurred) {
    clean_exception_cache();
  }

  if (!cleanup_inline_caches_impl(unloading_occurred, false)) {
    return false;
  }

#ifdef ASSERT
  // Check that the metadata embedded in the nmethod is alive
  CheckClass check_class;
  metadata_do(&check_class);
#endif
  return true;
}

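// Retry loop around cleanup_inline_caches_impl: if cleaning fails because the
// supply of transitional ICStubs is exhausted, release the CompiledICLocker,
// refill the stubs, and start over.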
void CompiledMethod::cleanup_inline_caches(bool clean_all) {
  for (;;) {
    ICRefillVerifier ic_refill_verifier;
    { CompiledICLocker ic_locker(this);
      if (cleanup_inline_caches_impl(false, clean_all)) {
        return;
      }
    }
    InlineCacheBuffer::refill_ic_stubs();
  }
}

// Called to clean up after class unloading for live nmethods and from the sweeper
// for all methods.
bool CompiledMethod::cleanup_inline_caches_impl(bool unloading_occurred, bool clean_all) {
  assert(CompiledICLocker::is_safe(this), "mt unsafe call");
  ResourceMark rm;

  // Find all calls in an nmethod and clear the ones that point to non-entrant,
  // zombie and unloaded nmethods.
  RelocIterator iter(this, oops_reloc_begin());
  bool is_in_static_stub = false;
  while (iter.next()) {

    switch (iter.type()) {

    case relocInfo::virtual_call_type:
      if (unloading_occurred) {
        // If class unloading occurred we first clear ICs where the cached metadata
        // is referring to an unloaded klass or method.
        if (!clean_ic_if_metadata_is_dead(CompiledIC_at(&iter))) {
          return false;
        }
      }

      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::opt_virtual_call_type:
      if (!clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_call_type:
      if (!clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, clean_all)) {
        return false;
      }
      break;

    case relocInfo::static_stub_type: {
      is_in_static_stub = true;
      break;
    }

    case relocInfo::metadata_type: {
      // Only the metadata relocations contained in static/opt virtual call stubs
      // contain the Method* passed to c2i adapters. It is the only metadata
      // relocation that needs to be walked, as it is the one metadata relocation
      // that violates the invariant that all metadata relocations have an oop
      // in the compiled method (due to deferred resolution and code patching).

      // This causes dead metadata to remain in compiled methods that are not
      // unloading. Unless these slippery metadata relocations of the static
      // stubs are at least cleared, subsequent class redefinition operations
      // will access potentially free memory, and JavaThread execution
      // concurrent to class unloading may call c2i adapters with dead methods.
      if (!is_in_static_stub) {
        // The first metadata relocation after a static stub relocation is the
        // metadata relocation of the static stub used to pass the Method* to
        // c2i adapters.
        continue;
      }
      is_in_static_stub = false;
      metadata_Relocation* r = iter.metadata_reloc();
      Metadata* md = r->metadata_value();
      if (md != NULL && md->is_method()) {
        Method* method = static_cast<Method*>(md);
        if (!method->method_holder()->is_loader_alive()) {
          Atomic::store((Method*)NULL, r->metadata_addr());

          if (!r->metadata_is_immediate()) {
            r->fix_metadata_relocation();
          }
        }
      }
      break;
    }

    default:
      break;
    }
  }

  return true;
}

// Iterating over all nmethods, e.g. with the help of CodeCache::nmethods_do(fun),
// was found not to be inherently safe. There is a chance that fields are seen
// which are not properly initialized. This happens despite the fact that
// nmethods_do() asserts the CodeCache_lock to be held.
// To bundle knowledge about necessary checks in one place, this function was introduced.
// It is not claimed that these checks are sufficient, but they were found to be necessary.
bool CompiledMethod::nmethod_access_is_safe(nmethod* nm) {
  Method* method = (nm == NULL) ? NULL : nm->method();  // nm->method() may be uninitialized, i.e. != NULL, but invalid
  return (nm != NULL) && (method != NULL) && (method->signature() != NULL) &&
         !nm->is_zombie() && !nm->is_not_installed() &&
         os::is_readable_pointer(method) &&
         os::is_readable_pointer(method->constants()) &&
         os::is_readable_pointer(method->signature());
}

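// Maps the pc of an implicit exception (e.g. a null check or a division by
// zero) to the continuation pc recorded in this method's implicit exception
// table. Returns NULL if there is no recorded continuation, in which case the
// normal error path reports the exception.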
address CompiledMethod::continuation_for_implicit_exception(address pc, bool for_div0_check) {
  // Exception happened outside inline-cache check code => we are inside
  // an active nmethod => use cpc to determine a return address
  int exception_offset = pc - code_begin();
  int cont_offset = ImplicitExceptionTable(this).continuation_offset(exception_offset);
#ifdef ASSERT
  if (cont_offset == 0) {
    Thread* thread = Thread::current();
    ResetNoHandleMark rnm; // Might be called from LEAF/QUICK ENTRY
    HandleMark hm(thread);
    ResourceMark rm(thread);
    CodeBlob* cb = CodeCache::find_blob(pc);
    assert(cb != NULL && cb == this, "");
    ttyLocker ttyl;
    tty->print_cr("implicit exception happened at " INTPTR_FORMAT, p2i(pc));
    print();
    method()->print_codes();
    print_code();
    print_pcs();
  }
#endif
  if (cont_offset == 0) {
    // Let the normal error handling report the exception
    return NULL;
  }
  if (cont_offset == exception_offset) {
#if INCLUDE_JVMCI
    Deoptimization::DeoptReason deopt_reason = for_div0_check ? Deoptimization::Reason_div0_check : Deoptimization::Reason_null_check;
    JavaThread *thread = JavaThread::current();
    thread->set_jvmci_implicit_exception_pc(pc);
    thread->set_pending_deoptimization(Deoptimization::make_trap_request(deopt_reason,
                                                                         Deoptimization::Action_reinterpret));
    return (SharedRuntime::deopt_blob()->implicit_exception_uncommon_trap());
#else
    ShouldNotReachHere();
#endif
  }
  return code_begin() + cont_offset;
}

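// Metadata closure that reports whether any Method* embedded in an nmethod's
// metadata is "old", i.e. has been superseded by a redefined version of its
// class (JVMTI RedefineClasses).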
class HasEvolDependency : public MetadataClosure {
  bool _has_evol_dependency;
 public:
  HasEvolDependency() : _has_evol_dependency(false) {}
  void do_metadata(Metadata* md) {
    if (md->is_method()) {
      Method* method = (Method*)md;
      if (method->is_old()) {
        _has_evol_dependency = true;
      }
    }
  }
  bool has_evol_dependency() const { return _has_evol_dependency; }
};

bool CompiledMethod::has_evol_metadata() {
  // Check the metadata in the relocations and CompiledICs for references
  // to old (redefined) methods, so that callers can deoptimize any nmethod
  // that has such a reference.
  HasEvolDependency check_evol;
  metadata_do(&check_evol);
  if (check_evol.has_evol_dependency() && log_is_enabled(Debug, redefine, class, nmethod)) {
    ResourceMark rm;
    log_debug(redefine, class, nmethod)
            ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d in nmethod metadata",
             _method->method_holder()->external_name(),
             _method->name()->as_C_string(),
             _method->signature()->as_C_string(),
             compile_id());
  }
  return check_evol.has_evol_dependency();
}
715 | |