1 | /* |
2 | * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #ifndef SHARE_RUNTIME_SHAREDRUNTIME_HPP |
26 | #define SHARE_RUNTIME_SHAREDRUNTIME_HPP |
27 | |
28 | #include "interpreter/bytecodeHistogram.hpp" |
29 | #include "interpreter/bytecodeTracer.hpp" |
30 | #include "interpreter/linkResolver.hpp" |
31 | #include "memory/allocation.hpp" |
32 | #include "memory/resourceArea.hpp" |
33 | #include "utilities/hashtable.hpp" |
34 | #include "utilities/macros.hpp" |
35 | |
36 | class AdapterHandlerEntry; |
37 | class AdapterHandlerTable; |
38 | class AdapterFingerPrint; |
39 | class vframeStream; |
40 | |
41 | // Runtime is the base class for various runtime interfaces |
42 | // (InterpreterRuntime, CompilerRuntime, etc.). It provides |
43 | // shared functionality such as exception forwarding (C++ to |
44 | // Java exceptions), locking/unlocking mechanisms, statistical |
45 | // information, etc. |
46 | |
47 | class SharedRuntime: AllStatic { |
48 | friend class VMStructs; |
49 | |
50 | private: |
51 | static bool resolve_sub_helper_internal(methodHandle callee_method, const frame& caller_frame, |
52 | CompiledMethod* caller_nm, bool is_virtual, bool is_optimized, |
53 | Handle receiver, CallInfo& call_info, Bytecodes::Code invoke_code, TRAPS); |
54 | static methodHandle resolve_sub_helper(JavaThread *thread, |
55 | bool is_virtual, |
56 | bool is_optimized, TRAPS); |
57 | |
58 | // Shared stub locations |
59 | |
60 | static RuntimeStub* _wrong_method_blob; |
61 | static RuntimeStub* _wrong_method_abstract_blob; |
62 | static RuntimeStub* _ic_miss_blob; |
63 | static RuntimeStub* _resolve_opt_virtual_call_blob; |
64 | static RuntimeStub* _resolve_virtual_call_blob; |
65 | static RuntimeStub* _resolve_static_call_blob; |
66 | static address _resolve_static_call_entry; |
67 | |
68 | static DeoptimizationBlob* _deopt_blob; |
69 | |
70 | static SafepointBlob* _polling_page_vectors_safepoint_handler_blob; |
71 | static SafepointBlob* _polling_page_safepoint_handler_blob; |
72 | static SafepointBlob* _polling_page_return_handler_blob; |
73 | |
74 | #ifdef COMPILER2 |
75 | static UncommonTrapBlob* _uncommon_trap_blob; |
76 | #endif // COMPILER2 |
77 | |
78 | #ifndef PRODUCT |
79 | // Counters |
80 | static int _nof_megamorphic_calls; // total # of megamorphic calls (through vtable) |
81 | #endif // !PRODUCT |
82 | |
83 | private: |
84 | enum { POLL_AT_RETURN, POLL_AT_LOOP, POLL_AT_VECTOR_LOOP }; |
85 | static SafepointBlob* generate_handler_blob(address call_ptr, int poll_type); |
86 | static RuntimeStub* generate_resolve_blob(address destination, const char* name); |
87 | |
88 | public: |
89 | static void generate_stubs(void); |
90 | |
91 | // max bytes for each dtrace string parameter |
92 | enum { max_dtrace_string_size = 256 }; |
93 | |
94 | // The following arithmetic routines are used on platforms that do |
95 | // not have machine instructions to implement their functionality. |
96 | // Do not remove these. |
97 | |
  // long arithmetic
99 | static jlong lmul(jlong y, jlong x); |
100 | static jlong ldiv(jlong y, jlong x); |
101 | static jlong lrem(jlong y, jlong x); |
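
  // A minimal illustrative sketch (not an actual HotSpot code path): a back
  // end that lacks a 64-bit divide instruction could route ldiv through a
  // leaf runtime call instead of emitting an inline instruction, e.g.
  //   address entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
  //   __ call_VM_leaf(entry, 2 /* number_of_arguments */);
  // (hypothetical code-generator fragment; argument setup is platform-specific)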
102 | |
103 | // float and double remainder |
104 | static jfloat frem(jfloat x, jfloat y); |
105 | static jdouble drem(jdouble x, jdouble y); |
106 | |
107 | |
108 | #ifdef _WIN64 |
109 | // Workaround for fmod issue in the Windows x64 CRT |
110 | static double fmod_winx64(double x, double y); |
111 | #endif |
112 | |
113 | #ifdef __SOFTFP__ |
114 | static jfloat fadd(jfloat x, jfloat y); |
115 | static jfloat fsub(jfloat x, jfloat y); |
116 | static jfloat fmul(jfloat x, jfloat y); |
117 | static jfloat fdiv(jfloat x, jfloat y); |
118 | |
119 | static jdouble dadd(jdouble x, jdouble y); |
120 | static jdouble dsub(jdouble x, jdouble y); |
121 | static jdouble dmul(jdouble x, jdouble y); |
122 | static jdouble ddiv(jdouble x, jdouble y); |
123 | #endif // __SOFTFP__ |
124 | |
125 | // float conversion (needs to set appropriate rounding mode) |
126 | static jint f2i (jfloat x); |
127 | static jlong f2l (jfloat x); |
128 | static jint d2i (jdouble x); |
129 | static jlong d2l (jdouble x); |
130 | static jfloat d2f (jdouble x); |
131 | static jfloat l2f (jlong x); |
132 | static jdouble l2d (jlong x); |
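
  // Per the Java Language Specification the floating-point to integral
  // conversions (f2i, f2l, d2i, d2l) round toward zero, saturate on overflow
  // and map NaN to zero, e.g. (illustrative values):
  //   f2i(NaN)    == 0
  //   f2i(1e20f)  == max_jint
  //   d2l(-1e300) == min_jlong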
133 | |
134 | #ifdef __SOFTFP__ |
135 | static jfloat i2f (jint x); |
136 | static jdouble i2d (jint x); |
137 | static jdouble f2d (jfloat x); |
138 | #endif // __SOFTFP__ |
139 | |
140 | // double trigonometrics and transcendentals |
141 | static jdouble dsin(jdouble x); |
142 | static jdouble dcos(jdouble x); |
143 | static jdouble dtan(jdouble x); |
144 | static jdouble dlog(jdouble x); |
145 | static jdouble dlog10(jdouble x); |
146 | static jdouble dexp(jdouble x); |
147 | static jdouble dpow(jdouble x, jdouble y); |
148 | |
149 | #if defined(__SOFTFP__) || defined(E500V2) |
150 | static double dabs(double f); |
151 | #endif |
152 | |
153 | #if defined(__SOFTFP__) || defined(PPC) |
154 | static double dsqrt(double f); |
155 | #endif |
156 | |
157 | // Montgomery multiplication |
158 | static void montgomery_multiply(jint *a_ints, jint *b_ints, jint *n_ints, |
159 | jint len, jlong inv, jint *m_ints); |
160 | static void montgomery_square(jint *a_ints, jint *n_ints, |
161 | jint len, jlong inv, jint *m_ints); |
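
  // These support the BigInteger montgomeryMultiply/montgomerySquare
  // intrinsics.  Roughly, with R = 2^(32*len), they compute a*b*R^-1 mod n
  // (respectively a*a*R^-1 mod n) into m_ints, using the precomputed inverse
  // word 'inv' for the reduction.  (Summary only; see the definitions in
  // sharedRuntime.cpp for the exact contract.)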
162 | |
163 | #ifdef __SOFTFP__ |
  // Under __SOFTFP__ the C++ compiler generates soft-float code and passes
  // float and double arguments in registers.
166 | static int fcmpl(float x, float y); |
167 | static int fcmpg(float x, float y); |
168 | static int dcmpl(double x, double y); |
169 | static int dcmpg(double x, double y); |
170 | |
171 | static int unordered_fcmplt(float x, float y); |
172 | static int unordered_dcmplt(double x, double y); |
173 | static int unordered_fcmple(float x, float y); |
174 | static int unordered_dcmple(double x, double y); |
175 | static int unordered_fcmpge(float x, float y); |
176 | static int unordered_dcmpge(double x, double y); |
177 | static int unordered_fcmpgt(float x, float y); |
178 | static int unordered_dcmpgt(double x, double y); |
179 | |
180 | static float fneg(float f); |
181 | static double dneg(double f); |
182 | #endif |
183 | |
184 | // exception handling across interpreter/compiler boundaries |
185 | static address raw_exception_handler_for_return_address(JavaThread* thread, address return_address); |
186 | static address exception_handler_for_return_address(JavaThread* thread, address return_address); |
187 | |
188 | // exception handling and implicit exceptions |
189 | static address compute_compiled_exc_handler(CompiledMethod* nm, address ret_pc, Handle& exception, |
190 | bool force_unwind, bool top_frame_only, bool& recursive_exception_occurred); |
191 | enum ImplicitExceptionKind { |
192 | IMPLICIT_NULL, |
193 | IMPLICIT_DIVIDE_BY_ZERO, |
194 | STACK_OVERFLOW |
195 | }; |
196 | static void throw_AbstractMethodError(JavaThread* thread); |
197 | static void throw_IncompatibleClassChangeError(JavaThread* thread); |
198 | static void throw_ArithmeticException(JavaThread* thread); |
199 | static void throw_NullPointerException(JavaThread* thread); |
200 | static void throw_NullPointerException_at_call(JavaThread* thread); |
201 | static void throw_StackOverflowError(JavaThread* thread); |
202 | static void throw_delayed_StackOverflowError(JavaThread* thread); |
203 | static void throw_StackOverflowError_common(JavaThread* thread, bool delayed); |
204 | static address continuation_for_implicit_exception(JavaThread* thread, |
205 | address faulting_pc, |
206 | ImplicitExceptionKind exception_kind); |
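
  // Illustrative use (sketch only): a platform signal handler that catches a
  // fault at a compiled implicit null check can do roughly
  //   address cont = SharedRuntime::continuation_for_implicit_exception(
  //                      thread, faulting_pc, SharedRuntime::IMPLICIT_NULL);
  // and, if 'cont' is non-NULL, redirect the saved PC to 'cont' so that
  // execution resumes in code which raises the corresponding Java exception.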
207 | |
208 | // Post-slow-path-allocation, pre-initializing-stores step for |
209 | // implementing e.g. ReduceInitialCardMarks |
210 | static void on_slowpath_allocation_exit(JavaThread* thread); |
211 | |
212 | static void enable_stack_reserved_zone(JavaThread* thread); |
213 | static frame look_for_reserved_stack_annotated_method(JavaThread* thread, frame fr); |
214 | |
215 | // Shared stub locations |
216 | static address get_poll_stub(address pc); |
217 | |
218 | static address get_ic_miss_stub() { |
    assert(_ic_miss_blob != NULL, "oops");
220 | return _ic_miss_blob->entry_point(); |
221 | } |
222 | |
223 | static address get_handle_wrong_method_stub() { |
    assert(_wrong_method_blob != NULL, "oops");
225 | return _wrong_method_blob->entry_point(); |
226 | } |
227 | |
228 | static address get_handle_wrong_method_abstract_stub() { |
    assert(_wrong_method_abstract_blob != NULL, "oops");
230 | return _wrong_method_abstract_blob->entry_point(); |
231 | } |
232 | |
233 | #ifdef COMPILER2 |
234 | static void generate_uncommon_trap_blob(void); |
235 | static UncommonTrapBlob* uncommon_trap_blob() { return _uncommon_trap_blob; } |
236 | #endif // COMPILER2 |
237 | |
238 | static address get_resolve_opt_virtual_call_stub() { |
    assert(_resolve_opt_virtual_call_blob != NULL, "oops");
240 | return _resolve_opt_virtual_call_blob->entry_point(); |
241 | } |
242 | static address get_resolve_virtual_call_stub() { |
    assert(_resolve_virtual_call_blob != NULL, "oops");
244 | return _resolve_virtual_call_blob->entry_point(); |
245 | } |
246 | static address get_resolve_static_call_stub() { |
    assert(_resolve_static_call_blob != NULL, "oops");
248 | return _resolve_static_call_blob->entry_point(); |
249 | } |
250 | |
251 | static SafepointBlob* polling_page_return_handler_blob() { return _polling_page_return_handler_blob; } |
252 | static SafepointBlob* polling_page_safepoint_handler_blob() { return _polling_page_safepoint_handler_blob; } |
253 | static SafepointBlob* polling_page_vectors_safepoint_handler_blob() { return _polling_page_vectors_safepoint_handler_blob; } |
254 | |
255 | // Counters |
256 | #ifndef PRODUCT |
257 | static address nof_megamorphic_calls_addr() { return (address)&_nof_megamorphic_calls; } |
258 | #endif // PRODUCT |
259 | |
260 | // Helper routine for full-speed JVMTI exception throwing support |
261 | static void throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception); |
262 | static void throw_and_post_jvmti_exception(JavaThread *thread, Symbol* name, const char *message = NULL); |
263 | |
264 | // RedefineClasses() tracing support for obsolete method entry |
265 | static int rc_trace_method_entry(JavaThread* thread, Method* m); |
266 | |
267 | // To be used as the entry point for unresolved native methods. |
268 | static address native_method_throw_unsatisfied_link_error_entry(); |
269 | static address native_method_throw_unsupported_operation_exception_entry(); |
270 | |
271 | static oop retrieve_receiver(Symbol* sig, frame caller); |
272 | |
273 | static void register_finalizer(JavaThread* thread, oopDesc* obj); |
274 | |
275 | // dtrace notifications |
276 | static int dtrace_object_alloc(oopDesc* o, int size); |
277 | static int dtrace_object_alloc_base(Thread* thread, oopDesc* o, int size); |
278 | static int dtrace_method_entry(JavaThread* thread, Method* m); |
279 | static int dtrace_method_exit(JavaThread* thread, Method* m); |
280 | |
  // Utility method for retrieving the Java thread id; returns 0 if the
  // thread is not a well-formed Java thread.
283 | static jlong get_java_tid(Thread* thread); |
284 | |
285 | |
  // used by native wrappers to re-enable the yellow zone if an overflow happened in native code
287 | static void reguard_yellow_pages(); |
288 | |
289 | // Fill in the "X cannot be cast to a Y" message for ClassCastException |
290 | // |
291 | // @param thr the current thread |
292 | // @param caster_klass the class of the object we are casting |
293 | // @return the dynamically allocated exception message (must be freed |
294 | // by the caller using a resource mark) |
295 | // |
296 | // BCP must refer to the current 'checkcast' opcode for the frame |
297 | // on top of the stack. |
298 | // The caller (or one of its callers) must use a ResourceMark |
299 | // in order to correctly free the result. |
300 | // |
301 | static char* generate_class_cast_message(JavaThread* thr, Klass* caster_klass); |
302 | |
303 | // Fill in the "X cannot be cast to a Y" message for ClassCastException |
304 | // |
305 | // @param caster_klass the class of the object we are casting |
  // @param target_klass the klass we are attempting to cast to
307 | // @return the dynamically allocated exception message (must be freed |
308 | // by the caller using a resource mark) |
309 | // |
  // This version does not require access to the frame, so it can be called
  // from interpreted code.
  // The caller (or one of its callers) must use a ResourceMark
  // in order to correctly free the result.
314 | // |
315 | static char* generate_class_cast_message(Klass* caster_klass, Klass* target_klass, Symbol* target_klass_name = NULL); |
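
  // Typical use (illustrative sketch, assuming a context with THREAD available):
  //   ResourceMark rm(THREAD);
  //   char* msg = SharedRuntime::generate_class_cast_message(obj->klass(), target_klass);
  //   THROW_MSG(vmSymbols::java_lang_ClassCastException(), msg);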
316 | |
  // Resolves a call site - may patch the destination of the call into the
  // compiled code.
319 | static methodHandle resolve_helper(JavaThread *thread, |
320 | bool is_virtual, |
321 | bool is_optimized, TRAPS); |
322 | |
323 | private: |
324 | // deopt blob |
325 | static void generate_deopt_blob(void); |
326 | |
327 | static bool handle_ic_miss_helper_internal(Handle receiver, CompiledMethod* caller_nm, const frame& caller_frame, |
328 | methodHandle callee_method, Bytecodes::Code bc, CallInfo& call_info, |
329 | bool& needs_ic_stub_refill, TRAPS); |
330 | |
331 | public: |
332 | static DeoptimizationBlob* deopt_blob(void) { return _deopt_blob; } |
333 | |
334 | // Resets a call-site in compiled code so it will get resolved again. |
335 | static methodHandle reresolve_call_site(JavaThread *thread, TRAPS); |
336 | |
337 | // In the code prolog, if the klass comparison fails, the inline cache |
338 | // misses and the call site is patched to megamorphic |
339 | static methodHandle handle_ic_miss_helper(JavaThread* thread, TRAPS); |
340 | |
341 | // Find the method that called us. |
342 | static methodHandle find_callee_method(JavaThread* thread, TRAPS); |
343 | |
344 | |
345 | private: |
346 | static Handle find_callee_info(JavaThread* thread, |
347 | Bytecodes::Code& bc, |
348 | CallInfo& callinfo, TRAPS); |
349 | static Handle find_callee_info_helper(JavaThread* thread, |
350 | vframeStream& vfst, |
351 | Bytecodes::Code& bc, |
352 | CallInfo& callinfo, TRAPS); |
353 | |
  static methodHandle extract_attached_method(vframeStream& vfst);
355 | |
356 | static address clean_virtual_call_entry(); |
357 | static address clean_opt_virtual_call_entry(); |
358 | static address clean_static_call_entry(); |
359 | |
360 | #if defined(X86) && defined(COMPILER1) |
361 | // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available. |
362 | static void inline_check_hashcode_from_object_header(MacroAssembler* masm, const methodHandle& method, Register obj_reg, Register result); |
363 | #endif // X86 && COMPILER1 |
364 | |
365 | public: |
366 | |
367 | // Read the array of BasicTypes from a Java signature, and compute where |
368 | // compiled Java code would like to put the results. Values in reg_lo and |
369 | // reg_hi refer to 4-byte quantities. Values less than SharedInfo::stack0 are |
370 | // registers, those above refer to 4-byte stack slots. All stack slots are |
371 | // based off of the window top. SharedInfo::stack0 refers to the first usable |
  // slot in the bottom of the frame. SharedInfo::stack0+1 refers to the memory word
  // 4 bytes higher. So on sparc, because the register window save area is at
  // the bottom of the frame, the first 16 words are skipped and SharedInfo::stack0
  // is just above it.
  // The return value is the maximum number of VMReg stack slots the convention will use.
377 | static int java_calling_convention(const BasicType* sig_bt, VMRegPair* regs, int total_args_passed, int is_outgoing); |
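
  // Illustrative sketch of java_calling_convention() use: laying out an
  // (Object, int, long) signature.  Longs and doubles occupy two BasicType
  // slots, with T_VOID as the second half.
  //   BasicType sig_bt[] = { T_OBJECT, T_INT, T_LONG, T_VOID };
  //   VMRegPair regs[4];
  //   int slots = SharedRuntime::java_calling_convention(sig_bt, regs, 4, false);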
378 | |
379 | static void check_member_name_argument_is_last_argument(const methodHandle& method, |
380 | const BasicType* sig_bt, |
381 | const VMRegPair* regs) NOT_DEBUG_RETURN; |
382 | |
383 | // Ditto except for calling C |
384 | // |
385 | // C argument in register AND stack slot. |
386 | // Some architectures require that an argument must be passed in a register |
387 | // AND in a stack slot. These architectures provide a second VMRegPair array |
388 | // to be filled by the c_calling_convention method. On other architectures, |
  // NULL is passed as the second VMRegPair array, so arguments are either
390 | // passed in a register OR in a stack slot. |
391 | static int c_calling_convention(const BasicType *sig_bt, VMRegPair *regs, VMRegPair *regs2, |
392 | int total_args_passed); |
393 | |
394 | static size_t trampoline_size(); |
395 | |
396 | static void generate_trampoline(MacroAssembler *masm, address destination); |
397 | |
398 | // Generate I2C and C2I adapters. These adapters are simple argument marshalling |
  // blobs. Unlike adapters in the Tiger and earlier releases, the code in these
  // blobs does not create a new frame and is therefore virtually invisible
  // to the stack-walking code. In general these blobs extend the caller's stack
  // as needed for the conversion of argument locations.
403 | |
404 | // When calling a c2i blob the code will always call the interpreter even if |
405 | // by the time we reach the blob there is compiled code available. This allows |
406 | // the blob to pass the incoming stack pointer (the sender sp) in a known |
407 | // location for the interpreter to record. This is used by the frame code |
408 | // to correct the sender code to match up with the stack pointer when the |
409 | // thread left the compiled code. In addition it allows the interpreter |
410 | // to remove the space the c2i adapter allocated to do its argument conversion. |
411 | |
  // Although a call through a c2i blob always runs interpreted, even if compiled
  // code is present, when we see that compiled code is present the compiled call
  // site will be patched/re-resolved so that later calls run compiled.
415 | |
  // Additionally a c2i blob needs to have an unverified entry because it can be reached
  // in situations where the call site is an inline cache site and may go megamorphic.
418 | |
  // An i2c adapter is simpler than the c2i adapter. This is because it is assumed
  // that the interpreter, before it does any call dispatch, will record the current
421 | // stack pointer in the interpreter frame. On return it will restore the stack |
422 | // pointer as needed. This means the i2c adapter code doesn't need any special |
423 | // handshaking path with compiled code to keep the stack walking correct. |
424 | |
425 | static AdapterHandlerEntry* generate_i2c2i_adapters(MacroAssembler *_masm, |
426 | int total_args_passed, |
427 | int max_arg, |
428 | const BasicType *sig_bt, |
429 | const VMRegPair *regs, |
430 | AdapterFingerPrint* fingerprint); |
431 | |
432 | static void gen_i2c_adapter(MacroAssembler *_masm, |
433 | int total_args_passed, |
434 | int comp_args_on_stack, |
435 | const BasicType *sig_bt, |
436 | const VMRegPair *regs); |
437 | |
438 | // OSR support |
439 | |
440 | // OSR_migration_begin will extract the jvm state from an interpreter |
441 | // frame (locals, monitors) and store the data in a piece of C heap |
442 | // storage. This then allows the interpreter frame to be removed from the |
443 | // stack and the OSR nmethod to be called. That method is called with a |
444 | // pointer to the C heap storage. This pointer is the return value from |
445 | // OSR_migration_begin. |
446 | |
447 | static intptr_t* OSR_migration_begin(JavaThread *thread); |
448 | |
449 | // OSR_migration_end is a trivial routine. It is called after the compiled |
450 | // method has extracted the jvm state from the C heap that OSR_migration_begin |
  // created. Its entire job is simply to free this storage.
452 | static void OSR_migration_end(intptr_t* buf); |
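
  // Illustrative hand-off (sketch; the real call sites live in the interpreter
  // and in generated OSR entry code):
  //   intptr_t* buf = SharedRuntime::OSR_migration_begin(thread); // locals/monitors -> C heap
  //   ... pop the interpreter frame and enter the OSR nmethod, passing 'buf' ...
  //   SharedRuntime::OSR_migration_end(buf);                      // compiled code frees the buffer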
453 | |
454 | // Convert a sig into a calling convention register layout |
455 | // and find interesting things about it. |
456 | static VMRegPair* find_callee_arguments(Symbol* sig, bool has_receiver, bool has_appendix, int *arg_size); |
457 | static VMReg name_for_receiver(); |
458 | |
459 | // "Top of Stack" slots that may be unused by the calling convention but must |
460 | // otherwise be preserved. |
461 | // On Intel these are not necessary and the value can be zero. |
462 | // On Sparc this describes the words reserved for storing a register window |
463 | // when an interrupt occurs. |
464 | static uint out_preserve_stack_slots(); |
465 | |
  // Is the vector's size (in bytes) bigger than the size saved by default?
  // For example, on x86 the 16-byte XMM registers are saved by default.
468 | static bool is_wide_vector(int size); |
469 | |
470 | // Save and restore a native result |
471 | static void save_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots); |
472 | static void restore_native_result(MacroAssembler *_masm, BasicType ret_type, int frame_slots); |
473 | |
474 | // Generate a native wrapper for a given method. The method takes arguments |
475 | // in the Java compiled code convention, marshals them to the native |
476 | // convention (handlizes oops, etc), transitions to native, makes the call, |
477 | // returns to java state (possibly blocking), unhandlizes any result and |
478 | // returns. |
479 | // |
480 | // The wrapper may contain special-case code if the given method |
481 | // is a JNI critical method, or a compiled method handle adapter, |
482 | // such as _invokeBasic, _linkToVirtual, etc. |
483 | static nmethod* generate_native_wrapper(MacroAssembler* masm, |
484 | const methodHandle& method, |
485 | int compile_id, |
486 | BasicType* sig_bt, |
487 | VMRegPair* regs, |
488 | BasicType ret_type); |
489 | |
490 | // Block before entering a JNI critical method |
491 | static void block_for_jni_critical(JavaThread* thread); |
492 | |
493 | // Pin/Unpin object |
494 | static oopDesc* pin_object(JavaThread* thread, oopDesc* obj); |
495 | static void unpin_object(JavaThread* thread, oopDesc* obj); |
496 | |
497 | // A compiled caller has just called the interpreter, but compiled code |
  // exists. Patch the caller so it no longer calls into the interpreter.
499 | static void fixup_callers_callsite(Method* moop, address ret_pc); |
500 | static bool should_fixup_call_destination(address destination, address entry_point, address caller_pc, Method* moop, CodeBlob* cb); |
501 | |
502 | // Slow-path Locking and Unlocking |
503 | static void complete_monitor_locking_C(oopDesc* obj, BasicLock* lock, JavaThread* thread); |
504 | static void complete_monitor_unlocking_C(oopDesc* obj, BasicLock* lock, JavaThread* thread); |
505 | |
506 | // Resolving of calls |
507 | static address resolve_static_call_C (JavaThread *thread); |
508 | static address resolve_virtual_call_C (JavaThread *thread); |
509 | static address resolve_opt_virtual_call_C(JavaThread *thread); |
510 | |
511 | // arraycopy, the non-leaf version. (See StubRoutines for all the leaf calls.) |
512 | static void slow_arraycopy_C(oopDesc* src, jint src_pos, |
513 | oopDesc* dest, jint dest_pos, |
514 | jint length, JavaThread* thread); |
515 | |
516 | // handle ic miss with caller being compiled code |
517 | // wrong method handling (inline cache misses, zombie methods) |
518 | static address handle_wrong_method(JavaThread* thread); |
519 | static address handle_wrong_method_abstract(JavaThread* thread); |
520 | static address handle_wrong_method_ic_miss(JavaThread* thread); |
521 | |
522 | static address handle_unsafe_access(JavaThread* thread, address next_pc); |
523 | |
524 | #ifndef PRODUCT |
525 | |
526 | // Collect and print inline cache miss statistics |
527 | private: |
528 | enum { maxICmiss_count = 100 }; |
529 | static int _ICmiss_index; // length of IC miss histogram |
530 | static int _ICmiss_count[maxICmiss_count]; // miss counts |
531 | static address _ICmiss_at[maxICmiss_count]; // miss addresses |
532 | static void trace_ic_miss(address at); |
533 | |
534 | public: |
535 | static int _throw_null_ctr; // throwing a null-pointer exception |
536 | static int _ic_miss_ctr; // total # of IC misses |
537 | static int _wrong_method_ctr; |
538 | static int _resolve_static_ctr; |
539 | static int _resolve_virtual_ctr; |
540 | static int _resolve_opt_virtual_ctr; |
541 | static int _implicit_null_throws; |
542 | static int _implicit_div0_throws; |
543 | |
544 | static int _jbyte_array_copy_ctr; // Slow-path byte array copy |
545 | static int _jshort_array_copy_ctr; // Slow-path short array copy |
546 | static int _jint_array_copy_ctr; // Slow-path int array copy |
547 | static int _jlong_array_copy_ctr; // Slow-path long array copy |
548 | static int _oop_array_copy_ctr; // Slow-path oop array copy |
549 | static int _checkcast_array_copy_ctr; // Slow-path oop array copy, with cast |
550 | static int _unsafe_array_copy_ctr; // Slow-path includes alignment checks |
551 | static int _generic_array_copy_ctr; // Slow-path includes type decoding |
552 | static int _slow_array_copy_ctr; // Slow-path failed out to a method call |
553 | |
554 | static int _new_instance_ctr; // 'new' object requires GC |
555 | static int _new_array_ctr; // 'new' array requires GC |
556 | static int _multi1_ctr, _multi2_ctr, _multi3_ctr, _multi4_ctr, _multi5_ctr; |
557 | static int _find_handler_ctr; // find exception handler |
558 | static int _rethrow_ctr; // rethrow exception |
559 | static int _mon_enter_stub_ctr; // monitor enter stub |
560 | static int _mon_exit_stub_ctr; // monitor exit stub |
561 | static int _mon_enter_ctr; // monitor enter slow |
562 | static int _mon_exit_ctr; // monitor exit slow |
  static int _partial_subtype_ctr;        // StubRoutines::partial_subtype_check
564 | |
565 | // Statistics code |
566 | // stats for "normal" compiled calls (non-interface) |
567 | static int _nof_normal_calls; // total # of calls |
568 | static int _nof_optimized_calls; // total # of statically-bound calls |
569 | static int _nof_inlined_calls; // total # of inlined normal calls |
570 | static int _nof_static_calls; // total # of calls to static methods or super methods (invokespecial) |
571 | static int _nof_inlined_static_calls; // total # of inlined static calls |
572 | // stats for compiled interface calls |
573 | static int _nof_interface_calls; // total # of compiled calls |
574 | static int _nof_optimized_interface_calls; // total # of statically-bound interface calls |
575 | static int _nof_inlined_interface_calls; // total # of inlined interface calls |
576 | static int _nof_megamorphic_interface_calls;// total # of megamorphic interface calls |
577 | // stats for runtime exceptions |
578 | static int _nof_removable_exceptions; // total # of exceptions that could be replaced by branches due to inlining |
579 | |
580 | public: // for compiler |
581 | static address nof_normal_calls_addr() { return (address)&_nof_normal_calls; } |
582 | static address nof_optimized_calls_addr() { return (address)&_nof_optimized_calls; } |
583 | static address nof_inlined_calls_addr() { return (address)&_nof_inlined_calls; } |
584 | static address nof_static_calls_addr() { return (address)&_nof_static_calls; } |
585 | static address nof_inlined_static_calls_addr() { return (address)&_nof_inlined_static_calls; } |
586 | static address nof_interface_calls_addr() { return (address)&_nof_interface_calls; } |
587 | static address nof_optimized_interface_calls_addr() { return (address)&_nof_optimized_interface_calls; } |
588 | static address nof_inlined_interface_calls_addr() { return (address)&_nof_inlined_interface_calls; } |
589 | static address nof_megamorphic_interface_calls_addr() { return (address)&_nof_megamorphic_interface_calls; } |
590 | static void print_call_statistics(int comp_total); |
591 | static void print_statistics(); |
592 | static void print_ic_miss_histogram(); |
593 | |
594 | #endif // PRODUCT |
595 | }; |
596 | |
597 | |
598 | // --------------------------------------------------------------------------- |
599 | // Implementation of AdapterHandlerLibrary |
600 | // |
601 | // This library manages argument marshaling adapters and native wrappers. |
602 | // There are 2 flavors of adapters: I2C and C2I. |
603 | // |
604 | // The I2C flavor takes a stock interpreted call setup, marshals the |
// arguments for a Java-compiled call, and jumps to Rmethod->code()->code_begin().
// It is broken to call it without an nmethod assigned.
607 | // The usual behavior is to lift any register arguments up out of the |
608 | // stack and possibly re-pack the extra arguments to be contiguous. |
609 | // I2C adapters will save what the interpreter's stack pointer will be |
610 | // after arguments are popped, then adjust the interpreter's frame |
611 | // size to force alignment and possibly to repack the arguments. |
612 | // After re-packing, it jumps to the compiled code start. There are |
613 | // no safepoints in this adapter code and a GC cannot happen while |
614 | // marshaling is in progress. |
615 | // |
616 | // The C2I flavor takes a stock compiled call setup plus the target method in |
617 | // Rmethod, marshals the arguments for an interpreted call and jumps to |
618 | // Rmethod->_i2i_entry. On entry, the interpreted frame has not yet been |
619 | // setup. Compiled frames are fixed-size and the args are likely not in the |
620 | // right place. Hence all the args will likely be copied into the |
621 | // interpreter's frame, forcing that frame to grow. The compiled frame's |
622 | // outgoing stack args will be dead after the copy. |
623 | // |
624 | // Native wrappers, like adapters, marshal arguments. Unlike adapters they |
625 | // also perform an official frame push & pop. They have a call to the native |
626 | // routine in their middles and end in a return (instead of ending in a jump). |
627 | // The native wrappers are stored in real nmethods instead of the BufferBlobs |
628 | // used by the adapters. The code generation happens here because it's very |
629 | // similar to what the adapters have to do. |
630 | |
631 | class AdapterHandlerEntry : public BasicHashtableEntry<mtCode> { |
632 | friend class AdapterHandlerTable; |
633 | |
634 | private: |
635 | AdapterFingerPrint* _fingerprint; |
636 | address _i2c_entry; |
637 | address _c2i_entry; |
638 | address _c2i_unverified_entry; |
639 | |
640 | #ifdef ASSERT |
641 | // Captures code and signature used to generate this adapter when |
642 | // verifying adapter equivalence. |
643 | unsigned char* _saved_code; |
644 | int _saved_code_length; |
645 | #endif |
646 | |
647 | void init(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) { |
648 | _fingerprint = fingerprint; |
649 | _i2c_entry = i2c_entry; |
650 | _c2i_entry = c2i_entry; |
651 | _c2i_unverified_entry = c2i_unverified_entry; |
652 | #ifdef ASSERT |
653 | _saved_code = NULL; |
654 | _saved_code_length = 0; |
655 | #endif |
656 | } |
657 | |
658 | void deallocate(); |
659 | |
660 | // should never be used |
661 | AdapterHandlerEntry(); |
662 | |
663 | public: |
664 | address get_i2c_entry() const { return _i2c_entry; } |
665 | address get_c2i_entry() const { return _c2i_entry; } |
666 | address get_c2i_unverified_entry() const { return _c2i_unverified_entry; } |
667 | address base_address(); |
668 | void relocate(address new_base); |
669 | |
670 | AdapterFingerPrint* fingerprint() const { return _fingerprint; } |
671 | |
672 | AdapterHandlerEntry* next() { |
673 | return (AdapterHandlerEntry*)BasicHashtableEntry<mtCode>::next(); |
674 | } |
675 | |
676 | #ifdef ASSERT |
677 | // Used to verify that code generated for shared adapters is equivalent |
678 | void save_code (unsigned char* code, int length); |
679 | bool compare_code(unsigned char* code, int length); |
680 | #endif |
681 | |
682 | //virtual void print_on(outputStream* st) const; DO NOT USE |
683 | void print_adapter_on(outputStream* st) const; |
684 | }; |
685 | |
686 | // This class is used only with DumpSharedSpaces==true. It holds extra information |
687 | // that's used only during CDS dump time. |
688 | // For details, see comments around Method::link_method() |
689 | class CDSAdapterHandlerEntry: public AdapterHandlerEntry { |
690 | address _c2i_entry_trampoline; // allocated from shared spaces "MC" region |
691 | AdapterHandlerEntry** _adapter_trampoline; // allocated from shared spaces "MD" region |
692 | |
693 | public: |
694 | address get_c2i_entry_trampoline() const { return _c2i_entry_trampoline; } |
695 | AdapterHandlerEntry** get_adapter_trampoline() const { return _adapter_trampoline; } |
696 | void init() NOT_CDS_RETURN; |
697 | }; |
698 | |
699 | |
700 | class AdapterHandlerLibrary: public AllStatic { |
701 | private: |
702 | static BufferBlob* _buffer; // the temporary code buffer in CodeCache |
703 | static AdapterHandlerTable* _adapters; |
704 | static AdapterHandlerEntry* _abstract_method_handler; |
705 | static BufferBlob* buffer_blob(); |
706 | static void initialize(); |
707 | static AdapterHandlerEntry* get_adapter0(const methodHandle& method); |
708 | |
709 | public: |
710 | |
711 | static AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, |
712 | address i2c_entry, address c2i_entry, address c2i_unverified_entry); |
713 | static void create_native_wrapper(const methodHandle& method); |
714 | static AdapterHandlerEntry* get_adapter(const methodHandle& method); |
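
  // Illustrative use of get_adapter() (sketch only): look up the shared adapter
  // for a method's signature and read its entry points, e.g.
  //   AdapterHandlerEntry* e = AdapterHandlerLibrary::get_adapter(mh);
  //   address i2c = e->get_i2c_entry();
  //   address c2i = e->get_c2i_entry();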
715 | |
716 | static void print_handler(const CodeBlob* b) { print_handler_on(tty, b); } |
717 | static void print_handler_on(outputStream* st, const CodeBlob* b); |
718 | static bool contains(const CodeBlob* b); |
719 | #ifndef PRODUCT |
720 | static void print_statistics(); |
721 | #endif // PRODUCT |
722 | |
723 | }; |
724 | |
725 | #endif // SHARE_RUNTIME_SHAREDRUNTIME_HPP |
726 | |