1/*
2 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#include "precompiled.hpp"
26#include "classfile/systemDictionary.hpp"
27#include "compiler/compilerOracle.hpp"
28#include "interpreter/bytecode.hpp"
29#include "interpreter/bytecodeStream.hpp"
30#include "interpreter/linkResolver.hpp"
31#include "memory/heapInspection.hpp"
32#include "memory/metaspaceClosure.hpp"
33#include "memory/resourceArea.hpp"
34#include "oops/methodData.inline.hpp"
35#include "prims/jvmtiRedefineClasses.hpp"
36#include "runtime/arguments.hpp"
37#include "runtime/compilationPolicy.hpp"
38#include "runtime/deoptimization.hpp"
39#include "runtime/handles.inline.hpp"
40#include "runtime/orderAccess.hpp"
41#include "runtime/safepointVerifiers.hpp"
42#include "utilities/align.hpp"
43#include "utilities/copy.hpp"
44
45// ==================================================================
46// DataLayout
47//
48// Overlay for generic profiling data.
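//
// Each entry is, roughly, a one-word header (packing the tag, flags and bci)
// followed by a tag-dependent number of intptr_t-sized cells.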
49
50// Some types of data layouts need a length field.
51bool DataLayout::needs_array_len(u1 tag) {
52 return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
53}
54
55// Perform generic initialization of the data. More specific
56// initialization occurs in overrides of ProfileData::post_initialize.
57void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
58 _header._bits = (intptr_t)0;
59 _header._struct._tag = tag;
60 _header._struct._bci = bci;
61 for (int i = 0; i < cell_count; i++) {
62 set_cell_at(i, (intptr_t)0);
63 }
64 if (needs_array_len(tag)) {
65 set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
66 }
67 if (tag == call_type_data_tag) {
68 CallTypeData::initialize(this, cell_count);
69 } else if (tag == virtual_call_type_data_tag) {
70 VirtualCallTypeData::initialize(this, cell_count);
71 }
72}
73
74void DataLayout::clean_weak_klass_links(bool always_clean) {
75 ResourceMark m;
76 data_in()->clean_weak_klass_links(always_clean);
77}
78
79
80// ==================================================================
81// ProfileData
82//
83// A ProfileData object is created to refer to a section of profiling
84// data in a structured way.
85
86// Constructor for invalid ProfileData.
87ProfileData::ProfileData() {
88 _data = NULL;
89}
90
91char* ProfileData::print_data_on_helper(const MethodData* md) const {
92 DataLayout* dp = md->extra_data_base();
93 DataLayout* end = md->args_data_limit();
94 stringStream ss;
95 for (;; dp = MethodData::next_extra(dp)) {
96 assert(dp < end, "moved past end of extra data");
97 switch(dp->tag()) {
98 case DataLayout::speculative_trap_data_tag:
99 if (dp->bci() == bci()) {
100 SpeculativeTrapData* data = new SpeculativeTrapData(dp);
101 int trap = data->trap_state();
102 char buf[100];
103 ss.print("trap/");
104 data->method()->print_short_name(&ss);
105 ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
106 }
107 break;
108 case DataLayout::bit_data_tag:
109 break;
110 case DataLayout::no_tag:
111 case DataLayout::arg_info_data_tag:
112 return ss.as_string();
113 break;
114 default:
115 fatal("unexpected tag %d", dp->tag());
116 }
117 }
118 return NULL;
119}
120
121void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
122 print_data_on(st, print_data_on_helper(md));
123}
124
125void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
126 st->print("bci: %d", bci());
127 st->fill_to(tab_width_one);
128 st->print("%s", name);
129 tab(st);
130 int trap = trap_state();
131 if (trap != 0) {
132 char buf[100];
133 st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
134 }
135 if (extra != NULL) {
136 st->print("%s", extra);
137 }
138 int flags = data()->flags();
139 if (flags != 0) {
140 st->print("flags(%d) ", flags);
141 }
142}
143
144void ProfileData::tab(outputStream* st, bool first) const {
145 st->fill_to(first ? tab_width_one : tab_width_two);
146}
147
148// ==================================================================
149// BitData
150//
151// A BitData corresponds to a one-bit flag. This is used to indicate
152// whether a checkcast bytecode has seen a null value.
153
154
155void BitData::print_data_on(outputStream* st, const char* extra) const {
156 print_shared(st, "BitData", extra);
157 st->cr();
158}
159
160// ==================================================================
161// CounterData
162//
163// A CounterData corresponds to a simple counter.
164
165void CounterData::print_data_on(outputStream* st, const char* extra) const {
166 print_shared(st, "CounterData", extra);
167 st->print_cr("count(%u)", count());
168}
169
170// ==================================================================
171// JumpData
172//
173// A JumpData is used to access profiling information for a direct
174// branch. It is a counter, used for counting the number of branches,
175// plus a data displacement, used for realigning the data pointer to
176// the corresponding target bci.
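// (Roughly, when the branch is taken the interpreter advances its profile
// data pointer by 'displacement' so that it lands on the entry for the
// target bci; post_initialize below computes that displacement.)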
177
178void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
179 assert(stream->bci() == bci(), "wrong pos");
180 int target;
181 Bytecodes::Code c = stream->code();
182 if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
183 target = stream->dest_w();
184 } else {
185 target = stream->dest();
186 }
187 int my_di = mdo->dp_to_di(dp());
188 int target_di = mdo->bci_to_di(target);
189 int offset = target_di - my_di;
190 set_displacement(offset);
191}
192
193void JumpData::print_data_on(outputStream* st, const char* extra) const {
194 print_shared(st, "JumpData", extra);
195 st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
196}
197
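// Number of profiling cells needed for the reference arguments of a call:
// each profiled argument takes per_arg_cell_count cells (its stack slot and
// its type), and the count is capped at 'max' arguments.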
198int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
  // Parameter profiling includes the receiver
200 int args_count = include_receiver ? 1 : 0;
201 ResourceMark rm;
202 SignatureStream ss(signature);
203 args_count += ss.reference_parameter_count();
204 args_count = MIN2(args_count, max);
205 return args_count * per_arg_cell_count;
206}
207
208int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
209 assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
210 assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
211 const methodHandle m = stream->method();
212 int bci = stream->bci();
213 Bytecode_invoke inv(m, bci);
214 int args_cell = 0;
215 if (MethodData::profile_arguments_for_invoke(m, bci)) {
216 args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
217 }
218 int ret_cell = 0;
219 if (MethodData::profile_return_for_invoke(m, bci) && (inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY)) {
220 ret_cell = ReturnTypeEntry::static_cell_count();
221 }
222 int header_cell = 0;
223 if (args_cell + ret_cell > 0) {
224 header_cell = header_cell_count();
225 }
226
227 return header_cell + args_cell + ret_cell;
228}
229
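// Walks a method signature and records the stack-slot offset of each
// reference (object or array) argument, up to '_max' entries.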
230class ArgumentOffsetComputer : public SignatureInfo {
231private:
232 int _max;
233 GrowableArray<int> _offsets;
234
235 void set(int size, BasicType type) { _size += size; }
236 void do_object(int begin, int end) {
237 if (_offsets.length() < _max) {
238 _offsets.push(_size);
239 }
240 SignatureInfo::do_object(begin, end);
241 }
242 void do_array (int begin, int end) {
243 if (_offsets.length() < _max) {
244 _offsets.push(_size);
245 }
246 SignatureInfo::do_array(begin, end);
247 }
248
249public:
250 ArgumentOffsetComputer(Symbol* signature, int max)
251 : SignatureInfo(signature), _max(max), _offsets(Thread::current(), max) {
252 }
253
254 int total() { lazy_iterate_parameters(); return _size; }
255
256 int off_at(int i) const { return _offsets.at(i); }
257};
258
259void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
260 ResourceMark rm;
261 int start = 0;
  // Parameter profiling includes the receiver
263 if (include_receiver && has_receiver) {
264 set_stack_slot(0, 0);
265 set_type(0, type_none());
266 start += 1;
267 }
268 ArgumentOffsetComputer aos(signature, _number_of_entries-start);
269 aos.total();
270 for (int i = start; i < _number_of_entries; i++) {
271 set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
272 set_type(i, type_none());
273 }
274}
275
276void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
277 assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
278 Bytecode_invoke inv(stream->method(), stream->bci());
279
280 SignatureStream ss(inv.signature());
281 if (has_arguments()) {
282#ifdef ASSERT
283 ResourceMark rm;
284 int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
285 assert(count > 0, "room for args type but none found?");
286 check_number_of_arguments(count);
287#endif
288 _args.post_initialize(inv.signature(), inv.has_receiver(), false);
289 }
290
291 if (has_return()) {
292 assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
293 _ret.post_initialize();
294 }
295}
296
297void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
298 assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
299 Bytecode_invoke inv(stream->method(), stream->bci());
300
301 if (has_arguments()) {
302#ifdef ASSERT
303 ResourceMark rm;
304 SignatureStream ss(inv.signature());
305 int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
306 assert(count > 0, "room for args type but none found?");
307 check_number_of_arguments(count);
308#endif
309 _args.post_initialize(inv.signature(), inv.has_receiver(), false);
310 }
311
312 if (has_return()) {
313 assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
314 _ret.post_initialize();
315 }
316}
317
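// Forget recorded types whose Klass is no longer alive (or all of them if
// always_clean), keeping the associated status bits.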
318void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
319 for (int i = 0; i < _number_of_entries; i++) {
320 intptr_t p = type(i);
321 Klass* k = (Klass*)klass_part(p);
322 if (k != NULL && (always_clean || !k->is_loader_alive())) {
323 set_type(i, with_status((Klass*)NULL, p));
324 }
325 }
326}
327
328void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
329 intptr_t p = type();
330 Klass* k = (Klass*)klass_part(p);
331 if (k != NULL && (always_clean || !k->is_loader_alive())) {
332 set_type(with_status((Klass*)NULL, p));
333 }
334}
335
336bool TypeEntriesAtCall::return_profiling_enabled() {
337 return MethodData::profile_return();
338}
339
340bool TypeEntriesAtCall::arguments_profiling_enabled() {
341 return MethodData::profile_arguments();
342}
343
344void TypeEntries::print_klass(outputStream* st, intptr_t k) {
345 if (is_type_none(k)) {
346 st->print("none");
347 } else if (is_type_unknown(k)) {
348 st->print("unknown");
349 } else {
350 valid_klass(k)->print_value_on(st);
351 }
352 if (was_null_seen(k)) {
353 st->print(" (null seen)");
354 }
355}
356
357void TypeStackSlotEntries::print_data_on(outputStream* st) const {
358 for (int i = 0; i < _number_of_entries; i++) {
359 _pd->tab(st);
360 st->print("%d: stack(%u) ", i, stack_slot(i));
361 print_klass(st, type(i));
362 st->cr();
363 }
364}
365
366void ReturnTypeEntry::print_data_on(outputStream* st) const {
367 _pd->tab(st);
368 print_klass(st, type());
369 st->cr();
370}
371
372void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
373 CounterData::print_data_on(st, extra);
374 if (has_arguments()) {
375 tab(st, true);
376 st->print("argument types");
377 _args.print_data_on(st);
378 }
379 if (has_return()) {
380 tab(st, true);
381 st->print("return type");
382 _ret.print_data_on(st);
383 }
384}
385
386void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
387 VirtualCallData::print_data_on(st, extra);
388 if (has_arguments()) {
389 tab(st, true);
390 st->print("argument types");
391 _args.print_data_on(st);
392 }
393 if (has_return()) {
394 tab(st, true);
395 st->print("return type");
396 _ret.print_data_on(st);
397 }
398}
399
400// ==================================================================
401// ReceiverTypeData
402//
403// A ReceiverTypeData is used to access profiling information about a
404// dynamic type check. It consists of a counter which counts the total times
405// that the check is reached, and a series of (Klass*, count) pairs
406// which are used to store a type profile for the receiver of the check.
407
408void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
409 for (uint row = 0; row < row_limit(); row++) {
410 Klass* p = receiver(row);
411 if (p != NULL && (always_clean || !p->is_loader_alive())) {
412 clear_row(row);
413 }
414 }
415}
416
417#if INCLUDE_JVMCI
418void VirtualCallData::clean_weak_klass_links(bool always_clean) {
419 ReceiverTypeData::clean_weak_klass_links(always_clean);
420 for (uint row = 0; row < method_row_limit(); row++) {
421 Method* p = method(row);
422 if (p != NULL && (always_clean || !p->method_holder()->is_loader_alive())) {
423 clear_method_row(row);
424 }
425 }
426}
427
428void VirtualCallData::clean_weak_method_links() {
429 ReceiverTypeData::clean_weak_method_links();
430 for (uint row = 0; row < method_row_limit(); row++) {
431 Method* p = method(row);
432 if (p != NULL && p->is_old()) {
433 clear_method_row(row);
434 }
435 }
436}
437#endif // INCLUDE_JVMCI
438
439void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
440 uint row;
441 int entries = 0;
442 for (row = 0; row < row_limit(); row++) {
443 if (receiver(row) != NULL) entries++;
444 }
445#if INCLUDE_JVMCI
446 st->print_cr("count(%u) nonprofiled_count(%u) entries(%u)", count(), nonprofiled_count(), entries);
447#else
448 st->print_cr("count(%u) entries(%u)", count(), entries);
449#endif
450 int total = count();
451 for (row = 0; row < row_limit(); row++) {
452 if (receiver(row) != NULL) {
453 total += receiver_count(row);
454 }
455 }
456 for (row = 0; row < row_limit(); row++) {
457 if (receiver(row) != NULL) {
458 tab(st);
459 receiver(row)->print_value_on(st);
460 st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
461 }
462 }
}

void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
465 print_shared(st, "ReceiverTypeData", extra);
466 print_receiver_data_on(st);
467}
468
469#if INCLUDE_JVMCI
470void VirtualCallData::print_method_data_on(outputStream* st) const {
471 uint row;
472 int entries = 0;
473 for (row = 0; row < method_row_limit(); row++) {
474 if (method(row) != NULL) entries++;
475 }
476 tab(st);
477 st->print_cr("method_entries(%u)", entries);
478 int total = count();
479 for (row = 0; row < method_row_limit(); row++) {
480 if (method(row) != NULL) {
481 total += method_count(row);
482 }
483 }
484 for (row = 0; row < method_row_limit(); row++) {
485 if (method(row) != NULL) {
486 tab(st);
487 method(row)->print_value_on(st);
488 st->print_cr("(%u %4.2f)", method_count(row), (float) method_count(row) / (float) total);
489 }
490 }
491}
492#endif // INCLUDE_JVMCI
493
494void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
495 print_shared(st, "VirtualCallData", extra);
496 print_receiver_data_on(st);
497 print_method_data_on(st);
498}
499
500// ==================================================================
501// RetData
502//
503// A RetData is used to access profiling information for a ret bytecode.
504// It is composed of a count of the number of times that the ret has
505// been executed, followed by a series of triples of the form
506// (bci, count, di) which count the number of times that some bci was the
507// target of the ret and cache a corresponding displacement.
508
509void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
510 for (uint row = 0; row < row_limit(); row++) {
511 set_bci_displacement(row, -1);
512 set_bci(row, no_bci);
513 }
  // Release so other threads see a consistent state. bci is used as
  // the validity flag for bci_displacement.
516 OrderAccess::release();
517}
518
// This routine needs to atomically update the RetData structure, so the
// caller needs to hold the RetData_lock before it gets here. Since taking
// the lock can block (and allow GC), and since RetData is a ProfileData,
// which is a wrapper around a derived oop, taking the lock in _this_ method
// would basically cause the 'this' pointer's _data field to contain junk
// after the lock. We therefore require the caller to take the lock before
// constructing the ProfileData structure. Currently the only caller is
// InterpreterRuntime::update_mdp_for_ret.
526address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
527 // First find the mdp which corresponds to the return bci.
528 address mdp = h_mdo->bci_to_dp(return_bci);
529
530 // Now check to see if any of the cache slots are open.
531 for (uint row = 0; row < row_limit(); row++) {
532 if (bci(row) == no_bci) {
533 set_bci_displacement(row, mdp - dp());
534 set_bci_count(row, DataLayout::counter_increment);
535 // Barrier to ensure displacement is written before the bci; allows
536 // the interpreter to read displacement without fear of race condition.
537 release_set_bci(row, return_bci);
538 break;
539 }
540 }
541 return mdp;
542}
543
544void RetData::print_data_on(outputStream* st, const char* extra) const {
545 print_shared(st, "RetData", extra);
546 uint row;
547 int entries = 0;
548 for (row = 0; row < row_limit(); row++) {
549 if (bci(row) != no_bci) entries++;
550 }
551 st->print_cr("count(%u) entries(%u)", count(), entries);
552 for (row = 0; row < row_limit(); row++) {
553 if (bci(row) != no_bci) {
554 tab(st);
555 st->print_cr("bci(%d: count(%u) displacement(%d))",
556 bci(row), bci_count(row), bci_displacement(row));
557 }
558 }
559}
560
561// ==================================================================
562// BranchData
563//
564// A BranchData is used to access profiling data for a two-way branch.
565// It consists of taken and not_taken counts as well as a data displacement
566// for the taken case.
567
568void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
569 assert(stream->bci() == bci(), "wrong pos");
570 int target = stream->dest();
571 int my_di = mdo->dp_to_di(dp());
572 int target_di = mdo->bci_to_di(target);
573 int offset = target_di - my_di;
574 set_displacement(offset);
575}
576
577void BranchData::print_data_on(outputStream* st, const char* extra) const {
578 print_shared(st, "BranchData", extra);
579 st->print_cr("taken(%u) displacement(%d)",
580 taken(), displacement());
581 tab(st);
582 st->print_cr("not taken(%u)", not_taken());
583}
584
585// ==================================================================
586// MultiBranchData
587//
588// A MultiBranchData is used to access profiling information for
589// a multi-way branch (*switch bytecodes). It consists of a series
590// of (count, displacement) pairs, which count the number of times each
// case was taken and specify the data displacement for each branch target.
592
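// Cells needed: one for the array length plus per_case_cell_count cells for
// the default target and for each case.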
593int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
594 int cell_count = 0;
595 if (stream->code() == Bytecodes::_tableswitch) {
596 Bytecode_tableswitch sw(stream->method()(), stream->bcp());
597 cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
598 } else {
599 Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
600 cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
601 }
602 return cell_count;
603}
604
605void MultiBranchData::post_initialize(BytecodeStream* stream,
606 MethodData* mdo) {
607 assert(stream->bci() == bci(), "wrong pos");
608 int target;
609 int my_di;
610 int target_di;
611 int offset;
612 if (stream->code() == Bytecodes::_tableswitch) {
613 Bytecode_tableswitch sw(stream->method()(), stream->bcp());
614 int len = sw.length();
615 assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
616 for (int count = 0; count < len; count++) {
617 target = sw.dest_offset_at(count) + bci();
618 my_di = mdo->dp_to_di(dp());
619 target_di = mdo->bci_to_di(target);
620 offset = target_di - my_di;
621 set_displacement_at(count, offset);
622 }
623 target = sw.default_offset() + bci();
624 my_di = mdo->dp_to_di(dp());
625 target_di = mdo->bci_to_di(target);
626 offset = target_di - my_di;
627 set_default_displacement(offset);
628
629 } else {
630 Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
631 int npairs = sw.number_of_pairs();
632 assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
633 for (int count = 0; count < npairs; count++) {
634 LookupswitchPair pair = sw.pair_at(count);
635 target = pair.offset() + bci();
636 my_di = mdo->dp_to_di(dp());
637 target_di = mdo->bci_to_di(target);
638 offset = target_di - my_di;
639 set_displacement_at(count, offset);
640 }
641 target = sw.default_offset() + bci();
642 my_di = mdo->dp_to_di(dp());
643 target_di = mdo->bci_to_di(target);
644 offset = target_di - my_di;
645 set_default_displacement(offset);
646 }
647}
648
649void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
650 print_shared(st, "MultiBranchData", extra);
651 st->print_cr("default_count(%u) displacement(%d)",
652 default_count(), default_displacement());
653 int cases = number_of_cases();
654 for (int i = 0; i < cases; i++) {
655 tab(st);
656 st->print_cr("count(%u) displacement(%d)",
657 count_at(i), displacement_at(i));
658 }
659}
660
661void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
662 print_shared(st, "ArgInfoData", extra);
663 int nargs = number_of_args();
664 for (int i = 0; i < nargs; i++) {
665 st->print(" 0x%x", arg_modified(i));
666 }
667 st->cr();
668}
669
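// Number of cells needed to profile the reference parameters of m (including
// the receiver for instance methods) plus one cell for the array length, or 0
// if parameter profiling is off for this method or there is nothing to profile.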
670int ParametersTypeData::compute_cell_count(Method* m) {
671 if (!MethodData::profile_parameters_for_method(m)) {
672 return 0;
673 }
674 int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
675 int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
676 if (obj_args > 0) {
677 return obj_args + 1; // 1 cell for array len
678 }
679 return 0;
680}
681
682void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
683 _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
684}
685
686bool ParametersTypeData::profiling_enabled() {
687 return MethodData::profile_parameters();
688}
689
690void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
691 st->print("parameter types"); // FIXME extra ignored?
692 _parameters.print_data_on(st);
693}
694
695void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
696 print_shared(st, "SpeculativeTrapData", extra);
697 tab(st);
698 method()->print_short_name(st);
699 st->cr();
700}
701
702// ==================================================================
703// MethodData*
704//
705// A MethodData* holds information which has been collected about
706// a method.
707
708MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
709 int size = MethodData::compute_allocation_size_in_words(method);
710
711 return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
712 MethodData(method(), size, THREAD);
713}
714
715int MethodData::bytecode_cell_count(Bytecodes::Code code) {
716 if (is_client_compilation_mode_vm()) {
717 return no_profile_data;
718 }
719 switch (code) {
720 case Bytecodes::_checkcast:
721 case Bytecodes::_instanceof:
722 case Bytecodes::_aastore:
723 if (TypeProfileCasts) {
724 return ReceiverTypeData::static_cell_count();
725 } else {
726 return BitData::static_cell_count();
727 }
728 case Bytecodes::_invokespecial:
729 case Bytecodes::_invokestatic:
730 if (MethodData::profile_arguments() || MethodData::profile_return()) {
731 return variable_cell_count;
732 } else {
733 return CounterData::static_cell_count();
734 }
735 case Bytecodes::_goto:
736 case Bytecodes::_goto_w:
737 case Bytecodes::_jsr:
738 case Bytecodes::_jsr_w:
739 return JumpData::static_cell_count();
740 case Bytecodes::_invokevirtual:
741 case Bytecodes::_invokeinterface:
742 if (MethodData::profile_arguments() || MethodData::profile_return()) {
743 return variable_cell_count;
744 } else {
745 return VirtualCallData::static_cell_count();
746 }
747 case Bytecodes::_invokedynamic:
748 if (MethodData::profile_arguments() || MethodData::profile_return()) {
749 return variable_cell_count;
750 } else {
751 return CounterData::static_cell_count();
752 }
753 case Bytecodes::_ret:
754 return RetData::static_cell_count();
755 case Bytecodes::_ifeq:
756 case Bytecodes::_ifne:
757 case Bytecodes::_iflt:
758 case Bytecodes::_ifge:
759 case Bytecodes::_ifgt:
760 case Bytecodes::_ifle:
761 case Bytecodes::_if_icmpeq:
762 case Bytecodes::_if_icmpne:
763 case Bytecodes::_if_icmplt:
764 case Bytecodes::_if_icmpge:
765 case Bytecodes::_if_icmpgt:
766 case Bytecodes::_if_icmple:
767 case Bytecodes::_if_acmpeq:
768 case Bytecodes::_if_acmpne:
769 case Bytecodes::_ifnull:
770 case Bytecodes::_ifnonnull:
771 return BranchData::static_cell_count();
772 case Bytecodes::_lookupswitch:
773 case Bytecodes::_tableswitch:
774 return variable_cell_count;
775 default:
776 return no_profile_data;
777 }
778}
779
780// Compute the size of the profiling information corresponding to
781// the current bytecode.
782int MethodData::compute_data_size(BytecodeStream* stream) {
783 int cell_count = bytecode_cell_count(stream->code());
784 if (cell_count == no_profile_data) {
785 return 0;
786 }
787 if (cell_count == variable_cell_count) {
788 switch (stream->code()) {
789 case Bytecodes::_lookupswitch:
790 case Bytecodes::_tableswitch:
791 cell_count = MultiBranchData::compute_cell_count(stream);
792 break;
793 case Bytecodes::_invokespecial:
794 case Bytecodes::_invokestatic:
795 case Bytecodes::_invokedynamic:
796 assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
797 if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
798 profile_return_for_invoke(stream->method(), stream->bci())) {
799 cell_count = CallTypeData::compute_cell_count(stream);
800 } else {
801 cell_count = CounterData::static_cell_count();
802 }
803 break;
804 case Bytecodes::_invokevirtual:
805 case Bytecodes::_invokeinterface: {
806 assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
807 if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
808 profile_return_for_invoke(stream->method(), stream->bci())) {
809 cell_count = VirtualCallTypeData::compute_cell_count(stream);
810 } else {
811 cell_count = VirtualCallData::static_cell_count();
812 }
813 break;
814 }
815 default:
816 fatal("unexpected bytecode for var length profile data");
817 }
818 }
819 // Note: cell_count might be zero, meaning that there is just
820 // a DataLayout header, with no extra cells.
821 assert(cell_count >= 0, "sanity");
822 return DataLayout::compute_size_in_bytes(cell_count);
823}
824
825bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
826 // Bytecodes for which we may use speculation
827 switch (code) {
828 case Bytecodes::_checkcast:
829 case Bytecodes::_instanceof:
830 case Bytecodes::_aastore:
831 case Bytecodes::_invokevirtual:
832 case Bytecodes::_invokeinterface:
833 case Bytecodes::_if_acmpeq:
834 case Bytecodes::_if_acmpne:
835 case Bytecodes::_ifnull:
836 case Bytecodes::_ifnonnull:
837 case Bytecodes::_invokestatic:
838#ifdef COMPILER2
839 if (is_server_compilation_mode_vm()) {
840 return UseTypeSpeculation;
841 }
842#endif
843 default:
844 return false;
845 }
846 return false;
847}
848
849#if INCLUDE_JVMCI
850
851void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
852 return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
853}
854
855FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(NULL) {
856 memcpy(data(), speculation, speculation_len);
857}
858
859// A heuristic check to detect nmethods that outlive a failed speculations list.
860static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
861 jlong head = (jlong)(address) *failed_speculations_address;
862 if ((head & 0x1) == 0x1) {
863 stringStream st;
864 if (nm != NULL) {
865 st.print("%d", nm->compile_id());
866 Method* method = nm->method();
867 st.print_raw("{");
868 if (method != NULL) {
869 method->print_name(&st);
870 } else {
871 const char* jvmci_name = nm->jvmci_name();
872 if (jvmci_name != NULL) {
873 st.print_raw(jvmci_name);
874 }
875 }
876 st.print_raw("}");
877 } else {
878 st.print("<unknown>");
879 }
880 fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
881 }
882}
883
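// Append a new FailedSpeculation to the singly-linked list rooted at
// *failed_speculations_address. The append CASes the first NULL next pointer
// it finds, so concurrent adders cannot lose entries.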
884bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
885 assert(failed_speculations_address != NULL, "must be");
886 size_t fs_size = sizeof(FailedSpeculation) + speculation_len;
887 FailedSpeculation* fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
888 if (fs == NULL) {
889 // no memory -> ignore failed speculation
890 return false;
891 }
892
893 guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
894 guarantee_failed_speculations_alive(nm, failed_speculations_address);
895
896 FailedSpeculation** cursor = failed_speculations_address;
897 do {
898 if (*cursor == NULL) {
899 FailedSpeculation* old_fs = Atomic::cmpxchg(fs, cursor, (FailedSpeculation*) NULL);
900 if (old_fs == NULL) {
901 // Successfully appended fs to end of the list
902 return true;
903 }
904 cursor = old_fs->next_adr();
905 } else {
906 cursor = (*cursor)->next_adr();
907 }
908 } while (true);
909}
910
911void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
912 assert(failed_speculations_address != NULL, "must be");
913 FailedSpeculation* fs = *failed_speculations_address;
914 while (fs != NULL) {
915 FailedSpeculation* next = fs->next();
916 delete fs;
917 fs = next;
918 }
919
  // Write an unaligned value to failed_speculations_address to denote
  // that it is no longer a valid pointer. This allows the check in
  // add_failed_speculation to detect additions to a freed failed
  // speculations list.
924 long* head = (long*) failed_speculations_address;
925 (*head) = (*head) | 0x1;
926}
927#endif // INCLUDE_JVMCI
928
929int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
930#if INCLUDE_JVMCI
931 if (ProfileTraps) {
932 // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
933 int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));
934
935 // Make sure we have a minimum number of extra data slots to
936 // allocate SpeculativeTrapData entries. We would want to have one
937 // entry per compilation that inlines this method and for which
938 // some type speculation assumption fails. So the room we need for
939 // the SpeculativeTrapData entries doesn't directly depend on the
940 // size of the method. Because it's hard to estimate, we reserve
941 // space for an arbitrary number of entries.
942 int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
943 (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
944
945 return MAX2(extra_data_count, spec_data_count);
946 } else {
947 return 0;
948 }
949#else // INCLUDE_JVMCI
950 if (ProfileTraps) {
951 // Assume that up to 3% of BCIs with no MDP will need to allocate one.
952 int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
953 // If the method is large, let the extra BCIs grow numerous (to ~1%).
954 int one_percent_of_data
955 = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
956 if (extra_data_count < one_percent_of_data)
957 extra_data_count = one_percent_of_data;
958 if (extra_data_count > empty_bc_count)
959 extra_data_count = empty_bc_count; // no need for more
960
961 // Make sure we have a minimum number of extra data slots to
962 // allocate SpeculativeTrapData entries. We would want to have one
963 // entry per compilation that inlines this method and for which
964 // some type speculation assumption fails. So the room we need for
965 // the SpeculativeTrapData entries doesn't directly depend on the
966 // size of the method. Because it's hard to estimate, we reserve
967 // space for an arbitrary number of entries.
968 int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
969 (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
970
971 return MAX2(extra_data_count, spec_data_count);
972 } else {
973 return 0;
974 }
975#endif // INCLUDE_JVMCI
976}
977
978// Compute the size of the MethodData* necessary to store
979// profiling information about a given method. Size is in bytes.
980int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
981 int data_size = 0;
982 BytecodeStream stream(method);
983 Bytecodes::Code c;
984 int empty_bc_count = 0; // number of bytecodes lacking data
985 bool needs_speculative_traps = false;
986 while ((c = stream.next()) >= 0) {
987 int size_in_bytes = compute_data_size(&stream);
988 data_size += size_in_bytes;
989 if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c))) empty_bc_count += 1;
990 needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
991 }
992 int object_size = in_bytes(data_offset()) + data_size;
993
994 // Add some extra DataLayout cells (at least one) to track stray traps.
995 int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
996 object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);
997
998 // Add a cell to record information about modified arguments.
999 int arg_size = method->size_of_parameters();
1000 object_size += DataLayout::compute_size_in_bytes(arg_size+1);
1001
1002 // Reserve room for an area of the MDO dedicated to profiling of
1003 // parameters
1004 int args_cell = ParametersTypeData::compute_cell_count(method());
1005 if (args_cell > 0) {
1006 object_size += DataLayout::compute_size_in_bytes(args_cell);
1007 }
1008 return object_size;
1009}
1010
1011// Compute the size of the MethodData* necessary to store
1012// profiling information about a given method. Size is in words
1013int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
1014 int byte_size = compute_allocation_size_in_bytes(method);
1015 int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
1016 return align_metadata_size(word_size);
1017}
1018
1019// Initialize an individual data segment. Returns the size of
1020// the segment in bytes.
1021int MethodData::initialize_data(BytecodeStream* stream,
1022 int data_index) {
1023 if (is_client_compilation_mode_vm()) {
1024 return 0;
1025 }
1026 int cell_count = -1;
1027 int tag = DataLayout::no_tag;
1028 DataLayout* data_layout = data_layout_at(data_index);
1029 Bytecodes::Code c = stream->code();
1030 switch (c) {
1031 case Bytecodes::_checkcast:
1032 case Bytecodes::_instanceof:
1033 case Bytecodes::_aastore:
1034 if (TypeProfileCasts) {
1035 cell_count = ReceiverTypeData::static_cell_count();
1036 tag = DataLayout::receiver_type_data_tag;
1037 } else {
1038 cell_count = BitData::static_cell_count();
1039 tag = DataLayout::bit_data_tag;
1040 }
1041 break;
1042 case Bytecodes::_invokespecial:
1043 case Bytecodes::_invokestatic: {
1044 int counter_data_cell_count = CounterData::static_cell_count();
1045 if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
1046 profile_return_for_invoke(stream->method(), stream->bci())) {
1047 cell_count = CallTypeData::compute_cell_count(stream);
1048 } else {
1049 cell_count = counter_data_cell_count;
1050 }
1051 if (cell_count > counter_data_cell_count) {
1052 tag = DataLayout::call_type_data_tag;
1053 } else {
1054 tag = DataLayout::counter_data_tag;
1055 }
1056 break;
1057 }
1058 case Bytecodes::_goto:
1059 case Bytecodes::_goto_w:
1060 case Bytecodes::_jsr:
1061 case Bytecodes::_jsr_w:
1062 cell_count = JumpData::static_cell_count();
1063 tag = DataLayout::jump_data_tag;
1064 break;
1065 case Bytecodes::_invokevirtual:
1066 case Bytecodes::_invokeinterface: {
1067 int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
1068 if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
1069 profile_return_for_invoke(stream->method(), stream->bci())) {
1070 cell_count = VirtualCallTypeData::compute_cell_count(stream);
1071 } else {
1072 cell_count = virtual_call_data_cell_count;
1073 }
1074 if (cell_count > virtual_call_data_cell_count) {
1075 tag = DataLayout::virtual_call_type_data_tag;
1076 } else {
1077 tag = DataLayout::virtual_call_data_tag;
1078 }
1079 break;
1080 }
1081 case Bytecodes::_invokedynamic: {
1082 // %%% should make a type profile for any invokedynamic that takes a ref argument
1083 int counter_data_cell_count = CounterData::static_cell_count();
1084 if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
1085 profile_return_for_invoke(stream->method(), stream->bci())) {
1086 cell_count = CallTypeData::compute_cell_count(stream);
1087 } else {
1088 cell_count = counter_data_cell_count;
1089 }
1090 if (cell_count > counter_data_cell_count) {
1091 tag = DataLayout::call_type_data_tag;
1092 } else {
1093 tag = DataLayout::counter_data_tag;
1094 }
1095 break;
1096 }
1097 case Bytecodes::_ret:
1098 cell_count = RetData::static_cell_count();
1099 tag = DataLayout::ret_data_tag;
1100 break;
1101 case Bytecodes::_ifeq:
1102 case Bytecodes::_ifne:
1103 case Bytecodes::_iflt:
1104 case Bytecodes::_ifge:
1105 case Bytecodes::_ifgt:
1106 case Bytecodes::_ifle:
1107 case Bytecodes::_if_icmpeq:
1108 case Bytecodes::_if_icmpne:
1109 case Bytecodes::_if_icmplt:
1110 case Bytecodes::_if_icmpge:
1111 case Bytecodes::_if_icmpgt:
1112 case Bytecodes::_if_icmple:
1113 case Bytecodes::_if_acmpeq:
1114 case Bytecodes::_if_acmpne:
1115 case Bytecodes::_ifnull:
1116 case Bytecodes::_ifnonnull:
1117 cell_count = BranchData::static_cell_count();
1118 tag = DataLayout::branch_data_tag;
1119 break;
1120 case Bytecodes::_lookupswitch:
1121 case Bytecodes::_tableswitch:
1122 cell_count = MultiBranchData::compute_cell_count(stream);
1123 tag = DataLayout::multi_branch_data_tag;
1124 break;
1125 default:
1126 break;
1127 }
1128 assert(tag == DataLayout::multi_branch_data_tag ||
1129 ((MethodData::profile_arguments() || MethodData::profile_return()) &&
1130 (tag == DataLayout::call_type_data_tag ||
1131 tag == DataLayout::counter_data_tag ||
1132 tag == DataLayout::virtual_call_type_data_tag ||
1133 tag == DataLayout::virtual_call_data_tag)) ||
1134 cell_count == bytecode_cell_count(c), "cell counts must agree");
1135 if (cell_count >= 0) {
1136 assert(tag != DataLayout::no_tag, "bad tag");
1137 assert(bytecode_has_profile(c), "agree w/ BHP");
1138 data_layout->initialize(tag, stream->bci(), cell_count);
1139 return DataLayout::compute_size_in_bytes(cell_count);
1140 } else {
1141 assert(!bytecode_has_profile(c), "agree w/ !BHP");
1142 return 0;
1143 }
1144}
1145
1146// Get the data at an arbitrary (sort of) data index.
1147ProfileData* MethodData::data_at(int data_index) const {
1148 if (out_of_bounds(data_index)) {
1149 return NULL;
1150 }
1151 DataLayout* data_layout = data_layout_at(data_index);
1152 return data_layout->data_in();
1153}
1154
1155ProfileData* DataLayout::data_in() {
1156 switch (tag()) {
1157 case DataLayout::no_tag:
1158 default:
1159 ShouldNotReachHere();
1160 return NULL;
1161 case DataLayout::bit_data_tag:
1162 return new BitData(this);
1163 case DataLayout::counter_data_tag:
1164 return new CounterData(this);
1165 case DataLayout::jump_data_tag:
1166 return new JumpData(this);
1167 case DataLayout::receiver_type_data_tag:
1168 return new ReceiverTypeData(this);
1169 case DataLayout::virtual_call_data_tag:
1170 return new VirtualCallData(this);
1171 case DataLayout::ret_data_tag:
1172 return new RetData(this);
1173 case DataLayout::branch_data_tag:
1174 return new BranchData(this);
1175 case DataLayout::multi_branch_data_tag:
1176 return new MultiBranchData(this);
1177 case DataLayout::arg_info_data_tag:
1178 return new ArgInfoData(this);
1179 case DataLayout::call_type_data_tag:
1180 return new CallTypeData(this);
1181 case DataLayout::virtual_call_type_data_tag:
1182 return new VirtualCallTypeData(this);
1183 case DataLayout::parameters_type_data_tag:
1184 return new ParametersTypeData(this);
1185 case DataLayout::speculative_trap_data_tag:
1186 return new SpeculativeTrapData(this);
1187 }
1188}
1189
1190// Iteration over data.
1191ProfileData* MethodData::next_data(ProfileData* current) const {
1192 int current_index = dp_to_di(current->dp());
1193 int next_index = current_index + current->size_in_bytes();
1194 ProfileData* next = data_at(next_index);
1195 return next;
1196}
1197
1198// Give each of the data entries a chance to perform specific
1199// data initialization.
1200void MethodData::post_initialize(BytecodeStream* stream) {
1201 ResourceMark rm;
1202 ProfileData* data;
1203 for (data = first_data(); is_valid(data); data = next_data(data)) {
1204 stream->set_start(data->bci());
1205 stream->next();
1206 data->post_initialize(stream, this);
1207 }
1208 if (_parameters_type_data_di != no_parameters) {
1209 parameters_type_data()->post_initialize(NULL, this);
1210 }
1211}
1212
1213// Initialize the MethodData* corresponding to a given method.
1214MethodData::MethodData(const methodHandle& method, int size, TRAPS)
1215 : _extra_data_lock(Monitor::leaf, "MDO extra data lock"),
1216 _parameters_type_data_di(parameters_uninitialized) {
1217 // Set the method back-pointer.
1218 _method = method();
1219 initialize();
1220}
1221
1222void MethodData::initialize() {
1223 NoSafepointVerifier no_safepoint; // init function atomic wrt GC
1224 ResourceMark rm;
1225
1226 init();
1227 set_creation_mileage(mileage_of(method()));
1228
1229 // Go through the bytecodes and allocate and initialize the
1230 // corresponding data cells.
1231 int data_size = 0;
1232 int empty_bc_count = 0; // number of bytecodes lacking data
1233 _data[0] = 0; // apparently not set below.
1234 BytecodeStream stream(method());
1235 Bytecodes::Code c;
1236 bool needs_speculative_traps = false;
1237 while ((c = stream.next()) >= 0) {
1238 int size_in_bytes = initialize_data(&stream, data_size);
1239 data_size += size_in_bytes;
1240 if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c))) empty_bc_count += 1;
1241 needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
1242 }
1243 _data_size = data_size;
1244 int object_size = in_bytes(data_offset()) + data_size;
1245
1246 // Add some extra DataLayout cells (at least one) to track stray traps.
1247 int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
1248 int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);
1249
1250 // Let's zero the space for the extra data
1251 Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
1252
1253 // Add a cell to record information about modified arguments.
  // Set up the _args_modified array after the trap cells so that
  // the code for the trap cells works.
1256 DataLayout *dp = data_layout_at(data_size + extra_size);
1257
1258 int arg_size = method()->size_of_parameters();
1259 dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);
1260
1261 int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
1262 object_size += extra_size + arg_data_size;
1263
1264 int parms_cell = ParametersTypeData::compute_cell_count(method());
  // If we are profiling parameters, we reserve an area near the end
  // of the MDO after the slots for bytecodes (because there's no bci
  // for method entry so they don't fit with the framework for the
  // profiling of bytecodes). We store the offset within the MDO of
  // this area (or -1 if no parameter is profiled).
1270 if (parms_cell > 0) {
1271 object_size += DataLayout::compute_size_in_bytes(parms_cell);
1272 _parameters_type_data_di = data_size + extra_size + arg_data_size;
1273 DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
1274 dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
1275 } else {
1276 _parameters_type_data_di = no_parameters;
1277 }
1278
  // Set an initial hint. Don't use set_hint_di() because
  // first_di() may be out of bounds if data_size is 0.
  // In that situation, _hint_di is never used, but it is at
  // least well-defined.
1283 _hint_di = first_di();
1284
1285 post_initialize(&stream);
1286
1287 assert(object_size == compute_allocation_size_in_bytes(methodHandle(_method)), "MethodData: computed size != initialized size");
1288 set_size(object_size);
1289}
1290
1291void MethodData::init() {
1292 _invocation_counter.init();
1293 _backedge_counter.init();
1294 _invocation_counter_start = 0;
1295 _backedge_counter_start = 0;
1296
1297 // Set per-method invoke- and backedge mask.
1298 double scale = 1.0;
1299 CompilerOracle::has_option_value(_method, "CompileThresholdScaling", scale);
1300 _invoke_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
1301 _backedge_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
1302
1303 _tenure_traps = 0;
1304 _num_loops = 0;
1305 _num_blocks = 0;
1306 _would_profile = unknown;
1307
1308#if INCLUDE_JVMCI
1309 _jvmci_ir_size = 0;
1310 _failed_speculations = NULL;
1311#endif
1312
1313#if INCLUDE_RTM_OPT
1314 _rtm_state = NoRTM; // No RTM lock eliding by default
1315 if (UseRTMLocking &&
1316 !CompilerOracle::has_option_string(_method, "NoRTMLockEliding")) {
1317 if (CompilerOracle::has_option_string(_method, "UseRTMLockEliding") || !UseRTMDeopt) {
1318 // Generate RTM lock eliding code without abort ratio calculation code.
1319 _rtm_state = UseRTM;
1320 } else if (UseRTMDeopt) {
1321 // Generate RTM lock eliding code and include abort ratio calculation
1322 // code if UseRTMDeopt is on.
1323 _rtm_state = ProfileRTM;
1324 }
1325 }
1326#endif
1327
1328 // Initialize flags and trap history.
1329 _nof_decompiles = 0;
1330 _nof_overflow_recompiles = 0;
1331 _nof_overflow_traps = 0;
1332 clear_escape_info();
1333 assert(sizeof(_trap_hist) % sizeof(HeapWord) == 0, "align");
1334 Copy::zero_to_words((HeapWord*) &_trap_hist,
1335 sizeof(_trap_hist) / sizeof(HeapWord));
1336}
1337
1338// Get a measure of how much mileage the method has on it.
1339int MethodData::mileage_of(Method* method) {
1340 int mileage = 0;
1341 if (TieredCompilation) {
1342 mileage = MAX2(method->invocation_count(), method->backedge_count());
1343 } else {
1344 int iic = method->interpreter_invocation_count();
1345 if (mileage < iic) mileage = iic;
1346 MethodCounters* mcs = method->method_counters();
1347 if (mcs != NULL) {
1348 InvocationCounter* ic = mcs->invocation_counter();
1349 InvocationCounter* bc = mcs->backedge_counter();
1350 int icval = ic->count();
1351 if (ic->carry()) icval += CompileThreshold;
1352 if (mileage < icval) mileage = icval;
1353 int bcval = bc->count();
1354 if (bc->carry()) bcval += CompileThreshold;
1355 if (mileage < bcval) mileage = bcval;
1356 }
1357 }
1358 return mileage;
1359}
1360
1361bool MethodData::is_mature() const {
1362 return CompilationPolicy::policy()->is_mature(_method);
1363}
1364
1365// Translate a bci to its corresponding data index (di).
1366address MethodData::bci_to_dp(int bci) {
1367 ResourceMark rm;
1368 ProfileData* data = data_before(bci);
1369 ProfileData* prev = NULL;
1370 for ( ; is_valid(data); data = next_data(data)) {
1371 if (data->bci() >= bci) {
1372 if (data->bci() == bci) set_hint_di(dp_to_di(data->dp()));
1373 else if (prev != NULL) set_hint_di(dp_to_di(prev->dp()));
1374 return data->dp();
1375 }
1376 prev = data;
1377 }
1378 return (address)limit_data_position();
1379}
1380
1381// Translate a bci to its corresponding data, or NULL.
1382ProfileData* MethodData::bci_to_data(int bci) {
1383 ProfileData* data = data_before(bci);
1384 for ( ; is_valid(data); data = next_data(data)) {
1385 if (data->bci() == bci) {
1386 set_hint_di(dp_to_di(data->dp()));
1387 return data;
1388 } else if (data->bci() > bci) {
1389 break;
1390 }
1391 }
1392 return bci_to_extra_data(bci, NULL, false);
1393}
1394
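// Step to the next entry in the extra data section. Extra entries are either
// single-cell entries (bit data or still-unused slots) or SpeculativeTrapData
// entries.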
1395DataLayout* MethodData::next_extra(DataLayout* dp) {
1396 int nb_cells = 0;
1397 switch(dp->tag()) {
1398 case DataLayout::bit_data_tag:
1399 case DataLayout::no_tag:
1400 nb_cells = BitData::static_cell_count();
1401 break;
1402 case DataLayout::speculative_trap_data_tag:
1403 nb_cells = SpeculativeTrapData::static_cell_count();
1404 break;
1405 default:
1406 fatal("unexpected tag %d", dp->tag());
1407 }
1408 return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
1409}
1410
1411ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent) {
1412 DataLayout* end = args_data_limit();
1413
1414 for (;; dp = next_extra(dp)) {
1415 assert(dp < end, "moved past end of extra data");
1416 // No need for "OrderAccess::load_acquire" ops,
1417 // since the data structure is monotonic.
1418 switch(dp->tag()) {
1419 case DataLayout::no_tag:
1420 return NULL;
1421 case DataLayout::arg_info_data_tag:
1422 dp = end;
1423 return NULL; // ArgInfoData is at the end of extra data section.
1424 case DataLayout::bit_data_tag:
1425 if (m == NULL && dp->bci() == bci) {
1426 return new BitData(dp);
1427 }
1428 break;
1429 case DataLayout::speculative_trap_data_tag:
1430 if (m != NULL) {
1431 SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1432 // data->method() may be null in case of a concurrent
1433 // allocation. Maybe it's for the same method. Try to use that
1434 // entry in that case.
1435 if (dp->bci() == bci) {
1436 if (data->method() == NULL) {
1437 assert(concurrent, "impossible because no concurrent allocation");
1438 return NULL;
1439 } else if (data->method() == m) {
1440 return data;
1441 }
1442 }
1443 }
1444 break;
1445 default:
1446 fatal("unexpected tag %d", dp->tag());
1447 }
1448 }
1449 return NULL;
1450}
1451
1452
1453// Translate a bci to its corresponding extra data, or NULL.
1454ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
1455 // This code assumes an entry for a SpeculativeTrapData is 2 cells
1456 assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
1457 DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
1458 "code needs to be adjusted");
1459
1460 // Do not create one of these if method has been redefined.
1461 if (m != NULL && m->is_old()) {
1462 return NULL;
1463 }
1464
1465 DataLayout* dp = extra_data_base();
1466 DataLayout* end = args_data_limit();
1467
  // Allocation in the extra data space has to be atomic because not
  // all entries have the same size and non-atomic concurrent
  // allocation would result in a corrupted extra data space.
1471 ProfileData* result = bci_to_extra_data_helper(bci, m, dp, true);
1472 if (result != NULL) {
1473 return result;
1474 }
1475
1476 if (create_if_missing && dp < end) {
1477 MutexLocker ml(&_extra_data_lock);
1478 // Check again now that we have the lock. Another thread may
1479 // have added extra data entries.
1480 ProfileData* result = bci_to_extra_data_helper(bci, m, dp, false);
1481 if (result != NULL || dp >= end) {
1482 return result;
1483 }
1484
1485 assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free");
1486 assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
1487 u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
1488 // SpeculativeTrapData is 2 slots. Make sure we have room.
1489 if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) {
1490 return NULL;
1491 }
1492 DataLayout temp;
1493 temp.initialize(tag, bci, 0);
1494
1495 dp->set_header(temp.header());
1496 assert(dp->tag() == tag, "sane");
1497 assert(dp->bci() == bci, "no concurrent allocation");
1498 if (tag == DataLayout::bit_data_tag) {
1499 return new BitData(dp);
1500 } else {
1501 SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1502 data->set_method(m);
1503 return data;
1504 }
1505 }
1506 return NULL;
1507}
1508
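// Locate the ArgInfoData entry, which sits at the end of the extra data
// section, or return NULL if it is not found before the limit.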
1509ArgInfoData *MethodData::arg_info() {
1510 DataLayout* dp = extra_data_base();
1511 DataLayout* end = args_data_limit();
1512 for (; dp < end; dp = next_extra(dp)) {
1513 if (dp->tag() == DataLayout::arg_info_data_tag)
1514 return new ArgInfoData(dp);
1515 }
1516 return NULL;
1517}
1518
1519// Printing
1520
1521void MethodData::print_on(outputStream* st) const {
1522 assert(is_methodData(), "should be method data");
1523 st->print("method data for ");
1524 method()->print_value_on(st);
1525 st->cr();
1526 print_data_on(st);
1527}
1528
1529void MethodData::print_value_on(outputStream* st) const {
1530 assert(is_methodData(), "should be method data");
1531 st->print("method data for ");
1532 method()->print_value_on(st);
1533}
1534
1535void MethodData::print_data_on(outputStream* st) const {
1536 ResourceMark rm;
1537 ProfileData* data = first_data();
1538 if (_parameters_type_data_di != no_parameters) {
1539 parameters_type_data()->print_data_on(st);
1540 }
1541 for ( ; is_valid(data); data = next_data(data)) {
1542 st->print("%d", dp_to_di(data->dp()));
1543 st->fill_to(6);
1544 data->print_data_on(st, this);
1545 }
1546 st->print_cr("--- Extra data:");
1547 DataLayout* dp = extra_data_base();
1548 DataLayout* end = args_data_limit();
1549 for (;; dp = next_extra(dp)) {
1550 assert(dp < end, "moved past end of extra data");
1551 // No need for "OrderAccess::load_acquire" ops,
1552 // since the data structure is monotonic.
1553 switch(dp->tag()) {
1554 case DataLayout::no_tag:
1555 continue;
1556 case DataLayout::bit_data_tag:
1557 data = new BitData(dp);
1558 break;
1559 case DataLayout::speculative_trap_data_tag:
1560 data = new SpeculativeTrapData(dp);
1561 break;
1562 case DataLayout::arg_info_data_tag:
1563 data = new ArgInfoData(dp);
1564 dp = end; // ArgInfoData is at the end of extra data section.
1565 break;
1566 default:
1567 fatal("unexpected tag %d", dp->tag());
1568 }
1569 st->print("%d", dp_to_di(data->dp()));
1570 st->fill_to(6);
1571 data->print_data_on(st);
1572 if (dp >= end) return;
1573 }
1574}
1575
1576#if INCLUDE_SERVICES
1577// Size Statistics
1578void MethodData::collect_statistics(KlassSizeStats *sz) const {
1579 int n = sz->count(this);
1580 sz->_method_data_bytes += n;
1581 sz->_method_all_bytes += n;
1582 sz->_rw_bytes += n;
1583}
1584#endif // INCLUDE_SERVICES
1585
1586// Verification
1587
1588void MethodData::verify_on(outputStream* st) {
1589 guarantee(is_methodData(), "object must be method data");
1590 // guarantee(m->is_perm(), "should be in permspace");
1591 this->verify_data_on(st);
1592}
1593
1594void MethodData::verify_data_on(outputStream* st) {
1595 NEEDS_CLEANUP;
1596 // not yet implemented.
1597}
1598
bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
  if (m->is_compiled_lambda_form()) {
    return true;
  }

  Bytecode_invoke inv(m, bci);
  return inv.is_invokedynamic() || inv.is_invokehandle();
}

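// True for invokevirtual calls to the jdk.internal.misc.Unsafe get*/put*
// accessors; such call sites get argument type profiling
// (see profile_arguments_for_invoke).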
bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
  Bytecode_invoke inv(m, bci);
  if (inv.is_invokevirtual() && inv.klass() == vmSymbols::jdk_internal_misc_Unsafe()) {
    ResourceMark rm;
    char* name = inv.name()->as_C_string();
    if (!strncmp(name, "get", 3) || !strncmp(name, "put", 3)) {
      return true;
    }
  }
  return false;
}

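// TypeProfileLevel is decoded as three decimal digits: the ones digit
// controls argument profiling, the tens digit return value profiling and
// the hundreds digit parameter profiling. For each digit, 0 turns that
// kind of profiling off, 1 (type_profile_jsr292) restricts it to jsr292
// call sites and 2 (type_profile_all) enables it for all call sites.
// For example, a value of 111 restricts all three kinds of profiling to
// jsr292 call sites.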
int MethodData::profile_arguments_flag() {
  return TypeProfileLevel % 10;
}

bool MethodData::profile_arguments() {
  return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all;
}

bool MethodData::profile_arguments_jsr292_only() {
  return profile_arguments_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_arguments() {
  return profile_arguments_flag() == type_profile_all;
}

bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
  if (!profile_arguments()) {
    return false;
  }

  if (profile_all_arguments()) {
    return true;
  }

  if (profile_unsafe(m, bci)) {
    return true;
  }

  assert(profile_arguments_jsr292_only(), "inconsistent");
  return profile_jsr292(m, bci);
}

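// Tens digit of TypeProfileLevel: profiling of return values at call sites.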
int MethodData::profile_return_flag() {
  return (TypeProfileLevel % 100) / 10;
}

bool MethodData::profile_return() {
  return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
}

bool MethodData::profile_return_jsr292_only() {
  return profile_return_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_return() {
  return profile_return_flag() == type_profile_all;
}

bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
  if (!profile_return()) {
    return false;
  }

  if (profile_all_return()) {
    return true;
  }

  assert(profile_return_jsr292_only(), "inconsistent");
  return profile_jsr292(m, bci);
}

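// Hundreds digit of TypeProfileLevel: profiling of incoming parameter types.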
int MethodData::profile_parameters_flag() {
  return TypeProfileLevel / 100;
}

bool MethodData::profile_parameters() {
  return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
}

bool MethodData::profile_parameters_jsr292_only() {
  return profile_parameters_flag() == type_profile_jsr292;
}

bool MethodData::profile_all_parameters() {
  return profile_parameters_flag() == type_profile_all;
}

bool MethodData::profile_parameters_for_method(const methodHandle& m) {
  if (!profile_parameters()) {
    return false;
  }

  if (profile_all_parameters()) {
    return true;
  }

  assert(profile_parameters_jsr292_only(), "inconsistent");
  return m->is_compiled_lambda_form();
}

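// Visit the metaspace pointers embedded in this MethodData for CDS
// archiving; only the owning Method* is pushed.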
void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(cds)("Iter(MethodData): %p", this);
  it->push(&_method);
}

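// Helper for clean_extra_data(): move the extra data entry at dp left by
// "shift" cells to close the gap left by removed SpeculativeTrapData
// entries, or, when "reset" is true, zero out the "shift" cells that end
// at dp (the space vacated by shifted entries).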
void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
  if (shift == 0) {
    return;
  }
  if (!reset) {
    // Move all cells of trap entry at dp left by "shift" cells
    intptr_t* start = (intptr_t*)dp;
    intptr_t* end = (intptr_t*)next_extra(dp);
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *(ptr-shift) = *ptr;
    }
  } else {
    // Reset "shift" cells stopping at dp
    intptr_t* start = ((intptr_t*)dp) - shift;
    intptr_t* end = (intptr_t*)dp;
    for (intptr_t* ptr = start; ptr < end; ptr++) {
      *ptr = 0;
    }
  }
}

// Check for entries that reference an unloaded method
class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
  bool _always_clean;
public:
  CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
  bool is_live(Method* m) {
    return !(_always_clean) && m->method_holder()->is_loader_alive();
  }
};

// Check for entries that reference a redefined method
class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
public:
  CleanExtraDataMethodClosure() {}
  bool is_live(Method* m) { return !m->is_old(); }
};


// Remove SpeculativeTrapData entries that reference an unloaded or
// redefined method
void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  int shift = 0;
  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != NULL, "should have a method");
      if (!cl->is_live(m)) {
        // "shift" accumulates the number of cells for dead
        // SpeculativeTrapData entries that have been seen so
        // far. Following entries must be shifted left by that many
        // cells to remove the dead SpeculativeTrapData entries.
        shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
      } else {
        // Shift this entry left if it follows dead
        // SpeculativeTrapData entries
        clean_extra_data_helper(dp, shift);
      }
      break;
    }
    case DataLayout::bit_data_tag:
      // Shift this entry left if it follows dead SpeculativeTrapData
      // entries
      clean_extra_data_helper(dp, shift);
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      // We are at the end of the live trap entries. The previous "shift"
      // cells contain entries that are either dead or were shifted
      // left. They need to be reset to no_tag.
      clean_extra_data_helper(dp, shift, true);
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
}

// Verify there's no unloaded or redefined method referenced by a
// SpeculativeTrapData entry
void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
#ifdef ASSERT
  DataLayout* dp  = extra_data_base();
  DataLayout* end = args_data_limit();

  for (; dp < end; dp = next_extra(dp)) {
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag: {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != NULL && cl->is_live(m), "Method should exist");
      break;
    }
    case DataLayout::bit_data_tag:
      continue;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      return;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
#endif
}

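// Remove references to dead klasses from the profile, including
// SpeculativeTrapData entries whose method holders are no longer alive;
// when always_clean is true, all such weak references are cleared
// unconditionally.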
void MethodData::clean_method_data(bool always_clean) {
  ResourceMark rm;
  for (ProfileData* data = first_data();
       is_valid(data);
       data = next_data(data)) {
    data->clean_weak_klass_links(always_clean);
  }
  ParametersTypeData* parameters = parameters_type_data();
  if (parameters != NULL) {
    parameters->clean_weak_klass_links(always_clean);
  }

  CleanExtraDataKlassClosure cl(always_clean);
  clean_extra_data(&cl);
  verify_extra_data_clean(&cl);
}

// This is called during redefinition to clean all "old" redefined
// methods out of MethodData for all methods.
void MethodData::clean_weak_method_links() {
  ResourceMark rm;
  for (ProfileData* data = first_data();
       is_valid(data);
       data = next_data(data)) {
    data->clean_weak_method_links();
  }

  CleanExtraDataMethodClosure cl;
  clean_extra_data(&cl);
  verify_extra_data_clean(&cl);
}

#ifdef ASSERT
void MethodData::verify_clean_weak_method_links() {
  ResourceMark rm;
  for (ProfileData* data = first_data();
       is_valid(data);
       data = next_data(data)) {
    data->verify_clean_weak_method_links();
  }

  CleanExtraDataMethodClosure cl;
  verify_extra_data_clean(&cl);
}
#endif // ASSERT