1/*
2 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#ifndef SHARE_OOPS_METHODDATA_HPP
26#define SHARE_OOPS_METHODDATA_HPP
27
28#include "interpreter/bytecodes.hpp"
29#include "oops/metadata.hpp"
30#include "oops/method.hpp"
31#include "oops/oop.hpp"
32#include "runtime/atomic.hpp"
33#include "utilities/align.hpp"
34
35class BytecodeStream;
36class KlassSizeStats;
37
38// The MethodData object collects counts and other profile information
39// during zeroth-tier (interpretive) and first-tier execution.
40// The profile is used later by compilation heuristics. Some heuristics
41// enable use of aggressive (or "heroic") optimizations. An aggressive
42// optimization often has a down-side, a corner case that it handles
43// poorly, but which is thought to be rare. The profile provides
44// evidence of this rarity for a given method or even BCI. It allows
45// the compiler to back out of the optimization at places where it
46// has historically been a poor choice. Other heuristics try to use
47// specific information gathered about types observed at a given site.
48//
49// All data in the profile is approximate. It is expected to be accurate
// on the whole, but the system expects occasional inaccuracies, due to
51// counter overflow, multiprocessor races during data collection, space
52// limitations, missing MDO blocks, etc. Bad or missing data will degrade
53// optimization quality but will not affect correctness. Also, each MDO
54// is marked with its birth-date ("creation_mileage") which can be used
55// to assess the quality ("maturity") of its data.
56//
57// Short (<32-bit) counters are designed to overflow to a known "saturated"
58// state. Also, certain recorded per-BCI events are given one-bit counters
// which overflow to a saturated state which applies to all counters at
60// that BCI. In other words, there is a small lattice which approximates
61// the ideal of an infinite-precision counter for each event at each BCI,
62// and the lattice quickly "bottoms out" in a state where all counters
63// are taken to be indefinitely large.
64//
65// The reader will find many data races in profile gathering code, starting
66// with invocation counter incrementation. None of these races harm correct
67// execution of the compiled code.
68
69// forward decl
70class ProfileData;
71
// DataLayout
//
// Overlay for generic profiling data.  A DataLayout is never allocated
// as a normal object; it is mapped directly onto the raw profiling area
// of a MethodData (note the one-element _cells array, which in reality
// extends to cell_count cells).  VMStructs and JVMCI read the field
// layout directly, so the physical layout must stay in sync with them.
class DataLayout {
  friend class VMStructs;
  friend class JVMCIVMStructs;

private:
  // Every data layout begins with a header. This header
  // contains a tag, which is used to indicate the size/layout
  // of the data, 8 bits of flags, which can be used in any way,
  // 32 bits of trap history (none/one reason/many reasons),
  // and a bci, which is used to tie this piece of data to a
  // specific bci in the bytecodes.
  union {
    u8 _bits;        // the whole header viewed as a single word
    struct {
      u1 _tag;       // one of the *_data_tag values below
      u1 _flags;     // free-form per-entry flag bits
      u2 _bci;       // bytecode index this entry describes
      u4 _traps;     // packed trap state (see trap_bits below)
    } _struct;
  } _header;

  // The data layout has an arbitrary number of cells, each sized
  // to accommodate a pointer or an integer.
  intptr_t _cells[1];

  // Some types of data layouts need a length field.
  static bool needs_array_len(u1 tag);

public:
  enum {
    counter_increment = 1
  };

  // Each cell is pointer-sized so it can hold either a metadata
  // pointer or an integer counter.
  enum {
    cell_size = sizeof(intptr_t)
  };

  // Tag values: identify which ProfileData subclass overlays this layout.
  enum {
    no_tag,
    bit_data_tag,
    counter_data_tag,
    jump_data_tag,
    receiver_type_data_tag,
    virtual_call_data_tag,
    ret_data_tag,
    branch_data_tag,
    multi_branch_data_tag,
    arg_info_data_tag,
    call_type_data_tag,
    virtual_call_type_data_tag,
    parameters_type_data_tag,
    speculative_trap_data_tag
  };

  enum {
    // The trap state breaks down as [recompile:1 | reason:31].
    // This further breakdown is defined in deoptimization.cpp.
    // See Deoptimization::trap_state_reason for an assert that
    // trap_bits is big enough to hold reasons < Reason_RECORDED_LIMIT.
    //
    // The trap_state is collected only if ProfileTraps is true.
    trap_bits = 1+31, // 31: enough to distinguish [0..Reason_RECORDED_LIMIT].
    trap_mask = -1,   // covers all 32 bits of the _traps field
    first_flag = 0
  };

  // Size computation
  static int header_size_in_bytes() {
    return header_size_in_cells() * cell_size;
  }
  static int header_size_in_cells() {
    // The 8-byte header fits in one 64-bit cell but needs two 32-bit cells.
    return LP64_ONLY(1) NOT_LP64(2);
  }

  // Total footprint of a layout with the given number of data cells.
  static int compute_size_in_bytes(int cell_count) {
    return header_size_in_bytes() + cell_count * cell_size;
  }

  // Initialization
  void initialize(u1 tag, u2 bci, int cell_count);

  // Accessors
  u1 tag() {
    return _header._struct._tag;
  }

  // Return 32 bits of trap state.
  // The state tells if traps with zero, one, or many reasons have occurred.
  // It also tells whether zero or many recompilations have occurred.
  // The associated trap histogram in the MDO itself tells whether
  // traps are common or not. If a BCI shows that a trap X has
  // occurred, and the MDO shows N occurrences of X, we make the
  // simplifying assumption that all N occurrences can be blamed
  // on that BCI.
  uint trap_state() const {
    return _header._struct._traps;
  }

  // Merge new_state into the current state: bits are only ever added,
  // never cleared (the saturating-lattice behavior described in the
  // file-header comment).
  void set_trap_state(uint new_state) {
    assert(ProfileTraps, "used only under +ProfileTraps");
    uint old_flags = _header._struct._traps;
    _header._struct._traps = new_state | old_flags;
  }

  u1 flags() const {
    return _header._struct._flags;
  }

  u2 bci() const {
    return _header._struct._bci;
  }

  void set_header(u8 value) {
    _header._bits = value;
  }
  u8 header() {
    return _header._bits;
  }
  void set_cell_at(int index, intptr_t value) {
    _cells[index] = value;
  }
  // Store with release semantics; defined out of line (needs atomic.hpp).
  void release_set_cell_at(int index, intptr_t value);
  intptr_t cell_at(int index) const {
    return _cells[index];
  }

  void set_flag_at(u1 flag_number) {
    _header._struct._flags |= (0x1 << flag_number);
  }
  bool flag_at(u1 flag_number) const {
    return (_header._struct._flags & (0x1 << flag_number)) != 0;
  }

  // Low-level support for code generation.
  static ByteSize header_offset() {
    return byte_offset_of(DataLayout, _header);
  }
  static ByteSize tag_offset() {
    return byte_offset_of(DataLayout, _header._struct._tag);
  }
  static ByteSize flags_offset() {
    return byte_offset_of(DataLayout, _header._struct._flags);
  }
  static ByteSize bci_offset() {
    return byte_offset_of(DataLayout, _header._struct._bci);
  }
  static ByteSize cell_offset(int index) {
    return byte_offset_of(DataLayout, _cells) + in_ByteSize(index * cell_size);
  }
  // Return a value which, when or-ed as a byte into _flags, sets the flag.
  static u1 flag_number_to_constant(u1 flag_number) {
    DataLayout temp; temp.set_header(0);
    temp.set_flag_at(flag_number);
    return temp._header._struct._flags;
  }
  // Return a value which, when or-ed as a word into _header, sets the flag.
  static u8 flag_mask_to_header_mask(uint byte_constant) {
    DataLayout temp; temp.set_header(0);
    temp._header._struct._flags = byte_constant;
    return temp._header._bits;
  }

  // Reconstitute a ProfileData wrapper of the subclass matching tag().
  ProfileData* data_in();

  // GC support
  void clean_weak_klass_links(bool always_clean);

  // Redefinition support
  void clean_weak_method_links();
  DEBUG_ONLY(void verify_clean_weak_method_links();)
};
247
248
249// ProfileData class hierarchy
250class ProfileData;
251class BitData;
252class CounterData;
253class ReceiverTypeData;
254class VirtualCallData;
255class VirtualCallTypeData;
256class RetData;
257class CallTypeData;
258class JumpData;
259class BranchData;
260class ArrayData;
261class MultiBranchData;
262class ArgInfoData;
263class ParametersTypeData;
264class SpeculativeTrapData;
265
// ProfileData
//
// A ProfileData object is created to refer to a section of profiling
// data in a structured way.  It is a thin, resource-allocated wrapper
// around a DataLayout*; subclasses interpret the layout's cells
// according to the layout's tag.
class ProfileData : public ResourceObj {
  friend class TypeEntries;
  friend class ReturnTypeEntry;
  friend class TypeStackSlotEntries;
private:
  // Column positions used by tab()/print_shared() when printing.
  enum {
    tab_width_one = 16,
    tab_width_two = 36
  };

  // This is a pointer to a section of profiling data.
  DataLayout* _data;

  char* print_data_on_helper(const MethodData* md) const;

protected:
  DataLayout* data() { return _data; }
  const DataLayout* data() const { return _data; }

  enum {
    cell_size = DataLayout::cell_size
  };

public:
  // How many cells are in this?  Every concrete subclass must override.
  virtual int cell_count() const {
    ShouldNotReachHere();
    return -1;
  }

  // Return the size of this data.
  int size_in_bytes() {
    return DataLayout::compute_size_in_bytes(cell_count());
  }

protected:
  // Low-level accessors for underlying data
  void set_intptr_at(int index, intptr_t value) {
    assert(0 <= index && index < cell_count(), "oob");
    data()->set_cell_at(index, value);
  }
  // Store with release semantics; defined out of line (needs atomic.hpp).
  void release_set_intptr_at(int index, intptr_t value);
  intptr_t intptr_at(int index) const {
    assert(0 <= index && index < cell_count(), "oob");
    return data()->cell_at(index);
  }
  void set_uint_at(int index, uint value) {
    set_intptr_at(index, (intptr_t) value);
  }
  void release_set_uint_at(int index, uint value);
  uint uint_at(int index) const {
    return (uint)intptr_at(index);
  }
  void set_int_at(int index, int value) {
    set_intptr_at(index, (intptr_t) value);
  }
  void release_set_int_at(int index, int value);
  int int_at(int index) const {
    return (int)intptr_at(index);
  }
  // Like int_at() but without the bounds assert; needed where
  // cell_count() itself depends on the cell being read.
  int int_at_unchecked(int index) const {
    return (int)data()->cell_at(index);
  }
  void set_oop_at(int index, oop value) {
    set_intptr_at(index, cast_from_oop<intptr_t>(value));
  }
  oop oop_at(int index) const {
    return cast_to_oop(intptr_at(index));
  }

  void set_flag_at(int flag_number) {
    data()->set_flag_at(flag_number);
  }
  bool flag_at(int flag_number) const {
    return data()->flag_at(flag_number);
  }

  // two convenient imports for use by subclasses:
  static ByteSize cell_offset(int index) {
    return DataLayout::cell_offset(index);
  }
  static int flag_number_to_constant(int flag_number) {
    return DataLayout::flag_number_to_constant(flag_number);
  }

  ProfileData(DataLayout* data) {
    _data = data;
  }

public:
  // Constructor for invalid ProfileData.
  ProfileData();

  u2 bci() const {
    return data()->bci();
  }

  // Raw address of the underlying layout (the "data pointer").
  address dp() {
    return (address)_data;
  }

  int trap_state() const {
    return data()->trap_state();
  }
  void set_trap_state(int new_state) {
    data()->set_trap_state(new_state);
  }

  // Type checking
  virtual bool is_BitData() const { return false; }
  virtual bool is_CounterData() const { return false; }
  virtual bool is_JumpData() const { return false; }
  virtual bool is_ReceiverTypeData()const { return false; }
  virtual bool is_VirtualCallData() const { return false; }
  virtual bool is_RetData() const { return false; }
  virtual bool is_BranchData() const { return false; }
  virtual bool is_ArrayData() const { return false; }
  virtual bool is_MultiBranchData() const { return false; }
  virtual bool is_ArgInfoData() const { return false; }
  virtual bool is_CallTypeData() const { return false; }
  virtual bool is_VirtualCallTypeData()const { return false; }
  virtual bool is_ParametersTypeData() const { return false; }
  virtual bool is_SpeculativeTrapData()const { return false; }


  // Checked downcasts: assert in debug builds; in product builds a
  // mismatched type yields NULL instead.
  BitData* as_BitData() const {
    assert(is_BitData(), "wrong type");
    return is_BitData() ? (BitData*) this : NULL;
  }
  CounterData* as_CounterData() const {
    assert(is_CounterData(), "wrong type");
    return is_CounterData() ? (CounterData*) this : NULL;
  }
  JumpData* as_JumpData() const {
    assert(is_JumpData(), "wrong type");
    return is_JumpData() ? (JumpData*) this : NULL;
  }
  ReceiverTypeData* as_ReceiverTypeData() const {
    assert(is_ReceiverTypeData(), "wrong type");
    return is_ReceiverTypeData() ? (ReceiverTypeData*)this : NULL;
  }
  VirtualCallData* as_VirtualCallData() const {
    assert(is_VirtualCallData(), "wrong type");
    return is_VirtualCallData() ? (VirtualCallData*)this : NULL;
  }
  RetData* as_RetData() const {
    assert(is_RetData(), "wrong type");
    return is_RetData() ? (RetData*) this : NULL;
  }
  BranchData* as_BranchData() const {
    assert(is_BranchData(), "wrong type");
    return is_BranchData() ? (BranchData*) this : NULL;
  }
  ArrayData* as_ArrayData() const {
    assert(is_ArrayData(), "wrong type");
    return is_ArrayData() ? (ArrayData*) this : NULL;
  }
  MultiBranchData* as_MultiBranchData() const {
    assert(is_MultiBranchData(), "wrong type");
    return is_MultiBranchData() ? (MultiBranchData*)this : NULL;
  }
  ArgInfoData* as_ArgInfoData() const {
    assert(is_ArgInfoData(), "wrong type");
    return is_ArgInfoData() ? (ArgInfoData*)this : NULL;
  }
  CallTypeData* as_CallTypeData() const {
    assert(is_CallTypeData(), "wrong type");
    return is_CallTypeData() ? (CallTypeData*)this : NULL;
  }
  VirtualCallTypeData* as_VirtualCallTypeData() const {
    assert(is_VirtualCallTypeData(), "wrong type");
    return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : NULL;
  }
  ParametersTypeData* as_ParametersTypeData() const {
    assert(is_ParametersTypeData(), "wrong type");
    return is_ParametersTypeData() ? (ParametersTypeData*)this : NULL;
  }
  SpeculativeTrapData* as_SpeculativeTrapData() const {
    assert(is_SpeculativeTrapData(), "wrong type");
    return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : NULL;
  }


  // Subclass specific initialization
  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo) {}

  // GC support
  virtual void clean_weak_klass_links(bool always_clean) {}

  // Redefinition support
  virtual void clean_weak_method_links() {}
  DEBUG_ONLY(virtual void verify_clean_weak_method_links() {})

  // CI translation: ProfileData can represent both MethodDataOop data
  // as well as CIMethodData data. This function is provided for translating
  // an oop in a ProfileData to the ci equivalent. Generally speaking,
  // most ProfileData don't require any translation, so we provide the null
  // translation here, and the required translators are in the ci subclasses.
  virtual void translate_from(const ProfileData* data) {}

  virtual void print_data_on(outputStream* st, const char* extra = NULL) const {
    ShouldNotReachHere();
  }

  void print_data_on(outputStream* st, const MethodData* md) const;

  void print_shared(outputStream* st, const char* name, const char* extra) const;
  void tab(outputStream* st, bool first = false) const;
};
479
480// BitData
481//
482// A BitData holds a flag or two in its header.
483class BitData : public ProfileData {
484 friend class VMStructs;
485 friend class JVMCIVMStructs;
486protected:
487 enum {
488 // null_seen:
489 // saw a null operand (cast/aastore/instanceof)
490 null_seen_flag = DataLayout::first_flag + 0
491#if INCLUDE_JVMCI
492 // bytecode threw any exception
493 , exception_seen_flag = null_seen_flag + 1
494#endif
495 };
496 enum { bit_cell_count = 0 }; // no additional data fields needed.
497public:
498 BitData(DataLayout* layout) : ProfileData(layout) {
499 }
500
501 virtual bool is_BitData() const { return true; }
502
503 static int static_cell_count() {
504 return bit_cell_count;
505 }
506
507 virtual int cell_count() const {
508 return static_cell_count();
509 }
510
511 // Accessor
512
513 // The null_seen flag bit is specially known to the interpreter.
514 // Consulting it allows the compiler to avoid setting up null_check traps.
515 bool null_seen() { return flag_at(null_seen_flag); }
516 void set_null_seen() { set_flag_at(null_seen_flag); }
517
518#if INCLUDE_JVMCI
519 // true if an exception was thrown at the specific BCI
520 bool exception_seen() { return flag_at(exception_seen_flag); }
521 void set_exception_seen() { set_flag_at(exception_seen_flag); }
522#endif
523
524 // Code generation support
525 static int null_seen_byte_constant() {
526 return flag_number_to_constant(null_seen_flag);
527 }
528
529 static ByteSize bit_data_size() {
530 return cell_offset(bit_cell_count);
531 }
532
533 void print_data_on(outputStream* st, const char* extra = NULL) const;
534};
535
536// CounterData
537//
538// A CounterData corresponds to a simple counter.
539class CounterData : public BitData {
540 friend class VMStructs;
541 friend class JVMCIVMStructs;
542protected:
543 enum {
544 count_off,
545 counter_cell_count
546 };
547public:
548 CounterData(DataLayout* layout) : BitData(layout) {}
549
550 virtual bool is_CounterData() const { return true; }
551
552 static int static_cell_count() {
553 return counter_cell_count;
554 }
555
556 virtual int cell_count() const {
557 return static_cell_count();
558 }
559
560 // Direct accessor
561 int count() const {
562 intptr_t raw_data = intptr_at(count_off);
563 if (raw_data > max_jint) {
564 raw_data = max_jint;
565 } else if (raw_data < min_jint) {
566 raw_data = min_jint;
567 }
568 return int(raw_data);
569 }
570
571 // Code generation support
572 static ByteSize count_offset() {
573 return cell_offset(count_off);
574 }
575 static ByteSize counter_data_size() {
576 return cell_offset(counter_cell_count);
577 }
578
579 void set_count(int count) {
580 set_int_at(count_off, count);
581 }
582
583 void print_data_on(outputStream* st, const char* extra = NULL) const;
584};
585
586// JumpData
587//
588// A JumpData is used to access profiling information for a direct
589// branch. It is a counter, used for counting the number of branches,
590// plus a data displacement, used for realigning the data pointer to
591// the corresponding target bci.
592class JumpData : public ProfileData {
593 friend class VMStructs;
594 friend class JVMCIVMStructs;
595protected:
596 enum {
597 taken_off_set,
598 displacement_off_set,
599 jump_cell_count
600 };
601
602 void set_displacement(int displacement) {
603 set_int_at(displacement_off_set, displacement);
604 }
605
606public:
607 JumpData(DataLayout* layout) : ProfileData(layout) {
608 assert(layout->tag() == DataLayout::jump_data_tag ||
609 layout->tag() == DataLayout::branch_data_tag, "wrong type");
610 }
611
612 virtual bool is_JumpData() const { return true; }
613
614 static int static_cell_count() {
615 return jump_cell_count;
616 }
617
618 virtual int cell_count() const {
619 return static_cell_count();
620 }
621
622 // Direct accessor
623 uint taken() const {
624 return uint_at(taken_off_set);
625 }
626
627 void set_taken(uint cnt) {
628 set_uint_at(taken_off_set, cnt);
629 }
630
631 // Saturating counter
632 uint inc_taken() {
633 uint cnt = taken() + 1;
634 // Did we wrap? Will compiler screw us??
635 if (cnt == 0) cnt--;
636 set_uint_at(taken_off_set, cnt);
637 return cnt;
638 }
639
640 int displacement() const {
641 return int_at(displacement_off_set);
642 }
643
644 // Code generation support
645 static ByteSize taken_offset() {
646 return cell_offset(taken_off_set);
647 }
648
649 static ByteSize displacement_offset() {
650 return cell_offset(displacement_off_set);
651 }
652
653 // Specific initialization.
654 void post_initialize(BytecodeStream* stream, MethodData* mdo);
655
656 void print_data_on(outputStream* st, const char* extra = NULL) const;
657};
658
659// Entries in a ProfileData object to record types: it can either be
660// none (no profile), unknown (conflicting profile data) or a klass if
661// a single one is seen. Whether a null reference was seen is also
662// recorded. No counter is associated with the type and a single type
663// is tracked (unlike VirtualCallData).
664class TypeEntries {
665
666public:
667
668 // A single cell is used to record information for a type:
669 // - the cell is initialized to 0
670 // - when a type is discovered it is stored in the cell
671 // - bit zero of the cell is used to record whether a null reference
672 // was encountered or not
673 // - bit 1 is set to record a conflict in the type information
674
675 enum {
676 null_seen = 1,
677 type_mask = ~null_seen,
678 type_unknown = 2,
679 status_bits = null_seen | type_unknown,
680 type_klass_mask = ~status_bits
681 };
682
683 // what to initialize a cell to
684 static intptr_t type_none() {
685 return 0;
686 }
687
688 // null seen = bit 0 set?
689 static bool was_null_seen(intptr_t v) {
690 return (v & null_seen) != 0;
691 }
692
693 // conflicting type information = bit 1 set?
694 static bool is_type_unknown(intptr_t v) {
695 return (v & type_unknown) != 0;
696 }
697
698 // not type information yet = all bits cleared, ignoring bit 0?
699 static bool is_type_none(intptr_t v) {
700 return (v & type_mask) == 0;
701 }
702
703 // recorded type: cell without bit 0 and 1
704 static intptr_t klass_part(intptr_t v) {
705 intptr_t r = v & type_klass_mask;
706 return r;
707 }
708
709 // type recorded
710 static Klass* valid_klass(intptr_t k) {
711 if (!is_type_none(k) &&
712 !is_type_unknown(k)) {
713 Klass* res = (Klass*)klass_part(k);
714 assert(res != NULL, "invalid");
715 return res;
716 } else {
717 return NULL;
718 }
719 }
720
721 static intptr_t with_status(intptr_t k, intptr_t in) {
722 return k | (in & status_bits);
723 }
724
725 static intptr_t with_status(Klass* k, intptr_t in) {
726 return with_status((intptr_t)k, in);
727 }
728
729 static void print_klass(outputStream* st, intptr_t k);
730
731protected:
732 // ProfileData object these entries are part of
733 ProfileData* _pd;
734 // offset within the ProfileData object where the entries start
735 const int _base_off;
736
737 TypeEntries(int base_off)
738 : _pd(NULL), _base_off(base_off) {}
739
740 void set_intptr_at(int index, intptr_t value) {
741 _pd->set_intptr_at(index, value);
742 }
743
744 intptr_t intptr_at(int index) const {
745 return _pd->intptr_at(index);
746 }
747
748public:
749 void set_profile_data(ProfileData* pd) {
750 _pd = pd;
751 }
752};
753
754// Type entries used for arguments passed at a call and parameters on
755// method entry. 2 cells per entry: one for the type encoded as in
756// TypeEntries and one initialized with the stack slot where the
757// profiled object is to be found so that the interpreter can locate
758// it quickly.
759class TypeStackSlotEntries : public TypeEntries {
760
761private:
762 enum {
763 stack_slot_entry,
764 type_entry,
765 per_arg_cell_count
766 };
767
768 // offset of cell for stack slot for entry i within ProfileData object
769 int stack_slot_offset(int i) const {
770 return _base_off + stack_slot_local_offset(i);
771 }
772
773 const int _number_of_entries;
774
775 // offset of cell for type for entry i within ProfileData object
776 int type_offset_in_cells(int i) const {
777 return _base_off + type_local_offset(i);
778 }
779
780public:
781
782 TypeStackSlotEntries(int base_off, int nb_entries)
783 : TypeEntries(base_off), _number_of_entries(nb_entries) {}
784
785 static int compute_cell_count(Symbol* signature, bool include_receiver, int max);
786
787 void post_initialize(Symbol* signature, bool has_receiver, bool include_receiver);
788
789 int number_of_entries() const { return _number_of_entries; }
790
791 // offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
792 static int stack_slot_local_offset(int i) {
793 return i * per_arg_cell_count + stack_slot_entry;
794 }
795
796 // offset of cell for type for entry i within this block of cells for a TypeStackSlotEntries
797 static int type_local_offset(int i) {
798 return i * per_arg_cell_count + type_entry;
799 }
800
801 // stack slot for entry i
802 uint stack_slot(int i) const {
803 assert(i >= 0 && i < _number_of_entries, "oob");
804 return _pd->uint_at(stack_slot_offset(i));
805 }
806
807 // set stack slot for entry i
808 void set_stack_slot(int i, uint num) {
809 assert(i >= 0 && i < _number_of_entries, "oob");
810 _pd->set_uint_at(stack_slot_offset(i), num);
811 }
812
813 // type for entry i
814 intptr_t type(int i) const {
815 assert(i >= 0 && i < _number_of_entries, "oob");
816 return _pd->intptr_at(type_offset_in_cells(i));
817 }
818
819 // set type for entry i
820 void set_type(int i, intptr_t k) {
821 assert(i >= 0 && i < _number_of_entries, "oob");
822 _pd->set_intptr_at(type_offset_in_cells(i), k);
823 }
824
825 static ByteSize per_arg_size() {
826 return in_ByteSize(per_arg_cell_count * DataLayout::cell_size);
827 }
828
829 static int per_arg_count() {
830 return per_arg_cell_count;
831 }
832
833 ByteSize type_offset(int i) const {
834 return DataLayout::cell_offset(type_offset_in_cells(i));
835 }
836
837 // GC support
838 void clean_weak_klass_links(bool always_clean);
839
840 void print_data_on(outputStream* st) const;
841};
842
843// Type entry used for return from a call. A single cell to record the
844// type.
845class ReturnTypeEntry : public TypeEntries {
846
847private:
848 enum {
849 cell_count = 1
850 };
851
852public:
853 ReturnTypeEntry(int base_off)
854 : TypeEntries(base_off) {}
855
856 void post_initialize() {
857 set_type(type_none());
858 }
859
860 intptr_t type() const {
861 return _pd->intptr_at(_base_off);
862 }
863
864 void set_type(intptr_t k) {
865 _pd->set_intptr_at(_base_off, k);
866 }
867
868 static int static_cell_count() {
869 return cell_count;
870 }
871
872 static ByteSize size() {
873 return in_ByteSize(cell_count * DataLayout::cell_size);
874 }
875
876 ByteSize type_offset() {
877 return DataLayout::cell_offset(_base_off);
878 }
879
880 // GC support
881 void clean_weak_klass_links(bool always_clean);
882
883 void print_data_on(outputStream* st) const;
884};
885
886// Entries to collect type information at a call: contains arguments
887// (TypeStackSlotEntries), a return type (ReturnTypeEntry) and a
888// number of cells. Because the number of cells for the return type is
889// smaller than the number of cells for the type of an arguments, the
890// number of cells is used to tell how many arguments are profiled and
891// whether a return value is profiled. See has_arguments() and
892// has_return().
893class TypeEntriesAtCall {
894private:
895 static int stack_slot_local_offset(int i) {
896 return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i);
897 }
898
899 static int argument_type_local_offset(int i) {
900 return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);
901 }
902
903public:
904
905 static int header_cell_count() {
906 return 1;
907 }
908
909 static int cell_count_local_offset() {
910 return 0;
911 }
912
913 static int compute_cell_count(BytecodeStream* stream);
914
915 static void initialize(DataLayout* dl, int base, int cell_count) {
916 int off = base + cell_count_local_offset();
917 dl->set_cell_at(off, cell_count - base - header_cell_count());
918 }
919
920 static bool arguments_profiling_enabled();
921 static bool return_profiling_enabled();
922
923 // Code generation support
924 static ByteSize cell_count_offset() {
925 return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size);
926 }
927
928 static ByteSize args_data_offset() {
929 return in_ByteSize(header_cell_count() * DataLayout::cell_size);
930 }
931
932 static ByteSize stack_slot_offset(int i) {
933 return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size);
934 }
935
936 static ByteSize argument_type_offset(int i) {
937 return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size);
938 }
939
940 static ByteSize return_only_size() {
941 return ReturnTypeEntry::size() + in_ByteSize(header_cell_count() * DataLayout::cell_size);
942 }
943
944};
945
// CallTypeData
//
// A CallTypeData is used to access profiling information about a non
// virtual call for which we collect type information about arguments
// and return value.
class CallTypeData : public CounterData {
private:
  // entries for arguments if any
  TypeStackSlotEntries _args;
  // entry for return type if any
  ReturnTypeEntry _ret;

  // Cell index (within the whole data) of the header cell that stores
  // the number of type-profiling cells which follow it.
  int cell_count_global_offset() const {
    return CounterData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
  }

  // number of cells not counting the header
  int cell_count_no_header() const {
    return uint_at(cell_count_global_offset());
  }

  void check_number_of_arguments(int total) {
    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
  }

public:
  // NOTE: the member-init list reads the already-initialized layout
  // cells (via number_of_arguments() and cell_count()) to size _args
  // and locate _ret, so the layout must be initialized before a
  // CallTypeData is constructed over it.
  CallTypeData(DataLayout* layout) :
    CounterData(layout),
    _args(CounterData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
    _ret(cell_count() - ReturnTypeEntry::static_cell_count())
  {
    assert(layout->tag() == DataLayout::call_type_data_tag, "wrong type");
    // Some compilers (VC++) don't want this passed in member initialization list
    _args.set_profile_data(this);
    _ret.set_profile_data(this);
  }

  const TypeStackSlotEntries* args() const {
    assert(has_arguments(), "no profiling of arguments");
    return &_args;
  }

  const ReturnTypeEntry* ret() const {
    assert(has_return(), "no profiling of return value");
    return &_ret;
  }

  virtual bool is_CallTypeData() const { return true; }

  // Variable-sized data: there is no meaningful static cell count.
  static int static_cell_count() {
    return -1;
  }

  static int compute_cell_count(BytecodeStream* stream) {
    return CounterData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
  }

  static void initialize(DataLayout* dl, int cell_count) {
    TypeEntriesAtCall::initialize(dl, CounterData::static_cell_count(), cell_count);
  }

  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);

  // Total cells: counter, header, then the type-profiling cells whose
  // count is read (unchecked, since cell_count() itself depends on
  // that cell) from the header cell.
  virtual int cell_count() const {
    return CounterData::static_cell_count() +
      TypeEntriesAtCall::header_cell_count() +
      int_at_unchecked(cell_count_global_offset());
  }

  int number_of_arguments() const {
    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
  }

  // Merge klass k into the type cell for argument i, preserving the
  // existing status bits.
  void set_argument_type(int i, Klass* k) {
    assert(has_arguments(), "no arguments!");
    intptr_t current = _args.type(i);
    _args.set_type(i, TypeEntries::with_status(k, current));
  }

  // Merge klass k into the return-type cell, preserving status bits.
  void set_return_type(Klass* k) {
    assert(has_return(), "no return!");
    intptr_t current = _ret.type();
    _ret.set_type(TypeEntries::with_status(k, current));
  }

  // An entry for a return value takes less space than an entry for an
  // argument so if the number of cells exceeds the number of cells
  // needed for an argument, this object contains type information for
  // at least one argument.
  bool has_arguments() const {
    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
    assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
    return res;
  }

  // An entry for a return value takes less space than an entry for an
  // argument, so if the remainder of the number of cells divided by
  // the number of cells for an argument is not zero, a return value
  // is profiled in this object.
  bool has_return() const {
    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
    assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
    return res;
  }

  // Code generation support
  static ByteSize args_data_offset() {
    return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
  }

  ByteSize argument_type_offset(int i) {
    return _args.type_offset(i);
  }

  ByteSize return_type_offset() {
    return _ret.type_offset();
  }

  // GC support
  virtual void clean_weak_klass_links(bool always_clean) {
    if (has_arguments()) {
      _args.clean_weak_klass_links(always_clean);
    }
    if (has_return()) {
      _ret.clean_weak_klass_links(always_clean);
    }
  }

  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
};
1076
1077// ReceiverTypeData
1078//
1079// A ReceiverTypeData is used to access profiling information about a
1080// dynamic type check. It consists of a counter which counts the total times
1081// that the check is reached, and a series of (Klass*, count) pairs
1082// which are used to store a type profile for the receiver of the check.
class ReceiverTypeData : public CounterData {
  friend class VMStructs;
  friend class JVMCIVMStructs;
protected:
  // Cell layout: the CounterData header, then TypeProfileWidth rows of
  // (receiver Klass*, count) pairs.  Under JVMCI one extra cell
  // (nonprofiled_count) precedes the rows.
  enum {
#if INCLUDE_JVMCI
    // Description of the different counters
    // ReceiverTypeData for instanceof/checkcast/aastore:
    //   count is decremented for failed type checks
    //   JVMCI only: nonprofiled_count is incremented on type overflow
    // VirtualCallData for invokevirtual/invokeinterface:
    //   count is incremented on type overflow
    //   JVMCI only: nonprofiled_count is incremented on method overflow

    // JVMCI is interested in knowing the percentage of type checks involving a type not explicitly in the profile
    nonprofiled_count_off_set = counter_cell_count,
    receiver0_offset,
#else
    receiver0_offset = counter_cell_count,
#endif
    count0_offset,
    receiver_type_row_cell_count = (count0_offset + 1) - receiver0_offset
  };

public:
  // The layout must already be tagged as one of the receiver-type-carrying
  // kinds (subclasses VirtualCallData/VirtualCallTypeData reuse this ctor).
  ReceiverTypeData(DataLayout* layout) : CounterData(layout) {
    assert(layout->tag() == DataLayout::receiver_type_data_tag ||
           layout->tag() == DataLayout::virtual_call_data_tag ||
           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
  }

  virtual bool is_ReceiverTypeData() const { return true; }

  // Header cells plus all profile rows (plus the JVMCI-only extra cell).
  static int static_cell_count() {
    return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count JVMCI_ONLY(+ 1);
  }

  virtual int cell_count() const {
    return static_cell_count();
  }

  // Direct accessors
  // Number of (receiver, count) rows available in this profile.
  static uint row_limit() {
    return TypeProfileWidth;
  }
  static int receiver_cell_index(uint row) {
    return receiver0_offset + row * receiver_type_row_cell_count;
  }
  static int receiver_count_cell_index(uint row) {
    return count0_offset + row * receiver_type_row_cell_count;
  }

  // Receiver klass recorded in the given row; NULL when the row is empty.
  Klass* receiver(uint row) const {
    assert(row < row_limit(), "oob");

    Klass* recv = (Klass*)intptr_at(receiver_cell_index(row));
    assert(recv == NULL || recv->is_klass(), "wrong type");
    return recv;
  }

  void set_receiver(uint row, Klass* k) {
    assert((uint)row < row_limit(), "oob");
    set_intptr_at(receiver_cell_index(row), (uintptr_t)k);
  }

  uint receiver_count(uint row) const {
    assert(row < row_limit(), "oob");
    return uint_at(receiver_count_cell_index(row));
  }

  void set_receiver_count(uint row, uint count) {
    assert(row < row_limit(), "oob");
    set_uint_at(receiver_count_cell_index(row), count);
  }

  // Reset one row (called when the row's klass has been unloaded).
  void clear_row(uint row) {
    assert(row < row_limit(), "oob");
    // Clear the total count - the indicator of a polymorphic call site.
    // The site may look monomorphic after that, but this allows more
    // accurate profiling information because there was an execution
    // phase change since the klasses were unloaded. If the site is
    // still polymorphic then the MDO will be updated to reflect it.
    // But it could be the case that the site becomes only bimorphic;
    // then keeping a non-zero total count would be wrong. Even if we
    // compile the site as monomorphic (when it is not), we will only
    // get a trap, deoptimization and recompilation with an updated MDO
    // after executing the method in the interpreter.
    // An additional receiver will be recorded in the cleaned row
    // during the next call execution.
    //
    // Note: our profiling logic works with empty rows in any slot.
    // We do sorting of profiling info (ciCallProfile) for compilation.
    //
    set_count(0);
    set_receiver(row, NULL);
    set_receiver_count(row, 0);
#if INCLUDE_JVMCI
    if (!this->is_VirtualCallData()) {
      // if this is a ReceiverTypeData for JVMCI, the nonprofiled_count
      // must also be reset (see "Description of the different counters" above)
      set_nonprofiled_count(0);
    }
#endif
  }

  // Code generation support
  static ByteSize receiver_offset(uint row) {
    return cell_offset(receiver_cell_index(row));
  }
  static ByteSize receiver_count_offset(uint row) {
    return cell_offset(receiver_count_cell_index(row));
  }
#if INCLUDE_JVMCI
  static ByteSize nonprofiled_receiver_count_offset() {
    return cell_offset(nonprofiled_count_off_set);
  }
  uint nonprofiled_count() const {
    return uint_at(nonprofiled_count_off_set);
  }
  void set_nonprofiled_count(uint count) {
    set_uint_at(nonprofiled_count_off_set, count);
  }
#endif // INCLUDE_JVMCI
  // Total size in bytes of a ReceiverTypeData record.
  static ByteSize receiver_type_data_size() {
    return cell_offset(static_cell_count());
  }

  // GC support
  // Clears rows whose klass is dead (or all rows if always_clean);
  // defined in methodData.cpp.
  virtual void clean_weak_klass_links(bool always_clean);

  void print_receiver_data_on(outputStream* st) const;
  void print_data_on(outputStream* st, const char* extra = NULL) const;
};
1216
1217// VirtualCallData
1218//
1219// A VirtualCallData is used to access profiling information about a
1220// virtual call. For now, it has nothing more than a ReceiverTypeData.
class VirtualCallData : public ReceiverTypeData {
public:
  VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
    assert(layout->tag() == DataLayout::virtual_call_data_tag ||
           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
  }

  virtual bool is_VirtualCallData() const { return true; }

  static int static_cell_count() {
    // At this point we could add more profile state, e.g., for arguments.
    // But for now it's the same size as the base record type.
    // JVMCI appends MethodProfileWidth (method, count) rows after the
    // receiver rows, reusing the receiver row cell layout.
    return ReceiverTypeData::static_cell_count() JVMCI_ONLY(+ (uint) MethodProfileWidth * receiver_type_row_cell_count);
  }

  virtual int cell_count() const {
    return static_cell_count();
  }

  // Direct accessors
  static ByteSize virtual_call_data_size() {
    return cell_offset(static_cell_count());
  }

#if INCLUDE_JVMCI
  // JVMCI-only method profile accessors: method rows live after the
  // TypeProfileWidth receiver rows, hence the (row + TypeProfileWidth)
  // scaling below.
  static ByteSize method_offset(uint row) {
    return cell_offset(method_cell_index(row));
  }
  static ByteSize method_count_offset(uint row) {
    return cell_offset(method_count_cell_index(row));
  }
  static int method_cell_index(uint row) {
    return receiver0_offset + (row + TypeProfileWidth) * receiver_type_row_cell_count;
  }
  static int method_count_cell_index(uint row) {
    return count0_offset + (row + TypeProfileWidth) * receiver_type_row_cell_count;
  }
  static uint method_row_limit() {
    return MethodProfileWidth;
  }

  // Method recorded in the given row; NULL when the row is empty.
  Method* method(uint row) const {
    assert(row < method_row_limit(), "oob");

    Method* method = (Method*)intptr_at(method_cell_index(row));
    assert(method == NULL || method->is_method(), "must be");
    return method;
  }

  uint method_count(uint row) const {
    assert(row < method_row_limit(), "oob");
    return uint_at(method_count_cell_index(row));
  }

  void set_method(uint row, Method* m) {
    assert((uint)row < method_row_limit(), "oob");
    set_intptr_at(method_cell_index(row), (uintptr_t)m);
  }

  void set_method_count(uint row, uint count) {
    assert(row < method_row_limit(), "oob");
    set_uint_at(method_count_cell_index(row), count);
  }

  // Reset one method row (analogous to ReceiverTypeData::clear_row()).
  void clear_method_row(uint row) {
    assert(row < method_row_limit(), "oob");
    // Clear total count - indicator of polymorphic call site (see comment for clear_row() in ReceiverTypeData).
    set_nonprofiled_count(0);
    set_method(row, NULL);
    set_method_count(row, 0);
  }

  // GC support
  virtual void clean_weak_klass_links(bool always_clean);

  // Redefinition support
  virtual void clean_weak_method_links();
#endif // INCLUDE_JVMCI

  void print_method_data_on(outputStream* st) const NOT_JVMCI_RETURN;
  void print_data_on(outputStream* st, const char* extra = NULL) const;
};
1303
1304// VirtualCallTypeData
1305//
1306// A VirtualCallTypeData is used to access profiling information about
1307// a virtual call for which we collect type information about
1308// arguments and return value.
// Note: this class deliberately mirrors CallTypeData, using
// VirtualCallData as the base record instead of CounterData.
class VirtualCallTypeData : public VirtualCallData {
private:
  // entries for arguments if any
  TypeStackSlotEntries _args;
  // entry for return type if any
  ReturnTypeEntry _ret;

  // Offset (in cells, from the start of this data) of the cell holding
  // the number of cells used for argument/return type entries.
  int cell_count_global_offset() const {
    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
  }

  // number of cells of type entries, not counting the header
  int cell_count_no_header() const {
    return uint_at(cell_count_global_offset());
  }

  void check_number_of_arguments(int total) {
    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
  }

public:
  // Note: number_of_arguments() and cell_count() are safe to call in the
  // initializer list because they only read the already-initialized layout.
  VirtualCallTypeData(DataLayout* layout) :
    VirtualCallData(layout),
    _args(VirtualCallData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
    _ret(cell_count() - ReturnTypeEntry::static_cell_count())
  {
    assert(layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
    // Some compilers (VC++) don't want this passed in member initialization list
    _args.set_profile_data(this);
    _ret.set_profile_data(this);
  }

  const TypeStackSlotEntries* args() const {
    assert(has_arguments(), "no profiling of arguments");
    return &_args;
  }

  const ReturnTypeEntry* ret() const {
    assert(has_return(), "no profiling of return value");
    return &_ret;
  }

  virtual bool is_VirtualCallTypeData() const { return true; }

  // Size is not statically known: it depends on the profiled signature.
  static int static_cell_count() {
    return -1;
  }

  static int compute_cell_count(BytecodeStream* stream) {
    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
  }

  static void initialize(DataLayout* dl, int cell_count) {
    TypeEntriesAtCall::initialize(dl, VirtualCallData::static_cell_count(), cell_count);
  }

  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);

  virtual int cell_count() const {
    return VirtualCallData::static_cell_count() +
      TypeEntriesAtCall::header_cell_count() +
      int_at_unchecked(cell_count_global_offset());
  }

  int number_of_arguments() const {
    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
  }

  // Record klass k as a seen type for argument i, merging with the
  // previously recorded state.
  void set_argument_type(int i, Klass* k) {
    assert(has_arguments(), "no arguments!");
    intptr_t current = _args.type(i);
    _args.set_type(i, TypeEntries::with_status(k, current));
  }

  void set_return_type(Klass* k) {
    assert(has_return(), "no return!");
    intptr_t current = _ret.type();
    _ret.set_type(TypeEntries::with_status(k, current));
  }

  // An entry for a return value takes less space than an entry for an
  // argument, so if the remainder of the number of cells divided by
  // the number of cells for an argument is not zero, a return value
  // is profiled in this object.
  bool has_return() const {
    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
    assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
    return res;
  }

  // An entry for a return value takes less space than an entry for an
  // argument so if the number of cells exceeds the number of cells
  // needed for an argument, this object contains type information for
  // at least one argument.
  bool has_arguments() const {
    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
    assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
    return res;
  }

  // Code generation support
  static ByteSize args_data_offset() {
    return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
  }

  ByteSize argument_type_offset(int i) {
    return _args.type_offset(i);
  }

  ByteSize return_type_offset() {
    return _ret.type_offset();
  }

  // GC support
  virtual void clean_weak_klass_links(bool always_clean) {
    ReceiverTypeData::clean_weak_klass_links(always_clean);
    if (has_arguments()) {
      _args.clean_weak_klass_links(always_clean);
    }
    if (has_return()) {
      _ret.clean_weak_klass_links(always_clean);
    }
  }

  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
};
1435
1436// RetData
1437//
1438// A RetData is used to access profiling information for a ret bytecode.
1439// It is composed of a count of the number of times that the ret has
1440// been executed, followed by a series of triples of the form
1441// (bci, count, di) which count the number of times that some bci was the
1442// target of the ret and cache a corresponding data displacement.
class RetData : public CounterData {
protected:
  // Cell layout: the counter header followed by BciProfileWidth rows of
  // (bci, count, displacement) triples.
  enum {
    bci0_offset = counter_cell_count,
    count0_offset,
    displacement0_offset,
    ret_row_cell_count = (displacement0_offset + 1) - bci0_offset
  };

  void set_bci(uint row, int bci) {
    assert((uint)row < row_limit(), "oob");
    set_int_at(bci0_offset + row * ret_row_cell_count, bci);
  }
  // Store-release variant of set_bci(); defined out of line.
  // NOTE(review): presumably used to publish a freshly-filled row to
  // concurrent readers - confirm against the definition in methodData.cpp.
  void release_set_bci(uint row, int bci);
  void set_bci_count(uint row, uint count) {
    assert((uint)row < row_limit(), "oob");
    set_uint_at(count0_offset + row * ret_row_cell_count, count);
  }
  void set_bci_displacement(uint row, int disp) {
    set_int_at(displacement0_offset + row * ret_row_cell_count, disp);
  }

public:
  RetData(DataLayout* layout) : CounterData(layout) {
    assert(layout->tag() == DataLayout::ret_data_tag, "wrong type");
  }

  virtual bool is_RetData() const { return true; }

  enum {
    no_bci = -1 // value of bci when bci1/2 are not in use.
  };

  static int static_cell_count() {
    return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count;
  }

  virtual int cell_count() const {
    return static_cell_count();
  }

  // Number of (bci, count, displacement) rows in the profile.
  static uint row_limit() {
    return BciProfileWidth;
  }
  static int bci_cell_index(uint row) {
    return bci0_offset + row * ret_row_cell_count;
  }
  static int bci_count_cell_index(uint row) {
    return count0_offset + row * ret_row_cell_count;
  }
  static int bci_displacement_cell_index(uint row) {
    return displacement0_offset + row * ret_row_cell_count;
  }

  // Direct accessors
  int bci(uint row) const {
    return int_at(bci_cell_index(row));
  }
  uint bci_count(uint row) const {
    return uint_at(bci_count_cell_index(row));
  }
  int bci_displacement(uint row) const {
    return int_at(bci_displacement_cell_index(row));
  }

  // Interpreter Runtime support
  // Records return_bci in this profile and returns the data pointer the
  // interpreter should continue with; defined in methodData.cpp.
  address fixup_ret(int return_bci, MethodData* mdo);

  // Code generation support
  static ByteSize bci_offset(uint row) {
    return cell_offset(bci_cell_index(row));
  }
  static ByteSize bci_count_offset(uint row) {
    return cell_offset(bci_count_cell_index(row));
  }
  static ByteSize bci_displacement_offset(uint row) {
    return cell_offset(bci_displacement_cell_index(row));
  }

  // Specific initialization.
  void post_initialize(BytecodeStream* stream, MethodData* mdo);

  void print_data_on(outputStream* st, const char* extra = NULL) const;
};
1527
1528// BranchData
1529//
1530// A BranchData is used to access profiling data for a two-way branch.
1531// It consists of taken and not_taken counts as well as a data displacement
1532// for the taken case.
1533class BranchData : public JumpData {
1534 friend class VMStructs;
1535 friend class JVMCIVMStructs;
1536protected:
1537 enum {
1538 not_taken_off_set = jump_cell_count,
1539 branch_cell_count
1540 };
1541
1542 void set_displacement(int displacement) {
1543 set_int_at(displacement_off_set, displacement);
1544 }
1545
1546public:
1547 BranchData(DataLayout* layout) : JumpData(layout) {
1548 assert(layout->tag() == DataLayout::branch_data_tag, "wrong type");
1549 }
1550
1551 virtual bool is_BranchData() const { return true; }
1552
1553 static int static_cell_count() {
1554 return branch_cell_count;
1555 }
1556
1557 virtual int cell_count() const {
1558 return static_cell_count();
1559 }
1560
1561 // Direct accessor
1562 uint not_taken() const {
1563 return uint_at(not_taken_off_set);
1564 }
1565
1566 void set_not_taken(uint cnt) {
1567 set_uint_at(not_taken_off_set, cnt);
1568 }
1569
1570 uint inc_not_taken() {
1571 uint cnt = not_taken() + 1;
1572 // Did we wrap? Will compiler screw us??
1573 if (cnt == 0) cnt--;
1574 set_uint_at(not_taken_off_set, cnt);
1575 return cnt;
1576 }
1577
1578 // Code generation support
1579 static ByteSize not_taken_offset() {
1580 return cell_offset(not_taken_off_set);
1581 }
1582 static ByteSize branch_data_size() {
1583 return cell_offset(branch_cell_count);
1584 }
1585
1586 // Specific initialization.
1587 void post_initialize(BytecodeStream* stream, MethodData* mdo);
1588
1589 void print_data_on(outputStream* st, const char* extra = NULL) const;
1590};
1591
1592// ArrayData
1593//
// An ArrayData is a base class for accessing profiling data which does
1595// not have a statically known size. It consists of an array length
1596// and an array start.
1597class ArrayData : public ProfileData {
1598 friend class VMStructs;
1599 friend class JVMCIVMStructs;
1600protected:
1601 friend class DataLayout;
1602
1603 enum {
1604 array_len_off_set,
1605 array_start_off_set
1606 };
1607
1608 uint array_uint_at(int index) const {
1609 int aindex = index + array_start_off_set;
1610 return uint_at(aindex);
1611 }
1612 int array_int_at(int index) const {
1613 int aindex = index + array_start_off_set;
1614 return int_at(aindex);
1615 }
1616 oop array_oop_at(int index) const {
1617 int aindex = index + array_start_off_set;
1618 return oop_at(aindex);
1619 }
1620 void array_set_int_at(int index, int value) {
1621 int aindex = index + array_start_off_set;
1622 set_int_at(aindex, value);
1623 }
1624
1625 // Code generation support for subclasses.
1626 static ByteSize array_element_offset(int index) {
1627 return cell_offset(array_start_off_set + index);
1628 }
1629
1630public:
1631 ArrayData(DataLayout* layout) : ProfileData(layout) {}
1632
1633 virtual bool is_ArrayData() const { return true; }
1634
1635 static int static_cell_count() {
1636 return -1;
1637 }
1638
1639 int array_len() const {
1640 return int_at_unchecked(array_len_off_set);
1641 }
1642
1643 virtual int cell_count() const {
1644 return array_len() + 1;
1645 }
1646
1647 // Code generation support
1648 static ByteSize array_len_offset() {
1649 return cell_offset(array_len_off_set);
1650 }
1651 static ByteSize array_start_offset() {
1652 return cell_offset(array_start_off_set);
1653 }
1654};
1655
1656// MultiBranchData
1657//
1658// A MultiBranchData is used to access profiling information for
1659// a multi-way branch (*switch bytecodes). It consists of a series
1660// of (count, displacement) pairs, which count the number of times each
// case was taken and specify the data displacement for each branch target.
class MultiBranchData : public ArrayData {
  friend class VMStructs;
  friend class JVMCIVMStructs;
protected:
  // Array layout: a (count, displacement) pair for the default case,
  // followed by one (count, displacement) pair per explicit case.
  enum {
    default_count_off_set,
    default_disaplacement_off_set, // (sic: historical misspelling, kept for compatibility)
    case_array_start
  };
  // Relative offsets within one per-case pair.
  enum {
    relative_count_off_set,
    relative_displacement_off_set,
    per_case_cell_count
  };

  void set_default_displacement(int displacement) {
    array_set_int_at(default_disaplacement_off_set, displacement);
  }
  void set_displacement_at(int index, int displacement) {
    array_set_int_at(case_array_start +
                     index * per_case_cell_count +
                     relative_displacement_off_set,
                     displacement);
  }

public:
  MultiBranchData(DataLayout* layout) : ArrayData(layout) {
    assert(layout->tag() == DataLayout::multi_branch_data_tag, "wrong type");
  }

  virtual bool is_MultiBranchData() const { return true; }

  // Cells needed for the *switch bytecode at the stream's current bci;
  // defined in methodData.cpp.
  static int compute_cell_count(BytecodeStream* stream);

  int number_of_cases() const {
    int alen = array_len() - 2; // get rid of default case here.
    assert(alen % per_case_cell_count == 0, "must be even");
    return (alen / per_case_cell_count);
  }

  uint default_count() const {
    return array_uint_at(default_count_off_set);
  }
  int default_displacement() const {
    return array_int_at(default_disaplacement_off_set);
  }

  uint count_at(int index) const {
    return array_uint_at(case_array_start +
                         index * per_case_cell_count +
                         relative_count_off_set);
  }
  int displacement_at(int index) const {
    return array_int_at(case_array_start +
                        index * per_case_cell_count +
                        relative_displacement_off_set);
  }

  // Code generation support
  static ByteSize default_count_offset() {
    return array_element_offset(default_count_off_set);
  }
  static ByteSize default_displacement_offset() {
    return array_element_offset(default_disaplacement_off_set);
  }
  static ByteSize case_count_offset(int index) {
    return case_array_offset() +
           (per_case_size() * index) +
           relative_count_offset();
  }
  static ByteSize case_array_offset() {
    return array_element_offset(case_array_start);
  }
  static ByteSize per_case_size() {
    return in_ByteSize(per_case_cell_count) * cell_size;
  }
  static ByteSize relative_count_offset() {
    return in_ByteSize(relative_count_off_set) * cell_size;
  }
  static ByteSize relative_displacement_offset() {
    return in_ByteSize(relative_displacement_off_set) * cell_size;
  }

  // Specific initialization.
  void post_initialize(BytecodeStream* stream, MethodData* mdo);

  void print_data_on(outputStream* st, const char* extra = NULL) const;
};
1750
1751class ArgInfoData : public ArrayData {
1752
1753public:
1754 ArgInfoData(DataLayout* layout) : ArrayData(layout) {
1755 assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type");
1756 }
1757
1758 virtual bool is_ArgInfoData() const { return true; }
1759
1760
1761 int number_of_args() const {
1762 return array_len();
1763 }
1764
1765 uint arg_modified(int arg) const {
1766 return array_uint_at(arg);
1767 }
1768
1769 void set_arg_modified(int arg, uint val) {
1770 array_set_int_at(arg, val);
1771 }
1772
1773 void print_data_on(outputStream* st, const char* extra = NULL) const;
1774};
1775
1776// ParametersTypeData
1777//
1778// A ParametersTypeData is used to access profiling information about
1779// types of parameters to a method
class ParametersTypeData : public ArrayData {

private:
  // Type entries for the profiled parameters, laid out in the array part.
  TypeStackSlotEntries _parameters;

  static int stack_slot_local_offset(int i) {
    assert_profiling_enabled();
    return array_start_off_set + TypeStackSlotEntries::stack_slot_local_offset(i);
  }

  static int type_local_offset(int i) {
    assert_profiling_enabled();
    return array_start_off_set + TypeStackSlotEntries::type_local_offset(i);
  }

  // True when method-parameter profiling is on; defined out of line.
  static bool profiling_enabled();
  static void assert_profiling_enabled() {
    assert(profiling_enabled(), "method parameters profiling should be on");
  }

public:
  // number_of_parameters() is safe in the initializer list: it only
  // reads the already-initialized array length.
  ParametersTypeData(DataLayout* layout) : ArrayData(layout), _parameters(1, number_of_parameters()) {
    assert(layout->tag() == DataLayout::parameters_type_data_tag, "wrong type");
    // Some compilers (VC++) don't want this passed in member initialization list
    _parameters.set_profile_data(this);
  }

  // Cells needed to profile the parameters of method m; defined out of line.
  static int compute_cell_count(Method* m);

  virtual bool is_ParametersTypeData() const { return true; }

  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);

  int number_of_parameters() const {
    return array_len() / TypeStackSlotEntries::per_arg_count();
  }

  const TypeStackSlotEntries* parameters() const { return &_parameters; }

  // Interpreter stack slot of parameter i.
  uint stack_slot(int i) const {
    return _parameters.stack_slot(i);
  }

  // Record klass k as a seen type for parameter i, merging with the
  // previously recorded state.
  void set_type(int i, Klass* k) {
    intptr_t current = _parameters.type(i);
    _parameters.set_type(i, TypeEntries::with_status((intptr_t)k, current));
  }

  virtual void clean_weak_klass_links(bool always_clean) {
    _parameters.clean_weak_klass_links(always_clean);
  }

  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;

  // Code generation support
  static ByteSize stack_slot_offset(int i) {
    return cell_offset(stack_slot_local_offset(i));
  }

  static ByteSize type_offset(int i) {
    return cell_offset(type_local_offset(i));
  }
};
1842
1843// SpeculativeTrapData
1844//
1845// A SpeculativeTrapData is used to record traps due to type
1846// speculation. It records the root of the compilation: that type
1847// speculation is wrong in the context of one compilation (for
1848// method1) doesn't mean it's wrong in the context of another one (for
1849// method2). Type speculation could have more/different data in the
1850// context of the compilation of method2 and it's worthwhile to try an
1851// optimization that failed for compilation of method1 in the context
1852// of compilation of method2.
1853// Space for SpeculativeTrapData entries is allocated from the extra
1854// data space in the MDO. If we run out of space, the trap data for
1855// the ProfileData at that bci is updated.
class SpeculativeTrapData : public ProfileData {
protected:
  enum {
    // Cell holding the Method* that was the root of the compilation.
    speculative_trap_method,
#ifndef _LP64
    // The size of the area for traps is a multiple of the header
    // size, 2 cells on 32 bits. Packed at the end of this area are
    // argument info entries (with tag
    // DataLayout::arg_info_data_tag). The logic in
    // MethodData::bci_to_extra_data() that guarantees traps don't
    // overflow over argument info entries assumes the size of a
    // SpeculativeTrapData is twice the header size. On 32 bits, a
    // SpeculativeTrapData must be 4 cells.
    padding,
#endif
    speculative_trap_cell_count
  };
public:
  SpeculativeTrapData(DataLayout* layout) : ProfileData(layout) {
    assert(layout->tag() == DataLayout::speculative_trap_data_tag, "wrong type");
  }

  virtual bool is_SpeculativeTrapData() const { return true; }

  static int static_cell_count() {
    return speculative_trap_cell_count;
  }

  virtual int cell_count() const {
    return static_cell_count();
  }

  // Direct accessor
  // Root method of the compilation in which the speculation failed.
  Method* method() const {
    return (Method*)intptr_at(speculative_trap_method);
  }

  void set_method(Method* m) {
    // Old (redefined) methods may be unloaded; recording them would
    // leave a dangling pointer.
    assert(!m->is_old(), "cannot add old methods");
    set_intptr_at(speculative_trap_method, (intptr_t)m);
  }

  // Code generation support.
  static ByteSize method_offset() {
    return cell_offset(speculative_trap_method);
  }

  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
};
1904
1905// MethodData*
1906//
1907// A MethodData* holds information which has been collected about
1908// a method. Its layout looks like this:
1909//
1910// -----------------------------
1911// | header |
1912// | klass |
1913// -----------------------------
1914// | method |
1915// | size of the MethodData* |
1916// -----------------------------
1917// | Data entries... |
1918// | (variable size) |
1919// | |
1920// . .
1921// . .
1922// . .
1923// | |
1924// -----------------------------
1925//
1926// The data entry area is a heterogeneous array of DataLayouts. Each
1927// DataLayout in the array corresponds to a specific bytecode in the
1928// method. The entries in the array are sorted by the corresponding
1929// bytecode. Access to the data is via resource-allocated ProfileData,
1930// which point to the underlying blocks of DataLayout structures.
1931//
// During interpretation, if profiling is enabled, the interpreter
1933// maintains a method data pointer (mdp), which points at the entry
1934// in the array corresponding to the current bci. In the course of
// interpretation, when a bytecode is encountered that has profile data
1936// associated with it, the entry pointed to by mdp is updated, then the
1937// mdp is adjusted to point to the next appropriate DataLayout. If mdp
1938// is NULL to begin with, the interpreter assumes that the current method
1939// is not (yet) being profiled.
1940//
1941// In MethodData* parlance, "dp" is a "data pointer", the actual address
1942// of a DataLayout element. A "di" is a "data index", the offset in bytes
1943// from the base of the data entry array. A "displacement" is the byte offset
1944// in certain ProfileData objects that indicate the amount the mdp must be
1945// adjusted in the event of a change in control flow.
1946//
1947
// Closure used when cleaning the extra-data section of an MDO:
// is_live() decides whether the Method* referenced by an extra-data
// entry should be kept.
class CleanExtraDataClosure : public StackObj {
public:
  virtual bool is_live(Method* m) = 0;
};
1952
1953
1954#if INCLUDE_JVMCI
1955// Encapsulates an encoded speculation reason. These are linked together in
1956// a list that is atomically appended to during deoptimization. Entries are
1957// never removed from the list.
1958// @see jdk.vm.ci.hotspot.HotSpotSpeculationLog.HotSpotSpeculationEncoding
class FailedSpeculation: public CHeapObj<mtCompiler> {
 private:
  // The length of HotSpotSpeculationEncoding.toByteArray(). The data itself
  // is an array embedded at the end of this object.
  int _data_len;

  // Next entry in a linked list.
  FailedSpeculation* _next;

  // Private: instances are only created through add_failed_speculation().
  FailedSpeculation(address data, int data_len);

  FailedSpeculation** next_adr() { return &_next; }

  // Placement new operator for inlining the speculation data into
  // the FailedSpeculation object. fs_size must cover the object plus
  // the trailing data array.
  void* operator new(size_t size, size_t fs_size) throw();

 public:
  // Start of the embedded speculation data (immediately after the object).
  char* data() { return (char*)(((address) this) + sizeof(FailedSpeculation)); }
  int data_len() const { return _data_len; }
  FailedSpeculation* next() const { return _next; }

  // Atomically appends a speculation from nm to the list whose head is at (*failed_speculations_address).
  // Returns false if the FailedSpeculation object could not be allocated.
  static bool add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len);

  // Frees all entries in the linked list whose head is at (*failed_speculations_address).
  static void free_failed_speculations(FailedSpeculation** failed_speculations_address);
};
1988#endif
1989
class MethodData : public Metadata {
  friend class VMStructs;
  friend class JVMCIVMStructs;
private:
  friend class ProfileData;
  friend class TypeEntriesAtCall;

  // If you add a new field that points to any metaspace object, you
  // must add this field to MethodData::metaspace_pointers_do().

  // Back pointer to the Method*
  Method* _method;

  // Size of this oop in bytes
  int _size;

  // Cached hint for bci_to_dp and bci_to_data
  int _hint_di;

  // Guards the extra data section; exposed via extra_data_lock().
  Mutex _extra_data_lock;

  MethodData(const methodHandle& method, int size, TRAPS);
public:
  static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS);
  MethodData() : _extra_data_lock(Monitor::leaf, "MDO extra data lock") {}; // For ciMethodData

  bool is_methodData() const volatile { return true; }
  void initialize();

  // Whole-method sticky bits and flags
  enum {
    _trap_hist_limit = 25 JVMCI_ONLY(+5), // decoupled from Deoptimization::Reason_LIMIT
    _trap_hist_mask = max_jubyte,
    _extra_data_count = 4 // extra DataLayout headers, for trap history
  }; // Public flag values
private:
  uint _nof_decompiles;          // count of all nmethod removals
  uint _nof_overflow_recompiles; // recompile count, excluding recomp. bits
  uint _nof_overflow_traps;      // trap count, excluding _trap_hist
  // Per-reason trap counters; each is a saturating byte (see trap_count()).
  union {
    intptr_t _align;
    u1 _array[JVMCI_ONLY(2 *) _trap_hist_limit];
  } _trap_hist;

  // Support for interprocedural escape analysis, from Thomas Kotzmann.
  intx _eflags;       // flags on escape information
  intx _arg_local;    // bit set of non-escaping arguments
  intx _arg_stack;    // bit set of stack-allocatable arguments
  intx _arg_returned; // bit set of returned arguments

  int _creation_mileage; // method mileage at MDO creation

  // How many invocations has this MDO seen?
  // These counters are used to determine the exact age of MDO.
  // We need those because in tiered a method can be concurrently
  // executed at different levels.
  InvocationCounter _invocation_counter;
  // Same for backedges.
  InvocationCounter _backedge_counter;
  // Counter values at the time profiling started.
  int _invocation_counter_start;
  int _backedge_counter_start;
  uint _tenure_traps;
  int _invoke_mask;   // per-method Tier0InvokeNotifyFreqLog
  int _backedge_mask; // per-method Tier0BackedgeNotifyFreqLog

#if INCLUDE_RTM_OPT
  // State of RTM code generation during compilation of the method
  int _rtm_state;
#endif

  // Number of loops and blocks is computed when compiling the first
  // time with C1. It is used to determine if method is trivial.
  short _num_loops;
  short _num_blocks;
  // Does this method contain anything worth profiling?
  enum WouldProfile {unknown, no_profile, profile};
  WouldProfile _would_profile;

#if INCLUDE_JVMCI
  // Support for HotSpotMethodData.setCompiledIRSize(int)
  int _jvmci_ir_size;
  // Head of the linked list of failed speculations for this method.
  FailedSpeculation* _failed_speculations;
#endif

  // Size of _data array in bytes. (Excludes header and extra_data fields.)
  int _data_size;

  // data index for the area dedicated to parameters. -1 if no
  // parameter profiling.
  enum { no_parameters = -2, parameters_uninitialized = -1 };
  int _parameters_type_data_di;
  // Bytes occupied by the parameter profiling area; 0 when absent.
  int parameters_size_in_bytes() const {
    ParametersTypeData* param = parameters_type_data();
    return param == NULL ? 0 : param->size_in_bytes();
  }

  // Beginning of the data entries
  intptr_t _data[1];

  // Helper for size computation
  static int compute_data_size(BytecodeStream* stream);
  static int bytecode_cell_count(Bytecodes::Code code);
  static bool is_speculative_trap_bytecode(Bytecodes::Code code);
  enum { no_profile_data = -1, variable_cell_count = -2 };

  // Helper for initialization
  DataLayout* data_layout_at(int data_index) const {
    assert(data_index % sizeof(intptr_t) == 0, "unaligned");
    return (DataLayout*) (((address)_data) + data_index);
  }

  // Initialize an individual data segment. Returns the size of
  // the segment in bytes.
  int initialize_data(BytecodeStream* stream, int data_index);

  // Helper for data_at: one-past-the-end of the normal data section.
  DataLayout* limit_data_position() const {
    return data_layout_at(_data_size);
  }
  bool out_of_bounds(int data_index) const {
    return data_index >= data_size();
  }

  // Give each of the data entries a chance to perform specific
  // data initialization.
  void post_initialize(BytecodeStream* stream);

  // hint accessors
  int hint_di() const { return _hint_di; }
  void set_hint_di(int di) {
    assert(!out_of_bounds(di), "hint_di out of bounds");
    _hint_di = di;
  }
  // Returns a data entry at or before bci from which a linear search
  // can start: the cached hint if it does not overshoot bci, else the
  // first entry.
  ProfileData* data_before(int bci) {
    // avoid SEGV on this edge case
    if (data_size() == 0)
      return NULL;
    int hint = hint_di();
    if (data_layout_at(hint)->bci() <= bci)
      return data_at(hint);
    return first_data();
  }

  // What is the index of the first data entry?
  int first_di() const { return 0; }

  ProfileData* bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp, bool concurrent);
  // Find or create an extra ProfileData:
  ProfileData* bci_to_extra_data(int bci, Method* m, bool create_if_missing);

  // return the argument info cell
  ArgInfoData *arg_info();

  // Values of the profile_arguments/return/parameters command-line flags.
  enum {
    no_type_profile = 0,
    type_profile_jsr292 = 1,
    type_profile_all = 2
  };

  static bool profile_jsr292(const methodHandle& m, int bci);
  static bool profile_unsafe(const methodHandle& m, int bci);
  static int profile_arguments_flag();
  static bool profile_all_arguments();
  static bool profile_arguments_for_invoke(const methodHandle& m, int bci);
  static int profile_return_flag();
  static bool profile_all_return();
  static bool profile_return_for_invoke(const methodHandle& m, int bci);
  static int profile_parameters_flag();
  static bool profile_parameters_jsr292_only();
  static bool profile_all_parameters();

  void clean_extra_data_helper(DataLayout* dp, int shift, bool reset = false);
  void verify_extra_data_clean(CleanExtraDataClosure* cl);

public:
  // Remove extra data entries whose methods cl reports as not live.
  void clean_extra_data(CleanExtraDataClosure* cl);

  // Fixed header size, in words (excludes the variable _data array).
  static int header_size() {
    return sizeof(MethodData)/wordSize;
  }

  // Compute the size of a MethodData* before it is created.
  static int compute_allocation_size_in_bytes(const methodHandle& method);
  static int compute_allocation_size_in_words(const methodHandle& method);
  static int compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps);

  // Determine if a given bytecode can have profile information.
  static bool bytecode_has_profile(Bytecodes::Code code) {
    return bytecode_cell_count(code) != no_profile_data;
  }

  // reset into original state
  void init();

  // My size
  int size_in_bytes() const { return _size; }
  int size() const { return align_metadata_size(align_up(_size, BytesPerWord)/BytesPerWord); }
#if INCLUDE_SERVICES
  void collect_statistics(KlassSizeStats *sz) const;
#endif

  int creation_mileage() const { return _creation_mileage; }
  void set_creation_mileage(int x) { _creation_mileage = x; }

  // Saturates at count_limit once the counter's carry bit is set.
  int invocation_count() {
    if (invocation_counter()->carry()) {
      return InvocationCounter::count_limit;
    }
    return invocation_counter()->count();
  }
  int backedge_count() {
    if (backedge_counter()->carry()) {
      return InvocationCounter::count_limit;
    }
    return backedge_counter()->count();
  }

  int invocation_count_start() {
    if (invocation_counter()->carry()) {
      return 0;
    }
    return _invocation_counter_start;
  }

  int backedge_count_start() {
    if (backedge_counter()->carry()) {
      return 0;
    }
    return _backedge_counter_start;
  }

  // Counts accumulated since the last reset_start_counters().
  int invocation_count_delta() { return invocation_count() - invocation_count_start(); }
  int backedge_count_delta() { return backedge_count() - backedge_count_start(); }

  void reset_start_counters() {
    _invocation_counter_start = invocation_count();
    _backedge_counter_start = backedge_count();
  }

  InvocationCounter* invocation_counter() { return &_invocation_counter; }
  InvocationCounter* backedge_counter() { return &_backedge_counter; }

#if INCLUDE_JVMCI
  // Address of the failed-speculation list head, for atomic append
  // (see FailedSpeculation::add_failed_speculation).
  FailedSpeculation** get_failed_speculations_address() {
    return &_failed_speculations;
  }
#endif

#if INCLUDE_RTM_OPT
  int rtm_state() const {
    return _rtm_state;
  }
  void set_rtm_state(RTMState rstate) {
    _rtm_state = (int)rstate;
  }
  void atomic_set_rtm_state(RTMState rstate) {
    Atomic::store((int)rstate, &_rtm_state);
  }

  static int rtm_state_offset_in_bytes() {
    return offset_of(MethodData, _rtm_state);
  }
#endif

  void set_would_profile(bool p) { _would_profile = p ? profile : no_profile; }
  // Note: 'unknown' (not yet computed) also reads as "would profile".
  bool would_profile() const { return _would_profile != no_profile; }

  int num_loops() const { return _num_loops; }
  void set_num_loops(int n) { _num_loops = n; }
  int num_blocks() const { return _num_blocks; }
  void set_num_blocks(int n) { _num_blocks = n; }

  bool is_mature() const; // consult mileage and ProfileMaturityPercentage
  static int mileage_of(Method* m);

  // Support for interprocedural escape analysis, from Thomas Kotzmann.
  enum EscapeFlag {
    estimated = 1 << 0,
    return_local = 1 << 1,
    return_allocated = 1 << 2,
    allocated_escapes = 1 << 3,
    unknown_modified = 1 << 4
  };

  intx eflags() { return _eflags; }
  intx arg_local() { return _arg_local; }
  intx arg_stack() { return _arg_stack; }
  intx arg_returned() { return _arg_returned; }
  uint arg_modified(int a) { ArgInfoData *aid = arg_info();
                             assert(aid != NULL, "arg_info must be not null");
                             assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
                             return aid->arg_modified(a); }

  void set_eflags(intx v) { _eflags = v; }
  void set_arg_local(intx v) { _arg_local = v; }
  void set_arg_stack(intx v) { _arg_stack = v; }
  void set_arg_returned(intx v) { _arg_returned = v; }
  void set_arg_modified(int a, uint v) { ArgInfoData *aid = arg_info();
                                         assert(aid != NULL, "arg_info must be not null");
                                         assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
                                         aid->set_arg_modified(a, v); }

  void clear_escape_info() { _eflags = _arg_local = _arg_stack = _arg_returned = 0; }

  // Location and size of data area
  address data_base() const {
    return (address) _data;
  }
  int data_size() const {
    return _data_size;
  }

  // Accessors
  Method* method() const { return _method; }

  // Get the data at an arbitrary (sort of) data index.
  ProfileData* data_at(int data_index) const;

  // Walk through the data in order.
  ProfileData* first_data() const { return data_at(first_di()); }
  ProfileData* next_data(ProfileData* current) const;
  bool is_valid(ProfileData* current) const { return current != NULL; }

  // Convert a dp (data pointer) to a di (data index).
  int dp_to_di(address dp) const {
    return dp - ((address)_data);
  }

  // bci to di/dp conversion.
  address bci_to_dp(int bci);
  int bci_to_di(int bci) {
    return dp_to_di(bci_to_dp(bci));
  }

  // Get the data at an arbitrary bci, or NULL if there is none.
  ProfileData* bci_to_data(int bci);

  // Same, but try to create an extra_data record if one is needed.
  // Tries, in order: a normal entry (only when m is NULL), an extra
  // SpeculativeTrapData entry for m, then a normal entry, and finally
  // a non-speculative extra entry.
  ProfileData* allocate_bci_to_data(int bci, Method* m) {
    ProfileData* data = NULL;
    // If m not NULL, try to allocate a SpeculativeTrapData entry
    if (m == NULL) {
      data = bci_to_data(bci);
    }
    if (data != NULL) {
      return data;
    }
    data = bci_to_extra_data(bci, m, true);
    if (data != NULL) {
      return data;
    }
    // If SpeculativeTrapData allocation fails try to allocate a
    // regular entry
    data = bci_to_data(bci);
    if (data != NULL) {
      return data;
    }
    return bci_to_extra_data(bci, NULL, true);
  }

  // Add a handful of extra data records, for trap tracking.
  DataLayout* extra_data_base() const { return limit_data_position(); }
  DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); }
  DataLayout* args_data_limit() const { return (DataLayout*)((address)this + size_in_bytes() -
                                                             parameters_size_in_bytes()); }
  int extra_data_size() const { return (address)extra_data_limit() - (address)extra_data_base(); }
  static DataLayout* next_extra(DataLayout* dp);

  // Return (uint)-1 for overflow.
  uint trap_count(int reason) const {
    assert((uint)reason < JVMCI_ONLY(2*) _trap_hist_limit, "oob");
    // A saturated byte counter (_trap_hist_mask) wraps to -1 here and
    // converts to (uint)-1, the documented overflow value.
    return (int)((_trap_hist._array[reason]+1) & _trap_hist_mask) - 1;
  }
  // For loops:
  static uint trap_reason_limit() { return _trap_hist_limit; }
  static uint trap_count_limit() { return _trap_hist_mask; }
  uint inc_trap_count(int reason) {
    // Count another trap, anywhere in this method.
    assert(reason >= 0, "must be single trap");
    assert((uint)reason < JVMCI_ONLY(2*) _trap_hist_limit, "oob");
    uint cnt1 = 1 + _trap_hist._array[reason];
    if ((cnt1 & _trap_hist_mask) != 0) { // if no counter overflow...
      _trap_hist._array[reason] = cnt1;
      return cnt1;
    } else {
      // Byte counter saturated; fall back to the shared overflow counter.
      return _trap_hist_mask + (++_nof_overflow_traps);
    }
  }

  uint overflow_trap_count() const {
    return _nof_overflow_traps;
  }
  uint overflow_recompile_count() const {
    return _nof_overflow_recompiles;
  }
  void inc_overflow_recompile_count() {
    _nof_overflow_recompiles += 1;
  }
  uint decompile_count() const {
    return _nof_decompiles;
  }
  // Bumps the decompile count; once past PerMethodRecompilationCutoff
  // the method is made not compilable at the top tier.
  void inc_decompile_count() {
    _nof_decompiles += 1;
    if (decompile_count() > (uint)PerMethodRecompilationCutoff) {
      method()->set_not_compilable("decompile_count > PerMethodRecompilationCutoff", CompLevel_full_optimization);
    }
  }
  uint tenure_traps() const {
    return _tenure_traps;
  }
  void inc_tenure_traps() {
    _tenure_traps += 1;
  }

  // Return pointer to area dedicated to parameters in MDO
  ParametersTypeData* parameters_type_data() const {
    assert(_parameters_type_data_di != parameters_uninitialized, "called too early");
    return _parameters_type_data_di != no_parameters ? data_layout_at(_parameters_type_data_di)->data_in()->as_ParametersTypeData() : NULL;
  }

  int parameters_type_data_di() const {
    assert(_parameters_type_data_di != parameters_uninitialized && _parameters_type_data_di != no_parameters, "no args type data");
    return _parameters_type_data_di;
  }

  // Support for code generation
  static ByteSize data_offset() {
    return byte_offset_of(MethodData, _data[0]);
  }

  static ByteSize trap_history_offset() {
    return byte_offset_of(MethodData, _trap_hist._array);
  }

  static ByteSize invocation_counter_offset() {
    return byte_offset_of(MethodData, _invocation_counter);
  }

  static ByteSize backedge_counter_offset() {
    return byte_offset_of(MethodData, _backedge_counter);
  }

  static ByteSize invoke_mask_offset() {
    return byte_offset_of(MethodData, _invoke_mask);
  }

  static ByteSize backedge_mask_offset() {
    return byte_offset_of(MethodData, _backedge_mask);
  }

  static ByteSize parameters_type_data_di_offset() {
    return byte_offset_of(MethodData, _parameters_type_data_di);
  }

  virtual void metaspace_pointers_do(MetaspaceClosure* iter);
  virtual MetaspaceObj::Type type() const { return MethodDataType; }

  // Deallocation support - no pointer fields to deallocate
  void deallocate_contents(ClassLoaderData* loader_data) {}

  // GC support
  void set_size(int object_size_in_bytes) { _size = object_size_in_bytes; }

  // Printing
  void print_on (outputStream* st) const;
  void print_value_on(outputStream* st) const;

  // printing support for method data
  void print_data_on(outputStream* st) const;

  const char* internal_name() const { return "{method data}"; }

  // verification
  void verify_on(outputStream* st);
  void verify_data_on(outputStream* st);

  static bool profile_parameters_for_method(const methodHandle& m);
  static bool profile_arguments();
  static bool profile_arguments_jsr292_only();
  static bool profile_return();
  static bool profile_parameters();
  static bool profile_return_jsr292_only();

  void clean_method_data(bool always_clean);
  void clean_weak_method_links();
  DEBUG_ONLY(void verify_clean_weak_method_links();)
  Mutex* extra_data_lock() { return &_extra_data_lock; }
};
2479
2480#endif // SHARE_OOPS_METHODDATA_HPP
2481