1 | /* |
2 | * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #ifndef SHARE_CODE_VTABLESTUBS_HPP |
26 | #define SHARE_CODE_VTABLESTUBS_HPP |
27 | |
28 | #include "asm/macroAssembler.hpp" |
29 | #include "code/vmreg.hpp" |
30 | #include "memory/allocation.hpp" |
31 | |
// A VtableStub holds an individual code stub for a pair (vtable index, #args)
// for either itables or vtables. There is a one-to-one relationship between a
// VtableStub and such a pair.
34 | |
35 | // A word on VtableStub sizing: |
36 | // Such a vtable/itable stub consists of the instance data |
37 | // and an immediately following CodeBuffer. |
38 | // Unfortunately, the required space for the code buffer varies, depending on |
39 | // the setting of compile time macros (PRODUCT, ASSERT, ...) and of command line |
40 | // parameters. Actual data may have an influence on the size as well. |
41 | // |
// A simple approximation for the VtableStub size would be to just take a value
// "large enough" for all circumstances - a worst-case estimate.
// As there can be many stubs - and they never go away - we certainly don't
// want to waste more code cache space than absolutely necessary.
46 | // |
// We need a different approach which, as far as possible, is independent of
// or adaptive to code size variations. These variations may be caused by
// changed compile time or run time switches as well as by changed emitter code.
50 | // |
51 | // Here is the idea: |
52 | // For the first stub we generate, we allocate a "large enough" code buffer. |
53 | // Once all instructions are emitted, we know the actual size of the stub. |
54 | // Remembering that size allows us to allocate a tightly matching code buffer |
55 | // for all subsequent stubs. That covers all "static variance", i.e. all variance |
56 | // that is due to compile time macros, command line parameters, machine capabilities, |
// and other influences which are immutable for the lifespan of the VM.
58 | // |
// Life isn't always that easy. Code size may depend on actual data, "load constant"
// being an example of that. All code segments with such "dynamic variance" require
61 | // additional care. We need to know or estimate the worst case code size for each |
62 | // such segment. With that knowledge, we can maintain a "slop counter" in the |
63 | // platform-specific stub emitters. It accumulates the difference between worst-case |
64 | // and actual code size. When the stub is fully generated, the actual stub size is |
65 | // adjusted (increased) by the slop counter value. |
66 | // |
// As a result, we allocate all code buffers but the first with the same, tightly matching size.
68 | // |
69 | |
70 | // VtableStubs creates the code stubs for compiled calls through vtables. |
71 | // There is one stub per (vtable index, args_size) pair, and the stubs are |
72 | // never deallocated. They don't need to be GCed because they contain no oops. |
73 | class VtableStub; |
74 | |
75 | class VtableStubs : AllStatic { |
76 | public: // N must be public (some compilers need this for _table) |
77 | enum { |
78 | N = 256, // size of stub table; must be power of two |
79 | mask = N - 1 |
80 | }; |
81 | |
82 | private: |
83 | friend class VtableStub; |
84 | static VtableStub* _table[N]; // table of existing stubs |
85 | static int _number_of_vtable_stubs; // number of stubs created so far (for statistics) |
86 | static int _vtab_stub_size; // current size estimate for vtable stub (quasi-constant) |
87 | static int _itab_stub_size; // current size estimate for itable stub (quasi-constant) |
88 | |
89 | static VtableStub* create_vtable_stub(int vtable_index); |
90 | static VtableStub* create_itable_stub(int vtable_index); |
91 | static VtableStub* lookup (bool is_vtable_stub, int vtable_index); |
92 | static void enter (bool is_vtable_stub, int vtable_index, VtableStub* s); |
93 | static inline uint hash (bool is_vtable_stub, int vtable_index); |
94 | static address find_stub (bool is_vtable_stub, int vtable_index); |
95 | static void bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s, |
96 | address npe_addr, address ame_addr, bool is_vtable_stub, |
97 | int index, int slop_bytes, int index_dependent_slop); |
  static int     code_size_limit(bool is_vtable_stub);       // current code buffer size to allocate
  static void    check_and_set_size_limit(bool is_vtable_stub,
                                          int  code_size,
                                          int  padding);     // update the size estimate after a stub was emitted
102 | |
103 | public: |
104 | static address find_vtable_stub(int vtable_index) { return find_stub(true, vtable_index); } |
105 | static address find_itable_stub(int itable_index) { return find_stub(false, itable_index); } |
106 | |
107 | static VtableStub* entry_point(address pc); // vtable stub entry point for a pc |
108 | static bool contains(address pc); // is pc within any stub? |
109 | static VtableStub* stub_containing(address pc); // stub containing pc or NULL |
110 | static int number_of_vtable_stubs() { return _number_of_vtable_stubs; } |
111 | static void initialize(); |
112 | static void vtable_stub_do(void f(VtableStub*)); // iterates over all vtable stubs |
113 | }; |
114 | |
115 | |
116 | class VtableStub { |
117 | private: |
118 | friend class VtableStubs; |
119 | |
120 | static address _chunk; // For allocation |
121 | static address _chunk_end; // For allocation |
122 | static VMReg _receiver_location; // Where to find receiver |
123 | |
124 | VtableStub* _next; // Pointer to next entry in hash table |
125 | const short _index; // vtable index |
126 | short _ame_offset; // Where an AbstractMethodError might occur |
127 | short _npe_offset; // Where a NullPointerException might occur |
  bool           _is_vtable_stub;    // true if vtable stub, false if itable stub
129 | /* code follows here */ // The vtableStub code |
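  //
  // Layout sketch: for a VtableStub* s, the instance data is immediately
  // followed by the stub's code, i.e.
  //
  //   s               -> instance data (fields above)
  //   s->code_begin() -> first code byte, == (address)(s + 1)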
130 | |
131 | void* operator new(size_t size, int code_size) throw(); |
132 | |
133 | VtableStub(bool is_vtable_stub, int index) |
134 | : _next(NULL), _index(index), _ame_offset(-1), _npe_offset(-1), |
135 | _is_vtable_stub(is_vtable_stub) {} |
136 | VtableStub* next() const { return _next; } |
137 | int index() const { return _index; } |
138 | static VMReg receiver_location() { return _receiver_location; } |
139 | void set_next(VtableStub* n) { _next = n; } |
140 | |
141 | public: |
142 | address code_begin() const { return (address)(this + 1); } |
143 | address code_end() const { return code_begin() + VtableStubs::code_size_limit(_is_vtable_stub); } |
144 | address entry_point() const { return code_begin(); } |
145 | static int entry_offset() { return sizeof(class VtableStub); } |
146 | |
147 | bool matches(bool is_vtable_stub, int index) const { |
148 | return _index == index && _is_vtable_stub == is_vtable_stub; |
149 | } |
150 | bool contains(address pc) const { return code_begin() <= pc && pc < code_end(); } |
151 | |
152 | private: |
153 | void set_exception_points(address npe_addr, address ame_addr) { |
154 | _npe_offset = npe_addr - code_begin(); |
155 | _ame_offset = ame_addr - code_begin(); |
    assert(is_abstract_method_error(ame_addr),   "offset must be correct");
    assert(is_null_pointer_exception(npe_addr),  "offset must be correct");
    assert(!is_abstract_method_error(npe_addr),  "offset must be correct");
    assert(!is_null_pointer_exception(ame_addr), "offset must be correct");
160 | } |
161 | |
162 | // platform-dependent routines |
163 | static int pd_code_alignment(); |
164 | // CNC: Removed because vtable stubs are now made with an ideal graph |
165 | // static bool pd_disregard_arg_size(); |
166 | |
  // Round _chunk up so that the code entry point, which immediately follows
  // the VtableStub instance, satisfies the platform's code alignment.
  static void align_chunk() {
    uintptr_t off = (uintptr_t)( _chunk + sizeof(VtableStub) ) % pd_code_alignment();
    if (off != 0) _chunk += pd_code_alignment() - off;
  }
171 | |
172 | public: |
173 | // Query |
174 | bool is_itable_stub() { return !_is_vtable_stub; } |
175 | bool is_vtable_stub() { return _is_vtable_stub; } |
176 | bool is_abstract_method_error(address epc) { return epc == code_begin()+_ame_offset; } |
177 | bool is_null_pointer_exception(address epc) { return epc == code_begin()+_npe_offset; } |
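
  // Sketch of intended use (illustrative): given a faulting pc 'epc' inside
  // some stub, the runtime can classify the fault, e.g.
  //
  //   VtableStub* s = VtableStubs::stub_containing(epc);
  //   if (s != NULL && s->is_null_pointer_exception(epc)) { /* throw NPE */ }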
178 | |
179 | void print_on(outputStream* st) const; |
180 | void print() const; |
181 | |
182 | }; |
183 | |
184 | #endif // SHARE_CODE_VTABLESTUBS_HPP |
185 | |