1 | /* |
2 | * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #ifndef SHARE_OPTO_OUTPUT_HPP |
26 | #define SHARE_OPTO_OUTPUT_HPP |
27 | |
28 | #include "opto/ad.hpp" |
29 | #include "opto/block.hpp" |
30 | #include "opto/node.hpp" |
31 | |
32 | class Arena; |
33 | class Bundle; |
34 | class Block; |
35 | class Block_Array; |
36 | class Node; |
37 | class Node_Array; |
38 | class Node_List; |
39 | class PhaseCFG; |
40 | class PhaseChaitin; |
41 | class Pipeline_Use_Element; |
42 | class Pipeline_Use; |
43 | |
44 | #ifndef PRODUCT |
45 | #define DEBUG_ARG(x) , x |
46 | #else |
47 | #define DEBUG_ARG(x) |
48 | #endif |
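// DEBUG_ARG supplies a trailing argument only in non-product builds.
// Illustrative (hypothetical) use:
//   void emit_insn(Node* n DEBUG_ARG(const char* reason));
// expands to "void emit_insn(Node* n, const char* reason);" when !PRODUCT
// and to "void emit_insn(Node* n);" in product builds.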
49 | |
50 | // Define the initial sizes for allocation of the resizable code buffer |
51 | enum { |
52 | initial_code_capacity = 16 * 1024, |
53 | initial_stub_capacity = 4 * 1024, |
54 | initial_const_capacity = 4 * 1024, |
55 | initial_locs_capacity = 3 * 1024 |
56 | }; |
57 | |
58 | //------------------------------Scheduling---------------------------------- |
59 | // This class contains all the information necessary to implement instruction |
60 | // scheduling and bundling. |
61 | class Scheduling { |
62 | |
63 | private: |
64 | // Arena to use |
65 | Arena *_arena; |
66 | |
67 | // Control-Flow Graph info |
68 | PhaseCFG *_cfg; |
69 | |
70 | // Register Allocation info |
71 | PhaseRegAlloc *_regalloc; |
72 | |
73 | // Number of nodes in the method |
74 | uint _node_bundling_limit; |
75 | |
76 | // List of scheduled nodes. Generated in reverse order |
77 | Node_List _scheduled; |
78 | |
  // List of nodes currently available to be chosen for scheduling
80 | Node_List _available; |
81 | |
  // Per-node bundling information, indexed by node _idx.  For an instruction
  // that begins a bundle, it records how many following nodes are bundled
  // with it.
84 | Bundle *_node_bundling_base; |
85 | |
  // Mapping from register number to the current def or pinch-point Node for
  // that register (used when building register anti-dependences)
87 | Node_List _reg_node; |
88 | |
89 | // Free list for pinch nodes. |
90 | Node_List _pinch_free_list; |
91 | |
92 | // Latency from the beginning of the containing basic block (base 1) |
93 | // for each node. |
94 | unsigned short *_node_latency; |
95 | |
  // Number of uses of each node within its containing basic block.
97 | short *_uses; |
98 | |
99 | // Schedulable portion of current block. Skips Region/Phi/CreateEx up |
100 | // front, branch+proj at end. Also skips Catch/CProj (same as |
101 | // branch-at-end), plus just-prior exception-throwing call. |
102 | uint _bb_start, _bb_end; |
103 | |
104 | // Latency from the end of the basic block as scheduled |
105 | unsigned short *_current_latency; |
106 | |
107 | // Remember the next node |
108 | Node *_next_node; |
109 | |
  // Node chosen to fill the delay slot of an unconditional branch
  // (on architectures that have branch delay slots)
111 | Node *_unconditional_delay_slot; |
112 | |
  // Shared MachNopNode, used wherever a nop must be accounted for during bundling
114 | MachNopNode *_nop; |
115 | |
116 | // Length of the current bundle, in instructions |
117 | uint _bundle_instr_count; |
118 | |
119 | // Current Cycle number, for computing latencies and bundling |
120 | uint _bundle_cycle_number; |
121 | |
  // Resource usage of the bundle currently being accumulated
123 | Pipeline_Use_Element _bundle_use_elements[resource_count]; |
124 | Pipeline_Use _bundle_use; |
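  // (Pipeline_Use, Pipeline_Use_Element and resource_count come from the
  // ADLC-generated pipeline description for the platform, reached via the
  // opto/ad.hpp include above.)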
125 | |
126 | // Dump the available list |
127 | void dump_available() const; |
128 | |
129 | public: |
130 | Scheduling(Arena *arena, Compile &compile); |
131 | |
132 | // Destructor |
133 | NOT_PRODUCT( ~Scheduling(); ) |
134 | |
135 | // Step ahead "i" cycles |
136 | void step(uint i); |
137 | |
138 | // Step ahead 1 cycle, and clear the bundle state (for example, |
139 | // at a branch target) |
140 | void step_and_clear(); |
141 | |
142 | Bundle* node_bundling(const Node *n) { |
143 | assert(valid_bundle_info(n), "oob" ); |
144 | return (&_node_bundling_base[n->_idx]); |
145 | } |
146 | |
147 | bool valid_bundle_info(const Node *n) const { |
148 | return (_node_bundling_limit > n->_idx); |
149 | } |
150 | |
151 | bool starts_bundle(const Node *n) const { |
152 | return (_node_bundling_limit > n->_idx && _node_bundling_base[n->_idx].starts_bundle()); |
153 | } |
154 | |
  // Do the scheduling: walk the basic blocks in reverse order and schedule
  // each block's instructions bottom-up, tracking pipeline resources as
  // bundles are formed (hence _scheduled is built in reverse order).
156 | void DoScheduling(); |
157 | |
158 | // Compute the local latencies walking forward over the list of |
159 | // nodes for a basic block |
160 | void ComputeLocalLatenciesForward(const Block *bb); |
161 | |
162 | // Compute the register antidependencies within a basic block |
163 | void ComputeRegisterAntidependencies(Block *bb); |
164 | void verify_do_def( Node *n, OptoReg::Name def, const char *msg ); |
165 | void verify_good_schedule( Block *b, const char *msg ); |
166 | void anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def ); |
167 | void anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ); |
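
  // The anti-dependence pass keeps a use of a register from being scheduled
  // after a later def (kill) of that register by inserting extra precedence
  // edges.  Roughly: the block-local uses of a register are funneled through
  // a shared "pinch point" node (recycled via _pinch_free_list) so a kill
  // that clobbers many registers does not need an edge to every single use.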
168 | |
169 | // Add a node to the current bundle |
170 | void AddNodeToBundle(Node *n, const Block *bb); |
171 | |
172 | // Add a node to the list of available nodes |
173 | void AddNodeToAvailableList(Node *n); |
174 | |
  // Compute the local use count for the nodes in a block, and put
  // instructions with no uses within the block on the available list
177 | void ComputeUseCount(const Block *bb); |
178 | |
179 | // Choose an instruction from the available list to add to the bundle |
180 | Node * ChooseNodeToBundle(); |
181 | |
182 | // See if this Node fits into the currently accumulating bundle |
183 | bool NodeFitsInBundle(Node *n); |
184 | |
185 | // Decrement the use count for a node |
186 | void DecrementUseCounts(Node *n, const Block *bb); |
187 | |
188 | // Garbage collect pinch nodes for reuse by other blocks. |
189 | void garbage_collect_pinch_nodes(); |
190 | // Clean up a pinch node for reuse (helper for above). |
191 | void cleanup_pinch( Node *pinch ); |
192 | |
193 | // Information for statistics gathering |
194 | #ifndef PRODUCT |
195 | private: |
  // Per-compilation counts of branches and filled unconditional delay slots
197 | uint _branches, _unconditional_delays; |
198 | |
199 | static uint _total_nop_size, _total_method_size; |
200 | static uint _total_branches, _total_unconditional_delays; |
201 | static uint _total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+1]; |
202 | |
203 | public: |
204 | static void print_statistics(); |
205 | |
206 | static void increment_instructions_per_bundle(uint i) { |
207 | _total_instructions_per_bundle[i]++; |
208 | } |
209 | |
210 | static void increment_nop_size(uint s) { |
211 | _total_nop_size += s; |
212 | } |
213 | |
214 | static void increment_method_size(uint s) { |
215 | _total_method_size += s; |
216 | } |
217 | #endif |
218 | |
219 | }; |
220 | |
221 | #endif // SHARE_OPTO_OUTPUT_HPP |
222 | |