1 | /* |
2 | * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "memory/allocation.inline.hpp" |
27 | #include "memory/resourceArea.hpp" |
28 | #include "opto/chaitin.hpp" |
29 | #include "opto/machnode.hpp" |
30 | |
// See if this register (or register pair, or vector) already contains the value.
32 | static bool register_contains_value(Node* val, OptoReg::Name reg, int n_regs, |
33 | Node_List& value) { |
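  // A multi-register value occupies the adjacent slots reg, reg-1, ...,
  // reg-(n_regs-1); every slot must map to 'val' for this to be a match.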
34 | for (int i = 0; i < n_regs; i++) { |
35 | OptoReg::Name nreg = OptoReg::add(reg,-i); |
36 | if (value[nreg] != val) |
37 | return false; |
38 | } |
39 | return true; |
40 | } |
41 | |
42 | //---------------------------may_be_copy_of_callee----------------------------- |
43 | // Check to see if we can possibly be a copy of a callee-save value. |
44 | bool PhaseChaitin::may_be_copy_of_callee( Node *def ) const { |
45 | // Short circuit if there are no callee save registers |
46 | if (_matcher.number_of_saved_registers() == 0) return false; |
47 | |
48 | // Expect only a spill-down and reload on exit for callee-save spills. |
49 | // Chains of copies cannot be deep. |
  // 5008997 - This is wishful thinking. The register allocator seems to
  // be splitting live ranges for callee-save registers to such
  // an extent that in large methods the chains can be very long
  // (50+). The conservative answer is to return true if we don't
  // know, as this prevents optimizations from occurring.
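  //
  // A hypothetical chain we might walk backwards here:
  //   use <-- MachSpillCopy <-- Phi <-- MachSpillCopy <-- Proj(Start)
  // Copies and Phis are stepped through, up to 'limit' nodes, looking for
  // the save-on-entry projection.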
55 | |
56 | const int limit = 60; |
57 | int i; |
58 | for( i=0; i < limit; i++ ) { |
59 | if( def->is_Proj() && def->in(0)->is_Start() && |
60 | _matcher.is_save_on_entry(lrgs(_lrg_map.live_range_id(def)).reg())) |
61 | return true; // Direct use of callee-save proj |
62 | if( def->is_Copy() ) // Copies carry value through |
63 | def = def->in(def->is_Copy()); |
64 | else if( def->is_Phi() ) // Phis can merge it from any direction |
65 | def = def->in(1); |
66 | else |
67 | break; |
68 | guarantee(def != NULL, "must not resurrect dead copy" ); |
69 | } |
  // If we reached the limit and didn't find a callee-save proj,
  // then this may still be a copy of a callee-save value, so we
  // return true as the conservative answer. If we exited the loop
  // early, we must have discovered that it was not a callee-save,
  // else we would have returned already.
75 | return i == limit; |
76 | } |
77 | |
78 | //------------------------------yank----------------------------------- |
79 | // Helper function for yank_if_dead |
80 | int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_List *regnd ) { |
81 | int blk_adjust=0; |
82 | Block *oldb = _cfg.get_block_for_node(old); |
83 | oldb->find_remove(old); |
84 | // Count 1 if deleting an instruction from the current block |
85 | if (oldb == current_block) { |
86 | blk_adjust++; |
87 | } |
88 | _cfg.unmap_node_from_block(old); |
89 | OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg(); |
90 | if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available? |
91 | value->map(old_reg,NULL); // Yank from value/regnd maps |
92 | regnd->map(old_reg,NULL); // This register's value is now unknown |
93 | } |
94 | return blk_adjust; |
95 | } |
96 | |
97 | #ifdef ASSERT |
98 | static bool expected_yanked_node(Node *old, Node *orig_old) { |
  // This code expects only the following original nodes:
  // - a load from the constant table, which may have these data input nodes:
  //   MachConstantBase, MachTemp, MachSpillCopy
  // - Phi nodes that are considered junk
  // - a load-constant node, which may have these data input nodes:
  //   MachTemp, MachSpillCopy
  // - MachSpillCopy
  // - dead MachProj and Copy nodes
107 | if (old->is_MachSpillCopy()) { |
108 | return true; |
109 | } else if (old->is_Con()) { |
110 | return true; |
  } else if (old->is_MachProj()) { // Dead kill projection of a Con node
112 | return (old == orig_old); |
113 | } else if (old->is_Copy()) { // Dead copy of a callee-save value |
114 | return (old == orig_old); |
115 | } else if (old->is_MachTemp()) { |
116 | return orig_old->is_Con(); |
117 | } else if (old->is_Phi()) { // Junk phi's |
118 | return true; |
119 | } else if (old->is_MachConstantBase()) { |
120 | return (orig_old->is_Con() && orig_old->is_MachConstant()); |
121 | } |
122 | return false; |
123 | } |
124 | #endif |
125 | |
126 | //------------------------------yank_if_dead----------------------------------- |
// Remove edges from 'old'. Yank if dead. Return adjustment counts to
128 | // iterators in the current block. |
129 | int PhaseChaitin::yank_if_dead_recurse(Node *old, Node *orig_old, Block *current_block, |
130 | Node_List *value, Node_List *regnd) { |
131 | int blk_adjust=0; |
132 | if (old->outcnt() == 0 && old != C->top()) { |
133 | #ifdef ASSERT |
134 | if (!expected_yanked_node(old, orig_old)) { |
135 | tty->print_cr("==============================================" ); |
136 | tty->print_cr("orig_old:" ); |
137 | orig_old->dump(); |
138 | tty->print_cr("old:" ); |
139 | old->dump(); |
140 | assert(false, "unexpected yanked node" ); |
141 | } |
142 | if (old->is_Con()) |
143 | orig_old = old; // Reset to satisfy expected nodes checks. |
144 | #endif |
145 | blk_adjust += yank(old, current_block, value, regnd); |
146 | |
147 | for (uint i = 1; i < old->req(); i++) { |
148 | Node* n = old->in(i); |
149 | if (n != NULL) { |
150 | old->set_req(i, NULL); |
151 | blk_adjust += yank_if_dead_recurse(n, orig_old, current_block, value, regnd); |
152 | } |
153 | } |
154 | // Disconnect control and remove precedence edges if any exist |
155 | old->disconnect_inputs(NULL, C); |
156 | } |
157 | return blk_adjust; |
158 | } |
159 | |
160 | //------------------------------use_prior_register----------------------------- |
161 | // Use the prior value instead of the current value, in an effort to make |
162 | // the current value go dead. Return block iterator adjustment, in case |
163 | // we yank some instructions from this block. |
164 | int PhaseChaitin::use_prior_register( Node *n, uint idx, Node *def, Block *current_block, Node_List &value, Node_List ®nd ) { |
165 | // No effect? |
166 | if( def == n->in(idx) ) return 0; |
167 | // Def is currently dead and can be removed? Do not resurrect |
168 | if( def->outcnt() == 0 ) return 0; |
169 | |
  // Not every pair of physical registers is assignment-compatible,
  // e.g. on SPARC floating point registers are not assignable to integer
  // registers.
173 | const LRG &def_lrg = lrgs(_lrg_map.live_range_id(def)); |
174 | OptoReg::Name def_reg = def_lrg.reg(); |
175 | const RegMask &use_mask = n->in_RegMask(idx); |
176 | bool can_use = ( RegMask::can_represent(def_reg) ? (use_mask.Member(def_reg) != 0) |
177 | : (use_mask.is_AllStack() != 0)); |
178 | if (!RegMask::is_vector(def->ideal_reg())) { |
    // Check for a copy to or from a misaligned pair.
    // It is a workaround for SPARC with misaligned pairs.
181 | can_use = can_use && !use_mask.is_misaligned_pair() && !def_lrg.mask().is_misaligned_pair(); |
182 | } |
183 | if (!can_use) |
184 | return 0; |
185 | |
186 | // Capture the old def in case it goes dead... |
187 | Node *old = n->in(idx); |
188 | |
189 | // Save-on-call copies can only be elided if the entire copy chain can go |
190 | // away, lest we get the same callee-save value alive in 2 locations at |
191 | // once. We check for the obvious trivial case here. Although it can |
192 | // sometimes be elided with cooperation outside our scope, here we will just |
193 | // miss the opportunity. :-( |
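  // Concretely, we only elide when the chain has exactly this shape
  // ('old' is n->in(idx), and each copy has a single user):
  //   def --> old2 (Copy) --> old (Copy) --> n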
194 | if( may_be_copy_of_callee(def) ) { |
    if( old->outcnt() > 1 ) return 0;   // We're not the last user
196 | int idx = old->is_Copy(); |
197 | assert( idx, "chain of copies being removed" ); |
198 | Node *old2 = old->in(idx); // Chain of copies |
199 | if( old2->outcnt() > 1 ) return 0; // old is not the last user |
200 | int idx2 = old2->is_Copy(); |
201 | if( !idx2 ) return 0; // Not a chain of 2 copies |
202 | if( def != old2->in(idx2) ) return 0; // Chain of exactly 2 copies |
203 | } |
204 | |
205 | // Use the new def |
206 | n->set_req(idx,def); |
207 | _post_alloc++; |
208 | |
209 | // Is old def now dead? We successfully yanked a copy? |
210 | return yank_if_dead(old,current_block,&value,®nd); |
211 | } |
212 | |
213 | |
214 | //------------------------------skip_copies------------------------------------ |
// Skip through any number of copies (that don't modify oop-ness)
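// For example, a chain  v --> SpillCopy --> SpillCopy --> use  is followed
// back to v; a copy whose oop-ness differs (a casting copy) stops the walk.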
216 | Node *PhaseChaitin::skip_copies( Node *c ) { |
217 | int idx = c->is_Copy(); |
218 | uint is_oop = lrgs(_lrg_map.live_range_id(c))._is_oop; |
219 | while (idx != 0) { |
220 | guarantee(c->in(idx) != NULL, "must not resurrect dead copy" ); |
221 | if (lrgs(_lrg_map.live_range_id(c->in(idx)))._is_oop != is_oop) { |
222 | break; // casting copy, not the same value |
223 | } |
224 | c = c->in(idx); |
225 | idx = c->is_Copy(); |
226 | } |
227 | return c; |
228 | } |
229 | |
230 | //------------------------------elide_copy------------------------------------- |
231 | // Remove (bypass) copies along Node n, edge k. |
232 | int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List ®nd, bool can_change_regs ) { |
233 | int blk_adjust = 0; |
234 | |
235 | uint nk_idx = _lrg_map.live_range_id(n->in(k)); |
236 | OptoReg::Name nk_reg = lrgs(nk_idx).reg(); |
237 | |
238 | // Remove obvious same-register copies |
239 | Node *x = n->in(k); |
240 | int idx; |
241 | while( (idx=x->is_Copy()) != 0 ) { |
242 | Node *copy = x->in(idx); |
243 | guarantee(copy != NULL, "must not resurrect dead copy" ); |
244 | if(lrgs(_lrg_map.live_range_id(copy)).reg() != nk_reg) { |
245 | break; |
246 | } |
247 | blk_adjust += use_prior_register(n,k,copy,current_block,value,regnd); |
248 | if (n->in(k) != copy) { |
249 | break; // Failed for some cutout? |
250 | } |
251 | x = copy; // Progress, try again |
252 | } |
253 | |
254 | // Phis and 2-address instructions cannot change registers so easily - their |
255 | // outputs must match their input. |
256 | if( !can_change_regs ) |
257 | return blk_adjust; // Only check stupid copies! |
258 | |
  // Loop backedges won't have a value-mapping yet (the caller passes a
  // dereferenced NULL map pointer in that case, which this check catches)
  if( &value == NULL ) return blk_adjust;
261 | |
262 | // Skip through all copies to the _value_ being used. Do not change from |
263 | // int to pointer. This attempts to jump through a chain of copies, where |
264 | // intermediate copies might be illegal, i.e., value is stored down to stack |
265 | // then reloaded BUT survives in a register the whole way. |
266 | Node *val = skip_copies(n->in(k)); |
267 | if (val == x) return blk_adjust; // No progress? |
268 | |
269 | int n_regs = RegMask::num_registers(val->ideal_reg()); |
270 | uint val_idx = _lrg_map.live_range_id(val); |
271 | OptoReg::Name val_reg = lrgs(val_idx).reg(); |
272 | |
273 | // See if it happens to already be in the correct register! |
274 | // (either Phi's direct register, or the common case of the name |
275 | // never-clobbered original-def register) |
276 | if (register_contains_value(val, val_reg, n_regs, value)) { |
277 | blk_adjust += use_prior_register(n,k,regnd[val_reg],current_block,value,regnd); |
278 | if( n->in(k) == regnd[val_reg] ) // Success! Quit trying |
279 | return blk_adjust; |
280 | } |
281 | |
282 | // See if we can skip the copy by changing registers. Don't change from |
283 | // using a register to using the stack unless we know we can remove a |
284 | // copy-load. Otherwise we might end up making a pile of Intel cisc-spill |
285 | // ops reading from memory instead of just loading once and using the |
286 | // register. |
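  //
  // For example (an illustrative case): if the value is live both in RAX and
  // in a stack slot, prefer rewriting this use to RAX; keep a stack-slot use
  // only when the use site can CISC-fold the memory operand or when the
  // spill-load's last use turns into a CISC use.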
287 | |
288 | // Also handle duplicate copies here. |
289 | const Type *t = val->is_Con() ? val->bottom_type() : NULL; |
290 | |
291 | // Scan all registers to see if this value is around already |
292 | for( uint reg = 0; reg < (uint)_max_reg; reg++ ) { |
293 | if (reg == (uint)nk_reg) { |
      // Found ourselves, so check whether this copy has only a single user;
      // if so, keep searching for a better copy.
296 | bool ignore_self = true; |
297 | x = n->in(k); |
298 | DUIterator_Fast imax, i = x->fast_outs(imax); |
299 | Node* first = x->fast_out(i); i++; |
300 | while (i < imax && ignore_self) { |
301 | Node* use = x->fast_out(i); i++; |
302 | if (use != first) ignore_self = false; |
303 | } |
304 | if (ignore_self) continue; |
305 | } |
306 | |
307 | Node *vv = value[reg]; |
308 | if (n_regs > 1) { // Doubles and vectors check for aligned-adjacent set |
309 | uint last = (n_regs-1); // Looking for the last part of a set |
310 | if ((reg&last) != last) continue; // Wrong part of a set |
311 | if (!register_contains_value(vv, reg, n_regs, value)) continue; // Different value |
312 | } |
313 | if( vv == val || // Got a direct hit? |
314 | (t && vv && vv->bottom_type() == t && vv->is_Mach() && |
315 | vv->as_Mach()->rule() == val->as_Mach()->rule()) ) { // Or same constant? |
316 | assert( !n->is_Phi(), "cannot change registers at a Phi so easily" ); |
317 | if( OptoReg::is_stack(nk_reg) || // CISC-loading from stack OR |
318 | OptoReg::is_reg(reg) || // turning into a register use OR |
319 | regnd[reg]->outcnt()==1 ) { // last use of a spill-load turns into a CISC use |
320 | blk_adjust += use_prior_register(n,k,regnd[reg],current_block,value,regnd); |
321 | if( n->in(k) == regnd[reg] ) // Success! Quit trying |
322 | return blk_adjust; |
323 | } // End of if not degrading to a stack |
324 | } // End of if found value in another register |
325 | } // End of scan all machine registers |
326 | return blk_adjust; |
327 | } |
328 | |
329 | |
330 | // |
// Check if nreg already contains the constant value val. Normal copy
// elimination doesn't work on constants because multiple nodes can
// represent the same constant, so the type and rule of the MachNode
// must be checked to ensure equivalence.
335 | // |
336 | bool PhaseChaitin::eliminate_copy_of_constant(Node* val, Node* n, |
337 | Block *current_block, |
338 | Node_List& value, Node_List& regnd, |
339 | OptoReg::Name nreg, OptoReg::Name nreg2) { |
340 | if (value[nreg] != val && val->is_Con() && |
341 | value[nreg] != NULL && value[nreg]->is_Con() && |
342 | (nreg2 == OptoReg::Bad || value[nreg] == value[nreg2]) && |
343 | value[nreg]->bottom_type() == val->bottom_type() && |
344 | value[nreg]->as_Mach()->rule() == val->as_Mach()->rule()) { |
    // This code assumes that two MachNodes representing constants
    // which have the same rule and the same bottom type will produce
    // identical effects into a register. This seems like it must be
    // objectively true unless there are hidden inputs to the nodes,
    // but if that were to change this code would need to be updated.
    // Since they are equivalent, the second one is redundant and can
    // be removed.
    //
    // n will be replaced with the old value, but n might have
    // kill projections associated with it, so remove them now so that
    // yank_if_dead will be able to eliminate the copy once the uses
    // have been transferred to the old value.
357 | for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { |
358 | Node* use = n->fast_out(i); |
359 | if (use->is_Proj() && use->outcnt() == 0) { |
360 | // Kill projections have no users and one input |
361 | use->set_req(0, C->top()); |
362 | yank_if_dead(use, current_block, &value, ®nd); |
363 | --i; --imax; |
364 | } |
365 | } |
366 | _post_alloc++; |
367 | return true; |
368 | } |
369 | return false; |
370 | } |
371 | |
// The algorithm works as follows:
// We traverse the block top to bottom. possibly_merge_multidef() is invoked for every input edge k
// of the instruction n. We check to see if the input is a multidef lrg. If it is, we record the fact that we've
// seen a definition (coming as an input) and add that fact to the reg2defuse array. The array maps registers to their
// current reaching definitions (we track only multidefs though). With each definition we also associate the first
// instruction we saw use it. If we encounter a def (an input) that is part of the same lrg but differs from the
// previously seen def, we merge the two with a MachMerge node and rewrite all the uses we've seen so far
// to use the merge. After that we keep replacing new defs in the same lrg with the merge node as they are
// encountered, and keep adding these defs to the merge's inputs.
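//
// An illustrative schedule, with a multidef lrg L allocated to register R:
//   d1 = ...       // defines L in R
//   use1(d1)       // record R -> (def d1, first use use1)
//   d2 = ...       // redefines L in R
//   use2(d2)       // d2 != d1 but same lrg: insert m = MachMerge(d1) before
//                  // use1, rewrite use1 to use m, add d2 as an input of m,
//                  // and rewrite use2 to use m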
381 | void PhaseChaitin::merge_multidefs() { |
382 | Compile::TracePhase tp("mergeMultidefs" , &timers[_t_mergeMultidefs]); |
383 | ResourceMark rm; |
384 | // Keep track of the defs seen in registers and collect their uses in the block. |
385 | RegToDefUseMap reg2defuse(_max_reg, _max_reg, RegDefUse()); |
386 | for (uint i = 0; i < _cfg.number_of_blocks(); i++) { |
387 | Block* block = _cfg.get_block(i); |
388 | for (uint j = 1; j < block->number_of_nodes(); j++) { |
389 | Node* n = block->get_node(j); |
390 | if (n->is_Phi()) continue; |
391 | for (uint k = 1; k < n->req(); k++) { |
392 | j += possibly_merge_multidef(n, k, block, reg2defuse); |
393 | } |
394 | // Null out the value produced by the instruction itself, since we're only interested in defs |
395 | // implicitly defined by the uses. We are actually interested in tracking only redefinitions |
396 | // of the multidef lrgs in the same register. For that matter it's enough to track changes in |
397 | // the base register only and ignore other effects of multi-register lrgs and fat projections. |
398 | // It is also ok to ignore defs coming from singledefs. After an implicit overwrite by one of |
399 | // those our register is guaranteed to be used by another lrg and we won't attempt to merge it. |
400 | uint lrg = _lrg_map.live_range_id(n); |
401 | if (lrg > 0 && lrgs(lrg).is_multidef()) { |
402 | OptoReg::Name reg = lrgs(lrg).reg(); |
403 | reg2defuse.at(reg).clear(); |
404 | } |
405 | } |
406 | // Clear reg->def->use tracking for the next block |
407 | for (int j = 0; j < reg2defuse.length(); j++) { |
408 | reg2defuse.at(j).clear(); |
409 | } |
410 | } |
411 | } |
412 | |
413 | int PhaseChaitin::possibly_merge_multidef(Node *n, uint k, Block *block, RegToDefUseMap& reg2defuse) { |
414 | int blk_adjust = 0; |
415 | |
416 | uint lrg = _lrg_map.live_range_id(n->in(k)); |
417 | if (lrg > 0 && lrgs(lrg).is_multidef()) { |
418 | OptoReg::Name reg = lrgs(lrg).reg(); |
419 | |
420 | Node* def = reg2defuse.at(reg).def(); |
421 | if (def != NULL && lrg == _lrg_map.live_range_id(def) && def != n->in(k)) { |
422 | // Same lrg but different node, we have to merge. |
423 | MachMergeNode* merge; |
424 | if (def->is_MachMerge()) { // is it already a merge? |
425 | merge = def->as_MachMerge(); |
426 | } else { |
427 | merge = new MachMergeNode(def); |
428 | |
429 | // Insert the merge node into the block before the first use. |
430 | uint use_index = block->find_node(reg2defuse.at(reg).first_use()); |
431 | block->insert_node(merge, use_index++); |
432 | _cfg.map_node_to_block(merge, block); |
433 | |
434 | // Let the allocator know about the new node, use the same lrg |
435 | _lrg_map.extend(merge->_idx, lrg); |
436 | blk_adjust++; |
437 | |
        // Fix up all the uses (there is at least one) that occur between the
        // first use and the current instruction.
440 | for (; use_index < block->number_of_nodes(); use_index++) { |
441 | Node* use = block->get_node(use_index); |
442 | if (use == n) { |
443 | break; |
444 | } |
445 | use->replace_edge(def, merge); |
446 | } |
447 | } |
448 | if (merge->find_edge(n->in(k)) == -1) { |
449 | merge->add_req(n->in(k)); |
450 | } |
451 | n->set_req(k, merge); |
452 | } |
453 | |
454 | // update the uses |
455 | reg2defuse.at(reg).update(n->in(k), n); |
456 | } |
457 | |
458 | return blk_adjust; |
459 | } |
460 | |
461 | |
462 | //------------------------------post_allocate_copy_removal--------------------- |
463 | // Post-Allocation peephole copy removal. We do this in 1 pass over the |
464 | // basic blocks. We maintain a mapping of registers to Nodes (an array of |
465 | // Nodes indexed by machine register or stack slot number). NULL means that a |
// register is not mapped to any Node. We can (and want to!) have several
467 | // registers map to the same Node. We walk forward over the instructions |
468 | // updating the mapping as we go. At merge points we force a NULL if we have |
469 | // to merge 2 different Nodes into the same register. Phi functions will give |
470 | // us a new Node if there is a proper value merging. Since the blocks are |
471 | // arranged in some RPO, we will visit all parent blocks before visiting any |
472 | // successor blocks (except at loops). |
473 | // |
474 | // If we find a Copy we look to see if the Copy's source register is a stack |
475 | // slot and that value has already been loaded into some machine register; if |
// so we use the machine register directly. This turns a Load into a reg-reg
477 | // Move. We also look for reloads of identical constants. |
478 | // |
479 | // When we see a use from a reg-reg Copy, we will attempt to use the copy's |
480 | // source directly and make the copy go dead. |
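//
// A sketch of the reload case (hypothetical registers):
//   R10 = SpillCopy [sp+16]   // value v is now available in R10
//   ...
//   R11 = SpillCopy [sp+16]   // v is already in R10, so this use is
//                             // rewritten to read R10 and the reload dies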
481 | void PhaseChaitin::post_allocate_copy_removal() { |
482 | Compile::TracePhase tp("postAllocCopyRemoval" , &timers[_t_postAllocCopyRemoval]); |
483 | ResourceMark rm; |
484 | |
  // Need a mapping from basic block to Node_Lists. We need a Node_List to
486 | // map from register number to value-producing Node. |
487 | Node_List **blk2value = NEW_RESOURCE_ARRAY( Node_List *, _cfg.number_of_blocks() + 1); |
488 | memset(blk2value, 0, sizeof(Node_List*) * (_cfg.number_of_blocks() + 1)); |
  // Need a mapping from basic block to Node_Lists. We need a Node_List to
490 | // map from register number to register-defining Node. |
491 | Node_List **blk2regnd = NEW_RESOURCE_ARRAY( Node_List *, _cfg.number_of_blocks() + 1); |
492 | memset(blk2regnd, 0, sizeof(Node_List*) * (_cfg.number_of_blocks() + 1)); |
493 | |
494 | // We keep unused Node_Lists on a free_list to avoid wasting |
495 | // memory. |
496 | GrowableArray<Node_List*> free_list = GrowableArray<Node_List*>(16); |
497 | |
498 | // For all blocks |
499 | for (uint i = 0; i < _cfg.number_of_blocks(); i++) { |
500 | uint j; |
501 | Block* block = _cfg.get_block(i); |
502 | |
503 | // Count of Phis in block |
504 | uint phi_dex; |
505 | for (phi_dex = 1; phi_dex < block->number_of_nodes(); phi_dex++) { |
506 | Node* phi = block->get_node(phi_dex); |
507 | if (!phi->is_Phi()) { |
508 | break; |
509 | } |
510 | } |
511 | |
512 | // If any predecessor has not been visited, we do not know the state |
513 | // of registers at the start. Check for this, while updating copies |
514 | // along Phi input edges |
515 | bool missing_some_inputs = false; |
516 | Block *freed = NULL; |
517 | for (j = 1; j < block->num_preds(); j++) { |
518 | Block* pb = _cfg.get_block_for_node(block->pred(j)); |
519 | // Remove copies along phi edges |
520 | for (uint k = 1; k < phi_dex; k++) { |
521 | elide_copy(block->get_node(k), j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false); |
522 | } |
523 | if (blk2value[pb->_pre_order]) { // Have a mapping on this edge? |
524 | // See if this predecessor's mappings have been used by everybody |
525 | // who wants them. If so, free 'em. |
526 | uint k; |
527 | for (k = 0; k < pb->_num_succs; k++) { |
528 | Block* pbsucc = pb->_succs[k]; |
529 | if (!blk2value[pbsucc->_pre_order] && pbsucc != block) { |
530 | break; // Found a future user |
531 | } |
532 | } |
533 | if (k >= pb->_num_succs) { // No more uses, free! |
534 | freed = pb; // Record last block freed |
535 | free_list.push(blk2value[pb->_pre_order]); |
536 | free_list.push(blk2regnd[pb->_pre_order]); |
537 | } |
538 | } else { // This block has unvisited (loopback) inputs |
539 | missing_some_inputs = true; |
540 | } |
541 | } |
542 | |
543 | |
    // Extract Node_List mappings. If 'freed' is non-zero, the pops below
    // recycle 'freed's maps from the free_list
546 | Node_List ®nd = *(free_list.is_empty() ? new Node_List() : free_list.pop()); |
547 | Node_List &value = *(free_list.is_empty() ? new Node_List() : free_list.pop()); |
548 | assert( !freed || blk2value[freed->_pre_order] == &value, "" ); |
549 | value.map(_max_reg,NULL); |
550 | regnd.map(_max_reg,NULL); |
551 | // Set mappings as OUR mappings |
552 | blk2value[block->_pre_order] = &value; |
553 | blk2regnd[block->_pre_order] = ®nd; |
554 | |
555 | // Initialize value & regnd for this block |
556 | if (missing_some_inputs) { |
557 | // Some predecessor has not yet been visited; zap map to empty |
558 | for (uint k = 0; k < (uint)_max_reg; k++) { |
559 | value.map(k,NULL); |
560 | regnd.map(k,NULL); |
561 | } |
562 | } else { |
563 | if( !freed ) { // Didn't get a freebie prior block |
564 | // Must clone some data |
565 | freed = _cfg.get_block_for_node(block->pred(1)); |
566 | Node_List &f_value = *blk2value[freed->_pre_order]; |
567 | Node_List &f_regnd = *blk2regnd[freed->_pre_order]; |
568 | for( uint k = 0; k < (uint)_max_reg; k++ ) { |
569 | value.map(k,f_value[k]); |
570 | regnd.map(k,f_regnd[k]); |
571 | } |
572 | } |
573 | // Merge all inputs together, setting to NULL any conflicts. |
574 | for (j = 1; j < block->num_preds(); j++) { |
575 | Block* pb = _cfg.get_block_for_node(block->pred(j)); |
576 | if (pb == freed) { |
577 | continue; // Did self already via freelist |
578 | } |
579 | Node_List &p_regnd = *blk2regnd[pb->_pre_order]; |
580 | for( uint k = 0; k < (uint)_max_reg; k++ ) { |
581 | if( regnd[k] != p_regnd[k] ) { // Conflict on reaching defs? |
582 | value.map(k,NULL); // Then no value handy |
583 | regnd.map(k,NULL); |
584 | } |
585 | } |
586 | } |
587 | } |
588 | |
589 | // For all Phi's |
590 | for (j = 1; j < phi_dex; j++) { |
591 | uint k; |
592 | Node *phi = block->get_node(j); |
593 | uint pidx = _lrg_map.live_range_id(phi); |
594 | OptoReg::Name preg = lrgs(_lrg_map.live_range_id(phi)).reg(); |
595 | |
596 | // Remove copies remaining on edges. Check for junk phi. |
597 | Node *u = NULL; |
598 | for (k = 1; k < phi->req(); k++) { |
599 | Node *x = phi->in(k); |
600 | if( phi != x && u != x ) // Found a different input |
601 | u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input |
602 | } |
603 | if (u != NodeSentinel) { // Junk Phi. Remove |
604 | phi->replace_by(u); |
605 | j -= yank_if_dead(phi, block, &value, ®nd); |
606 | phi_dex--; |
607 | continue; |
608 | } |
609 | // Note that if value[pidx] exists, then we merged no new values here |
610 | // and the phi is useless. This can happen even with the above phi |
611 | // removal for complex flows. I cannot keep the better known value here |
612 | // because locally the phi appears to define a new merged value. If I |
613 | // keep the better value then a copy of the phi, being unable to use the |
614 | // global flow analysis, can't "peek through" the phi to the original |
615 | // reaching value and so will act like it's defining a new value. This |
616 | // can lead to situations where some uses are from the old and some from |
617 | // the new values. Not illegal by itself but throws the over-strong |
618 | // assert in scheduling. |
619 | if( pidx ) { |
620 | value.map(preg,phi); |
621 | regnd.map(preg,phi); |
622 | int n_regs = RegMask::num_registers(phi->ideal_reg()); |
623 | for (int l = 1; l < n_regs; l++) { |
624 | OptoReg::Name preg_lo = OptoReg::add(preg,-l); |
625 | value.map(preg_lo,phi); |
626 | regnd.map(preg_lo,phi); |
627 | } |
628 | } |
629 | } |
630 | |
631 | // For all remaining instructions |
632 | for (j = phi_dex; j < block->number_of_nodes(); j++) { |
633 | Node* n = block->get_node(j); |
634 | |
635 | if(n->outcnt() == 0 && // Dead? |
636 | n != C->top() && // (ignore TOP, it has no du info) |
637 | !n->is_Proj() ) { // fat-proj kills |
638 | j -= yank_if_dead(n, block, &value, ®nd); |
639 | continue; |
640 | } |
641 | |
642 | // Improve reaching-def info. Occasionally post-alloc's liveness gives |
643 | // up (at loop backedges, because we aren't doing a full flow pass). |
644 | // The presence of a live use essentially asserts that the use's def is |
645 | // alive and well at the use (or else the allocator fubar'd). Take |
646 | // advantage of this info to set a reaching def for the use-reg. |
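      // For example, at a loop head value[R10] may be NULL; a live use whose
      // def was allocated to R10 lets us record R10 -> def right here.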
647 | uint k; |
648 | for (k = 1; k < n->req(); k++) { |
649 | Node *def = n->in(k); // n->in(k) is a USE; def is the DEF for this USE |
650 | guarantee(def != NULL, "no disconnected nodes at this point" ); |
651 | uint useidx = _lrg_map.live_range_id(def); // useidx is the live range index for this USE |
652 | |
653 | if( useidx ) { |
654 | OptoReg::Name ureg = lrgs(useidx).reg(); |
655 | if( !value[ureg] ) { |
656 | int idx; // Skip occasional useless copy |
657 | while( (idx=def->is_Copy()) != 0 && |
658 | def->in(idx) != NULL && // NULL should not happen |
659 | ureg == lrgs(_lrg_map.live_range_id(def->in(idx))).reg()) |
660 | def = def->in(idx); |
661 | Node *valdef = skip_copies(def); // tighten up val through non-useless copies |
662 | value.map(ureg,valdef); // record improved reaching-def info |
663 | regnd.map(ureg, def); |
664 | // Record other half of doubles |
665 | uint def_ideal_reg = def->ideal_reg(); |
666 | int n_regs = RegMask::num_registers(def_ideal_reg); |
667 | for (int l = 1; l < n_regs; l++) { |
668 | OptoReg::Name ureg_lo = OptoReg::add(ureg,-l); |
669 | if (!value[ureg_lo] && |
670 | (!RegMask::can_represent(ureg_lo) || |
671 | lrgs(useidx).mask().Member(ureg_lo))) { // Nearly always adjacent |
672 | value.map(ureg_lo,valdef); // record improved reaching-def info |
673 | regnd.map(ureg_lo, def); |
674 | } |
675 | } |
676 | } |
677 | } |
678 | } |
679 | |
680 | const uint two_adr = n->is_Mach() ? n->as_Mach()->two_adr() : 0; |
681 | |
682 | // Remove copies along input edges |
683 | for (k = 1; k < n->req(); k++) { |
684 | j -= elide_copy(n, k, block, value, regnd, two_adr != k); |
685 | } |
686 | |
687 | // Unallocated Nodes define no registers |
688 | uint lidx = _lrg_map.live_range_id(n); |
689 | if (!lidx) { |
690 | continue; |
691 | } |
692 | |
693 | // Update the register defined by this instruction |
694 | OptoReg::Name nreg = lrgs(lidx).reg(); |
695 | // Skip through all copies to the _value_ being defined. |
696 | // Do not change from int to pointer |
697 | Node *val = skip_copies(n); |
698 | |
      // Clear out a dead definition before starting so that the
      // elimination code doesn't have to guard against it. The
      // definition could in fact be a kill projection with a use count
      // of 0, which is safe, but since those are uninteresting for copy
      // elimination just delete them as well.
704 | if (regnd[nreg] != NULL && regnd[nreg]->outcnt() == 0) { |
705 | regnd.map(nreg, NULL); |
706 | value.map(nreg, NULL); |
707 | } |
708 | |
709 | uint n_ideal_reg = n->ideal_reg(); |
710 | int n_regs = RegMask::num_registers(n_ideal_reg); |
711 | if (n_regs == 1) { |
712 | // If Node 'n' does not change the value mapped by the register, |
713 | // then 'n' is a useless copy. Do not update the register->node |
714 | // mapping so 'n' will go dead. |
715 | if( value[nreg] != val ) { |
716 | if (eliminate_copy_of_constant(val, n, block, value, regnd, nreg, OptoReg::Bad)) { |
717 | j -= replace_and_yank_if_dead(n, nreg, block, value, regnd); |
718 | } else { |
719 | // Update the mapping: record new Node defined by the register |
720 | regnd.map(nreg,n); |
721 | // Update mapping for defined *value*, which is the defined |
722 | // Node after skipping all copies. |
723 | value.map(nreg,val); |
724 | } |
725 | } else if( !may_be_copy_of_callee(n) ) { |
726 | assert(n->is_Copy(), "" ); |
727 | j -= replace_and_yank_if_dead(n, nreg, block, value, regnd); |
728 | } |
729 | } else if (RegMask::is_vector(n_ideal_reg)) { |
730 | // If Node 'n' does not change the value mapped by the register, |
731 | // then 'n' is a useless copy. Do not update the register->node |
732 | // mapping so 'n' will go dead. |
733 | if (!register_contains_value(val, nreg, n_regs, value)) { |
734 | // Update the mapping: record new Node defined by the register |
735 | regnd.map(nreg,n); |
736 | // Update mapping for defined *value*, which is the defined |
737 | // Node after skipping all copies. |
738 | value.map(nreg,val); |
739 | for (int l = 1; l < n_regs; l++) { |
740 | OptoReg::Name nreg_lo = OptoReg::add(nreg,-l); |
741 | regnd.map(nreg_lo, n ); |
742 | value.map(nreg_lo,val); |
743 | } |
744 | } else if (n->is_Copy()) { |
        // Note: a vector can't be a constant and can't be a copy of a callee.
746 | j -= replace_and_yank_if_dead(n, nreg, block, value, regnd); |
747 | } |
748 | } else { |
749 | // If the value occupies a register pair, record same info |
750 | // in both registers. |
751 | OptoReg::Name nreg_lo = OptoReg::add(nreg,-1); |
752 | if( RegMask::can_represent(nreg_lo) && // Either a spill slot, or |
753 | !lrgs(lidx).mask().Member(nreg_lo) ) { // Nearly always adjacent |
754 | // Sparc occasionally has non-adjacent pairs. |
755 | // Find the actual other value |
756 | RegMask tmp = lrgs(lidx).mask(); |
757 | tmp.Remove(nreg); |
758 | nreg_lo = tmp.find_first_elem(); |
759 | } |
760 | if (value[nreg] != val || value[nreg_lo] != val) { |
761 | if (eliminate_copy_of_constant(val, n, block, value, regnd, nreg, nreg_lo)) { |
762 | j -= replace_and_yank_if_dead(n, nreg, block, value, regnd); |
763 | } else { |
764 | regnd.map(nreg , n ); |
765 | regnd.map(nreg_lo, n ); |
766 | value.map(nreg ,val); |
767 | value.map(nreg_lo,val); |
768 | } |
769 | } else if (!may_be_copy_of_callee(n)) { |
770 | assert(n->is_Copy(), "" ); |
771 | j -= replace_and_yank_if_dead(n, nreg, block, value, regnd); |
772 | } |
773 | } |
774 | |
775 | // Fat projections kill many registers |
776 | if( n_ideal_reg == MachProjNode::fat_proj ) { |
777 | RegMask rm = n->out_RegMask(); |
778 | // wow, what an expensive iterator... |
779 | nreg = rm.find_first_elem(); |
780 | while( OptoReg::is_valid(nreg)) { |
781 | rm.Remove(nreg); |
782 | value.map(nreg,n); |
783 | regnd.map(nreg,n); |
784 | nreg = rm.find_first_elem(); |
785 | } |
786 | } |
787 | |
788 | } // End of for all instructions in the block |
789 | |
790 | } // End for all blocks |
791 | } |
792 | |