Fri, 18 Oct 2013 10:50:17 +0200
8022783: Nashorn test fails with: assert(!def_outside->member(r))
Summary: Enables private copies of inputs for recent spill copies as well
Reviewed-by: kvn, twisti
1 /*
2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "libadt/vectset.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "opto/addnode.hpp"
29 #include "opto/c2compiler.hpp"
30 #include "opto/callnode.hpp"
31 #include "opto/cfgnode.hpp"
32 #include "opto/chaitin.hpp"
33 #include "opto/loopnode.hpp"
34 #include "opto/machnode.hpp"
36 //------------------------------Split--------------------------------------
37 // Walk the graph in RPO and for each lrg which spills, propagate reaching
38 // definitions. During propagation, split the live range around regions of
39 // High Register Pressure (HRP). If a Def is in a region of Low Register
40 // Pressure (LRP), it will not get spilled until we encounter a region of
41 // HRP between it and one of its uses. We will spill at the transition
42 // point between LRP and HRP. Uses in the HRP region will use the spilled
43 // Def. The first Use outside the HRP region will generate a SpillCopy to
44 // hoist the live range back up into a register, and all subsequent uses
45 // will use that new Def until another HRP region is encountered. Defs in
46 // HRP regions will get trailing SpillCopies to push the LRG down into the
47 // stack immediately.
48 //
49 // As a side effect, unlink from (hence make dead) coalesced copies.
50 //
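// Illustrative sketch (one possible shape of the output, not exhaustive):
// for one spilling live range L with its Def in a Low Register Pressure
// region, an HRP region in the middle, and Uses on both sides, the pass
// described above roughly produces:
//
//   Def  L            LRP: value stays in a register
//   SpillCopy L->S    inserted at the LRP/HRP transition, pushes L to stack
//   Use  S            Uses inside the HRP region read the spilled copy
//   SpillCopy S->L2   first Use past the HRP region hoists the value back UP
//   Use  L2           later uses see the new register Def until the next HRP
//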
52 static const char out_of_nodes[] = "out of nodes during split";
54 //------------------------------get_spillcopy_wide-----------------------------
55 // Get a SpillCopy node with wide-enough masks. Use the 'wide-mask', the
56 // wide ideal-register spill-mask if possible. If the 'wide-mask' does
57 // not cover the input (or output), use the input (or output) mask instead.
58 Node *PhaseChaitin::get_spillcopy_wide( Node *def, Node *use, uint uidx ) {
59 // If ideal reg doesn't exist we've got a bad schedule happening
60 // that is forcing us to spill something that isn't spillable.
61 // Bail rather than abort
62 int ireg = def->ideal_reg();
63 if( ireg == 0 || ireg == Op_RegFlags ) {
64 assert(false, "attempted to spill a non-spillable item");
65 C->record_method_not_compilable("attempted to spill a non-spillable item");
66 return NULL;
67 }
68 if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
69 return NULL;
70 }
71 const RegMask *i_mask = &def->out_RegMask();
72 const RegMask *w_mask = C->matcher()->idealreg2spillmask[ireg];
73 const RegMask *o_mask = use ? &use->in_RegMask(uidx) : w_mask;
74 const RegMask *w_i_mask = w_mask->overlap( *i_mask ) ? w_mask : i_mask;
75 const RegMask *w_o_mask;
77 int num_regs = RegMask::num_registers(ireg);
78 bool is_vect = RegMask::is_vector(ireg);
79 if( w_mask->overlap( *o_mask ) && // Overlap AND
80 ((num_regs == 1) // Single use or aligned
81 || is_vect // or vector
82 || !is_vect && o_mask->is_aligned_pairs()) ) {
83 assert(!is_vect || o_mask->is_aligned_sets(num_regs), "vectors are aligned");
84 // Don't come here for mis-aligned doubles
85 w_o_mask = w_mask;
86 } else { // wide ideal mask does not overlap with o_mask
87 // Mis-aligned doubles come here and XMM->FPR moves on x86.
88 w_o_mask = o_mask; // Must target desired registers
89 // Does the ideal-reg-mask overlap with o_mask? I.e., can I use
90 // a reg-reg move or do I need a trip across register classes
91 // (and thus through memory)?
92 if( !C->matcher()->idealreg2regmask[ireg]->overlap( *o_mask) && o_mask->is_UP() )
93 // Here we assume a trip through memory is required.
94 w_i_mask = &C->FIRST_STACK_mask();
95 }
96 return new (C) MachSpillCopyNode( def, *w_i_mask, *w_o_mask );
97 }
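// Note: the split helpers below use this routine in two ways. A def-side
// split calls get_spillcopy_wide(def, NULL, 0), so the output mask defaults
// to the wide spill mask, while a use-side split calls
// get_spillcopy_wide(def, use, useidx), which narrows the output mask to
// what that particular use accepts.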
99 //------------------------------insert_proj------------------------------------
100 // Insert the spill at chosen location. Skip over any intervening Proj's or
101 // Phis. Skip over a CatchNode and projs, inserting in the fall-through block
102 // instead. Update high-pressure indices. Create a new live range.
103 void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
104 // Skip intervening ProjNodes. Do not insert between a ProjNode and
105 // its definer.
106 while( i < b->number_of_nodes() &&
107 (b->get_node(i)->is_Proj() ||
108 b->get_node(i)->is_Phi() ) )
109 i++;
111 // Do not insert between a call and its Catch
112 if( b->get_node(i)->is_Catch() ) {
113 // Put the instruction at the top of the fall-thru block.
114 // Find the fall-thru projection
115 while( 1 ) {
116 const CatchProjNode *cp = b->get_node(++i)->as_CatchProj();
117 if( cp->_con == CatchProjNode::fall_through_index )
118 break;
119 }
120 int sidx = i - b->end_idx()-1;
121 b = b->_succs[sidx]; // Switch to successor block
122 i = 1; // Right at start of block
123 }
125 b->insert_node(spill, i); // Insert node in block
126 _cfg.map_node_to_block(spill, b); // Update node->block mapping to reflect the insertion
127 // Adjust the point where we go hi-pressure
128 if( i <= b->_ihrp_index ) b->_ihrp_index++;
129 if( i <= b->_fhrp_index ) b->_fhrp_index++;
131 // Assign a new Live Range Number to the SpillCopy and grow
132 // the node->live range mapping.
133 new_lrg(spill,maxlrg);
134 }
136 //------------------------------split_DEF--------------------------------------
137 // There are four categories of Split: UP/DOWN x DEF/USE
138 // Only three of these really occur, as a DOWN/USE will always color
139 // Any Split with a DEF cannot CISC-Spill now. Thus we need
140 // two helper routines, one for Split DEFS (insert after instruction),
141 // one for Split USES (insert before instruction). DEF insertion
142 // happens inside Split, where the Leaveblock array is updated.
143 uint PhaseChaitin::split_DEF( Node *def, Block *b, int loc, uint maxlrg, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx ) {
144 #ifdef ASSERT
145 // Increment the counter for this lrg
146 splits.at_put(slidx, splits.at(slidx)+1);
147 #endif
148 // If we are spilling the memory op for an implicit null check, at the
149 // null check location (ie - null check is in HRP block) we need to do
150 // the null-check first, then spill-down in the following block.
151 // (The implicit_null_check function ensures the use is also dominated
152 // by the branch-not-taken block.)
153 Node *be = b->end();
154 if( be->is_MachNullCheck() && be->in(1) == def && def == b->get_node(loc)) {
155 // Spill goes in the branch-not-taken block
156 b = b->_succs[b->get_node(b->end_idx()+1)->Opcode() == Op_IfTrue];
157 loc = 0; // Just past the Region
158 }
159 assert( loc >= 0, "must insert past block head" );
161 // Get a def-side SpillCopy
162 Node *spill = get_spillcopy_wide(def,NULL,0);
164 // Did we fail to split? Then bail
164 if (!spill) {
165 return 0;
166 }
168 // Insert the spill at chosen location
169 insert_proj( b, loc+1, spill, maxlrg++);
171 // Insert new node into Reaches array
172 Reachblock[slidx] = spill;
173 // Update debug list of reaching down definitions by adding this one
174 debug_defs[slidx] = spill;
176 // return updated count of live ranges
177 return maxlrg;
178 }
180 //------------------------------split_USE--------------------------------------
181 // Splits at uses can involve re-defining the LRG, so no CISC Spilling there.
182 // Debug uses want to know if def is already stack enabled.
183 uint PhaseChaitin::split_USE( Node *def, Block *b, Node *use, uint useidx, uint maxlrg, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx ) {
184 #ifdef ASSERT
185 // Increment the counter for this lrg
186 splits.at_put(slidx, splits.at(slidx)+1);
187 #endif
189 // Some setup stuff for handling debug node uses
190 JVMState* jvms = use->jvms();
191 uint debug_start = jvms ? jvms->debug_start() : 999999;
192 uint debug_end = jvms ? jvms->debug_end() : 999999;
194 //-------------------------------------------
195 // Check for use of debug info
196 if (useidx >= debug_start && useidx < debug_end) {
197 // Actually it's perfectly legal for constant debug info to appear,
198 // just unlikely. In this case the optimizer left a ConI of 4
199 // as both inputs to a Phi with only a debug use. It's a single-def
200 // live range of a rematerializable value. The live range spills,
201 // rematerializes and now the ConI directly feeds into the debug info.
202 // assert(!def->is_Con(), "constant debug info already constructed directly");
204 // Special split handling for Debug Info
205 // If DEF is DOWN, just hook the edge and return
206 // If DEF is UP, Split it DOWN for this USE.
207 if( def->is_Mach() ) {
208 if( def_down ) {
209 // DEF is DOWN, so connect USE directly to the DEF
210 use->set_req(useidx, def);
211 } else {
212 // Block and index where the use occurs.
213 Block *b = _cfg.get_block_for_node(use);
214 // Put the clone just prior to use
215 int bindex = b->find_node(use);
216 // DEF is UP, so must copy it DOWN and hook in USE
217 // Insert SpillCopy before the USE, which uses DEF as its input,
218 // and defs a new live range, which is used by this node.
219 Node *spill = get_spillcopy_wide(def,use,useidx);
220 // did we fail to split?
221 if (!spill) {
222 // Bail
223 return 0;
224 }
225 // insert into basic block
226 insert_proj( b, bindex, spill, maxlrg++ );
227 // Use the new split
228 use->set_req(useidx,spill);
229 }
230 // No further split handling needed for this use
231 return maxlrg;
232 } // End special splitting for debug info live range
233 } // If debug info
235 // CISC-SPILLING
236 // Finally, check to see if USE is CISC-Spillable, and if so,
237 // gather_lrg_masks will add the flags bit to its mask, and
238 // no use side copy is needed. This frees up the live range
239 // register choices without causing copy coalescing, etc.
240 if( UseCISCSpill && cisc_sp ) {
241 int inp = use->cisc_operand();
242 if( inp != AdlcVMDeps::Not_cisc_spillable )
243 // Convert operand number to edge index number
244 inp = use->as_Mach()->operand_index(inp);
245 if( inp == (int)useidx ) {
246 use->set_req(useidx, def);
247 #ifndef PRODUCT
248 if( TraceCISCSpill ) {
249 tty->print(" set_split: ");
250 use->dump();
251 }
252 #endif
253 return maxlrg;
254 }
255 }
257 //-------------------------------------------
258 // Insert a Copy before the use
260 // Block and index where the use occurs.
261 int bindex;
262 // Phi input spill-copies belong at the end of the prior block
263 if( use->is_Phi() ) {
264 b = _cfg.get_block_for_node(b->pred(useidx));
265 bindex = b->end_idx();
266 } else {
267 // Put the clone just prior to use
268 bindex = b->find_node(use);
269 }
271 Node *spill = get_spillcopy_wide( def, use, useidx );
272 if( !spill ) return 0; // Bailed out
273 // Insert SpillCopy before the USE, which uses the reaching DEF as
274 // its input, and defs a new live range, which is used by this node.
275 insert_proj( b, bindex, spill, maxlrg++ );
276 // Use the spill/clone
277 use->set_req(useidx,spill);
279 // return updated live range count
280 return maxlrg;
281 }
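// Note: def_down tells this routine whether the reaching def is already DOWN
// (on the stack), so a debug-info use can be hooked to it directly; cisc_sp
// enables the CISC-spill shortcut above. In the Split() loop below, cisc_sp
// is true only for the high-pressure "COPY UP" case.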
283 //------------------------------clone_node----------------------------
284 // Clone node with anti-dependence check.
285 Node* clone_node(Node* def, Block *b, Compile* C) {
286 if (def->needs_anti_dependence_check()) {
287 #ifdef ASSERT
288 if (Verbose) {
289 tty->print_cr("RA attempts to clone node with anti_dependence:");
290 def->dump(-1); tty->cr();
291 tty->print_cr("into block:");
292 b->dump();
293 }
294 #endif
295 if (C->subsume_loads() == true && !C->failing()) {
296 // Retry with subsume_loads == false
297 // If this is the first failure, the sentinel string will "stick"
298 // to the Compile object, and the C2Compiler will see it and retry.
299 C->record_failure(C2Compiler::retry_no_subsuming_loads());
300 } else {
301 // Bailout without retry
302 C->record_method_not_compilable("RA Split failed: attempt to clone node with anti_dependence");
303 }
304 return 0;
305 }
306 return def->clone();
307 }
309 //------------------------------split_Rematerialize----------------------------
310 // Clone a local copy of the def.
311 Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru ) {
312 // The input live ranges will be stretched to the site of the new
313 // instruction. They might be stretched past a def and will thus
314 // have the old and new values of the same live range alive at the
315 // same time - a definite no-no. Split out private copies of
316 // the inputs.
317 if( def->req() > 1 ) {
318 for( uint i = 1; i < def->req(); i++ ) {
319 Node *in = def->in(i);
320 uint lidx = _lrg_map.live_range_id(in);
321 // We do not need this for live ranges that are only defined once.
322 // However, this is not true for spill copies that are added in this
323 // Split() pass, since they might get coalesced later on in this pass.
324 if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_singledef()) {
325 continue;
326 }
328 Block *b_def = _cfg.get_block_for_node(def);
329 int idx_def = b_def->find_node(def);
330 Node *in_spill = get_spillcopy_wide( in, def, i );
331 if( !in_spill ) return 0; // Bailed out
332 insert_proj(b_def,idx_def,in_spill,maxlrg++);
333 if( b_def == b )
334 insidx++;
335 def->set_req(i,in_spill);
336 }
337 }
339 Node *spill = clone_node(def, b, C);
340 if (spill == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
341 // Check when generating nodes
342 return 0;
343 }
345 // See if any inputs are currently being spilled, and take the
346 // latest copy of spilled inputs.
347 if( spill->req() > 1 ) {
348 for( uint i = 1; i < spill->req(); i++ ) {
349 Node *in = spill->in(i);
350 uint lidx = _lrg_map.find_id(in);
352 // Walk backwards thru spill copy node intermediates
353 if (walkThru) {
354 while (in->is_SpillCopy() && lidx >= _lrg_map.max_lrg_id()) {
355 in = in->in(1);
356 lidx = _lrg_map.find_id(in);
357 }
359 if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_multidef()) {
360 // walkThru found a multidef LRG, which is unsafe to use, so
361 // just keep the original def used in the clone.
362 in = spill->in(i);
363 lidx = _lrg_map.find_id(in);
364 }
365 }
367 if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).reg() >= LRG::SPILL_REG) {
368 assert(Reachblock != NULL, "Reachblock must be non-NULL");
369 Node *rdef = Reachblock[lrg2reach[lidx]];
370 if (rdef) {
371 spill->set_req(i, rdef);
372 }
373 }
374 }
375 }
378 assert( spill->out_RegMask().is_UP(), "rematerialize to a reg" );
379 // Rematerialized op is def->spilled+1
380 set_was_spilled(spill);
381 if( _spilled_once.test(def->_idx) )
382 set_was_spilled(spill);
384 insert_proj( b, insidx, spill, maxlrg++ );
385 #ifdef ASSERT
386 // Increment the counter for this lrg
387 splits.at_put(slidx, splits.at(slidx)+1);
388 #endif
389 // See if the cloned def kills any flags, and copy those kills as well
390 uint i = insidx+1;
391 int found_projs = clone_projs( b, i, def, spill, maxlrg);
392 if (found_projs > 0) {
393 // Adjust the point where we go hi-pressure
394 if (i <= b->_ihrp_index) {
395 b->_ihrp_index += found_projs;
396 }
397 if (i <= b->_fhrp_index) {
398 b->_fhrp_index += found_projs;
399 }
400 }
402 return spill;
403 }
405 //------------------------------is_high_pressure-------------------------------
406 // Function to compute whether or not this live range is "high pressure"
407 // in this block - whether it spills eagerly or not.
408 bool PhaseChaitin::is_high_pressure( Block *b, LRG *lrg, uint insidx ) {
409 if( lrg->_was_spilled1 ) return true;
410 // Forced spilling due to conflict? Then split only at binding uses
411 // or defs, not for supposed capacity problems.
412 // CNC - Turned off 7/8/99, causes too much spilling
413 // if( lrg->_is_bound ) return false;
415 // Use float pressure numbers for vectors.
416 bool is_float_or_vector = lrg->_is_float || lrg->_is_vector;
417 // Not yet reached the high-pressure cutoff point, so low pressure
418 uint hrp_idx = is_float_or_vector ? b->_fhrp_index : b->_ihrp_index;
419 if( insidx < hrp_idx ) return false;
420 // Register pressure for the block as a whole depends on reg class
421 int block_pres = is_float_or_vector ? b->_freg_pressure : b->_reg_pressure;
422 // Bound live ranges will split at the binding points first;
423 // Intermediate splits should assume the live range's register set
424 // got "freed up" and that num_regs will become INT_PRESSURE.
425 int bound_pres = is_float_or_vector ? FLOATPRESSURE : INTPRESSURE;
426 // Effective register pressure limit.
427 int lrg_pres = (lrg->get_invalid_mask_size() > lrg->num_regs())
428 ? (lrg->get_invalid_mask_size() >> (lrg->num_regs()-1)) : bound_pres;
429 // High pressure if block pressure requires more register freedom
430 // than live range has.
431 return block_pres >= lrg_pres;
432 }
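// For example (hypothetical numbers): a live range with num_regs() == 2 and
// get_invalid_mask_size() == 6 gets an effective limit of 6 >> (2-1) == 3,
// so any block pressure of 3 or more counts as high pressure for it;
// otherwise the limit falls back to the INTPRESSURE/FLOATPRESSURE bound.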
435 //------------------------------prompt_use---------------------------------
436 // True if lidx is used before any real register is def'd in the block
437 bool PhaseChaitin::prompt_use( Block *b, uint lidx ) {
438 if (lrgs(lidx)._was_spilled2) {
439 return false;
440 }
442 // Scan block for 1st use.
443 for( uint i = 1; i <= b->end_idx(); i++ ) {
444 Node *n = b->get_node(i);
445 // Ignore PHI use, these can be up or down
446 if (n->is_Phi()) {
447 continue;
448 }
449 for (uint j = 1; j < n->req(); j++) {
450 if (_lrg_map.find_id(n->in(j)) == lidx) {
451 return true; // Found 1st use!
452 }
453 }
454 if (n->out_RegMask().is_NotEmpty()) {
455 return false;
456 }
457 }
458 return false;
459 }
461 //------------------------------Split--------------------------------------
462 //----------Split Routine----------
463 // ***** NEW SPLITTING HEURISTIC *****
464 // DEFS: If the DEF is in a High Register Pressure(HRP) Block, split there.
465 // Else, no split unless there is a HRP block between a DEF and
466 // one of its uses, and then split at the HRP block.
467 //
468 // USES: If USE is in HRP, split at use to leave main LRG on stack.
469 // Else, hoist LRG back up to register only (ie - split is also DEF)
470 // We will compute a new maxlrg as we go
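// Roadmap of the passes below: PASS 1 walks the blocks in RPO, propagates
// reaching defs and inserts SpillCopy and Phi nodes; PASS 2 assigns new live
// range numbers to the collected DEFs and patches or splits the inserted
// Phis per predecessor; PASS 3 unions Phi inputs (and two-address
// input/output pairs) back into common live ranges.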
471 uint PhaseChaitin::Split(uint maxlrg, ResourceArea* split_arena) {
472 NOT_PRODUCT( Compile::TracePhase t3("regAllocSplit", &_t_regAllocSplit, TimeCompiler); )
474 // Free thread local resources used by this method on exit.
475 ResourceMark rm(split_arena);
477 uint bidx, pidx, slidx, insidx, inpidx, twoidx;
478 uint non_phi = 1, spill_cnt = 0;
479 Node **Reachblock;
480 Node *n1, *n2, *n3;
481 Node_List *defs,*phis;
482 bool *UPblock;
483 bool u1, u2, u3;
484 Block *b, *pred;
485 PhiNode *phi;
486 GrowableArray<uint> lidxs(split_arena, maxlrg, 0, 0);
488 // Array of counters to count splits per live range
489 GrowableArray<uint> splits(split_arena, maxlrg, 0, 0);
491 #define NEW_SPLIT_ARRAY(type, size)\
492 (type*) split_arena->allocate_bytes((size) * sizeof(type))
494 //----------Setup Code----------
495 // Create a convenient mapping from lrg numbers to reaches/leaves indices
496 uint *lrg2reach = NEW_SPLIT_ARRAY(uint, maxlrg);
497 // Keep track of DEFS & Phis for later passes
498 defs = new Node_List();
499 phis = new Node_List();
500 // Gather info on which LRG's are spilling, and build maps
501 for (bidx = 1; bidx < maxlrg; bidx++) {
502 if (lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG) {
503 assert(!lrgs(bidx).mask().is_AllStack(),"AllStack should color");
504 lrg2reach[bidx] = spill_cnt;
505 spill_cnt++;
506 lidxs.append(bidx);
507 #ifdef ASSERT
508 // Initialize the split counts to zero
509 splits.append(0);
510 #endif
511 #ifndef PRODUCT
512 if( PrintOpto && WizardMode && lrgs(bidx)._was_spilled1 )
513 tty->print_cr("Warning, 2nd spill of L%d",bidx);
514 #endif
515 }
516 }
518 // Create side arrays for propagating reaching defs info.
519 // Each block needs a node pointer for each spilling live range for the
520 // Def which is live into the block. Phi nodes handle multiple input
521 // Defs by querying the output of their predecessor blocks and resolving
522 // them to a single Def at the phi. The pointer is updated for each
523 // Def in the block, and then becomes the output for the block when
524 // processing of the block is complete. We also need to track whether
525 // a Def is UP or DOWN. UP means that it should get a register (ie -
526 // it is always in LRP regions), and DOWN means that it is probably
527 // on the stack (ie - it crosses HRP regions).
528 Node ***Reaches = NEW_SPLIT_ARRAY( Node**, _cfg.number_of_blocks() + 1);
529 bool **UP = NEW_SPLIT_ARRAY( bool*, _cfg.number_of_blocks() + 1);
530 Node **debug_defs = NEW_SPLIT_ARRAY( Node*, spill_cnt );
531 VectorSet **UP_entry= NEW_SPLIT_ARRAY( VectorSet*, spill_cnt );
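// Note on indexing: Reaches and UP are indexed first by a block's _pre_order
// number and then by the dense spill index, so the reaching def of live
// range lidx in block b is Reaches[b->_pre_order][lrg2reach[lidx]], and
// UP[b->_pre_order][lrg2reach[lidx]] records whether it is currently in a
// register (UP) or on the stack (DOWN).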
533 // Initialize Reaches & UP
534 for (bidx = 0; bidx < _cfg.number_of_blocks() + 1; bidx++) {
535 Reaches[bidx] = NEW_SPLIT_ARRAY( Node*, spill_cnt );
536 UP[bidx] = NEW_SPLIT_ARRAY( bool, spill_cnt );
537 Node **Reachblock = Reaches[bidx];
538 bool *UPblock = UP[bidx];
539 for( slidx = 0; slidx < spill_cnt; slidx++ ) {
540 UPblock[slidx] = true; // Assume they start in registers
541 Reachblock[slidx] = NULL; // Assume that no def is present
542 }
543 }
545 #undef NEW_SPLIT_ARRAY
547 // Initialize to array of empty vectorsets
548 for( slidx = 0; slidx < spill_cnt; slidx++ )
549 UP_entry[slidx] = new VectorSet(split_arena);
551 //----------PASS 1----------
552 //----------Propagation & Node Insertion Code----------
553 // Walk the Blocks in RPO for DEF & USE info
554 for( bidx = 0; bidx < _cfg.number_of_blocks(); bidx++ ) {
556 if (C->check_node_count(spill_cnt, out_of_nodes)) {
557 return 0;
558 }
560 b = _cfg.get_block(bidx);
561 // Reaches & UP arrays for this block
562 Reachblock = Reaches[b->_pre_order];
563 UPblock = UP[b->_pre_order];
564 // Reset counter of start of non-Phi nodes in block
565 non_phi = 1;
566 //----------Block Entry Handling----------
567 // Check for need to insert a new phi
568 // Cycle through this block's predecessors, collecting Reaches
569 // info for each spilled LRG. If they are identical, no phi is
570 // needed. If they differ, check for a phi, and insert if missing,
571 // or update edges if present. Set current block's Reaches set to
572 // be either the phi's or the reaching def, as appropriate.
573 // If no Phi is needed, check if the LRG needs to spill on entry
574 // to the block due to HRP.
575 for( slidx = 0; slidx < spill_cnt; slidx++ ) {
576 // Grab the live range number
577 uint lidx = lidxs.at(slidx);
578 // Do not bother splitting or putting in Phis for single-def
579 // rematerialized live ranges. This happens a lot to constants
580 // with long live ranges.
581 if( lrgs(lidx).is_singledef() &&
582 lrgs(lidx)._def->rematerialize() ) {
583 // reset the Reaches & UP entries
584 Reachblock[slidx] = lrgs(lidx)._def;
585 UPblock[slidx] = true;
586 // Record following instruction in case 'n' rematerializes and
587 // kills flags
588 Block *pred1 = _cfg.get_block_for_node(b->pred(1));
589 continue;
590 }
592 // Initialize needs_phi and needs_split
593 bool needs_phi = false;
594 bool needs_split = false;
595 bool has_phi = false;
596 // Walk the predecessor blocks to check inputs for that live range
597 // Grab predecessor block header
598 n1 = b->pred(1);
599 // Grab the appropriate reaching def info for inpidx
600 pred = _cfg.get_block_for_node(n1);
601 pidx = pred->_pre_order;
602 Node **Ltmp = Reaches[pidx];
603 bool *Utmp = UP[pidx];
604 n1 = Ltmp[slidx];
605 u1 = Utmp[slidx];
606 // Initialize node for saving type info
607 n3 = n1;
608 u3 = u1;
610 // Compare inputs to see if a Phi is needed
611 for( inpidx = 2; inpidx < b->num_preds(); inpidx++ ) {
612 // Grab predecessor block headers
613 n2 = b->pred(inpidx);
614 // Grab the appropriate reaching def info for inpidx
615 pred = _cfg.get_block_for_node(n2);
616 pidx = pred->_pre_order;
617 Ltmp = Reaches[pidx];
618 Utmp = UP[pidx];
619 n2 = Ltmp[slidx];
620 u2 = Utmp[slidx];
621 // For each LRG, decide if a phi is necessary
622 if( n1 != n2 ) {
623 needs_phi = true;
624 }
625 // See if the phi has mismatched inputs, UP vs. DOWN
626 if( n1 && n2 && (u1 != u2) ) {
627 needs_split = true;
628 }
629 // Move n2/u2 to n1/u1 for next iteration
630 n1 = n2;
631 u1 = u2;
632 // Preserve a non-NULL predecessor for later type referencing
633 if( (n3 == NULL) && (n2 != NULL) ){
634 n3 = n2;
635 u3 = u2;
636 }
637 } // End for all potential Phi inputs
639 // check block for appropriate phinode & update edges
640 for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
641 n1 = b->get_node(insidx);
642 // bail if this is not a phi
643 phi = n1->is_Phi() ? n1->as_Phi() : NULL;
644 if( phi == NULL ) {
645 // Keep track of index of first non-PhiNode instruction in block
646 non_phi = insidx;
647 // break out of the for loop as we have handled all phi nodes
648 break;
649 }
650 // must be looking at a phi
651 if (_lrg_map.find_id(n1) == lidxs.at(slidx)) {
652 // found the necessary phi
653 needs_phi = false;
654 has_phi = true;
655 // initialize the Reaches entry for this LRG
656 Reachblock[slidx] = phi;
657 break;
658 } // end if found correct phi
659 } // end for all phi's
661 // If a phi is needed or already exists, handle it
662 if( needs_phi || has_phi ) {
663 // add new phinode if one not already found
664 if( needs_phi ) {
665 // create a new phi node and insert it into the block
666 // type is taken from left over pointer to a predecessor
667 assert(n3,"No non-NULL reaching DEF for a Phi");
668 phi = new (C) PhiNode(b->head(), n3->bottom_type());
669 // initialize the Reaches entry for this LRG
670 Reachblock[slidx] = phi;
672 // add node to block & node_to_block mapping
673 insert_proj(b, insidx++, phi, maxlrg++);
674 non_phi++;
675 // Reset new phi's mapping to be the spilling live range
676 _lrg_map.map(phi->_idx, lidx);
677 assert(_lrg_map.find_id(phi) == lidx, "Bad update on Union-Find mapping");
678 } // end if not found correct phi
679 // Here you have either found or created the Phi, so record it
680 assert(phi != NULL,"Must have a Phi Node here");
681 phis->push(phi);
682 // PhiNodes should either force the LRG UP or DOWN depending
683 // on its inputs and the register pressure in the Phi's block.
684 UPblock[slidx] = true; // Assume new DEF is UP
685 // If entering a high-pressure area with no immediate use,
686 // assume Phi is DOWN
687 if( is_high_pressure( b, &lrgs(lidx), b->end_idx()) && !prompt_use(b,lidx) )
688 UPblock[slidx] = false;
689 // If we are not split up/down and all inputs are down, then we
690 // are down
691 if( !needs_split && !u3 )
692 UPblock[slidx] = false;
693 } // end if phi is needed
695 // Do not need a phi, so grab the reaching DEF
696 else {
697 // Grab predecessor block header
698 n1 = b->pred(1);
699 // Grab the appropriate reaching def info for k
700 pred = _cfg.get_block_for_node(n1);
701 pidx = pred->_pre_order;
702 Node **Ltmp = Reaches[pidx];
703 bool *Utmp = UP[pidx];
704 // reset the Reaches & UP entries
705 Reachblock[slidx] = Ltmp[slidx];
706 UPblock[slidx] = Utmp[slidx];
707 } // end else no Phi is needed
708 } // end for all spilling live ranges
709 // DEBUG
710 #ifndef PRODUCT
711 if(trace_spilling()) {
712 tty->print("/`\nBlock %d: ", b->_pre_order);
713 tty->print("Reaching Definitions after Phi handling\n");
714 for( uint x = 0; x < spill_cnt; x++ ) {
715 tty->print("Spill Idx %d: UP %d: Node\n",x,UPblock[x]);
716 if( Reachblock[x] )
717 Reachblock[x]->dump();
718 else
719 tty->print("Undefined\n");
720 }
721 }
722 #endif
724 //----------Non-Phi Node Splitting----------
725 // Since phi-nodes have now been handled, the Reachblock array for this
726 // block is initialized with the correct starting value for the defs which
727 // reach non-phi instructions in this block. Thus, process non-phi
728 // instructions normally, inserting SpillCopy nodes for all spill
729 // locations.
731 // Memoize any DOWN reaching definitions for use as DEBUG info
732 for( insidx = 0; insidx < spill_cnt; insidx++ ) {
733 debug_defs[insidx] = (UPblock[insidx]) ? NULL : Reachblock[insidx];
734 if( UPblock[insidx] ) // Memoize UP decision at block start
735 UP_entry[insidx]->set( b->_pre_order );
736 }
738 //----------Walk Instructions in the Block and Split----------
739 // For all non-phi instructions in the block
740 for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
741 Node *n = b->get_node(insidx);
742 // Find the defining Node's live range index
743 uint defidx = _lrg_map.find_id(n);
744 uint cnt = n->req();
746 if (n->is_Phi()) {
747 // Skip phi nodes after removing dead copies.
748 if (defidx < _lrg_map.max_lrg_id()) {
749 // Check for useless Phis. These appear if we spill, then
750 // coalesce away copies. Don't touch Phis in spilling live
751 // ranges; they are busy getting modified in this pass.
752 if( lrgs(defidx).reg() < LRG::SPILL_REG ) {
753 uint i;
754 Node *u = NULL;
755 // Look for the Phi merging 2 unique inputs
756 for( i = 1; i < cnt; i++ ) {
757 // Ignore repeats and self
758 if( n->in(i) != u && n->in(i) != n ) {
759 // Found a unique input
760 if( u != NULL ) // If it's the 2nd, bail out
761 break;
762 u = n->in(i); // Else record it
763 }
764 }
765 assert( u, "at least 1 valid input expected" );
766 if (i >= cnt) { // Found one unique input
767 assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg");
768 n->replace_by(u); // Then replace with unique input
769 n->disconnect_inputs(NULL, C);
770 b->remove_node(insidx);
771 insidx--;
772 b->_ihrp_index--;
773 b->_fhrp_index--;
774 }
775 }
776 }
777 continue;
778 }
779 assert( insidx > b->_ihrp_index ||
780 (b->_reg_pressure < (uint)INTPRESSURE) ||
781 b->_ihrp_index > 4000000 ||
782 b->_ihrp_index >= b->end_idx() ||
783 !b->get_node(b->_ihrp_index)->is_Proj(), "" );
784 assert( insidx > b->_fhrp_index ||
785 (b->_freg_pressure < (uint)FLOATPRESSURE) ||
786 b->_fhrp_index > 4000000 ||
787 b->_fhrp_index >= b->end_idx() ||
788 !b->get_node(b->_fhrp_index)->is_Proj(), "" );
790 // ********** Handle Crossing HRP Boundary **********
791 if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) {
792 for( slidx = 0; slidx < spill_cnt; slidx++ ) {
793 // Check for need to split at HRP boundary - split if UP
794 n1 = Reachblock[slidx];
795 // bail out if no reaching DEF
796 if( n1 == NULL ) continue;
797 // bail out if live range is 'isolated' around inner loop
798 uint lidx = lidxs.at(slidx);
799 // If live range is currently UP
800 if( UPblock[slidx] ) {
801 // set location to insert spills at
802 // SPLIT DOWN HERE - NO CISC SPILL
803 if( is_high_pressure( b, &lrgs(lidx), insidx ) &&
804 !n1->rematerialize() ) {
805 // If there is already a valid stack definition available, use it
806 if( debug_defs[slidx] != NULL ) {
807 Reachblock[slidx] = debug_defs[slidx];
808 }
809 else {
810 // Insert point is just past last use or def in the block
811 int insert_point = insidx-1;
812 while( insert_point > 0 ) {
813 Node *n = b->get_node(insert_point);
814 // Hit top of block? Quit going backwards
815 if (n->is_Phi()) {
816 break;
817 }
818 // Found a def? Better split after it.
819 if (_lrg_map.live_range_id(n) == lidx) {
820 break;
821 }
822 // Look for a use
823 uint i;
824 for( i = 1; i < n->req(); i++ ) {
825 if (_lrg_map.live_range_id(n->in(i)) == lidx) {
826 break;
827 }
828 }
829 // Found a use? Better split after it.
830 if (i < n->req()) {
831 break;
832 }
833 insert_point--;
834 }
835 uint orig_eidx = b->end_idx();
836 maxlrg = split_DEF( n1, b, insert_point, maxlrg, Reachblock, debug_defs, splits, slidx);
837 // If it wasn't split bail
838 if (!maxlrg) {
839 return 0;
840 }
841 // Spill of NULL check mem op goes into the following block.
842 if (b->end_idx() > orig_eidx) {
843 insidx++;
844 }
845 }
846 // This is a new DEF, so update UP
847 UPblock[slidx] = false;
848 #ifndef PRODUCT
849 // DEBUG
850 if( trace_spilling() ) {
851 tty->print("\nNew Split DOWN DEF of Spill Idx ");
852 tty->print("%d, UP %d:\n",slidx,false);
853 n1->dump();
854 }
855 #endif
856 }
857 } // end if LRG is UP
858 } // end for all spilling live ranges
859 assert( b->get_node(insidx) == n, "got insidx set incorrectly" );
860 } // end if crossing HRP Boundary
862 // If the LRG index is out of bounds, then this is a new spillcopy; skip it.
863 if (defidx >= _lrg_map.max_lrg_id()) {
864 continue;
865 }
866 LRG &deflrg = lrgs(defidx);
867 uint copyidx = n->is_Copy();
868 // Remove coalesced copy from CFG
869 if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) {
870 n->replace_by( n->in(copyidx) );
871 n->set_req( copyidx, NULL );
872 b->remove_node(insidx--);
873 b->_ihrp_index--; // Adjust the point where we go hi-pressure
874 b->_fhrp_index--;
875 continue;
876 }
878 #define DERIVED 0
880 // ********** Handle USES **********
881 bool nullcheck = false;
882 // Implicit null checks never use the spilled value
883 if( n->is_MachNullCheck() )
884 nullcheck = true;
885 if( !nullcheck ) {
886 // Search all inputs for a Spill-USE
887 JVMState* jvms = n->jvms();
888 uint oopoff = jvms ? jvms->oopoff() : cnt;
889 uint old_last = cnt - 1;
890 for( inpidx = 1; inpidx < cnt; inpidx++ ) {
891 // Derived/base pairs may be added to our inputs during this loop.
892 // If inpidx > old_last, then one of these new inputs is being
893 // handled. Skip the derived part of the pair, but process
894 // the base like any other input.
895 if (inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED) {
896 continue; // skip derived_debug added below
897 }
898 // Get lidx of input
899 uint useidx = _lrg_map.find_id(n->in(inpidx));
900 // Not a brand-new split, and it is a spill use
901 if (useidx < _lrg_map.max_lrg_id() && lrgs(useidx).reg() >= LRG::SPILL_REG) {
902 // Check for valid reaching DEF
903 slidx = lrg2reach[useidx];
904 Node *def = Reachblock[slidx];
905 assert( def != NULL, "Using Undefined Value in Split()\n");
907 // (+++) %%%% remove this in favor of pre-pass in matcher.cpp
908 // monitor references do not care where they live, so just hook
909 if ( jvms && jvms->is_monitor_use(inpidx) ) {
910 // The effect of this clone is to drop the node out of the block,
911 // so that the allocator does not see it anymore, and therefore
912 // does not attempt to assign it a register.
913 def = clone_node(def, b, C);
914 if (def == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
915 return 0;
916 }
917 _lrg_map.extend(def->_idx, 0);
918 _cfg.map_node_to_block(def, b);
919 n->set_req(inpidx, def);
920 continue;
921 }
923 // Rematerializable? Then clone def at use site instead
924 // of store/load
925 if( def->rematerialize() ) {
926 int old_size = b->number_of_nodes();
927 def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
928 if( !def ) return 0; // Bail out
929 insidx += b->number_of_nodes()-old_size;
930 }
932 MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
933 // Base pointers and oopmap references do not care where they live.
934 if ((inpidx >= oopoff) ||
935 (mach && mach->ideal_Opcode() == Op_AddP && inpidx == AddPNode::Base)) {
936 if (def->rematerialize() && lrgs(useidx)._was_spilled2) {
937 // This def has been rematerialized a couple of times without
938 // progress. It doesn't care if it lives UP or DOWN, so
939 // spill it down now.
940 maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false,splits,slidx);
941 // If it wasn't split bail
942 if (!maxlrg) {
943 return 0;
944 }
945 insidx++; // Reset iterator to skip USE side split
946 } else {
947 // Just hook the def edge
948 n->set_req(inpidx, def);
949 }
951 if (inpidx >= oopoff) {
952 // After oopoff, we have derived/base pairs. We must mention all
953 // derived pointers here as derived/base pairs for GC. If the
954 // derived value is spilling and we have a copy both in Reachblock
955 // (called here 'def') and debug_defs[slidx] we need to mention
956 // both in derived/base pairs or kill one.
957 Node *derived_debug = debug_defs[slidx];
958 if( ((inpidx - oopoff) & 1) == DERIVED && // derived vs base?
959 mach && mach->ideal_Opcode() != Op_Halt &&
960 derived_debug != NULL &&
961 derived_debug != def ) { // Actual 2nd value appears
962 // We have already set 'def' as a derived value.
963 // Also set debug_defs[slidx] as a derived value.
964 uint k;
965 for( k = oopoff; k < cnt; k += 2 )
966 if( n->in(k) == derived_debug )
967 break; // Found an instance of debug derived
968 if( k == cnt ) {// No instance of debug_defs[slidx]
969 // Add a derived/base pair to cover the debug info.
970 // We have to process the added base later since it is not
971 // handled yet at this point but skip derived part.
972 assert(((n->req() - oopoff) & 1) == DERIVED,
973 "must match skip condition above");
974 n->add_req( derived_debug ); // this will be skipped above
975 n->add_req( n->in(inpidx+1) ); // this will be processed
976 // Increment cnt to handle added input edges on
977 // subsequent iterations.
978 cnt += 2;
979 }
980 }
981 }
982 continue;
983 }
984 // Special logic for DEBUG info
985 if( jvms && b->_freq > BLOCK_FREQUENCY(0.5) ) {
986 uint debug_start = jvms->debug_start();
987 // If this is debug info use & there is a reaching DOWN def
988 if ((debug_start <= inpidx) && (debug_defs[slidx] != NULL)) {
989 assert(inpidx < oopoff, "handle only debug info here");
990 // Just hook it in & move on
991 n->set_req(inpidx, debug_defs[slidx]);
992 // (Note that this can make two sides of a split live at the
993 // same time: The debug def on stack, and another def in a
994 // register. The GC needs to know about both of them, but any
995 // derived pointers after oopoff will refer to only one of the
996 // two defs and the GC would therefore miss the other. Thus
997 // this hack is only allowed for debug info which is Java state
998 // and therefore never a derived pointer.)
999 continue;
1000 }
1001 }
1002 // Grab register mask info
1003 const RegMask &dmask = def->out_RegMask();
1004 const RegMask &umask = n->in_RegMask(inpidx);
1005 bool is_vect = RegMask::is_vector(def->ideal_reg());
1006 assert(inpidx < oopoff, "cannot use-split oop map info");
1008 bool dup = UPblock[slidx];
1009 bool uup = umask.is_UP();
1011 // Need special logic to handle bound USES. Insert a split at this
1012 // bound use if we can't rematerialize the def, or if we need the
1013 // split to form a misaligned pair.
1014 if( !umask.is_AllStack() &&
1015 (int)umask.Size() <= lrgs(useidx).num_regs() &&
1016 (!def->rematerialize() ||
1017 !is_vect && umask.is_misaligned_pair())) {
1018 // These need a Split regardless of overlap or pressure
1019 // SPLIT - NO DEF - NO CISC SPILL
1020 maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
1021 // If it wasn't split bail
1022 if (!maxlrg) {
1023 return 0;
1024 }
1025 insidx++; // Reset iterator to skip USE side split
1026 continue;
1027 }
1029 if (UseFPUForSpilling && n->is_MachCall() && !uup && !dup ) {
1030 // The use at the call can force the def down so insert
1031 // a split before the use to allow the def more freedom.
1032 maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
1033 // If it wasn't split bail
1034 if (!maxlrg) {
1035 return 0;
1036 }
1037 insidx++; // Reset iterator to skip USE side split
1038 continue;
1039 }
1041 // Here is the logic chart which describes USE Splitting:
1042 // 0 = false or DOWN, 1 = true or UP
1043 //
1044 // Overlap | DEF | USE | Action
1045 //-------------------------------------------------------
1046 // 0 | 0 | 0 | Copy - mem -> mem
1047 // 0 | 0 | 1 | Split-UP - Check HRP
1048 // 0 | 1 | 0 | Split-DOWN - Debug Info?
1049 // 0 | 1 | 1 | Copy - reg -> reg
1050 // 1 | 0 | 0 | Reset Input Edge (no Split)
1051 // 1 | 0 | 1 | Split-UP - Check HRP
1052 // 1 | 1 | 0 | Split-DOWN - Debug Info?
1053 // 1 | 1 | 1 | Reset Input Edge (no Split)
1054 //
1055 // So, if (dup == uup), then overlap test determines action,
1056 // with true being no split, and false being copy. Else,
1057 // if DEF is DOWN, Split-UP, and check HRP to decide on
1058 // resetting DEF. Finally if DEF is UP, Split-DOWN, with
1059 // special handling for Debug Info.
1060 if( dup == uup ) {
1061 if( dmask.overlap(umask) ) {
1062 // Both are either up or down, and there is overlap, No Split
1063 n->set_req(inpidx, def);
1064 }
1065 else { // Both are either up or down, and there is no overlap
1066 if( dup ) { // If UP, reg->reg copy
1067 // COPY ACROSS HERE - NO DEF - NO CISC SPILL
1068 maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false, splits,slidx);
1069 // If it wasn't split bail
1070 if (!maxlrg) {
1071 return 0;
1072 }
1073 insidx++; // Reset iterator to skip USE side split
1074 }
1075 else { // DOWN, mem->mem copy
1076 // COPY UP & DOWN HERE - NO DEF - NO CISC SPILL
1077 // First Split-UP to move value into Register
1078 uint def_ideal = def->ideal_reg();
1079 const RegMask* tmp_rm = Matcher::idealreg2regmask[def_ideal];
1080 Node *spill = new (C) MachSpillCopyNode(def, dmask, *tmp_rm);
1081 insert_proj( b, insidx, spill, maxlrg );
1082 // Then Split-DOWN as if previous Split was DEF
1083 maxlrg = split_USE(spill,b,n,inpidx,maxlrg,false,false, splits,slidx);
1084 // If it wasn't split bail
1085 if (!maxlrg) {
1086 return 0;
1087 }
1088 insidx += 2; // Reset iterator to skip USE side splits
1089 }
1090 } // End else no overlap
1091 } // End if dup == uup
1092 // dup != uup, so check dup for direction of Split
1093 else {
1094 if( dup ) { // If UP, Split-DOWN and check Debug Info
1095 // If this node is already a SpillCopy, just patch the edge
1096 // except the case of spilling to stack.
1097 if( n->is_SpillCopy() ) {
1098 RegMask tmp_rm(umask);
1099 tmp_rm.SUBTRACT(Matcher::STACK_ONLY_mask);
1100 if( dmask.overlap(tmp_rm) ) {
1101 if( def != n->in(inpidx) ) {
1102 n->set_req(inpidx, def);
1103 }
1104 continue;
1105 }
1106 }
1107 // COPY DOWN HERE - NO DEF - NO CISC SPILL
1108 maxlrg = split_USE(def,b,n,inpidx,maxlrg,false,false, splits,slidx);
1109 // If it wasn't split bail
1110 if (!maxlrg) {
1111 return 0;
1112 }
1113 insidx++; // Reset iterator to skip USE side split
1114 // Check for debug-info split. Capture it for later
1115 // debug splits of the same value
1116 if (jvms && jvms->debug_start() <= inpidx && inpidx < oopoff)
1117 debug_defs[slidx] = n->in(inpidx);
1119 }
1120 else { // DOWN, Split-UP and check register pressure
1121 if( is_high_pressure( b, &lrgs(useidx), insidx ) ) {
1122 // COPY UP HERE - NO DEF - CISC SPILL
1123 maxlrg = split_USE(def,b,n,inpidx,maxlrg,true,true, splits,slidx);
1124 // If it wasn't split bail
1125 if (!maxlrg) {
1126 return 0;
1127 }
1128 insidx++; // Reset iterator to skip USE side split
1129 } else { // LRP
1130 // COPY UP HERE - WITH DEF - NO CISC SPILL
1131 maxlrg = split_USE(def,b,n,inpidx,maxlrg,true,false, splits,slidx);
1132 // If it wasn't split bail
1133 if (!maxlrg) {
1134 return 0;
1135 }
1136 // Flag this lift-up in a low-pressure block as
1137 // already-spilled, so if it spills again it will
1138 // spill hard (instead of not spilling hard and
1139 // coalescing away).
1140 set_was_spilled(n->in(inpidx));
1141 // Since this is a new DEF, update Reachblock & UP
1142 Reachblock[slidx] = n->in(inpidx);
1143 UPblock[slidx] = true;
1144 insidx++; // Reset iterator to skip USE side split
1145 }
1146 } // End else DOWN
1147 } // End dup != uup
1148 } // End if Spill USE
1149 } // End For All Inputs
1150 } // End If not nullcheck
1152 // ********** Handle DEFS **********
1153 // DEFS either Split DOWN in HRP regions or when the LRG is bound, or
1154 // just reset the Reaches info in LRP regions. DEFS must always update
1155 // UP info.
1156 if( deflrg.reg() >= LRG::SPILL_REG ) { // Spilled?
1157 uint slidx = lrg2reach[defidx];
1158 // Add to defs list for later assignment of new live range number
1159 defs->push(n);
1160 // Set a flag on the Node indicating it has already spilled.
1161 // Only do it for capacity spills not conflict spills.
1162 if( !deflrg._direct_conflict )
1163 set_was_spilled(n);
1164 assert(!n->is_Phi(),"Cannot insert Phi into DEFS list");
1165 // Grab UP info for DEF
1166 const RegMask &dmask = n->out_RegMask();
1167 bool defup = dmask.is_UP();
1168 int ireg = n->ideal_reg();
1169 bool is_vect = RegMask::is_vector(ireg);
1170 // Only split at Def if this is a HRP block or bound (and spilled once)
1171 if( !n->rematerialize() &&
1172 (((dmask.is_bound(ireg) || !is_vect && dmask.is_misaligned_pair()) &&
1173 (deflrg._direct_conflict || deflrg._must_spill)) ||
1174 // Check for LRG being up in a register and we are inside a high
1175 // pressure area. Spill it down immediately.
1176 (defup && is_high_pressure(b,&deflrg,insidx))) ) {
1177 assert( !n->rematerialize(), "" );
1178 assert( !n->is_SpillCopy(), "" );
1179 // Do a split at the def site.
1180 maxlrg = split_DEF( n, b, insidx, maxlrg, Reachblock, debug_defs, splits, slidx );
1181 // If it wasn't split bail
1182 if (!maxlrg) {
1183 return 0;
1184 }
1185 // Split DEF's Down
1186 UPblock[slidx] = 0;
1187 #ifndef PRODUCT
1188 // DEBUG
1189 if( trace_spilling() ) {
1190 tty->print("\nNew Split DOWN DEF of Spill Idx ");
1191 tty->print("%d, UP %d:\n",slidx,false);
1192 n->dump();
1193 }
1194 #endif
1195 }
1196 else { // Neither bound nor HRP, must be LRP
1197 // otherwise, just record the def
1198 Reachblock[slidx] = n;
1199 // UP should come from the out_RegMask() of the DEF
1200 UPblock[slidx] = defup;
1201 // Update debug list of reaching down definitions, kill if DEF is UP
1202 debug_defs[slidx] = defup ? NULL : n;
1203 #ifndef PRODUCT
1204 // DEBUG
1205 if( trace_spilling() ) {
1206 tty->print("\nNew DEF of Spill Idx ");
1207 tty->print("%d, UP %d:\n",slidx,defup);
1208 n->dump();
1209 }
1210 #endif
1211 } // End else LRP
1212 } // End if spill def
1214 // ********** Split Left Over Mem-Mem Moves **********
1215 // Check for mem-mem copies and split them now. Do not do this
1216 // to copies about to be spilled; they will be Split shortly.
1217 if (copyidx) {
1218 Node *use = n->in(copyidx);
1219 uint useidx = _lrg_map.find_id(use);
1220 if (useidx < _lrg_map.max_lrg_id() && // This is not a new split
1221 OptoReg::is_stack(deflrg.reg()) &&
1222 deflrg.reg() < LRG::SPILL_REG ) { // And DEF is from stack
1223 LRG &uselrg = lrgs(useidx);
1224 if( OptoReg::is_stack(uselrg.reg()) &&
1225 uselrg.reg() < LRG::SPILL_REG && // USE is from stack
1226 deflrg.reg() != uselrg.reg() ) { // Not trivially removed
1227 uint def_ideal_reg = n->bottom_type()->ideal_reg();
1228 const RegMask &def_rm = *Matcher::idealreg2regmask[def_ideal_reg];
1229 const RegMask &use_rm = n->in_RegMask(copyidx);
1230 if( def_rm.overlap(use_rm) && n->is_SpillCopy() ) { // Bug 4707800, 'n' may be a storeSSL
1231 if (C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) { // Check when generating nodes
1232 return 0;
1233 }
1234 Node *spill = new (C) MachSpillCopyNode(use,use_rm,def_rm);
1235 n->set_req(copyidx,spill);
1236 n->as_MachSpillCopy()->set_in_RegMask(def_rm);
1237 // Put the spill just before the copy
1238 insert_proj( b, insidx++, spill, maxlrg++ );
1239 }
1240 }
1241 }
1242 }
1243 } // End For All Instructions in Block - Non-PHI Pass
1245 // Check if each LRG is live out of this block so as not to propagate
1246 // beyond the last use of a LRG.
1247 for( slidx = 0; slidx < spill_cnt; slidx++ ) {
1248 uint defidx = lidxs.at(slidx);
1249 IndexSet *liveout = _live->live(b);
1250 if( !liveout->member(defidx) ) {
1251 #ifdef ASSERT
1252 // The index defidx is not live. Check the liveout array to ensure that
1253 // it contains no members which compress to defidx. Finding such an
1254 // instance may be a case to add liveout adjustment in compress_uf_map().
1255 // See 5063219.
1256 uint member;
1257 IndexSetIterator isi(liveout);
1258 while ((member = isi.next()) != 0) {
1259 assert(defidx != _lrg_map.find_const(member), "Live out member has not been compressed");
1260 }
1261 #endif
1262 Reachblock[slidx] = NULL;
1263 } else {
1264 assert(Reachblock[slidx] != NULL,"No reaching definition for liveout value");
1265 }
1266 }
1267 #ifndef PRODUCT
1268 if( trace_spilling() )
1269 b->dump();
1270 #endif
1271 } // End For All Blocks
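// Note: at this point PASS 1 has inserted all SpillCopy and Phi nodes,
// filled the per-block Reaches/UP arrays, and queued the new DEFs and Phis
// on the defs/phis lists for renumbering and patching in the passes below.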
1273 //----------PASS 2----------
1274 // Reset all DEF live range numbers here
1275 for( insidx = 0; insidx < defs->size(); insidx++ ) {
1276 // Grab the def
1277 n1 = defs->at(insidx);
1278 // Set new lidx for DEF
1279 new_lrg(n1, maxlrg++);
1280 }
1281 //----------Phi Node Splitting----------
1282 // Clean up a phi here, and assign a new live range number
1283 // Cycle through this block's predecessors, collecting Reaches
1284 // info for each spilled LRG and update edges.
1285 // Walk the phis list to patch inputs, split phis, and name phis
1286 uint lrgs_before_phi_split = maxlrg;
1287 for( insidx = 0; insidx < phis->size(); insidx++ ) {
1288 Node *phi = phis->at(insidx);
1289 assert(phi->is_Phi(),"This list must only contain Phi Nodes");
1290 Block *b = _cfg.get_block_for_node(phi);
1291 // Grab the live range number
1292 uint lidx = _lrg_map.find_id(phi);
1293 uint slidx = lrg2reach[lidx];
1294 // Update node to lidx map
1295 new_lrg(phi, maxlrg++);
1296 // Get PASS1's up/down decision for the block.
1297 int phi_up = !!UP_entry[slidx]->test(b->_pre_order);
1299 // Force down if double-spilling live range
1300 if( lrgs(lidx)._was_spilled1 )
1301 phi_up = false;
1303 // When splitting a Phi we can split it normally or "inverted".
1304 // An inverted split makes the splits target the Phi's UP/DOWN
1305 // sense inverted; then the Phi is followed by a final def-side
1306 // split to invert back. It changes which blocks the spill code
1307 // goes in.
1309 // Walk the predecessor blocks and assign the reaching def to the Phi.
1310 // Split Phi nodes by placing USE side splits wherever the reaching
1311 // DEF has the wrong UP/DOWN value.
1312 for( uint i = 1; i < b->num_preds(); i++ ) {
1313 // Get predecessor block pre-order number
1314 Block *pred = _cfg.get_block_for_node(b->pred(i));
1315 pidx = pred->_pre_order;
1316 // Grab reaching def
1317 Node *def = Reaches[pidx][slidx];
1318 assert( def, "must have reaching def" );
1319 // If input up/down sense and reg-pressure DISagree
1320 if (def->rematerialize()) {
1321 // Place the rematerialized node above any MSCs created during
1322 // phi node splitting. end_idx points at the insertion point
1323 // so look at the node before it.
1324 int insert = pred->end_idx();
1325 while (insert >= 1 &&
1326 pred->get_node(insert - 1)->is_SpillCopy() &&
1327 _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) {
1328 insert--;
1329 }
1330 // Since the def cannot contain any live range input, we can pass in NULL as the Reachblock parameter
1331 def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, NULL, false);
1332 if (!def) {
1333 return 0; // Bail out
1334 }
1335 }
1336 // Update the Phi's input edge array
1337 phi->set_req(i,def);
1338 // Grab the UP/DOWN sense for the input
1339 u1 = UP[pidx][slidx];
1340 if( u1 != (phi_up != 0)) {
1341 maxlrg = split_USE(def, b, phi, i, maxlrg, !u1, false, splits,slidx);
1342 // If it wasn't split bail
1343 if (!maxlrg) {
1344 return 0;
1345 }
1346 }
1347 } // End for all inputs to the Phi
1348 } // End for all Phi Nodes
1349 // Update _maxlrg to save Union asserts
1350 _lrg_map.set_max_lrg_id(maxlrg);
1353 //----------PASS 3----------
1354 // Pass over all Phi's to union the live ranges
1355 for( insidx = 0; insidx < phis->size(); insidx++ ) {
1356 Node *phi = phis->at(insidx);
1357 assert(phi->is_Phi(),"This list must only contain Phi Nodes");
1358 // Walk all inputs to Phi and Union input live range with Phi live range
1359 for( uint i = 1; i < phi->req(); i++ ) {
1360 // Grab the input node
1361 Node *n = phi->in(i);
1362 assert(n, "node should exist");
1363 uint lidx = _lrg_map.find(n);
1364 uint pidx = _lrg_map.find(phi);
1365 if (lidx < pidx) {
1366 Union(n, phi);
1367 }
1368 else if(lidx > pidx) {
1369 Union(phi, n);
1370 }
1371 } // End for all inputs to the Phi Node
1372 } // End for all Phi Nodes
1373 // Now union all two-address instructions
1374 for (insidx = 0; insidx < defs->size(); insidx++) {
1375 // Grab the def
1376 n1 = defs->at(insidx);
1377 // Set new lidx for DEF & handle 2-addr instructions
1378 if (n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0)) {
1379 assert(_lrg_map.find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index");
1380 // Union the input and output live ranges
1381 uint lr1 = _lrg_map.find(n1);
1382 uint lr2 = _lrg_map.find(n1->in(twoidx));
1383 if (lr1 < lr2) {
1384 Union(n1, n1->in(twoidx));
1385 }
1386 else if (lr1 > lr2) {
1387 Union(n1->in(twoidx), n1);
1388 }
1389 } // End if two address
1390 } // End for all defs
1391 // DEBUG
1392 #ifdef ASSERT
1393 // Validate all live range index assignments
1394 for (bidx = 0; bidx < _cfg.number_of_blocks(); bidx++) {
1395 b = _cfg.get_block(bidx);
1396 for (insidx = 0; insidx <= b->end_idx(); insidx++) {
1397 Node *n = b->get_node(insidx);
1398 uint defidx = _lrg_map.find(n);
1399 assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split");
1400 assert(defidx < maxlrg,"Bad live range index in Split");
1401 }
1402 }
1403 // Issue a warning if splitting made no progress
1404 int noprogress = 0;
1405 for (slidx = 0; slidx < spill_cnt; slidx++) {
1406 if (PrintOpto && WizardMode && splits.at(slidx) == 0) {
1407 tty->print_cr("Failed to split live range %d", lidxs.at(slidx));
1408 //BREAKPOINT;
1409 }
1410 else {
1411 noprogress++;
1412 }
1413 }
1414 if(!noprogress) {
1415 tty->print_cr("Failed to make progress in Split");
1416 //BREAKPOINT;
1417 }
1418 #endif
1419 // Return updated count of live ranges
1420 return maxlrg;
1421 }