Tue, 11 Sep 2012 16:20:57 +0200
7195816: NPG: Crash in c1_ValueType - ShouldNotReachHere
Summary: C1 needs knowledge of T_METADATA at the LIR level.
Reviewed-by: kvn, coleenp
1 /*
2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "compiler/oopMap.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/block.hpp"
31 #include "opto/callnode.hpp"
32 #include "opto/cfgnode.hpp"
33 #include "opto/chaitin.hpp"
34 #include "opto/coalesce.hpp"
35 #include "opto/connode.hpp"
36 #include "opto/idealGraphPrinter.hpp"
37 #include "opto/indexSet.hpp"
38 #include "opto/machnode.hpp"
39 #include "opto/memnode.hpp"
40 #include "opto/opcodes.hpp"
41 #include "opto/rootnode.hpp"
43 //=============================================================================
45 #ifndef PRODUCT
46 void LRG::dump( ) const {
47 ttyLocker ttyl;
48 tty->print("%d ",num_regs());
49 _mask.dump();
50 if( _msize_valid ) {
51 if( mask_size() == compute_mask_size() ) tty->print(", #%d ",_mask_size);
52 else tty->print(", #!!!_%d_vs_%d ",_mask_size,_mask.Size());
53 } else {
54 tty->print(", #?(%d) ",_mask.Size());
55 }
57 tty->print("EffDeg: ");
58 if( _degree_valid ) tty->print( "%d ", _eff_degree );
59 else tty->print("? ");
61 if( is_multidef() ) {
62 tty->print("MultiDef ");
63 if (_defs != NULL) {
64 tty->print("(");
65 for (int i = 0; i < _defs->length(); i++) {
66 tty->print("N%d ", _defs->at(i)->_idx);
67 }
68 tty->print(") ");
69 }
70 }
71 else if( _def == 0 ) tty->print("Dead ");
72 else tty->print("Def: N%d ",_def->_idx);
74 tty->print("Cost:%4.2g Area:%4.2g Score:%4.2g ",_cost,_area, score());
75 // Flags
76 if( _is_oop ) tty->print("Oop ");
77 if( _is_float ) tty->print("Float ");
78 if( _is_vector ) tty->print("Vector ");
79 if( _was_spilled1 ) tty->print("Spilled ");
80 if( _was_spilled2 ) tty->print("Spilled2 ");
81 if( _direct_conflict ) tty->print("Direct_conflict ");
82 if( _fat_proj ) tty->print("Fat ");
83 if( _was_lo ) tty->print("Lo ");
84 if( _has_copy ) tty->print("Copy ");
85 if( _at_risk ) tty->print("Risk ");
87 if( _must_spill ) tty->print("Must_spill ");
88 if( _is_bound ) tty->print("Bound ");
89 if( _msize_valid ) {
90 if( _degree_valid && lo_degree() ) tty->print("Trivial ");
91 }
93 tty->cr();
94 }
95 #endif
97 //------------------------------score------------------------------------------
98 // Compute score from cost and area. Low score is best to spill.
99 static double raw_score( double cost, double area ) {
100 return cost - (area*RegisterCostAreaRatio) * 1.52588e-5;
101 }
103 double LRG::score() const {
104 // Scale _area by RegisterCostAreaRatio/64K then subtract from cost.
105 // Bigger area lowers score, encourages spilling this live range.
106 // Bigger cost raises score, prevents spilling this live range.
107 // (Note: 1/65536 is the magic constant below; I don't trust the C optimizer
108 // to turn a divide by a constant into a multiply by the reciprocal).
109 double score = raw_score( _cost, _area);
111 // Account for area. Basically, LRGs covering large areas are better
112 // to spill because more other LRGs get freed up.
113 if( _area == 0.0 ) // No area? Then no progress to spill
114 return 1e35;
116 if( _was_spilled2 ) // If spilled once before, we are unlikely
117 return score + 1e30; // to make progress again.
119 if( _cost >= _area*3.0 ) // Tiny area relative to cost
120 return score + 1e17; // Probably no progress to spill
122 if( (_cost+_cost) >= _area*3.0 ) // Small area relative to cost
123 return score + 1e10; // Likely no progress to spill
125 return score;
126 }
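// A quick sanity check on the thresholds above (hypothetical numbers, ignoring
// the zero-area and spilled-twice early outs):
//   cost=10, area= 2 :   cost >= 3*area, so the score gets +1e17 (keep in a register)
//   cost= 5, area= 3 : 2*cost >= 3*area, so the score gets +1e10
//   cost=10, area=30 : neither bound hit, plain raw score - the cheapest to spill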
128 //------------------------------LRG_List---------------------------------------
129 LRG_List::LRG_List( uint max ) : _cnt(max), _max(max), _lidxs(NEW_RESOURCE_ARRAY(uint,max)) {
130 memset( _lidxs, 0, sizeof(uint)*max );
131 }
133 void LRG_List::extend( uint nidx, uint lidx ) {
134 _nesting.check();
135 if( nidx >= _max ) {
136 uint size = 16;
137 while( size <= nidx ) size <<=1;
138 _lidxs = REALLOC_RESOURCE_ARRAY( uint, _lidxs, _max, size );
139 _max = size;
140 }
141 while( _cnt <= nidx )
142 _lidxs[_cnt++] = 0;
143 _lidxs[nidx] = lidx;
144 }
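// Growth policy, reading the code above: the new capacity starts at 16 and
// doubles until it exceeds nidx, slots from the old _cnt up to nidx are
// zero-filled (live range 0 means un-allocable), and only then is _lidxs[nidx] set.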
146 #define NUMBUCKS 3
148 //------------------------------Chaitin----------------------------------------
149 PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
150 : PhaseRegAlloc(unique, cfg, matcher,
151 #ifndef PRODUCT
152 print_chaitin_statistics
153 #else
154 NULL
155 #endif
156 ),
157 _names(unique), _uf_map(unique),
158 _maxlrg(0), _live(0),
159 _spilled_once(Thread::current()->resource_area()),
160 _spilled_twice(Thread::current()->resource_area()),
161 _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0),
162 _oldphi(unique)
163 #ifndef PRODUCT
164 , _trace_spilling(TraceSpilling || C->method_has_option("TraceSpilling"))
165 #endif
166 {
167 NOT_PRODUCT( Compile::TracePhase t3("ctorChaitin", &_t_ctorChaitin, TimeCompiler); )
169 _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg._outer_loop_freq);
171 uint i,j;
172 // Build a list of basic blocks, sorted by frequency
173 _blks = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks );
174 // Experiment with sorting strategies to speed compilation
175 double cutoff = BLOCK_FREQUENCY(1.0); // Cutoff for high frequency bucket
176 Block **buckets[NUMBUCKS]; // Array of buckets
177 uint buckcnt[NUMBUCKS]; // Array of bucket counters
178 double buckval[NUMBUCKS]; // Array of bucket value cutoffs
179 for( i = 0; i < NUMBUCKS; i++ ) {
180 buckets[i] = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks );
181 buckcnt[i] = 0;
182 // Bump by three orders of magnitude each time
183 cutoff *= 0.001;
184 buckval[i] = cutoff;
185 for( j = 0; j < _cfg._num_blocks; j++ ) {
186 buckets[i][j] = NULL;
187 }
188 }
189 // Sort blocks into buckets
190 for( i = 0; i < _cfg._num_blocks; i++ ) {
191 for( j = 0; j < NUMBUCKS; j++ ) {
192 if( (j == NUMBUCKS-1) || (_cfg._blocks[i]->_freq > buckval[j]) ) {
193 // Assign block to end of list for appropriate bucket
194 buckets[j][buckcnt[j]++] = _cfg._blocks[i];
195 break; // kick out of inner loop
196 }
197 }
198 }
199 // Dump buckets into final block array
200 uint blkcnt = 0;
201 for( i = 0; i < NUMBUCKS; i++ ) {
202 for( j = 0; j < buckcnt[i]; j++ ) {
203 _blks[blkcnt++] = buckets[i][j];
204 }
205 }
207 assert(blkcnt == _cfg._num_blocks, "Block array not totally filled");
208 }
210 void PhaseChaitin::Register_Allocate() {
212 // Above the OLD FP (and in registers) are the incoming arguments. Stack
213 // slots in this area are called "arg_slots". Above the NEW FP (and in
214 // registers) is the outgoing argument area; above that is the spill/temp
215 // area. These are all "frame_slots". Arg_slots start at the zero
216 // stack_slots and count up to the known arg_size. Frame_slots start at
217 // the stack_slot #arg_size and go up. After allocation I map stack
218 // slots to actual offsets. Stack-slots in the arg_slot area are biased
219 // by the frame_size; stack-slots in the frame_slot area are biased by 0.
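// Concretely (hypothetical sizes): with arg_size == 4, stack_slots 0..3 are
// arg_slots and frame_slots begin at stack_slot 4; when mapped to offsets, an
// arg_slot is biased by the frame_size while a frame_slot is biased by 0.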
221 _trip_cnt = 0;
222 _alternate = 0;
223 _matcher._allocation_started = true;
225 ResourceArea split_arena; // Arena for Split local resources
226 ResourceArea live_arena; // Arena for liveness & IFG info
227 ResourceMark rm(&live_arena);
229 // Need live-ness for the IFG; need the IFG for coalescing. If the
230 // liveness is JUST for coalescing, then I can get some mileage by renaming
231 // all copy-related live ranges low and then using the max copy-related
232 // live range as a cut-off for LIVE and the IFG. In other words, I can
233 // build a subset of LIVE and IFG just for copies.
234 PhaseLive live(_cfg,_names,&live_arena);
236 // Need IFG for coalescing and coloring
237 PhaseIFG ifg( &live_arena );
238 _ifg = &ifg;
240 if (C->unique() > _names.Size()) _names.extend(C->unique()-1, 0);
242 // Come out of SSA world to the Named world. Assign (virtual) registers to
243 // Nodes. Use the same register for all inputs and the output of PhiNodes
244 // - effectively ending SSA form. This requires either coalescing live
245 // ranges or inserting copies. For the moment, we insert "virtual copies"
246 // - we pretend there is a copy prior to each Phi in predecessor blocks.
247 // We will attempt to coalesce such "virtual copies" before we manifest
248 // them for real.
249 de_ssa();
251 #ifdef ASSERT
252 // Verify the graph before RA.
253 verify(&live_arena);
254 #endif
256 {
257 NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
258 _live = NULL; // Mark live as being not available
259 rm.reset_to_mark(); // Reclaim working storage
260 IndexSet::reset_memory(C, &live_arena);
261 ifg.init(_maxlrg); // Empty IFG
262 gather_lrg_masks( false ); // Collect LRG masks
263 live.compute( _maxlrg ); // Compute liveness
264 _live = &live; // Mark LIVE as being available
265 }
267 // Base pointers are currently "used" by instructions which define new
268 // derived pointers. This makes base pointers live up to where the
269 // derived pointer is made, but not beyond. Really, they need to be live
270 // across any GC point where the derived value is live. So this code looks
271 // at all the GC points, and "stretches" the live range of any base pointer
272 // to the GC point.
273 if( stretch_base_pointer_live_ranges(&live_arena) ) {
274 NOT_PRODUCT( Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler); )
275 // Since some live range stretched, I need to recompute live
276 _live = NULL;
277 rm.reset_to_mark(); // Reclaim working storage
278 IndexSet::reset_memory(C, &live_arena);
279 ifg.init(_maxlrg);
280 gather_lrg_masks( false );
281 live.compute( _maxlrg );
282 _live = &live;
283 }
284 // Create the interference graph using virtual copies
285 build_ifg_virtual( ); // Include stack slots this time
287 // Aggressive (but pessimistic) copy coalescing.
288 // This pass works on virtual copies. Any virtual copies which are not
289 // coalesced get manifested as actual copies
290 {
291 // The IFG is/was triangular. I am 'squaring it up' so Union can run
292 // faster. Union requires a 'for all' operation which is slow on the
293 // triangular adjacency matrix (quick reminder: the IFG is 'sparse' -
294 // meaning I can visit all the Nodes neighbors less than a Node in time
295 // O(# of neighbors), but I have to visit all the Nodes greater than a
296 // given Node and search them for an instance, i.e., time O(#MaxLRG)).
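// Put differently: the triangular form records each interference edge only
// under the higher-numbered LRG's adjacency set; SquareUp() mirrors every edge
// so a "for all neighbors" walk is O(degree) from either endpoint.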
297 _ifg->SquareUp();
299 PhaseAggressiveCoalesce coalesce( *this );
300 coalesce.coalesce_driver( );
301 // Insert un-coalesced copies. Visit all Phis. Where inputs to a Phi do
302 // not match the Phi itself, insert a copy.
303 coalesce.insert_copies(_matcher);
304 }
306 // After aggressive coalesce, attempt a first cut at coloring.
307 // To color, we need the IFG and for that we need LIVE.
308 {
309 NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
310 _live = NULL;
311 rm.reset_to_mark(); // Reclaim working storage
312 IndexSet::reset_memory(C, &live_arena);
313 ifg.init(_maxlrg);
314 gather_lrg_masks( true );
315 live.compute( _maxlrg );
316 _live = &live;
317 }
319 // Build physical interference graph
320 uint must_spill = 0;
321 must_spill = build_ifg_physical( &live_arena );
322 // If we have a guaranteed spill, might as well spill now
323 if( must_spill ) {
324 if( !_maxlrg ) return;
325 // Bail out if unique gets too large (ie - unique > MaxNodeLimit)
326 C->check_node_count(10*must_spill, "out of nodes before split");
327 if (C->failing()) return;
328 _maxlrg = Split(_maxlrg, &split_arena); // Split spilling LRG everywhere
329 // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
330 // or we failed to split
331 C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");
332 if (C->failing()) return;
334 NOT_PRODUCT( C->verify_graph_edges(); )
336 compact(); // Compact LRGs; return new lower max lrg
338 {
339 NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
340 _live = NULL;
341 rm.reset_to_mark(); // Reclaim working storage
342 IndexSet::reset_memory(C, &live_arena);
343 ifg.init(_maxlrg); // Build a new interference graph
344 gather_lrg_masks( true ); // Collect intersect mask
345 live.compute( _maxlrg ); // Compute LIVE
346 _live = &live;
347 }
348 build_ifg_physical( &live_arena );
349 _ifg->SquareUp();
350 _ifg->Compute_Effective_Degree();
351 // Only do conservative coalescing if requested
352 if( OptoCoalesce ) {
353 // Conservative (and pessimistic) copy coalescing of those spills
354 PhaseConservativeCoalesce coalesce( *this );
355 // If max live ranges is greater than the cutoff, don't color the stack.
356 // This cutoff can be larger than below since it is only done once.
357 coalesce.coalesce_driver( );
358 }
359 compress_uf_map_for_nodes();
361 #ifdef ASSERT
362 verify(&live_arena, true);
363 #endif
364 } else {
365 ifg.SquareUp();
366 ifg.Compute_Effective_Degree();
367 #ifdef ASSERT
368 set_was_low();
369 #endif
370 }
372 // Prepare for Simplify & Select
373 cache_lrg_info(); // Count degree of LRGs
375 // Simplify the InterFerence Graph by removing LRGs of low degree.
376 // LRGs of low degree are trivially colorable.
377 Simplify();
379 // Select colors by re-inserting LRGs back into the IFG in reverse order.
380 // Return whether or not something spills.
381 uint spills = Select( );
383 // If we spill, split and recycle the entire thing
384 while( spills ) {
385 if( _trip_cnt++ > 24 ) {
386 DEBUG_ONLY( dump_for_spill_split_recycle(); )
387 if( _trip_cnt > 27 ) {
388 C->record_method_not_compilable("failed spill-split-recycle sanity check");
389 return;
390 }
391 }
393 if( !_maxlrg ) return;
394 _maxlrg = Split(_maxlrg, &split_arena); // Split spilling LRG everywhere
395 // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
396 C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after split");
397 if (C->failing()) return;
399 compact(); // Compact LRGs; return new lower max lrg
401 // Nuke the live-ness and interference graph and LiveRanGe info
402 {
403 NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
404 _live = NULL;
405 rm.reset_to_mark(); // Reclaim working storage
406 IndexSet::reset_memory(C, &live_arena);
407 ifg.init(_maxlrg);
409 // Create LiveRanGe array.
410 // Intersect register masks for all USEs and DEFs
411 gather_lrg_masks( true );
412 live.compute( _maxlrg );
413 _live = &live;
414 }
415 must_spill = build_ifg_physical( &live_arena );
416 _ifg->SquareUp();
417 _ifg->Compute_Effective_Degree();
419 // Only do conservative coalescing if requested
420 if( OptoCoalesce ) {
421 // Conservative (and pessimistic) copy coalescing
422 PhaseConservativeCoalesce coalesce( *this );
423 // Checking for few live ranges determines how aggressive coalescing is.
424 coalesce.coalesce_driver( );
425 }
426 compress_uf_map_for_nodes();
427 #ifdef ASSERT
428 verify(&live_arena, true);
429 #endif
430 cache_lrg_info(); // Count degree of LRGs
432 // Simplify the InterFerence Graph by removing LRGs of low degree.
433 // LRGs of low degree are trivially colorable.
434 Simplify();
436 // Select colors by re-inserting LRGs back into the IFG in reverse order.
437 // Return whether or not something spills.
438 spills = Select( );
439 }
441 // Count number of Simplify-Select trips per coloring success.
442 _allocator_attempts += _trip_cnt + 1;
443 _allocator_successes += 1;
445 // Peephole remove copies
446 post_allocate_copy_removal();
448 #ifdef ASSERT
449 // Verify the graph after RA.
450 verify(&live_arena);
451 #endif
453 // max_reg is past the largest *register* used.
454 // Convert that to a frame_slot number.
455 if( _max_reg <= _matcher._new_SP )
456 _framesize = C->out_preserve_stack_slots();
457 else _framesize = _max_reg -_matcher._new_SP;
458 assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");
460 // This frame must preserve the required fp alignment
461 _framesize = round_to(_framesize, Matcher::stack_alignment_in_slots());
462 assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" );
463 #ifndef PRODUCT
464 _total_framesize += _framesize;
465 if( (int)_framesize > _max_framesize )
466 _max_framesize = _framesize;
467 #endif
469 // Convert CISC spills
470 fixup_spills();
472 // Log regalloc results
473 CompileLog* log = Compile::current()->log();
474 if (log != NULL) {
475 log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing());
476 }
478 if (C->failing()) return;
480 NOT_PRODUCT( C->verify_graph_edges(); )
482 // Move important info out of the live_arena to longer lasting storage.
483 alloc_node_regs(_names.Size());
484 for (uint i=0; i < _names.Size(); i++) {
485 if (_names[i]) { // Live range associated with Node?
486 LRG &lrg = lrgs(_names[i]);
487 if (!lrg.alive()) {
488 set_bad(i);
489 } else if (lrg.num_regs() == 1) {
490 set1(i, lrg.reg());
491 } else { // Must be a register-set
492 if (!lrg._fat_proj) { // Must be aligned adjacent register set
493 // Live ranges record the highest register in their mask.
494 // We want the low register for the AD file writer's convenience.
495 OptoReg::Name hi = lrg.reg(); // Get hi register
496 OptoReg::Name lo = OptoReg::add(hi, (1-lrg.num_regs())); // Find lo
497 // We have to use pair [lo,lo+1] even for wide vectors because
498 // the rest of code generation works only with pairs. It is safe
499 // since for registers encoding only 'lo' is used.
500 // Second reg from pair is used in ScheduleAndBundle on SPARC where
501 // vector max size is 8 which corresponds to registers pair.
502 // It is also used in BuildOopMaps but oop operations are not
503 // vectorized.
504 set2(i, lo);
505 } else { // Misaligned; extract 2 bits
506 OptoReg::Name hi = lrg.reg(); // Get hi register
507 lrg.Remove(hi); // Yank from mask
508 int lo = lrg.mask().find_first_elem(); // Find lo
509 set_pair(i, hi, lo);
510 }
511 }
512 if( lrg._is_oop ) _node_oops.set(i);
513 } else {
514 set_bad(i);
515 }
516 }
518 // Done!
519 _live = NULL;
520 _ifg = NULL;
521 C->set_indexSet_arena(NULL); // ResourceArea is at end of scope
522 }
524 //------------------------------de_ssa-----------------------------------------
525 void PhaseChaitin::de_ssa() {
526 // Set initial Names for all Nodes. Most Nodes get the virtual register
527 // number. A few get the ZERO live range number. These do not
528 // get allocated, but instead rely on correct scheduling to ensure that
529 // only one instance is simultaneously live at a time.
530 uint lr_counter = 1;
531 for( uint i = 0; i < _cfg._num_blocks; i++ ) {
532 Block *b = _cfg._blocks[i];
533 uint cnt = b->_nodes.size();
535 // Handle all the normal Nodes in the block
536 for( uint j = 0; j < cnt; j++ ) {
537 Node *n = b->_nodes[j];
538 // Pre-color to the zero live range, or pick virtual register
539 const RegMask &rm = n->out_RegMask();
540 _names.map( n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0 );
541 }
542 }
543 // Reset the Union-Find mapping to be identity
544 reset_uf_map(lr_counter);
545 }
548 //------------------------------gather_lrg_masks-------------------------------
549 // Gather LiveRanGe information, including register masks. Modification of
550 // cisc spillable in_RegMasks should not be done before AggressiveCoalesce.
551 void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
553 // Nail down the frame pointer live range
554 uint fp_lrg = n2lidx(_cfg._root->in(1)->in(TypeFunc::FramePtr));
555 lrgs(fp_lrg)._cost += 1e12; // Cost is infinite
557 // For all blocks
558 for( uint i = 0; i < _cfg._num_blocks; i++ ) {
559 Block *b = _cfg._blocks[i];
561 // For all instructions
562 for( uint j = 1; j < b->_nodes.size(); j++ ) {
563 Node *n = b->_nodes[j];
564 uint input_edge_start = 1; // Skip the control edge for most nodes
565 if( n->is_Mach() ) input_edge_start = n->as_Mach()->oper_input_base();
566 uint idx = n->is_Copy();
568 // Get virtual register number, same as LiveRanGe index
569 uint vreg = n2lidx(n);
570 LRG &lrg = lrgs(vreg);
571 if( vreg ) { // No vreg means un-allocable (e.g. memory)
573 // Collect has-copy bit
574 if( idx ) {
575 lrg._has_copy = 1;
576 uint clidx = n2lidx(n->in(idx));
577 LRG &copy_src = lrgs(clidx);
578 copy_src._has_copy = 1;
579 }
581 // Check for float-vs-int live range (used in register-pressure
582 // calculations)
583 const Type *n_type = n->bottom_type();
584 if (n_type->is_floatingpoint())
585 lrg._is_float = 1;
587 // Check for twice prior spilling. Once prior spilling might have
588 // spilled 'soft', 2nd prior spill should have spilled 'hard' and
589 // further spilling is unlikely to make progress.
590 if( _spilled_once.test(n->_idx) ) {
591 lrg._was_spilled1 = 1;
592 if( _spilled_twice.test(n->_idx) )
593 lrg._was_spilled2 = 1;
594 }
596 #ifndef PRODUCT
597 if (trace_spilling() && lrg._def != NULL) {
598 // collect defs for MultiDef printing
599 if (lrg._defs == NULL) {
600 lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
601 lrg._defs->append(lrg._def);
602 }
603 lrg._defs->append(n);
604 }
605 #endif
607 // Check for a single def LRG; these can spill nicely
608 // via rematerialization. Flag as NULL for no def found
609 // yet, 'n' for a single def, or NodeSentinel (-1) for many defs.
610 lrg._def = lrg._def ? NodeSentinel : n;
612 // Limit result register mask to acceptable registers
613 const RegMask &rm = n->out_RegMask();
614 lrg.AND( rm );
616 int ireg = n->ideal_reg();
617 assert( !n->bottom_type()->isa_oop_ptr() || ireg == Op_RegP,
618 "oops must be in Op_RegP's" );
620 // Check for vector live range (only if vector register is used).
621 // On SPARC a vector uses RegD, which could be misaligned, so it is not
622 // processed as a vector in RA.
623 if (RegMask::is_vector(ireg))
624 lrg._is_vector = 1;
625 assert(n_type->isa_vect() == NULL || lrg._is_vector || ireg == Op_RegD,
626 "vector must be in vector registers");
628 // Check for bound register masks
629 const RegMask &lrgmask = lrg.mask();
630 if (lrgmask.is_bound(ireg))
631 lrg._is_bound = 1;
633 // Check for maximum frequency value
634 if (lrg._maxfreq < b->_freq)
635 lrg._maxfreq = b->_freq;
637 // Check for oop-iness, or long/double
638 // Check for multi-kill projection
639 switch( ireg ) {
640 case MachProjNode::fat_proj:
641 // Fat projections have size equal to number of registers killed
642 lrg.set_num_regs(rm.Size());
643 lrg.set_reg_pressure(lrg.num_regs());
644 lrg._fat_proj = 1;
645 lrg._is_bound = 1;
646 break;
647 case Op_RegP:
648 #ifdef _LP64
649 lrg.set_num_regs(2); // Size is 2 stack words
650 #else
651 lrg.set_num_regs(1); // Size is 1 stack word
652 #endif
653 // Register pressure is tracked relative to the maximum values
654 // suggested for that platform, INTPRESSURE and FLOATPRESSURE,
655 // and relative to other types which compete for the same regs.
656 //
657 // The following table contains suggested values based on the
658 // architectures as defined in each .ad file.
659 // INTPRESSURE and FLOATPRESSURE may be tuned differently for
660 // compile-speed or performance.
661 // Note1:
662 // SPARC and SPARCV9 reg_pressures are at 2 instead of 1
663 // since .ad registers are defined as high and low halves.
664 // These reg_pressure values remain compatible with the code
665 // in is_high_pressure() which relates get_invalid_mask_size(),
666 // Block::_reg_pressure and INTPRESSURE, FLOATPRESSURE.
667 // Note2:
668 // SPARC -d32 has 24 registers available for integral values,
669 // but only 10 of these are safe for 64-bit longs.
670 // Using set_reg_pressure(2) for both int and long means
671 // the allocator will believe it can fit 26 longs into
672 // registers. Using 2 for longs and 1 for ints means the
673 // allocator will attempt to put 52 integers into registers.
674 // The settings below limit this problem to methods with
675 // many long values which are being run on 32-bit SPARC.
676 //
677 // ------------------- reg_pressure --------------------
678 // Each entry is reg_pressure_per_value,number_of_regs
679 // RegL RegI RegFlags RegF RegD INTPRESSURE FLOATPRESSURE
680 // IA32 2 1 1 1 1 6 6
681 // IA64 1 1 1 1 1 50 41
682 // SPARC 2 2 2 2 2 48 (24) 52 (26)
683 // SPARCV9 2 2 2 2 2 48 (24) 52 (26)
684 // AMD64 1 1 1 1 1 14 15
685 // -----------------------------------------------------
686 #if defined(SPARC)
687 lrg.set_reg_pressure(2); // use for v9 as well
688 #else
689 lrg.set_reg_pressure(1); // normally one value per register
690 #endif
691 if( n_type->isa_oop_ptr() ) {
692 lrg._is_oop = 1;
693 }
694 break;
695 case Op_RegL: // Check for long or double
696 case Op_RegD:
697 lrg.set_num_regs(2);
698 // Define platform specific register pressure
699 #if defined(SPARC) || defined(ARM)
700 lrg.set_reg_pressure(2);
701 #elif defined(IA32)
702 if( ireg == Op_RegL ) {
703 lrg.set_reg_pressure(2);
704 } else {
705 lrg.set_reg_pressure(1);
706 }
707 #else
708 lrg.set_reg_pressure(1); // normally one value per register
709 #endif
710 // If this def of a double forces a mis-aligned double,
711 // flag as '_fat_proj' - really flag as allowing misalignment
712 // AND changes how we count interferences. A mis-aligned
713 // double can interfere with TWO aligned pairs, or effectively
714 // FOUR registers!
715 if (rm.is_misaligned_pair()) {
716 lrg._fat_proj = 1;
717 lrg._is_bound = 1;
718 }
719 break;
720 case Op_RegF:
721 case Op_RegI:
722 case Op_RegN:
723 case Op_RegFlags:
724 case 0: // not an ideal register
725 lrg.set_num_regs(1);
726 #ifdef SPARC
727 lrg.set_reg_pressure(2);
728 #else
729 lrg.set_reg_pressure(1);
730 #endif
731 break;
732 case Op_VecS:
733 assert(Matcher::vector_size_supported(T_BYTE,4), "sanity");
734 assert(RegMask::num_registers(Op_VecS) == RegMask::SlotsPerVecS, "sanity");
735 lrg.set_num_regs(RegMask::SlotsPerVecS);
736 lrg.set_reg_pressure(1);
737 break;
738 case Op_VecD:
739 assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecD), "sanity");
740 assert(RegMask::num_registers(Op_VecD) == RegMask::SlotsPerVecD, "sanity");
741 assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecD), "vector should be aligned");
742 lrg.set_num_regs(RegMask::SlotsPerVecD);
743 lrg.set_reg_pressure(1);
744 break;
745 case Op_VecX:
746 assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecX), "sanity");
747 assert(RegMask::num_registers(Op_VecX) == RegMask::SlotsPerVecX, "sanity");
748 assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecX), "vector should be aligned");
749 lrg.set_num_regs(RegMask::SlotsPerVecX);
750 lrg.set_reg_pressure(1);
751 break;
752 case Op_VecY:
753 assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecY), "sanity");
754 assert(RegMask::num_registers(Op_VecY) == RegMask::SlotsPerVecY, "sanity");
755 assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecY), "vector should be aligned");
756 lrg.set_num_regs(RegMask::SlotsPerVecY);
757 lrg.set_reg_pressure(1);
758 break;
759 default:
760 ShouldNotReachHere();
761 }
762 }
764 // Now do the same for inputs
765 uint cnt = n->req();
766 // Setup for CISC SPILLING
767 uint inp = (uint)AdlcVMDeps::Not_cisc_spillable;
768 if( UseCISCSpill && after_aggressive ) {
769 inp = n->cisc_operand();
770 if( inp != (uint)AdlcVMDeps::Not_cisc_spillable )
771 // Convert operand number to edge index number
772 inp = n->as_Mach()->operand_index(inp);
773 }
774 // Prepare register mask for each input
775 for( uint k = input_edge_start; k < cnt; k++ ) {
776 uint vreg = n2lidx(n->in(k));
777 if( !vreg ) continue;
779 // If this instruction is CISC Spillable, add the flags
780 // bit to its appropriate input
781 if( UseCISCSpill && after_aggressive && inp == k ) {
782 #ifndef PRODUCT
783 if( TraceCISCSpill ) {
784 tty->print(" use_cisc_RegMask: ");
785 n->dump();
786 }
787 #endif
788 n->as_Mach()->use_cisc_RegMask();
789 }
791 LRG &lrg = lrgs(vreg);
792 // // Testing for floating point code shape
793 // Node *test = n->in(k);
794 // if( test->is_Mach() ) {
795 // MachNode *m = test->as_Mach();
796 // int op = m->ideal_Opcode();
797 // if (n->is_Call() && (op == Op_AddF || op == Op_MulF) ) {
798 // int zzz = 1;
799 // }
800 // }
802 // Limit result register mask to acceptable registers.
803 // Do not limit registers from uncommon uses before
804 // AggressiveCoalesce. This effectively pre-virtual-splits
805 // around uncommon uses of common defs.
806 const RegMask &rm = n->in_RegMask(k);
807 if( !after_aggressive &&
808 _cfg._bbs[n->in(k)->_idx]->_freq > 1000*b->_freq ) {
809 // Since we are BEFORE aggressive coalesce, leave the register
810 // mask untrimmed by the call. This encourages more coalescing.
811 // Later, AFTER aggressive, this live range will have to spill
812 // but the spiller handles slow-path calls very nicely.
813 } else {
814 lrg.AND( rm );
815 }
817 // Check for bound register masks
818 const RegMask &lrgmask = lrg.mask();
819 int kreg = n->in(k)->ideal_reg();
820 bool is_vect = RegMask::is_vector(kreg);
821 assert(n->in(k)->bottom_type()->isa_vect() == NULL ||
822 is_vect || kreg == Op_RegD,
823 "vector must be in vector registers");
824 if (lrgmask.is_bound(kreg))
825 lrg._is_bound = 1;
827 // If this use of a double forces a mis-aligned double,
828 // flag as '_fat_proj' - really flag as allowing misalignment
829 // AND changes how we count interferences. A mis-aligned
830 // double can interfere with TWO aligned pairs, or effectively
831 // FOUR registers!
832 #ifdef ASSERT
833 if (is_vect) {
834 assert(lrgmask.is_aligned_sets(lrg.num_regs()), "vector should be aligned");
835 assert(!lrg._fat_proj, "sanity");
836 assert(RegMask::num_registers(kreg) == lrg.num_regs(), "sanity");
837 }
838 #endif
839 if (!is_vect && lrg.num_regs() == 2 && !lrg._fat_proj && rm.is_misaligned_pair()) {
840 lrg._fat_proj = 1;
841 lrg._is_bound = 1;
842 }
843 // if the LRG is an unaligned pair, we will have to spill
844 // so clear the LRG's register mask if it is not already spilled
845 if (!is_vect && !n->is_SpillCopy() &&
846 (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
847 lrgmask.is_misaligned_pair()) {
848 lrg.Clear();
849 }
851 // Check for maximum frequency value
852 if( lrg._maxfreq < b->_freq )
853 lrg._maxfreq = b->_freq;
855 } // End for all allocated inputs
856 } // end for all instructions
857 } // end for all blocks
859 // Final per-liverange setup
860 for (uint i2=0; i2<_maxlrg; i2++) {
861 LRG &lrg = lrgs(i2);
862 assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
863 if (lrg.num_regs() > 1 && !lrg._fat_proj) {
864 lrg.clear_to_sets();
865 }
866 lrg.compute_set_mask_size();
867 if (lrg.not_free()) { // Handle case where we lose from the start
868 lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
869 lrg._direct_conflict = 1;
870 }
871 lrg.set_degree(0); // no neighbors in IFG yet
872 }
873 }
875 //------------------------------set_was_low------------------------------------
876 // Set the was-lo-degree bit. Conservative coalescing should not change the
877 // colorability of the graph. If any live range was of low-degree before
878 // coalescing, it should Simplify. This call sets the was-lo-degree bit.
879 // The bit is checked in Simplify.
880 void PhaseChaitin::set_was_low() {
881 #ifdef ASSERT
882 for( uint i = 1; i < _maxlrg; i++ ) {
883 int size = lrgs(i).num_regs();
884 uint old_was_lo = lrgs(i)._was_lo;
885 lrgs(i)._was_lo = 0;
886 if( lrgs(i).lo_degree() ) {
887 lrgs(i)._was_lo = 1; // Trivially of low degree
888 } else { // Else check the Briggs assertion
889 // Briggs' observation is that the lo-degree neighbors of a
890 // hi-degree live range will not interfere with the color choices
891 // of said hi-degree live range. The Simplify reverse-stack-coloring
892 // order takes care of the details. Hence you do not have to count
893 // low-degree neighbors when determining if this guy colors.
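// Tiny example (hypothetical): a single-register live range with ten
// neighbors, eight of them lo-degree, only sums MAX2(size, num_regs) over the
// two hi-degree ones; if that total stays below degrees_of_freedom() it is
// still marked _was_lo.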
894 int briggs_degree = 0;
895 IndexSet *s = _ifg->neighbors(i);
896 IndexSetIterator elements(s);
897 uint lidx;
898 while((lidx = elements.next()) != 0) {
899 if( !lrgs(lidx).lo_degree() )
900 briggs_degree += MAX2(size,lrgs(lidx).num_regs());
901 }
902 if( briggs_degree < lrgs(i).degrees_of_freedom() )
903 lrgs(i)._was_lo = 1; // Low degree via the Briggs assertion
904 }
905 assert(old_was_lo <= lrgs(i)._was_lo, "_was_lo may not decrease");
906 }
907 #endif
908 }
910 #define REGISTER_CONSTRAINED 16
912 //------------------------------cache_lrg_info---------------------------------
913 // Compute cost/area ratio, in case we spill. Build the lo-degree list.
914 void PhaseChaitin::cache_lrg_info( ) {
916 for( uint i = 1; i < _maxlrg; i++ ) {
917 LRG &lrg = lrgs(i);
919 // Check for being of low degree: means we can be trivially colored.
920 // Low degree, dead or must-spill guys just get to simplify right away
921 if( lrg.lo_degree() ||
922 !lrg.alive() ||
923 lrg._must_spill ) {
924 // Split low degree list into those guys that must get a
925 // register and those that can go to register or stack.
926 // The idea is LRGs that can go register or stack color first when
927 // they have a good chance of getting a register. The register-only
928 // lo-degree live ranges always get a register.
929 OptoReg::Name hi_reg = lrg.mask().find_last_elem();
930 if( OptoReg::is_stack(hi_reg)) { // Can go to stack?
931 lrg._next = _lo_stk_degree;
932 _lo_stk_degree = i;
933 } else {
934 lrg._next = _lo_degree;
935 _lo_degree = i;
936 }
937 } else { // Else high degree
938 lrgs(_hi_degree)._prev = i;
939 lrg._next = _hi_degree;
940 lrg._prev = 0;
941 _hi_degree = i;
942 }
943 }
944 }
946 //------------------------------Pre-Simplify-----------------------------------
947 // Simplify the IFG by removing LRGs of low degree that have NO copies
948 void PhaseChaitin::Pre_Simplify( ) {
950 // Warm up the lo-degree no-copy list
951 int lo_no_copy = 0;
952 for( uint i = 1; i < _maxlrg; i++ ) {
953 if( (lrgs(i).lo_degree() && !lrgs(i)._has_copy) ||
954 !lrgs(i).alive() ||
955 lrgs(i)._must_spill ) {
956 lrgs(i)._next = lo_no_copy;
957 lo_no_copy = i;
958 }
959 }
961 while( lo_no_copy ) {
962 uint lo = lo_no_copy;
963 lo_no_copy = lrgs(lo)._next;
964 int size = lrgs(lo).num_regs();
966 // Put the simplified guy on the simplified list.
967 lrgs(lo)._next = _simplified;
968 _simplified = lo;
970 // Yank this guy from the IFG.
971 IndexSet *adj = _ifg->remove_node( lo );
973 // If any neighbors' degrees fall below their number of
974 // allowed registers, then put that neighbor on the low degree
975 // list. Note that 'degree' can only fall and 'numregs' is
976 // unchanged by this action. Thus the two are equal at most once,
977 // so LRGs hit the lo-degree worklists at most once.
978 IndexSetIterator elements(adj);
979 uint neighbor;
980 while ((neighbor = elements.next()) != 0) {
981 LRG *n = &lrgs(neighbor);
982 assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
984 // Check for just becoming of-low-degree
985 if( n->just_lo_degree() && !n->_has_copy ) {
986 assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
987 // Put on lo-degree list
988 n->_next = lo_no_copy;
989 lo_no_copy = neighbor;
990 }
991 }
992 } // End of while lo-degree no_copy worklist not empty
994 // No more lo-degree no-copy live ranges to simplify
995 }
997 //------------------------------Simplify---------------------------------------
998 // Simplify the IFG by removing LRGs of low degree.
999 void PhaseChaitin::Simplify( ) {
1001 while( 1 ) { // Repeat till simplified it all
1002 // May want to explore simplifying lo_degree before _lo_stk_degree.
1003 // This might result in more spills coloring into registers during
1004 // Select().
1005 while( _lo_degree || _lo_stk_degree ) {
1006 // If possible, pull from lo_stk first
1007 uint lo;
1008 if( _lo_degree ) {
1009 lo = _lo_degree;
1010 _lo_degree = lrgs(lo)._next;
1011 } else {
1012 lo = _lo_stk_degree;
1013 _lo_stk_degree = lrgs(lo)._next;
1014 }
1016 // Put the simplified guy on the simplified list.
1017 lrgs(lo)._next = _simplified;
1018 _simplified = lo;
1019 // If this guy is "at risk" then mark his current neighbors
1020 if( lrgs(lo)._at_risk ) {
1021 IndexSetIterator elements(_ifg->neighbors(lo));
1022 uint datum;
1023 while ((datum = elements.next()) != 0) {
1024 lrgs(datum)._risk_bias = lo;
1025 }
1026 }
1028 // Yank this guy from the IFG.
1029 IndexSet *adj = _ifg->remove_node( lo );
1031 // If any neighbors' degrees fall below their number of
1032 // allowed registers, then put that neighbor on the low degree
1033 // list. Note that 'degree' can only fall and 'numregs' is
1034 // unchanged by this action. Thus the two are equal at most once,
1035 // so LRGs hit the lo-degree worklist at most once.
1036 IndexSetIterator elements(adj);
1037 uint neighbor;
1038 while ((neighbor = elements.next()) != 0) {
1039 LRG *n = &lrgs(neighbor);
1040 #ifdef ASSERT
1041 if( VerifyOpto || VerifyRegisterAllocator ) {
1042 assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
1043 }
1044 #endif
1046 // Check for just becoming of-low-degree just counting registers.
1047 // _must_spill live ranges are already on the low degree list.
1048 if( n->just_lo_degree() && !n->_must_spill ) {
1049 assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
1050 // Pull from hi-degree list
1051 uint prev = n->_prev;
1052 uint next = n->_next;
1053 if( prev ) lrgs(prev)._next = next;
1054 else _hi_degree = next;
1055 lrgs(next)._prev = prev;
1056 n->_next = _lo_degree;
1057 _lo_degree = neighbor;
1058 }
1059 }
1060 } // End of while lo-degree/lo_stk_degree worklist not empty
1062 // Check for got everything: is hi-degree list empty?
1063 if( !_hi_degree ) break;
1065 // Time to pick a potential spill guy
1066 uint lo_score = _hi_degree;
1067 double score = lrgs(lo_score).score();
1068 double area = lrgs(lo_score)._area;
1069 double cost = lrgs(lo_score)._cost;
1070 bool bound = lrgs(lo_score)._is_bound;
1072 // Find cheapest guy
1073 debug_only( int lo_no_simplify=0; );
1074 for( uint i = _hi_degree; i; i = lrgs(i)._next ) {
1075 assert( !(*_ifg->_yanked)[i], "" );
1076 // It's just vaguely possible to move hi-degree to lo-degree without
1077 // going through a just-lo-degree stage: If you remove a double from
1078 // a float live range, its degree will drop by 2 and you can skip the
1079 // just-lo-degree stage. It's very rare (shows up after 5000+ methods
1080 // in -Xcomp of Java2Demo). So just choose this guy to simplify next.
1081 if( lrgs(i).lo_degree() ) {
1082 lo_score = i;
1083 break;
1084 }
1085 debug_only( if( lrgs(i)._was_lo ) lo_no_simplify=i; );
1086 double iscore = lrgs(i).score();
1087 double iarea = lrgs(i)._area;
1088 double icost = lrgs(i)._cost;
1089 bool ibound = lrgs(i)._is_bound;
1091 // Compare cost/area of i vs cost/area of lo_score. Smaller cost/area
1092 // wins. Ties happen because all live ranges in question have spilled
1093 // a few times before and the spill-score adds a huge number which
1094 // washes out the low order bits. We are choosing the lesser of 2
1095 // evils; in this case pick largest area to spill.
1096 // Ties also happen when live ranges are defined and used only inside
1097 // one block, in which case their area is 0 and the score is set to max.
1098 // In such case choose bound live range over unbound to free registers
1099 // or with smaller cost to spill.
1100 if( iscore < score ||
1101 (iscore == score && iarea > area && lrgs(lo_score)._was_spilled2) ||
1102 (iscore == score && iarea == area &&
1103 ( (ibound && !bound) || ibound == bound && (icost < cost) )) ) {
1104 lo_score = i;
1105 score = iscore;
1106 area = iarea;
1107 cost = icost;
1108 bound = ibound;
1109 }
1110 }
1111 LRG *lo_lrg = &lrgs(lo_score);
1112 // The live range we choose for spilling is either hi-degree, or very
1113 // rarely it can be low-degree. If we choose a hi-degree live range
1114 // there better not be any lo-degree choices.
1115 assert( lo_lrg->lo_degree() || !lo_no_simplify, "Live range was lo-degree before coalesce; should simplify" );
1117 // Pull from hi-degree list
1118 uint prev = lo_lrg->_prev;
1119 uint next = lo_lrg->_next;
1120 if( prev ) lrgs(prev)._next = next;
1121 else _hi_degree = next;
1122 lrgs(next)._prev = prev;
1123 // Jam him on the lo-degree list, despite his high degree.
1124 // Maybe he'll get a color, and maybe he'll spill.
1125 // Only Select() will know.
1126 lrgs(lo_score)._at_risk = true;
1127 _lo_degree = lo_score;
1128 lo_lrg->_next = 0;
1130 } // End of while not simplified everything
1132 }
1134 //------------------------------is_legal_reg-----------------------------------
1135 // Is 'reg' register legal for 'lrg'?
1136 static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) {
1137 if (reg >= chunk && reg < (chunk + RegMask::CHUNK_SIZE) &&
1138 lrg.mask().Member(OptoReg::add(reg,-chunk))) {
1139 // RA uses OptoReg which represents the highest element of a register set.
1140 // For example, vectorX (128bit) on x86 uses [XMM,XMMb,XMMc,XMMd] set
1141 // in which XMMd is used by RA to represent such vectors. A double value
1142 // uses [XMM,XMMb] pairs and XMMb is used by RA for it.
1143 // The register mask uses largest bits set of overlapping register sets.
1144 // On x86 with AVX it uses 8 bits for each XMM registers set.
1145 //
1146 // The 'lrg' already has cleared-to-set register mask (done in Select()
1147 // before calling choose_color()). Passing mask.Member(reg) check above
1148 // indicates that the size (num_regs) of the 'reg' set is less than or equal to the
1149 // 'lrg' set size.
1150 // For set size 1 any register which is member of 'lrg' mask is legal.
1151 if (lrg.num_regs()==1)
1152 return true;
1153 // For larger sets only an aligned register with the same set size is legal.
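// Worked example for the alignment test below (hypothetical numbering): for a
// 4-slot set num_regs()-1 == 3, so only a reg whose low two bits are both set -
// the highest slot of an aligned quad, e.g. reg 7 but not reg 6 - is accepted.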
1154 int mask = lrg.num_regs()-1;
1155 if ((reg&mask) == mask)
1156 return true;
1157 }
1158 return false;
1159 }
1161 //------------------------------bias_color-------------------------------------
1162 // Choose a color using the biasing heuristic
1163 OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {
1165 // Check for "at_risk" LRG's
1166 uint risk_lrg = Find(lrg._risk_bias);
1167 if( risk_lrg != 0 ) {
1168 // Walk the colored neighbors of the "at_risk" candidate
1169 // Choose a color which is both legal and already taken by a neighbor
1170 // of the "at_risk" candidate in order to improve the chances of the
1171 // "at_risk" candidate of coloring
1172 IndexSetIterator elements(_ifg->neighbors(risk_lrg));
1173 uint datum;
1174 while ((datum = elements.next()) != 0) {
1175 OptoReg::Name reg = lrgs(datum).reg();
1176 // If this LRG's register is legal for us, choose it
1177 if (is_legal_reg(lrg, reg, chunk))
1178 return reg;
1179 }
1180 }
1182 uint copy_lrg = Find(lrg._copy_bias);
1183 if( copy_lrg != 0 ) {
1184 // If he has a color,
1185 if( !(*(_ifg->_yanked))[copy_lrg] ) {
1186 OptoReg::Name reg = lrgs(copy_lrg).reg();
1187 // And it is legal for you,
1188 if (is_legal_reg(lrg, reg, chunk))
1189 return reg;
1190 } else if( chunk == 0 ) {
1191 // Choose a color which is legal for him
1192 RegMask tempmask = lrg.mask();
1193 tempmask.AND(lrgs(copy_lrg).mask());
1194 tempmask.clear_to_sets(lrg.num_regs());
1195 OptoReg::Name reg = tempmask.find_first_set(lrg.num_regs());
1196 if (OptoReg::is_valid(reg))
1197 return reg;
1198 }
1199 }
1201 // If no bias info exists, just go with the register selection ordering
1202 if (lrg._is_vector || lrg.num_regs() == 2) {
1203 // Find an aligned set
1204 return OptoReg::add(lrg.mask().find_first_set(lrg.num_regs()),chunk);
1205 }
1207 // CNC - Fun hack. Alternate 1st and 2nd selection. Enables post-allocate
1208 // copy removal to remove many more copies, by preventing a just-assigned
1209 // register from being repeatedly assigned.
1210 OptoReg::Name reg = lrg.mask().find_first_elem();
1211 if( (++_alternate & 1) && OptoReg::is_valid(reg) ) {
1212 // This 'Remove; find; Insert' idiom is an expensive way to find the
1213 // SECOND element in the mask.
1214 lrg.Remove(reg);
1215 OptoReg::Name reg2 = lrg.mask().find_first_elem();
1216 lrg.Insert(reg);
1217 if( OptoReg::is_reg(reg2))
1218 reg = reg2;
1219 }
1220 return OptoReg::add( reg, chunk );
1221 }
1223 //------------------------------choose_color-----------------------------------
1224 // Choose a color in the current chunk
1225 OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {
1226 assert( C->in_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP-1)), "must not allocate stack0 (inside preserve area)");
1227 assert(C->out_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP+0)), "must not allocate stack0 (inside preserve area)");
1229 if( lrg.num_regs() == 1 || // Common Case
1230 !lrg._fat_proj ) // Aligned+adjacent pairs ok
1231 // Use a heuristic to "bias" the color choice
1232 return bias_color(lrg, chunk);
1234 assert(!lrg._is_vector, "should be not vector here" );
1235 assert( lrg.num_regs() >= 2, "dead live ranges do not color" );
1237 // Fat-proj case or misaligned double argument.
1238 assert(lrg.compute_mask_size() == lrg.num_regs() ||
1239 lrg.num_regs() == 2,"fat projs exactly color" );
1240 assert( !chunk, "always color in 1st chunk" );
1241 // Return the highest element in the set.
1242 return lrg.mask().find_last_elem();
1243 }
1245 //------------------------------Select-----------------------------------------
1246 // Select colors by re-inserting LRGs back into the IFG. LRGs are re-inserted
1247 // in reverse order of removal. As long as nothing of hi-degree was yanked,
1248 // everything going back is guaranteed a color. Select that color. If some
1249 // hi-degree LRG cannot get a color then we record that we must spill.
1250 uint PhaseChaitin::Select( ) {
1251 uint spill_reg = LRG::SPILL_REG;
1252 _max_reg = OptoReg::Name(0); // Past max register used
1253 while( _simplified ) {
1254 // Pull next LRG from the simplified list - in reverse order of removal
1255 uint lidx = _simplified;
1256 LRG *lrg = &lrgs(lidx);
1257 _simplified = lrg->_next;
1260 #ifndef PRODUCT
1261 if (trace_spilling()) {
1262 ttyLocker ttyl;
1263 tty->print_cr("L%d selecting degree %d degrees_of_freedom %d", lidx, lrg->degree(),
1264 lrg->degrees_of_freedom());
1265 lrg->dump();
1266 }
1267 #endif
1269 // Re-insert into the IFG
1270 _ifg->re_insert(lidx);
1271 if( !lrg->alive() ) continue;
1272 // capture allstackedness flag before mask is hacked
1273 const int is_allstack = lrg->mask().is_AllStack();
1275 // Yeah, yeah, yeah, I know, I know. I can refactor this
1276 // to avoid the GOTO, although the refactored code will not
1277 // be much clearer. We arrive here IFF we have a stack-based
1278 // live range that cannot color in the current chunk, and it
1279 // has to move into the next free stack chunk.
1280 int chunk = 0; // Current chunk is first chunk
1281 retry_next_chunk:
1283 // Remove neighbor colors
1284 IndexSet *s = _ifg->neighbors(lidx);
1286 debug_only(RegMask orig_mask = lrg->mask();)
1287 IndexSetIterator elements(s);
1288 uint neighbor;
1289 while ((neighbor = elements.next()) != 0) {
1290 // Note that neighbor might be a spill_reg. In this case, exclusion
1291 // of its color will be a no-op, since the spill_reg chunk is in outer
1292 // space. Also, if neighbor is in a different chunk, this exclusion
1293 // will be a no-op. (Later on, if lrg runs out of possible colors in
1294 // its chunk, a new chunk of color may be tried, in which case
1295 // examination of neighbors is started again, at retry_next_chunk.)
1296 LRG &nlrg = lrgs(neighbor);
1297 OptoReg::Name nreg = nlrg.reg();
1298 // Only subtract masks in the same chunk
1299 if( nreg >= chunk && nreg < chunk + RegMask::CHUNK_SIZE ) {
1300 #ifndef PRODUCT
1301 uint size = lrg->mask().Size();
1302 RegMask rm = lrg->mask();
1303 #endif
1304 lrg->SUBTRACT(nlrg.mask());
1305 #ifndef PRODUCT
1306 if (trace_spilling() && lrg->mask().Size() != size) {
1307 ttyLocker ttyl;
1308 tty->print("L%d ", lidx);
1309 rm.dump();
1310 tty->print(" intersected L%d ", neighbor);
1311 nlrg.mask().dump();
1312 tty->print(" removed ");
1313 rm.SUBTRACT(lrg->mask());
1314 rm.dump();
1315 tty->print(" leaving ");
1316 lrg->mask().dump();
1317 tty->cr();
1318 }
1319 #endif
1320 }
1321 }
1322 //assert(is_allstack == lrg->mask().is_AllStack(), "nbrs must not change AllStackedness");
1323 // Aligned pairs need aligned masks
1324 assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
1325 if (lrg->num_regs() > 1 && !lrg->_fat_proj) {
1326 lrg->clear_to_sets();
1327 }
1329 // Check if a color is available and if so pick the color
1330 OptoReg::Name reg = choose_color( *lrg, chunk );
1331 #ifdef SPARC
1332 debug_only(lrg->compute_set_mask_size());
1333 assert(lrg->num_regs() < 2 || lrg->is_bound() || is_even(reg-1), "allocate all doubles aligned");
1334 #endif
1336 //---------------
1337 // If we fail to color and the AllStack flag is set, trigger
1338 // a chunk-rollover event
1339 if(!OptoReg::is_valid(OptoReg::add(reg,-chunk)) && is_allstack) {
1340 // Bump register mask up to next stack chunk
1341 chunk += RegMask::CHUNK_SIZE;
1342 lrg->Set_All();
1344 goto retry_next_chunk;
1345 }
1347 //---------------
1348 // Did we get a color?
1349 else if( OptoReg::is_valid(reg)) {
1350 #ifndef PRODUCT
1351 RegMask avail_rm = lrg->mask();
1352 #endif
1354 // Record selected register
1355 lrg->set_reg(reg);
1357 if( reg >= _max_reg ) // Compute max register limit
1358 _max_reg = OptoReg::add(reg,1);
1359 // Fold reg back into normal space
1360 reg = OptoReg::add(reg,-chunk);
1362 // If the live range is not bound, then we actually had some choices
1363 // to make. In this case, the mask has more bits in it than the colors
1364 // chosen. Restrict the mask to just what was picked.
1365 int n_regs = lrg->num_regs();
1366 assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
1367 if (n_regs == 1 || !lrg->_fat_proj) {
1368 assert(!lrg->_is_vector || n_regs <= RegMask::SlotsPerVecY, "sanity");
1369 lrg->Clear(); // Clear the mask
1370 lrg->Insert(reg); // Set regmask to match selected reg
1371 // For vectors and pairs, also insert the low bit of the pair
1372 for (int i = 1; i < n_regs; i++)
1373 lrg->Insert(OptoReg::add(reg,-i));
1374 lrg->set_mask_size(n_regs);
1375 } else { // Else fatproj
1376 // mask must be equal to fatproj bits, by definition
1377 }
1378 #ifndef PRODUCT
1379 if (trace_spilling()) {
1380 ttyLocker ttyl;
1381 tty->print("L%d selected ", lidx);
1382 lrg->mask().dump();
1383 tty->print(" from ");
1384 avail_rm.dump();
1385 tty->cr();
1386 }
1387 #endif
1388 // Note that reg is the highest-numbered register in the newly-bound mask.
1389 } // end color available case
1391 //---------------
1392 // Live range is live and no colors available
1393 else {
1394 assert( lrg->alive(), "" );
1395 assert( !lrg->_fat_proj || lrg->is_multidef() ||
1396 lrg->_def->outcnt() > 0, "fat_proj cannot spill");
1397 assert( !orig_mask.is_AllStack(), "All Stack does not spill" );
1399 // Assign the special spillreg register
1400 lrg->set_reg(OptoReg::Name(spill_reg++));
1401 // Do not empty the regmask; leave mask_size lying around
1402 // for use during Spilling
1403 #ifndef PRODUCT
1404 if( trace_spilling() ) {
1405 ttyLocker ttyl;
1406 tty->print("L%d spilling with neighbors: ", lidx);
1407 s->dump();
1408 debug_only(tty->print(" original mask: "));
1409 debug_only(orig_mask.dump());
1410 dump_lrg(lidx);
1411 }
1412 #endif
1413 } // end spill case
1415 }
1417 return spill_reg-LRG::SPILL_REG; // Return number of spills
1418 }
1421 //------------------------------copy_was_spilled-------------------------------
1422 // Copy 'was_spilled'-edness from the source Node to the dst Node.
1423 void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) {
1424 if( _spilled_once.test(src->_idx) ) {
1425 _spilled_once.set(dst->_idx);
1426 lrgs(Find(dst))._was_spilled1 = 1;
1427 if( _spilled_twice.test(src->_idx) ) {
1428 _spilled_twice.set(dst->_idx);
1429 lrgs(Find(dst))._was_spilled2 = 1;
1430 }
1431 }
1432 }
1434 //------------------------------set_was_spilled--------------------------------
1435 // Set the 'spilled_once' or 'spilled_twice' flag on a node.
1436 void PhaseChaitin::set_was_spilled( Node *n ) {
1437 if( _spilled_once.test_set(n->_idx) )
1438 _spilled_twice.set(n->_idx);
1439 }
1441 //------------------------------fixup_spills-----------------------------------
1442 // Convert Ideal spill instructions into proper FramePtr + offset Loads and
1443 // Stores. Use-def chains are NOT preserved, but Node->LRG->reg maps are.
1444 void PhaseChaitin::fixup_spills() {
1445 // This function does only cisc spill work.
1446 if( !UseCISCSpill ) return;
1448 NOT_PRODUCT( Compile::TracePhase t3("fixupSpills", &_t_fixupSpills, TimeCompiler); )
1450 // Grab the Frame Pointer
1451 Node *fp = _cfg._broot->head()->in(1)->in(TypeFunc::FramePtr);
1453 // For all blocks
1454 for( uint i = 0; i < _cfg._num_blocks; i++ ) {
1455 Block *b = _cfg._blocks[i];
1457 // For all instructions in block
1458 uint last_inst = b->end_idx();
1459 for( uint j = 1; j <= last_inst; j++ ) {
1460 Node *n = b->_nodes[j];
1462 // Dead instruction???
1463 assert( n->outcnt() != 0 ||// Nothing dead after post alloc
1464 C->top() == n || // Or the random TOP node
1465 n->is_Proj(), // Or a fat-proj kill node
1466 "No dead instructions after post-alloc" );
1468 int inp = n->cisc_operand();
1469 if( inp != AdlcVMDeps::Not_cisc_spillable ) {
1470 // Convert operand number to edge index number
1471 MachNode *mach = n->as_Mach();
1472 inp = mach->operand_index(inp);
1473 Node *src = n->in(inp); // Value to load or store
1474 LRG &lrg_cisc = lrgs( Find_const(src) );
1475 OptoReg::Name src_reg = lrg_cisc.reg();
1476 // Doubles record the HIGH register of an adjacent pair.
1477 src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs());
1478 if( OptoReg::is_stack(src_reg) ) { // If input is on stack
1479 // This is a CISC Spill, get stack offset and construct new node
1480 #ifndef PRODUCT
1481 if( TraceCISCSpill ) {
1482 tty->print(" reg-instr: ");
1483 n->dump();
1484 }
1485 #endif
1486 int stk_offset = reg2offset(src_reg);
1487 // Bailout if we might exceed node limit when spilling this instruction
1488 C->check_node_count(0, "out of nodes fixing spills");
1489 if (C->failing()) return;
1490 // Transform node
1491 MachNode *cisc = mach->cisc_version(stk_offset, C)->as_Mach();
1492 cisc->set_req(inp,fp); // Base register is frame pointer
1493 if( cisc->oper_input_base() > 1 && mach->oper_input_base() <= 1 ) {
1494 assert( cisc->oper_input_base() == 2, "Only adding one edge");
1495 cisc->ins_req(1,src); // Requires a memory edge
1496 }
1497 b->_nodes.map(j,cisc); // Insert into basic block
1498 n->subsume_by(cisc); // Correct graph
1499 //
1500 ++_used_cisc_instructions;
1501 #ifndef PRODUCT
1502 if( TraceCISCSpill ) {
1503 tty->print(" cisc-instr: ");
1504 cisc->dump();
1505 }
1506 #endif
1507 } else {
1508 #ifndef PRODUCT
1509 if( TraceCISCSpill ) {
1510 tty->print(" using reg-instr: ");
1511 n->dump();
1512 }
1513 #endif
1514 ++_unused_cisc_instructions; // input can be on stack
1515 }
1516 }
1518 } // End of for all instructions
1520 } // End of for all blocks
1521 }
1523 //------------------------------find_base_for_derived--------------------------
1524 // Helper to stretch above; recursively discover the base Node for a
1525 // given derived Node. Easy for AddP-related machine nodes, but needs
1526 // to be recursive for derived Phis.
1527 Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
1528 // See if already computed; if so return it
1529 if( derived_base_map[derived->_idx] )
1530 return derived_base_map[derived->_idx];
1532 // See if this happens to be a base.
1533 // NOTE: we use TypePtr instead of TypeOopPtr because we can have
1534 // pointers derived from NULL! These are always along paths that
1535 // can't happen at run-time, but the optimizer cannot prove that, so
1536 // we have to handle it gracefully.
1537 assert(!derived->bottom_type()->isa_narrowoop() ||
1538 derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1539 const TypePtr *tj = derived->bottom_type()->isa_ptr();
1540 // If it's an OOP with a non-zero offset, then it is derived.
1541 if( tj == NULL || tj->_offset == 0 ) {
1542 derived_base_map[derived->_idx] = derived;
1543 return derived;
1544 }
1545 // Derived is NULL+offset? Base is NULL!
1546 if( derived->is_Con() ) {
1547 Node *base = _matcher.mach_null();
1548 assert(base != NULL, "sanity");
1549 if (base->in(0) == NULL) {
1550 // Initialize it once and make it shared:
1551 // set control to _root and place it into Start block
1552 // (where top() node is placed).
1553 base->init_req(0, _cfg._root);
1554 Block *startb = _cfg._bbs[C->top()->_idx];
1555 startb->_nodes.insert(startb->find_node(C->top()), base );
1556 _cfg._bbs.map( base->_idx, startb );
1557 assert (n2lidx(base) == 0, "should not have LRG yet");
1558 }
1559 if (n2lidx(base) == 0) {
1560 new_lrg(base, maxlrg++);
1561 }
1562 assert(base->in(0) == _cfg._root &&
1563 _cfg._bbs[base->_idx] == _cfg._bbs[C->top()->_idx], "base NULL should be shared");
1564 derived_base_map[derived->_idx] = base;
1565 return base;
1566 }
1568 // Check for AddP-related opcodes
1569 if( !derived->is_Phi() ) {
1570 assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, err_msg_res("but is: %s", derived->Name()));
1571 Node *base = derived->in(AddPNode::Base);
1572 derived_base_map[derived->_idx] = base;
1573 return base;
1574 }
1576 // Recursively find bases for Phis.
1577 // First check to see if we can avoid a base Phi here.
1578 Node *base = find_base_for_derived( derived_base_map, derived->in(1),maxlrg);
1579 uint i;
1580 for( i = 2; i < derived->req(); i++ )
1581 if( base != find_base_for_derived( derived_base_map,derived->in(i),maxlrg))
1582 break;
1583 // Went to the end without finding any different bases?
1584 if( i == derived->req() ) { // No need for a base Phi here
1585 derived_base_map[derived->_idx] = base;
1586 return base;
1587 }
1589 // Now we see we need a base-Phi here to merge the bases
1590 const Type *t = base->bottom_type();
1591 base = new (C, derived->req()) PhiNode( derived->in(0), t );
1592 for( i = 1; i < derived->req(); i++ ) {
1593 base->init_req(i, find_base_for_derived(derived_base_map, derived->in(i), maxlrg));
1594 t = t->meet(base->in(i)->bottom_type());
1595 }
1596 base->as_Phi()->set_type(t);
1598 // Search the current block for an existing base-Phi
1599 Block *b = _cfg._bbs[derived->_idx];
1600 for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
1601 Node *phi = b->_nodes[i];
1602 if( !phi->is_Phi() ) { // Found end of Phis with no match?
1603 b->_nodes.insert( i, base ); // Must insert created Phi here as base
1604 _cfg._bbs.map( base->_idx, b );
1605 new_lrg(base,maxlrg++);
1606 break;
1607 }
1608 // See if Phi matches.
1609 uint j;
1610 for( j = 1; j < base->req(); j++ )
1611 if( phi->in(j) != base->in(j) &&
1612 !(phi->in(j)->is_Con() && base->in(j)->is_Con()) ) // allow different NULLs
1613 break;
1614 if( j == base->req() ) { // All inputs match?
1615 base = phi; // Then use existing 'phi' and drop 'base'
1616 break;
1617 }
1618 }
1621 // Cache info for later passes
1622 derived_base_map[derived->_idx] = base;
1623 return base;
1624 }
1627 //------------------------------stretch_base_pointer_live_ranges---------------
1628 // At each Safepoint, insert extra debug edges for each pair of derived value/
1629 // base pointer that is live across the Safepoint for oopmap building. The
1630 // edge pairs are added after sfpt->jvmtail()->oopoff(), but are in the
1631 // required edge set.
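// The (derived, base) pairs let the oopmap record both pointers, so a moving
// collector can relocate the base and then recompute the derived pointer as
// base plus its offset; stretching the base's live range across the
// Safepoint guarantees the base is actually available there.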
1632 bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) {
1633 int must_recompute_live = false;
1634 uint maxlrg = _maxlrg;
1635 Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique());
1636 memset( derived_base_map, 0, sizeof(Node*)*C->unique() );
1638 // For all blocks in RPO do...
1639 for( uint i=0; i<_cfg._num_blocks; i++ ) {
1640 Block *b = _cfg._blocks[i];
1641 // Note use of deep-copy constructor. I cannot hammer the original
1642 // liveout bits, because they are needed by the following coalesce pass.
1643 IndexSet liveout(_live->live(b));
1645 for( uint j = b->end_idx() + 1; j > 1; j-- ) {
1646 Node *n = b->_nodes[j-1];
1648 // Pre-split compares of loop-phis. Loop-phis form a cycle we would
1649 // like to see in the same register. Compare uses the loop-phi and so
1650 // extends its live range BUT cannot be part of the cycle. If this
1651 // extended live range overlaps with the update of the loop-phi value
1652 // we need both alive at the same time -- which requires at least 1
1653 // copy. But because Intel has only 2-address instructions we end up with
1654 // at least 2 copies, one before the loop-phi update instruction and
1655 // one after. Instead we split the input to the compare just after the
1656 // phi.
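// A rough sketch of the split (node names are illustrative):
//   phi = Phi(init, incr)         // loop-carried value
//   cmp = CmpI(phi, limit)        // would stretch phi across its own update
// becomes
//   copy = MachSpillCopy(phi)     // inserted right after the phi
//   cmp = CmpI(copy, limit)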
1657 if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
1658 Node *phi = n->in(1);
1659 if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
1660 Block *phi_block = _cfg._bbs[phi->_idx];
1661 if( _cfg._bbs[phi_block->pred(2)->_idx] == b ) {
1662 const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
1663 Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
1664 insert_proj( phi_block, 1, spill, maxlrg++ );
1665 n->set_req(1,spill);
1666 must_recompute_live = true;
1667 }
1668 }
1669 }
1671 // Get value being defined
1672 uint lidx = n2lidx(n);
1673 if( lidx && lidx < _maxlrg /* Ignore the occasional brand-new live range */) {
1674 // Remove from live-out set
1675 liveout.remove(lidx);
1677 // Copies do not define a new value and so do not interfere.
1678 // Remove the copy's source from the liveout set before interfering.
1679 uint idx = n->is_Copy();
1680 if( idx ) liveout.remove( n2lidx(n->in(idx)) );
1681 }
1683 // Found a safepoint?
1684 JVMState *jvms = n->jvms();
1685 if( jvms ) {
1686 // Now scan for a live derived pointer
1687 IndexSetIterator elements(&liveout);
1688 uint neighbor;
1689 while ((neighbor = elements.next()) != 0) {
1690 // Find reaching DEF for base and derived values
1691 // This works because we are still in SSA during this call.
1692 Node *derived = lrgs(neighbor)._def;
1693 const TypePtr *tj = derived->bottom_type()->isa_ptr();
1694 assert(!derived->bottom_type()->isa_narrowoop() ||
1695 derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1696 // If it's an OOP with a non-zero offset, then it is derived.
1697 if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
1698 Node *base = find_base_for_derived( derived_base_map, derived, maxlrg );
1699 assert( base->_idx < _names.Size(), "" );
1700 // Add reaching DEFs of derived pointer and base pointer as a
1701 // pair of inputs
1702 n->add_req( derived );
1703 n->add_req( base );
1705 // See if the base pointer is already live to this point.
1706 // Since I'm working on the SSA form, liveness amounts to
1707 // reaching defs. So if I find the base's live range then
1708 // I know the base's def reaches here.
1709 if( (n2lidx(base) >= _maxlrg ||// (Brand new base (hence not live) or
1710 !liveout.member( n2lidx(base) ) ) && // not live) AND
1711 (n2lidx(base) > 0) && // not a constant
1712 _cfg._bbs[base->_idx] != b ) { // base not def'd in blk)
1713 // Base pointer is not currently live. Since I stretched
1714 // the base pointer to here and it crosses basic-block
1715 // boundaries, the global live info is now incorrect.
1716 // Recompute live.
1717 must_recompute_live = true;
1718 } // End of if base pointer is not live to debug info
1719 }
1720 } // End of scan all live data for derived ptrs crossing GC point
1721 } // End of if found a GC point
1723 // Make all inputs live
1724 if( !n->is_Phi() ) { // Phi function uses come from prior block
1725 for( uint k = 1; k < n->req(); k++ ) {
1726 uint lidx = n2lidx(n->in(k));
1727 if( lidx < _maxlrg )
1728 liveout.insert( lidx );
1729 }
1730 }
1732 } // End of forall instructions in block
1733 liveout.clear(); // Free the memory used by liveout.
1735 } // End of forall blocks
1736 _maxlrg = maxlrg;
1738 // If I created a new live range I need to recompute live
1739 if( maxlrg != _ifg->_maxlrg )
1740 must_recompute_live = true;
1742 return must_recompute_live != 0;
1743 }
1746 //------------------------------add_reference----------------------------------
1747 // Extend the node to LRG mapping
1748 void PhaseChaitin::add_reference( const Node *node, const Node *old_node ) {
1749 _names.extend( node->_idx, n2lidx(old_node) );
1750 }
1752 //------------------------------dump-------------------------------------------
1753 #ifndef PRODUCT
1754 void PhaseChaitin::dump( const Node *n ) const {
1755 uint r = (n->_idx < _names.Size() ) ? Find_const(n) : 0;
1756 tty->print("L%d",r);
1757 if( r && n->Opcode() != Op_Phi ) {
1758 if( _node_regs ) { // Got a post-allocation copy of allocation?
1759 tty->print("[");
1760 OptoReg::Name second = get_reg_second(n);
1761 if( OptoReg::is_valid(second) ) {
1762 if( OptoReg::is_reg(second) )
1763 tty->print("%s:",Matcher::regName[second]);
1764 else
1765 tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(second));
1766 }
1767 OptoReg::Name first = get_reg_first(n);
1768 if( OptoReg::is_reg(first) )
1769 tty->print("%s]",Matcher::regName[first]);
1770 else
1771 tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(first));
1772 } else
1773 n->out_RegMask().dump();
1774 }
1775 tty->print("/N%d\t",n->_idx);
1776 tty->print("%s === ", n->Name());
1777 uint k;
1778 for( k = 0; k < n->req(); k++) {
1779 Node *m = n->in(k);
1780 if( !m ) tty->print("_ ");
1781 else {
1782 uint r = (m->_idx < _names.Size() ) ? Find_const(m) : 0;
1783 tty->print("L%d",r);
1784 // Data MultiNodes can have projections with no real registers.
1785 // Don't die while dumping them.
1786 int op = n->Opcode();
1787 if( r && op != Op_Phi && op != Op_Proj && op != Op_SCMemProj) {
1788 if( _node_regs ) {
1789 tty->print("[");
1790 OptoReg::Name second = get_reg_second(n->in(k));
1791 if( OptoReg::is_valid(second) ) {
1792 if( OptoReg::is_reg(second) )
1793 tty->print("%s:",Matcher::regName[second]);
1794 else
1795 tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer),
1796 reg2offset_unchecked(second));
1797 }
1798 OptoReg::Name first = get_reg_first(n->in(k));
1799 if( OptoReg::is_reg(first) )
1800 tty->print("%s]",Matcher::regName[first]);
1801 else
1802 tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer),
1803 reg2offset_unchecked(first));
1804 } else
1805 n->in_RegMask(k).dump();
1806 }
1807 tty->print("/N%d ",m->_idx);
1808 }
1809 }
1810 if( k < n->len() && n->in(k) ) tty->print("| ");
1811 for( ; k < n->len(); k++ ) {
1812 Node *m = n->in(k);
1813 if( !m ) break;
1814 uint r = (m->_idx < _names.Size() ) ? Find_const(m) : 0;
1815 tty->print("L%d",r);
1816 tty->print("/N%d ",m->_idx);
1817 }
1818 if( n->is_Mach() ) n->as_Mach()->dump_spec(tty);
1819 else n->dump_spec(tty);
1820 if( _spilled_once.test(n->_idx ) ) {
1821 tty->print(" Spill_1");
1822 if( _spilled_twice.test(n->_idx ) )
1823 tty->print(" Spill_2");
1824 }
1825 tty->print("\n");
1826 }
1828 void PhaseChaitin::dump( const Block * b ) const {
1829 b->dump_head( &_cfg._bbs );
1831 // For all instructions
1832 for( uint j = 0; j < b->_nodes.size(); j++ )
1833 dump(b->_nodes[j]);
1834 // Print live-out info at end of block
1835 if( _live ) {
1836 tty->print("Liveout: ");
1837 IndexSet *live = _live->live(b);
1838 IndexSetIterator elements(live);
1839 tty->print("{");
1840 uint i;
1841 while ((i = elements.next()) != 0) {
1842 tty->print("L%d ", Find_const(i));
1843 }
1844 tty->print_cr("}");
1845 }
1846 tty->print("\n");
1847 }
1849 void PhaseChaitin::dump() const {
1850 tty->print( "--- Chaitin -- argsize: %d framesize: %d ---\n",
1851 _matcher._new_SP, _framesize );
1853 // For all blocks
1854 for( uint i = 0; i < _cfg._num_blocks; i++ )
1855 dump(_cfg._blocks[i]);
1856 // End of per-block dump
1857 tty->print("\n");
1859 if (!_ifg) {
1860 tty->print("(No IFG.)\n");
1861 return;
1862 }
1864 // Dump LRG array
1865 tty->print("--- Live RanGe Array ---\n");
1866 for(uint i2 = 1; i2 < _maxlrg; i2++ ) {
1867 tty->print("L%d: ",i2);
1868 if( i2 < _ifg->_maxlrg ) lrgs(i2).dump( );
1869 else tty->print_cr("new LRG");
1870 }
1871 tty->print_cr("");
1873 // Dump lo-degree list
1874 tty->print("Lo degree: ");
1875 for(uint i3 = _lo_degree; i3; i3 = lrgs(i3)._next )
1876 tty->print("L%d ",i3);
1877 tty->print_cr("");
1879 // Dump lo-stk-degree list
1880 tty->print("Lo stk degree: ");
1881 for(uint i4 = _lo_stk_degree; i4; i4 = lrgs(i4)._next )
1882 tty->print("L%d ",i4);
1883 tty->print_cr("");
1885 // Dump hi-degree list
1886 tty->print("Hi degree: ");
1887 for(uint i5 = _hi_degree; i5; i5 = lrgs(i5)._next )
1888 tty->print("L%d ",i5);
1889 tty->print_cr("");
1890 }
1892 //------------------------------dump_degree_lists------------------------------
1893 void PhaseChaitin::dump_degree_lists() const {
1894 // Dump lo-degree list
1895 tty->print("Lo degree: ");
1896 for( uint i = _lo_degree; i; i = lrgs(i)._next )
1897 tty->print("L%d ",i);
1898 tty->print_cr("");
1900 // Dump lo-stk-degree list
1901 tty->print("Lo stk degree: ");
1902 for(uint i2 = _lo_stk_degree; i2; i2 = lrgs(i2)._next )
1903 tty->print("L%d ",i2);
1904 tty->print_cr("");
1906 // Dump hi-degree list
1907 tty->print("Hi degree: ");
1908 for(uint i3 = _hi_degree; i3; i3 = lrgs(i3)._next )
1909 tty->print("L%d ",i3);
1910 tty->print_cr("");
1911 }
1913 //------------------------------dump_simplified--------------------------------
1914 void PhaseChaitin::dump_simplified() const {
1915 tty->print("Simplified: ");
1916 for( uint i = _simplified; i; i = lrgs(i)._next )
1917 tty->print("L%d ",i);
1918 tty->print_cr("");
1919 }
1921 static char *print_reg( OptoReg::Name reg, const PhaseChaitin *pc, char *buf ) {
1922 if ((int)reg < 0)
1923 sprintf(buf, "<OptoReg::%d>", (int)reg);
1924 else if (OptoReg::is_reg(reg))
1925 strcpy(buf, Matcher::regName[reg]);
1926 else
1927 sprintf(buf,"%s + #%d",OptoReg::regname(OptoReg::c_frame_pointer),
1928 pc->reg2offset(reg));
1929 return buf+strlen(buf);
1930 }
1932 //------------------------------dump_register----------------------------------
1933 // Dump a register name into a buffer. Be intelligent if we get called
1934 // before allocation is complete.
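// The cases below degrade gracefully: with no allocator only the node index
// is printed; after allocation the assigned register or stack slot; before
// allocation the live range number, or its register if the mask already
// binds it.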
1935 char *PhaseChaitin::dump_register( const Node *n, char *buf ) const {
1936 if( !this ) { // No allocator at all?
1937 sprintf(buf,"N%d",n->_idx); // Then use Node index
1938 } else if( _node_regs ) {
1939 // Post allocation, use direct mappings, no LRG info available
1940 print_reg( get_reg_first(n), this, buf );
1941 } else {
1942 uint lidx = Find_const(n); // Grab LRG number
1943 if( !_ifg ) {
1944 sprintf(buf,"L%d",lidx); // No register binding yet
1945 } else if( !lidx ) { // Special, not allocated value
1946 strcpy(buf,"Special");
1947 } else {
1948 if (lrgs(lidx)._is_vector) {
1949 if (lrgs(lidx).mask().is_bound_set(lrgs(lidx).num_regs()))
1950 print_reg( lrgs(lidx).reg(), this, buf ); // a bound machine register
1951 else
1952 sprintf(buf,"L%d",lidx); // No register binding yet
1953 } else if( (lrgs(lidx).num_regs() == 1)
1954 ? lrgs(lidx).mask().is_bound1()
1955 : lrgs(lidx).mask().is_bound_pair() ) {
1956 // Hah! We have a bound machine register
1957 print_reg( lrgs(lidx).reg(), this, buf );
1958 } else {
1959 sprintf(buf,"L%d",lidx); // No register binding yet
1960 }
1961 }
1962 }
1963 return buf+strlen(buf);
1964 }
1966 //----------------------dump_for_spill_split_recycle--------------------------
1967 void PhaseChaitin::dump_for_spill_split_recycle() const {
1968 if( WizardMode && (PrintCompilation || PrintOpto) ) {
1969 // Display which live ranges need to be split and the allocator's state
1970 tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
1971 for( uint bidx = 1; bidx < _maxlrg; bidx++ ) {
1972 if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
1973 tty->print("L%d: ", bidx);
1974 lrgs(bidx).dump();
1975 }
1976 }
1977 tty->cr();
1978 dump();
1979 }
1980 }
1982 //------------------------------dump_frame------------------------------------
1983 void PhaseChaitin::dump_frame() const {
1984 const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
1985 const TypeTuple *domain = C->tf()->domain();
1986 const int argcnt = domain->cnt() - TypeFunc::Parms;
1988 // Incoming arguments in registers dump
1989 for( int k = 0; k < argcnt; k++ ) {
1990 OptoReg::Name parmreg = _matcher._parm_regs[k].first();
1991 if( OptoReg::is_reg(parmreg)) {
1992 const char *reg_name = OptoReg::regname(parmreg);
1993 tty->print("#r%3.3d %s", parmreg, reg_name);
1994 parmreg = _matcher._parm_regs[k].second();
1995 if( OptoReg::is_reg(parmreg)) {
1996 tty->print(":%s", OptoReg::regname(parmreg));
1997 }
1998 tty->print(" : parm %d: ", k);
1999 domain->field_at(k + TypeFunc::Parms)->dump();
2000 tty->print_cr("");
2001 }
2002 }
2004 // Check for un-owned padding above incoming args
2005 OptoReg::Name reg = _matcher._new_SP;
2006 if( reg > _matcher._in_arg_limit ) {
2007 reg = OptoReg::add(reg, -1);
2008 tty->print_cr("#r%3.3d %s+%2d: pad0, owned by CALLER", reg, fp, reg2offset_unchecked(reg));
2009 }
2011 // Incoming argument area dump
2012 OptoReg::Name begin_in_arg = OptoReg::add(_matcher._old_SP,C->out_preserve_stack_slots());
2013 while( reg > begin_in_arg ) {
2014 reg = OptoReg::add(reg, -1);
2015 tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2016 int j;
2017 for( j = 0; j < argcnt; j++) {
2018 if( _matcher._parm_regs[j].first() == reg ||
2019 _matcher._parm_regs[j].second() == reg ) {
2020 tty->print("parm %d: ",j);
2021 domain->field_at(j + TypeFunc::Parms)->dump();
2022 tty->print_cr("");
2023 break;
2024 }
2025 }
2026 if( j >= argcnt )
2027 tty->print_cr("HOLE, owned by SELF");
2028 }
2030 // Old outgoing preserve area
2031 while( reg > _matcher._old_SP ) {
2032 reg = OptoReg::add(reg, -1);
2033 tty->print_cr("#r%3.3d %s+%2d: old out preserve",reg,fp,reg2offset_unchecked(reg));
2034 }
2036 // Old SP
2037 tty->print_cr("# -- Old %s -- Framesize: %d --",fp,
2038 reg2offset_unchecked(OptoReg::add(_matcher._old_SP,-1)) - reg2offset_unchecked(_matcher._new_SP)+jintSize);
2040 // Preserve area dump
2041 int fixed_slots = C->fixed_slots();
2042 OptoReg::Name begin_in_preserve = OptoReg::add(_matcher._old_SP, -(int)C->in_preserve_stack_slots());
2043 OptoReg::Name return_addr = _matcher.return_addr();
2045 reg = OptoReg::add(reg, -1);
2046 while (OptoReg::is_stack(reg)) {
2047 tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2048 if (return_addr == reg) {
2049 tty->print_cr("return address");
2050 } else if (reg >= begin_in_preserve) {
2051 // Preserved slots are present on x86
2052 if (return_addr == OptoReg::add(reg, VMRegImpl::slots_per_word))
2053 tty->print_cr("saved fp register");
2054 else if (return_addr == OptoReg::add(reg, 2*VMRegImpl::slots_per_word) &&
2055 VerifyStackAtCalls)
2056 tty->print_cr("0xBADB100D +VerifyStackAtCalls");
2057 else
2058 tty->print_cr("in_preserve");
2059 } else if ((int)OptoReg::reg2stack(reg) < fixed_slots) {
2060 tty->print_cr("Fixed slot %d", OptoReg::reg2stack(reg));
2061 } else {
2062 tty->print_cr("pad2, stack alignment");
2063 }
2064 reg = OptoReg::add(reg, -1);
2065 }
2067 // Spill area dump
2068 reg = OptoReg::add(_matcher._new_SP, _framesize );
2069 while( reg > _matcher._out_arg_limit ) {
2070 reg = OptoReg::add(reg, -1);
2071 tty->print_cr("#r%3.3d %s+%2d: spill",reg,fp,reg2offset_unchecked(reg));
2072 }
2074 // Outgoing argument area dump
2075 while( reg > OptoReg::add(_matcher._new_SP, C->out_preserve_stack_slots()) ) {
2076 reg = OptoReg::add(reg, -1);
2077 tty->print_cr("#r%3.3d %s+%2d: outgoing argument",reg,fp,reg2offset_unchecked(reg));
2078 }
2080 // Outgoing new preserve area
2081 while( reg > _matcher._new_SP ) {
2082 reg = OptoReg::add(reg, -1);
2083 tty->print_cr("#r%3.3d %s+%2d: new out preserve",reg,fp,reg2offset_unchecked(reg));
2084 }
2085 tty->print_cr("#");
2086 }
2088 //------------------------------dump_bb----------------------------------------
2089 void PhaseChaitin::dump_bb( uint pre_order ) const {
2090 tty->print_cr("---dump of B%d---",pre_order);
2091 for( uint i = 0; i < _cfg._num_blocks; i++ ) {
2092 Block *b = _cfg._blocks[i];
2093 if( b->_pre_order == pre_order )
2094 dump(b);
2095 }
2096 }
2098 //------------------------------dump_lrg---------------------------------------
2099 void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
2100 tty->print_cr("---dump of L%d---",lidx);
2102 if( _ifg ) {
2103 if( lidx >= _maxlrg ) {
2104 tty->print("Attempt to print live range index beyond max live range.\n");
2105 return;
2106 }
2107 tty->print("L%d: ",lidx);
2108 if( lidx < _ifg->_maxlrg ) lrgs(lidx).dump( );
2109 else tty->print_cr("new LRG");
2110 }
2111 if( _ifg && lidx < _ifg->_maxlrg) {
2112 tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
2113 _ifg->neighbors(lidx)->dump();
2114 tty->cr();
2115 }
2116 // For all blocks
2117 for( uint i = 0; i < _cfg._num_blocks; i++ ) {
2118 Block *b = _cfg._blocks[i];
2119 int dump_once = 0;
2121 // For all instructions
2122 for( uint j = 0; j < b->_nodes.size(); j++ ) {
2123 Node *n = b->_nodes[j];
2124 if( Find_const(n) == lidx ) {
2125 if( !dump_once++ ) {
2126 tty->cr();
2127 b->dump_head( &_cfg._bbs );
2128 }
2129 dump(n);
2130 continue;
2131 }
2132 if (!defs_only) {
2133 uint cnt = n->req();
2134 for( uint k = 1; k < cnt; k++ ) {
2135 Node *m = n->in(k);
2136 if (!m) continue; // be robust in the dumper
2137 if( Find_const(m) == lidx ) {
2138 if( !dump_once++ ) {
2139 tty->cr();
2140 b->dump_head( &_cfg._bbs );
2141 }
2142 dump(n);
2143 }
2144 }
2145 }
2146 }
2147 } // End of per-block dump
2148 tty->cr();
2149 }
2150 #endif // not PRODUCT
2152 //------------------------------print_chaitin_statistics-------------------------------
2153 int PhaseChaitin::_final_loads = 0;
2154 int PhaseChaitin::_final_stores = 0;
2155 int PhaseChaitin::_final_memoves= 0;
2156 int PhaseChaitin::_final_copies = 0;
2157 double PhaseChaitin::_final_load_cost = 0;
2158 double PhaseChaitin::_final_store_cost = 0;
2159 double PhaseChaitin::_final_memove_cost= 0;
2160 double PhaseChaitin::_final_copy_cost = 0;
2161 int PhaseChaitin::_conserv_coalesce = 0;
2162 int PhaseChaitin::_conserv_coalesce_pair = 0;
2163 int PhaseChaitin::_conserv_coalesce_trie = 0;
2164 int PhaseChaitin::_conserv_coalesce_quad = 0;
2165 int PhaseChaitin::_post_alloc = 0;
2166 int PhaseChaitin::_lost_opp_pp_coalesce = 0;
2167 int PhaseChaitin::_lost_opp_cflow_coalesce = 0;
2168 int PhaseChaitin::_used_cisc_instructions = 0;
2169 int PhaseChaitin::_unused_cisc_instructions = 0;
2170 int PhaseChaitin::_allocator_attempts = 0;
2171 int PhaseChaitin::_allocator_successes = 0;
2173 #ifndef PRODUCT
2174 uint PhaseChaitin::_high_pressure = 0;
2175 uint PhaseChaitin::_low_pressure = 0;
2177 void PhaseChaitin::print_chaitin_statistics() {
2178 tty->print_cr("Inserted %d spill loads, %d spill stores, %d mem-mem moves and %d copies.", _final_loads, _final_stores, _final_memoves, _final_copies);
2179 tty->print_cr("Total load cost= %6.0f, store cost = %6.0f, mem-mem cost = %5.2f, copy cost = %5.0f.", _final_load_cost, _final_store_cost, _final_memove_cost, _final_copy_cost);
2180 tty->print_cr("Adjusted spill cost = %7.0f.",
2181 _final_load_cost*4.0 + _final_store_cost * 2.0 +
2182 _final_copy_cost*1.0 + _final_memove_cost*12.0);
2183 tty->print("Conservatively coalesced %d copies, %d pairs",
2184 _conserv_coalesce, _conserv_coalesce_pair);
2185 if( _conserv_coalesce_trie || _conserv_coalesce_quad )
2186 tty->print(", %d tries, %d quads", _conserv_coalesce_trie, _conserv_coalesce_quad);
2187 tty->print_cr(", %d post alloc.", _post_alloc);
2188 if( _lost_opp_pp_coalesce || _lost_opp_cflow_coalesce )
2189 tty->print_cr("Lost coalesce opportunity, %d private-private, and %d cflow interfered.",
2190 _lost_opp_pp_coalesce, _lost_opp_cflow_coalesce );
2191 if( _used_cisc_instructions || _unused_cisc_instructions )
2192 tty->print_cr("Used cisc instruction %d, remained in register %d",
2193 _used_cisc_instructions, _unused_cisc_instructions);
2194 if( _allocator_successes != 0 )
2195 tty->print_cr("Average allocation trips %f", (float)_allocator_attempts/(float)_allocator_successes);
2196 tty->print_cr("High Pressure Blocks = %d, Low Pressure Blocks = %d", _high_pressure, _low_pressure);
2197 }
2198 #endif // not PRODUCT