Thu, 01 Aug 2013 17:25:10 -0700
Merge
1 /*
2 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "compiler/compileLog.hpp"
27 #include "compiler/oopMap.hpp"
28 #include "memory/allocation.inline.hpp"
29 #include "opto/addnode.hpp"
30 #include "opto/block.hpp"
31 #include "opto/callnode.hpp"
32 #include "opto/cfgnode.hpp"
33 #include "opto/chaitin.hpp"
34 #include "opto/coalesce.hpp"
35 #include "opto/connode.hpp"
36 #include "opto/idealGraphPrinter.hpp"
37 #include "opto/indexSet.hpp"
38 #include "opto/machnode.hpp"
39 #include "opto/memnode.hpp"
40 #include "opto/opcodes.hpp"
41 #include "opto/rootnode.hpp"
43 //=============================================================================
45 #ifndef PRODUCT
46 void LRG::dump( ) const {
47 ttyLocker ttyl;
48 tty->print("%d ",num_regs());
49 _mask.dump();
50 if( _msize_valid ) {
51 if( mask_size() == compute_mask_size() ) tty->print(", #%d ",_mask_size);
52 else tty->print(", #!!!_%d_vs_%d ",_mask_size,_mask.Size());
53 } else {
54 tty->print(", #?(%d) ",_mask.Size());
55 }
57 tty->print("EffDeg: ");
58 if( _degree_valid ) tty->print( "%d ", _eff_degree );
59 else tty->print("? ");
61 if( is_multidef() ) {
62 tty->print("MultiDef ");
63 if (_defs != NULL) {
64 tty->print("(");
65 for (int i = 0; i < _defs->length(); i++) {
66 tty->print("N%d ", _defs->at(i)->_idx);
67 }
68 tty->print(") ");
69 }
70 }
71 else if( _def == 0 ) tty->print("Dead ");
72 else tty->print("Def: N%d ",_def->_idx);
74 tty->print("Cost:%4.2g Area:%4.2g Score:%4.2g ",_cost,_area, score());
75 // Flags
76 if( _is_oop ) tty->print("Oop ");
77 if( _is_float ) tty->print("Float ");
78 if( _is_vector ) tty->print("Vector ");
79 if( _was_spilled1 ) tty->print("Spilled ");
80 if( _was_spilled2 ) tty->print("Spilled2 ");
81 if( _direct_conflict ) tty->print("Direct_conflict ");
82 if( _fat_proj ) tty->print("Fat ");
83 if( _was_lo ) tty->print("Lo ");
84 if( _has_copy ) tty->print("Copy ");
85 if( _at_risk ) tty->print("Risk ");
87 if( _must_spill ) tty->print("Must_spill ");
88 if( _is_bound ) tty->print("Bound ");
89 if( _msize_valid ) {
90 if( _degree_valid && lo_degree() ) tty->print("Trivial ");
91 }
93 tty->cr();
94 }
95 #endif
97 //------------------------------score------------------------------------------
98 // Compute score from cost and area. Low score is best to spill.
99 static double raw_score( double cost, double area ) {
100 return cost - (area*RegisterCostAreaRatio) * 1.52588e-5;
101 }
103 double LRG::score() const {
104 // Scale _area by RegisterCostAreaRatio/64K then subtract from cost.
105 // Bigger area lowers score, encourages spilling this live range.
106 // Bigger cost raises score, prevents spilling this live range.
107 // (Note: 1/65536 is the magic constant below; I don't trust the C optimizer
108 // to turn a divide by a constant into a multiply by the reciprocal).
109 double score = raw_score( _cost, _area);
111 // Account for area. Basically, LRGs covering large areas are better
112 // to spill because more other LRGs get freed up.
113 if( _area == 0.0 ) // No area? Then no progress to spill
114 return 1e35;
116 if( _was_spilled2 ) // If spilled once before, we are unlikely
117 return score + 1e30; // to make progress again.
119 if( _cost >= _area*3.0 ) // Tiny area relative to cost
120 return score + 1e17; // Probably no progress to spill
122 if( (_cost+_cost) >= _area*3.0 ) // Small area relative to cost
123 return score + 1e10; // Likely no progress to spill
125 return score;
126 }
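// Summary of the spill-score tiers above, for quick reference: a live range
// with zero area scores 1e35 so it is effectively never chosen to spill; one
// that has already spilled twice gets +1e30; cost >= 3*area adds +1e17 and
// 2*cost >= 3*area adds +1e10, both marking ranges where spilling is unlikely
// to relieve pressure. Lower score is spilled first.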
128 //------------------------------LRG_List---------------------------------------
129 LRG_List::LRG_List( uint max ) : _cnt(max), _max(max), _lidxs(NEW_RESOURCE_ARRAY(uint,max)) {
130 memset( _lidxs, 0, sizeof(uint)*max );
131 }
133 void LRG_List::extend( uint nidx, uint lidx ) {
134 _nesting.check();
135 if( nidx >= _max ) {
136 uint size = 16;
137 while( size <= nidx ) size <<=1;
138 _lidxs = REALLOC_RESOURCE_ARRAY( uint, _lidxs, _max, size );
139 _max = size;
140 }
141 while( _cnt <= nidx )
142 _lidxs[_cnt++] = 0;
143 _lidxs[nidx] = lidx;
144 }
146 #define NUMBUCKS 3
148 // Straight out of Tarjan's union-find algorithm
149 uint LiveRangeMap::find_compress(uint lrg) {
150 uint cur = lrg;
151 uint next = _uf_map[cur];
152 while (next != cur) { // Scan chain of equivalences
153 assert( next < cur, "always union smaller");
154 cur = next; // until find a fixed-point
155 next = _uf_map[cur];
156 }
158 // Core of union-find algorithm: update chain of
159 // equivalences to be equal to the root.
160 while (lrg != next) {
161 uint tmp = _uf_map[lrg];
162 _uf_map.map(lrg, next);
163 lrg = tmp;
164 }
165 return lrg;
166 }
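// Illustrative example: with _uf_map = { 5->3, 3->2, 2->2 }, find_compress(5)
// first walks 5->3->2 to locate the root 2, then rewrites the chain so that
// both 5 and 3 map straight to 2; subsequent finds on these entries terminate
// in a single step.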
168 // Reset the Union-Find map to identity
169 void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
170 _max_lrg_id= max_lrg_id;
171 // Force the Union-Find mapping to be at least this large
172 _uf_map.extend(_max_lrg_id, 0);
173 // Initialize it to be the ID mapping.
174 for (uint i = 0; i < _max_lrg_id; ++i) {
175 _uf_map.map(i, i);
176 }
177 }
179 // Make all Nodes map directly to their final live range; no need for
180 // the Union-Find mapping after this call.
181 void LiveRangeMap::compress_uf_map_for_nodes() {
182 // For all Nodes, compress mapping
183 uint unique = _names.Size();
184 for (uint i = 0; i < unique; ++i) {
185 uint lrg = _names[i];
186 uint compressed_lrg = find(lrg);
187 if (lrg != compressed_lrg) {
188 _names.map(i, compressed_lrg);
189 }
190 }
191 }
193 // Like Find above, but with no path compression, so bad asymptotic behavior
194 uint LiveRangeMap::find_const(uint lrg) const {
195 if (!lrg) {
196 return lrg; // Ignore the zero LRG
197 }
199 // Off the end? This happens during debugging dumps when you have
200 // brand new live ranges but have not told the allocator about them yet.
201 if (lrg >= _max_lrg_id) {
202 return lrg;
203 }
205 uint next = _uf_map[lrg];
206 while (next != lrg) { // Scan chain of equivalences
207 assert(next < lrg, "always union smaller");
208 lrg = next; // until find a fixed-point
209 next = _uf_map[lrg];
210 }
211 return next;
212 }
214 //------------------------------Chaitin----------------------------------------
215 PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
216 : PhaseRegAlloc(unique, cfg, matcher,
217 #ifndef PRODUCT
218 print_chaitin_statistics
219 #else
220 NULL
221 #endif
222 )
223 , _lrg_map(unique)
224 , _live(0)
225 , _spilled_once(Thread::current()->resource_area())
226 , _spilled_twice(Thread::current()->resource_area())
227 , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0)
228 , _oldphi(unique)
229 #ifndef PRODUCT
230 , _trace_spilling(TraceSpilling || C->method_has_option("TraceSpilling"))
231 #endif
232 {
233 NOT_PRODUCT( Compile::TracePhase t3("ctorChaitin", &_t_ctorChaitin, TimeCompiler); )
235 _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg._outer_loop_freq);
237 // Build a list of basic blocks, sorted by frequency
238 _blks = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks );
239 // Experiment with sorting strategies to speed compilation
240 double cutoff = BLOCK_FREQUENCY(1.0); // Cutoff for high frequency bucket
241 Block **buckets[NUMBUCKS]; // Array of buckets
242 uint buckcnt[NUMBUCKS]; // Array of bucket counters
243 double buckval[NUMBUCKS]; // Array of bucket value cutoffs
244 for (uint i = 0; i < NUMBUCKS; i++) {
245 buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg._num_blocks);
246 buckcnt[i] = 0;
247 // Bump by three orders of magnitude each time
248 cutoff *= 0.001;
249 buckval[i] = cutoff;
250 for (uint j = 0; j < _cfg._num_blocks; j++) {
251 buckets[i][j] = NULL;
252 }
253 }
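// With NUMBUCKS == 3 the cutoffs above work out to BLOCK_FREQUENCY(1.0) scaled
// by 1e-3, 1e-6 and 1e-9. A block lands in the first bucket whose cutoff its
// frequency exceeds, and the last bucket catches everything else, so once the
// buckets are concatenated below the hottest blocks sit at the front of _blks.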
254 // Sort blocks into buckets
255 for (uint i = 0; i < _cfg._num_blocks; i++) {
256 for (uint j = 0; j < NUMBUCKS; j++) {
257 if ((j == NUMBUCKS - 1) || (_cfg._blocks[i]->_freq > buckval[j])) {
258 // Assign block to end of list for appropriate bucket
259 buckets[j][buckcnt[j]++] = _cfg._blocks[i];
260 break; // kick out of inner loop
261 }
262 }
263 }
264 // Dump buckets into final block array
265 uint blkcnt = 0;
266 for (uint i = 0; i < NUMBUCKS; i++) {
267 for (uint j = 0; j < buckcnt[i]; j++) {
268 _blks[blkcnt++] = buckets[i][j];
269 }
270 }
272 assert(blkcnt == _cfg._num_blocks, "Block array not totally filled");
273 }
275 //------------------------------Union------------------------------------------
276 // union 2 sets together.
277 void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
278 uint src = _lrg_map.find(src_n);
279 uint dst = _lrg_map.find(dst_n);
280 assert(src, "");
281 assert(dst, "");
282 assert(src < _lrg_map.max_lrg_id(), "oob");
283 assert(dst < _lrg_map.max_lrg_id(), "oob");
284 assert(src < dst, "always union smaller");
285 _lrg_map.uf_map(dst, src);
286 }
288 //------------------------------new_lrg----------------------------------------
289 void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
290 // Make the Node->LRG mapping
291 _lrg_map.extend(x->_idx,lrg);
292 // Make the Union-Find mapping an identity function
293 _lrg_map.uf_extend(lrg, lrg);
294 }
297 bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) {
298 Block *bcon = _cfg._bbs[con->_idx];
299 uint cindex = bcon->find_node(con);
300 Node *con_next = bcon->_nodes[cindex+1];
301 if (con_next->in(0) != con || !con_next->is_MachProj()) {
302 return false; // No MachProj's follow
303 }
305 // Copy kills after the cloned constant
306 Node *kills = con_next->clone();
307 kills->set_req(0, copy);
308 b->_nodes.insert(idx, kills);
309 _cfg._bbs.map(kills->_idx, b);
310 new_lrg(kills, max_lrg_id);
311 return true;
312 }
314 //------------------------------compact----------------------------------------
315 // Renumber the live ranges to compact them. Makes the IFG smaller.
316 void PhaseChaitin::compact() {
317 // Currently the _uf_map contains a series of short chains which are headed
318 // by a self-cycle. All the chains run from big numbers to little numbers.
319 // The Find() call chases the chains & shortens them for the next Find call.
320 // We are going to change this structure slightly. Numbers above a moving
321 // wave 'i' are unchanged. Numbers below 'j' point directly to their
322 // compacted live range with no further chaining. There are no chains or
323 // cycles below 'i', so the Find call no longer works.
324 uint j=1;
325 uint i;
326 for (i = 1; i < _lrg_map.max_lrg_id(); i++) {
327 uint lr = _lrg_map.uf_live_range_id(i);
328 // Ignore unallocated live ranges
329 if (!lr) {
330 continue;
331 }
332 assert(lr <= i, "");
333 _lrg_map.uf_map(i, ( lr == i ) ? j++ : _lrg_map.uf_live_range_id(lr));
334 }
335 // Now change the Node->LR mapping to reflect the compacted names
336 uint unique = _lrg_map.size();
337 for (i = 0; i < unique; i++) {
338 uint lrg_id = _lrg_map.live_range_id(i);
339 _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id));
340 }
342 // Reset the Union-Find mapping
343 _lrg_map.reset_uf_map(j);
344 }
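// Illustrative example: with four live ranges whose union-find roots are
// { 1->1, 2->1, 3->3, 4->3 }, the first loop renumbers the roots in order
// (1 stays 1, 3 becomes 2) and redirects the non-roots to the new names
// (2 becomes 1, 4 becomes 2); reset_uf_map(3) then leaves two compacted live
// ranges plus the reserved zero entry.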
346 void PhaseChaitin::Register_Allocate() {
348 // Above the OLD FP (and in registers) are the incoming arguments. Stack
349 // slots in this area are called "arg_slots". Above the NEW FP (and in
350 // registers) is the outgoing argument area; above that is the spill/temp
351 // area. These are all "frame_slots". Arg_slots start at the zero
352 // stack_slots and count up to the known arg_size. Frame_slots start at
353 // the stack_slot #arg_size and go up. After allocation I map stack
354 // slots to actual offsets. Stack-slots in the arg_slot area are biased
355 // by the frame_size; stack-slots in the frame_slot area are biased by 0.
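// A rough sketch of the layout described above (higher slots toward the
// caller):
//
//      incoming arguments        arg_slots:   0 .. arg_size-1
//   ---- OLD FP/SP ----
//      spill / temp area         frame_slots: arg_size and up
//      outgoing argument area
//   ---- NEW FP/SP ----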
357 _trip_cnt = 0;
358 _alternate = 0;
359 _matcher._allocation_started = true;
361 ResourceArea split_arena; // Arena for Split local resources
362 ResourceArea live_arena; // Arena for liveness & IFG info
363 ResourceMark rm(&live_arena);
365 // Need live-ness for the IFG; need the IFG for coalescing. If the
366 // liveness is JUST for coalescing, then I can get some mileage by renaming
367 // all copy-related live ranges low and then using the max copy-related
368 // live range as a cut-off for LIVE and the IFG. In other words, I can
369 // build a subset of LIVE and IFG just for copies.
370 PhaseLive live(_cfg, _lrg_map.names(), &live_arena);
372 // Need IFG for coalescing and coloring
373 PhaseIFG ifg(&live_arena);
374 _ifg = &ifg;
376 // Come out of SSA world to the Named world. Assign (virtual) registers to
377 // Nodes. Use the same register for all inputs and the output of PhiNodes
378 // - effectively ending SSA form. This requires either coalescing live
379 // ranges or inserting copies. For the moment, we insert "virtual copies"
380 // - we pretend there is a copy prior to each Phi in predecessor blocks.
381 // We will attempt to coalesce such "virtual copies" before we manifest
382 // them for real.
383 de_ssa();
385 #ifdef ASSERT
386 // Verify the graph before RA.
387 verify(&live_arena);
388 #endif
390 {
391 NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
392 _live = NULL; // Mark live as being not available
393 rm.reset_to_mark(); // Reclaim working storage
394 IndexSet::reset_memory(C, &live_arena);
395 ifg.init(_lrg_map.max_lrg_id()); // Empty IFG
396 gather_lrg_masks( false ); // Collect LRG masks
397 live.compute(_lrg_map.max_lrg_id()); // Compute liveness
398 _live = &live; // Mark LIVE as being available
399 }
401 // Base pointers are currently "used" by instructions which define new
402 // derived pointers. This makes base pointers live up to where the
403 // derived pointer is made, but not beyond. Really, they need to be live
404 // across any GC point where the derived value is live. So this code looks
405 // at all the GC points, and "stretches" the live range of any base pointer
406 // to the GC point.
407 if (stretch_base_pointer_live_ranges(&live_arena)) {
408 NOT_PRODUCT(Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler);)
409 // Since some live range stretched, I need to recompute live
410 _live = NULL;
411 rm.reset_to_mark(); // Reclaim working storage
412 IndexSet::reset_memory(C, &live_arena);
413 ifg.init(_lrg_map.max_lrg_id());
414 gather_lrg_masks(false);
415 live.compute(_lrg_map.max_lrg_id());
416 _live = &live;
417 }
418 // Create the interference graph using virtual copies
419 build_ifg_virtual(); // Include stack slots this time
421 // Aggressive (but pessimistic) copy coalescing.
422 // This pass works on virtual copies. Any virtual copies which are not
423 // coalesced get manifested as actual copies
424 {
425 // The IFG is/was triangular. I am 'squaring it up' so Union can run
426 // faster. Union requires a 'for all' operation which is slow on the
427 // triangular adjacency matrix (quick reminder: the IFG is 'sparse' -
429 // meaning I can visit all of a Node's neighbors less than that Node in time
429 // O(# of neighbors), but I have to visit all the Nodes greater than a
430 // given Node and search them for an instance, i.e., time O(#MaxLRG)).
431 _ifg->SquareUp();
433 PhaseAggressiveCoalesce coalesce(*this);
434 coalesce.coalesce_driver();
435 // Insert un-coalesced copies. Visit all Phis. Where inputs to a Phi do
436 // not match the Phi itself, insert a copy.
437 coalesce.insert_copies(_matcher);
438 if (C->failing()) {
439 return;
440 }
441 }
443 // After aggressive coalesce, attempt a first cut at coloring.
444 // To color, we need the IFG and for that we need LIVE.
445 {
446 NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
447 _live = NULL;
448 rm.reset_to_mark(); // Reclaim working storage
449 IndexSet::reset_memory(C, &live_arena);
450 ifg.init(_lrg_map.max_lrg_id());
451 gather_lrg_masks( true );
452 live.compute(_lrg_map.max_lrg_id());
453 _live = &live;
454 }
456 // Build physical interference graph
457 uint must_spill = 0;
458 must_spill = build_ifg_physical(&live_arena);
459 // If we have a guaranteed spill, might as well spill now
460 if (must_spill) {
461 if(!_lrg_map.max_lrg_id()) {
462 return;
463 }
464 // Bail out if unique gets too large (ie - unique > MaxNodeLimit)
465 C->check_node_count(10*must_spill, "out of nodes before split");
466 if (C->failing()) {
467 return;
468 }
470 uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena); // Split spilling LRG everywhere
471 _lrg_map.set_max_lrg_id(new_max_lrg_id);
472 // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
473 // or we failed to split
474 C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");
475 if (C->failing()) {
476 return;
477 }
479 NOT_PRODUCT(C->verify_graph_edges();)
481 compact(); // Compact LRGs; return new lower max lrg
483 {
484 NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
485 _live = NULL;
486 rm.reset_to_mark(); // Reclaim working storage
487 IndexSet::reset_memory(C, &live_arena);
488 ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph
489 gather_lrg_masks( true ); // Collect intersect mask
490 live.compute(_lrg_map.max_lrg_id()); // Compute LIVE
491 _live = &live;
492 }
493 build_ifg_physical(&live_arena);
494 _ifg->SquareUp();
495 _ifg->Compute_Effective_Degree();
496 // Only do conservative coalescing if requested
497 if (OptoCoalesce) {
498 // Conservative (and pessimistic) copy coalescing of those spills
499 PhaseConservativeCoalesce coalesce(*this);
500 // If the max number of live ranges is greater than the cutoff, don't color the stack.
501 // This cutoff can be larger than below since it is only done once.
502 coalesce.coalesce_driver();
503 }
504 _lrg_map.compress_uf_map_for_nodes();
506 #ifdef ASSERT
507 verify(&live_arena, true);
508 #endif
509 } else {
510 ifg.SquareUp();
511 ifg.Compute_Effective_Degree();
512 #ifdef ASSERT
513 set_was_low();
514 #endif
515 }
517 // Prepare for Simplify & Select
518 cache_lrg_info(); // Count degree of LRGs
520 // Simplify the InterFerence Graph by removing LRGs of low degree.
521 // LRGs of low degree are trivially colorable.
522 Simplify();
524 // Select colors by re-inserting LRGs back into the IFG in reverse order.
525 // Return whether or not something spills.
526 uint spills = Select( );
528 // If we spill, split and recycle the entire thing
529 while( spills ) {
530 if( _trip_cnt++ > 24 ) {
531 DEBUG_ONLY( dump_for_spill_split_recycle(); )
532 if( _trip_cnt > 27 ) {
533 C->record_method_not_compilable("failed spill-split-recycle sanity check");
534 return;
535 }
536 }
538 if (!_lrg_map.max_lrg_id()) {
539 return;
540 }
541 uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena); // Split spilling LRG everywhere
542 _lrg_map.set_max_lrg_id(new_max_lrg_id);
543 // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
544 C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split");
545 if (C->failing()) {
546 return;
547 }
549 compact(); // Compact LRGs; return new lower max lrg
551 // Nuke the live-ness and interference graph and LiveRanGe info
552 {
553 NOT_PRODUCT( Compile::TracePhase t3("computeLive", &_t_computeLive, TimeCompiler); )
554 _live = NULL;
555 rm.reset_to_mark(); // Reclaim working storage
556 IndexSet::reset_memory(C, &live_arena);
557 ifg.init(_lrg_map.max_lrg_id());
559 // Create LiveRanGe array.
560 // Intersect register masks for all USEs and DEFs
561 gather_lrg_masks(true);
562 live.compute(_lrg_map.max_lrg_id());
563 _live = &live;
564 }
565 must_spill = build_ifg_physical(&live_arena);
566 _ifg->SquareUp();
567 _ifg->Compute_Effective_Degree();
569 // Only do conservative coalescing if requested
570 if (OptoCoalesce) {
571 // Conservative (and pessimistic) copy coalescing
572 PhaseConservativeCoalesce coalesce(*this);
573 // Checking for few live ranges determines how aggressive the coalescing is.
574 coalesce.coalesce_driver();
575 }
576 _lrg_map.compress_uf_map_for_nodes();
577 #ifdef ASSERT
578 verify(&live_arena, true);
579 #endif
580 cache_lrg_info(); // Count degree of LRGs
582 // Simplify the InterFerence Graph by removing LRGs of low degree.
583 // LRGs of low degree are trivially colorable.
584 Simplify();
586 // Select colors by re-inserting LRGs back into the IFG in reverse order.
587 // Return whether or not something spills.
588 spills = Select();
589 }
591 // Count number of Simplify-Select trips per coloring success.
592 _allocator_attempts += _trip_cnt + 1;
593 _allocator_successes += 1;
595 // Peephole remove copies
596 post_allocate_copy_removal();
598 #ifdef ASSERT
599 // Verify the graph after RA.
600 verify(&live_arena);
601 #endif
603 // max_reg is past the largest *register* used.
604 // Convert that to a frame_slot number.
605 if (_max_reg <= _matcher._new_SP) {
606 _framesize = C->out_preserve_stack_slots();
607 }
608 else {
609 _framesize = _max_reg -_matcher._new_SP;
610 }
611 assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");
613 // This frame must preserve the required fp alignment
614 _framesize = round_to(_framesize, Matcher::stack_alignment_in_slots());
615 assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" );
616 #ifndef PRODUCT
617 _total_framesize += _framesize;
618 if ((int)_framesize > _max_framesize) {
619 _max_framesize = _framesize;
620 }
621 #endif
623 // Convert CISC spills
624 fixup_spills();
626 // Log regalloc results
627 CompileLog* log = Compile::current()->log();
628 if (log != NULL) {
629 log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing());
630 }
632 if (C->failing()) {
633 return;
634 }
636 NOT_PRODUCT(C->verify_graph_edges();)
638 // Move important info out of the live_arena to longer lasting storage.
639 alloc_node_regs(_lrg_map.size());
640 for (uint i=0; i < _lrg_map.size(); i++) {
641 if (_lrg_map.live_range_id(i)) { // Live range associated with Node?
642 LRG &lrg = lrgs(_lrg_map.live_range_id(i));
643 if (!lrg.alive()) {
644 set_bad(i);
645 } else if (lrg.num_regs() == 1) {
646 set1(i, lrg.reg());
647 } else { // Must be a register-set
648 if (!lrg._fat_proj) { // Must be aligned adjacent register set
649 // Live ranges record the highest register in their mask.
650 // We want the low register for the AD file writer's convenience.
651 OptoReg::Name hi = lrg.reg(); // Get hi register
652 OptoReg::Name lo = OptoReg::add(hi, (1-lrg.num_regs())); // Find lo
653 // We have to use pair [lo,lo+1] even for wide vectors because
654 // the rest of code generation works only with pairs. It is safe
655 // since for registers encoding only 'lo' is used.
656 // Second reg from pair is used in ScheduleAndBundle on SPARC where
657 // vector max size is 8 which corresponds to registers pair.
658 // It is also used in BuildOopMaps but oop operations are not
659 // vectorized.
660 set2(i, lo);
661 } else { // Misaligned; extract 2 bits
662 OptoReg::Name hi = lrg.reg(); // Get hi register
663 lrg.Remove(hi); // Yank from mask
664 int lo = lrg.mask().find_first_elem(); // Find lo
665 set_pair(i, hi, lo);
666 }
667 }
668 if( lrg._is_oop ) _node_oops.set(i);
669 } else {
670 set_bad(i);
671 }
672 }
674 // Done!
675 _live = NULL;
676 _ifg = NULL;
677 C->set_indexSet_arena(NULL); // ResourceArea is at end of scope
678 }
680 //------------------------------de_ssa-----------------------------------------
681 void PhaseChaitin::de_ssa() {
682 // Set initial Names for all Nodes. Most Nodes get the virtual register
683 // number. A few get the ZERO live range number. These do not
684 // get allocated, but instead rely on correct scheduling to ensure that
685 // only one instance is live at a time.
686 uint lr_counter = 1;
687 for( uint i = 0; i < _cfg._num_blocks; i++ ) {
688 Block *b = _cfg._blocks[i];
689 uint cnt = b->_nodes.size();
691 // Handle all the normal Nodes in the block
692 for( uint j = 0; j < cnt; j++ ) {
693 Node *n = b->_nodes[j];
694 // Pre-color to the zero live range, or pick virtual register
695 const RegMask &rm = n->out_RegMask();
696 _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
697 }
698 }
699 // Reset the Union-Find mapping to be identity
700 _lrg_map.reset_uf_map(lr_counter);
701 }
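// After de_ssa every Node whose out_RegMask is non-empty owns a fresh virtual
// register number in 1..lr_counter-1, while nodes with an empty mask share
// live range 0 and are never allocated; the identity union-find set up by
// reset_uf_map is what the later coalescing passes (see Union above) collapse.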
704 //------------------------------gather_lrg_masks-------------------------------
705 // Gather LiveRanGe information, including register masks. Modification of
706 // cisc spillable in_RegMasks should not be done before AggressiveCoalesce.
707 void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
709 // Nail down the frame pointer live range
710 uint fp_lrg = _lrg_map.live_range_id(_cfg._root->in(1)->in(TypeFunc::FramePtr));
711 lrgs(fp_lrg)._cost += 1e12; // Cost is infinite
713 // For all blocks
714 for( uint i = 0; i < _cfg._num_blocks; i++ ) {
715 Block *b = _cfg._blocks[i];
717 // For all instructions
718 for( uint j = 1; j < b->_nodes.size(); j++ ) {
719 Node *n = b->_nodes[j];
720 uint input_edge_start = 1; // Skip the control edge for most nodes
721 if( n->is_Mach() ) input_edge_start = n->as_Mach()->oper_input_base();
722 uint idx = n->is_Copy();
724 // Get virtual register number, same as LiveRanGe index
725 uint vreg = _lrg_map.live_range_id(n);
726 LRG &lrg = lrgs(vreg);
727 if( vreg ) { // No vreg means un-allocable (e.g. memory)
729 // Collect has-copy bit
730 if( idx ) {
731 lrg._has_copy = 1;
732 uint clidx = _lrg_map.live_range_id(n->in(idx));
733 LRG &copy_src = lrgs(clidx);
734 copy_src._has_copy = 1;
735 }
737 // Check for float-vs-int live range (used in register-pressure
738 // calculations)
739 const Type *n_type = n->bottom_type();
740 if (n_type->is_floatingpoint())
741 lrg._is_float = 1;
743 // Check for twice prior spilling. Once prior spilling might have
744 // spilled 'soft', 2nd prior spill should have spilled 'hard' and
745 // further spilling is unlikely to make progress.
746 if( _spilled_once.test(n->_idx) ) {
747 lrg._was_spilled1 = 1;
748 if( _spilled_twice.test(n->_idx) )
749 lrg._was_spilled2 = 1;
750 }
752 #ifndef PRODUCT
753 if (trace_spilling() && lrg._def != NULL) {
754 // collect defs for MultiDef printing
755 if (lrg._defs == NULL) {
756 lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
757 lrg._defs->append(lrg._def);
758 }
759 lrg._defs->append(n);
760 }
761 #endif
763 // Check for a single def LRG; these can spill nicely
764 // via rematerialization. Flag as NULL for no def found
765 // yet, or 'n' for single def or -1 for many defs.
766 lrg._def = lrg._def ? NodeSentinel : n;
768 // Limit result register mask to acceptable registers
769 const RegMask &rm = n->out_RegMask();
770 lrg.AND( rm );
772 int ireg = n->ideal_reg();
773 assert( !n->bottom_type()->isa_oop_ptr() || ireg == Op_RegP,
774 "oops must be in Op_RegP's" );
776 // Check for vector live range (only if vector register is used).
777 // On SPARC a vector uses RegD which could be misaligned so it is not
778 // processed as a vector in RA.
779 if (RegMask::is_vector(ireg))
780 lrg._is_vector = 1;
781 assert(n_type->isa_vect() == NULL || lrg._is_vector || ireg == Op_RegD,
782 "vector must be in vector registers");
784 // Check for bound register masks
785 const RegMask &lrgmask = lrg.mask();
786 if (lrgmask.is_bound(ireg))
787 lrg._is_bound = 1;
789 // Check for maximum frequency value
790 if (lrg._maxfreq < b->_freq)
791 lrg._maxfreq = b->_freq;
793 // Check for oop-iness, or long/double
794 // Check for multi-kill projection
795 switch( ireg ) {
796 case MachProjNode::fat_proj:
797 // Fat projections have size equal to number of registers killed
798 lrg.set_num_regs(rm.Size());
799 lrg.set_reg_pressure(lrg.num_regs());
800 lrg._fat_proj = 1;
801 lrg._is_bound = 1;
802 break;
803 case Op_RegP:
804 #ifdef _LP64
805 lrg.set_num_regs(2); // Size is 2 stack words
806 #else
807 lrg.set_num_regs(1); // Size is 1 stack word
808 #endif
809 // Register pressure is tracked relative to the maximum values
810 // suggested for that platform, INTPRESSURE and FLOATPRESSURE,
811 // and relative to other types which compete for the same regs.
812 //
813 // The following table contains suggested values based on the
814 // architectures as defined in each .ad file.
815 // INTPRESSURE and FLOATPRESSURE may be tuned differently for
816 // compile-speed or performance.
817 // Note1:
818 // SPARC and SPARCV9 reg_pressures are at 2 instead of 1
819 // since .ad registers are defined as high and low halves.
820 // These reg_pressure values remain compatible with the code
821 // in is_high_pressure() which relates get_invalid_mask_size(),
822 // Block::_reg_pressure and INTPRESSURE, FLOATPRESSURE.
823 // Note2:
824 // SPARC -d32 has 24 registers available for integral values,
825 // but only 10 of these are safe for 64-bit longs.
826 // Using set_reg_pressure(2) for both int and long means
827 // the allocator will believe it can fit 26 longs into
828 // registers. Using 2 for longs and 1 for ints means the
829 // allocator will attempt to put 52 integers into registers.
830 // The settings below limit this problem to methods with
831 // many long values which are being run on 32-bit SPARC.
832 //
833 // ------------------- reg_pressure --------------------
834 // Each entry is reg_pressure_per_value,number_of_regs
835 // RegL RegI RegFlags RegF RegD INTPRESSURE FLOATPRESSURE
836 // IA32 2 1 1 1 1 6 6
837 // IA64 1 1 1 1 1 50 41
838 // SPARC 2 2 2 2 2 48 (24) 52 (26)
839 // SPARCV9 2 2 2 2 2 48 (24) 52 (26)
840 // AMD64 1 1 1 1 1 14 15
841 // -----------------------------------------------------
842 #if defined(SPARC)
843 lrg.set_reg_pressure(2); // use for v9 as well
844 #else
845 lrg.set_reg_pressure(1); // normally one value per register
846 #endif
847 if( n_type->isa_oop_ptr() ) {
848 lrg._is_oop = 1;
849 }
850 break;
851 case Op_RegL: // Check for long or double
852 case Op_RegD:
853 lrg.set_num_regs(2);
854 // Define platform specific register pressure
855 #if defined(SPARC) || defined(ARM)
856 lrg.set_reg_pressure(2);
857 #elif defined(IA32)
858 if( ireg == Op_RegL ) {
859 lrg.set_reg_pressure(2);
860 } else {
861 lrg.set_reg_pressure(1);
862 }
863 #else
864 lrg.set_reg_pressure(1); // normally one value per register
865 #endif
866 // If this def of a double forces a mis-aligned double,
867 // flag as '_fat_proj' - really flag as allowing misalignment
868 // AND changes how we count interferences. A mis-aligned
869 // double can interfere with TWO aligned pairs, or effectively
870 // FOUR registers!
871 if (rm.is_misaligned_pair()) {
872 lrg._fat_proj = 1;
873 lrg._is_bound = 1;
874 }
875 break;
876 case Op_RegF:
877 case Op_RegI:
878 case Op_RegN:
879 case Op_RegFlags:
880 case 0: // not an ideal register
881 lrg.set_num_regs(1);
882 #ifdef SPARC
883 lrg.set_reg_pressure(2);
884 #else
885 lrg.set_reg_pressure(1);
886 #endif
887 break;
888 case Op_VecS:
889 assert(Matcher::vector_size_supported(T_BYTE,4), "sanity");
890 assert(RegMask::num_registers(Op_VecS) == RegMask::SlotsPerVecS, "sanity");
891 lrg.set_num_regs(RegMask::SlotsPerVecS);
892 lrg.set_reg_pressure(1);
893 break;
894 case Op_VecD:
895 assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecD), "sanity");
896 assert(RegMask::num_registers(Op_VecD) == RegMask::SlotsPerVecD, "sanity");
897 assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecD), "vector should be aligned");
898 lrg.set_num_regs(RegMask::SlotsPerVecD);
899 lrg.set_reg_pressure(1);
900 break;
901 case Op_VecX:
902 assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecX), "sanity");
903 assert(RegMask::num_registers(Op_VecX) == RegMask::SlotsPerVecX, "sanity");
904 assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecX), "vector should be aligned");
905 lrg.set_num_regs(RegMask::SlotsPerVecX);
906 lrg.set_reg_pressure(1);
907 break;
908 case Op_VecY:
909 assert(Matcher::vector_size_supported(T_FLOAT,RegMask::SlotsPerVecY), "sanity");
910 assert(RegMask::num_registers(Op_VecY) == RegMask::SlotsPerVecY, "sanity");
911 assert(lrgmask.is_aligned_sets(RegMask::SlotsPerVecY), "vector should be aligned");
912 lrg.set_num_regs(RegMask::SlotsPerVecY);
913 lrg.set_reg_pressure(1);
914 break;
915 default:
916 ShouldNotReachHere();
917 }
918 }
920 // Now do the same for inputs
921 uint cnt = n->req();
922 // Setup for CISC SPILLING
923 uint inp = (uint)AdlcVMDeps::Not_cisc_spillable;
924 if( UseCISCSpill && after_aggressive ) {
925 inp = n->cisc_operand();
926 if( inp != (uint)AdlcVMDeps::Not_cisc_spillable )
927 // Convert operand number to edge index number
928 inp = n->as_Mach()->operand_index(inp);
929 }
930 // Prepare register mask for each input
931 for( uint k = input_edge_start; k < cnt; k++ ) {
932 uint vreg = _lrg_map.live_range_id(n->in(k));
933 if (!vreg) {
934 continue;
935 }
937 // If this instruction is CISC Spillable, add the flags
938 // bit to its appropriate input
939 if( UseCISCSpill && after_aggressive && inp == k ) {
940 #ifndef PRODUCT
941 if( TraceCISCSpill ) {
942 tty->print(" use_cisc_RegMask: ");
943 n->dump();
944 }
945 #endif
946 n->as_Mach()->use_cisc_RegMask();
947 }
949 LRG &lrg = lrgs(vreg);
950 // // Testing for floating point code shape
951 // Node *test = n->in(k);
952 // if( test->is_Mach() ) {
953 // MachNode *m = test->as_Mach();
954 // int op = m->ideal_Opcode();
955 // if (n->is_Call() && (op == Op_AddF || op == Op_MulF) ) {
956 // int zzz = 1;
957 // }
958 // }
960 // Limit result register mask to acceptable registers.
961 // Do not limit registers from uncommon uses before
962 // AggressiveCoalesce. This effectively pre-virtual-splits
963 // around uncommon uses of common defs.
964 const RegMask &rm = n->in_RegMask(k);
965 if( !after_aggressive &&
966 _cfg._bbs[n->in(k)->_idx]->_freq > 1000*b->_freq ) {
967 // Since we are BEFORE aggressive coalesce, leave the register
968 // mask untrimmed by the call. This encourages more coalescing.
969 // Later, AFTER aggressive, this live range will have to spill
970 // but the spiller handles slow-path calls very nicely.
971 } else {
972 lrg.AND( rm );
973 }
975 // Check for bound register masks
976 const RegMask &lrgmask = lrg.mask();
977 int kreg = n->in(k)->ideal_reg();
978 bool is_vect = RegMask::is_vector(kreg);
979 assert(n->in(k)->bottom_type()->isa_vect() == NULL ||
980 is_vect || kreg == Op_RegD,
981 "vector must be in vector registers");
982 if (lrgmask.is_bound(kreg))
983 lrg._is_bound = 1;
985 // If this use of a double forces a mis-aligned double,
986 // flag as '_fat_proj' - really flag as allowing misalignment
987 // AND changes how we count interferences. A mis-aligned
988 // double can interfere with TWO aligned pairs, or effectively
989 // FOUR registers!
990 #ifdef ASSERT
991 if (is_vect) {
992 assert(lrgmask.is_aligned_sets(lrg.num_regs()), "vector should be aligned");
993 assert(!lrg._fat_proj, "sanity");
994 assert(RegMask::num_registers(kreg) == lrg.num_regs(), "sanity");
995 }
996 #endif
997 if (!is_vect && lrg.num_regs() == 2 && !lrg._fat_proj && rm.is_misaligned_pair()) {
998 lrg._fat_proj = 1;
999 lrg._is_bound = 1;
1000 }
1001 // if the LRG is an unaligned pair, we will have to spill
1002 // so clear the LRG's register mask if it is not already spilled
1003 if (!is_vect && !n->is_SpillCopy() &&
1004 (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
1005 lrgmask.is_misaligned_pair()) {
1006 lrg.Clear();
1007 }
1009 // Check for maximum frequency value
1010 if( lrg._maxfreq < b->_freq )
1011 lrg._maxfreq = b->_freq;
1013 } // End for all allocated inputs
1014 } // end for all instructions
1015 } // end for all blocks
1017 // Final per-liverange setup
1018 for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) {
1019 LRG &lrg = lrgs(i2);
1020 assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
1021 if (lrg.num_regs() > 1 && !lrg._fat_proj) {
1022 lrg.clear_to_sets();
1023 }
1024 lrg.compute_set_mask_size();
1025 if (lrg.not_free()) { // Handle case where we lose from the start
1026 lrg.set_reg(OptoReg::Name(LRG::SPILL_REG));
1027 lrg._direct_conflict = 1;
1028 }
1029 lrg.set_degree(0); // no neighbors in IFG yet
1030 }
1031 }
1033 //------------------------------set_was_low------------------------------------
1034 // Set the was-lo-degree bit. Conservative coalescing should not change the
1035 // colorability of the graph. If any live range was of low-degree before
1036 // coalescing, it should Simplify. This call sets the was-lo-degree bit.
1037 // The bit is checked in Simplify.
1038 void PhaseChaitin::set_was_low() {
1039 #ifdef ASSERT
1040 for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
1041 int size = lrgs(i).num_regs();
1042 uint old_was_lo = lrgs(i)._was_lo;
1043 lrgs(i)._was_lo = 0;
1044 if( lrgs(i).lo_degree() ) {
1045 lrgs(i)._was_lo = 1; // Trivially of low degree
1046 } else { // Else check the Briggs assertion
1047 // Briggs' observation is that the lo-degree neighbors of a
1048 // hi-degree live range will not interfere with the color choices
1049 // of said hi-degree live range. The Simplify reverse-stack-coloring
1050 // order takes care of the details. Hence you do not have to count
1051 // low-degree neighbors when determining if this guy colors.
1052 int briggs_degree = 0;
1053 IndexSet *s = _ifg->neighbors(i);
1054 IndexSetIterator elements(s);
1055 uint lidx;
1056 while((lidx = elements.next()) != 0) {
1057 if( !lrgs(lidx).lo_degree() )
1058 briggs_degree += MAX2(size,lrgs(lidx).num_regs());
1059 }
1060 if( briggs_degree < lrgs(i).degrees_of_freedom() )
1061 lrgs(i)._was_lo = 1; // Low degree via the Briggs assertion
1062 }
1063 assert(old_was_lo <= lrgs(i)._was_lo, "_was_lo may not decrease");
1064 }
1065 #endif
1066 }
1068 #define REGISTER_CONSTRAINED 16
1070 //------------------------------cache_lrg_info---------------------------------
1071 // Compute cost/area ratio, in case we spill. Build the lo-degree list.
1072 void PhaseChaitin::cache_lrg_info( ) {
1074 for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
1075 LRG &lrg = lrgs(i);
1077 // Check for being of low degree: means we can be trivially colored.
1078 // Low degree, dead or must-spill guys just get to simplify right away
1079 if( lrg.lo_degree() ||
1080 !lrg.alive() ||
1081 lrg._must_spill ) {
1082 // Split low degree list into those guys that must get a
1083 // register and those that can go to register or stack.
1084 // The idea is LRGs that can go register or stack color first when
1085 // they have a good chance of getting a register. The register-only
1086 // lo-degree live ranges always get a register.
1087 OptoReg::Name hi_reg = lrg.mask().find_last_elem();
1088 if( OptoReg::is_stack(hi_reg)) { // Can go to stack?
1089 lrg._next = _lo_stk_degree;
1090 _lo_stk_degree = i;
1091 } else {
1092 lrg._next = _lo_degree;
1093 _lo_degree = i;
1094 }
1095 } else { // Else high degree
1096 lrgs(_hi_degree)._prev = i;
1097 lrg._next = _hi_degree;
1098 lrg._prev = 0;
1099 _hi_degree = i;
1100 }
1101 }
1102 }
1104 //------------------------------Pre-Simplify-----------------------------------
1105 // Simplify the IFG by removing LRGs of low degree that have NO copies
1106 void PhaseChaitin::Pre_Simplify( ) {
1108 // Warm up the lo-degree no-copy list
1109 int lo_no_copy = 0;
1110 for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
1111 if ((lrgs(i).lo_degree() && !lrgs(i)._has_copy) ||
1112 !lrgs(i).alive() ||
1113 lrgs(i)._must_spill) {
1114 lrgs(i)._next = lo_no_copy;
1115 lo_no_copy = i;
1116 }
1117 }
1119 while( lo_no_copy ) {
1120 uint lo = lo_no_copy;
1121 lo_no_copy = lrgs(lo)._next;
1122 int size = lrgs(lo).num_regs();
1124 // Put the simplified guy on the simplified list.
1125 lrgs(lo)._next = _simplified;
1126 _simplified = lo;
1128 // Yank this guy from the IFG.
1129 IndexSet *adj = _ifg->remove_node( lo );
1131 // If any neighbors' degrees fall below their number of
1132 // allowed registers, then put that neighbor on the low degree
1133 // list. Note that 'degree' can only fall and 'numregs' is
1134 // unchanged by this action. Thus the two are equal at most once,
1135 // so LRGs hit the lo-degree worklists at most once.
1136 IndexSetIterator elements(adj);
1137 uint neighbor;
1138 while ((neighbor = elements.next()) != 0) {
1139 LRG *n = &lrgs(neighbor);
1140 assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
1142 // Check for just becoming of-low-degree
1143 if( n->just_lo_degree() && !n->_has_copy ) {
1144 assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
1145 // Put on lo-degree list
1146 n->_next = lo_no_copy;
1147 lo_no_copy = neighbor;
1148 }
1149 }
1150 } // End of while lo-degree no_copy worklist not empty
1152 // No more lo-degree no-copy live ranges to simplify
1153 }
1155 //------------------------------Simplify---------------------------------------
1156 // Simplify the IFG by removing LRGs of low degree.
1157 void PhaseChaitin::Simplify( ) {
1159 while( 1 ) { // Repeat till simplified it all
1160 // May want to explore simplifying lo_degree before _lo_stk_degree.
1161 // This might result in more spills coloring into registers during
1162 // Select().
1163 while( _lo_degree || _lo_stk_degree ) {
1164 // If possible, pull from lo_stk first
1165 uint lo;
1166 if( _lo_degree ) {
1167 lo = _lo_degree;
1168 _lo_degree = lrgs(lo)._next;
1169 } else {
1170 lo = _lo_stk_degree;
1171 _lo_stk_degree = lrgs(lo)._next;
1172 }
1174 // Put the simplified guy on the simplified list.
1175 lrgs(lo)._next = _simplified;
1176 _simplified = lo;
1177 // If this guy is "at risk" then mark his current neighbors
1178 if( lrgs(lo)._at_risk ) {
1179 IndexSetIterator elements(_ifg->neighbors(lo));
1180 uint datum;
1181 while ((datum = elements.next()) != 0) {
1182 lrgs(datum)._risk_bias = lo;
1183 }
1184 }
1186 // Yank this guy from the IFG.
1187 IndexSet *adj = _ifg->remove_node( lo );
1189 // If any neighbors' degrees fall below their number of
1190 // allowed registers, then put that neighbor on the low degree
1191 // list. Note that 'degree' can only fall and 'numregs' is
1192 // unchanged by this action. Thus the two are equal at most once,
1193 // so LRGs hit the lo-degree worklist at most once.
1194 IndexSetIterator elements(adj);
1195 uint neighbor;
1196 while ((neighbor = elements.next()) != 0) {
1197 LRG *n = &lrgs(neighbor);
1198 #ifdef ASSERT
1199 if( VerifyOpto || VerifyRegisterAllocator ) {
1200 assert( _ifg->effective_degree(neighbor) == n->degree(), "" );
1201 }
1202 #endif
1204 // Check for just becoming of-low-degree just counting registers.
1205 // _must_spill live ranges are already on the low degree list.
1206 if( n->just_lo_degree() && !n->_must_spill ) {
1207 assert(!(*_ifg->_yanked)[neighbor],"Cannot move to lo degree twice");
1208 // Pull from hi-degree list
1209 uint prev = n->_prev;
1210 uint next = n->_next;
1211 if( prev ) lrgs(prev)._next = next;
1212 else _hi_degree = next;
1213 lrgs(next)._prev = prev;
1214 n->_next = _lo_degree;
1215 _lo_degree = neighbor;
1216 }
1217 }
1218 } // End of while lo-degree/lo_stk_degree worklist not empty
1220 // Check for got everything: is hi-degree list empty?
1221 if( !_hi_degree ) break;
1223 // Time to pick a potential spill guy
1224 uint lo_score = _hi_degree;
1225 double score = lrgs(lo_score).score();
1226 double area = lrgs(lo_score)._area;
1227 double cost = lrgs(lo_score)._cost;
1228 bool bound = lrgs(lo_score)._is_bound;
1230 // Find cheapest guy
1231 debug_only( int lo_no_simplify=0; );
1232 for( uint i = _hi_degree; i; i = lrgs(i)._next ) {
1233 assert( !(*_ifg->_yanked)[i], "" );
1234 // It's just vaguely possible to move hi-degree to lo-degree without
1235 // going through a just-lo-degree stage: If you remove a double from
1236 // a float live range its degree will drop by 2 and you can skip the
1237 // just-lo-degree stage. It's very rare (shows up after 5000+ methods
1238 // in -Xcomp of Java2Demo). So just choose this guy to simplify next.
1239 if( lrgs(i).lo_degree() ) {
1240 lo_score = i;
1241 break;
1242 }
1243 debug_only( if( lrgs(i)._was_lo ) lo_no_simplify=i; );
1244 double iscore = lrgs(i).score();
1245 double iarea = lrgs(i)._area;
1246 double icost = lrgs(i)._cost;
1247 bool ibound = lrgs(i)._is_bound;
1249 // Compare cost/area of i vs cost/area of lo_score. Smaller cost/area
1250 // wins. Ties happen because all live ranges in question have spilled
1251 // a few times before and the spill-score adds a huge number which
1252 // washes out the low order bits. We are choosing the lesser of 2
1253 // evils; in this case pick largest area to spill.
1254 // Ties also happen when live ranges are defined and used only inside
1255 // one block, in which case their area is 0 and the score is set to max.
1256 // In such cases choose a bound live range over an unbound one to free registers,
1257 // or the one with the smaller cost to spill.
1258 if( iscore < score ||
1259 (iscore == score && iarea > area && lrgs(lo_score)._was_spilled2) ||
1260 (iscore == score && iarea == area &&
1261 ( (ibound && !bound) || ibound == bound && (icost < cost) )) ) {
1262 lo_score = i;
1263 score = iscore;
1264 area = iarea;
1265 cost = icost;
1266 bound = ibound;
1267 }
1268 }
1269 LRG *lo_lrg = &lrgs(lo_score);
1270 // The live range we choose for spilling is either hi-degree, or very
1271 // rarely it can be low-degree. If we choose a hi-degree live range
1272 // there better not be any lo-degree choices.
1273 assert( lo_lrg->lo_degree() || !lo_no_simplify, "Live range was lo-degree before coalesce; should simplify" );
1275 // Pull from hi-degree list
1276 uint prev = lo_lrg->_prev;
1277 uint next = lo_lrg->_next;
1278 if( prev ) lrgs(prev)._next = next;
1279 else _hi_degree = next;
1280 lrgs(next)._prev = prev;
1281 // Jam him on the lo-degree list, despite his high degree.
1282 // Maybe he'll get a color, and maybe he'll spill.
1283 // Only Select() will know.
1284 lrgs(lo_score)._at_risk = true;
1285 _lo_degree = lo_score;
1286 lo_lrg->_next = 0;
1288 } // End of while not simplified everything
1290 }
1292 //------------------------------is_legal_reg-----------------------------------
1293 // Is 'reg' register legal for 'lrg'?
1294 static bool is_legal_reg(LRG &lrg, OptoReg::Name reg, int chunk) {
1295 if (reg >= chunk && reg < (chunk + RegMask::CHUNK_SIZE) &&
1296 lrg.mask().Member(OptoReg::add(reg,-chunk))) {
1297 // RA uses OptoReg names which represent the highest element of a register set.
1298 // For example, vectorX (128-bit) on x86 uses the [XMM,XMMb,XMMc,XMMd] set
1299 // in which XMMd is used by RA to represent such vectors. A double value
1300 // uses [XMM,XMMb] pairs and XMMb is used by RA for it.
1301 // The register mask uses largest bits set of overlapping register sets.
1302 // On x86 with AVX it uses 8 bits for each XMM registers set.
1303 //
1304 // The 'lrg' already has cleared-to-set register mask (done in Select()
1305 // before calling choose_color()). Passing mask.Member(reg) check above
1306 // indicates that the size (num_regs) of the 'reg' set is less than or equal to
1307 // 'lrg' set size.
1308 // For set size 1 any register which is member of 'lrg' mask is legal.
1309 if (lrg.num_regs()==1)
1310 return true;
1311 // For larger sets only an aligned register with the same set size is legal.
1312 int mask = lrg.num_regs()-1;
1313 if ((reg&mask) == mask)
1314 return true;
1315 }
1316 return false;
1317 }
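// Illustrative example of the alignment test above: for a 4-slot set such as
// VecX, num_regs()-1 == 3, so only registers whose two low bits are both set
// (reg % 4 == 3) pass the (reg & mask) == mask check -- exactly the highest
// member of an aligned quad, which is how the RA names such sets.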
1319 //------------------------------bias_color-------------------------------------
1320 // Choose a color using the biasing heuristic
1321 OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {
1323 // Check for "at_risk" LRG's
1324 uint risk_lrg = _lrg_map.find(lrg._risk_bias);
1325 if( risk_lrg != 0 ) {
1326 // Walk the colored neighbors of the "at_risk" candidate
1327 // Choose a color which is both legal and already taken by a neighbor
1328 // of the "at_risk" candidate in order to improve the chances of the
1329 // "at_risk" candidate of coloring
1330 IndexSetIterator elements(_ifg->neighbors(risk_lrg));
1331 uint datum;
1332 while ((datum = elements.next()) != 0) {
1333 OptoReg::Name reg = lrgs(datum).reg();
1334 // If this LRG's register is legal for us, choose it
1335 if (is_legal_reg(lrg, reg, chunk))
1336 return reg;
1337 }
1338 }
1340 uint copy_lrg = _lrg_map.find(lrg._copy_bias);
1341 if( copy_lrg != 0 ) {
1342 // If he has a color,
1343 if( !(*(_ifg->_yanked))[copy_lrg] ) {
1344 OptoReg::Name reg = lrgs(copy_lrg).reg();
1345 // And it is legal for you,
1346 if (is_legal_reg(lrg, reg, chunk))
1347 return reg;
1348 } else if( chunk == 0 ) {
1349 // Choose a color which is legal for him
1350 RegMask tempmask = lrg.mask();
1351 tempmask.AND(lrgs(copy_lrg).mask());
1352 tempmask.clear_to_sets(lrg.num_regs());
1353 OptoReg::Name reg = tempmask.find_first_set(lrg.num_regs());
1354 if (OptoReg::is_valid(reg))
1355 return reg;
1356 }
1357 }
1359 // If no bias info exists, just go with the register selection ordering
1360 if (lrg._is_vector || lrg.num_regs() == 2) {
1361 // Find an aligned set
1362 return OptoReg::add(lrg.mask().find_first_set(lrg.num_regs()),chunk);
1363 }
1365 // CNC - Fun hack. Alternate 1st and 2nd selection. Enables post-allocate
1366 // copy removal to remove many more copies, by preventing a just-assigned
1367 // register from being repeatedly assigned.
1368 OptoReg::Name reg = lrg.mask().find_first_elem();
1369 if( (++_alternate & 1) && OptoReg::is_valid(reg) ) {
1370 // This 'Remove; find; Insert' idiom is an expensive way to find the
1371 // SECOND element in the mask.
1372 lrg.Remove(reg);
1373 OptoReg::Name reg2 = lrg.mask().find_first_elem();
1374 lrg.Insert(reg);
1375 if( OptoReg::is_reg(reg2))
1376 reg = reg2;
1377 }
1378 return OptoReg::add( reg, chunk );
1379 }
1381 //------------------------------choose_color-----------------------------------
1382 // Choose a color in the current chunk
1383 OptoReg::Name PhaseChaitin::choose_color( LRG &lrg, int chunk ) {
1384 assert( C->in_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP-1)), "must not allocate stack0 (inside preserve area)");
1385 assert(C->out_preserve_stack_slots() == 0 || chunk != 0 || lrg._is_bound || lrg.mask().is_bound1() || !lrg.mask().Member(OptoReg::Name(_matcher._old_SP+0)), "must not allocate stack0 (inside preserve area)");
1387 if( lrg.num_regs() == 1 || // Common Case
1388 !lrg._fat_proj ) // Aligned+adjacent pairs ok
1389 // Use a heuristic to "bias" the color choice
1390 return bias_color(lrg, chunk);
1392 assert(!lrg._is_vector, "should be not vector here" );
1393 assert( lrg.num_regs() >= 2, "dead live ranges do not color" );
1395 // Fat-proj case or misaligned double argument.
1396 assert(lrg.compute_mask_size() == lrg.num_regs() ||
1397 lrg.num_regs() == 2,"fat projs exactly color" );
1398 assert( !chunk, "always color in 1st chunk" );
1399 // Return the highest element in the set.
1400 return lrg.mask().find_last_elem();
1401 }
1403 //------------------------------Select-----------------------------------------
1404 // Select colors by re-inserting LRGs back into the IFG. LRGs are re-inserted
1405 // in reverse order of removal. As long as nothing of hi-degree was yanked,
1406 // everything going back is guaranteed a color. Select that color. If some
1407 // hi-degree LRG cannot get a color then we record that we must spill.
1408 uint PhaseChaitin::Select( ) {
1409 uint spill_reg = LRG::SPILL_REG;
1410 _max_reg = OptoReg::Name(0); // Past max register used
1411 while( _simplified ) {
1412 // Pull next LRG from the simplified list - in reverse order of removal
1413 uint lidx = _simplified;
1414 LRG *lrg = &lrgs(lidx);
1415 _simplified = lrg->_next;
1418 #ifndef PRODUCT
1419 if (trace_spilling()) {
1420 ttyLocker ttyl;
1421 tty->print_cr("L%d selecting degree %d degrees_of_freedom %d", lidx, lrg->degree(),
1422 lrg->degrees_of_freedom());
1423 lrg->dump();
1424 }
1425 #endif
1427 // Re-insert into the IFG
1428 _ifg->re_insert(lidx);
1429 if( !lrg->alive() ) continue;
1430 // capture allstackedness flag before mask is hacked
1431 const int is_allstack = lrg->mask().is_AllStack();
1433 // Yeah, yeah, yeah, I know, I know. I can refactor this
1434 // to avoid the GOTO, although the refactored code will not
1435 // be much clearer. We arrive here IFF we have a stack-based
1436 // live range that cannot color in the current chunk, and it
1437 // has to move into the next free stack chunk.
1438 int chunk = 0; // Current chunk is first chunk
1439 retry_next_chunk:
1441 // Remove neighbor colors
1442 IndexSet *s = _ifg->neighbors(lidx);
1444 debug_only(RegMask orig_mask = lrg->mask();)
1445 IndexSetIterator elements(s);
1446 uint neighbor;
1447 while ((neighbor = elements.next()) != 0) {
1448 // Note that neighbor might be a spill_reg. In this case, exclusion
1449 // of its color will be a no-op, since the spill_reg chunk is in outer
1450 // space. Also, if neighbor is in a different chunk, this exclusion
1451 // will be a no-op. (Later on, if lrg runs out of possible colors in
1452 // its chunk, a new chunk of color may be tried, in which case
1453 // examination of neighbors is started again, at retry_next_chunk.)
1454 LRG &nlrg = lrgs(neighbor);
1455 OptoReg::Name nreg = nlrg.reg();
1456 // Only subtract masks in the same chunk
1457 if( nreg >= chunk && nreg < chunk + RegMask::CHUNK_SIZE ) {
1458 #ifndef PRODUCT
1459 uint size = lrg->mask().Size();
1460 RegMask rm = lrg->mask();
1461 #endif
1462 lrg->SUBTRACT(nlrg.mask());
1463 #ifndef PRODUCT
1464 if (trace_spilling() && lrg->mask().Size() != size) {
1465 ttyLocker ttyl;
1466 tty->print("L%d ", lidx);
1467 rm.dump();
1468 tty->print(" intersected L%d ", neighbor);
1469 nlrg.mask().dump();
1470 tty->print(" removed ");
1471 rm.SUBTRACT(lrg->mask());
1472 rm.dump();
1473 tty->print(" leaving ");
1474 lrg->mask().dump();
1475 tty->cr();
1476 }
1477 #endif
1478 }
1479 }
1480 //assert(is_allstack == lrg->mask().is_AllStack(), "nbrs must not change AllStackedness");
1481 // Aligned pairs need aligned masks
1482 assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
1483 if (lrg->num_regs() > 1 && !lrg->_fat_proj) {
1484 lrg->clear_to_sets();
1485 }
1487 // Check if a color is available and if so pick the color
1488 OptoReg::Name reg = choose_color( *lrg, chunk );
1489 #ifdef SPARC
1490 debug_only(lrg->compute_set_mask_size());
1491 assert(lrg->num_regs() < 2 || lrg->is_bound() || is_even(reg-1), "allocate all doubles aligned");
1492 #endif
1494 //---------------
1495 // If we fail to color and the AllStack flag is set, trigger
1496 // a chunk-rollover event
1497 if(!OptoReg::is_valid(OptoReg::add(reg,-chunk)) && is_allstack) {
1498 // Bump register mask up to next stack chunk
1499 chunk += RegMask::CHUNK_SIZE;
1500 lrg->Set_All();
1502 goto retry_next_chunk;
1503 }
1505 //---------------
1506 // Did we get a color?
1507 else if( OptoReg::is_valid(reg)) {
1508 #ifndef PRODUCT
1509 RegMask avail_rm = lrg->mask();
1510 #endif
1512 // Record selected register
1513 lrg->set_reg(reg);
1515 if( reg >= _max_reg ) // Compute max register limit
1516 _max_reg = OptoReg::add(reg,1);
1517 // Fold reg back into normal space
1518 reg = OptoReg::add(reg,-chunk);
1520 // If the live range is not bound, then we actually had some choices
1521 // to make. In this case, the mask has more bits in it than the colors
1522 // chosen. Restrict the mask to just what was picked.
1523 int n_regs = lrg->num_regs();
1524 assert(!lrg->_is_vector || !lrg->_fat_proj, "sanity");
1525 if (n_regs == 1 || !lrg->_fat_proj) {
1526 assert(!lrg->_is_vector || n_regs <= RegMask::SlotsPerVecY, "sanity");
1527 lrg->Clear(); // Clear the mask
1528 lrg->Insert(reg); // Set regmask to match selected reg
1529 // For vectors and pairs, also insert the low bit of the pair
1530 for (int i = 1; i < n_regs; i++)
1531 lrg->Insert(OptoReg::add(reg,-i));
1532 lrg->set_mask_size(n_regs);
1533 } else { // Else fatproj
1534 // mask must be equal to fatproj bits, by definition
1535 }
1536 #ifndef PRODUCT
1537 if (trace_spilling()) {
1538 ttyLocker ttyl;
1539 tty->print("L%d selected ", lidx);
1540 lrg->mask().dump();
1541 tty->print(" from ");
1542 avail_rm.dump();
1543 tty->cr();
1544 }
1545 #endif
1546 // Note that reg is the highest-numbered register in the newly-bound mask.
1547 } // end color available case
1549 //---------------
1550 // Live range is live and no colors available
1551 else {
1552 assert( lrg->alive(), "" );
1553 assert( !lrg->_fat_proj || lrg->is_multidef() ||
1554 lrg->_def->outcnt() > 0, "fat_proj cannot spill");
1555 assert( !orig_mask.is_AllStack(), "All Stack does not spill" );
1557 // Assign the special spillreg register
1558 lrg->set_reg(OptoReg::Name(spill_reg++));
1559 // Do not empty the regmask; leave mask_size lying around
1560 // for use during Spilling
1561 #ifndef PRODUCT
1562 if( trace_spilling() ) {
1563 ttyLocker ttyl;
1564 tty->print("L%d spilling with neighbors: ", lidx);
1565 s->dump();
1566 debug_only(tty->print(" original mask: "));
1567 debug_only(orig_mask.dump());
1568 dump_lrg(lidx);
1569 }
1570 #endif
1571 } // end spill case
1573 }
1575 return spill_reg-LRG::SPILL_REG; // Return number of spills
1576 }
1579 //------------------------------copy_was_spilled-------------------------------
1580 // Copy 'was_spilled'-edness from the source Node to the dst Node.
1581 void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) {
1582 if( _spilled_once.test(src->_idx) ) {
1583 _spilled_once.set(dst->_idx);
1584 lrgs(_lrg_map.find(dst))._was_spilled1 = 1;
1585 if( _spilled_twice.test(src->_idx) ) {
1586 _spilled_twice.set(dst->_idx);
1587 lrgs(_lrg_map.find(dst))._was_spilled2 = 1;
1588 }
1589 }
1590 }
1592 //------------------------------set_was_spilled--------------------------------
1593 // Set the 'spilled_once' or 'spilled_twice' flag on a node.
1594 void PhaseChaitin::set_was_spilled( Node *n ) {
1595 if( _spilled_once.test_set(n->_idx) )
1596 _spilled_twice.set(n->_idx);
1597 }
1599 //------------------------------fixup_spills-----------------------------------
1600 // Convert Ideal spill instructions into proper FramePtr + offset Loads and
1601 // Stores. Use-def chains are NOT preserved, but Node->LRG->reg maps are.
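// Sketch of the rewrite below (illustrative, not actual opcodes): an
// instruction whose cisc-spillable input was colored to a stack slot, e.g.
// "addI dst, src_spilled", is replaced by its cisc_version, roughly
// "addI dst, [FramePtr + stk_offset]", so no separate reload node is needed.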
1602 void PhaseChaitin::fixup_spills() {
1603 // This function does only cisc spill work.
1604 if( !UseCISCSpill ) return;
1606 NOT_PRODUCT( Compile::TracePhase t3("fixupSpills", &_t_fixupSpills, TimeCompiler); )
1608 // Grab the Frame Pointer
1609 Node *fp = _cfg._broot->head()->in(1)->in(TypeFunc::FramePtr);
1611 // For all blocks
1612 for( uint i = 0; i < _cfg._num_blocks; i++ ) {
1613 Block *b = _cfg._blocks[i];
1615 // For all instructions in block
1616 uint last_inst = b->end_idx();
1617 for( uint j = 1; j <= last_inst; j++ ) {
1618 Node *n = b->_nodes[j];
1620 // Dead instruction???
1621 assert( n->outcnt() != 0 ||// Nothing dead after post alloc
1622 C->top() == n || // Or the random TOP node
1623 n->is_Proj(), // Or a fat-proj kill node
1624 "No dead instructions after post-alloc" );
1626 int inp = n->cisc_operand();
1627 if( inp != AdlcVMDeps::Not_cisc_spillable ) {
1628 // Convert operand number to edge index number
1629 MachNode *mach = n->as_Mach();
1630 inp = mach->operand_index(inp);
1631 Node *src = n->in(inp); // Value to load or store
1632 LRG &lrg_cisc = lrgs(_lrg_map.find_const(src));
1633 OptoReg::Name src_reg = lrg_cisc.reg();
1634 // Doubles record the HIGH register of an adjacent pair.
1635 src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs());
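// (Worked example: a 2-slot double with reg()==R occupies slots {R-1, R};
//  adding 1-num_regs == -1 yields R-1, the LOW slot, from which the frame
//  offset below is computed.)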
1636 if( OptoReg::is_stack(src_reg) ) { // If input is on stack
1637 // This is a CISC Spill, get stack offset and construct new node
1638 #ifndef PRODUCT
1639 if( TraceCISCSpill ) {
1640 tty->print(" reg-instr: ");
1641 n->dump();
1642 }
1643 #endif
1644 int stk_offset = reg2offset(src_reg);
1645 // Bailout if we might exceed node limit when spilling this instruction
1646 C->check_node_count(0, "out of nodes fixing spills");
1647 if (C->failing()) return;
1648 // Transform node
1649 MachNode *cisc = mach->cisc_version(stk_offset, C)->as_Mach();
1650 cisc->set_req(inp,fp); // Base register is frame pointer
1651 if( cisc->oper_input_base() > 1 && mach->oper_input_base() <= 1 ) {
1652 assert( cisc->oper_input_base() == 2, "Only adding one edge");
1653 cisc->ins_req(1,src); // Requires a memory edge
1654 }
1655 b->_nodes.map(j,cisc); // Insert into basic block
1656 n->subsume_by(cisc, C); // Correct graph
1657 //
1658 ++_used_cisc_instructions;
1659 #ifndef PRODUCT
1660 if( TraceCISCSpill ) {
1661 tty->print(" cisc-instr: ");
1662 cisc->dump();
1663 }
1664 #endif
1665 } else {
1666 #ifndef PRODUCT
1667 if( TraceCISCSpill ) {
1668 tty->print(" using reg-instr: ");
1669 n->dump();
1670 }
1671 #endif
1672 ++_unused_cisc_instructions; // cisc-spillable, but the input stayed in a register
1673 }
1674 }
1676 } // End of for all instructions
1678 } // End of for all blocks
1679 }
1681 //------------------------------find_base_for_derived--------------------------
1682 // Helper for stretch_base_pointer_live_ranges (below); recursively discover
1683 // the base Node for a given derived Node. Easy for AddP-related machine nodes, but needs
1684 // to be recursive for derived Phis.
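// Rough picture (illustrative): a derived pointer is "base + offset", e.g.
// the address produced by an AddP feeding an array access; its base is the
// AddP's Base input. For a Phi of derived pointers, the base must itself be
// a Phi of the corresponding bases, which may have to be created here.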
1685 Node *PhaseChaitin::find_base_for_derived( Node **derived_base_map, Node *derived, uint &maxlrg ) {
1686 // See if already computed; if so return it
1687 if( derived_base_map[derived->_idx] )
1688 return derived_base_map[derived->_idx];
1690 // See if this happens to be a base.
1691 // NOTE: we use TypePtr instead of TypeOopPtr because we can have
1692 // pointers derived from NULL! These are always along paths that
1693 // can't happen at run-time but the optimizer cannot deduce it so
1694 // we have to handle it gracefully.
1695 assert(!derived->bottom_type()->isa_narrowoop() ||
1696 derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1697 const TypePtr *tj = derived->bottom_type()->isa_ptr();
1698 // If it's an OOP with a non-zero offset, then it is derived.
1699 if( tj == NULL || tj->_offset == 0 ) {
1700 derived_base_map[derived->_idx] = derived;
1701 return derived;
1702 }
1703 // Derived is NULL+offset? Base is NULL!
1704 if( derived->is_Con() ) {
1705 Node *base = _matcher.mach_null();
1706 assert(base != NULL, "sanity");
1707 if (base->in(0) == NULL) {
1708 // Initialize it once and make it shared:
1709 // set control to _root and place it into Start block
1710 // (where top() node is placed).
1711 base->init_req(0, _cfg._root);
1712 Block *startb = _cfg._bbs[C->top()->_idx];
1713 startb->_nodes.insert(startb->find_node(C->top()), base );
1714 _cfg._bbs.map( base->_idx, startb );
1715 assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
1716 }
1717 if (_lrg_map.live_range_id(base) == 0) {
1718 new_lrg(base, maxlrg++);
1719 }
1720 assert(base->in(0) == _cfg._root &&
1721 _cfg._bbs[base->_idx] == _cfg._bbs[C->top()->_idx], "base NULL should be shared");
1722 derived_base_map[derived->_idx] = base;
1723 return base;
1724 }
1726 // Check for AddP-related opcodes
1727 if (!derived->is_Phi()) {
1728 assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, err_msg_res("but is: %s", derived->Name()));
1729 Node *base = derived->in(AddPNode::Base);
1730 derived_base_map[derived->_idx] = base;
1731 return base;
1732 }
1734 // Recursively find bases for Phis.
1735 // First check to see if we can avoid a base Phi here.
1736 Node *base = find_base_for_derived( derived_base_map, derived->in(1),maxlrg);
1737 uint i;
1738 for( i = 2; i < derived->req(); i++ )
1739 if( base != find_base_for_derived( derived_base_map,derived->in(i),maxlrg))
1740 break;
1741 // Went to the end without finding any different bases?
1742 if( i == derived->req() ) { // No need for a base Phi here
1743 derived_base_map[derived->_idx] = base;
1744 return base;
1745 }
1747 // Now we see we need a base-Phi here to merge the bases
1748 const Type *t = base->bottom_type();
1749 base = new (C) PhiNode( derived->in(0), t );
1750 for( i = 1; i < derived->req(); i++ ) {
1751 base->init_req(i, find_base_for_derived(derived_base_map, derived->in(i), maxlrg));
1752 t = t->meet(base->in(i)->bottom_type());
1753 }
1754 base->as_Phi()->set_type(t);
1756 // Search the current block for an existing base-Phi
1757 Block *b = _cfg._bbs[derived->_idx];
1758 for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
1759 Node *phi = b->_nodes[i];
1760 if( !phi->is_Phi() ) { // Found end of Phis with no match?
1761 b->_nodes.insert( i, base ); // Must insert created Phi here as base
1762 _cfg._bbs.map( base->_idx, b );
1763 new_lrg(base,maxlrg++);
1764 break;
1765 }
1766 // See if Phi matches.
1767 uint j;
1768 for( j = 1; j < base->req(); j++ )
1769 if( phi->in(j) != base->in(j) &&
1770 !(phi->in(j)->is_Con() && base->in(j)->is_Con()) ) // allow different NULLs
1771 break;
1772 if( j == base->req() ) { // All inputs match?
1773 base = phi; // Then use existing 'phi' and drop 'base'
1774 break;
1775 }
1776 }
1779 // Cache info for later passes
1780 derived_base_map[derived->_idx] = base;
1781 return base;
1782 }
1785 //------------------------------stretch_base_pointer_live_ranges---------------
1786 // At each Safepoint, insert extra debug edges for each pair of derived value/
1787 // base pointer that is live across the Safepoint for oopmap building. The
1788 // edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the
1789 // required edge set.
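// Why this matters (illustrative): if "derived = base + 16" is live across a
// safepoint, the oopmap must also record "base" so a moving GC can relocate
// the object and the derived pointer can be recomputed from the new base.
// Appending the (derived, base) pair as debug edges keeps the base live that
// far, possibly stretching its live range.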
1790 bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
1791 int must_recompute_live = false;
1792 uint maxlrg = _lrg_map.max_lrg_id();
1793 Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique());
1794 memset( derived_base_map, 0, sizeof(Node*)*C->unique() );
1796 // For all blocks in RPO do...
1797 for( uint i=0; i<_cfg._num_blocks; i++ ) {
1798 Block *b = _cfg._blocks[i];
1799 // Note use of deep-copy constructor. I cannot hammer the original
1800 // liveout bits, because they are needed by the following coalesce pass.
1801 IndexSet liveout(_live->live(b));
1803 for( uint j = b->end_idx() + 1; j > 1; j-- ) {
1804 Node *n = b->_nodes[j-1];
1806 // Pre-split compares of loop-phis. Loop-phis form a cycle we would
1807 // like to see in the same register. Compare uses the loop-phi and so
1808 // extends its live range BUT cannot be part of the cycle. If this
1809 // extended live range overlaps with the update of the loop-phi value
1810 // we need both alive at the same time -- which requires at least 1
1811 // copy. But because Intel has only 2-address instructions we end up with
1812 // at least 2 copies, one before the loop-phi update instruction and
1813 // one after. Instead we split the input to the compare just after the
1814 // phi.
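// Sketch (illustrative, not actual IR): for
//   phi  = Phi(init, incr)   // loop induction value
//   test = CmpI(phi, limit)
// a MachSpillCopy of phi is inserted just after the phi and the CmpI is
// rewired to use the copy, so the compare no longer stretches phi's live
// range across the back-edge update of the phi.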
1815 if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
1816 Node *phi = n->in(1);
1817 if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
1818 Block *phi_block = _cfg._bbs[phi->_idx];
1819 if( _cfg._bbs[phi_block->pred(2)->_idx] == b ) {
1820 const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
1821 Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
1822 insert_proj( phi_block, 1, spill, maxlrg++ );
1823 n->set_req(1,spill);
1824 must_recompute_live = true;
1825 }
1826 }
1827 }
1829 // Get value being defined
1830 uint lidx = _lrg_map.live_range_id(n);
1831 // Ignore the occasional brand-new live range
1832 if (lidx && lidx < _lrg_map.max_lrg_id()) {
1833 // Remove from live-out set
1834 liveout.remove(lidx);
1836 // Copies do not define a new value and so do not interfere.
1837 // Remove the copy's source from the liveout set before interfering.
1838 uint idx = n->is_Copy();
1839 if (idx) {
1840 liveout.remove(_lrg_map.live_range_id(n->in(idx)));
1841 }
1842 }
1844 // Found a safepoint?
1845 JVMState *jvms = n->jvms();
1846 if( jvms ) {
1847 // Now scan for a live derived pointer
1848 IndexSetIterator elements(&liveout);
1849 uint neighbor;
1850 while ((neighbor = elements.next()) != 0) {
1851 // Find reaching DEF for base and derived values
1852 // This works because we are still in SSA during this call.
1853 Node *derived = lrgs(neighbor)._def;
1854 const TypePtr *tj = derived->bottom_type()->isa_ptr();
1855 assert(!derived->bottom_type()->isa_narrowoop() ||
1856 derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
1857 // If it's an OOP with a non-zero offset, then it is derived.
1858 if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
1859 Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
1860 assert(base->_idx < _lrg_map.size(), "");
1861 // Add reaching DEFs of derived pointer and base pointer as a
1862 // pair of inputs
1863 n->add_req(derived);
1864 n->add_req(base);
1866 // See if the base pointer is already live to this point.
1867 // Since I'm working on the SSA form, live-ness amounts to
1868 // reaching def's. So if I find the base's live range then
1869 // I know the base's def reaches here.
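// (Roughly, reading the test below: live info must be recomputed when the
//  base is not already live at this point, or is brand new, provided it has
//  a real live range id and is not defined in this block.)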
1870 if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
1871 !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
1872 (_lrg_map.live_range_id(base) > 0) && // not a constant
1873 _cfg._bbs[base->_idx] != b) { // base not def'd in blk)
1874 // Base pointer is not currently live. Since I stretched
1875 // the base pointer to here and it crosses basic-block
1876 // boundaries, the global live info is now incorrect.
1877 // Recompute live.
1878 must_recompute_live = true;
1879 } // End of if base pointer is not live to debug info
1880 }
1881 } // End of scan all live data for derived ptrs crossing GC point
1882 } // End of if found a GC point
1884 // Make all inputs live
1885 if (!n->is_Phi()) { // Phi function uses come from prior block
1886 for (uint k = 1; k < n->req(); k++) {
1887 uint lidx = _lrg_map.live_range_id(n->in(k));
1888 if (lidx < _lrg_map.max_lrg_id()) {
1889 liveout.insert(lidx);
1890 }
1891 }
1892 }
1894 } // End of forall instructions in block
1895 liveout.clear(); // Free the memory used by liveout.
1897 } // End of forall blocks
1898 _lrg_map.set_max_lrg_id(maxlrg);
1900 // If I created a new live range I need to recompute live
1901 if (maxlrg != _ifg->_maxlrg) {
1902 must_recompute_live = true;
1903 }
1905 return must_recompute_live != 0;
1906 }
1909 //------------------------------add_reference----------------------------------
1910 // Extend the node to LRG mapping
1912 void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
1913 _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node));
1914 }
1916 //------------------------------dump-------------------------------------------
1917 #ifndef PRODUCT
1918 void PhaseChaitin::dump(const Node *n) const {
1919 uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
1920 tty->print("L%d",r);
1921 if (r && n->Opcode() != Op_Phi) {
1922 if( _node_regs ) { // Got a post-allocation copy of allocation?
1923 tty->print("[");
1924 OptoReg::Name second = get_reg_second(n);
1925 if( OptoReg::is_valid(second) ) {
1926 if( OptoReg::is_reg(second) )
1927 tty->print("%s:",Matcher::regName[second]);
1928 else
1929 tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(second));
1930 }
1931 OptoReg::Name first = get_reg_first(n);
1932 if( OptoReg::is_reg(first) )
1933 tty->print("%s]",Matcher::regName[first]);
1934 else
1935 tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer), reg2offset_unchecked(first));
1936 } else
1937 n->out_RegMask().dump();
1938 }
1939 tty->print("/N%d\t",n->_idx);
1940 tty->print("%s === ", n->Name());
1941 uint k;
1942 for (k = 0; k < n->req(); k++) {
1943 Node *m = n->in(k);
1944 if (!m) {
1945 tty->print("_ ");
1946 }
1947 else {
1948 uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
1949 tty->print("L%d",r);
1950 // Data MultiNodes can have projections with no real registers.
1951 // Don't die while dumping them.
1952 int op = n->Opcode();
1953 if( r && op != Op_Phi && op != Op_Proj && op != Op_SCMemProj) {
1954 if( _node_regs ) {
1955 tty->print("[");
1956 OptoReg::Name second = get_reg_second(n->in(k));
1957 if( OptoReg::is_valid(second) ) {
1958 if( OptoReg::is_reg(second) )
1959 tty->print("%s:",Matcher::regName[second]);
1960 else
1961 tty->print("%s+%d:",OptoReg::regname(OptoReg::c_frame_pointer),
1962 reg2offset_unchecked(second));
1963 }
1964 OptoReg::Name first = get_reg_first(n->in(k));
1965 if( OptoReg::is_reg(first) )
1966 tty->print("%s]",Matcher::regName[first]);
1967 else
1968 tty->print("%s+%d]",OptoReg::regname(OptoReg::c_frame_pointer),
1969 reg2offset_unchecked(first));
1970 } else
1971 n->in_RegMask(k).dump();
1972 }
1973 tty->print("/N%d ",m->_idx);
1974 }
1975 }
1976 if( k < n->len() && n->in(k) ) tty->print("| ");
1977 for( ; k < n->len(); k++ ) {
1978 Node *m = n->in(k);
1979 if(!m) {
1980 break;
1981 }
1982 uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
1983 tty->print("L%d",r);
1984 tty->print("/N%d ",m->_idx);
1985 }
1986 if( n->is_Mach() ) n->as_Mach()->dump_spec(tty);
1987 else n->dump_spec(tty);
1988 if( _spilled_once.test(n->_idx ) ) {
1989 tty->print(" Spill_1");
1990 if( _spilled_twice.test(n->_idx ) )
1991 tty->print(" Spill_2");
1992 }
1993 tty->print("\n");
1994 }
1996 void PhaseChaitin::dump( const Block * b ) const {
1997 b->dump_head( &_cfg._bbs );
1999 // For all instructions
2000 for( uint j = 0; j < b->_nodes.size(); j++ )
2001 dump(b->_nodes[j]);
2002 // Print live-out info at end of block
2003 if( _live ) {
2004 tty->print("Liveout: ");
2005 IndexSet *live = _live->live(b);
2006 IndexSetIterator elements(live);
2007 tty->print("{");
2008 uint i;
2009 while ((i = elements.next()) != 0) {
2010 tty->print("L%d ", _lrg_map.find_const(i));
2011 }
2012 tty->print_cr("}");
2013 }
2014 tty->print("\n");
2015 }
2017 void PhaseChaitin::dump() const {
2018 tty->print( "--- Chaitin -- argsize: %d framesize: %d ---\n",
2019 _matcher._new_SP, _framesize );
2021 // For all blocks
2022 for( uint i = 0; i < _cfg._num_blocks; i++ )
2023 dump(_cfg._blocks[i]);
2024 // End of per-block dump
2025 tty->print("\n");
2027 if (!_ifg) {
2028 tty->print("(No IFG.)\n");
2029 return;
2030 }
2032 // Dump LRG array
2033 tty->print("--- Live RanGe Array ---\n");
2034 for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) {
2035 tty->print("L%d: ",i2);
2036 if (i2 < _ifg->_maxlrg) {
2037 lrgs(i2).dump();
2038 }
2039 else {
2040 tty->print_cr("new LRG");
2041 }
2042 }
2043 tty->print_cr("");
2045 // Dump lo-degree list
2046 tty->print("Lo degree: ");
2047 for(uint i3 = _lo_degree; i3; i3 = lrgs(i3)._next )
2048 tty->print("L%d ",i3);
2049 tty->print_cr("");
2051 // Dump lo-stk-degree list
2052 tty->print("Lo stk degree: ");
2053 for(uint i4 = _lo_stk_degree; i4; i4 = lrgs(i4)._next )
2054 tty->print("L%d ",i4);
2055 tty->print_cr("");
2057 // Dump hi-degree list
2058 tty->print("Hi degree: ");
2059 for(uint i5 = _hi_degree; i5; i5 = lrgs(i5)._next )
2060 tty->print("L%d ",i5);
2061 tty->print_cr("");
2062 }
2064 //------------------------------dump_degree_lists------------------------------
2065 void PhaseChaitin::dump_degree_lists() const {
2066 // Dump lo-degree list
2067 tty->print("Lo degree: ");
2068 for( uint i = _lo_degree; i; i = lrgs(i)._next )
2069 tty->print("L%d ",i);
2070 tty->print_cr("");
2072 // Dump lo-stk-degree list
2073 tty->print("Lo stk degree: ");
2074 for(uint i2 = _lo_stk_degree; i2; i2 = lrgs(i2)._next )
2075 tty->print("L%d ",i2);
2076 tty->print_cr("");
2078 // Dump hi-degree list
2079 tty->print("Hi degree: ");
2080 for(uint i3 = _hi_degree; i3; i3 = lrgs(i3)._next )
2081 tty->print("L%d ",i3);
2082 tty->print_cr("");
2083 }
2085 //------------------------------dump_simplified--------------------------------
2086 void PhaseChaitin::dump_simplified() const {
2087 tty->print("Simplified: ");
2088 for( uint i = _simplified; i; i = lrgs(i)._next )
2089 tty->print("L%d ",i);
2090 tty->print_cr("");
2091 }
2093 static char *print_reg( OptoReg::Name reg, const PhaseChaitin *pc, char *buf ) {
2094 if ((int)reg < 0)
2095 sprintf(buf, "<OptoReg::%d>", (int)reg);
2096 else if (OptoReg::is_reg(reg))
2097 strcpy(buf, Matcher::regName[reg]);
2098 else
2099 sprintf(buf,"%s + #%d",OptoReg::regname(OptoReg::c_frame_pointer),
2100 pc->reg2offset(reg));
2101 return buf+strlen(buf);
2102 }
2104 //------------------------------dump_register----------------------------------
2105 // Dump a register name into a buffer. Be intelligent if we get called
2106 // before allocation is complete.
2107 char *PhaseChaitin::dump_register( const Node *n, char *buf ) const {
2108 if( !this ) { // No allocator info at all?
2109 sprintf(buf,"N%d",n->_idx); // Then use Node index
2110 } else if( _node_regs ) {
2111 // Post allocation, use direct mappings, no LRG info available
2112 print_reg( get_reg_first(n), this, buf );
2113 } else {
2114 uint lidx = _lrg_map.find_const(n); // Grab LRG number
2115 if( !_ifg ) {
2116 sprintf(buf,"L%d",lidx); // No register binding yet
2117 } else if( !lidx ) { // Special, not allocated value
2118 strcpy(buf,"Special");
2119 } else {
2120 if (lrgs(lidx)._is_vector) {
2121 if (lrgs(lidx).mask().is_bound_set(lrgs(lidx).num_regs()))
2122 print_reg( lrgs(lidx).reg(), this, buf ); // a bound machine register
2123 else
2124 sprintf(buf,"L%d",lidx); // No register binding yet
2125 } else if( (lrgs(lidx).num_regs() == 1)
2126 ? lrgs(lidx).mask().is_bound1()
2127 : lrgs(lidx).mask().is_bound_pair() ) {
2128 // Hah! We have a bound machine register
2129 print_reg( lrgs(lidx).reg(), this, buf );
2130 } else {
2131 sprintf(buf,"L%d",lidx); // No register binding yet
2132 }
2133 }
2134 }
2135 return buf+strlen(buf);
2136 }
2138 //----------------------dump_for_spill_split_recycle--------------------------
2139 void PhaseChaitin::dump_for_spill_split_recycle() const {
2140 if( WizardMode && (PrintCompilation || PrintOpto) ) {
2141 // Display which live ranges need to be split and the allocator's state
2142 tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
2143 for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
2144 if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
2145 tty->print("L%d: ", bidx);
2146 lrgs(bidx).dump();
2147 }
2148 }
2149 tty->cr();
2150 dump();
2151 }
2152 }
2154 //------------------------------dump_frame------------------------------------
2155 void PhaseChaitin::dump_frame() const {
2156 const char *fp = OptoReg::regname(OptoReg::c_frame_pointer);
2157 const TypeTuple *domain = C->tf()->domain();
2158 const int argcnt = domain->cnt() - TypeFunc::Parms;
2160 // Incoming arguments in registers dump
2161 for( int k = 0; k < argcnt; k++ ) {
2162 OptoReg::Name parmreg = _matcher._parm_regs[k].first();
2163 if( OptoReg::is_reg(parmreg)) {
2164 const char *reg_name = OptoReg::regname(parmreg);
2165 tty->print("#r%3.3d %s", parmreg, reg_name);
2166 parmreg = _matcher._parm_regs[k].second();
2167 if( OptoReg::is_reg(parmreg)) {
2168 tty->print(":%s", OptoReg::regname(parmreg));
2169 }
2170 tty->print(" : parm %d: ", k);
2171 domain->field_at(k + TypeFunc::Parms)->dump();
2172 tty->print_cr("");
2173 }
2174 }
2176 // Check for un-owned padding above incoming args
2177 OptoReg::Name reg = _matcher._new_SP;
2178 if( reg > _matcher._in_arg_limit ) {
2179 reg = OptoReg::add(reg, -1);
2180 tty->print_cr("#r%3.3d %s+%2d: pad0, owned by CALLER", reg, fp, reg2offset_unchecked(reg));
2181 }
2183 // Incoming argument area dump
2184 OptoReg::Name begin_in_arg = OptoReg::add(_matcher._old_SP,C->out_preserve_stack_slots());
2185 while( reg > begin_in_arg ) {
2186 reg = OptoReg::add(reg, -1);
2187 tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2188 int j;
2189 for( j = 0; j < argcnt; j++) {
2190 if( _matcher._parm_regs[j].first() == reg ||
2191 _matcher._parm_regs[j].second() == reg ) {
2192 tty->print("parm %d: ",j);
2193 domain->field_at(j + TypeFunc::Parms)->dump();
2194 tty->print_cr("");
2195 break;
2196 }
2197 }
2198 if( j >= argcnt )
2199 tty->print_cr("HOLE, owned by SELF");
2200 }
2202 // Old outgoing preserve area
2203 while( reg > _matcher._old_SP ) {
2204 reg = OptoReg::add(reg, -1);
2205 tty->print_cr("#r%3.3d %s+%2d: old out preserve",reg,fp,reg2offset_unchecked(reg));
2206 }
2208 // Old SP
2209 tty->print_cr("# -- Old %s -- Framesize: %d --",fp,
2210 reg2offset_unchecked(OptoReg::add(_matcher._old_SP,-1)) - reg2offset_unchecked(_matcher._new_SP)+jintSize);
2212 // Preserve area dump
2213 int fixed_slots = C->fixed_slots();
2214 OptoReg::Name begin_in_preserve = OptoReg::add(_matcher._old_SP, -(int)C->in_preserve_stack_slots());
2215 OptoReg::Name return_addr = _matcher.return_addr();
2217 reg = OptoReg::add(reg, -1);
2218 while (OptoReg::is_stack(reg)) {
2219 tty->print("#r%3.3d %s+%2d: ",reg,fp,reg2offset_unchecked(reg));
2220 if (return_addr == reg) {
2221 tty->print_cr("return address");
2222 } else if (reg >= begin_in_preserve) {
2223 // Preserved slots are present on x86
2224 if (return_addr == OptoReg::add(reg, VMRegImpl::slots_per_word))
2225 tty->print_cr("saved fp register");
2226 else if (return_addr == OptoReg::add(reg, 2*VMRegImpl::slots_per_word) &&
2227 VerifyStackAtCalls)
2228 tty->print_cr("0xBADB100D +VerifyStackAtCalls");
2229 else
2230 tty->print_cr("in_preserve");
2231 } else if ((int)OptoReg::reg2stack(reg) < fixed_slots) {
2232 tty->print_cr("Fixed slot %d", OptoReg::reg2stack(reg));
2233 } else {
2234 tty->print_cr("pad2, stack alignment");
2235 }
2236 reg = OptoReg::add(reg, -1);
2237 }
2239 // Spill area dump
2240 reg = OptoReg::add(_matcher._new_SP, _framesize );
2241 while( reg > _matcher._out_arg_limit ) {
2242 reg = OptoReg::add(reg, -1);
2243 tty->print_cr("#r%3.3d %s+%2d: spill",reg,fp,reg2offset_unchecked(reg));
2244 }
2246 // Outgoing argument area dump
2247 while( reg > OptoReg::add(_matcher._new_SP, C->out_preserve_stack_slots()) ) {
2248 reg = OptoReg::add(reg, -1);
2249 tty->print_cr("#r%3.3d %s+%2d: outgoing argument",reg,fp,reg2offset_unchecked(reg));
2250 }
2252 // Outgoing new preserve area
2253 while( reg > _matcher._new_SP ) {
2254 reg = OptoReg::add(reg, -1);
2255 tty->print_cr("#r%3.3d %s+%2d: new out preserve",reg,fp,reg2offset_unchecked(reg));
2256 }
2257 tty->print_cr("#");
2258 }
2260 //------------------------------dump_bb----------------------------------------
2261 void PhaseChaitin::dump_bb( uint pre_order ) const {
2262 tty->print_cr("---dump of B%d---",pre_order);
2263 for( uint i = 0; i < _cfg._num_blocks; i++ ) {
2264 Block *b = _cfg._blocks[i];
2265 if( b->_pre_order == pre_order )
2266 dump(b);
2267 }
2268 }
2270 //------------------------------dump_lrg---------------------------------------
2271 void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
2272 tty->print_cr("---dump of L%d---",lidx);
2274 if (_ifg) {
2275 if (lidx >= _lrg_map.max_lrg_id()) {
2276 tty->print("Attempt to print live range index beyond max live range.\n");
2277 return;
2278 }
2279 tty->print("L%d: ",lidx);
2280 if (lidx < _ifg->_maxlrg) {
2281 lrgs(lidx).dump();
2282 } else {
2283 tty->print_cr("new LRG");
2284 }
2285 }
2286 if( _ifg && lidx < _ifg->_maxlrg) {
2287 tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
2288 _ifg->neighbors(lidx)->dump();
2289 tty->cr();
2290 }
2291 // For all blocks
2292 for( uint i = 0; i < _cfg._num_blocks; i++ ) {
2293 Block *b = _cfg._blocks[i];
2294 int dump_once = 0;
2296 // For all instructions
2297 for( uint j = 0; j < b->_nodes.size(); j++ ) {
2298 Node *n = b->_nodes[j];
2299 if (_lrg_map.find_const(n) == lidx) {
2300 if (!dump_once++) {
2301 tty->cr();
2302 b->dump_head( &_cfg._bbs );
2303 }
2304 dump(n);
2305 continue;
2306 }
2307 if (!defs_only) {
2308 uint cnt = n->req();
2309 for( uint k = 1; k < cnt; k++ ) {
2310 Node *m = n->in(k);
2311 if (!m) {
2312 continue; // be robust in the dumper
2313 }
2314 if (_lrg_map.find_const(m) == lidx) {
2315 if (!dump_once++) {
2316 tty->cr();
2317 b->dump_head(&_cfg._bbs);
2318 }
2319 dump(n);
2320 }
2321 }
2322 }
2323 }
2324 } // End of per-block dump
2325 tty->cr();
2326 }
2327 #endif // not PRODUCT
2329 //------------------------------print_chaitin_statistics-------------------------------
2330 int PhaseChaitin::_final_loads = 0;
2331 int PhaseChaitin::_final_stores = 0;
2332 int PhaseChaitin::_final_memoves = 0;
2333 int PhaseChaitin::_final_copies = 0;
2334 double PhaseChaitin::_final_load_cost = 0;
2335 double PhaseChaitin::_final_store_cost = 0;
2336 double PhaseChaitin::_final_memove_cost= 0;
2337 double PhaseChaitin::_final_copy_cost = 0;
2338 int PhaseChaitin::_conserv_coalesce = 0;
2339 int PhaseChaitin::_conserv_coalesce_pair = 0;
2340 int PhaseChaitin::_conserv_coalesce_trie = 0;
2341 int PhaseChaitin::_conserv_coalesce_quad = 0;
2342 int PhaseChaitin::_post_alloc = 0;
2343 int PhaseChaitin::_lost_opp_pp_coalesce = 0;
2344 int PhaseChaitin::_lost_opp_cflow_coalesce = 0;
2345 int PhaseChaitin::_used_cisc_instructions = 0;
2346 int PhaseChaitin::_unused_cisc_instructions = 0;
2347 int PhaseChaitin::_allocator_attempts = 0;
2348 int PhaseChaitin::_allocator_successes = 0;
2350 #ifndef PRODUCT
2351 uint PhaseChaitin::_high_pressure = 0;
2352 uint PhaseChaitin::_low_pressure = 0;
2354 void PhaseChaitin::print_chaitin_statistics() {
2355 tty->print_cr("Inserted %d spill loads, %d spill stores, %d mem-mem moves and %d copies.", _final_loads, _final_stores, _final_memoves, _final_copies);
2356 tty->print_cr("Total load cost= %6.0f, store cost = %6.0f, mem-mem cost = %5.2f, copy cost = %5.0f.", _final_load_cost, _final_store_cost, _final_memove_cost, _final_copy_cost);
2357 tty->print_cr("Adjusted spill cost = %7.0f.",
2358 _final_load_cost*4.0 + _final_store_cost * 2.0 +
2359 _final_copy_cost*1.0 + _final_memove_cost*12.0);
2360 tty->print("Conservatively coalesced %d copies, %d pairs",
2361 _conserv_coalesce, _conserv_coalesce_pair);
2362 if( _conserv_coalesce_trie || _conserv_coalesce_quad )
2363 tty->print(", %d tries, %d quads", _conserv_coalesce_trie, _conserv_coalesce_quad);
2364 tty->print_cr(", %d post alloc.", _post_alloc);
2365 if( _lost_opp_pp_coalesce || _lost_opp_cflow_coalesce )
2366 tty->print_cr("Lost coalesce opportunity, %d private-private, and %d cflow interfered.",
2367 _lost_opp_pp_coalesce, _lost_opp_cflow_coalesce );
2368 if( _used_cisc_instructions || _unused_cisc_instructions )
2369 tty->print_cr("Used cisc instruction %d, remained in register %d",
2370 _used_cisc_instructions, _unused_cisc_instructions);
2371 if( _allocator_successes != 0 )
2372 tty->print_cr("Average allocation trips %f", (float)_allocator_attempts/(float)_allocator_successes);
2373 tty->print_cr("High Pressure Blocks = %d, Low Pressure Blocks = %d", _high_pressure, _low_pressure);
2374 }
2375 #endif // not PRODUCT