src/share/vm/opto/buildOopMap.cpp

author:      aoqi
date:        Fri, 29 Apr 2016 00:06:10 +0800
changeset:   1:2d8a650513c2
parent:      0:f90c822e73f8
child:       6876:710a3c8b516e
permissions: -rw-r--r--

Added MIPS 64-bit port.
/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */

#include "precompiled.hpp"
#include "compiler/oopMap.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/compile.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/phase.hpp"
#include "opto/regalloc.hpp"
#include "opto/rootnode.hpp"
#ifdef TARGET_ARCH_x86
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "vmreg_ppc.inline.hpp"
#endif
#ifdef TARGET_ARCH_mips
# include "vmreg_mips.inline.hpp"
#endif

// The functions in this file build OopMaps after all scheduling is done.
//
// OopMaps contain a list of all registers and stack-slots containing oops (so
// they can be updated by GC).  OopMaps also contain a list of derived-pointer
// base-pointer pairs.  When the base is moved, the derived pointer moves to
// follow it.  Finally, any registers holding callee-save values are also
// recorded.  These might contain oops, but only the caller knows.
//
// BuildOopMaps implements a simple forward reaching-defs solution.  At each
// GC point we'll have the reaching-def Nodes.  If the reaching Nodes are
// typed as pointers (no offset), then they are oops.  Pointers+offsets are
// derived pointers, and bases can be found from them.  Finally, we'll also
// track reaching callee-save values.  Note that a copy of a callee-save value
// "kills" its source, so that only 1 copy of a callee-save value is alive at
// a time.
//
// We run a simple bitvector liveness pass to help trim out dead oops.  Due to
// irreducible loops, we can have a reaching def of an oop that only reaches
// along one path and no way to know if it's valid or not on the other path.
// The bitvectors are quite dense and the liveness pass is fast.
//
// At GC points, we consult this information to build OopMaps.  All reaching
// defs typed as oops are added to the OopMap.  Only 1 instance of a
// callee-save register can be recorded.  For derived pointers, we'll have to
// find and record the register holding the base.
//
// The reaching-defs computation is a simple 1-pass worklist approach.  I tried
// a clever breadth-first approach but it was worse (showed O(n^2) in the
// pick-next-block code).
//
// The relevant data is kept in a struct of arrays (it could just as well be
// an array of structs, but the struct-of-arrays is generally a little more
// efficient).  The arrays are indexed by register number (including
// stack-slots as registers) and so are bounded by 200 to 300 elements in
// practice.  One array will map to a reaching def Node (or NULL for
// conflict/dead).  The other array will map to a callee-saved register or
// OptoReg::Bad for not-callee-saved.
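//
// Illustrative sketch (not part of the original code; register indices and
// node names below are hypothetical): for a small max_reg the two arrays at
// one program point might look like
//
//   reg index:  0     1     2     3     4     5
//   _defs:      n17   n17   NULL  n42   NULL  n9    (reaching def, NULL = dead/conflict)
//   _callees:   Bad   Bad   Bad   3     Bad   Bad   (own index if holding a callee-save)
//
// where reg 3 still holds the callee-saved value that entered the method in
// reg 3, and regs 0/1 hold the two halves of the 64-bit oop defined by n17.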

// Structure to pass around
struct OopFlow : public ResourceObj {
  short *_callees;              // Array mapping register to callee-saved
  Node **_defs;                 // array mapping register to reaching def
                                // or NULL if dead/conflict
  // OopFlow structs, when not being actively modified, describe the _end_ of
  // this block.
  Block *_b;                    // Block for this struct
  OopFlow *_next;               // Next free OopFlow
                                // or NULL if dead/conflict
  Compile* C;

  OopFlow( short *callees, Node **defs, Compile* c ) : _callees(callees), _defs(defs),
    _b(NULL), _next(NULL), C(c) { }

  // Given reaching-defs for this block start, compute it for this block end
  void compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash );

  // Merge these two OopFlows into the 'this' pointer.
  void merge( OopFlow *flow, int max_reg );

  // Copy a 'flow' over an existing flow
  void clone( OopFlow *flow, int max_size);

  // Make a new OopFlow from scratch
  static OopFlow *make( Arena *A, int max_size, Compile* C );

  // Build an oopmap from the current flow info
  OopMap *build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live );
};
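
// For orientation only, a hypothetical use of the interface above (the real
// driver is Compile::BuildOopMaps at the bottom of this file, which also
// recycles OopFlow structs through a free list):
//
//   OopFlow *flow = OopFlow::make(A, max_reg, C);      // arrays padded so OptoReg::Bad is indexable
//   flow->clone(pred_flow, max_reg);                   // start from a predecessor's end-of-block state
//   flow->_b = b;                                      // attach to the block about to be walked
//   flow->compute_reach(regalloc, max_reg, safehash);  // walk b, installing OopMaps at safepoints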

// Given reaching-defs for this block start, compute it for this block end
void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash ) {

  for( uint i=0; i<_b->number_of_nodes(); i++ ) {
    Node *n = _b->get_node(i);

    if( n->jvms() ) {           // Build an OopMap here?
      JVMState *jvms = n->jvms();
      // no map needed for leaf calls
      if( n->is_MachSafePoint() && !n->is_MachCallLeaf() ) {
        int *live = (int*) (*safehash)[n];
        assert( live, "must find live" );
        n->as_MachSafePoint()->set_oop_map( build_oop_map(n,max_reg,regalloc, live) );
      }
    }

    // Assign new reaching def's.
    // Note that I padded the _defs and _callees arrays so it's legal
    // to index at _defs[OptoReg::Bad].
    OptoReg::Name first = regalloc->get_reg_first(n);
    OptoReg::Name second = regalloc->get_reg_second(n);
    _defs[first] = n;
    _defs[second] = n;

    // Pass callee-save info around copies
    int idx = n->is_Copy();
    if( idx ) {                 // Copies move callee-save info
      OptoReg::Name old_first = regalloc->get_reg_first(n->in(idx));
      OptoReg::Name old_second = regalloc->get_reg_second(n->in(idx));
      int tmp_first = _callees[old_first];
      int tmp_second = _callees[old_second];
      _callees[old_first] = OptoReg::Bad; // callee-save is moved, dead in old location
      _callees[old_second] = OptoReg::Bad;
      _callees[first] = tmp_first;
      _callees[second] = tmp_second;
    } else if( n->is_Phi() ) {  // Phis do not mod callee-saves
      assert( _callees[first] == _callees[regalloc->get_reg_first(n->in(1))], "" );
      assert( _callees[second] == _callees[regalloc->get_reg_second(n->in(1))], "" );
      assert( _callees[first] == _callees[regalloc->get_reg_first(n->in(n->req()-1))], "" );
      assert( _callees[second] == _callees[regalloc->get_reg_second(n->in(n->req()-1))], "" );
    } else {
      _callees[first] = OptoReg::Bad; // No longer holding a callee-save value
      _callees[second] = OptoReg::Bad;

      // Find base case for callee saves
      if( n->is_Proj() && n->in(0)->is_Start() ) {
        if( OptoReg::is_reg(first) &&
            regalloc->_matcher.is_save_on_entry(first) )
          _callees[first] = first;
        if( OptoReg::is_reg(second) &&
            regalloc->_matcher.is_save_on_entry(second) )
          _callees[second] = second;
      }
    }
  }
}

// Merge the given flow into the 'this' flow
void OopFlow::merge( OopFlow *flow, int max_reg ) {
  assert( _b == NULL, "merging into a happy flow" );
  assert( flow->_b, "this flow is still alive" );
  assert( flow != this, "no self flow" );

  // Do the merge.  If there are any differences, drop to 'bottom' which
  // is OptoReg::Bad or NULL depending.
  for( int i=0; i<max_reg; i++ ) {
    // Merge the callee-save's
    if( _callees[i] != flow->_callees[i] )
      _callees[i] = OptoReg::Bad;
    // Merge the reaching defs
    if( _defs[i] != flow->_defs[i] )
      _defs[i] = NULL;
  }

}
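
// Hypothetical example of the meet above: if this flow reaches register R
// with def n17 while 'flow' reaches it with def n42, _defs[R] drops to NULL
// (conflict); a callee-save slot the two flows disagree on likewise drops to
// OptoReg::Bad.  Only slots on which both flows agree survive the merge.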

void OopFlow::clone( OopFlow *flow, int max_size ) {
  _b = flow->_b;
  memcpy( _callees, flow->_callees, sizeof(short)*max_size);
  memcpy( _defs   , flow->_defs   , sizeof(Node*)*max_size);
}

OopFlow *OopFlow::make( Arena *A, int max_size, Compile* C ) {
  short *callees = NEW_ARENA_ARRAY(A,short,max_size+1);
  Node **defs    = NEW_ARENA_ARRAY(A,Node*,max_size+1);
  debug_only( memset(defs,0,(max_size+1)*sizeof(Node*)) );
  OopFlow *flow = new (A) OopFlow(callees+1, defs+1, C);
  assert( &flow->_callees[OptoReg::Bad] == callees, "Ok to index at OptoReg::Bad" );
  assert( &flow->_defs   [OptoReg::Bad] == defs   , "Ok to index at OptoReg::Bad" );
  return flow;
}

static int get_live_bit( int *live, int reg ) {
  return live[reg>>LogBitsPerInt] &   (1<<(reg&(BitsPerInt-1))); }
static void set_live_bit( int *live, int reg ) {
         live[reg>>LogBitsPerInt] |=  (1<<(reg&(BitsPerInt-1))); }
static void clr_live_bit( int *live, int reg ) {
         live[reg>>LogBitsPerInt] &= ~(1<<(reg&(BitsPerInt-1))); }
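
// Worked example for the helpers above (hypothetical numbers, 32-bit ints):
// register 37 lives in word 37 >> LogBitsPerInt == 1 at bit 37 & (BitsPerInt-1)
// == 5, so set_live_bit(live, 37) is equivalent to live[1] |= (1 << 5).  One
// liveness vector thus needs round_to(max_reg, BitsPerInt)/BitsPerInt ints,
// which is the max_reg_ints value computed in Compile::BuildOopMaps below.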

// Build an oopmap from the current flow info
OopMap *OopFlow::build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live ) {
  int framesize = regalloc->_framesize;
  int max_inarg_slot = OptoReg::reg2stack(regalloc->_matcher._new_SP);
  debug_only( char *dup_check = NEW_RESOURCE_ARRAY(char,OptoReg::stack0());
              memset(dup_check,0,OptoReg::stack0()) );

  OopMap *omap = new OopMap( framesize,  max_inarg_slot );
  MachCallNode *mcall = n->is_MachCall() ? n->as_MachCall() : NULL;
  JVMState* jvms = n->jvms();

  // For all registers do...
  for( int reg=0; reg<max_reg; reg++ ) {
    if( get_live_bit(live,reg) == 0 )
      continue;                 // Ignore if not live

    // %%% C2 can use 2 OptoRegs when the physical register is only one 64bit
    // register; in that case we'll get a non-concrete register for the second
    // half. We only need to tell the map the register once!
    //
    // However for the moment we disable this change and leave things as they
    // were.

    VMReg r = OptoReg::as_VMReg(OptoReg::Name(reg), framesize, max_inarg_slot);

    if (false && r->is_reg() && !r->is_concrete()) {
      continue;
    }

    // See if dead (no reaching def).
    Node *def = _defs[reg];     // Get reaching def
    assert( def, "since live better have reaching def" );

    // Classify the reaching def as oop, derived, callee-save, dead, or other
    const Type *t = def->bottom_type();
    if( t->isa_oop_ptr() ) {    // Oop or derived?
      assert( !OptoReg::is_valid(_callees[reg]), "oop can't be callee save" );
#ifdef _LP64
      // 64-bit pointers record oop-ishness on 2 aligned adjacent registers.
      // Make sure both are recorded from the same reaching def, but do not
      // put both into the oopmap.
      if( (reg&1) == 1 ) {      // High half of oop-pair?
        assert( _defs[reg-1] == _defs[reg], "both halves from same reaching def" );
        continue;               // Do not record high parts in oopmap
      }
#endif

      // Check for a legal reg name in the oopMap and bailout if it is not.
      if (!omap->legal_vm_reg_name(r)) {
        regalloc->C->record_method_not_compilable("illegal oopMap register name");
        continue;
      }
      if( t->is_ptr()->_offset == 0 ) { // Not derived?
        if( mcall ) {
          // Outgoing argument GC mask responsibility belongs to the callee,
          // not the caller.  Inspect the inputs to the call, to see if
          // this live-range is one of them.
          uint cnt = mcall->tf()->domain()->cnt();
          uint j;
          for( j = TypeFunc::Parms; j < cnt; j++)
            if( mcall->in(j) == def )
              break;            // reaching def is an argument oop
          if( j < cnt )         // arg oops don't go in GC map
            continue;           // Continue on to the next register
        }
        omap->set_oop(r);
      } else {                  // Else it's derived.
        // Find the base of the derived value.
        uint i;
        // Fast, common case, scan
        for( i = jvms->oopoff(); i < n->req(); i+=2 )
          if( n->in(i) == def ) break; // Common case
        if( i == n->req() ) {   // Missed, try a more generous scan
          // Scan again, but this time peek through copies
          for( i = jvms->oopoff(); i < n->req(); i+=2 ) {
            Node *m = n->in(i); // Get initial derived value
            while( 1 ) {
              Node *d = def;    // Get initial reaching def
              while( 1 ) {      // Follow copies of reaching def to end
                if( m == d ) goto found; // breaks 3 loops
                int idx = d->is_Copy();
                if( !idx ) break;
                d = d->in(idx);     // Link through copy
              }
              int idx = m->is_Copy();
              if( !idx ) break;
              m = m->in(idx);
            }
          }
          guarantee( 0, "must find derived/base pair" );
        }
      found: ;
        Node *base = n->in(i+1); // Base is other half of pair
        int breg = regalloc->get_reg_first(base);
        VMReg b = OptoReg::as_VMReg(OptoReg::Name(breg), framesize, max_inarg_slot);

        // I record liveness at safepoints BEFORE I make the inputs
        // live.  This is because argument oops are NOT live at a
        // safepoint (or at least they cannot appear in the oopmap).
        // Thus bases of base/derived pairs might not be in the
        // liveness data but they need to appear in the oopmap.
        if( get_live_bit(live,breg) == 0 ) {// Not live?
          // Flag it, so next derived pointer won't re-insert into oopmap
          set_live_bit(live,breg);
          // Already missed our turn?
          if( breg < reg ) {
            if (b->is_stack() || b->is_concrete() || true ) {
              omap->set_oop( b);
            }
          }
        }
        if (b->is_stack() || b->is_concrete() || true ) {
          omap->set_derived_oop( r, b);
        }
      }

    } else if( t->isa_narrowoop() ) {
      assert( !OptoReg::is_valid(_callees[reg]), "oop can't be callee save" );
      // Check for a legal reg name in the oopMap and bailout if it is not.
      if (!omap->legal_vm_reg_name(r)) {
        regalloc->C->record_method_not_compilable("illegal oopMap register name");
        continue;
      }
      if( mcall ) {
          // Outgoing argument GC mask responsibility belongs to the callee,
          // not the caller.  Inspect the inputs to the call, to see if
          // this live-range is one of them.
        uint cnt = mcall->tf()->domain()->cnt();
        uint j;
        for( j = TypeFunc::Parms; j < cnt; j++)
          if( mcall->in(j) == def )
            break;            // reaching def is an argument oop
        if( j < cnt )         // arg oops don't go in GC map
          continue;           // Continue on to the next register
      }
      omap->set_narrowoop(r);
    } else if( OptoReg::is_valid(_callees[reg])) { // callee-save?
      // It's a callee-save value
      assert( dup_check[_callees[reg]]==0, "trying to callee save same reg twice" );
      debug_only( dup_check[_callees[reg]]=1; )
      VMReg callee = OptoReg::as_VMReg(OptoReg::Name(_callees[reg]));
      if ( callee->is_concrete() || true ) {
        omap->set_callee_saved( r, callee);
      }

    } else {
      // Other - some reaching non-oop value
      omap->set_value( r);
#ifdef ASSERT
      if( t->isa_rawptr() && C->cfg()->_raw_oops.member(def) ) {
        def->dump();
        n->dump();
        assert(false, "there should be an oop in OopMap instead of a live raw oop at safepoint");
      }
#endif
    }

  }

#ifdef ASSERT
  /* Nice, Intel-only assert
  int cnt_callee_saves=0;
  int reg2 = 0;
  while (OptoReg::is_reg(reg2)) {
    if( dup_check[reg2] != 0) cnt_callee_saves++;
    assert( cnt_callee_saves==3 || cnt_callee_saves==5, "missed some callee-save" );
    reg2++;
  }
  */
#endif

#ifdef ASSERT
  for( OopMapStream oms1(omap, OopMapValue::derived_oop_value); !oms1.is_done(); oms1.next()) {
    OopMapValue omv1 = oms1.current();
    bool found = false;
    for( OopMapStream oms2(omap,OopMapValue::oop_value); !oms2.is_done(); oms2.next()) {
      if( omv1.content_reg() == oms2.current().reg() ) {
        found = true;
        break;
      }
    }
    assert( found, "derived with no base in oopmap" );
  }
#endif

  return omap;
}
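
// Summary sketch (not original code): for one safepoint the returned OopMap
// carries one entry per interesting VMReg, built from the calls above:
//
//   omap->set_oop(r)              -- r holds an ordinary oop
//   omap->set_narrowoop(r)        -- r holds a compressed oop
//   omap->set_derived_oop(r, b)   -- r is a derived pointer whose base oop is in b
//   omap->set_callee_saved(r, c)  -- r preserves the caller's register c
//   omap->set_value(r)            -- r is live but holds no oop
//
// GC later walks these entries to find and update every oop in the frame.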

// Compute backwards liveness on registers
static void do_liveness(PhaseRegAlloc* regalloc, PhaseCFG* cfg, Block_List* worklist, int max_reg_ints, Arena* A, Dict* safehash) {
  int* live = NEW_ARENA_ARRAY(A, int, (cfg->number_of_blocks() + 1) * max_reg_ints);
  int* tmp_live = &live[cfg->number_of_blocks() * max_reg_ints];
  Node* root = cfg->get_root_node();
  // On CISC platforms, get the node representing the stack pointer that regalloc
  // used for spills
  Node *fp = NodeSentinel;
  if (UseCISCSpill && root->req() > 1) {
    fp = root->in(1)->in(TypeFunc::FramePtr);
  }
  memset(live, 0, cfg->number_of_blocks() * (max_reg_ints << LogBytesPerInt));
  // Push preds onto worklist
  for (uint i = 1; i < root->req(); i++) {
    Block* block = cfg->get_block_for_node(root->in(i));
    worklist->push(block);
  }

  // ZKM.jar includes tiny infinite loops which are unreached from below.
  // If we missed any blocks, we'll retry here after pushing all missed
  // blocks on the worklist.  Normally this outer loop never trips more
  // than once.
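  //
  // Dataflow sketch (not original code): this loop is standard backward
  // liveness over per-block bitvectors, i.e. for each block b
  //
  //   live_out(b) = union of live_in(s) over all successors s of b
  //   live_in(b)  = GEN(b) | (live_out(b) & ~KILL(b))
  //
  // realized below by clearing each node's def'd bits (KILL) and then setting
  // its use'd bits (GEN) while walking the block bottom-up.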
  while (1) {

    while( worklist->size() ) { // Standard worklist algorithm
      Block *b = worklist->rpop();

      // Copy first successor into my tmp_live space
      int s0num = b->_succs[0]->_pre_order;
      int *t = &live[s0num*max_reg_ints];
      for( int i=0; i<max_reg_ints; i++ )
        tmp_live[i] = t[i];

      // OR in the remaining live registers
      for( uint j=1; j<b->_num_succs; j++ ) {
        uint sjnum = b->_succs[j]->_pre_order;
        int *t = &live[sjnum*max_reg_ints];
        for( int i=0; i<max_reg_ints; i++ )
          tmp_live[i] |= t[i];
      }

      // Now walk tmp_live up the block backwards, computing live
      for( int k=b->number_of_nodes()-1; k>=0; k-- ) {
        Node *n = b->get_node(k);
        // KILL def'd bits
        int first = regalloc->get_reg_first(n);
        int second = regalloc->get_reg_second(n);
        if( OptoReg::is_valid(first) ) clr_live_bit(tmp_live,first);
        if( OptoReg::is_valid(second) ) clr_live_bit(tmp_live,second);

        MachNode *m = n->is_Mach() ? n->as_Mach() : NULL;

        // Check if m is potentially a CISC alternate instruction (i.e., possibly
        // synthesized by RegAlloc from a conventional instruction and a
        // spilled input)
        bool is_cisc_alternate = false;
        if (UseCISCSpill && m) {
          is_cisc_alternate = m->is_cisc_alternate();
        }

        // GEN use'd bits
        for( uint l=1; l<n->req(); l++ ) {
          Node *def = n->in(l);
          assert(def != 0, "input edge required");
          int first = regalloc->get_reg_first(def);
          int second = regalloc->get_reg_second(def);
          if( OptoReg::is_valid(first) ) set_live_bit(tmp_live,first);
          if( OptoReg::is_valid(second) ) set_live_bit(tmp_live,second);
          // If we use the stack pointer in a cisc-alternative instruction,
          // check for use as a memory operand.  Then reconstruct the RegName
          // for this stack location, and set the appropriate bit in the
          // live vector (4987749).
          if (is_cisc_alternate && def == fp) {
            const TypePtr *adr_type = NULL;
            intptr_t offset;
            const Node* base = m->get_base_and_disp(offset, adr_type);
            if (base == NodeSentinel) {
              // Machnode has multiple memory inputs. We are unable to reason
              // with these, but are presuming (with trepidation) that none of
              // them are oops. This can be fixed by making get_base_and_disp()
              // look at a specific input instead of all inputs.
              assert(!def->bottom_type()->isa_oop_ptr(), "expecting non-oop mem input");
            } else if (base != fp || offset == Type::OffsetBot) {
              // Do nothing: the fp operand is either not from a memory use
              // (base == NULL) OR the fp is used in a non-memory context
              // (base is some other register) OR the offset is not constant,
              // so it is not a stack slot.
            } else {
              assert(offset >= 0, "unexpected negative offset");
              offset -= (offset % jintSize);  // count the whole word
              int stack_reg = regalloc->offset2reg(offset);
              if (OptoReg::is_stack(stack_reg)) {
                set_live_bit(tmp_live, stack_reg);
              } else {
                assert(false, "stack_reg not on stack?");
              }
            }
          }
        }

        if( n->jvms() ) {       // Record liveness at safepoint

          // This placement of this stanza means inputs to calls are
          // considered live at the callsite's OopMap.  Argument oops are
          // hence live, but NOT included in the oopmap.  See cutout in
          // build_oop_map.  Debug oops are live (and in OopMap).
          int *n_live = NEW_ARENA_ARRAY(A, int, max_reg_ints);
          for( int l=0; l<max_reg_ints; l++ )
            n_live[l] = tmp_live[l];
          safehash->Insert(n,n_live);
        }

      }

      // Now at block top, see if we have any changes.  If so, propagate
      // to prior blocks.
      int *old_live = &live[b->_pre_order*max_reg_ints];
      int l;
      for( l=0; l<max_reg_ints; l++ )
        if( tmp_live[l] != old_live[l] )
          break;
      if( l<max_reg_ints ) {     // Change!
        // Copy in new value
        for( l=0; l<max_reg_ints; l++ )
          old_live[l] = tmp_live[l];
        // Push preds onto worklist
        for (l = 1; l < (int)b->num_preds(); l++) {
          Block* block = cfg->get_block_for_node(b->pred(l));
          worklist->push(block);
        }
      }
    }

    // Scan for any missing safepoints.  Happens with infinite loops
    // a la ZKM.jar
    uint i;
    for (i = 1; i < cfg->number_of_blocks(); i++) {
      Block* block = cfg->get_block(i);
      uint j;
      for (j = 1; j < block->number_of_nodes(); j++) {
        if (block->get_node(j)->jvms() && (*safehash)[block->get_node(j)] == NULL) {
           break;
        }
      }
      if (j < block->number_of_nodes()) {
        break;
      }
    }
    if (i == cfg->number_of_blocks()) {
      break;                    // Got 'em all
    }
#ifndef PRODUCT
    if( PrintOpto && Verbose )
      tty->print_cr("retripping live calc");
#endif
    // Force the issue (expensively): recheck everybody
    for (i = 1; i < cfg->number_of_blocks(); i++) {
      worklist->push(cfg->get_block(i));
    }
  }
}

// Collect GC mask info - where are all the OOPs?
void Compile::BuildOopMaps() {
  NOT_PRODUCT( TracePhase t3("bldOopMaps", &_t_buildOopMaps, TimeCompiler); )
  // Can't resource-mark because I need to leave all those OopMaps around,
  // or else I need to resource-mark some arena other than the default.
  // ResourceMark rm;              // Reclaim all OopFlows when done
  int max_reg = _regalloc->_max_reg; // Current array extent

  Arena *A = Thread::current()->resource_area();
  Block_List worklist;          // Worklist of pending blocks

  int max_reg_ints = round_to(max_reg, BitsPerInt)>>LogBitsPerInt;
  Dict *safehash = NULL;        // Used for assert only
  // Compute a backwards liveness per register.  Needs a bitarray of
  // #blocks x (#registers, rounded up to ints)
  safehash = new Dict(cmpkey,hashkey,A);
  do_liveness( _regalloc, _cfg, &worklist, max_reg_ints, A, safehash );
  OopFlow *free_list = NULL;    // Free, unused

  // Array mapping blocks to completed oopflows
  OopFlow **flows = NEW_ARENA_ARRAY(A, OopFlow*, _cfg->number_of_blocks());
  memset( flows, 0, _cfg->number_of_blocks() * sizeof(OopFlow*) );


  // Do the first block 'by hand' to prime the worklist
  Block *entry = _cfg->get_block(1);
  OopFlow *rootflow = OopFlow::make(A,max_reg,this);
  // Initialize to 'bottom' (not 'top')
  memset( rootflow->_callees, OptoReg::Bad, max_reg*sizeof(short) );
  memset( rootflow->_defs   ,            0, max_reg*sizeof(Node*) );
  flows[entry->_pre_order] = rootflow;

  // Do the first block 'by hand' to prime the worklist
  rootflow->_b = entry;
  rootflow->compute_reach( _regalloc, max_reg, safehash );
  for( uint i=0; i<entry->_num_succs; i++ )
    worklist.push(entry->_succs[i]);

  // Now worklist contains blocks which have some, but perhaps not all,
  // predecessors visited.
  while( worklist.size() ) {
    // Scan for a block with all predecessors visited, or any random slob
    // otherwise.  All-preds-visited order allows me to recycle OopFlow
    // structures rapidly and cut down on the memory footprint.
    // Note: not all predecessors might be visited yet (must happen for
    // irreducible loops).  This is OK, since every live value must have the
    // SAME reaching def for the block, so any reaching def is OK.
    uint i;

    Block *b = worklist.pop();
    // Ignore root block
    if (b == _cfg->get_root_block()) {
      continue;
    }
    // Block is already done?  Happens if a block has several predecessors;
    // it can get on the worklist more than once.
    if( flows[b->_pre_order] ) continue;

    // If this block has a visited predecessor AND that predecessor has this
    // last block as its only undone child, we can move the OopFlow from the
    // pred to this block.  Otherwise we have to grab a new OopFlow.
    OopFlow *flow = NULL;       // Flag for finding optimized flow
    Block *pred = (Block*)0xdeadbeef;
    // Scan this block's preds to find a done predecessor
    for (uint j = 1; j < b->num_preds(); j++) {
      Block* p = _cfg->get_block_for_node(b->pred(j));
      OopFlow *p_flow = flows[p->_pre_order];
      if( p_flow ) {            // Predecessor is done
        assert( p_flow->_b == p, "cross check" );
        pred = p;               // Record some predecessor
        // If all successors of p are done except for 'b', then we can carry
        // p_flow forward to 'b' without copying, otherwise we have to draw
        // from the free_list and clone data.
        uint k;
        for( k=0; k<p->_num_succs; k++ )
          if( !flows[p->_succs[k]->_pre_order] &&
              p->_succs[k] != b )
            break;

        // Either carry-forward the now-unused OopFlow for b's use
        // or draw a new one from the free list
        if( k==p->_num_succs ) {
          flow = p_flow;
          break;                // Found an ideal pred, use him
        }
      }
    }

    if( flow ) {
      // We have an OopFlow that's the last-use of a predecessor.
      // Carry it forward.
    } else {                    // Draw a new OopFlow from the freelist
      if( !free_list )
        free_list = OopFlow::make(A,max_reg,C);
      flow = free_list;
      assert( flow->_b == NULL, "oopFlow is not free" );
      free_list = flow->_next;
      flow->_next = NULL;

      // Copy/clone over the data
      flow->clone(flows[pred->_pre_order], max_reg);
    }

    // Mark flow for block.  Blocks can only be flowed over once,
    // because after the first time they are guarded from entering
    // this code again.
    assert( flow->_b == pred, "have some prior flow" );
    flow->_b = NULL;

    // Now push flow forward
    flows[b->_pre_order] = flow;// Mark flow for this block
    flow->_b = b;
    flow->compute_reach( _regalloc, max_reg, safehash );

    // Now push children onto worklist
    for( i=0; i<b->_num_succs; i++ )
      worklist.push(b->_succs[i]);

  }
}
