src/share/vm/c1/c1_IR.cpp

author       iveresov
date         Thu, 02 Dec 2010 17:21:12 -0800
changeset    2349:5ddfcf4b079e
parent       2314:f95d63e2154a
child        3592:701a83c86f28
permissions  -rw-r--r--

7003554: (tiered) assert(is_null_object() || handle() != NULL) failed: cannot embed null pointer
Summary: C1 with profiling doesn't check whether the MDO has really been allocated, which can silently fail if the perm gen is full. The solution is to check whether the allocation failed and bail out of inlining or compilation.
Reviewed-by: kvn, never
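
As a rough illustration of the fix described in the summary (a hypothetical sketch only, not the actual patch hunks; the local variable method and the bailout message are assumptions for the example), profiling code would consult the MDO and bail out cleanly instead of asserting:

    ciMethodData* md = method->method_data_or_null();
    if (md == NULL) {
      // MDO allocation failed (e.g. the perm gen is full): give up on this
      // compile (or on inlining this callee) instead of tripping an assert.
      Compilation::current()->bailout("could not allocate method data");
      return;
    }
    // Only after this check is it safe to record profiling data in md.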

     1 /*
     2  * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "c1/c1_Compilation.hpp"
    27 #include "c1/c1_FrameMap.hpp"
    28 #include "c1/c1_GraphBuilder.hpp"
    29 #include "c1/c1_IR.hpp"
    30 #include "c1/c1_InstructionPrinter.hpp"
    31 #include "c1/c1_Optimizer.hpp"
    32 #include "utilities/bitMap.inline.hpp"
    35 // Implementation of XHandlers
    36 //
    37 // Note: This code could eventually go away if we are
    38 //       just using the ciExceptionHandlerStream.
    40 XHandlers::XHandlers(ciMethod* method) : _list(method->exception_table_length()) {
    41   ciExceptionHandlerStream s(method);
    42   while (!s.is_done()) {
    43     _list.append(new XHandler(s.handler()));
    44     s.next();
    45   }
    46   assert(s.count() == method->exception_table_length(), "exception table lengths inconsistent");
    47 }
    49 // deep copy of all XHandlers contained in the list
    50 XHandlers::XHandlers(XHandlers* other) :
    51   _list(other->length())
    52 {
    53   for (int i = 0; i < other->length(); i++) {
    54     _list.append(new XHandler(other->handler_at(i)));
    55   }
    56 }
    58 // Returns whether a particular exception type can be caught.  Also
    59 // returns true if klass is unloaded or any exception handler
    60 // classes are unloaded.  type_is_exact indicates whether the throw
    61 // is known to be exactly that class or it might throw a subtype.
    62 bool XHandlers::could_catch(ciInstanceKlass* klass, bool type_is_exact) const {
    63   // the type is unknown so be conservative
    64   if (!klass->is_loaded()) {
    65     return true;
    66   }
    68   for (int i = 0; i < length(); i++) {
    69     XHandler* handler = handler_at(i);
    70     if (handler->is_catch_all()) {
    71       // catch of ANY
    72       return true;
    73     }
    74     ciInstanceKlass* handler_klass = handler->catch_klass();
    75     // if it's unknown it might be catchable
    76     if (!handler_klass->is_loaded()) {
    77       return true;
    78     }
    79     // if the throw type is definitely a subtype of the catch type
    80     // then it can be caught.
    81     if (klass->is_subtype_of(handler_klass)) {
    82       return true;
    83     }
    84     if (!type_is_exact) {
    85       // If the type isn't exactly known then it can also be caught by
    86       // catch statements where the inexact type is a subtype of the
    87       // catch type.
    88       // given: foo extends bar extends Exception
    89       // throw bar can be caught by catch foo, catch bar, and catch
    90       // Exception, however it can't be caught by any handlers without
    91       // bar in its type hierarchy.
    92       if (handler_klass->is_subtype_of(klass)) {
    93         return true;
    94       }
    95     }
    96   }
    98   return false;
    99 }
   102 bool XHandlers::equals(XHandlers* others) const {
   103   if (others == NULL) return false;
   104   if (length() != others->length()) return false;
   106   for (int i = 0; i < length(); i++) {
   107     if (!handler_at(i)->equals(others->handler_at(i))) return false;
   108   }
   109   return true;
   110 }
   112 bool XHandler::equals(XHandler* other) const {
   113   assert(entry_pco() != -1 && other->entry_pco() != -1, "must have entry_pco");
   115   if (entry_pco() != other->entry_pco()) return false;
   116   if (scope_count() != other->scope_count()) return false;
   117   if (_desc != other->_desc) return false;
   119   assert(entry_block() == other->entry_block(), "entry_block must be equal when entry_pco is equal");
   120   return true;
   121 }
   124 // Implementation of IRScope
   125 BlockBegin* IRScope::build_graph(Compilation* compilation, int osr_bci) {
   126   GraphBuilder gm(compilation, this);
   127   NOT_PRODUCT(if (PrintValueNumbering && Verbose) gm.print_stats());
   128   if (compilation->bailed_out()) return NULL;
   129   return gm.start();
   130 }
   133 IRScope::IRScope(Compilation* compilation, IRScope* caller, int caller_bci, ciMethod* method, int osr_bci, bool create_graph)
   134 : _callees(2)
   135 , _compilation(compilation)
   136 , _requires_phi_function(method->max_locals())
   137 {
   138   _caller             = caller;
   139   _level              = caller == NULL ?  0 : caller->level() + 1;
   140   _method             = method;
   141   _xhandlers          = new XHandlers(method);
   142   _number_of_locks    = 0;
   143   _monitor_pairing_ok = method->has_balanced_monitors();
   144   _start              = NULL;
   146   if (osr_bci == -1) {
   147     _requires_phi_function.clear();
   148   } else {
   149     // selective creation of phi functions is not possible in OSR methods
   150     _requires_phi_function.set_range(0, method->max_locals());
   151   }
   153   assert(method->holder()->is_loaded() , "method holder must be loaded");
   155   // build graph if monitor pairing is ok
   156   if (create_graph && monitor_pairing_ok()) _start = build_graph(compilation, osr_bci);
   157 }
   160 int IRScope::max_stack() const {
   161   int my_max = method()->max_stack();
   162   int callee_max = 0;
   163   for (int i = 0; i < number_of_callees(); i++) {
   164     callee_max = MAX2(callee_max, callee_no(i)->max_stack());
   165   }
   166   return my_max + callee_max;
   167 }
   170 bool IRScopeDebugInfo::should_reexecute() {
   171   ciMethod* cur_method = scope()->method();
   172   int       cur_bci    = bci();
   173   if (cur_method != NULL && cur_bci != SynchronizationEntryBCI) {
   174     Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
   175     return Interpreter::bytecode_should_reexecute(code);
   176   } else
   177     return false;
   178 }
   181 // Implementation of CodeEmitInfo
   183 // Stack must be NON-null
   184 CodeEmitInfo::CodeEmitInfo(ValueStack* stack, XHandlers* exception_handlers)
   185   : _scope(stack->scope())
   186   , _scope_debug_info(NULL)
   187   , _oop_map(NULL)
   188   , _stack(stack)
   189   , _exception_handlers(exception_handlers)
   190   , _is_method_handle_invoke(false) {
   191   assert(_stack != NULL, "must be non null");
   192 }
   195 CodeEmitInfo::CodeEmitInfo(CodeEmitInfo* info, ValueStack* stack)
   196   : _scope(info->_scope)
   197   , _exception_handlers(NULL)
   198   , _scope_debug_info(NULL)
   199   , _oop_map(NULL)
   200   , _stack(stack == NULL ? info->_stack : stack)
   201   , _is_method_handle_invoke(info->_is_method_handle_invoke) {
   203   // deep copy of exception handlers
   204   if (info->_exception_handlers != NULL) {
   205     _exception_handlers = new XHandlers(info->_exception_handlers);
   206   }
   207 }
   210 void CodeEmitInfo::record_debug_info(DebugInformationRecorder* recorder, int pc_offset) {
   211   // record the safepoint before recording the debug info for enclosing scopes
   212   recorder->add_safepoint(pc_offset, _oop_map->deep_copy());
   213   _scope_debug_info->record_debug_info(recorder, pc_offset, true/*topmost*/, _is_method_handle_invoke);
   214   recorder->end_safepoint(pc_offset);
   215 }
   218 void CodeEmitInfo::add_register_oop(LIR_Opr opr) {
   219   assert(_oop_map != NULL, "oop map must already exist");
   220   assert(opr->is_single_cpu(), "should not call otherwise");
   222   VMReg name = frame_map()->regname(opr);
   223   _oop_map->set_oop(name);
   224 }
   229 // Implementation of IR
   231 IR::IR(Compilation* compilation, ciMethod* method, int osr_bci) :
   232     _locals_size(in_WordSize(-1))
   233   , _num_loops(0) {
   234   // setup IR fields
   235   _compilation = compilation;
   236   _top_scope   = new IRScope(compilation, NULL, -1, method, osr_bci, true);
   237   _code        = NULL;
   238 }
   241 void IR::optimize() {
   242   Optimizer opt(this);
   243   if (!compilation()->profile_branches()) {
   244     if (DoCEE) {
   245       opt.eliminate_conditional_expressions();
   246 #ifndef PRODUCT
   247       if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after CEE"); print(true); }
   248       if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after CEE"); print(false); }
   249 #endif
   250     }
   251     if (EliminateBlocks) {
   252       opt.eliminate_blocks();
   253 #ifndef PRODUCT
   254       if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after block elimination"); print(true); }
   255       if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after block elimination"); print(false); }
   256 #endif
   257     }
   258   }
   259   if (EliminateNullChecks) {
   260     opt.eliminate_null_checks();
   261 #ifndef PRODUCT
   262     if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after null check elimination"); print(true); }
   263     if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after null check elimination"); print(false); }
   264 #endif
   265   }
   266 }
   269 static int sort_pairs(BlockPair** a, BlockPair** b) {
   270   if ((*a)->from() == (*b)->from()) {
   271     return (*a)->to()->block_id() - (*b)->to()->block_id();
   272   } else {
   273     return (*a)->from()->block_id() - (*b)->from()->block_id();
   274   }
   275 }
   278 class CriticalEdgeFinder: public BlockClosure {
   279   BlockPairList blocks;
   280   IR*       _ir;
   282  public:
   283   CriticalEdgeFinder(IR* ir): _ir(ir) {}
   284   void block_do(BlockBegin* bb) {
   285     BlockEnd* be = bb->end();
   286     int nos = be->number_of_sux();
   287     if (nos >= 2) {
   288       for (int i = 0; i < nos; i++) {
   289         BlockBegin* sux = be->sux_at(i);
   290         if (sux->number_of_preds() >= 2) {
   291           blocks.append(new BlockPair(bb, sux));
   292         }
   293       }
   294     }
   295   }
   297   void split_edges() {
   298     BlockPair* last_pair = NULL;
   299     blocks.sort(sort_pairs);
   300     for (int i = 0; i < blocks.length(); i++) {
   301       BlockPair* pair = blocks.at(i);
   302       if (last_pair != NULL && pair->is_same(last_pair)) continue;
   303       BlockBegin* from = pair->from();
   304       BlockBegin* to = pair->to();
   305       BlockBegin* split = from->insert_block_between(to);
   306 #ifndef PRODUCT
   307       if ((PrintIR || PrintIR1) && Verbose) {
   308         tty->print_cr("Split critical edge B%d -> B%d (new block B%d)",
   309                       from->block_id(), to->block_id(), split->block_id());
   310       }
   311 #endif
   312       last_pair = pair;
   313     }
   314   }
   315 };
   317 void IR::split_critical_edges() {
   318   CriticalEdgeFinder cef(this);
   320   iterate_preorder(&cef);
   321   cef.split_edges();
   322 }
   325 class UseCountComputer: public ValueVisitor, BlockClosure {
   326  private:
   327   void visit(Value* n) {
   328     // Local instructions and Phis for expression stack values at the
   329     // start of basic blocks are not added to the instruction list
   330     if (!(*n)->is_linked() && (*n)->can_be_linked()) {
   331       assert(false, "a node was not appended to the graph");
   332       Compilation::current()->bailout("a node was not appended to the graph");
   333     }
   334     // use n's input if not visited before
   335     if (!(*n)->is_pinned() && !(*n)->has_uses()) {
   336       // note: a) if the instruction is pinned, it will be handled by compute_use_count
   337       //       b) if the instruction has uses, it was touched before
   338       //       => in both cases we don't need to update n's values
   339       uses_do(n);
   340     }
   341     // use n
   342     (*n)->_use_count++;
   343   }
   345   Values* worklist;
   346   int depth;
   347   enum {
   348     max_recurse_depth = 20
   349   };
   351   void uses_do(Value* n) {
   352     depth++;
   353     if (depth > max_recurse_depth) {
   354       // don't allow the traversal to recurse too deeply
   355       worklist->push(*n);
   356     } else {
   357       (*n)->input_values_do(this);
   358       // special handling for some instructions
   359       if ((*n)->as_BlockEnd() != NULL) {
   360         // note on BlockEnd:
   361         //   must 'use' the stack only if the method doesn't
   362         //   terminate, however, in those cases stack is empty
   363         (*n)->state_values_do(this);
   364       }
   365     }
   366     depth--;
   367   }
   369   void block_do(BlockBegin* b) {
   370     depth = 0;
   371     // process all pinned nodes as the roots of expression trees
   372     for (Instruction* n = b; n != NULL; n = n->next()) {
   373       if (n->is_pinned()) uses_do(&n);
   374     }
   375     assert(depth == 0, "should have counted back down");
   377     // now process any unpinned nodes which recursed too deeply
   378     while (worklist->length() > 0) {
   379       Value t = worklist->pop();
   380       if (!t->is_pinned()) {
   381         // compute the use count
   382         uses_do(&t);
   384         // pin the instruction so that LIRGenerator doesn't recurse
   385         // too deeply during its evaluation.
   386         t->pin();
   387       }
   388     }
   389     assert(depth == 0, "should have counted back down");
   390   }
   392   UseCountComputer() {
   393     worklist = new Values();
   394     depth = 0;
   395   }
   397  public:
   398   static void compute(BlockList* blocks) {
   399     UseCountComputer ucc;
   400     blocks->iterate_backward(&ucc);
   401   }
   402 };
   405 // helper macro for short definition of trace-output inside code
   406 #ifndef PRODUCT
   407   #define TRACE_LINEAR_SCAN(level, code)       \
   408     if (TraceLinearScanLevel >= level) {       \
   409       code;                                    \
   410     }
   411 #else
   412   #define TRACE_LINEAR_SCAN(level, code)
   413 #endif
   415 class ComputeLinearScanOrder : public StackObj {
   416  private:
   417   int        _max_block_id;        // the highest block_id of a block
   418   int        _num_blocks;          // total number of blocks (smaller than _max_block_id)
   419   int        _num_loops;           // total number of loops
   420   bool       _iterative_dominators; // method requires iterative computation of dominators
   422   BlockList* _linear_scan_order;   // the resulting list of blocks in correct order
   424   BitMap     _visited_blocks;      // used for recursive processing of blocks
   425   BitMap     _active_blocks;       // used for recursive processing of blocks
   426   BitMap     _dominator_blocks;    // temporary BitMap used for computation of dominators
   427   intArray   _forward_branches;    // number of incoming forward branches for each block
   428   BlockList  _loop_end_blocks;     // list of all loop end blocks collected during count_edges
   429   BitMap2D   _loop_map;            // two-dimensional bit set: a bit is set if a block is contained in a loop
   430   BlockList  _work_list;           // temporary list (used in mark_loops and compute_order)
   432   Compilation* _compilation;
   434   // accessors for _visited_blocks and _active_blocks
   435   void init_visited()                     { _active_blocks.clear(); _visited_blocks.clear(); }
   436   bool is_visited(BlockBegin* b) const    { return _visited_blocks.at(b->block_id()); }
   437   bool is_active(BlockBegin* b) const     { return _active_blocks.at(b->block_id()); }
   438   void set_visited(BlockBegin* b)         { assert(!is_visited(b), "already set"); _visited_blocks.set_bit(b->block_id()); }
   439   void set_active(BlockBegin* b)          { assert(!is_active(b), "already set");  _active_blocks.set_bit(b->block_id()); }
   440   void clear_active(BlockBegin* b)        { assert(is_active(b), "not already");   _active_blocks.clear_bit(b->block_id()); }
   442   // accessors for _forward_branches
   443   void inc_forward_branches(BlockBegin* b) { _forward_branches.at_put(b->block_id(), _forward_branches.at(b->block_id()) + 1); }
   444   int  dec_forward_branches(BlockBegin* b) { _forward_branches.at_put(b->block_id(), _forward_branches.at(b->block_id()) - 1); return _forward_branches.at(b->block_id()); }
   446   // accessors for _loop_map
   447   bool is_block_in_loop   (int loop_idx, BlockBegin* b) const { return _loop_map.at(loop_idx, b->block_id()); }
   448   void set_block_in_loop  (int loop_idx, BlockBegin* b)       { _loop_map.set_bit(loop_idx, b->block_id()); }
   449   void clear_block_in_loop(int loop_idx, int block_id)        { _loop_map.clear_bit(loop_idx, block_id); }
   451   // count edges between blocks
   452   void count_edges(BlockBegin* cur, BlockBegin* parent);
   454   // loop detection
   455   void mark_loops();
   456   void clear_non_natural_loops(BlockBegin* start_block);
   457   void assign_loop_depth(BlockBegin* start_block);
   459   // computation of final block order
   460   BlockBegin* common_dominator(BlockBegin* a, BlockBegin* b);
   461   void compute_dominator(BlockBegin* cur, BlockBegin* parent);
   462   int  compute_weight(BlockBegin* cur);
   463   bool ready_for_processing(BlockBegin* cur);
   464   void sort_into_work_list(BlockBegin* b);
   465   void append_block(BlockBegin* cur);
   466   void compute_order(BlockBegin* start_block);
   468   // fixup of dominators for non-natural loops
   469   bool compute_dominators_iter();
   470   void compute_dominators();
   472   // debug functions
   473   NOT_PRODUCT(void print_blocks();)
   474   DEBUG_ONLY(void verify();)
   476   Compilation* compilation() const { return _compilation; }
   477  public:
   478   ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block);
   480   // accessors for final result
   481   BlockList* linear_scan_order() const    { return _linear_scan_order; }
   482   int        num_loops() const            { return _num_loops; }
   483 };
   486 ComputeLinearScanOrder::ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block) :
   487   _max_block_id(BlockBegin::number_of_blocks()),
   488   _num_blocks(0),
   489   _num_loops(0),
   490   _iterative_dominators(false),
   491   _visited_blocks(_max_block_id),
   492   _active_blocks(_max_block_id),
   493   _dominator_blocks(_max_block_id),
   494   _forward_branches(_max_block_id, 0),
   495   _loop_end_blocks(8),
   496   _work_list(8),
   497   _linear_scan_order(NULL), // initialized later with correct size
   498   _loop_map(0, 0),          // initialized later with correct size
   499   _compilation(c)
   500 {
   501   TRACE_LINEAR_SCAN(2, "***** computing linear-scan block order");
   503   init_visited();
   504   count_edges(start_block, NULL);
   506   if (compilation()->is_profiling()) {
   507     ciMethod *method = compilation()->method();
   508     if (!method->is_accessor()) {
   509       ciMethodData* md = method->method_data_or_null();
   510       assert(md != NULL, "Sanity");
   511       md->set_compilation_stats(_num_loops, _num_blocks);
   512     }
   513   }
   515   if (_num_loops > 0) {
   516     mark_loops();
   517     clear_non_natural_loops(start_block);
   518     assign_loop_depth(start_block);
   519   }
   521   compute_order(start_block);
   522   compute_dominators();
   524   NOT_PRODUCT(print_blocks());
   525   DEBUG_ONLY(verify());
   526 }
   529 // Traverse the CFG:
   530 // * count total number of blocks
   531 // * count all incoming edges and backward incoming edges
   532 // * number loop header blocks
   533 // * create a list with all loop end blocks
   534 void ComputeLinearScanOrder::count_edges(BlockBegin* cur, BlockBegin* parent) {
   535   TRACE_LINEAR_SCAN(3, tty->print_cr("Enter count_edges for block B%d coming from B%d", cur->block_id(), parent != NULL ? parent->block_id() : -1));
   536   assert(cur->dominator() == NULL, "dominator already initialized");
   538   if (is_active(cur)) {
   539     TRACE_LINEAR_SCAN(3, tty->print_cr("backward branch"));
   540     assert(is_visited(cur), "block must be visited when block is active");
   541     assert(parent != NULL, "must have parent");
   543     cur->set(BlockBegin::linear_scan_loop_header_flag);
   544     cur->set(BlockBegin::backward_branch_target_flag);
   546     parent->set(BlockBegin::linear_scan_loop_end_flag);
   548     // When a loop header is also the start of an exception handler, then the backward branch is
   549     // an exception edge. Because such edges are usually critical edges which cannot be split, the
   550     // loop must be excluded here from processing.
   551     if (cur->is_set(BlockBegin::exception_entry_flag)) {
   552       // Make sure that dominators are correct in this weird situation
   553       _iterative_dominators = true;
   554       return;
   555     }
   556     assert(parent->number_of_sux() == 1 && parent->sux_at(0) == cur,
   557            "loop end blocks must have one successor (critical edges are split)");
   559     _loop_end_blocks.append(parent);
   560     return;
   561   }
   563   // increment number of incoming forward branches
   564   inc_forward_branches(cur);
   566   if (is_visited(cur)) {
   567     TRACE_LINEAR_SCAN(3, tty->print_cr("block already visited"));
   568     return;
   569   }
   571   _num_blocks++;
   572   set_visited(cur);
   573   set_active(cur);
   575   // recursive call for all successors
   576   int i;
   577   for (i = cur->number_of_sux() - 1; i >= 0; i--) {
   578     count_edges(cur->sux_at(i), cur);
   579   }
   580   for (i = cur->number_of_exception_handlers() - 1; i >= 0; i--) {
   581     count_edges(cur->exception_handler_at(i), cur);
   582   }
   584   clear_active(cur);
   586   // Each loop has a unique number.
   587   // When multiple loops are nested, assign_loop_depth assumes that the
   588   // innermost loop has the lowest number. This is guaranteed by setting
   589   // the loop number after the recursive calls for the successors above
   590   // have returned.
   591   if (cur->is_set(BlockBegin::linear_scan_loop_header_flag)) {
   592     assert(cur->loop_index() == -1, "cannot set loop-index twice");
   593     TRACE_LINEAR_SCAN(3, tty->print_cr("Block B%d is loop header of loop %d", cur->block_id(), _num_loops));
   595     cur->set_loop_index(_num_loops);
   596     _num_loops++;
   597   }
   599   TRACE_LINEAR_SCAN(3, tty->print_cr("Finished count_edges for block B%d", cur->block_id()));
   600 }
   603 void ComputeLinearScanOrder::mark_loops() {
   604   TRACE_LINEAR_SCAN(3, tty->print_cr("----- marking loops"));
   606   _loop_map = BitMap2D(_num_loops, _max_block_id);
   607   _loop_map.clear();
   609   for (int i = _loop_end_blocks.length() - 1; i >= 0; i--) {
   610     BlockBegin* loop_end   = _loop_end_blocks.at(i);
   611     BlockBegin* loop_start = loop_end->sux_at(0);
   612     int         loop_idx   = loop_start->loop_index();
   614     TRACE_LINEAR_SCAN(3, tty->print_cr("Processing loop from B%d to B%d (loop %d):", loop_start->block_id(), loop_end->block_id(), loop_idx));
   615     assert(loop_end->is_set(BlockBegin::linear_scan_loop_end_flag), "loop end flag must be set");
   616     assert(loop_end->number_of_sux() == 1, "incorrect number of successors");
   617     assert(loop_start->is_set(BlockBegin::linear_scan_loop_header_flag), "loop header flag must be set");
   618     assert(loop_idx >= 0 && loop_idx < _num_loops, "loop index not set");
   619     assert(_work_list.is_empty(), "work list must be empty before processing");
   621     // add the end-block of the loop to the working list
   622     _work_list.push(loop_end);
   623     set_block_in_loop(loop_idx, loop_end);
   624     do {
   625       BlockBegin* cur = _work_list.pop();
   627       TRACE_LINEAR_SCAN(3, tty->print_cr("    processing B%d", cur->block_id()));
   628       assert(is_block_in_loop(loop_idx, cur), "bit in loop map must be set when block is in work list");
   630       // recursive processing of all predecessors ends when start block of loop is reached
   631       if (cur != loop_start && !cur->is_set(BlockBegin::osr_entry_flag)) {
   632         for (int j = cur->number_of_preds() - 1; j >= 0; j--) {
   633           BlockBegin* pred = cur->pred_at(j);
   635           if (!is_block_in_loop(loop_idx, pred) /*&& !pred->is_set(BlockBegin::osr_entry_flag)*/) {
   636             // this predecessor has not been processed yet, so add it to work list
   637             TRACE_LINEAR_SCAN(3, tty->print_cr("    pushing B%d", pred->block_id()));
   638             _work_list.push(pred);
   639             set_block_in_loop(loop_idx, pred);
   640           }
   641         }
   642       }
   643     } while (!_work_list.is_empty());
   644   }
   645 }
   648 // check for non-natural loops (loops where the loop header does not dominate
   649 // all other loop blocks = loops with multiple entries).
   650 // Such loops are ignored.
   651 void ComputeLinearScanOrder::clear_non_natural_loops(BlockBegin* start_block) {
   652   for (int i = _num_loops - 1; i >= 0; i--) {
   653     if (is_block_in_loop(i, start_block)) {
   654       // loop i contains the entry block of the method
   655       // -> this is not a natural loop, so ignore it
   656       TRACE_LINEAR_SCAN(2, tty->print_cr("Loop %d is non-natural, so it is ignored", i));
   658       for (int block_id = _max_block_id - 1; block_id >= 0; block_id--) {
   659         clear_block_in_loop(i, block_id);
   660       }
   661       _iterative_dominators = true;
   662     }
   663   }
   664 }
   666 void ComputeLinearScanOrder::assign_loop_depth(BlockBegin* start_block) {
   667   TRACE_LINEAR_SCAN(3, "----- computing loop-depth and weight");
   668   init_visited();
   670   assert(_work_list.is_empty(), "work list must be empty before processing");
   671   _work_list.append(start_block);
   673   do {
   674     BlockBegin* cur = _work_list.pop();
   676     if (!is_visited(cur)) {
   677       set_visited(cur);
   678       TRACE_LINEAR_SCAN(4, tty->print_cr("Computing loop depth for block B%d", cur->block_id()));
   680       // compute loop-depth and loop-index for the block
   681       assert(cur->loop_depth() == 0, "cannot set loop-depth twice");
   682       int i;
   683       int loop_depth = 0;
   684       int min_loop_idx = -1;
   685       for (i = _num_loops - 1; i >= 0; i--) {
   686         if (is_block_in_loop(i, cur)) {
   687           loop_depth++;
   688           min_loop_idx = i;
   689         }
   690       }
   691       cur->set_loop_depth(loop_depth);
   692       cur->set_loop_index(min_loop_idx);
   694       // append all unvisited successors to work list
   695       for (i = cur->number_of_sux() - 1; i >= 0; i--) {
   696         _work_list.append(cur->sux_at(i));
   697       }
   698       for (i = cur->number_of_exception_handlers() - 1; i >= 0; i--) {
   699         _work_list.append(cur->exception_handler_at(i));
   700       }
   701     }
   702   } while (!_work_list.is_empty());
   703 }
   706 BlockBegin* ComputeLinearScanOrder::common_dominator(BlockBegin* a, BlockBegin* b) {
   707   assert(a != NULL && b != NULL, "must have input blocks");
   709   _dominator_blocks.clear();
   710   while (a != NULL) {
   711     _dominator_blocks.set_bit(a->block_id());
   712     assert(a->dominator() != NULL || a == _linear_scan_order->at(0), "dominator must be initialized");
   713     a = a->dominator();
   714   }
   715   while (b != NULL && !_dominator_blocks.at(b->block_id())) {
   716     assert(b->dominator() != NULL || b == _linear_scan_order->at(0), "dominator must be initialized");
   717     b = b->dominator();
   718   }
   720   assert(b != NULL, "could not find dominator");
   721   return b;
   722 }
   724 void ComputeLinearScanOrder::compute_dominator(BlockBegin* cur, BlockBegin* parent) {
   725   if (cur->dominator() == NULL) {
   726     TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: initializing dominator of B%d to B%d", cur->block_id(), parent->block_id()));
   727     cur->set_dominator(parent);
   729   } else if (!(cur->is_set(BlockBegin::linear_scan_loop_header_flag) && parent->is_set(BlockBegin::linear_scan_loop_end_flag))) {
   730     TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: computing dominator of B%d: common dominator of B%d and B%d is B%d", cur->block_id(), parent->block_id(), cur->dominator()->block_id(), common_dominator(cur->dominator(), parent)->block_id()));
   731     assert(cur->number_of_preds() > 1, "");
   732     cur->set_dominator(common_dominator(cur->dominator(), parent));
   733   }
   734 }
   737 int ComputeLinearScanOrder::compute_weight(BlockBegin* cur) {
   738   BlockBegin* single_sux = NULL;
   739   if (cur->number_of_sux() == 1) {
   740     single_sux = cur->sux_at(0);
   741   }
   743   // limit loop-depth to 15 bits (only as a precaution; it will never be that big)
   744   int weight = (cur->loop_depth() & 0x7FFF) << 16;
   746   // general macro for short definition of weight flags
   747   // the first instance of INC_WEIGHT_IF has the highest priority
   748   int cur_bit = 15;
   749   #define INC_WEIGHT_IF(condition) if ((condition)) { weight |= (1 << cur_bit); } cur_bit--;
   751   // this is necessary for the (very rare) case that two successive blocks have
   752   // the same loop depth, but a different loop index (can happen for endless loops
   753   // with exception handlers)
   754   INC_WEIGHT_IF(!cur->is_set(BlockBegin::linear_scan_loop_header_flag));
   756   // loop end blocks (blocks that end with a backward branch) are added
   757   // after all other blocks of the loop.
   758   INC_WEIGHT_IF(!cur->is_set(BlockBegin::linear_scan_loop_end_flag));
   760   // critical edge split blocks are preferred because then they have a higher
   761   // probability of being completely empty
   762   INC_WEIGHT_IF(cur->is_set(BlockBegin::critical_edge_split_flag));
   764   // exceptions should not be thrown in normal control flow, so these blocks
   765   // are added as late as possible
   766   INC_WEIGHT_IF(cur->end()->as_Throw() == NULL  && (single_sux == NULL || single_sux->end()->as_Throw()  == NULL));
   767   INC_WEIGHT_IF(cur->end()->as_Return() == NULL && (single_sux == NULL || single_sux->end()->as_Return() == NULL));
   769   // exceptions handlers are added as late as possible
   770   INC_WEIGHT_IF(!cur->is_set(BlockBegin::exception_entry_flag));
   772   // guarantee that weight is > 0
   773   weight |= 1;
   775   #undef INC_WEIGHT_IF
   776   assert(cur_bit >= 0, "too many flags");
   777   assert(weight > 0, "weight cannot become negative");
   779   return weight;
   780 }
   782 bool ComputeLinearScanOrder::ready_for_processing(BlockBegin* cur) {
   783   // Discount the edge just traveled.
   784   // When the number drops to zero, all forward branches were processed
   785   if (dec_forward_branches(cur) != 0) {
   786     return false;
   787   }
   789   assert(_linear_scan_order->index_of(cur) == -1, "block already processed (block can be ready only once)");
   790   assert(_work_list.index_of(cur) == -1, "block already in work-list (block can be ready only once)");
   791   return true;
   792 }
   794 void ComputeLinearScanOrder::sort_into_work_list(BlockBegin* cur) {
   795   assert(_work_list.index_of(cur) == -1, "block already in work list");
   797   int cur_weight = compute_weight(cur);
   799   // the linear_scan_number is used to cache the weight of a block
   800   cur->set_linear_scan_number(cur_weight);
   802 #ifndef PRODUCT
   803   if (StressLinearScan) {
   804     _work_list.insert_before(0, cur);
   805     return;
   806   }
   807 #endif
   809   _work_list.append(NULL); // provide space for new element
   811   int insert_idx = _work_list.length() - 1;
   812   while (insert_idx > 0 && _work_list.at(insert_idx - 1)->linear_scan_number() > cur_weight) {
   813     _work_list.at_put(insert_idx, _work_list.at(insert_idx - 1));
   814     insert_idx--;
   815   }
   816   _work_list.at_put(insert_idx, cur);
   818   TRACE_LINEAR_SCAN(3, tty->print_cr("Sorted B%d into worklist. new worklist:", cur->block_id()));
   819   TRACE_LINEAR_SCAN(3, for (int i = 0; i < _work_list.length(); i++) tty->print_cr("%8d B%2d  weight:%6x", i, _work_list.at(i)->block_id(), _work_list.at(i)->linear_scan_number()));
   821 #ifdef ASSERT
   822   for (int i = 0; i < _work_list.length(); i++) {
   823     assert(_work_list.at(i)->linear_scan_number() > 0, "weight not set");
   824     assert(i == 0 || _work_list.at(i - 1)->linear_scan_number() <= _work_list.at(i)->linear_scan_number(), "incorrect order in worklist");
   825   }
   826 #endif
   827 }
   829 void ComputeLinearScanOrder::append_block(BlockBegin* cur) {
   830   TRACE_LINEAR_SCAN(3, tty->print_cr("appending block B%d (weight 0x%6x) to linear-scan order", cur->block_id(), cur->linear_scan_number()));
   831   assert(_linear_scan_order->index_of(cur) == -1, "cannot add the same block twice");
   833   // currently, the linear scan order and code emit order are equal.
   834   // therefore the linear_scan_number and the weight of a block must also
   835   // be equal.
   836   cur->set_linear_scan_number(_linear_scan_order->length());
   837   _linear_scan_order->append(cur);
   838 }
   840 void ComputeLinearScanOrder::compute_order(BlockBegin* start_block) {
   841   TRACE_LINEAR_SCAN(3, "----- computing final block order");
   843   // the start block is always the first block in the linear scan order
   844   _linear_scan_order = new BlockList(_num_blocks);
   845   append_block(start_block);
   847   assert(start_block->end()->as_Base() != NULL, "start block must end with Base-instruction");
   848   BlockBegin* std_entry = ((Base*)start_block->end())->std_entry();
   849   BlockBegin* osr_entry = ((Base*)start_block->end())->osr_entry();
   851   BlockBegin* sux_of_osr_entry = NULL;
   852   if (osr_entry != NULL) {
   853     // special handling for osr entry:
   854     // ignore the edge between the osr entry and its successor for processing
   855     // the osr entry block is added manually below
   856     assert(osr_entry->number_of_sux() == 1, "osr entry must have exactly one successor");
   857     assert(osr_entry->sux_at(0)->number_of_preds() >= 2, "successor of osr entry must have two predecessors (otherwise it is not present in normal control flow)");
   859     sux_of_osr_entry = osr_entry->sux_at(0);
   860     dec_forward_branches(sux_of_osr_entry);
   862     compute_dominator(osr_entry, start_block);
   863     _iterative_dominators = true;
   864   }
   865   compute_dominator(std_entry, start_block);
   867   // start processing with standard entry block
   868   assert(_work_list.is_empty(), "list must be empty before processing");
   870   if (ready_for_processing(std_entry)) {
   871     sort_into_work_list(std_entry);
   872   } else {
   873     assert(false, "the std_entry must be ready for processing (otherwise, the method has no start block)");
   874   }
   876   do {
   877     BlockBegin* cur = _work_list.pop();
   879     if (cur == sux_of_osr_entry) {
   880       // the osr entry block is ignored in normal processing, it is never added to the
   881       // work list. Instead, it is added as late as possible manually here.
   882       append_block(osr_entry);
   883       compute_dominator(cur, osr_entry);
   884     }
   885     append_block(cur);
   887     int i;
   888     int num_sux = cur->number_of_sux();
   889     // changed loop order to get "intuitive" order of if- and else-blocks
   890     for (i = 0; i < num_sux; i++) {
   891       BlockBegin* sux = cur->sux_at(i);
   892       compute_dominator(sux, cur);
   893       if (ready_for_processing(sux)) {
   894         sort_into_work_list(sux);
   895       }
   896     }
   897     num_sux = cur->number_of_exception_handlers();
   898     for (i = 0; i < num_sux; i++) {
   899       BlockBegin* sux = cur->exception_handler_at(i);
   900       compute_dominator(sux, cur);
   901       if (ready_for_processing(sux)) {
   902         sort_into_work_list(sux);
   903       }
   904     }
   905   } while (_work_list.length() > 0);
   906 }
   909 bool ComputeLinearScanOrder::compute_dominators_iter() {
   910   bool changed = false;
   911   int num_blocks = _linear_scan_order->length();
   913   assert(_linear_scan_order->at(0)->dominator() == NULL, "must not have dominator");
   914   assert(_linear_scan_order->at(0)->number_of_preds() == 0, "must not have predecessors");
   915   for (int i = 1; i < num_blocks; i++) {
   916     BlockBegin* block = _linear_scan_order->at(i);
   918     BlockBegin* dominator = block->pred_at(0);
   919     int num_preds = block->number_of_preds();
   920     for (int i = 1; i < num_preds; i++) {
   921       dominator = common_dominator(dominator, block->pred_at(i));
   922     }
   924     if (dominator != block->dominator()) {
   925       TRACE_LINEAR_SCAN(4, tty->print_cr("DOM: updating dominator of B%d from B%d to B%d", block->block_id(), block->dominator()->block_id(), dominator->block_id()));
   927       block->set_dominator(dominator);
   928       changed = true;
   929     }
   930   }
   931   return changed;
   932 }
   934 void ComputeLinearScanOrder::compute_dominators() {
   935   TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing dominators (iterative computation required: %d)", _iterative_dominators));
   937   // iterative computation of dominators is only required for methods with non-natural loops
   938   // and OSR-methods. For all other methods, the dominators computed when generating the
   939   // linear scan block order are correct.
   940   if (_iterative_dominators) {
   941     do {
   942       TRACE_LINEAR_SCAN(1, tty->print_cr("DOM: next iteration of fix-point calculation"));
   943     } while (compute_dominators_iter());
   944   }
   946   // check that dominators are correct
   947   assert(!compute_dominators_iter(), "fix point not reached");
   948 }
   951 #ifndef PRODUCT
   952 void ComputeLinearScanOrder::print_blocks() {
   953   if (TraceLinearScanLevel >= 2) {
   954     tty->print_cr("----- loop information:");
   955     for (int block_idx = 0; block_idx < _linear_scan_order->length(); block_idx++) {
   956       BlockBegin* cur = _linear_scan_order->at(block_idx);
   958       tty->print("%4d: B%2d: ", cur->linear_scan_number(), cur->block_id());
   959       for (int loop_idx = 0; loop_idx < _num_loops; loop_idx++) {
   960         tty->print ("%d ", is_block_in_loop(loop_idx, cur));
   961       }
   962       tty->print_cr(" -> loop_index: %2d, loop_depth: %2d", cur->loop_index(), cur->loop_depth());
   963     }
   964   }
   966   if (TraceLinearScanLevel >= 1) {
   967     tty->print_cr("----- linear-scan block order:");
   968     for (int block_idx = 0; block_idx < _linear_scan_order->length(); block_idx++) {
   969       BlockBegin* cur = _linear_scan_order->at(block_idx);
   970       tty->print("%4d: B%2d    loop: %2d  depth: %2d", cur->linear_scan_number(), cur->block_id(), cur->loop_index(), cur->loop_depth());
   972       tty->print(cur->is_set(BlockBegin::exception_entry_flag)         ? " ex" : "   ");
   973       tty->print(cur->is_set(BlockBegin::critical_edge_split_flag)     ? " ce" : "   ");
   974       tty->print(cur->is_set(BlockBegin::linear_scan_loop_header_flag) ? " lh" : "   ");
   975       tty->print(cur->is_set(BlockBegin::linear_scan_loop_end_flag)    ? " le" : "   ");
   977       if (cur->dominator() != NULL) {
   978         tty->print("    dom: B%d ", cur->dominator()->block_id());
   979       } else {
   980         tty->print("    dom: NULL ");
   981       }
   983       if (cur->number_of_preds() > 0) {
   984         tty->print("    preds: ");
   985         for (int j = 0; j < cur->number_of_preds(); j++) {
   986           BlockBegin* pred = cur->pred_at(j);
   987           tty->print("B%d ", pred->block_id());
   988         }
   989       }
   990       if (cur->number_of_sux() > 0) {
   991         tty->print("    sux: ");
   992         for (int j = 0; j < cur->number_of_sux(); j++) {
   993           BlockBegin* sux = cur->sux_at(j);
   994           tty->print("B%d ", sux->block_id());
   995         }
   996       }
   997       if (cur->number_of_exception_handlers() > 0) {
   998         tty->print("    ex: ");
   999         for (int j = 0; j < cur->number_of_exception_handlers(); j++) {
  1000           BlockBegin* ex = cur->exception_handler_at(j);
  1001           tty->print("B%d ", ex->block_id());
  1002         }
  1003       }
  1004       tty->cr();
  1005     }
  1006   }
  1007 }
  1008 #endif
  1010 #ifdef ASSERT
  1011 void ComputeLinearScanOrder::verify() {
  1012   assert(_linear_scan_order->length() == _num_blocks, "wrong number of blocks in list");
  1014   if (StressLinearScan) {
  1015     // blocks are scrambled when StressLinearScan is used
  1016     return;
  1017   }
  1019   // check that all successors of a block have a higher linear-scan-number
  1020   // and that all predecessors of a block have a lower linear-scan-number
  1021   // (only backward branches of loops are ignored)
  1022   int i;
  1023   for (i = 0; i < _linear_scan_order->length(); i++) {
  1024     BlockBegin* cur = _linear_scan_order->at(i);
  1026     assert(cur->linear_scan_number() == i, "incorrect linear_scan_number");
  1027     assert(cur->linear_scan_number() >= 0 && cur->linear_scan_number() == _linear_scan_order->index_of(cur), "incorrect linear_scan_number");
  1029     int j;
  1030     for (j = cur->number_of_sux() - 1; j >= 0; j--) {
  1031       BlockBegin* sux = cur->sux_at(j);
  1033       assert(sux->linear_scan_number() >= 0 && sux->linear_scan_number() == _linear_scan_order->index_of(sux), "incorrect linear_scan_number");
  1034       if (!cur->is_set(BlockBegin::linear_scan_loop_end_flag)) {
  1035         assert(cur->linear_scan_number() < sux->linear_scan_number(), "invalid order");
  1036       }
  1037       if (cur->loop_depth() == sux->loop_depth()) {
  1038         assert(cur->loop_index() == sux->loop_index() || sux->is_set(BlockBegin::linear_scan_loop_header_flag), "successing blocks with same loop depth must have same loop index");
  1039       }
  1040     }
  1042     for (j = cur->number_of_preds() - 1; j >= 0; j--) {
  1043       BlockBegin* pred = cur->pred_at(j);
  1045       assert(pred->linear_scan_number() >= 0 && pred->linear_scan_number() == _linear_scan_order->index_of(pred), "incorrect linear_scan_number");
  1046       if (!cur->is_set(BlockBegin::linear_scan_loop_header_flag)) {
  1047         assert(cur->linear_scan_number() > pred->linear_scan_number(), "invalid order");
  1048       }
  1049       if (cur->loop_depth() == pred->loop_depth()) {
  1050         assert(cur->loop_index() == pred->loop_index() || cur->is_set(BlockBegin::linear_scan_loop_header_flag), "successing blocks with same loop depth must have same loop index");
  1051       }
  1053       assert(cur->dominator()->linear_scan_number() <= cur->pred_at(j)->linear_scan_number(), "dominator must be before predecessors");
  1054     }
  1056     // check dominator
  1057     if (i == 0) {
  1058       assert(cur->dominator() == NULL, "first block has no dominator");
  1059     } else {
  1060       assert(cur->dominator() != NULL, "all but first block must have dominator");
  1061     }
  1062     assert(cur->number_of_preds() != 1 || cur->dominator() == cur->pred_at(0), "Single predecessor must also be dominator");
  1063   }
  1065   // check that all loops are continuous
  1066   for (int loop_idx = 0; loop_idx < _num_loops; loop_idx++) {
  1067     int block_idx = 0;
  1068     assert(!is_block_in_loop(loop_idx, _linear_scan_order->at(block_idx)), "the first block must not be present in any loop");
  1070     // skip blocks before the loop
  1071     while (block_idx < _num_blocks && !is_block_in_loop(loop_idx, _linear_scan_order->at(block_idx))) {
  1072       block_idx++;
  1073     }
  1074     // skip blocks of loop
  1075     while (block_idx < _num_blocks && is_block_in_loop(loop_idx, _linear_scan_order->at(block_idx))) {
  1076       block_idx++;
  1077     }
  1078     // after the first non-loop block, there must not be another loop-block
  1079     while (block_idx < _num_blocks) {
  1080       assert(!is_block_in_loop(loop_idx, _linear_scan_order->at(block_idx)), "loop not continuous in linear-scan order");
  1081       block_idx++;
  1082     }
  1083   }
  1084 }
  1085 #endif
  1088 void IR::compute_code() {
  1089   assert(is_valid(), "IR must be valid");
  1091   ComputeLinearScanOrder compute_order(compilation(), start());
  1092   _num_loops = compute_order.num_loops();
  1093   _code = compute_order.linear_scan_order();
  1094 }
  1097 void IR::compute_use_counts() {
  1098   // make sure all values coming out of this block get evaluated.
  1099   int num_blocks = _code->length();
  1100   for (int i = 0; i < num_blocks; i++) {
  1101     _code->at(i)->end()->state()->pin_stack_for_linear_scan();
  1102   }
  1104   // compute use counts
  1105   UseCountComputer::compute(_code);
  1106 }
  1109 void IR::iterate_preorder(BlockClosure* closure) {
  1110   assert(is_valid(), "IR must be valid");
  1111   start()->iterate_preorder(closure);
  1112 }
  1115 void IR::iterate_postorder(BlockClosure* closure) {
  1116   assert(is_valid(), "IR must be valid");
  1117   start()->iterate_postorder(closure);
  1118 }
  1120 void IR::iterate_linear_scan_order(BlockClosure* closure) {
  1121   linear_scan_order()->iterate_forward(closure);
  1122 }
  1125 #ifndef PRODUCT
  1126 class BlockPrinter: public BlockClosure {
  1127  private:
  1128   InstructionPrinter* _ip;
  1129   bool                _cfg_only;
  1130   bool                _live_only;
  1132  public:
  1133   BlockPrinter(InstructionPrinter* ip, bool cfg_only, bool live_only = false) {
  1134     _ip       = ip;
  1135     _cfg_only = cfg_only;
  1136     _live_only = live_only;
  1137   }
  1139   virtual void block_do(BlockBegin* block) {
  1140     if (_cfg_only) {
  1141       _ip->print_instr(block); tty->cr();
  1142     } else {
  1143       block->print_block(*_ip, _live_only);
  1144     }
  1145   }
  1146 };
  1149 void IR::print(BlockBegin* start, bool cfg_only, bool live_only) {
  1150   ttyLocker ttyl;
  1151   InstructionPrinter ip(!cfg_only);
  1152   BlockPrinter bp(&ip, cfg_only, live_only);
  1153   start->iterate_preorder(&bp);
  1154   tty->cr();
  1155 }
  1157 void IR::print(bool cfg_only, bool live_only) {
  1158   if (is_valid()) {
  1159     print(start(), cfg_only, live_only);
  1160   } else {
  1161     tty->print_cr("invalid IR");
  1162   }
  1163 }
  1166 define_array(BlockListArray, BlockList*)
  1167 define_stack(BlockListList, BlockListArray)
  1169 class PredecessorValidator : public BlockClosure {
  1170  private:
  1171   BlockListList* _predecessors;
  1172   BlockList*     _blocks;
  1174   static int cmp(BlockBegin** a, BlockBegin** b) {
  1175     return (*a)->block_id() - (*b)->block_id();
  1176   }
  1178  public:
  1179   PredecessorValidator(IR* hir) {
  1180     ResourceMark rm;
  1181     _predecessors = new BlockListList(BlockBegin::number_of_blocks(), NULL);
  1182     _blocks = new BlockList();
  1184     int i;
  1185     hir->start()->iterate_preorder(this);
  1186     if (hir->code() != NULL) {
  1187       assert(hir->code()->length() == _blocks->length(), "must match");
  1188       for (i = 0; i < _blocks->length(); i++) {
  1189         assert(hir->code()->contains(_blocks->at(i)), "should be in both lists");
  1190       }
  1191     }
  1193     for (i = 0; i < _blocks->length(); i++) {
  1194       BlockBegin* block = _blocks->at(i);
  1195       BlockList* preds = _predecessors->at(block->block_id());
  1196       if (preds == NULL) {
  1197         assert(block->number_of_preds() == 0, "should be the same");
  1198         continue;
  1199       }
  1201       // clone the pred list so we can mutate it
  1202       BlockList* pred_copy = new BlockList();
  1203       int j;
  1204       for (j = 0; j < block->number_of_preds(); j++) {
  1205         pred_copy->append(block->pred_at(j));
  1206       }
  1207       // sort them in the same order
  1208       preds->sort(cmp);
  1209       pred_copy->sort(cmp);
  1210       int length = MIN2(preds->length(), block->number_of_preds());
  1211       for (j = 0; j < block->number_of_preds(); j++) {
  1212         assert(preds->at(j) == pred_copy->at(j), "must match");
  1213       }
  1215       assert(preds->length() == block->number_of_preds(), "should be the same");
  1216     }
  1217   }
  1219   virtual void block_do(BlockBegin* block) {
  1220     _blocks->append(block);
  1221     BlockEnd* be = block->end();
  1222     int n = be->number_of_sux();
  1223     int i;
  1224     for (i = 0; i < n; i++) {
  1225       BlockBegin* sux = be->sux_at(i);
  1226       assert(!sux->is_set(BlockBegin::exception_entry_flag), "must not be xhandler");
  1228       BlockList* preds = _predecessors->at_grow(sux->block_id(), NULL);
  1229       if (preds == NULL) {
  1230         preds = new BlockList();
  1231         _predecessors->at_put(sux->block_id(), preds);
  1232       }
  1233       preds->append(block);
  1234     }
  1236     n = block->number_of_exception_handlers();
  1237     for (i = 0; i < n; i++) {
  1238       BlockBegin* sux = block->exception_handler_at(i);
  1239       assert(sux->is_set(BlockBegin::exception_entry_flag), "must be xhandler");
  1241       BlockList* preds = _predecessors->at_grow(sux->block_id(), NULL);
  1242       if (preds == NULL) {
  1243         preds = new BlockList();
  1244         _predecessors->at_put(sux->block_id(), preds);
  1245       }
  1246       preds->append(block);
  1247     }
  1248   }
  1249 };
  1251 void IR::verify() {
  1252 #ifdef ASSERT
  1253   PredecessorValidator pv(this);
  1254 #endif
  1255 }
  1257 #endif // PRODUCT
  1259 void SubstitutionResolver::visit(Value* v) {
  1260   Value v0 = *v;
  1261   if (v0) {
  1262     Value vs = v0->subst();
  1263     if (vs != v0) {
  1264       *v = v0->subst();
  1265     }
  1266   }
  1267 }
  1269 #ifdef ASSERT
  1270 class SubstitutionChecker: public ValueVisitor {
  1271   void visit(Value* v) {
  1272     Value v0 = *v;
  1273     if (v0) {
  1274       Value vs = v0->subst();
  1275       assert(vs == v0, "missed substitution");
  1276     }
  1277   }
  1278 };
  1279 #endif
  1282 void SubstitutionResolver::block_do(BlockBegin* block) {
  1283   Instruction* last = NULL;
  1284   for (Instruction* n = block; n != NULL;) {
  1285     n->values_do(this);
  1286     // need to remove this instruction from the instruction stream
  1287     if (n->subst() != n) {
  1288       assert(last != NULL, "must have last");
  1289       last->set_next(n->next());
  1290     } else {
  1291       last = n;
  1292     }
  1293     n = last->next();
  1294   }
  1296 #ifdef ASSERT
  1297   SubstitutionChecker check_substitute;
  1298   if (block->state()) block->state()->values_do(&check_substitute);
  1299   block->block_values_do(&check_substitute);
  1300   if (block->end() && block->end()->state()) block->end()->state()->values_do(&check_substitute);
  1301 #endif
  1302 }
