src/share/vm/opto/parse3.cpp

author:      kvn
date:        Wed, 20 Apr 2011 18:29:35 -0700
changeset:   2810:66b0e2371912
parent:      2658:c7f3d0b4570f
child:       3002:263247c478c5
permissions: -rw-r--r--

7026700: regression in 6u24-rev-b23: Crash in C2 compiler in PhaseIdealLoop::build_loop_late_post
Summary: memory slices should always be created for non-static fields after allocation
Reviewed-by: never

/*
 * Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "opto/addnode.hpp"
#include "opto/memnode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"

//=============================================================================
// Helper methods for _get* and _put* bytecodes
//=============================================================================
bool Parse::static_field_ok_in_clinit(ciField *field, ciMethod *method) {
  // Could be the field_holder's <clinit> method, or <clinit> for a subklass.
  // Better to check now than to Deoptimize as soon as we execute
  assert( field->is_static(), "Only check if field is static");
  // is_being_initialized() is too generous.  It allows access to statics
  // by threads that are not running the <clinit> before the <clinit> finishes.
  // return field->holder()->is_being_initialized();

  // The following restriction is correct but conservative.
  // It is also desirable to allow compilation of methods called from <clinit>
  // but this generated code will need to be made safe for execution by
  // other threads, or the transition from interpreted to compiled code would
  // need to be guarded.
  ciInstanceKlass *field_holder = field->holder();

  bool access_OK = false;
  if (method->holder()->is_subclass_of(field_holder)) {
    if (method->is_static()) {
      if (method->name() == ciSymbol::class_initializer_name()) {
        // OK to access static fields inside initializer
        access_OK = true;
      }
    } else {
      if (method->name() == ciSymbol::object_initializer_name()) {
        // It's also OK to access static fields inside a constructor,
        // because any thread calling the constructor must first have
        // synchronized on the class by executing a '_new' bytecode.
        access_OK = true;
      }
    }
  }

  return access_OK;
}
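
// Illustrative example of the rule above: if class C has not finished
// initialization, accesses to C.x from C's own initializers still compile
// without a trap, because the accessing method's holder (C) is a (reflexive)
// subclass of the field holder and the method is either <clinit> or <init>:
//
//   class C {
//     static int x;
//     static { x = 42; }   // <clinit>: static_field_ok_in_clinit() -> true
//     C()    { x++; }      // <init>:   static_field_ok_in_clinit() -> true
//   }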

void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  if (!is_field && !field_holder->is_initialized()) {
    if (!static_field_ok_in_clinit(field, method())) {
      uncommon_trap(Deoptimization::Reason_uninitialized,
                    Deoptimization::Action_reinterpret,
                    NULL, "!static_field_ok_in_clinit");
      return;
    }
  }

  assert(field->will_link(method()->holder(), bc()), "getfield: typeflow responsibility");

  // Note:  We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = do_null_check(peek(obj_depth), T_OBJECT);
    // Compile-time detection of null-exception?
    if (stopped())  return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      --_sp;  // pop receiver before getting
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
      --_sp;  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}
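
// Stack shapes seen by the instance-field paths above (top of stack on the
// right; illustrative):
//   getfield:  ..., obj           -> receiver at peek(0)
//   putfield:  ..., obj, value    -> receiver at peek(field->type()->size())
// A long or double value occupies two slots, so for those putfields the
// receiver sits at depth 2; that is why obj_depth uses the field type's
// size rather than a constant.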

void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant()) {
    if (field->is_static()) {
      // final static field
      if (push_constant(field->constant_value()))
        return;
    }
    else {
      // final non-static field of a trusted class (classes in
      // java.lang.invoke and sun.invoke packages and subpackages).
      if (obj->is_Con()) {
        const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
        ciObject* constant_oop = oop_ptr->const_oop();
        ciConstant constant = field->constant_value_of(constant_oop);

        if (push_constant(constant, true))
          return;
      }
    }
  }

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();

  // Build the resultant type of the load
  const Type *type;

  bool must_assert_null = false;

  if( bt == T_OBJECT ) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_constant() && field->is_static()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
  // Build the load.
  Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol);

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
#endif
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    do_null_assert(peek(), T_OBJECT);
    set_bci(iter().cur_bci()); // put it back
  }

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (field->is_volatile()) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    insert_mem_bar(Op_MemBarAcquire, ld);
  }
}
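
// The MemBarAcquire above is what gives a volatile getfield its JMM acquire
// semantics: loads and stores that follow the volatile read may not be
// reordered above it. Illustrative Java shape:
//
//   volatile boolean ready;
//   int data;
//   ...
//   if (ready) {      // volatile load, followed by MemBarAcquire
//     use(data);      // this load cannot float above the read of 'ready'
//   }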

void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) {
  bool is_vol = field->is_volatile();
  // If reference is volatile, prevent following memory ops from
  // floating down past the volatile write.  Also prevents commoning
  // another volatile read.
  if (is_vol)  insert_mem_bar(Op_MemBarRelease);

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node* adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();
  // Value to be stored
  Node* val = type2size[bt] == 1 ? pop() : pop_pair();
  // Round doubles before storing
  if (bt == T_DOUBLE)  val = dstore_rounding(val);

  // Store the value.
  Node* store;
  if (bt == T_OBJECT) {
    const TypeOopPtr* field_type;
    if (!field->type()->is_loaded()) {
      field_type = TypeInstPtr::BOTTOM;
    } else {
      field_type = TypeOopPtr::make_from_klass(field->type()->as_klass());
    }
    store = store_oop_to_object( control(), obj, adr, adr_type, val, field_type, bt);
  } else {
    store = store_to_memory( control(), adr, val, bt, adr_type, is_vol );
  }

  // If reference is volatile, prevent following volatile ops from
  // floating up before the volatile write.
  if (is_vol) {
    // First place the specific membar for THIS volatile index. This first
    // membar is dependent on the store, keeping any other membars generated
    // below from floating up past the store.
    int adr_idx = C->get_alias_index(adr_type);
    insert_mem_bar_volatile(Op_MemBarVolatile, adr_idx, store);

    // Now place a membar for AliasIdxBot for the unknown yet-to-be-parsed
    // volatile alias indices. Skip this if the membar is redundant.
    if (adr_idx != Compile::AliasIdxBot) {
      insert_mem_bar_volatile(Op_MemBarVolatile, Compile::AliasIdxBot, store);
    }

    // Finally, place alias-index-specific membars for each volatile index
    // that isn't the adr_idx membar. Typically there's only 1 or 2.
    for( int i = Compile::AliasIdxRaw; i < C->num_alias_types(); i++ ) {
      if (i != adr_idx && C->alias_type(i)->is_volatile()) {
        insert_mem_bar_volatile(Op_MemBarVolatile, i, store);
      }
    }
  }

  // If the field is final, the rules of Java say we are in <init> or <clinit>.
  // Note the presence of writes to final non-static fields, so that we
  // can insert a memory barrier later on to keep the writes from floating
  // out of the constructor.
  if (is_field && field->is_final()) {
    set_wrote_final(true);
  }
}
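
// For a volatile putfield the code above brackets the store roughly like
// this (sketch of the emitted IR shape):
//
//   MemBarRelease                        // before the store
//   StoreX <field>
//   MemBarVolatile (field's alias idx)   // pinned to the store
//   MemBarVolatile (AliasIdxBot)         // unless redundant
//   MemBarVolatile (other volatile alias indices; typically 1 or 2)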

bool Parse::push_constant(ciConstant constant, bool require_constant) {
  switch (constant.basic_type()) {
  case T_BOOLEAN:  push( intcon(constant.as_boolean()) ); break;
  case T_INT:      push( intcon(constant.as_int())     ); break;
  case T_CHAR:     push( intcon(constant.as_char())    ); break;
  case T_BYTE:     push( intcon(constant.as_byte())    ); break;
  case T_SHORT:    push( intcon(constant.as_short())   ); break;
  case T_FLOAT:    push( makecon(TypeF::make(constant.as_float())) );  break;
  case T_DOUBLE:   push_pair( makecon(TypeD::make(constant.as_double())) );  break;
  case T_LONG:     push_pair( longcon(constant.as_long()) ); break;
  case T_ARRAY:
  case T_OBJECT: {
    // cases:
    //   can_be_constant    = (oop not scavengable || ScavengeRootsInCode != 0)
    //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
    // An oop is not scavengable if it is in the perm gen.
    ciObject* oop_constant = constant.as_object();
    if (oop_constant->is_null_object()) {
      push( zerocon(T_OBJECT) );
      break;
    } else if (require_constant || oop_constant->should_be_constant()) {
      push( makecon(TypeOopPtr::make_from_constant(oop_constant, require_constant)) );
      break;
    } else {
      // we cannot inline the oop, but we can use it later to narrow a type
      return false;
    }
  }
  case T_ILLEGAL: {
    // Invalid ciConstant returned due to OutOfMemoryError in the CI
    assert(C->env()->failing(), "otherwise should not see this");
    // These always occur because of object types; we are going to
    // bail out anyway, so make the stack depths match up
    push( zerocon(T_OBJECT) );
    return false;
  }
  default:
    ShouldNotReachHere();
    return false;
  }

  // success
  return true;
}
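
// Note on the T_OBJECT/T_ARRAY case above: require_constant == true (passed
// by do_get_xxx for final fields of trusted classes) forces the oop to be
// embedded even when it is scavengable; otherwise a scavengable oop makes
// push_constant() return false and the caller emits an ordinary load instead.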

//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon Trap when the class that the array contains is not loaded;
  // we need the loaded class for the rest of the graph. Do not
  // initialize the container class (see Java spec)!!!
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}

void Parse::do_newarray(BasicType elem_type) {
  kill_dead_locals();

  Node*   count_val = pop();
  const TypeKlassPtr* array_klass = TypeKlassPtr::make(ciTypeArrayKlass::make(elem_type));
  Node*   obj = new_array(makecon(array_klass), count_val, 1);
  // Push resultant oop onto stack
  push(obj);
}
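
// Both helpers above compile a single allocation: do_anewarray handles the
// anewarray bytecode (reference arrays, e.g. new String[n]) and do_newarray
// handles the newarray bytecode (primitive arrays, e.g. new int[n]). Each
// pops one length from the stack and pushes the new array oop.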

// Expand simple expressions like new int[3][5] and new Object[2][nonConLen].
// Also handle the degenerate 1-dimensional case of anewarray.
Node* Parse::expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs) {
  Node* length = lengths[0];
  assert(length != NULL, "");
  Node* array = new_array(makecon(TypeKlassPtr::make(array_klass)), length, nargs);
  if (ndimensions > 1) {
    jint length_con = find_int_con(length, -1);
    guarantee(length_con >= 0, "non-constant multianewarray");
    ciArrayKlass* array_klass_1 = array_klass->as_obj_array_klass()->element_klass()->as_array_klass();
    const TypePtr* adr_type = TypeAryPtr::OOPS;
    const TypeOopPtr*    elemtype = _gvn.type(array)->is_aryptr()->elem()->make_oopptr();
    const intptr_t header   = arrayOopDesc::base_offset_in_bytes(T_OBJECT);
    for (jint i = 0; i < length_con; i++) {
      Node*    elem   = expand_multianewarray(array_klass_1, &lengths[1], ndimensions-1, nargs);
      intptr_t offset = header + ((intptr_t)i << LogBytesPerHeapOop);
      Node*    eaddr  = basic_plus_adr(array, offset);
      store_oop_to_array(control(), array, eaddr, adr_type, elem, elemtype, T_OBJECT);
    }
  }
  return array;
}
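
// Illustrative expansion: 'new String[2][5]' reaches here with ndimensions
// == 2 and a constant outer length, and becomes one length-2 String[][]
// allocation plus two length-5 String[] allocations, each sub-array stored
// at header + (i << LogBytesPerHeapOop) -- three new_array calls and no
// runtime call.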

void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note:  Array classes are always initialized; no is_initialized check.

  enum { MAX_DIMENSION = 5 };
  if (ndimensions > MAX_DIMENSION || ndimensions <= 0) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node* length[MAX_DIMENSION+1];
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0 ; j--) length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const jint expand_limit = MIN2((juint)MultiArrayExpandLimit, (juint)100);
  jint expand_count = 1;        // count of allocations in the expansion
  jint expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    jint dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }
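
  // Worked example for the loop above (illustrative): for
  // 'new int[2][3][len]' with a non-constant len, j=0 gives dim_con=2
  // (fanout 2, count 3) and j=1 gives dim_con=3 (fanout 6, count 9); the
  // final dimension is never examined. The request is expanded only if
  // expand_limit (MultiArrayExpandLimit, capped at 100) is at least 9.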

  // Can expand multianewarray into a series of [a]newarray allocations
  // if there is only one dimension, or if all non-final dimensions are
  // small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = NULL;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for a one-dimensional multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when AllocateArray node for newarray is created.
    { PreserveReexecuteState preexecs(this);
      _sp += ndimensions;
      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } //original reexecute and sp are set back here
    push(obj);
    return;
  }

  address fun = NULL;
  switch (ndimensions) {
  //case 1: Actually, there is no case 1.  It's handled by new_array.
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  default: ShouldNotReachHere();
  }

  Node* c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                              OptoRuntime::multianewarray_Type(ndimensions),
                              fun, NULL, TypeRawPtr::BOTTOM,
                              makecon(TypeKlassPtr::make(array_klass)),
                              length[0], length[1], length[2],
                              length[3], length[4]);
  Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type:  We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.

  Node* cast = _gvn.transform( new (C, 2) CheckCastPPNode(control(), res, type) );
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}
