Merge

author      kvn
date        Tue, 07 Jul 2009 09:54:06 -0700
changeset   1272:f0bd02f95856
parent      1267:e7d5557ad624
parent      1271:4325cdaa78ad
child       1274:bb18957ad21e
child       1284:83906a156fc0

     1.1 --- a/agent/src/share/classes/sun/jvm/hotspot/ui/tree/OopTreeNodeAdapter.java	Thu Jul 02 16:28:15 2009 -0700
     1.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/ui/tree/OopTreeNodeAdapter.java	Tue Jul 07 09:54:06 2009 -0700
     1.3 @@ -161,6 +161,8 @@
     1.4            child = new OopTreeNodeAdapter(field.getValue(getObj()), field.getID(), getTreeTableMode());
     1.5          } catch (AddressException e) {
     1.6            child = new BadOopTreeNodeAdapter(field.getValueAsOopHandle(getObj()), field.getID(), getTreeTableMode());
     1.7 +        } catch (UnknownOopException e) {
     1.8 +          child = new BadOopTreeNodeAdapter(field.getValueAsOopHandle(getObj()), field.getID(), getTreeTableMode());
     1.9          }
    1.10        }
    1.11        ++curField;
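
The hunk above makes the SA object-tree viewer tolerate a second failure mode: an UnknownOopException raised while reading a field now falls back to the same BadOopTreeNodeAdapter placeholder that an AddressException already did, instead of propagating and breaking the tree build. Below is a minimal, self-contained Java sketch of that fallback pattern; the exception and node classes are hypothetical stand-ins, not the actual SA types.

    // Illustration only: hypothetical stand-ins for the SA classes.
    class AddressException extends RuntimeException {}
    class UnknownOopException extends RuntimeException {}

    interface TreeNode {}
    class OopNode implements TreeNode { OopNode(Object value) {} }
    class BadOopNode implements TreeNode { BadOopNode(Object rawHandle) {} }

    interface FieldReader { Object read(); }

    public class FallbackDemo {
        // Mirrors the patched pattern: each failure mode yields the same
        // placeholder node instead of aborting the whole tree.
        static TreeNode childFor(FieldReader reader, Object rawHandle) {
            try {
                return new OopNode(reader.read());
            } catch (AddressException e) {
                return new BadOopNode(rawHandle);
            } catch (UnknownOopException e) {
                return new BadOopNode(rawHandle);
            }
        }

        public static void main(String[] args) {
            TreeNode ok = childFor(new FieldReader() {
                public Object read() { return "field value"; }
            }, "0x0");
            TreeNode bad = childFor(new FieldReader() {
                public Object read() { throw new UnknownOopException(); }
            }, "0xdeadbeef");
            System.out.println(ok.getClass().getSimpleName());   // prints OopNode
            System.out.println(bad.getClass().getSimpleName());  // prints BadOopNode
        }
    }
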
     2.1 --- a/make/solaris/makefiles/optimized.make	Thu Jul 02 16:28:15 2009 -0700
     2.2 +++ b/make/solaris/makefiles/optimized.make	Tue Jul 07 09:54:06 2009 -0700
     2.3 @@ -41,7 +41,7 @@
     2.4  endif
     2.5  
     2.6  # Workaround SS11 bug 6345274 (all platforms) (Fixed in SS11 patch and SS12)
     2.7 -ifeq ($(COMPILER_REV_NUMERIC),508))
     2.8 +ifeq ($(COMPILER_REV_NUMERIC),508)
     2.9  OPT_CFLAGS/ciTypeFlow.o = $(OPT_CFLAGS/O2)
    2.10  endif # COMPILER_REV_NUMERIC == 508
    2.11  
     3.1 --- a/src/share/vm/opto/block.cpp	Thu Jul 02 16:28:15 2009 -0700
     3.2 +++ b/src/share/vm/opto/block.cpp	Tue Jul 07 09:54:06 2009 -0700
     3.3 @@ -357,6 +357,9 @@
     3.4  #ifndef PRODUCT
     3.5    , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
     3.6  #endif
     3.7 +#ifdef ASSERT
     3.8 +  , _raw_oops(a)
     3.9 +#endif
    3.10  {
    3.11    ResourceMark rm;
    3.12    // I'll need a few machine-specific GotoNodes.  Make an Ideal GotoNode,
     4.1 --- a/src/share/vm/opto/block.hpp	Thu Jul 02 16:28:15 2009 -0700
     4.2 +++ b/src/share/vm/opto/block.hpp	Tue Jul 07 09:54:06 2009 -0700
     4.3 @@ -380,6 +380,10 @@
     4.4    bool _trace_opto_pipelining;  // tracing flag
     4.5  #endif
     4.6  
     4.7 +#ifdef ASSERT
     4.8 +  Unique_Node_List _raw_oops;
     4.9 +#endif
    4.10 +
    4.11    // Build dominators
    4.12    void Dominators();
    4.13  
     5.1 --- a/src/share/vm/opto/buildOopMap.cpp	Thu Jul 02 16:28:15 2009 -0700
     5.2 +++ b/src/share/vm/opto/buildOopMap.cpp	Tue Jul 07 09:54:06 2009 -0700
     5.3 @@ -74,9 +74,11 @@
     5.4    // this block.
     5.5    Block *_b;                    // Block for this struct
     5.6    OopFlow *_next;               // Next free OopFlow
     5.7 +                                // or NULL if dead/conflict
     5.8 +  Compile* C;
     5.9  
    5.10 -  OopFlow( short *callees, Node **defs ) : _callees(callees), _defs(defs),
    5.11 -    _b(NULL), _next(NULL) { }
    5.12 +  OopFlow( short *callees, Node **defs, Compile* c ) : _callees(callees), _defs(defs),
    5.13 +    _b(NULL), _next(NULL), C(c) { }
    5.14  
    5.15    // Given reaching-defs for this block start, compute it for this block end
    5.16    void compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash );
    5.17 @@ -88,7 +90,7 @@
    5.18    void clone( OopFlow *flow, int max_size);
    5.19  
    5.20    // Make a new OopFlow from scratch
    5.21 -  static OopFlow *make( Arena *A, int max_size );
    5.22 +  static OopFlow *make( Arena *A, int max_size, Compile* C );
    5.23  
    5.24    // Build an oopmap from the current flow info
    5.25    OopMap *build_oop_map( Node *n, int max_reg, PhaseRegAlloc *regalloc, int* live );
    5.26 @@ -180,11 +182,11 @@
    5.27  }
    5.28  
    5.29  //------------------------------make-------------------------------------------
    5.30 -OopFlow *OopFlow::make( Arena *A, int max_size ) {
    5.31 +OopFlow *OopFlow::make( Arena *A, int max_size, Compile* C ) {
    5.32    short *callees = NEW_ARENA_ARRAY(A,short,max_size+1);
    5.33    Node **defs    = NEW_ARENA_ARRAY(A,Node*,max_size+1);
    5.34    debug_only( memset(defs,0,(max_size+1)*sizeof(Node*)) );
    5.35 -  OopFlow *flow = new (A) OopFlow(callees+1, defs+1);
    5.36 +  OopFlow *flow = new (A) OopFlow(callees+1, defs+1, C);
    5.37    assert( &flow->_callees[OptoReg::Bad] == callees, "Ok to index at OptoReg::Bad" );
    5.38    assert( &flow->_defs   [OptoReg::Bad] == defs   , "Ok to index at OptoReg::Bad" );
    5.39    return flow;
    5.40 @@ -288,7 +290,7 @@
    5.41                m = m->in(idx);
    5.42              }
    5.43            }
    5.44 -         guarantee( 0, "must find derived/base pair" );
    5.45 +          guarantee( 0, "must find derived/base pair" );
    5.46          }
    5.47        found: ;
    5.48          Node *base = n->in(i+1); // Base is other half of pair
    5.49 @@ -347,6 +349,13 @@
    5.50      } else {
    5.51        // Other - some reaching non-oop value
    5.52        omap->set_value( r);
    5.53 +#ifdef ASSERT
    5.54 +      if( t->isa_rawptr() && C->cfg()->_raw_oops.member(def) ) {
    5.55 +        def->dump();
    5.56 +        n->dump();
     5.57 +        assert(false, "there should be an oop in OopMap instead of a live raw oop at safepoint");
    5.58 +      }
    5.59 +#endif
    5.60      }
    5.61  
    5.62    }
    5.63 @@ -562,7 +571,7 @@
    5.64  
    5.65    // Do the first block 'by hand' to prime the worklist
    5.66    Block *entry = _cfg->_blocks[1];
    5.67 -  OopFlow *rootflow = OopFlow::make(A,max_reg);
    5.68 +  OopFlow *rootflow = OopFlow::make(A,max_reg,this);
    5.69    // Initialize to 'bottom' (not 'top')
    5.70    memset( rootflow->_callees, OptoReg::Bad, max_reg*sizeof(short) );
    5.71    memset( rootflow->_defs   ,            0, max_reg*sizeof(Node*) );
    5.72 @@ -628,7 +637,7 @@
    5.73        // Carry it forward.
    5.74      } else {                    // Draw a new OopFlow from the freelist
    5.75        if( !free_list )
    5.76 -        free_list = OopFlow::make(A,max_reg);
    5.77 +        free_list = OopFlow::make(A,max_reg,C);
    5.78        flow = free_list;
    5.79        assert( flow->_b == NULL, "oopFlow is not free" );
    5.80        free_list = flow->_next;
     6.1 --- a/src/share/vm/opto/gcm.cpp	Thu Jul 02 16:28:15 2009 -0700
     6.2 +++ b/src/share/vm/opto/gcm.cpp	Tue Jul 07 09:54:06 2009 -0700
     6.3 @@ -1130,6 +1130,9 @@
     6.4          Node *def = self->in(1);
     6.5          if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
     6.6            early->add_inst(self);
     6.7 +#ifdef ASSERT
     6.8 +          _raw_oops.push(def);
     6.9 +#endif
    6.10            continue;
    6.11          }
    6.12          break;
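
The block.hpp/block.cpp, gcm.cpp and buildOopMap.cpp hunks together add a debug-only cross-check: under ASSERT, gcm.cpp pushes each raw-pointer def it pins onto the new _raw_oops list, and buildOopMap.cpp later asserts that none of those values is still live as a plain raw value at a safepoint, i.e. that the OopMap describes it as an oop. The Java sketch below shows the same record-then-verify bookkeeping with made-up names; it is an analogy for the pattern, not HotSpot code.

    import java.util.HashSet;
    import java.util.Set;

    // Analogy only: record suspect values in a debug-only set during one phase,
    // then verify at a later checkpoint that none of them leaked through.
    public class RecordThenVerify {
        private static final boolean DEBUG = true;   // stands in for #ifdef ASSERT

        private final Set<Object> rawValues = new HashSet<Object>();

        void recordRaw(Object def) {                 // called while pinning raw defs
            if (DEBUG) rawValues.add(def);
        }

        void checkAtSafepoint(Object liveValue) {    // called while building the "map"
            if (DEBUG && rawValues.contains(liveValue)) {
                throw new AssertionError("raw value still live at safepoint: " + liveValue);
            }
        }

        public static void main(String[] args) {
            RecordThenVerify v = new RecordThenVerify();
            Object raw = new Object();
            v.recordRaw(raw);
            v.checkAtSafepoint(new Object());   // fine: not a recorded raw value
            try {
                v.checkAtSafepoint(raw);        // trips the debug check
            } catch (AssertionError e) {
                System.out.println("caught: " + e.getMessage());
            }
        }
    }
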
     7.1 --- a/src/share/vm/opto/library_call.cpp	Thu Jul 02 16:28:15 2009 -0700
     7.2 +++ b/src/share/vm/opto/library_call.cpp	Tue Jul 07 09:54:06 2009 -0700
     7.3 @@ -165,6 +165,7 @@
     7.4    bool inline_native_getLength();
     7.5    bool inline_array_copyOf(bool is_copyOfRange);
     7.6    bool inline_array_equals();
     7.7 +  void copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark);
     7.8    bool inline_native_clone(bool is_virtual);
     7.9    bool inline_native_Reflection_getCallerClass();
    7.10    bool inline_native_AtomicLong_get();
    7.11 @@ -181,7 +182,6 @@
    7.12                            Node* src,  Node* src_offset,
    7.13                            Node* dest, Node* dest_offset,
    7.14                            Node* copy_length,
    7.15 -                          int nargs,  // arguments on stack for debug info
    7.16                            bool disjoint_bases = false,
    7.17                            bool length_never_negative = false,
    7.18                            RegionNode* slow_region = NULL);
    7.19 @@ -202,17 +202,16 @@
    7.20    void generate_slow_arraycopy(const TypePtr* adr_type,
    7.21                                 Node* src,  Node* src_offset,
    7.22                                 Node* dest, Node* dest_offset,
    7.23 -                               Node* copy_length,
    7.24 -                               int nargs);
    7.25 +                               Node* copy_length);
    7.26    Node* generate_checkcast_arraycopy(const TypePtr* adr_type,
    7.27                                       Node* dest_elem_klass,
    7.28                                       Node* src,  Node* src_offset,
    7.29                                       Node* dest, Node* dest_offset,
    7.30 -                                     Node* copy_length, int nargs);
    7.31 +                                     Node* copy_length);
    7.32    Node* generate_generic_arraycopy(const TypePtr* adr_type,
    7.33                                     Node* src,  Node* src_offset,
    7.34                                     Node* dest, Node* dest_offset,
    7.35 -                                   Node* copy_length, int nargs);
    7.36 +                                   Node* copy_length);
    7.37    void generate_unchecked_arraycopy(const TypePtr* adr_type,
    7.38                                      BasicType basic_elem_type,
    7.39                                      bool disjoint_bases,
    7.40 @@ -3229,7 +3228,8 @@
    7.41      Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) );
    7.42      Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
    7.43  
    7.44 -    Node* newcopy = new_array(klass_node, length, nargs);
    7.45 +    const bool raw_mem_only = true;
    7.46 +    Node* newcopy = new_array(klass_node, length, nargs, raw_mem_only);
    7.47  
    7.48      // Generate a direct call to the right arraycopy function(s).
    7.49      // We know the copy is disjoint but we might not know if the
    7.50 @@ -3240,7 +3240,7 @@
    7.51      bool length_never_negative = true;
    7.52      generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
    7.53                         original, start, newcopy, intcon(0), moved,
    7.54 -                       nargs, disjoint_bases, length_never_negative);
    7.55 +                       disjoint_bases, length_never_negative);
    7.56  
    7.57      push(newcopy);
    7.58    }
    7.59 @@ -3882,6 +3882,98 @@
    7.60    return true;
    7.61  }
    7.62  
     7.63 +//------------------------copy_to_clone---------------------------------
    7.64 +// Helper function for inline_native_clone.
    7.65 +void LibraryCallKit::copy_to_clone(Node* obj, Node* alloc_obj, Node* obj_size, bool is_array, bool card_mark) {
    7.66 +  assert(obj_size != NULL, "");
    7.67 +  Node* raw_obj = alloc_obj->in(1);
    7.68 +  assert(alloc_obj->is_CheckCastPP() && raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
    7.69 +
    7.70 +  if (ReduceBulkZeroing) {
    7.71 +    // We will be completely responsible for initializing this object -
    7.72 +    // mark Initialize node as complete.
    7.73 +    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
     7.74 +    // The object was just allocated - there should not be any stores yet!
    7.75 +    guarantee(alloc != NULL && alloc->maybe_set_complete(&_gvn), "");
    7.76 +  }
    7.77 +
    7.78 +  // Cast to Object for arraycopy.
    7.79 +  // We can't use the original CheckCastPP since it should be moved
    7.80 +  // after the arraycopy to prevent stores flowing above it.
    7.81 +  Node* new_obj = new(C, 2) CheckCastPPNode(alloc_obj->in(0), raw_obj,
    7.82 +                                            TypeInstPtr::NOTNULL);
    7.83 +  new_obj = _gvn.transform(new_obj);
    7.84 +  // Substitute in the locally valid dest_oop.
    7.85 +  replace_in_map(alloc_obj, new_obj);
    7.86 +
    7.87 +  // Copy the fastest available way.
    7.88 +  // TODO: generate fields copies for small objects instead.
    7.89 +  Node* src  = obj;
    7.90 +  Node* dest = new_obj;
    7.91 +  Node* size = _gvn.transform(obj_size);
    7.92 +
    7.93 +  // Exclude the header but include array length to copy by 8 bytes words.
    7.94 +  // Can't use base_offset_in_bytes(bt) since basic type is unknown.
    7.95 +  int base_off = is_array ? arrayOopDesc::length_offset_in_bytes() :
    7.96 +                            instanceOopDesc::base_offset_in_bytes();
    7.97 +  // base_off:
    7.98 +  // 8  - 32-bit VM
    7.99 +  // 12 - 64-bit VM, compressed oops
   7.100 +  // 16 - 64-bit VM, normal oops
   7.101 +  if (base_off % BytesPerLong != 0) {
   7.102 +    assert(UseCompressedOops, "");
   7.103 +    if (is_array) {
   7.104 +      // Exclude length to copy by 8 bytes words.
   7.105 +      base_off += sizeof(int);
   7.106 +    } else {
   7.107 +      // Include klass to copy by 8 bytes words.
   7.108 +      base_off = instanceOopDesc::klass_offset_in_bytes();
   7.109 +    }
   7.110 +    assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment");
   7.111 +  }
   7.112 +  src  = basic_plus_adr(src,  base_off);
   7.113 +  dest = basic_plus_adr(dest, base_off);
   7.114 +
   7.115 +  // Compute the length also, if needed:
   7.116 +  Node* countx = size;
   7.117 +  countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(base_off)) );
   7.118 +  countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong) ));
   7.119 +
   7.120 +  const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
   7.121 +  bool disjoint_bases = true;
   7.122 +  generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
   7.123 +                               src, NULL, dest, NULL, countx);
   7.124 +
   7.125 +  // If necessary, emit some card marks afterwards.  (Non-arrays only.)
   7.126 +  if (card_mark) {
   7.127 +    assert(!is_array, "");
   7.128 +    // Put in store barrier for any and all oops we are sticking
   7.129 +    // into this object.  (We could avoid this if we could prove
   7.130 +    // that the object type contains no oop fields at all.)
   7.131 +    Node* no_particular_value = NULL;
   7.132 +    Node* no_particular_field = NULL;
   7.133 +    int raw_adr_idx = Compile::AliasIdxRaw;
   7.134 +    post_barrier(control(),
   7.135 +                 memory(raw_adr_type),
   7.136 +                 new_obj,
   7.137 +                 no_particular_field,
   7.138 +                 raw_adr_idx,
   7.139 +                 no_particular_value,
   7.140 +                 T_OBJECT,
   7.141 +                 false);
   7.142 +  }
   7.143 +
   7.144 +  // Move the original CheckCastPP after arraycopy.
   7.145 +  _gvn.hash_delete(alloc_obj);
   7.146 +  alloc_obj->set_req(0, control());
   7.147 +  // Replace raw memory edge with new CheckCastPP to have a live oop
   7.148 +  // at safepoints instead of raw value.
   7.149 +  assert(new_obj->is_CheckCastPP() && new_obj->in(1) == alloc_obj->in(1), "sanity");
   7.150 +  alloc_obj->set_req(1, new_obj);    // cast to the original type
   7.151 +  _gvn.hash_find_insert(alloc_obj);  // put back into GVN table
   7.152 +  // Restore in the locally valid dest_oop.
   7.153 +  replace_in_map(new_obj, alloc_obj);
   7.154 +}
   7.155  
   7.156  //------------------------inline_native_clone----------------------------
   7.157  // Here are the simple edge cases:
   7.158 @@ -3916,8 +4008,9 @@
   7.159    // paths into result_reg:
   7.160    enum {
   7.161      _slow_path = 1,     // out-of-line call to clone method (virtual or not)
   7.162 -    _objArray_path,     // plain allocation, plus arrayof_oop_arraycopy
   7.163 -    _fast_path,         // plain allocation, plus a CopyArray operation
   7.164 +    _objArray_path,     // plain array allocation, plus arrayof_oop_arraycopy
   7.165 +    _array_path,        // plain array allocation, plus arrayof_long_arraycopy
   7.166 +    _instance_path,     // plain instance allocation, plus arrayof_long_arraycopy
   7.167      PATH_LIMIT
   7.168    };
   7.169    RegionNode* result_reg = new(C, PATH_LIMIT) RegionNode(PATH_LIMIT);
   7.170 @@ -3932,18 +4025,6 @@
   7.171    int raw_adr_idx = Compile::AliasIdxRaw;
   7.172    const bool raw_mem_only = true;
   7.173  
   7.174 -  // paths into alloc_reg (on the fast path, just before the CopyArray):
   7.175 -  enum { _typeArray_alloc = 1, _instance_alloc, ALLOC_LIMIT };
   7.176 -  RegionNode* alloc_reg = new(C, ALLOC_LIMIT) RegionNode(ALLOC_LIMIT);
   7.177 -  PhiNode*    alloc_val = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, raw_adr_type);
   7.178 -  PhiNode*    alloc_siz = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, TypeX_X);
   7.179 -  PhiNode*    alloc_i_o = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, Type::ABIO);
   7.180 -  PhiNode*    alloc_mem = new(C, ALLOC_LIMIT) PhiNode(alloc_reg, Type::MEMORY,
   7.181 -                                                      raw_adr_type);
   7.182 -  record_for_igvn(alloc_reg);
   7.183 -
   7.184 -  bool card_mark = false;  // (see below)
   7.185 -
   7.186    Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
   7.187    if (array_ctl != NULL) {
   7.188      // It's an array.
   7.189 @@ -3953,16 +4034,6 @@
   7.190      Node* obj_size = NULL;
   7.191      Node* alloc_obj = new_array(obj_klass, obj_length, nargs,
   7.192                                  raw_mem_only, &obj_size);
   7.193 -    assert(obj_size != NULL, "");
   7.194 -    Node* raw_obj = alloc_obj->in(1);
   7.195 -    assert(raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
   7.196 -    if (ReduceBulkZeroing) {
   7.197 -      AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
   7.198 -      if (alloc != NULL) {
   7.199 -        // We will be completely responsible for initializing this object.
   7.200 -        alloc->maybe_set_complete(&_gvn);
   7.201 -      }
   7.202 -    }
   7.203  
   7.204      if (!use_ReduceInitialCardMarks()) {
   7.205        // If it is an oop array, it requires very special treatment,
   7.206 @@ -3976,7 +4047,7 @@
   7.207          bool length_never_negative = true;
   7.208          generate_arraycopy(TypeAryPtr::OOPS, T_OBJECT,
   7.209                             obj, intcon(0), alloc_obj, intcon(0),
   7.210 -                           obj_length, nargs,
   7.211 +                           obj_length,
   7.212                             disjoint_bases, length_never_negative);
   7.213          result_reg->init_req(_objArray_path, control());
   7.214          result_val->init_req(_objArray_path, alloc_obj);
   7.215 @@ -3991,19 +4062,24 @@
   7.216      // the object.
   7.217  
   7.218      // Otherwise, there are no card marks to worry about.
   7.219 -    alloc_val->init_req(_typeArray_alloc, raw_obj);
   7.220 -    alloc_siz->init_req(_typeArray_alloc, obj_size);
   7.221 -    alloc_reg->init_req(_typeArray_alloc, control());
   7.222 -    alloc_i_o->init_req(_typeArray_alloc, i_o());
   7.223 -    alloc_mem->init_req(_typeArray_alloc, memory(raw_adr_type));
   7.224 +
   7.225 +    if (!stopped()) {
   7.226 +      copy_to_clone(obj, alloc_obj, obj_size, true, false);
   7.227 +
   7.228 +      // Present the results of the copy.
   7.229 +      result_reg->init_req(_array_path, control());
   7.230 +      result_val->init_req(_array_path, alloc_obj);
   7.231 +      result_i_o ->set_req(_array_path, i_o());
   7.232 +      result_mem ->set_req(_array_path, reset_memory());
   7.233 +    }
   7.234    }
   7.235  
   7.236 -  // We only go to the fast case code if we pass a number of guards.
   7.237 +  // We only go to the instance fast case code if we pass a number of guards.
   7.238    // The paths which do not pass are accumulated in the slow_region.
   7.239    RegionNode* slow_region = new (C, 1) RegionNode(1);
   7.240    record_for_igvn(slow_region);
   7.241    if (!stopped()) {
   7.242 -    // It's an instance.  Make the slow-path tests.
   7.243 +    // It's an instance (we did array above).  Make the slow-path tests.
   7.244      // If this is a virtual call, we generate a funny guard.  We grab
   7.245      // the vtable entry corresponding to clone() from the target object.
   7.246      // If the target method which we are calling happens to be the
   7.247 @@ -4030,25 +4106,14 @@
   7.248      PreserveJVMState pjvms(this);
   7.249      Node* obj_size = NULL;
   7.250      Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size);
   7.251 -    assert(obj_size != NULL, "");
   7.252 -    Node* raw_obj = alloc_obj->in(1);
   7.253 -    assert(raw_obj->is_Proj() && raw_obj->in(0)->is_Allocate(), "");
   7.254 -    if (ReduceBulkZeroing) {
   7.255 -      AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_obj, &_gvn);
   7.256 -      if (alloc != NULL && !alloc->maybe_set_complete(&_gvn))
   7.257 -        alloc = NULL;
   7.258 -    }
   7.259 -    if (!use_ReduceInitialCardMarks()) {
   7.260 -      // Put in store barrier for any and all oops we are sticking
   7.261 -      // into this object.  (We could avoid this if we could prove
   7.262 -      // that the object type contains no oop fields at all.)
   7.263 -      card_mark = true;
   7.264 -    }
   7.265 -    alloc_val->init_req(_instance_alloc, raw_obj);
   7.266 -    alloc_siz->init_req(_instance_alloc, obj_size);
   7.267 -    alloc_reg->init_req(_instance_alloc, control());
   7.268 -    alloc_i_o->init_req(_instance_alloc, i_o());
   7.269 -    alloc_mem->init_req(_instance_alloc, memory(raw_adr_type));
   7.270 +
   7.271 +    copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());
   7.272 +
   7.273 +    // Present the results of the slow call.
   7.274 +    result_reg->init_req(_instance_path, control());
   7.275 +    result_val->init_req(_instance_path, alloc_obj);
   7.276 +    result_i_o ->set_req(_instance_path, i_o());
   7.277 +    result_mem ->set_req(_instance_path, reset_memory());
   7.278    }
   7.279  
   7.280    // Generate code for the slow case.  We make a call to clone().
   7.281 @@ -4064,82 +4129,12 @@
   7.282      result_mem ->set_req(_slow_path, reset_memory());
   7.283    }
   7.284  
   7.285 -  // The object is allocated, as an array and/or an instance.  Now copy it.
   7.286 -  set_control( _gvn.transform(alloc_reg) );
   7.287 -  set_i_o(     _gvn.transform(alloc_i_o) );
   7.288 -  set_memory(  _gvn.transform(alloc_mem), raw_adr_type );
   7.289 -  Node* raw_obj  = _gvn.transform(alloc_val);
   7.290 -
   7.291 -  if (!stopped()) {
   7.292 -    // Copy the fastest available way.
   7.293 -    // (No need for PreserveJVMState, since we're using it all up now.)
   7.294 -    // TODO: generate fields/elements copies for small objects instead.
   7.295 -    Node* src  = obj;
   7.296 -    Node* dest = raw_obj;
   7.297 -    Node* size = _gvn.transform(alloc_siz);
   7.298 -
   7.299 -    // Exclude the header.
   7.300 -    int base_off = instanceOopDesc::base_offset_in_bytes();
   7.301 -    if (UseCompressedOops) {
   7.302 -      assert(base_off % BytesPerLong != 0, "base with compressed oops");
   7.303 -      // With compressed oops base_offset_in_bytes is 12 which creates
   7.304 -      // the gap since countx is rounded by 8 bytes below.
   7.305 -      // Copy klass and the gap.
   7.306 -      base_off = instanceOopDesc::klass_offset_in_bytes();
   7.307 -    }
   7.308 -    src  = basic_plus_adr(src,  base_off);
   7.309 -    dest = basic_plus_adr(dest, base_off);
   7.310 -
   7.311 -    // Compute the length also, if needed:
   7.312 -    Node* countx = size;
   7.313 -    countx = _gvn.transform( new (C, 3) SubXNode(countx, MakeConX(base_off)) );
   7.314 -    countx = _gvn.transform( new (C, 3) URShiftXNode(countx, intcon(LogBytesPerLong) ));
   7.315 -
   7.316 -    // Select an appropriate instruction to initialize the range.
   7.317 -    // The CopyArray instruction (if supported) can be optimized
   7.318 -    // into a discrete set of scalar loads and stores.
   7.319 -    bool disjoint_bases = true;
   7.320 -    generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases,
   7.321 -                                 src, NULL, dest, NULL, countx);
   7.322 -
   7.323 -    // Now that the object is properly initialized, type it as an oop.
   7.324 -    // Use a secondary InitializeNode memory barrier.
   7.325 -    InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, raw_adr_idx,
   7.326 -                                                   raw_obj)->as_Initialize();
   7.327 -    init->set_complete(&_gvn);  // (there is no corresponding AllocateNode)
   7.328 -    Node* new_obj = new(C, 2) CheckCastPPNode(control(), raw_obj,
   7.329 -                                              TypeInstPtr::NOTNULL);
   7.330 -    new_obj = _gvn.transform(new_obj);
   7.331 -
   7.332 -    // If necessary, emit some card marks afterwards.  (Non-arrays only.)
   7.333 -    if (card_mark) {
   7.334 -      Node* no_particular_value = NULL;
   7.335 -      Node* no_particular_field = NULL;
   7.336 -      post_barrier(control(),
   7.337 -                   memory(raw_adr_type),
   7.338 -                   new_obj,
   7.339 -                   no_particular_field,
   7.340 -                   raw_adr_idx,
   7.341 -                   no_particular_value,
   7.342 -                   T_OBJECT,
   7.343 -                   false);
   7.344 -    }
   7.345 -    // Present the results of the slow call.
   7.346 -    result_reg->init_req(_fast_path, control());
   7.347 -    result_val->init_req(_fast_path, new_obj);
   7.348 -    result_i_o ->set_req(_fast_path, i_o());
   7.349 -    result_mem ->set_req(_fast_path, reset_memory());
   7.350 -  }
   7.351 -
   7.352    // Return the combined state.
   7.353    set_control(    _gvn.transform(result_reg) );
   7.354    set_i_o(        _gvn.transform(result_i_o) );
   7.355    set_all_memory( _gvn.transform(result_mem) );
   7.356  
   7.357 -  // Cast the result to a sharper type, since we know what clone does.
   7.358 -  Node* new_obj = _gvn.transform(result_val);
   7.359 -  Node* cast    = new (C, 2) CheckCastPPNode(control(), new_obj, toop);
   7.360 -  push(_gvn.transform(cast));
   7.361 +  push(_gvn.transform(result_val));
   7.362  
   7.363    return true;
   7.364  }
   7.365 @@ -4278,8 +4273,7 @@
   7.366  
   7.367      // Call StubRoutines::generic_arraycopy stub.
   7.368      generate_arraycopy(TypeRawPtr::BOTTOM, T_CONFLICT,
   7.369 -                       src, src_offset, dest, dest_offset, length,
   7.370 -                       nargs);
   7.371 +                       src, src_offset, dest, dest_offset, length);
   7.372  
   7.373      // Do not let reads from the destination float above the arraycopy.
   7.374      // Since we cannot type the arrays, we don't know which slices
   7.375 @@ -4302,8 +4296,7 @@
   7.376      // The component types are not the same or are not recognized.  Punt.
   7.377      // (But, avoid the native method wrapper to JVM_ArrayCopy.)
   7.378      generate_slow_arraycopy(TypePtr::BOTTOM,
   7.379 -                            src, src_offset, dest, dest_offset, length,
   7.380 -                            nargs);
   7.381 +                            src, src_offset, dest, dest_offset, length);
   7.382      return true;
   7.383    }
   7.384  
   7.385 @@ -4360,7 +4353,7 @@
   7.386    const TypePtr* adr_type = TypeAryPtr::get_array_body_type(dest_elem);
   7.387    generate_arraycopy(adr_type, dest_elem,
   7.388                       src, src_offset, dest, dest_offset, length,
   7.389 -                     nargs, false, false, slow_region);
   7.390 +                     false, false, slow_region);
   7.391  
   7.392    return true;
   7.393  }
   7.394 @@ -4405,7 +4398,6 @@
   7.395                                     Node* src,  Node* src_offset,
   7.396                                     Node* dest, Node* dest_offset,
   7.397                                     Node* copy_length,
   7.398 -                                   int nargs,
   7.399                                     bool disjoint_bases,
   7.400                                     bool length_never_negative,
   7.401                                     RegionNode* slow_region) {
   7.402 @@ -4417,7 +4409,6 @@
   7.403  
   7.404    Node* original_dest      = dest;
   7.405    AllocateArrayNode* alloc = NULL;  // used for zeroing, if needed
   7.406 -  Node* raw_dest           = NULL;  // used before zeroing, if needed
   7.407    bool  must_clear_dest    = false;
   7.408  
   7.409    // See if this is the initialization of a newly-allocated array.
   7.410 @@ -4436,15 +4427,18 @@
   7.411      // "You break it, you buy it."
   7.412      InitializeNode* init = alloc->initialization();
   7.413      assert(init->is_complete(), "we just did this");
   7.414 -    assert(dest->Opcode() == Op_CheckCastPP, "sanity");
   7.415 +    assert(dest->is_CheckCastPP(), "sanity");
   7.416      assert(dest->in(0)->in(0) == init, "dest pinned");
   7.417 -    raw_dest = dest->in(1);  // grab the raw pointer!
   7.418 -    original_dest = dest;
   7.419 -    dest = raw_dest;
   7.420 +
   7.421 +    // Cast to Object for arraycopy.
   7.422 +    // We can't use the original CheckCastPP since it should be moved
   7.423 +    // after the arraycopy to prevent stores flowing above it.
   7.424 +    Node* new_obj = new(C, 2) CheckCastPPNode(dest->in(0), dest->in(1),
   7.425 +                                              TypeInstPtr::NOTNULL);
   7.426 +    dest = _gvn.transform(new_obj);
   7.427 +    // Substitute in the locally valid dest_oop.
   7.428 +    replace_in_map(original_dest, dest);
   7.429      adr_type = TypeRawPtr::BOTTOM;  // all initializations are into raw memory
   7.430 -    // Decouple the original InitializeNode, turning it into a simple membar.
   7.431 -    // We will build a new one at the end of this routine.
   7.432 -    init->set_req(InitializeNode::RawAddress, top());
   7.433      // From this point on, every exit path is responsible for
   7.434      // initializing any non-copied parts of the object to zero.
   7.435      must_clear_dest = true;
   7.436 @@ -4487,7 +4481,7 @@
   7.437      assert(!must_clear_dest, "");
   7.438      Node* cv = generate_generic_arraycopy(adr_type,
   7.439                                            src, src_offset, dest, dest_offset,
   7.440 -                                          copy_length, nargs);
   7.441 +                                          copy_length);
   7.442      if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
   7.443      checked_control = control();
   7.444      checked_i_o     = i_o();
   7.445 @@ -4506,16 +4500,24 @@
   7.446        generate_negative_guard(copy_length, slow_region);
   7.447      }
   7.448  
   7.449 +    // copy_length is 0.
   7.450      if (!stopped() && must_clear_dest) {
   7.451        Node* dest_length = alloc->in(AllocateNode::ALength);
   7.452        if (_gvn.eqv_uncast(copy_length, dest_length)
   7.453            || _gvn.find_int_con(dest_length, 1) <= 0) {
   7.454 -        // There is no zeroing to do.
   7.455 +        // There is no zeroing to do. No need for a secondary raw memory barrier.
   7.456        } else {
   7.457          // Clear the whole thing since there are no source elements to copy.
   7.458          generate_clear_array(adr_type, dest, basic_elem_type,
   7.459                               intcon(0), NULL,
   7.460                               alloc->in(AllocateNode::AllocSize));
   7.461 +        // Use a secondary InitializeNode as raw memory barrier.
   7.462 +        // Currently it is needed only on this path since other
   7.463 +        // paths have stub or runtime calls as raw memory barriers.
   7.464 +        InitializeNode* init = insert_mem_bar_volatile(Op_Initialize,
   7.465 +                                                       Compile::AliasIdxRaw,
   7.466 +                                                       top())->as_Initialize();
   7.467 +        init->set_complete(&_gvn);  // (there is no corresponding AllocateNode)
   7.468        }
   7.469      }
   7.470  
   7.471 @@ -4637,8 +4639,7 @@
   7.472        Node* cv = generate_checkcast_arraycopy(adr_type,
   7.473                                                dest_elem_klass,
   7.474                                                src, src_offset, dest, dest_offset,
   7.475 -                                              copy_length,
   7.476 -                                              nargs);
   7.477 +                                              copy_length);
   7.478        if (cv == NULL)  cv = intcon(-1);  // failure (no stub available)
   7.479        checked_control = control();
   7.480        checked_i_o     = i_o();
   7.481 @@ -4700,8 +4701,8 @@
   7.482      slow_i_o2  ->init_req(1, slow_i_o);
   7.483      slow_mem2  ->init_req(1, slow_mem);
   7.484      slow_reg2  ->init_req(2, control());
   7.485 -    slow_i_o2  ->init_req(2, i_o());
   7.486 -    slow_mem2  ->init_req(2, memory(adr_type));
   7.487 +    slow_i_o2  ->init_req(2, checked_i_o);
   7.488 +    slow_mem2  ->init_req(2, checked_mem);
   7.489  
   7.490      slow_control = _gvn.transform(slow_reg2);
   7.491      slow_i_o     = _gvn.transform(slow_i_o2);
   7.492 @@ -4746,21 +4747,9 @@
   7.493                             alloc->in(AllocateNode::AllocSize));
   7.494      }
   7.495  
   7.496 -    if (dest != original_dest) {
   7.497 -      // Promote from rawptr to oop, so it looks right in the call's GC map.
   7.498 -      dest = _gvn.transform( new(C,2) CheckCastPPNode(control(), dest,
   7.499 -                                                      TypeInstPtr::NOTNULL) );
   7.500 -
   7.501 -      // Edit the call's debug-info to avoid referring to original_dest.
   7.502 -      // (The problem with original_dest is that it isn't ready until
   7.503 -      // after the InitializeNode completes, but this stuff is before.)
   7.504 -      // Substitute in the locally valid dest_oop.
   7.505 -      replace_in_map(original_dest, dest);
   7.506 -    }
   7.507 -
   7.508      generate_slow_arraycopy(adr_type,
   7.509                              src, src_offset, dest, dest_offset,
   7.510 -                            copy_length, nargs);
   7.511 +                            copy_length);
   7.512  
   7.513      result_region->init_req(slow_call_path, control());
   7.514      result_i_o   ->init_req(slow_call_path, i_o());
   7.515 @@ -4780,16 +4769,16 @@
   7.516  
   7.517    if (dest != original_dest) {
   7.518      // Pin the "finished" array node after the arraycopy/zeroing operations.
   7.519 -    // Use a secondary InitializeNode memory barrier.
   7.520 -    InitializeNode* init = insert_mem_bar_volatile(Op_Initialize,
   7.521 -                                                   Compile::AliasIdxRaw,
   7.522 -                                                   raw_dest)->as_Initialize();
   7.523 -    init->set_complete(&_gvn);  // (there is no corresponding AllocateNode)
   7.524      _gvn.hash_delete(original_dest);
   7.525      original_dest->set_req(0, control());
   7.526 +    // Replace raw memory edge with new CheckCastPP to have a live oop
   7.527 +    // at safepoints instead of raw value.
   7.528 +    assert(dest->is_CheckCastPP() && dest->in(1) == original_dest->in(1), "sanity");
   7.529 +    original_dest->set_req(1, dest);       // cast to the original type
   7.530      _gvn.hash_find_insert(original_dest);  // put back into GVN table
   7.531 +    // Restore in the locally valid dest_oop.
   7.532 +    replace_in_map(dest, original_dest);
   7.533    }
   7.534 -
   7.535    // The memory edges above are precise in order to model effects around
   7.536    // array copies accurately to allow value numbering of field loads around
   7.537    // arraycopy.  Such field loads, both before and after, are common in Java
   7.538 @@ -5073,16 +5062,13 @@
   7.539  LibraryCallKit::generate_slow_arraycopy(const TypePtr* adr_type,
   7.540                                          Node* src,  Node* src_offset,
   7.541                                          Node* dest, Node* dest_offset,
   7.542 -                                        Node* copy_length,
   7.543 -                                        int nargs) {
   7.544 -  _sp += nargs; // any deopt will start just before call to enclosing method
   7.545 +                                        Node* copy_length) {
   7.546    Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON,
   7.547                                   OptoRuntime::slow_arraycopy_Type(),
   7.548                                   OptoRuntime::slow_arraycopy_Java(),
   7.549                                   "slow_arraycopy", adr_type,
   7.550                                   src, src_offset, dest, dest_offset,
   7.551                                   copy_length);
   7.552 -  _sp -= nargs;
   7.553  
   7.554    // Handle exceptions thrown by this fellow:
   7.555    make_slow_call_ex(call, env()->Throwable_klass(), false);
   7.556 @@ -5094,8 +5080,7 @@
   7.557                                               Node* dest_elem_klass,
   7.558                                               Node* src,  Node* src_offset,
   7.559                                               Node* dest, Node* dest_offset,
   7.560 -                                             Node* copy_length,
   7.561 -                                             int nargs) {
   7.562 +                                             Node* copy_length) {
   7.563    if (stopped())  return NULL;
   7.564  
   7.565    address copyfunc_addr = StubRoutines::checkcast_arraycopy();
   7.566 @@ -5136,8 +5121,7 @@
   7.567  LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type,
   7.568                                             Node* src,  Node* src_offset,
   7.569                                             Node* dest, Node* dest_offset,
   7.570 -                                           Node* copy_length,
   7.571 -                                           int nargs) {
   7.572 +                                           Node* copy_length) {
   7.573    if (stopped())  return NULL;
   7.574  
   7.575    address copyfunc_addr = StubRoutines::generic_arraycopy();
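
The library_call.cpp changes restructure the Object.clone intrinsic around a new copy_to_clone helper: the payload is copied in 8-byte words starting just past the header (the base_off of 8, 12 or 16 bytes called out in the diff), the destination CheckCastPP is re-pinned after the copy so a real oop is live at safepoints, and card marks are emitted only for non-array instances. Whatever the copy mechanism, the observable behaviour must remain ordinary shallow-clone semantics; the short program below is only a reminder of those semantics, not compiler code.

    import java.util.Arrays;

    public class ShallowCloneDemo {
        static class Box implements Cloneable {
            int n;
            int[] data;
            Box(int n, int[] data) { this.n = n; this.data = data; }
            @Override protected Box clone() throws CloneNotSupportedException {
                return (Box) super.clone();   // the intrinsified fast path changed above
            }
        }

        public static void main(String[] args) throws Exception {
            int[] a = {1, 2, 3};
            int[] b = a.clone();              // array clone: new array, same element values
            System.out.println(a != b && Arrays.equals(a, b));    // true

            Box x = new Box(42, a);
            Box y = x.clone();                // instance clone: fields copied bit-for-bit
            System.out.println(x != y && y.n == 42);              // true
            System.out.println(y.data == x.data);                 // true: reference is shared
        }
    }

Element and field values are duplicated bit-for-bit while references stay shared, which is exactly what a raw word copy of the object payload preserves.
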
     8.1 --- a/src/share/vm/opto/loopopts.cpp	Thu Jul 02 16:28:15 2009 -0700
     8.2 +++ b/src/share/vm/opto/loopopts.cpp	Tue Jul 07 09:54:06 2009 -0700
     8.3 @@ -667,7 +667,6 @@
     8.4    }
     8.5  }
     8.6  
     8.7 -#ifdef _LP64
     8.8  static bool merge_point_safe(Node* region) {
     8.9    // 4799512: Stop split_if_with_blocks from splitting a block with a ConvI2LNode
    8.10    // having a PhiNode input. This sidesteps the dangerous case where the split
    8.11 @@ -676,20 +675,25 @@
    8.12    // uses.
    8.13    // A better fix for this problem can be found in the BugTraq entry, but
    8.14    // expediency for Mantis demands this hack.
    8.15 +  // 6855164: If the merge point has a FastLockNode with a PhiNode input, we stop
    8.16 +  // split_if_with_blocks from splitting a block because we could not move around
    8.17 +  // the FastLockNode.
    8.18    for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
    8.19      Node* n = region->fast_out(i);
    8.20      if (n->is_Phi()) {
    8.21        for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
    8.22          Node* m = n->fast_out(j);
    8.23 -        if (m->Opcode() == Op_ConvI2L) {
    8.24 +        if (m->is_FastLock())
    8.25            return false;
    8.26 -        }
    8.27 +#ifdef _LP64
    8.28 +        if (m->Opcode() == Op_ConvI2L)
    8.29 +          return false;
    8.30 +#endif
    8.31        }
    8.32      }
    8.33    }
    8.34    return true;
    8.35  }
    8.36 -#endif
    8.37  
    8.38  
    8.39  //------------------------------place_near_use---------------------------------
    8.40 @@ -771,12 +775,10 @@
    8.41        if( get_loop(n_ctrl->in(j)) != n_loop )
    8.42          return;
    8.43  
    8.44 -#ifdef _LP64
    8.45      // Check for safety of the merge point.
    8.46      if( !merge_point_safe(n_ctrl) ) {
    8.47        return;
    8.48      }
    8.49 -#endif
    8.50  
    8.51      // Split compare 'n' through the merge point if it is profitable
    8.52      Node *phi = split_thru_phi( n, n_ctrl, policy );
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/test/compiler/6855164/Test.java	Tue Jul 07 09:54:06 2009 -0700
     9.3 @@ -0,0 +1,55 @@
     9.4 +/*
     9.5 + * Copyright 2009 Sun Microsystems, Inc.  All Rights Reserved.
     9.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     9.7 + *
     9.8 + * This code is free software; you can redistribute it and/or modify it
     9.9 + * under the terms of the GNU General Public License version 2 only, as
    9.10 + * published by the Free Software Foundation.
    9.11 + *
    9.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    9.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    9.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    9.15 + * version 2 for more details (a copy is included in the LICENSE file that
    9.16 + * accompanied this code).
    9.17 + *
    9.18 + * You should have received a copy of the GNU General Public License version
    9.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    9.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    9.21 + *
    9.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    9.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
    9.24 + * have any questions.
    9.25 + */
    9.26 +
    9.27 +/*
    9.28 + * @test
    9.29 + * @bug 6855164
    9.30 + * @summary SIGSEGV during compilation of method involving loop over CharSequence
    9.31 + * @run main/othervm -Xbatch Test
    9.32 + */
    9.33 +
    9.34 +public class Test{
    9.35 +    public static void main(String[] args) throws Exception {
    9.36 +        StringBuffer builder = new StringBuffer();
    9.37 +
    9.38 +        for(int i = 0; i < 100; i++)
    9.39 +            builder.append("I am the very model of a modern major general\n");
    9.40 +
    9.41 +        for(int j = 0; j < builder.length(); j++){
    9.42 +            previousSpaceIndex(builder, j);
    9.43 +        }
    9.44 +    }
    9.45 +
    9.46 +    private static final int previousSpaceIndex(CharSequence sb, int seek) {
    9.47 +        seek--;
    9.48 +        while (seek > 0) {
    9.49 +            if (sb.charAt(seek) == ' ') {
    9.50 +                while (seek > 0 && sb.charAt(seek - 1) == ' ')
    9.51 +                    seek--;
    9.52 +                return seek;
    9.53 +            }
    9.54 +            seek--;
    9.55 +        }
    9.56 +        return 0;
    9.57 +    }
    9.58 +}
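
The regression test above deliberately walks a StringBuffer through the CharSequence interface: StringBuffer's charAt and length are synchronized, so after inlining the compiled loop presumably contains the FastLockNode-at-a-merge-point shape that the loopopts.cpp change now refuses to split (StringBuilder, by contrast, is unsynchronized). The trivial snippet below only illustrates that API-level difference; it is not part of the test.

    public class WhyStringBuffer {
        public static void main(String[] args) {
            CharSequence locked   = new StringBuffer("a b c");   // methods are synchronized
            CharSequence unlocked = new StringBuilder("a b c");  // same CharSequence API, no locking
            System.out.println(locked.charAt(2) == unlocked.charAt(2));  // true: identical results
        }
    }
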
