Merge

Mon, 09 Aug 2010 17:51:56 -0700

author
never
date
Mon, 09 Aug 2010 17:51:56 -0700
changeset 2044
f4f596978298
parent 2036
126ea7725993
parent 2043
2dfd013a7465
child 2045
36519c19beeb

Merge

src/share/vm/asm/codeBuffer.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/allocation.cpp file | annotate | diff | comparison | revisions
src/share/vm/memory/allocation.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/globals.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/thread.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/thread.hpp file | annotate | diff | comparison | revisions
src/share/vm/utilities/vmError.cpp file | annotate | diff | comparison | revisions
     1.1 --- a/make/solaris/makefiles/sparcWorks.make	Tue Aug 03 08:13:38 2010 -0400
     1.2 +++ b/make/solaris/makefiles/sparcWorks.make	Mon Aug 09 17:51:56 2010 -0700
     1.3 @@ -145,11 +145,20 @@
     1.4  OPT_CFLAGS/O2=-xO2
     1.5  OPT_CFLAGS/NOOPT=-xO1
     1.6  
     1.7 +#################################################
     1.8 +# Begin current (>=5.9) Forte compiler options #
     1.9 +#################################################
    1.10 +
    1.11  ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
    1.12  ifeq ($(Platform_arch), x86)
    1.13  OPT_CFLAGS/NO_TAIL_CALL_OPT  = -Wu,-O~yz
    1.14  OPT_CCFLAGS/NO_TAIL_CALL_OPT = -Qoption ube -O~yz
    1.15 +OPT_CFLAGS/stubGenerator_x86_32.o = $(OPT_CFLAGS) -xspace
    1.16 +OPT_CFLAGS/stubGenerator_x86_64.o = $(OPT_CFLAGS) -xspace
    1.17  endif # Platform_arch == x86
    1.18 +ifeq ("${Platform_arch}", "sparc")
    1.19 +OPT_CFLAGS/stubGenerator_sparc.o = $(OPT_CFLAGS) -xspace
    1.20 +endif
    1.21  endif # COMPILER_REV_NUMERIC >= 509
    1.22  
    1.23  #################################################
     2.1 --- a/src/cpu/x86/vm/assembler_x86.cpp	Tue Aug 03 08:13:38 2010 -0400
     2.2 +++ b/src/cpu/x86/vm/assembler_x86.cpp	Mon Aug 09 17:51:56 2010 -0700
     2.3 @@ -7568,21 +7568,27 @@
     2.4  
     2.5    // Scan RCX words at [RDI] for an occurrence of RAX.
     2.6    // Set NZ/Z based on last compare.
     2.7 +  // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does
     2.8 +  // not change flags (only scas instruction which is repeated sets flags).
     2.9 +  // Set Z = 0 (not equal) before 'repne' to indicate that class was not found.
    2.10  #ifdef _LP64
    2.11    // This part is tricky, as values in supers array could be 32 or 64 bit wide
    2.12    // and we store values in objArrays always encoded, thus we need to encode
    2.13    // the value of rax before repne.  Note that rax is dead after the repne.
    2.14    if (UseCompressedOops) {
    2.15 -    encode_heap_oop_not_null(rax);
    2.16 +    encode_heap_oop_not_null(rax); // Changes flags.
    2.17      // The superclass is never null; it would be a basic system error if a null
    2.18      // pointer were to sneak in here.  Note that we have already loaded the
    2.19      // Klass::super_check_offset from the super_klass in the fast path,
    2.20      // so if there is a null in that register, we are already in the afterlife.
    2.21 +    testl(rax,rax); // Set Z = 0
    2.22      repne_scanl();
    2.23    } else
    2.24  #endif // _LP64
    2.25 +  {
    2.26 +    testptr(rax,rax); // Set Z = 0
    2.27      repne_scan();
    2.28 -
    2.29 +  }
    2.30    // Unspill the temp. registers:
    2.31    if (pushed_rdi)  pop(rdi);
    2.32    if (pushed_rcx)  pop(rcx);
    2.33 @@ -8257,30 +8263,35 @@
    2.34    }
    2.35  }
    2.36  
    2.37 +#ifdef ASSERT
    2.38 +void MacroAssembler::verify_heapbase(const char* msg) {
    2.39 +  assert (UseCompressedOops, "should be compressed");
    2.40 +  assert (Universe::heap() != NULL, "java heap should be initialized");
    2.41 +  if (CheckCompressedOops) {
    2.42 +    Label ok;
    2.43 +    push(rscratch1); // cmpptr trashes rscratch1
    2.44 +    cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
    2.45 +    jcc(Assembler::equal, ok);
    2.46 +    stop(msg);
    2.47 +    bind(ok);
    2.48 +    pop(rscratch1);
    2.49 +  }
    2.50 +}
    2.51 +#endif
    2.52 +
    2.53  // Algorithm must match oop.inline.hpp encode_heap_oop.
    2.54  void MacroAssembler::encode_heap_oop(Register r) {
    2.55 -  assert (UseCompressedOops, "should be compressed");
    2.56 -  assert (Universe::heap() != NULL, "java heap should be initialized");
    2.57 +#ifdef ASSERT
    2.58 +  verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
    2.59 +#endif
    2.60 +  verify_oop(r, "broken oop in encode_heap_oop");
    2.61    if (Universe::narrow_oop_base() == NULL) {
    2.62 -    verify_oop(r, "broken oop in encode_heap_oop");
    2.63      if (Universe::narrow_oop_shift() != 0) {
    2.64        assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    2.65        shrq(r, LogMinObjAlignmentInBytes);
    2.66      }
    2.67      return;
    2.68    }
    2.69 -#ifdef ASSERT
    2.70 -  if (CheckCompressedOops) {
    2.71 -    Label ok;
    2.72 -    push(rscratch1); // cmpptr trashes rscratch1
    2.73 -    cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
    2.74 -    jcc(Assembler::equal, ok);
    2.75 -    stop("MacroAssembler::encode_heap_oop: heap base corrupted?");
    2.76 -    bind(ok);
    2.77 -    pop(rscratch1);
    2.78 -  }
    2.79 -#endif
    2.80 -  verify_oop(r, "broken oop in encode_heap_oop");
    2.81    testq(r, r);
    2.82    cmovq(Assembler::equal, r, r12_heapbase);
    2.83    subq(r, r12_heapbase);
    2.84 @@ -8288,9 +8299,8 @@
    2.85  }
    2.86  
    2.87  void MacroAssembler::encode_heap_oop_not_null(Register r) {
    2.88 -  assert (UseCompressedOops, "should be compressed");
    2.89 -  assert (Universe::heap() != NULL, "java heap should be initialized");
    2.90  #ifdef ASSERT
    2.91 +  verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
    2.92    if (CheckCompressedOops) {
    2.93      Label ok;
    2.94      testq(r, r);
    2.95 @@ -8310,9 +8320,8 @@
    2.96  }
    2.97  
    2.98  void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
    2.99 -  assert (UseCompressedOops, "should be compressed");
   2.100 -  assert (Universe::heap() != NULL, "java heap should be initialized");
   2.101  #ifdef ASSERT
   2.102 +  verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
   2.103    if (CheckCompressedOops) {
   2.104      Label ok;
   2.105      testq(src, src);
   2.106 @@ -8335,40 +8344,21 @@
   2.107  }
   2.108  
   2.109  void  MacroAssembler::decode_heap_oop(Register r) {
   2.110 -  assert (UseCompressedOops, "should be compressed");
   2.111 -  assert (Universe::heap() != NULL, "java heap should be initialized");
   2.112 +#ifdef ASSERT
   2.113 +  verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
   2.114 +#endif
   2.115    if (Universe::narrow_oop_base() == NULL) {
   2.116      if (Universe::narrow_oop_shift() != 0) {
   2.117        assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
   2.118        shlq(r, LogMinObjAlignmentInBytes);
   2.119      }
   2.120 -    verify_oop(r, "broken oop in decode_heap_oop");
   2.121 -    return;
   2.122 -  }
   2.123 -#ifdef ASSERT
   2.124 -  if (CheckCompressedOops) {
   2.125 -    Label ok;
   2.126 -    push(rscratch1);
   2.127 -    cmpptr(r12_heapbase,
   2.128 -           ExternalAddress((address)Universe::narrow_oop_base_addr()));
   2.129 -    jcc(Assembler::equal, ok);
   2.130 -    stop("MacroAssembler::decode_heap_oop: heap base corrupted?");
   2.131 -    bind(ok);
   2.132 -    pop(rscratch1);
   2.133 -  }
   2.134 -#endif
   2.135 -
   2.136 -  Label done;
   2.137 -  shlq(r, LogMinObjAlignmentInBytes);
   2.138 -  jccb(Assembler::equal, done);
   2.139 -  addq(r, r12_heapbase);
   2.140 -#if 0
   2.141 -   // alternate decoding probably a wash.
   2.142 -   testq(r, r);
   2.143 -   jccb(Assembler::equal, done);
   2.144 -   leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
   2.145 -#endif
   2.146 -  bind(done);
   2.147 +  } else {
   2.148 +    Label done;
   2.149 +    shlq(r, LogMinObjAlignmentInBytes);
   2.150 +    jccb(Assembler::equal, done);
   2.151 +    addq(r, r12_heapbase);
   2.152 +    bind(done);
   2.153 +  }
   2.154    verify_oop(r, "broken oop in decode_heap_oop");
   2.155  }
   2.156  
   2.157 @@ -8410,9 +8400,11 @@
   2.158          addq(dst, r12_heapbase);
   2.159        }
   2.160      }
   2.161 -  } else if (dst != src) {
   2.162 +  } else {
   2.163      assert (Universe::narrow_oop_base() == NULL, "sanity");
   2.164 -    movq(dst, src);
   2.165 +    if (dst != src) {
   2.166 +      movq(dst, src);
   2.167 +    }
   2.168    }
   2.169  }
   2.170  
     3.1 --- a/src/cpu/x86/vm/assembler_x86.hpp	Tue Aug 03 08:13:38 2010 -0400
     3.2 +++ b/src/cpu/x86/vm/assembler_x86.hpp	Mon Aug 09 17:51:56 2010 -0700
     3.3 @@ -1714,6 +1714,9 @@
     3.4  
     3.5    // if heap base register is used - reinit it with the correct value
     3.6    void reinit_heapbase();
     3.7 +
     3.8 +  DEBUG_ONLY(void verify_heapbase(const char* msg);)
     3.9 +
    3.10  #endif // _LP64
    3.11  
    3.12    // Int division/remainder for Java
     4.1 --- a/src/share/vm/asm/codeBuffer.cpp	Tue Aug 03 08:13:38 2010 -0400
     4.2 +++ b/src/share/vm/asm/codeBuffer.cpp	Mon Aug 09 17:51:56 2010 -0700
     4.3 @@ -128,7 +128,11 @@
     4.4    delete _overflow_arena;
     4.5  
     4.6  #ifdef ASSERT
     4.7 +  // Save allocation type to execute assert in ~ResourceObj()
     4.8 +  // which is called after this destructor.
     4.9 +  ResourceObj::allocation_type at = _default_oop_recorder.get_allocation_type();
    4.10    Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
    4.11 +  ResourceObj::set_allocation_type((address)(&_default_oop_recorder), at);
    4.12  #endif
    4.13  }
    4.14  
     5.1 --- a/src/share/vm/asm/codeBuffer.hpp	Tue Aug 03 08:13:38 2010 -0400
     5.2 +++ b/src/share/vm/asm/codeBuffer.hpp	Mon Aug 09 17:51:56 2010 -0700
     5.3 @@ -278,7 +278,7 @@
     5.4    // special case during expansion which is handled internally.  This
     5.5    // is done to guarantee proper cleanup of resources.
     5.6    void* operator new(size_t size) { return ResourceObj::operator new(size); }
     5.7 -  void  operator delete(void* p)  {        ResourceObj::operator delete(p); }
     5.8 +  void  operator delete(void* p)  { ShouldNotCallThis(); }
     5.9  
    5.10   public:
    5.11    typedef int csize_t;  // code size type; would be size_t except for history
     6.1 --- a/src/share/vm/ci/ciField.cpp	Tue Aug 03 08:13:38 2010 -0400
     6.2 +++ b/src/share/vm/ci/ciField.cpp	Mon Aug 09 17:51:56 2010 -0700
     6.3 @@ -339,7 +339,7 @@
     6.4    if (_type != NULL) _type->print_name();
     6.5    else               tty->print("(reference)");
     6.6    tty->print(" is_constant=%s", bool_to_str(_is_constant));
     6.7 -  if (_is_constant) {
     6.8 +  if (_is_constant && is_static()) {
     6.9      tty->print(" constant_value=");
    6.10      _constant_value.print();
    6.11    }
     7.1 --- a/src/share/vm/ci/ciInstanceKlass.cpp	Tue Aug 03 08:13:38 2010 -0400
     7.2 +++ b/src/share/vm/ci/ciInstanceKlass.cpp	Mon Aug 09 17:51:56 2010 -0700
     7.3 @@ -403,8 +403,9 @@
     7.4      instanceKlass* ik = get_instanceKlass();
     7.5      int max_n_fields = ik->fields()->length()/instanceKlass::next_offset;
     7.6  
     7.7 +    Arena* arena = curEnv->arena();
     7.8      _non_static_fields =
     7.9 -      new (curEnv->arena()) GrowableArray<ciField*>(max_n_fields);
    7.10 +      new (arena) GrowableArray<ciField*>(arena, max_n_fields, 0, NULL);
    7.11      NonStaticFieldFiller filler(curEnv, _non_static_fields);
    7.12      ik->do_nonstatic_fields(&filler);
    7.13    }
     8.1 --- a/src/share/vm/ci/ciMethodBlocks.cpp	Tue Aug 03 08:13:38 2010 -0400
     8.2 +++ b/src/share/vm/ci/ciMethodBlocks.cpp	Mon Aug 09 17:51:56 2010 -0700
     8.3 @@ -252,7 +252,7 @@
     8.4                            _arena(arena), _num_blocks(0), _code_size(meth->code_size()) {
     8.5    int block_estimate = _code_size / 8;
     8.6  
     8.7 -  _blocks =  new(_arena) GrowableArray<ciBlock *>(block_estimate);
     8.8 +  _blocks =  new(_arena) GrowableArray<ciBlock *>(_arena, block_estimate, 0, NULL);
     8.9    int b2bsize = _code_size * sizeof(ciBlock **);
    8.10    _bci_to_block = (ciBlock **) arena->Amalloc(b2bsize);
    8.11    Copy::zero_to_words((HeapWord*) _bci_to_block, b2bsize / sizeof(HeapWord));
     9.1 --- a/src/share/vm/ci/ciTypeFlow.cpp	Tue Aug 03 08:13:38 2010 -0400
     9.2 +++ b/src/share/vm/ci/ciTypeFlow.cpp	Mon Aug 09 17:51:56 2010 -0700
     9.3 @@ -2591,7 +2591,7 @@
     9.4                                 StateVector* temp_vector,
     9.5                                 JsrSet* temp_set) {
     9.6    int dft_len = 100;
     9.7 -  GrowableArray<Block*> stk(arena(), dft_len, 0, NULL);
     9.8 +  GrowableArray<Block*> stk(dft_len);
     9.9  
    9.10    ciBlock* dummy = _methodBlocks->make_dummy_block();
    9.11    JsrSet* root_set = new JsrSet(NULL, 0);
    10.1 --- a/src/share/vm/classfile/classFileParser.cpp	Tue Aug 03 08:13:38 2010 -0400
    10.2 +++ b/src/share/vm/classfile/classFileParser.cpp	Mon Aug 09 17:51:56 2010 -0700
    10.3 @@ -62,6 +62,7 @@
    10.4    ClassFileStream cfs1 = *cfs0;
    10.5    ClassFileStream* cfs = &cfs1;
    10.6  #ifdef ASSERT
    10.7 +  assert(cfs->allocated_on_stack(),"should be local");
    10.8    u1* old_current = cfs0->current();
    10.9  #endif
   10.10  
    11.1 --- a/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Tue Aug 03 08:13:38 2010 -0400
    11.2 +++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.cpp	Mon Aug 09 17:51:56 2010 -0700
    11.3 @@ -158,13 +158,18 @@
    11.4    // The line below is the worst bit of C++ hackery I've ever written
    11.5    // (Detlefs, 11/23).  You should think of it as equivalent to
    11.6    // "_regions(100, true)": initialize the growable array and inform it
    11.7 -  // that it should allocate its elem array(s) on the C heap.  The first
    11.8 -  // argument, however, is actually a comma expression (new-expr, 100).
    11.9 -  // The purpose of the new_expr is to inform the growable array that it
   11.10 -  // is *already* allocated on the C heap: it uses the placement syntax to
   11.11 -  // keep it from actually doing any allocation.
   11.12 -  _markedRegions((ResourceObj::operator new (sizeof(GrowableArray<HeapRegion*>),
   11.13 -                                             (void*)&_markedRegions,
   11.14 +  // that it should allocate its elem array(s) on the C heap.
   11.15 +  //
   11.16 +  // The first argument, however, is actually a comma expression
   11.17 +  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
   11.18 +  // set_allocation_type() call is to replace the default allocation
   11.19 +  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
   11.20 +  // allow passing the assert in GenericGrowableArray() which checks
   11.21 +  // that a growable array object must be on C heap if elements are.
   11.22 +  //
   11.23 +  // Note: containing object is allocated on C heap since it is CHeapObj.
   11.24 +  //
   11.25 +  _markedRegions((ResourceObj::set_allocation_type((address)&_markedRegions,
   11.26                                               ResourceObj::C_HEAP),
   11.27                    100),
   11.28                   true),
    12.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Tue Aug 03 08:13:38 2010 -0400
    12.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Mon Aug 09 17:51:56 2010 -0700
    12.3 @@ -42,14 +42,19 @@
    12.4    // The line below is the worst bit of C++ hackery I've ever written
    12.5    // (Detlefs, 11/23).  You should think of it as equivalent to
    12.6    // "_regions(100, true)": initialize the growable array and inform it
    12.7 -  // that it should allocate its elem array(s) on the C heap.  The first
    12.8 -  // argument, however, is actually a comma expression (new-expr, 100).
    12.9 -  // The purpose of the new_expr is to inform the growable array that it
   12.10 -  // is *already* allocated on the C heap: it uses the placement syntax to
   12.11 -  // keep it from actually doing any allocation.
   12.12 -  _regions((ResourceObj::operator new (sizeof(GrowableArray<HeapRegion*>),
   12.13 -                                       (void*)&_regions,
   12.14 -                                       ResourceObj::C_HEAP),
   12.15 +  // that it should allocate its elem array(s) on the C heap.
   12.16 +  //
   12.17 +  // The first argument, however, is actually a comma expression
   12.18 +  // (set_allocation_type(this, C_HEAP), 100). The purpose of the
   12.19 +  // set_allocation_type() call is to replace the default allocation
   12.20 +  // type for embedded objects STACK_OR_EMBEDDED with C_HEAP. It will
   12.21 +  // allow passing the assert in GenericGrowableArray() which checks
   12.22 +  // that a growable array object must be on C heap if elements are.
   12.23 +  //
   12.24 +  // Note: containing object is allocated on C heap since it is CHeapObj.
   12.25 +  //
   12.26 +  _regions((ResourceObj::set_allocation_type((address)&_regions,
   12.27 +                                             ResourceObj::C_HEAP),
   12.28              (int)max_size),
   12.29             true),
   12.30    _next_rr_candidate(0),
    13.1 --- a/src/share/vm/memory/allocation.cpp	Tue Aug 03 08:13:38 2010 -0400
    13.2 +++ b/src/share/vm/memory/allocation.cpp	Mon Aug 09 17:51:56 2010 -0700
    13.3 @@ -43,24 +43,73 @@
    13.4    switch (type) {
    13.5     case C_HEAP:
    13.6      res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ");
    13.7 +    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
    13.8      break;
    13.9     case RESOURCE_AREA:
   13.10 +    // new(size) sets allocation type RESOURCE_AREA.
   13.11      res = (address)operator new(size);
   13.12      break;
   13.13     default:
   13.14      ShouldNotReachHere();
   13.15    }
   13.16 -  // Set allocation type in the resource object for assertion checks.
   13.17 -  DEBUG_ONLY(((ResourceObj *)res)->_allocation = type;)
   13.18    return res;
   13.19  }
   13.20  
   13.21  void ResourceObj::operator delete(void* p) {
   13.22    assert(((ResourceObj *)p)->allocated_on_C_heap(),
   13.23           "delete only allowed for C_HEAP objects");
   13.24 +  DEBUG_ONLY(((ResourceObj *)p)->_allocation = badHeapOopVal;)
   13.25    FreeHeap(p);
   13.26  }
   13.27  
   13.28 +#ifdef ASSERT
   13.29 +void ResourceObj::set_allocation_type(address res, allocation_type type) {
   13.30 +    // Set allocation type in the resource object
   13.31 +    uintptr_t allocation = (uintptr_t)res;
   13.32 +    assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
   13.33 +    assert(type <= allocation_mask, "incorrect allocation type");
   13.34 +    ((ResourceObj *)res)->_allocation = ~(allocation + type);
   13.35 +}
   13.36 +
   13.37 +ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
   13.38 +    assert(~(_allocation | allocation_mask) == (uintptr_t)this, "lost resource object");
   13.39 +    return (allocation_type)((~_allocation) & allocation_mask);
   13.40 +}
   13.41 +
   13.42 +ResourceObj::ResourceObj() { // default constructor
   13.43 +    if (~(_allocation | allocation_mask) != (uintptr_t)this) {
   13.44 +      set_allocation_type((address)this, STACK_OR_EMBEDDED);
   13.45 +    } else if (allocated_on_stack()) {
   13.46 +      // For some reason we got a value which looks like an allocation on stack.
   13.47 +      // Pass if it is really allocated on stack.
   13.48 +      assert(Thread::current()->on_local_stack((address)this),"should be on stack");
   13.49 +    } else {
   13.50 +      assert(allocated_on_res_area() || allocated_on_C_heap() || allocated_on_arena(),
   13.51 +             "allocation_type should be set by operator new()");
   13.52 +    }
   13.53 +}
   13.54 +
   13.55 +ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
   13.56 +    // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
   13.57 +    set_allocation_type((address)this, STACK_OR_EMBEDDED);
   13.58 +}
   13.59 +
   13.60 +ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
   13.61 +    // Used in InlineTree::ok_to_inline() for WarmCallInfo.
   13.62 +    assert(allocated_on_stack(), "copy only into local");
   13.63 +    // Keep current _allocation value.
   13.64 +    return *this;
   13.65 +}
   13.66 +
   13.67 +ResourceObj::~ResourceObj() {
   13.68 +    // allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
   13.69 +    if (!allocated_on_C_heap()) {  // ResourceObj::delete() zaps _allocation for C_heap.
   13.70 +      _allocation = badHeapOopVal; // zap type
   13.71 +    }
   13.72 +}
   13.73 +#endif // ASSERT
   13.74 +
   13.75 +
   13.76  void trace_heap_malloc(size_t size, const char* name, void* p) {
   13.77    // A lock is not needed here - tty uses a lock internally
   13.78    tty->print_cr("Heap malloc " INTPTR_FORMAT " %7d %s", p, size, name == NULL ? "" : name);
    14.1 --- a/src/share/vm/memory/allocation.hpp	Tue Aug 03 08:13:38 2010 -0400
    14.2 +++ b/src/share/vm/memory/allocation.hpp	Mon Aug 09 17:51:56 2010 -0700
    14.3 @@ -317,32 +317,36 @@
    14.4  // use delete to deallocate.
    14.5  class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
    14.6   public:
    14.7 -  enum allocation_type { UNKNOWN = 0, C_HEAP, RESOURCE_AREA, ARENA };
    14.8 +  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
    14.9 +  static void set_allocation_type(address res, allocation_type type) NOT_DEBUG_RETURN;
   14.10  #ifdef ASSERT
   14.11   private:
   14.12 -  allocation_type _allocation;
   14.13 +  // When this object is allocated on stack the new() operator is not
   14.14 +  // called but garbage on stack may look like a valid allocation_type.
   14.15 +  // Store negated 'this' pointer when new() is called to distinguish cases.
   14.16 +  uintptr_t _allocation;
   14.17   public:
   14.18 -  bool allocated_on_C_heap()    { return _allocation == C_HEAP; }
   14.19 +  allocation_type get_allocation_type() const;
   14.20 +  bool allocated_on_stack()    const { return get_allocation_type() == STACK_OR_EMBEDDED; }
   14.21 +  bool allocated_on_res_area() const { return get_allocation_type() == RESOURCE_AREA; }
   14.22 +  bool allocated_on_C_heap()   const { return get_allocation_type() == C_HEAP; }
   14.23 +  bool allocated_on_arena()    const { return get_allocation_type() == ARENA; }
   14.24 +  ResourceObj(); // default constructor
   14.25 +  ResourceObj(const ResourceObj& r); // default copy constructor
   14.26 +  ResourceObj& operator=(const ResourceObj& r); // default copy assignment
   14.27 +  ~ResourceObj();
   14.28  #endif // ASSERT
   14.29  
   14.30   public:
   14.31    void* operator new(size_t size, allocation_type type);
   14.32    void* operator new(size_t size, Arena *arena) {
   14.33        address res = (address)arena->Amalloc(size);
   14.34 -      // Set allocation type in the resource object
   14.35 -      DEBUG_ONLY(((ResourceObj *)res)->_allocation = ARENA;)
   14.36 +      DEBUG_ONLY(set_allocation_type(res, ARENA);)
   14.37        return res;
   14.38    }
   14.39    void* operator new(size_t size) {
   14.40        address res = (address)resource_allocate_bytes(size);
   14.41 -      // Set allocation type in the resource object
   14.42 -      DEBUG_ONLY(((ResourceObj *)res)->_allocation = RESOURCE_AREA;)
   14.43 -      return res;
   14.44 -  }
   14.45 -  void* operator new(size_t size, void* where, allocation_type type) {
   14.46 -      void* res = where;
   14.47 -      // Set allocation type in the resource object
   14.48 -      DEBUG_ONLY(((ResourceObj *)res)->_allocation = type;)
   14.49 +      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
   14.50        return res;
   14.51    }
   14.52    void  operator delete(void* p);
    15.1 --- a/src/share/vm/opto/block.cpp	Tue Aug 03 08:13:38 2010 -0400
    15.2 +++ b/src/share/vm/opto/block.cpp	Mon Aug 09 17:51:56 2010 -0700
    15.3 @@ -353,7 +353,8 @@
    15.4  PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
    15.5    Phase(CFG),
    15.6    _bbs(a),
    15.7 -  _root(r)
    15.8 +  _root(r),
    15.9 +  _node_latency(NULL)
   15.10  #ifndef PRODUCT
   15.11    , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
   15.12  #endif
    16.1 --- a/src/share/vm/opto/block.hpp	Tue Aug 03 08:13:38 2010 -0400
    16.2 +++ b/src/share/vm/opto/block.hpp	Mon Aug 09 17:51:56 2010 -0700
    16.3 @@ -374,7 +374,7 @@
    16.4    float _outer_loop_freq;       // Outmost loop frequency
    16.5  
    16.6    // Per node latency estimation, valid only during GCM
    16.7 -  GrowableArray<uint> _node_latency;
    16.8 +  GrowableArray<uint> *_node_latency;
    16.9  
   16.10  #ifndef PRODUCT
   16.11    bool _trace_opto_pipelining;  // tracing flag
    17.1 --- a/src/share/vm/opto/c2_globals.hpp	Tue Aug 03 08:13:38 2010 -0400
    17.2 +++ b/src/share/vm/opto/c2_globals.hpp	Mon Aug 09 17:51:56 2010 -0700
    17.3 @@ -281,6 +281,12 @@
    17.4    product(bool, InsertMemBarAfterArraycopy, true,                           \
    17.5            "Insert memory barrier after arraycopy call")                     \
    17.6                                                                              \
    17.7 +  develop(bool, SubsumeLoads, true,                                         \
    17.8 +          "Attempt to compile while subsuming loads into machine instructions.") \
    17.9 +                                                                            \
   17.10 +  develop(bool, StressRecompilation, false,                                 \
   17.11 +          "Recompile each compiled method without subsuming loads or escape analysis.") \
   17.12 +                                                                            \
   17.13    /* controls for tier 1 compilations */                                    \
   17.14                                                                              \
   17.15    develop(bool, Tier1CountInvocations, true,                                \
    18.1 --- a/src/share/vm/opto/c2compiler.cpp	Tue Aug 03 08:13:38 2010 -0400
    18.2 +++ b/src/share/vm/opto/c2compiler.cpp	Mon Aug 09 17:51:56 2010 -0700
    18.3 @@ -103,13 +103,14 @@
    18.4    if (!is_initialized()) {
    18.5      initialize();
    18.6    }
    18.7 -  bool subsume_loads = true;
    18.8 +  bool subsume_loads = SubsumeLoads;
    18.9    bool do_escape_analysis = DoEscapeAnalysis &&
   18.10      !env->jvmti_can_access_local_variables();
   18.11    while (!env->failing()) {
   18.12      // Attempt to compile while subsuming loads into machine instructions.
   18.13      Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis);
   18.14  
   18.15 +
   18.16      // Check result and retry if appropriate.
   18.17      if (C.failure_reason() != NULL) {
   18.18        if (C.failure_reason_is(retry_no_subsuming_loads())) {
   18.19 @@ -127,6 +128,16 @@
   18.20        // on the ciEnv via env->record_method_not_compilable().
   18.21        env->record_failure(C.failure_reason());
   18.22      }
   18.23 +    if (StressRecompilation) {
   18.24 +      if (subsume_loads) {
   18.25 +        subsume_loads = false;
   18.26 +        continue;  // retry
   18.27 +      }
   18.28 +      if (do_escape_analysis) {
   18.29 +        do_escape_analysis = false;
   18.30 +        continue;  // retry
   18.31 +      }
   18.32 +    }
   18.33  
   18.34      // No retry; just break the loop.
   18.35      break;
    19.1 --- a/src/share/vm/opto/chaitin.cpp	Tue Aug 03 08:13:38 2010 -0400
    19.2 +++ b/src/share/vm/opto/chaitin.cpp	Mon Aug 09 17:51:56 2010 -0700
    19.3 @@ -569,7 +569,7 @@
    19.4          if (trace_spilling() && lrg._def != NULL) {
    19.5            // collect defs for MultiDef printing
    19.6            if (lrg._defs == NULL) {
    19.7 -            lrg._defs = new (_ifg->_arena) GrowableArray<Node*>();
    19.8 +            lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
    19.9              lrg._defs->append(lrg._def);
   19.10            }
   19.11            lrg._defs->append(n);
    20.1 --- a/src/share/vm/opto/compile.cpp	Tue Aug 03 08:13:38 2010 -0400
    20.2 +++ b/src/share/vm/opto/compile.cpp	Mon Aug 09 17:51:56 2010 -0700
    20.3 @@ -904,8 +904,8 @@
    20.4    probe_alias_cache(NULL)->_index = AliasIdxTop;
    20.5  
    20.6    _intrinsics = NULL;
    20.7 -  _macro_nodes = new GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
    20.8 -  _predicate_opaqs = new GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
    20.9 +  _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
   20.10 +  _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8,  0, NULL);
   20.11    register_library_intrinsics();
   20.12  }
   20.13  
    21.1 --- a/src/share/vm/opto/gcm.cpp	Tue Aug 03 08:13:38 2010 -0400
    21.2 +++ b/src/share/vm/opto/gcm.cpp	Mon Aug 09 17:51:56 2010 -0700
    21.3 @@ -841,7 +841,7 @@
    21.4  #ifndef PRODUCT
    21.5    if (trace_opto_pipelining()) {
    21.6      tty->print("# latency_to_inputs: node_latency[%d] = %d for node",
    21.7 -               n->_idx, _node_latency.at_grow(n->_idx));
    21.8 +               n->_idx, _node_latency->at_grow(n->_idx));
    21.9      dump();
   21.10    }
   21.11  #endif
   21.12 @@ -853,7 +853,7 @@
   21.13      return;
   21.14  
   21.15    uint nlen = n->len();
   21.16 -  uint use_latency = _node_latency.at_grow(n->_idx);
   21.17 +  uint use_latency = _node_latency->at_grow(n->_idx);
   21.18    uint use_pre_order = _bbs[n->_idx]->_pre_order;
   21.19  
   21.20    for ( uint j=0; j<nlen; j++ ) {
   21.21 @@ -884,15 +884,15 @@
   21.22      uint delta_latency = n->latency(j);
   21.23      uint current_latency = delta_latency + use_latency;
   21.24  
   21.25 -    if (_node_latency.at_grow(def->_idx) < current_latency) {
   21.26 -      _node_latency.at_put_grow(def->_idx, current_latency);
   21.27 +    if (_node_latency->at_grow(def->_idx) < current_latency) {
   21.28 +      _node_latency->at_put_grow(def->_idx, current_latency);
   21.29      }
   21.30  
   21.31  #ifndef PRODUCT
   21.32      if (trace_opto_pipelining()) {
   21.33        tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
   21.34                      use_latency, j, delta_latency, current_latency, def->_idx,
   21.35 -                    _node_latency.at_grow(def->_idx));
   21.36 +                    _node_latency->at_grow(def->_idx));
   21.37      }
   21.38  #endif
   21.39    }
   21.40 @@ -926,7 +926,7 @@
   21.41        return 0;
   21.42  
   21.43      uint nlen = use->len();
   21.44 -    uint nl = _node_latency.at_grow(use->_idx);
   21.45 +    uint nl = _node_latency->at_grow(use->_idx);
   21.46  
   21.47      for ( uint j=0; j<nlen; j++ ) {
   21.48        if (use->in(j) == n) {
   21.49 @@ -962,7 +962,7 @@
   21.50  #ifndef PRODUCT
   21.51    if (trace_opto_pipelining()) {
   21.52      tty->print("# latency_from_outputs: node_latency[%d] = %d for node",
   21.53 -               n->_idx, _node_latency.at_grow(n->_idx));
   21.54 +               n->_idx, _node_latency->at_grow(n->_idx));
   21.55      dump();
   21.56    }
   21.57  #endif
   21.58 @@ -975,7 +975,7 @@
   21.59      if (latency < l) latency = l;
   21.60    }
   21.61  
   21.62 -  _node_latency.at_put_grow(n->_idx, latency);
   21.63 +  _node_latency->at_put_grow(n->_idx, latency);
   21.64  }
   21.65  
   21.66  //------------------------------hoist_to_cheaper_block-------------------------
   21.67 @@ -985,9 +985,9 @@
   21.68    const double delta = 1+PROB_UNLIKELY_MAG(4);
   21.69    Block* least       = LCA;
   21.70    double least_freq  = least->_freq;
   21.71 -  uint target        = _node_latency.at_grow(self->_idx);
   21.72 -  uint start_latency = _node_latency.at_grow(LCA->_nodes[0]->_idx);
   21.73 -  uint end_latency   = _node_latency.at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
   21.74 +  uint target        = _node_latency->at_grow(self->_idx);
   21.75 +  uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
   21.76 +  uint end_latency   = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
   21.77    bool in_latency    = (target <= start_latency);
   21.78    const Block* root_block = _bbs[_root->_idx];
   21.79  
   21.80 @@ -1005,7 +1005,7 @@
   21.81  #ifndef PRODUCT
   21.82    if (trace_opto_pipelining()) {
   21.83      tty->print("# Find cheaper block for latency %d: ",
   21.84 -      _node_latency.at_grow(self->_idx));
   21.85 +      _node_latency->at_grow(self->_idx));
   21.86      self->dump();
   21.87      tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
   21.88        LCA->_pre_order,
   21.89 @@ -1032,9 +1032,9 @@
   21.90      if (mach && LCA == root_block)
   21.91        break;
   21.92  
   21.93 -    uint start_lat = _node_latency.at_grow(LCA->_nodes[0]->_idx);
   21.94 +    uint start_lat = _node_latency->at_grow(LCA->_nodes[0]->_idx);
   21.95      uint end_idx   = LCA->end_idx();
   21.96 -    uint end_lat   = _node_latency.at_grow(LCA->_nodes[end_idx]->_idx);
   21.97 +    uint end_lat   = _node_latency->at_grow(LCA->_nodes[end_idx]->_idx);
   21.98      double LCA_freq = LCA->_freq;
   21.99  #ifndef PRODUCT
  21.100      if (trace_opto_pipelining()) {
  21.101 @@ -1073,7 +1073,7 @@
  21.102        tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
  21.103      }
  21.104  #endif
  21.105 -    _node_latency.at_put_grow(self->_idx, end_latency);
  21.106 +    _node_latency->at_put_grow(self->_idx, end_latency);
  21.107      partial_latency_of_defs(self);
  21.108    }
  21.109  
  21.110 @@ -1255,8 +1255,7 @@
  21.111  
  21.112    // Compute the latency information (via backwards walk) for all the
  21.113    // instructions in the graph
  21.114 -  GrowableArray<uint> node_latency;
  21.115 -  _node_latency = node_latency;
  21.116 +  _node_latency = new GrowableArray<uint>(); // resource_area allocation
  21.117  
  21.118    if( C->do_scheduling() )
  21.119      ComputeLatenciesBackwards(visited, stack);
  21.120 @@ -1341,6 +1340,8 @@
  21.121      }
  21.122    }
  21.123  #endif
  21.124 +  // Dead.
  21.125 +  _node_latency = (GrowableArray<uint> *)0xdeadbeef;
  21.126  }
  21.127  
  21.128  
    22.1 --- a/src/share/vm/opto/lcm.cpp	Tue Aug 03 08:13:38 2010 -0400
    22.2 +++ b/src/share/vm/opto/lcm.cpp	Mon Aug 09 17:51:56 2010 -0700
    22.3 @@ -461,7 +461,7 @@
    22.4        n_choice = 1;
    22.5      }
    22.6  
    22.7 -    uint n_latency = cfg->_node_latency.at_grow(n->_idx);
    22.8 +    uint n_latency = cfg->_node_latency->at_grow(n->_idx);
    22.9      uint n_score   = n->req();   // Many inputs get high score to break ties
   22.10  
   22.11      // Keep best latency found
   22.12 @@ -738,7 +738,7 @@
   22.13          Node     *n = _nodes[j];
   22.14          int     idx = n->_idx;
   22.15          tty->print("#   ready cnt:%3d  ", ready_cnt[idx]);
   22.16 -        tty->print("latency:%3d  ", cfg->_node_latency.at_grow(idx));
   22.17 +        tty->print("latency:%3d  ", cfg->_node_latency->at_grow(idx));
   22.18          tty->print("%4d: %s\n", idx, n->Name());
   22.19        }
   22.20      }
   22.21 @@ -765,7 +765,7 @@
   22.22  #ifndef PRODUCT
   22.23      if (cfg->trace_opto_pipelining()) {
   22.24        tty->print("#    select %d: %s", n->_idx, n->Name());
   22.25 -      tty->print(", latency:%d", cfg->_node_latency.at_grow(n->_idx));
   22.26 +      tty->print(", latency:%d", cfg->_node_latency->at_grow(n->_idx));
   22.27        n->dump();
   22.28        if (Verbose) {
   22.29          tty->print("#   ready list:");
    23.1 --- a/src/share/vm/opto/macro.cpp	Tue Aug 03 08:13:38 2010 -0400
    23.2 +++ b/src/share/vm/opto/macro.cpp	Mon Aug 09 17:51:56 2010 -0700
    23.3 @@ -720,7 +720,7 @@
    23.4        if (basic_elem_type == T_OBJECT || basic_elem_type == T_ARRAY) {
    23.5          if (!elem_type->is_loaded()) {
    23.6            field_type = TypeInstPtr::BOTTOM;
    23.7 -        } else if (field != NULL && field->is_constant()) {
    23.8 +        } else if (field != NULL && field->is_constant() && field->is_static()) {
    23.9            // This can happen if the constant oop is non-perm.
   23.10            ciObject* con = field->constant_value().as_object();
   23.11            // Do not "join" in the previous type; it doesn't add value,
    24.1 --- a/src/share/vm/opto/output.cpp	Tue Aug 03 08:13:38 2010 -0400
    24.2 +++ b/src/share/vm/opto/output.cpp	Mon Aug 09 17:51:56 2010 -0700
    24.3 @@ -382,6 +382,10 @@
    24.4            if (min_offset_from_last_call == 0) {
    24.5              blk_size += nop_size;
    24.6            }
    24.7 +        } else if (mach->ideal_Opcode() == Op_Jump) {
    24.8 +          const_size += b->_num_succs; // Address table size
    24.9 +          // The size is valid even for 64 bit since it is
   24.10 +          // multiplied by 2*jintSize on this method exit.
   24.11          }
   24.12        }
   24.13        min_offset_from_last_call += inst_size;
    25.1 --- a/src/share/vm/runtime/globals.hpp	Tue Aug 03 08:13:38 2010 -0400
    25.2 +++ b/src/share/vm/runtime/globals.hpp	Mon Aug 09 17:51:56 2010 -0700
    25.3 @@ -2442,6 +2442,10 @@
    25.4            "Call fatal if this exception is thrown.  Example: "              \
    25.5            "java -XX:AbortVMOnException=java.lang.NullPointerException Foo") \
    25.6                                                                              \
    25.7 +  notproduct(ccstr, AbortVMOnExceptionMessage, NULL,                        \
    25.8 +          "Call fatal if the exception pointed to by AbortVMOnException "   \
    25.9 +          "has this message.")                                              \
   25.10 +                                                                            \
   25.11    develop(bool, DebugVtables, false,                                        \
   25.12            "add debugging code to vtable dispatch")                          \
   25.13                                                                              \
    26.1 --- a/src/share/vm/runtime/thread.cpp	Tue Aug 03 08:13:38 2010 -0400
    26.2 +++ b/src/share/vm/runtime/thread.cpp	Mon Aug 09 17:51:56 2010 -0700
    26.3 @@ -807,7 +807,7 @@
    26.4  // should be revisited, and they should be removed if possible.
    26.5  
    26.6  bool Thread::is_lock_owned(address adr) const {
    26.7 -  return (_stack_base >= adr && adr >= (_stack_base - _stack_size));
    26.8 +  return on_local_stack(adr);
    26.9  }
   26.10  
   26.11  bool Thread::set_as_starting_thread() {
    27.1 --- a/src/share/vm/runtime/thread.hpp	Tue Aug 03 08:13:38 2010 -0400
    27.2 +++ b/src/share/vm/runtime/thread.hpp	Mon Aug 09 17:51:56 2010 -0700
    27.3 @@ -446,6 +446,11 @@
    27.4    void    set_stack_size(size_t size)  { _stack_size = size; }
    27.5    void    record_stack_base_and_size();
    27.6  
    27.7 +  bool    on_local_stack(address adr) const {
    27.8 +    /* QQQ this has knowledge of direction, ought to be a stack method */
    27.9 +    return (_stack_base >= adr && adr >= (_stack_base - _stack_size));
   27.10 +  }
   27.11 +
   27.12    int     lgrp_id() const                 { return _lgrp_id; }
   27.13    void    set_lgrp_id(int value)          { _lgrp_id = value; }
   27.14  
    28.1 --- a/src/share/vm/utilities/exceptions.cpp	Tue Aug 03 08:13:38 2010 -0400
    28.2 +++ b/src/share/vm/utilities/exceptions.cpp	Mon Aug 09 17:51:56 2010 -0700
    28.3 @@ -117,7 +117,7 @@
    28.4                    (address)h_exception(), file, line, thread);
    28.5    }
    28.6    // for AbortVMOnException flag
    28.7 -  NOT_PRODUCT(Exceptions::debug_check_abort(h_exception));
    28.8 +  NOT_PRODUCT(Exceptions::debug_check_abort(h_exception, message));
    28.9  
   28.10    // Check for special boot-strapping/vm-thread handling
   28.11    if (special_exception(thread, file, line, h_exception)) return;
   28.12 @@ -375,17 +375,26 @@
   28.13  
   28.14  #ifndef PRODUCT
   28.15  // caller frees value_string if necessary
   28.16 -void Exceptions::debug_check_abort(const char *value_string) {
   28.17 +void Exceptions::debug_check_abort(const char *value_string, const char* message) {
   28.18    if (AbortVMOnException != NULL && value_string != NULL &&
   28.19        strstr(value_string, AbortVMOnException)) {
   28.20 -    fatal(err_msg("Saw %s, aborting", value_string));
   28.21 +    if (AbortVMOnExceptionMessage == NULL || message == NULL ||
   28.22 +        strcmp(message, AbortVMOnExceptionMessage) == 0) {
   28.23 +      fatal(err_msg("Saw %s, aborting", value_string));
   28.24 +    }
   28.25    }
   28.26  }
   28.27  
   28.28 -void Exceptions::debug_check_abort(Handle exception) {
   28.29 +void Exceptions::debug_check_abort(Handle exception, const char* message) {
   28.30    if (AbortVMOnException != NULL) {
   28.31      ResourceMark rm;
   28.32 -    debug_check_abort(instanceKlass::cast(exception()->klass())->external_name());
   28.33 +    if (message == NULL && exception->is_a(SystemDictionary::Throwable_klass())) {
   28.34 +      oop msg = java_lang_Throwable::message(exception);
   28.35 +      if (msg != NULL) {
   28.36 +        message = java_lang_String::as_utf8_string(msg);
   28.37 +      }
   28.38 +    }
   28.39 +    debug_check_abort(instanceKlass::cast(exception()->klass())->external_name(), message);
   28.40    }
   28.41  }
   28.42  #endif
    29.1 --- a/src/share/vm/utilities/exceptions.hpp	Tue Aug 03 08:13:38 2010 -0400
    29.2 +++ b/src/share/vm/utilities/exceptions.hpp	Mon Aug 09 17:51:56 2010 -0700
    29.3 @@ -143,8 +143,8 @@
    29.4    static void throw_stack_overflow_exception(Thread* thread, const char* file, int line);
    29.5  
    29.6    // for AbortVMOnException flag
    29.7 -  NOT_PRODUCT(static void debug_check_abort(Handle exception);)
    29.8 -  NOT_PRODUCT(static void debug_check_abort(const char *value_string);)
    29.9 +  NOT_PRODUCT(static void debug_check_abort(Handle exception, const char* message = NULL);)
   29.10 +  NOT_PRODUCT(static void debug_check_abort(const char *value_string, const char* message = NULL);)
   29.11  };
   29.12  
   29.13  
    30.1 --- a/src/share/vm/utilities/growableArray.hpp	Tue Aug 03 08:13:38 2010 -0400
    30.2 +++ b/src/share/vm/utilities/growableArray.hpp	Mon Aug 09 17:51:56 2010 -0700
    30.3 @@ -97,7 +97,10 @@
    30.4      assert(_len >= 0 && _len <= _max, "initial_len too big");
    30.5      _arena = (c_heap ? (Arena*)1 : NULL);
    30.6      set_nesting();
    30.7 -    assert(!c_heap || allocated_on_C_heap(), "growable array must be on C heap if elements are");
    30.8 +    assert(!on_C_heap() || allocated_on_C_heap(), "growable array must be on C heap if elements are");
    30.9 +    assert(!on_stack() ||
   30.10 +           (allocated_on_res_area() || allocated_on_stack()),
   30.11 +           "growable array must be on stack if elements are not on arena and not on C heap");
   30.12    }
   30.13  
   30.14    // This GA will use the given arena for storage.
   30.15 @@ -108,6 +111,10 @@
   30.16      assert(_len >= 0 && _len <= _max, "initial_len too big");
   30.17      _arena = arena;
   30.18      assert(on_arena(), "arena has taken on reserved value 0 or 1");
   30.19 +    // Relax next assert to allow object allocation on resource area,
   30.20 +    // on stack or embedded into another object.
   30.21 +    assert(allocated_on_arena() || allocated_on_stack(),
   30.22 +           "growable array must be on arena or on stack if elements are on arena");
   30.23    }
   30.24  
   30.25    void* raw_allocate(int elementSize);
    31.1 --- a/src/share/vm/utilities/vmError.cpp	Tue Aug 03 08:13:38 2010 -0400
    31.2 +++ b/src/share/vm/utilities/vmError.cpp	Mon Aug 09 17:51:56 2010 -0700
    31.3 @@ -479,8 +479,8 @@
    31.4  
    31.5         if (fr.sp()) {
    31.6           st->print(",  sp=" PTR_FORMAT, fr.sp());
    31.7 -         st->print(",  free space=%" INTPTR_FORMAT "k",
    31.8 -                     ((intptr_t)fr.sp() - (intptr_t)stack_bottom) >> 10);
    31.9 +         size_t free_stack_size = pointer_delta(fr.sp(), stack_bottom, 1024);
   31.10 +         st->print(",  free space=" SIZE_FORMAT "k", free_stack_size);
   31.11         }
   31.12  
   31.13         st->cr();

mercurial