Merge

changeset:   6240:9b4ce069642e
parent:      6239:2a907fd129cb
parent:      5653:9cd0183fe325
child:       6241:6fa574bfd32a
author:      chegar
date:        Sat, 14 Sep 2013 20:40:34 +0100

src/share/vm/classfile/classFileParser.cpp
src/share/vm/classfile/genericSignatures.cpp
src/share/vm/classfile/genericSignatures.hpp
src/share/vm/runtime/os.cpp
     1.1 --- a/.hgtags	Fri Sep 06 09:55:38 2013 +0100
     1.2 +++ b/.hgtags	Sat Sep 14 20:40:34 2013 +0100
     1.3 @@ -374,3 +374,5 @@
     1.4  acac3bde66b2c22791c257a8d99611d6d08c6713 jdk8-b105
     1.5  18b4798adbc42c6fa16f5ecb7d5cd3ca130754bf hs25-b48
     1.6  aed585cafc0d9655726af6d1e1081d1c94cb3b5c jdk8-b106
     1.7 +50794d8ac11c9579b41dec4de23b808fef9f34a1 hs25-b49
     1.8 +5b7f90aab3ad25a25b75b7b2bb18d5ae23d8231c jdk8-b107
     2.1 --- a/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Fri Sep 06 09:55:38 2013 +0100
     2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	Sat Sep 14 20:40:34 2013 +0100
     2.3 @@ -354,9 +354,16 @@
     2.4    public boolean   getIsMarkedDependent()   { return                isMarkedDependent.getValue(this) != 0; }
     2.5    public long      getVtableLen()           { return                vtableLen.getValue(this); }
     2.6    public long      getItableLen()           { return                itableLen.getValue(this); }
     2.7 -  public Symbol    getGenericSignature()    { return                getConstants().getSymbolAt(genericSignatureIndex.getValue(this)); }
     2.8    public long      majorVersion()           { return                majorVersion.getValue(this); }
     2.9    public long      minorVersion()           { return                minorVersion.getValue(this); }
    2.10 +  public Symbol    getGenericSignature()    {
    2.11 +    long index = genericSignatureIndex.getValue(this);
    2.12 +    if (index != 0) {
    2.13 +      return getConstants().getSymbolAt(index);
    2.14 +    } else {
    2.15 +      return null;
    2.16 +    }
    2.17 +  }
    2.18  
    2.19    // "size helper" == instance size in words
    2.20    public long getSizeHelper() {
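
The null guard added above matters because a generic-signature index of 0 is
the "absent" sentinel, not a valid constant-pool slot; the old code asked the
constant pool for entry 0 whenever a class had no generic signature. A minimal
standalone sketch of the same guard (hypothetical types, not the VM's own):

    #include <cstddef>

    struct Symbol;                    // opaque stand-in for HotSpot's Symbol
    struct ConstantPool {
      Symbol* symbol_at(long index);  // assumed lookup, as used above
    };

    // Mirrors the fixed Java accessor: index 0 means "no generic signature".
    Symbol* generic_signature(ConstantPool* cp, long index) {
      if (index == 0) {
        return NULL;  // previously this fell through and dereferenced slot 0
      }
      return cp->symbol_at(index);
    }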
     3.1 --- a/make/bsd/makefiles/gcc.make	Fri Sep 06 09:55:38 2013 +0100
     3.2 +++ b/make/bsd/makefiles/gcc.make	Sat Sep 14 20:40:34 2013 +0100
     3.3 @@ -129,16 +129,21 @@
     3.4    
     3.5      # We only use precompiled headers for the JVM build
     3.6      CFLAGS += $(VM_PCH_FLAG)
     3.7 -  
     3.8 -    # There are some files which don't like precompiled headers
     3.9 -    # The following files are build with 'OPT_CFLAGS/NOOPT' (-O0) in the opt build.
    3.10 -    # But Clang doesn't support a precompiled header which was compiled with -O3
    3.11 -    # to be used in a compilation unit which uses '-O0'. We could also prepare an
    3.12 -    # extra '-O0' PCH file for the opt build and use it here, but it's probably
    3.13 -    # not worth the effort as long as only two files need this special handling.
    3.14 + 
     3.15 +    # The following files are compiled at various optimization
     3.16 +    # levels due to optimization issues encountered at the
     3.17 +    # 'OPT_CFLAGS_DEFAULT' level. The Clang compiler issues a
     3.18 +    # compile-time error if there is an optimization-level skew
     3.19 +    # between the PCH file and the C++ file, especially if the
     3.20 +    # PCH file is compiled at a higher optimization level than
     3.21 +    # the C++ file. One solution might be to prepare extra
     3.22 +    # optimization-level-specific PCH files for the opt build and
     3.23 +    # use them here, but it's probably not worth the effort as long
     3.24 +    # as only a few files need this special handling.
    3.25      PCH_FLAG/loopTransform.o = $(PCH_FLAG/NO_PCH)
    3.26      PCH_FLAG/sharedRuntimeTrig.o = $(PCH_FLAG/NO_PCH)
    3.27      PCH_FLAG/sharedRuntimeTrans.o = $(PCH_FLAG/NO_PCH)
    3.28 +    PCH_FLAG/unsafe.o = $(PCH_FLAG/NO_PCH)
    3.29    
    3.30    endif
    3.31  else # ($(USE_CLANG), true)
    3.32 @@ -306,6 +311,7 @@
    3.33  ifeq ($(USE_CLANG), true)
    3.34    ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1)
    3.35      OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT)
    3.36 +    OPT_CFLAGS/unsafe.o += -O1
    3.37    endif
    3.38  else
    3.39    # 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation.
     4.1 --- a/make/hotspot_version	Fri Sep 06 09:55:38 2013 +0100
     4.2 +++ b/make/hotspot_version	Sat Sep 14 20:40:34 2013 +0100
     4.3 @@ -35,7 +35,7 @@
     4.4  
     4.5  HS_MAJOR_VER=25
     4.6  HS_MINOR_VER=0
     4.7 -HS_BUILD_NUMBER=48
     4.8 +HS_BUILD_NUMBER=49
     4.9  
    4.10  JDK_MAJOR_VER=1
    4.11  JDK_MINOR_VER=8
     5.1 --- a/make/windows/create.bat	Fri Sep 06 09:55:38 2013 +0100
     5.2 +++ b/make/windows/create.bat	Sat Sep 14 20:40:34 2013 +0100
     5.3 @@ -82,6 +82,7 @@
     5.4  
     5.5  echo **************************************************************
     5.6  set ProjectFile=%HotSpotBuildSpace%\jvm.vcproj
     5.7 +echo MSC_VER = "%MSC_VER%" 
     5.8  if "%MSC_VER%" == "1200" (
     5.9  set ProjectFile=%HotSpotBuildSpace%\jvm.dsp
    5.10  echo Will generate VC6 project {unsupported}
    5.11 @@ -96,11 +97,17 @@
    5.12  echo Will generate VC10 {Visual Studio 2010}
    5.13  set ProjectFile=%HotSpotBuildSpace%\jvm.vcxproj
    5.14  ) else (
    5.15 +if "%MSC_VER%" == "1700" (
    5.16 +echo Will generate VC10 {compatible with Visual Studio 2012}
    5.17 +echo After opening in VS 2012, click "Update" when prompted.
    5.18 +set ProjectFile=%HotSpotBuildSpace%\jvm.vcxproj
    5.19 +) else (
    5.20  echo Will generate VC7 project {Visual Studio 2003 .NET}
    5.21  )
    5.22  )
    5.23  )
    5.24  )
    5.25 +)
    5.26  echo %ProjectFile%
    5.27  echo **************************************************************
    5.28  
     6.1 --- a/make/windows/makefiles/rules.make	Fri Sep 06 09:55:38 2013 +0100
     6.2 +++ b/make/windows/makefiles/rules.make	Sat Sep 14 20:40:34 2013 +0100
     6.3 @@ -69,6 +69,13 @@
     6.4  VcVersion=VC10
     6.5  ProjectFile=jvm.vcxproj
     6.6  
     6.7 +!elseif "$(MSC_VER)" == "1700"
     6.8 +# This is VS2012, but it loads VS10 projects just fine (and will
     6.9 +# upgrade them automatically to VS2012 format).
    6.10 +
    6.11 +VcVersion=VC10
    6.12 +ProjectFile=jvm.vcxproj
    6.13 +
    6.14  !else
    6.15  
    6.16  VcVersion=VC7
     7.1 --- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Fri Sep 06 09:55:38 2013 +0100
     7.2 +++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Sat Sep 14 20:40:34 2013 +0100
     7.3 @@ -307,7 +307,7 @@
     7.4        assert(a_byte == *start++, "should be the same code");
     7.5      }
     7.6  #endif
     7.7 -  } else if (_id == load_mirror_id) {
     7.8 +  } else if (_id == load_mirror_id || _id == load_appendix_id) {
     7.9      // produce a copy of the load mirror instruction for use by the being initialized case
    7.10  #ifdef ASSERT
    7.11      address start = __ pc();
    7.12 @@ -384,6 +384,7 @@
    7.13      case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    7.14      case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    7.15      case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    7.16 +    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    7.17      default: ShouldNotReachHere();
    7.18    }
    7.19    __ bind(call_patch);
    7.20 @@ -397,7 +398,7 @@
    7.21    ce->add_call_info_here(_info);
    7.22    __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
    7.23    __ delayed()->nop();
    7.24 -  if (_id == load_klass_id || _id == load_mirror_id) {
    7.25 +  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    7.26      CodeSection* cs = __ code_section();
    7.27      address pc = (address)_pc_start;
    7.28      RelocIterator iter(cs, pc, pc + 1);
     8.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Sep 06 09:55:38 2013 +0100
     8.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Sat Sep 14 20:40:34 2013 +0100
     8.3 @@ -520,7 +520,7 @@
     8.4  void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo *info) {
     8.5    // Allocate a new index in table to hold the object once it's been patched
     8.6    int oop_index = __ oop_recorder()->allocate_oop_index(NULL);
     8.7 -  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id, oop_index);
     8.8 +  PatchingStub* patch = new PatchingStub(_masm, patching_id(info), oop_index);
     8.9  
    8.10    AddressLiteral addrlit(NULL, oop_Relocation::spec(oop_index));
    8.11    assert(addrlit.rspec().type() == relocInfo::oop_type, "must be an oop reloc");
     9.1 --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Fri Sep 06 09:55:38 2013 +0100
     9.2 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Sat Sep 14 20:40:34 2013 +0100
     9.3 @@ -804,6 +804,12 @@
     9.4        }
     9.5        break;
     9.6  
     9.7 +    case load_appendix_patching_id:
     9.8 +      { __ set_info("load_appendix_patching", dont_gc_arguments);
     9.9 +        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
    9.10 +      }
    9.11 +      break;
    9.12 +
    9.13      case dtrace_object_alloc_id:
    9.14        { // O0: object
    9.15          __ set_info("dtrace_object_alloc", dont_gc_arguments);
    10.1 --- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Fri Sep 06 09:55:38 2013 +0100
    10.2 +++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Sat Sep 14 20:40:34 2013 +0100
    10.3 @@ -402,6 +402,7 @@
    10.4      case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    10.5      case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    10.6      case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    10.7 +    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    10.8      default: ShouldNotReachHere();
    10.9    }
   10.10    __ bind(call_patch);
   10.11 @@ -419,7 +420,7 @@
   10.12    for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
   10.13      __ nop();
   10.14    }
   10.15 -  if (_id == load_klass_id || _id == load_mirror_id) {
   10.16 +  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
   10.17      CodeSection* cs = __ code_section();
   10.18      RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
   10.19      relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
    11.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Sep 06 09:55:38 2013 +0100
    11.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Sat Sep 14 20:40:34 2013 +0100
    11.3 @@ -362,7 +362,7 @@
    11.4  
    11.5  void LIR_Assembler::jobject2reg_with_patching(Register reg, CodeEmitInfo* info) {
    11.6    jobject o = NULL;
    11.7 -  PatchingStub* patch = new PatchingStub(_masm, PatchingStub::load_mirror_id);
    11.8 +  PatchingStub* patch = new PatchingStub(_masm, patching_id(info));
    11.9    __ movoop(reg, o);
   11.10    patching_epilog(patch, lir_patch_normal, reg, info);
   11.11  }
    12.1 --- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Fri Sep 06 09:55:38 2013 +0100
    12.2 +++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Sat Sep 14 20:40:34 2013 +0100
    12.3 @@ -1499,6 +1499,13 @@
    12.4        }
    12.5        break;
    12.6  
    12.7 +    case load_appendix_patching_id:
    12.8 +      { StubFrame f(sasm, "load_appendix_patching", dont_gc_arguments);
    12.9 +        // we should set up register map
   12.10 +        oop_maps = generate_patching(sasm, CAST_FROM_FN_PTR(address, move_appendix_patching));
   12.11 +      }
   12.12 +      break;
   12.13 +
   12.14      case dtrace_object_alloc_id:
   12.15        { // rax,: object
   12.16          StubFrame f(sasm, "dtrace_object_alloc", dont_gc_arguments);
    13.1 --- a/src/os/linux/vm/os_linux.cpp	Fri Sep 06 09:55:38 2013 +0100
    13.2 +++ b/src/os/linux/vm/os_linux.cpp	Sat Sep 14 20:40:34 2013 +0100
    13.3 @@ -2767,7 +2767,19 @@
    13.4    Linux::numa_interleave_memory(addr, bytes);
    13.5  }
    13.6  
    13.7 +// Define for numa_set_bind_policy(int). Setting the argument to 0 will set the
    13.8 +// bind policy to MPOL_PREFERRED for the current thread.
    13.9 +#define USE_MPOL_PREFERRED 0
   13.10 +
   13.11  void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
    13.12 +  // To make NUMA and large pages more robust when both are enabled, we need to
    13.13 +  // ease the requirements on where the memory should be allocated. MPOL_BIND is
    13.14 +  // the default policy and it will force memory to be allocated on the specified
   13.15 +  // node. Changing this to MPOL_PREFERRED will prefer to allocate the memory on
   13.16 +  // the specified node, but will not force it. Using this policy will prevent
   13.17 +  // getting SIGBUS when trying to allocate large pages on NUMA nodes with no
   13.18 +  // free large pages.
   13.19 +  Linux::numa_set_bind_policy(USE_MPOL_PREFERRED);
   13.20    Linux::numa_tonode_memory(addr, bytes, lgrp_hint);
   13.21  }
   13.22  
   13.23 @@ -2869,6 +2881,8 @@
   13.24                                              libnuma_dlsym(handle, "numa_tonode_memory")));
   13.25        set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
   13.26                                              libnuma_dlsym(handle, "numa_interleave_memory")));
   13.27 +      set_numa_set_bind_policy(CAST_TO_FN_PTR(numa_set_bind_policy_func_t,
   13.28 +                                            libnuma_dlsym(handle, "numa_set_bind_policy")));
   13.29  
   13.30  
   13.31        if (numa_available() != -1) {
   13.32 @@ -2935,6 +2949,7 @@
   13.33  os::Linux::numa_available_func_t os::Linux::_numa_available;
   13.34  os::Linux::numa_tonode_memory_func_t os::Linux::_numa_tonode_memory;
   13.35  os::Linux::numa_interleave_memory_func_t os::Linux::_numa_interleave_memory;
   13.36 +os::Linux::numa_set_bind_policy_func_t os::Linux::_numa_set_bind_policy;
   13.37  unsigned long* os::Linux::_numa_all_nodes;
   13.38  
   13.39  bool os::pd_uncommit_memory(char* addr, size_t size) {
   13.40 @@ -2943,6 +2958,53 @@
   13.41    return res  != (uintptr_t) MAP_FAILED;
   13.42  }
   13.43  
   13.44 +static
   13.45 +address get_stack_commited_bottom(address bottom, size_t size) {
   13.46 +  address nbot = bottom;
   13.47 +  address ntop = bottom + size;
   13.48 +
   13.49 +  size_t page_sz = os::vm_page_size();
   13.50 +  unsigned pages = size / page_sz;
   13.51 +
   13.52 +  unsigned char vec[1];
   13.53 +  unsigned imin = 1, imax = pages + 1, imid;
   13.54 +  int mincore_return_value;
   13.55 +
   13.56 +  while (imin < imax) {
   13.57 +    imid = (imax + imin) / 2;
   13.58 +    nbot = ntop - (imid * page_sz);
   13.59 +
    13.60 +    // Use a trick with mincore to check whether the page is mapped or not.
    13.61 +    // mincore sets vec to 1 if the page resides in memory and to 0 if it is
    13.62 +    // swapped out, but if the page we are asking for is unmapped, it fails
    13.63 +    // with -1 and errno set to ENOMEM.
   13.64 +    mincore_return_value = mincore(nbot, page_sz, vec);
   13.65 +
   13.66 +    if (mincore_return_value == -1) {
    13.67 +      // Page is not mapped; go up
    13.68 +      // to find the first mapped page.
   13.69 +      if (errno != EAGAIN) {
   13.70 +        assert(errno == ENOMEM, "Unexpected mincore errno");
   13.71 +        imax = imid;
   13.72 +      }
   13.73 +    } else {
    13.74 +      // Page is mapped; go down
    13.75 +      // to find the first unmapped page.
   13.76 +      imin = imid + 1;
   13.77 +    }
   13.78 +  }
   13.79 +
   13.80 +  nbot = nbot + page_sz;
   13.81 +
    13.82 +  // Adjust the stack bottom up by one page if the last checked page is not mapped
   13.83 +  if (mincore_return_value == -1) {
   13.84 +    nbot = nbot + page_sz;
   13.85 +  }
   13.86 +
   13.87 +  return nbot;
   13.88 +}
   13.89 +
   13.90 +
   13.91  // Linux uses a growable mapping for the stack, and if the mapping for
   13.92  // the stack guard pages is not removed when we detach a thread the
   13.93  // stack cannot grow beyond the pages where the stack guard was
   13.94 @@ -2957,59 +3019,37 @@
   13.95  // So, we need to know the extent of the stack mapping when
   13.96  // create_stack_guard_pages() is called.
   13.97  
   13.98 -// Find the bounds of the stack mapping.  Return true for success.
   13.99 -//
  13.100  // We only need this for stacks that are growable: at the time of
  13.101  // writing thread stacks don't use growable mappings (i.e. those
   13.102  // created with MAP_GROWSDOWN), and aren't marked "[stack]", so this
  13.103  // only applies to the main thread.
  13.104  
  13.105 -static
  13.106 -bool get_stack_bounds(uintptr_t *bottom, uintptr_t *top) {
  13.107 -
  13.108 -  char buf[128];
  13.109 -  int fd, sz;
  13.110 -
  13.111 -  if ((fd = ::open("/proc/self/maps", O_RDONLY)) < 0) {
  13.112 -    return false;
  13.113 -  }
  13.114 -
  13.115 -  const char kw[] = "[stack]";
  13.116 -  const int kwlen = sizeof(kw)-1;
  13.117 -
  13.118 -  // Address part of /proc/self/maps couldn't be more than 128 bytes
  13.119 -  while ((sz = os::get_line_chars(fd, buf, sizeof(buf))) > 0) {
  13.120 -     if (sz > kwlen && ::memcmp(buf+sz-kwlen, kw, kwlen) == 0) {
  13.121 -        // Extract addresses
  13.122 -        if (sscanf(buf, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) {
  13.123 -           uintptr_t sp = (uintptr_t) __builtin_frame_address(0);
  13.124 -           if (sp >= *bottom && sp <= *top) {
  13.125 -              ::close(fd);
  13.126 -              return true;
  13.127 -           }
  13.128 -        }
  13.129 -     }
  13.130 -  }
  13.131 -
  13.132 - ::close(fd);
  13.133 -  return false;
  13.134 -}
  13.135 -
  13.136 -
  13.137  // If the (growable) stack mapping already extends beyond the point
  13.138  // where we're going to put our guard pages, truncate the mapping at
  13.139  // that point by munmap()ping it.  This ensures that when we later
  13.140  // munmap() the guard pages we don't leave a hole in the stack
  13.141 -// mapping. This only affects the main/initial thread, but guard
  13.142 -// against future OS changes
   13.143 +// mapping. This only affects the main/initial thread.
  13.144 +
  13.145  bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  13.146 -  uintptr_t stack_extent, stack_base;
  13.147 -  bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true);
  13.148 -  if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) {
  13.149 -      assert(os::Linux::is_initial_thread(),
  13.150 -           "growable stack in non-initial thread");
  13.151 -    if (stack_extent < (uintptr_t)addr)
  13.152 -      ::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent);
  13.153 +
  13.154 +  if (os::Linux::is_initial_thread()) {
   13.155 +    // As we manually grow the stack up to the bottom inside
   13.156 +    // create_attached_thread(), it's likely that
   13.157 +    // os::Linux::initial_thread_stack_bottom is mapped and we don't need to
   13.158 +    // do anything special. Check that first, before the expensive fallback.
  13.159 +    uintptr_t stack_extent = (uintptr_t) os::Linux::initial_thread_stack_bottom();
  13.160 +    unsigned char vec[1];
  13.161 +
  13.162 +    if (mincore((address)stack_extent, os::vm_page_size(), vec) == -1) {
  13.163 +      // Fallback to slow path on all errors, including EAGAIN
  13.164 +      stack_extent = (uintptr_t) get_stack_commited_bottom(
  13.165 +                                    os::Linux::initial_thread_stack_bottom(),
  13.166 +                                    (size_t)addr - stack_extent);
  13.167 +    }
  13.168 +
  13.169 +    if (stack_extent < (uintptr_t)addr) {
  13.170 +      ::munmap((void*)stack_extent, (uintptr_t)(addr - stack_extent));
  13.171 +    }
  13.172    }
  13.173  
  13.174    return os::commit_memory(addr, size, !ExecMem);
  13.175 @@ -3018,13 +3058,13 @@
  13.176  // If this is a growable mapping, remove the guard pages entirely by
  13.177  // munmap()ping them.  If not, just call uncommit_memory(). This only
  13.178  // affects the main/initial thread, but guard against future OS changes
   13.179 +// It's safe to always unmap guard pages for the initial thread because we
   13.180 +// always place them right after the end of the mapped region.
  13.181 +
  13.182  bool os::remove_stack_guard_pages(char* addr, size_t size) {
  13.183    uintptr_t stack_extent, stack_base;
  13.184 -  bool chk_bounds = NOT_DEBUG(os::Linux::is_initial_thread()) DEBUG_ONLY(true);
  13.185 -  if (chk_bounds && get_stack_bounds(&stack_extent, &stack_base)) {
  13.186 -      assert(os::Linux::is_initial_thread(),
  13.187 -           "growable stack in non-initial thread");
  13.188 -
  13.189 +
  13.190 +  if (os::Linux::is_initial_thread()) {
  13.191      return ::munmap(addr, size) == 0;
  13.192    }
  13.193  
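
For context, the binary search in get_stack_commited_bottom() above hinges on
one property of mincore(2): probing a single page tells mapped apart from not
mapped at all. A standalone sketch of that probe (Linux-specific; simplified,
without the EAGAIN handling the VM code keeps):

    #include <errno.h>
    #include <sys/mman.h>
    #include <unistd.h>

    // True if the page at 'page_aligned_addr' is mapped. mincore() succeeds
    // for any mapped page (vec[0] reports residency) and fails with ENOMEM
    // when the range is unmapped -- the signal that drives the search.
    static bool page_is_mapped(void* page_aligned_addr) {
      unsigned char vec[1];
      size_t page_sz = (size_t) sysconf(_SC_PAGESIZE);
      if (mincore(page_aligned_addr, page_sz, vec) == 0) {
        return true;             // mapped, whether resident or swapped out
      }
      return errno != ENOMEM;    // ENOMEM means unmapped; simplify the rest
    }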
    14.1 --- a/src/os/linux/vm/os_linux.hpp	Fri Sep 06 09:55:38 2013 +0100
    14.2 +++ b/src/os/linux/vm/os_linux.hpp	Sat Sep 14 20:40:34 2013 +0100
    14.3 @@ -235,6 +235,7 @@
    14.4    typedef int (*numa_available_func_t)(void);
    14.5    typedef int (*numa_tonode_memory_func_t)(void *start, size_t size, int node);
    14.6    typedef void (*numa_interleave_memory_func_t)(void *start, size_t size, unsigned long *nodemask);
    14.7 +  typedef void (*numa_set_bind_policy_func_t)(int policy);
    14.8  
    14.9    static sched_getcpu_func_t _sched_getcpu;
   14.10    static numa_node_to_cpus_func_t _numa_node_to_cpus;
   14.11 @@ -242,6 +243,7 @@
   14.12    static numa_available_func_t _numa_available;
   14.13    static numa_tonode_memory_func_t _numa_tonode_memory;
   14.14    static numa_interleave_memory_func_t _numa_interleave_memory;
   14.15 +  static numa_set_bind_policy_func_t _numa_set_bind_policy;
   14.16    static unsigned long* _numa_all_nodes;
   14.17  
   14.18    static void set_sched_getcpu(sched_getcpu_func_t func) { _sched_getcpu = func; }
   14.19 @@ -250,6 +252,7 @@
   14.20    static void set_numa_available(numa_available_func_t func) { _numa_available = func; }
   14.21    static void set_numa_tonode_memory(numa_tonode_memory_func_t func) { _numa_tonode_memory = func; }
   14.22    static void set_numa_interleave_memory(numa_interleave_memory_func_t func) { _numa_interleave_memory = func; }
   14.23 +  static void set_numa_set_bind_policy(numa_set_bind_policy_func_t func) { _numa_set_bind_policy = func; }
   14.24    static void set_numa_all_nodes(unsigned long* ptr) { _numa_all_nodes = ptr; }
   14.25    static int sched_getcpu_syscall(void);
   14.26  public:
   14.27 @@ -267,6 +270,11 @@
   14.28        _numa_interleave_memory(start, size, _numa_all_nodes);
   14.29      }
   14.30    }
   14.31 +  static void numa_set_bind_policy(int policy) {
   14.32 +    if (_numa_set_bind_policy != NULL) {
   14.33 +      _numa_set_bind_policy(policy);
   14.34 +    }
   14.35 +  }
   14.36    static int get_node_by_cpu(int cpu_id);
   14.37  };
   14.38  
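
The dlsym indirection above exists because numa_set_bind_policy() may be
missing from older libnuma builds, and the null-guarded wrapper keeps the VM
working without it. Where libnuma can be linked directly, the policy change
amounts to the following (a hedged sketch, not VM code; build with -lnuma):

    #include <numa.h>    // numa_set_bind_policy(), numa_tonode_memory()

    // Bind 'bytes' at 'addr' to 'node' with a soft preference: argument 0
    // selects MPOL_PREFERRED instead of the strict default MPOL_BIND, so an
    // allocation that cannot be satisfied on 'node' (e.g. no free large
    // pages) falls back to another node instead of raising SIGBUS.
    void make_local_preferred(void* addr, size_t bytes, int node) {
      numa_set_bind_policy(0);                // 0 => preferred, 1 => strict
      numa_tonode_memory(addr, bytes, node);
    }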
    15.1 --- a/src/os/posix/vm/os_posix.cpp	Fri Sep 06 09:55:38 2013 +0100
    15.2 +++ b/src/os/posix/vm/os_posix.cpp	Sat Sep 14 20:40:34 2013 +0100
    15.3 @@ -30,6 +30,8 @@
    15.4  #include <unistd.h>
    15.5  #include <sys/resource.h>
    15.6  #include <sys/utsname.h>
    15.7 +#include <pthread.h>
    15.8 +#include <signal.h>
    15.9  
   15.10  
   15.11  // Check core dump limit and report possible place where core can be found
   15.12 @@ -320,11 +322,17 @@
   15.13   * The callback is supposed to provide the method that should be protected.
   15.14   */
   15.15  bool os::WatcherThreadCrashProtection::call(os::CrashProtectionCallback& cb) {
   15.16 +  sigset_t saved_sig_mask;
   15.17 +
   15.18    assert(Thread::current()->is_Watcher_thread(), "Only for WatcherThread");
   15.19    assert(!WatcherThread::watcher_thread()->has_crash_protection(),
   15.20        "crash_protection already set?");
   15.21  
   15.22 -  if (sigsetjmp(_jmpbuf, 1) == 0) {
   15.23 +  // we cannot rely on sigsetjmp/siglongjmp to save/restore the signal mask
   15.24 +  // since on at least some systems (OS X) siglongjmp will restore the mask
   15.25 +  // for the process, not the thread
   15.26 +  pthread_sigmask(0, NULL, &saved_sig_mask);
   15.27 +  if (sigsetjmp(_jmpbuf, 0) == 0) {
   15.28      // make sure we can see in the signal handler that we have crash protection
   15.29      // installed
   15.30      WatcherThread::watcher_thread()->set_crash_protection(this);
   15.31 @@ -334,6 +342,7 @@
   15.32      return true;
   15.33    }
   15.34    // this happens when we siglongjmp() back
   15.35 +  pthread_sigmask(SIG_SETMASK, &saved_sig_mask, NULL);
   15.36    WatcherThread::watcher_thread()->set_crash_protection(NULL);
   15.37    return false;
   15.38  }
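
The change above flips sigsetjmp's savemask argument from 1 to 0 and handles
the mask with pthread_sigmask instead, since siglongjmp's implicit restore is
per-process on some systems (OS X) rather than per-thread. A condensed sketch
of the resulting pattern (assumed names, simplified from the code above):

    #include <pthread.h>
    #include <setjmp.h>
    #include <signal.h>

    static sigjmp_buf jmpbuf;

    bool call_with_crash_protection(void (*body)()) {
      sigset_t saved_mask;
      pthread_sigmask(0, NULL, &saved_mask);  // set == NULL: query-only,
                                              // saves this thread's mask
      if (sigsetjmp(jmpbuf, 0) == 0) {        // 0: don't save mask in jmpbuf
        body();            // a signal handler may siglongjmp(jmpbuf, 1)
        return true;
      }
      // Reached via siglongjmp: restore the saved mask explicitly, per thread.
      pthread_sigmask(SIG_SETMASK, &saved_mask, NULL);
      return false;
    }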
    16.1 --- a/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Fri Sep 06 09:55:38 2013 +0100
    16.2 +++ b/src/os_cpu/solaris_x86/vm/globals_solaris_x86.hpp	Sat Sep 14 20:40:34 2013 +0100
    16.3 @@ -44,6 +44,6 @@
    16.4  define_pd_global(intx, CompilerThreadStackSize,  0);
    16.5  
    16.6  // Used on 64 bit platforms for UseCompressedOops base address
    16.7 -define_pd_global(uintx,HeapBaseMinAddress,       256*M);
    16.8 +define_pd_global(uintx,HeapBaseMinAddress,       2*G);
    16.9  
   16.10  #endif // OS_CPU_SOLARIS_X86_VM_GLOBALS_SOLARIS_X86_HPP
    17.1 --- a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java	Fri Sep 06 09:55:38 2013 +0100
    17.2 +++ b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/CallSite.java	Sat Sep 14 20:40:34 2013 +0100
    17.3 @@ -106,10 +106,12 @@
    17.4                          " (" + getMethod().getBytes() + " bytes) " + getReason());
    17.5              }
    17.6          }
    17.7 +        stream.printf(" (end time: %6.4f", getTimeStamp());
    17.8          if (getEndNodes() > 0) {
    17.9 -            stream.printf(" (end time: %6.4f nodes: %d live: %d)", getTimeStamp(), getEndNodes(), getEndLiveNodes());
   17.10 +            stream.printf(" nodes: %d live: %d", getEndNodes(), getEndLiveNodes());
   17.11          }
   17.12 -        stream.println("");
   17.13 +        stream.println(")");
   17.14 +
   17.15          if (getReceiver() != null) {
   17.16              emit(stream, indent + 4);
   17.17              //                 stream.println("type profile " + method.holder + " -> " + receiver + " (" +
    18.1 --- a/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Fri Sep 06 09:55:38 2013 +0100
    18.2 +++ b/src/share/tools/LogCompilation/src/com/sun/hotspot/tools/compiler/LogParser.java	Sat Sep 14 20:40:34 2013 +0100
    18.3 @@ -207,7 +207,12 @@
    18.4      }
    18.5  
    18.6      String search(Attributes attr, String name) {
    18.7 -        return search(attr, name, null);
    18.8 +        String result = attr.getValue(name);
    18.9 +        if (result != null) {
   18.10 +            return result;
   18.11 +        } else {
   18.12 +            throw new InternalError("can't find " + name);
   18.13 +        }
   18.14      }
   18.15  
   18.16      String search(Attributes attr, String name, String defaultValue) {
   18.17 @@ -215,13 +220,7 @@
   18.18          if (result != null) {
   18.19              return result;
   18.20          }
   18.21 -        if (defaultValue != null) {
   18.22 -            return defaultValue;
   18.23 -        }
   18.24 -        for (int i = 0; i < attr.getLength(); i++) {
   18.25 -            System.out.println(attr.getQName(i) + " " + attr.getValue(attr.getQName(i)));
   18.26 -        }
   18.27 -        throw new InternalError("can't find " + name);
   18.28 +        return defaultValue;
   18.29      }
   18.30      int indent = 0;
   18.31  
   18.32 @@ -268,17 +267,18 @@
   18.33              Phase p = new Phase(search(atts, "name"),
   18.34                      Double.parseDouble(search(atts, "stamp")),
   18.35                      Integer.parseInt(search(atts, "nodes", "0")),
   18.36 -                    Integer.parseInt(search(atts, "live")));
   18.37 +                    Integer.parseInt(search(atts, "live", "0")));
   18.38              phaseStack.push(p);
   18.39          } else if (qname.equals("phase_done")) {
   18.40              Phase p = phaseStack.pop();
   18.41 -            if (! p.getId().equals(search(atts, "name"))) {
   18.42 +            String phaseName = search(atts, "name", null);
   18.43 +            if (phaseName != null && !p.getId().equals(phaseName)) {
   18.44                  System.out.println("phase: " + p.getId());
   18.45                  throw new InternalError("phase name mismatch");
   18.46              }
   18.47              p.setEnd(Double.parseDouble(search(atts, "stamp")));
   18.48              p.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
   18.49 -            p.setEndLiveNodes(Integer.parseInt(search(atts, "live")));
   18.50 +            p.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0")));
   18.51              compile.getPhases().add(p);
   18.52          } else if (qname.equals("task")) {
   18.53              compile = new Compilation(Integer.parseInt(search(atts, "compile_id", "-1")));
   18.54 @@ -413,8 +413,8 @@
   18.55              }
   18.56          } else if (qname.equals("parse_done")) {
   18.57              CallSite call = scopes.pop();
   18.58 -            call.setEndNodes(Integer.parseInt(search(atts, "nodes", "1")));
   18.59 -            call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "1")));
   18.60 +            call.setEndNodes(Integer.parseInt(search(atts, "nodes", "0")));
   18.61 +            call.setEndLiveNodes(Integer.parseInt(search(atts, "live", "0")));
   18.62              call.setTimeStamp(Double.parseDouble(search(atts, "stamp")));
   18.63              scopes.push(call);
   18.64          }
    19.1 --- a/src/share/vm/adlc/arena.cpp	Fri Sep 06 09:55:38 2013 +0100
    19.2 +++ b/src/share/vm/adlc/arena.cpp	Sat Sep 14 20:40:34 2013 +0100
    19.3 @@ -1,5 +1,5 @@
    19.4  /*
    19.5 - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
    19.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    19.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    19.8   *
    19.9   * This code is free software; you can redistribute it and/or modify it
   19.10 @@ -24,7 +24,7 @@
   19.11  
   19.12  #include "adlc.hpp"
   19.13  
   19.14 -void* Chunk::operator new(size_t requested_size, size_t length) {
   19.15 +void* Chunk::operator new(size_t requested_size, size_t length) throw() {
   19.16    return CHeapObj::operator new(requested_size + length);
   19.17  }
   19.18  
   19.19 @@ -163,7 +163,7 @@
   19.20  //-----------------------------------------------------------------------------
   19.21  // CHeapObj
   19.22  
   19.23 -void* CHeapObj::operator new(size_t size){
   19.24 +void* CHeapObj::operator new(size_t size) throw() {
   19.25    return (void *) malloc(size);
   19.26  }
   19.27  
    20.1 --- a/src/share/vm/adlc/arena.hpp	Fri Sep 06 09:55:38 2013 +0100
    20.2 +++ b/src/share/vm/adlc/arena.hpp	Sat Sep 14 20:40:34 2013 +0100
    20.3 @@ -1,5 +1,5 @@
    20.4  /*
    20.5 - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
    20.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    20.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    20.8   *
    20.9   * This code is free software; you can redistribute it and/or modify it
   20.10 @@ -42,7 +42,7 @@
   20.11  
   20.12  class CHeapObj {
   20.13   public:
   20.14 -  void* operator new(size_t size);
   20.15 +  void* operator new(size_t size) throw();
   20.16    void  operator delete(void* p);
   20.17    void* new_array(size_t size);
   20.18  };
   20.19 @@ -53,7 +53,7 @@
   20.20  
   20.21  class ValueObj {
   20.22   public:
   20.23 -  void* operator new(size_t size);
   20.24 +  void* operator new(size_t size) throw();
   20.25    void operator delete(void* p);
   20.26  };
   20.27  
   20.28 @@ -61,7 +61,7 @@
   20.29  
   20.30  class AllStatic {
   20.31   public:
   20.32 -  void* operator new(size_t size);
   20.33 +  void* operator new(size_t size) throw();
   20.34    void operator delete(void* p);
   20.35  };
   20.36  
   20.37 @@ -70,7 +70,7 @@
   20.38  // Linked list of raw memory chunks
   20.39  class Chunk: public CHeapObj {
   20.40   public:
   20.41 -  void* operator new(size_t size, size_t length);
   20.42 +  void* operator new(size_t size, size_t length) throw();
   20.43    void  operator delete(void* p, size_t length);
   20.44    Chunk(size_t length);
   20.45  
    21.1 --- a/src/share/vm/adlc/main.cpp	Fri Sep 06 09:55:38 2013 +0100
    21.2 +++ b/src/share/vm/adlc/main.cpp	Sat Sep 14 20:40:34 2013 +0100
    21.3 @@ -1,5 +1,5 @@
    21.4  /*
    21.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    21.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    21.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    21.8   *
    21.9   * This code is free software; you can redistribute it and/or modify it
   21.10 @@ -485,7 +485,7 @@
   21.11  
   21.12  // VS2005 has its own definition, identical to this one.
   21.13  #if !defined(_WIN32) || defined(_WIN64) || _MSC_VER < 1400
   21.14 -void *operator new( size_t size, int, const char *, int ) {
   21.15 +void *operator new( size_t size, int, const char *, int ) throw() {
   21.16    return ::operator new( size );
   21.17  }
   21.18  #endif
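
The recurring addition of throw() to operator new in this changeset (here and
in codeBuffer.hpp, c1_Compilation.hpp, and c1_Instruction.hpp below) follows
the C++ rule that only a non-throwing allocation function may report failure
by returning NULL; the compiler then guards the constructor call on the
result. A self-contained illustration of the semantics (not VM code):

    #include <cstddef>
    #include <cstdlib>

    class Widget {
     public:
      // Declared throw(), so returning NULL on failure is well-defined and
      // the generated 'new Widget' checks for NULL before the constructor.
      void* operator new(size_t size) throw() { return std::malloc(size); }
      void  operator delete(void* p)          { std::free(p); }
      int value;
    };

    int main() {
      Widget* w = new Widget();  // NULL (no std::bad_alloc) if malloc failed
      if (w != NULL) {
        w->value = 42;
        delete w;
      }
      return 0;
    }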
    22.1 --- a/src/share/vm/adlc/output_c.cpp	Fri Sep 06 09:55:38 2013 +0100
    22.2 +++ b/src/share/vm/adlc/output_c.cpp	Sat Sep 14 20:40:34 2013 +0100
    22.3 @@ -1095,7 +1095,7 @@
    22.4          fprintf(fp, "  // Identify previous instruction if inside this block\n");
    22.5          fprintf(fp, "  if( ");
    22.6          print_block_index(fp, inst_position);
    22.7 -        fprintf(fp, " > 0 ) {\n    Node *n = block->_nodes.at(");
    22.8 +        fprintf(fp, " > 0 ) {\n    Node *n = block->get_node(");
    22.9          print_block_index(fp, inst_position);
   22.10          fprintf(fp, ");\n    inst%d = (n->is_Mach()) ? ", inst_position);
   22.11          fprintf(fp, "n->as_Mach() : NULL;\n  }\n");
    23.1 --- a/src/share/vm/asm/codeBuffer.hpp	Fri Sep 06 09:55:38 2013 +0100
    23.2 +++ b/src/share/vm/asm/codeBuffer.hpp	Sat Sep 14 20:40:34 2013 +0100
    23.3 @@ -1,5 +1,5 @@
    23.4  /*
    23.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    23.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    23.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    23.8   *
    23.9   * This code is free software; you can redistribute it and/or modify it
   23.10 @@ -296,8 +296,8 @@
   23.11    // CodeBuffers must be allocated on the stack except for a single
   23.12    // special case during expansion which is handled internally.  This
   23.13    // is done to guarantee proper cleanup of resources.
   23.14 -  void* operator new(size_t size) { return ResourceObj::operator new(size); }
   23.15 -  void  operator delete(void* p)  { ShouldNotCallThis(); }
   23.16 +  void* operator new(size_t size) throw() { return ResourceObj::operator new(size); }
   23.17 +  void  operator delete(void* p)          { ShouldNotCallThis(); }
   23.18  
   23.19   public:
   23.20    typedef int csize_t;  // code size type; would be size_t except for history
    24.1 --- a/src/share/vm/c1/c1_CodeStubs.hpp	Fri Sep 06 09:55:38 2013 +0100
    24.2 +++ b/src/share/vm/c1/c1_CodeStubs.hpp	Sat Sep 14 20:40:34 2013 +0100
    24.3 @@ -364,7 +364,8 @@
    24.4    enum PatchID {
    24.5      access_field_id,
    24.6      load_klass_id,
    24.7 -    load_mirror_id
    24.8 +    load_mirror_id,
    24.9 +    load_appendix_id
   24.10    };
   24.11    enum constants {
   24.12      patch_info_size = 3
   24.13 @@ -417,7 +418,7 @@
   24.14        }
   24.15        NativeMovRegMem* n_move = nativeMovRegMem_at(pc_start());
   24.16        n_move->set_offset(field_offset);
   24.17 -    } else if (_id == load_klass_id || _id == load_mirror_id) {
   24.18 +    } else if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
   24.19        assert(_obj != noreg, "must have register object for load_klass/load_mirror");
   24.20  #ifdef ASSERT
   24.21        // verify that we're pointing at a NativeMovConstReg
    25.1 --- a/src/share/vm/c1/c1_Compilation.cpp	Fri Sep 06 09:55:38 2013 +0100
    25.2 +++ b/src/share/vm/c1/c1_Compilation.cpp	Sat Sep 14 20:40:34 2013 +0100
    25.3 @@ -74,16 +74,19 @@
    25.4   private:
    25.5    JavaThread* _thread;
    25.6    CompileLog* _log;
    25.7 +  TimerName _timer;
    25.8  
    25.9   public:
   25.10    PhaseTraceTime(TimerName timer)
   25.11 -  : TraceTime("", &timers[timer], CITime || CITimeEach, Verbose), _log(NULL) {
   25.12 +  : TraceTime("", &timers[timer], CITime || CITimeEach, Verbose),
   25.13 +    _log(NULL), _timer(timer)
   25.14 +  {
   25.15      if (Compilation::current() != NULL) {
   25.16        _log = Compilation::current()->log();
   25.17      }
   25.18  
   25.19      if (_log != NULL) {
   25.20 -      _log->begin_head("phase name='%s'", timer_name[timer]);
   25.21 +      _log->begin_head("phase name='%s'", timer_name[_timer]);
   25.22        _log->stamp();
   25.23        _log->end_head();
   25.24      }
   25.25 @@ -91,7 +94,7 @@
   25.26  
   25.27    ~PhaseTraceTime() {
   25.28      if (_log != NULL)
   25.29 -      _log->done("phase");
   25.30 +      _log->done("phase name='%s'", timer_name[_timer]);
   25.31    }
   25.32  };
   25.33  
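
The fix above stores the timer id so that the destructor can emit a phase-end
event naming the phase it closes, matching the begin_head() element (the
LogParser change later in this changeset reads that name back). The shape of
the pattern as a standalone sketch (illustrative names, not the CompileLog
API):

    #include <cstdio>

    class ScopedPhase {
      const char* _name;            // remembered for the close event
     public:
      explicit ScopedPhase(const char* name) : _name(name) {
        std::printf("<phase name='%s'>\n", _name);
      }
      ~ScopedPhase() {
        // Without the stored _name, the close event could not say which
        // phase it ends -- the gap the PhaseTraceTime change fills.
        std::printf("<phase_done name='%s'/>\n", _name);
      }
    };

    void compile_method() {
      ScopedPhase phase("buildIR");  // logs begin now, end at scope exit
      // ... phase work ...
    }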
    26.1 --- a/src/share/vm/c1/c1_Compilation.hpp	Fri Sep 06 09:55:38 2013 +0100
    26.2 +++ b/src/share/vm/c1/c1_Compilation.hpp	Sat Sep 14 20:40:34 2013 +0100
    26.3 @@ -1,5 +1,5 @@
    26.4  /*
    26.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    26.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    26.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    26.8   *
    26.9   * This code is free software; you can redistribute it and/or modify it
   26.10 @@ -279,8 +279,8 @@
   26.11  // Base class for objects allocated by the compiler in the compilation arena
   26.12  class CompilationResourceObj ALLOCATION_SUPER_CLASS_SPEC {
   26.13   public:
   26.14 -  void* operator new(size_t size) { return Compilation::current()->arena()->Amalloc(size); }
   26.15 -  void* operator new(size_t size, Arena* arena) {
   26.16 +  void* operator new(size_t size) throw() { return Compilation::current()->arena()->Amalloc(size); }
   26.17 +  void* operator new(size_t size, Arena* arena) throw() {
   26.18      return arena->Amalloc(size);
   26.19    }
   26.20    void  operator delete(void* p) {} // nothing to do
    27.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Sep 06 09:55:38 2013 +0100
    27.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Sat Sep 14 20:40:34 2013 +0100
    27.3 @@ -1583,7 +1583,7 @@
    27.4        ObjectType* obj_type = obj->type()->as_ObjectType();
    27.5        if (obj_type->is_constant() && !PatchALot) {
    27.6          ciObject* const_oop = obj_type->constant_value();
    27.7 -        if (!const_oop->is_null_object()) {
    27.8 +        if (!const_oop->is_null_object() && const_oop->is_loaded()) {
    27.9            if (field->is_constant()) {
   27.10              ciConstant field_val = field->constant_value_of(const_oop);
   27.11              BasicType field_type = field_val.basic_type();
   27.12 @@ -1667,9 +1667,8 @@
   27.13    const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
   27.14    assert(declared_signature != NULL, "cannot be null");
   27.15  
   27.16 -  // FIXME bail out for now
   27.17 -  if (Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
   27.18 -    BAILOUT("unlinked call site (FIXME needs patching or recompile support)");
   27.19 +  if (!C1PatchInvokeDynamic && Bytecodes::has_optional_appendix(bc_raw) && !will_link) {
   27.20 +    BAILOUT("unlinked call site (C1PatchInvokeDynamic is off)");
   27.21    }
   27.22  
   27.23    // we have to make sure the argument size (incl. the receiver)
   27.24 @@ -1713,10 +1712,23 @@
   27.25        code = target->is_static() ? Bytecodes::_invokestatic : Bytecodes::_invokespecial;
   27.26        break;
   27.27      }
   27.28 +  } else {
   27.29 +    if (bc_raw == Bytecodes::_invokehandle) {
   27.30 +      assert(!will_link, "should come here only for unlinked call");
   27.31 +      code = Bytecodes::_invokespecial;
   27.32 +    }
   27.33    }
   27.34  
   27.35    // Push appendix argument (MethodType, CallSite, etc.), if one.
   27.36 -  if (stream()->has_appendix()) {
   27.37 +  bool patch_for_appendix = false;
   27.38 +  int patching_appendix_arg = 0;
   27.39 +  if (C1PatchInvokeDynamic &&
   27.40 +      (Bytecodes::has_optional_appendix(bc_raw) && (!will_link || PatchALot))) {
   27.41 +    Value arg = append(new Constant(new ObjectConstant(compilation()->env()->unloaded_ciinstance()), copy_state_before()));
   27.42 +    apush(arg);
   27.43 +    patch_for_appendix = true;
   27.44 +    patching_appendix_arg = (will_link && stream()->has_appendix()) ? 0 : 1;
   27.45 +  } else if (stream()->has_appendix()) {
   27.46      ciObject* appendix = stream()->get_appendix();
   27.47      Value arg = append(new Constant(new ObjectConstant(appendix)));
   27.48      apush(arg);
   27.49 @@ -1732,7 +1744,8 @@
   27.50    if (UseCHA && DeoptC1 && klass->is_loaded() && target->is_loaded() &&
   27.51        !(// %%% FIXME: Are both of these relevant?
   27.52          target->is_method_handle_intrinsic() ||
   27.53 -        target->is_compiled_lambda_form())) {
   27.54 +        target->is_compiled_lambda_form()) &&
   27.55 +      !patch_for_appendix) {
   27.56      Value receiver = NULL;
   27.57      ciInstanceKlass* receiver_klass = NULL;
   27.58      bool type_is_exact = false;
   27.59 @@ -1850,7 +1863,8 @@
   27.60    // check if we could do inlining
   27.61    if (!PatchALot && Inline && klass->is_loaded() &&
   27.62        (klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
   27.63 -      && target->is_loaded()) {
   27.64 +      && target->is_loaded()
   27.65 +      && !patch_for_appendix) {
   27.66      // callee is known => check if we have static binding
   27.67      assert(target->is_loaded(), "callee must be known");
   27.68      if (code == Bytecodes::_invokestatic  ||
   27.69 @@ -1901,7 +1915,7 @@
   27.70      code == Bytecodes::_invokespecial   ||
   27.71      code == Bytecodes::_invokevirtual   ||
   27.72      code == Bytecodes::_invokeinterface;
   27.73 -  Values* args = state()->pop_arguments(target->arg_size_no_receiver());
   27.74 +  Values* args = state()->pop_arguments(target->arg_size_no_receiver() + patching_appendix_arg);
   27.75    Value recv = has_receiver ? apop() : NULL;
   27.76    int vtable_index = Method::invalid_vtable_index;
   27.77  
    28.1 --- a/src/share/vm/c1/c1_Instruction.hpp	Fri Sep 06 09:55:38 2013 +0100
    28.2 +++ b/src/share/vm/c1/c1_Instruction.hpp	Sat Sep 14 20:40:34 2013 +0100
    28.3 @@ -1,5 +1,5 @@
    28.4  /*
    28.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    28.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    28.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    28.8   *
    28.9   * This code is free software; you can redistribute it and/or modify it
   28.10 @@ -323,7 +323,7 @@
   28.11    }
   28.12  
   28.13   public:
   28.14 -  void* operator new(size_t size) {
   28.15 +  void* operator new(size_t size) throw() {
   28.16      Compilation* c = Compilation::current();
   28.17      void* res = c->arena()->Amalloc(size);
   28.18      ((Instruction*)res)->_id = c->get_next_id();
   28.19 @@ -1611,7 +1611,7 @@
   28.20    friend class SuxAndWeightAdjuster;
   28.21  
   28.22   public:
   28.23 -   void* operator new(size_t size) {
   28.24 +   void* operator new(size_t size) throw() {
   28.25      Compilation* c = Compilation::current();
   28.26      void* res = c->arena()->Amalloc(size);
   28.27      ((BlockBegin*)res)->_id = c->get_next_id();
    29.1 --- a/src/share/vm/c1/c1_LIR.hpp	Fri Sep 06 09:55:38 2013 +0100
    29.2 +++ b/src/share/vm/c1/c1_LIR.hpp	Sat Sep 14 20:40:34 2013 +0100
    29.3 @@ -1211,8 +1211,6 @@
    29.4    bool is_invokedynamic() const                  { return code() == lir_dynamic_call; }
    29.5    bool is_method_handle_invoke() const {
    29.6      return
    29.7 -      is_invokedynamic()  // An invokedynamic is always a MethodHandle call site.
    29.8 -      ||
    29.9        method()->is_compiled_lambda_form()  // Java-generated adapter
   29.10        ||
   29.11        method()->is_method_handle_intrinsic();  // JVM-generated MH intrinsic
    30.1 --- a/src/share/vm/c1/c1_LIRAssembler.cpp	Fri Sep 06 09:55:38 2013 +0100
    30.2 +++ b/src/share/vm/c1/c1_LIRAssembler.cpp	Sat Sep 14 20:40:34 2013 +0100
    30.3 @@ -93,12 +93,23 @@
    30.4        default:
    30.5          ShouldNotReachHere();
    30.6      }
    30.7 +  } else if (patch->id() == PatchingStub::load_appendix_id) {
    30.8 +    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    30.9 +    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
   30.10    } else {
   30.11      ShouldNotReachHere();
   30.12    }
   30.13  #endif
   30.14  }
   30.15  
   30.16 +PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
   30.17 +  IRScope* scope = info->scope();
   30.18 +  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
   30.19 +  if (Bytecodes::has_optional_appendix(bc_raw)) {
   30.20 +    return PatchingStub::load_appendix_id;
   30.21 +  }
   30.22 +  return PatchingStub::load_mirror_id;
   30.23 +}
   30.24  
   30.25  //---------------------------------------------------------------
   30.26  
    31.1 --- a/src/share/vm/c1/c1_LIRAssembler.hpp	Fri Sep 06 09:55:38 2013 +0100
    31.2 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Sat Sep 14 20:40:34 2013 +0100
    31.3 @@ -119,6 +119,8 @@
    31.4  
    31.5    void comp_op(LIR_Condition condition, LIR_Opr src, LIR_Opr result, LIR_Op2* op);
    31.6  
    31.7 +  PatchingStub::PatchID patching_id(CodeEmitInfo* info);
    31.8 +
    31.9   public:
   31.10    LIR_Assembler(Compilation* c);
   31.11    ~LIR_Assembler();
    32.1 --- a/src/share/vm/c1/c1_Runtime1.cpp	Fri Sep 06 09:55:38 2013 +0100
    32.2 +++ b/src/share/vm/c1/c1_Runtime1.cpp	Sat Sep 14 20:40:34 2013 +0100
    32.3 @@ -819,6 +819,7 @@
    32.4    KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
    32.5    KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
    32.6    Handle mirror(THREAD, NULL);                    // oop needed by load_mirror_patching code
    32.7 +  Handle appendix(THREAD, NULL);                  // oop needed by appendix_patching code
    32.8    bool load_klass_or_mirror_patch_id =
    32.9      (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
   32.10  
   32.11 @@ -888,10 +889,32 @@
   32.12            mirror = Handle(THREAD, m);
   32.13          }
   32.14          break;
   32.15 -      default: Unimplemented();
   32.16 +      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
   32.17      }
   32.18      // convert to handle
   32.19      load_klass = KlassHandle(THREAD, k);
   32.20 +  } else if (stub_id == load_appendix_patching_id) {
   32.21 +    Bytecode_invoke bytecode(caller_method, bci);
   32.22 +    Bytecodes::Code bc = bytecode.invoke_code();
   32.23 +
   32.24 +    CallInfo info;
   32.25 +    constantPoolHandle pool(thread, caller_method->constants());
   32.26 +    int index = bytecode.index();
   32.27 +    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
   32.28 +    appendix = info.resolved_appendix();
   32.29 +    switch (bc) {
   32.30 +      case Bytecodes::_invokehandle: {
   32.31 +        int cache_index = ConstantPool::decode_cpcache_index(index, true);
   32.32 +        assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
   32.33 +        pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
   32.34 +        break;
   32.35 +      }
   32.36 +      case Bytecodes::_invokedynamic: {
   32.37 +        pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
   32.38 +        break;
   32.39 +      }
   32.40 +      default: fatal("unexpected bytecode for load_appendix_patching_id");
   32.41 +    }
   32.42    } else {
   32.43      ShouldNotReachHere();
   32.44    }
   32.45 @@ -992,8 +1015,8 @@
   32.46                     n_copy->data() == (intptr_t)Universe::non_oop_word(),
   32.47                     "illegal init value");
   32.48              if (stub_id == Runtime1::load_klass_patching_id) {
   32.49 -            assert(load_klass() != NULL, "klass not set");
   32.50 -            n_copy->set_data((intx) (load_klass()));
   32.51 +              assert(load_klass() != NULL, "klass not set");
   32.52 +              n_copy->set_data((intx) (load_klass()));
   32.53              } else {
   32.54                assert(mirror() != NULL, "klass not set");
   32.55                n_copy->set_data((intx) (mirror()));
   32.56 @@ -1002,43 +1025,55 @@
   32.57              if (TracePatching) {
   32.58                Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
   32.59              }
   32.60 +          }
   32.61 +        } else if (stub_id == Runtime1::load_appendix_patching_id) {
   32.62 +          NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
   32.63 +          assert(n_copy->data() == 0 ||
   32.64 +                 n_copy->data() == (intptr_t)Universe::non_oop_word(),
   32.65 +                 "illegal init value");
   32.66 +          n_copy->set_data((intx) (appendix()));
   32.67  
   32.68 -#if defined(SPARC) || defined(PPC)
   32.69 -            // Update the location in the nmethod with the proper
   32.70 -            // metadata.  When the code was generated, a NULL was stuffed
   32.71 -            // in the metadata table and that table needs to be update to
   32.72 -            // have the right value.  On intel the value is kept
   32.73 -            // directly in the instruction instead of in the metadata
   32.74 -            // table, so set_data above effectively updated the value.
   32.75 -            nmethod* nm = CodeCache::find_nmethod(instr_pc);
   32.76 -            assert(nm != NULL, "invalid nmethod_pc");
   32.77 -            RelocIterator mds(nm, copy_buff, copy_buff + 1);
   32.78 -            bool found = false;
   32.79 -            while (mds.next() && !found) {
   32.80 -              if (mds.type() == relocInfo::oop_type) {
   32.81 -                assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
   32.82 -                oop_Relocation* r = mds.oop_reloc();
   32.83 -                oop* oop_adr = r->oop_addr();
   32.84 -                *oop_adr = mirror();
   32.85 -                r->fix_oop_relocation();
   32.86 -                found = true;
   32.87 -              } else if (mds.type() == relocInfo::metadata_type) {
   32.88 -                assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
   32.89 -                metadata_Relocation* r = mds.metadata_reloc();
   32.90 -                Metadata** metadata_adr = r->metadata_addr();
   32.91 -                *metadata_adr = load_klass();
   32.92 -                r->fix_metadata_relocation();
   32.93 -                found = true;
   32.94 -              }
   32.95 -            }
   32.96 -            assert(found, "the metadata must exist!");
   32.97 -#endif
   32.98 -
   32.99 +          if (TracePatching) {
  32.100 +            Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
  32.101            }
  32.102          } else {
  32.103            ShouldNotReachHere();
  32.104          }
  32.105  
  32.106 +#if defined(SPARC) || defined(PPC)
  32.107 +        if (load_klass_or_mirror_patch_id ||
  32.108 +            stub_id == Runtime1::load_appendix_patching_id) {
  32.109 +          // Update the location in the nmethod with the proper
  32.110 +          // metadata.  When the code was generated, a NULL was stuffed
   32.111 +          // in the metadata table and that table needs to be updated to
  32.112 +          // have the right value.  On intel the value is kept
  32.113 +          // directly in the instruction instead of in the metadata
  32.114 +          // table, so set_data above effectively updated the value.
  32.115 +          nmethod* nm = CodeCache::find_nmethod(instr_pc);
  32.116 +          assert(nm != NULL, "invalid nmethod_pc");
  32.117 +          RelocIterator mds(nm, copy_buff, copy_buff + 1);
  32.118 +          bool found = false;
  32.119 +          while (mds.next() && !found) {
  32.120 +            if (mds.type() == relocInfo::oop_type) {
  32.121 +              assert(stub_id == Runtime1::load_mirror_patching_id ||
  32.122 +                     stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
  32.123 +              oop_Relocation* r = mds.oop_reloc();
  32.124 +              oop* oop_adr = r->oop_addr();
  32.125 +              *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
  32.126 +              r->fix_oop_relocation();
  32.127 +              found = true;
  32.128 +            } else if (mds.type() == relocInfo::metadata_type) {
  32.129 +              assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
  32.130 +              metadata_Relocation* r = mds.metadata_reloc();
  32.131 +              Metadata** metadata_adr = r->metadata_addr();
  32.132 +              *metadata_adr = load_klass();
  32.133 +              r->fix_metadata_relocation();
  32.134 +              found = true;
  32.135 +            }
  32.136 +          }
  32.137 +          assert(found, "the metadata must exist!");
  32.138 +        }
  32.139 +#endif
  32.140          if (do_patch) {
  32.141            // replace instructions
  32.142            // first replace the tail, then the call
  32.143 @@ -1077,7 +1112,8 @@
  32.144            ICache::invalidate_range(instr_pc, *byte_count);
  32.145            NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);
  32.146  
  32.147 -          if (load_klass_or_mirror_patch_id) {
  32.148 +          if (load_klass_or_mirror_patch_id ||
  32.149 +              stub_id == Runtime1::load_appendix_patching_id) {
  32.150              relocInfo::relocType rtype =
  32.151                (stub_id == Runtime1::load_klass_patching_id) ?
  32.152                                     relocInfo::metadata_type :
  32.153 @@ -1118,7 +1154,8 @@
  32.154  
  32.155    // If we are patching in a non-perm oop, make sure the nmethod
  32.156    // is on the right list.
  32.157 -  if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
  32.158 +  if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
  32.159 +                              (appendix.not_null() && appendix->is_scavengable()))) {
  32.160      MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
  32.161      nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  32.162      guarantee(nm != NULL, "only nmethods can contain non-perm oops");
  32.163 @@ -1179,6 +1216,24 @@
  32.164    return caller_is_deopted();
  32.165  }
  32.166  
  32.167 +int Runtime1::move_appendix_patching(JavaThread* thread) {
  32.168 +//
  32.169 +// NOTE: we are still in Java
  32.170 +//
  32.171 +  Thread* THREAD = thread;
  32.172 +  debug_only(NoHandleMark nhm;)
  32.173 +  {
  32.174 +    // Enter VM mode
  32.175 +
  32.176 +    ResetNoHandleMark rnhm;
  32.177 +    patch_code(thread, load_appendix_patching_id);
  32.178 +  }
  32.179 +  // Back in JAVA, use no oops DON'T safepoint
  32.180 +
  32.181 +  // Return true if calling code is deoptimized
  32.182 +
  32.183 +  return caller_is_deopted();
  32.184 +}
  32.185  //
  32.186  // Entry point for compiled code. We want to patch a nmethod.
  32.187  // We don't do a normal VM transition here because we want to
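
The new Runtime1::move_appendix_patching above follows the same shape as the existing move_klass_patching and move_mirror_patching wrappers: enter the VM just long enough to run patch_code for the matching stub id, then, back on the compiled-code side, report whether the caller frame was deoptimized in the meantime. A minimal standalone sketch of that control flow, with hypothetical names and the NoHandleMark bookkeeping elided:

    #include <cstdio>

    // Toy model of the Runtime1 patching wrappers: perform the patch
    // inside a "VM mode" scope, then tell the compiled caller whether
    // it must be re-entered because it was deoptimized while patching.
    static bool caller_deopted = false;              // stand-in for frame state

    static void patch_code_sketch(const char* stub_id) {
      std::printf("patching call site for %s\n", stub_id);
      caller_deopted = true;                         // pretend patching deopted us
    }

    static int move_appendix_patching_sketch() {
      {
        // "Enter VM mode": handles allowed, safepoints possible.
        patch_code_sketch("load_appendix_patching_id");
      }
      // Back in compiled code: no oops, no safepoints from here on.
      return caller_deopted ? 1 : 0;                 // non-zero => re-execute caller
    }

    int main() { std::printf("deopted=%d\n", move_appendix_patching_sketch()); }
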
    33.1 --- a/src/share/vm/c1/c1_Runtime1.hpp	Fri Sep 06 09:55:38 2013 +0100
    33.2 +++ b/src/share/vm/c1/c1_Runtime1.hpp	Sat Sep 14 20:40:34 2013 +0100
    33.3 @@ -67,6 +67,7 @@
    33.4    stub(access_field_patching)        \
    33.5    stub(load_klass_patching)          \
    33.6    stub(load_mirror_patching)         \
    33.7 +  stub(load_appendix_patching)       \
    33.8    stub(g1_pre_barrier_slow)          \
    33.9    stub(g1_post_barrier_slow)         \
   33.10    stub(fpu2long_stub)                \
   33.11 @@ -160,6 +161,7 @@
   33.12    static int access_field_patching(JavaThread* thread);
   33.13    static int move_klass_patching(JavaThread* thread);
   33.14    static int move_mirror_patching(JavaThread* thread);
   33.15 +  static int move_appendix_patching(JavaThread* thread);
   33.16  
   33.17    static void patch_code(JavaThread* thread, StubID stub_id);
   33.18  
    34.1 --- a/src/share/vm/c1/c1_globals.cpp	Fri Sep 06 09:55:38 2013 +0100
    34.2 +++ b/src/share/vm/c1/c1_globals.cpp	Sat Sep 14 20:40:34 2013 +0100
    34.3 @@ -25,4 +25,4 @@
    34.4  #include "precompiled.hpp"
    34.5  #include "c1/c1_globals.hpp"
    34.6  
    34.7 -C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
    34.8 +C1_FLAGS(MATERIALIZE_DEVELOPER_FLAG, MATERIALIZE_PD_DEVELOPER_FLAG, MATERIALIZE_PRODUCT_FLAG, MATERIALIZE_PD_PRODUCT_FLAG, MATERIALIZE_DIAGNOSTIC_FLAG, MATERIALIZE_NOTPRODUCT_FLAG)
    35.1 --- a/src/share/vm/c1/c1_globals.hpp	Fri Sep 06 09:55:38 2013 +0100
    35.2 +++ b/src/share/vm/c1/c1_globals.hpp	Sat Sep 14 20:40:34 2013 +0100
    35.3 @@ -54,7 +54,7 @@
    35.4  //
    35.5  // Defines all global flags used by the client compiler.
    35.6  //
    35.7 -#define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct)      \
    35.8 +#define C1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
    35.9                                                                              \
   35.10    /* Printing */                                                            \
   35.11    notproduct(bool, PrintC1Statistics, false,                                \
   35.12 @@ -333,15 +333,19 @@
   35.13            "Use CHA and exact type results at call sites when updating MDOs")\
   35.14                                                                              \
   35.15    product(bool, C1UpdateMethodData, trueInTiered,                           \
   35.16 -          "Update MethodData*s in Tier1-generated code")                  \
   35.17 +          "Update MethodData*s in Tier1-generated code")                    \
   35.18                                                                              \
   35.19    develop(bool, PrintCFGToFile, false,                                      \
   35.20            "print control flow graph to a separate file during compilation") \
   35.21                                                                              \
   35.22 +  diagnostic(bool, C1PatchInvokeDynamic, true,                              \
   35.23 +             "Patch invokedynamic appendix not known at compile time")      \
   35.24 +                                                                            \
   35.25 +                                                                            \
   35.26  
   35.27  
   35.28  // Read default values for c1 globals
   35.29  
   35.30 -C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_NOTPRODUCT_FLAG)
   35.31 +C1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_NOTPRODUCT_FLAG)
   35.32  
   35.33  #endif // SHARE_VM_C1_C1_GLOBALS_HPP
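
The C1_FLAGS edits above thread a new diagnostic macro parameter through the flag table. C1_FLAGS is an X-macro, so every expansion site (declaring the flags, materializing their storage) must accept the new parameter in lock-step, which is exactly what the paired c1_globals.cpp and c1_globals.hpp hunks do. A self-contained sketch of the X-macro technique, with made-up flag names rather than HotSpot's:

    #include <cstdio>

    // X-macro flag table: each entry invokes whichever caller-supplied
    // macro matches its kind, so adding a kind (e.g. "diagnostic") means
    // adding one parameter at every expansion site.
    #define MY_FLAGS(product, diagnostic)                            \
      product(bool, UseFastPath, true,  "enable the fast path")      \
      diagnostic(bool, PatchCalls, true, "patch unresolved calls")

    // One expansion declares the flag variables...
    #define DECLARE_FLAG(type, name, value, doc) extern type name;
    MY_FLAGS(DECLARE_FLAG, DECLARE_FLAG)
    #undef DECLARE_FLAG

    // ...another materializes (defines) them with their defaults.
    #define MATERIALIZE_FLAG(type, name, value, doc) type name = value;
    MY_FLAGS(MATERIALIZE_FLAG, MATERIALIZE_FLAG)
    #undef MATERIALIZE_FLAG

    int main() {
      std::printf("UseFastPath=%d PatchCalls=%d\n", UseFastPath, PatchCalls);
    }
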
    36.1 --- a/src/share/vm/ci/ciEnv.cpp	Fri Sep 06 09:55:38 2013 +0100
    36.2 +++ b/src/share/vm/ci/ciEnv.cpp	Sat Sep 14 20:40:34 2013 +0100
    36.3 @@ -1150,6 +1150,10 @@
    36.4    record_method_not_compilable("out of memory");
    36.5  }
    36.6  
    36.7 +ciInstance* ciEnv::unloaded_ciinstance() {
    36.8 +  GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
    36.9 +}
   36.10 +
   36.11  void ciEnv::dump_replay_data(outputStream* out) {
   36.12    VM_ENTRY_MARK;
   36.13    MutexLocker ml(Compile_lock);
    37.1 --- a/src/share/vm/ci/ciEnv.hpp	Fri Sep 06 09:55:38 2013 +0100
    37.2 +++ b/src/share/vm/ci/ciEnv.hpp	Sat Sep 14 20:40:34 2013 +0100
    37.3 @@ -400,6 +400,7 @@
    37.4    static ciInstanceKlass* unloaded_ciinstance_klass() {
    37.5      return _unloaded_ciinstance_klass;
    37.6    }
    37.7 +  ciInstance* unloaded_ciinstance();
    37.8  
    37.9    ciKlass*  find_system_klass(ciSymbol* klass_name);
   37.10    // Note:  To find a class from its name string, use ciSymbol::make,
    38.1 --- a/src/share/vm/ci/ciInstance.cpp	Fri Sep 06 09:55:38 2013 +0100
    38.2 +++ b/src/share/vm/ci/ciInstance.cpp	Sat Sep 14 20:40:34 2013 +0100
    38.3 @@ -60,10 +60,10 @@
    38.4  //
    38.5  // Constant value of a field.
    38.6  ciConstant ciInstance::field_value(ciField* field) {
    38.7 -  assert(is_loaded() &&
    38.8 -         field->holder()->is_loaded() &&
    38.9 -         klass()->is_subclass_of(field->holder()),
   38.10 -         "invalid access");
   38.11 +  assert(is_loaded(), "invalid access - must be loaded");
   38.12 +  assert(field->holder()->is_loaded(), "invalid access - holder must be loaded");
   38.13 +  assert(klass()->is_subclass_of(field->holder()), "invalid access - must be subclass");
   38.14 +
   38.15    VM_ENTRY_MARK;
   38.16    ciConstant result;
   38.17    Handle obj = get_oop();
    39.1 --- a/src/share/vm/ci/ciMethod.hpp	Fri Sep 06 09:55:38 2013 +0100
    39.2 +++ b/src/share/vm/ci/ciMethod.hpp	Sat Sep 14 20:40:34 2013 +0100
    39.3 @@ -177,6 +177,10 @@
    39.4      address bcp = code() + bci;
    39.5      return Bytecodes::java_code_at(NULL, bcp);
    39.6    }
    39.7 +  Bytecodes::Code raw_code_at_bci(int bci) {
    39.8 +    address bcp = code() + bci;
    39.9 +    return Bytecodes::code_at(NULL, bcp);
   39.10 +  }
   39.11    BCEscapeAnalyzer  *get_bcea();
   39.12    ciMethodBlocks    *get_method_blocks();
   39.13  
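
The new ciMethod::raw_code_at_bci mirrors java_code_at_bci but calls Bytecodes::code_at, i.e. it reports the opcode physically stored in the method's bytecode stream, including VM-internal rewritten forms, whereas the java-level accessor maps such forms back to the original opcode. A toy illustration of that raw-versus-java distinction; the opcode values and rewrite mapping here are invented for the example:

    #include <cstdio>

    // Toy raw-vs-java bytecode readers: the VM may rewrite an opcode in
    // place to an internal "fast" form; the java-level reader undoes the
    // mapping, the raw reader reports what is actually stored.
    enum Code { _getfield = 180, _fast_getfield = 210 /* invented value */ };

    static Code raw_code_at(const Code* bcs, int bci)  { return bcs[bci]; }

    static Code java_code_at(const Code* bcs, int bci) {
      Code c = raw_code_at(bcs, bci);
      return (c == _fast_getfield) ? _getfield : c;   // de-rewrite
    }

    int main() {
      Code stream[1] = { _fast_getfield };            // rewritten in place
      std::printf("raw=%d java=%d\n", raw_code_at(stream, 0), java_code_at(stream, 0));
    }
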
    40.1 --- a/src/share/vm/ci/ciObjectFactory.cpp	Fri Sep 06 09:55:38 2013 +0100
    40.2 +++ b/src/share/vm/ci/ciObjectFactory.cpp	Sat Sep 14 20:40:34 2013 +0100
    40.3 @@ -563,7 +563,10 @@
    40.4    return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass());
    40.5  }
    40.6  
    40.7 -
    40.8 +ciInstance* ciObjectFactory::get_unloaded_object_constant() {
    40.9 +  if (ciEnv::_Object_klass == NULL)  return NULL;
   40.10 +  return get_unloaded_instance(ciEnv::_Object_klass->as_instance_klass());
   40.11 +}
   40.12  
   40.13  //------------------------------------------------------------------
   40.14  // ciObjectFactory::get_empty_methodData
    41.1 --- a/src/share/vm/ci/ciObjectFactory.hpp	Fri Sep 06 09:55:38 2013 +0100
    41.2 +++ b/src/share/vm/ci/ciObjectFactory.hpp	Sat Sep 14 20:40:34 2013 +0100
    41.3 @@ -131,6 +131,8 @@
    41.4    ciInstance* get_unloaded_method_type_constant(ciSymbol* signature);
    41.5  
    41.6  
    41.7 +  ciInstance* get_unloaded_object_constant();
    41.8 +
    41.9    // Get the ciMethodData representing the methodData for a method
   41.10    // with none.
   41.11    ciMethodData* get_empty_methodData();
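
ciEnv::unloaded_ciinstance and ciObjectFactory::get_unloaded_object_constant extend the existing family of get_unloaded_* helpers: when the compiler needs a constant whose value is not available yet (here, an invokedynamic appendix that C1 will patch in later), it records a typed, not-yet-loaded stand-in instead of a real value. A rough sketch of that placeholder idea under simplified, hypothetical types:

    #include <cstdio>

    // Sketch of an "unloaded constant" placeholder: consumers must test
    // the loaded state and emit patchable code instead of embedding a value.
    struct ConstantRef {
      const char* klass;    // static type of the constant
      bool        loaded;   // false => value unknown until runtime patching
    };

    static ConstantRef unloaded_object_constant() {
      return ConstantRef{"java/lang/Object", false};
    }

    int main() {
      ConstantRef c = unloaded_object_constant();
      std::printf("%s -> %s\n", c.klass,
                  c.loaded ? "embed value" : "emit patching stub");
    }
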
    42.1 --- a/src/share/vm/classfile/classFileParser.cpp	Fri Sep 06 09:55:38 2013 +0100
    42.2 +++ b/src/share/vm/classfile/classFileParser.cpp	Sat Sep 14 20:40:34 2013 +0100
    42.3 @@ -28,7 +28,6 @@
    42.4  #include "classfile/classLoaderData.hpp"
    42.5  #include "classfile/classLoaderData.inline.hpp"
    42.6  #include "classfile/defaultMethods.hpp"
    42.7 -#include "classfile/genericSignatures.hpp"
    42.8  #include "classfile/javaClasses.hpp"
    42.9  #include "classfile/symbolTable.hpp"
   42.10  #include "classfile/systemDictionary.hpp"
   42.11 @@ -3039,35 +3038,6 @@
   42.12    return annotations;
   42.13  }
   42.14  
   42.15 -
   42.16 -#ifdef ASSERT
   42.17 -static void parseAndPrintGenericSignatures(
   42.18 -    instanceKlassHandle this_klass, TRAPS) {
   42.19 -  assert(ParseAllGenericSignatures == true, "Shouldn't call otherwise");
   42.20 -  ResourceMark rm;
   42.21 -
   42.22 -  if (this_klass->generic_signature() != NULL) {
   42.23 -    using namespace generic;
   42.24 -    ClassDescriptor* spec = ClassDescriptor::parse_generic_signature(this_klass(), CHECK);
   42.25 -
   42.26 -    tty->print_cr("Parsing %s", this_klass->generic_signature()->as_C_string());
   42.27 -    spec->print_on(tty);
   42.28 -
   42.29 -    for (int i = 0; i < this_klass->methods()->length(); ++i) {
   42.30 -      Method* m = this_klass->methods()->at(i);
   42.31 -      MethodDescriptor* method_spec = MethodDescriptor::parse_generic_signature(m, spec);
   42.32 -      Symbol* sig = m->generic_signature();
   42.33 -      if (sig == NULL) {
   42.34 -        sig = m->signature();
   42.35 -      }
   42.36 -      tty->print_cr("Parsing %s", sig->as_C_string());
   42.37 -      method_spec->print_on(tty);
   42.38 -    }
   42.39 -  }
   42.40 -}
   42.41 -#endif // def ASSERT
   42.42 -
   42.43 -
   42.44  instanceKlassHandle ClassFileParser::parse_super_class(int super_class_index,
   42.45                                                         TRAPS) {
   42.46    instanceKlassHandle super_klass;
   42.47 @@ -4060,12 +4030,6 @@
   42.48      java_lang_Class::create_mirror(this_klass, protection_domain, CHECK_(nullHandle));
   42.49  
   42.50  
   42.51 -#ifdef ASSERT
   42.52 -    if (ParseAllGenericSignatures) {
   42.53 -      parseAndPrintGenericSignatures(this_klass, CHECK_(nullHandle));
   42.54 -    }
   42.55 -#endif
   42.56 -
   42.57      // Generate any default methods - default methods are interface methods
   42.58      // that have a default implementation.  This is new with Lambda project.
   42.59      if (has_default_methods && !access_flags.is_interface() &&
    43.1 --- a/src/share/vm/classfile/classLoader.cpp	Fri Sep 06 09:55:38 2013 +0100
    43.2 +++ b/src/share/vm/classfile/classLoader.cpp	Sat Sep 14 20:40:34 2013 +0100
    43.3 @@ -1,5 +1,5 @@
    43.4  /*
    43.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    43.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    43.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    43.8   *
    43.9   * This code is free software; you can redistribute it and/or modify it
   43.10 @@ -197,7 +197,7 @@
   43.11  }
   43.12  
   43.13  
   43.14 -ClassFileStream* ClassPathDirEntry::open_stream(const char* name) {
   43.15 +ClassFileStream* ClassPathDirEntry::open_stream(const char* name, TRAPS) {
   43.16    // construct full path name
   43.17    char path[JVM_MAXPATHLEN];
   43.18    if (jio_snprintf(path, sizeof(path), "%s%s%s", _dir, os::file_separator(), name) == -1) {
   43.19 @@ -240,7 +240,7 @@
   43.20    FREE_C_HEAP_ARRAY(char, _zip_name, mtClass);
   43.21  }
   43.22  
   43.23 -ClassFileStream* ClassPathZipEntry::open_stream(const char* name) {
   43.24 +ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
   43.25    // enable call to C land
   43.26    JavaThread* thread = JavaThread::current();
   43.27    ThreadToNativeFromVM ttn(thread);
   43.28 @@ -284,24 +284,24 @@
   43.29    }
   43.30  }
   43.31  
   43.32 -LazyClassPathEntry::LazyClassPathEntry(char* path, struct stat st) : ClassPathEntry() {
   43.33 +LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() {
   43.34    _path = strdup(path);
   43.35 -  _st = st;
   43.36 +  _st = *st;
   43.37    _meta_index = NULL;
   43.38    _resolved_entry = NULL;
   43.39 +  _has_error = false;
   43.40  }
   43.41  
   43.42  bool LazyClassPathEntry::is_jar_file() {
   43.43    return ((_st.st_mode & S_IFREG) == S_IFREG);
   43.44  }
   43.45  
   43.46 -ClassPathEntry* LazyClassPathEntry::resolve_entry() {
   43.47 +ClassPathEntry* LazyClassPathEntry::resolve_entry(TRAPS) {
   43.48    if (_resolved_entry != NULL) {
   43.49      return (ClassPathEntry*) _resolved_entry;
   43.50    }
   43.51    ClassPathEntry* new_entry = NULL;
   43.52 -  ClassLoader::create_class_path_entry(_path, _st, &new_entry, false);
   43.53 -  assert(new_entry != NULL, "earlier code should have caught this");
   43.54 +  new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, CHECK_NULL);
   43.55    {
   43.56      ThreadCritical tc;
   43.57      if (_resolved_entry == NULL) {
   43.58 @@ -314,12 +314,21 @@
   43.59    return (ClassPathEntry*) _resolved_entry;
   43.60  }
   43.61  
   43.62 -ClassFileStream* LazyClassPathEntry::open_stream(const char* name) {
   43.63 +ClassFileStream* LazyClassPathEntry::open_stream(const char* name, TRAPS) {
   43.64    if (_meta_index != NULL &&
   43.65        !_meta_index->may_contain(name)) {
   43.66      return NULL;
   43.67    }
   43.68 -  return resolve_entry()->open_stream(name);
   43.69 +  if (_has_error) {
   43.70 +    return NULL;
   43.71 +  }
   43.72 +  ClassPathEntry* cpe = resolve_entry(THREAD);
   43.73 +  if (cpe == NULL) {
   43.74 +    _has_error = true;
   43.75 +    return NULL;
   43.76 +  } else {
   43.77 +    return cpe->open_stream(name, THREAD);
   43.78 +  }
   43.79  }
   43.80  
   43.81  bool LazyClassPathEntry::is_lazy() {
   43.82 @@ -465,20 +474,19 @@
   43.83    }
   43.84  }
   43.85  
   43.86 -void ClassLoader::create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy) {
   43.87 +ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {
   43.88    JavaThread* thread = JavaThread::current();
   43.89    if (lazy) {
   43.90 -    *new_entry = new LazyClassPathEntry(path, st);
   43.91 -    return;
   43.92 +    return new LazyClassPathEntry(path, st);
   43.93    }
   43.94 -  if ((st.st_mode & S_IFREG) == S_IFREG) {
   43.95 +  ClassPathEntry* new_entry = NULL;
   43.96 +  if ((st->st_mode & S_IFREG) == S_IFREG) {
   43.97      // Regular file, should be a zip file
   43.98      // Canonicalized filename
   43.99      char canonical_path[JVM_MAXPATHLEN];
  43.100      if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
  43.101        // This matches the classic VM
  43.102 -      EXCEPTION_MARK;
  43.103 -      THROW_MSG(vmSymbols::java_io_IOException(), "Bad pathname");
  43.104 +      THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
  43.105      }
  43.106      char* error_msg = NULL;
  43.107      jzfile* zip;
  43.108 @@ -489,7 +497,7 @@
  43.109        zip = (*ZipOpen)(canonical_path, &error_msg);
  43.110      }
  43.111      if (zip != NULL && error_msg == NULL) {
  43.112 -      *new_entry = new ClassPathZipEntry(zip, path);
  43.113 +      new_entry = new ClassPathZipEntry(zip, path);
  43.114        if (TraceClassLoading) {
  43.115          tty->print_cr("[Opened %s]", path);
  43.116        }
  43.117 @@ -504,16 +512,16 @@
  43.118          msg = NEW_RESOURCE_ARRAY(char, len); ;
  43.119          jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path);
  43.120        }
  43.121 -      EXCEPTION_MARK;
  43.122 -      THROW_MSG(vmSymbols::java_lang_ClassNotFoundException(), msg);
  43.123 +      THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
  43.124      }
  43.125    } else {
  43.126      // Directory
  43.127 -    *new_entry = new ClassPathDirEntry(path);
  43.128 +    new_entry = new ClassPathDirEntry(path);
  43.129      if (TraceClassLoading) {
  43.130        tty->print_cr("[Path %s]", path);
  43.131      }
  43.132    }
  43.133 +  return new_entry;
  43.134  }
  43.135  
  43.136  
  43.137 @@ -572,13 +580,14 @@
  43.138    }
  43.139  }
  43.140  
  43.141 -void ClassLoader::update_class_path_entry_list(const char *path,
  43.142 +void ClassLoader::update_class_path_entry_list(char *path,
  43.143                                                 bool check_for_duplicates) {
  43.144    struct stat st;
  43.145 -  if (os::stat((char *)path, &st) == 0) {
  43.146 +  if (os::stat(path, &st) == 0) {
  43.147      // File or directory found
  43.148      ClassPathEntry* new_entry = NULL;
  43.149 -    create_class_path_entry((char *)path, st, &new_entry, LazyBootClassLoader);
  43.150 +    Thread* THREAD = Thread::current();
  43.151 +    new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, CHECK);
  43.152      // The kernel VM adds dynamically to the end of the classloader path and
  43.153      // doesn't reorder the bootclasspath which would break java.lang.Package
  43.154      // (see PackageInfo).
  43.155 @@ -897,7 +906,7 @@
  43.156                                 PerfClassTraceTime::CLASS_LOAD);
  43.157      ClassPathEntry* e = _first_entry;
  43.158      while (e != NULL) {
  43.159 -      stream = e->open_stream(name);
  43.160 +      stream = e->open_stream(name, CHECK_NULL);
  43.161        if (stream != NULL) {
  43.162          break;
  43.163        }
  43.164 @@ -1257,11 +1266,16 @@
  43.165  }
  43.166  
  43.167  void LazyClassPathEntry::compile_the_world(Handle loader, TRAPS) {
  43.168 -  resolve_entry()->compile_the_world(loader, CHECK);
  43.169 +  ClassPathEntry* cpe = resolve_entry(THREAD);
  43.170 +  if (cpe != NULL) {
  43.171 +    cpe->compile_the_world(loader, CHECK);
  43.172 +  }
  43.173  }
  43.174  
  43.175  bool LazyClassPathEntry::is_rt_jar() {
  43.176 -  return resolve_entry()->is_rt_jar();
  43.177 +  Thread* THREAD = Thread::current();
  43.178 +  ClassPathEntry* cpe = resolve_entry(THREAD);
  43.179 +  return (cpe != NULL) ? cpe->is_jar_file() : false;
  43.180  }
  43.181  
  43.182  void ClassLoader::compile_the_world() {
    44.1 --- a/src/share/vm/classfile/classLoader.hpp	Fri Sep 06 09:55:38 2013 +0100
    44.2 +++ b/src/share/vm/classfile/classLoader.hpp	Sat Sep 14 20:40:34 2013 +0100
    44.3 @@ -1,5 +1,5 @@
    44.4  /*
    44.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    44.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    44.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44.8   *
    44.9   * This code is free software; you can redistribute it and/or modify it
   44.10 @@ -63,7 +63,7 @@
   44.11    ClassPathEntry();
   44.12    // Attempt to locate file_name through this class path entry.
   44.13    // Returns a class file parsing stream if successful.
   44.14 -  virtual ClassFileStream* open_stream(const char* name) = 0;
   44.15 +  virtual ClassFileStream* open_stream(const char* name, TRAPS) = 0;
   44.16    // Debugging
   44.17    NOT_PRODUCT(virtual void compile_the_world(Handle loader, TRAPS) = 0;)
   44.18    NOT_PRODUCT(virtual bool is_rt_jar() = 0;)
   44.19 @@ -77,7 +77,7 @@
   44.20    bool is_jar_file()  { return false;  }
   44.21    const char* name()  { return _dir; }
   44.22    ClassPathDirEntry(char* dir);
   44.23 -  ClassFileStream* open_stream(const char* name);
   44.24 +  ClassFileStream* open_stream(const char* name, TRAPS);
   44.25    // Debugging
   44.26    NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
   44.27    NOT_PRODUCT(bool is_rt_jar();)
   44.28 @@ -107,7 +107,7 @@
   44.29    const char* name()  { return _zip_name; }
   44.30    ClassPathZipEntry(jzfile* zip, const char* zip_name);
   44.31    ~ClassPathZipEntry();
   44.32 -  ClassFileStream* open_stream(const char* name);
   44.33 +  ClassFileStream* open_stream(const char* name, TRAPS);
   44.34    void contents_do(void f(const char* name, void* context), void* context);
   44.35    // Debugging
   44.36    NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
   44.37 @@ -125,13 +125,14 @@
   44.38    char* _path; // dir or file
   44.39    struct stat _st;
   44.40    MetaIndex* _meta_index;
   44.41 +  bool _has_error;
   44.42    volatile ClassPathEntry* _resolved_entry;
   44.43 -  ClassPathEntry* resolve_entry();
   44.44 +  ClassPathEntry* resolve_entry(TRAPS);
   44.45   public:
   44.46    bool is_jar_file();
   44.47    const char* name()  { return _path; }
   44.48 -  LazyClassPathEntry(char* path, struct stat st);
   44.49 -  ClassFileStream* open_stream(const char* name);
   44.50 +  LazyClassPathEntry(char* path, const struct stat* st);
   44.51 +  ClassFileStream* open_stream(const char* name, TRAPS);
   44.52    void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
   44.53    virtual bool is_lazy();
   44.54    // Debugging
   44.55 @@ -207,14 +208,15 @@
   44.56    static void setup_meta_index();
   44.57    static void setup_bootstrap_search_path();
   44.58    static void load_zip_library();
   44.59 -  static void create_class_path_entry(char *path, struct stat st, ClassPathEntry **new_entry, bool lazy);
   44.60 +  static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st,
   44.61 +                                                 bool lazy, TRAPS);
   44.62  
   44.63    // Canonicalizes path names, so strcmp will work properly. This is mainly
   44.64    // to avoid confusing the zip library
   44.65    static bool get_canonical_path(char* orig, char* out, int len);
   44.66   public:
   44.67    // Used by the kernel jvm.
   44.68 -  static void update_class_path_entry_list(const char *path,
   44.69 +  static void update_class_path_entry_list(char *path,
   44.70                                             bool check_for_duplicates);
   44.71    static void print_bootclasspath();
   44.72  
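
Most of the classLoader.cpp/hpp churn above is threading TRAPS through open_stream, resolve_entry and create_class_path_entry so that exceptions raised while lazily resolving a boot class path entry propagate to the caller (note the EXCEPTION_MARK/THROW_MSG pairs replaced by THROW_MSG_ with a return value). In HotSpot, TRAPS adds a trailing Thread* THREAD parameter and the CHECK/CHECK_NULL macros test for a pending exception and return early. A deliberately simplified, self-contained imitation of that convention, not HotSpot's actual macros:

    #include <cstdio>

    // Toy TRAPS/CHECK convention: callees receive the current thread,
    // callers unwind immediately when an exception is pending.
    struct Thread { const char* pending_exception = nullptr; };

    #define TRAPS Thread* THREAD
    // Expands at a call site "f(x, CHECK_NULL)" into: pass THREAD, then
    // return nullptr if the callee left an exception pending.
    #define CHECK_NULL THREAD); if (THREAD->pending_exception) return nullptr; ((void)0

    static const char* open_stream(bool ok, TRAPS) {
      if (!ok) { THREAD->pending_exception = "ClassNotFoundException"; return nullptr; }
      return "stream";
    }

    static const char* resolve_and_open(bool ok, TRAPS) {
      const char* s = open_stream(ok, CHECK_NULL);   // unwinds on failure
      return s;
    }

    int main() {
      Thread t;
      const char* s = resolve_and_open(false, &t);
      std::printf("%s\n", s ? s : t.pending_exception);
    }
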
    45.1 --- a/src/share/vm/classfile/defaultMethods.cpp	Fri Sep 06 09:55:38 2013 +0100
    45.2 +++ b/src/share/vm/classfile/defaultMethods.cpp	Sat Sep 14 20:40:34 2013 +0100
    45.3 @@ -25,7 +25,6 @@
    45.4  #include "precompiled.hpp"
    45.5  #include "classfile/bytecodeAssembler.hpp"
    45.6  #include "classfile/defaultMethods.hpp"
    45.7 -#include "classfile/genericSignatures.hpp"
    45.8  #include "classfile/symbolTable.hpp"
    45.9  #include "memory/allocation.hpp"
   45.10  #include "memory/metadataFactory.hpp"
   45.11 @@ -75,14 +74,6 @@
   45.12    }
   45.13  };
   45.14  
   45.15 -class ContextMark : public PseudoScopeMark {
   45.16 - private:
   45.17 -  generic::Context::Mark _mark;
   45.18 - public:
   45.19 -  ContextMark(const generic::Context::Mark& cm) : _mark(cm) {}
   45.20 -  virtual void destroy() { _mark.destroy(); }
   45.21 -};
   45.22 -
   45.23  #ifndef PRODUCT
   45.24  static void print_slot(outputStream* str, Symbol* name, Symbol* signature) {
   45.25    ResourceMark rm;
   45.26 @@ -503,38 +494,6 @@
   45.27    return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
   45.28  }
   45.29  
   45.30 -// A generic method family contains a set of all methods that implement a single
   45.31 -// language-level method.  Because of erasure, these methods may have different
   45.32 -// signatures.  As members of the set are collected while walking over the
   45.33 -// hierarchy, they are tagged with a qualification state.  The qualification
   45.34 -// state for an erased method is set to disqualified if there exists a path
   45.35 -// from the root of hierarchy to the method that contains an interleaving
   45.36 -// language-equivalent method defined in an interface.
   45.37 -class GenericMethodFamily : public MethodFamily {
   45.38 - private:
   45.39 -
   45.40 -  generic::MethodDescriptor* _descriptor; // language-level description
   45.41 -
   45.42 - public:
   45.43 -
   45.44 -  GenericMethodFamily(generic::MethodDescriptor* canonical_desc)
   45.45 -      : _descriptor(canonical_desc) {}
   45.46 -
   45.47 -  generic::MethodDescriptor* descriptor() const { return _descriptor; }
   45.48 -
   45.49 -  bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
   45.50 -    return descriptor()->covariant_match(md, ctx);
   45.51 -  }
   45.52 -
   45.53 -#ifndef PRODUCT
   45.54 -  Symbol* get_generic_sig() const {
   45.55 -
   45.56 -    generic::Context ctx(NULL); // empty, as _descriptor already canonicalized
   45.57 -    TempNewSymbol sig = descriptor()->reify_signature(&ctx, Thread::current());
   45.58 -    return sig;
   45.59 -  }
   45.60 -#endif // ndef PRODUCT
   45.61 -};
   45.62  
   45.63  class StateRestorer;
   45.64  
   45.65 @@ -571,26 +530,6 @@
   45.66    StateRestorer* record_method_and_dq_further(Method* mo);
   45.67  };
   45.68  
   45.69 -
   45.70 -// StatefulGenericMethodFamily is a wrapper around GenericMethodFamily that maintains the
   45.71 -// qualification state during hierarchy visitation, and applies that state
   45.72 -// when adding members to the GenericMethodFamily.
   45.73 -class StatefulGenericMethodFamily : public StatefulMethodFamily {
   45.74 -
   45.75 - public:
   45.76 -  StatefulGenericMethodFamily(generic::MethodDescriptor* md, generic::Context* ctx)
   45.77 -  : StatefulMethodFamily(new GenericMethodFamily(md->canonicalize(ctx))) {
   45.78 -
   45.79 -  }
   45.80 -  GenericMethodFamily* get_method_family() {
   45.81 -    return (GenericMethodFamily*)_method_family;
   45.82 -  }
   45.83 -
   45.84 -  bool descriptor_matches(generic::MethodDescriptor* md, generic::Context* ctx) {
   45.85 -    return get_method_family()->descriptor_matches(md, ctx);
   45.86 -  }
   45.87 -};
   45.88 -
   45.89  class StateRestorer : public PseudoScopeMark {
   45.90   private:
   45.91    StatefulMethodFamily* _method;
   45.92 @@ -616,39 +555,6 @@
   45.93    return mark;
   45.94  }
   45.95  
   45.96 -class StatefulGenericMethodFamilies : public ResourceObj {
   45.97 - private:
   45.98 -  GrowableArray<StatefulGenericMethodFamily*> _methods;
   45.99 -
  45.100 - public:
  45.101 -  StatefulGenericMethodFamily* find_matching(
  45.102 -      generic::MethodDescriptor* md, generic::Context* ctx) {
  45.103 -    for (int i = 0; i < _methods.length(); ++i) {
  45.104 -      StatefulGenericMethodFamily* existing = _methods.at(i);
  45.105 -      if (existing->descriptor_matches(md, ctx)) {
  45.106 -        return existing;
  45.107 -      }
  45.108 -    }
  45.109 -    return NULL;
  45.110 -  }
  45.111 -
  45.112 -  StatefulGenericMethodFamily* find_matching_or_create(
  45.113 -      generic::MethodDescriptor* md, generic::Context* ctx) {
  45.114 -    StatefulGenericMethodFamily* method = find_matching(md, ctx);
  45.115 -    if (method == NULL) {
  45.116 -      method = new StatefulGenericMethodFamily(md, ctx);
  45.117 -      _methods.append(method);
  45.118 -    }
  45.119 -    return method;
  45.120 -  }
  45.121 -
  45.122 -  void extract_families_into(GrowableArray<GenericMethodFamily*>* array) {
  45.123 -    for (int i = 0; i < _methods.length(); ++i) {
  45.124 -      array->append(_methods.at(i)->get_method_family());
  45.125 -    }
  45.126 -  }
  45.127 -};
  45.128 -
  45.129  // Represents a location corresponding to a vtable slot for methods that
  45.130  // neither the class nor any of its ancestors provide an implementation.
  45.131  // Default methods may be present to fill this slot.
  45.132 @@ -779,146 +685,11 @@
  45.133  
  45.134  };
  45.135  
  45.136 -// Iterates over the type hierarchy looking for all methods with a specific
  45.137 -// method name.  The result of this is a set of method families each of
  45.138 -// which is populated with a set of methods that implement the same
  45.139 -// language-level signature.
  45.140 -class FindMethodsByGenericSig : public HierarchyVisitor<FindMethodsByGenericSig> {
  45.141 - private:
  45.142 -  // Context data
  45.143 -  Thread* THREAD;
  45.144 -  generic::DescriptorCache* _cache;
  45.145 -  Symbol* _method_name;
  45.146 -  generic::Context* _ctx;
  45.147 -  StatefulGenericMethodFamilies _families;
  45.148  
  45.149 - public:
  45.150 -
  45.151 -  FindMethodsByGenericSig(generic::DescriptorCache* cache, Symbol* name,
  45.152 -      generic::Context* ctx, Thread* thread) :
  45.153 -    _cache(cache), _method_name(name), _ctx(ctx), THREAD(thread) {}
  45.154 -
  45.155 -  void get_discovered_families(GrowableArray<GenericMethodFamily*>* methods) {
  45.156 -    _families.extract_families_into(methods);
  45.157 -  }
  45.158 -
  45.159 -  void* new_node_data(InstanceKlass* cls) { return new PseudoScope(); }
  45.160 -  void free_node_data(void* node_data) {
  45.161 -    PseudoScope::cast(node_data)->destroy();
  45.162 -  }
  45.163 -
  45.164 -  bool visit() {
  45.165 -    PseudoScope* scope = PseudoScope::cast(current_data());
  45.166 -    InstanceKlass* klass = current_class();
  45.167 -    InstanceKlass* sub = current_depth() > 0 ? class_at_depth(1) : NULL;
  45.168 -
  45.169 -    ContextMark* cm = new ContextMark(_ctx->mark());
  45.170 -    scope->add_mark(cm); // will restore context when scope is freed
  45.171 -
  45.172 -    _ctx->apply_type_arguments(sub, klass, THREAD);
  45.173 -
  45.174 -    int start, end = 0;
  45.175 -    start = klass->find_method_by_name(_method_name, &end);
  45.176 -    if (start != -1) {
  45.177 -      for (int i = start; i < end; ++i) {
  45.178 -        Method* m = klass->methods()->at(i);
  45.179 -        // This gets the method's parameter list with its generic type
  45.180 -        // parameters resolved
  45.181 -        generic::MethodDescriptor* md = _cache->descriptor_for(m, THREAD);
  45.182 -
  45.183 -        // Find all methods on this hierarchy that match this method
  45.184 -        // (name, signature).   This class collects other families of this
  45.185 -        // method name.
  45.186 -        StatefulGenericMethodFamily* family =
  45.187 -            _families.find_matching_or_create(md, _ctx);
  45.188 -
  45.189 -        if (klass->is_interface()) {
  45.190 -          // ???
  45.191 -          StateRestorer* restorer = family->record_method_and_dq_further(m);
  45.192 -          scope->add_mark(restorer);
  45.193 -        } else {
  45.194 -          // This is the rule that methods in classes "win" (bad word) over
  45.195 -          // methods in interfaces.  This works because of single inheritance
  45.196 -          family->set_target_if_empty(m);
  45.197 -        }
  45.198 -      }
  45.199 -    }
  45.200 -    return true;
  45.201 -  }
  45.202 -};
  45.203 -
  45.204 -#ifndef PRODUCT
  45.205 -static void print_generic_families(
  45.206 -    GrowableArray<GenericMethodFamily*>* methods, Symbol* match) {
  45.207 -  streamIndentor si(tty, 4);
  45.208 -  if (methods->length() == 0) {
  45.209 -    tty->indent();
  45.210 -    tty->print_cr("No Logical Method found");
  45.211 -  }
  45.212 -  for (int i = 0; i < methods->length(); ++i) {
  45.213 -    tty->indent();
  45.214 -    GenericMethodFamily* lm = methods->at(i);
  45.215 -    if (lm->contains_signature(match)) {
  45.216 -      tty->print_cr("<Matching>");
  45.217 -    } else {
  45.218 -      tty->print_cr("<Non-Matching>");
  45.219 -    }
  45.220 -    lm->print_sig_on(tty, lm->get_generic_sig(), 1);
  45.221 -  }
  45.222 -}
  45.223 -#endif // ndef PRODUCT
  45.224  
  45.225  static void create_overpasses(
  45.226      GrowableArray<EmptyVtableSlot*>* slots, InstanceKlass* klass, TRAPS);
  45.227  
  45.228 -static void generate_generic_defaults(
  45.229 -      InstanceKlass* klass, GrowableArray<EmptyVtableSlot*>* empty_slots,
  45.230 -      EmptyVtableSlot* slot, int current_slot_index, TRAPS) {
  45.231 -
  45.232 -  if (slot->is_bound()) {
  45.233 -#ifndef PRODUCT
  45.234 -    if (TraceDefaultMethods) {
  45.235 -      streamIndentor si(tty, 4);
  45.236 -      tty->indent().print_cr("Already bound to logical method:");
  45.237 -      GenericMethodFamily* lm = (GenericMethodFamily*)(slot->get_binding());
  45.238 -      lm->print_sig_on(tty, lm->get_generic_sig(), 1);
  45.239 -    }
  45.240 -#endif // ndef PRODUCT
  45.241 -    return; // covered by previous processing
  45.242 -  }
  45.243 -
  45.244 -  generic::DescriptorCache cache;
  45.245 -
  45.246 -  generic::Context ctx(&cache);
  45.247 -  FindMethodsByGenericSig visitor(&cache, slot->name(), &ctx, CHECK);
  45.248 -  visitor.run(klass);
  45.249 -
  45.250 -  GrowableArray<GenericMethodFamily*> discovered_families;
  45.251 -  visitor.get_discovered_families(&discovered_families);
  45.252 -
  45.253 -#ifndef PRODUCT
  45.254 -  if (TraceDefaultMethods) {
  45.255 -    print_generic_families(&discovered_families, slot->signature());
  45.256 -  }
  45.257 -#endif // ndef PRODUCT
  45.258 -
  45.259 -  // Find and populate any other slots that match the discovered families
  45.260 -  for (int j = current_slot_index; j < empty_slots->length(); ++j) {
  45.261 -    EmptyVtableSlot* open_slot = empty_slots->at(j);
  45.262 -
  45.263 -    if (slot->name() == open_slot->name()) {
  45.264 -      for (int k = 0; k < discovered_families.length(); ++k) {
  45.265 -        GenericMethodFamily* lm = discovered_families.at(k);
  45.266 -
  45.267 -        if (lm->contains_signature(open_slot->signature())) {
  45.268 -          lm->determine_target(klass, CHECK);
  45.269 -          open_slot->bind_family(lm);
  45.270 -        }
  45.271 -      }
  45.272 -    }
  45.273 -  }
  45.274 -}
  45.275 -
  45.276  static void generate_erased_defaults(
  45.277       InstanceKlass* klass, GrowableArray<EmptyVtableSlot*>* empty_slots,
  45.278       EmptyVtableSlot* slot, TRAPS) {
  45.279 @@ -943,21 +714,14 @@
  45.280  //
  45.281  // First it finds any name/signature slots that need any implementation (either
  45.282  // because they are miranda or a superclass's implementation is an overpass
  45.283 -// itself).  For each slot, iterate over the hierarchy, using generic signature
  45.284 -// information to partition any methods that match the name into method families
  45.285 -// where each family contains methods whose signatures are equivalent at the
  45.286 -// language level (i.e., their reified parameters match and return values are
  45.287 -// covariant). Check those sets to see if they contain a signature that matches
  45.288 -// the slot we're looking at (if we're lucky, there might be other empty slots
  45.289 -// that we can fill using the same analysis).
  45.290 +// itself).  For each slot, iterate over the hierarchy to see if it contains a
  45.291 +// signature that matches the slot we are looking at.
  45.292  //
  45.293  // For each slot filled, we generate an overpass method that either calls the
  45.294  // unique default method candidate using invokespecial, or throws an exception
  45.295  // (in the case of no default method candidates, or more than one valid
  45.296 -// candidate).  These methods are then added to the class's method list.  If
  45.297 -// the method set we're using contains methods (qualified or not) with a
  45.298 -// different runtime signature than the method we're creating, then we have to
  45.299 -// create bridges with those signatures too.
  45.300 +// candidate).  These methods are then added to the class's method list.
  45.301 +// The JVM does not create bridges or handle generic signatures here.
  45.302  void DefaultMethods::generate_default_methods(
  45.303      InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {
  45.304  
  45.305 @@ -997,11 +761,7 @@
  45.306      }
  45.307  #endif // ndef PRODUCT
  45.308  
  45.309 -    if (ParseGenericDefaults) {
  45.310 -      generate_generic_defaults(klass, empty_slots, slot, i, CHECK);
  45.311 -    } else {
  45.312 -      generate_erased_defaults(klass, empty_slots, slot, CHECK);
  45.313 -    }
  45.314 +    generate_erased_defaults(klass, empty_slots, slot, CHECK);
  45.315   }
  45.316  #ifndef PRODUCT
  45.317    if (TraceDefaultMethods) {
  45.318 @@ -1019,13 +779,13 @@
  45.319  }
  45.320  
  45.321  /**
  45.322 - * Generic analysis was used upon interface '_target' and found a unique
  45.323 - * default method candidate with generic signature '_method_desc'.  This
  45.324 + * Interface inheritance rules were used to find a unique default method
  45.325 + * candidate for the resolved class. This
  45.326   * method is only viable if it would also be in the set of default method
  45.327   * candidates if we ran a full analysis on the current class.
  45.328   *
  45.329   * The only reason that the method would not be in the set of candidates for
  45.330 - * the current class is if that there's another covariantly matching method
  45.331 + * the current class is that there's another matching method
  45.332   * which is "more specific" than the found method -- i.e., one could find a
  45.333   * path in the interface hierarchy in which the matching method appears
  45.334   * before we get to '_target'.
  45.335 @@ -1110,49 +870,6 @@
  45.336      : ShadowChecker(thread, name, holder, target) {}
  45.337  };
  45.338  
  45.339 -class GenericShadowChecker : public ShadowChecker {
  45.340 - private:
  45.341 -  generic::DescriptorCache* _cache;
  45.342 -  generic::MethodDescriptor* _method_desc;
  45.343 -
  45.344 -  bool path_has_shadow() {
  45.345 -    generic::Context ctx(_cache);
  45.346 -
  45.347 -    for (int i = current_depth() - 1; i > 0; --i) {
  45.348 -      InstanceKlass* ik = class_at_depth(i);
  45.349 -      InstanceKlass* sub = class_at_depth(i + 1);
  45.350 -      ctx.apply_type_arguments(sub, ik, THREAD);
  45.351 -
  45.352 -      if (ik->is_interface()) {
  45.353 -        int end;
  45.354 -        int start = ik->find_method_by_name(_method_name, &end);
  45.355 -        if (start != -1) {
  45.356 -          for (int j = start; j < end; ++j) {
  45.357 -            Method* mo = ik->methods()->at(j);
  45.358 -            generic::MethodDescriptor* md = _cache->descriptor_for(mo, THREAD);
  45.359 -            if (_method_desc->covariant_match(md, &ctx)) {
  45.360 -              return true;
  45.361 -            }
  45.362 -          }
  45.363 -        }
  45.364 -      }
  45.365 -    }
  45.366 -    return false;
  45.367 -  }
  45.368 -
  45.369 - public:
  45.370 -
  45.371 -  GenericShadowChecker(generic::DescriptorCache* cache, Thread* thread,
  45.372 -      Symbol* name, InstanceKlass* holder, generic::MethodDescriptor* desc,
  45.373 -      InstanceKlass* target)
  45.374 -    : ShadowChecker(thread, name, holder, target) {
  45.375 -      _cache = cache;
  45.376 -      _method_desc = desc;
  45.377 - }
  45.378 -};
  45.379 -
  45.380 -
  45.381 -
  45.382  // Find the unique qualified candidate from the perspective of the super_class
  45.383  // which is the resolved_klass, which must be an immediate superinterface
  45.384  // of klass
  45.385 @@ -1166,103 +883,48 @@
  45.386  
  45.387    if (family != NULL) {
  45.388      family->determine_target(current_class, CHECK_NULL);  // get target from current_class
  45.389 -  }
  45.390  
  45.391 -  if (family->has_target()) {
  45.392 -    Method* target = family->get_selected_target();
  45.393 -    InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
  45.394 +    if (family->has_target()) {
  45.395 +      Method* target = family->get_selected_target();
  45.396 +      InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
  45.397  
  45.398 -    // Verify that the identified method is valid from the context of
  45.399 -    // the current class, which is the caller class for invokespecial
  45.400 -    // link resolution, i.e. ensure there it is not shadowed.
  45.401 -    // You can use invokespecial to disambiguate interface methods, but
  45.402 -    // you can not use it to skip over an interface method that would shadow it.
  45.403 -    ErasedShadowChecker checker(THREAD, target->name(), holder, super_class);
  45.404 -    checker.run(current_class);
  45.405 +      // Verify that the identified method is valid from the context of
  45.406 +      // the current class, which is the caller class for invokespecial
  45.407 +      // link resolution, i.e. ensure that it is not shadowed.
  45.408 +      // You can use invokespecial to disambiguate interface methods, but
  45.409 +      // you cannot use it to skip over an interface method that would shadow it.
  45.410 +      ErasedShadowChecker checker(THREAD, target->name(), holder, super_class);
  45.411 +      checker.run(current_class);
  45.412  
  45.413 -    if (checker.found_shadow()) {
  45.414 +      if (checker.found_shadow()) {
  45.415  #ifndef PRODUCT
  45.416 -      if (TraceDefaultMethods) {
  45.417 -        tty->print_cr("    Only candidate found was shadowed.");
  45.418 +        if (TraceDefaultMethods) {
  45.419 +          tty->print_cr("    Only candidate found was shadowed.");
  45.420 +        }
  45.421 +#endif // ndef PRODUCT
  45.422 +        THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
  45.423 +                   "Accessible default method not found", NULL);
  45.424 +      } else {
  45.425 +#ifndef PRODUCT
  45.426 +        if (TraceDefaultMethods) {
  45.427 +          family->print_sig_on(tty, target->signature(), 1);
  45.428 +        }
  45.429 +#endif // ndef PRODUCT
  45.430 +        return target;
  45.431        }
  45.432 -#endif // ndef PRODUCT
  45.433 +    } else {
  45.434 +      assert(family->throws_exception(), "must have target or throw");
  45.435        THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
  45.436 -                 "Accessible default method not found", NULL);
  45.437 -    } else {
  45.438 -#ifndef PRODUCT
  45.439 -      if (TraceDefaultMethods) {
  45.440 -        family->print_sig_on(tty, target->signature(), 1);
  45.441 -      }
  45.442 -#endif // ndef PRODUCT
  45.443 -      return target;
  45.444 -    }
  45.445 +                 family->get_exception_message()->as_C_string(), NULL);
  45.446 +    }
  45.447    } else {
  45.448 -    assert(family->throws_exception(), "must have target or throw");
  45.449 -    THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
  45.450 -               family->get_exception_message()->as_C_string(), NULL);
  45.451 +    // no method found
  45.452 +    ResourceMark rm(THREAD);
  45.453 +    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodError(),
  45.454 +              Method::name_and_sig_as_C_string(current_class,
  45.455 +                                               method_name, sig), NULL);
  45.456    }
  45.457  }
  45.458 -
  45.459 -// super_class is assumed to be the direct super of current_class
  45.460 -Method* find_generic_super_default( InstanceKlass* current_class,
  45.461 -                                    InstanceKlass* super_class,
  45.462 -                                    Symbol* method_name, Symbol* sig, TRAPS) {
  45.463 -  generic::DescriptorCache cache;
  45.464 -  generic::Context ctx(&cache);
  45.465 -
  45.466 -  // Prime the initial generic context for current -> super_class
  45.467 -  ctx.apply_type_arguments(current_class, super_class, CHECK_NULL);
  45.468 -
  45.469 -  FindMethodsByGenericSig visitor(&cache, method_name, &ctx, CHECK_NULL);
  45.470 -  visitor.run(super_class);
  45.471 -
  45.472 -  GrowableArray<GenericMethodFamily*> families;
  45.473 -  visitor.get_discovered_families(&families);
  45.474 -
  45.475 -#ifndef PRODUCT
  45.476 -  if (TraceDefaultMethods) {
  45.477 -    print_generic_families(&families, sig);
  45.478 -  }
  45.479 -#endif // ndef PRODUCT
  45.480 -
  45.481 -  GenericMethodFamily* selected_family = NULL;
  45.482 -
  45.483 -  for (int i = 0; i < families.length(); ++i) {
  45.484 -    GenericMethodFamily* lm = families.at(i);
  45.485 -    if (lm->contains_signature(sig)) {
  45.486 -      lm->determine_target(current_class, CHECK_NULL);
  45.487 -      selected_family = lm;
  45.488 -    }
  45.489 -  }
  45.490 -
  45.491 -  if (selected_family->has_target()) {
  45.492 -    Method* target = selected_family->get_selected_target();
  45.493 -    InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
  45.494 -
  45.495 -    // Verify that the identified method is valid from the context of
  45.496 -    // the current class
  45.497 -    GenericShadowChecker checker(&cache, THREAD, target->name(),
  45.498 -        holder, selected_family->descriptor(), super_class);
  45.499 -    checker.run(current_class);
  45.500 -
  45.501 -    if (checker.found_shadow()) {
  45.502 -#ifndef PRODUCT
  45.503 -      if (TraceDefaultMethods) {
  45.504 -        tty->print_cr("    Only candidate found was shadowed.");
  45.505 -      }
  45.506 -#endif // ndef PRODUCT
  45.507 -      THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
  45.508 -                 "Accessible default method not found", NULL);
  45.509 -    } else {
  45.510 -      return target;
  45.511 -    }
  45.512 -  } else {
  45.513 -    assert(selected_family->throws_exception(), "must have target or throw");
  45.514 -    THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
  45.515 -               selected_family->get_exception_message()->as_C_string(), NULL);
  45.516 -  }
  45.517 -}
  45.518 -
  45.519  // This is called during linktime when we find an invokespecial call that
  45.520  // refers to a direct superinterface.  It indicates that we should find the
  45.521  // default method in the hierarchy of that superinterface, and if that method
  45.522 @@ -1296,13 +958,8 @@
  45.523    assert(super_class->is_interface(), "only call for default methods");
  45.524  
  45.525    Method* target = NULL;
  45.526 -  if (ParseGenericDefaults) {
  45.527 -    target = find_generic_super_default(current_class, super_class,
  45.528 -                                        method_name, sig, CHECK_NULL);
  45.529 -  } else {
  45.530 -    target = find_erased_super_default(current_class, super_class,
  45.531 -                                       method_name, sig, CHECK_NULL);
  45.532 -  }
  45.533 +  target = find_erased_super_default(current_class, super_class,
  45.534 +                                     method_name, sig, CHECK_NULL);
  45.535  
  45.536  #ifndef PRODUCT
  45.537    if (target != NULL) {
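
The net effect of the defaultMethods.cpp hunks is that default-method resolution now works purely on erased (name, signature) pairs: FindMethodsByGenericSig, GenericMethodFamily and their helpers are removed, leaving generate_erased_defaults and find_erased_super_default as the only paths. A small self-contained sketch of erased matching, grouping hierarchy-walk results into method families by exact erased key (example data invented):

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    int main() {
      // (holder, name, erased signature) triples collected while walking
      // the interface hierarchy -- invented example data.
      struct M { std::string holder, name, sig; };
      std::vector<M> found = {
        {"I1", "m", "(Ljava/lang/Object;)V"},
        {"I2", "m", "(Ljava/lang/Object;)V"},   // same erased family as I1
        {"I3", "m", "(Ljava/lang/String;)V"},   // distinct family
      };

      // One family per exact erased key; no generic-signature parsing.
      std::map<std::string, std::vector<std::string>> families;
      for (const M& m : found)
        families[m.name + m.sig].push_back(m.holder);

      for (const auto& f : families)
        std::printf("%s -> %zu candidate(s)\n", f.first.c_str(), f.second.size());
    }
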
    46.1 --- a/src/share/vm/classfile/genericSignatures.cpp	Fri Sep 06 09:55:38 2013 +0100
    46.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    46.3 @@ -1,1279 +0,0 @@
    46.4 -/*
    46.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    46.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    46.7 - *
    46.8 - * This code is free software; you can redistribute it and/or modify it
    46.9 - * under the terms of the GNU General Public License version 2 only, as
   46.10 - * published by the Free Software Foundation.
   46.11 - *
   46.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   46.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   46.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   46.15 - * version 2 for more details (a copy is included in the LICENSE file that
   46.16 - * accompanied this code).
   46.17 - *
   46.18 - * You should have received a copy of the GNU General Public License version
   46.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   46.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   46.21 - *
   46.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   46.23 - * or visit www.oracle.com if you need additional information or have any
   46.24 - * questions.
   46.25 - *
   46.26 - */
   46.27 -
   46.28 -#include "precompiled.hpp"
   46.29 -
   46.30 -#include "classfile/genericSignatures.hpp"
   46.31 -#include "classfile/symbolTable.hpp"
   46.32 -#include "classfile/systemDictionary.hpp"
   46.33 -#include "memory/resourceArea.hpp"
   46.34 -
   46.35 -namespace generic {
   46.36 -
   46.37 -// Helper class for parsing the generic signature Symbol in klass and methods
   46.38 -class DescriptorStream : public ResourceObj {
   46.39 - private:
   46.40 -  Symbol* _symbol;
   46.41 -  int _offset;
   46.42 -  int _mark;
   46.43 -  const char* _parse_error;
   46.44 -
   46.45 -  void set_parse_error(const char* error) {
   46.46 -    assert(error != NULL, "Can't set NULL error string");
   46.47 -    _parse_error = error;
   46.48 -  }
   46.49 -
   46.50 - public:
   46.51 -  DescriptorStream(Symbol* sym)
   46.52 -      : _symbol(sym), _offset(0), _mark(-1), _parse_error(NULL) {}
   46.53 -
   46.54 -  const char* parse_error() const {
   46.55 -    return _parse_error;
   46.56 -  }
   46.57 -
   46.58 -  bool at_end() { return _offset >= _symbol->utf8_length(); }
   46.59 -
   46.60 -  char peek() {
   46.61 -    if (at_end()) {
   46.62 -      set_parse_error("Peeking past end of signature");
   46.63 -      return '\0';
   46.64 -    } else {
   46.65 -      return _symbol->byte_at(_offset);
   46.66 -    }
   46.67 -  }
   46.68 -
   46.69 -  char read() {
   46.70 -    if (at_end()) {
   46.71 -      set_parse_error("Reading past end of signature");
   46.72 -      return '\0';
   46.73 -    } else {
   46.74 -      return _symbol->byte_at(_offset++);
   46.75 -    }
   46.76 -  }
   46.77 -
   46.78 -  void read(char expected) {
   46.79 -    char c = read();
   46.80 -    assert_char(c, expected, 0);
   46.81 -  }
   46.82 -
   46.83 -  void assert_char(char c, char expected, int pos = -1) {
   46.84 -    if (c != expected) {
   46.85 -      const char* fmt = "Parse error at %d: expected %c but got %c";
   46.86 -      size_t len = strlen(fmt) + 5;
   46.87 -      char* buffer = NEW_RESOURCE_ARRAY(char, len);
   46.88 -      jio_snprintf(buffer, len, fmt, _offset + pos, expected, c);
   46.89 -      set_parse_error(buffer);
   46.90 -    }
   46.91 -  }
   46.92 -
   46.93 -  void push(char c) {
   46.94 -    assert(c == _symbol->byte_at(_offset - 1), "Pushing back wrong value");
   46.95 -    --_offset;
   46.96 -  }
   46.97 -
   46.98 -  void expect_end() {
   46.99 -    if (!at_end()) {
  46.100 -      set_parse_error("Unexpected data trailing signature");
  46.101 -    }
  46.102 -  }
  46.103 -
  46.104 -  bool has_mark() { return _mark != -1; }
  46.105 -
  46.106 -  void set_mark() {
  46.107 -    _mark = _offset;
  46.108 -  }
  46.109 -
  46.110 -  Identifier* identifier_from_mark() {
  46.111 -    assert(has_mark(), "Mark should be set");
  46.112 -    if (!has_mark()) {
  46.113 -      set_parse_error("Expected mark to be set");
  46.114 -      return NULL;
  46.115 -    } else {
  46.116 -      Identifier* id = new Identifier(_symbol, _mark, _offset - 1);
  46.117 -      _mark = -1;
  46.118 -      return id;
  46.119 -    }
  46.120 -  }
  46.121 -};
  46.122 -
  46.123 -
  46.124 -#define CHECK_FOR_PARSE_ERROR()         \
  46.125 -  if (STREAM->parse_error() != NULL) {   \
  46.126 -    if (VerifyGenericSignatures) {      \
  46.127 -      fatal(STREAM->parse_error());      \
  46.128 -    }                                   \
  46.129 -    return NULL;                        \
  46.130 -  } (void)0
  46.131 -
  46.132 -#define READ() STREAM->read(); CHECK_FOR_PARSE_ERROR()
  46.133 -#define PEEK() STREAM->peek(); CHECK_FOR_PARSE_ERROR()
  46.134 -#define PUSH(c) STREAM->push(c)
  46.135 -#define EXPECT(c) STREAM->read(c); CHECK_FOR_PARSE_ERROR()
  46.136 -#define EXPECTED(c, ch) STREAM->assert_char(c, ch); CHECK_FOR_PARSE_ERROR()
  46.137 -#define EXPECT_END() STREAM->expect_end(); CHECK_FOR_PARSE_ERROR()
  46.138 -
  46.139 -#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); ((void)0
  46.140 -
  46.141 -#ifndef PRODUCT
  46.142 -void Identifier::print_on(outputStream* str) const {
  46.143 -  for (int i = _begin; i < _end; ++i) {
  46.144 -    str->print("%c", (char)_sym->byte_at(i));
  46.145 -  }
  46.146 -}
  46.147 -#endif // ndef PRODUCT
  46.148 -
  46.149 -bool Identifier::equals(Identifier* other) {
  46.150 -  if (_sym == other->_sym && _begin == other->_begin && _end == other->_end) {
  46.151 -    return true;
  46.152 -  } else if (_end - _begin != other->_end - other->_begin) {
  46.153 -    return false;
  46.154 -  } else {
  46.155 -    size_t len = _end - _begin;
  46.156 -    char* addr = ((char*)_sym->bytes()) + _begin;
  46.157 -    char* oaddr = ((char*)other->_sym->bytes()) + other->_begin;
  46.158 -    return strncmp(addr, oaddr, len) == 0;
  46.159 -  }
  46.160 -}
  46.161 -
  46.162 -bool Identifier::equals(Symbol* sym) {
  46.163 -  Identifier id(sym, 0, sym->utf8_length());
  46.164 -  return equals(&id);
  46.165 -}
  46.166 -
  46.167 -/**
  46.168 - * A formal type parameter may be found in the the enclosing class, but it could
  46.169 - * also come from an enclosing method or outer class, in the case of inner-outer
  46.170 - * classes or anonymous classes.  For example:
  46.171 - *
  46.172 - * class Outer<T,V> {
  46.173 - *   class Inner<W> {
  46.174 - *     void m(T t, V v, W w);
  46.175 - *   }
  46.176 - * }
  46.177 - *
  46.178 - * In this case, the type variables in m()'s signature are not all found in the
  46.179 - * immediate enclosing class (Inner).  class Inner has only type parameter W,
  46.180 - * but it's outer_class field will reference Outer's descriptor which contains
  46.181 - * T & V (no outer_method in this case).
  46.182 - *
  46.183 - * If you have an anonymous class, it has both an enclosing method *and* an
  46.184 - * enclosing class where type parameters can be declared:
  46.185 - *
  46.186 - * class MOuter<T> {
  46.187 - *   <V> void bar(V v) {
  46.188 - *     Runnable r = new Runnable() {
  46.189 - *       public void run() {}
  46.190 - *       public void foo(T t, V v) { ... }
  46.191 - *     };
  46.192 - *   }
  46.193 - * }
  46.194 - *
  46.195 - * In this case, foo will be a member of some class, Runnable$1, which has no
  46.196 - * formal parameters itself, but has an outer_method (bar()) which provides
  46.197 - * type parameter V, and an outer class MOuter with type parameter T.
  46.198 - *
  46.199 - * It is also possible that the outer class is itself an inner class to some
  46.200 - * other class (or an anonymous class with an enclosing method), so we need to
  46.201 - * follow the outer_class/outer_method chain to it's end when looking for a
  46.202 - * type parameter.
  46.203 - */
  46.204 -TypeParameter* Descriptor::find_type_parameter(Identifier* id, int* depth) {
  46.205 -
  46.206 -  int current_depth = 0;
  46.207 -
  46.208 -  MethodDescriptor* outer_method = as_method_signature();
  46.209 -  ClassDescriptor* outer_class = as_class_signature();
  46.210 -
  46.211 -  if (outer_class == NULL) { // 'this' is a method signature; use the holder
  46.212 -    outer_class = outer_method->outer_class();
  46.213 -  }
  46.214 -
  46.215 -  while (outer_method != NULL || outer_class != NULL) {
  46.216 -    if (outer_method != NULL) {
  46.217 -      for (int i = 0; i < outer_method->type_parameters().length(); ++i) {
  46.218 -        TypeParameter* p = outer_method->type_parameters().at(i);
  46.219 -        if (p->identifier()->equals(id)) {
  46.220 -          *depth = -1; // indicates this this is a method parameter
  46.221 -          return p;
  46.222 -        }
  46.223 -      }
  46.224 -    }
  46.225 -    if (outer_class != NULL) {
  46.226 -      for (int i = 0; i < outer_class->type_parameters().length(); ++i) {
  46.227 -        TypeParameter* p = outer_class->type_parameters().at(i);
  46.228 -        if (p->identifier()->equals(id)) {
  46.229 -          *depth = current_depth;
  46.230 -          return p;
  46.231 -        }
  46.232 -      }
  46.233 -      outer_method = outer_class->outer_method();
  46.234 -      outer_class = outer_class->outer_class();
  46.235 -      ++current_depth;
  46.236 -    }
  46.237 -  }
  46.238 -
  46.239 -  if (VerifyGenericSignatures) {
  46.240 -    fatal("Could not resolve identifier");
  46.241 -  }
  46.242 -
  46.243 -  return NULL;
  46.244 -}
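         -// Illustrative walk-through for the MOuter example above (names taken
         -// from that sketch): resolving the variables in foo's signature via
         -// find_type_parameter would yield
         -//   "V" -> bar()'s type parameter, *depth set to -1 (method parameter),
         -//   "T" -> MOuter's type parameter, *depth set to 1 (one class level out).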
  46.245 -
  46.246 -ClassDescriptor* ClassDescriptor::parse_generic_signature(Klass* klass, TRAPS) {
  46.247 -  return parse_generic_signature(klass, NULL, CHECK_NULL);
  46.248 -}
  46.249 -
  46.250 -ClassDescriptor* ClassDescriptor::parse_generic_signature(
  46.251 -      Klass* klass, Symbol* original_name, TRAPS) {
  46.252 -
  46.253 -  InstanceKlass* ik = InstanceKlass::cast(klass);
  46.254 -  Symbol* sym = ik->generic_signature();
  46.255 -
  46.256 -  ClassDescriptor* spec;
  46.257 -
  46.258 -  if (sym == NULL || (spec = ClassDescriptor::parse_generic_signature(sym)) == NULL) {
  46.259 -    spec = ClassDescriptor::placeholder(ik);
  46.260 -  }
  46.261 -
  46.262 -  u2 outer_index = get_outer_class_index(ik, CHECK_NULL);
  46.263 -  if (outer_index != 0) {
  46.264 -    if (original_name == NULL) {
  46.265 -      original_name = ik->name();
  46.266 -    }
  46.267 -    Handle class_loader = Handle(THREAD, ik->class_loader());
  46.268 -    Handle protection_domain = Handle(THREAD, ik->protection_domain());
  46.269 -
  46.270 -    Symbol* outer_name = ik->constants()->klass_name_at(outer_index);
  46.271 -    Klass* outer = SystemDictionary::find(
  46.272 -        outer_name, class_loader, protection_domain, CHECK_NULL);
  46.273 -    if (outer == NULL && !THREAD->is_Compiler_thread()) {
  46.274 -      if (outer_name == ik->super()->name()) {
  46.275 -        outer = SystemDictionary::resolve_super_or_fail(original_name, outer_name,
  46.276 -                                                        class_loader, protection_domain,
  46.277 -                                                        false, CHECK_NULL);
  46.278 -      }
  46.279 -      else {
  46.280 -        outer = SystemDictionary::resolve_or_fail(outer_name, class_loader,
  46.281 -                                                  protection_domain, false, CHECK_NULL);
  46.282 -      }
  46.283 -    }
  46.284 -
  46.285 -    InstanceKlass* outer_ik = NULL; // remains NULL if the outer class is not available
  46.286 -    ClassDescriptor* outer_spec = NULL;
  46.287 -    if (outer == NULL) {
  46.288 -      outer_spec = ClassDescriptor::placeholder(ik);
  46.289 -      assert(false, "Outer class not loaded and not loadable from here");
  46.290 -    } else {
  46.291 -      outer_ik = InstanceKlass::cast(outer);
  46.292 -      outer_spec = parse_generic_signature(outer, original_name, CHECK_NULL);
  46.293 -    }
  46.294 -    spec->set_outer_class(outer_spec);
  46.295 -
  46.296 -    u2 encl_method_idx = ik->enclosing_method_method_index();
  46.297 -    if (encl_method_idx != 0 && outer_ik != NULL) {
  46.298 -      ConstantPool* cp = ik->constants();
  46.299 -      u2 name_index = cp->name_ref_index_at(encl_method_idx);
  46.300 -      u2 sig_index = cp->signature_ref_index_at(encl_method_idx);
  46.301 -      Symbol* name = cp->symbol_at(name_index);
  46.302 -      Symbol* sig = cp->symbol_at(sig_index);
  46.303 -      Method* m = outer_ik->find_method(name, sig);
  46.304 -      if (m != NULL) {
  46.305 -        Symbol* gsig = m->generic_signature();
  46.306 -        if (gsig != NULL) {
  46.307 -          MethodDescriptor* gms = MethodDescriptor::parse_generic_signature(gsig, outer_spec);
  46.308 -          spec->set_outer_method(gms);
  46.309 -        }
  46.310 -      } else if (VerifyGenericSignatures) {
  46.311 -        ResourceMark rm;
  46.312 -        stringStream ss;
  46.313 -        ss.print("Could not find method %s %s in class %s",
  46.314 -          name->as_C_string(), sig->as_C_string(), outer_name->as_C_string());
  46.315 -        fatal(ss.as_string());
  46.316 -      }
  46.317 -    }
  46.318 -  }
  46.319 -
  46.320 -  spec->bind_variables_to_parameters();
  46.321 -  return spec;
  46.322 -}
  46.323 -
  46.324 -ClassDescriptor* ClassDescriptor::placeholder(InstanceKlass* klass) {
  46.325 -  GrowableArray<TypeParameter*> formals;
  46.326 -  GrowableArray<ClassType*> interfaces;
  46.327 -  ClassType* super_type = NULL;
  46.328 -
  46.329 -  Klass* super_klass = klass->super();
  46.330 -  if (super_klass != NULL) {
  46.331 -    InstanceKlass* super = InstanceKlass::cast(super_klass);
  46.332 -    super_type = ClassType::from_symbol(super->name());
  46.333 -  }
  46.334 -
  46.335 -  for (int i = 0; i < klass->local_interfaces()->length(); ++i) {
  46.336 -    InstanceKlass* iface = InstanceKlass::cast(klass->local_interfaces()->at(i));
  46.337 -    interfaces.append(ClassType::from_symbol(iface->name()));
  46.338 -  }
  46.339 -  return new ClassDescriptor(formals, super_type, interfaces);
  46.340 -}
  46.341 -
  46.342 -ClassDescriptor* ClassDescriptor::parse_generic_signature(Symbol* sym) {
  46.343 -
  46.344 -  DescriptorStream ds(sym);
  46.345 -  DescriptorStream* STREAM = &ds;
  46.346 -
  46.347 -  GrowableArray<TypeParameter*> parameters(8);
  46.348 -  char c = READ();
  46.349 -  if (c == '<') {
  46.350 -    c = READ();
  46.351 -    while (c != '>') {
  46.352 -      PUSH(c);
  46.353 -      TypeParameter* ftp = TypeParameter::parse_generic_signature(CHECK_STREAM);
  46.354 -      parameters.append(ftp);
  46.355 -      c = READ();
  46.356 -    }
  46.357 -  } else {
  46.358 -    PUSH(c);
  46.359 -  }
  46.360 -
  46.361 -  EXPECT('L');
  46.362 -  ClassType* super = ClassType::parse_generic_signature(CHECK_STREAM);
  46.363 -
  46.364 -  GrowableArray<ClassType*> signatures(2);
  46.365 -  while (!STREAM->at_end()) {
  46.366 -    EXPECT('L');
  46.367 -    ClassType* iface = ClassType::parse_generic_signature(CHECK_STREAM);
  46.368 -    signatures.append(iface);
  46.369 -  }
  46.370 -
  46.371 -  EXPECT_END();
  46.372 -
  46.373 -  return new ClassDescriptor(parameters, super, signatures);
  46.374 -}
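         -// Illustrative example (hypothetical signature): parsing
         -//   "<T:Ljava/lang/Object;>Ljava/lang/Object;Ljava/lang/Comparable<TT;>;"
         -// yields one formal parameter (T, class bound java/lang/Object), the
         -// superclass java/lang/Object, and one interface, Comparable, whose
         -// single type argument is the type variable T.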
  46.375 -
  46.376 -#ifndef PRODUCT
  46.377 -void ClassDescriptor::print_on(outputStream* str) const {
  46.378 -  str->indent().print_cr("ClassDescriptor {");
  46.379 -  {
  46.380 -    streamIndentor si(str);
  46.381 -    if (_type_parameters.length() > 0) {
  46.382 -      str->indent().print_cr("Formals {");
  46.383 -      {
  46.384 -        streamIndentor si(str);
  46.385 -        for (int i = 0; i < _type_parameters.length(); ++i) {
  46.386 -          _type_parameters.at(i)->print_on(str);
  46.387 -        }
  46.388 -      }
  46.389 -      str->indent().print_cr("}");
  46.390 -    }
  46.391 -    if (_super != NULL) {
  46.392 -      str->indent().print_cr("Superclass: ");
  46.393 -      {
  46.394 -        streamIndentor si(str);
  46.395 -        _super->print_on(str);
  46.396 -      }
  46.397 -    }
  46.398 -    if (_interfaces.length() > 0) {
  46.399 -      str->indent().print_cr("SuperInterfaces: {");
  46.400 -      {
  46.401 -        streamIndentor si(str);
  46.402 -        for (int i = 0; i < _interfaces.length(); ++i) {
  46.403 -          _interfaces.at(i)->print_on(str);
  46.404 -        }
  46.405 -      }
  46.406 -      str->indent().print_cr("}");
  46.407 -    }
  46.408 -    if (_outer_method != NULL) {
  46.409 -      str->indent().print_cr("Outer Method: {");
  46.410 -      {
  46.411 -        streamIndentor si(str);
  46.412 -        _outer_method->print_on(str);
  46.413 -      }
  46.414 -      str->indent().print_cr("}");
  46.415 -    }
  46.416 -    if (_outer_class != NULL) {
  46.417 -      str->indent().print_cr("Outer Class: {");
  46.418 -      {
  46.419 -        streamIndentor si(str);
  46.420 -        _outer_class->print_on(str);
  46.421 -      }
  46.422 -      str->indent().print_cr("}");
  46.423 -    }
  46.424 -  }
  46.425 -  str->indent().print_cr("}");
  46.426 -}
  46.427 -#endif // ndef PRODUCT
  46.428 -
  46.429 -ClassType* ClassDescriptor::interface_desc(Symbol* sym) {
  46.430 -  for (int i = 0; i < _interfaces.length(); ++i) {
  46.431 -    if (_interfaces.at(i)->identifier()->equals(sym)) {
  46.432 -      return _interfaces.at(i);
  46.433 -    }
  46.434 -  }
  46.435 -  if (VerifyGenericSignatures) {
  46.436 -    fatal("Did not find expected interface");
  46.437 -  }
  46.438 -  return NULL;
  46.439 -}
  46.440 -
  46.441 -void ClassDescriptor::bind_variables_to_parameters() {
  46.442 -  if (_outer_class != NULL) {
  46.443 -    _outer_class->bind_variables_to_parameters();
  46.444 -  }
  46.445 -  if (_outer_method != NULL) {
  46.446 -    _outer_method->bind_variables_to_parameters();
  46.447 -  }
  46.448 -  for (int i = 0; i < _type_parameters.length(); ++i) {
  46.449 -    _type_parameters.at(i)->bind_variables_to_parameters(this, i);
  46.450 -  }
  46.451 -  if (_super != NULL) {
  46.452 -    _super->bind_variables_to_parameters(this);
  46.453 -  }
  46.454 -  for (int i = 0; i < _interfaces.length(); ++i) {
  46.455 -    _interfaces.at(i)->bind_variables_to_parameters(this);
  46.456 -  }
  46.457 -}
  46.458 -
  46.459 -ClassDescriptor* ClassDescriptor::canonicalize(Context* ctx) {
  46.460 -
  46.461 -  GrowableArray<TypeParameter*> type_params(_type_parameters.length());
  46.462 -  for (int i = 0; i < _type_parameters.length(); ++i) {
  46.463 -    type_params.append(_type_parameters.at(i)->canonicalize(ctx, 0));
  46.464 -  }
  46.465 -
  46.466 -  ClassDescriptor* outer = _outer_class == NULL ? NULL :
  46.467 -      _outer_class->canonicalize(ctx);
  46.468 -
  46.469 -  ClassType* super = _super == NULL ? NULL : _super->canonicalize(ctx, 0);
  46.470 -
  46.471 -  GrowableArray<ClassType*> interfaces(_interfaces.length());
  46.472 -  for (int i = 0; i < _interfaces.length(); ++i) {
  46.473 -    interfaces.append(_interfaces.at(i)->canonicalize(ctx, 0));
  46.474 -  }
  46.475 -
  46.476 -  MethodDescriptor* md = _outer_method == NULL ? NULL :
  46.477 -      _outer_method->canonicalize(ctx);
  46.478 -
  46.479 -  return new ClassDescriptor(type_params, super, interfaces, outer, md);
  46.480 -}
  46.481 -
  46.482 -u2 ClassDescriptor::get_outer_class_index(InstanceKlass* klass, TRAPS) {
  46.483 -  int inner_index = InstanceKlass::inner_class_inner_class_info_offset;
  46.484 -  int outer_index = InstanceKlass::inner_class_outer_class_info_offset;
  46.485 -  int name_offset = InstanceKlass::inner_class_inner_name_offset;
  46.486 -  int next_offset = InstanceKlass::inner_class_next_offset;
  46.487 -
  46.488 -  if (klass->inner_classes() == NULL || klass->inner_classes()->length() == 0) {
  46.489 -    // No inner class info => no declaring class
  46.490 -    return 0;
  46.491 -  }
  46.492 -
  46.493 -  Array<u2>* i_icls = klass->inner_classes();
  46.494 -  ConstantPool* i_cp = klass->constants();
  46.495 -  int i_length = i_icls->length();
  46.496 -
  46.497 -  // Find inner_klass attribute
  46.498 -  for (int i = 0; i + next_offset < i_length; i += next_offset) {
  46.499 -    u2 ioff = i_icls->at(i + inner_index);
  46.500 -    u2 ooff = i_icls->at(i + outer_index);
  46.501 -    u2 noff = i_icls->at(i + name_offset);
  46.502 -    if (ioff != 0) {
  46.503 -      // Check to see if the name matches the class we're looking for
  46.504 -      // before attempting to find the class.
  46.505 -      if (i_cp->klass_name_at_matches(klass, ioff) && ooff != 0) {
  46.506 -        return ooff;
  46.507 -      }
  46.508 -    }
  46.509 -  }
  46.510 -
  46.511 -  // It may be anonymous; try for that.
  46.512 -  u2 encl_method_class_idx = klass->enclosing_method_class_index();
  46.513 -  if (encl_method_class_idx != 0) {
  46.514 -    return encl_method_class_idx;
  46.515 -  }
  46.516 -
  46.517 -  return 0;
  46.518 -}
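         -// Illustrative example: for 'class Outer { class Inner {} }', Inner's
         -// InnerClasses attribute contains an entry whose inner_class_info slot
         -// names Inner itself and whose outer_class_info slot names Outer; that
         -// outer_class_info constant-pool index is what this function returns.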
  46.519 -
  46.520 -MethodDescriptor* MethodDescriptor::parse_generic_signature(Method* m, ClassDescriptor* outer) {
  46.521 -  Symbol* generic_sig = m->generic_signature();
  46.522 -  MethodDescriptor* md = NULL;
  46.523 -  if (generic_sig == NULL || (md = parse_generic_signature(generic_sig, outer)) == NULL) {
  46.524 -    md = parse_generic_signature(m->signature(), outer);
  46.525 -  }
  46.526 -  assert(md != NULL, "Could not parse method signature");
  46.527 -  md->bind_variables_to_parameters();
  46.528 -  return md;
  46.529 -}
  46.530 -
  46.531 -MethodDescriptor* MethodDescriptor::parse_generic_signature(Symbol* sym, ClassDescriptor* outer) {
  46.532 -
  46.533 -  DescriptorStream ds(sym);
  46.534 -  DescriptorStream* STREAM = &ds;
  46.535 -
  46.536 -  GrowableArray<TypeParameter*> params(8);
  46.537 -  char c = READ();
  46.538 -  if (c == '<') {
  46.539 -    c = READ();
  46.540 -    while (c != '>') {
  46.541 -      PUSH(c);
  46.542 -      TypeParameter* ftp = TypeParameter::parse_generic_signature(CHECK_STREAM);
  46.543 -      params.append(ftp);
  46.544 -      c = READ();
  46.545 -    }
  46.546 -  } else {
  46.547 -    PUSH(c);
  46.548 -  }
  46.549 -
  46.550 -  EXPECT('(');
  46.551 -
  46.552 -  GrowableArray<Type*> parameters(8);
  46.553 -  c = READ();
  46.554 -  while (c != ')') {
  46.555 -    PUSH(c);
  46.556 -    Type* arg = Type::parse_generic_signature(CHECK_STREAM);
  46.557 -    parameters.append(arg);
  46.558 -    c = READ();
  46.559 -  }
  46.560 -
  46.561 -  Type* rt = Type::parse_generic_signature(CHECK_STREAM);
  46.562 -
  46.563 -  GrowableArray<Type*> throws;
  46.564 -  while (!STREAM->at_end()) {
  46.565 -    EXPECT('^');
  46.566 -    Type* spec = Type::parse_generic_signature(CHECK_STREAM);
  46.567 -    throws.append(spec);
  46.568 -  }
  46.569 -
  46.570 -  return new MethodDescriptor(params, outer, parameters, rt, throws);
  46.571 -}
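         -// Illustrative example (hypothetical signature): "<V:Ljava/lang/Object;>(TV;)V"
         -// parses to one formal parameter (V, bound java/lang/Object), one method
         -// parameter (the type variable V), the primitive return type 'V' (void),
         -// and an empty throws list.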
  46.572 -
  46.573 -void MethodDescriptor::bind_variables_to_parameters() {
  46.574 -  for (int i = 0; i < _type_parameters.length(); ++i) {
  46.575 -    _type_parameters.at(i)->bind_variables_to_parameters(this, i);
  46.576 -  }
  46.577 -  for (int i = 0; i < _parameters.length(); ++i) {
  46.578 -    _parameters.at(i)->bind_variables_to_parameters(this);
  46.579 -  }
  46.580 -  _return_type->bind_variables_to_parameters(this);
  46.581 -  for (int i = 0; i < _throws.length(); ++i) {
  46.582 -    _throws.at(i)->bind_variables_to_parameters(this);
  46.583 -  }
  46.584 -}
  46.585 -
  46.586 -bool MethodDescriptor::covariant_match(MethodDescriptor* other, Context* ctx) {
  46.587 -
  46.588 -  if (_parameters.length() == other->_parameters.length()) {
  46.589 -    for (int i = 0; i < _parameters.length(); ++i) {
  46.590 -      if (!_parameters.at(i)->covariant_match(other->_parameters.at(i), ctx)) {
  46.591 -        return false;
  46.592 -      }
  46.593 -    }
  46.594 -
  46.595 -    if (_return_type->as_primitive() != NULL) {
  46.596 -      return _return_type->covariant_match(other->_return_type, ctx);
  46.597 -    } else {
  46.598 -      // return type is a reference
  46.599 -      return other->_return_type->as_class() != NULL ||
  46.600 -             other->_return_type->as_variable() != NULL ||
  46.601 -             other->_return_type->as_array() != NULL;
  46.602 -    }
  46.603 -  } else {
  46.604 -    return false;
  46.605 -  }
  46.606 -}
  46.607 -
  46.608 -MethodDescriptor* MethodDescriptor::canonicalize(Context* ctx) {
  46.609 -
  46.610 -  GrowableArray<TypeParameter*> type_params(_type_parameters.length());
  46.611 -  for (int i = 0; i < _type_parameters.length(); ++i) {
  46.612 -    type_params.append(_type_parameters.at(i)->canonicalize(ctx, 0));
  46.613 -  }
  46.614 -
  46.615 -  ClassDescriptor* outer = _outer_class == NULL ? NULL :
  46.616 -      _outer_class->canonicalize(ctx);
  46.617 -
  46.618 -  GrowableArray<Type*> params(_parameters.length());
  46.619 -  for (int i = 0; i < _parameters.length(); ++i) {
  46.620 -    params.append(_parameters.at(i)->canonicalize(ctx, 0));
  46.621 -  }
  46.622 -
  46.623 -  Type* rt = _return_type->canonicalize(ctx, 0);
  46.624 -
  46.625 -  GrowableArray<Type*> throws(_throws.length());
  46.626 -  for (int i = 0; i < _throws.length(); ++i) {
  46.627 -    throws.append(_throws.at(i)->canonicalize(ctx, 0));
  46.628 -  }
  46.629 -
  46.630 -  return new MethodDescriptor(type_params, outer, params, rt, throws);
  46.631 -}
  46.632 -
  46.633 -#ifndef PRODUCT
  46.634 -TempNewSymbol MethodDescriptor::reify_signature(Context* ctx, TRAPS) {
  46.635 -  stringStream ss(256);
  46.636 -
  46.637 -  ss.print("(");
  46.638 -  for (int i = 0; i < _parameters.length(); ++i) {
  46.639 -    _parameters.at(i)->reify_signature(&ss, ctx);
  46.640 -  }
  46.641 -  ss.print(")");
  46.642 -  _return_type->reify_signature(&ss, ctx);
  46.643 -  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), THREAD);
  46.644 -}
  46.645 -
  46.646 -void MethodDescriptor::print_on(outputStream* str) const {
  46.647 -  str->indent().print_cr("MethodDescriptor {");
  46.648 -  {
  46.649 -    streamIndentor si(str);
  46.650 -    if (_type_parameters.length() > 0) {
  46.651 -      str->indent().print_cr("Formals: {");
  46.652 -      {
  46.653 -        streamIndentor si(str);
  46.654 -        for (int i = 0; i < _type_parameters.length(); ++i) {
  46.655 -          _type_parameters.at(i)->print_on(str);
  46.656 -        }
  46.657 -      }
  46.658 -      str->indent().print_cr("}");
  46.659 -    }
  46.660 -    str->indent().print_cr("Parameters: {");
  46.661 -    {
  46.662 -      streamIndentor si(str);
  46.663 -      for (int i = 0; i < _parameters.length(); ++i) {
  46.664 -        _parameters.at(i)->print_on(str);
  46.665 -      }
  46.666 -    }
  46.667 -    str->indent().print_cr("}");
  46.668 -    str->indent().print_cr("Return Type: ");
  46.669 -    {
  46.670 -      streamIndentor si(str);
  46.671 -      _return_type->print_on(str);
  46.672 -    }
  46.673 -
  46.674 -    if (_throws.length() > 0) {
  46.675 -      str->indent().print_cr("Throws: {");
  46.676 -      {
  46.677 -        streamIndentor si(str);
  46.678 -        for (int i = 0; i < _throws.length(); ++i) {
  46.679 -          _throws.at(i)->print_on(str);
  46.680 -        }
  46.681 -      }
  46.682 -      str->indent().print_cr("}");
  46.683 -    }
  46.684 -  }
  46.685 -  str->indent().print_cr("}");
  46.686 -}
  46.687 -#endif // ndef PRODUCT
  46.688 -
  46.689 -TypeParameter* TypeParameter::parse_generic_signature(DescriptorStream* STREAM) {
  46.690 -  STREAM->set_mark();
  46.691 -  char c = READ();
  46.692 -  while (c != ':') {
  46.693 -    c = READ();
  46.694 -  }
  46.695 -
  46.696 -  Identifier* id = STREAM->identifier_from_mark();
  46.697 -
  46.698 -  ClassType* class_bound = NULL;
  46.699 -  GrowableArray<ClassType*> interface_bounds(8);
  46.700 -
  46.701 -  c = READ();
  46.702 -  if (c != '>') {
  46.703 -    if (c != ':') {
  46.704 -      EXPECTED(c, 'L');
  46.705 -      class_bound = ClassType::parse_generic_signature(CHECK_STREAM);
  46.706 -      c = READ();
  46.707 -    }
  46.708 -
  46.709 -    while (c == ':') {
  46.710 -      EXPECT('L');
  46.711 -      ClassType* fts = ClassType::parse_generic_signature(CHECK_STREAM);
  46.712 -      interface_bounds.append(fts);
  46.713 -      c = READ();
  46.714 -    }
  46.715 -  }
  46.716 -  PUSH(c);
  46.717 -
  46.718 -  return new TypeParameter(id, class_bound, interface_bounds);
  46.719 -}
  46.720 -
  46.721 -void TypeParameter::bind_variables_to_parameters(Descriptor* sig, int position) {
  46.722 -  if (_class_bound != NULL) {
  46.723 -    _class_bound->bind_variables_to_parameters(sig);
  46.724 -  }
  46.725 -  for (int i = 0; i < _interface_bounds.length(); ++i) {
  46.726 -    _interface_bounds.at(i)->bind_variables_to_parameters(sig);
  46.727 -  }
  46.728 -  _position = position;
  46.729 -}
  46.730 -
  46.731 -Type* TypeParameter::resolve(
  46.732 -    Context* ctx, int inner_depth, int ctx_depth) {
  46.733 -
  46.734 -  if (inner_depth == -1) {
  46.735 -    // This indicates that the parameter is a method type parameter, which
  46.736 -    // is not resolvable using the class hierarchy context
  46.737 -    return bound();
  46.738 -  }
  46.739 -
  46.740 -  ClassType* provider = ctx->at_depth(ctx_depth);
  46.741 -  if (provider != NULL) {
  46.742 -    for (int i = 0; i < inner_depth && provider != NULL; ++i) {
  46.743 -      provider = provider->outer_class();
  46.744 -    }
  46.745 -    if (provider != NULL) {
  46.746 -      TypeArgument* arg = provider->type_argument_at(_position);
  46.747 -      if (arg != NULL) {
  46.748 -        Type* value = arg->lower_bound();
  46.749 -        return value->canonicalize(ctx, ctx_depth + 1);
  46.750 -      }
  46.751 -    }
  46.752 -  }
  46.753 -
  46.754 -  return bound();
  46.755 -}
  46.756 -
  46.757 -TypeParameter* TypeParameter::canonicalize(Context* ctx, int ctx_depth) {
  46.758 -  ClassType* bound = _class_bound == NULL ? NULL :
  46.759 -     _class_bound->canonicalize(ctx, ctx_depth);
  46.760 -
  46.761 -  GrowableArray<ClassType*> ifaces(_interface_bounds.length());
  46.762 -  for (int i = 0; i < _interface_bounds.length(); ++i) {
  46.763 -    ifaces.append(_interface_bounds.at(i)->canonicalize(ctx, ctx_depth));
  46.764 -  }
  46.765 -
  46.766 -  TypeParameter* ret = new TypeParameter(_identifier, bound, ifaces);
  46.767 -  ret->_position = _position;
  46.768 -  return ret;
  46.769 -}
  46.770 -
  46.771 -ClassType* TypeParameter::bound() {
  46.772 -  if (_class_bound != NULL) {
  46.773 -    return _class_bound;
  46.774 -  }
  46.775 -
  46.776 -  if (_interface_bounds.length() == 1) {
  46.777 -    return _interface_bounds.at(0);
  46.778 -  }
  46.779 -
  46.780 -  return ClassType::java_lang_Object(); // TODO: investigate this case
  46.781 -}
  46.782 -
  46.783 -#ifndef PRODUCT
  46.784 -void TypeParameter::print_on(outputStream* str) const {
  46.785 -  str->indent().print_cr("Formal: {");
  46.786 -  {
  46.787 -    streamIndentor si(str);
  46.788 -
  46.789 -    str->indent().print("Identifier: ");
  46.790 -    _identifier->print_on(str);
  46.791 -    str->print_cr("");
  46.792 -    if (_class_bound != NULL) {
  46.793 -      str->indent().print_cr("Class Bound: ");
  46.794 -      streamIndentor si(str);
  46.795 -      _class_bound->print_on(str);
  46.796 -    }
  46.797 -    if (_interface_bounds.length() > 0) {
  46.798 -      str->indent().print_cr("Interface Bounds: {");
  46.799 -      {
  46.800 -        streamIndentor si(str);
  46.801 -        for (int i = 0; i < _interface_bounds.length(); ++i) {
  46.802 -          _interface_bounds.at(i)->print_on(str);
  46.803 -        }
  46.804 -      }
  46.805 -      str->indent().print_cr("}");
  46.806 -    }
  46.807 -    str->indent().print_cr("Ordinal Position: %d", _position);
  46.808 -  }
  46.809 -  str->indent().print_cr("}");
  46.810 -}
  46.811 -#endif // ndef PRODUCT
  46.812 -
  46.813 -Type* Type::parse_generic_signature(DescriptorStream* STREAM) {
  46.814 -  char c = READ();
  46.815 -  switch (c) {
  46.816 -    case 'L':
  46.817 -      return ClassType::parse_generic_signature(CHECK_STREAM);
  46.818 -    case 'T':
  46.819 -      return TypeVariable::parse_generic_signature(CHECK_STREAM);
  46.820 -    case '[':
  46.821 -      return ArrayType::parse_generic_signature(CHECK_STREAM);
  46.822 -    default:
  46.823 -      return new PrimitiveType(c);
  46.824 -  }
  46.825 -}
  46.826 -
  46.827 -Identifier* ClassType::parse_generic_signature_simple(GrowableArray<TypeArgument*>* args,
  46.828 -    bool* has_inner, DescriptorStream* STREAM) {
  46.829 -  STREAM->set_mark();
  46.830 -
  46.831 -  char c = READ();
  46.832 -  while (c != ';' && c != '.' && c != '<') { c = READ(); }
  46.833 -  Identifier* id = STREAM->identifier_from_mark();
  46.834 -
  46.835 -  if (c == '<') {
  46.836 -    c = READ();
  46.837 -    while (c != '>') {
  46.838 -      PUSH(c);
  46.839 -      TypeArgument* arg = TypeArgument::parse_generic_signature(CHECK_STREAM);
  46.840 -      args->append(arg);
  46.841 -      c = READ();
  46.842 -    }
  46.843 -    c = READ();
  46.844 -  }
  46.845 -
  46.846 -  *has_inner = (c == '.');
  46.847 -  if (!(*has_inner)) {
  46.848 -    EXPECTED(c, ';');
  46.849 -  }
  46.850 -
  46.851 -  return id;
  46.852 -}
  46.853 -
  46.854 -ClassType* ClassType::parse_generic_signature(DescriptorStream* STREAM) {
  46.855 -  return parse_generic_signature(NULL, CHECK_STREAM);
  46.856 -}
  46.857 -
  46.858 -ClassType* ClassType::parse_generic_signature(ClassType* outer, DescriptorStream* STREAM) {
  46.859 -  GrowableArray<TypeArgument*> args;
  46.860 -  ClassType* gct = NULL;
  46.861 -  bool has_inner = false;
  46.862 -
  46.863 -  Identifier* id = parse_generic_signature_simple(&args, &has_inner, STREAM);
  46.864 -  if (id != NULL) {
  46.865 -    gct = new ClassType(id, args, outer);
  46.866 -
  46.867 -    if (has_inner) {
  46.868 -      gct = parse_generic_signature(gct, CHECK_STREAM);
  46.869 -    }
  46.870 -  }
  46.871 -  return gct;
  46.872 -}
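         -// Illustrative example: "Ljava/util/Map<TK;TV;>;" (with the leading 'L'
         -// already consumed by the caller) yields a ClassType with identifier
         -// "java/util/Map" and two type arguments, the type variables K and V;
         -// a '.' in place of the final ';' would introduce an inner-class suffix,
         -// handled by the recursive call above.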
  46.873 -
  46.874 -ClassType* ClassType::from_symbol(Symbol* sym) {
  46.875 -  assert(sym != NULL, "Must not be null");
  46.876 -  GrowableArray<TypeArgument*> args;
  46.877 -  Identifier* id = new Identifier(sym, 0, sym->utf8_length());
  46.878 -  return new ClassType(id, args, NULL);
  46.879 -}
  46.880 -
  46.881 -ClassType* ClassType::java_lang_Object() {
  46.882 -  return from_symbol(vmSymbols::java_lang_Object());
  46.883 -}
  46.884 -
  46.885 -void ClassType::bind_variables_to_parameters(Descriptor* sig) {
  46.886 -  for (int i = 0; i < _type_arguments.length(); ++i) {
  46.887 -    _type_arguments.at(i)->bind_variables_to_parameters(sig);
  46.888 -  }
  46.889 -  if (_outer_class != NULL) {
  46.890 -    _outer_class->bind_variables_to_parameters(sig);
  46.891 -  }
  46.892 -}
  46.893 -
  46.894 -TypeArgument* ClassType::type_argument_at(int i) {
  46.895 -  if (i >= 0 && i < _type_arguments.length()) {
  46.896 -    return _type_arguments.at(i);
  46.897 -  } else {
  46.898 -    return NULL;
  46.899 -  }
  46.900 -}
  46.901 -
  46.902 -#ifndef PRODUCT
  46.903 -void ClassType::reify_signature(stringStream* ss, Context* ctx) {
  46.904 -  ss->print("L");
  46.905 -  _identifier->print_on(ss);
  46.906 -  ss->print(";");
  46.907 -}
  46.908 -
  46.909 -void ClassType::print_on(outputStream* str) const {
  46.910 -  str->indent().print_cr("Class {");
  46.911 -  {
  46.912 -    streamIndentor si(str);
  46.913 -    str->indent().print("Name: ");
  46.914 -    _identifier->print_on(str);
  46.915 -    str->print_cr("");
  46.916 -    if (_type_arguments.length() != 0) {
  46.917 -      str->indent().print_cr("Type Arguments: {");
  46.918 -      {
  46.919 -        streamIndentor si(str);
  46.920 -        for (int j = 0; j < _type_arguments.length(); ++j) {
  46.921 -          _type_arguments.at(j)->print_on(str);
  46.922 -        }
  46.923 -      }
  46.924 -      str->indent().print_cr("}");
  46.925 -    }
  46.926 -    if (_outer_class != NULL) {
  46.927 -      str->indent().print_cr("Outer Class: ");
  46.928 -      streamIndentor sir(str);
  46.929 -      _outer_class->print_on(str);
  46.930 -    }
  46.931 -  }
  46.932 -  str->indent().print_cr("}");
  46.933 -}
  46.934 -#endif // ndef PRODUCT
  46.935 -
  46.936 -bool ClassType::covariant_match(Type* other, Context* ctx) {
  46.937 -
  46.938 -  if (other == this) {
  46.939 -    return true;
  46.940 -  }
  46.941 -
  46.942 -  TypeVariable* variable = other->as_variable();
  46.943 -  if (variable != NULL) {
  46.944 -    other = variable->resolve(ctx, 0);
  46.945 -  }
  46.946 -
  46.947 -  ClassType* outer = outer_class();
  46.948 -  ClassType* other_class = other->as_class();
  46.949 -
  46.950 -  if (other_class == NULL ||
  46.951 -      (outer == NULL) != (other_class->outer_class() == NULL)) {
  46.952 -    return false;
  46.953 -  }
  46.954 -
  46.955 -  if (!_identifier->equals(other_class->_identifier)) {
  46.956 -    return false;
  46.957 -  }
  46.958 -
  46.959 -  if (outer != NULL && !outer->covariant_match(other_class->outer_class(), ctx)) {
  46.960 -    return false;
  46.961 -  }
  46.962 -
  46.963 -  return true;
  46.964 -}
  46.965 -
  46.966 -ClassType* ClassType::canonicalize(Context* ctx, int ctx_depth) {
  46.967 -
  46.968 -  GrowableArray<TypeArgument*> args(_type_arguments.length());
  46.969 -  for (int i = 0; i < _type_arguments.length(); ++i) {
  46.970 -    args.append(_type_arguments.at(i)->canonicalize(ctx, ctx_depth));
  46.971 -  }
  46.972 -
  46.973 -  ClassType* outer = _outer_class == NULL ? NULL :
  46.974 -      _outer_class->canonicalize(ctx, ctx_depth);
  46.975 -
  46.976 -  return new ClassType(_identifier, args, outer);
  46.977 -}
  46.978 -
  46.979 -TypeVariable* TypeVariable::parse_generic_signature(DescriptorStream* STREAM) {
  46.980 -  STREAM->set_mark();
  46.981 -  char c = READ();
  46.982 -  while (c != ';') {
  46.983 -    c = READ();
  46.984 -  }
  46.985 -  Identifier* id = STREAM->identifier_from_mark();
  46.986 -
  46.987 -  return new TypeVariable(id);
  46.988 -}
  46.989 -
  46.990 -void TypeVariable::bind_variables_to_parameters(Descriptor* sig) {
  46.991 -  _parameter = sig->find_type_parameter(_id, &_inner_depth);
  46.992 -  if (VerifyGenericSignatures && _parameter == NULL) {
  46.993 -    fatal("Could not find formal parameter");
  46.994 -  }
  46.995 -}
  46.996 -
  46.997 -Type* TypeVariable::resolve(Context* ctx, int ctx_depth) {
  46.998 -  if (parameter() != NULL) {
  46.999 -    return parameter()->resolve(ctx, inner_depth(), ctx_depth);
 46.1000 -  } else {
 46.1001 -    if (VerifyGenericSignatures) {
 46.1002 -      fatal("Type variable matches no parameter");
 46.1003 -    }
 46.1004 -    return NULL;
 46.1005 -  }
 46.1006 -}
 46.1007 -
 46.1008 -bool TypeVariable::covariant_match(Type* other, Context* ctx) {
 46.1009 -
 46.1010 -  if (other == this) {
 46.1011 -    return true;
 46.1012 -  }
 46.1013 -
 46.1014 -  Context my_context(NULL); // empty, results in erasure
 46.1015 -  Type* my_type = resolve(&my_context, 0);
 46.1016 -  if (my_type == NULL) {
 46.1017 -    return false;
 46.1018 -  }
 46.1019 -
 46.1020 -  return my_type->covariant_match(other, ctx);
 46.1021 -}
 46.1022 -
 46.1023 -Type* TypeVariable::canonicalize(Context* ctx, int ctx_depth) {
 46.1024 -  return resolve(ctx, ctx_depth);
 46.1025 -}
 46.1026 -
 46.1027 -#ifndef PRODUCT
 46.1028 -void TypeVariable::reify_signature(stringStream* ss, Context* ctx) {
 46.1029 -  Type* type = resolve(ctx, 0);
 46.1030 -  if (type != NULL) {
 46.1031 -    type->reify_signature(ss, ctx);
 46.1032 -  }
 46.1033 -}
 46.1034 -
 46.1035 -void TypeVariable::print_on(outputStream* str) const {
 46.1036 -  str->indent().print_cr("Type Variable {");
 46.1037 -  {
 46.1038 -    streamIndentor si(str);
 46.1039 -    str->indent().print("Name: ");
 46.1040 -    _id->print_on(str);
 46.1041 -    str->print_cr("");
 46.1042 -    str->indent().print_cr("Inner depth: %d", _inner_depth);
 46.1043 -  }
 46.1044 -  str->indent().print_cr("}");
 46.1045 -}
 46.1046 -#endif // ndef PRODUCT
 46.1047 -
 46.1048 -ArrayType* ArrayType::parse_generic_signature(DescriptorStream* STREAM) {
 46.1049 -  Type* base = Type::parse_generic_signature(CHECK_STREAM);
 46.1050 -  return new ArrayType(base);
 46.1051 -}
 46.1052 -
 46.1053 -void ArrayType::bind_variables_to_parameters(Descriptor* sig) {
 46.1054 -  assert(_base != NULL, "Invalid base");
 46.1055 -  _base->bind_variables_to_parameters(sig);
 46.1056 -}
 46.1057 -
 46.1058 -bool ArrayType::covariant_match(Type* other, Context* ctx) {
 46.1059 -  assert(_base != NULL, "Invalid base");
 46.1060 -
 46.1061 -  if (other == this) {
 46.1062 -    return true;
 46.1063 -  }
 46.1064 -
 46.1065 -  ArrayType* other_array = other->as_array();
 46.1066 -  return (other_array != NULL && _base->covariant_match(other_array->_base, ctx));
 46.1067 -}
 46.1068 -
 46.1069 -ArrayType* ArrayType::canonicalize(Context* ctx, int ctx_depth) {
 46.1070 -  assert(_base != NULL, "Invalid base");
 46.1071 -  return new ArrayType(_base->canonicalize(ctx, ctx_depth));
 46.1072 -}
 46.1073 -
 46.1074 -#ifndef PRODUCT
 46.1075 -void ArrayType::reify_signature(stringStream* ss, Context* ctx) {
 46.1076 -  assert(_base != NULL, "Invalid base");
 46.1077 -  ss->print("[");
 46.1078 -  _base->reify_signature(ss, ctx);
 46.1079 -}
 46.1080 -
 46.1081 -void ArrayType::print_on(outputStream* str) const {
 46.1082 -  str->indent().print_cr("Array {");
 46.1083 -  {
 46.1084 -    streamIndentor si(str);
 46.1085 -    _base->print_on(str);
 46.1086 -  }
 46.1087 -  str->indent().print_cr("}");
 46.1088 -}
 46.1089 -#endif // ndef PRODUCT
 46.1090 -
 46.1091 -bool PrimitiveType::covariant_match(Type* other, Context* ctx) {
 46.1092 -
 46.1093 -  PrimitiveType* other_prim = other->as_primitive();
 46.1094 -  return (other_prim != NULL && _type == other_prim->_type);
 46.1095 -}
 46.1096 -
 46.1097 -PrimitiveType* PrimitiveType::canonicalize(Context* ctx, int ctxd) {
 46.1098 -  return this;
 46.1099 -}
 46.1100 -
 46.1101 -#ifndef PRODUCT
 46.1102 -void PrimitiveType::reify_signature(stringStream* ss, Context* ctx) {
 46.1103 -  ss->print("%c", _type);
 46.1104 -}
 46.1105 -
 46.1106 -void PrimitiveType::print_on(outputStream* str) const {
 46.1107 -  str->indent().print_cr("Primitive: '%c'", _type);
 46.1108 -}
 46.1109 -#endif // ndef PRODUCT
 46.1110 -
 46.1111 -void PrimitiveType::bind_variables_to_parameters(Descriptor* sig) {
 46.1112 -}
 46.1113 -
 46.1114 -TypeArgument* TypeArgument::parse_generic_signature(DescriptorStream* STREAM) {
 46.1115 -  char c = READ();
 46.1116 -  Type* type = NULL;
 46.1117 -
 46.1118 -  switch (c) {
 46.1119 -    case '*':
 46.1120 -      return new TypeArgument(ClassType::java_lang_Object(), NULL);
 46.1121 -      break;
 46.1122 -    default:
 46.1123 -      PUSH(c);
 46.1124 -      // fall-through
 46.1125 -    case '+':
 46.1126 -    case '-':
 46.1127 -      type = Type::parse_generic_signature(CHECK_STREAM);
 46.1128 -      if (c == '+') {
 46.1129 -        return new TypeArgument(type, NULL);
 46.1130 -      } else if (c == '-') {
 46.1131 -        return new TypeArgument(ClassType::java_lang_Object(), type);
 46.1132 -      } else {
 46.1133 -        return new TypeArgument(type, type);
 46.1134 -      }
 46.1135 -  }
 46.1136 -}
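         -// Illustrative mapping of the wildcard forms handled above to the
         -// (lower_bound, upper_bound) pairs passed to the constructor:
         -//   '*'  -> (java/lang/Object, NULL)   unbounded wildcard
         -//   '+X' -> (X, NULL)                  ? extends X
         -//   '-X' -> (java/lang/Object, X)      ? super X
         -//   'X'  -> (X, X)                     exact type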
 46.1137 -
 46.1138 -void TypeArgument::bind_variables_to_parameters(Descriptor* sig) {
 46.1139 -  assert(_lower_bound != NULL, "Invalid lower bound");
 46.1140 -  _lower_bound->bind_variables_to_parameters(sig);
 46.1141 -  if (_upper_bound != NULL && _upper_bound != _lower_bound) {
 46.1142 -    _upper_bound->bind_variables_to_parameters(sig);
 46.1143 -  }
 46.1144 -}
 46.1145 -
 46.1146 -bool TypeArgument::covariant_match(TypeArgument* other, Context* ctx) {
 46.1147 -  assert(_lower_bound != NULL, "Invalid lower bound");
 46.1148 -
 46.1149 -  if (other == this) {
 46.1150 -    return true;
 46.1151 -  }
 46.1152 -
 46.1153 -  if (!_lower_bound->covariant_match(other->lower_bound(), ctx)) {
 46.1154 -    return false;
 46.1155 -  }
 46.1156 -  return true;
 46.1157 -}
 46.1158 -
 46.1159 -TypeArgument* TypeArgument::canonicalize(Context* ctx, int ctx_depth) {
 46.1160 -  assert(_lower_bound != NULL, "Invalid lower bound");
 46.1161 -  Type* lower = _lower_bound->canonicalize(ctx, ctx_depth);
 46.1162 -  Type* upper = NULL;
 46.1163 -
 46.1164 -  if (_upper_bound == _lower_bound) {
 46.1165 -    upper = lower;
 46.1166 -  } else if (_upper_bound != NULL) {
 46.1167 -    upper = _upper_bound->canonicalize(ctx, ctx_depth);
 46.1168 -  }
 46.1169 -
 46.1170 -  return new TypeArgument(lower, upper);
 46.1171 -}
 46.1172 -
 46.1173 -#ifndef PRODUCT
 46.1174 -void TypeArgument::print_on(outputStream* str) const {
 46.1175 -  str->indent().print_cr("TypeArgument {");
 46.1176 -  {
 46.1177 -    streamIndentor si(str);
 46.1178 -    if (_lower_bound != NULL) {
 46.1179 -      str->indent().print("Lower bound: ");
 46.1180 -      _lower_bound->print_on(str);
 46.1181 -    }
 46.1182 -    if (_upper_bound != NULL) {
 46.1183 -      str->indent().print("Upper bound: ");
 46.1184 -      _upper_bound->print_on(str);
 46.1185 -    }
 46.1186 -  }
 46.1187 -  str->indent().print_cr("}");
 46.1188 -}
 46.1189 -#endif // ndef PRODUCT
 46.1190 -
 46.1191 -void Context::Mark::destroy() {
 46.1192 -  if (is_active()) {
 46.1193 -    _context->reset_to_mark(_marked_size);
 46.1194 -  }
 46.1195 -  deactivate();
 46.1196 -}
 46.1197 -
 46.1198 -void Context::apply_type_arguments(
 46.1199 -    InstanceKlass* current, InstanceKlass* super, TRAPS) {
 46.1200 -  assert(_cache != NULL, "Cannot use an empty context");
 46.1201 -  ClassType* spec = NULL;
 46.1202 -  if (current != NULL) {
 46.1203 -    ClassDescriptor* descriptor = _cache->descriptor_for(current, CHECK);
 46.1204 -    if (super == current->super()) {
 46.1205 -      spec = descriptor->super();
 46.1206 -    } else {
 46.1207 -      spec = descriptor->interface_desc(super->name());
 46.1208 -    }
 46.1209 -    if (spec != NULL) {
 46.1210 -      _type_arguments.push(spec);
 46.1211 -    }
 46.1212 -  }
 46.1213 -}
 46.1214 -
 46.1215 -void Context::reset_to_mark(int size) {
 46.1216 -  _type_arguments.trunc_to(size);
 46.1217 -}
 46.1218 -
 46.1219 -ClassType* Context::at_depth(int i) const {
 46.1220 -  if (i < _type_arguments.length()) {
 46.1221 -    return _type_arguments.at(_type_arguments.length() - 1 - i);
 46.1222 -  }
 46.1223 -  return NULL;
 46.1224 -}
 46.1225 -
 46.1226 -#ifndef PRODUCT
 46.1227 -void Context::print_on(outputStream* str) const {
 46.1228 -  str->indent().print_cr("Context {");
 46.1229 -  for (int i = 0; i < _type_arguments.length(); ++i) {
 46.1230 -    streamIndentor si(str);
 46.1231 -    str->indent().print("leval %d: ", i);
 46.1232 -    ClassType* ct = at_depth(i);
 46.1233 -    if (ct == NULL) {
 46.1234 -      str->print_cr("<empty>");
 46.1235 -      continue;
 46.1236 -    } else {
 46.1237 -      str->print_cr("{");
 46.1238 -    }
 46.1239 -
 46.1240 -    for (int j = 0; j < ct->type_arguments_length(); ++j) {
 46.1241 -      streamIndentor si(str);
 46.1242 -      TypeArgument* ta = ct->type_argument_at(j);
 46.1243 -      Type* bound = ta->lower_bound();
 46.1244 -      bound->print_on(str);
 46.1245 -    }
 46.1246 -    str->indent().print_cr("}");
 46.1247 -  }
 46.1248 -  str->indent().print_cr("}");
 46.1249 -}
 46.1250 -#endif // ndef PRODUCT
 46.1251 -
 46.1252 -ClassDescriptor* DescriptorCache::descriptor_for(InstanceKlass* ik, TRAPS) {
 46.1253 -
 46.1254 -  ClassDescriptor** existing = _class_descriptors.get(ik);
 46.1255 -  if (existing == NULL) {
 46.1256 -    ClassDescriptor* cd = ClassDescriptor::parse_generic_signature(ik, CHECK_NULL);
 46.1257 -    _class_descriptors.put(ik, cd);
 46.1258 -    return cd;
 46.1259 -  } else {
 46.1260 -    return *existing;
 46.1261 -  }
 46.1262 -}
 46.1263 -
 46.1264 -MethodDescriptor* DescriptorCache::descriptor_for(
 46.1265 -    Method* mh, ClassDescriptor* cd, TRAPS) {
 46.1266 -  assert(mh != NULL && cd != NULL, "Should not be NULL");
 46.1267 -  MethodDescriptor** existing = _method_descriptors.get(mh);
 46.1268 -  if (existing == NULL) {
 46.1269 -    MethodDescriptor* md = MethodDescriptor::parse_generic_signature(mh, cd);
 46.1270 -    _method_descriptors.put(mh, md);
 46.1271 -    return md;
 46.1272 -  } else {
 46.1273 -    return *existing;
 46.1274 -  }
 46.1275 -}
 46.1276 -MethodDescriptor* DescriptorCache::descriptor_for(Method* mh, TRAPS) {
 46.1277 -  ClassDescriptor* cd = descriptor_for(
 46.1278 -      InstanceKlass::cast(mh->method_holder()), CHECK_NULL);
 46.1279 -  return descriptor_for(mh, cd, THREAD);
 46.1280 -}
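         -// Sketch of intended use (hypothetical caller, for illustration only):
         -//   DescriptorCache cache;
         -//   Context ctx(&cache);
         -//   MethodDescriptor* md = cache.descriptor_for(method, CHECK_NULL);
         -//   // ... push supertype arguments with ctx.apply_type_arguments(...)
         -//   // while walking the class hierarchy, then canonicalize against ctx.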
 46.1281 -
 46.1282 -} // namespace generic
    47.1 --- a/src/share/vm/classfile/genericSignatures.hpp	Fri Sep 06 09:55:38 2013 +0100
    47.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    47.3 @@ -1,467 +0,0 @@
    47.4 -/*
    47.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    47.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    47.7 - *
    47.8 - * This code is free software; you can redistribute it and/or modify it
    47.9 - * under the terms of the GNU General Public License version 2 only, as
   47.10 - * published by the Free Software Foundation.
   47.11 - *
   47.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   47.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   47.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   47.15 - * version 2 for more details (a copy is included in the LICENSE file that
   47.16 - * accompanied this code).
   47.17 - *
   47.18 - * You should have received a copy of the GNU General Public License version
   47.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   47.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   47.21 - *
   47.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   47.23 - * or visit www.oracle.com if you need additional information or have any
   47.24 - * questions.
   47.25 - *
   47.26 - */
   47.27 -
   47.28 -#ifndef SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
   47.29 -#define SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
   47.30 -
   47.31 -#include "classfile/symbolTable.hpp"
   47.32 -#include "memory/allocation.hpp"
   47.33 -#include "runtime/signature.hpp"
   47.34 -#include "utilities/growableArray.hpp"
   47.35 -#include "utilities/resourceHash.hpp"
   47.36 -
   47.37 -class stringStream;
   47.38 -
   47.39 -namespace generic {
   47.40 -
   47.41 -class Identifier;
   47.42 -class ClassDescriptor;
   47.43 -class MethodDescriptor;
   47.44 -
   47.45 -class TypeParameter; // a formal type parameter declared in generic signatures
   47.46 -class TypeArgument;  // The "type value" passed to fill parameters in supertypes
   47.47 -class TypeVariable;  // A usage of a type parameter as a value
   47.48 -/**
   47.49 - * Example:
   47.50 - *
   47.51 - * <T, V> class Foo extends Bar<String> { int m(V v) {} }
   47.52 - * ^^^^^^                       ^^^^^^          ^^
   47.53 - * type parameters            type argument    type variable
   47.54 - *
   47.55 - * Note that a type variable could be passed as an argument too:
   47.56 - * <T, V> class Foo extends Bar<T> { int m(V v) {} }
   47.57 - *                             ^^^
   47.58 - *                             type argument's value is a type variable
   47.59 - */
   47.60 -
   47.61 -
   47.62 -class Type;
   47.63 -class ClassType;
   47.64 -class ArrayType;
   47.65 -class PrimitiveType;
   47.66 -class Context;
   47.67 -class DescriptorCache;
   47.68 -
   47.69 -class DescriptorStream;
   47.70 -
   47.71 -class Identifier : public ResourceObj {
   47.72 - private:
   47.73 -  Symbol* _sym;
   47.74 -  int _begin;
   47.75 -  int _end;
   47.76 -
   47.77 - public:
   47.78 -  Identifier(Symbol* sym, int begin, int end) :
   47.79 -    _sym(sym), _begin(begin), _end(end) {}
   47.80 -
   47.81 -  bool equals(Identifier* other);
   47.82 -  bool equals(Symbol* sym);
   47.83 -
   47.84 -#ifndef PRODUCT
   47.85 -  void print_on(outputStream* str) const;
   47.86 -#endif // ndef PRODUCT
   47.87 -};
   47.88 -
   47.89 -class Descriptor : public ResourceObj {
   47.90 - protected:
   47.91 -  GrowableArray<TypeParameter*> _type_parameters;
   47.92 -  ClassDescriptor* _outer_class;
   47.93 -
   47.94 -  Descriptor(GrowableArray<TypeParameter*>& params,
   47.95 -    ClassDescriptor* outer)
   47.96 -    : _type_parameters(params), _outer_class(outer) {}
   47.97 -
   47.98 - public:
   47.99 -
  47.100 -  ClassDescriptor* outer_class() { return _outer_class; }
  47.101 -  void set_outer_class(ClassDescriptor* sig) { _outer_class = sig; }
  47.102 -
  47.103 -  virtual ClassDescriptor* as_class_signature() { return NULL; }
  47.104 -  virtual MethodDescriptor* as_method_signature() { return NULL; }
  47.105 -
  47.106 -  bool is_class_signature() { return as_class_signature() != NULL; }
  47.107 -  bool is_method_signature() { return as_method_signature() != NULL; }
  47.108 -
  47.109 -  GrowableArray<TypeParameter*>& type_parameters() {
  47.110 -    return _type_parameters;
  47.111 -  }
  47.112 -
  47.113 -  TypeParameter* find_type_parameter(Identifier* id, int* param_depth);
  47.114 -
  47.115 -  virtual void bind_variables_to_parameters() = 0;
  47.116 -
  47.117 -#ifndef PRODUCT
  47.118 -  virtual void print_on(outputStream* str) const = 0;
  47.119 -#endif
  47.120 -};
  47.121 -
  47.122 -class ClassDescriptor : public Descriptor {
  47.123 - private:
  47.124 -  ClassType* _super;
  47.125 -  GrowableArray<ClassType*> _interfaces;
  47.126 -  MethodDescriptor* _outer_method;
  47.127 -
  47.128 -  ClassDescriptor(GrowableArray<TypeParameter*>& ftp, ClassType* scs,
  47.129 -      GrowableArray<ClassType*>& sis, ClassDescriptor* outer_class = NULL,
  47.130 -      MethodDescriptor* outer_method = NULL)
  47.131 -        : Descriptor(ftp, outer_class), _super(scs), _interfaces(sis),
  47.132 -          _outer_method(outer_method) {}
  47.133 -
  47.134 -  static u2 get_outer_class_index(InstanceKlass* k, TRAPS);
  47.135 -  static ClassDescriptor* parse_generic_signature(Klass* k, Symbol* original_name, TRAPS);
  47.136 -
  47.137 - public:
  47.138 -
  47.139 -  virtual ClassDescriptor* as_class_signature() { return this; }
  47.140 -
  47.141 -  MethodDescriptor* outer_method() { return _outer_method; }
  47.142 -  void set_outer_method(MethodDescriptor* m) { _outer_method = m; }
  47.143 -
  47.144 -  ClassType* super() { return _super; }
  47.145 -  ClassType* interface_desc(Symbol* sym);
  47.146 -
  47.147 -  static ClassDescriptor* parse_generic_signature(Klass* k, TRAPS);
  47.148 -  static ClassDescriptor* parse_generic_signature(Symbol* sym);
  47.149 -
  47.150 -  // For use in superclass chains in positions where there is no generic info
  47.151 -  static ClassDescriptor* placeholder(InstanceKlass* klass);
  47.152 -
  47.153 -#ifndef PRODUCT
  47.154 -  void print_on(outputStream* str) const;
  47.155 -#endif
  47.156 -
  47.157 -  ClassDescriptor* canonicalize(Context* ctx);
  47.158 -
  47.159 -  // Linking sets the position index in any contained TypeVariable type
  47.160 -  // to correspond to the location of that identifier in the formal type
  47.161 -  // parameters.
  47.162 -  void bind_variables_to_parameters();
  47.163 -};
  47.164 -
  47.165 -class MethodDescriptor : public Descriptor {
  47.166 - private:
  47.167 -  GrowableArray<Type*> _parameters;
  47.168 -  Type* _return_type;
  47.169 -  GrowableArray<Type*> _throws;
  47.170 -
  47.171 -  MethodDescriptor(GrowableArray<TypeParameter*>& ftp, ClassDescriptor* outer,
  47.172 -      GrowableArray<Type*>& sigs, Type* rt, GrowableArray<Type*>& throws)
  47.173 -      : Descriptor(ftp, outer), _parameters(sigs), _return_type(rt),
  47.174 -        _throws(throws) {}
  47.175 -
  47.176 - public:
  47.177 -
  47.178 -  static MethodDescriptor* parse_generic_signature(Method* m, ClassDescriptor* outer);
  47.179 -  static MethodDescriptor* parse_generic_signature(Symbol* sym, ClassDescriptor* outer);
  47.180 -
  47.181 -  MethodDescriptor* as_method_signature() { return this; }
  47.182 -
  47.183 -  // Performs generic analysis on the method parameters to determine
  47.184 -  // if both methods refer to the same argument types.
  47.185 -  bool covariant_match(MethodDescriptor* other, Context* ctx);
  47.186 -
  47.187 -  // Returns a new method descriptor with all generic variables
  47.188 -  // removed and replaced with whatever is indicated using the Context.
  47.189 -  MethodDescriptor* canonicalize(Context* ctx);
  47.190 -
  47.191 -  void bind_variables_to_parameters();
  47.192 -
  47.193 -#ifndef PRODUCT
  47.194 -  TempNewSymbol reify_signature(Context* ctx, TRAPS);
  47.195 -  void print_on(outputStream* str) const;
  47.196 -#endif
  47.197 -};
  47.198 -
  47.199 -class TypeParameter : public ResourceObj {
  47.200 - private:
  47.201 -  Identifier* _identifier;
  47.202 -  ClassType* _class_bound;
  47.203 -  GrowableArray<ClassType*> _interface_bounds;
  47.204 -
  47.205 -  // The position is the ordinal location of the parameter within the
  47.206 -  // formal parameter list (excluding outer classes).  It is only set for
  47.207 -  // formal type parameters that are associated with a class -- method
  47.208 -  // type parameters are left as -1.  When resolving a generic variable to
  47.209 -  // find the actual type, this index is used to access the generic type
  47.210 -  // argument in the provided context object.
  47.211 -  int _position; // Assigned during variable linking
  47.212 -
  47.213 -  TypeParameter(Identifier* id, ClassType* class_bound,
  47.214 -    GrowableArray<ClassType*>& interface_bounds) :
  47.215 -      _identifier(id), _class_bound(class_bound),
  47.216 -      _interface_bounds(interface_bounds), _position(-1) {}
  47.217 -
  47.218 - public:
  47.219 -  static TypeParameter* parse_generic_signature(DescriptorStream* str);
  47.220 -
  47.221 -  ClassType* bound();
  47.222 -  int position() { return _position; }
  47.223 -
  47.224 -  void bind_variables_to_parameters(Descriptor* sig, int position);
  47.225 -  Identifier* identifier() { return _identifier; }
  47.226 -
  47.227 -  Type* resolve(Context* ctx, int inner_depth, int ctx_depth);
  47.228 -  TypeParameter* canonicalize(Context* ctx, int ctx_depth);
  47.229 -
  47.230 -#ifndef PRODUCT
  47.231 -  void print_on(outputStream* str) const;
  47.232 -#endif
  47.233 -};
  47.234 -
  47.235 -class Type : public ResourceObj {
  47.236 - public:
  47.237 -  static Type* parse_generic_signature(DescriptorStream* str);
  47.238 -
  47.239 -  virtual ClassType* as_class() { return NULL; }
  47.240 -  virtual TypeVariable* as_variable() { return NULL; }
  47.241 -  virtual ArrayType* as_array() { return NULL; }
  47.242 -  virtual PrimitiveType* as_primitive() { return NULL; }
  47.243 -
  47.244 -  virtual bool covariant_match(Type* gt, Context* ctx) = 0;
  47.245 -  virtual Type* canonicalize(Context* ctx, int ctx_depth) = 0;
  47.246 -
  47.247 -  virtual void bind_variables_to_parameters(Descriptor* sig) = 0;
  47.248 -
  47.249 -#ifndef PRODUCT
  47.250 -  virtual void reify_signature(stringStream* ss, Context* ctx) = 0;
  47.251 -  virtual void print_on(outputStream* str) const = 0;
  47.252 -#endif
  47.253 -};
  47.254 -
  47.255 -class ClassType : public Type {
  47.256 -  friend class ClassDescriptor;
  47.257 - protected:
  47.258 -  Identifier* _identifier;
  47.259 -  GrowableArray<TypeArgument*> _type_arguments;
  47.260 -  ClassType* _outer_class;
  47.261 -
  47.262 -  ClassType(Identifier* identifier,
  47.263 -      GrowableArray<TypeArgument*>& args,
  47.264 -      ClassType* outer)
  47.265 -      : _identifier(identifier), _type_arguments(args), _outer_class(outer) {}
  47.266 -
  47.267 -  // Sets *has_inner when an inner-class suffix remains to be read
  47.268 -  static Identifier* parse_generic_signature_simple(
  47.269 -      GrowableArray<TypeArgument*>* args,
  47.270 -      bool* has_inner, DescriptorStream* str);
  47.271 -
  47.272 -  static ClassType* parse_generic_signature(ClassType* outer,
  47.273 -      DescriptorStream* str);
  47.274 -  static ClassType* from_symbol(Symbol* sym);
  47.275 -
  47.276 - public:
  47.277 -  ClassType* as_class() { return this; }
  47.278 -
  47.279 -  static ClassType* parse_generic_signature(DescriptorStream* str);
  47.280 -  static ClassType* java_lang_Object();
  47.281 -
  47.282 -  Identifier* identifier() { return _identifier; }
  47.283 -  int type_arguments_length() { return _type_arguments.length(); }
  47.284 -  TypeArgument* type_argument_at(int i);
  47.285 -
  47.286 -  virtual ClassType* outer_class() { return _outer_class; }
  47.287 -
  47.288 -  bool covariant_match(Type* gt, Context* ctx);
  47.289 -  ClassType* canonicalize(Context* ctx, int context_depth);
  47.290 -
  47.291 -  void bind_variables_to_parameters(Descriptor* sig);
  47.292 -
  47.293 -#ifndef PRODUCT
  47.294 -  void reify_signature(stringStream* ss, Context* ctx);
  47.295 -  void print_on(outputStream* str) const;
  47.296 -#endif
  47.297 -};
  47.298 -
  47.299 -class TypeVariable : public Type {
  47.300 - private:
  47.301 -  Identifier* _id;
  47.302 -  TypeParameter* _parameter; // assigned during linking
  47.303 -
  47.304 -  // how many steps "out" from inner classes, -1 if method
  47.305 -  int _inner_depth;
  47.306 -
  47.307 -  TypeVariable(Identifier* id)
  47.308 -      : _id(id), _parameter(NULL), _inner_depth(0) {}
  47.309 -
  47.310 - public:
  47.311 -  TypeVariable* as_variable() { return this; }
  47.312 -
  47.313 -  static TypeVariable* parse_generic_signature(DescriptorStream* str);
  47.314 -
  47.315 -  Identifier* identifier() { return _id; }
  47.316 -  TypeParameter* parameter() { return _parameter; }
  47.317 -  int inner_depth() { return _inner_depth; }
  47.318 -
  47.319 -  void bind_variables_to_parameters(Descriptor* sig);
  47.320 -
  47.321 -  Type* resolve(Context* ctx, int ctx_depth);
  47.322 -  bool covariant_match(Type* gt, Context* ctx);
  47.323 -  Type* canonicalize(Context* ctx, int ctx_depth);
  47.324 -
  47.325 -#ifndef PRODUCT
  47.326 -  void reify_signature(stringStream* ss, Context* ctx);
  47.327 -  void print_on(outputStream* str) const;
  47.328 -#endif
  47.329 -};
  47.330 -
  47.331 -class ArrayType : public Type {
  47.332 - private:
  47.333 -  Type* _base;
  47.334 -
  47.335 -  ArrayType(Type* base) : _base(base) {}
  47.336 -
  47.337 - public:
  47.338 -  ArrayType* as_array() { return this; }
  47.339 -
  47.340 -  static ArrayType* parse_generic_signature(DescriptorStream* str);
  47.341 -
  47.342 -  bool covariant_match(Type* gt, Context* ctx);
  47.343 -  ArrayType* canonicalize(Context* ctx, int ctx_depth);
  47.344 -
  47.345 -  void bind_variables_to_parameters(Descriptor* sig);
  47.346 -
  47.347 -#ifndef PRODUCT
  47.348 -  void reify_signature(stringStream* ss, Context* ctx);
  47.349 -  void print_on(outputStream* str) const;
  47.350 -#endif
  47.351 -};
  47.352 -
  47.353 -class PrimitiveType : public Type {
  47.354 -  friend class Type;
  47.355 - private:
  47.356 -  char _type; // includes V for void
  47.357 -
  47.358 -  PrimitiveType(char& type) : _type(type) {}
  47.359 -
  47.360 - public:
  47.361 -  PrimitiveType* as_primitive() { return this; }
  47.362 -
  47.363 -  bool covariant_match(Type* gt, Context* ctx);
  47.364 -  PrimitiveType* canonicalize(Context* ctx, int ctx_depth);
  47.365 -
  47.366 -  void bind_variables_to_parameters(Descriptor* sig);
  47.367 -
  47.368 -#ifndef PRODUCT
  47.369 -  void reify_signature(stringStream* ss, Context* ctx);
  47.370 -  void print_on(outputStream* str) const;
  47.371 -#endif
  47.372 -};
  47.373 -
  47.374 -class TypeArgument : public ResourceObj {
  47.375 - private:
  47.376 -  Type* _lower_bound;
  47.377 -  Type* _upper_bound; // may be null or == _lower_bound
  47.378 -
  47.379 -  TypeArgument(Type* lower_bound, Type* upper_bound)
  47.380 -      : _lower_bound(lower_bound), _upper_bound(upper_bound) {}
  47.381 -
  47.382 - public:
  47.383 -
  47.384 -  static TypeArgument* parse_generic_signature(DescriptorStream* str);
  47.385 -
  47.386 -  Type* lower_bound() { return _lower_bound; }
  47.387 -  Type* upper_bound() { return _upper_bound; }
  47.388 -
  47.389 -  void bind_variables_to_parameters(Descriptor* sig);
  47.390 -  TypeArgument* canonicalize(Context* ctx, int ctx_depth);
  47.391 -
  47.392 -  bool covariant_match(TypeArgument* a, Context* ctx);
  47.393 -
  47.394 -#ifndef PRODUCT
  47.395 -  void print_on(outputStream* str) const;
  47.396 -#endif
  47.397 -};
  47.398 -
  47.399 -
  47.400 -class Context : public ResourceObj {
  47.401 - private:
  47.402 -  DescriptorCache* _cache;
  47.403 -  GrowableArray<ClassType*> _type_arguments;
  47.404 -
  47.405 -  void reset_to_mark(int size);
  47.406 -
  47.407 - public:
  47.408 -  // When this object goes out of scope or 'destroy' is
  47.409 -  // called, then the application of the type to the
  47.410 -  // context is wound-back (unless it's been deactivated).
  47.411 -  class Mark : public StackObj {
  47.412 -   private:
  47.413 -    mutable Context* _context;
  47.414 -    int _marked_size;
  47.415 -
  47.416 -    bool is_active() const { return _context != NULL; }
  47.417 -    void deactivate() const { _context = NULL; }
  47.418 -
  47.419 -   public:
  47.420 -    Mark() : _context(NULL), _marked_size(0) {}
  47.421 -    Mark(Context* ctx, int sz) : _context(ctx), _marked_size(sz) {}
  47.422 -    Mark(const Mark& m) : _context(m._context), _marked_size(m._marked_size) {
  47.423 -      m.deactivate(); // Ownership is transferred
  47.424 -    }
  47.425 -
  47.426 -    Mark& operator=(const Mark& cm) {
  47.427 -      destroy();
  47.428 -      _context = cm._context;
  47.429 -      _marked_size = cm._marked_size;
  47.430 -      cm.deactivate();
  47.431 -      return *this;
  47.432 -    }
  47.433 -
  47.434 -    void destroy();
  47.435 -    ~Mark() { destroy(); }
  47.436 -  };
  47.437 -
  47.438 -  Context(DescriptorCache* cache) : _cache(cache) {}
  47.439 -
  47.440 -  Mark mark() { return Mark(this, _type_arguments.length()); }
  47.441 -  void apply_type_arguments(InstanceKlass* current, InstanceKlass* super,TRAPS);
  47.442 -
  47.443 -  ClassType* at_depth(int i) const;
  47.444 -
  47.445 -#ifndef PRODUCT
  47.446 -  void print_on(outputStream* str) const;
  47.447 -#endif
  47.448 -};
  47.449 -
  47.450 -/**
  47.451 - * Contains a cache of descriptors for classes and methods so they can be
  47.452 - * looked-up instead of reparsing each time they are needed.
  47.453 - */
  47.454 -class DescriptorCache : public ResourceObj {
  47.455 - private:
  47.456 -  ResourceHashtable<InstanceKlass*, ClassDescriptor*> _class_descriptors;
  47.457 -  ResourceHashtable<Method*, MethodDescriptor*> _method_descriptors;
  47.458 -
  47.459 - public:
  47.460 -  ClassDescriptor* descriptor_for(InstanceKlass* ikh, TRAPS);
  47.461 -
  47.462 -  MethodDescriptor* descriptor_for(Method* mh, ClassDescriptor* cd, TRAPS);
  47.463 -  // Class descriptor derived from method holder
  47.464 -  MethodDescriptor* descriptor_for(Method* mh, TRAPS);
  47.465 -};
  47.466 -
  47.467 -} // namespace generic
  47.468 -
  47.469 -#endif // SHARE_VM_CLASSFILE_GENERICSIGNATURES_HPP
  47.470 -
    48.1 --- a/src/share/vm/classfile/verifier.cpp	Fri Sep 06 09:55:38 2013 +0100
    48.2 +++ b/src/share/vm/classfile/verifier.cpp	Sat Sep 14 20:40:34 2013 +0100
    48.3 @@ -188,6 +188,10 @@
    48.4  bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool should_verify_class) {
    48.5    Symbol* name = klass->name();
    48.6    Klass* refl_magic_klass = SystemDictionary::reflect_MagicAccessorImpl_klass();
    48.7 +  Klass* lambda_magic_klass = SystemDictionary::lambda_MagicLambdaImpl_klass();
    48.8 +
    48.9 +  bool is_reflect = refl_magic_klass != NULL && klass->is_subtype_of(refl_magic_klass);
   48.10 +  bool is_lambda = lambda_magic_klass != NULL && klass->is_subtype_of(lambda_magic_klass);
   48.11  
   48.12    return (should_verify_for(klass->class_loader(), should_verify_class) &&
   48.13      // return if the class is a bootstrapping class
   48.14 @@ -210,9 +214,9 @@
   48.15      // sun/reflect/SerializationConstructorAccessor.
   48.16      // NOTE: this is called too early in the bootstrapping process to be
   48.17      // guarded by Universe::is_gte_jdk14x_version()/UseNewReflection.
   48.18 -    (refl_magic_klass == NULL ||
   48.19 -     !klass->is_subtype_of(refl_magic_klass) ||
   48.20 -     VerifyReflectionBytecodes)
   48.21 +    // Also applies to lambda-generated code (JDK 8 and later).
   48.22 +    (!is_reflect || VerifyReflectionBytecodes) &&
   48.23 +    (!is_lambda || VerifyLambdaBytecodes)
   48.24    );
   48.25  }
   48.26  
   48.27 @@ -2318,9 +2322,6 @@
   48.28        types = 1 << JVM_CONSTANT_InvokeDynamic;
   48.29        break;
   48.30      case Bytecodes::_invokespecial:
   48.31 -      types = (1 << JVM_CONSTANT_InterfaceMethodref) |
   48.32 -              (1 << JVM_CONSTANT_Methodref);
   48.33 -      break;
   48.34      case Bytecodes::_invokestatic:
   48.35        types = (_klass->major_version() < STATIC_METHOD_IN_INTERFACE_MAJOR_VERSION) ?
   48.36          (1 << JVM_CONSTANT_Methodref) :
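
The reworked eligibility check above folds each "magic" superclass exemption into a flag-guarded boolean, so adding the lambda case becomes one more conjunct rather than another nested condition. A minimal standalone sketch of that predicate shape (the names mirror the patch, but the surrounding conditions of is_eligible_for_verification are elided, and the two Verify*Bytecodes parameters stand in for the real diagnostic flags):

    static bool passes_magic_exemptions(bool is_reflect, bool is_lambda,
                                        bool VerifyReflectionBytecodes,
                                        bool VerifyLambdaBytecodes) {
      // A subtype of a magic class skips verification unless its
      // diagnostic flag forces verification back on.
      return (!is_reflect || VerifyReflectionBytecodes) &&
             (!is_lambda  || VerifyLambdaBytecodes);
    }
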
    49.1 --- a/src/share/vm/code/codeBlob.cpp	Fri Sep 06 09:55:38 2013 +0100
    49.2 +++ b/src/share/vm/code/codeBlob.cpp	Sat Sep 14 20:40:34 2013 +0100
    49.3 @@ -1,5 +1,5 @@
    49.4  /*
    49.5 - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
    49.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    49.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    49.8   *
    49.9   * This code is free software; you can redistribute it and/or modify it
   49.10 @@ -245,7 +245,7 @@
   49.11  }
   49.12  
   49.13  
   49.14 -void* BufferBlob::operator new(size_t s, unsigned size) {
   49.15 +void* BufferBlob::operator new(size_t s, unsigned size) throw() {
   49.16    void* p = CodeCache::allocate(size);
   49.17    return p;
   49.18  }
   49.19 @@ -347,14 +347,14 @@
   49.20  }
   49.21  
   49.22  
   49.23 -void* RuntimeStub::operator new(size_t s, unsigned size) {
   49.24 +void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
   49.25    void* p = CodeCache::allocate(size, true);
   49.26    if (!p) fatal("Initial size of CodeCache is too small");
   49.27    return p;
   49.28  }
   49.29  
   49.30  // operator new shared by all singletons:
   49.31 -void* SingletonBlob::operator new(size_t s, unsigned size) {
   49.32 +void* SingletonBlob::operator new(size_t s, unsigned size) throw() {
   49.33    void* p = CodeCache::allocate(size, true);
   49.34    if (!p) fatal("Initial size of CodeCache is too small");
   49.35    return p;
    50.1 --- a/src/share/vm/code/codeBlob.hpp	Fri Sep 06 09:55:38 2013 +0100
    50.2 +++ b/src/share/vm/code/codeBlob.hpp	Sat Sep 14 20:40:34 2013 +0100
    50.3 @@ -1,5 +1,5 @@
    50.4  /*
    50.5 - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
    50.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    50.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    50.8   *
    50.9   * This code is free software; you can redistribute it and/or modify it
   50.10 @@ -209,7 +209,7 @@
   50.11    BufferBlob(const char* name, int size);
   50.12    BufferBlob(const char* name, int size, CodeBuffer* cb);
   50.13  
   50.14 -  void* operator new(size_t s, unsigned size);
   50.15 +  void* operator new(size_t s, unsigned size) throw();
   50.16  
   50.17   public:
   50.18    // Creation
   50.19 @@ -283,7 +283,7 @@
   50.20      bool        caller_must_gc_arguments
   50.21    );
   50.22  
   50.23 -  void* operator new(size_t s, unsigned size);
   50.24 +  void* operator new(size_t s, unsigned size) throw();
   50.25  
   50.26   public:
   50.27    // Creation
   50.28 @@ -321,7 +321,7 @@
   50.29    friend class VMStructs;
   50.30  
   50.31   protected:
   50.32 -  void* operator new(size_t s, unsigned size);
   50.33 +  void* operator new(size_t s, unsigned size) throw();
   50.34  
   50.35   public:
   50.36     SingletonBlob(
    51.1 --- a/src/share/vm/code/debugInfoRec.cpp	Fri Sep 06 09:55:38 2013 +0100
    51.2 +++ b/src/share/vm/code/debugInfoRec.cpp	Sat Sep 14 20:40:34 2013 +0100
    51.3 @@ -1,5 +1,5 @@
    51.4  /*
    51.5 - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
    51.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    51.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    51.8   *
    51.9   * This code is free software; you can redistribute it and/or modify it
   51.10 @@ -38,7 +38,7 @@
   51.11    int  _length; // number of bytes in the stream
   51.12    int  _hash;   // hash of stream bytes (for quicker reuse)
   51.13  
   51.14 -  void* operator new(size_t ignore, DebugInformationRecorder* dir) {
   51.15 +  void* operator new(size_t ignore, DebugInformationRecorder* dir) throw() {
   51.16      assert(ignore == sizeof(DIR_Chunk), "");
   51.17      if (dir->_next_chunk >= dir->_next_chunk_limit) {
   51.18        const int CHUNK = 100;
    52.1 --- a/src/share/vm/code/nmethod.cpp	Fri Sep 06 09:55:38 2013 +0100
    52.2 +++ b/src/share/vm/code/nmethod.cpp	Sat Sep 14 20:40:34 2013 +0100
    52.3 @@ -1,5 +1,5 @@
    52.4  /*
    52.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    52.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    52.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    52.8   *
    52.9   * This code is free software; you can redistribute it and/or modify it
   52.10 @@ -93,18 +93,21 @@
   52.11  #endif
   52.12  
   52.13  bool nmethod::is_compiled_by_c1() const {
   52.14 -  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
   52.15 -  if (is_native_method()) return false;
   52.16 +  if (compiler() == NULL) {
   52.17 +    return false;
   52.18 +  }
   52.19    return compiler()->is_c1();
   52.20  }
   52.21  bool nmethod::is_compiled_by_c2() const {
   52.22 -  if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
   52.23 -  if (is_native_method()) return false;
   52.24 +  if (compiler() == NULL) {
   52.25 +    return false;
   52.26 +  }
   52.27    return compiler()->is_c2();
   52.28  }
   52.29  bool nmethod::is_compiled_by_shark() const {
   52.30 -  if (is_native_method()) return false;
   52.31 -  assert(compiler() != NULL, "must be");
   52.32 +  if (compiler() == NULL) {
   52.33 +    return false;
   52.34 +  }
   52.35    return compiler()->is_shark();
   52.36  }
   52.37  
   52.38 @@ -800,7 +803,7 @@
   52.39  }
   52.40  #endif // def HAVE_DTRACE_H
   52.41  
   52.42 -void* nmethod::operator new(size_t size, int nmethod_size) throw () {
   52.43 +void* nmethod::operator new(size_t size, int nmethod_size) throw() {
   52.44   // Not critical, may return null if there is too little contiguous memory
   52.45    return CodeCache::allocate(nmethod_size);
   52.46  }
   52.47 @@ -1401,6 +1404,9 @@
   52.48      // nmethods aren't scanned for GC.
   52.49      _oops_are_stale = true;
   52.50  #endif
   52.51 +    // The Method may be reclaimed by class unloading now that the
   52.52 +    // nmethod is in zombie state.
   52.53 +    set_method(NULL);
   52.54    } else {
   52.55      assert(state == not_entrant, "other cases may need to be handled differently");
   52.56    }
    53.1 --- a/src/share/vm/code/nmethod.hpp	Fri Sep 06 09:55:38 2013 +0100
    53.2 +++ b/src/share/vm/code/nmethod.hpp	Sat Sep 14 20:40:34 2013 +0100
    53.3 @@ -1,5 +1,5 @@
    53.4  /*
    53.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    53.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    53.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    53.8   *
    53.9   * This code is free software; you can redistribute it and/or modify it
   53.10 @@ -265,7 +265,7 @@
   53.11            int comp_level);
   53.12  
   53.13    // helper methods
   53.14 -  void* operator new(size_t size, int nmethod_size);
   53.15 +  void* operator new(size_t size, int nmethod_size) throw();
   53.16  
   53.17    const char* reloc_string_for(u_char* begin, u_char* end);
   53.18    // Returns true if this thread changed the state of the nmethod or
    54.1 --- a/src/share/vm/code/relocInfo.hpp	Fri Sep 06 09:55:38 2013 +0100
    54.2 +++ b/src/share/vm/code/relocInfo.hpp	Sat Sep 14 20:40:34 2013 +0100
    54.3 @@ -1,5 +1,5 @@
    54.4  /*
    54.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    54.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    54.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    54.8   *
    54.9   * This code is free software; you can redistribute it and/or modify it
   54.10 @@ -677,7 +677,7 @@
   54.11    }
   54.12  
   54.13   public:
   54.14 -  void* operator new(size_t size, const RelocationHolder& holder) {
   54.15 +  void* operator new(size_t size, const RelocationHolder& holder) throw() {
   54.16      if (size > sizeof(holder._relocbuf)) guarantee_size();
   54.17      assert((void* const *)holder.reloc() == &holder._relocbuf[0], "ptrs must agree");
   54.18      return holder.reloc();
    55.1 --- a/src/share/vm/code/vtableStubs.cpp	Fri Sep 06 09:55:38 2013 +0100
    55.2 +++ b/src/share/vm/code/vtableStubs.cpp	Sat Sep 14 20:40:34 2013 +0100
    55.3 @@ -1,5 +1,5 @@
    55.4  /*
    55.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    55.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    55.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    55.8   *
    55.9   * This code is free software; you can redistribute it and/or modify it
   55.10 @@ -49,7 +49,7 @@
   55.11  static int num_vtable_chunks = 0;
   55.12  
   55.13  
   55.14 -void* VtableStub::operator new(size_t size, int code_size) {
   55.15 +void* VtableStub::operator new(size_t size, int code_size) throw() {
   55.16    assert(size == sizeof(VtableStub), "mismatched size");
   55.17    num_vtable_chunks++;
   55.18    // compute real VtableStub size (rounded to nearest word)
    56.1 --- a/src/share/vm/code/vtableStubs.hpp	Fri Sep 06 09:55:38 2013 +0100
    56.2 +++ b/src/share/vm/code/vtableStubs.hpp	Sat Sep 14 20:40:34 2013 +0100
    56.3 @@ -1,5 +1,5 @@
    56.4  /*
    56.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    56.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    56.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    56.8   *
    56.9   * This code is free software; you can redistribute it and/or modify it
   56.10 @@ -46,7 +46,7 @@
   56.11   bool           _is_vtable_stub;    // True if vtable stub, false if itable stub
   56.12    /* code follows here */            // The vtableStub code
   56.13  
   56.14 -  void* operator new(size_t size, int code_size);
   56.15 +  void* operator new(size_t size, int code_size) throw();
   56.16  
   56.17    VtableStub(bool is_vtable_stub, int index)
   56.18          : _next(NULL), _is_vtable_stub(is_vtable_stub),
    57.1 --- a/src/share/vm/compiler/compileBroker.cpp	Fri Sep 06 09:55:38 2013 +0100
    57.2 +++ b/src/share/vm/compiler/compileBroker.cpp	Sat Sep 14 20:40:34 2013 +0100
    57.3 @@ -1718,7 +1718,7 @@
    57.4      CodeCache::print_summary(&s, detailed);
    57.5    }
    57.6    ttyLocker ttyl;
    57.7 -  tty->print_cr(s.as_string());
    57.8 +  tty->print(s.as_string());
    57.9  }
   57.10  
   57.11  // ------------------------------------------------------------------
    58.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Sep 06 09:55:38 2013 +0100
    58.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sat Sep 14 20:40:34 2013 +0100
    58.3 @@ -2493,11 +2493,11 @@
    58.4  
    58.5  void G1CollectedHeap::register_concurrent_cycle_end() {
    58.6    if (_concurrent_cycle_started) {
    58.7 -    _gc_timer_cm->register_gc_end(os::elapsed_counter());
    58.8 -
    58.9      if (_cm->has_aborted()) {
   58.10        _gc_tracer_cm->report_concurrent_mode_failure();
   58.11      }
   58.12 +
   58.13 +    _gc_timer_cm->register_gc_end(os::elapsed_counter());
   58.14      _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
   58.15  
   58.16      _concurrent_cycle_started = false;
    59.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Sep 06 09:55:38 2013 +0100
    59.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Sat Sep 14 20:40:34 2013 +0100
    59.3 @@ -168,7 +168,15 @@
    59.4    // Set up the region size and associated fields. Given that the
    59.5    // policy is created before the heap, we have to set this up here,
    59.6    // so it's done as soon as possible.
    59.7 -  HeapRegion::setup_heap_region_size(Arguments::min_heap_size());
    59.8 +
    59.9 +  // It would have been natural to pass initial_heap_byte_size() and
   59.10 +  // max_heap_byte_size() to setup_heap_region_size() but those have
   59.11 +  // not been set up at this point since they should be aligned with
   59.12 +  // the region size. So, there is a circular dependency here. We base
   59.13 +  // the region size on the heap size, but the heap size should be
   59.14 +  // aligned with the region size. To get around this we use the
   59.15 +  // unaligned values for the heap.
   59.16 +  HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
   59.17    HeapRegionRemSet::setup_remset_size();
   59.18  
   59.19    G1ErgoVerbose::initialize();
    60.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Sep 06 09:55:38 2013 +0100
    60.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Sat Sep 14 20:40:34 2013 +0100
    60.3 @@ -149,18 +149,11 @@
    60.4  // many regions in the heap (based on the heap size).
    60.5  #define TARGET_REGION_NUMBER          2048
    60.6  
    60.7 -void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
    60.8 -  // region_size in bytes
    60.9 +void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
   60.10    uintx region_size = G1HeapRegionSize;
   60.11    if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
   60.12 -    // We base the automatic calculation on the min heap size. This
   60.13 -    // can be problematic if the spread between min and max is quite
   60.14 -    // wide, imagine -Xms128m -Xmx32g. But, if we decided it based on
   60.15 -    // the max size, the region size might be way too large for the
   60.16 -    // min size. Either way, some users might have to set the region
   60.17 -    // size manually for some -Xms / -Xmx combos.
   60.18 -
   60.19 -    region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
   60.20 +    size_t average_heap_size = (initial_heap_size + max_heap_size) / 2;
   60.21 +    region_size = MAX2(average_heap_size / TARGET_REGION_NUMBER,
   60.22                         (uintx) MIN_REGION_SIZE);
   60.23    }
   60.24  
    61.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Sep 06 09:55:38 2013 +0100
    61.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Sat Sep 14 20:40:34 2013 +0100
    61.3 @@ -361,7 +361,7 @@
    61.4    // CardsPerRegion). All those fields are considered constant
    61.5    // throughout the JVM's execution, therefore they should only be set
    61.6    // up once during initialization time.
    61.7 -  static void setup_heap_region_size(uintx min_heap_size);
    61.8 +  static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
    61.9  
   61.10    enum ClaimValues {
   61.11      InitialClaimValue          = 0,
    62.1 --- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Fri Sep 06 09:55:38 2013 +0100
    62.2 +++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Sat Sep 14 20:40:34 2013 +0100
    62.3 @@ -28,6 +28,7 @@
    62.4  #include "gc_implementation/shared/gcTrace.hpp"
    62.5  #include "gc_implementation/shared/gcWhen.hpp"
    62.6  #include "gc_implementation/shared/copyFailedInfo.hpp"
    62.7 +#include "runtime/os.hpp"
    62.8  #include "trace/tracing.hpp"
    62.9  #include "trace/traceBackend.hpp"
   62.10  #if INCLUDE_ALL_GCS
   62.11 @@ -54,11 +55,12 @@
   62.12  }
   62.13  
   62.14  void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
   62.15 -  EventGCReferenceStatistics e;
   62.16 +  EventGCReferenceStatistics e(UNTIMED);
   62.17    if (e.should_commit()) {
   62.18        e.set_gcId(_shared_gc_info.id());
   62.19        e.set_type((u1)type);
   62.20        e.set_count(count);
   62.21 +      e.set_endtime(os::elapsed_counter());
   62.22        e.commit();
   62.23    }
   62.24  }
   62.25 @@ -105,20 +107,22 @@
   62.26  }
   62.27  
   62.28  void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
   62.29 -  EventPromotionFailed e;
   62.30 +  EventPromotionFailed e(UNTIMED);
   62.31    if (e.should_commit()) {
   62.32      e.set_gcId(_shared_gc_info.id());
   62.33      e.set_data(to_trace_struct(pf_info));
   62.34      e.set_thread(pf_info.thread()->thread_id());
   62.35 +    e.set_endtime(os::elapsed_counter());
   62.36      e.commit();
   62.37    }
   62.38  }
   62.39  
   62.40  // Common to CMS and G1
   62.41  void OldGCTracer::send_concurrent_mode_failure_event() {
   62.42 -  EventConcurrentModeFailure e;
   62.43 +  EventConcurrentModeFailure e(UNTIMED);
   62.44    if (e.should_commit()) {
   62.45      e.set_gcId(_shared_gc_info.id());
   62.46 +    e.set_endtime(os::elapsed_counter());
   62.47      e.commit();
   62.48    }
   62.49  }
   62.50 @@ -136,7 +140,7 @@
   62.51  }
   62.52  
   62.53  void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
   62.54 -  EventEvacuationInfo e;
   62.55 +  EventEvacuationInfo e(UNTIMED);
   62.56    if (e.should_commit()) {
   62.57      e.set_gcId(_shared_gc_info.id());
   62.58      e.set_cSetRegions(info->collectionset_regions());
   62.59 @@ -147,15 +151,17 @@
   62.60      e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
   62.61      e.set_bytesCopied(info->bytes_copied());
   62.62      e.set_regionsFreed(info->regions_freed());
   62.63 +    e.set_endtime(os::elapsed_counter());
   62.64      e.commit();
   62.65    }
   62.66  }
   62.67  
   62.68  void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
   62.69 -  EventEvacuationFailed e;
   62.70 +  EventEvacuationFailed e(UNTIMED);
   62.71    if (e.should_commit()) {
   62.72      e.set_gcId(_shared_gc_info.id());
   62.73      e.set_data(to_trace_struct(ef_info));
   62.74 +    e.set_endtime(os::elapsed_counter());
   62.75      e.commit();
   62.76    }
   62.77  }
   62.78 @@ -189,12 +195,13 @@
   62.79    void visit(const GCHeapSummary* heap_summary) const {
   62.80      const VirtualSpaceSummary& heap_space = heap_summary->heap();
   62.81  
   62.82 -    EventGCHeapSummary e;
   62.83 +    EventGCHeapSummary e(UNTIMED);
   62.84      if (e.should_commit()) {
   62.85        e.set_gcId(_id);
   62.86        e.set_when((u1)_when);
   62.87        e.set_heapSpace(to_trace_struct(heap_space));
   62.88        e.set_heapUsed(heap_summary->used());
   62.89 +      e.set_endtime(os::elapsed_counter());
   62.90        e.commit();
   62.91      }
   62.92    }
   62.93 @@ -209,7 +216,7 @@
   62.94      const SpaceSummary& from_space = ps_heap_summary->from();
   62.95      const SpaceSummary& to_space = ps_heap_summary->to();
   62.96  
   62.97 -    EventPSHeapSummary e;
   62.98 +    EventPSHeapSummary e(UNTIMED);
   62.99      if (e.should_commit()) {
  62.100        e.set_gcId(_id);
  62.101        e.set_when((u1)_when);
  62.102 @@ -220,6 +227,7 @@
  62.103        e.set_edenSpace(to_trace_struct(ps_heap_summary->eden()));
  62.104        e.set_fromSpace(to_trace_struct(ps_heap_summary->from()));
  62.105        e.set_toSpace(to_trace_struct(ps_heap_summary->to()));
  62.106 +      e.set_endtime(os::elapsed_counter());
  62.107        e.commit();
  62.108      }
  62.109    }
  62.110 @@ -241,13 +249,14 @@
  62.111  }
  62.112  
  62.113  void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
  62.114 -  EventMetaspaceSummary e;
  62.115 +  EventMetaspaceSummary e(UNTIMED);
  62.116    if (e.should_commit()) {
  62.117      e.set_gcId(_shared_gc_info.id());
  62.118      e.set_when((u1) when);
  62.119      e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
  62.120      e.set_dataSpace(to_trace_struct(meta_space_summary.data_space()));
  62.121      e.set_classSpace(to_trace_struct(meta_space_summary.class_space()));
  62.122 +    e.set_endtime(os::elapsed_counter());
  62.123      e.commit();
  62.124    }
  62.125  }
  62.126 @@ -282,8 +291,6 @@
  62.127        default: /* Ignore sending this phase */ break;
  62.128      }
  62.129    }
  62.130 -
  62.131 -#undef send_phase
  62.132  };
  62.133  
  62.134  void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
    63.1 --- a/src/share/vm/gc_implementation/shared/gcUtil.hpp	Fri Sep 06 09:55:38 2013 +0100
    63.2 +++ b/src/share/vm/gc_implementation/shared/gcUtil.hpp	Sat Sep 14 20:40:34 2013 +0100
    63.3 @@ -1,5 +1,5 @@
    63.4  /*
    63.5 - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
    63.6 + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
    63.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    63.8   *
    63.9   * This code is free software; you can redistribute it and/or modify it
   63.10 @@ -144,9 +144,9 @@
   63.11      _padded_avg(0.0), _deviation(0.0), _padding(padding) {}
   63.12  
   63.13    // Placement support
   63.14 -  void* operator new(size_t ignored, void* p) { return p; }
   63.15 +  void* operator new(size_t ignored, void* p) throw() { return p; }
   63.16    // Allocator
   63.17 -  void* operator new(size_t size) { return CHeapObj<mtGC>::operator new(size); }
   63.18 +  void* operator new(size_t size) throw() { return CHeapObj<mtGC>::operator new(size); }
   63.19  
   63.20    // Accessor
   63.21    float padded_average() const         { return _padded_avg; }
    64.1 --- a/src/share/vm/libadt/port.hpp	Fri Sep 06 09:55:38 2013 +0100
    64.2 +++ b/src/share/vm/libadt/port.hpp	Sat Sep 14 20:40:34 2013 +0100
    64.3 @@ -1,5 +1,5 @@
    64.4  /*
    64.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    64.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    64.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    64.8   *
    64.9   * This code is free software; you can redistribute it and/or modify it
   64.10 @@ -163,8 +163,8 @@
   64.11  extern void *safe_calloc (const char *file, unsigned line, unsigned nitems, unsigned size);
   64.12  extern void *safe_realloc(const char *file, unsigned line, void *ptr, unsigned size);
   64.13  extern char *safe_strdup (const char *file, unsigned line, const char *src);
   64.14 -inline void *operator new( size_t size ) { return malloc(size); }
   64.15 -inline void operator delete( void *ptr ) { free(ptr); }
   64.16 +inline void *operator new( size_t size ) throw() { return malloc(size); }
   64.17 +inline void operator delete( void *ptr )         { free(ptr); }
   64.18  #endif
   64.19  
   64.20  //-----------------------------------------------------------------------------
    65.1 --- a/src/share/vm/memory/allocation.cpp	Fri Sep 06 09:55:38 2013 +0100
    65.2 +++ b/src/share/vm/memory/allocation.cpp	Sat Sep 14 20:40:34 2013 +0100
    65.3 @@ -49,19 +49,19 @@
    65.4  # include "os_bsd.inline.hpp"
    65.5  #endif
    65.6  
    65.7 -void* StackObj::operator new(size_t size)       { ShouldNotCallThis(); return 0; }
    65.8 -void  StackObj::operator delete(void* p)        { ShouldNotCallThis(); }
    65.9 -void* StackObj::operator new [](size_t size)    { ShouldNotCallThis(); return 0; }
   65.10 -void  StackObj::operator delete [](void* p)     { ShouldNotCallThis(); }
   65.11 +void* StackObj::operator new(size_t size)     throw() { ShouldNotCallThis(); return 0; }
   65.12 +void  StackObj::operator delete(void* p)              { ShouldNotCallThis(); }
   65.13 +void* StackObj::operator new [](size_t size)  throw() { ShouldNotCallThis(); return 0; }
   65.14 +void  StackObj::operator delete [](void* p)           { ShouldNotCallThis(); }
   65.15  
   65.16 -void* _ValueObj::operator new(size_t size)      { ShouldNotCallThis(); return 0; }
   65.17 -void  _ValueObj::operator delete(void* p)       { ShouldNotCallThis(); }
   65.18 -void* _ValueObj::operator new [](size_t size)   { ShouldNotCallThis(); return 0; }
   65.19 -void  _ValueObj::operator delete [](void* p)    { ShouldNotCallThis(); }
   65.20 +void* _ValueObj::operator new(size_t size)    throw() { ShouldNotCallThis(); return 0; }
   65.21 +void  _ValueObj::operator delete(void* p)             { ShouldNotCallThis(); }
   65.22 +void* _ValueObj::operator new [](size_t size) throw() { ShouldNotCallThis(); return 0; }
   65.23 +void  _ValueObj::operator delete [](void* p)          { ShouldNotCallThis(); }
   65.24  
   65.25  void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
   65.26                                   size_t word_size, bool read_only,
   65.27 -                                 MetaspaceObj::Type type, TRAPS) {
   65.28 +                                 MetaspaceObj::Type type, TRAPS) throw() {
   65.29    // Klass has its own operator new
   65.30    return Metaspace::allocate(loader_data, word_size, read_only,
   65.31                               type, CHECK_NULL);
   65.32 @@ -80,7 +80,7 @@
   65.33    st->print(" {"INTPTR_FORMAT"}", this);
   65.34  }
   65.35  
   65.36 -void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) {
   65.37 +void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) throw() {
   65.38    address res;
   65.39    switch (type) {
   65.40     case C_HEAP:
   65.41 @@ -97,12 +97,12 @@
   65.42    return res;
   65.43  }
   65.44  
   65.45 -void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) {
   65.46 +void* ResourceObj::operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw() {
   65.47    return (address) operator new(size, type, flags);
   65.48  }
   65.49  
   65.50  void* ResourceObj::operator new(size_t size, const std::nothrow_t&  nothrow_constant,
   65.51 -    allocation_type type, MEMFLAGS flags) {
   65.52 +    allocation_type type, MEMFLAGS flags) throw() {
   65.53    // Should only be called with std::nothrow; use the other operator new() otherwise
   65.54    address res;
   65.55    switch (type) {
   65.56 @@ -121,7 +121,7 @@
   65.57  }
   65.58  
   65.59  void* ResourceObj::operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
   65.60 -    allocation_type type, MEMFLAGS flags) {
   65.61 +    allocation_type type, MEMFLAGS flags) throw() {
   65.62    return (address)operator new(size, nothrow_constant, type, flags);
   65.63  }
   65.64  
   65.65 @@ -370,7 +370,7 @@
   65.66  //--------------------------------------------------------------------------------------
   65.67  // Chunk implementation
   65.68  
   65.69 -void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) {
   65.70 +void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) throw() {
   65.71    // requested_size is equal to sizeof(Chunk) but in order for the arena
   65.72    // allocations to come out aligned as expected the size must be aligned
   65.73    // to expected arena alignment.
   65.74 @@ -478,18 +478,18 @@
   65.75    NOT_PRODUCT(Atomic::dec(&_instance_count);)
   65.76  }
   65.77  
   65.78 -void* Arena::operator new(size_t size) {
   65.79 +void* Arena::operator new(size_t size) throw() {
   65.80    assert(false, "Use dynamic memory type binding");
   65.81    return NULL;
   65.82  }
   65.83  
   65.84 -void* Arena::operator new (size_t size, const std::nothrow_t&  nothrow_constant) {
   65.85 +void* Arena::operator new (size_t size, const std::nothrow_t&  nothrow_constant) throw() {
   65.86    assert(false, "Use dynamic memory type binding");
   65.87    return NULL;
   65.88  }
   65.89  
   65.90    // dynamic memory type binding
   65.91 -void* Arena::operator new(size_t size, MEMFLAGS flags) {
   65.92 +void* Arena::operator new(size_t size, MEMFLAGS flags) throw() {
   65.93  #ifdef ASSERT
   65.94    void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC);
   65.95    if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
   65.96 @@ -499,7 +499,7 @@
   65.97  #endif
   65.98  }
   65.99  
  65.100 -void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) {
  65.101 +void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() {
  65.102  #ifdef ASSERT
  65.103    void* p = os::malloc(size, flags|otArena, CALLER_PC);
  65.104    if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p);
  65.105 @@ -688,22 +688,22 @@
  65.106  // define ALLOW_OPERATOR_NEW_USAGE for platform on which global operator new allowed.
  65.107  //
  65.108  #ifndef ALLOW_OPERATOR_NEW_USAGE
  65.109 -void* operator new(size_t size){
  65.110 +void* operator new(size_t size) throw() {
  65.111    assert(false, "Should not call global operator new");
  65.112    return 0;
  65.113  }
  65.114  
  65.115 -void* operator new [](size_t size){
  65.116 +void* operator new [](size_t size) throw() {
  65.117    assert(false, "Should not call global operator new[]");
  65.118    return 0;
  65.119  }
  65.120  
  65.121 -void* operator new(size_t size, const std::nothrow_t&  nothrow_constant){
  65.122 +void* operator new(size_t size, const std::nothrow_t&  nothrow_constant) throw() {
  65.123    assert(false, "Should not call global operator new");
  65.124    return 0;
  65.125  }
  65.126  
  65.127 -void* operator new [](size_t size, std::nothrow_t&  nothrow_constant){
  65.128 +void* operator new [](size_t size, std::nothrow_t&  nothrow_constant) throw() {
  65.129    assert(false, "Should not call global operator new[]");
  65.130    return 0;
  65.131  }
    66.1 --- a/src/share/vm/memory/allocation.hpp	Fri Sep 06 09:55:38 2013 +0100
    66.2 +++ b/src/share/vm/memory/allocation.hpp	Sat Sep 14 20:40:34 2013 +0100
    66.3 @@ -204,12 +204,12 @@
    66.4  
    66.5  template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
    66.6   public:
    66.7 -  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0);
    66.8 +  _NOINLINE_ void* operator new(size_t size, address caller_pc = 0) throw();
    66.9    _NOINLINE_ void* operator new (size_t size, const std::nothrow_t&  nothrow_constant,
   66.10 -                               address caller_pc = 0);
   66.11 -  _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0);
   66.12 +                               address caller_pc = 0) throw();
   66.13 +  _NOINLINE_ void* operator new [](size_t size, address caller_pc = 0) throw();
   66.14    _NOINLINE_ void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
   66.15 -                               address caller_pc = 0);
   66.16 +                               address caller_pc = 0) throw();
   66.17    void  operator delete(void* p);
   66.18    void  operator delete [] (void* p);
   66.19  };
   66.20 @@ -219,9 +219,9 @@
   66.21  
   66.22  class StackObj ALLOCATION_SUPER_CLASS_SPEC {
   66.23   private:
   66.24 -  void* operator new(size_t size);
   66.25 +  void* operator new(size_t size) throw();
   66.26    void  operator delete(void* p);
   66.27 -  void* operator new [](size_t size);
   66.28 +  void* operator new [](size_t size) throw();
   66.29    void  operator delete [](void* p);
   66.30  };
   66.31  
   66.32 @@ -245,9 +245,9 @@
   66.33  //
   66.34  class _ValueObj {
   66.35   private:
   66.36 -  void* operator new(size_t size);
   66.37 +  void* operator new(size_t size) throw();
   66.38    void  operator delete(void* p);
   66.39 -  void* operator new [](size_t size);
   66.40 +  void* operator new [](size_t size) throw();
   66.41    void  operator delete [](void* p);
   66.42  };
   66.43  
   66.44 @@ -316,7 +316,7 @@
   66.45  
   66.46    void* operator new(size_t size, ClassLoaderData* loader_data,
   66.47                       size_t word_size, bool read_only,
   66.48 -                     Type type, Thread* thread);
   66.49 +                     Type type, Thread* thread) throw();
   66.50                       // can't use TRAPS from this header file.
   66.51    void operator delete(void* p) { ShouldNotCallThis(); }
   66.52  };
   66.53 @@ -339,7 +339,7 @@
   66.54    Chunk*       _next;     // Next Chunk in list
   66.55    const size_t _len;      // Size of this Chunk
   66.56   public:
   66.57 -  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length);
   66.58 +  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
   66.59    void  operator delete(void* p);
   66.60    Chunk(size_t length);
   66.61  
   66.62 @@ -422,12 +422,12 @@
   66.63    char* hwm() const             { return _hwm; }
   66.64  
   66.65    // new operators
   66.66 -  void* operator new (size_t size);
   66.67 -  void* operator new (size_t size, const std::nothrow_t& nothrow_constant);
   66.68 +  void* operator new (size_t size) throw();
   66.69 +  void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();
   66.70  
   66.71    // dynamic memory type tagging
   66.72 -  void* operator new(size_t size, MEMFLAGS flags);
   66.73 -  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags);
   66.74 +  void* operator new(size_t size, MEMFLAGS flags) throw();
   66.75 +  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
   66.76    void  operator delete(void* p);
   66.77  
   66.78    // Fast allocate in the arena.  Common case is: pointer test + increment.
   66.79 @@ -583,44 +583,44 @@
   66.80  #endif // ASSERT
   66.81  
   66.82   public:
   66.83 -  void* operator new(size_t size, allocation_type type, MEMFLAGS flags);
   66.84 -  void* operator new [](size_t size, allocation_type type, MEMFLAGS flags);
   66.85 +  void* operator new(size_t size, allocation_type type, MEMFLAGS flags) throw();
   66.86 +  void* operator new [](size_t size, allocation_type type, MEMFLAGS flags) throw();
   66.87    void* operator new(size_t size, const std::nothrow_t&  nothrow_constant,
   66.88 -      allocation_type type, MEMFLAGS flags);
   66.89 +      allocation_type type, MEMFLAGS flags) throw();
   66.90    void* operator new [](size_t size, const std::nothrow_t&  nothrow_constant,
   66.91 -      allocation_type type, MEMFLAGS flags);
   66.92 +      allocation_type type, MEMFLAGS flags) throw();
   66.93  
   66.94 -  void* operator new(size_t size, Arena *arena) {
   66.95 +  void* operator new(size_t size, Arena *arena) throw() {
   66.96        address res = (address)arena->Amalloc(size);
   66.97        DEBUG_ONLY(set_allocation_type(res, ARENA);)
   66.98        return res;
   66.99    }
  66.100  
  66.101 -  void* operator new [](size_t size, Arena *arena) {
  66.102 +  void* operator new [](size_t size, Arena *arena) throw() {
  66.103        address res = (address)arena->Amalloc(size);
  66.104        DEBUG_ONLY(set_allocation_type(res, ARENA);)
  66.105        return res;
  66.106    }
  66.107  
  66.108 -  void* operator new(size_t size) {
  66.109 +  void* operator new(size_t size) throw() {
  66.110        address res = (address)resource_allocate_bytes(size);
  66.111        DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
  66.112        return res;
  66.113    }
  66.114  
  66.115 -  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) {
  66.116 +  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
  66.117        address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
  66.118        DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
  66.119        return res;
  66.120    }
  66.121  
  66.122 -  void* operator new [](size_t size) {
  66.123 +  void* operator new [](size_t size) throw() {
  66.124        address res = (address)resource_allocate_bytes(size);
  66.125        DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
  66.126        return res;
  66.127    }
  66.128  
  66.129 -  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) {
  66.130 +  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant) throw() {
  66.131        address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
  66.132        DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
  66.133        return res;
    67.1 --- a/src/share/vm/memory/allocation.inline.hpp	Fri Sep 06 09:55:38 2013 +0100
    67.2 +++ b/src/share/vm/memory/allocation.inline.hpp	Sat Sep 14 20:40:34 2013 +0100
    67.3 @@ -85,7 +85,7 @@
    67.4  
    67.5  
    67.6  template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
    67.7 -      address caller_pc){
    67.8 +      address caller_pc) throw() {
    67.9      void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC));
   67.10  #ifdef ASSERT
   67.11      if (PrintMallocFree) trace_heap_malloc(size, "CHeapObj-new", p);
   67.12 @@ -94,7 +94,7 @@
   67.13    }
   67.14  
   67.15  template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
   67.16 -  const std::nothrow_t&  nothrow_constant, address caller_pc) {
   67.17 +  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
   67.18    void* p = (void*)AllocateHeap(size, F, (caller_pc != 0 ? caller_pc : CALLER_PC),
   67.19        AllocFailStrategy::RETURN_NULL);
   67.20  #ifdef ASSERT
   67.21 @@ -104,12 +104,12 @@
   67.22  }
   67.23  
   67.24  template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
   67.25 -      address caller_pc){
   67.26 +      address caller_pc) throw() {
   67.27      return CHeapObj<F>::operator new(size, caller_pc);
   67.28  }
   67.29  
   67.30  template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
   67.31 -  const std::nothrow_t&  nothrow_constant, address caller_pc) {
   67.32 +  const std::nothrow_t&  nothrow_constant, address caller_pc) throw() {
   67.33      return CHeapObj<F>::operator new(size, nothrow_constant, caller_pc);
   67.34  }
   67.35  
    68.1 --- a/src/share/vm/memory/filemap.cpp	Fri Sep 06 09:55:38 2013 +0100
    68.2 +++ b/src/share/vm/memory/filemap.cpp	Sat Sep 14 20:40:34 2013 +0100
    68.3 @@ -55,6 +55,7 @@
    68.4                " shared archive file.\n");
    68.5    jio_vfprintf(defaultStream::error_stream(), msg, ap);
    68.6    jio_fprintf(defaultStream::error_stream(), "\n");
    68.7 +  // Do not change the text of the message below because some tests check for it.
    68.8    vm_exit_during_initialization("Unable to use shared archive.", NULL);
    68.9  }
   68.10  
    69.1 --- a/src/share/vm/memory/memRegion.cpp	Fri Sep 06 09:55:38 2013 +0100
    69.2 +++ b/src/share/vm/memory/memRegion.cpp	Sat Sep 14 20:40:34 2013 +0100
    69.3 @@ -1,5 +1,5 @@
    69.4  /*
    69.5 - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    69.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
    69.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    69.8   *
    69.9   * This code is free software; you can redistribute it and/or modify it
   69.10 @@ -102,11 +102,11 @@
   69.11    return MemRegion();
   69.12  }
   69.13  
   69.14 -void* MemRegion::operator new(size_t size) {
   69.15 +void* MemRegion::operator new(size_t size) throw() {
   69.16    return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
   69.17  }
   69.18  
   69.19 -void* MemRegion::operator new [](size_t size) {
   69.20 +void* MemRegion::operator new [](size_t size) throw() {
   69.21    return (address)AllocateHeap(size, mtGC, 0, AllocFailStrategy::RETURN_NULL);
   69.22  }
   69.23  void  MemRegion::operator delete(void* p) {
    70.1 --- a/src/share/vm/memory/memRegion.hpp	Fri Sep 06 09:55:38 2013 +0100
    70.2 +++ b/src/share/vm/memory/memRegion.hpp	Sat Sep 14 20:40:34 2013 +0100
    70.3 @@ -1,5 +1,5 @@
    70.4  /*
    70.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
    70.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
    70.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    70.8   *
    70.9   * This code is free software; you can redistribute it and/or modify it
   70.10 @@ -94,8 +94,8 @@
   70.11    size_t word_size() const { return _word_size; }
   70.12  
   70.13    bool is_empty() const { return word_size() == 0; }
   70.14 -  void* operator new(size_t size);
   70.15 -  void* operator new [](size_t size);
   70.16 +  void* operator new(size_t size) throw();
   70.17 +  void* operator new [](size_t size) throw();
   70.18    void  operator delete(void* p);
   70.19    void  operator delete [](void* p);
   70.20  };
   70.21 @@ -111,13 +111,13 @@
   70.22  
   70.23  class MemRegionClosureRO: public MemRegionClosure {
   70.24  public:
   70.25 -  void* operator new(size_t size, ResourceObj::allocation_type type, MEMFLAGS flags) {
   70.26 +  void* operator new(size_t size, ResourceObj::allocation_type type, MEMFLAGS flags) throw() {
   70.27          return ResourceObj::operator new(size, type, flags);
   70.28    }
   70.29 -  void* operator new(size_t size, Arena *arena) {
   70.30 +  void* operator new(size_t size, Arena *arena) throw() {
   70.31          return ResourceObj::operator new(size, arena);
   70.32    }
   70.33 -  void* operator new(size_t size) {
   70.34 +  void* operator new(size_t size) throw() {
   70.35          return ResourceObj::operator new(size);
   70.36    }
   70.37  
    71.1 --- a/src/share/vm/oops/klass.cpp	Fri Sep 06 09:55:38 2013 +0100
    71.2 +++ b/src/share/vm/oops/klass.cpp	Sat Sep 14 20:40:34 2013 +0100
    71.3 @@ -139,7 +139,7 @@
    71.4    return NULL;
    71.5  }
    71.6  
    71.7 -void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) {
    71.8 +void* Klass::operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw() {
    71.9    return Metaspace::allocate(loader_data, word_size, /*read_only*/false,
   71.10                               MetaspaceObj::ClassType, CHECK_NULL);
   71.11  }
    72.1 --- a/src/share/vm/oops/klass.hpp	Fri Sep 06 09:55:38 2013 +0100
    72.2 +++ b/src/share/vm/oops/klass.hpp	Sat Sep 14 20:40:34 2013 +0100
    72.3 @@ -179,7 +179,7 @@
    72.4    // Constructor
    72.5    Klass();
    72.6  
    72.7 -  void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS);
    72.8 +  void* operator new(size_t size, ClassLoaderData* loader_data, size_t word_size, TRAPS) throw();
    72.9  
   72.10   public:
   72.11    bool is_klass() const volatile { return true; }
    73.1 --- a/src/share/vm/oops/method.cpp	Fri Sep 06 09:55:38 2013 +0100
    73.2 +++ b/src/share/vm/oops/method.cpp	Sat Sep 14 20:40:34 2013 +0100
    73.3 @@ -720,11 +720,22 @@
    73.4    }
    73.5  }
    73.6  
    73.7 +bool Method::is_always_compilable() const {
    73.8 +  // Generated adapters must be compiled
    73.9 +  if (is_method_handle_intrinsic() && is_synthetic()) {
   73.10 +    assert(!is_not_c1_compilable(), "sanity check");
   73.11 +    assert(!is_not_c2_compilable(), "sanity check");
   73.12 +    return true;
   73.13 +  }
   73.14 +
   73.15 +  return false;
   73.16 +}
   73.17 +
   73.18  bool Method::is_not_compilable(int comp_level) const {
   73.19    if (number_of_breakpoints() > 0)
   73.20      return true;
   73.21 -  if (is_method_handle_intrinsic())
   73.22 -    return !is_synthetic();  // the generated adapters must be compiled
   73.23 +  if (is_always_compilable())
   73.24 +    return false;
   73.25    if (comp_level == CompLevel_any)
   73.26      return is_not_c1_compilable() || is_not_c2_compilable();
   73.27    if (is_c1_compile(comp_level))
   73.28 @@ -736,6 +747,10 @@
   73.29  
   73.30  // call this when compiler finds that this method is not compilable
   73.31  void Method::set_not_compilable(int comp_level, bool report, const char* reason) {
   73.32 +  if (is_always_compilable()) {
   73.33 +    // Don't mark a method that should always be compilable.
   73.34 +    return;
   73.35 +  }
   73.36    print_made_not_compilable(comp_level, /*is_osr*/ false, report, reason);
   73.37    if (comp_level == CompLevel_all) {
   73.38      set_not_c1_compilable();
    74.1 --- a/src/share/vm/oops/method.hpp	Fri Sep 06 09:55:38 2013 +0100
    74.2 +++ b/src/share/vm/oops/method.hpp	Sat Sep 14 20:40:34 2013 +0100
    74.3 @@ -796,6 +796,7 @@
    74.4    void set_not_osr_compilable_quietly(int comp_level = CompLevel_all) {
    74.5      set_not_osr_compilable(comp_level, false);
    74.6    }
    74.7 +  bool is_always_compilable() const;
    74.8  
    74.9   private:
   74.10    void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
    75.1 --- a/src/share/vm/oops/symbol.cpp	Fri Sep 06 09:55:38 2013 +0100
    75.2 +++ b/src/share/vm/oops/symbol.cpp	Sat Sep 14 20:40:34 2013 +0100
    75.3 @@ -41,19 +41,19 @@
    75.4    }
    75.5  }
    75.6  
    75.7 -void* Symbol::operator new(size_t sz, int len, TRAPS) {
    75.8 +void* Symbol::operator new(size_t sz, int len, TRAPS) throw() {
    75.9    int alloc_size = size(len)*HeapWordSize;
   75.10    address res = (address) AllocateHeap(alloc_size, mtSymbol);
   75.11    return res;
   75.12  }
   75.13  
   75.14 -void* Symbol::operator new(size_t sz, int len, Arena* arena, TRAPS) {
   75.15 +void* Symbol::operator new(size_t sz, int len, Arena* arena, TRAPS) throw() {
   75.16    int alloc_size = size(len)*HeapWordSize;
   75.17    address res = (address)arena->Amalloc(alloc_size);
   75.18    return res;
   75.19  }
   75.20  
   75.21 -void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRAPS) {
   75.22 +void* Symbol::operator new(size_t sz, int len, ClassLoaderData* loader_data, TRAPS) throw() {
   75.23    address res;
   75.24    int alloc_size = size(len)*HeapWordSize;
   75.25    res = (address) Metaspace::allocate(loader_data, size(len), true,
    76.1 --- a/src/share/vm/oops/symbol.hpp	Fri Sep 06 09:55:38 2013 +0100
    76.2 +++ b/src/share/vm/oops/symbol.hpp	Sat Sep 14 20:40:34 2013 +0100
    76.3 @@ -136,9 +136,9 @@
    76.4    }
    76.5  
    76.6    Symbol(const u1* name, int length, int refcount);
    76.7 -  void* operator new(size_t size, int len, TRAPS);
    76.8 -  void* operator new(size_t size, int len, Arena* arena, TRAPS);
    76.9 -  void* operator new(size_t size, int len, ClassLoaderData* loader_data, TRAPS);
   76.10 +  void* operator new(size_t size, int len, TRAPS) throw();
   76.11 +  void* operator new(size_t size, int len, Arena* arena, TRAPS) throw();
   76.12 +  void* operator new(size_t size, int len, ClassLoaderData* loader_data, TRAPS) throw();
   76.13  
   76.14    void  operator delete(void* p);
   76.15  
    77.1 --- a/src/share/vm/opto/block.cpp	Fri Sep 06 09:55:38 2013 +0100
    77.2 +++ b/src/share/vm/opto/block.cpp	Sat Sep 14 20:40:34 2013 +0100
    77.3 @@ -112,9 +112,9 @@
    77.4  // exceeds OptoLoopAlignment.
    77.5  uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
    77.6                                      PhaseRegAlloc* ra) {
    77.7 -  uint last_inst = _nodes.size();
    77.8 +  uint last_inst = number_of_nodes();
    77.9    for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
   77.10 -    uint inst_size = _nodes[j]->size(ra);
   77.11 +    uint inst_size = get_node(j)->size(ra);
   77.12      if( inst_size > 0 ) {
   77.13        inst_cnt--;
   77.14        uint sz = sum_size + inst_size;
   77.15 @@ -131,8 +131,8 @@
   77.16  }
   77.17  
   77.18  uint Block::find_node( const Node *n ) const {
   77.19 -  for( uint i = 0; i < _nodes.size(); i++ ) {
   77.20 -    if( _nodes[i] == n )
   77.21 +  for( uint i = 0; i < number_of_nodes(); i++ ) {
   77.22 +    if( get_node(i) == n )
   77.23        return i;
   77.24    }
   77.25    ShouldNotReachHere();
   77.26 @@ -141,7 +141,7 @@
   77.27  
   77.28  // Find and remove n from block list
   77.29  void Block::find_remove( const Node *n ) {
   77.30 -  _nodes.remove(find_node(n));
   77.31 +  remove_node(find_node(n));
   77.32  }
   77.33  
   77.34  // Return empty status of a block.  Empty blocks contain only the head, other
   77.35 @@ -154,10 +154,10 @@
   77.36    }
   77.37  
   77.38    int success_result = completely_empty;
   77.39 -  int end_idx = _nodes.size()-1;
   77.40 +  int end_idx = number_of_nodes() - 1;
   77.41  
   77.42    // Check for ending goto
   77.43 -  if ((end_idx > 0) && (_nodes[end_idx]->is_MachGoto())) {
   77.44 +  if ((end_idx > 0) && (get_node(end_idx)->is_MachGoto())) {
   77.45      success_result = empty_with_goto;
   77.46      end_idx--;
   77.47    }
   77.48 @@ -170,7 +170,7 @@
   77.49    // Ideal nodes are allowable in empty blocks: skip them  Only MachNodes
   77.50    // turn directly into code, because only MachNodes have non-trivial
   77.51    // emit() functions.
   77.52 -  while ((end_idx > 0) && !_nodes[end_idx]->is_Mach()) {
   77.53 +  while ((end_idx > 0) && !get_node(end_idx)->is_Mach()) {
   77.54      end_idx--;
   77.55    }
   77.56  
   77.57 @@ -209,15 +209,15 @@
   77.58  
   77.59  // True if block is low enough frequency or guarded by a test which
   77.60  // mostly does not go here.
   77.61 -bool Block::is_uncommon(PhaseCFG* cfg) const {
   77.62 +bool PhaseCFG::is_uncommon(const Block* block) {
   77.63    // Initial blocks must never be moved, so are never uncommon.
   77.64 -  if (head()->is_Root() || head()->is_Start())  return false;
   77.65 +  if (block->head()->is_Root() || block->head()->is_Start())  return false;
   77.66  
   77.67    // Check for way-low freq
   77.68 -  if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true;
   77.69 +  if( block->_freq < BLOCK_FREQUENCY(0.00001f) ) return true;
   77.70  
   77.71    // Look for code shape indicating uncommon_trap or slow path
   77.72 -  if (has_uncommon_code()) return true;
   77.73 +  if (block->has_uncommon_code()) return true;
   77.74  
   77.75    const float epsilon = 0.05f;
   77.76    const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
   77.77 @@ -225,8 +225,8 @@
   77.78    uint freq_preds = 0;
   77.79    uint uncommon_for_freq_preds = 0;
   77.80  
   77.81 -  for( uint i=1; i<num_preds(); i++ ) {
   77.82 -    Block* guard = cfg->get_block_for_node(pred(i));
   77.83 +  for( uint i=1; i < block->num_preds(); i++ ) {
   77.84 +    Block* guard = get_block_for_node(block->pred(i));
   77.85      // Check to see if this block follows its guard 1 time out of 10000
   77.86      // or less.
   77.87      //
   77.88 @@ -244,14 +244,14 @@
   77.89        uncommon_preds++;
   77.90      } else {
   77.91        freq_preds++;
   77.92 -      if( _freq < guard->_freq * guard_factor ) {
   77.93 +      if( block->_freq < guard->_freq * guard_factor ) {
   77.94          uncommon_for_freq_preds++;
   77.95        }
   77.96      }
   77.97    }
   77.98 -  if( num_preds() > 1 &&
   77.99 +  if( block->num_preds() > 1 &&
  77.100        // The block is uncommon if all preds are uncommon or
  77.101 -      (uncommon_preds == (num_preds()-1) ||
  77.102 +      (uncommon_preds == (block->num_preds()-1) ||
  77.103        // it is uncommon for all frequent preds.
  77.104         uncommon_for_freq_preds == freq_preds) ) {
  77.105      return true;
  77.106 @@ -344,8 +344,8 @@
  77.107  
  77.108  void Block::dump(const PhaseCFG* cfg) const {
  77.109    dump_head(cfg);
  77.110 -  for (uint i=0; i< _nodes.size(); i++) {
  77.111 -    _nodes[i]->dump();
  77.112 +  for (uint i=0; i < number_of_nodes(); i++) {
  77.113 +    get_node(i)->dump();
  77.114    }
  77.115    tty->print("\n");
  77.116  }
  77.117 @@ -434,7 +434,7 @@
  77.118        map_node_to_block(p, bb);
  77.119        map_node_to_block(x, bb);
  77.120        if( x != p ) {                // Only for root is x == p
  77.121 -        bb->_nodes.push((Node*)x);
  77.122 +        bb->push_node((Node*)x);
  77.123        }
  77.124        // Now handle predecessors
  77.125        ++sum;                        // Count 1 for self block
  77.126 @@ -469,11 +469,11 @@
  77.127          assert( x != proj, "" );
  77.128          // Map basic block of projection
  77.129          map_node_to_block(proj, pb);
  77.130 -        pb->_nodes.push(proj);
  77.131 +        pb->push_node(proj);
  77.132        }
  77.133        // Insert self as a child of my predecessor block
  77.134        pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
  77.135 -      assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
  77.136 +      assert( pb->get_node(pb->number_of_nodes() - pb->_num_succs)->is_block_proj(),
  77.137                "too many control users, not a CFG?" );
  77.138      }
  77.139    }
  77.140 @@ -495,7 +495,7 @@
  77.141    // surrounding blocks.
  77.142    float freq = in->_freq * in->succ_prob(succ_no);
  77.143    // get ProjNode corresponding to the succ_no'th successor of the in block
  77.144 -  ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
  77.145 +  ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj();
  77.146    // create region for basic block
  77.147    RegionNode* region = new (C) RegionNode(2);
  77.148    region->init_req(1, proj);
  77.149 @@ -507,7 +507,7 @@
  77.150    Node* gto = _goto->clone(); // get a new goto node
  77.151    gto->set_req(0, region);
  77.152    // add it to the basic block
  77.153 -  block->_nodes.push(gto);
  77.154 +  block->push_node(gto);
  77.155    map_node_to_block(gto, block);
  77.156    C->regalloc()->set_bad(gto->_idx);
  77.157    // hook up successor block
  77.158 @@ -527,9 +527,9 @@
  77.159  // Does this block end in a multiway branch that cannot have the default case
  77.160  // flipped for another case?
  77.161  static bool no_flip_branch( Block *b ) {
  77.162 -  int branch_idx = b->_nodes.size() - b->_num_succs-1;
  77.163 +  int branch_idx = b->number_of_nodes() - b->_num_succs-1;
  77.164    if( branch_idx < 1 ) return false;
  77.165 -  Node *bra = b->_nodes[branch_idx];
  77.166 +  Node *bra = b->get_node(branch_idx);
  77.167    if( bra->is_Catch() )
  77.168      return true;
  77.169    if( bra->is_Mach() ) {
  77.170 @@ -550,16 +550,16 @@
  77.171  void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
  77.172    // Find true target
  77.173    int end_idx = b->end_idx();
  77.174 -  int idx = b->_nodes[end_idx+1]->as_Proj()->_con;
  77.175 +  int idx = b->get_node(end_idx+1)->as_Proj()->_con;
  77.176    Block *succ = b->_succs[idx];
  77.177    Node* gto = _goto->clone(); // get a new goto node
  77.178    gto->set_req(0, b->head());
  77.179 -  Node *bp = b->_nodes[end_idx];
  77.180 -  b->_nodes.map(end_idx,gto); // Slam over NeverBranch
  77.181 +  Node *bp = b->get_node(end_idx);
  77.182 +  b->map_node(gto, end_idx); // Slam over NeverBranch
  77.183    map_node_to_block(gto, b);
  77.184    C->regalloc()->set_bad(gto->_idx);
  77.185 -  b->_nodes.pop();              // Yank projections
  77.186 -  b->_nodes.pop();              // Yank projections
  77.187 +  b->pop_node();              // Yank projections
  77.188 +  b->pop_node();              // Yank projections
  77.189    b->_succs.map(0,succ);        // Map only successor
  77.190    b->_num_succs = 1;
  77.191    // remap successor's predecessors if necessary
  77.192 @@ -575,8 +575,8 @@
  77.193    // Scan through block, yanking dead path from
  77.194    // all regions and phis.
  77.195    dead->head()->del_req(j);
  77.196 -  for( int k = 1; dead->_nodes[k]->is_Phi(); k++ )
  77.197 -    dead->_nodes[k]->del_req(j);
  77.198 +  for( int k = 1; dead->get_node(k)->is_Phi(); k++ )
  77.199 +    dead->get_node(k)->del_req(j);
  77.200  }
  77.201  
  77.202  // Helper function to move block bx to the slot following b_index. Return
  77.203 @@ -620,7 +620,7 @@
  77.204    if (e != Block::not_empty) {
  77.205      if (e == Block::empty_with_goto) {
  77.206        // Remove the goto, but leave the block.
  77.207 -      b->_nodes.pop();
  77.208 +      b->pop_node();
  77.209      }
  77.210      // Mark this block as a connector block, which will cause it to be
  77.211      // ignored in certain functions such as non_connector_successor().
  77.212 @@ -663,13 +663,13 @@
  77.213      // to give a fake exit path to infinite loops.  At this late stage they
  77.214      // need to turn into Goto's so that when you enter the infinite loop you
  77.215      // indeed hang.
  77.216 -    if (block->_nodes[block->end_idx()]->Opcode() == Op_NeverBranch) {
  77.217 +    if (block->get_node(block->end_idx())->Opcode() == Op_NeverBranch) {
  77.218        convert_NeverBranch_to_Goto(block);
  77.219      }
  77.220  
  77.221      // Look for uncommon blocks and move to end.
  77.222      if (!C->do_freq_based_layout()) {
  77.223 -      if (block->is_uncommon(this)) {
  77.224 +      if (is_uncommon(block)) {
  77.225          move_to_end(block, i);
  77.226          last--;                   // No longer check for being uncommon!
  77.227          if (no_flip_branch(block)) { // Fall-thru case must follow?
  77.228 @@ -720,9 +720,9 @@
  77.229      // exchange the true and false targets.
  77.230      if (no_flip_branch(block)) {
  77.231        // Find fall through case - if must fall into its target
  77.232 -      int branch_idx = block->_nodes.size() - block->_num_succs;
  77.233 +      int branch_idx = block->number_of_nodes() - block->_num_succs;
  77.234        for (uint j2 = 0; j2 < block->_num_succs; j2++) {
  77.235 -        const ProjNode* p = block->_nodes[branch_idx + j2]->as_Proj();
  77.236 +        const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj();
  77.237          if (p->_con == 0) {
  77.238            // successor j2 is fall through case
  77.239            if (block->non_connector_successor(j2) != bnext) {
  77.240 @@ -743,14 +743,14 @@
  77.241  
  77.242        // Remove all CatchProjs
  77.243        for (uint j = 0; j < block->_num_succs; j++) {
  77.244 -        block->_nodes.pop();
  77.245 +        block->pop_node();
  77.246        }
  77.247  
  77.248      } else if (block->_num_succs == 1) {
  77.249        // Block ends in a Goto?
  77.250        if (bnext == bs0) {
  77.251          // We fall into next block; remove the Goto
  77.252 -        block->_nodes.pop();
  77.253 +        block->pop_node();
  77.254        }
  77.255  
  77.256      } else if(block->_num_succs == 2) { // Block ends in a If?
  77.257 @@ -759,9 +759,9 @@
  77.258        //       be projections (in any order), the 3rd last node must be
  77.259        //       the IfNode (we have excluded other 2-way exits such as
  77.260        //       CatchNodes already).
  77.261 -      MachNode* iff   = block->_nodes[block->_nodes.size() - 3]->as_Mach();
  77.262 -      ProjNode* proj0 = block->_nodes[block->_nodes.size() - 2]->as_Proj();
  77.263 -      ProjNode* proj1 = block->_nodes[block->_nodes.size() - 1]->as_Proj();
  77.264 +      MachNode* iff   = block->get_node(block->number_of_nodes() - 3)->as_Mach();
  77.265 +      ProjNode* proj0 = block->get_node(block->number_of_nodes() - 2)->as_Proj();
  77.266 +      ProjNode* proj1 = block->get_node(block->number_of_nodes() - 1)->as_Proj();
  77.267  
  77.268        // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
  77.269        assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
  77.270 @@ -833,8 +833,8 @@
  77.271          iff->as_MachIf()->negate();
  77.272        }
  77.273  
  77.274 -      block->_nodes.pop();          // Remove IfFalse & IfTrue projections
  77.275 -      block->_nodes.pop();
  77.276 +      block->pop_node();          // Remove IfFalse & IfTrue projections
  77.277 +      block->pop_node();
  77.278  
  77.279      } else {
  77.280        // Multi-exit block, e.g. a switch statement
  77.281 @@ -895,13 +895,13 @@
  77.282    // Verify sane CFG
  77.283    for (uint i = 0; i < number_of_blocks(); i++) {
  77.284      Block* block = get_block(i);
  77.285 -    uint cnt = block->_nodes.size();
  77.286 +    uint cnt = block->number_of_nodes();
  77.287      uint j;
  77.288      for (j = 0; j < cnt; j++)  {
  77.289 -      Node *n = block->_nodes[j];
  77.290 +      Node *n = block->get_node(j);
  77.291        assert(get_block_for_node(n) == block, "");
  77.292        if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
  77.293 -        assert(j == 1 || block->_nodes[j-1]->is_Phi(), "CreateEx must be first instruction in block");
  77.294 +        assert(j == 1 || block->get_node(j-1)->is_Phi(), "CreateEx must be first instruction in block");
  77.295        }
  77.296        for (uint k = 0; k < n->req(); k++) {
  77.297          Node *def = n->in(k);
  77.298 @@ -930,14 +930,14 @@
  77.299      }
  77.300  
  77.301      j = block->end_idx();
  77.302 -    Node* bp = (Node*)block->_nodes[block->_nodes.size() - 1]->is_block_proj();
  77.303 +    Node* bp = (Node*)block->get_node(block->number_of_nodes() - 1)->is_block_proj();
  77.304      assert(bp, "last instruction must be a block proj");
  77.305 -    assert(bp == block->_nodes[j], "wrong number of successors for this block");
  77.306 +    assert(bp == block->get_node(j), "wrong number of successors for this block");
  77.307      if (bp->is_Catch()) {
  77.308 -      while (block->_nodes[--j]->is_MachProj()) {
  77.309 +      while (block->get_node(--j)->is_MachProj()) {
  77.310          ;
  77.311        }
  77.312 -      assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call");
  77.313 +      assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
  77.314      } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
  77.315        assert(block->_num_succs == 2, "Conditional branch must have two targets");
  77.316      }
  77.317 @@ -1440,9 +1440,9 @@
  77.318            Block *bnext = next(b);
  77.319            Block *bs0 = b->non_connector_successor(0);
  77.320  
  77.321 -          MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach();
  77.322 -          ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
  77.323 -          ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();
  77.324 +          MachNode *iff = b->get_node(b->number_of_nodes() - 3)->as_Mach();
  77.325 +          ProjNode *proj0 = b->get_node(b->number_of_nodes() - 2)->as_Proj();
  77.326 +          ProjNode *proj1 = b->get_node(b->number_of_nodes() - 1)->as_Proj();
  77.327  
  77.328            if (bnext == bs0) {
  77.329              // Fall-thru case in succs[0], should be in succs[1]
  77.330 @@ -1454,8 +1454,8 @@
  77.331              b->_succs.map( 1, tbs0 );
  77.332  
  77.333              // Flip projections to match targets
  77.334 -            b->_nodes.map(b->_nodes.size()-2, proj1);
  77.335 -            b->_nodes.map(b->_nodes.size()-1, proj0);
  77.336 +            b->map_node(proj1, b->number_of_nodes() - 2);
  77.337 +            b->map_node(proj0, b->number_of_nodes() - 1);
  77.338            }
  77.339          }
  77.340        }
    78.1 --- a/src/share/vm/opto/block.hpp	Fri Sep 06 09:55:38 2013 +0100
    78.2 +++ b/src/share/vm/opto/block.hpp	Sat Sep 14 20:40:34 2013 +0100
    78.3 @@ -105,15 +105,53 @@
    78.4  // any optimization pass.  They are created late in the game.
    78.5  class Block : public CFGElement {
    78.6    friend class VMStructs;
    78.7 - public:
    78.8 +
    78.9 +private:
   78.10    // Nodes in this block, in order
   78.11    Node_List _nodes;
   78.12  
   78.13 +public:
   78.14 +
    78.15 +  // Get the node at index 'at_index'; if 'at_index' is out of bounds, return NULL
   78.16 +  Node* get_node(uint at_index) const {
   78.17 +    return _nodes[at_index];
   78.18 +  }
   78.19 +
   78.20 +  // Get the number of nodes in this block
   78.21 +  uint number_of_nodes() const {
   78.22 +    return _nodes.size();
   78.23 +  }
   78.24 +
    78.25 +  // Map a node 'node' to index 'to_index' in the block; if the index is out of bounds, the node list is grown to fit
   78.26 +  void map_node(Node* node, uint to_index) {
   78.27 +    _nodes.map(to_index, node);
   78.28 +  }
   78.29 +
    78.30 +  // Insert a node 'node' at index 'at_index', shifting all nodes at higher indices up one step; 'at_index' must be in bounds or we crash
   78.31 +  void insert_node(Node* node, uint at_index) {
   78.32 +    _nodes.insert(at_index, node);
   78.33 +  }
   78.34 +
   78.35 +  // Remove a node at index 'at_index'
   78.36 +  void remove_node(uint at_index) {
   78.37 +    _nodes.remove(at_index);
   78.38 +  }
   78.39 +
   78.40 +  // Push a node 'node' onto the node list
   78.41 +  void push_node(Node* node) {
   78.42 +    _nodes.push(node);
   78.43 +  }
   78.44 +
   78.45 +  // Pop the last node off the node list
   78.46 +  Node* pop_node() {
   78.47 +    return _nodes.pop();
   78.48 +  }
   78.49 +
   78.50    // Basic blocks have a Node which defines Control for all Nodes pinned in
   78.51    // this block.  This Node is a RegionNode.  Exception-causing Nodes
   78.52    // (division, subroutines) and Phi functions are always pinned.  Later,
   78.53    // every Node will get pinned to some block.
   78.54 -  Node *head() const { return _nodes[0]; }
   78.55 +  Node *head() const { return get_node(0); }
   78.56  
   78.57    // CAUTION: num_preds() is ONE based, so that predecessor numbers match
   78.58    // input edges to Regions and Phis.
   78.59 @@ -274,29 +312,12 @@
   78.60  
   78.61    // Add an instruction to an existing block.  It must go after the head
   78.62    // instruction and before the end instruction.
   78.63 -  void add_inst( Node *n ) { _nodes.insert(end_idx(),n); }
   78.64 +  void add_inst( Node *n ) { insert_node(n, end_idx()); }
   78.65    // Find node in block
   78.66    uint find_node( const Node *n ) const;
   78.67    // Find and remove n from block list
   78.68    void find_remove( const Node *n );
   78.69  
   78.70 -  // helper function that adds caller save registers to MachProjNode
   78.71 -  void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
   78.72 -  // Schedule a call next in the block
   78.73 -  uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
   78.74 -
   78.75 -  // Perform basic-block local scheduling
   78.76 -  Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
   78.77 -  void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
   78.78 -  void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
   78.79 -  bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
   78.80 -  // Cleanup if any code lands between a Call and his Catch
   78.81 -  void call_catch_cleanup(PhaseCFG* cfg, Compile *C);
   78.82 -  // Detect implicit-null-check opportunities.  Basically, find NULL checks
   78.83 -  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
   78.84 -  // I can generate a memory op if there is not one nearby.
   78.85 -  void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);
   78.86 -
   78.87    // Return the empty status of a block
   78.88    enum { not_empty, empty_with_goto, completely_empty };
   78.89    int is_Empty() const;
   78.90 @@ -328,10 +349,6 @@
   78.91    // Examine block's code shape to predict if it is not commonly executed.
   78.92    bool has_uncommon_code() const;
   78.93  
   78.94 -  // Use frequency calculations and code shape to predict if the block
   78.95 -  // is uncommon.
   78.96 -  bool is_uncommon(PhaseCFG* cfg) const;
   78.97 -
   78.98  #ifndef PRODUCT
   78.99    // Debugging print of basic block
  78.100    void dump_bidx(const Block* orig, outputStream* st = tty) const;
  78.101 @@ -414,6 +431,27 @@
  78.102    // to late. Helper for schedule_late.
  78.103    Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);
  78.104  
  78.105 +  bool schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call);
  78.106 +  void set_next_call(Block* block, Node* n, VectorSet& next_call);
  78.107 +  void needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call);
  78.108 +
  78.109 +  // Perform basic-block local scheduling
  78.110 +  Node* select(Block* block, Node_List& worklist, GrowableArray<int>& ready_cnt, VectorSet& next_call, uint sched_slot);
  78.111 +
  78.112 +  // Schedule a call next in the block
  78.113 +  uint sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call);
  78.114 +
   78.115 +  // Cleanup if any code lands between a Call and its Catch
  78.116 +  void call_catch_cleanup(Block* block);
  78.117 +
  78.118 +  Node* catch_cleanup_find_cloned_def(Block* use_blk, Node* def, Block* def_blk, int n_clone_idx);
  78.119 +  void  catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx);
  78.120 +
  78.121 +  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  78.122 +  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  78.123 +  // I can generate a memory op if there is not one nearby.
  78.124 +  void implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons);
  78.125 +
  78.126    // Perform a Depth First Search (DFS).
  78.127    // Setup 'vertex' as DFS to vertex mapping.
  78.128    // Setup 'semi' as vertex to DFS mapping.
  78.129 @@ -530,6 +568,10 @@
  78.130      return (_node_to_block_mapping.lookup(node->_idx) != NULL);
  78.131    }
  78.132  
  78.133 +  // Use frequency calculations and code shape to predict if the block
  78.134 +  // is uncommon.
  78.135 +  bool is_uncommon(const Block* block);
  78.136 +
  78.137  #ifdef ASSERT
  78.138    Unique_Node_List _raw_oops;
  78.139  #endif
  78.140 @@ -550,7 +592,7 @@
  78.141  
  78.142    // Insert a node into a block at index and map the node to the block
  78.143    void insert(Block *b, uint idx, Node *n) {
  78.144 -    b->_nodes.insert( idx, n );
   78.145 +    b->insert_node(n, idx);
  78.146      map_node_to_block(n, b);
  78.147    }
  78.148  
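
The block.hpp hunk is the heart of this changeset: _nodes becomes private and every caller goes through get_node/number_of_nodes/map_node/insert_node/remove_node/push_node/pop_node, which is what the mechanical rewrites in the .cpp files above and below consume. A self-contained sketch of the same encapsulation over std::vector (HotSpot's Node_List grows on map and returns NULL past the end; the sketch imitates that behavior but is illustrative, not the real container):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Node { int _idx; };

    class Block {
      std::vector<Node*> _nodes;   // now private; all access goes through the API
    public:
      // NULL when 'at_index' is past the end, like Node_List's operator[]
      Node* get_node(std::size_t at_index) const {
        return at_index < _nodes.size() ? _nodes[at_index] : nullptr;
      }
      std::size_t number_of_nodes() const { return _nodes.size(); }
      // grows the list when 'to_index' is out of bounds, like Node_List::map
      void map_node(Node* node, std::size_t to_index) {
        if (to_index >= _nodes.size()) _nodes.resize(to_index + 1, nullptr);
        _nodes[to_index] = node;
      }
      void insert_node(Node* node, std::size_t at_index) {
        assert(at_index <= _nodes.size());
        _nodes.insert(_nodes.begin() + at_index, node);
      }
      void remove_node(std::size_t at_index) { _nodes.erase(_nodes.begin() + at_index); }
      void push_node(Node* node) { _nodes.push_back(node); }
      Node* pop_node() { Node* n = _nodes.back(); _nodes.pop_back(); return n; }
    };

    int main() {
      Block b;
      Node n0 = {0}, n1 = {1};
      b.push_node(&n0);
      b.insert_node(&n1, 0);   // n1 now precedes n0
      return (b.get_node(0) == &n1 && b.number_of_nodes() == 2) ? 0 : 1;
    }
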
    79.1 --- a/src/share/vm/opto/buildOopMap.cpp	Fri Sep 06 09:55:38 2013 +0100
    79.2 +++ b/src/share/vm/opto/buildOopMap.cpp	Sat Sep 14 20:40:34 2013 +0100
    79.3 @@ -121,8 +121,8 @@
    79.4  // Given reaching-defs for this block start, compute it for this block end
    79.5  void OopFlow::compute_reach( PhaseRegAlloc *regalloc, int max_reg, Dict *safehash ) {
    79.6  
    79.7 -  for( uint i=0; i<_b->_nodes.size(); i++ ) {
    79.8 -    Node *n = _b->_nodes[i];
    79.9 +  for( uint i=0; i<_b->number_of_nodes(); i++ ) {
   79.10 +    Node *n = _b->get_node(i);
   79.11  
   79.12      if( n->jvms() ) {           // Build an OopMap here?
   79.13        JVMState *jvms = n->jvms();
   79.14 @@ -447,8 +447,8 @@
   79.15        }
   79.16  
   79.17        // Now walk tmp_live up the block backwards, computing live
   79.18 -      for( int k=b->_nodes.size()-1; k>=0; k-- ) {
   79.19 -        Node *n = b->_nodes[k];
   79.20 +      for( int k=b->number_of_nodes()-1; k>=0; k-- ) {
   79.21 +        Node *n = b->get_node(k);
   79.22          // KILL def'd bits
   79.23          int first = regalloc->get_reg_first(n);
   79.24          int second = regalloc->get_reg_second(n);
   79.25 @@ -544,12 +544,12 @@
   79.26      for (i = 1; i < cfg->number_of_blocks(); i++) {
   79.27        Block* block = cfg->get_block(i);
   79.28        uint j;
   79.29 -      for (j = 1; j < block->_nodes.size(); j++) {
   79.30 -        if (block->_nodes[j]->jvms() && (*safehash)[block->_nodes[j]] == NULL) {
   79.31 +      for (j = 1; j < block->number_of_nodes(); j++) {
   79.32 +        if (block->get_node(j)->jvms() && (*safehash)[block->get_node(j)] == NULL) {
   79.33             break;
   79.34          }
   79.35        }
   79.36 -      if (j < block->_nodes.size()) {
   79.37 +      if (j < block->number_of_nodes()) {
   79.38          break;
   79.39        }
   79.40      }
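
The buildOopMap hunks keep the same backwards dataflow, just through the new accessors: walk the block's nodes from last to first, KILL a node's defined registers from the live set, then GEN its used ones. A tiny standalone model of that walk, with a bitset over virtual registers instead of the HotSpot types (hypothetical Inst struct):

    #include <bitset>
    #include <cstdio>
    #include <vector>

    struct Inst {
      int def;                 // register defined by the instruction, -1 if none
      std::vector<int> uses;   // registers read by the instruction
    };

    int main() {
      std::bitset<32> live;    // live-out of the block
      live.set(3);
      std::vector<Inst> block = { {1, {2}}, {3, {1, 2}} };

      // Walk the block backwards: KILL def'd bits, then GEN used bits.
      for (int k = (int)block.size() - 1; k >= 0; k--) {
        if (block[k].def >= 0) live.reset(block[k].def);
        for (int u : block[k].uses) live.set(u);
      }
      std::printf("live-in: %s\n", live.to_string().c_str()); // only bit 2 set
    }
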
    80.1 --- a/src/share/vm/opto/callGenerator.hpp	Fri Sep 06 09:55:38 2013 +0100
    80.2 +++ b/src/share/vm/opto/callGenerator.hpp	Sat Sep 14 20:40:34 2013 +0100
    80.3 @@ -1,5 +1,5 @@
    80.4  /*
    80.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
    80.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
    80.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    80.8   *
    80.9   * This code is free software; you can redistribute it and/or modify it
   80.10 @@ -260,7 +260,7 @@
   80.11    // Because WarmInfo objects live over the entire lifetime of the
   80.12    // Compile object, they are allocated into the comp_arena, which
   80.13    // does not get resource marked or reset during the compile process
   80.14 -  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
   80.15 +  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
   80.16    void operator delete( void * ) { } // fast deallocation
   80.17  
   80.18    static WarmCallInfo* always_hot();
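
The only change here besides the copyright year is the throw() on the arena-placement operator new, matching the same edit in callnode.hpp below. The usual rationale (stated here as an assumption, not taken from this changeset) is that an allocation function which can return NULL must be declared non-throwing; otherwise a conforming compiler may skip the NULL check and run the constructor on a null pointer. A toy illustration with a hypothetical Arena:

    #include <cstddef>
    #include <cstdlib>

    struct Arena {
      void* alloc(std::size_t size) { return std::malloc(size); } // may return NULL
    };

    struct WarmInfoLike {
      // Declared throw(): an allocation function that can return NULL must be
      // non-throwing, so the compiler checks for NULL before running the ctor.
      void* operator new(std::size_t size, Arena* arena) throw() {
        return arena->alloc(size);
      }
      void operator delete(void*) {}  // arena storage is reclaimed in bulk
      int payload;
    };

    int main() {
      Arena arena;
      WarmInfoLike* w = new (&arena) WarmInfoLike(); // arena-placement new
      return (w != NULL) ? 0 : 1;                    // NULL only if malloc failed
    }
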
    81.1 --- a/src/share/vm/opto/callnode.cpp	Fri Sep 06 09:55:38 2013 +0100
    81.2 +++ b/src/share/vm/opto/callnode.cpp	Sat Sep 14 20:40:34 2013 +0100
    81.3 @@ -458,7 +458,7 @@
    81.4        st->print("={");
    81.5        uint nf = spobj->n_fields();
    81.6        if (nf > 0) {
    81.7 -        uint first_ind = spobj->first_index();
    81.8 +        uint first_ind = spobj->first_index(mcall->jvms());
    81.9          Node* fld_node = mcall->in(first_ind);
   81.10          ciField* cifield;
   81.11          if (iklass != NULL) {
   81.12 @@ -1063,7 +1063,6 @@
   81.13    int scloff = jvms->scloff();
   81.14    int endoff = jvms->endoff();
   81.15    assert(endoff == (int)req(), "no other states or debug info after me");
   81.16 -  assert(jvms->scl_size() == 0, "parsed code should not have scalar objects");
   81.17    Node* top = Compile::current()->top();
   81.18    for (uint i = 0; i < grow_by; i++) {
   81.19      ins_req(monoff, top);
   81.20 @@ -1079,32 +1078,31 @@
   81.21    const int MonitorEdges = 2;
   81.22    assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
   81.23    assert(req() == jvms()->endoff(), "correct sizing");
   81.24 -  assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
   81.25    int nextmon = jvms()->scloff();
   81.26    if (GenerateSynchronizationCode) {
   81.27 -    add_req(lock->box_node());
   81.28 -    add_req(lock->obj_node());
   81.29 +    ins_req(nextmon,   lock->box_node());
   81.30 +    ins_req(nextmon+1, lock->obj_node());
   81.31    } else {
   81.32      Node* top = Compile::current()->top();
   81.33 -    add_req(top);
   81.34 -    add_req(top);
   81.35 +    ins_req(nextmon, top);
   81.36 +    ins_req(nextmon, top);
   81.37    }
   81.38 -  jvms()->set_scloff(nextmon+MonitorEdges);
   81.39 +  jvms()->set_scloff(nextmon + MonitorEdges);
   81.40    jvms()->set_endoff(req());
   81.41  }
   81.42  
   81.43  void SafePointNode::pop_monitor() {
   81.44    // Delete last monitor from debug info
   81.45 -  assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
   81.46    debug_only(int num_before_pop = jvms()->nof_monitors());
   81.47 -  const int MonitorEdges = (1<<JVMState::logMonitorEdges);
   81.48 +  const int MonitorEdges = 2;
   81.49 +  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
   81.50    int scloff = jvms()->scloff();
   81.51    int endoff = jvms()->endoff();
   81.52    int new_scloff = scloff - MonitorEdges;
   81.53    int new_endoff = endoff - MonitorEdges;
   81.54    jvms()->set_scloff(new_scloff);
   81.55    jvms()->set_endoff(new_endoff);
   81.56 -  while (scloff > new_scloff)  del_req(--scloff);
   81.57 +  while (scloff > new_scloff)  del_req_ordered(--scloff);
   81.58    assert(jvms()->nof_monitors() == num_before_pop-1, "");
   81.59  }
   81.60  
   81.61 @@ -1169,13 +1167,12 @@
   81.62  }
   81.63  
   81.64  SafePointScalarObjectNode*
   81.65 -SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const {
   81.66 +SafePointScalarObjectNode::clone(Dict* sosn_map) const {
   81.67    void* cached = (*sosn_map)[(void*)this];
   81.68    if (cached != NULL) {
   81.69      return (SafePointScalarObjectNode*)cached;
   81.70    }
   81.71    SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
   81.72 -  res->_first_index += jvms_adj;
   81.73    sosn_map->Insert((void*)this, (void*)res);
   81.74    return res;
   81.75  }
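
The callnode.cpp hunks drop the "parsed code should not have scalar objects" asserts and stop appending monitor edges at the end of the SafePoint: push_monitor() now ins_req()s the box/obj pair at jvms->scloff(), keeping monitor edges contiguous in front of any scalar-replacement entries, and pop_monitor() uses del_req_ordered so those later entries keep their positions. A toy model of that edge layout, using a plain vector in place of node edges (hypothetical types throughout):

    #include <cstdio>
    #include <vector>

    // Debug-info edge layout of a SafePoint-like node:
    //   [ ... locals/stack | monitors | scalar objects ]
    // with monoff <= scloff <= endoff indexing into one flat edge list.
    struct ToyJVMState { int monoff = 4, scloff = 4, endoff = 4; };

    int main() {
      std::vector<int> edges = {0, 1, 2, 3};   // debug edges, no monitors yet
      ToyJVMState jvms;

      // push_monitor: insert the box/obj pair at scloff, then slide scloff/endoff.
      edges.insert(edges.begin() + jvms.scloff, {100, 101});
      jvms.scloff += 2;
      jvms.endoff += 2;

      // pop_monitor: ordered removal, so entries past scloff keep their order.
      edges.erase(edges.begin() + (jvms.scloff - 2), edges.begin() + jvms.scloff);
      jvms.scloff -= 2;
      jvms.endoff -= 2;

      std::printf("%zu edges, scloff=%d\n", edges.size(), jvms.scloff); // 4 edges, scloff=4
    }
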
    82.1 --- a/src/share/vm/opto/callnode.hpp	Fri Sep 06 09:55:38 2013 +0100
    82.2 +++ b/src/share/vm/opto/callnode.hpp	Sat Sep 14 20:40:34 2013 +0100
    82.3 @@ -1,5 +1,5 @@
    82.4  /*
    82.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    82.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    82.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    82.8   *
    82.9   * This code is free software; you can redistribute it and/or modify it
   82.10 @@ -216,7 +216,7 @@
   82.11    // Because JVMState objects live over the entire lifetime of the
   82.12    // Compile object, they are allocated into the comp_arena, which
   82.13    // does not get resource marked or reset during the compile process
   82.14 -  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
   82.15 +  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
   82.16    void operator delete( void * ) { } // fast deallocation
   82.17  
   82.18    // Create a new JVMState, ready for abstract interpretation.
   82.19 @@ -449,14 +449,17 @@
   82.20  // at a safepoint.
   82.21  
   82.22  class SafePointScalarObjectNode: public TypeNode {
   82.23 -  uint _first_index; // First input edge index of a SafePoint node where
   82.24 +  uint _first_index; // First input edge relative index of a SafePoint node where
   82.25                       // states of the scalarized object fields are collected.
   82.26 +                     // It is relative to the last (youngest) jvms->_scloff.
   82.27    uint _n_fields;    // Number of non-static fields of the scalarized object.
   82.28    DEBUG_ONLY(AllocateNode* _alloc;)
   82.29  
   82.30    virtual uint hash() const ; // { return NO_HASH; }
   82.31    virtual uint cmp( const Node &n ) const;
   82.32  
   82.33 +  uint first_index() const { return _first_index; }
   82.34 +
   82.35  public:
   82.36    SafePointScalarObjectNode(const TypeOopPtr* tp,
   82.37  #ifdef ASSERT
   82.38 @@ -469,7 +472,10 @@
   82.39    virtual const RegMask &out_RegMask() const;
   82.40    virtual uint           match_edge(uint idx) const;
   82.41  
   82.42 -  uint first_index() const { return _first_index; }
   82.43 +  uint first_index(JVMState* jvms) const {
   82.44 +    assert(jvms != NULL, "missed JVMS");
   82.45 +    return jvms->scloff() + _first_index;
   82.46 +  }
   82.47    uint n_fields()    const { return _n_fields; }
   82.48  
   82.49  #ifdef ASSERT
   82.50 @@ -485,7 +491,7 @@
   82.51    // corresponds appropriately to "this" in "new_call".  Assumes that
   82.52    // "sosn_map" is a map, specific to the translation of "s" to "new_call",
   82.53    // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
   82.54 -  SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;
   82.55 +  SafePointScalarObjectNode* clone(Dict* sosn_map) const;
   82.56  
   82.57  #ifndef PRODUCT
   82.58    virtual void              dump_spec(outputStream *st) const;
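
The semantic core of the callnode.hpp hunk: _first_index is now stored relative to the youngest jvms->_scloff, the raw accessor is made private, and callers must resolve the absolute edge index through first_index(JVMState*). That is also why clone() loses its jvms_adj parameter in callnode.cpp above: a relative index needs no adjustment when the node is cloned across inlining depths. A sketch of the resolution with hypothetical toy types:

    #include <cassert>

    struct ToyJVMState {
      int _scloff;
      int scloff() const { return _scloff; }
    };

    struct ToyScalarObject {
      unsigned _first_index;  // stored relative to the youngest jvms->scloff()
      // The absolute input-edge index is recomputed per JVMState, so cloning
      // the node into another frame needs no index adjustment (hence the
      // dropped jvms_adj parameter in clone()).
      unsigned first_index(const ToyJVMState* jvms) const {
        assert(jvms != nullptr && "missed JVMS");
        return jvms->scloff() + _first_index;
      }
    };

    int main() {
      ToyJVMState jvms = {9};
      ToyScalarObject so = {2};
      return so.first_index(&jvms) == 11 ? 0 : 1;  // absolute edge index 11
    }
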
    83.1 --- a/src/share/vm/opto/chaitin.cpp	Fri Sep 06 09:55:38 2013 +0100
    83.2 +++ b/src/share/vm/opto/chaitin.cpp	Sat Sep 14 20:40:34 2013 +0100
    83.3 @@ -301,7 +301,7 @@
    83.4        // Copy kill projections after the cloned node
    83.5        Node* kills = proj->clone();
    83.6        kills->set_req(0, copy);
    83.7 -      b->_nodes.insert(idx++, kills);
    83.8 +      b->insert_node(kills, idx++);
    83.9        _cfg.map_node_to_block(kills, b);
   83.10        new_lrg(kills, max_lrg_id++);
   83.11      }
   83.12 @@ -682,11 +682,11 @@
   83.13    uint lr_counter = 1;
   83.14    for( uint i = 0; i < _cfg.number_of_blocks(); i++ ) {
   83.15      Block* block = _cfg.get_block(i);
   83.16 -    uint cnt = block->_nodes.size();
   83.17 +    uint cnt = block->number_of_nodes();
   83.18  
   83.19      // Handle all the normal Nodes in the block
   83.20      for( uint j = 0; j < cnt; j++ ) {
   83.21 -      Node *n = block->_nodes[j];
   83.22 +      Node *n = block->get_node(j);
   83.23        // Pre-color to the zero live range, or pick virtual register
   83.24        const RegMask &rm = n->out_RegMask();
   83.25        _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
   83.26 @@ -710,8 +710,8 @@
   83.27      Block* block = _cfg.get_block(i);
   83.28  
   83.29      // For all instructions
   83.30 -    for (uint j = 1; j < block->_nodes.size(); j++) {
   83.31 -      Node* n = block->_nodes[j];
   83.32 +    for (uint j = 1; j < block->number_of_nodes(); j++) {
   83.33 +      Node* n = block->get_node(j);
   83.34        uint input_edge_start =1; // Skip control most nodes
   83.35        if (n->is_Mach()) {
   83.36          input_edge_start = n->as_Mach()->oper_input_base();
   83.37 @@ -1604,7 +1604,7 @@
   83.38      // For all instructions in block
   83.39      uint last_inst = block->end_idx();
   83.40      for (uint j = 1; j <= last_inst; j++) {
   83.41 -      Node* n = block->_nodes[j];
   83.42 +      Node* n = block->get_node(j);
   83.43  
   83.44        // Dead instruction???
   83.45        assert( n->outcnt() != 0 ||// Nothing dead after post alloc
   83.46 @@ -1641,7 +1641,7 @@
   83.47              assert( cisc->oper_input_base() == 2, "Only adding one edge");
   83.48              cisc->ins_req(1,src);         // Requires a memory edge
   83.49            }
   83.50 -          block->_nodes.map(j,cisc);          // Insert into basic block
   83.51 +          block->map_node(cisc, j);          // Insert into basic block
   83.52            n->subsume_by(cisc, C); // Correct graph
   83.53            //
   83.54            ++_used_cisc_instructions;
   83.55 @@ -1698,7 +1698,7 @@
   83.56        // (where top() node is placed).
   83.57        base->init_req(0, _cfg.get_root_node());
   83.58        Block *startb = _cfg.get_block_for_node(C->top());
   83.59 -      startb->_nodes.insert(startb->find_node(C->top()), base );
   83.60 +      startb->insert_node(base, startb->find_node(C->top()));
   83.61        _cfg.map_node_to_block(base, startb);
   83.62        assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
   83.63      }
   83.64 @@ -1743,9 +1743,9 @@
   83.65    // Search the current block for an existing base-Phi
   83.66    Block *b = _cfg.get_block_for_node(derived);
   83.67    for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
   83.68 -    Node *phi = b->_nodes[i];
   83.69 +    Node *phi = b->get_node(i);
   83.70      if( !phi->is_Phi() ) {      // Found end of Phis with no match?
   83.71 -      b->_nodes.insert( i, base ); // Must insert created Phi here as base
    83.72 +      b->insert_node(base, i); // Must insert created Phi here as base
   83.73        _cfg.map_node_to_block(base, b);
   83.74        new_lrg(base,maxlrg++);
   83.75        break;
   83.76 @@ -1786,7 +1786,7 @@
   83.77      IndexSet liveout(_live->live(block));
   83.78  
   83.79      for (uint j = block->end_idx() + 1; j > 1; j--) {
   83.80 -      Node* n = block->_nodes[j - 1];
   83.81 +      Node* n = block->get_node(j - 1);
   83.82  
   83.83        // Pre-split compares of loop-phis.  Loop-phis form a cycle we would
   83.84        // like to see in the same register.  Compare uses the loop-phi and so
   83.85 @@ -1979,8 +1979,8 @@
   83.86    b->dump_head(&_cfg);
   83.87  
   83.88    // For all instructions
   83.89 -  for( uint j = 0; j < b->_nodes.size(); j++ )
   83.90 -    dump(b->_nodes[j]);
   83.91 +  for( uint j = 0; j < b->number_of_nodes(); j++ )
   83.92 +    dump(b->get_node(j));
   83.93    // Print live-out info at end of block
   83.94    if( _live ) {
   83.95      tty->print("Liveout: ");
   83.96 @@ -2271,8 +2271,8 @@
   83.97      int dump_once = 0;
   83.98  
   83.99      // For all instructions
  83.100 -    for( uint j = 0; j < block->_nodes.size(); j++ ) {
  83.101 -      Node *n = block->_nodes[j];
  83.102 +    for( uint j = 0; j < block->number_of_nodes(); j++ ) {
  83.103 +      Node *n = block->get_node(j);
  83.104        if (_lrg_map.find_const(n) == lidx) {
  83.105          if (!dump_once++) {
  83.106            tty->cr();
    84.1 --- a/src/share/vm/opto/coalesce.cpp	Fri Sep 06 09:55:38 2013 +0100
    84.2 +++ b/src/share/vm/opto/coalesce.cpp	Sat Sep 14 20:40:34 2013 +0100
    84.3 @@ -54,9 +54,9 @@
    84.4      for( j=0; j<b->_num_succs; j++ )
    84.5        tty->print("B%d ",b->_succs[j]->_pre_order);
    84.6      tty->print(" IDom: B%d/#%d\n", b->_idom ? b->_idom->_pre_order : 0, b->_dom_depth);
    84.7 -    uint cnt = b->_nodes.size();
    84.8 +    uint cnt = b->number_of_nodes();
    84.9      for( j=0; j<cnt; j++ ) {
   84.10 -      Node *n = b->_nodes[j];
   84.11 +      Node *n = b->get_node(j);
   84.12        dump( n );
   84.13        tty->print("\t%s\t",n->Name());
   84.14  
   84.15 @@ -152,7 +152,7 @@
   84.16    // after the last use.  Last use is really first-use on a backwards scan.
   84.17    uint i = b->end_idx()-1;
   84.18    while(1) {
   84.19 -    Node *n = b->_nodes[i];
   84.20 +    Node *n = b->get_node(i);
   84.21      // Check for end of virtual copies; this is also the end of the
   84.22      // parallel renaming effort.
   84.23      if (n->_idx < _unique) {
   84.24 @@ -174,7 +174,7 @@
   84.25    // the last kill.  Thus it is the first kill on a backwards scan.
   84.26    i = b->end_idx()-1;
   84.27    while (1) {
   84.28 -    Node *n = b->_nodes[i];
   84.29 +    Node *n = b->get_node(i);
   84.30      // Check for end of virtual copies; this is also the end of the
   84.31      // parallel renaming effort.
   84.32      if (n->_idx < _unique) {
   84.33 @@ -200,13 +200,13 @@
   84.34      tmp ->set_req(idx,copy->in(idx));
   84.35      copy->set_req(idx,tmp);
   84.36      // Save source in temp early, before source is killed
   84.37 -    b->_nodes.insert(kill_src_idx,tmp);
   84.38 +    b->insert_node(tmp, kill_src_idx);
   84.39      _phc._cfg.map_node_to_block(tmp, b);
   84.40      last_use_idx++;
   84.41    }
   84.42  
   84.43    // Insert just after last use
   84.44 -  b->_nodes.insert(last_use_idx+1,copy);
   84.45 +  b->insert_node(copy, last_use_idx + 1);
   84.46  }
   84.47  
   84.48  void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
   84.49 @@ -237,8 +237,8 @@
   84.50      Block *b = _phc._cfg.get_block(i);
   84.51      uint cnt = b->num_preds();  // Number of inputs to the Phi
   84.52  
   84.53 -    for( uint l = 1; l<b->_nodes.size(); l++ ) {
   84.54 -      Node *n = b->_nodes[l];
   84.55 +    for( uint l = 1; l<b->number_of_nodes(); l++ ) {
   84.56 +      Node *n = b->get_node(l);
   84.57  
   84.58        // Do not use removed-copies, use copied value instead
   84.59        uint ncnt = n->req();
   84.60 @@ -260,7 +260,7 @@
   84.61          if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) {
   84.62            n->replace_by(def);
   84.63            n->set_req(cidx,NULL);
   84.64 -          b->_nodes.remove(l);
   84.65 +          b->remove_node(l);
   84.66            l--;
   84.67            continue;
   84.68          }
   84.69 @@ -321,13 +321,13 @@
   84.70                 m->as_Mach()->rematerialize()) {
   84.71                copy = m->clone();
   84.72                // Insert the copy in the basic block, just before us
   84.73 -              b->_nodes.insert(l++, copy);
   84.74 +              b->insert_node(copy, l++);
   84.75                l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map);
   84.76              } else {
   84.77                const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
   84.78                copy = new (C) MachSpillCopyNode(m, *rm, *rm);
   84.79                // Insert the copy in the basic block, just before us
   84.80 -              b->_nodes.insert(l++, copy);
   84.81 +              b->insert_node(copy, l++);
   84.82              }
   84.83              // Insert the copy in the use-def chain
   84.84              n->set_req(idx, copy);
   84.85 @@ -339,7 +339,7 @@
   84.86          } // End of is two-adr
   84.87  
   84.88          // Insert a copy at a debug use for a lrg which has high frequency
   84.89 -        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(&_phc._cfg)) {
   84.90 +        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || _phc._cfg.is_uncommon(b)) {
   84.91            // Walk the debug inputs to the node and check for lrg freq
   84.92            JVMState* jvms = n->jvms();
   84.93            uint debug_start = jvms ? jvms->debug_start() : 999999;
   84.94 @@ -376,7 +376,7 @@
   84.95                // Insert the copy in the use-def chain
   84.96                n->set_req(inpidx, copy );
   84.97                // Insert the copy in the basic block, just before us
   84.98 -              b->_nodes.insert( l++, copy );
    84.99 +              b->insert_node(copy, l++);
  84.100                // Extend ("register allocate") the names array for the copy.
  84.101                uint max_lrg_id = _phc._lrg_map.max_lrg_id();
  84.102                _phc.new_lrg(copy, max_lrg_id);
  84.103 @@ -431,8 +431,8 @@
  84.104      }
  84.105  
  84.106      // Visit all the Phis in successor block
  84.107 -    for( uint k = 1; k<bs->_nodes.size(); k++ ) {
  84.108 -      Node *n = bs->_nodes[k];
  84.109 +    for( uint k = 1; k<bs->number_of_nodes(); k++ ) {
  84.110 +      Node *n = bs->get_node(k);
  84.111        if( !n->is_Phi() ) break;
  84.112        combine_these_two( n, n->in(j) );
  84.113      }
  84.114 @@ -442,7 +442,7 @@
  84.115    // Check _this_ block for 2-address instructions and copies.
  84.116    uint cnt = b->end_idx();
  84.117    for( i = 1; i<cnt; i++ ) {
  84.118 -    Node *n = b->_nodes[i];
  84.119 +    Node *n = b->get_node(i);
  84.120      uint idx;
  84.121      // 2-address instructions have a virtual Copy matching their input
  84.122      // to their output
  84.123 @@ -490,10 +490,10 @@
  84.124    dst_copy->set_req( didx, src_def );
  84.125    // Add copy to free list
  84.126    // _phc.free_spillcopy(b->_nodes[bindex]);
  84.127 -  assert( b->_nodes[bindex] == dst_copy, "" );
  84.128 +  assert( b->get_node(bindex) == dst_copy, "" );
  84.129    dst_copy->replace_by( dst_copy->in(didx) );
  84.130    dst_copy->set_req( didx, NULL);
  84.131 -  b->_nodes.remove(bindex);
  84.132 +  b->remove_node(bindex);
  84.133    if( bindex < b->_ihrp_index ) b->_ihrp_index--;
  84.134    if( bindex < b->_fhrp_index ) b->_fhrp_index--;
  84.135  
  84.136 @@ -523,8 +523,8 @@
  84.137        bindex2 = b2->end_idx()-1;
  84.138      }
  84.139      // Get prior instruction
  84.140 -    assert(bindex2 < b2->_nodes.size(), "index out of bounds");
  84.141 -    Node *x = b2->_nodes[bindex2];
  84.142 +    assert(bindex2 < b2->number_of_nodes(), "index out of bounds");
  84.143 +    Node *x = b2->get_node(bindex2);
  84.144      if( x == prev_copy ) {      // Previous copy in copy chain?
  84.145        if( prev_copy == src_copy)// Found end of chain and all interferences
  84.146          break;                  // So break out of loop
  84.147 @@ -769,14 +769,14 @@
  84.148  // Conservative (but pessimistic) copy coalescing of a single block
  84.149  void PhaseConservativeCoalesce::coalesce( Block *b ) {
  84.150    // Bail out on infrequent blocks
  84.151 -  if (b->is_uncommon(&_phc._cfg)) {
  84.152 +  if (_phc._cfg.is_uncommon(b)) {
  84.153      return;
  84.154    }
  84.155    // Check this block for copies.
  84.156    for( uint i = 1; i<b->end_idx(); i++ ) {
  84.157      // Check for actual copies on inputs.  Coalesce a copy into its
  84.158      // input if use and copy's input are compatible.
  84.159 -    Node *copy1 = b->_nodes[i];
  84.160 +    Node *copy1 = b->get_node(i);
  84.161      uint idx1 = copy1->is_Copy();
  84.162      if( !idx1 ) continue;       // Not a copy
  84.163  
    85.1 --- a/src/share/vm/opto/compile.cpp	Fri Sep 06 09:55:38 2013 +0100
    85.2 +++ b/src/share/vm/opto/compile.cpp	Sat Sep 14 20:40:34 2013 +0100
    85.3 @@ -2258,7 +2258,7 @@
    85.4      if (block->is_connector() && !Verbose) {
    85.5        continue;
    85.6      }
    85.7 -    n = block->_nodes[0];
    85.8 +    n = block->head();
    85.9      if (pcs && n->_idx < pc_limit) {
   85.10        tty->print("%3.3x   ", pcs[n->_idx]);
   85.11      } else {
   85.12 @@ -2273,12 +2273,12 @@
   85.13  
   85.14      // For all instructions
   85.15      Node *delay = NULL;
   85.16 -    for (uint j = 0; j < block->_nodes.size(); j++) {
   85.17 +    for (uint j = 0; j < block->number_of_nodes(); j++) {
   85.18        if (VMThread::should_terminate()) {
   85.19          cut_short = true;
   85.20          break;
   85.21        }
   85.22 -      n = block->_nodes[j];
   85.23 +      n = block->get_node(j);
   85.24        if (valid_bundle_info(n)) {
   85.25          Bundle* bundle = node_bundling(n);
   85.26          if (bundle->used_in_unconditional_delay()) {
    86.1 --- a/src/share/vm/opto/domgraph.cpp	Fri Sep 06 09:55:38 2013 +0100
    86.2 +++ b/src/share/vm/opto/domgraph.cpp	Sat Sep 14 20:40:34 2013 +0100
    86.3 @@ -211,21 +211,21 @@
    86.4  uint Block_Stack::most_frequent_successor( Block *b ) {
    86.5    uint freq_idx = 0;
    86.6    int eidx = b->end_idx();
    86.7 -  Node *n = b->_nodes[eidx];
    86.8 +  Node *n = b->get_node(eidx);
    86.9    int op = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : n->Opcode();
   86.10    switch( op ) {
   86.11    case Op_CountedLoopEnd:
   86.12    case Op_If: {               // Split frequency amongst children
   86.13      float prob = n->as_MachIf()->_prob;
   86.14      // Is succ[0] the TRUE branch or the FALSE branch?
   86.15 -    if( b->_nodes[eidx+1]->Opcode() == Op_IfFalse )
   86.16 +    if( b->get_node(eidx+1)->Opcode() == Op_IfFalse )
   86.17        prob = 1.0f - prob;
   86.18      freq_idx = prob < PROB_FAIR;      // freq=1 for succ[0] < 0.5 prob
   86.19      break;
   86.20    }
   86.21    case Op_Catch:                // Split frequency amongst children
   86.22      for( freq_idx = 0; freq_idx < b->_num_succs; freq_idx++ )
   86.23 -      if( b->_nodes[eidx+1+freq_idx]->as_CatchProj()->_con == CatchProjNode::fall_through_index )
   86.24 +      if( b->get_node(eidx+1+freq_idx)->as_CatchProj()->_con == CatchProjNode::fall_through_index )
   86.25          break;
   86.26      // Handle case of no fall-thru (e.g., check-cast MUST throw an exception)
   86.27      if( freq_idx == b->_num_succs ) freq_idx = 0;
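
most_frequent_successor() only swaps _nodes[...] indexing for get_node(...), but the logic it preserves deserves a worked example: an If's _prob is the taken (IfTrue) probability, so when succ[0] carries the IfFalse projection the probability must be inverted before picking the hotter successor. Illustrative standalone arithmetic, not HotSpot code:

    #include <cstdio>

    int main() {
      float prob = 0.9f;                  // MachIf::_prob, P(taken) == P(IfTrue)
      bool succ0_is_iffalse = true;       // succ[0] carries the IfFalse projection
      float p0 = succ0_is_iffalse ? 1.0f - prob : prob;  // P(reaching succ[0])
      unsigned freq_idx = p0 < 0.5f;      // 1 when succ[1] is the hotter path
      std::printf("freq_idx=%u\n", freq_idx);            // prints freq_idx=1
    }
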
    87.1 --- a/src/share/vm/opto/gcm.cpp	Fri Sep 06 09:55:38 2013 +0100
    87.2 +++ b/src/share/vm/opto/gcm.cpp	Sat Sep 14 20:40:34 2013 +0100
    87.3 @@ -102,12 +102,12 @@
    87.4      uint j = 0;
    87.5      if (pb->_num_succs != 1) {  // More than 1 successor?
    87.6        // Search for successor
    87.7 -      uint max = pb->_nodes.size();
    87.8 +      uint max = pb->number_of_nodes();
    87.9        assert( max > 1, "" );
   87.10        uint start = max - pb->_num_succs;
   87.11        // Find which output path belongs to projection
   87.12        for (j = start; j < max; j++) {
   87.13 -        if( pb->_nodes[j] == in0 )
   87.14 +        if( pb->get_node(j) == in0 )
   87.15            break;
   87.16        }
   87.17        assert( j < max, "must find" );
   87.18 @@ -1027,8 +1027,8 @@
   87.19    Block* least       = LCA;
   87.20    double least_freq  = least->_freq;
   87.21    uint target        = get_latency_for_node(self);
   87.22 -  uint start_latency = get_latency_for_node(LCA->_nodes[0]);
   87.23 -  uint end_latency   = get_latency_for_node(LCA->_nodes[LCA->end_idx()]);
   87.24 +  uint start_latency = get_latency_for_node(LCA->head());
   87.25 +  uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
   87.26    bool in_latency    = (target <= start_latency);
   87.27    const Block* root_block = get_block_for_node(_root);
   87.28  
   87.29 @@ -1049,9 +1049,9 @@
   87.30      self->dump();
   87.31      tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
   87.32        LCA->_pre_order,
   87.33 -      LCA->_nodes[0]->_idx,
   87.34 +      LCA->head()->_idx,
   87.35        start_latency,
   87.36 -      LCA->_nodes[LCA->end_idx()]->_idx,
   87.37 +      LCA->get_node(LCA->end_idx())->_idx,
   87.38        end_latency,
   87.39        least_freq);
   87.40    }
   87.41 @@ -1074,14 +1074,14 @@
   87.42      if (mach && LCA == root_block)
   87.43        break;
   87.44  
   87.45 -    uint start_lat = get_latency_for_node(LCA->_nodes[0]);
   87.46 +    uint start_lat = get_latency_for_node(LCA->head());
   87.47      uint end_idx   = LCA->end_idx();
   87.48 -    uint end_lat   = get_latency_for_node(LCA->_nodes[end_idx]);
   87.49 +    uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
   87.50      double LCA_freq = LCA->_freq;
   87.51  #ifndef PRODUCT
   87.52      if (trace_opto_pipelining()) {
   87.53        tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
   87.54 -        LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
   87.55 +        LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
   87.56      }
   87.57  #endif
   87.58      cand_cnt++;
   87.59 @@ -1342,7 +1342,7 @@
   87.60        Node* proj = _matcher._null_check_tests[i];
   87.61        Node* val  = _matcher._null_check_tests[i + 1];
   87.62        Block* block = get_block_for_node(proj);
   87.63 -      block->implicit_null_check(this, proj, val, allowed_reasons);
   87.64 +      implicit_null_check(block, proj, val, allowed_reasons);
   87.65        // The implicit_null_check will only perform the transformation
   87.66        // if the null branch is truly uncommon, *and* it leads to an
   87.67        // uncommon trap.  Combined with the too_many_traps guards
   87.68 @@ -1363,7 +1363,7 @@
   87.69    visited.Clear();
   87.70    for (uint i = 0; i < number_of_blocks(); i++) {
   87.71      Block* block = get_block(i);
   87.72 -    if (!block->schedule_local(this, _matcher, ready_cnt, visited)) {
   87.73 +    if (!schedule_local(block, ready_cnt, visited)) {
   87.74        if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
   87.75          C->record_method_not_compilable("local schedule failed");
   87.76        }
   87.77 @@ -1375,7 +1375,7 @@
   87.78    // clone the instructions on all paths below the Catch.
   87.79    for (uint i = 0; i < number_of_blocks(); i++) {
   87.80      Block* block = get_block(i);
   87.81 -    block->call_catch_cleanup(this, C);
   87.82 +    call_catch_cleanup(block);
   87.83    }
   87.84  
   87.85  #ifndef PRODUCT
   87.86 @@ -1726,7 +1726,7 @@
   87.87  // Determine the probability of reaching successor 'i' from the receiver block.
   87.88  float Block::succ_prob(uint i) {
   87.89    int eidx = end_idx();
   87.90 -  Node *n = _nodes[eidx];  // Get ending Node
   87.91 +  Node *n = get_node(eidx);  // Get ending Node
   87.92  
   87.93    int op = n->Opcode();
   87.94    if (n->is_Mach()) {
   87.95 @@ -1761,7 +1761,7 @@
   87.96      float prob  = n->as_MachIf()->_prob;
   87.97      assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
   87.98      // If succ[i] is the FALSE branch, invert path info
   87.99 -    if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) {
  87.100 +    if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
  87.101        return 1.0f - prob; // not taken
  87.102      } else {
  87.103        return prob; // taken
  87.104 @@ -1773,7 +1773,7 @@
  87.105      return 1.0f/_num_succs;
  87.106  
  87.107    case Op_Catch: {
  87.108 -    const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
  87.109 +    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
  87.110      if (ci->_con == CatchProjNode::fall_through_index) {
  87.111        // Fall-thru path gets the lion's share.
  87.112        return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
  87.113 @@ -1810,7 +1810,7 @@
  87.114  // Return the number of fall-through candidates for a block
  87.115  int Block::num_fall_throughs() {
  87.116    int eidx = end_idx();
  87.117 -  Node *n = _nodes[eidx];  // Get ending Node
  87.118 +  Node *n = get_node(eidx);  // Get ending Node
  87.119  
  87.120    int op = n->Opcode();
  87.121    if (n->is_Mach()) {
  87.122 @@ -1834,7 +1834,7 @@
  87.123  
  87.124    case Op_Catch: {
  87.125      for (uint i = 0; i < _num_succs; i++) {
  87.126 -      const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
  87.127 +      const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
  87.128        if (ci->_con == CatchProjNode::fall_through_index) {
  87.129          return 1;
  87.130        }
  87.131 @@ -1862,14 +1862,14 @@
  87.132  // Return true if a specific successor could be fall-through target.
  87.133  bool Block::succ_fall_through(uint i) {
  87.134    int eidx = end_idx();
  87.135 -  Node *n = _nodes[eidx];  // Get ending Node
  87.136 +  Node *n = get_node(eidx);  // Get ending Node
  87.137  
  87.138    int op = n->Opcode();
  87.139    if (n->is_Mach()) {
  87.140      if (n->is_MachNullCheck()) {
   87.141        // In theory, either side can fall-thru; for simplicity's sake,
  87.142        // let's say only the false branch can now.
  87.143 -      return _nodes[i + eidx + 1]->Opcode() == Op_IfFalse;
  87.144 +      return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
  87.145      }
  87.146      op = n->as_Mach()->ideal_Opcode();
  87.147    }
  87.148 @@ -1883,7 +1883,7 @@
  87.149      return true;
  87.150  
  87.151    case Op_Catch: {
  87.152 -    const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
  87.153 +    const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
  87.154      return ci->_con == CatchProjNode::fall_through_index;
  87.155    }
  87.156  
  87.157 @@ -1907,7 +1907,7 @@
  87.158  // Update the probability of a two-branch to be uncommon
  87.159  void Block::update_uncommon_branch(Block* ub) {
  87.160    int eidx = end_idx();
  87.161 -  Node *n = _nodes[eidx];  // Get ending Node
  87.162 +  Node *n = get_node(eidx);  // Get ending Node
  87.163  
  87.164    int op = n->as_Mach()->ideal_Opcode();
  87.165  
  87.166 @@ -1923,7 +1923,7 @@
  87.167  
   87.168    // If ub is the true path, make the probability small, else
  87.169    // ub is the false path, and make the probability large
  87.170 -  bool invert = (_nodes[s + eidx + 1]->Opcode() == Op_IfFalse);
  87.171 +  bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
  87.172  
  87.173    // Get existing probability
  87.174    float p = n->as_MachIf()->_prob;
    88.1 --- a/src/share/vm/opto/generateOptoStub.cpp	Fri Sep 06 09:55:38 2013 +0100
    88.2 +++ b/src/share/vm/opto/generateOptoStub.cpp	Sat Sep 14 20:40:34 2013 +0100
    88.3 @@ -61,6 +61,7 @@
    88.4    JVMState* jvms = new (C) JVMState(0);
    88.5    jvms->set_bci(InvocationEntryBci);
    88.6    jvms->set_monoff(max_map);
    88.7 +  jvms->set_scloff(max_map);
    88.8    jvms->set_endoff(max_map);
    88.9    {
   88.10      SafePointNode *map = new (C) SafePointNode( max_map, jvms );
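
The one-line generateOptoStub.cpp change follows from the callnode.cpp rework above: with monitor edges now inserted at scloff rather than appended, a freshly built stub JVMState must initialize scloff as well, keeping monoff == scloff == endoff to encode "no monitors and no scalar-replaced objects". A toy check of that invariant (hypothetical struct, not the real JVMState):

    #include <cassert>

    struct ToyJVMState {
      int monoff, scloff, endoff;
      int monitor_edges() const { return scloff - monoff; }
      int scalar_edges() const  { return endoff - scloff; }
    };

    int main() {
      // Stub entry state: all three offsets equal means no monitors and no
      // scalar objects (the diff adds the set_scloff call so this holds
      // from the start).
      ToyJVMState jvms = {8, 8, 8};
      assert(jvms.monitor_edges() == 0 && jvms.scalar_edges() == 0);
      return 0;
    }
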
    89.1 --- a/src/share/vm/opto/graphKit.cpp	Fri Sep 06 09:55:38 2013 +0100
    89.2 +++ b/src/share/vm/opto/graphKit.cpp	Sat Sep 14 20:40:34 2013 +0100
    89.3 @@ -1501,6 +1501,25 @@
    89.4    }
    89.5  }
    89.6  
    89.7 +bool GraphKit::can_move_pre_barrier() const {
    89.8 +  BarrierSet* bs = Universe::heap()->barrier_set();
    89.9 +  switch (bs->kind()) {
   89.10 +    case BarrierSet::G1SATBCT:
   89.11 +    case BarrierSet::G1SATBCTLogging:
   89.12 +      return true; // Can move it if no safepoint
   89.13 +
   89.14 +    case BarrierSet::CardTableModRef:
   89.15 +    case BarrierSet::CardTableExtension:
   89.16 +    case BarrierSet::ModRef:
   89.17 +      return true; // There is no pre-barrier
   89.18 +
   89.19 +    case BarrierSet::Other:
   89.20 +    default      :
   89.21 +      ShouldNotReachHere();
   89.22 +  }
   89.23 +  return false;
   89.24 +}
   89.25 +
   89.26  void GraphKit::post_barrier(Node* ctl,
   89.27                              Node* store,
   89.28                              Node* obj,
   89.29 @@ -3551,6 +3570,8 @@
   89.30    } else {
   89.31      // In this case both val_type and alias_idx are unused.
   89.32      assert(pre_val != NULL, "must be loaded already");
   89.33 +    // Nothing to be done if pre_val is null.
   89.34 +    if (pre_val->bottom_type() == TypePtr::NULL_PTR) return;
   89.35      assert(pre_val->bottom_type()->basic_type() == T_OBJECT, "or we shouldn't be here");
   89.36    }
   89.37    assert(bt == T_OBJECT, "or we shouldn't be here");
   89.38 @@ -3595,7 +3616,7 @@
   89.39      if (do_load) {
   89.40        // load original value
   89.41        // alias_idx correct??
   89.42 -      pre_val = __ load(no_ctrl, adr, val_type, bt, alias_idx);
   89.43 +      pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
   89.44      }
   89.45  
   89.46      // if (pre_val != NULL)
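
graphKit.cpp adds can_move_pre_barrier(), a capability query over the collector's barrier set: G1's SATB pre-barrier may be reordered past the store when no safepoint intervenes, and the card-table collectors have no pre-barrier to move at all. The same file also short-circuits g1_write_barrier_pre() when pre_val is statically NULL and loads pre_val on the current control edge instead of no_ctrl. A standalone sketch of the dispatch shape (the enum is a stand-in, not the real BarrierSet):

    #include <cassert>

    // Stand-in for BarrierSet::Name; values mirror the cases in the hunk above.
    enum BarrierKind {
      G1SATBCT, G1SATBCTLogging,
      CardTableModRef, CardTableExtension, ModRef,
      Other
    };

    // Shape of GraphKit::can_move_pre_barrier(): true when the pre-barrier may
    // be emitted after the value load / oop store.
    bool can_move_pre_barrier(BarrierKind kind) {
      switch (kind) {
        case G1SATBCT:
        case G1SATBCTLogging:
          return true;              // movable, provided no safepoint intervenes
        case CardTableModRef:
        case CardTableExtension:
        case ModRef:
          return true;              // there is no pre-barrier to move
        case Other:
        default:
          assert(false && "unknown barrier set");
          return false;
      }
    }

    int main() { return can_move_pre_barrier(G1SATBCT) ? 0 : 1; }
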
    90.1 --- a/src/share/vm/opto/graphKit.hpp	Fri Sep 06 09:55:38 2013 +0100
    90.2 +++ b/src/share/vm/opto/graphKit.hpp	Sat Sep 14 20:40:34 2013 +0100
    90.3 @@ -695,6 +695,10 @@
    90.4    void write_barrier_post(Node *store, Node* obj,
    90.5                            Node* adr,  uint adr_idx, Node* val, bool use_precise);
    90.6  
    90.7 +  // Allow reordering of pre-barrier with oop store and/or post-barrier.
    90.8 +  // Used for load_store operations which load the old value.
    90.9 +  bool can_move_pre_barrier() const;
   90.10 +
   90.11    // G1 pre/post barriers
   90.12    void g1_write_barrier_pre(bool do_load,
   90.13                              Node* obj,
    91.1 --- a/src/share/vm/opto/idealGraphPrinter.cpp	Fri Sep 06 09:55:38 2013 +0100
    91.2 +++ b/src/share/vm/opto/idealGraphPrinter.cpp	Sat Sep 14 20:40:34 2013 +0100
    91.3 @@ -639,8 +639,8 @@
    91.4      // reachable but are in the CFG so add them here.
    91.5      for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
    91.6        Block* block = C->cfg()->get_block(i);
    91.7 -      for (uint s = 0; s < block->_nodes.size(); s++) {
    91.8 -        nodeStack.push(block->_nodes[s]);
    91.9 +      for (uint s = 0; s < block->number_of_nodes(); s++) {
   91.10 +        nodeStack.push(block->get_node(s));
   91.11        }
   91.12      }
   91.13    }
   91.14 @@ -713,9 +713,9 @@
   91.15        tail(SUCCESSORS_ELEMENT);
   91.16  
   91.17        head(NODES_ELEMENT);
   91.18 -      for (uint s = 0; s < block->_nodes.size(); s++) {
   91.19 +      for (uint s = 0; s < block->number_of_nodes(); s++) {
   91.20          begin_elem(NODE_ELEMENT);
   91.21 -        print_attr(NODE_ID_PROPERTY, get_node_id(block->_nodes[s]));
   91.22 +        print_attr(NODE_ID_PROPERTY, get_node_id(block->get_node(s)));
   91.23          end_elem();
   91.24        }
   91.25        tail(NODES_ELEMENT);
    92.1 --- a/src/share/vm/opto/ifg.cpp	Fri Sep 06 09:55:38 2013 +0100
    92.2 +++ b/src/share/vm/opto/ifg.cpp	Sat Sep 14 20:40:34 2013 +0100
    92.3 @@ -319,7 +319,7 @@
    92.4      // value is then removed from the live-ness set and it's inputs are
    92.5      // added to the live-ness set.
    92.6      for (uint j = block->end_idx() + 1; j > 1; j--) {
    92.7 -      Node* n = block->_nodes[j - 1];
    92.8 +      Node* n = block->get_node(j - 1);
    92.9  
   92.10        // Get value being defined
   92.11        uint r = _lrg_map.live_range_id(n);
   92.12 @@ -456,7 +456,7 @@
   92.13      // Compute first nonphi node index
   92.14      uint first_inst;
   92.15      for (first_inst = 1; first_inst < last_inst; first_inst++) {
   92.16 -      if (!block->_nodes[first_inst]->is_Phi()) {
   92.17 +      if (!block->get_node(first_inst)->is_Phi()) {
   92.18          break;
   92.19        }
   92.20      }
   92.21 @@ -464,15 +464,15 @@
   92.22      // Spills could be inserted before CreateEx node which should be
   92.23      // first instruction in block after Phis. Move CreateEx up.
   92.24      for (uint insidx = first_inst; insidx < last_inst; insidx++) {
   92.25 -      Node *ex = block->_nodes[insidx];
   92.26 +      Node *ex = block->get_node(insidx);
   92.27        if (ex->is_SpillCopy()) {
   92.28          continue;
   92.29        }
   92.30        if (insidx > first_inst && ex->is_Mach() && ex->as_Mach()->ideal_Opcode() == Op_CreateEx) {
   92.31          // If the CreateEx isn't above all the MachSpillCopies
   92.32          // then move it to the top.
   92.33 -        block->_nodes.remove(insidx);
   92.34 -        block->_nodes.insert(first_inst, ex);
   92.35 +        block->remove_node(insidx);
   92.36 +        block->insert_node(ex, first_inst);
   92.37        }
   92.38        // Stop once a CreateEx or any other node is found
   92.39        break;
   92.40 @@ -523,7 +523,7 @@
   92.41      // to the live-ness set.
   92.42      uint j;
   92.43      for (j = last_inst + 1; j > 1; j--) {
   92.44 -      Node* n = block->_nodes[j - 1];
   92.45 +      Node* n = block->get_node(j - 1);
   92.46  
   92.47        // Get value being defined
   92.48        uint r = _lrg_map.live_range_id(n);
   92.49 @@ -541,7 +541,7 @@
   92.50            if( !n->is_Proj() ||
   92.51                // Could also be a flags-projection of a dead ADD or such.
   92.52                (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
   92.53 -            block->_nodes.remove(j - 1);
   92.54 +            block->remove_node(j - 1);
   92.55              if (lrgs(r)._def == n) {
   92.56                lrgs(r)._def = 0;
   92.57              }
   92.58 @@ -605,7 +605,7 @@
   92.59              // (j - 1) is index for current instruction 'n'
   92.60              Node *m = n;
   92.61              for (uint i = j; i <= last_inst && m->is_SpillCopy(); ++i) {
   92.62 -              m = block->_nodes[i];
   92.63 +              m = block->get_node(i);
   92.64              }
   92.65              if (m == single_use) {
   92.66                lrgs(r)._area = 0.0;
   92.67 @@ -772,20 +772,20 @@
   92.68  
   92.69  // Compute high pressure indices; avoid landing in the middle of projnodes
   92.70      j = hrp_index[0];
   92.71 -    if (j < block->_nodes.size() && j < block->end_idx() + 1) {
   92.72 -      Node* cur = block->_nodes[j];
   92.73 +    if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
   92.74 +      Node* cur = block->get_node(j);
   92.75        while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
   92.76          j--;
   92.77 -        cur = block->_nodes[j];
   92.78 +        cur = block->get_node(j);
   92.79        }
   92.80      }
   92.81      block->_ihrp_index = j;
   92.82      j = hrp_index[1];
   92.83 -    if (j < block->_nodes.size() && j < block->end_idx() + 1) {
   92.84 -      Node* cur = block->_nodes[j];
   92.85 +    if (j < block->number_of_nodes() && j < block->end_idx() + 1) {
   92.86 +      Node* cur = block->get_node(j);
   92.87        while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
   92.88          j--;
   92.89 -        cur = block->_nodes[j];
   92.90 +        cur = block->get_node(j);
   92.91        }
   92.92      }
   92.93      block->_fhrp_index = j;
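
One subtlety in the ifg.cpp conversion above: insert_node() and map_node() take the node first and the index second, the reverse of the underlying Node_List calls, so a mechanical rewrite must also swap the arguments:

    // Note the swapped argument order relative to Node_List:
    block->remove_node(insidx);           // was: block->_nodes.remove(insidx)
    block->insert_node(ex, first_inst);   // was: block->_nodes.insert(first_inst, ex)
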
    93.1 --- a/src/share/vm/opto/lcm.cpp	Fri Sep 06 09:55:38 2013 +0100
    93.2 +++ b/src/share/vm/opto/lcm.cpp	Sat Sep 14 20:40:34 2013 +0100
    93.3 @@ -58,14 +58,14 @@
    93.4  // The proj is the control projection for the not-null case.
    93.5  // The val is the pointer being checked for nullness or
    93.6  // decodeHeapOop_not_null node if it did not fold into address.
    93.7 -void Block::implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons) {
    93.8 +void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons) {
    93.9    // Assume that if a null check is needed for offset 0 then it is always needed.
   93.10    // Intel Solaris doesn't support any null checks yet and no
   93.11    // mechanism exists (yet) to set the switches at an os_cpu level
   93.12    if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;
   93.13  
   93.14    // Make sure the ptr-is-null path appears to be uncommon!
   93.15 -  float f = end()->as_MachIf()->_prob;
   93.16 +  float f = block->end()->as_MachIf()->_prob;
   93.17    if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
   93.18    if( f > PROB_UNLIKELY_MAG(4) ) return;
   93.19  
   93.20 @@ -75,13 +75,13 @@
   93.21    // Get the successor block for if the test ptr is non-null
   93.22    Block* not_null_block;  // this one goes with the proj
   93.23    Block* null_block;
   93.24 -  if (_nodes[_nodes.size()-1] == proj) {
   93.25 -    null_block     = _succs[0];
   93.26 -    not_null_block = _succs[1];
   93.27 +  if (block->get_node(block->number_of_nodes()-1) == proj) {
   93.28 +    null_block     = block->_succs[0];
   93.29 +    not_null_block = block->_succs[1];
   93.30    } else {
   93.31 -    assert(_nodes[_nodes.size()-2] == proj, "proj is one or the other");
   93.32 -    not_null_block = _succs[0];
   93.33 -    null_block     = _succs[1];
   93.34 +    assert(block->get_node(block->number_of_nodes()-2) == proj, "proj is one or the other");
   93.35 +    not_null_block = block->_succs[0];
   93.36 +    null_block     = block->_succs[1];
   93.37    }
   93.38    while (null_block->is_Empty() == Block::empty_with_goto) {
   93.39      null_block     = null_block->_succs[0];
   93.40 @@ -93,8 +93,8 @@
   93.41    // detect failure of this optimization, as in 6366351.)
   93.42    {
   93.43      bool found_trap = false;
   93.44 -    for (uint i1 = 0; i1 < null_block->_nodes.size(); i1++) {
   93.45 -      Node* nn = null_block->_nodes[i1];
   93.46 +    for (uint i1 = 0; i1 < null_block->number_of_nodes(); i1++) {
   93.47 +      Node* nn = null_block->get_node(i1);
   93.48        if (nn->is_MachCall() &&
   93.49            nn->as_MachCall()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
   93.50          const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type();
   93.51 @@ -237,20 +237,20 @@
   93.52      }
   93.53  
   93.54      // Check ctrl input to see if the null-check dominates the memory op
   93.55 -    Block *cb = cfg->get_block_for_node(mach);
   93.56 +    Block *cb = get_block_for_node(mach);
   93.57      cb = cb->_idom;             // Always hoist at least 1 block
   93.58      if( !was_store ) {          // Stores can be hoisted only one block
   93.59 -      while( cb->_dom_depth > (_dom_depth + 1))
   93.60 +      while( cb->_dom_depth > (block->_dom_depth + 1))
   93.61          cb = cb->_idom;         // Hoist loads as far as we want
   93.62        // The non-null-block should dominate the memory op, too. Live
   93.63       // range spilling will insert a spill in the non-null-block if it
   93.64       // needs to spill the memory op for an implicit null check.
   93.65 -      if (cb->_dom_depth == (_dom_depth + 1)) {
   93.66 +      if (cb->_dom_depth == (block->_dom_depth + 1)) {
   93.67          if (cb != not_null_block) continue;
   93.68          cb = cb->_idom;
   93.69        }
   93.70      }
   93.71 -    if( cb != this ) continue;
   93.72 +    if( cb != block ) continue;
   93.73  
   93.74      // Found a memory user; see if it can be hoisted to check-block
   93.75      uint vidx = 0;              // Capture index of value into memop
   93.76 @@ -262,8 +262,8 @@
   93.77          if( is_decoden ) continue;
   93.78        }
   93.79        // Block of memory-op input
   93.80 -      Block *inb = cfg->get_block_for_node(mach->in(j));
   93.81 -      Block *b = this;          // Start from nul check
   93.82 +      Block *inb = get_block_for_node(mach->in(j));
   93.83 +      Block *b = block;          // Start from null check
   93.84        while( b != inb && b->_dom_depth > inb->_dom_depth )
   93.85          b = b->_idom;           // search upwards for input
   93.86        // See if input dominates null check
   93.87 @@ -272,28 +272,28 @@
   93.88      }
   93.89      if( j > 0 )
   93.90        continue;
   93.91 -    Block *mb = cfg->get_block_for_node(mach);
   93.92 +    Block *mb = get_block_for_node(mach);
   93.93      // Hoisting stores requires more checks for the anti-dependence case.
   93.94      // Give up hoisting if we have to move the store past any load.
   93.95      if( was_store ) {
   93.96        Block *b = mb;            // Start searching here for a local load
   93.97        // 'mach' is the faulting use we are trying to hoist;
   93.98        // 'n' might be a blocker to hoisting
   93.99 -      while( b != this ) {
  93.100 +      while( b != block ) {
  93.101          uint k;
  93.102 -        for( k = 1; k < b->_nodes.size(); k++ ) {
  93.103 -          Node *n = b->_nodes[k];
  93.104 +        for( k = 1; k < b->number_of_nodes(); k++ ) {
  93.105 +          Node *n = b->get_node(k);
  93.106            if( n->needs_anti_dependence_check() &&
  93.107                n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
  93.108              break;              // Found anti-dependent load
  93.109          }
  93.110 -        if( k < b->_nodes.size() )
  93.111 +        if( k < b->number_of_nodes() )
  93.112            break;                // Found anti-dependent load
  93.113          // Make sure control does not do a merge (would have to check allpaths)
  93.114          if( b->num_preds() != 2 ) break;
  93.115 -        b = cfg->get_block_for_node(b->pred(1)); // Move up to predecessor block
  93.116 +        b = get_block_for_node(b->pred(1)); // Move up to predecessor block
  93.117        }
  93.118 -      if( b != this ) continue;
  93.119 +      if( b != block ) continue;
  93.120      }
  93.121  
  93.122      // Make sure this memory op is not already being used for a NullCheck
  93.123 @@ -303,7 +303,7 @@
  93.124  
  93.125      // Found a candidate!  Pick one with least dom depth - the highest
  93.126      // in the dom tree should be closest to the null check.
  93.127 -    if (best == NULL || cfg->get_block_for_node(mach)->_dom_depth < cfg->get_block_for_node(best)->_dom_depth) {
  93.128 +    if (best == NULL || get_block_for_node(mach)->_dom_depth < get_block_for_node(best)->_dom_depth) {
  93.129        best = mach;
  93.130        bidx = vidx;
  93.131      }
  93.132 @@ -319,46 +319,45 @@
  93.133  
  93.134    if( is_decoden ) {
  93.135      // Check if we need to hoist decodeHeapOop_not_null first.
  93.136 -    Block *valb = cfg->get_block_for_node(val);
  93.137 -    if( this != valb && this->_dom_depth < valb->_dom_depth ) {
  93.138 +    Block *valb = get_block_for_node(val);
  93.139 +    if( block != valb && block->_dom_depth < valb->_dom_depth ) {
  93.140        // Hoist it up to the end of the test block.
  93.141        valb->find_remove(val);
  93.142 -      this->add_inst(val);
  93.143 -      cfg->map_node_to_block(val, this);
  93.144 +      block->add_inst(val);
  93.145 +      map_node_to_block(val, block);
  93.146        // DecodeN on x86 may kill flags. Check for flag-killing projections
  93.147        // that also need to be hoisted.
  93.148        for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
  93.149          Node* n = val->fast_out(j);
  93.150          if( n->is_MachProj() ) {
  93.151 -          cfg->get_block_for_node(n)->find_remove(n);
  93.152 -          this->add_inst(n);
  93.153 -          cfg->map_node_to_block(n, this);
  93.154 +          get_block_for_node(n)->find_remove(n);
  93.155 +          block->add_inst(n);
  93.156 +          map_node_to_block(n, block);
  93.157          }
  93.158        }
  93.159      }
  93.160    }
  93.161    // Hoist the memory candidate up to the end of the test block.
  93.162 -  Block *old_block = cfg->get_block_for_node(best);
  93.163 +  Block *old_block = get_block_for_node(best);
  93.164    old_block->find_remove(best);
  93.165 -  add_inst(best);
  93.166 -  cfg->map_node_to_block(best, this);
  93.167 +  block->add_inst(best);
  93.168 +  map_node_to_block(best, block);
  93.169  
  93.170    // Move the control dependence
  93.171 -  if (best->in(0) && best->in(0) == old_block->_nodes[0])
  93.172 -    best->set_req(0, _nodes[0]);
  93.173 +  if (best->in(0) && best->in(0) == old_block->head())
  93.174 +    best->set_req(0, block->head());
  93.175  
  93.176    // Check for flag-killing projections that also need to be hoisted
  93.177    // Should be DU safe because no edge updates.
  93.178    for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
  93.179      Node* n = best->fast_out(j);
  93.180      if( n->is_MachProj() ) {
  93.181 -      cfg->get_block_for_node(n)->find_remove(n);
  93.182 -      add_inst(n);
  93.183 -      cfg->map_node_to_block(n, this);
  93.184 +      get_block_for_node(n)->find_remove(n);
  93.185 +      block->add_inst(n);
  93.186 +      map_node_to_block(n, block);
  93.187      }
  93.188    }
  93.189  
  93.190 -  Compile *C = cfg->C;
  93.191    // proj==Op_True --> ne test; proj==Op_False --> eq test.
  93.192    // One of two graph shapes got matched:
  93.193    //   (IfTrue  (If (Bool NE (CmpP ptr NULL))))
  93.194 @@ -368,10 +367,10 @@
  93.195    // We need to flip the projections to keep the same semantics.
  93.196    if( proj->Opcode() == Op_IfTrue ) {
  93.197      // Swap order of projections in basic block to swap branch targets
  93.198 -    Node *tmp1 = _nodes[end_idx()+1];
  93.199 -    Node *tmp2 = _nodes[end_idx()+2];
  93.200 -    _nodes.map(end_idx()+1, tmp2);
  93.201 -    _nodes.map(end_idx()+2, tmp1);
  93.202 +    Node *tmp1 = block->get_node(block->end_idx()+1);
  93.203 +    Node *tmp2 = block->get_node(block->end_idx()+2);
  93.204 +    block->map_node(tmp2, block->end_idx()+1);
  93.205 +    block->map_node(tmp1, block->end_idx()+2);
  93.206      Node *tmp = new (C) Node(C->top()); // Use not NULL input
  93.207      tmp1->replace_by(tmp);
  93.208      tmp2->replace_by(tmp1);
  93.209 @@ -384,8 +383,8 @@
  93.210    // it as well.
  93.211    Node *old_tst = proj->in(0);
  93.212    MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
  93.213 -  _nodes.map(end_idx(),nul_chk);
  93.214 -  cfg->map_node_to_block(nul_chk, this);
  93.215 +  block->map_node(nul_chk, block->end_idx());
  93.216 +  map_node_to_block(nul_chk, block);
  93.217    // Redirect users of old_test to nul_chk
  93.218    for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
  93.219      old_tst->last_out(i2)->set_req(0, nul_chk);
  93.220 @@ -393,8 +392,8 @@
  93.221    for (uint i3 = 0; i3 < old_tst->req(); i3++)
  93.222      old_tst->set_req(i3, NULL);
  93.223  
  93.224 -  cfg->latency_from_uses(nul_chk);
  93.225 -  cfg->latency_from_uses(best);
  93.226 +  latency_from_uses(nul_chk);
  93.227 +  latency_from_uses(best);
  93.228  }
  93.229  
  93.230  
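
implicit_null_check() is the first of several lcm.cpp methods this changeset migrates from Block to PhaseCFG (select, set_next_call, needed_for_next_call, sched_call, schedule_local and call_catch_cleanup follow below). The recurring pattern, reduced to its essentials:

    // The migration pattern, in miniature:
    //
    //   before: the block is 'this', the CFG arrives as a parameter
    //     void Block::implicit_null_check(PhaseCFG* cfg, ...) {
    //       float f = end()->as_MachIf()->_prob;         // implicit this->
    //       Block* cb = cfg->get_block_for_node(mach);   // cfg-> everywhere
    //     }
    //
    //   after: the CFG is 'this', the block arrives as a parameter
    //     void PhaseCFG::implicit_null_check(Block* block, ...) {
    //       float f = block->end()->as_MachIf()->_prob;  // explicit block->
    //       Block* cb = get_block_for_node(mach);        // direct CFG calls
    //     }
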
  93.231 @@ -408,7 +407,7 @@
  93.232  // remaining cases (most), choose the instruction with the greatest latency
  93.233  // (that is, the greatest number of pseudo-cycles required to the end of the
  93.234  // routine). If there is a tie, choose the instruction with the most inputs.
  93.235 -Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
  93.236 +Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {
  93.237  
  93.238    // If only a single entry on the stack, use it
  93.239    uint cnt = worklist.size();
  93.240 @@ -442,7 +441,7 @@
  93.241      }
  93.242  
  93.243      // Final call in a block must be adjacent to 'catch'
  93.244 -    Node *e = end();
  93.245 +    Node *e = block->end();
  93.246      if( e->is_Catch() && e->in(0)->in(0) == n )
  93.247        continue;
  93.248  
  93.249 @@ -468,7 +467,7 @@
  93.250          Node* use = n->fast_out(j);
  93.251  
  93.252          // The use is a conditional branch, make them adjacent
  93.253 -        if (use->is_MachIf() && cfg->get_block_for_node(use) == this) {
  93.254 +        if (use->is_MachIf() && get_block_for_node(use) == block) {
  93.255            found_machif = true;
  93.256            break;
  93.257          }
  93.258 @@ -501,7 +500,7 @@
  93.259        n_choice = 1;
  93.260      }
  93.261  
  93.262 -    uint n_latency = cfg->get_latency_for_node(n);
  93.263 +    uint n_latency = get_latency_for_node(n);
  93.264      uint n_score   = n->req();   // Many inputs get high score to break ties
  93.265  
  93.266      // Keep best latency found
  93.267 @@ -529,13 +528,13 @@
  93.268  
  93.269  
  93.270  //------------------------------set_next_call----------------------------------
  93.271 -void Block::set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg) {
  93.272 +void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) {
  93.273    if( next_call.test_set(n->_idx) ) return;
  93.274    for( uint i=0; i<n->len(); i++ ) {
  93.275      Node *m = n->in(i);
  93.276      if( !m ) continue;  // must see all nodes in block that precede call
  93.277 -    if (cfg->get_block_for_node(m) == this) {
  93.278 -      set_next_call(m, next_call, cfg);
  93.279 +    if (get_block_for_node(m) == block) {
  93.280 +      set_next_call(block, m, next_call);
  93.281      }
  93.282    }
  93.283  }
  93.284 @@ -546,24 +545,26 @@
  93.285  // next subroutine call get priority - basically it moves things NOT needed
  93.286  // for the next call till after the call.  This prevents me from trying to
  93.287  // carry lots of stuff live across a call.
  93.288 -void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg) {
  93.289 +void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call) {
  93.290    // Find the next control-defining Node in this block
  93.291    Node* call = NULL;
  93.292    for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
  93.293      Node* m = this_call->fast_out(i);
  93.294 -    if(cfg->get_block_for_node(m) == this && // Local-block user
  93.295 +    if (get_block_for_node(m) == block && // Local-block user
  93.296          m != this_call &&       // Not self-start node
  93.297 -        m->is_MachCall() )
  93.298 +        m->is_MachCall()) {
  93.299        call = m;
  93.300        break;
  93.301 +    }
  93.302    }
  93.303    if (call == NULL)  return;    // No next call (e.g., block end is near)
  93.304    // Set next-call for all inputs to this call
  93.305 -  set_next_call(call, next_call, cfg);
  93.306 +  set_next_call(block, call, next_call);
  93.307  }
  93.308  
  93.309  //------------------------------add_call_kills-------------------------------------
  93.310 -void Block::add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
  93.311 +// Helper function that adds caller-save registers to the MachProjNode kill mask
  93.312 +static void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
  93.313    // Fill in the kill mask for the call
  93.314    for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
  93.315      if( !regs.Member(r) ) {     // Not already defined by the call
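
The branch body inside this loop falls outside the hunk. Going by the register-save-policy characters used elsewhere in HotSpot ('C' save-on-call, 'A' always-save, 'E' save-on-entry), the elided part is, under that assumption, roughly:

    // Assumed body of the elided branch, following HotSpot's save-policy
    // conventions ('C' save-on-call, 'A' always-save, 'E' save-on-entry):
    if ((save_policy[r] == 'C') ||
        (save_policy[r] == 'A') ||
        ((save_policy[r] == 'E') && exclude_soe)) {
      proj->_rout.Insert(r);  // the register is killed across the call
    }
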
  93.316 @@ -579,7 +580,7 @@
  93.317  
  93.318  
  93.319  //------------------------------sched_call-------------------------------------
  93.320 -uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
  93.321 +uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call) {
  93.322    RegMask regs;
  93.323  
  93.324    // Schedule all the users of the call right now.  All the users are
  93.325 @@ -592,18 +593,18 @@
  93.326      ready_cnt.at_put(n->_idx, n_cnt);
  93.327      assert( n_cnt == 0, "" );
  93.328      // Schedule next to call
  93.329 -    _nodes.map(node_cnt++, n);
  93.330 +    block->map_node(n, node_cnt++);
  93.331      // Collect defined registers
  93.332      regs.OR(n->out_RegMask());
  93.333      // Check for scheduling the next control-definer
  93.334      if( n->bottom_type() == Type::CONTROL )
  93.335        // Warm up next pile of heuristic bits
  93.336 -      needed_for_next_call(n, next_call, cfg);
  93.337 +      needed_for_next_call(block, n, next_call);
  93.338  
  93.339      // Children of projections are now all ready
  93.340      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
  93.341        Node* m = n->fast_out(j); // Get user
  93.342 -      if(cfg->get_block_for_node(m) != this) {
  93.343 +      if(get_block_for_node(m) != block) {
  93.344          continue;
  93.345        }
  93.346        if( m->is_Phi() ) continue;
  93.347 @@ -617,14 +618,14 @@
  93.348  
  93.349    // Act as if the call defines the Frame Pointer.
  93.350    // Certainly the FP is alive and well after the call.
  93.351 -  regs.Insert(matcher.c_frame_pointer());
  93.352 +  regs.Insert(_matcher.c_frame_pointer());
  93.353  
  93.354    // Set all registers killed and not already defined by the call.
  93.355    uint r_cnt = mcall->tf()->range()->cnt();
  93.356    int op = mcall->ideal_Opcode();
  93.357 -  MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
  93.358 -  cfg->map_node_to_block(proj, this);
  93.359 -  _nodes.insert(node_cnt++, proj);
  93.360 +  MachProjNode *proj = new (C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
  93.361 +  map_node_to_block(proj, block);
  93.362 +  block->insert_node(proj, node_cnt++);
  93.363  
  93.364    // Select the right register save policy.
  93.365    const char * save_policy;
  93.366 @@ -633,13 +634,13 @@
  93.367      case Op_CallLeaf:
  93.368      case Op_CallLeafNoFP:
  93.369        // Calling C code so use C calling convention
  93.370 -      save_policy = matcher._c_reg_save_policy;
  93.371 +      save_policy = _matcher._c_reg_save_policy;
  93.372        break;
  93.373  
  93.374      case Op_CallStaticJava:
  93.375      case Op_CallDynamicJava:
  93.376        // Calling Java code so use Java calling convention
  93.377 -      save_policy = matcher._register_save_policy;
  93.378 +      save_policy = _matcher._register_save_policy;
  93.379        break;
  93.380  
  93.381      default:
  93.382 @@ -674,44 +675,46 @@
  93.383  
  93.384  //------------------------------schedule_local---------------------------------
  93.385  // Topological sort within a block.  Someday become a real scheduler.
  93.386 -bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, GrowableArray<int> &ready_cnt, VectorSet &next_call) {
  93.387 +bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call) {
  93.388    // Already "sorted" are the block start Node (as the first entry), and
  93.389    // the block-ending Node and any trailing control projections.  We leave
  93.390    // these alone.  PhiNodes and ParmNodes are made to follow the block start
  93.391    // Node.  Everything else gets topo-sorted.
  93.392  
  93.393  #ifndef PRODUCT
  93.394 -    if (cfg->trace_opto_pipelining()) {
  93.395 -      tty->print_cr("# --- schedule_local B%d, before: ---", _pre_order);
  93.396 -      for (uint i = 0;i < _nodes.size();i++) {
  93.397 +    if (trace_opto_pipelining()) {
  93.398 +      tty->print_cr("# --- schedule_local B%d, before: ---", block->_pre_order);
  93.399 +      for (uint i = 0;i < block->number_of_nodes(); i++) {
  93.400          tty->print("# ");
  93.401 -        _nodes[i]->fast_dump();
  93.402 +        block->get_node(i)->fast_dump();
  93.403        }
  93.404        tty->print_cr("#");
  93.405      }
  93.406  #endif
  93.407  
  93.408    // RootNode is already sorted
  93.409 -  if( _nodes.size() == 1 ) return true;
  93.410 +  if (block->number_of_nodes() == 1) {
  93.411 +    return true;
  93.412 +  }
  93.413  
  93.414    // Move PhiNodes and ParmNodes from 1 to cnt up to the start
  93.415 -  uint node_cnt = end_idx();
  93.416 +  uint node_cnt = block->end_idx();
  93.417    uint phi_cnt = 1;
  93.418    uint i;
  93.419    for( i = 1; i<node_cnt; i++ ) { // Scan for Phi
  93.420 -    Node *n = _nodes[i];
  93.421 +    Node *n = block->get_node(i);
  93.422      if( n->is_Phi() ||          // Found a PhiNode or ParmNode
  93.423 -        (n->is_Proj()  && n->in(0) == head()) ) {
  93.424 +        (n->is_Proj()  && n->in(0) == block->head()) ) {
  93.425        // Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
  93.426 -      _nodes.map(i,_nodes[phi_cnt]);
  93.427 -      _nodes.map(phi_cnt++,n);  // swap Phi/Parm up front
  93.428 +      block->map_node(block->get_node(phi_cnt), i);
  93.429 +      block->map_node(n, phi_cnt++);  // swap Phi/Parm up front
  93.430      } else {                    // All others
  93.431        // Count block-local inputs to 'n'
  93.432        uint cnt = n->len();      // Input count
  93.433        uint local = 0;
  93.434        for( uint j=0; j<cnt; j++ ) {
  93.435          Node *m = n->in(j);
  93.436 -        if( m && cfg->get_block_for_node(m) == this && !m->is_top() )
  93.437 +        if( m && get_block_for_node(m) == block && !m->is_top() )
  93.438            local++;              // One more block-local input
  93.439        }
  93.440        ready_cnt.at_put(n->_idx, local); // Count em up
  93.441 @@ -723,7 +726,7 @@
  93.442            for (uint prec = n->req(); prec < n->len(); prec++) {
  93.443              Node* oop_store = n->in(prec);
  93.444              if (oop_store != NULL) {
  93.445 -              assert(cfg->get_block_for_node(oop_store)->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
  93.446 +              assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark");
  93.447              }
  93.448            }
  93.449          }
  93.450 @@ -747,16 +750,16 @@
  93.451        }
  93.452      }
  93.453    }
  93.454 -  for(uint i2=i; i2<_nodes.size(); i2++ ) // Trailing guys get zapped count
  93.455 -    ready_cnt.at_put(_nodes[i2]->_idx, 0);
  93.456 +  for(uint i2=i; i2< block->number_of_nodes(); i2++ ) // Trailing guys get zapped count
  93.457 +    ready_cnt.at_put(block->get_node(i2)->_idx, 0);
  93.458  
  93.459    // All the prescheduled guys do not hold back internal nodes
  93.460    uint i3;
  93.461    for(i3 = 0; i3<phi_cnt; i3++ ) {  // For all pre-scheduled
  93.462 -    Node *n = _nodes[i3];       // Get pre-scheduled
  93.463 +    Node *n = block->get_node(i3);       // Get pre-scheduled
  93.464      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
  93.465        Node* m = n->fast_out(j);
  93.466 -      if (cfg->get_block_for_node(m) == this) { // Local-block user
  93.467 +      if (get_block_for_node(m) == block) { // Local-block user
  93.468          int m_cnt = ready_cnt.at(m->_idx)-1;
  93.469          ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
  93.470        }
  93.471 @@ -767,7 +770,7 @@
  93.472    // Make a worklist
  93.473    Node_List worklist;
  93.474    for(uint i4=i3; i4<node_cnt; i4++ ) {    // Put ready guys on worklist
  93.475 -    Node *m = _nodes[i4];
  93.476 +    Node *m = block->get_node(i4);
  93.477      if( !ready_cnt.at(m->_idx) ) {   // Zero ready count?
  93.478        if (m->is_iteratively_computed()) {
  93.479          // Push induction variable increments last to allow other uses
  93.480 @@ -789,15 +792,15 @@
  93.481    }
  93.482  
  93.483    // Warm up the 'next_call' heuristic bits
  93.484 -  needed_for_next_call(_nodes[0], next_call, cfg);
  93.485 +  needed_for_next_call(block, block->head(), next_call);
  93.486  
  93.487  #ifndef PRODUCT
  93.488 -    if (cfg->trace_opto_pipelining()) {
  93.489 -      for (uint j=0; j<_nodes.size(); j++) {
  93.490 -        Node     *n = _nodes[j];
  93.491 +    if (trace_opto_pipelining()) {
  93.492 +      for (uint j=0; j< block->number_of_nodes(); j++) {
  93.493 +        Node     *n = block->get_node(j);
  93.494          int     idx = n->_idx;
  93.495          tty->print("#   ready cnt:%3d  ", ready_cnt.at(idx));
  93.496 -        tty->print("latency:%3d  ", cfg->get_latency_for_node(n));
  93.497 +        tty->print("latency:%3d  ", get_latency_for_node(n));
  93.498          tty->print("%4d: %s\n", idx, n->Name());
  93.499        }
  93.500      }
  93.501 @@ -808,7 +811,7 @@
  93.502    while( worklist.size() ) {    // Worklist is not ready
  93.503  
  93.504  #ifndef PRODUCT
  93.505 -    if (cfg->trace_opto_pipelining()) {
  93.506 +    if (trace_opto_pipelining()) {
  93.507        tty->print("#   ready list:");
  93.508        for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
  93.509          Node *n = worklist[i];      // Get Node on worklist
  93.510 @@ -819,13 +822,13 @@
  93.511  #endif
  93.512  
  93.513      // Select and pop a ready guy from worklist
  93.514 -    Node* n = select(cfg, worklist, ready_cnt, next_call, phi_cnt);
  93.515 -    _nodes.map(phi_cnt++,n);    // Schedule him next
  93.516 +    Node* n = select(block, worklist, ready_cnt, next_call, phi_cnt);
  93.517 +    block->map_node(n, phi_cnt++);    // Schedule him next
  93.518  
  93.519  #ifndef PRODUCT
  93.520 -    if (cfg->trace_opto_pipelining()) {
  93.521 +    if (trace_opto_pipelining()) {
  93.522        tty->print("#    select %d: %s", n->_idx, n->Name());
  93.523 -      tty->print(", latency:%d", cfg->get_latency_for_node(n));
  93.524 +      tty->print(", latency:%d", get_latency_for_node(n));
  93.525        n->dump();
  93.526        if (Verbose) {
  93.527          tty->print("#   ready list:");
  93.528 @@ -840,26 +843,26 @@
  93.529  #endif
  93.530      if( n->is_MachCall() ) {
  93.531        MachCallNode *mcall = n->as_MachCall();
  93.532 -      phi_cnt = sched_call(matcher, cfg, phi_cnt, worklist, ready_cnt, mcall, next_call);
  93.533 +      phi_cnt = sched_call(block, phi_cnt, worklist, ready_cnt, mcall, next_call);
  93.534        continue;
  93.535      }
  93.536  
  93.537      if (n->is_Mach() && n->as_Mach()->has_call()) {
  93.538        RegMask regs;
  93.539 -      regs.Insert(matcher.c_frame_pointer());
  93.540 +      regs.Insert(_matcher.c_frame_pointer());
  93.541        regs.OR(n->out_RegMask());
  93.542  
  93.543 -      MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
  93.544 -      cfg->map_node_to_block(proj, this);
  93.545 -      _nodes.insert(phi_cnt++, proj);
  93.546 +      MachProjNode *proj = new (C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
  93.547 +      map_node_to_block(proj, block);
  93.548 +      block->insert_node(proj, phi_cnt++);
  93.549  
  93.550 -      add_call_kills(proj, regs, matcher._c_reg_save_policy, false);
  93.551 +      add_call_kills(proj, regs, _matcher._c_reg_save_policy, false);
  93.552      }
  93.553  
  93.554      // Children are now all ready
  93.555      for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
  93.556        Node* m = n->fast_out(i5); // Get user
  93.557 -      if (cfg->get_block_for_node(m) != this) {
  93.558 +      if (get_block_for_node(m) != block) {
  93.559          continue;
  93.560        }
  93.561        if( m->is_Phi() ) continue;
  93.562 @@ -874,9 +877,8 @@
  93.563      }
  93.564    }
  93.565  
  93.566 -  if( phi_cnt != end_idx() ) {
  93.567 +  if( phi_cnt != block->end_idx() ) {
  93.568      // did not schedule all.  Retry, Bailout, or Die
  93.569 -    Compile* C = matcher.C;
  93.570      if (C->subsume_loads() == true && !C->failing()) {
  93.571        // Retry with subsume_loads == false
  93.572        // If this is the first failure, the sentinel string will "stick"
  93.573 @@ -888,12 +890,12 @@
  93.574    }
  93.575  
  93.576  #ifndef PRODUCT
  93.577 -  if (cfg->trace_opto_pipelining()) {
  93.578 +  if (trace_opto_pipelining()) {
  93.579      tty->print_cr("#");
  93.580      tty->print_cr("# after schedule_local");
  93.581 -    for (uint i = 0;i < _nodes.size();i++) {
  93.582 +    for (uint i = 0;i < block->number_of_nodes();i++) {
  93.583        tty->print("# ");
  93.584 -      _nodes[i]->fast_dump();
  93.585 +      block->get_node(i)->fast_dump();
  93.586      }
  93.587      tty->cr();
  93.588    }
  93.589 @@ -919,7 +921,7 @@
  93.590  }
  93.591  
  93.592  //------------------------------catch_cleanup_find_cloned_def------------------
  93.593 -static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
  93.594 +Node* PhaseCFG::catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
  93.595    assert( use_blk != def_blk, "Inter-block cleanup only");
  93.596  
  93.597    // The use is some block below the Catch.  Find and return the clone of the def
  93.598 @@ -945,14 +947,14 @@
  93.599      // PhiNode, the PhiNode uses from the def and ITS uses need fixup.
  93.600      Node_Array inputs = new Node_List(Thread::current()->resource_area());
  93.601      for(uint k = 1; k < use_blk->num_preds(); k++) {
  93.602 -      Block* block = cfg->get_block_for_node(use_blk->pred(k));
  93.603 -      inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, cfg, n_clone_idx));
  93.604 +      Block* block = get_block_for_node(use_blk->pred(k));
  93.605 +      inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, n_clone_idx));
  93.606      }
  93.607  
  93.608      // Check to see if the use_blk already has an identical phi inserted.
  93.609      // If it exists, it will be at the first position since all uses of a
  93.610      // def are processed together.
  93.611 -    Node *phi = use_blk->_nodes[1];
  93.612 +    Node *phi = use_blk->get_node(1);
  93.613      if( phi->is_Phi() ) {
  93.614        fixup = phi;
  93.615        for (uint k = 1; k < use_blk->num_preds(); k++) {
  93.616 @@ -967,8 +969,8 @@
  93.617      // If an existing PhiNode was not found, make a new one.
  93.618      if (fixup == NULL) {
  93.619        Node *new_phi = PhiNode::make(use_blk->head(), def);
  93.620 -      use_blk->_nodes.insert(1, new_phi);
  93.621 -      cfg->map_node_to_block(new_phi, use_blk);
  93.622 +      use_blk->insert_node(new_phi, 1);
  93.623 +      map_node_to_block(new_phi, use_blk);
  93.624        for (uint k = 1; k < use_blk->num_preds(); k++) {
  93.625          new_phi->set_req(k, inputs[k]);
  93.626        }
  93.627 @@ -977,7 +979,7 @@
  93.628  
  93.629    } else {
  93.630      // Found the use just below the Catch.  Make it use the clone.
  93.631 -    fixup = use_blk->_nodes[n_clone_idx];
  93.632 +    fixup = use_blk->get_node(n_clone_idx);
  93.633    }
  93.634  
  93.635    return fixup;
  93.636 @@ -997,36 +999,36 @@
  93.637    for( uint k = 0; k < blk->_num_succs; k++ ) {
  93.638      // Get clone in each successor block
  93.639      Block *sb = blk->_succs[k];
  93.640 -    Node *clone = sb->_nodes[offset_idx+1];
  93.641 +    Node *clone = sb->get_node(offset_idx+1);
  93.642      assert( clone->Opcode() == use->Opcode(), "" );
  93.643  
  93.644      // Make use-clone reference the def-clone
  93.645 -    catch_cleanup_fix_all_inputs(clone, def, sb->_nodes[n_clone_idx]);
  93.646 +    catch_cleanup_fix_all_inputs(clone, def, sb->get_node(n_clone_idx));
  93.647    }
  93.648  }
  93.649  
  93.650  //------------------------------catch_cleanup_inter_block---------------------
  93.651  // Fix all input edges in use that reference "def".  The use is in a different
  93.652  // block than the def.
  93.653 -static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
  93.654 +void PhaseCFG::catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
  93.655    if( !use_blk ) return;        // Can happen if the use is a precedence edge
  93.656  
  93.657 -  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, cfg, n_clone_idx);
  93.658 +  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, n_clone_idx);
  93.659    catch_cleanup_fix_all_inputs(use, def, new_def);
  93.660  }
  93.661  
  93.662  //------------------------------call_catch_cleanup-----------------------------
  93.663  // If we inserted any instructions between a Call and his CatchNode,
  93.664  // clone the instructions on all paths below the Catch.
  93.665 -void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
  93.666 +void PhaseCFG::call_catch_cleanup(Block* block) {
  93.667  
  93.668    // End of region to clone
  93.669 -  uint end = end_idx();
  93.670 -  if( !_nodes[end]->is_Catch() ) return;
  93.671 +  uint end = block->end_idx();
  93.672 +  if( !block->get_node(end)->is_Catch() ) return;
  93.673    // Start of region to clone
  93.674    uint beg = end;
  93.675 -  while(!_nodes[beg-1]->is_MachProj() ||
  93.676 -        !_nodes[beg-1]->in(0)->is_MachCall() ) {
  93.677 +  while(!block->get_node(beg-1)->is_MachProj() ||
  93.678 +        !block->get_node(beg-1)->in(0)->is_MachCall() ) {
  93.679      beg--;
  93.680      assert(beg > 0,"Catch cleanup walking beyond block boundary");
  93.681    }
  93.682 @@ -1035,15 +1037,15 @@
  93.683  
  93.684    // Clone along all Catch output paths.  Clone area between the 'beg' and
  93.685    // 'end' indices.
  93.686 -  for( uint i = 0; i < _num_succs; i++ ) {
  93.687 -    Block *sb = _succs[i];
  93.688 +  for( uint i = 0; i < block->_num_succs; i++ ) {
  93.689 +    Block *sb = block->_succs[i];
  93.690      // Clone the entire area; ignoring the edge fixup for now.
  93.691      for( uint j = end; j > beg; j-- ) {
  93.692        // It is safe here to clone a node with anti_dependence
  93.693        // since clones dominate on each path.
  93.694 -      Node *clone = _nodes[j-1]->clone();
  93.695 -      sb->_nodes.insert( 1, clone );
  93.696 -      cfg->map_node_to_block(clone, sb);
  93.697 +      Node *clone = block->get_node(j-1)->clone();
  93.698 +      sb->insert_node(clone, 1);
  93.699 +      map_node_to_block(clone, sb);
  93.700      }
  93.701    }
  93.702  
  93.703 @@ -1051,7 +1053,7 @@
  93.704    // Fixup edges.  Check the def-use info per cloned Node
  93.705    for(uint i2 = beg; i2 < end; i2++ ) {
  93.706      uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block
  93.707 -    Node *n = _nodes[i2];        // Node that got cloned
  93.708 +    Node *n = block->get_node(i2);        // Node that got cloned
  93.709      // Need DU safe iterator because of edge manipulation in calls.
  93.710      Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area());
  93.711      for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) {
  93.712 @@ -1060,19 +1062,19 @@
  93.713      uint max = out->size();
  93.714      for (uint j = 0; j < max; j++) {// For all users
  93.715        Node *use = out->pop();
  93.716 -      Block *buse = cfg->get_block_for_node(use);
  93.717 +      Block *buse = get_block_for_node(use);
  93.718        if( use->is_Phi() ) {
  93.719          for( uint k = 1; k < use->req(); k++ )
  93.720            if( use->in(k) == n ) {
  93.721 -            Block* block = cfg->get_block_for_node(buse->pred(k));
  93.722 -            Node *fixup = catch_cleanup_find_cloned_def(block, n, this, cfg, n_clone_idx);
  93.723 +            Block* b = get_block_for_node(buse->pred(k));
  93.724 +            Node *fixup = catch_cleanup_find_cloned_def(b, n, block, n_clone_idx);
  93.725              use->set_req(k, fixup);
  93.726            }
  93.727        } else {
  93.728 -        if (this == buse) {
  93.729 -          catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
  93.730 +        if (block == buse) {
  93.731 +          catch_cleanup_intra_block(use, n, block, beg, n_clone_idx);
  93.732          } else {
  93.733 -          catch_cleanup_inter_block(use, buse, n, this, cfg, n_clone_idx);
  93.734 +          catch_cleanup_inter_block(use, buse, n, block, n_clone_idx);
  93.735          }
  93.736        }
  93.737      } // End for all users
  93.738 @@ -1081,30 +1083,30 @@
  93.739  
  93.740    // Remove the now-dead cloned ops
  93.741    for(uint i3 = beg; i3 < end; i3++ ) {
  93.742 -    _nodes[beg]->disconnect_inputs(NULL, C);
  93.743 -    _nodes.remove(beg);
  93.744 +    block->get_node(beg)->disconnect_inputs(NULL, C);
  93.745 +    block->remove_node(beg);
  93.746    }
  93.747  
  93.748    // If the successor blocks have a CreateEx node, move it back to the top
  93.749 -  for(uint i4 = 0; i4 < _num_succs; i4++ ) {
  93.750 -    Block *sb = _succs[i4];
  93.751 +  for(uint i4 = 0; i4 < block->_num_succs; i4++ ) {
  93.752 +    Block *sb = block->_succs[i4];
  93.753      uint new_cnt = end - beg;
  93.754      // Remove any newly created, but dead, nodes.
  93.755      for( uint j = new_cnt; j > 0; j-- ) {
  93.756 -      Node *n = sb->_nodes[j];
  93.757 +      Node *n = sb->get_node(j);
  93.758        if (n->outcnt() == 0 &&
  93.759            (!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){
  93.760          n->disconnect_inputs(NULL, C);
  93.761 -        sb->_nodes.remove(j);
  93.762 +        sb->remove_node(j);
  93.763          new_cnt--;
  93.764        }
  93.765      }
  93.766      // If any newly created nodes remain, move the CreateEx node to the top
  93.767      if (new_cnt > 0) {
  93.768 -      Node *cex = sb->_nodes[1+new_cnt];
  93.769 +      Node *cex = sb->get_node(1+new_cnt);
  93.770        if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
  93.771 -        sb->_nodes.remove(1+new_cnt);
  93.772 -        sb->_nodes.insert(1,cex);
  93.773 +        sb->remove_node(1+new_cnt);
  93.774 +        sb->insert_node(cex, 1);
  93.775        }
  93.776      }
  93.777    }
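
A detail worth noting in the cloning loop of call_catch_cleanup() above: j walks downward while every clone is inserted at slot 1 (slot 0 is the block head), which leaves the clones in each successor in their original order:

    // Cloning [beg, end) into successor 'sb': walking j downward while
    // always inserting at slot 1 preserves the original order of the run.
    for (uint j = end; j > beg; j--) {
      Node* clone = block->get_node(j - 1)->clone();
      sb->insert_node(clone, 1);     // just below the block head
      map_node_to_block(clone, sb);  // keep the node->block map in sync
    }
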
    94.1 --- a/src/share/vm/opto/library_call.cpp	Fri Sep 06 09:55:38 2013 +0100
    94.2 +++ b/src/share/vm/opto/library_call.cpp	Sat Sep 14 20:40:34 2013 +0100
    94.3 @@ -2756,10 +2756,28 @@
    94.4        newval = _gvn.makecon(TypePtr::NULL_PTR);
    94.5  
    94.6      // Reference stores need a store barrier.
    94.7 -    pre_barrier(true /* do_load*/,
    94.8 -                control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
    94.9 -                NULL /* pre_val*/,
   94.10 -                T_OBJECT);
   94.11 +    if (kind == LS_xchg) {
    94.12 +      // If the pre-barrier must execute before the oop store, the old value requires a load (do_load) here.
   94.13 +      if (!can_move_pre_barrier()) {
   94.14 +        pre_barrier(true /* do_load*/,
   94.15 +                    control(), base, adr, alias_idx, newval, value_type->make_oopptr(),
   94.16 +                    NULL /* pre_val*/,
   94.17 +                    T_OBJECT);
    94.18 +      } // Else move the pre_barrier to use the load_store value; see below.
   94.19 +    } else if (kind == LS_cmpxchg) {
   94.20 +      // Same as for newval above:
   94.21 +      if (_gvn.type(oldval) == TypePtr::NULL_PTR) {
   94.22 +        oldval = _gvn.makecon(TypePtr::NULL_PTR);
   94.23 +      }
   94.24 +      // The only known value which might get overwritten is oldval.
   94.25 +      pre_barrier(false /* do_load */,
   94.26 +                  control(), NULL, NULL, max_juint, NULL, NULL,
   94.27 +                  oldval /* pre_val */,
   94.28 +                  T_OBJECT);
   94.29 +    } else {
   94.30 +      ShouldNotReachHere();
   94.31 +    }
   94.32 +
   94.33  #ifdef _LP64
   94.34      if (adr->bottom_type()->is_ptr_to_narrowoop()) {
   94.35        Node *newval_enc = _gvn.transform(new (C) EncodePNode(newval, newval->bottom_type()->make_narrowoop()));
   94.36 @@ -2795,16 +2813,27 @@
   94.37    Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store));
   94.38    set_memory(proj, alias_idx);
   94.39  
   94.40 +  if (type == T_OBJECT && kind == LS_xchg) {
   94.41 +#ifdef _LP64
   94.42 +    if (adr->bottom_type()->is_ptr_to_narrowoop()) {
   94.43 +      load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
   94.44 +    }
   94.45 +#endif
   94.46 +    if (can_move_pre_barrier()) {
   94.47 +      // Don't need to load pre_val. The old value is returned by load_store.
   94.48 +      // The pre_barrier can execute after the xchg as long as no safepoint
   94.49 +      // gets inserted between them.
   94.50 +      pre_barrier(false /* do_load */,
   94.51 +                  control(), NULL, NULL, max_juint, NULL, NULL,
   94.52 +                  load_store /* pre_val */,
   94.53 +                  T_OBJECT);
   94.54 +    }
   94.55 +  }
   94.56 +
   94.57    // Add the trailing membar surrounding the access
   94.58    insert_mem_bar(Op_MemBarCPUOrder);
   94.59    insert_mem_bar(Op_MemBarAcquire);
   94.60  
   94.61 -#ifdef _LP64
   94.62 -  if (type == T_OBJECT && adr->bottom_type()->is_ptr_to_narrowoop() && kind == LS_xchg) {
   94.63 -    load_store = _gvn.transform(new (C) DecodeNNode(load_store, load_store->get_ptr_type()));
   94.64 -  }
   94.65 -#endif
   94.66 -
   94.67    assert(type2size[load_store->bottom_type()->basic_type()] == type2size[rtype], "result type should match");
   94.68    set_result(load_store);
   94.69    return true;
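
Putting the library_call.cpp hunks together, the node ordering for a T_OBJECT LS_xchg when can_move_pre_barrier() holds becomes, in sketch form (node names follow the surrounding code; the leading barriers are emitted earlier in the function, outside these hunks):

    // Ordering sketch for the movable-pre-barrier case (not literal code):
    //
    //   old = load_store(mem, adr, newval)         // e.g. a GetAndSetP node
    //   old = DecodeN(old)                         // LP64 + narrow oops only
    //   pre_barrier(false /* do_load */,
    //               old  /* pre_val */, T_OBJECT)  // SATB enqueue of old value
    //   membar(CPUOrder); membar(Acquire)          // trailing barriers
    //
    // This is safe only because no safepoint can be scheduled between the
    // load_store and the pre-barrier, as the comment in the hunk notes.
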
    95.1 --- a/src/share/vm/opto/live.cpp	Fri Sep 06 09:55:38 2013 +0100
    95.2 +++ b/src/share/vm/opto/live.cpp	Sat Sep 14 20:40:34 2013 +0100
    95.3 @@ -85,8 +85,8 @@
    95.4      IndexSet* def = &_defs[block->_pre_order-1];
    95.5      DEBUG_ONLY(IndexSet *def_outside = getfreeset();)
    95.6      uint i;
    95.7 -    for (i = block->_nodes.size(); i > 1; i--) {
    95.8 -      Node* n = block->_nodes[i-1];
    95.9 +    for (i = block->number_of_nodes(); i > 1; i--) {
   95.10 +      Node* n = block->get_node(i-1);
   95.11        if (n->is_Phi()) {
   95.12          break;
   95.13        }
   95.14 @@ -112,7 +112,7 @@
   95.15  #endif
   95.16      // Remove anything defined by Phis and the block start instruction
   95.17      for (uint k = i; k > 0; k--) {
   95.18 -      uint r = _names[block->_nodes[k - 1]->_idx];
   95.19 +      uint r = _names[block->get_node(k - 1)->_idx];
   95.20        def->insert(r);
   95.21        use->remove(r);
   95.22      }
   95.23 @@ -124,7 +124,7 @@
   95.24  
   95.25        // PhiNode uses go in the live-out set of prior blocks.
   95.26        for (uint k = i; k > 0; k--) {
   95.27 -        add_liveout(p, _names[block->_nodes[k-1]->in(l)->_idx], first_pass);
   95.28 +        add_liveout(p, _names[block->get_node(k-1)->in(l)->_idx], first_pass);
   95.29        }
   95.30      }
   95.31      freeset(block);
   95.32 @@ -254,10 +254,10 @@
   95.33  void PhaseLive::dump( const Block *b ) const {
   95.34    tty->print("Block %d: ",b->_pre_order);
   95.35    tty->print("LiveOut: ");  _live[b->_pre_order-1].dump();
   95.36 -  uint cnt = b->_nodes.size();
   95.37 +  uint cnt = b->number_of_nodes();
   95.38    for( uint i=0; i<cnt; i++ ) {
   95.39 -    tty->print("L%d/", _names[b->_nodes[i]->_idx] );
   95.40 -    b->_nodes[i]->dump();
   95.41 +    tty->print("L%d/", _names[b->get_node(i)->_idx] );
   95.42 +    b->get_node(i)->dump();
   95.43    }
   95.44    tty->print("\n");
   95.45  }
   95.46 @@ -269,7 +269,7 @@
   95.47    for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
   95.48      Block* block = _cfg.get_block(i);
   95.49      for (uint j = block->end_idx() + 1; j > 1; j--) {
   95.50 -      Node* n = block->_nodes[j-1];
   95.51 +      Node* n = block->get_node(j-1);
   95.52        if (n->is_Phi()) {
   95.53          break;
   95.54        }
    96.1 --- a/src/share/vm/opto/machnode.hpp	Fri Sep 06 09:55:38 2013 +0100
    96.2 +++ b/src/share/vm/opto/machnode.hpp	Sat Sep 14 20:40:34 2013 +0100
    96.3 @@ -1,5 +1,5 @@
    96.4  /*
    96.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    96.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    96.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    96.8   *
    96.9   * This code is free software; you can redistribute it and/or modify it
   96.10 @@ -58,7 +58,7 @@
   96.11  class MachOper : public ResourceObj {
   96.12  public:
   96.13    // Allocate right next to the MachNodes in the same arena
   96.14 -  void *operator new( size_t x, Compile* C ) { return C->node_arena()->Amalloc_D(x); }
   96.15 +  void *operator new( size_t x, Compile* C ) throw() { return C->node_arena()->Amalloc_D(x); }
   96.16  
   96.17    // Opcode
   96.18    virtual uint opcode() const = 0;
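
The throw() specifications added here and in node.hpp below matter because these arena overloads of operator new may, in principle, return NULL: C++ permits a null return only from a no-throw operator new, and Clang in particular rejects the unannotated form. A self-contained toy illustrating the idiom (simplified stand-ins, not the HotSpot classes):

    #include <cstddef>
    #include <cstdio>

    // Toy bump-pointer arena (alignment ignored for brevity).
    struct Arena {
      char   buf[1024];
      size_t used;
      void* alloc(size_t x) { void* p = buf + used; used += x; return p; }
    };

    struct Obj {
      int payload;
      // Never throws: memory comes from the arena, so declare throw()
      // (the pre-C++11 spelling of a no-throw guarantee).
      void* operator new(size_t x, Arena* a) throw() { return a->alloc(x); }
    };

    int main() {
      Arena arena;
      arena.used = 0;                 // start of the bump pointer
      Obj* o = new (&arena) Obj();    // routes through the arena overload
      o->payload = 42;
      std::printf("%d\n", o->payload);
      return 0;
    }
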
    97.1 --- a/src/share/vm/opto/macro.cpp	Fri Sep 06 09:55:38 2013 +0100
    97.2 +++ b/src/share/vm/opto/macro.cpp	Sat Sep 14 20:40:34 2013 +0100
    97.3 @@ -72,6 +72,8 @@
    97.4    int jvms_adj  = new_dbg_start - old_dbg_start;
    97.5    assert (new_dbg_start == newcall->req(), "argument count mismatch");
    97.6  
    97.7 +  // A SafePointScalarObject node may be referenced several times in debug info.
    97.8 +  // Use a Dict to record already-cloned nodes so each is cloned only once.
    97.9    Dict* sosn_map = new Dict(cmpkey,hashkey);
   97.10    for (uint i = old_dbg_start; i < oldcall->req(); i++) {
   97.11      Node* old_in = oldcall->in(i);
   97.12 @@ -79,8 +81,8 @@
   97.13      if (old_in != NULL && old_in->is_SafePointScalarObject()) {
   97.14        SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject();
   97.15        uint old_unique = C->unique();
   97.16 -      Node* new_in = old_sosn->clone(jvms_adj, sosn_map);
   97.17 -      if (old_unique != C->unique()) {
   97.18 +      Node* new_in = old_sosn->clone(sosn_map);
   97.19 +      if (old_unique != C->unique()) { // New node?
   97.20          new_in->set_req(0, C->root()); // reset control edge
   97.21          new_in = transform_later(new_in); // Register new node.
   97.22        }
   97.23 @@ -725,7 +727,11 @@
   97.24    while (safepoints.length() > 0) {
   97.25      SafePointNode* sfpt = safepoints.pop();
   97.26      Node* mem = sfpt->memory();
   97.27 -    uint first_ind = sfpt->req();
   97.28 +    assert(sfpt->jvms() != NULL, "missed JVMS");
   97.29 +    // Fields of scalar objs are referenced only at the end
   97.30 +    // of regular debuginfo at the last (youngest) JVMS.
   97.31 +    // Record relative start index.
   97.32 +    uint first_ind = (sfpt->req() - sfpt->jvms()->scloff());
   97.33      SafePointScalarObjectNode* sobj = new (C) SafePointScalarObjectNode(res_type,
   97.34  #ifdef ASSERT
   97.35                                                   alloc,
   97.36 @@ -799,7 +805,7 @@
   97.37            for (int i = start; i < end; i++) {
   97.38              if (sfpt_done->in(i)->is_SafePointScalarObject()) {
   97.39                SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject();
   97.40 -              if (scobj->first_index() == sfpt_done->req() &&
   97.41 +              if (scobj->first_index(jvms) == sfpt_done->req() &&
   97.42                    scobj->n_fields() == (uint)nfields) {
   97.43                  assert(scobj->alloc() == alloc, "sanity");
   97.44                  sfpt_done->set_req(i, res);
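
first_index is now stored relative to the scalar-object offset (scloff) of the youngest JVMS, so readers pass the matching JVMState and let the accessor rebase it, as output.cpp does below with spobj->first_index(sfpt->jvms()). A sketch of the accessor those call sites imply (the committed version lives with SafePointScalarObjectNode in callnode.hpp):

    // Sketch of the rebasing accessor implied by the call sites; the
    // committed declaration is with SafePointScalarObjectNode in callnode.hpp.
    uint SafePointScalarObjectNode::first_index(JVMState* jvms) const {
      assert(jvms != NULL, "missed JVMS");
      return jvms->scloff() + _first_index;  // _first_index is scloff-relative
    }
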
    98.1 --- a/src/share/vm/opto/node.cpp	Fri Sep 06 09:55:38 2013 +0100
    98.2 +++ b/src/share/vm/opto/node.cpp	Sat Sep 14 20:40:34 2013 +0100
    98.3 @@ -773,6 +773,21 @@
    98.4    _in[_cnt] = NULL;       // NULL out emptied slot
    98.5  }
    98.6  
    98.7 +//------------------------------del_req_ordered--------------------------------
    98.8 +// Delete the required edge and compact the edge array, preserving input order
    98.9 +void Node::del_req_ordered( uint idx ) {
   98.10 +  assert( idx < _cnt, "oob");
   98.11 +  assert( !VerifyHashTableKeys || _hash_lock == 0,
   98.12 +          "remove node from hash table before modifying it");
   98.13 +  // First remove corresponding def-use edge
   98.14 +  Node *n = in(idx);
   98.15 +  if (n != NULL) n->del_out((Node *)this);
   98.16 +  if (idx < _cnt - 1) { // Not last edge ?
   98.17 +    Copy::conjoint_words_to_lower((HeapWord*)&_in[idx+1], (HeapWord*)&_in[idx], ((_cnt-idx-1)*sizeof(Node*)));
   98.18 +  }
   98.19 +  _in[--_cnt] = NULL;   // NULL out emptied slot
   98.20 +}
   98.21 +
   98.22  //------------------------------ins_req----------------------------------------
   98.23  // Insert a new required input at the end
   98.24  void Node::ins_req( uint idx, Node *n ) {
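
For contrast with del_req_ordered() above: the existing del_req() compacts in O(1) by moving the last edge into the hole, which reorders the inputs, while the ordered variant pays O(n) to shift the tail down. A toy demonstration on a plain array (a stand-in for Node::_in, not HotSpot code):

    #include <cstring>

    // del_req-style: O(1), fills the hole with the last element (reorders).
    static void del_swap(int* in, unsigned& cnt, unsigned idx) {
      in[idx] = in[--cnt];
    }

    // del_req_ordered-style: O(n), shifts the tail down (order preserved),
    // mirroring the Copy::conjoint_words_to_lower() call in the hunk above.
    static void del_ordered(int* in, unsigned& cnt, unsigned idx) {
      std::memmove(&in[idx], &in[idx + 1], (cnt - idx - 1) * sizeof(int));
      --cnt;
    }
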
    99.1 --- a/src/share/vm/opto/node.hpp	Fri Sep 06 09:55:38 2013 +0100
    99.2 +++ b/src/share/vm/opto/node.hpp	Sat Sep 14 20:40:34 2013 +0100
    99.3 @@ -1,5 +1,5 @@
    99.4  /*
    99.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    99.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    99.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    99.8   *
    99.9   * This code is free software; you can redistribute it and/or modify it
   99.10 @@ -211,7 +211,7 @@
   99.11  
   99.12    // New Operator that takes a Compile pointer, this will eventually
   99.13    // be the "new" New operator.
   99.14 -  inline void* operator new( size_t x, Compile* C) {
   99.15 +  inline void* operator new( size_t x, Compile* C) throw() {
   99.16      Node* n = (Node*)C->node_arena()->Amalloc_D(x);
   99.17  #ifdef ASSERT
   99.18      n->_in = (Node**)n; // magic cookie for assertion check
   99.19 @@ -384,6 +384,7 @@
   99.20    void add_req( Node *n ); // Append a NEW required input
   99.21    void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
   99.22    void del_req( uint idx ); // Delete required edge & compact
   99.23 +  void del_req_ordered( uint idx ); // Delete required edge & compact, preserving order
   99.24    void ins_req( uint i, Node *n ); // Insert a NEW required input
   99.25    void set_req( uint i, Node *n ) {
   99.26      assert( is_not_dead(n), "can not use dead node");
   100.1 --- a/src/share/vm/opto/output.cpp	Fri Sep 06 09:55:38 2013 +0100
   100.2 +++ b/src/share/vm/opto/output.cpp	Sat Sep 14 20:40:34 2013 +0100
   100.3 @@ -57,7 +57,7 @@
   100.4  // Convert Nodes to instruction bits and pass off to the VM
   100.5  void Compile::Output() {
   100.6    // RootNode goes
   100.7 -  assert( _cfg->get_root_block()->_nodes.size() == 0, "" );
   100.8 +  assert( _cfg->get_root_block()->number_of_nodes() == 0, "" );
   100.9  
  100.10    // The number of new nodes (mostly MachNop) is proportional to
  100.11    // the number of java calls and inner loops which are aligned.
  100.12 @@ -70,11 +70,11 @@
  100.13    Block *entry = _cfg->get_block(1);
  100.14    Block *broot = _cfg->get_root_block();
  100.15  
  100.16 -  const StartNode *start = entry->_nodes[0]->as_Start();
  100.17 +  const StartNode *start = entry->head()->as_Start();
  100.18  
  100.19    // Replace StartNode with prolog
  100.20    MachPrologNode *prolog = new (this) MachPrologNode();
  100.21 -  entry->_nodes.map( 0, prolog );
  100.22 +  entry->map_node(prolog, 0);
  100.23    _cfg->map_node_to_block(prolog, entry);
  100.24    _cfg->unmap_node_from_block(start); // start is no longer in any block
  100.25  
  100.26 @@ -144,8 +144,8 @@
  100.27      for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
  100.28        tty->print("\nBB#%03d:\n", i);
  100.29        Block* block = _cfg->get_block(i);
  100.30 -      for (uint j = 0; j < block->_nodes.size(); j++) {
  100.31 -        Node* n = block->_nodes[j];
  100.32 +      for (uint j = 0; j < block->number_of_nodes(); j++) {
  100.33 +        Node* n = block->get_node(j);
  100.34          OptoReg::Name reg = _regalloc->get_reg_first(n);
  100.35          tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
  100.36          n->dump();
  100.37 @@ -226,8 +226,8 @@
  100.38    // Insert call to zap runtime stub before every node with an oop map
  100.39    for( uint i=0; i<_cfg->number_of_blocks(); i++ ) {
  100.40      Block *b = _cfg->get_block(i);
  100.41 -    for ( uint j = 0;  j < b->_nodes.size();  ++j ) {
  100.42 -      Node *n = b->_nodes[j];
  100.43 +    for ( uint j = 0;  j < b->number_of_nodes();  ++j ) {
  100.44 +      Node *n = b->get_node(j);
  100.45  
  100.46        // Determining if we should insert a zap-a-lot node in output.
  100.47        // We do that for all nodes that have oopmap info, except for calls
  100.48 @@ -256,7 +256,7 @@
  100.49          }
  100.50          if (insert) {
  100.51            Node *zap = call_zap_node(n->as_MachSafePoint(), i);
  100.52 -          b->_nodes.insert( j, zap );
  100.53 +          b->insert_node(zap, j);
  100.54            _cfg->map_node_to_block(zap, b);
  100.55            ++j;
  100.56          }
  100.57 @@ -379,10 +379,10 @@
  100.58      DEBUG_ONLY( jmp_rule[i]   = 0; )
  100.59  
  100.60      // Sum all instruction sizes to compute block size
  100.61 -    uint last_inst = block->_nodes.size();
  100.62 +    uint last_inst = block->number_of_nodes();
  100.63      uint blk_size = 0;
  100.64      for (uint j = 0; j < last_inst; j++) {
  100.65 -      Node* nj = block->_nodes[j];
  100.66 +      Node* nj = block->get_node(j);
  100.67        // Handle machine instruction nodes
  100.68        if (nj->is_Mach()) {
  100.69          MachNode *mach = nj->as_Mach();
  100.70 @@ -477,18 +477,18 @@
  100.71      for (uint i = 0; i < nblocks; i++) {
  100.72        Block* block = _cfg->get_block(i);
  100.73        int idx = jmp_nidx[i];
  100.74 -      MachNode* mach = (idx == -1) ? NULL: block->_nodes[idx]->as_Mach();
  100.75 +      MachNode* mach = (idx == -1) ? NULL: block->get_node(idx)->as_Mach();
  100.76        if (mach != NULL && mach->may_be_short_branch()) {
  100.77  #ifdef ASSERT
  100.78          assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
  100.79          int j;
  100.80          // Find the branch; ignore trailing NOPs.
  100.81 -        for (j = block->_nodes.size()-1; j>=0; j--) {
  100.82 -          Node* n = block->_nodes[j];
  100.83 +        for (j = block->number_of_nodes()-1; j>=0; j--) {
  100.84 +          Node* n = block->get_node(j);
  100.85            if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con)
  100.86              break;
  100.87          }
  100.88 -        assert(j >= 0 && j == idx && block->_nodes[j] == (Node*)mach, "sanity");
  100.89 +        assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity");
  100.90  #endif
  100.91          int br_size = jmp_size[i];
  100.92          int br_offs = blk_starts[i] + jmp_offset[i];
  100.93 @@ -522,7 +522,7 @@
  100.94              diff -= nop_size;
  100.95            }
  100.96            adjust_block_start += diff;
  100.97 -          block->_nodes.map(idx, replacement);
  100.98 +          block->map_node(replacement, idx);
  100.99            mach->subsume_by(replacement, C);
 100.100            mach = replacement;
 100.101            progress = true;
 100.102 @@ -639,7 +639,7 @@
 100.103                             new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
 100.104        Compile::set_sv_for_object_node(objs, sv);
 100.105  
 100.106 -      uint first_ind = spobj->first_index();
 100.107 +      uint first_ind = spobj->first_index(sfpt->jvms());
 100.108        for (uint i = 0; i < spobj->n_fields(); i++) {
 100.109          Node* fld_node = sfpt->in(first_ind+i);
 100.110          (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
 100.111 @@ -894,7 +894,7 @@
 100.112      GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
 100.113  
 100.114      // Loop over monitors and insert into array
 100.115 -    for(idx = 0; idx < num_mon; idx++) {
 100.116 +    for (idx = 0; idx < num_mon; idx++) {
 100.117        // Grab the node that defines this monitor
 100.118        Node* box_node = sfn->monitor_box(jvms, idx);
 100.119        Node* obj_node = sfn->monitor_obj(jvms, idx);
 100.120 @@ -902,11 +902,11 @@
 100.121        // Create ScopeValue for object
 100.122        ScopeValue *scval = NULL;
 100.123  
 100.124 -      if( obj_node->is_SafePointScalarObject() ) {
 100.125 +      if (obj_node->is_SafePointScalarObject()) {
 100.126          SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
 100.127          scval = Compile::sv_for_node_id(objs, spobj->_idx);
 100.128          if (scval == NULL) {
 100.129 -          const Type *t = obj_node->bottom_type();
 100.130 +          const Type *t = spobj->bottom_type();
 100.131            ciKlass* cik = t->is_oopptr()->klass();
 100.132            assert(cik->is_instance_klass() ||
 100.133                   cik->is_array_klass(), "Not supported allocation.");
 100.134 @@ -914,14 +914,14 @@
 100.135                                              new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
 100.136            Compile::set_sv_for_object_node(objs, sv);
 100.137  
 100.138 -          uint first_ind = spobj->first_index();
 100.139 +          uint first_ind = spobj->first_index(youngest_jvms);
 100.140            for (uint i = 0; i < spobj->n_fields(); i++) {
 100.141              Node* fld_node = sfn->in(first_ind+i);
 100.142              (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
 100.143            }
 100.144            scval = sv;
 100.145          }
 100.146 -      } else if( !obj_node->is_Con() ) {
 100.147 +      } else if (!obj_node->is_Con()) {
 100.148          OptoReg::Name obj_reg = _regalloc->get_reg_first(obj_node);
 100.149          if( obj_node->bottom_type()->base() == Type::NarrowOop ) {
 100.150            scval = new_loc_value( _regalloc, obj_reg, Location::narrowoop );
 100.151 @@ -1088,8 +1088,8 @@
 100.152      for (uint i = 0; i < _cfg->number_of_blocks(); i++) {
 100.153        Block* b = _cfg->get_block(i);
 100.154  
 100.155 -      for (uint j = 0; j < b->_nodes.size(); j++) {
 100.156 -        Node* n = b->_nodes[j];
 100.157 +      for (uint j = 0; j < b->number_of_nodes(); j++) {
 100.158 +        Node* n = b->get_node(j);
 100.159  
 100.160          // If the node is a MachConstantNode evaluate the constant
 100.161          // value section.
 100.162 @@ -1247,14 +1247,14 @@
 100.163      // Define the label at the beginning of the basic block
 100.164      MacroAssembler(cb).bind(blk_labels[block->_pre_order]);
 100.165  
 100.166 -    uint last_inst = block->_nodes.size();
 100.167 +    uint last_inst = block->number_of_nodes();
 100.168  
 100.169      // Emit block normally, except for last instruction.
 100.170      // Emit means "dump code bits into code buffer".
 100.171      for (uint j = 0; j<last_inst; j++) {
 100.172  
 100.173        // Get the node
 100.174 -      Node* n = block->_nodes[j];
 100.175 +      Node* n = block->get_node(j);
 100.176  
 100.177        // See if delay slots are supported
 100.178        if (valid_bundle_info(n) &&
 100.179 @@ -1308,7 +1308,7 @@
 100.180            assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
 100.181            int nops_cnt = padding / nop_size;
 100.182            MachNode *nop = new (this) MachNopNode(nops_cnt);
 100.183 -          block->_nodes.insert(j++, nop);
 100.184 +          block->insert_node(nop, j++);
 100.185            last_inst++;
 100.186            _cfg->map_node_to_block(nop, block);
 100.187            nop->emit(*cb, _regalloc);
 100.188 @@ -1394,7 +1394,7 @@
 100.189                // Insert padding between avoid_back_to_back branches.
 100.190                if (needs_padding && replacement->avoid_back_to_back()) {
 100.191                  MachNode *nop = new (this) MachNopNode();
 100.192 -                block->_nodes.insert(j++, nop);
 100.193 +                block->insert_node(nop, j++);
 100.194                  _cfg->map_node_to_block(nop, block);
 100.195                  last_inst++;
 100.196                  nop->emit(*cb, _regalloc);
 100.197 @@ -1407,7 +1407,7 @@
 100.198                jmp_size[i]   = new_size;
 100.199                jmp_rule[i]   = mach->rule();
 100.200  #endif
 100.201 -              block->_nodes.map(j, replacement);
 100.202 +              block->map_node(replacement, j);
 100.203                mach->subsume_by(replacement, C);
 100.204                n    = replacement;
 100.205                mach = replacement;
 100.206 @@ -1438,7 +1438,7 @@
 100.207              count++;
 100.208              uint i4;
 100.209              for (i4 = 0; i4 < last_inst; ++i4) {
 100.210 -              if (block->_nodes[i4] == oop_store) {
 100.211 +              if (block->get_node(i4) == oop_store) {
 100.212                  break;
 100.213                }
 100.214              }
 100.215 @@ -1548,7 +1548,7 @@
 100.216        int padding = nb->alignment_padding(current_offset);
 100.217        if( padding > 0 ) {
 100.218          MachNode *nop = new (this) MachNopNode(padding / nop_size);
 100.219 -        block->_nodes.insert(block->_nodes.size(), nop);
 100.220 +        block->insert_node(nop, block->number_of_nodes());
 100.221          _cfg->map_node_to_block(nop, block);
 100.222          nop->emit(*cb, _regalloc);
 100.223          current_offset = cb->insts_size();
 100.224 @@ -1655,8 +1655,8 @@
 100.225      int j;
 100.226  
 100.227      // Find the branch; ignore trailing NOPs.
 100.228 -    for (j = block->_nodes.size() - 1; j >= 0; j--) {
 100.229 -      n = block->_nodes[j];
 100.230 +    for (j = block->number_of_nodes() - 1; j >= 0; j--) {
 100.231 +      n = block->get_node(j);
 100.232        if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) {
 100.233          break;
 100.234        }
 100.235 @@ -1675,8 +1675,8 @@
 100.236        uint call_return = call_returns[block->_pre_order];
 100.237  #ifdef ASSERT
 100.238        assert( call_return > 0, "no call seen for this basic block" );
 100.239 -      while (block->_nodes[--j]->is_MachProj()) ;
 100.240 -      assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call");
 100.241 +      while (block->get_node(--j)->is_MachProj()) ;
 100.242 +      assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
 100.243  #endif
  100.244        // last instruction is a CatchNode, find its CatchProjNodes
 100.245        int nof_succs = block->_num_succs;
 100.246 @@ -1782,7 +1782,7 @@
 100.247    // Get the last node
 100.248    Block* block = _cfg->get_block(_cfg->number_of_blocks() - 1);
 100.249  
 100.250 -  _next_node = block->_nodes[block->_nodes.size() - 1];
 100.251 +  _next_node = block->get_node(block->number_of_nodes() - 1);
 100.252  }
 100.253  
 100.254  #ifndef PRODUCT
 100.255 @@ -1875,7 +1875,7 @@
 100.256      // Used to allow latency 0 to force an instruction to the beginning
 100.257      // of the bb
 100.258      uint latency = 1;
 100.259 -    Node *use = bb->_nodes[j];
 100.260 +    Node *use = bb->get_node(j);
 100.261      uint nlen = use->len();
 100.262  
 100.263      // Walk over all the inputs
 100.264 @@ -2286,7 +2286,7 @@
 100.265         (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
 100.266  
 100.267      // Push any trailing projections
 100.268 -    if( bb->_nodes[bb->_nodes.size()-1] != n ) {
 100.269 +    if( bb->get_node(bb->number_of_nodes()-1) != n ) {
 100.270        for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
 100.271          Node *foi = n->fast_out(i);
 100.272          if( foi->is_Proj() )
 100.273 @@ -2329,21 +2329,21 @@
 100.274    _unconditional_delay_slot = NULL;
 100.275  
 100.276  #ifdef ASSERT
 100.277 -  for( uint i=0; i < bb->_nodes.size(); i++ )
 100.278 -    assert( _uses[bb->_nodes[i]->_idx] == 0, "_use array not clean" );
 100.279 +  for( uint i=0; i < bb->number_of_nodes(); i++ )
 100.280 +    assert( _uses[bb->get_node(i)->_idx] == 0, "_use array not clean" );
 100.281  #endif
 100.282  
  100.283    // Force the _uses count to never go to zero for unschedulable pieces
 100.284    // of the block
 100.285    for( uint k = 0; k < _bb_start; k++ )
 100.286 -    _uses[bb->_nodes[k]->_idx] = 1;
 100.287 -  for( uint l = _bb_end; l < bb->_nodes.size(); l++ )
 100.288 -    _uses[bb->_nodes[l]->_idx] = 1;
 100.289 +    _uses[bb->get_node(k)->_idx] = 1;
 100.290 +  for( uint l = _bb_end; l < bb->number_of_nodes(); l++ )
 100.291 +    _uses[bb->get_node(l)->_idx] = 1;
 100.292  
 100.293    // Iterate backwards over the instructions in the block.  Don't count the
 100.294    // branch projections at end or the block header instructions.
 100.295    for( uint j = _bb_end-1; j >= _bb_start; j-- ) {
 100.296 -    Node *n = bb->_nodes[j];
 100.297 +    Node *n = bb->get_node(j);
 100.298      if( n->is_Proj() ) continue; // Projections handled another way
 100.299  
 100.300      // Account for all uses
 100.301 @@ -2398,8 +2398,8 @@
 100.302  #ifndef PRODUCT
 100.303      if (_cfg->C->trace_opto_output()) {
 100.304        tty->print("#  Schedule BB#%03d (initial)\n", i);
 100.305 -      for (uint j = 0; j < bb->_nodes.size(); j++) {
 100.306 -        bb->_nodes[j]->dump();
 100.307 +      for (uint j = 0; j < bb->number_of_nodes(); j++) {
 100.308 +        bb->get_node(j)->dump();
 100.309        }
 100.310      }
 100.311  #endif
 100.312 @@ -2426,10 +2426,10 @@
 100.313      }
 100.314  
 100.315      // Leave untouched the starting instruction, any Phis, a CreateEx node
 100.316 -    // or Top.  bb->_nodes[_bb_start] is the first schedulable instruction.
 100.317 -    _bb_end = bb->_nodes.size()-1;
 100.318 +    // or Top.  bb->get_node(_bb_start) is the first schedulable instruction.
 100.319 +    _bb_end = bb->number_of_nodes()-1;
 100.320      for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) {
 100.321 -      Node *n = bb->_nodes[_bb_start];
 100.322 +      Node *n = bb->get_node(_bb_start);
  100.323        // Things not matched, like PhiNodes and ProjNodes don't get scheduled.
 100.324        // Also, MachIdealNodes do not get scheduled
 100.325        if( !n->is_Mach() ) continue;     // Skip non-machine nodes
 100.326 @@ -2449,19 +2449,19 @@
 100.327      // in the block), because they have delay slots we can fill.  Calls all
 100.328      // have their delay slots filled in the template expansions, so we don't
 100.329      // bother scheduling them.
 100.330 -    Node *last = bb->_nodes[_bb_end];
 100.331 +    Node *last = bb->get_node(_bb_end);
 100.332      // Ignore trailing NOPs.
 100.333      while (_bb_end > 0 && last->is_Mach() &&
 100.334             last->as_Mach()->ideal_Opcode() == Op_Con) {
 100.335 -      last = bb->_nodes[--_bb_end];
 100.336 +      last = bb->get_node(--_bb_end);
 100.337      }
 100.338      assert(!last->is_Mach() || last->as_Mach()->ideal_Opcode() != Op_Con, "");
 100.339      if( last->is_Catch() ||
 100.340         // Exclude unreachable path case when Halt node is in a separate block.
 100.341         (_bb_end > 1 && last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
 100.342        // There must be a prior call.  Skip it.
 100.343 -      while( !bb->_nodes[--_bb_end]->is_MachCall() ) {
 100.344 -        assert( bb->_nodes[_bb_end]->is_MachProj(), "skipping projections after expected call" );
 100.345 +      while( !bb->get_node(--_bb_end)->is_MachCall() ) {
 100.346 +        assert( bb->get_node(_bb_end)->is_MachProj(), "skipping projections after expected call" );
 100.347        }
 100.348      } else if( last->is_MachNullCheck() ) {
 100.349        // Backup so the last null-checked memory instruction is
 100.350 @@ -2470,7 +2470,7 @@
 100.351        Node *mem = last->in(1);
 100.352        do {
 100.353          _bb_end--;
 100.354 -      } while (mem != bb->_nodes[_bb_end]);
 100.355 +      } while (mem != bb->get_node(_bb_end));
 100.356      } else {
 100.357        // Set _bb_end to point after last schedulable inst.
 100.358        _bb_end++;
 100.359 @@ -2499,7 +2499,7 @@
 100.360      assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" );
 100.361  #ifdef ASSERT
 100.362      for( uint l = _bb_start; l < _bb_end; l++ ) {
 100.363 -      Node *n = bb->_nodes[l];
 100.364 +      Node *n = bb->get_node(l);
 100.365        uint m;
 100.366        for( m = 0; m < _bb_end-_bb_start; m++ )
 100.367          if( _scheduled[m] == n )
 100.368 @@ -2510,14 +2510,14 @@
 100.369  
 100.370      // Now copy the instructions (in reverse order) back to the block
 100.371      for ( uint k = _bb_start; k < _bb_end; k++ )
 100.372 -      bb->_nodes.map(k, _scheduled[_bb_end-k-1]);
 100.373 +      bb->map_node(_scheduled[_bb_end-k-1], k);
 100.374  
 100.375  #ifndef PRODUCT
 100.376      if (_cfg->C->trace_opto_output()) {
 100.377        tty->print("#  Schedule BB#%03d (final)\n", i);
 100.378        uint current = 0;
 100.379 -      for (uint j = 0; j < bb->_nodes.size(); j++) {
 100.380 -        Node *n = bb->_nodes[j];
 100.381 +      for (uint j = 0; j < bb->number_of_nodes(); j++) {
 100.382 +        Node *n = bb->get_node(j);
 100.383          if( valid_bundle_info(n) ) {
 100.384            Bundle *bundle = node_bundling(n);
 100.385            if (bundle->instr_count() > 0 || bundle->flags() > 0) {
 100.386 @@ -2579,8 +2579,8 @@
 100.387    // Walk over the block backwards.  Check to make sure each DEF doesn't
 100.388    // kill a live value (other than the one it's supposed to).  Add each
 100.389    // USE to the live set.
 100.390 -  for( uint i = b->_nodes.size()-1; i >= _bb_start; i-- ) {
 100.391 -    Node *n = b->_nodes[i];
 100.392 +  for( uint i = b->number_of_nodes()-1; i >= _bb_start; i-- ) {
 100.393 +    Node *n = b->get_node(i);
 100.394      int n_op = n->Opcode();
 100.395      if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
 100.396        // Fat-proj kills a slew of registers
 100.397 @@ -2711,7 +2711,7 @@
 100.398          pinch->req() == 1 ) {   // pinch not yet in block?
 100.399        pinch->del_req(0);        // yank pointer to later-def, also set flag
 100.400        // Insert the pinch-point in the block just after the last use
 100.401 -      b->_nodes.insert(b->find_node(use)+1,pinch);
 100.402 +      b->insert_node(pinch, b->find_node(use) + 1);
 100.403        _bb_end++;                // Increase size scheduled region in block
 100.404      }
 100.405  
 100.406 @@ -2763,10 +2763,10 @@
 100.407    // it being in the current block.
 100.408    bool fat_proj_seen = false;
 100.409    uint last_safept = _bb_end-1;
 100.410 -  Node* end_node         = (_bb_end-1 >= _bb_start) ? b->_nodes[last_safept] : NULL;
 100.411 +  Node* end_node         = (_bb_end-1 >= _bb_start) ? b->get_node(last_safept) : NULL;
 100.412    Node* last_safept_node = end_node;
 100.413    for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
 100.414 -    Node *n = b->_nodes[i];
 100.415 +    Node *n = b->get_node(i);
 100.416      int is_def = n->outcnt();   // def if some uses prior to adding precedence edges
 100.417      if( n->is_MachProj() && n->ideal_reg() == MachProjNode::fat_proj ) {
 100.418        // Fat-proj kills a slew of registers
 100.419 @@ -2815,7 +2815,7 @@
 100.420      // Do not allow defs of new derived values to float above GC
 100.421      // points unless the base is definitely available at the GC point.
 100.422  
 100.423 -    Node *m = b->_nodes[i];
 100.424 +    Node *m = b->get_node(i);
 100.425  
 100.426      // Add precedence edge from following safepoint to use of derived pointer
 100.427      if( last_safept_node != end_node &&
 100.428 @@ -2832,11 +2832,11 @@
 100.429  
 100.430      if( n->jvms() ) {           // Precedence edge from derived to safept
 100.431        // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
 100.432 -      if( b->_nodes[last_safept] != last_safept_node ) {
 100.433 +      if( b->get_node(last_safept) != last_safept_node ) {
 100.434          last_safept = b->find_node(last_safept_node);
 100.435        }
 100.436        for( uint j=last_safept; j > i; j-- ) {
 100.437 -        Node *mach = b->_nodes[j];
 100.438 +        Node *mach = b->get_node(j);
 100.439          if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
 100.440            mach->add_prec( n );
 100.441        }
   101.1 --- a/src/share/vm/opto/phaseX.cpp	Fri Sep 06 09:55:38 2013 +0100
   101.2 +++ b/src/share/vm/opto/phaseX.cpp	Sat Sep 14 20:40:34 2013 +0100
   101.3 @@ -1648,10 +1648,10 @@
   101.4      bool block_not_printed = true;
   101.5  
   101.6      // and each instruction within a block
   101.7 -    uint end_index = block->_nodes.size();
   101.8 +    uint end_index = block->number_of_nodes();
   101.9      // block->end_idx() not valid after PhaseRegAlloc
  101.10      for( uint instruction_index = 1; instruction_index < end_index; ++instruction_index ) {
  101.11 -      Node     *n = block->_nodes.at(instruction_index);
  101.12 +      Node     *n = block->get_node(instruction_index);
  101.13        if( n->is_Mach() ) {
  101.14          MachNode *m = n->as_Mach();
  101.15          int deleted_count = 0;
  101.16 @@ -1673,7 +1673,7 @@
  101.17              }
  101.18              // Print instructions being deleted
  101.19              for( int i = (deleted_count - 1); i >= 0; --i ) {
  101.20 -              block->_nodes.at(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr();
  101.21 +              block->get_node(instruction_index-i)->as_Mach()->format(_regalloc); tty->cr();
  101.22              }
  101.23              tty->print_cr("replaced with");
  101.24              // Print new instruction
  101.25 @@ -1687,11 +1687,11 @@
  101.26            //  the node index to live range mappings.)
  101.27            uint safe_instruction_index = (instruction_index - deleted_count);
  101.28            for( ; (instruction_index > safe_instruction_index); --instruction_index ) {
  101.29 -            block->_nodes.remove( instruction_index );
  101.30 +            block->remove_node( instruction_index );
  101.31            }
  101.32            // install new node after safe_instruction_index
  101.33 -          block->_nodes.insert( safe_instruction_index + 1, m2 );
  101.34 -          end_index = block->_nodes.size() - 1; // Recompute new block size
  101.35 +          block->insert_node(m2, safe_instruction_index + 1);
  101.36 +          end_index = block->number_of_nodes() - 1; // Recompute new block size
  101.37            NOT_PRODUCT( inc_peepholes(); )
  101.38          }
  101.39        }
   102.1 --- a/src/share/vm/opto/postaloc.cpp	Fri Sep 06 09:55:38 2013 +0100
   102.2 +++ b/src/share/vm/opto/postaloc.cpp	Sat Sep 14 20:40:34 2013 +0100
   102.3 @@ -423,8 +423,8 @@
   102.4  
   102.5      // Count of Phis in block
   102.6      uint phi_dex;
   102.7 -    for (phi_dex = 1; phi_dex < block->_nodes.size(); phi_dex++) {
   102.8 -      Node* phi = block->_nodes[phi_dex];
   102.9 +    for (phi_dex = 1; phi_dex < block->number_of_nodes(); phi_dex++) {
  102.10 +      Node* phi = block->get_node(phi_dex);
  102.11        if (!phi->is_Phi()) {
  102.12          break;
  102.13        }
  102.14 @@ -439,7 +439,7 @@
  102.15        Block* pb = _cfg.get_block_for_node(block->pred(j));
  102.16        // Remove copies along phi edges
  102.17        for (uint k = 1; k < phi_dex; k++) {
  102.18 -        elide_copy(block->_nodes[k], j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false);
  102.19 +        elide_copy(block->get_node(k), j, block, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false);
  102.20        }
  102.21        if (blk2value[pb->_pre_order]) { // Have a mapping on this edge?
  102.22          // See if this predecessor's mappings have been used by everybody
  102.23 @@ -510,7 +510,7 @@
  102.24      // For all Phi's
  102.25      for (j = 1; j < phi_dex; j++) {
  102.26        uint k;
  102.27 -      Node *phi = block->_nodes[j];
  102.28 +      Node *phi = block->get_node(j);
  102.29        uint pidx = _lrg_map.live_range_id(phi);
  102.30        OptoReg::Name preg = lrgs(_lrg_map.live_range_id(phi)).reg();
  102.31  
  102.32 @@ -522,7 +522,7 @@
  102.33            u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
  102.34        }
  102.35        if (u != NodeSentinel) {    // Junk Phi.  Remove
  102.36 -        block->_nodes.remove(j--);
  102.37 +        block->remove_node(j--);
  102.38          phi_dex--;
  102.39          _cfg.unmap_node_from_block(phi);
  102.40          phi->replace_by(u);
  102.41 @@ -552,8 +552,8 @@
  102.42      }
  102.43  
  102.44      // For all remaining instructions
  102.45 -    for (j = phi_dex; j < block->_nodes.size(); j++) {
  102.46 -      Node* n = block->_nodes[j];
  102.47 +    for (j = phi_dex; j < block->number_of_nodes(); j++) {
  102.48 +      Node* n = block->get_node(j);
  102.49  
  102.50        if(n->outcnt() == 0 &&   // Dead?
  102.51           n != C->top() &&      // (ignore TOP, it has no du info)
   103.1 --- a/src/share/vm/opto/reg_split.cpp	Fri Sep 06 09:55:38 2013 +0100
   103.2 +++ b/src/share/vm/opto/reg_split.cpp	Sat Sep 14 20:40:34 2013 +0100
   103.3 @@ -112,17 +112,17 @@
   103.4  void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) {
   103.5    // Skip intervening ProjNodes.  Do not insert between a ProjNode and
   103.6    // its definer.
   103.7 -  while( i < b->_nodes.size() &&
   103.8 -         (b->_nodes[i]->is_Proj() ||
   103.9 -          b->_nodes[i]->is_Phi() ) )
  103.10 +  while( i < b->number_of_nodes() &&
  103.11 +         (b->get_node(i)->is_Proj() ||
  103.12 +          b->get_node(i)->is_Phi() ) )
  103.13      i++;
  103.14  
   103.15    // Do not insert between a call and its Catch
  103.16 -  if( b->_nodes[i]->is_Catch() ) {
  103.17 +  if( b->get_node(i)->is_Catch() ) {
  103.18      // Put the instruction at the top of the fall-thru block.
  103.19      // Find the fall-thru projection
  103.20      while( 1 ) {
  103.21 -      const CatchProjNode *cp = b->_nodes[++i]->as_CatchProj();
  103.22 +      const CatchProjNode *cp = b->get_node(++i)->as_CatchProj();
  103.23        if( cp->_con == CatchProjNode::fall_through_index )
  103.24          break;
  103.25      }
  103.26 @@ -131,7 +131,7 @@
  103.27      i = 1;                      // Right at start of block
  103.28    }
  103.29  
  103.30 -  b->_nodes.insert(i,spill);    // Insert node in block
  103.31 +  b->insert_node(spill, i);    // Insert node in block
  103.32    _cfg.map_node_to_block(spill,  b); // Update node->block mapping to reflect
  103.33    // Adjust the point where we go hi-pressure
  103.34    if( i <= b->_ihrp_index ) b->_ihrp_index++;
  103.35 @@ -160,9 +160,9 @@
  103.36    // (The implicit_null_check function ensures the use is also dominated
  103.37    // by the branch-not-taken block.)
  103.38    Node *be = b->end();
  103.39 -  if( be->is_MachNullCheck() && be->in(1) == def && def == b->_nodes[loc] ) {
  103.40 +  if( be->is_MachNullCheck() && be->in(1) == def && def == b->get_node(loc)) {
  103.41      // Spill goes in the branch-not-taken block
  103.42 -    b = b->_succs[b->_nodes[b->end_idx()+1]->Opcode() == Op_IfTrue];
  103.43 +    b = b->_succs[b->get_node(b->end_idx()+1)->Opcode() == Op_IfTrue];
  103.44      loc = 0;                    // Just past the Region
  103.45    }
  103.46    assert( loc >= 0, "must insert past block head" );
  103.47 @@ -450,7 +450,7 @@
  103.48  
  103.49    // Scan block for 1st use.
  103.50    for( uint i = 1; i <= b->end_idx(); i++ ) {
  103.51 -    Node *n = b->_nodes[i];
  103.52 +    Node *n = b->get_node(i);
  103.53      // Ignore PHI use, these can be up or down
  103.54      if (n->is_Phi()) {
  103.55        continue;
  103.56 @@ -647,7 +647,7 @@
  103.57  
  103.58        // check block for appropriate phinode & update edges
  103.59        for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
  103.60 -        n1 = b->_nodes[insidx];
  103.61 +        n1 = b->get_node(insidx);
  103.62          // bail if this is not a phi
  103.63          phi = n1->is_Phi() ? n1->as_Phi() : NULL;
  103.64          if( phi == NULL ) {
  103.65 @@ -747,7 +747,7 @@
  103.66      //----------Walk Instructions in the Block and Split----------
  103.67      // For all non-phi instructions in the block
  103.68      for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
  103.69 -      Node *n = b->_nodes[insidx];
  103.70 +      Node *n = b->get_node(insidx);
  103.71        // Find the defining Node's live range index
  103.72        uint defidx = _lrg_map.find_id(n);
  103.73        uint cnt = n->req();
  103.74 @@ -776,7 +776,7 @@
  103.75                assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg");
  103.76                n->replace_by(u); // Then replace with unique input
  103.77                n->disconnect_inputs(NULL, C);
  103.78 -              b->_nodes.remove(insidx);
  103.79 +              b->remove_node(insidx);
  103.80                insidx--;
  103.81                b->_ihrp_index--;
  103.82                b->_fhrp_index--;
  103.83 @@ -789,12 +789,12 @@
  103.84                (b->_reg_pressure < (uint)INTPRESSURE) ||
  103.85                b->_ihrp_index > 4000000 ||
  103.86                b->_ihrp_index >= b->end_idx() ||
  103.87 -              !b->_nodes[b->_ihrp_index]->is_Proj(), "" );
  103.88 +              !b->get_node(b->_ihrp_index)->is_Proj(), "" );
  103.89        assert( insidx > b->_fhrp_index ||
  103.90                (b->_freg_pressure < (uint)FLOATPRESSURE) ||
  103.91                b->_fhrp_index > 4000000 ||
  103.92                b->_fhrp_index >= b->end_idx() ||
  103.93 -              !b->_nodes[b->_fhrp_index]->is_Proj(), "" );
  103.94 +              !b->get_node(b->_fhrp_index)->is_Proj(), "" );
  103.95  
   103.96        // ********** Handle Crossing HRP Boundary **********
  103.97        if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) {
  103.98 @@ -819,7 +819,7 @@
  103.99                  // Insert point is just past last use or def in the block
 103.100                  int insert_point = insidx-1;
 103.101                  while( insert_point > 0 ) {
 103.102 -                  Node *n = b->_nodes[insert_point];
 103.103 +                  Node *n = b->get_node(insert_point);
 103.104                    // Hit top of block?  Quit going backwards
 103.105                    if (n->is_Phi()) {
 103.106                      break;
 103.107 @@ -865,7 +865,7 @@
 103.108              }
 103.109            }  // end if LRG is UP
 103.110          }  // end for all spilling live ranges
 103.111 -        assert( b->_nodes[insidx] == n, "got insidx set incorrectly" );
 103.112 +        assert( b->get_node(insidx) == n, "got insidx set incorrectly" );
  103.113        }  // end if crossing HRP Boundary
 103.114  
 103.115        // If the LRG index is oob, then this is a new spillcopy, skip it.
 103.116 @@ -878,7 +878,7 @@
 103.117        if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) {
 103.118          n->replace_by( n->in(copyidx) );
 103.119          n->set_req( copyidx, NULL );
 103.120 -        b->_nodes.remove(insidx--);
 103.121 +        b->remove_node(insidx--);
 103.122          b->_ihrp_index--; // Adjust the point where we go hi-pressure
 103.123          b->_fhrp_index--;
 103.124          continue;
 103.125 @@ -932,10 +932,10 @@
 103.126              // Rematerializable?  Then clone def at use site instead
 103.127              // of store/load
 103.128              if( def->rematerialize() ) {
 103.129 -              int old_size = b->_nodes.size();
 103.130 +              int old_size = b->number_of_nodes();
 103.131                def = split_Rematerialize( def, b, insidx, maxlrg, splits, slidx, lrg2reach, Reachblock, true );
 103.132                if( !def ) return 0; // Bail out
 103.133 -              insidx += b->_nodes.size()-old_size;
 103.134 +              insidx += b->number_of_nodes()-old_size;
 103.135              }
 103.136  
 103.137              MachNode *mach = n->is_Mach() ? n->as_Mach() : NULL;
 103.138 @@ -1332,8 +1332,8 @@
 103.139          // so look at the node before it.
 103.140          int insert = pred->end_idx();
 103.141          while (insert >= 1 &&
 103.142 -               pred->_nodes[insert - 1]->is_SpillCopy() &&
 103.143 -               _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) {
 103.144 +               pred->get_node(insert - 1)->is_SpillCopy() &&
 103.145 +               _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) {
 103.146            insert--;
 103.147          }
 103.148          def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
 103.149 @@ -1402,7 +1402,7 @@
 103.150    for (bidx = 0; bidx < _cfg.number_of_blocks(); bidx++) {
 103.151      b  = _cfg.get_block(bidx);
 103.152      for (insidx = 0; insidx <= b->end_idx(); insidx++) {
 103.153 -      Node *n = b->_nodes[insidx];
 103.154 +      Node *n = b->get_node(insidx);
 103.155        uint defidx = _lrg_map.find(n);
 103.156        assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split");
 103.157        assert(defidx < maxlrg,"Bad live range index in Split");
   104.1 --- a/src/share/vm/opto/type.hpp	Fri Sep 06 09:55:38 2013 +0100
   104.2 +++ b/src/share/vm/opto/type.hpp	Sat Sep 14 20:40:34 2013 +0100
   104.3 @@ -1,5 +1,5 @@
   104.4  /*
   104.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   104.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   104.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   104.8   *
   104.9   * This code is free software; you can redistribute it and/or modify it
  104.10 @@ -169,7 +169,7 @@
  104.11  
  104.12  public:
  104.13  
  104.14 -  inline void* operator new( size_t x ) {
  104.15 +  inline void* operator new( size_t x ) throw() {
  104.16      Compile* compile = Compile::current();
  104.17      compile->set_type_last_size(x);
  104.18      void *temp = compile->type_arena()->Amalloc_D(x);
   105.1 --- a/src/share/vm/runtime/arguments.cpp	Fri Sep 06 09:55:38 2013 +0100
   105.2 +++ b/src/share/vm/runtime/arguments.cpp	Sat Sep 14 20:40:34 2013 +0100
   105.3 @@ -1605,17 +1605,6 @@
   105.4    return result;
   105.5  }
   105.6  
   105.7 -void Arguments::set_heap_base_min_address() {
   105.8 -  if (FLAG_IS_DEFAULT(HeapBaseMinAddress) && UseG1GC && HeapBaseMinAddress < 1*G) {
   105.9 -    // By default HeapBaseMinAddress is 2G on all platforms except Solaris x86.
  105.10 -    // G1 currently needs a lot of C-heap, so on Solaris we have to give G1
  105.11 -    // some extra space for the C-heap compared to other collectors.
  105.12 -    // Use FLAG_SET_DEFAULT here rather than FLAG_SET_ERGO to make sure that
  105.13 -    // code that checks for default values work correctly.
  105.14 -    FLAG_SET_DEFAULT(HeapBaseMinAddress, 1*G);
  105.15 -  }
  105.16 -}
  105.17 -
  105.18  void Arguments::set_heap_size() {
  105.19    if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) {
  105.20      // Deprecated flag
  105.21 @@ -2230,7 +2219,7 @@
  105.22    // among the distinct pages.
  105.23    if (ContendedPaddingWidth < 0 || ContendedPaddingWidth > 8192) {
  105.24      jio_fprintf(defaultStream::error_stream(),
  105.25 -                "ContendedPaddingWidth=" INTX_FORMAT " must be the between %d and %d\n",
  105.26 +                "ContendedPaddingWidth=" INTX_FORMAT " must be in between %d and %d\n",
  105.27                  ContendedPaddingWidth, 0, 8192);
  105.28      status = false;
  105.29    }
  105.30 @@ -2239,7 +2228,7 @@
  105.31    // It is sufficient to check against the largest type size.
  105.32    if ((ContendedPaddingWidth % BytesPerLong) != 0) {
  105.33      jio_fprintf(defaultStream::error_stream(),
  105.34 -                "ContendedPaddingWidth=" INTX_FORMAT " must be the multiple of %d\n",
  105.35 +                "ContendedPaddingWidth=" INTX_FORMAT " must be a multiple of %d\n",
  105.36                  ContendedPaddingWidth, BytesPerLong);
  105.37      status = false;
  105.38    }
  105.39 @@ -3537,8 +3526,6 @@
  105.40      }
  105.41    }
  105.42  
  105.43 -  set_heap_base_min_address();
  105.44 -
  105.45    // Set heap size based on available physical memory
  105.46    set_heap_size();
  105.47  
   106.1 --- a/src/share/vm/runtime/arguments.hpp	Fri Sep 06 09:55:38 2013 +0100
   106.2 +++ b/src/share/vm/runtime/arguments.hpp	Sat Sep 14 20:40:34 2013 +0100
   106.3 @@ -334,8 +334,6 @@
   106.4    // limits the given memory size by the maximum amount of memory this process is
   106.5    // currently allowed to allocate or reserve.
   106.6    static julong limit_by_allocatable_memory(julong size);
   106.7 -  // Setup HeapBaseMinAddress
   106.8 -  static void set_heap_base_min_address();
   106.9    // Setup heap size
  106.10    static void set_heap_size();
  106.11    // Based on automatic selection criteria, should the
   107.1 --- a/src/share/vm/runtime/fprofiler.cpp	Fri Sep 06 09:55:38 2013 +0100
   107.2 +++ b/src/share/vm/runtime/fprofiler.cpp	Sat Sep 14 20:40:34 2013 +0100
   107.3 @@ -264,7 +264,7 @@
   107.4  
   107.5   public:
   107.6  
   107.7 -  void* operator new(size_t size, ThreadProfiler* tp);
   107.8 +  void* operator new(size_t size, ThreadProfiler* tp) throw();
   107.9    void  operator delete(void* p);
  107.10  
  107.11    ProfilerNode() {
  107.12 @@ -373,7 +373,7 @@
  107.13    }
  107.14  };
  107.15  
  107.16 -void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp){
  107.17 +void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp) throw() {
  107.18    void* result = (void*) tp->area_top;
  107.19    tp->area_top += size;
  107.20  
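
This class-scoped new is a bump allocator: it hands out the current top of the profiler's pre-reserved area and advances it by size, and call sites select it with the placement form new (tp) SomeNode(...). A compilable sketch of the same pattern with invented names (the overflow check is an addition for the sketch; this hunk shows only the bump):

    #include <cstddef>

    struct Area {
      char* top;
      char* limit;
    };

    struct Sample {
      // bump-pointer placement new; throw() marks it non-throwing so a
      // NULL result is checked by the new-expression rather than ignored
      void* operator new(std::size_t size, Area* a) throw() {
        if (a->top + size > a->limit) return NULL;  // area exhausted
        void* p = a->top;
        a->top += size;
        return p;
      }
      void operator delete(void* p, Area* a) throw() {}  // area memory: no-op
      long ticks;
    };

    int main() {
      static union { double align; char bytes[256]; } backing;
      Area area = { backing.bytes, backing.bytes + sizeof(backing.bytes) };
      Sample* s = new (&area) Sample();  // carved out of 'backing'
      if (s != NULL) s->ticks = 1;
      return 0;
    }
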
  107.21 @@ -925,6 +925,8 @@
  107.22        FlatProfiler::interval_print();
  107.23        FlatProfiler::interval_reset();
  107.24      }
  107.25 +
  107.26 +    FREE_C_HEAP_ARRAY(JavaThread *, threadsList, mtInternal);
  107.27    } else {
  107.28      // Couldn't get the threads lock, just record that rather than blocking
  107.29      FlatProfiler::threads_lock_ticks += 1;
   108.1 --- a/src/share/vm/runtime/globals.cpp	Fri Sep 06 09:55:38 2013 +0100
   108.2 +++ b/src/share/vm/runtime/globals.cpp	Sat Sep 14 20:40:34 2013 +0100
   108.3 @@ -205,6 +205,7 @@
   108.4  
   108.5  #define C1_PRODUCT_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 product}", DEFAULT },
   108.6  #define C1_PD_PRODUCT_FLAG_STRUCT(type, name, doc)     { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 pd product}", DEFAULT },
   108.7 +#define C1_DIAGNOSTIC_FLAG_STRUCT(type, name, value, doc) { #type, XSTR(name), &name, NOT_PRODUCT_ARG(doc) "{C1 diagnostic}", DEFAULT },
   108.8  #ifdef PRODUCT
   108.9    #define C1_DEVELOP_FLAG_STRUCT(type, name, value, doc) /* flag is constant */
  108.10    #define C1_PD_DEVELOP_FLAG_STRUCT(type, name, doc)     /* flag is constant */
  108.11 @@ -260,7 +261,7 @@
  108.12   G1_FLAGS(RUNTIME_DEVELOP_FLAG_STRUCT, RUNTIME_PD_DEVELOP_FLAG_STRUCT, RUNTIME_PRODUCT_FLAG_STRUCT, RUNTIME_PD_PRODUCT_FLAG_STRUCT, RUNTIME_DIAGNOSTIC_FLAG_STRUCT, RUNTIME_EXPERIMENTAL_FLAG_STRUCT, RUNTIME_NOTPRODUCT_FLAG_STRUCT, RUNTIME_MANAGEABLE_FLAG_STRUCT, RUNTIME_PRODUCT_RW_FLAG_STRUCT)
  108.13  #endif // INCLUDE_ALL_GCS
  108.14  #ifdef COMPILER1
  108.15 - C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
  108.16 + C1_FLAGS(C1_DEVELOP_FLAG_STRUCT, C1_PD_DEVELOP_FLAG_STRUCT, C1_PRODUCT_FLAG_STRUCT, C1_PD_PRODUCT_FLAG_STRUCT, C1_DIAGNOSTIC_FLAG_STRUCT, C1_NOTPRODUCT_FLAG_STRUCT)
  108.17  #endif
  108.18  #ifdef COMPILER2
  108.19   C2_FLAGS(C2_DEVELOP_FLAG_STRUCT, C2_PD_DEVELOP_FLAG_STRUCT, C2_PRODUCT_FLAG_STRUCT, C2_PD_PRODUCT_FLAG_STRUCT, C2_DIAGNOSTIC_FLAG_STRUCT, C2_EXPERIMENTAL_FLAG_STRUCT, C2_NOTPRODUCT_FLAG_STRUCT)
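
This table hunk, together with the matching C1_DIAGNOSTIC_FLAG_MEMBER additions in globals_extension.hpp below, wires a diagnostic slot into the C1 flag tables, so c1_globals.hpp can declare diagnostic(...) flags the way the runtime and C2 tables already do. A hypothetical declaration in the usual flag-table style (the flag name is invented for illustration):

      diagnostic(bool, C1ExampleDiagnostic, false,                            \
              "Hypothetical C1 diagnostic flag, shown for illustration only") \
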
   109.1 --- a/src/share/vm/runtime/globals.hpp	Fri Sep 06 09:55:38 2013 +0100
   109.2 +++ b/src/share/vm/runtime/globals.hpp	Sat Sep 14 20:40:34 2013 +0100
   109.3 @@ -3514,6 +3514,8 @@
   109.4            "Temporary flag for transition to AbstractMethodError wrapped "   \
   109.5            "in InvocationTargetException. See 6531596")                      \
   109.6                                                                              \
   109.7 +  develop(bool, VerifyLambdaBytecodes, false,                               \
   109.8 +          "Force verification of jdk 8 lambda metafactory bytecodes.")      \
   109.9                                                                              \
  109.10    develop(intx, FastSuperclassLimit, 8,                                     \
  109.11            "Depth of hardwired instanceof accelerator array")                \
  109.12 @@ -3685,15 +3687,9 @@
  109.13    develop(bool, TraceDefaultMethods, false,                                 \
  109.14            "Trace the default method processing steps")                      \
  109.15                                                                              \
  109.16 -  develop(bool, ParseAllGenericSignatures, false,                           \
  109.17 -          "Parse all generic signatures while classloading")                \
  109.18 -                                                                            \
  109.19    develop(bool, VerifyGenericSignatures, false,                             \
  109.20            "Abort VM on erroneous or inconsistent generic signatures")       \
  109.21                                                                              \
  109.22 -  product(bool, ParseGenericDefaults, false,                                \
  109.23 -          "Parse generic signatures for default method handling")           \
  109.24 -                                                                            \
  109.25    product(bool, UseVMInterruptibleIO, false,                                \
  109.26            "(Unstable, Solaris-specific) Thread interrupt before or with "   \
  109.27            "EINTR for I/O operations results in OS_INTRPT. The default value"\
   110.1 --- a/src/share/vm/runtime/globals_extension.hpp	Fri Sep 06 09:55:38 2013 +0100
   110.2 +++ b/src/share/vm/runtime/globals_extension.hpp	Sat Sep 14 20:40:34 2013 +0100
   110.3 @@ -57,6 +57,7 @@
   110.4  
   110.5  #define C1_PRODUCT_FLAG_MEMBER(type, name, value, doc)         FLAG_MEMBER(name),
   110.6  #define C1_PD_PRODUCT_FLAG_MEMBER(type, name, doc)             FLAG_MEMBER(name),
   110.7 +#define C1_DIAGNOSTIC_FLAG_MEMBER(type, name, value, doc)      FLAG_MEMBER(name),
   110.8  #ifdef PRODUCT
   110.9    #define C1_DEVELOP_FLAG_MEMBER(type, name, value, doc)       /* flag is constant */
  110.10    #define C1_PD_DEVELOP_FLAG_MEMBER(type, name, doc)           /* flag is constant */
  110.11 @@ -99,7 +100,7 @@
  110.12   G1_FLAGS(RUNTIME_DEVELOP_FLAG_MEMBER, RUNTIME_PD_DEVELOP_FLAG_MEMBER, RUNTIME_PRODUCT_FLAG_MEMBER, RUNTIME_PD_PRODUCT_FLAG_MEMBER, RUNTIME_DIAGNOSTIC_FLAG_MEMBER, RUNTIME_EXPERIMENTAL_FLAG_MEMBER, RUNTIME_NOTPRODUCT_FLAG_MEMBER, RUNTIME_MANAGEABLE_FLAG_MEMBER, RUNTIME_PRODUCT_RW_FLAG_MEMBER)
  110.13  #endif // INCLUDE_ALL_GCS
  110.14  #ifdef COMPILER1
  110.15 - C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
  110.16 + C1_FLAGS(C1_DEVELOP_FLAG_MEMBER, C1_PD_DEVELOP_FLAG_MEMBER, C1_PRODUCT_FLAG_MEMBER, C1_PD_PRODUCT_FLAG_MEMBER, C1_DIAGNOSTIC_FLAG_MEMBER, C1_NOTPRODUCT_FLAG_MEMBER)
  110.17  #endif
  110.18  #ifdef COMPILER2
  110.19   C2_FLAGS(C2_DEVELOP_FLAG_MEMBER, C2_PD_DEVELOP_FLAG_MEMBER, C2_PRODUCT_FLAG_MEMBER, C2_PD_PRODUCT_FLAG_MEMBER, C2_DIAGNOSTIC_FLAG_MEMBER, C2_EXPERIMENTAL_FLAG_MEMBER, C2_NOTPRODUCT_FLAG_MEMBER)
  110.20 @@ -131,6 +132,7 @@
  110.21  
  110.22  #define C1_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)         FLAG_MEMBER_WITH_TYPE(name,type),
  110.23  #define C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE(type, name, doc)             FLAG_MEMBER_WITH_TYPE(name,type),
  110.24 +#define C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)      FLAG_MEMBER_WITH_TYPE(name,type),
  110.25  #ifdef PRODUCT
  110.26    #define C1_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, value, doc)       /* flag is constant */
  110.27    #define C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE(type, name, doc)           /* flag is constant */
  110.28 @@ -204,6 +206,7 @@
  110.29            C1_PD_DEVELOP_FLAG_MEMBER_WITH_TYPE,
  110.30            C1_PRODUCT_FLAG_MEMBER_WITH_TYPE,
  110.31            C1_PD_PRODUCT_FLAG_MEMBER_WITH_TYPE,
  110.32 +          C1_DIAGNOSTIC_FLAG_MEMBER_WITH_TYPE,
  110.33            C1_NOTPRODUCT_FLAG_MEMBER_WITH_TYPE)
  110.34  #endif
  110.35  #ifdef COMPILER2
   111.1 --- a/src/share/vm/runtime/handles.cpp	Fri Sep 06 09:55:38 2013 +0100
   111.2 +++ b/src/share/vm/runtime/handles.cpp	Sat Sep 14 20:40:34 2013 +0100
   111.3 @@ -1,5 +1,5 @@
   111.4  /*
   111.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   111.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   111.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   111.8   *
   111.9   * This code is free software; you can redistribute it and/or modify it
  111.10 @@ -179,11 +179,11 @@
  111.11    _thread->set_last_handle_mark(previous_handle_mark());
  111.12  }
  111.13  
  111.14 -void* HandleMark::operator new(size_t size) {
  111.15 +void* HandleMark::operator new(size_t size) throw() {
  111.16    return AllocateHeap(size, mtThread);
  111.17  }
  111.18  
  111.19 -void* HandleMark::operator new [] (size_t size) {
  111.20 +void* HandleMark::operator new [] (size_t size) throw() {
  111.21    return AllocateHeap(size, mtThread);
  111.22  }
  111.23  
   112.1 --- a/src/share/vm/runtime/handles.hpp	Fri Sep 06 09:55:38 2013 +0100
   112.2 +++ b/src/share/vm/runtime/handles.hpp	Sat Sep 14 20:40:34 2013 +0100
   112.3 @@ -309,8 +309,8 @@
   112.4    // called in the destructor of HandleMarkCleaner
   112.5    void pop_and_restore();
   112.6    // overloaded operators
   112.7 -  void* operator new(size_t size);
   112.8 -  void* operator new [](size_t size);
   112.9 +  void* operator new(size_t size) throw();
  112.10 +  void* operator new [](size_t size) throw();
  112.11    void operator delete(void* p);
  112.12    void operator delete[](void* p);
  112.13  };
   113.1 --- a/src/share/vm/runtime/interfaceSupport.hpp	Fri Sep 06 09:55:38 2013 +0100
   113.2 +++ b/src/share/vm/runtime/interfaceSupport.hpp	Sat Sep 14 20:40:34 2013 +0100
   113.3 @@ -1,5 +1,5 @@
   113.4  /*
   113.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   113.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   113.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   113.8   *
   113.9   * This code is free software; you can redistribute it and/or modify it
  113.10 @@ -56,7 +56,7 @@
  113.11    }
  113.12  
  113.13   private:
  113.14 -  inline void* operator new(size_t size, void* ptr) {
  113.15 +  inline void* operator new(size_t size, void* ptr) throw() {
  113.16      return ptr;
  113.17    }
  113.18  };
   114.1 --- a/src/share/vm/runtime/objectMonitor.hpp	Fri Sep 06 09:55:38 2013 +0100
   114.2 +++ b/src/share/vm/runtime/objectMonitor.hpp	Sat Sep 14 20:40:34 2013 +0100
   114.3 @@ -312,10 +312,10 @@
   114.4   public:
   114.5    static int Knob_Verbose;
   114.6    static int Knob_SpinLimit;
   114.7 -  void* operator new (size_t size) {
   114.8 +  void* operator new (size_t size) throw() {
   114.9      return AllocateHeap(size, mtInternal);
  114.10    }
  114.11 -  void* operator new[] (size_t size) {
  114.12 +  void* operator new[] (size_t size) throw() {
  114.13      return operator new (size);
  114.14    }
  114.15    void operator delete(void* p) {
   115.1 --- a/src/share/vm/runtime/os.cpp	Fri Sep 06 09:55:38 2013 +0100
   115.2 +++ b/src/share/vm/runtime/os.cpp	Sat Sep 14 20:40:34 2013 +0100
   115.3 @@ -1482,44 +1482,6 @@
   115.4    return result;
   115.5  }
   115.6  
   115.7 -// Read file line by line, if line is longer than bsize,
   115.8 -// skip rest of line.
   115.9 -int os::get_line_chars(int fd, char* buf, const size_t bsize){
  115.10 -  size_t sz, i = 0;
  115.11 -
  115.12 -  // read until EOF, EOL or buf is full
  115.13 -  while ((sz = (int) read(fd, &buf[i], 1)) == 1 && i < (bsize-2) && buf[i] != '\n') {
  115.14 -     ++i;
  115.15 -  }
  115.16 -
  115.17 -  if (buf[i] == '\n') {
  115.18 -    // EOL reached so ignore EOL character and return
  115.19 -
  115.20 -    buf[i] = 0;
  115.21 -    return (int) i;
  115.22 -  }
  115.23 -
  115.24 -  buf[i+1] = 0;
  115.25 -
  115.26 -  if (sz != 1) {
  115.27 -    // EOF reached. if we read chars before EOF return them and
  115.28 -    // return EOF on next call otherwise return EOF
  115.29 -
  115.30 -    return (i == 0) ? -1 : (int) i;
  115.31 -  }
  115.32 -
  115.33 -  // line is longer than size of buf, skip to EOL
  115.34 -  char ch;
  115.35 -  while (read(fd, &ch, 1) == 1 && ch != '\n') {
  115.36 -    // Do nothing
  115.37 -  }
  115.38 -
  115.39 -  // return initial part of line that fits in buf.
  115.40 -  // If we reached EOF, it will be returned on next call.
  115.41 -
  115.42 -  return (int) i;
  115.43 -}
  115.44 -
  115.45  void os::SuspendedThreadTask::run() {
  115.46    assert(Threads_lock->owned_by_self() || (_thread == VMThread::vm_thread()), "must have threads lock to call this");
  115.47    internal_do_task();
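
The deleted os::get_line_chars read one line per call into a fixed buffer, stripped the newline, discarded any overflow up to end-of-line, and returned -1 only at end-of-file. Standard stdio comes close where equivalent behavior is wanted; a sketch over a FILE* rather than a raw descriptor (so buffering semantics differ slightly), assuming nothing from HotSpot:

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    // Read one line into buf (capacity bsize), truncating long lines and
    // discarding the remainder. Returns chars kept, or -1 at end of file.
    static int get_line(std::FILE* f, char* buf, std::size_t bsize) {
      if (std::fgets(buf, (int)bsize, f) == NULL) return -1;  // EOF (or error)
      std::size_t len = std::strlen(buf);
      if (len > 0 && buf[len - 1] == '\n') {
        buf[--len] = '\0';                  // strip the newline
      } else {
        int ch;                             // line overflowed: skip to EOL
        while ((ch = std::fgetc(f)) != EOF && ch != '\n') { }
      }
      return (int)len;
    }
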
   116.1 --- a/src/share/vm/runtime/os.hpp	Fri Sep 06 09:55:38 2013 +0100
   116.2 +++ b/src/share/vm/runtime/os.hpp	Sat Sep 14 20:40:34 2013 +0100
   116.3 @@ -738,10 +738,6 @@
   116.4    // Hook for os specific jvm options that we don't want to abort on seeing
   116.5    static bool obsolete_option(const JavaVMOption *option);
   116.6  
   116.7 -  // Read file line by line. If line is longer than bsize,
   116.8 -  // rest of line is skipped. Returns number of bytes read or -1 on EOF
   116.9 -  static int get_line_chars(int fd, char *buf, const size_t bsize);
  116.10 -
  116.11    // Extensions
  116.12  #include "runtime/os_ext.hpp"
  116.13  
   117.1 --- a/src/share/vm/runtime/park.cpp	Fri Sep 06 09:55:38 2013 +0100
   117.2 +++ b/src/share/vm/runtime/park.cpp	Sat Sep 14 20:40:34 2013 +0100
   117.3 @@ -1,5 +1,5 @@
   117.4  /*
   117.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   117.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   117.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   117.8   *
   117.9   * This code is free software; you can redistribute it and/or modify it
  117.10 @@ -140,7 +140,7 @@
  117.11  // well as bank access imbalance on Niagara-like platforms,
  117.12  // although Niagara's hash function should help.
  117.13  
  117.14 -void * ParkEvent::operator new (size_t sz) {
  117.15 +void * ParkEvent::operator new (size_t sz) throw() {
  117.16    return (void *) ((intptr_t (AllocateHeap(sz + 256, mtInternal, CALLER_PC)) + 256) & -256) ;
  117.17  }
  117.18  
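
The expression above over-allocates by 256 bytes and rounds up, so every ParkEvent lands on a 256-byte boundary; the park.hpp comment below notes this guarantees a zero least-significant address byte. The rounding idiom in isolation, sketched with plain malloc (deallocation is deliberately ignored here, since rounding discards the raw pointer):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Round an over-allocated block up to the next 256-byte boundary.
    static void* alloc_aligned_256(std::size_t sz) {
      void* raw = std::malloc(sz + 256);
      if (raw == NULL) return NULL;
      return (void*)(((std::uintptr_t)raw + 256) & ~(std::uintptr_t)255);
    }

    int main() {
      void* p = alloc_aligned_256(64);
      std::printf("low byte: %u\n", (unsigned)((std::uintptr_t)p & 0xff));  // prints 0
      return 0;
    }
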
   118.1 --- a/src/share/vm/runtime/park.hpp	Fri Sep 06 09:55:38 2013 +0100
   118.2 +++ b/src/share/vm/runtime/park.hpp	Sat Sep 14 20:40:34 2013 +0100
   118.3 @@ -1,5 +1,5 @@
   118.4  /*
   118.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   118.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   118.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   118.8   *
   118.9   * This code is free software; you can redistribute it and/or modify it
  118.10 @@ -166,7 +166,7 @@
  118.11      // aligned on 256-byte address boundaries.  This ensures that the least
  118.12      // significant byte of a ParkEvent address is always 0.
  118.13  
  118.14 -    void * operator new (size_t sz) ;
  118.15 +    void * operator new (size_t sz) throw();
  118.16      void operator delete (void * a) ;
  118.17  
  118.18    public:
   119.1 --- a/src/share/vm/runtime/sharedRuntime.cpp	Fri Sep 06 09:55:38 2013 +0100
   119.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp	Sat Sep 14 20:40:34 2013 +0100
   119.3 @@ -1051,7 +1051,8 @@
   119.4  
   119.5    // Find receiver for non-static call
   119.6    if (bc != Bytecodes::_invokestatic &&
   119.7 -      bc != Bytecodes::_invokedynamic) {
   119.8 +      bc != Bytecodes::_invokedynamic &&
   119.9 +      bc != Bytecodes::_invokehandle) {
   119.10      // This register map must be updated since we need to find the receiver for
  119.11      // compiled frames. The receiver might be in a register.
  119.12      RegisterMap reg_map2(thread);
  119.13 @@ -1078,7 +1079,7 @@
  119.14  
  119.15  #ifdef ASSERT
  119.16    // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
  119.17 -  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
  119.18 +  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic && bc != Bytecodes::_invokehandle) {
  119.19      assert(receiver.not_null(), "should have thrown exception");
  119.20      KlassHandle receiver_klass(THREAD, receiver->klass());
  119.21      Klass* rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
  119.22 @@ -1240,9 +1241,9 @@
  119.23  #endif
  119.24  
  119.25    if (is_virtual) {
  119.26 -    assert(receiver.not_null(), "sanity check");
  119.27 +    assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check");
  119.28      bool static_bound = call_info.resolved_method()->can_be_statically_bound();
  119.29 -    KlassHandle h_klass(THREAD, receiver->klass());
  119.30 +    KlassHandle h_klass(THREAD, invoke_code == Bytecodes::_invokehandle ? NULL : receiver->klass());
  119.31      CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
  119.32                       is_optimized, static_bound, virtual_call_info,
  119.33                       CHECK_(methodHandle()));
   120.1 --- a/src/share/vm/runtime/thread.cpp	Fri Sep 06 09:55:38 2013 +0100
   120.2 +++ b/src/share/vm/runtime/thread.cpp	Sat Sep 14 20:40:34 2013 +0100
   120.3 @@ -3636,6 +3636,16 @@
   120.4    CompileBroker::compilation_init();
   120.5  #endif
   120.6  
   120.7 +  if (EnableInvokeDynamic) {
   120.8 +    // Pre-initialize some JSR292 core classes to avoid deadlock during class loading.
   120.9 +    // It is done after compilers are initialized, because otherwise compilations of
  120.10 +    // signature polymorphic MH intrinsics can be missed
  120.11 +    // (see SystemDictionary::find_method_handle_intrinsic).
  120.12 +    initialize_class(vmSymbols::java_lang_invoke_MethodHandle(), CHECK_0);
  120.13 +    initialize_class(vmSymbols::java_lang_invoke_MemberName(), CHECK_0);
  120.14 +    initialize_class(vmSymbols::java_lang_invoke_MethodHandleNatives(), CHECK_0);
  120.15 +  }
  120.16 +
  120.17  #if INCLUDE_MANAGEMENT
  120.18    Management::initialize(THREAD);
  120.19  #endif // INCLUDE_MANAGEMENT
   121.1 --- a/src/share/vm/runtime/thread.hpp	Fri Sep 06 09:55:38 2013 +0100
   121.2 +++ b/src/share/vm/runtime/thread.hpp	Sat Sep 14 20:40:34 2013 +0100
   121.3 @@ -113,8 +113,9 @@
   121.4    // Support for forcing alignment of thread objects for biased locking
   121.5    void*       _real_malloc_address;
   121.6   public:
   121.7 -  void* operator new(size_t size) { return allocate(size, true); }
   121.8 -  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) { return allocate(size, false); }
   121.9 +  void* operator new(size_t size) throw() { return allocate(size, true); }
  121.10 +  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
  121.11 +    return allocate(size, false); }
  121.12    void  operator delete(void* p);
  121.13  
  121.14   protected:
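
Thread keeps two allocation entry points: a plain new Thread-style expression selects operator new(size_t), while a new (std::nothrow) expression selects the nothrow_t overload; both are now declared throw(). A generic illustration of the overload selection, independent of HotSpot:

    #include <cstddef>
    #include <cstdio>
    #include <new>       // std::nothrow, std::nothrow_t

    struct T {
      void* operator new(std::size_t size) throw() {
        return ::operator new(size, std::nothrow);   // may return NULL
      }
      void* operator new(std::size_t size, const std::nothrow_t&) throw() {
        return ::operator new(size, std::nothrow);
      }
      void operator delete(void* p) { ::operator delete(p); }
      int x;
    };

    int main() {
      T* a = new T;                  // selects operator new(size_t)
      T* b = new (std::nothrow) T;   // selects the nothrow_t overload
      if (a == NULL || b == NULL) std::printf("allocation failed, no exception\n");
      delete a;                      // deleting NULL is harmless
      delete b;
      return 0;
    }
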
   122.1 --- a/src/share/vm/services/memRecorder.hpp	Fri Sep 06 09:55:38 2013 +0100
   122.2 +++ b/src/share/vm/services/memRecorder.hpp	Sat Sep 14 20:40:34 2013 +0100
   122.3 @@ -53,13 +53,13 @@
   122.4      }
   122.5    }
   122.6  
   122.7 -  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) {
   122.8 +  void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
   122.9      // the instance is part of memRecorder, needs to be tagged with 'otNMTRecorder'
  122.10      // to avoid recursion
  122.11      return os::malloc(size, (mtNMT | otNMTRecorder));
  122.12    }
  122.13  
  122.14 -  void* operator new(size_t size) {
  122.15 +  void* operator new(size_t size) throw() {
  122.16      assert(false, "use nothrow version");
  122.17      return NULL;
  122.18    }
   123.1 --- a/src/share/vm/services/memTrackWorker.cpp	Fri Sep 06 09:55:38 2013 +0100
   123.2 +++ b/src/share/vm/services/memTrackWorker.cpp	Sat Sep 14 20:40:34 2013 +0100
   123.3 @@ -1,5 +1,5 @@
   123.4  /*
   123.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
   123.6 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   123.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   123.8   *
   123.9   * This code is free software; you can redistribute it and/or modify it
  123.10 @@ -63,12 +63,12 @@
  123.11    }
  123.12  }
  123.13  
  123.14 -void* MemTrackWorker::operator new(size_t size) {
  123.15 +void* MemTrackWorker::operator new(size_t size) throw() {
  123.16    assert(false, "use nothrow version");
  123.17    return NULL;
  123.18  }
  123.19  
  123.20 -void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) {
  123.21 +void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
  123.22    return allocate(size, false, mtNMT);
  123.23  }
  123.24  
   124.1 --- a/src/share/vm/services/memTrackWorker.hpp	Fri Sep 06 09:55:38 2013 +0100
   124.2 +++ b/src/share/vm/services/memTrackWorker.hpp	Sat Sep 14 20:40:34 2013 +0100
   124.3 @@ -1,5 +1,5 @@
   124.4  /*
   124.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
   124.6 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   124.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   124.8   *
   124.9   * This code is free software; you can redistribute it and/or modify it
  124.10 @@ -90,8 +90,8 @@
  124.11   public:
  124.12    MemTrackWorker(MemSnapshot* snapshot);
  124.13    ~MemTrackWorker();
  124.14 -  _NOINLINE_ void* operator new(size_t size);
  124.15 -  _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant);
  124.16 +  _NOINLINE_ void* operator new(size_t size) throw();
  124.17 +  _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw();
  124.18  
  124.19    void start();
  124.20    void run();
   125.1 --- a/src/share/vm/utilities/array.hpp	Fri Sep 06 09:55:38 2013 +0100
   125.2 +++ b/src/share/vm/utilities/array.hpp	Sat Sep 14 20:40:34 2013 +0100
   125.3 @@ -317,7 +317,7 @@
   125.4    Array(const Array<T>&);
   125.5    void operator=(const Array<T>&);
   125.6  
   125.7 -  void* operator new(size_t size, ClassLoaderData* loader_data, int length, bool read_only, TRAPS) {
   125.8 +  void* operator new(size_t size, ClassLoaderData* loader_data, int length, bool read_only, TRAPS) throw() {
   125.9      size_t word_size = Array::size(length);
  125.10      return (void*) Metaspace::allocate(loader_data, word_size, read_only,
  125.11                                         MetaspaceObj::array_type(sizeof(T)), CHECK_NULL);
   126.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   126.2 +++ b/test/compiler/gcbarriers/G1CrashTest.java	Sat Sep 14 20:40:34 2013 +0100
   126.3 @@ -0,0 +1,84 @@
   126.4 +/*
   126.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   126.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   126.7 + *
   126.8 + * This code is free software; you can redistribute it and/or modify it
   126.9 + * under the terms of the GNU General Public License version 2 only, as
  126.10 + * published by the Free Software Foundation.
  126.11 + *
  126.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  126.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  126.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  126.15 + * version 2 for more details (a copy is included in the LICENSE file that
  126.16 + * accompanied this code).
  126.17 + *
  126.18 + * You should have received a copy of the GNU General Public License version
  126.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  126.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  126.21 + *
  126.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  126.23 + * or visit www.oracle.com if you need additional information or have any
  126.24 + * questions.
  126.25 + *
  126.26 + */
  126.27 +
  126.28 +/**
  126.29 + * @test
  126.30 + * @bug 8023472
  126.31 + * @summary C2 optimization breaks with G1
  126.32 + *
  126.33 + * @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -Dcount=100000 G1CrashTest
  126.34 + *
  126.35 + * @author pbiswal@palantir.com
  126.36 + */
  126.37 +
  126.38 +public class G1CrashTest {
  126.39 +    static Object[] set = new Object[11];
  126.40 +
  126.41 +    public static void main(String[] args) throws InterruptedException {
  126.42 +        for (int j = 0; j < Integer.getInteger("count"); j++) {
  126.43 +            Object key = new Object();
  126.44 +            insertKey(key);
  126.45 +            if (j > set.length / 2) {
  126.46 +                Object[] oldKeys = set;
  126.47 +                set = new Object[2 * set.length - 1];
  126.48 +                for (Object o : oldKeys) {
  126.49 +                    if (o != null)
  126.50 +                        insertKey(o);
  126.51 +                }
  126.52 +            }
  126.53 +        }
  126.54 +    }
  126.55 +
  126.56 +    static void insertKey(Object key) {
  126.57 +        int hash = key.hashCode() & 0x7fffffff;
  126.58 +        int index = hash % set.length;
  126.59 +        Object cur = set[index];
  126.60 +        if (cur == null)
  126.61 +            set[index] = key;
  126.62 +        else
  126.63 +            insertKeyRehash(key, index, hash, cur);
  126.64 +    }
  126.65 +
  126.66 +    static void insertKeyRehash(Object key, int index, int hash, Object cur) {
  126.67 +        int loopIndex = index;
  126.68 +        int firstRemoved = -1;
  126.69 +        do {
  126.70 +            if (cur == "dead")
  126.71 +                firstRemoved = 1;
  126.72 +            index--;
  126.73 +            if (index < 0)
  126.74 +                index += set.length;
  126.75 +            cur = set[index];
  126.76 +            if (cur == null) {
  126.77 +                if (firstRemoved != -1)
  126.78 +                    set[firstRemoved] = "dead";
  126.79 +                else
  126.80 +                    set[index] = key;
  126.81 +                return;
  126.82 +            }
  126.83 +        } while (index != loopIndex);
  126.84 +        if (firstRemoved != -1)
  126.85 +            set[firstRemoved] = null;
  126.86 +    }
  126.87 +}
   127.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   127.2 +++ b/test/compiler/jsr292/ConcurrentClassLoadingTest.java	Sat Sep 14 20:40:34 2013 +0100
   127.3 @@ -0,0 +1,194 @@
   127.4 +/*
   127.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   127.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   127.7 + *
   127.8 + * This code is free software; you can redistribute it and/or modify it
   127.9 + * under the terms of the GNU General Public License version 2 only, as
  127.10 + * published by the Free Software Foundation.
  127.11 + *
  127.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  127.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  127.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  127.15 + * version 2 for more details (a copy is included in the LICENSE file that
  127.16 + * accompanied this code).
  127.17 + *
  127.18 + * You should have received a copy of the GNU General Public License version
  127.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  127.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  127.21 + *
  127.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  127.23 + * or visit www.oracle.com if you need additional information or have any
  127.24 + * questions.
  127.25 + */
  127.26 +
  127.27 +/**
  127.28 + * @test
  127.29 + * @bug 8022595
  127.30 + * @summary JSR292: deadlock during class loading of MethodHandles, MethodHandleImpl & MethodHandleNatives
  127.31 + *
  127.32 + * @run main/othervm ConcurrentClassLoadingTest
  127.33 + */
  127.34 +import java.util.*;
  127.35 +import java.util.concurrent.BrokenBarrierException;
  127.36 +import java.util.concurrent.CyclicBarrier;
  127.37 +
  127.38 +public class ConcurrentClassLoadingTest {
  127.39 +    int numThreads = 0;
  127.40 +    long seed = 0;
  127.41 +    CyclicBarrier l;
  127.42 +    Random rand;
  127.43 +
  127.44 +    public static void main(String[] args) throws Throwable {
  127.45 +        ConcurrentClassLoadingTest test = new ConcurrentClassLoadingTest();
  127.46 +        test.parseArgs(args);
  127.47 +        test.run();
  127.48 +    }
  127.49 +
  127.50 +    void parseArgs(String[] args) {
  127.51 +        int i = 0;
  127.52 +        while (i < args.length) {
  127.53 +            String flag = args[i];
  127.54 +            switch(flag) {
  127.55 +                case "-seed":
  127.56 +                    seed = Long.parseLong(args[++i]);
  127.57 +                    break;
  127.58 +                case "-numThreads":
  127.59 +                    numThreads = Integer.parseInt(args[++i]);
  127.60 +                    break;
  127.61 +                default:
  127.62 +                    throw new Error("Unknown flag: " + flag);
  127.63 +            }
  127.64 +            ++i;
  127.65 +        }
  127.66 +    }
  127.67 +
  127.68 +    void init() {
  127.69 +        if (numThreads == 0) {
  127.70 +            numThreads = Runtime.getRuntime().availableProcessors();
  127.71 +        }
  127.72 +
  127.73 +        if (seed == 0) {
  127.74 +            seed = (new Random()).nextLong();
  127.75 +        }
  127.76 +        rand = new Random(seed);
  127.77 +
  127.78 +        l = new CyclicBarrier(numThreads + 1);
  127.79 +
  127.80 +        System.out.printf("Threads: %d\n", numThreads);
  127.81 +        System.out.printf("Seed: %d\n", seed);
  127.82 +    }
  127.83 +
  127.84 +    final List<Loader> loaders = new ArrayList<>();
  127.85 +
  127.86 +    void prepare() {
  127.87 +        List<String> c = new ArrayList<>(Arrays.asList(classNames));
  127.88 +
  127.89 +        // Split classes between loading threads
  127.90 +        int count = (classNames.length / numThreads) + 1;
  127.91 +        for (int t = 0; t < numThreads; t++) {
  127.92 +            List<String> sel = new ArrayList<>();
  127.93 +
  127.94 +            System.out.printf("Thread #%d:\n", t);
  127.95 +            for (int i = 0; i < count; i++) {
  127.96 +                if (c.size() == 0) break;
  127.97 +
  127.98 +                int k = rand.nextInt(c.size());
  127.99 +                String elem = c.remove(k);
 127.100 +                sel.add(elem);
 127.101 +                System.out.printf("\t%s\n", elem);
 127.102 +            }
 127.103 +            loaders.add(new Loader(sel));
 127.104 +        }
 127.105 +
 127.106 +        // Print diagnostic info when the test hangs
 127.107 +        Runtime.getRuntime().addShutdownHook(new Thread() {
 127.108 +            public void run() {
 127.109 +                boolean alive = false;
 127.110 +                for (Loader l : loaders) {
 127.111 +                    if (!l.isAlive())  continue;
 127.112 +
 127.113 +                    if (!alive) {
 127.114 +                        System.out.println("Some threads are still alive:");
 127.115 +                        alive = true;
 127.116 +                    }
 127.117 +
 127.118 +                    System.out.println(l.getName());
 127.119 +                    for (StackTraceElement elem : l.getStackTrace()) {
 127.120 +                        System.out.println("\t"+elem.toString());
 127.121 +                    }
 127.122 +                }
 127.123 +            }
 127.124 +        });
 127.125 +    }
 127.126 +
 127.127 +    public void run() throws Throwable {
 127.128 +        init();
 127.129 +        prepare();
 127.130 +
 127.131 +        for (Loader loader : loaders) {
 127.132 +            loader.start();
 127.133 +        }
 127.134 +
 127.135 +        l.await();
 127.136 +
 127.137 +        for (Loader loader : loaders) {
 127.138 +            loader.join();
 127.139 +        }
 127.140 +    }
 127.141 +
 127.142 +    class Loader extends Thread {
 127.143 +        List<String> classes;
 127.144 +
 127.145 +        public Loader(List<String> classes) {
 127.146 +            this.classes = classes;
 127.147 +            setDaemon(true);
 127.148 +        }
 127.149 +
 127.150 +        @Override
 127.151 +        public void run() {
 127.152 +            try {
 127.153 +                l.await();
 127.154 +
 127.155 +                for (String name : classes) {
 127.156 +                    Class.forName(name).getName();
 127.157 +                }
 127.158 +            } catch (ClassNotFoundException | BrokenBarrierException | InterruptedException e) {
 127.159 +                throw new Error(e);
 127.160 +            }
 127.161 +        }
 127.162 +    }
 127.163 +
 127.164 +    final static String[] classNames = {
 127.165 +            "java.lang.invoke.AbstractValidatingLambdaMetafactory",
 127.166 +            "java.lang.invoke.BoundMethodHandle",
 127.167 +            "java.lang.invoke.CallSite",
 127.168 +            "java.lang.invoke.ConstantCallSite",
 127.169 +            "java.lang.invoke.DirectMethodHandle",
 127.170 +            "java.lang.invoke.InnerClassLambdaMetafactory",
 127.171 +            "java.lang.invoke.InvokeDynamic",
 127.172 +            "java.lang.invoke.InvokeGeneric",
 127.173 +            "java.lang.invoke.InvokerBytecodeGenerator",
 127.174 +            "java.lang.invoke.Invokers",
 127.175 +            "java.lang.invoke.LambdaConversionException",
 127.176 +            "java.lang.invoke.LambdaForm",
 127.177 +            "java.lang.invoke.LambdaMetafactory",
 127.178 +            "java.lang.invoke.MagicLambdaImpl",
 127.179 +            "java.lang.invoke.MemberName",
 127.180 +            "java.lang.invoke.MethodHandle",
 127.181 +            "java.lang.invoke.MethodHandleImpl",
 127.182 +            "java.lang.invoke.MethodHandleInfo",
 127.183 +            "java.lang.invoke.MethodHandleNatives",
 127.184 +            "java.lang.invoke.MethodHandleProxies",
 127.185 +            "java.lang.invoke.MethodHandles",
 127.186 +            "java.lang.invoke.MethodHandleStatics",
 127.187 +            "java.lang.invoke.MethodType",
 127.188 +            "java.lang.invoke.MethodTypeForm",
 127.189 +            "java.lang.invoke.MutableCallSite",
 127.190 +            "java.lang.invoke.SerializedLambda",
 127.191 +            "java.lang.invoke.SimpleMethodHandle",
 127.192 +            "java.lang.invoke.SwitchPoint",
 127.193 +            "java.lang.invoke.TypeConvertingMethodAdapter",
 127.194 +            "java.lang.invoke.VolatileCallSite",
 127.195 +            "java.lang.invoke.WrongMethodTypeException"
 127.196 +    };
 127.197 +}
   128.1 --- a/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java	Fri Sep 06 09:55:38 2013 +0100
   128.2 +++ b/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java	Sat Sep 14 20:40:34 2013 +0100
   128.3 @@ -51,9 +51,8 @@
   128.4          output.shouldHaveExitValue(0);
   128.5  
   128.6        } catch (RuntimeException e) {
   128.7 -        // Report 'passed' if CDS was turned off because we could not allocate
   128.8 -        // the klass metaspace at an address that would work with CDS.
   128.9 -        output.shouldContain("Could not allocate metaspace at a compatible address");
  128.10 +        // Report 'passed' if CDS was turned off.
  128.11 +        output.shouldContain("Unable to use shared archive");
  128.12          output.shouldHaveExitValue(1);
  128.13        }
  128.14      }
   129.1 --- a/test/runtime/CDSCompressedKPtrs/XShareAuto.java	Fri Sep 06 09:55:38 2013 +0100
   129.2 +++ b/test/runtime/CDSCompressedKPtrs/XShareAuto.java	Sat Sep 14 20:40:34 2013 +0100
   129.3 @@ -69,7 +69,7 @@
   129.4                  "-server", "-Xshare:on", "-XX:+UnlockDiagnosticVMOptions",
   129.5                  "-XX:SharedArchiveFile=./sample.jsa", "-version");
   129.6              output = new OutputAnalyzer(pb.start());
   129.7 -            output.shouldContain("Could not allocate metaspace at a compatible address");
   129.8 +            output.shouldContain("Unable to use shared archive");
   129.9              output.shouldHaveExitValue(1);
  129.10          }
  129.11      }
   130.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   130.2 +++ b/test/runtime/InitialThreadOverflow/DoOverflow.java	Sat Sep 14 20:40:34 2013 +0100
   130.3 @@ -0,0 +1,41 @@
   130.4 +/*
   130.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   130.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   130.7 + *
   130.8 + * This code is free software; you can redistribute it and/or modify it
   130.9 + * under the terms of the GNU General Public License version 2 only, as
  130.10 + * published by the Free Software Foundation.
  130.11 + *
  130.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  130.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  130.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  130.15 + * version 2 for more details (a copy is included in the LICENSE file that
  130.16 + * accompanied this code).
  130.17 + *
  130.18 + * You should have received a copy of the GNU General Public License version
  130.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  130.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  130.21 + *
  130.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  130.23 + * or visit www.oracle.com if you need additional information or have any
  130.24 + * questions.
  130.25 + */
  130.26 +
  130.27 +public class DoOverflow {
  130.28 +
  130.29 +    static int count;
  130.30 +
  130.31 +    public void overflow() {
  130.32 +        count+=1;
  130.33 +        overflow();
  130.34 +    }
  130.35 +
  130.36 +    public static void printIt() {
  130.37 +        System.out.println("Going to overflow stack");
  130.38 +        try {
  130.39 +            new DoOverflow().overflow();
  130.40 +        } catch(java.lang.StackOverflowError e) {
  130.41 +            System.out.println("Overflow OK " + count);
  130.42 +        }
  130.43 +    }
  130.44 +}
   131.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   131.2 +++ b/test/runtime/InitialThreadOverflow/invoke.cxx	Sat Sep 14 20:40:34 2013 +0100
   131.3 @@ -0,0 +1,70 @@
   131.4 +/*
   131.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   131.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   131.7 + *
   131.8 + * This code is free software; you can redistribute it and/or modify it
   131.9 + * under the terms of the GNU General Public License version 2 only, as
  131.10 + * published by the Free Software Foundation.
  131.11 + *
  131.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  131.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  131.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  131.15 + * version 2 for more details (a copy is included in the LICENSE file that
  131.16 + * accompanied this code).
  131.17 + *
  131.18 + * You should have received a copy of the GNU General Public License version
  131.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  131.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  131.21 + *
  131.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  131.23 + * or visit www.oracle.com if you need additional information or have any
  131.24 + * questions.
  131.25 + */
  131.26 +
  131.27 +#include <assert.h>
  131.28 +#include <jni.h>
  131.29 +
  131.30 +#include <pthread.h>
  131.31 +
  131.32 +JavaVM* jvm;
  131.33 +
  131.34 +void *
  131.35 +floobydust (void *p) {
  131.36 +  JNIEnv *env;
  131.37 +
  131.38 +  jvm->AttachCurrentThread((void**)&env, NULL);
  131.39 +
  131.40 +  jclass class_id = env->FindClass ("DoOverflow");
  131.41 +  assert (class_id);
  131.42 +
  131.43 +  jmethodID method_id = env->GetStaticMethodID(class_id, "printIt", "()V");
  131.44 +  assert (method_id);
  131.45 +
  131.46 +  env->CallStaticVoidMethod(class_id, method_id, NULL);
  131.47 +
  131.48 +  jvm->DetachCurrentThread();
  131.49 +}
  131.50 +
  131.51 +int
  131.52 +main (int argc, const char** argv) {
  131.53 +  JavaVMOption options[1];
  131.54 +  options[0].optionString = (char*) "-Xss320k";
  131.55 +
  131.56 +  JavaVMInitArgs vm_args;
  131.57 +  vm_args.version = JNI_VERSION_1_2;
  131.58 +  vm_args.ignoreUnrecognized = JNI_TRUE;
  131.59 +  vm_args.options = options;
  131.60 +  vm_args.nOptions = 1;
  131.61 +
  131.62 +  JNIEnv* env;
  131.63 +  jint result = JNI_CreateJavaVM(&jvm, (void**)&env, &vm_args);
  131.64 +  assert(result >= 0);
  131.65 +
  131.66 +  pthread_t thr;
  131.67 +  pthread_create(&thr, NULL, floobydust, NULL);
  131.68 +  pthread_join(thr, NULL);
  131.69 +
  131.70 +  floobydust(NULL);
  131.71 +
  131.72 +  return 0;
  131.73 +}
   132.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   132.2 +++ b/test/runtime/InitialThreadOverflow/testme.sh	Sat Sep 14 20:40:34 2013 +0100
   132.3 @@ -0,0 +1,73 @@
   132.4 +#!/bin/sh
   132.5 +
   132.6 +# Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
   132.7 +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   132.8 +#
   132.9 +# This code is free software; you can redistribute it and/or modify it
  132.10 +# under the terms of the GNU General Public License version 2 only, as
  132.11 +# published by the Free Software Foundation.
  132.12 +#
  132.13 +# This code is distributed in the hope that it will be useful, but WITHOUT
  132.14 +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  132.15 +# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  132.16 +# version 2 for more details (a copy is included in the LICENSE file that
  132.17 +# accompanied this code).
  132.18 +#
  132.19 +# You should have received a copy of the GNU General Public License version
  132.20 +# 2 along with this work; if not, write to the Free Software Foundation,
  132.21 +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  132.22 +#
  132.23 +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  132.24 +# or visit www.oracle.com if you need additional information or have any
  132.25 +# questions.
  132.26 +
  132.27 +# @test testme.sh
  132.28 +# @bug 8009062
  132.29 +# @summary Poor performance of JNI AttachCurrentThread after fix for 7017193
  132.30 +# @compile DoOverflow.java
  132.31 +# @run shell testme.sh
  132.32 +
  132.33 +set -x
  132.34 +if [ "${TESTSRC}" = "" ]
  132.35 +then
  132.36 +  TESTSRC=${PWD}
  132.37 +  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
  132.38 +fi
  132.39 +echo "TESTSRC=${TESTSRC}"
  132.40 +## Adding common setup Variables for running shell tests.
  132.41 +. ${TESTSRC}/../../test_env.sh
  132.42 +
  132.43 +if [ "${VM_OS}" != "linux" ]
  132.44 +then
  132.45 +  echo "Test only valid for Linux"
  132.46 +  exit 0
  132.47 +fi
  132.48 +
  132.49 +gcc_cmd=`which gcc`
   132.50 +if [ "x$gcc_cmd" = "x" ]; then
   132.51 +    echo "WARNING: gcc not found. Cannot execute test." >&2
  132.52 +    exit 0;
  132.53 +fi
  132.54 +
  132.55 +CFLAGS="-m${VM_BITS}"
  132.56 +
  132.57 +LD_LIBRARY_PATH=.:${COMPILEJAVA}/jre/lib/${VM_CPU}/${VM_TYPE}:/usr/lib:$LD_LIBRARY_PATH
  132.58 +export LD_LIBRARY_PATH
  132.59 +
  132.60 +cp ${TESTSRC}${FS}invoke.cxx .
  132.61 +
  132.62 +# Copy the result of our @compile action:
  132.63 +cp ${TESTCLASSES}${FS}DoOverflow.class .
  132.64 +
   132.65 +echo "Compilation flags: ${CFLAGS}"
   132.66 +# Note: pthread may not be found, in which case the 'invoke' binary will fail to build.
   132.67 +# Check that /usr/lib/libpthread.so exists; if it does not, look for the
   132.68 +# /usr/lib/`uname -m`-linux-gnu version and add that path to the compilation below.
  132.69 +
  132.70 +$gcc_cmd -DLINUX ${CFLAGS} -o invoke \
  132.71 +    -I${COMPILEJAVA}/include -I${COMPILEJAVA}/include/linux \
  132.72 +    -L${COMPILEJAVA}/jre/lib/${VM_CPU}/${VM_TYPE} \
  132.73 +    -ljvm -lpthread invoke.cxx
  132.74 +
  132.75 +./invoke
  132.76 +exit $?
   133.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   133.2 +++ b/test/runtime/LoadClass/LoadClassNegative.java	Sat Sep 14 20:40:34 2013 +0100
   133.3 @@ -0,0 +1,51 @@
   133.4 +/*
   133.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   133.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   133.7 + *
   133.8 + * This code is free software; you can redistribute it and/or modify it
   133.9 + * under the terms of the GNU General Public License version 2 only, as
  133.10 + * published by the Free Software Foundation.
  133.11 + *
  133.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  133.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  133.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  133.15 + * version 2 for more details (a copy is included in the LICENSE file that
  133.16 + * accompanied this code).
  133.17 + *
  133.18 + * You should have received a copy of the GNU General Public License version
  133.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  133.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  133.21 + *
  133.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  133.23 + * or visit www.oracle.com if you need additional information or have any
  133.24 + * questions.
  133.25 + */
  133.26 +
  133.27 +/*
  133.28 + * @test
  133.29 + * @key regression
  133.30 + * @bug 8020675
  133.31 + * @summary make sure there is no fatal error if a class is loaded from an invalid jar file which is in the bootclasspath
  133.32 + * @library /testlibrary
  133.33 + * @build TestForName
  133.34 + * @build LoadClassNegative
  133.35 + * @run main LoadClassNegative
  133.36 + */
  133.37 +
  133.38 +import java.io.File;
  133.39 +import com.oracle.java.testlibrary.*;
  133.40 +
  133.41 +public class LoadClassNegative {
  133.42 +
  133.43 +  public static void main(String args[]) throws Exception {
  133.44 +    String bootCP = "-Xbootclasspath/a:" + System.getProperty("test.src")
  133.45 +                       + File.separator + "dummy.jar";
  133.46 +    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
  133.47 +        bootCP,
  133.48 +        "TestForName");
  133.49 +
  133.50 +    OutputAnalyzer output = new OutputAnalyzer(pb.start());
  133.51 +    output.shouldContain("ClassNotFoundException");
  133.52 +    output.shouldHaveExitValue(0);
  133.53 +  }
  133.54 +}
   134.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   134.2 +++ b/test/runtime/LoadClass/TestForName.java	Sat Sep 14 20:40:34 2013 +0100
   134.3 @@ -0,0 +1,33 @@
   134.4 +/*
   134.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   134.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   134.7 + *
   134.8 + * This code is free software; you can redistribute it and/or modify it
   134.9 + * under the terms of the GNU General Public License version 2 only, as
  134.10 + * published by the Free Software Foundation.
  134.11 + *
  134.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  134.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  134.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  134.15 + * version 2 for more details (a copy is included in the LICENSE file that
  134.16 + * accompanied this code).
  134.17 + *
  134.18 + * You should have received a copy of the GNU General Public License version
  134.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  134.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  134.21 + *
  134.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  134.23 + * or visit www.oracle.com if you need additional information or have any
  134.24 + * questions.
  134.25 + */
  134.26 +
  134.27 +public class TestForName {
  134.28 +    public static void main(String[] args) {
  134.29 +        try {
  134.30 +            Class cls = Class.forName("xxx");
  134.31 +            System.out.println("Class = " + cls.getName());
  134.32 +        } catch (ClassNotFoundException cnfe) {
  134.33 +            cnfe.printStackTrace();
  134.34 +        }
  134.35 +    }
  134.36 +}
   135.1 --- a/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java	Fri Sep 06 09:55:38 2013 +0100
   135.2 +++ b/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java	Sat Sep 14 20:40:34 2013 +0100
   135.3 @@ -84,7 +84,7 @@
   135.4              // there is a chance such reservation will fail
   135.5              // If it does, it is NOT considered a failure of the feature,
   135.6              // rather a possible expected outcome, though not likely
   135.7 -            output.shouldContain("Could not allocate metaspace at a compatible address");
   135.8 +            output.shouldContain("Unable to use shared archive");
   135.9              output.shouldHaveExitValue(1);
  135.10          }
  135.11      }
   136.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   136.2 +++ b/test/runtime/contended/Options.java	Sat Sep 14 20:40:34 2013 +0100
   136.3 @@ -0,0 +1,103 @@
   136.4 +/*
   136.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   136.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   136.7 + *
   136.8 + * This code is free software; you can redistribute it and/or modify it
   136.9 + * under the terms of the GNU General Public License version 2 only, as
  136.10 + * published by the Free Software Foundation.
  136.11 + *
  136.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  136.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  136.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  136.15 + * version 2 for more details (a copy is included in the LICENSE file that
  136.16 + * accompanied this code).
  136.17 + *
  136.18 + * You should have received a copy of the GNU General Public License version
  136.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  136.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  136.21 + *
  136.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  136.23 + * or visit www.oracle.com if you need additional information or have any
  136.24 + * questions.
  136.25 + */
  136.26 +
  136.27 +import com.oracle.java.testlibrary.*;
  136.28 +
  136.29 +/*
  136.30 + * @test
  136.31 + * @bug     8006997
  136.32 + * @summary ContendedPaddingWidth should be range-checked
  136.33 + *
  136.34 + * @library /testlibrary
  136.35 + * @run main Options
  136.36 + */
  136.37 +public class Options {
  136.38 +
  136.39 +    public static void main(String[] args) throws Exception {
  136.40 +        ProcessBuilder pb;
  136.41 +        OutputAnalyzer output;
  136.42 +
  136.43 +        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=-128", "-version");
  136.44 +        output = new OutputAnalyzer(pb.start());
  136.45 +        output.shouldContain("ContendedPaddingWidth");
  136.46 +        output.shouldContain("must be in between");
  136.47 +        output.shouldHaveExitValue(1);
  136.48 +
  136.49 +        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=-8", "-version");
  136.50 +        output = new OutputAnalyzer(pb.start());
  136.51 +        output.shouldContain("ContendedPaddingWidth");
  136.52 +        output.shouldContain("must be in between");
  136.53 +        output.shouldHaveExitValue(1);
  136.54 +
  136.55 +        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=-1", "-version");
  136.56 +        output = new OutputAnalyzer(pb.start());
  136.57 +        output.shouldContain("ContendedPaddingWidth");
  136.58 +        output.shouldContain("must be in between");
  136.59 +        output.shouldContain("must be a multiple of 8");
  136.60 +        output.shouldHaveExitValue(1);
  136.61 +
  136.62 +        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=0", "-version");
  136.63 +        output = new OutputAnalyzer(pb.start());
  136.64 +        output.shouldHaveExitValue(0);
  136.65 +
  136.66 +        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=1", "-version");
  136.67 +        output = new OutputAnalyzer(pb.start());
  136.68 +        output.shouldContain("ContendedPaddingWidth");
  136.69 +        output.shouldContain("must be a multiple of 8");
  136.70 +        output.shouldHaveExitValue(1);
  136.71 +
  136.72 +        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8", "-version");
  136.73 +        output = new OutputAnalyzer(pb.start());
  136.74 +        output.shouldHaveExitValue(0);
  136.75 +
  136.76 +        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8184", "-version"); // 8192-8 = 8184
  136.77 +        output = new OutputAnalyzer(pb.start());
  136.78 +        output.shouldHaveExitValue(0);
  136.79 +
  136.80 +        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8191", "-version");
  136.81 +        output = new OutputAnalyzer(pb.start());
  136.82 +        output.shouldContain("ContendedPaddingWidth");
  136.83 +        output.shouldContain("must be a multiple of 8");
  136.84 +        output.shouldHaveExitValue(1);
  136.85 +
  136.86 +        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8192", "-version");
  136.87 +        output = new OutputAnalyzer(pb.start());
  136.88 +        output.shouldHaveExitValue(0);
  136.89 +
  136.90 +        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8193", "-version");
  136.91 +        output = new OutputAnalyzer(pb.start());
  136.92 +        output.shouldContain("ContendedPaddingWidth");
  136.93 +        output.shouldContain("must be in between");
  136.94 +        output.shouldContain("must be a multiple of 8");
  136.95 +        output.shouldHaveExitValue(1);
  136.96 +
  136.97 +        pb = ProcessTools.createJavaProcessBuilder("-XX:ContendedPaddingWidth=8200", "-version"); // 8192+8 = 8200
  136.98 +        output = new OutputAnalyzer(pb.start());
  136.99 +        output.shouldContain("ContendedPaddingWidth");
 136.100 +        output.shouldContain("must be in between");
 136.101 +        output.shouldHaveExitValue(1);
 136.102 +
 136.103 +   }
 136.104 +
 136.105 +}
 136.106 +
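
Read together, the cases above pin down the expected validation: 0, 8, 8184 and 8192 are accepted, while negative values, values above 8192, and values not divisible by 8 are rejected (8191 trips only the multiple-of-8 check; 8193 trips both checks). A hypothetical predicate equivalent to what the test enforces, not HotSpot's actual code:

    // Equivalent of the validation the test expects for
    // -XX:ContendedPaddingWidth: inside [0, 8192] and 8-byte aligned.
    static bool padding_width_ok(long w) {
      return w >= 0 && w <= 8192 && (w % 8) == 0;
    }
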
