Merge

Fri, 27 Sep 2013 13:53:43 -0400

author
jiangli
date
Fri, 27 Sep 2013 13:53:43 -0400
changeset 5804
5186dcaca431
parent 5803
d68894a09c7c
parent 5780
24250c363d7f
child 5805
d0cfa6502dfe

Merge

src/share/vm/c1/c1_Runtime1.cpp file | annotate | diff | comparison | revisions
test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java file | annotate | diff | comparison | revisions
test/runtime/6878713/Test6878713.sh file | annotate | diff | comparison | revisions
test/runtime/6878713/testcase.jar file | annotate | diff | comparison | revisions
test/runtime/7020373/Test7020373.sh file | annotate | diff | comparison | revisions
test/runtime/7020373/testcase.jar file | annotate | diff | comparison | revisions
     1.1 --- a/.hgtags	Fri Sep 27 13:49:57 2013 -0400
     1.2 +++ b/.hgtags	Fri Sep 27 13:53:43 2013 -0400
     1.3 @@ -377,3 +377,7 @@
     1.4  50794d8ac11c9579b41dec4de23b808fef9f34a1 hs25-b49
     1.5  5b7f90aab3ad25a25b75b7b2bb18d5ae23d8231c jdk8-b107
     1.6  a09fe9d1e016c285307507a5793bc4fa6215e9c9 hs25-b50
     1.7 +85072013aad46050a362d10ab78e963121c8014c jdk8-b108
     1.8 +566db1b0e6efca31f181456e54c8911d0192410d hs25-b51
     1.9 +c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109
    1.10 +58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52
     2.1 --- a/agent/src/os/linux/ps_core.c	Fri Sep 27 13:49:57 2013 -0400
     2.2 +++ b/agent/src/os/linux/ps_core.c	Fri Sep 27 13:53:43 2013 -0400
     2.3 @@ -1,5 +1,5 @@
     2.4  /*
     2.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
     2.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
     2.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     2.8   *
     2.9   * This code is free software; you can redistribute it and/or modify it
    2.10 @@ -698,29 +698,58 @@
    2.11  
    2.12  // read segments of a shared object
    2.13  static bool read_lib_segments(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* lib_ehdr, uintptr_t lib_base) {
    2.14 -   int i = 0;
    2.15 -   ELF_PHDR* phbuf;
    2.16 -   ELF_PHDR* lib_php = NULL;
    2.17 +  int i = 0;
    2.18 +  ELF_PHDR* phbuf;
    2.19 +  ELF_PHDR* lib_php = NULL;
    2.20  
    2.21 -   if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL)
    2.22 -      return false;
    2.23 +  int page_size=sysconf(_SC_PAGE_SIZE);
    2.24  
    2.25 -   // we want to process only PT_LOAD segments that are not writable.
    2.26 -   // i.e., text segments. The read/write/exec (data) segments would
    2.27 -   // have been already added from core file segments.
    2.28 -   for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
    2.29 -      if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
    2.30 -         if (add_map_info(ph, lib_fd, lib_php->p_offset, lib_php->p_vaddr + lib_base, lib_php->p_filesz) == NULL)
    2.31 -            goto err;
    2.32 +  if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) {
    2.33 +    return false;
    2.34 +  }
    2.35 +
    2.36 +  // we want to process only PT_LOAD segments that are not writable.
    2.37 +  // i.e., text segments. The read/write/exec (data) segments would
    2.38 +  // have been already added from core file segments.
    2.39 +  for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
    2.40 +    if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
    2.41 +
    2.42 +      uintptr_t target_vaddr = lib_php->p_vaddr + lib_base;
    2.43 +      map_info *existing_map = core_lookup(ph, target_vaddr);
    2.44 +
    2.45 +      if (existing_map == NULL){
    2.46 +        if (add_map_info(ph, lib_fd, lib_php->p_offset,
    2.47 +                          target_vaddr, lib_php->p_filesz) == NULL) {
    2.48 +          goto err;
    2.49 +        }
    2.50 +      } else {
    2.51 +        if ((existing_map->memsz != page_size) &&
    2.52 +            (existing_map->fd != lib_fd) &&
    2.53 +            (existing_map->memsz != lib_php->p_filesz)){
    2.54 +
    2.55 +          print_debug("address conflict @ 0x%lx (size = %ld, flags = %d\n)",
    2.56 +                        target_vaddr, lib_php->p_filesz, lib_php->p_flags);
    2.57 +          goto err;
    2.58 +        }
    2.59 +
    2.60 +        /* replace PT_LOAD segment with library segment */
    2.61 +        print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n",
    2.62 +                     existing_map->memsz, lib_php->p_filesz);
    2.63 +
    2.64 +        existing_map->fd = lib_fd;
    2.65 +        existing_map->offset = lib_php->p_offset;
    2.66 +        existing_map->memsz = lib_php->p_filesz;
    2.67        }
    2.68 -      lib_php++;
    2.69 -   }
    2.70 +    }
    2.71  
    2.72 -   free(phbuf);
    2.73 -   return true;
    2.74 +    lib_php++;
    2.75 +  }
    2.76 +
    2.77 +  free(phbuf);
    2.78 +  return true;
    2.79  err:
    2.80 -   free(phbuf);
    2.81 -   return false;
    2.82 +  free(phbuf);
    2.83 +  return false;
    2.84  }
    2.85  
    2.86  // process segments from interpreter (ld.so or ld-linux.so)
     3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Fri Sep 27 13:49:57 2013 -0400
     3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java	Fri Sep 27 13:53:43 2013 -0400
     3.3 @@ -1213,6 +1213,7 @@
     3.4                  }
     3.5                  HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase();
     3.6                  if (t.countTokens() == 1) {
     3.7 +                    String name = t.nextToken();
     3.8                      out.println("intConstant " + name + " " + db.lookupIntConstant(name));
     3.9                  } else if (t.countTokens() == 0) {
    3.10                      Iterator i = db.getIntConstants();
    3.11 @@ -1235,6 +1236,7 @@
    3.12                  }
    3.13                  HotSpotTypeDataBase db = (HotSpotTypeDataBase)agent.getTypeDataBase();
    3.14                  if (t.countTokens() == 1) {
    3.15 +                    String name = t.nextToken();
    3.16                      out.println("longConstant " + name + " " + db.lookupLongConstant(name));
    3.17                  } else if (t.countTokens() == 0) {
    3.18                      Iterator i = db.getLongConstants();
     4.1 --- a/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java	Fri Sep 27 13:49:57 2013 -0400
     4.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java	Fri Sep 27 13:53:43 2013 -0400
     4.3 @@ -81,7 +81,7 @@
     4.4  
     4.5      public Address getCompKlassAddressAt(long offset)
     4.6              throws UnalignedAddressException, UnmappedAddressException {
     4.7 -        return debugger.readCompOopAddress(addr + offset);
     4.8 +        return debugger.readCompKlassAddress(addr + offset);
     4.9      }
    4.10  
    4.11      //
     5.1 --- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Fri Sep 27 13:49:57 2013 -0400
     5.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Fri Sep 27 13:53:43 2013 -0400
     5.3 @@ -792,7 +792,7 @@
     5.4  
     5.5    public boolean isCompressedKlassPointersEnabled() {
     5.6      if (compressedKlassPointersEnabled == null) {
     5.7 -        Flag flag = getCommandLineFlag("UseCompressedKlassPointers");
     5.8 +        Flag flag = getCommandLineFlag("UseCompressedClassPointers");
     5.9          compressedKlassPointersEnabled = (flag == null) ? Boolean.FALSE:
    5.10               (flag.getBool()? Boolean.TRUE: Boolean.FALSE);
    5.11      }
     6.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Fri Sep 27 13:49:57 2013 -0400
     6.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Fri Sep 27 13:53:43 2013 -0400
     6.3 @@ -66,18 +66,18 @@
     6.4        printGCAlgorithm(flagMap);
     6.5        System.out.println();
     6.6        System.out.println("Heap Configuration:");
     6.7 -      printValue("MinHeapFreeRatio   = ", getFlagValue("MinHeapFreeRatio", flagMap));
     6.8 -      printValue("MaxHeapFreeRatio   = ", getFlagValue("MaxHeapFreeRatio", flagMap));
     6.9 -      printValMB("MaxHeapSize        = ", getFlagValue("MaxHeapSize", flagMap));
    6.10 -      printValMB("NewSize            = ", getFlagValue("NewSize", flagMap));
    6.11 -      printValMB("MaxNewSize         = ", getFlagValue("MaxNewSize", flagMap));
    6.12 -      printValMB("OldSize            = ", getFlagValue("OldSize", flagMap));
    6.13 -      printValue("NewRatio           = ", getFlagValue("NewRatio", flagMap));
    6.14 -      printValue("SurvivorRatio      = ", getFlagValue("SurvivorRatio", flagMap));
    6.15 -      printValMB("MetaspaceSize      = ", getFlagValue("MetaspaceSize", flagMap));
    6.16 -      printValMB("ClassMetaspaceSize = ", getFlagValue("ClassMetaspaceSize", flagMap));
    6.17 -      printValMB("MaxMetaspaceSize   = ", getFlagValue("MaxMetaspaceSize", flagMap));
    6.18 -      printValMB("G1HeapRegionSize   = ", HeapRegion.grainBytes());
    6.19 +      printValue("MinHeapFreeRatio         = ", getFlagValue("MinHeapFreeRatio", flagMap));
    6.20 +      printValue("MaxHeapFreeRatio         = ", getFlagValue("MaxHeapFreeRatio", flagMap));
    6.21 +      printValMB("MaxHeapSize              = ", getFlagValue("MaxHeapSize", flagMap));
    6.22 +      printValMB("NewSize                  = ", getFlagValue("NewSize", flagMap));
    6.23 +      printValMB("MaxNewSize               = ", getFlagValue("MaxNewSize", flagMap));
    6.24 +      printValMB("OldSize                  = ", getFlagValue("OldSize", flagMap));
    6.25 +      printValue("NewRatio                 = ", getFlagValue("NewRatio", flagMap));
    6.26 +      printValue("SurvivorRatio            = ", getFlagValue("SurvivorRatio", flagMap));
    6.27 +      printValMB("MetaspaceSize            = ", getFlagValue("MetaspaceSize", flagMap));
    6.28 +      printValMB("CompressedClassSpaceSize = ", getFlagValue("CompressedClassSpaceSize", flagMap));
    6.29 +      printValMB("MaxMetaspaceSize         = ", getFlagValue("MaxMetaspaceSize", flagMap));
    6.30 +      printValMB("G1HeapRegionSize         = ", HeapRegion.grainBytes());
    6.31  
    6.32        System.out.println();
    6.33        System.out.println("Heap Usage:");
     7.1 --- a/make/bsd/makefiles/gcc.make	Fri Sep 27 13:49:57 2013 -0400
     7.2 +++ b/make/bsd/makefiles/gcc.make	Fri Sep 27 13:53:43 2013 -0400
     7.3 @@ -80,7 +80,7 @@
     7.4      HOSTCC  = $(CC)
     7.5    endif
     7.6  
     7.7 -  AS   = $(CC) -c -x assembler-with-cpp
     7.8 +  AS   = $(CC) -c 
     7.9  endif
    7.10  
    7.11  
    7.12 @@ -347,6 +347,13 @@
    7.13    LDFLAGS += -mmacosx-version-min=$(MACOSX_VERSION_MIN)
    7.14  endif
    7.15  
    7.16 +
    7.17 +#------------------------------------------------------------------------
    7.18 +# Assembler flags
    7.19 +
    7.20 +# Enforce preprocessing of .s files
    7.21 +ASFLAGS += -x assembler-with-cpp
    7.22 +
    7.23  #------------------------------------------------------------------------
    7.24  # Linker flags
    7.25  
     8.1 --- a/make/excludeSrc.make	Fri Sep 27 13:49:57 2013 -0400
     8.2 +++ b/make/excludeSrc.make	Fri Sep 27 13:53:43 2013 -0400
     8.3 @@ -88,7 +88,7 @@
     8.4  	g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
     8.5  	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
     8.6  	g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
     8.7 -	heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
     8.8 +	g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
     8.9  	ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
    8.10  	adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
    8.11  	cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
     9.1 --- a/make/hotspot_version	Fri Sep 27 13:49:57 2013 -0400
     9.2 +++ b/make/hotspot_version	Fri Sep 27 13:53:43 2013 -0400
     9.3 @@ -35,7 +35,7 @@
     9.4  
     9.5  HS_MAJOR_VER=25
     9.6  HS_MINOR_VER=0
     9.7 -HS_BUILD_NUMBER=51
     9.8 +HS_BUILD_NUMBER=53
     9.9  
    9.10  JDK_MAJOR_VER=1
    9.11  JDK_MINOR_VER=8
    10.1 --- a/make/jprt.properties	Fri Sep 27 13:49:57 2013 -0400
    10.2 +++ b/make/jprt.properties	Fri Sep 27 13:53:43 2013 -0400
    10.3 @@ -120,13 +120,13 @@
    10.4  jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
    10.5  jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
    10.6  
    10.7 -jprt.my.windows.i586.jdk8=windows_i586_5.1
    10.8 -jprt.my.windows.i586.jdk7=windows_i586_5.1
    10.9 +jprt.my.windows.i586.jdk8=windows_i586_6.1
   10.10 +jprt.my.windows.i586.jdk7=windows_i586_6.1
   10.11  jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
   10.12  jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
   10.13  
   10.14 -jprt.my.windows.x64.jdk8=windows_x64_5.2
   10.15 -jprt.my.windows.x64.jdk7=windows_x64_5.2
   10.16 +jprt.my.windows.x64.jdk8=windows_x64_6.1
   10.17 +jprt.my.windows.x64.jdk7=windows_x64_6.1
   10.18  jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
   10.19  jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
   10.20  
    11.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Sep 27 13:49:57 2013 -0400
    11.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Sep 27 13:53:43 2013 -0400
    11.3 @@ -105,7 +105,7 @@
    11.4          if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
    11.5        }
    11.6  
    11.7 -      if (UseCompressedKlassPointers) {
    11.8 +      if (UseCompressedClassPointers) {
    11.9          if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
   11.10              src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
   11.11        }
   11.12 @@ -963,7 +963,7 @@
   11.13        case T_METADATA:  __ ld_ptr(base, offset, to_reg->as_register()); break;
   11.14        case T_ADDRESS:
   11.15  #ifdef _LP64
   11.16 -        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedKlassPointers) {
   11.17 +        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
   11.18            __ lduw(base, offset, to_reg->as_register());
   11.19            __ decode_klass_not_null(to_reg->as_register());
   11.20          } else
   11.21 @@ -2208,7 +2208,7 @@
   11.22      // We don't know the array types are compatible
   11.23      if (basic_type != T_OBJECT) {
   11.24        // Simple test for basic type arrays
   11.25 -      if (UseCompressedKlassPointers) {
   11.26 +      if (UseCompressedClassPointers) {
   11.27          // We don't need decode because we just need to compare
   11.28          __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
   11.29          __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
   11.30 @@ -2342,7 +2342,7 @@
   11.31      // but not necessarily exactly of type default_type.
   11.32      Label known_ok, halt;
   11.33      metadata2reg(op->expected_type()->constant_encoding(), tmp);
   11.34 -    if (UseCompressedKlassPointers) {
   11.35 +    if (UseCompressedClassPointers) {
   11.36        // tmp holds the default type. It currently comes uncompressed after the
   11.37        // load of a constant, so encode it.
   11.38        __ encode_klass_not_null(tmp);
    12.1 --- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Fri Sep 27 13:49:57 2013 -0400
    12.2 +++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Fri Sep 27 13:53:43 2013 -0400
    12.3 @@ -186,7 +186,7 @@
    12.4      set((intx)markOopDesc::prototype(), t1);
    12.5    }
    12.6    st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
    12.7 -  if (UseCompressedKlassPointers) {
    12.8 +  if (UseCompressedClassPointers) {
    12.9      // Save klass
   12.10      mov(klass, t1);
   12.11      encode_klass_not_null(t1);
   12.12 @@ -196,7 +196,7 @@
   12.13    }
   12.14    if (len->is_valid()) {
   12.15      st(len, obj, arrayOopDesc::length_offset_in_bytes());
   12.16 -  } else if (UseCompressedKlassPointers) {
   12.17 +  } else if (UseCompressedClassPointers) {
   12.18      // otherwise length is in the class gap
   12.19      store_klass_gap(G0, obj);
   12.20    }
    13.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Sep 27 13:49:57 2013 -0400
    13.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Sep 27 13:53:43 2013 -0400
    13.3 @@ -3911,7 +3911,7 @@
    13.4    // The number of bytes in this code is used by
    13.5    // MachCallDynamicJavaNode::ret_addr_offset()
    13.6    // if this changes, change that.
    13.7 -  if (UseCompressedKlassPointers) {
    13.8 +  if (UseCompressedClassPointers) {
    13.9      lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
   13.10      decode_klass_not_null(klass);
   13.11    } else {
   13.12 @@ -3920,7 +3920,7 @@
   13.13  }
   13.14  
   13.15  void MacroAssembler::store_klass(Register klass, Register dst_oop) {
   13.16 -  if (UseCompressedKlassPointers) {
   13.17 +  if (UseCompressedClassPointers) {
   13.18      assert(dst_oop != klass, "not enough registers");
   13.19      encode_klass_not_null(klass);
   13.20      st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
   13.21 @@ -3930,7 +3930,7 @@
   13.22  }
   13.23  
   13.24  void MacroAssembler::store_klass_gap(Register s, Register d) {
   13.25 -  if (UseCompressedKlassPointers) {
   13.26 +  if (UseCompressedClassPointers) {
   13.27      assert(s != d, "not enough registers");
   13.28      st(s, d, oopDesc::klass_gap_offset_in_bytes());
   13.29    }
   13.30 @@ -4089,7 +4089,7 @@
   13.31  }
   13.32  
   13.33  void MacroAssembler::encode_klass_not_null(Register r) {
   13.34 -  assert (UseCompressedKlassPointers, "must be compressed");
   13.35 +  assert (UseCompressedClassPointers, "must be compressed");
   13.36    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
   13.37    assert(r != G6_heapbase, "bad register choice");
   13.38    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
   13.39 @@ -4105,7 +4105,7 @@
   13.40    if (src == dst) {
   13.41      encode_klass_not_null(src);
   13.42    } else {
   13.43 -    assert (UseCompressedKlassPointers, "must be compressed");
   13.44 +    assert (UseCompressedClassPointers, "must be compressed");
   13.45      assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
   13.46      set((intptr_t)Universe::narrow_klass_base(), dst);
   13.47      sub(src, dst, dst);
   13.48 @@ -4119,7 +4119,7 @@
   13.49  // generated by decode_klass_not_null() and reinit_heapbase().  Hence, if
   13.50  // the instructions they generate change, then this method needs to be updated.
   13.51  int MacroAssembler::instr_size_for_decode_klass_not_null() {
   13.52 -  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
   13.53 +  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
   13.54    // set + add + set
   13.55    int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 +
   13.56      insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
   13.57 @@ -4135,7 +4135,7 @@
   13.58  void  MacroAssembler::decode_klass_not_null(Register r) {
   13.59    // Do not add assert code to this unless you change vtableStubs_sparc.cpp
   13.60    // pd_code_size_limit.
   13.61 -  assert (UseCompressedKlassPointers, "must be compressed");
   13.62 +  assert (UseCompressedClassPointers, "must be compressed");
   13.63    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
   13.64    assert(r != G6_heapbase, "bad register choice");
   13.65    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
   13.66 @@ -4151,7 +4151,7 @@
   13.67    } else {
   13.68      // Do not add assert code to this unless you change vtableStubs_sparc.cpp
   13.69      // pd_code_size_limit.
   13.70 -    assert (UseCompressedKlassPointers, "must be compressed");
   13.71 +    assert (UseCompressedClassPointers, "must be compressed");
   13.72      assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
   13.73      if (Universe::narrow_klass_shift() != 0) {
   13.74        assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
   13.75 @@ -4167,7 +4167,7 @@
   13.76  }
   13.77  
   13.78  void MacroAssembler::reinit_heapbase() {
   13.79 -  if (UseCompressedOops || UseCompressedKlassPointers) {
   13.80 +  if (UseCompressedOops || UseCompressedClassPointers) {
   13.81      if (Universe::heap() != NULL) {
   13.82        set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
   13.83      } else {
    14.1 --- a/src/cpu/sparc/vm/sparc.ad	Fri Sep 27 13:49:57 2013 -0400
    14.2 +++ b/src/cpu/sparc/vm/sparc.ad	Fri Sep 27 13:53:43 2013 -0400
    14.3 @@ -557,7 +557,7 @@
    14.4      int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
    14.5      int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
    14.6      int klass_load_size;
    14.7 -    if (UseCompressedKlassPointers) {
    14.8 +    if (UseCompressedClassPointers) {
    14.9        assert(Universe::heap() != NULL, "java heap should be initialized");
   14.10        klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
   14.11      } else {
   14.12 @@ -1657,7 +1657,7 @@
   14.13  void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
   14.14    st->print_cr("\nUEP:");
   14.15  #ifdef    _LP64
   14.16 -  if (UseCompressedKlassPointers) {
   14.17 +  if (UseCompressedClassPointers) {
   14.18      assert(Universe::heap() != NULL, "java heap should be initialized");
   14.19      st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
   14.20      st->print_cr("\tSET    Universe::narrow_klass_base,R_G6_heap_base");
   14.21 @@ -1897,7 +1897,7 @@
   14.22  
   14.23  bool Matcher::narrow_klass_use_complex_address() {
   14.24    NOT_LP64(ShouldNotCallThis());
   14.25 -  assert(UseCompressedKlassPointers, "only for compressed klass code");
   14.26 +  assert(UseCompressedClassPointers, "only for compressed klass code");
   14.27    return false;
   14.28  }
   14.29  
   14.30 @@ -2561,7 +2561,7 @@
   14.31        int off = __ offset();
   14.32        __ load_klass(O0, G3_scratch);
   14.33        int klass_load_size;
   14.34 -      if (UseCompressedKlassPointers) {
   14.35 +      if (UseCompressedClassPointers) {
   14.36          assert(Universe::heap() != NULL, "java heap should be initialized");
   14.37          klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
   14.38        } else {
    15.1 --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri Sep 27 13:49:57 2013 -0400
    15.2 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Fri Sep 27 13:53:43 2013 -0400
    15.3 @@ -2945,7 +2945,7 @@
    15.4  
    15.5      BLOCK_COMMENT("arraycopy argument klass checks");
    15.6      //  get src->klass()
    15.7 -    if (UseCompressedKlassPointers) {
    15.8 +    if (UseCompressedClassPointers) {
    15.9        __ delayed()->nop(); // ??? not good
   15.10        __ load_klass(src, G3_src_klass);
   15.11      } else {
   15.12 @@ -2980,7 +2980,7 @@
   15.13      // Load 32-bits signed value. Use br() instruction with it to check icc.
   15.14      __ lduw(G3_src_klass, lh_offset, G5_lh);
   15.15  
   15.16 -    if (UseCompressedKlassPointers) {
   15.17 +    if (UseCompressedClassPointers) {
   15.18        __ load_klass(dst, G4_dst_klass);
   15.19      }
   15.20      // Handle objArrays completely differently...
   15.21 @@ -2988,7 +2988,7 @@
   15.22      __ set(objArray_lh, O5_temp);
   15.23      __ cmp(G5_lh,       O5_temp);
   15.24      __ br(Assembler::equal, false, Assembler::pt, L_objArray);
   15.25 -    if (UseCompressedKlassPointers) {
   15.26 +    if (UseCompressedClassPointers) {
   15.27        __ delayed()->nop();
   15.28      } else {
   15.29        __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
    16.1 --- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Fri Sep 27 13:49:57 2013 -0400
    16.2 +++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Fri Sep 27 13:53:43 2013 -0400
    16.3 @@ -52,6 +52,11 @@
    16.4  VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
    16.5    const int sparc_code_length = VtableStub::pd_code_size_limit(true);
    16.6    VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
    16.7 +  // Can be NULL if there is no free space in the code cache.
    16.8 +  if (s == NULL) {
    16.9 +    return NULL;
   16.10 +  }
   16.11 +
   16.12    ResourceMark rm;
   16.13    CodeBuffer cb(s->entry_point(), sparc_code_length);
   16.14    MacroAssembler* masm = new MacroAssembler(&cb);
   16.15 @@ -125,6 +130,11 @@
   16.16  VtableStub* VtableStubs::create_itable_stub(int itable_index) {
   16.17    const int sparc_code_length = VtableStub::pd_code_size_limit(false);
   16.18    VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
   16.19 +  // Can be NULL if there is no free space in the code cache.
   16.20 +  if (s == NULL) {
   16.21 +    return NULL;
   16.22 +  }
   16.23 +
   16.24    ResourceMark rm;
   16.25    CodeBuffer cb(s->entry_point(), sparc_code_length);
   16.26    MacroAssembler* masm = new MacroAssembler(&cb);
   16.27 @@ -218,13 +228,13 @@
   16.28        // ld;ld;ld,jmp,nop
   16.29        const int basic = 5*BytesPerInstWord +
   16.30                          // shift;add for load_klass (only shift with zero heap based)
   16.31 -                        (UseCompressedKlassPointers ?
   16.32 +                        (UseCompressedClassPointers ?
   16.33                            MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   16.34        return basic + slop;
   16.35      } else {
   16.36        const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
   16.37                          // shift;add for load_klass (only shift with zero heap based)
   16.38 -                        (UseCompressedKlassPointers ?
   16.39 +                        (UseCompressedClassPointers ?
   16.40                            MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   16.41        return (basic + slop);
   16.42      }
    17.1 --- a/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Fri Sep 27 13:49:57 2013 -0400
    17.2 +++ b/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Fri Sep 27 13:53:43 2013 -0400
    17.3 @@ -148,7 +148,7 @@
    17.4  
    17.5    static int adjust_reg_range(int range) {
    17.6      // Reduce the number of available regs (to free r12) in case of compressed oops
    17.7 -    if (UseCompressedOops || UseCompressedKlassPointers) return range - 1;
    17.8 +    if (UseCompressedOops || UseCompressedClassPointers) return range - 1;
    17.9      return range;
   17.10    }
   17.11  
    18.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Sep 27 13:49:57 2013 -0400
    18.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Sep 27 13:53:43 2013 -0400
    18.3 @@ -341,7 +341,7 @@
    18.4    Register receiver = FrameMap::receiver_opr->as_register();
    18.5    Register ic_klass = IC_Klass;
    18.6    const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
    18.7 -  const bool do_post_padding = VerifyOops || UseCompressedKlassPointers;
    18.8 +  const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
    18.9    if (!do_post_padding) {
   18.10      // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
   18.11      while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
   18.12 @@ -1263,7 +1263,7 @@
   18.13        break;
   18.14  
   18.15      case T_ADDRESS:
   18.16 -      if (UseCompressedKlassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
   18.17 +      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
   18.18          __ movl(dest->as_register(), from_addr);
   18.19        } else {
   18.20          __ movptr(dest->as_register(), from_addr);
   18.21 @@ -1371,7 +1371,7 @@
   18.22      __ verify_oop(dest->as_register());
   18.23    } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
   18.24  #ifdef _LP64
   18.25 -    if (UseCompressedKlassPointers) {
   18.26 +    if (UseCompressedClassPointers) {
   18.27        __ decode_klass_not_null(dest->as_register());
   18.28      }
   18.29  #endif
   18.30 @@ -1716,7 +1716,7 @@
   18.31    } else if (obj == klass_RInfo) {
   18.32      klass_RInfo = dst;
   18.33    }
   18.34 -  if (k->is_loaded() && !UseCompressedKlassPointers) {
   18.35 +  if (k->is_loaded() && !UseCompressedClassPointers) {
   18.36      select_different_registers(obj, dst, k_RInfo, klass_RInfo);
   18.37    } else {
   18.38      Rtmp1 = op->tmp3()->as_register();
   18.39 @@ -1724,14 +1724,6 @@
   18.40    }
   18.41  
   18.42    assert_different_registers(obj, k_RInfo, klass_RInfo);
   18.43 -  if (!k->is_loaded()) {
   18.44 -    klass2reg_with_patching(k_RInfo, op->info_for_patch());
   18.45 -  } else {
   18.46 -#ifdef _LP64
   18.47 -    __ mov_metadata(k_RInfo, k->constant_encoding());
   18.48 -#endif // _LP64
   18.49 -  }
   18.50 -  assert(obj != k_RInfo, "must be different");
   18.51  
   18.52    __ cmpptr(obj, (int32_t)NULL_WORD);
   18.53    if (op->should_profile()) {
   18.54 @@ -1748,13 +1740,21 @@
   18.55    } else {
   18.56      __ jcc(Assembler::equal, *obj_is_null);
   18.57    }
   18.58 +
   18.59 +  if (!k->is_loaded()) {
   18.60 +    klass2reg_with_patching(k_RInfo, op->info_for_patch());
   18.61 +  } else {
   18.62 +#ifdef _LP64
   18.63 +    __ mov_metadata(k_RInfo, k->constant_encoding());
   18.64 +#endif // _LP64
   18.65 +  }
   18.66    __ verify_oop(obj);
   18.67  
   18.68    if (op->fast_check()) {
   18.69      // get object class
   18.70      // not a safepoint as obj null check happens earlier
   18.71  #ifdef _LP64
   18.72 -    if (UseCompressedKlassPointers) {
   18.73 +    if (UseCompressedClassPointers) {
   18.74        __ load_klass(Rtmp1, obj);
   18.75        __ cmpptr(k_RInfo, Rtmp1);
   18.76      } else {
   18.77 @@ -3294,7 +3294,7 @@
   18.78      // We don't know the array types are compatible
   18.79      if (basic_type != T_OBJECT) {
   18.80        // Simple test for basic type arrays
   18.81 -      if (UseCompressedKlassPointers) {
   18.82 +      if (UseCompressedClassPointers) {
   18.83          __ movl(tmp, src_klass_addr);
   18.84          __ cmpl(tmp, dst_klass_addr);
   18.85        } else {
   18.86 @@ -3456,21 +3456,21 @@
   18.87      Label known_ok, halt;
   18.88      __ mov_metadata(tmp, default_type->constant_encoding());
   18.89  #ifdef _LP64
   18.90 -    if (UseCompressedKlassPointers) {
   18.91 +    if (UseCompressedClassPointers) {
   18.92        __ encode_klass_not_null(tmp);
   18.93      }
   18.94  #endif
   18.95  
   18.96      if (basic_type != T_OBJECT) {
   18.97  
   18.98 -      if (UseCompressedKlassPointers)          __ cmpl(tmp, dst_klass_addr);
   18.99 +      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
  18.100        else                   __ cmpptr(tmp, dst_klass_addr);
  18.101        __ jcc(Assembler::notEqual, halt);
  18.102 -      if (UseCompressedKlassPointers)          __ cmpl(tmp, src_klass_addr);
  18.103 +      if (UseCompressedClassPointers)          __ cmpl(tmp, src_klass_addr);
  18.104        else                   __ cmpptr(tmp, src_klass_addr);
  18.105        __ jcc(Assembler::equal, known_ok);
  18.106      } else {
  18.107 -      if (UseCompressedKlassPointers)          __ cmpl(tmp, dst_klass_addr);
  18.108 +      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
  18.109        else                   __ cmpptr(tmp, dst_klass_addr);
  18.110        __ jcc(Assembler::equal, known_ok);
  18.111        __ cmpptr(src, dst);
    19.1 --- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Fri Sep 27 13:49:57 2013 -0400
    19.2 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Fri Sep 27 13:53:43 2013 -0400
    19.3 @@ -1239,7 +1239,7 @@
    19.4    }
    19.5    LIR_Opr reg = rlock_result(x);
    19.6    LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
    19.7 -  if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
    19.8 +  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    19.9      tmp3 = new_register(objectType);
   19.10    }
   19.11    __ checkcast(reg, obj.result(), x->klass(),
   19.12 @@ -1261,7 +1261,7 @@
   19.13    }
   19.14    obj.load_item();
   19.15    LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
   19.16 -  if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
   19.17 +  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
   19.18      tmp3 = new_register(objectType);
   19.19    }
   19.20    __ instanceof(reg, obj.result(), x->klass(),
    20.1 --- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Fri Sep 27 13:49:57 2013 -0400
    20.2 +++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Fri Sep 27 13:53:43 2013 -0400
    20.3 @@ -157,7 +157,7 @@
    20.4      movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
    20.5    }
    20.6  #ifdef _LP64
    20.7 -  if (UseCompressedKlassPointers) { // Take care not to kill klass
    20.8 +  if (UseCompressedClassPointers) { // Take care not to kill klass
    20.9      movptr(t1, klass);
   20.10      encode_klass_not_null(t1);
   20.11      movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
   20.12 @@ -171,7 +171,7 @@
   20.13      movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
   20.14    }
   20.15  #ifdef _LP64
   20.16 -  else if (UseCompressedKlassPointers) {
   20.17 +  else if (UseCompressedClassPointers) {
   20.18      xorptr(t1, t1);
   20.19      store_klass_gap(obj, t1);
   20.20    }
   20.21 @@ -334,7 +334,7 @@
   20.22    assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
   20.23    int start_offset = offset();
   20.24  
   20.25 -  if (UseCompressedKlassPointers) {
   20.26 +  if (UseCompressedClassPointers) {
   20.27      load_klass(rscratch1, receiver);
   20.28      cmpptr(rscratch1, iCache);
   20.29    } else {
   20.30 @@ -345,7 +345,7 @@
   20.31    jump_cc(Assembler::notEqual,
   20.32            RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
   20.33    const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
   20.34 -  assert(UseCompressedKlassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
   20.35 +  assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
   20.36  }
   20.37  
   20.38  
    21.1 --- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Sep 27 13:49:57 2013 -0400
    21.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Fri Sep 27 13:53:43 2013 -0400
    21.3 @@ -1635,7 +1635,7 @@
    21.4  #ifdef ASSERT
    21.5    // TraceBytecodes does not use r12 but saves it over the call, so don't verify
    21.6    // r12 is the heapbase.
    21.7 -  LP64_ONLY(if ((UseCompressedOops || UseCompressedKlassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
    21.8 +  LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
    21.9  #endif // ASSERT
   21.10  
   21.11    assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
   21.12 @@ -4802,7 +4802,7 @@
   21.13  
   21.14  void MacroAssembler::load_klass(Register dst, Register src) {
   21.15  #ifdef _LP64
   21.16 -  if (UseCompressedKlassPointers) {
   21.17 +  if (UseCompressedClassPointers) {
   21.18      movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
   21.19      decode_klass_not_null(dst);
   21.20    } else
   21.21 @@ -4817,7 +4817,7 @@
   21.22  
   21.23  void MacroAssembler::store_klass(Register dst, Register src) {
   21.24  #ifdef _LP64
   21.25 -  if (UseCompressedKlassPointers) {
   21.26 +  if (UseCompressedClassPointers) {
   21.27      encode_klass_not_null(src);
   21.28      movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
   21.29    } else
   21.30 @@ -4892,7 +4892,7 @@
   21.31  
   21.32  #ifdef _LP64
   21.33  void MacroAssembler::store_klass_gap(Register dst, Register src) {
   21.34 -  if (UseCompressedKlassPointers) {
   21.35 +  if (UseCompressedClassPointers) {
   21.36      // Store to klass gap in destination
   21.37      movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
   21.38    }
   21.39 @@ -5075,7 +5075,7 @@
   21.40  // when (Universe::heap() != NULL).  Hence, if the instructions they
   21.41  // generate change, then this method needs to be updated.
   21.42  int MacroAssembler::instr_size_for_decode_klass_not_null() {
   21.43 -  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
   21.44 +  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
   21.45    // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
   21.46    return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
   21.47  }
   21.48 @@ -5085,7 +5085,7 @@
   21.49  void  MacroAssembler::decode_klass_not_null(Register r) {
   21.50    // Note: it will change flags
   21.51    assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
   21.52 -  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
   21.53 +  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   21.54    assert(r != r12_heapbase, "Decoding a klass in r12");
   21.55    // Cannot assert, unverified entry point counts instructions (see .ad file)
   21.56    // vtableStubs also counts instructions in pd_code_size_limit.
   21.57 @@ -5103,7 +5103,7 @@
   21.58  void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
   21.59    // Note: it will change flags
   21.60    assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
   21.61 -  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
   21.62 +  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   21.63    if (dst == src) {
   21.64      decode_klass_not_null(dst);
   21.65    } else {
   21.66 @@ -5141,7 +5141,7 @@
   21.67  }
   21.68  
   21.69  void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
   21.70 -  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
   21.71 +  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   21.72    assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   21.73    int klass_index = oop_recorder()->find_index(k);
   21.74    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
   21.75 @@ -5149,7 +5149,7 @@
   21.76  }
   21.77  
   21.78  void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
   21.79 -  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
   21.80 +  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   21.81    assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   21.82    int klass_index = oop_recorder()->find_index(k);
   21.83    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
   21.84 @@ -5175,7 +5175,7 @@
   21.85  }
   21.86  
   21.87  void  MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
   21.88 -  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
   21.89 +  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   21.90    assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   21.91    int klass_index = oop_recorder()->find_index(k);
   21.92    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
   21.93 @@ -5183,7 +5183,7 @@
   21.94  }
   21.95  
   21.96  void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
   21.97 -  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
   21.98 +  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   21.99    assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  21.100    int klass_index = oop_recorder()->find_index(k);
  21.101    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  21.102 @@ -5191,7 +5191,7 @@
  21.103  }
  21.104  
  21.105  void MacroAssembler::reinit_heapbase() {
  21.106 -  if (UseCompressedOops || UseCompressedKlassPointers) {
  21.107 +  if (UseCompressedOops || UseCompressedClassPointers) {
  21.108      if (Universe::heap() != NULL) {
  21.109        if (Universe::narrow_oop_base() == NULL) {
  21.110          MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
    22.1 --- a/src/cpu/x86/vm/templateInterpreter_x86.hpp	Fri Sep 27 13:49:57 2013 -0400
    22.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86.hpp	Fri Sep 27 13:53:43 2013 -0400
    22.3 @@ -34,9 +34,9 @@
    22.4    // Run with +PrintInterpreter to get the VM to print out the size.
    22.5    // Max size with JVMTI
    22.6  #ifdef AMD64
    22.7 -  const static int InterpreterCodeSize = 200 * 1024;
    22.8 +  const static int InterpreterCodeSize = 208 * 1024;
    22.9  #else
   22.10 -  const static int InterpreterCodeSize = 168 * 1024;
   22.11 +  const static int InterpreterCodeSize = 176 * 1024;
   22.12  #endif // AMD64
   22.13  
   22.14  #endif // CPU_X86_VM_TEMPLATEINTERPRETER_X86_HPP
    23.1 --- a/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Fri Sep 27 13:49:57 2013 -0400
    23.2 +++ b/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Fri Sep 27 13:53:43 2013 -0400
    23.3 @@ -58,6 +58,11 @@
    23.4  VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
    23.5    const int i486_code_length = VtableStub::pd_code_size_limit(true);
    23.6    VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
    23.7 +  // Can be NULL if there is no free space in the code cache.
    23.8 +  if (s == NULL) {
    23.9 +    return NULL;
   23.10 +  }
   23.11 +
   23.12    ResourceMark rm;
   23.13    CodeBuffer cb(s->entry_point(), i486_code_length);
   23.14    MacroAssembler* masm = new MacroAssembler(&cb);
   23.15 @@ -132,6 +137,11 @@
   23.16    //            add code here, bump the code stub size returned by pd_code_size_limit!
   23.17    const int i486_code_length = VtableStub::pd_code_size_limit(false);
   23.18    VtableStub* s = new(i486_code_length) VtableStub(false, itable_index);
   23.19 +  // Can be NULL if there is no free space in the code cache.
   23.20 +  if (s == NULL) {
   23.21 +    return NULL;
   23.22 +  }
   23.23 +
   23.24    ResourceMark rm;
   23.25    CodeBuffer cb(s->entry_point(), i486_code_length);
   23.26    MacroAssembler* masm = new MacroAssembler(&cb);
    24.1 --- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Fri Sep 27 13:49:57 2013 -0400
    24.2 +++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Fri Sep 27 13:53:43 2013 -0400
    24.3 @@ -49,6 +49,11 @@
    24.4  VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
    24.5    const int amd64_code_length = VtableStub::pd_code_size_limit(true);
    24.6    VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
    24.7 +  // Can be NULL if there is no free space in the code cache.
    24.8 +  if (s == NULL) {
    24.9 +    return NULL;
   24.10 +  }
   24.11 +
   24.12    ResourceMark rm;
   24.13    CodeBuffer cb(s->entry_point(), amd64_code_length);
   24.14    MacroAssembler* masm = new MacroAssembler(&cb);
   24.15 @@ -126,6 +131,11 @@
   24.16    // returned by pd_code_size_limit!
   24.17    const int amd64_code_length = VtableStub::pd_code_size_limit(false);
   24.18    VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
   24.19 +  // Can be NULL if there is no free space in the code cache.
   24.20 +  if (s == NULL) {
   24.21 +    return NULL;
   24.22 +  }
   24.23 +
   24.24    ResourceMark rm;
   24.25    CodeBuffer cb(s->entry_point(), amd64_code_length);
   24.26    MacroAssembler* masm = new MacroAssembler(&cb);
   24.27 @@ -211,11 +221,11 @@
   24.28    if (is_vtable_stub) {
   24.29      // Vtable stub size
   24.30      return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
   24.31 -           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   24.32 +           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   24.33    } else {
   24.34      // Itable stub size
   24.35      return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
   24.36 -           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   24.37 +           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   24.38    }
   24.39    // In order to tune these parameters, run the JVM with VM options
   24.40    // +PrintMiscellaneous and +WizardMode to see information about
    25.1 --- a/src/cpu/x86/vm/x86_64.ad	Fri Sep 27 13:49:57 2013 -0400
    25.2 +++ b/src/cpu/x86/vm/x86_64.ad	Fri Sep 27 13:53:43 2013 -0400
    25.3 @@ -1391,7 +1391,7 @@
    25.4  #ifndef PRODUCT
    25.5  void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
    25.6  {
    25.7 -  if (UseCompressedKlassPointers) {
    25.8 +  if (UseCompressedClassPointers) {
    25.9      st->print_cr("movl    rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
   25.10      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
   25.11      st->print_cr("\tcmpq    rax, rscratch1\t # Inline cache check");
   25.12 @@ -1408,7 +1408,7 @@
   25.13  {
   25.14    MacroAssembler masm(&cbuf);
   25.15    uint insts_size = cbuf.insts_size();
   25.16 -  if (UseCompressedKlassPointers) {
   25.17 +  if (UseCompressedClassPointers) {
   25.18      masm.load_klass(rscratch1, j_rarg0);
   25.19      masm.cmpptr(rax, rscratch1);
   25.20    } else {
   25.21 @@ -1557,7 +1557,7 @@
   25.22  }
   25.23  
   25.24  bool Matcher::narrow_klass_use_complex_address() {
   25.25 -  assert(UseCompressedKlassPointers, "only for compressed klass code");
   25.26 +  assert(UseCompressedClassPointers, "only for compressed klass code");
   25.27    return (LogKlassAlignmentInBytes <= 3);
   25.28  }
   25.29  
    26.1 --- a/src/os/bsd/vm/os_bsd.cpp	Fri Sep 27 13:49:57 2013 -0400
    26.2 +++ b/src/os/bsd/vm/os_bsd.cpp	Fri Sep 27 13:53:43 2013 -0400
    26.3 @@ -3589,8 +3589,6 @@
    26.4  #endif
    26.5    }
    26.6  
    26.7 -  os::large_page_init();
    26.8 -
    26.9    // initialize suspend/resume support - must do this before signal_sets_init()
   26.10    if (SR_initialize() != 0) {
   26.11      perror("SR_initialize failed");
    27.1 --- a/src/os/linux/vm/os_linux.cpp	Fri Sep 27 13:49:57 2013 -0400
    27.2 +++ b/src/os/linux/vm/os_linux.cpp	Fri Sep 27 13:53:43 2013 -0400
    27.3 @@ -131,6 +131,7 @@
    27.4  bool os::Linux::_supports_fast_thread_cpu_time = false;
    27.5  const char * os::Linux::_glibc_version = NULL;
    27.6  const char * os::Linux::_libpthread_version = NULL;
    27.7 +pthread_condattr_t os::Linux::_condattr[1];
    27.8  
    27.9  static jlong initial_time_count=0;
   27.10  
   27.11 @@ -1399,12 +1400,15 @@
   27.12            clock_gettime_func(CLOCK_MONOTONIC, &tp)  == 0) {
   27.13          // yes, monotonic clock is supported
   27.14          _clock_gettime = clock_gettime_func;
   27.15 +        return;
   27.16        } else {
   27.17          // close librt if there is no monotonic clock
   27.18          dlclose(handle);
   27.19        }
   27.20      }
   27.21    }
   27.22 +  warning("No monotonic clock was available - timed services may " \
   27.23 +          "be adversely affected if the time-of-day clock changes");
   27.24  }
   27.25  
   27.26  #ifndef SYS_clock_getres
   27.27 @@ -2165,23 +2169,49 @@
   27.28  }
   27.29  
   27.30  // Try to identify popular distros.
   27.31 -// Most Linux distributions have /etc/XXX-release file, which contains
   27.32 -// the OS version string. Some have more than one /etc/XXX-release file
   27.33 -// (e.g. Mandrake has both /etc/mandrake-release and /etc/redhat-release.),
   27.34 -// so the order is important.
   27.35 +// Most Linux distributions have a /etc/XXX-release file, which contains
   27.36 +// the OS version string. Newer Linux distributions have a /etc/lsb-release
   27.37 +// file that also contains the OS version string. Some have more than one
   27.38 +// /etc/XXX-release file (e.g. Mandrake has both /etc/mandrake-release and
   27.39 +// /etc/redhat-release.), so the order is important.
   27.40 +// Any Linux that is based on Redhat (i.e. Oracle, Mandrake, Sun JDS...) have
   27.41 +// their own specific XXX-release file as well as a redhat-release file.
   27.42 +// Because of this the XXX-release file needs to be searched for before the
   27.43 +// redhat-release file.
   27.44 +// Since Red Hat has a lsb-release file that is not very descriptive the
   27.45 +// search for redhat-release needs to be before lsb-release.
   27.46 +// Since the lsb-release file is the new standard it needs to be searched
   27.47 +// before the older style release files.
   27.48 +// Searching system-release (Red Hat) and os-release (other Linuxes) are a
   27.49 +// next to last resort.  The os-release file is a new standard that contains
   27.50 +// distribution information and the system-release file seems to be an old
   27.51 +// standard that has been replaced by the lsb-release and os-release files.
   27.52 +// Searching for the debian_version file is the last resort.  It contains
   27.53 +// an informative string like "6.0.6" or "wheezy/sid". Because of this
   27.54 +// "Debian " is printed before the contents of the debian_version file.
   27.55  void os::Linux::print_distro_info(outputStream* st) {
   27.56 -  if (!_print_ascii_file("/etc/mandrake-release", st) &&
   27.57 -      !_print_ascii_file("/etc/sun-release", st) &&
   27.58 -      !_print_ascii_file("/etc/redhat-release", st) &&
   27.59 -      !_print_ascii_file("/etc/SuSE-release", st) &&
   27.60 -      !_print_ascii_file("/etc/turbolinux-release", st) &&
   27.61 -      !_print_ascii_file("/etc/gentoo-release", st) &&
   27.62 -      !_print_ascii_file("/etc/debian_version", st) &&
   27.63 -      !_print_ascii_file("/etc/ltib-release", st) &&
   27.64 -      !_print_ascii_file("/etc/angstrom-version", st)) {
   27.65 -      st->print("Linux");
   27.66 -  }
   27.67 -  st->cr();
   27.68 +   if (!_print_ascii_file("/etc/oracle-release", st) &&
   27.69 +       !_print_ascii_file("/etc/mandriva-release", st) &&
   27.70 +       !_print_ascii_file("/etc/mandrake-release", st) &&
   27.71 +       !_print_ascii_file("/etc/sun-release", st) &&
   27.72 +       !_print_ascii_file("/etc/redhat-release", st) &&
   27.73 +       !_print_ascii_file("/etc/lsb-release", st) &&
   27.74 +       !_print_ascii_file("/etc/SuSE-release", st) &&
   27.75 +       !_print_ascii_file("/etc/turbolinux-release", st) &&
   27.76 +       !_print_ascii_file("/etc/gentoo-release", st) &&
   27.77 +       !_print_ascii_file("/etc/ltib-release", st) &&
   27.78 +       !_print_ascii_file("/etc/angstrom-version", st) &&
   27.79 +       !_print_ascii_file("/etc/system-release", st) &&
   27.80 +       !_print_ascii_file("/etc/os-release", st)) {
   27.81 +
   27.82 +       if (file_exists("/etc/debian_version")) {
   27.83 +         st->print("Debian ");
   27.84 +         _print_ascii_file("/etc/debian_version", st);
   27.85 +       } else {
   27.86 +         st->print("Linux");
   27.87 +       }
   27.88 +   }
   27.89 +   st->cr();
   27.90  }
   27.91  
   27.92  void os::Linux::print_libversion_info(outputStream* st) {
   27.93 @@ -4709,6 +4739,26 @@
   27.94  
   27.95    Linux::clock_init();
   27.96    initial_time_count = os::elapsed_counter();
   27.97 +
   27.98 +  // pthread_condattr initialization for monotonic clock
   27.99 +  int status;
  27.100 +  pthread_condattr_t* _condattr = os::Linux::condAttr();
  27.101 +  if ((status = pthread_condattr_init(_condattr)) != 0) {
  27.102 +    fatal(err_msg("pthread_condattr_init: %s", strerror(status)));
  27.103 +  }
  27.104 +  // Only set the clock if CLOCK_MONOTONIC is available
  27.105 +  if (Linux::supports_monotonic_clock()) {
  27.106 +    if ((status = pthread_condattr_setclock(_condattr, CLOCK_MONOTONIC)) != 0) {
  27.107 +      if (status == EINVAL) {
  27.108 +        warning("Unable to use monotonic clock with relative timed-waits" \
  27.109 +                " - changes to the time-of-day clock may have adverse affects");
  27.110 +      } else {
  27.111 +        fatal(err_msg("pthread_condattr_setclock: %s", strerror(status)));
  27.112 +      }
  27.113 +    }
  27.114 +  }
  27.115 +  // else it defaults to CLOCK_REALTIME
  27.116 +
  27.117    pthread_mutex_init(&dl_mutex, NULL);
  27.118  
  27.119    // If the pagesize of the VM is greater than 8K determine the appropriate
  27.120 @@ -4755,8 +4805,6 @@
  27.121  #endif
  27.122    }
  27.123  
  27.124 -  os::large_page_init();
  27.125 -
  27.126    // initialize suspend/resume support - must do this before signal_sets_init()
  27.127    if (SR_initialize() != 0) {
  27.128      perror("SR_initialize failed");
  27.129 @@ -5519,21 +5567,36 @@
  27.130  
  27.131  static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
  27.132    if (millis < 0)  millis = 0;
  27.133 -  struct timeval now;
  27.134 -  int status = gettimeofday(&now, NULL);
  27.135 -  assert(status == 0, "gettimeofday");
  27.136 +
  27.137    jlong seconds = millis / 1000;
  27.138    millis %= 1000;
  27.139    if (seconds > 50000000) { // see man cond_timedwait(3T)
  27.140      seconds = 50000000;
  27.141    }
  27.142 -  abstime->tv_sec = now.tv_sec  + seconds;
  27.143 -  long       usec = now.tv_usec + millis * 1000;
  27.144 -  if (usec >= 1000000) {
  27.145 -    abstime->tv_sec += 1;
  27.146 -    usec -= 1000000;
  27.147 -  }
  27.148 -  abstime->tv_nsec = usec * 1000;
  27.149 +
  27.150 +  if (os::Linux::supports_monotonic_clock()) {
  27.151 +    struct timespec now;
  27.152 +    int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
  27.153 +    assert_status(status == 0, status, "clock_gettime");
  27.154 +    abstime->tv_sec = now.tv_sec  + seconds;
  27.155 +    long nanos = now.tv_nsec + millis * NANOSECS_PER_MILLISEC;
  27.156 +    if (nanos >= NANOSECS_PER_SEC) {
  27.157 +      abstime->tv_sec += 1;
  27.158 +      nanos -= NANOSECS_PER_SEC;
  27.159 +    }
  27.160 +    abstime->tv_nsec = nanos;
  27.161 +  } else {
  27.162 +    struct timeval now;
  27.163 +    int status = gettimeofday(&now, NULL);
  27.164 +    assert(status == 0, "gettimeofday");
  27.165 +    abstime->tv_sec = now.tv_sec  + seconds;
  27.166 +    long usec = now.tv_usec + millis * 1000;
  27.167 +    if (usec >= 1000000) {
  27.168 +      abstime->tv_sec += 1;
  27.169 +      usec -= 1000000;
  27.170 +    }
  27.171 +    abstime->tv_nsec = usec * 1000;
  27.172 +  }
  27.173    return abstime;
  27.174  }
  27.175  
  27.176 @@ -5625,7 +5688,7 @@
  27.177      status = os::Linux::safe_cond_timedwait(_cond, _mutex, &abst);
  27.178      if (status != 0 && WorkAroundNPTLTimedWaitHang) {
  27.179        pthread_cond_destroy (_cond);
  27.180 -      pthread_cond_init (_cond, NULL) ;
  27.181 +      pthread_cond_init (_cond, os::Linux::condAttr()) ;
  27.182      }
  27.183      assert_status(status == 0 || status == EINTR ||
  27.184                    status == ETIME || status == ETIMEDOUT,
  27.185 @@ -5726,32 +5789,50 @@
  27.186  
  27.187  static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  27.188    assert (time > 0, "convertTime");
  27.189 -
  27.190 -  struct timeval now;
  27.191 -  int status = gettimeofday(&now, NULL);
  27.192 -  assert(status == 0, "gettimeofday");
  27.193 -
  27.194 -  time_t max_secs = now.tv_sec + MAX_SECS;
  27.195 -
  27.196 -  if (isAbsolute) {
  27.197 -    jlong secs = time / 1000;
  27.198 -    if (secs > max_secs) {
  27.199 -      absTime->tv_sec = max_secs;
  27.200 +  time_t max_secs = 0;
  27.201 +
  27.202 +  if (!os::Linux::supports_monotonic_clock() || isAbsolute) {
  27.203 +    struct timeval now;
  27.204 +    int status = gettimeofday(&now, NULL);
  27.205 +    assert(status == 0, "gettimeofday");
  27.206 +
  27.207 +    max_secs = now.tv_sec + MAX_SECS;
  27.208 +
  27.209 +    if (isAbsolute) {
  27.210 +      jlong secs = time / 1000;
  27.211 +      if (secs > max_secs) {
  27.212 +        absTime->tv_sec = max_secs;
  27.213 +      } else {
  27.214 +        absTime->tv_sec = secs;
  27.215 +      }
  27.216 +      absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  27.217 +    } else {
  27.218 +      jlong secs = time / NANOSECS_PER_SEC;
  27.219 +      if (secs >= MAX_SECS) {
  27.220 +        absTime->tv_sec = max_secs;
  27.221 +        absTime->tv_nsec = 0;
  27.222 +      } else {
  27.223 +        absTime->tv_sec = now.tv_sec + secs;
  27.224 +        absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
  27.225 +        if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
  27.226 +          absTime->tv_nsec -= NANOSECS_PER_SEC;
  27.227 +          ++absTime->tv_sec; // note: this must be <= max_secs
  27.228 +        }
  27.229 +      }
  27.230      }
  27.231 -    else {
  27.232 -      absTime->tv_sec = secs;
  27.233 -    }
  27.234 -    absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  27.235 -  }
  27.236 -  else {
  27.237 +  } else {
  27.238 +    // must be relative using monotonic clock
  27.239 +    struct timespec now;
  27.240 +    int status = os::Linux::clock_gettime(CLOCK_MONOTONIC, &now);
  27.241 +    assert_status(status == 0, status, "clock_gettime");
  27.242 +    max_secs = now.tv_sec + MAX_SECS;
  27.243      jlong secs = time / NANOSECS_PER_SEC;
  27.244      if (secs >= MAX_SECS) {
  27.245        absTime->tv_sec = max_secs;
  27.246        absTime->tv_nsec = 0;
  27.247 -    }
  27.248 -    else {
  27.249 +    } else {
  27.250        absTime->tv_sec = now.tv_sec + secs;
  27.251 -      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
  27.252 +      absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_nsec;
  27.253        if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
  27.254          absTime->tv_nsec -= NANOSECS_PER_SEC;
  27.255          ++absTime->tv_sec; // note: this must be <= max_secs
  27.256 @@ -5831,15 +5912,19 @@
  27.257    jt->set_suspend_equivalent();
  27.258    // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
  27.259  
  27.260 +  assert(_cur_index == -1, "invariant");
  27.261    if (time == 0) {
  27.262 -    status = pthread_cond_wait (_cond, _mutex) ;
  27.263 +    _cur_index = REL_INDEX; // arbitrary choice when not timed
  27.264 +    status = pthread_cond_wait (&_cond[_cur_index], _mutex) ;
  27.265    } else {
  27.266 -    status = os::Linux::safe_cond_timedwait (_cond, _mutex, &absTime) ;
  27.267 +    _cur_index = isAbsolute ? ABS_INDEX : REL_INDEX;
  27.268 +    status = os::Linux::safe_cond_timedwait (&_cond[_cur_index], _mutex, &absTime) ;
  27.269      if (status != 0 && WorkAroundNPTLTimedWaitHang) {
  27.270 -      pthread_cond_destroy (_cond) ;
  27.271 -      pthread_cond_init    (_cond, NULL);
  27.272 +      pthread_cond_destroy (&_cond[_cur_index]) ;
  27.273 +      pthread_cond_init    (&_cond[_cur_index], isAbsolute ? NULL : os::Linux::condAttr());
  27.274      }
  27.275    }
  27.276 +  _cur_index = -1;
  27.277    assert_status(status == 0 || status == EINTR ||
  27.278                  status == ETIME || status == ETIMEDOUT,
  27.279                  status, "cond_timedwait");
  27.280 @@ -5868,17 +5953,24 @@
  27.281    s = _counter;
  27.282    _counter = 1;
  27.283    if (s < 1) {
  27.284 -     if (WorkAroundNPTLTimedWaitHang) {
  27.285 -        status = pthread_cond_signal (_cond) ;
  27.286 -        assert (status == 0, "invariant") ;
  27.287 +    // thread might be parked
  27.288 +    if (_cur_index != -1) {
  27.289 +      // thread is definitely parked
  27.290 +      if (WorkAroundNPTLTimedWaitHang) {
  27.291 +        status = pthread_cond_signal (&_cond[_cur_index]);
  27.292 +        assert (status == 0, "invariant");
  27.293          status = pthread_mutex_unlock(_mutex);
  27.294 -        assert (status == 0, "invariant") ;
  27.295 -     } else {
  27.296 +        assert (status == 0, "invariant");
  27.297 +      } else {
  27.298          status = pthread_mutex_unlock(_mutex);
  27.299 -        assert (status == 0, "invariant") ;
  27.300 -        status = pthread_cond_signal (_cond) ;
  27.301 -        assert (status == 0, "invariant") ;
  27.302 -     }
  27.303 +        assert (status == 0, "invariant");
  27.304 +        status = pthread_cond_signal (&_cond[_cur_index]);
  27.305 +        assert (status == 0, "invariant");
  27.306 +      }
  27.307 +    } else {
  27.308 +      pthread_mutex_unlock(_mutex);
  27.309 +      assert (status == 0, "invariant") ;
  27.310 +    }
  27.311    } else {
  27.312      pthread_mutex_unlock(_mutex);
  27.313      assert (status == 0, "invariant") ;
    28.1 --- a/src/os/linux/vm/os_linux.hpp	Fri Sep 27 13:49:57 2013 -0400
    28.2 +++ b/src/os/linux/vm/os_linux.hpp	Fri Sep 27 13:53:43 2013 -0400
    28.3 @@ -221,6 +221,13 @@
    28.4  
    28.5    static jlong fast_thread_cpu_time(clockid_t clockid);
    28.6  
    28.7 +  // pthread_cond clock suppport
    28.8 +  private:
    28.9 +  static pthread_condattr_t _condattr[1];
   28.10 +
   28.11 +  public:
   28.12 +  static pthread_condattr_t* condAttr() { return _condattr; }
   28.13 +
   28.14    // Stack repair handling
   28.15  
   28.16    // none present
   28.17 @@ -295,7 +302,7 @@
   28.18    public:
   28.19      PlatformEvent() {
   28.20        int status;
   28.21 -      status = pthread_cond_init (_cond, NULL);
   28.22 +      status = pthread_cond_init (_cond, os::Linux::condAttr());
   28.23        assert_status(status == 0, status, "cond_init");
   28.24        status = pthread_mutex_init (_mutex, NULL);
   28.25        assert_status(status == 0, status, "mutex_init");
   28.26 @@ -310,14 +317,19 @@
   28.27      void park () ;
   28.28      void unpark () ;
   28.29      int  TryPark () ;
   28.30 -    int  park (jlong millis) ;
   28.31 +    int  park (jlong millis) ; // relative timed-wait only
   28.32      void SetAssociation (Thread * a) { _Assoc = a ; }
   28.33  } ;
   28.34  
   28.35  class PlatformParker : public CHeapObj<mtInternal> {
   28.36    protected:
   28.37 +    enum {
   28.38 +        REL_INDEX = 0,
   28.39 +        ABS_INDEX = 1
   28.40 +    };
   28.41 +    int _cur_index;  // which cond is in use: -1, 0, 1
   28.42      pthread_mutex_t _mutex [1] ;
   28.43 -    pthread_cond_t  _cond  [1] ;
   28.44 +    pthread_cond_t  _cond  [2] ; // one for relative times and one for abs.
   28.45  
   28.46    public:       // TODO-FIXME: make dtor private
   28.47      ~PlatformParker() { guarantee (0, "invariant") ; }
   28.48 @@ -325,10 +337,13 @@
   28.49    public:
   28.50      PlatformParker() {
   28.51        int status;
   28.52 -      status = pthread_cond_init (_cond, NULL);
   28.53 -      assert_status(status == 0, status, "cond_init");
   28.54 +      status = pthread_cond_init (&_cond[REL_INDEX], os::Linux::condAttr());
   28.55 +      assert_status(status == 0, status, "cond_init rel");
   28.56 +      status = pthread_cond_init (&_cond[ABS_INDEX], NULL);
   28.57 +      assert_status(status == 0, status, "cond_init abs");
   28.58        status = pthread_mutex_init (_mutex, NULL);
   28.59        assert_status(status == 0, status, "mutex_init");
   28.60 +      _cur_index = -1; // mark as unused
   28.61      }
   28.62  };
   28.63  
    29.1 --- a/src/os/solaris/vm/os_solaris.cpp	Fri Sep 27 13:49:57 2013 -0400
    29.2 +++ b/src/os/solaris/vm/os_solaris.cpp	Fri Sep 27 13:53:43 2013 -0400
    29.3 @@ -5178,9 +5178,7 @@
    29.4      if(Verbose && PrintMiscellaneous)
    29.5        tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
    29.6  #endif
    29.7 -}
    29.8 -
    29.9 -  os::large_page_init();
   29.10 +  }
   29.11  
   29.12    // Check minimum allowable stack size for thread creation and to initialize
   29.13    // the java system classes, including StackOverflowError - depends on page
    30.1 --- a/src/os/windows/vm/decoder_windows.cpp	Fri Sep 27 13:49:57 2013 -0400
    30.2 +++ b/src/os/windows/vm/decoder_windows.cpp	Fri Sep 27 13:53:43 2013 -0400
    30.3 @@ -32,7 +32,11 @@
    30.4    _can_decode_in_vm = false;
    30.5    _pfnSymGetSymFromAddr64 = NULL;
    30.6    _pfnUndecorateSymbolName = NULL;
    30.7 -
    30.8 +#ifdef AMD64
    30.9 +  _pfnStackWalk64 = NULL;
   30.10 +  _pfnSymFunctionTableAccess64 = NULL;
   30.11 +  _pfnSymGetModuleBase64 = NULL;
   30.12 +#endif
   30.13    _decoder_status = no_error;
   30.14    initialize();
   30.15  }
   30.16 @@ -53,14 +57,24 @@
   30.17      _pfnUndecorateSymbolName = (pfn_UndecorateSymbolName)::GetProcAddress(handle, "UnDecorateSymbolName");
   30.18  
   30.19      if (_pfnSymSetOptions == NULL || _pfnSymInitialize == NULL || _pfnSymGetSymFromAddr64 == NULL) {
   30.20 -      _pfnSymGetSymFromAddr64 = NULL;
   30.21 -      _pfnUndecorateSymbolName = NULL;
   30.22 -      ::FreeLibrary(handle);
   30.23 -      _dbghelp_handle = NULL;
   30.24 +      uninitialize();
   30.25        _decoder_status = helper_func_error;
   30.26        return;
   30.27      }
   30.28  
   30.29 +#ifdef AMD64
   30.30 +    _pfnStackWalk64 = (pfn_StackWalk64)::GetProcAddress(handle, "StackWalk64");
   30.31 +    _pfnSymFunctionTableAccess64 = (pfn_SymFunctionTableAccess64)::GetProcAddress(handle, "SymFunctionTableAccess64");
   30.32 +    _pfnSymGetModuleBase64 = (pfn_SymGetModuleBase64)::GetProcAddress(handle, "SymGetModuleBase64");
   30.33 +    if (_pfnStackWalk64 == NULL || _pfnSymFunctionTableAccess64 == NULL || _pfnSymGetModuleBase64 == NULL) {
   30.34 +      // We can't call StackWalk64 to walk the stack, but we are still
   30.35 +      // able to decode the symbols. Let's limp on.
   30.36 +      _pfnStackWalk64 = NULL;
   30.37 +      _pfnSymFunctionTableAccess64 = NULL;
   30.38 +      _pfnSymGetModuleBase64 = NULL;
   30.39 +    }
   30.40 +#endif
   30.41 +
   30.42      HANDLE hProcess = ::GetCurrentProcess();
   30.43      _pfnSymSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS | SYMOPT_EXACT_SYMBOLS);
   30.44      if (!_pfnSymInitialize(hProcess, NULL, TRUE)) {
   30.45 @@ -156,6 +170,11 @@
   30.46  void WindowsDecoder::uninitialize() {
   30.47    _pfnSymGetSymFromAddr64 = NULL;
   30.48    _pfnUndecorateSymbolName = NULL;
   30.49 +#ifdef AMD64
   30.50 +  _pfnStackWalk64 = NULL;
   30.51 +  _pfnSymFunctionTableAccess64 = NULL;
   30.52 +  _pfnSymGetModuleBase64 = NULL;
   30.53 +#endif
   30.54    if (_dbghelp_handle != NULL) {
   30.55      ::FreeLibrary(_dbghelp_handle);
   30.56    }
   30.57 @@ -195,3 +214,65 @@
   30.58           _pfnUndecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE);
   30.59  }
   30.60  
   30.61 +#ifdef AMD64
   30.62 +BOOL WindowsDbgHelp::StackWalk64(DWORD MachineType,
   30.63 +                                 HANDLE hProcess,
   30.64 +                                 HANDLE hThread,
   30.65 +                                 LPSTACKFRAME64 StackFrame,
   30.66 +                                 PVOID ContextRecord,
   30.67 +                                 PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
   30.68 +                                 PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
   30.69 +                                 PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
   30.70 +                                 PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress) {
   30.71 +  DecoderLocker locker;
   30.72 +  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
   30.73 +
   30.74 +  if (!wd->has_error() && wd->_pfnStackWalk64) {
   30.75 +    return wd->_pfnStackWalk64(MachineType,
   30.76 +                               hProcess,
   30.77 +                               hThread,
   30.78 +                               StackFrame,
   30.79 +                               ContextRecord,
   30.80 +                               ReadMemoryRoutine,
   30.81 +                               FunctionTableAccessRoutine,
   30.82 +                               GetModuleBaseRoutine,
   30.83 +                               TranslateAddress);
   30.84 +  } else {
   30.85 +    return false;
   30.86 +  }
   30.87 +}
   30.88 +
   30.89 +PVOID WindowsDbgHelp::SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase) {
   30.90 +  DecoderLocker locker;
   30.91 +  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
   30.92 +
   30.93 +  if (!wd->has_error() && wd->_pfnSymFunctionTableAccess64) {
   30.94 +    return wd->_pfnSymFunctionTableAccess64(hProcess, AddrBase);
   30.95 +  } else {
   30.96 +    return NULL;
   30.97 +  }
   30.98 +}
   30.99 +
  30.100 +pfn_SymFunctionTableAccess64 WindowsDbgHelp::pfnSymFunctionTableAccess64() {
  30.101 +  DecoderLocker locker;
  30.102 +  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
  30.103 +
  30.104 +  if (!wd->has_error()) {
  30.105 +    return wd->_pfnSymFunctionTableAccess64;
  30.106 +  } else {
  30.107 +    return NULL;
  30.108 +  }
  30.109 +}
  30.110 +
  30.111 +pfn_SymGetModuleBase64 WindowsDbgHelp::pfnSymGetModuleBase64() {
  30.112 +  DecoderLocker locker;
  30.113 +  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
  30.114 +
  30.115 +  if (!wd->has_error()) {
  30.116 +    return wd->_pfnSymGetModuleBase64;
  30.117 +  } else {
  30.118 +    return NULL;
  30.119 +  }
  30.120 +}
  30.121 +
  30.122 +#endif // AMD64
    31.1 --- a/src/os/windows/vm/decoder_windows.hpp	Fri Sep 27 13:49:57 2013 -0400
    31.2 +++ b/src/os/windows/vm/decoder_windows.hpp	Fri Sep 27 13:53:43 2013 -0400
    31.3 @@ -38,6 +38,20 @@
    31.4  typedef BOOL  (WINAPI *pfn_SymSetSearchPath)(HANDLE, PCTSTR);
    31.5  typedef BOOL  (WINAPI *pfn_SymGetSearchPath)(HANDLE, PTSTR, int);
    31.6  
    31.7 +#ifdef AMD64
    31.8 +typedef BOOL  (WINAPI *pfn_StackWalk64)(DWORD MachineType,
    31.9 +                                        HANDLE hProcess,
   31.10 +                                        HANDLE hThread,
   31.11 +                                        LPSTACKFRAME64 StackFrame,
   31.12 +                                        PVOID ContextRecord,
   31.13 +                                        PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
   31.14 +                                        PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
   31.15 +                                        PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
   31.16 +                                        PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
   31.17 +typedef PVOID (WINAPI *pfn_SymFunctionTableAccess64)(HANDLE hProcess, DWORD64 AddrBase);
   31.18 +typedef DWORD64 (WINAPI *pfn_SymGetModuleBase64)(HANDLE hProcess, DWORD64 dwAddr);
   31.19 +#endif
   31.20 +
   31.21  class WindowsDecoder : public AbstractDecoder {
   31.22  
   31.23  public:
   31.24 @@ -61,7 +75,34 @@
   31.25    bool                      _can_decode_in_vm;
   31.26    pfn_SymGetSymFromAddr64   _pfnSymGetSymFromAddr64;
   31.27    pfn_UndecorateSymbolName  _pfnUndecorateSymbolName;
   31.28 +#ifdef AMD64
   31.29 +  pfn_StackWalk64              _pfnStackWalk64;
   31.30 +  pfn_SymFunctionTableAccess64 _pfnSymFunctionTableAccess64;
   31.31 +  pfn_SymGetModuleBase64       _pfnSymGetModuleBase64;
   31.32 +
   31.33 +  friend class WindowsDbgHelp;
   31.34 +#endif
   31.35  };
   31.36  
   31.37 +#ifdef AMD64
   31.38 +// TODO: refactor and move the handling of dbghelp.dll outside of Decoder
   31.39 +class WindowsDbgHelp : public Decoder {
   31.40 +public:
   31.41 +  static BOOL StackWalk64(DWORD MachineType,
   31.42 +                          HANDLE hProcess,
   31.43 +                          HANDLE hThread,
   31.44 +                          LPSTACKFRAME64 StackFrame,
   31.45 +                          PVOID ContextRecord,
   31.46 +                          PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
   31.47 +                          PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
   31.48 +                          PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
   31.49 +                          PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
   31.50 +  static PVOID SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase);
   31.51 +
   31.52 +  static pfn_SymFunctionTableAccess64 pfnSymFunctionTableAccess64();
   31.53 +  static pfn_SymGetModuleBase64       pfnSymGetModuleBase64();
   31.54 +};
   31.55 +#endif
   31.56 +
   31.57  #endif // OS_WINDOWS_VM_DECODER_WINDOWS_HPP
   31.58  
    32.1 --- a/src/os/windows/vm/os_windows.cpp	Fri Sep 27 13:49:57 2013 -0400
    32.2 +++ b/src/os/windows/vm/os_windows.cpp	Fri Sep 27 13:53:43 2013 -0400
    32.3 @@ -3189,9 +3189,12 @@
    32.4      return p_buf;
    32.5  
    32.6    } else {
    32.7 +    if (TracePageSizes && Verbose) {
    32.8 +       tty->print_cr("Reserving large pages in a single large chunk.");
    32.9 +    }
   32.10      // normal policy just allocate it all at once
   32.11      DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
   32.12 -    char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
   32.13 +    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
   32.14      if (res != NULL) {
   32.15        address pc = CALLER_PC;
   32.16        MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
   32.17 @@ -3917,8 +3920,6 @@
   32.18  #endif
   32.19    }
   32.20  
   32.21 -  os::large_page_init();
   32.22 -
   32.23    // Setup Windows Exceptions
   32.24  
   32.25    // for debugging float code generation bugs
   32.26 @@ -5714,7 +5715,66 @@
   32.27  #endif
   32.28  
   32.29  #ifndef PRODUCT
   32.30 +
   32.31 +// test the code path in reserve_memory_special() that tries to allocate memory in a single
   32.32 +// contiguous memory block at a particular address.
   32.33 +// The test first tries to find a good approximate address to allocate at by using the same
   32.34 +// method to allocate some memory at any address. The test then tries to allocate memory in
   32.35 +// the vicinity (not directly after it to avoid possible by-chance use of that location)
   32.36 +// This is of course only some dodgy assumption, there is no guarantee that the vicinity of
   32.37 +// the previously allocated memory is available for allocation. The only actual failure
   32.38 +// that is reported is when the test tries to allocate at a particular location but gets a
   32.39 +// different valid one. A NULL return value at this point is not considered an error but may
   32.40 +// be legitimate.
   32.41 +// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
   32.42  void TestReserveMemorySpecial_test() {
   32.43 -  // No tests available for this platform
   32.44 -}
   32.45 -#endif
   32.46 +  if (!UseLargePages) {
   32.47 +    if (VerboseInternalVMTests) {
   32.48 +      gclog_or_tty->print("Skipping test because large pages are disabled");
   32.49 +    }
   32.50 +    return;
   32.51 +  }
   32.52 +  // save current value of globals
   32.53 +  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
   32.54 +  bool old_use_numa_interleaving = UseNUMAInterleaving;
   32.55 +
   32.56 +  // set globals to make sure we hit the correct code path
   32.57 +  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
   32.58 +
   32.59 +  // do an allocation at an address selected by the OS to get a good one.
   32.60 +  const size_t large_allocation_size = os::large_page_size() * 4;
   32.61 +  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
   32.62 +  if (result == NULL) {
   32.63 +    if (VerboseInternalVMTests) {
   32.64 +      gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.",
   32.65 +        large_allocation_size);
   32.66 +    }
   32.67 +  } else {
   32.68 +    os::release_memory_special(result, large_allocation_size);
   32.69 +
   32.70 +    // allocate another page within the recently allocated memory area which seems to be a good location. At least
   32.71 +    // we managed to get it once.
   32.72 +    const size_t expected_allocation_size = os::large_page_size();
   32.73 +    char* expected_location = result + os::large_page_size();
   32.74 +    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
   32.75 +    if (actual_location == NULL) {
   32.76 +      if (VerboseInternalVMTests) {
   32.77 +        gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.",
   32.78 +          expected_location, large_allocation_size);
   32.79 +      }
   32.80 +    } else {
   32.81 +      // release memory
   32.82 +      os::release_memory_special(actual_location, expected_allocation_size);
   32.83 +      // only now check, after releasing any memory to avoid any leaks.
   32.84 +      assert(actual_location == expected_location,
   32.85 +        err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
   32.86 +          expected_location, expected_allocation_size, actual_location));
   32.87 +    }
   32.88 +  }
   32.89 +
   32.90 +  // restore globals
   32.91 +  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
   32.92 +  UseNUMAInterleaving = old_use_numa_interleaving;
   32.93 +}
   32.94 +#endif // PRODUCT
   32.95 +
    33.1 --- a/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Fri Sep 27 13:49:57 2013 -0400
    33.2 +++ b/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Fri Sep 27 13:53:43 2013 -0400
    33.3 @@ -35,7 +35,9 @@
    33.4  
    33.5  // Used on 64 bit platforms for UseCompressedOops base address
    33.6  #ifdef _LP64
    33.7 -define_pd_global(uintx, HeapBaseMinAddress,      CONST64(4)*G);
    33.8 +// use 6G as default base address because by default the OS maps the application
    33.9 +// to 4G on Solaris-Sparc. This leaves at least 2G for the native heap.
   33.10 +define_pd_global(uintx, HeapBaseMinAddress,      CONST64(6)*G);
   33.11  #else
   33.12  define_pd_global(uintx, HeapBaseMinAddress,      2*G);
   33.13  #endif
    34.1 --- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Fri Sep 27 13:49:57 2013 -0400
    34.2 +++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Fri Sep 27 13:53:43 2013 -0400
    34.3 @@ -29,6 +29,7 @@
    34.4  #include "classfile/vmSymbols.hpp"
    34.5  #include "code/icBuffer.hpp"
    34.6  #include "code/vtableStubs.hpp"
    34.7 +#include "decoder_windows.hpp"
    34.8  #include "interpreter/interpreter.hpp"
    34.9  #include "jvm_windows.h"
   34.10  #include "memory/allocation.inline.hpp"
   34.11 @@ -327,6 +328,94 @@
   34.12  
   34.13  cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;
   34.14  
   34.15 +#ifdef AMD64
   34.16 +/*
   34.17 + * Windows/x64 does not use stack frames the way expected by Java:
   34.18 + * [1] in most cases, there is no frame pointer. All locals are addressed via RSP
   34.19 + * [2] in rare cases, when alloca() is used, a frame pointer is used, but this may
   34.20 + *     not be RBP.
   34.21 + * See http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
   34.22 + *
   34.23 + * So it's not possible to print the native stack using the
   34.24 + *     while (...) {...  fr = os::get_sender_for_C_frame(&fr); }
   34.25 + * loop in vmError.cpp. We need to roll our own loop.
   34.26 + */
   34.27 +bool os::platform_print_native_stack(outputStream* st, void* context,
   34.28 +                                     char *buf, int buf_size)
   34.29 +{
   34.30 +  CONTEXT ctx;
   34.31 +  if (context != NULL) {
   34.32 +    memcpy(&ctx, context, sizeof(ctx));
   34.33 +  } else {
   34.34 +    RtlCaptureContext(&ctx);
   34.35 +  }
   34.36 +
   34.37 +  st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)");
   34.38 +
   34.39 +  STACKFRAME stk;
   34.40 +  memset(&stk, 0, sizeof(stk));
   34.41 +  stk.AddrStack.Offset    = ctx.Rsp;
   34.42 +  stk.AddrStack.Mode      = AddrModeFlat;
   34.43 +  stk.AddrFrame.Offset    = ctx.Rbp;
   34.44 +  stk.AddrFrame.Mode      = AddrModeFlat;
   34.45 +  stk.AddrPC.Offset       = ctx.Rip;
   34.46 +  stk.AddrPC.Mode         = AddrModeFlat;
   34.47 +
   34.48 +  int count = 0;
   34.49 +  address lastpc = 0;
   34.50 +  while (count++ < StackPrintLimit) {
   34.51 +    intptr_t* sp = (intptr_t*)stk.AddrStack.Offset;
   34.52 +    intptr_t* fp = (intptr_t*)stk.AddrFrame.Offset; // NOT necessarily the same as ctx.Rbp!
   34.53 +    address pc = (address)stk.AddrPC.Offset;
   34.54 +
   34.55 +    if (pc != NULL && sp != NULL && fp != NULL) {
   34.56 +      if (count == 2 && lastpc == pc) {
   34.57 +        // Skip it -- StackWalk64() may return the same PC
   34.58 +        // (but different SP) on the first try.
   34.59 +      } else {
   34.60 +        // Don't try to create a frame(sp, fp, pc) -- on WinX64, stk.AddrFrame
   34.61 +        // may not contain what Java expects, and may cause the frame() constructor
   34.62 +        // to crash. Let's just print out the symbolic address.
   34.63 +        frame::print_C_frame(st, buf, buf_size, pc);
   34.64 +        st->cr();
   34.65 +      }
   34.66 +      lastpc = pc;
   34.67 +    } else {
   34.68 +      break;
   34.69 +    }
   34.70 +
   34.71 +    PVOID p = WindowsDbgHelp::SymFunctionTableAccess64(GetCurrentProcess(), stk.AddrPC.Offset);
   34.72 +    if (!p) {
   34.73 +      // StackWalk64() can't handle this PC. Calling StackWalk64 again may cause crash.
   34.74 +      break;
   34.75 +    }
   34.76 +
   34.77 +    BOOL result = WindowsDbgHelp::StackWalk64(
   34.78 +        IMAGE_FILE_MACHINE_AMD64,  // __in      DWORD MachineType,
   34.79 +        GetCurrentProcess(),       // __in      HANDLE hProcess,
   34.80 +        GetCurrentThread(),        // __in      HANDLE hThread,
   34.81 +        &stk,                      // __inout   LP STACKFRAME64 StackFrame,
   34.82 +        &ctx,                      // __inout   PVOID ContextRecord,
   34.83 +        NULL,                      // __in_opt  PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
   34.84 +        WindowsDbgHelp::pfnSymFunctionTableAccess64(),
   34.85 +                                   // __in_opt  PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
   34.86 +        WindowsDbgHelp::pfnSymGetModuleBase64(),
   34.87 +                                   // __in_opt  PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
   34.88 +        NULL);                     // __in_opt  PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress
   34.89 +
   34.90 +    if (!result) {
   34.91 +      break;
   34.92 +    }
   34.93 +  }
   34.94 +  if (count > StackPrintLimit) {
   34.95 +    st->print_cr("...<more frames>...");
   34.96 +  }
   34.97 +  st->cr();
   34.98 +
   34.99 +  return true;
  34.100 +}
  34.101 +#endif // AMD64
  34.102 +
  34.103  ExtendedPC os::fetch_frame_from_context(void* ucVoid,
  34.104                      intptr_t** ret_sp, intptr_t** ret_fp) {
  34.105  
  34.106 @@ -401,6 +490,9 @@
  34.107                                       StubRoutines::x86::get_previous_fp_entry());
  34.108    if (func == NULL) return frame();
  34.109    intptr_t* fp = (*func)();
  34.110 +  if (fp == NULL) {
  34.111 +    return frame();
  34.112 +  }
  34.113  #else
  34.114    intptr_t* fp = _get_previous_fp();
  34.115  #endif // AMD64
    35.1 --- a/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Fri Sep 27 13:49:57 2013 -0400
    35.2 +++ b/src/os_cpu/windows_x86/vm/os_windows_x86.hpp	Fri Sep 27 13:53:43 2013 -0400
    35.3 @@ -62,4 +62,10 @@
    35.4  
    35.5    static bool      register_code_area(char *low, char *high);
    35.6  
    35.7 +#ifdef AMD64
    35.8 +#define PLATFORM_PRINT_NATIVE_STACK 1
    35.9 +static bool platform_print_native_stack(outputStream* st, void* context,
   35.10 +                                        char *buf, int buf_size);
   35.11 +#endif
   35.12 +
   35.13  #endif // OS_CPU_WINDOWS_X86_VM_OS_WINDOWS_X86_HPP
    36.1 --- a/src/share/tools/LogCompilation/README	Fri Sep 27 13:49:57 2013 -0400
    36.2 +++ b/src/share/tools/LogCompilation/README	Fri Sep 27 13:53:43 2013 -0400
    36.3 @@ -4,14 +4,14 @@
    36.4  requires a 1.5 JDK to build and simply typing make should build it.
    36.5  
    36.6  It produces a jar file, logc.jar, that can be run on the
    36.7 -hotspot.log from LogCompilation output like this:
    36.8 +HotSpot log (by default, hotspot_pid{pid}.log) from LogCompilation output like this:
    36.9  
   36.10 -  java -jar logc.jar hotspot.log
   36.11 +  java -jar logc.jar hotspot_pid1234.log
   36.12  
   36.13  This will produce something like the normal PrintCompilation output.
   36.14  Adding the -i option with also report inlining like PrintInlining.
   36.15  
   36.16 -More information about the LogCompilation output can be found at 
   36.17 +More information about the LogCompilation output can be found at
   36.18  
   36.19  https://wikis.oracle.com/display/HotSpotInternals/LogCompilation+overview
   36.20  https://wikis.oracle.com/display/HotSpotInternals/PrintCompilation
    37.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Sep 27 13:49:57 2013 -0400
    37.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Sep 27 13:53:43 2013 -0400
    37.3 @@ -4219,7 +4219,9 @@
    37.4      }
    37.5    }
    37.6  
    37.7 -  if (!PrintInlining)  return;
    37.8 +  if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
    37.9 +    return;
   37.10 +  }
   37.11    CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
   37.12    if (success && CIPrintMethodCodes) {
   37.13      callee->print_codes();
    38.1 --- a/src/share/vm/c1/c1_Runtime1.cpp	Fri Sep 27 13:49:57 2013 -0400
    38.2 +++ b/src/share/vm/c1/c1_Runtime1.cpp	Fri Sep 27 13:53:43 2013 -0400
    38.3 @@ -709,10 +709,10 @@
    38.4    Bytecodes::Code code       = field_access.code();
    38.5  
    38.6    // We must load class, initialize class and resolvethe field
    38.7 -  FieldAccessInfo result; // initialize class if needed
    38.8 +  fieldDescriptor result; // initialize class if needed
    38.9    constantPoolHandle constants(THREAD, caller->constants());
   38.10 -  LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK_NULL);
   38.11 -  return result.klass()();
   38.12 +  LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);
   38.13 +  return result.field_holder();
   38.14  }
   38.15  
   38.16  
   38.17 @@ -826,11 +826,11 @@
   38.18    if (stub_id == Runtime1::access_field_patching_id) {
   38.19  
   38.20      Bytecode_field field_access(caller_method, bci);
   38.21 -    FieldAccessInfo result; // initialize class if needed
   38.22 +    fieldDescriptor result; // initialize class if needed
   38.23      Bytecodes::Code code = field_access.code();
   38.24      constantPoolHandle constants(THREAD, caller_method->constants());
   38.25 -    LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK);
   38.26 -    patch_field_offset = result.field_offset();
   38.27 +    LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
   38.28 +    patch_field_offset = result.offset();
   38.29  
   38.30      // If we're patching a field which is volatile then at compile it
   38.31      // must not have been know to be volatile, so the generated code
    39.1 --- a/src/share/vm/ci/ciField.cpp	Fri Sep 27 13:49:57 2013 -0400
    39.2 +++ b/src/share/vm/ci/ciField.cpp	Fri Sep 27 13:53:43 2013 -0400
    39.3 @@ -1,5 +1,5 @@
    39.4  /*
    39.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    39.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    39.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    39.8   *
    39.9   * This code is free software; you can redistribute it and/or modify it
   39.10 @@ -75,7 +75,6 @@
   39.11  
   39.12    assert(klass->get_instanceKlass()->is_linked(), "must be linked before using its constan-pool");
   39.13  
   39.14 -  _cp_index = index;
   39.15    constantPoolHandle cpool(thread, klass->get_instanceKlass()->constants());
   39.16  
   39.17    // Get the field's name, signature, and type.
   39.18 @@ -116,7 +115,7 @@
   39.19    // The declared holder of this field may not have been loaded.
   39.20    // Bail out with partial field information.
   39.21    if (!holder_is_accessible) {
   39.22 -    // _cp_index and _type have already been set.
   39.23 +    // _type has already been set.
   39.24      // The default values for _flags and _constant_value will suffice.
   39.25      // We need values for _holder, _offset,  and _is_constant,
   39.26      _holder = declared_holder;
   39.27 @@ -146,8 +145,6 @@
   39.28  ciField::ciField(fieldDescriptor *fd): _known_to_link_with_put(NULL), _known_to_link_with_get(NULL) {
   39.29    ASSERT_IN_VM;
   39.30  
   39.31 -  _cp_index = -1;
   39.32 -
   39.33    // Get the field's name, signature, and type.
   39.34    ciEnv* env = CURRENT_ENV;
   39.35    _name = env->get_symbol(fd->name());
   39.36 @@ -351,12 +348,11 @@
   39.37      }
   39.38    }
   39.39  
   39.40 -  FieldAccessInfo result;
   39.41 -  constantPoolHandle c_pool(THREAD,
   39.42 -                         accessing_klass->get_instanceKlass()->constants());
   39.43 -  LinkResolver::resolve_field(result, c_pool, _cp_index,
   39.44 -                              Bytecodes::java_code(bc),
   39.45 -                              true, false, KILL_COMPILE_ON_FATAL_(false));
   39.46 +  fieldDescriptor result;
   39.47 +  LinkResolver::resolve_field(result, _holder->get_instanceKlass(),
   39.48 +                              _name->get_symbol(), _signature->get_symbol(),
   39.49 +                              accessing_klass->get_Klass(), bc, true, false,
   39.50 +                              KILL_COMPILE_ON_FATAL_(false));
   39.51  
   39.52    // update the hit-cache, unless there is a problem with memory scoping:
   39.53    if (accessing_klass->is_shared() || !is_shared()) {
    40.1 --- a/src/share/vm/ci/ciField.hpp	Fri Sep 27 13:49:57 2013 -0400
    40.2 +++ b/src/share/vm/ci/ciField.hpp	Fri Sep 27 13:53:43 2013 -0400
    40.3 @@ -1,5 +1,5 @@
    40.4  /*
    40.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    40.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    40.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    40.8   *
    40.9   * This code is free software; you can redistribute it and/or modify it
   40.10 @@ -53,9 +53,6 @@
   40.11    ciInstanceKlass* _known_to_link_with_get;
   40.12    ciConstant       _constant_value;
   40.13  
   40.14 -  // Used for will_link
   40.15 -  int              _cp_index;
   40.16 -
   40.17    ciType* compute_type();
   40.18    ciType* compute_type_impl();
   40.19  
    41.1 --- a/src/share/vm/ci/ciInstanceKlass.cpp	Fri Sep 27 13:49:57 2013 -0400
    41.2 +++ b/src/share/vm/ci/ciInstanceKlass.cpp	Fri Sep 27 13:53:43 2013 -0400
    41.3 @@ -1,5 +1,5 @@
    41.4  /*
    41.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    41.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    41.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    41.8   *
    41.9   * This code is free software; you can redistribute it and/or modify it
   41.10 @@ -522,8 +522,7 @@
   41.11  
   41.12    for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
   41.13      if (fs.access_flags().is_static())  continue;
   41.14 -    fieldDescriptor fd;
   41.15 -    fd.initialize(k, fs.index());
   41.16 +    fieldDescriptor& fd = fs.field_descriptor();
   41.17      ciField* field = new (arena) ciField(&fd);
   41.18      fields->append(field);
   41.19    }
    42.1 --- a/src/share/vm/ci/ciMethod.cpp	Fri Sep 27 13:49:57 2013 -0400
    42.2 +++ b/src/share/vm/ci/ciMethod.cpp	Fri Sep 27 13:53:43 2013 -0400
    42.3 @@ -286,7 +286,10 @@
    42.4    check_is_loaded();
    42.5    assert(holder()->is_linked(), "must be linked");
    42.6    VM_ENTRY_MARK;
    42.7 -  return klassItable::compute_itable_index(get_Method());
    42.8 +  Method* m = get_Method();
    42.9 +  if (!m->has_itable_index())
   42.10 +    return Method::nonvirtual_vtable_index;
   42.11 +  return m->itable_index();
   42.12  }
   42.13  #endif // SHARK
   42.14  
   42.15 @@ -1137,6 +1140,10 @@
   42.16  // ------------------------------------------------------------------
   42.17  // ciMethod::check_call
   42.18  bool ciMethod::check_call(int refinfo_index, bool is_static) const {
   42.19 +  // This method is used only in C2 from InlineTree::ok_to_inline,
   42.20 +  // and is only used under -Xcomp or -XX:CompileTheWorld.
   42.21 +  // It appears to fail when applied to an invokeinterface call site.
   42.22 +  // FIXME: Remove this method and resolve_method_statically; refactor to use the other LinkResolver entry points.
   42.23    VM_ENTRY_MARK;
   42.24    {
   42.25      EXCEPTION_MARK;
    43.1 --- a/src/share/vm/ci/ciSymbol.hpp	Fri Sep 27 13:49:57 2013 -0400
    43.2 +++ b/src/share/vm/ci/ciSymbol.hpp	Fri Sep 27 13:53:43 2013 -0400
    43.3 @@ -1,5 +1,5 @@
    43.4  /*
    43.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    43.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    43.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    43.8   *
    43.9   * This code is free software; you can redistribute it and/or modify it
   43.10 @@ -44,6 +44,7 @@
   43.11    friend class ciInstanceKlass;
   43.12    friend class ciSignature;
   43.13    friend class ciMethod;
   43.14 +  friend class ciField;
   43.15    friend class ciObjArrayKlass;
   43.16  
   43.17  private:
    44.1 --- a/src/share/vm/classfile/classFileParser.cpp	Fri Sep 27 13:49:57 2013 -0400
    44.2 +++ b/src/share/vm/classfile/classFileParser.cpp	Fri Sep 27 13:53:43 2013 -0400
    44.3 @@ -888,6 +888,7 @@
    44.4    int runtime_visible_type_annotations_length = 0;
    44.5    u1* runtime_invisible_type_annotations = NULL;
    44.6    int runtime_invisible_type_annotations_length = 0;
    44.7 +  bool runtime_invisible_type_annotations_exists = false;
    44.8    while (attributes_count--) {
    44.9      cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
   44.10      u2 attribute_name_index = cfs->get_u2_fast();
   44.11 @@ -946,15 +947,27 @@
   44.12          assert(runtime_invisible_annotations != NULL, "null invisible annotations");
   44.13          cfs->skip_u1(runtime_invisible_annotations_length, CHECK);
   44.14        } else if (attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
   44.15 +        if (runtime_visible_type_annotations != NULL) {
   44.16 +          classfile_parse_error(
   44.17 +            "Multiple RuntimeVisibleTypeAnnotations attributes for field in class file %s", CHECK);
   44.18 +        }
   44.19          runtime_visible_type_annotations_length = attribute_length;
   44.20          runtime_visible_type_annotations = cfs->get_u1_buffer();
   44.21          assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
   44.22          cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
   44.23 -      } else if (PreserveAllAnnotations && attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
   44.24 -        runtime_invisible_type_annotations_length = attribute_length;
   44.25 -        runtime_invisible_type_annotations = cfs->get_u1_buffer();
   44.26 -        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
   44.27 -        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
   44.28 +      } else if (attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
   44.29 +        if (runtime_invisible_type_annotations_exists) {
   44.30 +          classfile_parse_error(
   44.31 +            "Multiple RuntimeInvisibleTypeAnnotations attributes for field in class file %s", CHECK);
   44.32 +        } else {
   44.33 +          runtime_invisible_type_annotations_exists = true;
   44.34 +        }
   44.35 +        if (PreserveAllAnnotations) {
   44.36 +          runtime_invisible_type_annotations_length = attribute_length;
   44.37 +          runtime_invisible_type_annotations = cfs->get_u1_buffer();
   44.38 +          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
   44.39 +        }
   44.40 +        cfs->skip_u1(attribute_length, CHECK);
   44.41        } else {
   44.42          cfs->skip_u1(attribute_length, CHECK);  // Skip unknown attributes
   44.43        }
   44.44 @@ -2066,6 +2079,7 @@
   44.45    int runtime_visible_type_annotations_length = 0;
   44.46    u1* runtime_invisible_type_annotations = NULL;
   44.47    int runtime_invisible_type_annotations_length = 0;
   44.48 +  bool runtime_invisible_type_annotations_exists = false;
   44.49    u1* annotation_default = NULL;
   44.50    int annotation_default_length = 0;
   44.51  
   44.52 @@ -2322,16 +2336,30 @@
   44.53          assert(annotation_default != NULL, "null annotation default");
   44.54          cfs->skip_u1(annotation_default_length, CHECK_(nullHandle));
   44.55        } else if (method_attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
   44.56 +        if (runtime_visible_type_annotations != NULL) {
   44.57 +          classfile_parse_error(
   44.58 +            "Multiple RuntimeVisibleTypeAnnotations attributes for method in class file %s",
   44.59 +            CHECK_(nullHandle));
   44.60 +        }
   44.61          runtime_visible_type_annotations_length = method_attribute_length;
   44.62          runtime_visible_type_annotations = cfs->get_u1_buffer();
   44.63          assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
   44.64          // No need for the VM to parse Type annotations
   44.65          cfs->skip_u1(runtime_visible_type_annotations_length, CHECK_(nullHandle));
   44.66 -      } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
   44.67 -        runtime_invisible_type_annotations_length = method_attribute_length;
   44.68 -        runtime_invisible_type_annotations = cfs->get_u1_buffer();
   44.69 -        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
   44.70 -        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK_(nullHandle));
   44.71 +      } else if (method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
   44.72 +        if (runtime_invisible_type_annotations_exists) {
   44.73 +          classfile_parse_error(
   44.74 +            "Multiple RuntimeInvisibleTypeAnnotations attributes for method in class file %s",
   44.75 +            CHECK_(nullHandle));
   44.76 +        } else {
   44.77 +          runtime_invisible_type_annotations_exists = true;
   44.78 +        }
   44.79 +        if (PreserveAllAnnotations) {
   44.80 +          runtime_invisible_type_annotations_length = method_attribute_length;
   44.81 +          runtime_invisible_type_annotations = cfs->get_u1_buffer();
   44.82 +          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
   44.83 +        }
   44.84 +        cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
   44.85        } else {
   44.86          // Skip unknown attributes
   44.87          cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
   44.88 @@ -2824,6 +2852,7 @@
   44.89    int runtime_visible_type_annotations_length = 0;
   44.90    u1* runtime_invisible_type_annotations = NULL;
   44.91    int runtime_invisible_type_annotations_length = 0;
   44.92 +  bool runtime_invisible_type_annotations_exists = false;
   44.93    u1* inner_classes_attribute_start = NULL;
   44.94    u4  inner_classes_attribute_length = 0;
   44.95    u2  enclosing_method_class_index = 0;
   44.96 @@ -2927,16 +2956,28 @@
   44.97          parsed_bootstrap_methods_attribute = true;
   44.98          parse_classfile_bootstrap_methods_attribute(attribute_length, CHECK);
   44.99        } else if (tag == vmSymbols::tag_runtime_visible_type_annotations()) {
  44.100 +        if (runtime_visible_type_annotations != NULL) {
  44.101 +          classfile_parse_error(
  44.102 +            "Multiple RuntimeVisibleTypeAnnotations attributes in class file %s", CHECK);
  44.103 +        }
  44.104          runtime_visible_type_annotations_length = attribute_length;
  44.105          runtime_visible_type_annotations = cfs->get_u1_buffer();
  44.106          assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
  44.107          // No need for the VM to parse Type annotations
  44.108          cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
  44.109 -      } else if (PreserveAllAnnotations && tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
  44.110 -        runtime_invisible_type_annotations_length = attribute_length;
  44.111 -        runtime_invisible_type_annotations = cfs->get_u1_buffer();
  44.112 -        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
  44.113 -        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
  44.114 +      } else if (tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
  44.115 +        if (runtime_invisible_type_annotations_exists) {
  44.116 +          classfile_parse_error(
  44.117 +            "Multiple RuntimeInvisibleTypeAnnotations attributes in class file %s", CHECK);
  44.118 +        } else {
  44.119 +          runtime_invisible_type_annotations_exists = true;
  44.120 +        }
  44.121 +        if (PreserveAllAnnotations) {
  44.122 +          runtime_invisible_type_annotations_length = attribute_length;
  44.123 +          runtime_invisible_type_annotations = cfs->get_u1_buffer();
  44.124 +          assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
  44.125 +        }
  44.126 +        cfs->skip_u1(attribute_length, CHECK);
  44.127        } else {
  44.128          // Unknown attribute
  44.129          cfs->skip_u1(attribute_length, CHECK);
  44.130 @@ -3954,9 +3995,8 @@
  44.131        this_klass->set_has_final_method();
  44.132      }
  44.133      this_klass->copy_method_ordering(method_ordering, CHECK_NULL);
  44.134 -    // The InstanceKlass::_methods_jmethod_ids cache and the
  44.135 -    // InstanceKlass::_methods_cached_itable_indices cache are
  44.136 -    // both managed on the assumption that the initial cache
  44.137 +    // The InstanceKlass::_methods_jmethod_ids cache
  44.138 +    // is managed on the assumption that the initial cache
  44.139      // size is equal to the number of methods in the class. If
  44.140      // that changes, then InstanceKlass::idnum_can_increment()
  44.141      // has to be changed accordingly.
    45.1 --- a/src/share/vm/classfile/classLoader.cpp	Fri Sep 27 13:49:57 2013 -0400
    45.2 +++ b/src/share/vm/classfile/classLoader.cpp	Fri Sep 27 13:53:43 2013 -0400
    45.3 @@ -1319,6 +1319,25 @@
    45.4    // The CHECK at the caller will propagate the exception out
    45.5  }
    45.6  
    45.7 +/**
    45.8 + * Returns if the given method should be compiled when doing compile-the-world.
    45.9 + *
   45.10 + * TODO:  This should be a private method in a CompileTheWorld class.
   45.11 + */
   45.12 +static bool can_be_compiled(methodHandle m, int comp_level) {
   45.13 +  assert(CompileTheWorld, "must be");
   45.14 +
   45.15 +  // It's not valid to compile a native wrapper for MethodHandle methods
   45.16 +  // that take a MemberName appendix since the bytecode signature is not
   45.17 +  // correct.
   45.18 +  vmIntrinsics::ID iid = m->intrinsic_id();
   45.19 +  if (MethodHandles::is_signature_polymorphic(iid) && MethodHandles::has_member_arg(iid)) {
   45.20 +    return false;
   45.21 +  }
   45.22 +
   45.23 +  return CompilationPolicy::can_be_compiled(m, comp_level);
   45.24 +}
   45.25 +
   45.26  void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
   45.27    int len = (int)strlen(name);
   45.28    if (len > 6 && strcmp(".class", name + len - 6) == 0) {
   45.29 @@ -1362,8 +1381,7 @@
   45.30            int comp_level = CompilationPolicy::policy()->initial_compile_level();
   45.31            for (int n = 0; n < k->methods()->length(); n++) {
   45.32              methodHandle m (THREAD, k->methods()->at(n));
   45.33 -            if (CompilationPolicy::can_be_compiled(m, comp_level)) {
   45.34 -
   45.35 +            if (can_be_compiled(m, comp_level)) {
   45.36                if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
   45.37                  // Give sweeper a chance to keep up with CTW
   45.38                  VM_ForceSafepoint op;
   45.39 @@ -1375,7 +1393,7 @@
   45.40                                              methodHandle(), 0, "CTW", THREAD);
   45.41                if (HAS_PENDING_EXCEPTION) {
   45.42                  clear_pending_exception_if_not_oom(CHECK);
   45.43 -                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
   45.44 +                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
   45.45                } else {
   45.46                  _compile_the_world_method_counter++;
   45.47                }
   45.48 @@ -1391,11 +1409,13 @@
   45.49                                                methodHandle(), 0, "CTW", THREAD);
   45.50                  if (HAS_PENDING_EXCEPTION) {
   45.51                    clear_pending_exception_if_not_oom(CHECK);
   45.52 -                  tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
   45.53 +                  tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
   45.54                  } else {
   45.55                    _compile_the_world_method_counter++;
   45.56                  }
   45.57                }
   45.58 +            } else {
   45.59 +              tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
   45.60              }
   45.61  
   45.62              nmethod* nm = m->code();
    46.1 --- a/src/share/vm/classfile/defaultMethods.cpp	Fri Sep 27 13:49:57 2013 -0400
    46.2 +++ b/src/share/vm/classfile/defaultMethods.cpp	Fri Sep 27 13:53:43 2013 -0400
    46.3 @@ -450,6 +450,10 @@
    46.4      streamIndentor si(str, indent * 2);
    46.5      str->indent().print("Selected method: ");
    46.6      print_method(str, _selected_target);
    46.7 +    Klass* method_holder = _selected_target->method_holder();
    46.8 +    if (!method_holder->is_interface()) {
    46.9 +      tty->print(" : in superclass");
   46.10 +    }
   46.11      str->print_cr("");
   46.12    }
   46.13  
   46.14 @@ -1141,19 +1145,23 @@
   46.15  #endif // ndef PRODUCT
   46.16        if (method->has_target()) {
   46.17          Method* selected = method->get_selected_target();
   46.18 -        max_stack = assemble_redirect(
   46.19 +        if (selected->method_holder()->is_interface()) {
   46.20 +          max_stack = assemble_redirect(
   46.21              &bpool, &buffer, slot->signature(), selected, CHECK);
   46.22 +        }
   46.23        } else if (method->throws_exception()) {
   46.24          max_stack = assemble_abstract_method_error(
   46.25              &bpool, &buffer, method->get_exception_message(), CHECK);
   46.26        }
   46.27 -      AccessFlags flags = accessFlags_from(
   46.28 +      if (max_stack != 0) {
   46.29 +        AccessFlags flags = accessFlags_from(
   46.30            JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE);
   46.31 -      Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
   46.32 +        Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
   46.33            flags, max_stack, slot->size_of_parameters(),
   46.34            ConstMethod::OVERPASS, CHECK);
   46.35 -      if (m != NULL) {
   46.36 -        overpasses.push(m);
   46.37 +        if (m != NULL) {
   46.38 +          overpasses.push(m);
   46.39 +        }
   46.40        }
   46.41      }
   46.42    }
    47.1 --- a/src/share/vm/classfile/javaClasses.cpp	Fri Sep 27 13:49:57 2013 -0400
    47.2 +++ b/src/share/vm/classfile/javaClasses.cpp	Fri Sep 27 13:53:43 2013 -0400
    47.3 @@ -438,6 +438,29 @@
    47.4    return true;
    47.5  }
    47.6  
    47.7 +bool java_lang_String::equals(oop str1, oop str2) {
    47.8 +  assert(str1->klass() == SystemDictionary::String_klass(),
    47.9 +         "must be java String");
   47.10 +  assert(str2->klass() == SystemDictionary::String_klass(),
   47.11 +         "must be java String");
   47.12 +  typeArrayOop value1  = java_lang_String::value(str1);
   47.13 +  int          offset1 = java_lang_String::offset(str1);
   47.14 +  int          length1 = java_lang_String::length(str1);
   47.15 +  typeArrayOop value2  = java_lang_String::value(str2);
   47.16 +  int          offset2 = java_lang_String::offset(str2);
   47.17 +  int          length2 = java_lang_String::length(str2);
   47.18 +
   47.19 +  if (length1 != length2) {
   47.20 +    return false;
   47.21 +  }
   47.22 +  for (int i = 0; i < length1; i++) {
   47.23 +    if (value1->char_at(i + offset1) != value2->char_at(i + offset2)) {
   47.24 +      return false;
   47.25 +    }
   47.26 +  }
   47.27 +  return true;
   47.28 +}
   47.29 +
   47.30  void java_lang_String::print(Handle java_string, outputStream* st) {
   47.31    oop          obj    = java_string();
   47.32    assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string");
    48.1 --- a/src/share/vm/classfile/javaClasses.hpp	Fri Sep 27 13:49:57 2013 -0400
    48.2 +++ b/src/share/vm/classfile/javaClasses.hpp	Fri Sep 27 13:53:43 2013 -0400
    48.3 @@ -182,6 +182,7 @@
    48.4    static unsigned int hash_string(oop java_string);
    48.5  
    48.6    static bool equals(oop java_string, jchar* chars, int len);
    48.7 +  static bool equals(oop str1, oop str2);
    48.8  
    48.9    // Conversion between '.' and '/' formats
   48.10    static Handle externalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '/', '.', THREAD); }
    49.1 --- a/src/share/vm/classfile/symbolTable.cpp	Fri Sep 27 13:49:57 2013 -0400
    49.2 +++ b/src/share/vm/classfile/symbolTable.cpp	Fri Sep 27 13:53:43 2013 -0400
    49.3 @@ -341,7 +341,7 @@
    49.4  
    49.5  Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
    49.6                                 unsigned int hashValue_arg, bool c_heap, TRAPS) {
    49.7 -  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
    49.8 +  assert(!Universe::heap()->is_in_reserved(name),
    49.9           "proposed name of symbol must be stable");
   49.10  
   49.11    // Don't allow symbols to be created which cannot fit in a Symbol*.
   49.12 @@ -685,7 +685,7 @@
   49.13    if (found_string != NULL) return found_string;
   49.14  
   49.15    debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
   49.16 -  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
   49.17 +  assert(!Universe::heap()->is_in_reserved(name),
   49.18           "proposed name of symbol must be stable");
   49.19  
   49.20    Handle string;
   49.21 @@ -807,6 +807,8 @@
   49.22    }
   49.23  }
   49.24  
   49.25 +// This verification is part of Universe::verify() and needs to be quick.
   49.26 +// See StringTable::verify_and_compare() below for exhaustive verification.
   49.27  void StringTable::verify() {
   49.28    for (int i = 0; i < the_table()->table_size(); ++i) {
   49.29      HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
   49.30 @@ -825,6 +827,162 @@
   49.31    the_table()->dump_table(st, "StringTable");
   49.32  }
   49.33  
   49.34 +StringTable::VerifyRetTypes StringTable::compare_entries(
   49.35 +                                      int bkt1, int e_cnt1,
   49.36 +                                      HashtableEntry<oop, mtSymbol>* e_ptr1,
   49.37 +                                      int bkt2, int e_cnt2,
   49.38 +                                      HashtableEntry<oop, mtSymbol>* e_ptr2) {
   49.39 +  // These entries are sanity checked by verify_and_compare_entries()
   49.40 +  // before this function is called.
   49.41 +  oop str1 = e_ptr1->literal();
   49.42 +  oop str2 = e_ptr2->literal();
   49.43 +
   49.44 +  if (str1 == str2) {
   49.45 +    tty->print_cr("ERROR: identical oop values (0x" PTR_FORMAT ") "
   49.46 +                  "in entry @ bucket[%d][%d] and entry @ bucket[%d][%d]",
   49.47 +                  str1, bkt1, e_cnt1, bkt2, e_cnt2);
   49.48 +    return _verify_fail_continue;
   49.49 +  }
   49.50 +
   49.51 +  if (java_lang_String::equals(str1, str2)) {
   49.52 +    tty->print_cr("ERROR: identical String values in entry @ "
   49.53 +                  "bucket[%d][%d] and entry @ bucket[%d][%d]",
   49.54 +                  bkt1, e_cnt1, bkt2, e_cnt2);
   49.55 +    return _verify_fail_continue;
   49.56 +  }
   49.57 +
   49.58 +  return _verify_pass;
   49.59 +}
   49.60 +
   49.61 +StringTable::VerifyRetTypes StringTable::verify_entry(int bkt, int e_cnt,
   49.62 +                                      HashtableEntry<oop, mtSymbol>* e_ptr,
   49.63 +                                      StringTable::VerifyMesgModes mesg_mode) {
   49.64 +
   49.65 +  VerifyRetTypes ret = _verify_pass;  // be optimistic
   49.66 +
   49.67 +  oop str = e_ptr->literal();
   49.68 +  if (str == NULL) {
   49.69 +    if (mesg_mode == _verify_with_mesgs) {
   49.70 +      tty->print_cr("ERROR: NULL oop value in entry @ bucket[%d][%d]", bkt,
   49.71 +                    e_cnt);
   49.72 +    }
   49.73 +    // NULL oop means no more verifications are possible
   49.74 +    return _verify_fail_done;
   49.75 +  }
   49.76 +
   49.77 +  if (str->klass() != SystemDictionary::String_klass()) {
   49.78 +    if (mesg_mode == _verify_with_mesgs) {
   49.79 +      tty->print_cr("ERROR: oop is not a String in entry @ bucket[%d][%d]",
   49.80 +                    bkt, e_cnt);
   49.81 +    }
   49.82 +    // not a String means no more verifications are possible
   49.83 +    return _verify_fail_done;
   49.84 +  }
   49.85 +
   49.86 +  unsigned int h = java_lang_String::hash_string(str);
   49.87 +  if (e_ptr->hash() != h) {
   49.88 +    if (mesg_mode == _verify_with_mesgs) {
   49.89 +      tty->print_cr("ERROR: broken hash value in entry @ bucket[%d][%d], "
   49.90 +                    "bkt_hash=%d, str_hash=%d", bkt, e_cnt, e_ptr->hash(), h);
   49.91 +    }
   49.92 +    ret = _verify_fail_continue;
   49.93 +  }
   49.94 +
   49.95 +  if (the_table()->hash_to_index(h) != bkt) {
   49.96 +    if (mesg_mode == _verify_with_mesgs) {
   49.97 +      tty->print_cr("ERROR: wrong index value for entry @ bucket[%d][%d], "
   49.98 +                    "str_hash=%d, hash_to_index=%d", bkt, e_cnt, h,
   49.99 +                    the_table()->hash_to_index(h));
  49.100 +    }
  49.101 +    ret = _verify_fail_continue;
  49.102 +  }
  49.103 +
  49.104 +  return ret;
  49.105 +}
  49.106 +
  49.107 +// See StringTable::verify() above for the quick verification that is
  49.108 +// part of Universe::verify(). This verification is exhaustive and
  49.109 +// reports on every issue that is found. StringTable::verify() only
  49.110 +// reports on the first issue that is found.
  49.111 +//
  49.112 +// StringTable::verify_entry() checks:
  49.113 +// - oop value != NULL (same as verify())
  49.114 +// - oop value is a String
  49.115 +// - hash(String) == hash in entry (same as verify())
  49.116 +// - index for hash == index of entry (same as verify())
  49.117 +//
  49.118 +// StringTable::compare_entries() checks:
  49.119 +// - oops are unique across all entries
  49.120 +// - String values are unique across all entries
  49.121 +//
  49.122 +int StringTable::verify_and_compare_entries() {
  49.123 +  assert(StringTable_lock->is_locked(), "sanity check");
  49.124 +
  49.125 +  int  fail_cnt = 0;
  49.126 +
  49.127 +  // first, verify all the entries individually:
  49.128 +  for (int bkt = 0; bkt < the_table()->table_size(); bkt++) {
  49.129 +    HashtableEntry<oop, mtSymbol>* e_ptr = the_table()->bucket(bkt);
  49.130 +    for (int e_cnt = 0; e_ptr != NULL; e_ptr = e_ptr->next(), e_cnt++) {
  49.131 +      VerifyRetTypes ret = verify_entry(bkt, e_cnt, e_ptr, _verify_with_mesgs);
  49.132 +      if (ret != _verify_pass) {
  49.133 +        fail_cnt++;
  49.134 +      }
  49.135 +    }
  49.136 +  }
  49.137 +
  49.138 +  // Optimization: if the above check did not find any failures, then
  49.139 +  // the comparison loop below does not need to call verify_entry()
  49.140 +  // before calling compare_entries(). If there were failures, then we
  49.141 +  // have to call verify_entry() to see if the entry can be passed to
  49.142 +  // compare_entries() safely. When we call verify_entry() in the loop
  49.143 +  // below, we do so quietly to void duplicate messages and we don't
  49.144 +  // increment fail_cnt because the failures have already been counted.
  49.145 +  bool need_entry_verify = (fail_cnt != 0);
  49.146 +
  49.147 +  // second, verify all entries relative to each other:
  49.148 +  for (int bkt1 = 0; bkt1 < the_table()->table_size(); bkt1++) {
  49.149 +    HashtableEntry<oop, mtSymbol>* e_ptr1 = the_table()->bucket(bkt1);
  49.150 +    for (int e_cnt1 = 0; e_ptr1 != NULL; e_ptr1 = e_ptr1->next(), e_cnt1++) {
  49.151 +      if (need_entry_verify) {
  49.152 +        VerifyRetTypes ret = verify_entry(bkt1, e_cnt1, e_ptr1,
  49.153 +                                          _verify_quietly);
  49.154 +        if (ret == _verify_fail_done) {
  49.155 +          // cannot use the current entry to compare against other entries
  49.156 +          continue;
  49.157 +        }
  49.158 +      }
  49.159 +
  49.160 +      for (int bkt2 = bkt1; bkt2 < the_table()->table_size(); bkt2++) {
  49.161 +        HashtableEntry<oop, mtSymbol>* e_ptr2 = the_table()->bucket(bkt2);
  49.162 +        int e_cnt2;
  49.163 +        for (e_cnt2 = 0; e_ptr2 != NULL; e_ptr2 = e_ptr2->next(), e_cnt2++) {
  49.164 +          if (bkt1 == bkt2 && e_cnt2 <= e_cnt1) {
  49.165 +            // skip the entries up to and including the one that
  49.166 +            // we're comparing against
  49.167 +            continue;
  49.168 +          }
  49.169 +
  49.170 +          if (need_entry_verify) {
  49.171 +            VerifyRetTypes ret = verify_entry(bkt2, e_cnt2, e_ptr2,
  49.172 +                                              _verify_quietly);
  49.173 +            if (ret == _verify_fail_done) {
  49.174 +              // cannot compare against this entry
  49.175 +              continue;
  49.176 +            }
  49.177 +          }
  49.178 +
  49.179 +          // compare two entries, report and count any failures:
  49.180 +          if (compare_entries(bkt1, e_cnt1, e_ptr1, bkt2, e_cnt2, e_ptr2)
  49.181 +              != _verify_pass) {
  49.182 +            fail_cnt++;
  49.183 +          }
  49.184 +        }
  49.185 +      }
  49.186 +    }
  49.187 +  }
  49.188 +  return fail_cnt;
  49.189 +}
  49.190  
  49.191  // Create a new table and using alternate hash code, populate the new table
  49.192  // with the existing strings.   Set flag to use the alternate hash code afterwards.
    50.1 --- a/src/share/vm/classfile/symbolTable.hpp	Fri Sep 27 13:49:57 2013 -0400
    50.2 +++ b/src/share/vm/classfile/symbolTable.hpp	Fri Sep 27 13:53:43 2013 -0400
    50.3 @@ -311,6 +311,26 @@
    50.4    static void verify();
    50.5    static void dump(outputStream* st);
    50.6  
    50.7 +  enum VerifyMesgModes {
    50.8 +    _verify_quietly    = 0,
    50.9 +    _verify_with_mesgs = 1
   50.10 +  };
   50.11 +
   50.12 +  enum VerifyRetTypes {
   50.13 +    _verify_pass          = 0,
   50.14 +    _verify_fail_continue = 1,
   50.15 +    _verify_fail_done     = 2
   50.16 +  };
   50.17 +
   50.18 +  static VerifyRetTypes compare_entries(int bkt1, int e_cnt1,
   50.19 +                                        HashtableEntry<oop, mtSymbol>* e_ptr1,
   50.20 +                                        int bkt2, int e_cnt2,
   50.21 +                                        HashtableEntry<oop, mtSymbol>* e_ptr2);
   50.22 +  static VerifyRetTypes verify_entry(int bkt, int e_cnt,
   50.23 +                                     HashtableEntry<oop, mtSymbol>* e_ptr,
   50.24 +                                     VerifyMesgModes mesg_mode);
   50.25 +  static int verify_and_compare_entries();
   50.26 +
   50.27    // Sharing
   50.28    static void copy_buckets(char** top, char*end) {
   50.29      the_table()->Hashtable<oop, mtSymbol>::copy_buckets(top, end);
    51.1 --- a/src/share/vm/code/compiledIC.cpp	Fri Sep 27 13:49:57 2013 -0400
    51.2 +++ b/src/share/vm/code/compiledIC.cpp	Fri Sep 27 13:53:43 2013 -0400
    51.3 @@ -1,5 +1,5 @@
    51.4  /*
    51.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    51.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    51.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    51.8   *
    51.9   * This code is free software; you can redistribute it and/or modify it
   51.10 @@ -160,32 +160,42 @@
   51.11  // High-level access to an inline cache. Guaranteed to be MT-safe.
   51.12  
   51.13  
   51.14 -void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
   51.15 -  methodHandle method = call_info->selected_method();
   51.16 -  bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
   51.17 +bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
   51.18    assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
   51.19    assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
   51.20    assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
   51.21  
   51.22    address entry;
   51.23 -  if (is_invoke_interface) {
   51.24 -    int index = klassItable::compute_itable_index(call_info->resolved_method()());
   51.25 -    entry = VtableStubs::create_stub(false, index, method());
   51.26 -    assert(entry != NULL, "entry not computed");
   51.27 +  if (call_info->call_kind() == CallInfo::itable_call) {
   51.28 +    assert(bytecode == Bytecodes::_invokeinterface, "");
   51.29 +    int itable_index = call_info->itable_index();
   51.30 +    entry = VtableStubs::find_itable_stub(itable_index);
   51.31 +    if (entry == false) {
   51.32 +      return false;
   51.33 +    }
   51.34 +#ifdef ASSERT
   51.35 +    int index = call_info->resolved_method()->itable_index();
   51.36 +    assert(index == itable_index, "CallInfo pre-computes this");
   51.37 +#endif //ASSERT
   51.38      InstanceKlass* k = call_info->resolved_method()->method_holder();
   51.39 -    assert(k->is_interface(), "sanity check");
   51.40 +    assert(k->verify_itable_index(itable_index), "sanity check");
   51.41      InlineCacheBuffer::create_transition_stub(this, k, entry);
   51.42    } else {
   51.43 -    // Can be different than method->vtable_index(), due to package-private etc.
   51.44 +    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
   51.45 +    // Can be different than selected_method->vtable_index(), due to package-private etc.
   51.46      int vtable_index = call_info->vtable_index();
   51.47 -    entry = VtableStubs::create_stub(true, vtable_index, method());
   51.48 -    InlineCacheBuffer::create_transition_stub(this, method(), entry);
   51.49 +    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
   51.50 +    entry = VtableStubs::find_vtable_stub(vtable_index);
   51.51 +    if (entry == NULL) {
   51.52 +      return false;
   51.53 +    }
   51.54 +    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
   51.55    }
   51.56  
   51.57    if (TraceICs) {
   51.58      ResourceMark rm;
   51.59      tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
   51.60 -                   instruction_address(), method->print_value_string(), entry);
   51.61 +                   instruction_address(), call_info->selected_method()->print_value_string(), entry);
   51.62    }
   51.63  
   51.64    // We can't check this anymore. With lazy deopt we could have already
   51.65 @@ -195,6 +205,7 @@
   51.66    // race because the IC entry was complete when we safepointed so
   51.67    // cleaning it immediately is harmless.
   51.68    // assert(is_megamorphic(), "sanity check");
   51.69 +  return true;
   51.70  }
   51.71  
   51.72  
    52.1 --- a/src/share/vm/code/compiledIC.hpp	Fri Sep 27 13:49:57 2013 -0400
    52.2 +++ b/src/share/vm/code/compiledIC.hpp	Fri Sep 27 13:53:43 2013 -0400
    52.3 @@ -226,7 +226,10 @@
    52.4    //
    52.5    void set_to_clean();  // Can only be called during a safepoint operation
    52.6    void set_to_monomorphic(CompiledICInfo& info);
    52.7 -  void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
    52.8 +
    52.9 +  // Returns true if successful and false otherwise. The call can fail if memory
   52.10 +  // allocation in the code cache fails.
   52.11 +  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
   52.12  
   52.13    static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
   52.14                                          bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);
    53.1 --- a/src/share/vm/code/vtableStubs.cpp	Fri Sep 27 13:49:57 2013 -0400
    53.2 +++ b/src/share/vm/code/vtableStubs.cpp	Fri Sep 27 13:53:43 2013 -0400
    53.3 @@ -46,12 +46,9 @@
    53.4  address VtableStub::_chunk_end         = NULL;
    53.5  VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();
    53.6  
    53.7 -static int num_vtable_chunks = 0;
    53.8 -
    53.9  
   53.10  void* VtableStub::operator new(size_t size, int code_size) throw() {
   53.11    assert(size == sizeof(VtableStub), "mismatched size");
   53.12 -  num_vtable_chunks++;
   53.13    // compute real VtableStub size (rounded to nearest word)
   53.14    const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
   53.15    // malloc them in chunks to minimize header overhead
   53.16 @@ -60,7 +57,7 @@
   53.17      const int bytes = chunk_factor * real_size + pd_code_alignment();
   53.18      BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
   53.19      if (blob == NULL) {
   53.20 -      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks");
   53.21 +      return NULL;
   53.22      }
   53.23      _chunk = blob->content_begin();
   53.24      _chunk_end = _chunk + bytes;
   53.25 @@ -111,7 +108,7 @@
   53.26  }
   53.27  
   53.28  
   53.29 -address VtableStubs::create_stub(bool is_vtable_stub, int vtable_index, Method* method) {
   53.30 +address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index) {
   53.31    assert(vtable_index >= 0, "must be positive");
   53.32  
   53.33    VtableStub* s = ShareVtableStubs ? lookup(is_vtable_stub, vtable_index) : NULL;
   53.34 @@ -121,6 +118,12 @@
   53.35      } else {
   53.36        s = create_itable_stub(vtable_index);
   53.37      }
   53.38 +
   53.39 +    // Creation of vtable or itable can fail if there is not enough free space in the code cache.
   53.40 +    if (s == NULL) {
   53.41 +      return NULL;
   53.42 +    }
   53.43 +
   53.44      enter(is_vtable_stub, vtable_index, s);
   53.45      if (PrintAdapterHandlers) {
   53.46        tty->print_cr("Decoding VtableStub %s[%d]@%d",
    54.1 --- a/src/share/vm/code/vtableStubs.hpp	Fri Sep 27 13:49:57 2013 -0400
    54.2 +++ b/src/share/vm/code/vtableStubs.hpp	Fri Sep 27 13:53:43 2013 -0400
    54.3 @@ -121,9 +121,11 @@
    54.4    static VtableStub* lookup            (bool is_vtable_stub, int vtable_index);
    54.5    static void        enter             (bool is_vtable_stub, int vtable_index, VtableStub* s);
    54.6    static inline uint hash              (bool is_vtable_stub, int vtable_index);
    54.7 +  static address     find_stub         (bool is_vtable_stub, int vtable_index);
    54.8  
    54.9   public:
   54.10 -  static address     create_stub(bool is_vtable_stub, int vtable_index, Method* method); // return the entry point of a stub for this call
   54.11 +  static address     find_vtable_stub(int vtable_index) { return find_stub(true,  vtable_index); }
   54.12 +  static address     find_itable_stub(int itable_index) { return find_stub(false, itable_index); }
   54.13    static bool        is_entry_point(address pc);                     // is pc a vtable stub entry point?
   54.14    static bool        contains(address pc);                           // is pc within any stub?
   54.15    static VtableStub* stub_containing(address pc);                    // stub containing pc or NULL
    55.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Sep 27 13:49:57 2013 -0400
    55.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Sep 27 13:53:43 2013 -0400
    55.3 @@ -230,7 +230,7 @@
    55.4    // depends on this property.
    55.5    debug_only(
    55.6      FreeChunk* junk = NULL;
    55.7 -    assert(UseCompressedKlassPointers ||
    55.8 +    assert(UseCompressedClassPointers ||
    55.9             junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
   55.10             "Offset of FreeChunk::_prev within FreeChunk must match"
   55.11             "  that of OopDesc::_klass within OopDesc");
   55.12 @@ -1407,7 +1407,7 @@
   55.13    assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
   55.14    OrderAccess::storestore();
   55.15  
   55.16 -  if (UseCompressedKlassPointers) {
   55.17 +  if (UseCompressedClassPointers) {
   55.18      // Copy gap missed by (aligned) header size calculation below
   55.19      obj->set_klass_gap(old->klass_gap());
   55.20    }
    56.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Sep 27 13:49:57 2013 -0400
    56.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Sep 27 13:53:43 2013 -0400
    56.3 @@ -481,9 +481,8 @@
    56.4  
    56.5  ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
    56.6    _g1h(g1h),
    56.7 -  _markBitMap1(MinObjAlignment - 1),
    56.8 -  _markBitMap2(MinObjAlignment - 1),
    56.9 -
   56.10 +  _markBitMap1(log2_intptr(MinObjAlignment)),
   56.11 +  _markBitMap2(log2_intptr(MinObjAlignment)),
   56.12    _parallel_marking_threads(0),
   56.13    _max_parallel_marking_threads(0),
   56.14    _sleep_factor(0.0),
    57.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    57.2 +++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp	Fri Sep 27 13:53:43 2013 -0400
    57.3 @@ -0,0 +1,141 @@
    57.4 +/*
    57.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    57.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    57.7 + *
    57.8 + * This code is free software; you can redistribute it and/or modify it
    57.9 + * under the terms of the GNU General Public License version 2 only, as
   57.10 + * published by the Free Software Foundation.
   57.11 + *
   57.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   57.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   57.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   57.15 + * version 2 for more details (a copy is included in the LICENSE file that
   57.16 + * accompanied this code).
   57.17 + *
   57.18 + * You should have received a copy of the GNU General Public License version
   57.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   57.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   57.21 + *
   57.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   57.23 + * or visit www.oracle.com if you need additional information or have any
   57.24 + * questions.
   57.25 + *
   57.26 + */
   57.27 +
   57.28 +#include "precompiled.hpp"
   57.29 +#include "gc_implementation/g1/g1BiasedArray.hpp"
   57.30 +
   57.31 +#ifndef PRODUCT
   57.32 +void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
   57.33 +  guarantee(_base != NULL, "Array not initialized");
   57.34 +  guarantee(index < length(), err_msg("Index out of bounds index: "SIZE_FORMAT" length: "SIZE_FORMAT, index, length()));
   57.35 +}
   57.36 +
   57.37 +void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const {
   57.38 +  guarantee(_biased_base != NULL, "Array not initialized");
   57.39 +  guarantee(biased_index >= bias() && biased_index < (bias() + length()),
   57.40 +    err_msg("Biased index out of bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
   57.41 +}
   57.42 +
   57.43 +void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const {
   57.44 +  guarantee(_biased_base != NULL, "Array not initialized");
   57.45 +  guarantee(biased_index >= bias() && biased_index <= (bias() + length()),
   57.46 +    err_msg("Biased index out of inclusive bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
   57.47 +}
   57.48 +
   57.49 +class TestMappedArray : public G1BiasedMappedArray<int> {
   57.50 +protected:
   57.51 +  virtual int default_value() const { return 0xBAADBABE; }
   57.52 +public:
   57.53 +  static void test_biasedarray() {
   57.54 +    const size_t REGION_SIZE_IN_WORDS = 512;
   57.55 +    const size_t NUM_REGIONS = 20;
   57.56 +    HeapWord* fake_heap = (HeapWord*)LP64_ONLY(0xBAAA00000) NOT_LP64(0xBA000000); // Any value that is non-zero
   57.57 +
   57.58 +    TestMappedArray array;
   57.59 +    array.initialize(fake_heap, fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS,
   57.60 +            REGION_SIZE_IN_WORDS * HeapWordSize);
   57.61 +    // Check address calculation (bounds)
   57.62 +    assert(array.bottom_address_mapped() == fake_heap,
   57.63 +      err_msg("bottom mapped address should be "PTR_FORMAT", but is "PTR_FORMAT, fake_heap, array.bottom_address_mapped()));
   57.64 +    assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS), "must be");
   57.65 +
   57.66 +    int* bottom = array.address_mapped_to(fake_heap);
   57.67 +    assert((void*)bottom == (void*) array.base(), "must be");
   57.68 +    int* end = array.address_mapped_to(fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS);
   57.69 +    assert((void*)end == (void*)(array.base() + array.length()), "must be");
   57.70 +    // The entire array should contain default value elements
   57.71 +    for (int* current = bottom; current < end; current++) {
   57.72 +      assert(*current == array.default_value(), "must be");
   57.73 +    }
   57.74 +
   57.75 +    // Test setting values in the table
   57.76 +
   57.77 +    HeapWord* region_start_address = fake_heap + REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2);
   57.78 +    HeapWord* region_end_address = fake_heap + (REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2) + REGION_SIZE_IN_WORDS - 1);
   57.79 +
   57.80 +    // Set/get by address tests: invert some value; first retrieve one
   57.81 +    int actual_value = array.get_by_index(NUM_REGIONS / 2);
   57.82 +    array.set_by_index(NUM_REGIONS / 2, ~actual_value);
   57.83 +    // Get the same value by address, should correspond to the start of the "region"
   57.84 +    int value = array.get_by_address(region_start_address);
   57.85 +    assert(value == ~actual_value, "must be");
   57.86 +    // Get the same value by address, at one HeapWord before the start
   57.87 +    value = array.get_by_address(region_start_address - 1);
   57.88 +    assert(value == array.default_value(), "must be");
   57.89 +    // Get the same value by address, at the end of the "region"
   57.90 +    value = array.get_by_address(region_end_address);
   57.91 +    assert(value == ~actual_value, "must be");
   57.92 +    // Make sure the next value maps to another index
   57.93 +    value = array.get_by_address(region_end_address + 1);
   57.94 +    assert(value == array.default_value(), "must be");
   57.95 +
   57.96 +    // Reset the value in the array
   57.97 +    array.set_by_address(region_start_address + (region_end_address - region_start_address) / 2, actual_value);
   57.98 +
   57.99 +    // The entire array should have the default value again
  57.100 +    for (int* current = bottom; current < end; current++) {
  57.101 +      assert(*current == array.default_value(), "must be");
  57.102 +    }
  57.103 +
  57.104 +    // Set/get by index tests: invert some value
  57.105 +    idx_t index = NUM_REGIONS / 2;
  57.106 +    actual_value = array.get_by_index(index);
  57.107 +    array.set_by_index(index, ~actual_value);
  57.108 +
  57.109 +    value = array.get_by_index(index);
  57.110 +    assert(value == ~actual_value, "must be");
  57.111 +
  57.112 +    value = array.get_by_index(index - 1);
  57.113 +    assert(value == array.default_value(), "must be");
  57.114 +
  57.115 +    value = array.get_by_index(index + 1);
  57.116 +    assert(value == array.default_value(), "must be");
  57.117 +
  57.118 +    array.set_by_index(0, 0);
  57.119 +    value = array.get_by_index(0);
  57.120 +    assert(value == 0, "must be");
  57.121 +
  57.122 +    array.set_by_index(array.length() - 1, 0);
  57.123 +    value = array.get_by_index(array.length() - 1);
  57.124 +    assert(value == 0, "must be");
  57.125 +
  57.126 +    array.set_by_index(index, 0);
  57.127 +
  57.128 +    // The array should have three zeros, and default values otherwise
  57.129 +    size_t num_zeros = 0;
  57.130 +    for (int* current = bottom; current < end; current++) {
  57.131 +      assert(*current == array.default_value() || *current == 0, "must be");
  57.132 +      if (*current == 0) {
  57.133 +        num_zeros++;
  57.134 +      }
  57.135 +    }
  57.136 +    assert(num_zeros == 3, "must be");
  57.137 +  }
  57.138 +};
  57.139 +
  57.140 +void TestG1BiasedArray_test() {
  57.141 +  TestMappedArray::test_biasedarray();
  57.142 +}
  57.143 +
  57.144 +#endif
    58.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    58.2 +++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Fri Sep 27 13:53:43 2013 -0400
    58.3 @@ -0,0 +1,181 @@
    58.4 +/*
    58.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    58.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    58.7 + *
    58.8 + * This code is free software; you can redistribute it and/or modify it
    58.9 + * under the terms of the GNU General Public License version 2 only, as
   58.10 + * published by the Free Software Foundation.
   58.11 + *
   58.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   58.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   58.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   58.15 + * version 2 for more details (a copy is included in the LICENSE file that
   58.16 + * accompanied this code).
   58.17 + *
   58.18 + * You should have received a copy of the GNU General Public License version
   58.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   58.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   58.21 + *
   58.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   58.23 + * or visit www.oracle.com if you need additional information or have any
   58.24 + * questions.
   58.25 + *
   58.26 + */
   58.27 +
   58.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
   58.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
   58.30 +
   58.31 +#include "utilities/debug.hpp"
   58.32 +#include "memory/allocation.inline.hpp"
   58.33 +
   58.34 +// Implements the common base functionality for arrays that contain provisions
   58.35 +// for accessing its elements using a biased index.
   58.36 +// The element type is defined by the instantiating the template.
   58.37 +class G1BiasedMappedArrayBase VALUE_OBJ_CLASS_SPEC {
   58.38 +  friend class VMStructs;
   58.39 +public:
   58.40 +  typedef size_t idx_t;
   58.41 +protected:
   58.42 +  address _base;          // the real base address
   58.43 +  size_t _length;         // the length of the array
   58.44 +  address _biased_base;   // base address biased by "bias" elements
   58.45 +  size_t _bias;           // the bias, i.e. the offset biased_base is located to the right in elements
   58.46 +  uint _shift_by;         // the amount of bits to shift right when mapping to an index of the array.
   58.47 +
   58.48 +protected:
   58.49 +
   58.50 +  G1BiasedMappedArrayBase() : _base(NULL), _length(0), _biased_base(NULL),
   58.51 +    _bias(0), _shift_by(0) { }
   58.52 +
   58.53 +  // Allocate a new array, generic version.
   58.54 +  static address create_new_base_array(size_t length, size_t elem_size) {
   58.55 +    assert(length > 0, "just checking");
   58.56 +    assert(elem_size > 0, "just checking");
   58.57 +    return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
   58.58 +  }
   58.59 +
   58.60 +  // Initialize the members of this class. The biased start address of this array
   58.61 +  // is the bias (in elements) multiplied by the element size.
   58.62 +  void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) {
   58.63 +    assert(base != NULL, "just checking");
   58.64 +    assert(length > 0, "just checking");
   58.65 +    assert(shift_by < sizeof(uintptr_t) * 8, err_msg("Shifting by %zd, larger than word size?", shift_by));
   58.66 +    _base = base;
   58.67 +    _length = length;
   58.68 +    _biased_base = base - (bias * elem_size);
   58.69 +    _bias = bias;
   58.70 +    _shift_by = shift_by;
   58.71 +  }
   58.72 +
   58.73 +  // Allocate and initialize this array to cover the heap addresses in the range
   58.74 +  // of [bottom, end).
   58.75 +  void initialize(HeapWord* bottom, HeapWord* end, size_t target_elem_size_in_bytes, size_t mapping_granularity_in_bytes) {
   58.76 +    assert(mapping_granularity_in_bytes > 0, "just checking");
   58.77 +    assert(is_power_of_2(mapping_granularity_in_bytes),
   58.78 +      err_msg("mapping granularity must be power of 2, is %zd", mapping_granularity_in_bytes));
   58.79 +    assert((uintptr_t)bottom % mapping_granularity_in_bytes == 0,
   58.80 +      err_msg("bottom mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
   58.81 +        mapping_granularity_in_bytes, bottom));
   58.82 +    assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
   58.83 +      err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
   58.84 +        mapping_granularity_in_bytes, end));
   58.85 +    size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize);
   58.86 +    idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
   58.87 +    address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
   58.88 +    initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
   58.89 +  }
   58.90 +
   58.91 +  size_t bias() const { return _bias; }
   58.92 +  uint shift_by() const { return _shift_by; }
   58.93 +
   58.94 +  void verify_index(idx_t index) const PRODUCT_RETURN;
   58.95 +  void verify_biased_index(idx_t biased_index) const PRODUCT_RETURN;
   58.96 +  void verify_biased_index_inclusive_end(idx_t biased_index) const PRODUCT_RETURN;
   58.97 +
   58.98 +public:
   58.99 +   // Return the length of the array in elements.
  58.100 +   size_t length() const { return _length; }
  58.101 +};
  58.102 +
  58.103 +// Array that provides biased access and mapping from (valid) addresses in the
  58.104 +// heap into this array.
  58.105 +template<class T>
  58.106 +class G1BiasedMappedArray : public G1BiasedMappedArrayBase {
  58.107 +public:
  58.108 +  typedef G1BiasedMappedArrayBase::idx_t idx_t;
  58.109 +
  58.110 +  T* base() const { return (T*)G1BiasedMappedArrayBase::_base; }
  58.111 +  // Return the element of the given array at the given index. Assume
  58.112 +  // the index is valid. This is a convenience method that does sanity
  58.113 +  // checking on the index.
  58.114 +  T get_by_index(idx_t index) const {
  58.115 +    verify_index(index);
  58.116 +    return this->base()[index];
  58.117 +  }
  58.118 +
  58.119 +  // Set the element of the given array at the given index to the
  58.120 +  // given value. Assume the index is valid. This is a convenience
  58.121 +  // method that does sanity checking on the index.
  58.122 +  void set_by_index(idx_t index, T value) {
  58.123 +    verify_index(index);
  58.124 +    this->base()[index] = value;
  58.125 +  }
  58.126 +
  58.127 +  // The raw biased base pointer.
  58.128 +  T* biased_base() const { return (T*)G1BiasedMappedArrayBase::_biased_base; }
  58.129 +
  58.130 +  // Return the element of the given array that covers the given word in the
  58.131 +  // heap. Assumes the index is valid.
  58.132 +  T get_by_address(HeapWord* value) const {
  58.133 +    idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
  58.134 +    this->verify_biased_index(biased_index);
  58.135 +    return biased_base()[biased_index];
  58.136 +  }
  58.137 +
  58.138 +  // Set the value of the array entry that corresponds to the given array.
  58.139 +  void set_by_address(HeapWord * address, T value) {
  58.140 +    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
  58.141 +    this->verify_biased_index(biased_index);
  58.142 +    biased_base()[biased_index] = value;
  58.143 +  }
  58.144 +
  58.145 +protected:
  58.146 +  // Returns the address of the element the given address maps to
  58.147 +  T* address_mapped_to(HeapWord* address) {
  58.148 +    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
  58.149 +    this->verify_biased_index_inclusive_end(biased_index);
  58.150 +    return biased_base() + biased_index;
  58.151 +  }
  58.152 +
  58.153 +public:
  58.154 +  // Return the smallest address (inclusive) in the heap that this array covers.
  58.155 +  HeapWord* bottom_address_mapped() const {
  58.156 +    return (HeapWord*) ((uintptr_t)this->bias() << this->shift_by());
  58.157 +  }
  58.158 +
  58.159 +  // Return the highest address (exclusive) in the heap that this array covers.
  58.160 +  HeapWord* end_address_mapped() const {
  58.161 +    return (HeapWord*) ((uintptr_t)(this->bias() + this->length()) << this->shift_by());
  58.162 +  }
  58.163 +
  58.164 +protected:
  58.165 +  virtual T default_value() const = 0;
  58.166 +  // Set all elements of the given array to the given value.
  58.167 +  void clear() {
  58.168 +    T value = default_value();
  58.169 +    for (idx_t i = 0; i < length(); i++) {
  58.170 +      set_by_index(i, value);
  58.171 +    }
  58.172 +  }
  58.173 +public:
  58.174 +  G1BiasedMappedArray() {}
  58.175 +
  58.176 +  // Allocate and initialize this array to cover the heap addresses in the range
  58.177 +  // of [bottom, end).
  58.178 +  void initialize(HeapWord* bottom, HeapWord* end, size_t mapping_granularity) {
  58.179 +    G1BiasedMappedArrayBase::initialize(bottom, end, sizeof(T), mapping_granularity);
  58.180 +    this->clear();
  58.181 +  }
  58.182 +};
  58.183 +
  58.184 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
    59.1 --- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Fri Sep 27 13:49:57 2013 -0400
    59.2 +++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Fri Sep 27 13:53:43 2013 -0400
    59.3 @@ -33,8 +33,8 @@
    59.4  
    59.5  void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
    59.6    if (has_count_table()) {
    59.7 -    check_card_num(from_card_num,
    59.8 -                   err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
    59.9 +    assert(from_card_num >= 0 && from_card_num < _committed_max_card_num,
   59.10 +           err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
   59.11      assert(from_card_num < to_card_num,
   59.12             err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
   59.13                     from_card_num, to_card_num));
    60.1 --- a/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Fri Sep 27 13:49:57 2013 -0400
    60.2 +++ b/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Fri Sep 27 13:53:43 2013 -0400
    60.3 @@ -72,25 +72,21 @@
    60.4      return has_reserved_count_table() && _committed_max_card_num > 0;
    60.5    }
    60.6  
    60.7 -  void check_card_num(size_t card_num, const char* msg) {
    60.8 -    assert(card_num >= 0 && card_num < _committed_max_card_num, msg);
    60.9 -  }
   60.10 -
   60.11    size_t ptr_2_card_num(const jbyte* card_ptr) {
   60.12      assert(card_ptr >= _ct_bot,
   60.13 -           err_msg("Inavalied card pointer: "
   60.14 +           err_msg("Invalid card pointer: "
   60.15                     "card_ptr: " PTR_FORMAT ", "
   60.16                     "_ct_bot: " PTR_FORMAT,
   60.17                     card_ptr, _ct_bot));
   60.18      size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
   60.19 -    check_card_num(card_num,
   60.20 -                   err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
   60.21 +    assert(card_num >= 0 && card_num < _committed_max_card_num,
   60.22 +           err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
   60.23      return card_num;
   60.24    }
   60.25  
   60.26    jbyte* card_num_2_ptr(size_t card_num) {
   60.27 -    check_card_num(card_num,
   60.28 -                   err_msg("card num out of range: "SIZE_FORMAT, card_num));
   60.29 +    assert(card_num >= 0 && card_num < _committed_max_card_num,
   60.30 +           err_msg("card num out of range: "SIZE_FORMAT, card_num));
   60.31      return (jbyte*) (_ct_bot + card_num);
   60.32    }
   60.33  
    61.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Sep 27 13:49:57 2013 -0400
    61.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Sep 27 13:53:43 2013 -0400
    61.3 @@ -2069,8 +2069,10 @@
    61.4    _g1_storage.initialize(g1_rs, 0);
    61.5    _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
    61.6    _hrs.initialize((HeapWord*) _g1_reserved.start(),
    61.7 -                  (HeapWord*) _g1_reserved.end(),
    61.8 -                  _expansion_regions);
    61.9 +                  (HeapWord*) _g1_reserved.end());
   61.10 +  assert(_hrs.max_length() == _expansion_regions,
   61.11 +         err_msg("max length: %u expansion regions: %u",
   61.12 +                 _hrs.max_length(), _expansion_regions));
   61.13  
   61.14    // Do later initialization work for concurrent refinement.
   61.15    _cg1r->init();
   61.16 @@ -2191,6 +2193,10 @@
   61.17    return JNI_OK;
   61.18  }
   61.19  
   61.20 +size_t G1CollectedHeap::conservative_max_heap_alignment() {
   61.21 +  return HeapRegion::max_region_size();
   61.22 +}
   61.23 +
   61.24  void G1CollectedHeap::ref_processing_init() {
   61.25    // Reference processing in G1 currently works as follows:
   61.26    //
    62.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Sep 27 13:49:57 2013 -0400
    62.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Sep 27 13:53:43 2013 -0400
    62.3 @@ -1092,6 +1092,9 @@
    62.4    // specified by the policy object.
    62.5    jint initialize();
    62.6  
    62.7 +  // Return the (conservative) maximum heap alignment for any G1 heap
    62.8 +  static size_t conservative_max_heap_alignment();
    62.9 +
   62.10    // Initialize weak reference processing.
   62.11    virtual void ref_processing_init();
   62.12  
    63.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Sep 27 13:49:57 2013 -0400
    63.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Sep 27 13:53:43 2013 -0400
    63.3 @@ -1,5 +1,5 @@
    63.4  /*
    63.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    63.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    63.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    63.8   *
    63.9   * This code is free software; you can redistribute it and/or modify it
   63.10 @@ -149,6 +149,10 @@
   63.11  // many regions in the heap (based on the min heap size).
   63.12  #define TARGET_REGION_NUMBER          2048
   63.13  
   63.14 +size_t HeapRegion::max_region_size() {
   63.15 +  return (size_t)MAX_REGION_SIZE;
   63.16 +}
   63.17 +
   63.18  void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
   63.19    uintx region_size = G1HeapRegionSize;
   63.20    if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    64.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Sep 27 13:49:57 2013 -0400
    64.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Sep 27 13:53:43 2013 -0400
    64.3 @@ -1,5 +1,5 @@
    64.4  /*
    64.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    64.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    64.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    64.8   *
    64.9   * This code is free software; you can redistribute it and/or modify it
   64.10 @@ -355,6 +355,8 @@
   64.11                                        ~((1 << (size_t) LogOfHRGrainBytes) - 1);
   64.12    }
   64.13  
   64.14 +  static size_t max_region_size();
   64.15 +
   64.16    // It sets up the heap region size (GrainBytes / GrainWords), as
   64.17    // well as other related fields that are based on the heap region
   64.18    // size (LogOfHRGrainBytes / LogOfHRGrainWords /
    65.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Fri Sep 27 13:49:57 2013 -0400
    65.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Fri Sep 27 13:53:43 2013 -0400
    65.3 @@ -71,27 +71,16 @@
    65.4  
    65.5  // Public
    65.6  
    65.7 -void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
    65.8 -                               uint max_length) {
    65.9 +void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
   65.10    assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
   65.11           "bottom should be heap region aligned");
   65.12    assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
   65.13           "end should be heap region aligned");
   65.14  
   65.15 -  _length = 0;
   65.16 -  _heap_bottom = bottom;
   65.17 -  _heap_end = end;
   65.18 -  _region_shift = HeapRegion::LogOfHRGrainBytes;
   65.19    _next_search_index = 0;
   65.20    _allocated_length = 0;
   65.21 -  _max_length = max_length;
   65.22  
   65.23 -  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length, mtGC);
   65.24 -  memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
   65.25 -  _regions_biased = _regions - ((uintx) bottom >> _region_shift);
   65.26 -
   65.27 -  assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
   65.28 -         "bottom should be included in the region with index 0");
   65.29 +  _regions.initialize(bottom, end, HeapRegion::GrainBytes);
   65.30  }
   65.31  
   65.32  MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
   65.33 @@ -101,15 +90,15 @@
   65.34    G1CollectedHeap* g1h = G1CollectedHeap::heap();
   65.35  
   65.36    HeapWord* next_bottom = old_end;
   65.37 -  assert(_heap_bottom <= next_bottom, "invariant");
   65.38 +  assert(heap_bottom() <= next_bottom, "invariant");
   65.39    while (next_bottom < new_end) {
   65.40 -    assert(next_bottom < _heap_end, "invariant");
   65.41 +    assert(next_bottom < heap_end(), "invariant");
   65.42      uint index = length();
   65.43  
   65.44 -    assert(index < _max_length, "otherwise we cannot expand further");
   65.45 +    assert(index < max_length(), "otherwise we cannot expand further");
   65.46      if (index == 0) {
   65.47        // We have not allocated any regions so far
   65.48 -      assert(next_bottom == _heap_bottom, "invariant");
   65.49 +      assert(next_bottom == heap_bottom(), "invariant");
   65.50      } else {
   65.51        // next_bottom should match the end of the last/previous region
   65.52        assert(next_bottom == at(index - 1)->end(), "invariant");
   65.53 @@ -122,8 +111,8 @@
   65.54          // allocation failed, we bail out and return what we have done so far
   65.55          return MemRegion(old_end, next_bottom);
   65.56        }
   65.57 -      assert(_regions[index] == NULL, "invariant");
   65.58 -      _regions[index] = new_hr;
   65.59 +      assert(_regions.get_by_index(index) == NULL, "invariant");
   65.60 +      _regions.set_by_index(index, new_hr);
   65.61        increment_allocated_length();
   65.62      }
   65.63      // Have to increment the length first, otherwise we will get an
   65.64 @@ -228,26 +217,26 @@
   65.65  
   65.66  #ifndef PRODUCT
   65.67  void HeapRegionSeq::verify_optional() {
   65.68 -  guarantee(_length <= _allocated_length,
   65.69 +  guarantee(length() <= _allocated_length,
   65.70              err_msg("invariant: _length: %u _allocated_length: %u",
   65.71 -                    _length, _allocated_length));
   65.72 -  guarantee(_allocated_length <= _max_length,
   65.73 +                    length(), _allocated_length));
   65.74 +  guarantee(_allocated_length <= max_length(),
   65.75              err_msg("invariant: _allocated_length: %u _max_length: %u",
   65.76 -                    _allocated_length, _max_length));
   65.77 -  guarantee(_next_search_index <= _length,
   65.78 +                    _allocated_length, max_length()));
   65.79 +  guarantee(_next_search_index <= length(),
   65.80              err_msg("invariant: _next_search_index: %u _length: %u",
   65.81 -                    _next_search_index, _length));
   65.82 +                    _next_search_index, length()));
   65.83  
   65.84 -  HeapWord* prev_end = _heap_bottom;
   65.85 +  HeapWord* prev_end = heap_bottom();
   65.86    for (uint i = 0; i < _allocated_length; i += 1) {
   65.87 -    HeapRegion* hr = _regions[i];
   65.88 +    HeapRegion* hr = _regions.get_by_index(i);
   65.89      guarantee(hr != NULL, err_msg("invariant: i: %u", i));
   65.90      guarantee(hr->bottom() == prev_end,
   65.91                err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
   65.92                        i, HR_FORMAT_PARAMS(hr), prev_end));
   65.93      guarantee(hr->hrs_index() == i,
   65.94                err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
   65.95 -    if (i < _length) {
   65.96 +    if (i < length()) {
   65.97        // Asserts will fire if i is >= _length
   65.98        HeapWord* addr = hr->bottom();
   65.99        guarantee(addr_to_region(addr) == hr, "sanity");
  65.100 @@ -265,8 +254,8 @@
  65.101        prev_end = hr->end();
  65.102      }
  65.103    }
  65.104 -  for (uint i = _allocated_length; i < _max_length; i += 1) {
  65.105 -    guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
  65.106 +  for (uint i = _allocated_length; i < max_length(); i += 1) {
  65.107 +    guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
  65.108    }
  65.109  }
  65.110  #endif // PRODUCT
    66.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Fri Sep 27 13:49:57 2013 -0400
    66.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Fri Sep 27 13:53:43 2013 -0400
    66.3 @@ -25,10 +25,17 @@
    66.4  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
    66.5  #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
    66.6  
    66.7 +#include "gc_implementation/g1/g1BiasedArray.hpp"
    66.8 +
    66.9  class HeapRegion;
   66.10  class HeapRegionClosure;
   66.11  class FreeRegionList;
   66.12  
   66.13 +class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
   66.14 + protected:
   66.15 +   virtual HeapRegion* default_value() const { return NULL; }
   66.16 +};
   66.17 +
   66.18  // This class keeps track of the region metadata (i.e., HeapRegion
   66.19  // instances). They are kept in the _regions array in address
   66.20  // order. A region's index in the array corresponds to its index in
   66.21 @@ -44,35 +51,21 @@
   66.22  //
   66.23  // We keep track of three lengths:
   66.24  //
   66.25 -// * _length (returned by length()) is the number of currently
   66.26 +// * _committed_length (returned by length()) is the number of currently
   66.27  //   committed regions.
   66.28  // * _allocated_length (not exposed outside this class) is the
   66.29  //   number of regions for which we have HeapRegions.
   66.30 -// * _max_length (returned by max_length()) is the maximum number of
   66.31 -//   regions the heap can have.
   66.32 +// * max_length() returns the maximum number of regions the heap can have.
   66.33  //
   66.34 -// and maintain that: _length <= _allocated_length <= _max_length
   66.35 +// and maintain that: _committed_length <= _allocated_length <= max_length()
   66.36  
   66.37  class HeapRegionSeq: public CHeapObj<mtGC> {
   66.38    friend class VMStructs;
   66.39  
   66.40 -  // The array that holds the HeapRegions.
   66.41 -  HeapRegion** _regions;
   66.42 -
   66.43 -  // Version of _regions biased to address 0
   66.44 -  HeapRegion** _regions_biased;
   66.45 +  G1HeapRegionTable _regions;
   66.46  
   66.47    // The number of regions committed in the heap.
   66.48 -  uint _length;
   66.49 -
   66.50 -  // The address of the first reserved word in the heap.
   66.51 -  HeapWord* _heap_bottom;
   66.52 -
   66.53 -  // The address of the last reserved word in the heap - 1.
   66.54 -  HeapWord* _heap_end;
   66.55 -
   66.56 -  // The log of the region byte size.
   66.57 -  uint _region_shift;
   66.58 +  uint _committed_length;
   66.59  
   66.60    // A hint for which index to start searching from for humongous
   66.61    // allocations.
   66.62 @@ -81,37 +74,33 @@
   66.63    // The number of regions for which we have allocated HeapRegions for.
   66.64    uint _allocated_length;
   66.65  
   66.66 -  // The maximum number of regions in the heap.
   66.67 -  uint _max_length;
   66.68 -
   66.69    // Find a contiguous set of empty regions of length num, starting
   66.70    // from the given index.
   66.71    uint find_contiguous_from(uint from, uint num);
   66.72  
   66.73 -  // Map a heap address to a biased region index. Assume that the
   66.74 -  // address is valid.
   66.75 -  inline uintx addr_to_index_biased(HeapWord* addr) const;
   66.76 -
   66.77    void increment_allocated_length() {
   66.78 -    assert(_allocated_length < _max_length, "pre-condition");
   66.79 +    assert(_allocated_length < max_length(), "pre-condition");
   66.80      _allocated_length++;
   66.81    }
   66.82  
   66.83    void increment_length() {
   66.84 -    assert(_length < _max_length, "pre-condition");
   66.85 -    _length++;
   66.86 +    assert(length() < max_length(), "pre-condition");
   66.87 +    _committed_length++;
   66.88    }
   66.89  
   66.90    void decrement_length() {
   66.91 -    assert(_length > 0, "pre-condition");
   66.92 -    _length--;
   66.93 +    assert(length() > 0, "pre-condition");
   66.94 +    _committed_length--;
   66.95    }
   66.96  
   66.97 +  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
   66.98 +  HeapWord* heap_end() const {return _regions.end_address_mapped(); }
   66.99 +
  66.100   public:
  66.101    // Empty contructor, we'll initialize it with the initialize() method.
  66.102 -  HeapRegionSeq() { }
  66.103 +  HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
  66.104  
  66.105 -  void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
  66.106 +  void initialize(HeapWord* bottom, HeapWord* end);
  66.107  
  66.108    // Return the HeapRegion at the given index. Assume that the index
  66.109    // is valid.
  66.110 @@ -126,10 +115,10 @@
  66.111    inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
  66.112  
  66.113    // Return the number of regions that have been committed in the heap.
  66.114 -  uint length() const { return _length; }
  66.115 +  uint length() const { return _committed_length; }
  66.116  
  66.117    // Return the maximum number of regions in the heap.
  66.118 -  uint max_length() const { return _max_length; }
  66.119 +  uint max_length() const { return (uint)_regions.length(); }
  66.120  
  66.121    // Expand the sequence to reflect that the heap has grown from
  66.122    // old_end to new_end. Either create new HeapRegions, or re-use
    67.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Fri Sep 27 13:49:57 2013 -0400
    67.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Fri Sep 27 13:53:43 2013 -0400
    67.3 @@ -28,28 +28,16 @@
    67.4  #include "gc_implementation/g1/heapRegion.hpp"
    67.5  #include "gc_implementation/g1/heapRegionSeq.hpp"
    67.6  
    67.7 -inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
    67.8 -  assert(_heap_bottom <= addr && addr < _heap_end,
    67.9 -         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
   67.10 -                 addr, _heap_bottom, _heap_end));
   67.11 -  uintx index = (uintx) addr >> _region_shift;
   67.12 -  return index;
   67.13 -}
   67.14 -
   67.15  inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
   67.16 -  assert(_heap_bottom <= addr && addr < _heap_end,
   67.17 -         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
   67.18 -                 addr, _heap_bottom, _heap_end));
   67.19 -  uintx index_biased = addr_to_index_biased(addr);
   67.20 -  HeapRegion* hr = _regions_biased[index_biased];
   67.21 +  HeapRegion* hr = _regions.get_by_address(addr);
   67.22    assert(hr != NULL, "invariant");
   67.23    return hr;
   67.24  }
   67.25  
   67.26  inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
   67.27 -  if (addr != NULL && addr < _heap_end) {
   67.28 -    assert(addr >= _heap_bottom,
   67.29 -          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom));
   67.30 +  if (addr != NULL && addr < heap_end()) {
   67.31 +    assert(addr >= heap_bottom(),
   67.32 +          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom()));
   67.33      return addr_to_region_unsafe(addr);
   67.34    }
   67.35    return NULL;
   67.36 @@ -57,7 +45,7 @@
   67.37  
   67.38  inline HeapRegion* HeapRegionSeq::at(uint index) const {
   67.39    assert(index < length(), "pre-condition");
   67.40 -  HeapRegion* hr = _regions[index];
   67.41 +  HeapRegion* hr = _regions.get_by_index(index);
   67.42    assert(hr != NULL, "sanity");
   67.43    assert(hr->hrs_index() == index, "sanity");
   67.44    return hr;
    68.1 --- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Fri Sep 27 13:49:57 2013 -0400
    68.2 +++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Fri Sep 27 13:53:43 2013 -0400
    68.3 @@ -38,6 +38,7 @@
    68.4  
    68.5  class PtrQueueSet;
    68.6  class PtrQueue VALUE_OBJ_CLASS_SPEC {
    68.7 +  friend class VMStructs;
    68.8  
    68.9  protected:
   68.10    // The ptr queue set to which this queue belongs.
    69.1 --- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Fri Sep 27 13:49:57 2013 -0400
    69.2 +++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Fri Sep 27 13:53:43 2013 -0400
    69.3 @@ -31,10 +31,17 @@
    69.4  
    69.5  #define VM_STRUCTS_G1(nonstatic_field, static_field)                          \
    69.6                                                                                \
    69.7 -  static_field(HeapRegion, GrainBytes, size_t)                                \
    69.8 +  static_field(HeapRegion, GrainBytes,        size_t)                         \
    69.9 +  static_field(HeapRegion, LogOfHRGrainBytes, int)                            \
   69.10                                                                                \
   69.11 -  nonstatic_field(HeapRegionSeq,   _regions, HeapRegion**)                    \
   69.12 -  nonstatic_field(HeapRegionSeq,   _length,  uint)                            \
   69.13 +  nonstatic_field(G1HeapRegionTable, _base,             address)              \
   69.14 +  nonstatic_field(G1HeapRegionTable, _length,           size_t)               \
   69.15 +  nonstatic_field(G1HeapRegionTable, _biased_base,      address)              \
   69.16 +  nonstatic_field(G1HeapRegionTable, _bias,             size_t)               \
   69.17 +  nonstatic_field(G1HeapRegionTable, _shift_by,         uint)                 \
   69.18 +                                                                              \
   69.19 +  nonstatic_field(HeapRegionSeq,   _regions,            G1HeapRegionTable)    \
   69.20 +  nonstatic_field(HeapRegionSeq,   _committed_length,   uint)                 \
   69.21                                                                                \
   69.22    nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
   69.23    nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
   69.24 @@ -57,6 +64,8 @@
   69.25  
   69.26  #define VM_TYPES_G1(declare_type, declare_toplevel_type)                      \
   69.27                                                                                \
   69.28 +  declare_toplevel_type(G1HeapRegionTable)                                    \
   69.29 +                                                                              \
   69.30    declare_type(G1CollectedHeap, SharedHeap)                                   \
   69.31                                                                                \
   69.32    declare_type(HeapRegion, ContiguousSpace)                                   \
    70.1 --- a/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Fri Sep 27 13:49:57 2013 -0400
    70.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Fri Sep 27 13:53:43 2013 -0400
    70.3 @@ -1,5 +1,5 @@
    70.4  /*
    70.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    70.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    70.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    70.8   *
    70.9   * This code is free software; you can redistribute it and/or modify it
   70.10 @@ -68,9 +68,6 @@
   70.11    size_t min_old_gen_size()   { return _min_gen1_size; }
   70.12    size_t old_gen_size()       { return _initial_gen1_size; }
   70.13    size_t max_old_gen_size()   { return _max_gen1_size; }
   70.14 -
   70.15 -  size_t metaspace_size()      { return MetaspaceSize; }
   70.16 -  size_t max_metaspace_size()  { return MaxMetaspaceSize; }
   70.17  };
   70.18  
   70.19  #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GENERATIONSIZER_HPP
    71.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Fri Sep 27 13:49:57 2013 -0400
    71.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Fri Sep 27 13:53:43 2013 -0400
    71.3 @@ -86,6 +86,11 @@
    71.4      set_alignment(_old_gen_alignment, intra_heap_alignment());
    71.5    }
    71.6  
    71.7 +  // Return the (conservative) maximum heap alignment
    71.8 +  static size_t conservative_max_heap_alignment() {
    71.9 +    return intra_heap_alignment();
   71.10 +  }
   71.11 +
   71.12    // For use by VM operations
   71.13    enum CollectionType {
   71.14      Scavenge,
   71.15 @@ -122,7 +127,7 @@
   71.16  
   71.17    // The alignment used for eden and survivors within the young gen
   71.18    // and for boundary between young gen and old gen.
   71.19 -  size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; }
   71.20 +  static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; }
   71.21  
   71.22    size_t capacity() const;
   71.23    size_t used() const;
    72.1 --- a/src/share/vm/gc_interface/collectedHeap.cpp	Fri Sep 27 13:49:57 2013 -0400
    72.2 +++ b/src/share/vm/gc_interface/collectedHeap.cpp	Fri Sep 27 13:53:43 2013 -0400
    72.3 @@ -87,15 +87,15 @@
    72.4    const MetaspaceSizes meta_space(
    72.5        MetaspaceAux::allocated_capacity_bytes(),
    72.6        MetaspaceAux::allocated_used_bytes(),
    72.7 -      MetaspaceAux::reserved_in_bytes());
    72.8 +      MetaspaceAux::reserved_bytes());
    72.9    const MetaspaceSizes data_space(
   72.10        MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType),
   72.11        MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
   72.12 -      MetaspaceAux::reserved_in_bytes(Metaspace::NonClassType));
   72.13 +      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
   72.14    const MetaspaceSizes class_space(
   72.15        MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType),
   72.16        MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
   72.17 -      MetaspaceAux::reserved_in_bytes(Metaspace::ClassType));
   72.18 +      MetaspaceAux::reserved_bytes(Metaspace::ClassType));
   72.19  
   72.20    return MetaspaceSummary(meta_space, data_space, class_space);
   72.21  }
    73.1 --- a/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Sep 27 13:49:57 2013 -0400
    73.2 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Sep 27 13:53:43 2013 -0400
    73.3 @@ -496,15 +496,15 @@
    73.4  
    73.5  IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecodes::Code bytecode))
    73.6    // resolve field
    73.7 -  FieldAccessInfo info;
    73.8 +  fieldDescriptor info;
    73.9    constantPoolHandle pool(thread, method(thread)->constants());
   73.10    bool is_put    = (bytecode == Bytecodes::_putfield  || bytecode == Bytecodes::_putstatic);
   73.11    bool is_static = (bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic);
   73.12  
   73.13    {
   73.14      JvmtiHideSingleStepping jhss(thread);
   73.15 -    LinkResolver::resolve_field(info, pool, get_index_u2_cpcache(thread, bytecode),
   73.16 -                                bytecode, false, CHECK);
   73.17 +    LinkResolver::resolve_field_access(info, pool, get_index_u2_cpcache(thread, bytecode),
   73.18 +                                       bytecode, CHECK);
   73.19    } // end JvmtiHideSingleStepping
   73.20  
   73.21    // check if link resolution caused cpCache to be updated
   73.22 @@ -524,7 +524,7 @@
   73.23    // class is intitialized.  This is required so that access to the static
   73.24    // field will call the initialization function every time until the class
   73.25    // is completely initialized ala. in 2.17.5 in JVM Specification.
   73.26 -  InstanceKlass *klass = InstanceKlass::cast(info.klass()());
   73.27 +  InstanceKlass* klass = InstanceKlass::cast(info.field_holder());
   73.28    bool uninitialized_static = ((bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic) &&
   73.29                                 !klass->is_initialized());
   73.30    Bytecodes::Code get_code = (Bytecodes::Code)0;
   73.31 @@ -539,9 +539,9 @@
   73.32    cache_entry(thread)->set_field(
   73.33      get_code,
   73.34      put_code,
   73.35 -    info.klass(),
   73.36 -    info.field_index(),
   73.37 -    info.field_offset(),
   73.38 +    info.field_holder(),
   73.39 +    info.index(),
   73.40 +    info.offset(),
   73.41      state,
   73.42      info.access_flags().is_final(),
   73.43      info.access_flags().is_volatile(),
   73.44 @@ -686,29 +686,55 @@
   73.45    if (already_resolved(thread)) return;
   73.46  
   73.47    if (bytecode == Bytecodes::_invokeinterface) {
   73.48 -
   73.49      if (TraceItables && Verbose) {
   73.50        ResourceMark rm(thread);
   73.51        tty->print_cr("Resolving: klass: %s to method: %s", info.resolved_klass()->name()->as_C_string(), info.resolved_method()->name()->as_C_string());
   73.52      }
   73.53 +  }
   73.54 +#ifdef ASSERT
   73.55 +  if (bytecode == Bytecodes::_invokeinterface) {
   73.56      if (info.resolved_method()->method_holder() ==
   73.57                                              SystemDictionary::Object_klass()) {
   73.58        // NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
   73.59 -      // (see also cpCacheOop.cpp for details)
   73.60 +      // (see also CallInfo::set_interface for details)
   73.61 +      assert(info.call_kind() == CallInfo::vtable_call ||
   73.62 +             info.call_kind() == CallInfo::direct_call, "");
   73.63        methodHandle rm = info.resolved_method();
   73.64        assert(rm->is_final() || info.has_vtable_index(),
   73.65               "should have been set already");
   73.66 -      cache_entry(thread)->set_method(bytecode, rm, info.vtable_index());
   73.67 +    } else if (!info.resolved_method()->has_itable_index()) {
   73.68 +      // Resolved something like CharSequence.toString.  Use vtable not itable.
   73.69 +      assert(info.call_kind() != CallInfo::itable_call, "");
   73.70      } else {
   73.71        // Setup itable entry
   73.72 -      int index = klassItable::compute_itable_index(info.resolved_method()());
   73.73 -      cache_entry(thread)->set_interface_call(info.resolved_method(), index);
   73.74 +      assert(info.call_kind() == CallInfo::itable_call, "");
   73.75 +      int index = info.resolved_method()->itable_index();
   73.76 +      assert(info.itable_index() == index, "");
   73.77      }
   73.78    } else {
   73.79 -    cache_entry(thread)->set_method(
   73.80 +    assert(info.call_kind() == CallInfo::direct_call ||
   73.81 +           info.call_kind() == CallInfo::vtable_call, "");
   73.82 +  }
   73.83 +#endif
   73.84 +  switch (info.call_kind()) {
   73.85 +  case CallInfo::direct_call:
   73.86 +    cache_entry(thread)->set_direct_call(
   73.87 +      bytecode,
   73.88 +      info.resolved_method());
   73.89 +    break;
   73.90 +  case CallInfo::vtable_call:
   73.91 +    cache_entry(thread)->set_vtable_call(
   73.92        bytecode,
   73.93        info.resolved_method(),
   73.94        info.vtable_index());
   73.95 +    break;
   73.96 +  case CallInfo::itable_call:
   73.97 +    cache_entry(thread)->set_itable_call(
   73.98 +      bytecode,
   73.99 +      info.resolved_method(),
  73.100 +      info.itable_index());
  73.101 +    break;
  73.102 +  default:  ShouldNotReachHere();
  73.103    }
  73.104  }
  73.105  IRT_END
    74.1 --- a/src/share/vm/interpreter/linkResolver.cpp	Fri Sep 27 13:49:57 2013 -0400
    74.2 +++ b/src/share/vm/interpreter/linkResolver.cpp	Fri Sep 27 13:53:43 2013 -0400
    74.3 @@ -46,19 +46,6 @@
    74.4  #include "runtime/thread.inline.hpp"
    74.5  #include "runtime/vmThread.hpp"
    74.6  
    74.7 -//------------------------------------------------------------------------------------------------------------------------
    74.8 -// Implementation of FieldAccessInfo
    74.9 -
   74.10 -void FieldAccessInfo::set(KlassHandle klass, Symbol* name, int field_index, int field_offset,
   74.11 -BasicType field_type, AccessFlags access_flags) {
   74.12 -  _klass        = klass;
   74.13 -  _name         = name;
   74.14 -  _field_index  = field_index;
   74.15 -  _field_offset = field_offset;
   74.16 -  _field_type   = field_type;
   74.17 -  _access_flags = access_flags;
   74.18 -}
   74.19 -
   74.20  
   74.21  //------------------------------------------------------------------------------------------------------------------------
   74.22  // Implementation of CallInfo
   74.23 @@ -66,26 +53,25 @@
   74.24  
   74.25  void CallInfo::set_static(KlassHandle resolved_klass, methodHandle resolved_method, TRAPS) {
   74.26    int vtable_index = Method::nonvirtual_vtable_index;
   74.27 -  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK);
   74.28 +  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, CallInfo::direct_call, vtable_index, CHECK);
   74.29  }
   74.30  
   74.31  
   74.32 -void CallInfo::set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, TRAPS) {
   74.33 +void CallInfo::set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int itable_index, TRAPS) {
   74.34    // This is only called for interface methods. If the resolved_method
   74.35    // comes from java/lang/Object, it can be the subject of a virtual call, so
   74.36    // we should pick the vtable index from the resolved method.
   74.37 -  // Other than that case, there is no valid vtable index to specify.
   74.38 -  int vtable_index = Method::invalid_vtable_index;
   74.39 -  if (resolved_method->method_holder() == SystemDictionary::Object_klass()) {
   74.40 -    assert(resolved_method->vtable_index() == selected_method->vtable_index(), "sanity check");
   74.41 -    vtable_index = resolved_method->vtable_index();
   74.42 -  }
   74.43 -  set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK);
   74.44 +  // In that case, the caller must call set_virtual instead of set_interface.
   74.45 +  assert(resolved_method->method_holder()->is_interface(), "");
   74.46 +  assert(itable_index == resolved_method()->itable_index(), "");
   74.47 +  set_common(resolved_klass, selected_klass, resolved_method, selected_method, CallInfo::itable_call, itable_index, CHECK);
   74.48  }
   74.49  
   74.50  void CallInfo::set_virtual(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
   74.51    assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index, "valid index");
   74.52 -  set_common(resolved_klass, selected_klass, resolved_method, selected_method, vtable_index, CHECK);
   74.53 +  assert(vtable_index < 0 || !resolved_method->has_vtable_index() || vtable_index == resolved_method->vtable_index(), "");
   74.54 +  CallKind kind = (vtable_index >= 0 && !resolved_method->can_be_statically_bound() ? CallInfo::vtable_call : CallInfo::direct_call);
   74.55 +  set_common(resolved_klass, selected_klass, resolved_method, selected_method, kind, vtable_index, CHECK);
   74.56    assert(!resolved_method->is_compiled_lambda_form(), "these must be handled via an invokehandle call");
   74.57  }
   74.58  
   74.59 @@ -98,20 +84,29 @@
   74.60           resolved_method->is_compiled_lambda_form(),
   74.61           "linkMethod must return one of these");
   74.62    int vtable_index = Method::nonvirtual_vtable_index;
   74.63 -  assert(resolved_method->vtable_index() == vtable_index, "");
   74.64 -  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK);
   74.65 +  assert(!resolved_method->has_vtable_index(), "");
   74.66 +  set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, CallInfo::direct_call, vtable_index, CHECK);
   74.67    _resolved_appendix    = resolved_appendix;
   74.68    _resolved_method_type = resolved_method_type;
   74.69  }
   74.70  
   74.71 -void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
   74.72 +void CallInfo::set_common(KlassHandle resolved_klass,
   74.73 +                          KlassHandle selected_klass,
   74.74 +                          methodHandle resolved_method,
   74.75 +                          methodHandle selected_method,
   74.76 +                          CallKind kind,
   74.77 +                          int index,
   74.78 +                          TRAPS) {
   74.79    assert(resolved_method->signature() == selected_method->signature(), "signatures must correspond");
   74.80    _resolved_klass  = resolved_klass;
   74.81    _selected_klass  = selected_klass;
   74.82    _resolved_method = resolved_method;
   74.83    _selected_method = selected_method;
   74.84 -  _vtable_index    = vtable_index;
   74.85 +  _call_kind       = kind;
   74.86 +  _call_index      = index;
   74.87    _resolved_appendix = Handle();
   74.88 +  DEBUG_ONLY(verify());  // verify before making side effects
   74.89 +
   74.90    if (CompilationPolicy::must_be_compiled(selected_method)) {
   74.91      // This path is unusual, mostly used by the '-Xcomp' stress test mode.
   74.92  
   74.93 @@ -138,6 +133,65 @@
   74.94    }
   74.95  }
   74.96  
   74.97 +// utility query for unreflecting a method
   74.98 +CallInfo::CallInfo(Method* resolved_method, Klass* resolved_klass) {
   74.99 +  Klass* resolved_method_holder = resolved_method->method_holder();
  74.100 +  if (resolved_klass == NULL) { // 2nd argument defaults to holder of 1st
  74.101 +    resolved_klass = resolved_method_holder;
  74.102 +  }
  74.103 +  _resolved_klass  = resolved_klass;
  74.104 +  _selected_klass  = resolved_klass;
  74.105 +  _resolved_method = resolved_method;
  74.106 +  _selected_method = resolved_method;
  74.107 +  // classify:
  74.108 +  CallKind kind = CallInfo::unknown_kind;
  74.109 +  int index = resolved_method->vtable_index();
  74.110 +  if (resolved_method->can_be_statically_bound()) {
  74.111 +    kind = CallInfo::direct_call;
  74.112 +  } else if (!resolved_method_holder->is_interface()) {
  74.113 +    // Could be an Object method inherited into an interface, but still a vtable call.
  74.114 +    kind = CallInfo::vtable_call;
  74.115 +  } else if (!resolved_klass->is_interface()) {
  74.116 +    // A miranda method.  Compute the vtable index.
  74.117 +    ResourceMark rm;
  74.118 +    klassVtable* vt = InstanceKlass::cast(resolved_klass)->vtable();
  74.119 +    index = vt->index_of_miranda(resolved_method->name(),
  74.120 +                                 resolved_method->signature());
  74.121 +    kind = CallInfo::vtable_call;
  74.122 +  } else {
  74.123 +    // A regular interface call.
  74.124 +    kind = CallInfo::itable_call;
  74.125 +    index = resolved_method->itable_index();
  74.126 +  }
  74.127 +  assert(index == Method::nonvirtual_vtable_index || index >= 0, err_msg("bad index %d", index));
  74.128 +  _call_kind  = kind;
  74.129 +  _call_index = index;
  74.130 +  _resolved_appendix = Handle();
  74.131 +  DEBUG_ONLY(verify());
  74.132 +}
  74.133 +
  74.134 +#ifdef ASSERT
  74.135 +void CallInfo::verify() {
  74.136 +  switch (call_kind()) {  // the meaning and allowed value of index depends on kind
  74.137 +  case CallInfo::direct_call:
  74.138 +    if (_call_index == Method::nonvirtual_vtable_index)  break;
  74.139 +    // else fall through to check vtable index:
  74.140 +  case CallInfo::vtable_call:
  74.141 +    assert(resolved_klass()->verify_vtable_index(_call_index), "");
  74.142 +    break;
  74.143 +  case CallInfo::itable_call:
  74.144 +    assert(resolved_method()->method_holder()->verify_itable_index(_call_index), "");
  74.145 +    break;
  74.146 +  case CallInfo::unknown_kind:
  74.147 +    assert(call_kind() != CallInfo::unknown_kind, "CallInfo must be set");
  74.148 +    break;
  74.149 +  default:
  74.150 +    fatal(err_msg_res("Unexpected call kind %d", call_kind()));
  74.151 +  }
  74.152 +}
  74.153 +#endif //ASSERT
  74.154 +
  74.155 +
  74.156  
  74.157  //------------------------------------------------------------------------------------------------------------------------
  74.158  // Klass resolution
  74.159 @@ -163,13 +217,6 @@
  74.160    result = KlassHandle(THREAD, result_oop);
  74.161  }
  74.162  
  74.163 -void LinkResolver::resolve_klass_no_update(KlassHandle& result, constantPoolHandle pool, int index, TRAPS) {
  74.164 -  Klass* result_oop =
  74.165 -         ConstantPool::klass_ref_at_if_loaded_check(pool, index, CHECK);
  74.166 -  result = KlassHandle(THREAD, result_oop);
  74.167 -}
  74.168 -
  74.169 -
  74.170  //------------------------------------------------------------------------------------------------------------------------
  74.171  // Method resolution
  74.172  //
  74.173 @@ -360,7 +407,12 @@
  74.174  
  74.175  void LinkResolver::resolve_method_statically(methodHandle& resolved_method, KlassHandle& resolved_klass,
  74.176                                               Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS) {
  74.177 -
  74.178 +  // This method is used only
  74.179 +  // (1) in C2 from InlineTree::ok_to_inline (via ciMethod::check_call),
  74.180 +  // and
  74.181 +  // (2) in Bytecode_invoke::static_target
  74.182 +  // It appears to fail when applied to an invokeinterface call site.
  74.183 +  // FIXME: Remove this method and ciMethod::check_call; refactor to use the other LinkResolver entry points.
  74.184    // resolve klass
  74.185    if (code == Bytecodes::_invokedynamic) {
  74.186      resolved_klass = SystemDictionary::MethodHandle_klass();
  74.187 @@ -580,45 +632,49 @@
  74.188    }
  74.189  }
  74.190  
  74.191 -void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS) {
  74.192 -  resolve_field(result, pool, index, byte, check_only, true, CHECK);
  74.193 +void LinkResolver::resolve_field_access(fieldDescriptor& result, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS) {
  74.194 +  // Load these early in case the resolve of the containing klass fails
  74.195 +  Symbol* field = pool->name_ref_at(index);
  74.196 +  Symbol* sig   = pool->signature_ref_at(index);
  74.197 +
  74.198 +  // resolve specified klass
  74.199 +  KlassHandle resolved_klass;
  74.200 +  resolve_klass(resolved_klass, pool, index, CHECK);
  74.201 +
  74.202 +  KlassHandle  current_klass(THREAD, pool->pool_holder());
  74.203 +  resolve_field(result, resolved_klass, field, sig, current_klass, byte, true, true, CHECK);
  74.204  }
  74.205  
  74.206 -void LinkResolver::resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, bool update_pool, TRAPS) {
  74.207 +void LinkResolver::resolve_field(fieldDescriptor& fd, KlassHandle resolved_klass, Symbol* field, Symbol* sig,
  74.208 +                                 KlassHandle current_klass, Bytecodes::Code byte, bool check_access, bool initialize_class,
  74.209 +                                 TRAPS) {
  74.210    assert(byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic ||
  74.211 -         byte == Bytecodes::_getfield  || byte == Bytecodes::_putfield, "bad bytecode");
  74.212 +         byte == Bytecodes::_getfield  || byte == Bytecodes::_putfield  ||
  74.213 +         (byte == Bytecodes::_nop && !check_access), "bad field access bytecode");
  74.214  
  74.215    bool is_static = (byte == Bytecodes::_getstatic || byte == Bytecodes::_putstatic);
  74.216    bool is_put    = (byte == Bytecodes::_putfield  || byte == Bytecodes::_putstatic);
  74.217  
  74.218 -  // resolve specified klass
  74.219 -  KlassHandle resolved_klass;
  74.220 -  if (update_pool) {
  74.221 -    resolve_klass(resolved_klass, pool, index, CHECK);
  74.222 -  } else {
  74.223 -    resolve_klass_no_update(resolved_klass, pool, index, CHECK);
  74.224 -  }
  74.225 -  // Load these early in case the resolve of the containing klass fails
  74.226 -  Symbol* field = pool->name_ref_at(index);
  74.227 -  Symbol* sig   = pool->signature_ref_at(index);
  74.228    // Check if there's a resolved klass containing the field
  74.229 -  if( resolved_klass.is_null() ) {
  74.230 +  if (resolved_klass.is_null()) {
  74.231      ResourceMark rm(THREAD);
  74.232      THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string());
  74.233    }
  74.234  
  74.235    // Resolve instance field
  74.236 -  fieldDescriptor fd; // find_field initializes fd if found
  74.237    KlassHandle sel_klass(THREAD, InstanceKlass::cast(resolved_klass())->find_field(field, sig, &fd));
  74.238    // check if field exists; i.e., if a klass containing the field def has been selected
  74.239 -  if (sel_klass.is_null()){
  74.240 +  if (sel_klass.is_null()) {
  74.241      ResourceMark rm(THREAD);
  74.242      THROW_MSG(vmSymbols::java_lang_NoSuchFieldError(), field->as_C_string());
  74.243    }
  74.244  
  74.245 +  if (!check_access)
  74.246 +    // Access checking may be turned off when calling from within the VM.
  74.247 +    return;
  74.248 +
  74.249    // check access
  74.250 -  KlassHandle ref_klass(THREAD, pool->pool_holder());
  74.251 -  check_field_accessability(ref_klass, resolved_klass, sel_klass, fd, CHECK);
  74.252 +  check_field_accessability(current_klass, resolved_klass, sel_klass, fd, CHECK);
  74.253  
  74.254    // check for errors
  74.255    if (is_static != fd.is_static()) {
  74.256 @@ -629,7 +685,7 @@
  74.257    }
  74.258  
  74.259    // Final fields can only be accessed from its own class.
  74.260 -  if (is_put && fd.access_flags().is_final() && sel_klass() != pool->pool_holder()) {
  74.261 +  if (is_put && fd.access_flags().is_final() && sel_klass() != current_klass()) {
  74.262      THROW(vmSymbols::java_lang_IllegalAccessError());
  74.263    }
  74.264  
  74.265 @@ -639,19 +695,18 @@
  74.266    //
  74.267    // note 2: we don't want to force initialization if we are just checking
  74.268    //         if the field access is legal; e.g., during compilation
  74.269 -  if (is_static && !check_only) {
  74.270 +  if (is_static && initialize_class) {
  74.271      sel_klass->initialize(CHECK);
  74.272    }
  74.273  
  74.274 -  {
  74.275 +  if (sel_klass() != current_klass()) {
  74.276      HandleMark hm(THREAD);
  74.277 -    Handle ref_loader (THREAD, InstanceKlass::cast(ref_klass())->class_loader());
  74.278 +    Handle ref_loader (THREAD, InstanceKlass::cast(current_klass())->class_loader());
  74.279      Handle sel_loader (THREAD, InstanceKlass::cast(sel_klass())->class_loader());
  74.280 -    Symbol*  signature_ref  = pool->signature_ref_at(index);
  74.281      {
  74.282        ResourceMark rm(THREAD);
  74.283        Symbol* failed_type_symbol =
  74.284 -        SystemDictionary::check_signature_loaders(signature_ref,
  74.285 +        SystemDictionary::check_signature_loaders(sig,
  74.286                                                    ref_loader, sel_loader,
  74.287                                                    false,
  74.288                                                    CHECK);
  74.289 @@ -677,9 +732,6 @@
  74.290  
  74.291    // return information. note that the klass is set to the actual klass containing the
  74.292    // field, otherwise access of static fields in superclasses will not work.
  74.293 -  KlassHandle holder (THREAD, fd.field_holder());
  74.294 -  Symbol*  name   = fd.name();
  74.295 -  result.set(holder, name, fd.index(), fd.offset(), fd.field_type(), fd.access_flags());
  74.296  }
  74.297  
  74.298  
  74.299 @@ -907,10 +959,6 @@
  74.300    }
  74.301  
  74.302    // Virtual methods cannot be resolved before its klass has been linked, for otherwise the Method*'s
  74.303 -  // has not been rewritten, and the vtable initialized.
  74.304 -  assert(resolved_method->method_holder()->is_linked(), "must be linked");
  74.305 -
  74.306 -  // Virtual methods cannot be resolved before its klass has been linked, for otherwise the Method*'s
  74.307    // has not been rewritten, and the vtable initialized. Make sure to do this after the nullcheck, since
  74.308    // a missing receiver might result in a bogus lookup.
  74.309    assert(resolved_method->method_holder()->is_linked(), "must be linked");
  74.310 @@ -920,6 +968,7 @@
  74.311      vtable_index = vtable_index_of_miranda_method(resolved_klass,
  74.312                             resolved_method->name(),
  74.313                             resolved_method->signature(), CHECK);
  74.314 +
  74.315      assert(vtable_index >= 0 , "we should have valid vtable index at this point");
  74.316  
  74.317      InstanceKlass* inst = InstanceKlass::cast(recv_klass());
  74.318 @@ -927,6 +976,7 @@
  74.319    } else {
  74.320      // at this point we are sure that resolved_method is virtual and not
  74.321      // a miranda method; therefore, it must have a valid vtable index.
  74.322 +    assert(!resolved_method->has_itable_index(), "");
  74.323      vtable_index = resolved_method->vtable_index();
  74.324      // We could get a negative vtable_index for final methods,
  74.325      // because as an optimization they are they are never put in the vtable,
  74.326 @@ -1006,6 +1056,12 @@
  74.327    lookup_instance_method_in_klasses(sel_method, recv_klass,
  74.328              resolved_method->name(),
  74.329              resolved_method->signature(), CHECK);
  74.330 +  if (sel_method.is_null() && !check_null_and_abstract) {
  74.331 +    // In theory this is a harmless placeholder value, but
  74.332 +    // in practice leaving in null affects the nsk default method tests.
  74.333 +    // This needs further study.
  74.334 +    sel_method = resolved_method;
  74.335 +  }
  74.336    // check if method exists
  74.337    if (sel_method.is_null()) {
  74.338      ResourceMark rm(THREAD);
  74.339 @@ -1046,7 +1102,14 @@
  74.340                                                        sel_method->signature()));
  74.341    }
  74.342    // setup result
  74.343 -  result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, CHECK);
  74.344 +  if (!resolved_method->has_itable_index()) {
  74.345 +    int vtable_index = resolved_method->vtable_index();
  74.346 +    assert(vtable_index == sel_method->vtable_index(), "sanity check");
  74.347 +    result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK);
  74.348 +    return;
  74.349 +  }
  74.350 +  int itable_index = resolved_method()->itable_index();
  74.351 +  result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK);
  74.352  }
  74.353  
  74.354  
  74.355 @@ -1293,7 +1356,8 @@
  74.356    }
  74.357  
  74.358    if (TraceMethodHandles) {
  74.359 -    tty->print_cr("resolve_invokedynamic #%d %s %s",
  74.360 +      ResourceMark rm(THREAD);
  74.361 +      tty->print_cr("resolve_invokedynamic #%d %s %s",
  74.362                    ConstantPool::decode_invokedynamic_index(index),
  74.363                    method_name->as_C_string(), method_signature->as_C_string());
  74.364      tty->print("  BSM info: "); bootstrap_specifier->print();
  74.365 @@ -1342,9 +1406,16 @@
  74.366  //------------------------------------------------------------------------------------------------------------------------
  74.367  #ifndef PRODUCT
  74.368  
  74.369 -void FieldAccessInfo::print() {
  74.370 +void CallInfo::print() {
  74.371    ResourceMark rm;
  74.372 -  tty->print_cr("Field %s@%d", name()->as_C_string(), field_offset());
  74.373 +  const char* kindstr = "unknown";
  74.374 +  switch (_call_kind) {
  74.375 +  case direct_call: kindstr = "direct"; break;
  74.376 +  case vtable_call: kindstr = "vtable"; break;
  74.377 +  case itable_call: kindstr = "itable"; break;
  74.378 +  }
  74.379 +  tty->print_cr("Call %s@%d %s", kindstr, _call_index,
  74.380 +                _resolved_method.is_null() ? "(none)" : _resolved_method->name_and_sig_as_C_string());
  74.381  }
  74.382  
  74.383  #endif
    75.1 --- a/src/share/vm/interpreter/linkResolver.hpp	Fri Sep 27 13:49:57 2013 -0400
    75.2 +++ b/src/share/vm/interpreter/linkResolver.hpp	Fri Sep 27 13:53:43 2013 -0400
    75.3 @@ -1,5 +1,5 @@
    75.4  /*
    75.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    75.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    75.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    75.8   *
    75.9   * This code is free software; you can redistribute it and/or modify it
   75.10 @@ -30,63 +30,54 @@
   75.11  
   75.12  // All the necessary definitions for run-time link resolution.
   75.13  
   75.14 -// LinkInfo & its subclasses provide all the information gathered
   75.15 -// for a particular link after resolving it. A link is any reference
   75.16 +// CallInfo provides all the information gathered for a particular
   75.17 +// linked call site after resolving it. A link is any reference
   75.18  // made from within the bytecodes of a method to an object outside of
   75.19  // that method. If the info is invalid, the link has not been resolved
   75.20  // successfully.
   75.21  
   75.22 -class LinkInfo VALUE_OBJ_CLASS_SPEC {
   75.23 -};
   75.24 -
   75.25 -
   75.26 -// Link information for getfield/putfield & getstatic/putstatic bytecodes.
   75.27 -
   75.28 -class FieldAccessInfo: public LinkInfo {
   75.29 - protected:
   75.30 -  KlassHandle  _klass;
   75.31 -  Symbol*      _name;
   75.32 -  AccessFlags  _access_flags;
   75.33 -  int          _field_index;  // original index in the klass
   75.34 -  int          _field_offset;
   75.35 -  BasicType    _field_type;
   75.36 -
   75.37 +class CallInfo VALUE_OBJ_CLASS_SPEC {
   75.38   public:
   75.39 -  void         set(KlassHandle klass, Symbol* name, int field_index, int field_offset,
   75.40 -                 BasicType field_type, AccessFlags access_flags);
   75.41 -  KlassHandle  klass() const                     { return _klass; }
   75.42 -  Symbol* name() const                           { return _name; }
   75.43 -  int          field_index() const               { return _field_index; }
   75.44 -  int          field_offset() const              { return _field_offset; }
   75.45 -  BasicType    field_type() const                { return _field_type; }
   75.46 -  AccessFlags  access_flags() const              { return _access_flags; }
   75.47 -
   75.48 -  // debugging
   75.49 -  void print()  PRODUCT_RETURN;
   75.50 -};
   75.51 -
   75.52 -
   75.53 -// Link information for all calls.
   75.54 -
   75.55 -class CallInfo: public LinkInfo {
   75.56 +  // Ways that a method call might be selected (or not) based on receiver type.
   75.57 +  // Note that an invokevirtual instruction might be linked with no_dispatch,
   75.58 +  // and an invokeinterface instruction might be linked with any of the three options
   75.59 +  enum CallKind {
   75.60 +    direct_call,                        // jump into resolved_method (must be concrete)
   75.61 +    vtable_call,                        // select recv.klass.method_at_vtable(index)
   75.62 +    itable_call,                        // select recv.klass.method_at_itable(resolved_method.holder, index)
   75.63 +    unknown_kind = -1
   75.64 +  };
   75.65   private:
   75.66 -  KlassHandle  _resolved_klass;         // static receiver klass
   75.67 +  KlassHandle  _resolved_klass;         // static receiver klass, resolved from a symbolic reference
   75.68    KlassHandle  _selected_klass;         // dynamic receiver class (same as static, or subklass)
   75.69    methodHandle _resolved_method;        // static target method
   75.70    methodHandle _selected_method;        // dynamic (actual) target method
   75.71 -  int          _vtable_index;           // vtable index of selected method
   75.72 +  CallKind     _call_kind;              // kind of call (static(=bytecode static/special +
   75.73 +                                        //               others inferred), vtable, itable)
   75.74 +  int          _call_index;             // vtable or itable index of selected class method (if any)
   75.75    Handle       _resolved_appendix;      // extra argument in constant pool (if CPCE::has_appendix)
   75.76    Handle       _resolved_method_type;   // MethodType (for invokedynamic and invokehandle call sites)
   75.77  
   75.78    void         set_static(   KlassHandle resolved_klass,                             methodHandle resolved_method                                                       , TRAPS);
   75.79 -  void         set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method                         , TRAPS);
   75.80 +  void         set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int itable_index       , TRAPS);
   75.81    void         set_virtual(  KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index       , TRAPS);
   75.82    void         set_handle(                                                           methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS);
   75.83 -  void         set_common(   KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index       , TRAPS);
   75.84 +  void         set_common(   KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, CallKind kind, int index, TRAPS);
   75.85  
   75.86    friend class LinkResolver;
   75.87  
   75.88   public:
   75.89 +  CallInfo() {
   75.90 +#ifndef PRODUCT
   75.91 +    _call_kind  = CallInfo::unknown_kind;
   75.92 +    _call_index = Method::garbage_vtable_index;
   75.93 +#endif //PRODUCT
   75.94 +  }
   75.95 +
   75.96 +  // utility to extract an effective CallInfo from a method and an optional receiver limit
   75.97 +  // does not queue the method for compilation
   75.98 +  CallInfo(Method* resolved_method, Klass* resolved_klass = NULL);
   75.99 +
  75.100    KlassHandle  resolved_klass() const            { return _resolved_klass; }
  75.101    KlassHandle  selected_klass() const            { return _selected_klass; }
  75.102    methodHandle resolved_method() const           { return _resolved_method; }
  75.103 @@ -95,21 +86,43 @@
  75.104    Handle       resolved_method_type() const      { return _resolved_method_type; }
  75.105  
  75.106    BasicType    result_type() const               { return selected_method()->result_type(); }
  75.107 -  bool         has_vtable_index() const          { return _vtable_index >= 0; }
  75.108 -  bool         is_statically_bound() const       { return _vtable_index == Method::nonvirtual_vtable_index; }
  75.109 +  CallKind     call_kind() const                 { return _call_kind; }
  75.110 +  int          call_index() const                { return _call_index; }
  75.111    int          vtable_index() const {
  75.112      // Even for interface calls the vtable index could be non-negative.
  75.113      // See CallInfo::set_interface.
  75.114      assert(has_vtable_index() || is_statically_bound(), "");
  75.115 -    return _vtable_index;
  75.116 +    assert(call_kind() == vtable_call || call_kind() == direct_call, "");
  75.117 +    // The returned value is < 0 if the call is statically bound.
  75.118 +    // But, the returned value may be >= 0 even if the kind is direct_call.
  75.119 +    // It is up to the caller to decide which way to go.
  75.120 +    return _call_index;
  75.121    }
  75.122 +  int          itable_index() const {
  75.123 +    assert(call_kind() == itable_call, "");
  75.124 +    // The returned value is always >= 0, a valid itable index.
  75.125 +    return _call_index;
  75.126 +  }
  75.127 +
  75.128 +  // debugging
  75.129 +#ifdef ASSERT
  75.130 +  bool         has_vtable_index() const          { return _call_index >= 0 && _call_kind != CallInfo::itable_call; }
  75.131 +  bool         is_statically_bound() const       { return _call_index == Method::nonvirtual_vtable_index; }
  75.132 +#endif //ASSERT
  75.133 +  void         verify() PRODUCT_RETURN;
  75.134 +  void         print()  PRODUCT_RETURN;
  75.135  };
  75.136  
  75.137 +// Link information for getfield/putfield & getstatic/putstatic bytecodes
  75.138 +// is represented using a fieldDescriptor.
  75.139  
  75.140  // The LinkResolver is used to resolve constant-pool references at run-time.
  75.141  // It does all necessary link-time checks & throws exceptions if necessary.
  75.142  
  75.143  class LinkResolver: AllStatic {
  75.144 +  friend class klassVtable;
  75.145 +  friend class klassItable;
  75.146 +
  75.147   private:
  75.148    static void lookup_method_in_klasses          (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
  75.149    static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
  75.150 @@ -120,7 +133,6 @@
  75.151    static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
  75.152  
  75.153    static void resolve_klass           (KlassHandle& result, constantPoolHandle  pool, int index, TRAPS);
  75.154 -  static void resolve_klass_no_update (KlassHandle& result, constantPoolHandle pool, int index, TRAPS); // no update of constantPool entry
  75.155  
  75.156    static void resolve_pool  (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);
  75.157  
  75.158 @@ -148,9 +160,16 @@
  75.159                                          Bytecodes::Code code, constantPoolHandle pool, int index, TRAPS);
  75.160  
  75.161    // runtime/static resolving for fields
  75.162 -  static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, TRAPS);
  75.163 -  // takes an extra bool argument "update_pool" to decide whether to update the constantPool during klass resolution.
  75.164 -  static void resolve_field(FieldAccessInfo& result, constantPoolHandle pool, int index, Bytecodes::Code byte, bool check_only, bool update_pool, TRAPS);
  75.165 +  static void resolve_field_access(fieldDescriptor& result, constantPoolHandle pool, int index, Bytecodes::Code byte, TRAPS);
  75.166 +  static void resolve_field(fieldDescriptor& result, KlassHandle resolved_klass, Symbol* field_name, Symbol* field_signature,
  75.167 +                            KlassHandle current_klass, Bytecodes::Code access_kind, bool check_access, bool initialize_class, TRAPS);
  75.168 +
  75.169 +  // source of access_kind codes:
  75.170 +  static Bytecodes::Code field_access_kind(bool is_static, bool is_put) {
  75.171 +    return (is_static
  75.172 +            ? (is_put ? Bytecodes::_putstatic : Bytecodes::_getstatic)
  75.173 +            : (is_put ? Bytecodes::_putfield  : Bytecodes::_getfield ));
  75.174 +  }
  75.175  
  75.176    // runtime resolving:
  75.177    //   resolved_klass = specified class (i.e., static receiver class)
    76.1 --- a/src/share/vm/memory/collectorPolicy.cpp	Fri Sep 27 13:49:57 2013 -0400
    76.2 +++ b/src/share/vm/memory/collectorPolicy.cpp	Fri Sep 27 13:53:43 2013 -0400
    76.3 @@ -47,6 +47,11 @@
    76.4  
    76.5  // CollectorPolicy methods.
    76.6  
    76.7 +// Align down. If the aligning result in 0, return 'alignment'.
    76.8 +static size_t restricted_align_down(size_t size, size_t alignment) {
    76.9 +  return MAX2(alignment, align_size_down_(size, alignment));
   76.10 +}
   76.11 +
   76.12  void CollectorPolicy::initialize_flags() {
   76.13    assert(max_alignment() >= min_alignment(),
   76.14        err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
   76.15 @@ -59,18 +64,24 @@
   76.16      vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
   76.17    }
   76.18  
   76.19 -  if (MetaspaceSize > MaxMetaspaceSize) {
   76.20 -    MaxMetaspaceSize = MetaspaceSize;
   76.21 -  }
   76.22 -  MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment()));
   76.23 -  // Don't increase Metaspace size limit above specified.
   76.24 -  MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment());
   76.25 -  if (MetaspaceSize > MaxMetaspaceSize) {
   76.26 -    MetaspaceSize = MaxMetaspaceSize;
   76.27 +  if (!is_size_aligned(MaxMetaspaceSize, max_alignment())) {
   76.28 +    FLAG_SET_ERGO(uintx, MaxMetaspaceSize,
   76.29 +        restricted_align_down(MaxMetaspaceSize, max_alignment()));
   76.30    }
   76.31  
   76.32 -  MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment()));
   76.33 -  MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment()));
   76.34 +  if (MetaspaceSize > MaxMetaspaceSize) {
   76.35 +    FLAG_SET_ERGO(uintx, MetaspaceSize, MaxMetaspaceSize);
   76.36 +  }
   76.37 +
   76.38 +  if (!is_size_aligned(MetaspaceSize, min_alignment())) {
   76.39 +    FLAG_SET_ERGO(uintx, MetaspaceSize,
   76.40 +        restricted_align_down(MetaspaceSize, min_alignment()));
   76.41 +  }
   76.42 +
   76.43 +  assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");
   76.44 +
   76.45 +  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment());
   76.46 +  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment());
   76.47  
   76.48    MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());
   76.49  
   76.50 @@ -145,6 +156,30 @@
   76.51    _all_soft_refs_clear = true;
   76.52  }
   76.53  
   76.54 +size_t CollectorPolicy::compute_max_alignment() {
   76.55 +  // The card marking array and the offset arrays for old generations are
   76.56 +  // committed in os pages as well. Make sure they are entirely full (to
   76.57 +  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
   76.58 +  // byte entry and the os page size is 4096, the maximum heap size should
   76.59 +  // be 512*4096 = 2MB aligned.
   76.60 +
   76.61 +  // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable
   76.62 +  // is supported.
   76.63 +  // Requirements of any new remembered set implementations must be added here.
   76.64 +  size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
   76.65 +
   76.66 +  // Parallel GC does its own alignment of the generations to avoid requiring a
   76.67 +  // large page (256M on some platforms) for the permanent generation.  The
   76.68 +  // other collectors should also be updated to do their own alignment and then
   76.69 +  // this use of lcm() should be removed.
   76.70 +  if (UseLargePages && !UseParallelGC) {
   76.71 +      // in presence of large pages we have to make sure that our
   76.72 +      // alignment is large page aware
   76.73 +      alignment = lcm(os::large_page_size(), alignment);
   76.74 +  }
   76.75 +
   76.76 +  return alignment;
   76.77 +}
   76.78  
   76.79  // GenCollectorPolicy methods.
   76.80  
   76.81 @@ -175,29 +210,6 @@
   76.82                                          GCTimeRatio);
   76.83  }
   76.84  
   76.85 -size_t GenCollectorPolicy::compute_max_alignment() {
   76.86 -  // The card marking array and the offset arrays for old generations are
   76.87 -  // committed in os pages as well. Make sure they are entirely full (to
   76.88 -  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
   76.89 -  // byte entry and the os page size is 4096, the maximum heap size should
   76.90 -  // be 512*4096 = 2MB aligned.
   76.91 -  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());
   76.92 -
   76.93 -  // Parallel GC does its own alignment of the generations to avoid requiring a
   76.94 -  // large page (256M on some platforms) for the permanent generation.  The
   76.95 -  // other collectors should also be updated to do their own alignment and then
   76.96 -  // this use of lcm() should be removed.
   76.97 -  if (UseLargePages && !UseParallelGC) {
   76.98 -      // in presence of large pages we have to make sure that our
   76.99 -      // alignment is large page aware
  76.100 -      alignment = lcm(os::large_page_size(), alignment);
  76.101 -  }
  76.102 -
  76.103 -  assert(alignment >= min_alignment(), "Must be");
  76.104 -
  76.105 -  return alignment;
  76.106 -}
  76.107 -
  76.108  void GenCollectorPolicy::initialize_flags() {
  76.109    // All sizes must be multiples of the generation granularity.
  76.110    set_min_alignment((uintx) Generation::GenGrain);
    77.1 --- a/src/share/vm/memory/collectorPolicy.hpp	Fri Sep 27 13:49:57 2013 -0400
    77.2 +++ b/src/share/vm/memory/collectorPolicy.hpp	Fri Sep 27 13:53:43 2013 -0400
    77.3 @@ -98,6 +98,9 @@
    77.4    {}
    77.5  
    77.6   public:
    77.7 +  // Return maximum heap alignment that may be imposed by the policy
    77.8 +  static size_t compute_max_alignment();
    77.9 +
   77.10    void set_min_alignment(size_t align)         { _min_alignment = align; }
   77.11    size_t min_alignment()                       { return _min_alignment; }
   77.12    void set_max_alignment(size_t align)         { _max_alignment = align; }
   77.13 @@ -234,9 +237,6 @@
   77.14    // Try to allocate space by expanding the heap.
   77.15    virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
   77.16  
   77.17 -  // compute max heap alignment
   77.18 -  size_t compute_max_alignment();
   77.19 -
   77.20   // Scale the base_size by NewRation according to
   77.21   //     result = base_size / (NewRatio + 1)
   77.22   // and align by min_alignment()
    78.1 --- a/src/share/vm/memory/gcLocker.cpp	Fri Sep 27 13:49:57 2013 -0400
    78.2 +++ b/src/share/vm/memory/gcLocker.cpp	Fri Sep 27 13:53:43 2013 -0400
    78.3 @@ -122,7 +122,7 @@
    78.4      // strictly needed. It's added here to make it clear that
    78.5      // the GC will NOT be performed if any other caller
    78.6      // of GC_locker::lock() still needs GC locked.
    78.7 -    if (!is_active()) {
    78.8 +    if (!is_active_internal()) {
    78.9        _doing_gc = true;
   78.10        {
   78.11          // Must give up the lock while at a safepoint
    79.1 --- a/src/share/vm/memory/gcLocker.hpp	Fri Sep 27 13:49:57 2013 -0400
    79.2 +++ b/src/share/vm/memory/gcLocker.hpp	Fri Sep 27 13:53:43 2013 -0400
    79.3 @@ -88,7 +88,7 @@
    79.4   public:
    79.5    // Accessors
    79.6    static bool is_active() {
    79.7 -    assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
    79.8 +    assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
    79.9      return is_active_internal();
   79.10    }
   79.11    static bool needs_gc()       { return _needs_gc;                        }
    80.1 --- a/src/share/vm/memory/genCollectedHeap.hpp	Fri Sep 27 13:49:57 2013 -0400
    80.2 +++ b/src/share/vm/memory/genCollectedHeap.hpp	Fri Sep 27 13:53:43 2013 -0400
    80.3 @@ -1,5 +1,5 @@
    80.4  /*
    80.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
    80.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
    80.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    80.8   *
    80.9   * This code is free software; you can redistribute it and/or modify it
   80.10 @@ -148,6 +148,11 @@
   80.11      return gen_policy()->size_policy();
   80.12    }
   80.13  
   80.14 +  // Return the (conservative) maximum heap alignment
   80.15 +  static size_t conservative_max_heap_alignment() {
   80.16 +    return Generation::GenGrain;
   80.17 +  }
   80.18 +
   80.19    size_t capacity() const;
   80.20    size_t used() const;
   80.21  
    81.1 --- a/src/share/vm/memory/metablock.cpp	Fri Sep 27 13:49:57 2013 -0400
    81.2 +++ b/src/share/vm/memory/metablock.cpp	Fri Sep 27 13:53:43 2013 -0400
    81.3 @@ -50,13 +50,6 @@
    81.4  // Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
    81.5  size_t Metablock::_min_block_byte_size = sizeof(Metablock);
    81.6  
    81.7 -#ifdef ASSERT
    81.8 -size_t Metablock::_overhead =
    81.9 -  Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
   81.10 -#else
   81.11 -size_t Metablock::_overhead = 0;
   81.12 -#endif
   81.13 -
   81.14  // New blocks returned by the Metaspace are zero initialized.
   81.15  // We should fix the constructors to not assume this instead.
   81.16  Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
    82.1 --- a/src/share/vm/memory/metablock.hpp	Fri Sep 27 13:49:57 2013 -0400
    82.2 +++ b/src/share/vm/memory/metablock.hpp	Fri Sep 27 13:53:43 2013 -0400
    82.3 @@ -48,7 +48,6 @@
    82.4      } _header;
    82.5    } _block;
    82.6    static size_t _min_block_byte_size;
    82.7 -  static size_t _overhead;
    82.8  
    82.9    typedef union block_t Block;
   82.10    typedef struct header_t Header;
   82.11 @@ -73,7 +72,6 @@
   82.12    void set_prev(Metablock* v) { _block._header._prev = v; }
   82.13  
   82.14    static size_t min_block_byte_size() { return _min_block_byte_size; }
   82.15 -  static size_t overhead() { return _overhead; }
   82.16  
   82.17    bool is_free()                 { return header()->_word_size != 0; }
   82.18    void clear_next()              { set_next(NULL); }
    83.1 --- a/src/share/vm/memory/metaspace.cpp	Fri Sep 27 13:49:57 2013 -0400
    83.2 +++ b/src/share/vm/memory/metaspace.cpp	Fri Sep 27 13:53:43 2013 -0400
    83.3 @@ -23,6 +23,7 @@
    83.4   */
    83.5  #include "precompiled.hpp"
    83.6  #include "gc_interface/collectedHeap.hpp"
    83.7 +#include "memory/allocation.hpp"
    83.8  #include "memory/binaryTreeDictionary.hpp"
    83.9  #include "memory/freeList.hpp"
   83.10  #include "memory/collectorPolicy.hpp"
   83.11 @@ -51,7 +52,7 @@
   83.12  // Parameters for stress mode testing
   83.13  const uint metadata_deallocate_a_lot_block = 10;
   83.14  const uint metadata_deallocate_a_lock_chunk = 3;
   83.15 -size_t const allocation_from_dictionary_limit = 64 * K;
   83.16 +size_t const allocation_from_dictionary_limit = 4 * K;
   83.17  
   83.18  MetaWord* last_allocated = 0;
   83.19  
   83.20 @@ -111,7 +112,7 @@
   83.21  // Has three lists of free chunks, and a total size and
   83.22  // count that includes all three
   83.23  
   83.24 -class ChunkManager VALUE_OBJ_CLASS_SPEC {
   83.25 +class ChunkManager : public CHeapObj<mtInternal> {
   83.26  
   83.27    // Free list of chunks of different sizes.
   83.28    //   SpecializedChunk
   83.29 @@ -158,7 +159,12 @@
   83.30  
   83.31   public:
   83.32  
   83.33 -  ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
   83.34 +  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
   83.35 +      : _free_chunks_total(0), _free_chunks_count(0) {
   83.36 +    _free_chunks[SpecializedIndex].set_size(specialized_size);
   83.37 +    _free_chunks[SmallIndex].set_size(small_size);
   83.38 +    _free_chunks[MediumIndex].set_size(medium_size);
   83.39 +  }
   83.40  
   83.41    // add or delete (return) a chunk to the global freelist.
   83.42    Metachunk* chunk_freelist_allocate(size_t word_size);
   83.43 @@ -177,8 +183,8 @@
   83.44    void return_chunks(ChunkIndex index, Metachunk* chunks);
   83.45  
   83.46    // Total of the space in the free chunks list
   83.47 -  size_t free_chunks_total();
   83.48 -  size_t free_chunks_total_in_bytes();
   83.49 +  size_t free_chunks_total_words();
   83.50 +  size_t free_chunks_total_bytes();
   83.51  
   83.52    // Number of chunks in the free chunks list
   83.53    size_t free_chunks_count();
   83.54 @@ -219,7 +225,7 @@
   83.55    void locked_print_free_chunks(outputStream* st);
   83.56    void locked_print_sum_free_chunks(outputStream* st);
   83.57  
   83.58 -  void print_on(outputStream* st);
   83.59 +  void print_on(outputStream* st) const;
   83.60  };
   83.61  
   83.62  // Used to manage the free list of Metablocks (a block corresponds
   83.63 @@ -228,6 +234,10 @@
   83.64    BlockTreeDictionary* _dictionary;
   83.65    static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
   83.66  
   83.67 +  // Only allocate and split from freelist if the size of the allocation
   83.68 +  // is at least 1/4th the size of the available block.
   83.69 +  const static int WasteMultiplier = 4;
   83.70 +
   83.71    // Accessors
   83.72    BlockTreeDictionary* dictionary() const { return _dictionary; }
   83.73  
   83.74 @@ -272,11 +282,6 @@
   83.75    // VirtualSpace
   83.76    Metachunk* first_chunk() { return (Metachunk*) bottom(); }
   83.77  
   83.78 -  void inc_container_count();
   83.79 -#ifdef ASSERT
   83.80 -  uint container_count_slow();
   83.81 -#endif
   83.82 -
   83.83   public:
   83.84  
   83.85    VirtualSpaceNode(size_t byte_size);
   83.86 @@ -287,6 +292,10 @@
   83.87    MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
   83.88    MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
   83.89  
   83.90 +  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
   83.91 +  size_t expanded_words() const  { return _virtual_space.committed_size() / BytesPerWord; }
   83.92 +  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
   83.93 +
   83.94    // address of next available space in _virtual_space;
   83.95    // Accessors
   83.96    VirtualSpaceNode* next() { return _next; }
   83.97 @@ -306,8 +315,10 @@
   83.98    void inc_top(size_t word_size) { _top += word_size; }
   83.99  
  83.100    uintx container_count() { return _container_count; }
  83.101 +  void inc_container_count();
  83.102    void dec_container_count();
  83.103  #ifdef ASSERT
  83.104 +  uint container_count_slow();
  83.105    void verify_container_count();
  83.106  #endif
  83.107  
  83.108 @@ -323,12 +334,10 @@
  83.109  
  83.110    // Allocate a chunk from the virtual space and return it.
  83.111    Metachunk* get_chunk_vs(size_t chunk_word_size);
  83.112 -  Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);
  83.113  
  83.114    // Expands/shrinks the committed space in a virtual space.  Delegates
  83.115    // to Virtualspace
  83.116    bool expand_by(size_t words, bool pre_touch = false);
  83.117 -  bool shrink_by(size_t words);
  83.118  
  83.119    // In preparation for deleting this node, remove all the chunks
  83.120    // in the node from any freelist.
  83.121 @@ -336,8 +345,6 @@
  83.122  
  83.123  #ifdef ASSERT
  83.124    // Debug support
  83.125 -  static void verify_virtual_space_total();
  83.126 -  static void verify_virtual_space_count();
  83.127    void mangle();
  83.128  #endif
  83.129  
  83.130 @@ -417,16 +424,17 @@
  83.131    VirtualSpaceNode* _virtual_space_list;
  83.132    // virtual space currently being used for allocations
  83.133    VirtualSpaceNode* _current_virtual_space;
  83.134 -  // Free chunk list for all other metadata
  83.135 -  ChunkManager      _chunk_manager;
  83.136  
  83.137    // Can this virtual list allocate >1 spaces?  Also, used to determine
  83.138    // whether to allocate unlimited small chunks in this virtual space
  83.139    bool _is_class;
  83.140 -  bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }
  83.141 -
  83.142 -  // Sum of space in all virtual spaces and number of virtual spaces
  83.143 -  size_t _virtual_space_total;
  83.144 +  bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }
  83.145 +
  83.146 +  // Sum of reserved and committed memory in the virtual spaces
  83.147 +  size_t _reserved_words;
  83.148 +  size_t _committed_words;
  83.149 +
  83.150 +  // Number of virtual spaces
  83.151    size_t _virtual_space_count;
  83.152  
  83.153    ~VirtualSpaceList();
  83.154 @@ -440,7 +448,7 @@
  83.155      _current_virtual_space = v;
  83.156    }
  83.157  
  83.158 -  void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);
  83.159 +  void link_vs(VirtualSpaceNode* new_entry);
  83.160  
  83.161    // Get another virtual space and add it to the list.  This
  83.162    // is typically prompted by a failed attempt to allocate a chunk
  83.163 @@ -457,6 +465,8 @@
  83.164                             size_t grow_chunks_by_words,
  83.165                             size_t medium_chunk_bunch);
  83.166  
  83.167 +  bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false);
  83.168 +
  83.169    // Get the first chunk for a Metaspace.  Used for
  83.170    // special cases such as the boot class loader, reflection
  83.171    // class loader and anonymous class loader.
  83.172 @@ -466,28 +476,25 @@
  83.173      return _current_virtual_space;
  83.174    }
  83.175  
  83.176 -  ChunkManager* chunk_manager() { return &_chunk_manager; }
  83.177    bool is_class() const { return _is_class; }
  83.178  
  83.179    // Allocate the first virtualspace.
  83.180    void initialize(size_t word_size);
  83.181  
  83.182 -  size_t virtual_space_total() { return _virtual_space_total; }
  83.183 -
  83.184 -  void inc_virtual_space_total(size_t v);
  83.185 -  void dec_virtual_space_total(size_t v);
  83.186 +  size_t reserved_words()  { return _reserved_words; }
  83.187 +  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  83.188 +  size_t committed_words() { return _committed_words; }
  83.189 +  size_t committed_bytes() { return committed_words() * BytesPerWord; }
  83.190 +
  83.191 +  void inc_reserved_words(size_t v);
  83.192 +  void dec_reserved_words(size_t v);
  83.193 +  void inc_committed_words(size_t v);
  83.194 +  void dec_committed_words(size_t v);
  83.195    void inc_virtual_space_count();
  83.196    void dec_virtual_space_count();
  83.197  
  83.198    // Unlink empty VirtualSpaceNodes and free it.
  83.199 -  void purge();
  83.200 -
  83.201 -  // Used and capacity in the entire list of virtual spaces.
  83.202 -  // These are global values shared by all Metaspaces
  83.203 -  size_t capacity_words_sum();
  83.204 -  size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
  83.205 -  size_t used_words_sum();
  83.206 -  size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
  83.207 +  void purge(ChunkManager* chunk_manager);
  83.208  
  83.209    bool contains(const void *ptr);
  83.210  
  83.211 @@ -568,18 +575,12 @@
  83.212    // Type of metadata allocated.
  83.213    Metaspace::MetadataType _mdtype;
  83.214  
  83.215 -  // Chunk related size
  83.216 -  size_t _medium_chunk_bunch;
  83.217 -
  83.218    // List of chunks in use by this SpaceManager.  Allocations
  83.219    // are done from the current chunk.  The list is used for deallocating
  83.220    // chunks when the SpaceManager is freed.
  83.221    Metachunk* _chunks_in_use[NumberOfInUseLists];
  83.222    Metachunk* _current_chunk;
  83.223  
  83.224 -  // Virtual space where allocation comes from.
  83.225 -  VirtualSpaceList* _vs_list;
  83.226 -
  83.227    // Number of small chunks to allocate to a manager
  83.228    // If class space manager, small chunks are unlimited
  83.229    static uint const _small_chunk_limit;
  83.230 @@ -612,7 +613,9 @@
  83.231    }
  83.232  
  83.233    Metaspace::MetadataType mdtype() { return _mdtype; }
  83.234 -  VirtualSpaceList* vs_list() const    { return _vs_list; }
  83.235 +
  83.236 +  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  83.237 +  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
  83.238  
  83.239    Metachunk* current_chunk() const { return _current_chunk; }
  83.240    void set_current_chunk(Metachunk* v) {
  83.241 @@ -623,6 +626,7 @@
  83.242  
  83.243    // Add chunk to the list of chunks in use
  83.244    void add_chunk(Metachunk* v, bool make_current);
  83.245 +  void retire_current_chunk();
  83.246  
  83.247    Mutex* lock() const { return _lock; }
  83.248  
  83.249 @@ -633,18 +637,19 @@
  83.250  
  83.251   public:
  83.252    SpaceManager(Metaspace::MetadataType mdtype,
  83.253 -               Mutex* lock,
  83.254 -               VirtualSpaceList* vs_list);
  83.255 +               Mutex* lock);
  83.256    ~SpaceManager();
  83.257  
  83.258    enum ChunkMultiples {
  83.259      MediumChunkMultiple = 4
  83.260    };
  83.261  
  83.262 +  bool is_class() { return _mdtype == Metaspace::ClassType; }
  83.263 +
  83.264    // Accessors
  83.265    size_t specialized_chunk_size() { return SpecializedChunk; }
  83.266 -  size_t small_chunk_size() { return (size_t) vs_list()->is_class() ? ClassSmallChunk : SmallChunk; }
  83.267 -  size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; }
  83.268 +  size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
  83.269 +  size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
  83.270    size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
  83.271  
  83.272    size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  83.273 @@ -722,9 +727,7 @@
  83.274      // MinChunkSize is a placeholder for the real minimum size JJJ
  83.275      size_t byte_size = word_size * BytesPerWord;
  83.276  
  83.277 -    size_t byte_size_with_overhead = byte_size + Metablock::overhead();
  83.278 -
  83.279 -    size_t raw_bytes_size = MAX2(byte_size_with_overhead,
  83.280 +    size_t raw_bytes_size = MAX2(byte_size,
  83.281                                   Metablock::min_block_byte_size());
  83.282      raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
  83.283      size_t raw_word_size = raw_bytes_size / BytesPerWord;
  83.284 @@ -749,7 +752,7 @@
  83.285    _container_count++;
  83.286    assert(_container_count == container_count_slow(),
  83.287           err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
  83.288 -                 "container_count_slow() " SIZE_FORMAT,
  83.289 +                 " container_count_slow() " SIZE_FORMAT,
  83.290                   _container_count, container_count_slow()));
  83.291  }
  83.292  
  83.293 @@ -762,7 +765,7 @@
  83.294  void VirtualSpaceNode::verify_container_count() {
  83.295    assert(_container_count == container_count_slow(),
  83.296      err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
  83.297 -            "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
  83.298 +            " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
  83.299  }
  83.300  #endif
  83.301  
  83.302 @@ -807,12 +810,25 @@
  83.303    }
  83.304  
  83.305    Metablock* free_block =
  83.306 -    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
  83.307 +    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  83.308    if (free_block == NULL) {
  83.309      return NULL;
  83.310    }
  83.311  
  83.312 -  return (MetaWord*) free_block;
  83.313 +  const size_t block_size = free_block->size();
  83.314 +  if (block_size > WasteMultiplier * word_size) {
  83.315 +    return_block((MetaWord*)free_block, block_size);
  83.316 +    return NULL;
  83.317 +  }
  83.318 +
  83.319 +  MetaWord* new_block = (MetaWord*)free_block;
  83.320 +  assert(block_size >= word_size, "Incorrect size of block from freelist");
  83.321 +  const size_t unused = block_size - word_size;
  83.322 +  if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
  83.323 +    return_block(new_block + word_size, unused);
  83.324 +  }
  83.325 +
  83.326 +  return new_block;
  83.327  }
  83.328  
  83.329  void BlockFreelist::print_on(outputStream* st) const {
  83.330 @@ -855,9 +871,9 @@
  83.331  
  83.332    if (!is_available(chunk_word_size)) {
  83.333      if (TraceMetadataChunkAllocation) {
  83.334 -      tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
  83.335 +      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
  83.336        // Dump some information about the virtual space that is nearly full
  83.337 -      print_on(tty);
  83.338 +      print_on(gclog_or_tty);
  83.339      }
  83.340      return NULL;
  83.341    }
  83.342 @@ -878,20 +894,11 @@
  83.343    if (TraceMetavirtualspaceAllocation && !result) {
  83.344      gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
  83.345                             "for byte size " SIZE_FORMAT, bytes);
  83.346 -    virtual_space()->print();
  83.347 +    virtual_space()->print_on(gclog_or_tty);
  83.348    }
  83.349    return result;
  83.350  }
  83.351  
  83.352 -// Shrink the virtual space (commit more of the reserved space)
  83.353 -bool VirtualSpaceNode::shrink_by(size_t words) {
  83.354 -  size_t bytes = words * BytesPerWord;
  83.355 -  virtual_space()->shrink_by(bytes);
  83.356 -  return true;
  83.357 -}
  83.358 -
  83.359 -// Add another chunk to the chunk list.
  83.360 -
  83.361  Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  83.362    assert_lock_strong(SpaceManager::expand_lock());
  83.363    Metachunk* result = take_from_committed(chunk_word_size);
  83.364 @@ -901,23 +908,6 @@
  83.365    return result;
  83.366  }
  83.367  
  83.368 -Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
  83.369 -  assert_lock_strong(SpaceManager::expand_lock());
  83.370 -
  83.371 -  Metachunk* new_chunk = get_chunk_vs(chunk_word_size);
  83.372 -
  83.373 -  if (new_chunk == NULL) {
  83.374 -    // Only a small part of the virtualspace is committed when first
  83.375 -    // allocated so committing more here can be expected.
  83.376 -    size_t page_size_words = os::vm_page_size() / BytesPerWord;
  83.377 -    size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
  83.378 -                                                    page_size_words);
  83.379 -    expand_by(aligned_expand_vs_by_words, false);
  83.380 -    new_chunk = get_chunk_vs(chunk_word_size);
  83.381 -  }
  83.382 -  return new_chunk;
  83.383 -}
  83.384 -
  83.385  bool VirtualSpaceNode::initialize() {
  83.386  
  83.387    if (!_rs.is_reserved()) {
  83.388 @@ -977,13 +967,22 @@
  83.389    }
  83.390  }
  83.391  
  83.392 -void VirtualSpaceList::inc_virtual_space_total(size_t v) {
  83.393 +void VirtualSpaceList::inc_reserved_words(size_t v) {
  83.394    assert_lock_strong(SpaceManager::expand_lock());
  83.395 -  _virtual_space_total = _virtual_space_total + v;
  83.396 +  _reserved_words = _reserved_words + v;
  83.397  }
  83.398 -void VirtualSpaceList::dec_virtual_space_total(size_t v) {
  83.399 +void VirtualSpaceList::dec_reserved_words(size_t v) {
  83.400    assert_lock_strong(SpaceManager::expand_lock());
  83.401 -  _virtual_space_total = _virtual_space_total - v;
  83.402 +  _reserved_words = _reserved_words - v;
  83.403 +}
  83.404 +
  83.405 +void VirtualSpaceList::inc_committed_words(size_t v) {
  83.406 +  assert_lock_strong(SpaceManager::expand_lock());
  83.407 +  _committed_words = _committed_words + v;
  83.408 +}
  83.409 +void VirtualSpaceList::dec_committed_words(size_t v) {
  83.410 +  assert_lock_strong(SpaceManager::expand_lock());
  83.411 +  _committed_words = _committed_words - v;
  83.412  }
  83.413  
  83.414  void VirtualSpaceList::inc_virtual_space_count() {
  83.415 @@ -1011,7 +1010,7 @@
  83.416  // Walk the list of VirtualSpaceNodes and delete
  83.417  // nodes with a 0 container_count.  Remove Metachunks in
  83.418  // the node from their respective freelists.
  83.419 -void VirtualSpaceList::purge() {
  83.420 +void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  83.421    assert_lock_strong(SpaceManager::expand_lock());
  83.422    // Don't use a VirtualSpaceListIterator because this
  83.423    // list is being changed and a straightforward use of an iterator is not safe.
  83.424 @@ -1033,8 +1032,9 @@
  83.425          prev_vsl->set_next(vsl->next());
  83.426        }
  83.427  
  83.428 -      vsl->purge(chunk_manager());
  83.429 -      dec_virtual_space_total(vsl->reserved()->word_size());
  83.430 +      vsl->purge(chunk_manager);
  83.431 +      dec_reserved_words(vsl->reserved_words());
  83.432 +      dec_committed_words(vsl->committed_words());
  83.433        dec_virtual_space_count();
  83.434        purged_vsl = vsl;
  83.435        delete vsl;
  83.436 @@ -1054,49 +1054,16 @@
  83.437  #endif
  83.438  }
  83.439  
  83.440 -size_t VirtualSpaceList::used_words_sum() {
  83.441 -  size_t allocated_by_vs = 0;
  83.442 -  VirtualSpaceListIterator iter(virtual_space_list());
  83.443 -  while (iter.repeat()) {
  83.444 -    VirtualSpaceNode* vsl = iter.get_next();
  83.445 -    // Sum used region [bottom, top) in each virtualspace
  83.446 -    allocated_by_vs += vsl->used_words_in_vs();
  83.447 -  }
  83.448 -  assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
  83.449 -    err_msg("Total in free chunks " SIZE_FORMAT
  83.450 -            " greater than total from virtual_spaces " SIZE_FORMAT,
  83.451 -            allocated_by_vs, chunk_manager()->free_chunks_total()));
  83.452 -  size_t used =
  83.453 -    allocated_by_vs - chunk_manager()->free_chunks_total();
  83.454 -  return used;
  83.455 -}
  83.456 -
  83.457 -// Space available in all MetadataVirtualspaces allocated
  83.458 -// for metadata.  This is the upper limit on the capacity
  83.459 -// of chunks allocated out of all the MetadataVirtualspaces.
  83.460 -size_t VirtualSpaceList::capacity_words_sum() {
  83.461 -  size_t capacity = 0;
  83.462 -  VirtualSpaceListIterator iter(virtual_space_list());
  83.463 -  while (iter.repeat()) {
  83.464 -    VirtualSpaceNode* vsl = iter.get_next();
  83.465 -    capacity += vsl->capacity_words_in_vs();
  83.466 -  }
  83.467 -  return capacity;
  83.468 -}
  83.469 -
  83.470  VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
  83.471                                     _is_class(false),
  83.472                                     _virtual_space_list(NULL),
  83.473                                     _current_virtual_space(NULL),
  83.474 -                                   _virtual_space_total(0),
  83.475 +                                   _reserved_words(0),
  83.476 +                                   _committed_words(0),
  83.477                                     _virtual_space_count(0) {
  83.478    MutexLockerEx cl(SpaceManager::expand_lock(),
  83.479                     Mutex::_no_safepoint_check_flag);
  83.480    bool initialization_succeeded = grow_vs(word_size);
  83.481 -
  83.482 -  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  83.483 -  _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
  83.484 -  _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
  83.485    assert(initialization_succeeded,
  83.486      " VirtualSpaceList initialization should not fail");
  83.487  }
  83.488 @@ -1105,17 +1072,15 @@
  83.489                                     _is_class(true),
  83.490                                     _virtual_space_list(NULL),
  83.491                                     _current_virtual_space(NULL),
  83.492 -                                   _virtual_space_total(0),
  83.493 +                                   _reserved_words(0),
  83.494 +                                   _committed_words(0),
  83.495                                     _virtual_space_count(0) {
  83.496    MutexLockerEx cl(SpaceManager::expand_lock(),
  83.497                     Mutex::_no_safepoint_check_flag);
  83.498    VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  83.499    bool succeeded = class_entry->initialize();
  83.500 -  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  83.501 -  _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
  83.502 -  _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
  83.503    assert(succeeded, " VirtualSpaceList initialization should not fail");
  83.504 -  link_vs(class_entry, rs.size()/BytesPerWord);
  83.505 +  link_vs(class_entry);
  83.506  }
  83.507  
  83.508  size_t VirtualSpaceList::free_bytes() {
  83.509 @@ -1130,7 +1095,7 @@
  83.510    }
  83.511    // Reserve the space
  83.512    size_t vs_byte_size = vs_word_size * BytesPerWord;
  83.513 -  assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");
  83.514 +  assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");
  83.515  
  83.516    // Allocate the meta virtual space and initialize it.
  83.517    VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  83.518 @@ -1138,44 +1103,53 @@
  83.519      delete new_entry;
  83.520      return false;
  83.521    } else {
  83.522 +    assert(new_entry->reserved_words() == vs_word_size, "Must be");
  83.523      // ensure lock-free iteration sees fully initialized node
  83.524      OrderAccess::storestore();
  83.525 -    link_vs(new_entry, vs_word_size);
  83.526 +    link_vs(new_entry);
  83.527      return true;
  83.528    }
  83.529  }
  83.530  
  83.531 -void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
  83.532 +void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  83.533    if (virtual_space_list() == NULL) {
  83.534        set_virtual_space_list(new_entry);
  83.535    } else {
  83.536      current_virtual_space()->set_next(new_entry);
  83.537    }
  83.538    set_current_virtual_space(new_entry);
  83.539 -  inc_virtual_space_total(vs_word_size);
  83.540 +  inc_reserved_words(new_entry->reserved_words());
  83.541 +  inc_committed_words(new_entry->committed_words());
  83.542    inc_virtual_space_count();
  83.543  #ifdef ASSERT
  83.544    new_entry->mangle();
  83.545  #endif
  83.546    if (TraceMetavirtualspaceAllocation && Verbose) {
  83.547      VirtualSpaceNode* vsl = current_virtual_space();
  83.548 -    vsl->print_on(tty);
  83.549 +    vsl->print_on(gclog_or_tty);
  83.550    }
  83.551  }
  83.552  
  83.553 +bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) {
  83.554 +  size_t before = node->committed_words();
  83.555 +
  83.556 +  bool result = node->expand_by(word_size, pre_touch);
  83.557 +
  83.558 +  size_t after = node->committed_words();
  83.559 +
  83.560 +  // after and before can be the same if the memory was pre-committed.
  83.561 +  assert(after >= before, "Must be");
  83.562 +  inc_committed_words(after - before);
  83.563 +
  83.564 +  return result;
  83.565 +}
  83.566 +
  83.567  Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
  83.568                                             size_t grow_chunks_by_words,
  83.569                                             size_t medium_chunk_bunch) {
  83.570  
  83.571 -  // Get a chunk from the chunk freelist
  83.572 -  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
  83.573 -
  83.574 -  if (next != NULL) {
  83.575 -    next->container()->inc_container_count();
  83.576 -  } else {
  83.577 -    // Allocate a chunk out of the current virtual space.
  83.578 -    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  83.579 -  }
  83.580 +  // Allocate a chunk out of the current virtual space.
  83.581 +  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  83.582  
  83.583    if (next == NULL) {
  83.584      // Not enough room in current virtual space.  Try to commit
  83.585 @@ -1186,18 +1160,27 @@
  83.586      size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
  83.587                                                          page_size_words);
  83.588      bool vs_expanded =
  83.589 -      current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
  83.590 +      expand_by(current_virtual_space(), aligned_expand_vs_by_words);
  83.591      if (!vs_expanded) {
  83.592        // Should the capacity of the metaspaces be expanded for
  83.593        // this allocation?  If it's the virtual space for classes and is
  83.594        // being used for CompressedHeaders, don't allocate a new virtualspace.
  83.595        if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
  83.596          // Get another virtual space.
  83.597 -          size_t grow_vs_words =
  83.598 -            MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
  83.599 +        size_t allocation_aligned_expand_words =
  83.600 +            align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
  83.601 +        size_t grow_vs_words =
  83.602 +            MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
  83.603          if (grow_vs(grow_vs_words)) {
  83.604            // Got it.  It's on the list now.  Get a chunk from it.
  83.605 -          next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
  83.606 +          assert(current_virtual_space()->expanded_words() == 0,
  83.607 +              "New virtual space nodes should not have expanded");
  83.608 +
  83.609 +          size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
  83.610 +                                                              page_size_words);
  83.611 +          // We probably want to expand by aligned_expand_vs_by_words here.
  83.612 +          expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
  83.613 +          next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  83.614          }
  83.615        } else {
  83.616          // Allocation will fail and induce a GC
  83.617 @@ -1307,8 +1290,9 @@
  83.618    // reserved space, because this is a larger space prereserved for compressed
  83.619    // class pointers.
  83.620    if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
  83.621 -    size_t real_allocated = Metaspace::space_list()->virtual_space_total() +
  83.622 -              MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
  83.623 +    size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
  83.624 +    size_t class_allocated    = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
  83.625 +    size_t real_allocated     = nonclass_allocated + class_allocated;
  83.626      if (real_allocated >= MaxMetaspaceSize) {
  83.627        return false;
  83.628      }
  83.629 @@ -1501,15 +1485,15 @@
  83.630        if (dummy_chunk == NULL) {
  83.631          break;
  83.632        }
  83.633 -      vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
  83.634 +      sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
  83.635  
  83.636        if (TraceMetadataChunkAllocation && Verbose) {
  83.637          gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
  83.638                                 sm->sum_count_in_chunks_in_use());
  83.639          dummy_chunk->print_on(gclog_or_tty);
  83.640          gclog_or_tty->print_cr("  Free chunks total %d  count %d",
  83.641 -                               vsl->chunk_manager()->free_chunks_total(),
  83.642 -                               vsl->chunk_manager()->free_chunks_count());
  83.643 +                               sm->chunk_manager()->free_chunks_total_words(),
  83.644 +                               sm->chunk_manager()->free_chunks_count());
  83.645        }
  83.646      }
  83.647    } else {
  83.648 @@ -1565,12 +1549,12 @@
  83.649  
  83.650  // ChunkManager methods
  83.651  
  83.652 -size_t ChunkManager::free_chunks_total() {
  83.653 +size_t ChunkManager::free_chunks_total_words() {
  83.654    return _free_chunks_total;
  83.655  }
  83.656  
  83.657 -size_t ChunkManager::free_chunks_total_in_bytes() {
  83.658 -  return free_chunks_total() * BytesPerWord;
  83.659 +size_t ChunkManager::free_chunks_total_bytes() {
  83.660 +  return free_chunks_total_words() * BytesPerWord;
  83.661  }
  83.662  
  83.663  size_t ChunkManager::free_chunks_count() {
  83.664 @@ -1698,9 +1682,9 @@
  83.665    assert_lock_strong(SpaceManager::expand_lock());
  83.666    slow_locked_verify();
  83.667    if (TraceMetadataChunkAllocation) {
  83.668 -    tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
  83.669 -                  PTR_FORMAT "  size " SIZE_FORMAT,
  83.670 -                  chunk, chunk->word_size());
  83.671 +    gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
  83.672 +                           PTR_FORMAT "  size " SIZE_FORMAT,
  83.673 +                           chunk, chunk->word_size());
  83.674    }
  83.675    free_chunks_put(chunk);
  83.676  }
  83.677 @@ -1729,9 +1713,9 @@
  83.678      dec_free_chunks_total(chunk->capacity_word_size());
  83.679  
  83.680      if (TraceMetadataChunkAllocation && Verbose) {
  83.681 -      tty->print_cr("ChunkManager::free_chunks_get: free_list "
  83.682 -                    PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
  83.683 -                    free_list, chunk, chunk->word_size());
  83.684 +      gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
  83.685 +                             PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
  83.686 +                             free_list, chunk, chunk->word_size());
  83.687      }
  83.688    } else {
  83.689      chunk = humongous_dictionary()->get_chunk(
  83.690 @@ -1741,10 +1725,10 @@
  83.691      if (chunk != NULL) {
  83.692        if (TraceMetadataHumongousAllocation) {
  83.693          size_t waste = chunk->word_size() - word_size;
  83.694 -        tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
  83.695 -                      " for requested size " SIZE_FORMAT
  83.696 -                      " waste " SIZE_FORMAT,
  83.697 -                      chunk->word_size(), word_size, waste);
  83.698 +        gclog_or_tty->print_cr("Free list allocate humongous chunk size "
  83.699 +                               SIZE_FORMAT " for requested size " SIZE_FORMAT
  83.700 +                               " waste " SIZE_FORMAT,
  83.701 +                               chunk->word_size(), word_size, waste);
  83.702        }
  83.703        // Chunk is being removed from the chunks free list.
  83.704        dec_free_chunks_total(chunk->capacity_word_size());
  83.705 @@ -1761,6 +1745,8 @@
  83.706    // work.
  83.707    chunk->set_is_free(false);
  83.708  #endif
  83.709 +  chunk->container()->inc_container_count();
  83.710 +
  83.711    slow_locked_verify();
  83.712    return chunk;
  83.713  }
  83.714 @@ -1786,18 +1772,18 @@
  83.715      } else {
  83.716        list_count = humongous_dictionary()->total_count();
  83.717      }
  83.718 -    tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
  83.719 -               PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
  83.720 -               this, chunk, chunk->word_size(), list_count);
  83.721 -    locked_print_free_chunks(tty);
  83.722 +    gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
  83.723 +                        PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
  83.724 +                        this, chunk, chunk->word_size(), list_count);
  83.725 +    locked_print_free_chunks(gclog_or_tty);
  83.726    }
  83.727  
  83.728    return chunk;
  83.729  }
  83.730  
  83.731 -void ChunkManager::print_on(outputStream* out) {
  83.732 +void ChunkManager::print_on(outputStream* out) const {
  83.733    if (PrintFLSStatistics != 0) {
  83.734 -    humongous_dictionary()->report_statistics();
  83.735 +    const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
  83.736    }
  83.737  }
  83.738  
  83.739 @@ -1944,8 +1930,8 @@
  83.740      }
  83.741    }
  83.742  
  83.743 -  vs_list()->chunk_manager()->locked_print_free_chunks(st);
  83.744 -  vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
  83.745 +  chunk_manager()->locked_print_free_chunks(st);
  83.746 +  chunk_manager()->locked_print_sum_free_chunks(st);
  83.747  }
  83.748  
  83.749  size_t SpaceManager::calc_chunk_size(size_t word_size) {
  83.750 @@ -2049,9 +2035,7 @@
  83.751  }
  83.752  
  83.753  SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
  83.754 -                           Mutex* lock,
  83.755 -                           VirtualSpaceList* vs_list) :
  83.756 -  _vs_list(vs_list),
  83.757 +                           Mutex* lock) :
  83.758    _mdtype(mdtype),
  83.759    _allocated_blocks_words(0),
  83.760    _allocated_chunks_words(0),
  83.761 @@ -2137,9 +2121,7 @@
  83.762    MutexLockerEx fcl(SpaceManager::expand_lock(),
  83.763                      Mutex::_no_safepoint_check_flag);
  83.764  
  83.765 -  ChunkManager* chunk_manager = vs_list()->chunk_manager();
  83.766 -
  83.767 -  chunk_manager->slow_locked_verify();
  83.768 +  chunk_manager()->slow_locked_verify();
  83.769  
  83.770    dec_total_from_size_metrics();
  83.771  
  83.772 @@ -2153,8 +2135,8 @@
  83.773  
  83.774    // Have to update before the chunks_in_use lists are emptied
  83.775    // below.
  83.776 -  chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
  83.777 -                                       sum_count_in_chunks_in_use());
  83.778 +  chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
  83.779 +                                         sum_count_in_chunks_in_use());
  83.780  
  83.781    // Add all the chunks in use by this space manager
  83.782    // to the global list of free chunks.
  83.783 @@ -2169,11 +2151,11 @@
  83.784                               chunk_size_name(i));
  83.785      }
  83.786      Metachunk* chunks = chunks_in_use(i);
  83.787 -    chunk_manager->return_chunks(i, chunks);
  83.788 +    chunk_manager()->return_chunks(i, chunks);
  83.789      set_chunks_in_use(i, NULL);
  83.790      if (TraceMetadataChunkAllocation && Verbose) {
  83.791        gclog_or_tty->print_cr("updated freelist count %d %s",
  83.792 -                             chunk_manager->free_chunks(i)->count(),
  83.793 +                             chunk_manager()->free_chunks(i)->count(),
  83.794                               chunk_size_name(i));
  83.795      }
  83.796      assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
  83.797 @@ -2210,16 +2192,16 @@
  83.798                     humongous_chunks->word_size(), HumongousChunkGranularity));
  83.799      Metachunk* next_humongous_chunks = humongous_chunks->next();
  83.800      humongous_chunks->container()->dec_container_count();
  83.801 -    chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
  83.802 +    chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
  83.803      humongous_chunks = next_humongous_chunks;
  83.804    }
  83.805    if (TraceMetadataChunkAllocation && Verbose) {
  83.806      gclog_or_tty->print_cr("");
  83.807      gclog_or_tty->print_cr("updated dictionary count %d %s",
  83.808 -                     chunk_manager->humongous_dictionary()->total_count(),
  83.809 +                     chunk_manager()->humongous_dictionary()->total_count(),
  83.810                       chunk_size_name(HumongousIndex));
  83.811    }
  83.812 -  chunk_manager->slow_locked_verify();
  83.813 +  chunk_manager()->slow_locked_verify();
  83.814  }
  83.815  
  83.816  const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
  83.817 @@ -2278,6 +2260,7 @@
  83.818    ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
  83.819  
  83.820    if (index != HumongousIndex) {
  83.821 +    retire_current_chunk();
  83.822      set_current_chunk(new_chunk);
  83.823      new_chunk->set_next(chunks_in_use(index));
  83.824      set_chunks_in_use(index, new_chunk);
  83.825 @@ -2307,23 +2290,35 @@
  83.826      gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
  83.827                          sum_count_in_chunks_in_use());
  83.828      new_chunk->print_on(gclog_or_tty);
  83.829 -    if (vs_list() != NULL) {
  83.830 -      vs_list()->chunk_manager()->locked_print_free_chunks(tty);
  83.831 +    chunk_manager()->locked_print_free_chunks(gclog_or_tty);
  83.832 +  }
  83.833 +}
  83.834 +
  83.835 +void SpaceManager::retire_current_chunk() {
  83.836 +  if (current_chunk() != NULL) {
  83.837 +    size_t remaining_words = current_chunk()->free_word_size();
  83.838 +    if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
  83.839 +      block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
  83.840 +      inc_used_metrics(remaining_words);
  83.841      }
  83.842    }
  83.843  }
  83.844  
  83.845  Metachunk* SpaceManager::get_new_chunk(size_t word_size,
  83.846                                         size_t grow_chunks_by_words) {
  83.847 -
  83.848 -  Metachunk* next = vs_list()->get_new_chunk(word_size,
  83.849 -                                             grow_chunks_by_words,
  83.850 -                                             medium_chunk_bunch());
  83.851 -
  83.852 -  if (TraceMetadataHumongousAllocation &&
  83.853 +  // Get a chunk from the chunk freelist
  83.854 +  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
  83.855 +
  83.856 +  if (next == NULL) {
  83.857 +    next = vs_list()->get_new_chunk(word_size,
  83.858 +                                    grow_chunks_by_words,
  83.859 +                                    medium_chunk_bunch());
  83.860 +  }
  83.861 +
  83.862 +  if (TraceMetadataHumongousAllocation && next != NULL &&
  83.863        SpaceManager::is_humongous(next->word_size())) {
  83.864 -    gclog_or_tty->print_cr("  new humongous chunk word size " PTR_FORMAT,
  83.865 -                           next->word_size());
  83.866 +    gclog_or_tty->print_cr("  new humongous chunk word size "
  83.867 +                           PTR_FORMAT, next->word_size());
  83.868    }
  83.869  
  83.870    return next;
  83.871 @@ -2441,9 +2436,6 @@
  83.872           curr = curr->next()) {
  83.873        out->print("%d) ", i++);
  83.874        curr->print_on(out);
  83.875 -      if (TraceMetadataChunkAllocation && Verbose) {
  83.876 -        block_freelists()->print_on(out);
  83.877 -      }
  83.878        curr_total += curr->word_size();
  83.879        used += curr->used_word_size();
  83.880        capacity += curr->capacity_word_size();
  83.881 @@ -2451,6 +2443,10 @@
  83.882      }
  83.883    }
  83.884  
  83.885 +  if (TraceMetadataChunkAllocation && Verbose) {
  83.886 +    block_freelists()->print_on(out);
  83.887 +  }
  83.888 +
  83.889    size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
  83.890    // Free space isn't wasted.
  83.891    waste -= free;
  83.892 @@ -2538,13 +2534,13 @@
  83.893    return used * BytesPerWord;
  83.894  }
  83.895  
  83.896 -size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
  83.897 +size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
  83.898    size_t free = 0;
  83.899    ClassLoaderDataGraphMetaspaceIterator iter;
  83.900    while (iter.repeat()) {
  83.901      Metaspace* msp = iter.get_next();
  83.902      if (msp != NULL) {
  83.903 -      free += msp->free_words(mdtype);
  83.904 +      free += msp->free_words_slow(mdtype);
  83.905      }
  83.906    }
  83.907    return free * BytesPerWord;
  83.908 @@ -2567,34 +2563,55 @@
  83.909    return capacity * BytesPerWord;
  83.910  }
  83.911  
  83.912 -size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
  83.913 +size_t MetaspaceAux::capacity_bytes_slow() {
  83.914 +#ifdef PRODUCT
  83.915 +  // Use allocated_capacity_bytes() in PRODUCT instead of this function.
  83.916 +  guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
  83.917 +#endif
  83.918 +  size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
  83.919 +  size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
  83.920 +  assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
  83.921 +      err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
  83.922 +        " class_capacity + non_class_capacity " SIZE_FORMAT
  83.923 +        " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
  83.924 +        allocated_capacity_bytes(), class_capacity + non_class_capacity,
  83.925 +        class_capacity, non_class_capacity));
  83.926 +
  83.927 +  return class_capacity + non_class_capacity;
  83.928 +}
  83.929 +
  83.930 +size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
  83.931    VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  83.932 -  return list == NULL ? 0 : list->virtual_space_total();
  83.933 +  return list == NULL ? 0 : list->reserved_bytes();
  83.934  }
  83.935  
  83.936 -size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
  83.937 -
  83.938 -size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
  83.939 +size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
  83.940    VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  83.941 -  if (list == NULL) {
  83.942 +  return list == NULL ? 0 : list->committed_bytes();
  83.943 +}
  83.944 +
  83.945 +size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
  83.946 +
  83.947 +size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
  83.948 +  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
  83.949 +  if (chunk_manager == NULL) {
  83.950      return 0;
  83.951    }
  83.952 -  ChunkManager* chunk = list->chunk_manager();
  83.953 -  chunk->slow_verify();
  83.954 -  return chunk->free_chunks_total();
  83.955 +  chunk_manager->slow_verify();
  83.956 +  return chunk_manager->free_chunks_total_words();
  83.957  }
  83.958  
  83.959 -size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
  83.960 -  return free_chunks_total(mdtype) * BytesPerWord;
  83.961 +size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
  83.962 +  return free_chunks_total_words(mdtype) * BytesPerWord;
  83.963  }
  83.964  
  83.965 -size_t MetaspaceAux::free_chunks_total() {
  83.966 -  return free_chunks_total(Metaspace::ClassType) +
  83.967 -         free_chunks_total(Metaspace::NonClassType);
  83.968 +size_t MetaspaceAux::free_chunks_total_words() {
  83.969 +  return free_chunks_total_words(Metaspace::ClassType) +
  83.970 +         free_chunks_total_words(Metaspace::NonClassType);
  83.971  }
  83.972  
  83.973 -size_t MetaspaceAux::free_chunks_total_in_bytes() {
  83.974 -  return free_chunks_total() * BytesPerWord;
  83.975 +size_t MetaspaceAux::free_chunks_total_bytes() {
  83.976 +  return free_chunks_total_words() * BytesPerWord;
  83.977  }
  83.978  
  83.979  void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
  83.980 @@ -2605,14 +2622,14 @@
  83.981                          "("  SIZE_FORMAT ")",
  83.982                          prev_metadata_used,
  83.983                          allocated_used_bytes(),
  83.984 -                        reserved_in_bytes());
  83.985 +                        reserved_bytes());
  83.986    } else {
  83.987      gclog_or_tty->print(" "  SIZE_FORMAT "K"
  83.988                          "->" SIZE_FORMAT "K"
  83.989                          "("  SIZE_FORMAT "K)",
  83.990 -                        prev_metadata_used / K,
  83.991 -                        allocated_used_bytes() / K,
  83.992 -                        reserved_in_bytes()/ K);
  83.993 +                        prev_metadata_used/K,
  83.994 +                        allocated_used_bytes()/K,
  83.995 +                        reserved_bytes()/K);
  83.996    }
  83.997  
  83.998    gclog_or_tty->print("]");
  83.999 @@ -2625,14 +2642,14 @@
 83.1000    out->print_cr(" Metaspace total "
 83.1001                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
 83.1002                  " reserved " SIZE_FORMAT "K",
 83.1003 -                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
 83.1004 +                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);
 83.1005  
 83.1006    out->print_cr("  data space     "
 83.1007                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
 83.1008                  " reserved " SIZE_FORMAT "K",
 83.1009                  allocated_capacity_bytes(nct)/K,
 83.1010                  allocated_used_bytes(nct)/K,
 83.1011 -                reserved_in_bytes(nct)/K);
 83.1012 +                reserved_bytes(nct)/K);
 83.1013    if (Metaspace::using_class_space()) {
 83.1014      Metaspace::MetadataType ct = Metaspace::ClassType;
 83.1015      out->print_cr("  class space    "
 83.1016 @@ -2640,17 +2657,17 @@
 83.1017                    " reserved " SIZE_FORMAT "K",
 83.1018                    allocated_capacity_bytes(ct)/K,
 83.1019                    allocated_used_bytes(ct)/K,
 83.1020 -                  reserved_in_bytes(ct)/K);
 83.1021 +                  reserved_bytes(ct)/K);
 83.1022    }
 83.1023  }
 83.1024  
 83.1025  // Print information for class space and data space separately.
 83.1026  // This is almost the same as above.
 83.1027  void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
 83.1028 -  size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
 83.1029 +  size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
 83.1030    size_t capacity_bytes = capacity_bytes_slow(mdtype);
 83.1031    size_t used_bytes = used_bytes_slow(mdtype);
 83.1032 -  size_t free_bytes = free_in_bytes(mdtype);
 83.1033 +  size_t free_bytes = free_bytes_slow(mdtype);
 83.1034    size_t used_and_free = used_bytes + free_bytes +
 83.1035                             free_chunks_capacity_bytes;
 83.1036    out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
 83.1037 @@ -2732,9 +2749,9 @@
 83.1038  }
 83.1039  
 83.1040  void MetaspaceAux::verify_free_chunks() {
 83.1041 -  Metaspace::space_list()->chunk_manager()->verify();
 83.1042 +  Metaspace::chunk_manager_metadata()->verify();
 83.1043    if (Metaspace::using_class_space()) {
 83.1044 -    Metaspace::class_space_list()->chunk_manager()->verify();
 83.1045 +    Metaspace::chunk_manager_class()->verify();
 83.1046    }
 83.1047  }
 83.1048  
 83.1049 @@ -2805,6 +2822,9 @@
 83.1050  VirtualSpaceList* Metaspace::_space_list = NULL;
 83.1051  VirtualSpaceList* Metaspace::_class_space_list = NULL;
 83.1052  
 83.1053 +ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
 83.1054 +ChunkManager* Metaspace::_chunk_manager_class = NULL;
 83.1055 +
 83.1056  #define VIRTUALSPACEMULTIPLIER 2
 83.1057  
 83.1058  #ifdef _LP64
 83.1059 @@ -2836,7 +2856,7 @@
 83.1060  // to work with compressed klass pointers.
 83.1061  bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
 83.1062    assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
 83.1063 -  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
 83.1064 +  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
 83.1065    address lower_base = MIN2((address)metaspace_base, cds_base);
 83.1066    address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
 83.1067                                  (address)(metaspace_base + class_metaspace_size()));
 83.1068 @@ -2846,7 +2866,7 @@
 83.1069  // Try to allocate the metaspace at the requested addr.
 83.1070  void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
 83.1071    assert(using_class_space(), "called improperly");
 83.1072 -  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
 83.1073 +  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
 83.1074    assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
 83.1075           "Metaspace size is too big");
 83.1076  
 83.1077 @@ -2869,9 +2889,9 @@
 83.1078  
 83.1079      // If no successful allocation then try to allocate the space anywhere.  If
 83.1080      // that fails then OOM doom.  At this point we cannot try allocating the
 83.1081 -    // metaspace as if UseCompressedKlassPointers is off because too much
 83.1082 -    // initialization has happened that depends on UseCompressedKlassPointers.
 83.1083 -    // So, UseCompressedKlassPointers cannot be turned off at this point.
 83.1084 +    // metaspace as if UseCompressedClassPointers is off because too much
 83.1085 +    // initialization has happened that depends on UseCompressedClassPointers.
 83.1086 +    // So, UseCompressedClassPointers cannot be turned off at this point.
 83.1087      if (!metaspace_rs.is_reserved()) {
 83.1088        metaspace_rs = ReservedSpace(class_metaspace_size(),
 83.1089                                     os::vm_allocation_granularity(), false);
 83.1090 @@ -2904,14 +2924,15 @@
 83.1091    }
 83.1092  }
 83.1093  
 83.1094 -// For UseCompressedKlassPointers the class space is reserved above the top of
 83.1095 +// For UseCompressedClassPointers the class space is reserved above the top of
 83.1096  // the Java heap.  The argument passed in is at the base of the compressed space.
 83.1097  void Metaspace::initialize_class_space(ReservedSpace rs) {
 83.1098    // The reserved space size may be bigger because of alignment, esp with UseLargePages
 83.1099 -  assert(rs.size() >= ClassMetaspaceSize,
 83.1100 -         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
 83.1101 +  assert(rs.size() >= CompressedClassSpaceSize,
 83.1102 +         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
 83.1103    assert(using_class_space(), "Must be using class space");
 83.1104    _class_space_list = new VirtualSpaceList(rs);
 83.1105 +  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
 83.1106  }
 83.1107  
 83.1108  #endif
 83.1109 @@ -2921,7 +2942,7 @@
 83.1110    int max_alignment = os::vm_page_size();
 83.1111    size_t cds_total = 0;
 83.1112  
 83.1113 -  set_class_metaspace_size(align_size_up(ClassMetaspaceSize,
 83.1114 +  set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
 83.1115                                           os::vm_allocation_granularity()));
 83.1116  
 83.1117    MetaspaceShared::set_max_alignment(max_alignment);
 83.1118 @@ -2937,12 +2958,13 @@
 83.1119      // remainder is the misc code and data chunks.
 83.1120      cds_total = FileMapInfo::shared_spaces_size();
 83.1121      _space_list = new VirtualSpaceList(cds_total/wordSize);
 83.1122 +    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
 83.1123  
 83.1124  #ifdef _LP64
 83.1125      // Set the compressed klass pointer base so that decoding of these pointers works
 83.1126      // properly when creating the shared archive.
 83.1127 -    assert(UseCompressedOops && UseCompressedKlassPointers,
 83.1128 -      "UseCompressedOops and UseCompressedKlassPointers must be set");
 83.1129 +    assert(UseCompressedOops && UseCompressedClassPointers,
 83.1130 +      "UseCompressedOops and UseCompressedClassPointers must be set");
 83.1131      Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
 83.1132      if (TraceMetavirtualspaceAllocation && Verbose) {
 83.1133        gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
 83.1134 @@ -2979,7 +3001,7 @@
 83.1135      }
 83.1136  
 83.1137  #ifdef _LP64
 83.1138 -    // If UseCompressedKlassPointers is set then allocate the metaspace area
 83.1139 +    // If UseCompressedClassPointers is set then allocate the metaspace area
 83.1140      // above the heap and above the CDS area (if it exists).
 83.1141      if (using_class_space()) {
 83.1142        if (UseSharedSpaces) {
 83.1143 @@ -2997,22 +3019,37 @@
 83.1144      // on the medium chunk list.   The next chunk will be small and progress
 83.1145      // from there.  This size calculated by -version.
 83.1146      _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
 83.1147 -                                       (ClassMetaspaceSize/BytesPerWord)*2);
 83.1148 +                                       (CompressedClassSpaceSize/BytesPerWord)*2);
 83.1149      _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
 83.1150      // Arbitrarily set the initial virtual space to a multiple
 83.1151      // of the boot class loader size.
 83.1152      size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
 83.1153      // Initialize the list of virtual spaces.
 83.1154      _space_list = new VirtualSpaceList(word_size);
 83.1155 +    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
 83.1156    }
 83.1157  }
 83.1158  
 83.1159 +Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
 83.1160 +                                               size_t chunk_word_size,
 83.1161 +                                               size_t chunk_bunch) {
 83.1162 +  // Get a chunk from the chunk freelist
 83.1163 +  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
 83.1164 +  if (chunk != NULL) {
 83.1165 +    return chunk;
 83.1166 +  }
 83.1167 +
 83.1168 +  return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
 83.1169 +}
 83.1170 +
 83.1171  void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
 83.1172  
 83.1173    assert(space_list() != NULL,
 83.1174      "Metadata VirtualSpaceList has not been initialized");
 83.1175 -
 83.1176 -  _vsm = new SpaceManager(NonClassType, lock, space_list());
 83.1177 +  assert(chunk_manager_metadata() != NULL,
 83.1178 +    "Metadata ChunkManager has not been initialized");
 83.1179 +
 83.1180 +  _vsm = new SpaceManager(NonClassType, lock);
 83.1181    if (_vsm == NULL) {
 83.1182      return;
 83.1183    }
 83.1184 @@ -3021,11 +3058,13 @@
 83.1185    vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
 83.1186  
 83.1187    if (using_class_space()) {
 83.1188 -    assert(class_space_list() != NULL,
 83.1189 -      "Class VirtualSpaceList has not been initialized");
 83.1190 +  assert(class_space_list() != NULL,
 83.1191 +    "Class VirtualSpaceList has not been initialized");
 83.1192 +  assert(chunk_manager_class() != NULL,
 83.1193 +    "Class ChunkManager has not been initialized");
 83.1194  
 83.1195      // Allocate SpaceManager for classes.
 83.1196 -    _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
 83.1197 +    _class_vsm = new SpaceManager(ClassType, lock);
 83.1198      if (_class_vsm == NULL) {
 83.1199        return;
 83.1200      }
 83.1201 @@ -3034,9 +3073,9 @@
 83.1202    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
 83.1203  
 83.1204    // Allocate chunk for metadata objects
 83.1205 -  Metachunk* new_chunk =
 83.1206 -     space_list()->get_initialization_chunk(word_size,
 83.1207 -                                            vsm()->medium_chunk_bunch());
 83.1208 +  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
 83.1209 +                                                  word_size,
 83.1210 +                                                  vsm()->medium_chunk_bunch());
 83.1211    assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
 83.1212    if (new_chunk != NULL) {
 83.1213      // Add to this manager's list of chunks in use and current_chunk().
 83.1214 @@ -3045,9 +3084,9 @@
 83.1215  
 83.1216    // Allocate chunk for class metadata objects
 83.1217    if (using_class_space()) {
 83.1218 -    Metachunk* class_chunk =
 83.1219 -       class_space_list()->get_initialization_chunk(class_word_size,
 83.1220 -                                                    class_vsm()->medium_chunk_bunch());
 83.1221 +    Metachunk* class_chunk = get_initialization_chunk(ClassType,
 83.1222 +                                                      class_word_size,
 83.1223 +                                                      class_vsm()->medium_chunk_bunch());
 83.1224      if (class_chunk != NULL) {
 83.1225        class_vsm()->add_chunk(class_chunk, true);
 83.1226      }
 83.1227 @@ -3064,7 +3103,7 @@
 83.1228  
 83.1229  MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
 83.1230    // DumpSharedSpaces doesn't use class metadata area (yet)
 83.1231 -  // Also, don't use class_vsm() unless UseCompressedKlassPointers is true.
 83.1232 +  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
 83.1233    if (mdtype == ClassType && using_class_space()) {
 83.1234      return  class_vsm()->allocate(word_size);
 83.1235    } else {
 83.1236 @@ -3103,7 +3142,7 @@
 83.1237    }
 83.1238  }
 83.1239  
 83.1240 -size_t Metaspace::free_words(MetadataType mdtype) const {
 83.1241 +size_t Metaspace::free_words_slow(MetadataType mdtype) const {
 83.1242    if (mdtype == ClassType) {
 83.1243      return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
 83.1244    } else {
 83.1245 @@ -3213,7 +3252,7 @@
 83.1246          MetaspaceAux::dump(gclog_or_tty);
 83.1247        }
 83.1248        // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
 83.1249 -      const char* space_string = (mdtype == ClassType) ? "Class Metadata space" :
 83.1250 +      const char* space_string = (mdtype == ClassType) ? "Compressed class space" :
 83.1251                                                           "Metadata space";
 83.1252        report_java_out_of_memory(space_string);
 83.1253  
 83.1254 @@ -3264,12 +3303,16 @@
 83.1255    }
 83.1256  }
 83.1257  
 83.1258 +void Metaspace::purge(MetadataType mdtype) {
 83.1259 +  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
 83.1260 +}
 83.1261 +
 83.1262  void Metaspace::purge() {
 83.1263    MutexLockerEx cl(SpaceManager::expand_lock(),
 83.1264                     Mutex::_no_safepoint_check_flag);
 83.1265 -  space_list()->purge();
 83.1266 +  purge(NonClassType);
 83.1267    if (using_class_space()) {
 83.1268 -    class_space_list()->purge();
 83.1269 +    purge(ClassType);
 83.1270    }
 83.1271  }
 83.1272  
 83.1273 @@ -3311,3 +3354,70 @@
 83.1274      class_vsm()->dump(out);
 83.1275    }
 83.1276  }
 83.1277 +
 83.1278 +/////////////// Unit tests ///////////////
 83.1279 +
 83.1280 +#ifndef PRODUCT
 83.1281 +
 83.1282 +class TestMetaspaceAuxTest : AllStatic {
 83.1283 + public:
 83.1284 +  static void test_reserved() {
 83.1285 +    size_t reserved = MetaspaceAux::reserved_bytes();
 83.1286 +
 83.1287 +    assert(reserved > 0, "assert");
 83.1288 +
 83.1289 +    size_t committed  = MetaspaceAux::committed_bytes();
 83.1290 +    assert(committed <= reserved, "assert");
 83.1291 +
 83.1292 +    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
 83.1293 +    assert(reserved_metadata > 0, "assert");
 83.1294 +    assert(reserved_metadata <= reserved, "assert");
 83.1295 +
 83.1296 +    if (UseCompressedClassPointers) {
 83.1297 +      size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
 83.1298 +      assert(reserved_class > 0, "assert");
 83.1299 +      assert(reserved_class < reserved, "assert");
 83.1300 +    }
 83.1301 +  }
 83.1302 +
 83.1303 +  static void test_committed() {
 83.1304 +    size_t committed = MetaspaceAux::committed_bytes();
 83.1305 +
 83.1306 +    assert(committed > 0, "assert");
 83.1307 +
 83.1308 +    size_t reserved  = MetaspaceAux::reserved_bytes();
 83.1309 +    assert(committed <= reserved, "assert");
 83.1310 +
 83.1311 +    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
 83.1312 +    assert(committed_metadata > 0, "assert");
 83.1313 +    assert(committed_metadata <= committed, "assert");
 83.1314 +
 83.1315 +    if (UseCompressedClassPointers) {
 83.1316 +      size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
 83.1317 +      assert(committed_class > 0, "assert");
 83.1318 +      assert(committed_class < committed, "assert");
 83.1319 +    }
 83.1320 +  }
 83.1321 +
 83.1322 +  static void test_virtual_space_list_large_chunk() {
 83.1323 +    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
 83.1324 +    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
 83.1325 +    // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
 83.1326 +    // vm_allocation_granularity aligned on Windows.
 83.1327 +    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
 83.1328 +    large_size += (os::vm_page_size()/BytesPerWord);
 83.1329 +    vs_list->get_new_chunk(large_size, large_size, 0);
 83.1330 +  }
 83.1331 +
 83.1332 +  static void test() {
 83.1333 +    test_reserved();
 83.1334 +    test_committed();
 83.1335 +    test_virtual_space_list_large_chunk();
 83.1336 +  }
 83.1337 +};
 83.1338 +
 83.1339 +void TestMetaspaceAux_test() {
 83.1340 +  TestMetaspaceAuxTest::test();
 83.1341 +}
 83.1342 +
 83.1343 +#endif
    84.1 --- a/src/share/vm/memory/metaspace.hpp	Fri Sep 27 13:49:57 2013 -0400
    84.2 +++ b/src/share/vm/memory/metaspace.hpp	Fri Sep 27 13:53:43 2013 -0400
    84.3 @@ -56,12 +56,15 @@
    84.4  //                       +-------------------+
    84.5  //
    84.6  
    84.7 +class ChunkManager;
    84.8  class ClassLoaderData;
    84.9  class Metablock;
   84.10 +class Metachunk;
   84.11  class MetaWord;
   84.12  class Mutex;
   84.13  class outputStream;
   84.14  class SpaceManager;
   84.15 +class VirtualSpaceList;
   84.16  
   84.17  // Metaspaces each have a  SpaceManager and allocations
   84.18  // are done by the SpaceManager.  Allocations are done
   84.19 @@ -76,8 +79,6 @@
   84.20  // allocate() method returns a block for use as a
   84.21  // quantum of metadata.
   84.22  
   84.23 -class VirtualSpaceList;
   84.24 -
   84.25  class Metaspace : public CHeapObj<mtClass> {
   84.26    friend class VMStructs;
   84.27    friend class SpaceManager;
   84.28 @@ -102,6 +103,10 @@
   84.29   private:
   84.30    void initialize(Mutex* lock, MetaspaceType type);
   84.31  
   84.32 +  Metachunk* get_initialization_chunk(MetadataType mdtype,
   84.33 +                                      size_t chunk_word_size,
   84.34 +                                      size_t chunk_bunch);
   84.35 +
   84.36    // Align up the word size to the allocation word size
   84.37    static size_t align_word_size_up(size_t);
   84.38  
   84.39 @@ -134,6 +139,10 @@
   84.40    static VirtualSpaceList* _space_list;
   84.41    static VirtualSpaceList* _class_space_list;
   84.42  
   84.43 +  static ChunkManager* _chunk_manager_metadata;
   84.44 +  static ChunkManager* _chunk_manager_class;
   84.45 +
   84.46 + public:
   84.47    static VirtualSpaceList* space_list()       { return _space_list; }
   84.48    static VirtualSpaceList* class_space_list() { return _class_space_list; }
   84.49    static VirtualSpaceList* get_space_list(MetadataType mdtype) {
   84.50 @@ -141,6 +150,14 @@
   84.51      return mdtype == ClassType ? class_space_list() : space_list();
   84.52    }
   84.53  
   84.54 +  static ChunkManager* chunk_manager_metadata() { return _chunk_manager_metadata; }
   84.55 +  static ChunkManager* chunk_manager_class()    { return _chunk_manager_class; }
   84.56 +  static ChunkManager* get_chunk_manager(MetadataType mdtype) {
   84.57 +    assert(mdtype != MetadataTypeCount, "MetadaTypeCount can't be used as mdtype");
   84.58 +    return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata();
   84.59 +  }
   84.60 +
   84.61 + private:
   84.62    // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
   84.63    // maintain a single list for now.
   84.64    void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
   84.65 @@ -182,9 +199,8 @@
   84.66  
   84.67    char*  bottom() const;
   84.68    size_t used_words_slow(MetadataType mdtype) const;
   84.69 -  size_t free_words(MetadataType mdtype) const;
   84.70 +  size_t free_words_slow(MetadataType mdtype) const;
   84.71    size_t capacity_words_slow(MetadataType mdtype) const;
   84.72 -  size_t waste_words(MetadataType mdtype) const;
   84.73  
   84.74    size_t used_bytes_slow(MetadataType mdtype) const;
   84.75    size_t capacity_bytes_slow(MetadataType mdtype) const;
   84.76 @@ -200,6 +216,7 @@
   84.77    void dump(outputStream* const out) const;
   84.78  
   84.79    // Free empty virtualspaces
   84.80 +  static void purge(MetadataType mdtype);
   84.81    static void purge();
   84.82  
   84.83    void print_on(outputStream* st) const;
   84.84 @@ -213,27 +230,22 @@
   84.85  
   84.86    void iterate(AllocRecordClosure *closure);
   84.87  
   84.88 -  // Return TRUE only if UseCompressedKlassPointers is True and DumpSharedSpaces is False.
   84.89 +  // Return TRUE only if UseCompressedClassPointers is True and DumpSharedSpaces is False.
   84.90    static bool using_class_space() {
   84.91 -    return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces);
   84.92 +    return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers && !DumpSharedSpaces);
   84.93    }
   84.94  
   84.95  };
   84.96  
   84.97  class MetaspaceAux : AllStatic {
   84.98 -  static size_t free_chunks_total(Metaspace::MetadataType mdtype);
   84.99 -
  84.100 - public:
  84.101 -  // Statistics for class space and data space in metaspace.
  84.102 +  static size_t free_chunks_total_words(Metaspace::MetadataType mdtype);
  84.103  
  84.104    // These methods iterate over the classloader data graph
  84.105    // for the given Metaspace type.  These are slow.
  84.106    static size_t used_bytes_slow(Metaspace::MetadataType mdtype);
  84.107 -  static size_t free_in_bytes(Metaspace::MetadataType mdtype);
  84.108 +  static size_t free_bytes_slow(Metaspace::MetadataType mdtype);
  84.109    static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype);
  84.110 -
  84.111 -  // Iterates over the virtual space list.
  84.112 -  static size_t reserved_in_bytes(Metaspace::MetadataType mdtype);
  84.113 +  static size_t capacity_bytes_slow();
  84.114  
  84.115    // Running sum of space in all Metachunks that has been
  84.116    // allocated to a Metaspace.  This is used instead of
  84.117 @@ -263,17 +275,16 @@
  84.118    }
  84.119  
  84.120    // Used by MetaspaceCounters
  84.121 -  static size_t free_chunks_total();
  84.122 -  static size_t free_chunks_total_in_bytes();
  84.123 -  static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
  84.124 +  static size_t free_chunks_total_words();
  84.125 +  static size_t free_chunks_total_bytes();
  84.126 +  static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype);
  84.127  
  84.128    static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
  84.129      return _allocated_capacity_words[mdtype];
  84.130    }
  84.131    static size_t allocated_capacity_words() {
  84.132 -    return _allocated_capacity_words[Metaspace::NonClassType] +
  84.133 -           (Metaspace::using_class_space() ?
  84.134 -           _allocated_capacity_words[Metaspace::ClassType] : 0);
  84.135 +    return allocated_capacity_words(Metaspace::NonClassType) +
  84.136 +           allocated_capacity_words(Metaspace::ClassType);
  84.137    }
  84.138    static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
  84.139      return allocated_capacity_words(mdtype) * BytesPerWord;
  84.140 @@ -286,9 +297,8 @@
  84.141      return _allocated_used_words[mdtype];
  84.142    }
  84.143    static size_t allocated_used_words() {
  84.144 -    return _allocated_used_words[Metaspace::NonClassType] +
  84.145 -           (Metaspace::using_class_space() ?
  84.146 -           _allocated_used_words[Metaspace::ClassType] : 0);
  84.147 +    return allocated_used_words(Metaspace::NonClassType) +
  84.148 +           allocated_used_words(Metaspace::ClassType);
  84.149    }
  84.150    static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
  84.151      return allocated_used_words(mdtype) * BytesPerWord;
  84.152 @@ -300,31 +310,22 @@
  84.153    static size_t free_bytes();
  84.154    static size_t free_bytes(Metaspace::MetadataType mdtype);
  84.155  
  84.156 -  // Total capacity in all Metaspaces
  84.157 -  static size_t capacity_bytes_slow() {
  84.158 -#ifdef PRODUCT
  84.159 -    // Use allocated_capacity_bytes() in PRODUCT instead of this function.
  84.160 -    guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
  84.161 -#endif
  84.162 -    size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
  84.163 -    size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
  84.164 -    assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
  84.165 -           err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
  84.166 -             " class_capacity + non_class_capacity " SIZE_FORMAT
  84.167 -             " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
  84.168 -             allocated_capacity_bytes(), class_capacity + non_class_capacity,
  84.169 -             class_capacity, non_class_capacity));
  84.170 -
  84.171 -    return class_capacity + non_class_capacity;
  84.172 +  static size_t reserved_bytes(Metaspace::MetadataType mdtype);
  84.173 +  static size_t reserved_bytes() {
  84.174 +    return reserved_bytes(Metaspace::ClassType) +
  84.175 +           reserved_bytes(Metaspace::NonClassType);
  84.176    }
  84.177  
  84.178 -  // Total space reserved in all Metaspaces
  84.179 -  static size_t reserved_in_bytes() {
  84.180 -    return reserved_in_bytes(Metaspace::ClassType) +
  84.181 -           reserved_in_bytes(Metaspace::NonClassType);
  84.182 +  static size_t committed_bytes(Metaspace::MetadataType mdtype);
  84.183 +  static size_t committed_bytes() {
  84.184 +    return committed_bytes(Metaspace::ClassType) +
  84.185 +           committed_bytes(Metaspace::NonClassType);
  84.186    }
  84.187  
  84.188 -  static size_t min_chunk_size();
  84.189 +  static size_t min_chunk_size_words();
  84.190 +  static size_t min_chunk_size_bytes() {
  84.191 +    return min_chunk_size_words() * BytesPerWord;
  84.192 +  }
  84.193  
  84.194    // Print change in used metadata.
  84.195    static void print_metaspace_change(size_t prev_metadata_used);
    85.1 --- a/src/share/vm/memory/metaspaceCounters.cpp	Fri Sep 27 13:49:57 2013 -0400
    85.2 +++ b/src/share/vm/memory/metaspaceCounters.cpp	Fri Sep 27 13:53:43 2013 -0400
    85.3 @@ -65,26 +65,25 @@
    85.4  
    85.5  MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;
    85.6  
    85.7 -size_t MetaspaceCounters::calculate_capacity() {
    85.8 -  // The total capacity is the sum of
    85.9 -  //   1) capacity of Metachunks in use by all Metaspaces
   85.10 -  //   2) unused space at the end of each Metachunk
   85.11 -  //   3) space in the freelist
   85.12 -  size_t total_capacity = MetaspaceAux::allocated_capacity_bytes()
   85.13 -    + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes();
   85.14 -  return total_capacity;
   85.15 +size_t MetaspaceCounters::used() {
   85.16 +  return MetaspaceAux::allocated_used_bytes();
   85.17 +}
   85.18 +
   85.19 +size_t MetaspaceCounters::capacity() {
   85.20 +  return MetaspaceAux::committed_bytes();
   85.21 +}
   85.22 +
   85.23 +size_t MetaspaceCounters::max_capacity() {
   85.24 +  return MetaspaceAux::reserved_bytes();
   85.25  }
   85.26  
   85.27  void MetaspaceCounters::initialize_performance_counters() {
   85.28    if (UsePerfData) {
   85.29      assert(_perf_counters == NULL, "Should only be initialized once");
   85.30  
   85.31 -    size_t min_capacity = MetaspaceAux::min_chunk_size();
   85.32 -    size_t capacity = calculate_capacity();
   85.33 -    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
   85.34 -    size_t used = MetaspaceAux::allocated_used_bytes();
   85.35 -
   85.36 -    _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity, capacity, max_capacity, used);
   85.37 +    size_t min_capacity = 0;
   85.38 +    _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity,
   85.39 +                                               capacity(), max_capacity(), used());
   85.40    }
   85.41  }
   85.42  
   85.43 @@ -92,31 +91,29 @@
   85.44    if (UsePerfData) {
   85.45      assert(_perf_counters != NULL, "Should be initialized");
   85.46  
   85.47 -    size_t capacity = calculate_capacity();
   85.48 -    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
   85.49 -    size_t used = MetaspaceAux::allocated_used_bytes();
   85.50 -
   85.51 -    _perf_counters->update(capacity, max_capacity, used);
   85.52 +    _perf_counters->update(capacity(), max_capacity(), used());
   85.53    }
   85.54  }
   85.55  
   85.56  MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
   85.57  
   85.58 -size_t CompressedClassSpaceCounters::calculate_capacity() {
   85.59 -    return MetaspaceAux::allocated_capacity_bytes(_class_type) +
   85.60 -           MetaspaceAux::free_bytes(_class_type) +
   85.61 -           MetaspaceAux::free_chunks_total_in_bytes(_class_type);
   85.62 +size_t CompressedClassSpaceCounters::used() {
   85.63 +  return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
   85.64 +}
   85.65 +
   85.66 +size_t CompressedClassSpaceCounters::capacity() {
   85.67 +  return MetaspaceAux::committed_bytes(Metaspace::ClassType);
   85.68 +}
   85.69 +
   85.70 +size_t CompressedClassSpaceCounters::max_capacity() {
   85.71 +  return MetaspaceAux::reserved_bytes(Metaspace::ClassType);
   85.72  }
   85.73  
   85.74  void CompressedClassSpaceCounters::update_performance_counters() {
   85.75 -  if (UsePerfData && UseCompressedKlassPointers) {
   85.76 +  if (UsePerfData && UseCompressedClassPointers) {
   85.77      assert(_perf_counters != NULL, "Should be initialized");
   85.78  
   85.79 -    size_t capacity = calculate_capacity();
   85.80 -    size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
   85.81 -    size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
   85.82 -
   85.83 -    _perf_counters->update(capacity, max_capacity, used);
   85.84 +    _perf_counters->update(capacity(), max_capacity(), used());
   85.85    }
   85.86  }
   85.87  
   85.88 @@ -125,13 +122,10 @@
   85.89      assert(_perf_counters == NULL, "Should only be initialized once");
   85.90      const char* ns = "compressedclassspace";
   85.91  
   85.92 -    if (UseCompressedKlassPointers) {
   85.93 -      size_t min_capacity = MetaspaceAux::min_chunk_size();
   85.94 -      size_t capacity = calculate_capacity();
   85.95 -      size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
   85.96 -      size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
   85.97 -
   85.98 -      _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity, max_capacity, used);
   85.99 +    if (UseCompressedClassPointers) {
  85.100 +      size_t min_capacity = 0;
  85.101 +      _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity(),
  85.102 +                                                 max_capacity(), used());
  85.103      } else {
  85.104        _perf_counters = new MetaspacePerfCounters(ns, 0, 0, 0, 0);
  85.105      }
    86.1 --- a/src/share/vm/memory/metaspaceCounters.hpp	Fri Sep 27 13:49:57 2013 -0400
    86.2 +++ b/src/share/vm/memory/metaspaceCounters.hpp	Fri Sep 27 13:53:43 2013 -0400
    86.3 @@ -25,13 +25,15 @@
    86.4  #ifndef SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
    86.5  #define SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
    86.6  
    86.7 -#include "memory/metaspace.hpp"
    86.8 +#include "memory/allocation.hpp"
    86.9  
   86.10  class MetaspacePerfCounters;
   86.11  
   86.12  class MetaspaceCounters: public AllStatic {
   86.13    static MetaspacePerfCounters* _perf_counters;
   86.14 -  static size_t calculate_capacity();
   86.15 +  static size_t used();
   86.16 +  static size_t capacity();
   86.17 +  static size_t max_capacity();
   86.18  
   86.19   public:
   86.20    static void initialize_performance_counters();
   86.21 @@ -40,8 +42,9 @@
   86.22  
   86.23  class CompressedClassSpaceCounters: public AllStatic {
   86.24    static MetaspacePerfCounters* _perf_counters;
   86.25 -  static size_t calculate_capacity();
   86.26 -  static const Metaspace::MetadataType _class_type = Metaspace::ClassType;
   86.27 +  static size_t used();
   86.28 +  static size_t capacity();
   86.29 +  static size_t max_capacity();
   86.30  
   86.31   public:
   86.32    static void initialize_performance_counters();
    87.1 --- a/src/share/vm/memory/metaspaceShared.cpp	Fri Sep 27 13:49:57 2013 -0400
    87.2 +++ b/src/share/vm/memory/metaspaceShared.cpp	Fri Sep 27 13:53:43 2013 -0400
    87.3 @@ -103,9 +103,10 @@
    87.4      if (k->oop_is_instance()) {
    87.5        InstanceKlass* ik = InstanceKlass::cast(k);
    87.6        for (int i = 0; i < ik->methods()->length(); i++) {
    87.7 -        ResourceMark rm;
    87.8          Method* m = ik->methods()->at(i);
    87.9 -        (new Fingerprinter(m))->fingerprint();
   87.10 +        Fingerprinter fp(m);
   87.11 +        // The side effect of this call sets method's fingerprint field.
   87.12 +        fp.fingerprint();
   87.13        }
   87.14      }
   87.15    }
    88.1 --- a/src/share/vm/memory/universe.cpp	Fri Sep 27 13:49:57 2013 -0400
    88.2 +++ b/src/share/vm/memory/universe.cpp	Fri Sep 27 13:53:43 2013 -0400
    88.3 @@ -602,7 +602,7 @@
    88.4    }
    88.5  }
    88.6  
    88.7 -static intptr_t non_oop_bits = 0;
    88.8 +intptr_t Universe::_non_oop_bits = 0;
    88.9  
   88.10  void* Universe::non_oop_word() {
   88.11    // Neither the high bits nor the low bits of this value is allowed
   88.12 @@ -616,11 +616,11 @@
   88.13    // Using the OS-supplied non-memory-address word (usually 0 or -1)
   88.14    // will take care of the high bits, however many there are.
   88.15  
   88.16 -  if (non_oop_bits == 0) {
   88.17 -    non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
   88.18 +  if (_non_oop_bits == 0) {
   88.19 +    _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
   88.20    }
   88.21  
   88.22 -  return (void*)non_oop_bits;
   88.23 +  return (void*)_non_oop_bits;
   88.24  }
   88.25  
   88.26  jint universe_init() {
   88.27 @@ -872,13 +872,16 @@
   88.28  
   88.29  // Reserve the Java heap, which is now the same for all GCs.
   88.30  ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
   88.31 +  assert(alignment <= Arguments::conservative_max_heap_alignment(),
   88.32 +      err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
   88.33 +          alignment, Arguments::conservative_max_heap_alignment()));
   88.34    size_t total_reserved = align_size_up(heap_size, alignment);
   88.35    assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
   88.36        "heap size is too big for compressed oops");
   88.37  
   88.38    bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
   88.39    assert(!UseLargePages
   88.40 -      || UseParallelOldGC
   88.41 +      || UseParallelGC
   88.42        || use_large_pages, "Wrong alignment to use large pages");
   88.43  
   88.44    char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
   88.45 @@ -1028,7 +1031,7 @@
   88.46  
   88.47      msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
   88.48      java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
   88.49 -    msg = java_lang_String::create_from_str("Class Metadata space", CHECK_false);
   88.50 +    msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
   88.51      java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
   88.52  
   88.53      msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
    89.1 --- a/src/share/vm/memory/universe.hpp	Fri Sep 27 13:49:57 2013 -0400
    89.2 +++ b/src/share/vm/memory/universe.hpp	Fri Sep 27 13:53:43 2013 -0400
    89.3 @@ -179,9 +179,11 @@
    89.4    // The particular choice of collected heap.
    89.5    static CollectedHeap* _collectedHeap;
    89.6  
    89.7 +  static intptr_t _non_oop_bits;
    89.8 +
    89.9    // For UseCompressedOops.
   89.10    static struct NarrowPtrStruct _narrow_oop;
   89.11 -  // For UseCompressedKlassPointers.
   89.12 +  // For UseCompressedClassPointers.
   89.13    static struct NarrowPtrStruct _narrow_klass;
   89.14    static address _narrow_ptrs_base;
   89.15  
   89.16 @@ -229,7 +231,7 @@
   89.17      _narrow_oop._base    = base;
   89.18    }
   89.19    static void     set_narrow_klass_base(address base) {
   89.20 -    assert(UseCompressedKlassPointers, "no compressed klass ptrs?");
   89.21 +    assert(UseCompressedClassPointers, "no compressed klass ptrs?");
   89.22      _narrow_klass._base   = base;
   89.23    }
   89.24    static void     set_narrow_oop_use_implicit_null_checks(bool use) {
   89.25 @@ -353,7 +355,7 @@
   89.26    static int      narrow_oop_shift()                      { return  _narrow_oop._shift; }
   89.27    static bool     narrow_oop_use_implicit_null_checks()   { return  _narrow_oop._use_implicit_null_checks; }
   89.28  
   89.29 -  // For UseCompressedKlassPointers
   89.30 +  // For UseCompressedClassPointers
   89.31    static address  narrow_klass_base()                     { return  _narrow_klass._base; }
   89.32    static bool  is_narrow_klass_base(void* addr)           { return (narrow_klass_base() == (address)addr); }
   89.33    static int      narrow_klass_shift()                    { return  _narrow_klass._shift; }
    90.1 --- a/src/share/vm/oops/arrayOop.hpp	Fri Sep 27 13:49:57 2013 -0400
    90.2 +++ b/src/share/vm/oops/arrayOop.hpp	Fri Sep 27 13:53:43 2013 -0400
    90.3 @@ -65,7 +65,7 @@
    90.4    // declared nonstatic fields in arrayOopDesc if not compressed, otherwise
    90.5    // it occupies the second half of the _klass field in oopDesc.
    90.6    static int length_offset_in_bytes() {
    90.7 -    return UseCompressedKlassPointers ? klass_gap_offset_in_bytes() :
    90.8 +    return UseCompressedClassPointers ? klass_gap_offset_in_bytes() :
    90.9                                 sizeof(arrayOopDesc);
   90.10    }
   90.11  
    91.1 --- a/src/share/vm/oops/constantPool.cpp	Fri Sep 27 13:49:57 2013 -0400
    91.2 +++ b/src/share/vm/oops/constantPool.cpp	Fri Sep 27 13:53:43 2013 -0400
    91.3 @@ -108,16 +108,16 @@
    91.4  void ConstantPool::initialize_resolved_references(ClassLoaderData* loader_data,
    91.5                                                    intStack reference_map,
    91.6                                                    int constant_pool_map_length,
    91.7 -                                                   TRAPS) {
    91.8 +                                                  TRAPS) {
    91.9    // Initialized the resolved object cache.
   91.10    int map_length = reference_map.length();
   91.11    if (map_length > 0) {
   91.12      // Only need mapping back to constant pool entries.  The map isn't used for
   91.13 -    // invokedynamic resolved_reference entries.  The constant pool cache index
   91.14 -    // has the mapping back to both the constant pool and to the resolved
   91.15 -    // reference index.
   91.16 +    // invokedynamic resolved_reference entries.  For invokedynamic entries,
   91.17 +    // the constant pool cache index has the mapping back to both the constant
   91.18 +    // pool and to the resolved reference index.
   91.19      if (constant_pool_map_length > 0) {
   91.20 -      Array<u2>* om = MetadataFactory::new_array<u2>(loader_data, map_length, CHECK);
   91.21 +      Array<u2>* om = MetadataFactory::new_array<u2>(loader_data, constant_pool_map_length, CHECK);
   91.22  
   91.23        for (int i = 0; i < constant_pool_map_length; i++) {
   91.24          int x = reference_map.at(i);
   91.25 @@ -182,16 +182,9 @@
   91.26  
   91.27  int ConstantPool::cp_to_object_index(int cp_index) {
   91.28    // this is harder don't do this so much.
   91.29 -  for (int i = 0; i< reference_map()->length(); i++) {
   91.30 -    if (reference_map()->at(i) == cp_index) return i;
   91.31 -    // Zero entry is divider between constant pool indices for strings,
   91.32 -    // method handles and method types. After that the index is a constant
   91.33 -    // pool cache index for invokedynamic.  Stop when zero (which can never
   91.34 -    // be a constant pool index)
   91.35 -    if (reference_map()->at(i) == 0) break;
   91.36 -  }
   91.37 -  // We might not find the index.
   91.38 -  return _no_index_sentinel;
   91.39 +  int i = reference_map()->find(cp_index);
   91.40 +  // We might not find the index for jsr292 call.
   91.41 +  return (i < 0) ? _no_index_sentinel : i;
   91.42  }
   91.43  
   91.44  Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS) {
   91.45 @@ -396,32 +389,6 @@
   91.46  }
   91.47  
   91.48  
   91.49 -// This is an interface for the compiler that allows accessing non-resolved entries
   91.50 -// in the constant pool - but still performs the validations tests. Must be used
   91.51 -// in a pre-parse of the compiler - to determine what it can do and not do.
   91.52 -// Note: We cannot update the ConstantPool from the vm_thread.
   91.53 -Klass* ConstantPool::klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int index, TRAPS) {
   91.54 -  int which = this_oop->klass_ref_index_at(index);
   91.55 -  CPSlot entry = this_oop->slot_at(which);
   91.56 -  if (entry.is_resolved()) {
   91.57 -    assert(entry.get_klass()->is_klass(), "must be");
   91.58 -    return entry.get_klass();
   91.59 -  } else {
   91.60 -    assert(entry.is_unresolved(), "must be either symbol or klass");
   91.61 -    Symbol*  name  = entry.get_symbol();
   91.62 -    oop loader = this_oop->pool_holder()->class_loader();
   91.63 -    oop protection_domain = this_oop->pool_holder()->protection_domain();
   91.64 -    Handle h_loader(THREAD, loader);
   91.65 -    Handle h_prot  (THREAD, protection_domain);
   91.66 -    KlassHandle k(THREAD, SystemDictionary::find(name, h_loader, h_prot, THREAD));
   91.67 -
   91.68 -    // Do access check for klasses
   91.69 -    if( k.not_null() ) verify_constant_pool_resolve(this_oop, k, CHECK_NULL);
   91.70 -    return k();
   91.71 -  }
   91.72 -}
   91.73 -
   91.74 -
   91.75  Method* ConstantPool::method_at_if_loaded(constantPoolHandle cpool,
   91.76                                                     int which) {
   91.77    if (cpool->cache() == NULL)  return NULL;  // nothing to load yet
   91.78 @@ -866,8 +833,7 @@
   91.79    // If the string has already been interned, this entry will be non-null
   91.80    oop str = this_oop->resolved_references()->obj_at(obj_index);
   91.81    if (str != NULL) return str;
   91.82 -
   91.83 -      Symbol* sym = this_oop->unresolved_string_at(which);
   91.84 +  Symbol* sym = this_oop->unresolved_string_at(which);
   91.85    str = StringTable::intern(sym, CHECK_(NULL));
   91.86    this_oop->string_at_put(which, obj_index, str);
   91.87    assert(java_lang_String::is_instance(str), "must be string");
   91.88 @@ -1645,9 +1611,11 @@
   91.89      case JVM_CONSTANT_UnresolvedClassInError:
   91.90      case JVM_CONSTANT_StringIndex:
   91.91      case JVM_CONSTANT_MethodType:
   91.92 +    case JVM_CONSTANT_MethodTypeInError:
   91.93        return 3;
   91.94  
   91.95      case JVM_CONSTANT_MethodHandle:
   91.96 +    case JVM_CONSTANT_MethodHandleInError:
   91.97        return 4; //tag, ref_kind, ref_index
   91.98  
   91.99      case JVM_CONSTANT_Integer:
  91.100 @@ -1828,8 +1796,8 @@
  91.101        case JVM_CONSTANT_MethodHandle:
  91.102        case JVM_CONSTANT_MethodHandleInError: {
  91.103          *bytes = JVM_CONSTANT_MethodHandle;
  91.104 -        int kind = method_handle_ref_kind_at(idx);
  91.105 -        idx1 = method_handle_index_at(idx);
  91.106 +        int kind = method_handle_ref_kind_at_error_ok(idx);
  91.107 +        idx1 = method_handle_index_at_error_ok(idx);
  91.108          *(bytes+1) = (unsigned char) kind;
  91.109          Bytes::put_Java_u2((address) (bytes+2), idx1);
  91.110          DBG(printf("JVM_CONSTANT_MethodHandle: %d %hd", kind, idx1));
  91.111 @@ -1838,7 +1806,7 @@
  91.112        case JVM_CONSTANT_MethodType:
  91.113        case JVM_CONSTANT_MethodTypeInError: {
  91.114          *bytes = JVM_CONSTANT_MethodType;
  91.115 -        idx1 = method_type_index_at(idx);
  91.116 +        idx1 = method_type_index_at_error_ok(idx);
  91.117          Bytes::put_Java_u2((address) (bytes+1), idx1);
  91.118          DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1));
  91.119          break;
  91.120 @@ -2026,12 +1994,12 @@
  91.121        break;
  91.122      case JVM_CONSTANT_MethodHandle :
  91.123      case JVM_CONSTANT_MethodHandleInError :
  91.124 -      st->print("ref_kind=%d", method_handle_ref_kind_at(index));
  91.125 -      st->print(" ref_index=%d", method_handle_index_at(index));
  91.126 +      st->print("ref_kind=%d", method_handle_ref_kind_at_error_ok(index));
  91.127 +      st->print(" ref_index=%d", method_handle_index_at_error_ok(index));
  91.128        break;
  91.129      case JVM_CONSTANT_MethodType :
  91.130      case JVM_CONSTANT_MethodTypeInError :
  91.131 -      st->print("signature_index=%d", method_type_index_at(index));
  91.132 +      st->print("signature_index=%d", method_type_index_at_error_ok(index));
  91.133        break;
  91.134      case JVM_CONSTANT_InvokeDynamic :
  91.135        {
    92.1 --- a/src/share/vm/oops/constantPool.hpp	Fri Sep 27 13:49:57 2013 -0400
    92.2 +++ b/src/share/vm/oops/constantPool.hpp	Fri Sep 27 13:53:43 2013 -0400
    92.3 @@ -231,7 +231,6 @@
    92.4    static int cache_offset_in_bytes()        { return offset_of(ConstantPool, _cache); }
    92.5    static int pool_holder_offset_in_bytes()  { return offset_of(ConstantPool, _pool_holder); }
    92.6    static int resolved_references_offset_in_bytes() { return offset_of(ConstantPool, _resolved_references); }
    92.7 -  static int reference_map_offset_in_bytes() { return offset_of(ConstantPool, _reference_map); }
    92.8  
    92.9    // Storing constants
   92.10  
   92.11 @@ -475,18 +474,42 @@
   92.12      return *int_at_addr(which);
   92.13    }
   92.14  
   92.15 -  int method_handle_ref_kind_at(int which) {
   92.16 -    assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
   92.17 + private:
   92.18 +  int method_handle_ref_kind_at(int which, bool error_ok) {
   92.19 +    assert(tag_at(which).is_method_handle() ||
   92.20 +           (error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool");
   92.21      return extract_low_short_from_int(*int_at_addr(which));  // mask out unwanted ref_index bits
   92.22    }
   92.23 -  int method_handle_index_at(int which) {
   92.24 -    assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
   92.25 +  int method_handle_index_at(int which, bool error_ok) {
   92.26 +    assert(tag_at(which).is_method_handle() ||
   92.27 +           (error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool");
   92.28      return extract_high_short_from_int(*int_at_addr(which));  // shift out unwanted ref_kind bits
   92.29    }
   92.30 -  int method_type_index_at(int which) {
   92.31 -    assert(tag_at(which).is_method_type(), "Corrupted constant pool");
   92.32 +  int method_type_index_at(int which, bool error_ok) {
   92.33 +    assert(tag_at(which).is_method_type() ||
   92.34 +           (error_ok && tag_at(which).is_method_type_in_error()), "Corrupted constant pool");
   92.35      return *int_at_addr(which);
   92.36    }
   92.37 + public:
   92.38 +  int method_handle_ref_kind_at(int which) {
   92.39 +    return method_handle_ref_kind_at(which, false);
   92.40 +  }
   92.41 +  int method_handle_ref_kind_at_error_ok(int which) {
   92.42 +    return method_handle_ref_kind_at(which, true);
   92.43 +  }
   92.44 +  int method_handle_index_at(int which) {
   92.45 +    return method_handle_index_at(which, false);
   92.46 +  }
   92.47 +  int method_handle_index_at_error_ok(int which) {
   92.48 +    return method_handle_index_at(which, true);
   92.49 +  }
   92.50 +  int method_type_index_at(int which) {
   92.51 +    return method_type_index_at(which, false);
   92.52 +  }
   92.53 +  int method_type_index_at_error_ok(int which) {
   92.54 +    return method_type_index_at(which, true);
   92.55 +  }
   92.56 +
   92.57    // Derived queries:
   92.58    Symbol* method_handle_name_ref_at(int which) {
   92.59      int member = method_handle_index_at(which);
   92.60 @@ -730,8 +753,6 @@
   92.61    static oop         method_type_at_if_loaded      (constantPoolHandle this_oop, int which);
   92.62    static Klass*            klass_at_if_loaded      (constantPoolHandle this_oop, int which);
   92.63    static Klass*        klass_ref_at_if_loaded      (constantPoolHandle this_oop, int which);
   92.64 -  // Same as above - but does LinkResolving.
   92.65 -  static Klass*        klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS);
   92.66  
   92.67    // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
   92.68    // future by other Java code. These take constant pool indices rather than
    93.1 --- a/src/share/vm/oops/cpCache.cpp	Fri Sep 27 13:49:57 2013 -0400
    93.2 +++ b/src/share/vm/oops/cpCache.cpp	Fri Sep 27 13:53:43 2013 -0400
    93.3 @@ -140,9 +140,10 @@
    93.4              err_msg("size must not change: parameter_size=%d, value=%d", parameter_size(), value));
    93.5  }
    93.6  
    93.7 -void ConstantPoolCacheEntry::set_method(Bytecodes::Code invoke_code,
    93.8 -                                        methodHandle method,
    93.9 -                                        int vtable_index) {
   93.10 +void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_code,
   93.11 +                                                       methodHandle method,
   93.12 +                                                       int vtable_index) {
   93.13 +  bool is_vtable_call = (vtable_index >= 0);  // FIXME: split this method on this boolean
   93.14    assert(method->interpreter_entry() != NULL, "should have been set at this point");
   93.15    assert(!method->is_obsolete(),  "attempt to write obsolete method to cpCache");
   93.16  
   93.17 @@ -160,7 +161,8 @@
   93.18        // ...and fall through as if we were handling invokevirtual:
   93.19      case Bytecodes::_invokevirtual:
   93.20        {
   93.21 -        if (method->can_be_statically_bound()) {
   93.22 +        if (!is_vtable_call) {
   93.23 +          assert(method->can_be_statically_bound(), "");
   93.24            // set_f2_as_vfinal_method checks if is_vfinal flag is true.
   93.25            set_method_flags(as_TosState(method->result_type()),
   93.26                             (                             1      << is_vfinal_shift) |
   93.27 @@ -169,6 +171,7 @@
   93.28                             method()->size_of_parameters());
   93.29            set_f2_as_vfinal_method(method());
   93.30          } else {
   93.31 +          assert(!method->can_be_statically_bound(), "");
   93.32            assert(vtable_index >= 0, "valid index");
   93.33            assert(!method->is_final_method(), "sanity");
   93.34            set_method_flags(as_TosState(method->result_type()),
   93.35 @@ -182,6 +185,7 @@
   93.36  
   93.37      case Bytecodes::_invokespecial:
   93.38      case Bytecodes::_invokestatic:
   93.39 +      assert(!is_vtable_call, "");
   93.40        // Note:  Read and preserve the value of the is_vfinal flag on any
   93.41        // invokevirtual bytecode shared with this constant pool cache entry.
   93.42        // It is cheap and safe to consult is_vfinal() at all times.
   93.43 @@ -232,8 +236,22 @@
   93.44    NOT_PRODUCT(verify(tty));
   93.45  }
   93.46  
   93.47 +void ConstantPoolCacheEntry::set_direct_call(Bytecodes::Code invoke_code, methodHandle method) {
   93.48 +  int index = Method::nonvirtual_vtable_index;
   93.49 +  // index < 0; FIXME: inline and customize set_direct_or_vtable_call
   93.50 +  set_direct_or_vtable_call(invoke_code, method, index);
   93.51 +}
   93.52  
   93.53 -void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index) {
   93.54 +void ConstantPoolCacheEntry::set_vtable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
   93.55 +  // either the method is a miranda or its holder should accept the given index
   93.56 +  assert(method->method_holder()->is_interface() || method->method_holder()->verify_vtable_index(index), "");
   93.57 +  // index >= 0; FIXME: inline and customize set_direct_or_vtable_call
   93.58 +  set_direct_or_vtable_call(invoke_code, method, index);
   93.59 +}
   93.60 +
   93.61 +void ConstantPoolCacheEntry::set_itable_call(Bytecodes::Code invoke_code, methodHandle method, int index) {
   93.62 +  assert(method->method_holder()->verify_itable_index(index), "");
   93.63 +  assert(invoke_code == Bytecodes::_invokeinterface, "");
   93.64    InstanceKlass* interf = method->method_holder();
   93.65    assert(interf->is_interface(), "must be an interface");
   93.66    assert(!method->is_final_method(), "interfaces do not have final methods; cannot link to one here");
    94.1 --- a/src/share/vm/oops/cpCache.hpp	Fri Sep 27 13:49:57 2013 -0400
    94.2 +++ b/src/share/vm/oops/cpCache.hpp	Fri Sep 27 13:53:43 2013 -0400
    94.3 @@ -219,15 +219,29 @@
    94.4      Klass*          root_klass                   // needed by the GC to dirty the klass
    94.5    );
    94.6  
    94.7 -  void set_method(                               // sets entry to resolved method entry
    94.8 + private:
    94.9 +  void set_direct_or_vtable_call(
   94.10      Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
   94.11      methodHandle    method,                      // the method/prototype if any (NULL, otherwise)
   94.12      int             vtable_index                 // the vtable index if any, else negative
   94.13    );
   94.14  
   94.15 -  void set_interface_call(
   94.16 -    methodHandle method,                         // Resolved method
   94.17 -    int index                                    // Method index into interface
   94.18 + public:
   94.19 +  void set_direct_call(                          // sets entry to exact concrete method entry
   94.20 +    Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
   94.21 +    methodHandle    method                       // the method to call
   94.22 +  );
   94.23 +
   94.24 +  void set_vtable_call(                          // sets entry to vtable index
   94.25 +    Bytecodes::Code invoke_code,                 // the bytecode used for invoking the method
   94.26 +    methodHandle    method,                      // resolved method which declares the vtable index
   94.27 +    int             vtable_index                 // the vtable index
   94.28 +  );
   94.29 +
   94.30 +  void set_itable_call(
   94.31 +    Bytecodes::Code invoke_code,                 // the bytecode used; must be invokeinterface
   94.32 +    methodHandle method,                         // the resolved interface method
   94.33 +    int itable_index                             // index into itable for the method
   94.34    );
   94.35  
   94.36    void set_method_handle(
    95.1 --- a/src/share/vm/oops/fieldStreams.hpp	Fri Sep 27 13:49:57 2013 -0400
    95.2 +++ b/src/share/vm/oops/fieldStreams.hpp	Fri Sep 27 13:53:43 2013 -0400
    95.3 @@ -1,5 +1,5 @@
    95.4  /*
    95.5 - * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
    95.6 + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
    95.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    95.8   *
    95.9   * This code is free software; you can redistribute it and/or modify it
   95.10 @@ -27,6 +27,7 @@
   95.11  
   95.12  #include "oops/instanceKlass.hpp"
   95.13  #include "oops/fieldInfo.hpp"
   95.14 +#include "runtime/fieldDescriptor.hpp"
   95.15  
   95.16  // The is the base class for iteration over the fields array
   95.17  // describing the declared fields in the class.  Several subclasses
   95.18 @@ -43,8 +44,10 @@
   95.19    int                 _index;
   95.20    int                 _limit;
   95.21    int                 _generic_signature_slot;
   95.22 +  fieldDescriptor     _fd_buf;
   95.23  
   95.24    FieldInfo* field() const { return FieldInfo::from_field_array(_fields, _index); }
   95.25 +  InstanceKlass* field_holder() const { return _constants->pool_holder(); }
   95.26  
   95.27    int init_generic_signature_start_slot() {
   95.28      int length = _fields->length();
   95.29 @@ -102,6 +105,7 @@
   95.30      _index = 0;
   95.31      _limit = klass->java_fields_count();
   95.32      init_generic_signature_start_slot();
   95.33 +    assert(klass == field_holder(), "");
   95.34    }
   95.35    FieldStreamBase(instanceKlassHandle klass) {
   95.36      _fields = klass->fields();
   95.37 @@ -109,6 +113,7 @@
   95.38      _index = 0;
   95.39      _limit = klass->java_fields_count();
   95.40      init_generic_signature_start_slot();
   95.41 +    assert(klass == field_holder(), "");
   95.42    }
   95.43  
   95.44    // accessors
   95.45 @@ -180,6 +185,12 @@
   95.46      return field()->contended_group();
   95.47    }
   95.48  
   95.49 +  // bridge to a heavier API:
   95.50 +  fieldDescriptor& field_descriptor() const {
   95.51 +    fieldDescriptor& field = const_cast<fieldDescriptor&>(_fd_buf);
   95.52 +    field.reinitialize(field_holder(), _index);
   95.53 +    return field;
   95.54 +  }
   95.55  };
   95.56  
   95.57  // Iterate over only the internal fields
    96.1 --- a/src/share/vm/oops/instanceKlass.cpp	Fri Sep 27 13:49:57 2013 -0400
    96.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Fri Sep 27 13:53:43 2013 -0400
    96.3 @@ -286,7 +286,6 @@
    96.4    init_previous_versions();
    96.5    set_generic_signature_index(0);
    96.6    release_set_methods_jmethod_ids(NULL);
    96.7 -  release_set_methods_cached_itable_indices(NULL);
    96.8    set_annotations(NULL);
    96.9    set_jvmti_cached_class_field_map(NULL);
   96.10    set_initial_method_idnum(0);
   96.11 @@ -1149,7 +1148,7 @@
   96.12      Symbol* f_name = fs.name();
   96.13      Symbol* f_sig  = fs.signature();
   96.14      if (f_name == name && f_sig == sig) {
   96.15 -      fd->initialize(const_cast<InstanceKlass*>(this), fs.index());
   96.16 +      fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index());
   96.17        return true;
   96.18      }
   96.19    }
   96.20 @@ -1218,7 +1217,7 @@
   96.21  bool InstanceKlass::find_local_field_from_offset(int offset, bool is_static, fieldDescriptor* fd) const {
   96.22    for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
   96.23      if (fs.offset() == offset) {
   96.24 -      fd->initialize(const_cast<InstanceKlass*>(this), fs.index());
   96.25 +      fd->reinitialize(const_cast<InstanceKlass*>(this), fs.index());
   96.26        if (fd->is_static() == is_static) return true;
   96.27      }
   96.28    }
   96.29 @@ -1251,8 +1250,7 @@
   96.30  void InstanceKlass::do_local_static_fields(FieldClosure* cl) {
   96.31    for (JavaFieldStream fs(this); !fs.done(); fs.next()) {
   96.32      if (fs.access_flags().is_static()) {
   96.33 -      fieldDescriptor fd;
   96.34 -      fd.initialize(this, fs.index());
   96.35 +      fieldDescriptor& fd = fs.field_descriptor();
   96.36        cl->do_field(&fd);
   96.37      }
   96.38    }
   96.39 @@ -1268,8 +1266,7 @@
   96.40  void InstanceKlass::do_local_static_fields_impl(instanceKlassHandle this_oop, void f(fieldDescriptor* fd, TRAPS), TRAPS) {
   96.41    for (JavaFieldStream fs(this_oop()); !fs.done(); fs.next()) {
   96.42      if (fs.access_flags().is_static()) {
   96.43 -      fieldDescriptor fd;
   96.44 -      fd.initialize(this_oop(), fs.index());
   96.45 +      fieldDescriptor& fd = fs.field_descriptor();
   96.46        f(&fd, CHECK);
   96.47      }
   96.48    }
   96.49 @@ -1291,7 +1288,7 @@
   96.50    int* fields_sorted = NEW_C_HEAP_ARRAY(int, 2*(length+1), mtClass);
   96.51    int j = 0;
   96.52    for (int i = 0; i < length; i += 1) {
   96.53 -    fd.initialize(this, i);
   96.54 +    fd.reinitialize(this, i);
   96.55      if (!fd.is_static()) {
   96.56        fields_sorted[j + 0] = fd.offset();
   96.57        fields_sorted[j + 1] = i;
   96.58 @@ -1303,7 +1300,7 @@
   96.59      // _sort_Fn is defined in growableArray.hpp.
   96.60      qsort(fields_sorted, length/2, 2*sizeof(int), (_sort_Fn)compare_fields_by_offset);
   96.61      for (int i = 0; i < length; i += 2) {
   96.62 -      fd.initialize(this, fields_sorted[i + 1]);
   96.63 +      fd.reinitialize(this, fields_sorted[i + 1]);
   96.64        assert(!fd.is_static() && fd.offset() == fields_sorted[i], "only nonstatic fields");
   96.65        cl->do_field(&fd);
   96.66      }
   96.67 @@ -1686,87 +1683,6 @@
   96.68  }
   96.69  
   96.70  
   96.71 -// Cache an itable index
   96.72 -void InstanceKlass::set_cached_itable_index(size_t idnum, int index) {
   96.73 -  int* indices = methods_cached_itable_indices_acquire();
   96.74 -  int* to_dealloc_indices = NULL;
   96.75 -
   96.76 -  // We use a double-check locking idiom here because this cache is
   96.77 -  // performance sensitive. In the normal system, this cache only
   96.78 -  // transitions from NULL to non-NULL which is safe because we use
   96.79 -  // release_set_methods_cached_itable_indices() to advertise the
   96.80 -  // new cache. A partially constructed cache should never be seen
   96.81 -  // by a racing thread. Cache reads and writes proceed without a
   96.82 -  // lock, but creation of the cache itself requires no leaks so a
   96.83 -  // lock is generally acquired in that case.
   96.84 -  //
   96.85 -  // If the RedefineClasses() API has been used, then this cache can
   96.86 -  // grow and we'll have transitions from non-NULL to bigger non-NULL.
   96.87 -  // Cache creation requires no leaks and we require safety between all
   96.88 -  // cache accesses and freeing of the old cache so a lock is generally
   96.89 -  // acquired when the RedefineClasses() API has been used.
   96.90 -
   96.91 -  if (indices == NULL || idnum_can_increment()) {
   96.92 -    // we need a cache or the cache can grow
   96.93 -    MutexLocker ml(JNICachedItableIndex_lock);
   96.94 -    // reacquire the cache to see if another thread already did the work
   96.95 -    indices = methods_cached_itable_indices_acquire();
   96.96 -    size_t length = 0;
   96.97 -    // cache size is stored in element[0], other elements offset by one
   96.98 -    if (indices == NULL || (length = (size_t)indices[0]) <= idnum) {
   96.99 -      size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count());
  96.100 -      int* new_indices = NEW_C_HEAP_ARRAY(int, size+1, mtClass);
  96.101 -      new_indices[0] = (int)size;
  96.102 -      // copy any existing entries
  96.103 -      size_t i;
  96.104 -      for (i = 0; i < length; i++) {
  96.105 -        new_indices[i+1] = indices[i+1];
  96.106 -      }
  96.107 -      // Set all the rest to -1
  96.108 -      for (i = length; i < size; i++) {
  96.109 -        new_indices[i+1] = -1;
  96.110 -      }
  96.111 -      if (indices != NULL) {
  96.112 -        // We have an old cache to delete so save it for after we
  96.113 -        // drop the lock.
  96.114 -        to_dealloc_indices = indices;
  96.115 -      }
  96.116 -      release_set_methods_cached_itable_indices(indices = new_indices);
  96.117 -    }
  96.118 -
  96.119 -    if (idnum_can_increment()) {
  96.120 -      // this cache can grow so we have to write to it safely
  96.121 -      indices[idnum+1] = index;
  96.122 -    }
  96.123 -  } else {
  96.124 -    CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
  96.125 -  }
  96.126 -
  96.127 -  if (!idnum_can_increment()) {
  96.128 -    // The cache cannot grow and this JNI itable index value does not
  96.129 -    // have to be unique like a jmethodID. If there is a race to set it,
  96.130 -    // it doesn't matter.
  96.131 -    indices[idnum+1] = index;
  96.132 -  }
  96.133 -
  96.134 -  if (to_dealloc_indices != NULL) {
  96.135 -    // we allocated a new cache so free the old one
  96.136 -    FreeHeap(to_dealloc_indices);
  96.137 -  }
  96.138 -}
  96.139 -
  96.140 -
  96.141 -// Retrieve a cached itable index
  96.142 -int InstanceKlass::cached_itable_index(size_t idnum) {
  96.143 -  int* indices = methods_cached_itable_indices_acquire();
  96.144 -  if (indices != NULL && ((size_t)indices[0]) > idnum) {
  96.145 -     // indices exist and are long enough, retrieve possible cached
  96.146 -    return indices[idnum+1];
  96.147 -  }
  96.148 -  return -1;
  96.149 -}
  96.150 -
  96.151 -
  96.152  //
  96.153  // Walk the list of dependent nmethods searching for nmethods which
  96.154  // are dependent on the changes that were passed in and mark them for
  96.155 @@ -2326,12 +2242,6 @@
  96.156      }
  96.157    }
  96.158  
  96.159 -  int* indices = methods_cached_itable_indices_acquire();
  96.160 -  if (indices != (int*)NULL) {
  96.161 -    release_set_methods_cached_itable_indices(NULL);
  96.162 -    FreeHeap(indices);
  96.163 -  }
  96.164 -
  96.165    // release dependencies
  96.166    nmethodBucket* b = _dependencies;
  96.167    _dependencies = NULL;
  96.168 @@ -2782,6 +2692,18 @@
  96.169    "allocated", "loaded", "linked", "being_initialized", "fully_initialized", "initialization_error"
  96.170  };
  96.171  
  96.172 +static void print_vtable(intptr_t* start, int len, outputStream* st) {
  96.173 +  for (int i = 0; i < len; i++) {
  96.174 +    intptr_t e = start[i];
  96.175 +    st->print("%d : " INTPTR_FORMAT, i, e);
  96.176 +    if (e != 0 && ((Metadata*)e)->is_metaspace_object()) {
  96.177 +      st->print(" ");
  96.178 +      ((Metadata*)e)->print_value_on(st);
  96.179 +    }
  96.180 +    st->cr();
  96.181 +  }
  96.182 +}
  96.183 +
  96.184  void InstanceKlass::print_on(outputStream* st) const {
  96.185    assert(is_klass(), "must be klass");
  96.186    Klass::print_on(st);
  96.187 @@ -2816,7 +2738,7 @@
  96.188  
  96.189    st->print(BULLET"arrays:            "); array_klasses()->print_value_on_maybe_null(st); st->cr();
  96.190    st->print(BULLET"methods:           "); methods()->print_value_on(st);                  st->cr();
  96.191 -  if (Verbose) {
  96.192 +  if (Verbose || WizardMode) {
  96.193      Array<Method*>* method_array = methods();
  96.194      for(int i = 0; i < method_array->length(); i++) {
  96.195        st->print("%d : ", i); method_array->at(i)->print_value(); st->cr();
  96.196 @@ -2847,24 +2769,17 @@
  96.197    st->print(BULLET"field annotations:       "); fields_annotations()->print_value_on(st); st->cr();
  96.198    st->print(BULLET"field type annotations:  "); fields_type_annotations()->print_value_on(st); st->cr();
  96.199    {
  96.200 -    ResourceMark rm;
  96.201 -    // PreviousVersionInfo objects returned via PreviousVersionWalker
  96.202 -    // contain a GrowableArray of handles. We have to clean up the
  96.203 -    // GrowableArray _after_ the PreviousVersionWalker destructor
  96.204 -    // has destroyed the handles.
  96.205 -    {
  96.206 -      bool have_pv = false;
  96.207 -      PreviousVersionWalker pvw((InstanceKlass*)this);
  96.208 -      for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
  96.209 -           pv_info != NULL; pv_info = pvw.next_previous_version()) {
  96.210 -        if (!have_pv)
  96.211 -          st->print(BULLET"previous version:  ");
  96.212 -        have_pv = true;
  96.213 -        pv_info->prev_constant_pool_handle()()->print_value_on(st);
  96.214 -      }
  96.215 -      if (have_pv)  st->cr();
  96.216 -    } // pvw is cleaned up
  96.217 -  } // rm is cleaned up
  96.218 +    bool have_pv = false;
  96.219 +    PreviousVersionWalker pvw(Thread::current(), (InstanceKlass*)this);
  96.220 +    for (PreviousVersionNode * pv_node = pvw.next_previous_version();
  96.221 +         pv_node != NULL; pv_node = pvw.next_previous_version()) {
  96.222 +      if (!have_pv)
  96.223 +        st->print(BULLET"previous version:  ");
  96.224 +      have_pv = true;
  96.225 +      pv_node->prev_constant_pool()->print_value_on(st);
  96.226 +    }
  96.227 +    if (have_pv) st->cr();
  96.228 +  } // pvw is cleaned up
  96.229  
  96.230    if (generic_signature() != NULL) {
  96.231      st->print(BULLET"generic signature: ");
  96.232 @@ -2874,7 +2789,9 @@
  96.233    st->print(BULLET"inner classes:     "); inner_classes()->print_value_on(st);     st->cr();
  96.234    st->print(BULLET"java mirror:       "); java_mirror()->print_value_on(st);       st->cr();
  96.235    st->print(BULLET"vtable length      %d  (start addr: " INTPTR_FORMAT ")", vtable_length(), start_of_vtable());  st->cr();
  96.236 +  if (vtable_length() > 0 && (Verbose || WizardMode))  print_vtable(start_of_vtable(), vtable_length(), st);
  96.237    st->print(BULLET"itable length      %d (start addr: " INTPTR_FORMAT ")", itable_length(), start_of_itable()); st->cr();
  96.238 +  if (itable_length() > 0 && (Verbose || WizardMode))  print_vtable(start_of_itable(), itable_length(), st);
  96.239    st->print_cr(BULLET"---- static fields (%d words):", static_field_size());
  96.240    FieldPrinter print_static_field(st);
  96.241    ((InstanceKlass*)this)->do_local_static_fields(&print_static_field);
  96.242 @@ -2896,6 +2813,7 @@
  96.243  
  96.244  void InstanceKlass::print_value_on(outputStream* st) const {
  96.245    assert(is_klass(), "must be klass");
  96.246 +  if (Verbose || WizardMode)  access_flags().print_on(st);
  96.247    name()->print_value_on(st);
  96.248  }
  96.249  
  96.250 @@ -3392,34 +3310,34 @@
  96.251    Array<Method*>* old_methods = ikh->methods();
  96.252  
  96.253    if (cp_ref->on_stack()) {
  96.254 -  PreviousVersionNode * pv_node = NULL;
  96.255 -  if (emcp_method_count == 0) {
  96.256 +    PreviousVersionNode * pv_node = NULL;
  96.257 +    if (emcp_method_count == 0) {
  96.258        // non-shared ConstantPool gets a reference
  96.259 -      pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), NULL);
  96.260 -    RC_TRACE(0x00000400,
  96.261 -        ("add: all methods are obsolete; flushing any EMCP refs"));
  96.262 -  } else {
  96.263 -    int local_count = 0;
  96.264 +      pv_node = new PreviousVersionNode(cp_ref, NULL);
  96.265 +      RC_TRACE(0x00000400,
  96.266 +          ("add: all methods are obsolete; flushing any EMCP refs"));
  96.267 +    } else {
  96.268 +      int local_count = 0;
  96.269        GrowableArray<Method*>* method_refs = new (ResourceObj::C_HEAP, mtClass)
  96.270 -        GrowableArray<Method*>(emcp_method_count, true);
  96.271 -    for (int i = 0; i < old_methods->length(); i++) {
  96.272 -      if (emcp_methods->at(i)) {
  96.273 -          // this old method is EMCP. Save it only if it's on the stack
  96.274 -          Method* old_method = old_methods->at(i);
  96.275 -          if (old_method->on_stack()) {
  96.276 -            method_refs->append(old_method);
  96.277 +          GrowableArray<Method*>(emcp_method_count, true);
  96.278 +      for (int i = 0; i < old_methods->length(); i++) {
  96.279 +        if (emcp_methods->at(i)) {
  96.280 +            // this old method is EMCP. Save it only if it's on the stack
  96.281 +            Method* old_method = old_methods->at(i);
  96.282 +            if (old_method->on_stack()) {
  96.283 +              method_refs->append(old_method);
  96.284 +            }
  96.285 +          if (++local_count >= emcp_method_count) {
  96.286 +            // no more EMCP methods so bail out now
  96.287 +            break;
  96.288            }
  96.289 -        if (++local_count >= emcp_method_count) {
  96.290 -          // no more EMCP methods so bail out now
  96.291 -          break;
  96.292          }
  96.293        }
  96.294 -    }
  96.295        // non-shared ConstantPool gets a reference
  96.296 -      pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), method_refs);
  96.297 +      pv_node = new PreviousVersionNode(cp_ref, method_refs);
  96.298      }
  96.299      // append new previous version.
  96.300 -  _previous_versions->append(pv_node);
  96.301 +    _previous_versions->append(pv_node);
  96.302    }
  96.303  
  96.304    // Since the caller is the VMThread and we are at a safepoint, this
  96.305 @@ -3520,6 +3438,8 @@
  96.306          return m;
  96.307        }
  96.308      }
  96.309 +    // None found, return null for the caller to handle.
  96.310 +    return NULL;
  96.311    }
  96.312    return m;
  96.313  }
  96.314 @@ -3536,10 +3456,9 @@
  96.315  // Construct a PreviousVersionNode entry for the array hung off
  96.316  // the InstanceKlass.
  96.317  PreviousVersionNode::PreviousVersionNode(ConstantPool* prev_constant_pool,
  96.318 -  bool prev_cp_is_weak, GrowableArray<Method*>* prev_EMCP_methods) {
  96.319 +  GrowableArray<Method*>* prev_EMCP_methods) {
  96.320  
  96.321    _prev_constant_pool = prev_constant_pool;
  96.322 -  _prev_cp_is_weak = prev_cp_is_weak;
  96.323    _prev_EMCP_methods = prev_EMCP_methods;
  96.324  }
  96.325  
  96.326 @@ -3555,99 +3474,38 @@
  96.327    }
  96.328  }
  96.329  
  96.330 -
  96.331 -// Construct a PreviousVersionInfo entry
  96.332 -PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
  96.333 -  _prev_constant_pool_handle = constantPoolHandle();  // NULL handle
  96.334 -  _prev_EMCP_method_handles = NULL;
  96.335 -
  96.336 -  ConstantPool* cp = pv_node->prev_constant_pool();
  96.337 -  assert(cp != NULL, "constant pool ref was unexpectedly cleared");
  96.338 -  if (cp == NULL) {
  96.339 -    return;  // robustness
  96.340 -  }
  96.341 -
  96.342 -  // make the ConstantPool* safe to return
  96.343 -  _prev_constant_pool_handle = constantPoolHandle(cp);
  96.344 -
  96.345 -  GrowableArray<Method*>* method_refs = pv_node->prev_EMCP_methods();
  96.346 -  if (method_refs == NULL) {
  96.347 -    // the InstanceKlass did not have any EMCP methods
  96.348 -    return;
  96.349 -  }
  96.350 -
  96.351 -  _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);
  96.352 -
  96.353 -  int n_methods = method_refs->length();
  96.354 -  for (int i = 0; i < n_methods; i++) {
  96.355 -    Method* method = method_refs->at(i);
  96.356 -    assert (method != NULL, "method has been cleared");
  96.357 -    if (method == NULL) {
  96.358 -      continue;  // robustness
  96.359 -    }
  96.360 -    // make the Method* safe to return
  96.361 -    _prev_EMCP_method_handles->append(methodHandle(method));
  96.362 -  }
  96.363 -}
  96.364 -
  96.365 -
  96.366 -// Destroy a PreviousVersionInfo
  96.367 -PreviousVersionInfo::~PreviousVersionInfo() {
  96.368 -  // Since _prev_EMCP_method_handles is not C-heap allocated, we
  96.369 -  // don't have to delete it.
  96.370 -}
  96.371 -
  96.372 -
  96.373  // Construct a helper for walking the previous versions array
  96.374 -PreviousVersionWalker::PreviousVersionWalker(InstanceKlass *ik) {
  96.375 +PreviousVersionWalker::PreviousVersionWalker(Thread* thread, InstanceKlass *ik) {
  96.376 +  _thread = thread;
  96.377    _previous_versions = ik->previous_versions();
  96.378    _current_index = 0;
  96.379 -  // _hm needs no initialization
  96.380    _current_p = NULL;
  96.381 -}
  96.382 -
  96.383 -
  96.384 -// Destroy a PreviousVersionWalker
  96.385 -PreviousVersionWalker::~PreviousVersionWalker() {
  96.386 -  // Delete the current info just in case the caller didn't walk to
  96.387 -  // the end of the previous versions list. No harm if _current_p is
  96.388 -  // already NULL.
  96.389 -  delete _current_p;
  96.390 -
  96.391 -  // When _hm is destroyed, all the Handles returned in
  96.392 -  // PreviousVersionInfo objects will be destroyed.
  96.393 -  // Also, after this destructor is finished it will be
  96.394 -  // safe to delete the GrowableArray allocated in the
  96.395 -  // PreviousVersionInfo objects.
  96.396 +  _current_constant_pool_handle = constantPoolHandle(thread, ik->constants());
  96.397  }
  96.398  
  96.399  
  96.400  // Return the interesting information for the next previous version
  96.401  // of the klass. Returns NULL if there are no more previous versions.
  96.402 -PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
  96.403 +PreviousVersionNode* PreviousVersionWalker::next_previous_version() {
  96.404    if (_previous_versions == NULL) {
  96.405      // no previous versions so nothing to return
  96.406      return NULL;
  96.407    }
  96.408  
  96.409 -  delete _current_p;  // cleanup the previous info for the caller
  96.410 -  _current_p = NULL;  // reset to NULL so we don't delete same object twice
  96.411 +  _current_p = NULL;  // reset to NULL
  96.412 +  _current_constant_pool_handle = NULL;
  96.413  
  96.414    int length = _previous_versions->length();
  96.415  
  96.416    while (_current_index < length) {
  96.417      PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
  96.418 -    PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP, mtClass)
  96.419 -                                          PreviousVersionInfo(pv_node);
  96.420 -
  96.421 -    constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
  96.422 -    assert (!cp_h.is_null(), "null cp found in previous version");
  96.423 -
  96.424 -    // The caller will need to delete pv_info when they are done with it.
  96.425 -    _current_p = pv_info;
  96.426 -    return pv_info;
  96.427 +
  96.428 +    // Save a handle to the constant pool for this previous version,
  96.429 +    // which keeps all the methods from being deallocated.
  96.430 +    _current_constant_pool_handle = constantPoolHandle(_thread, pv_node->prev_constant_pool());
  96.431 +    _current_p = pv_node;
  96.432 +    return pv_node;
  96.433    }
  96.434  
  96.435 -  // all of the underlying nodes' info has been deleted
  96.436    return NULL;
  96.437  } // end next_previous_version()
    97.1 --- a/src/share/vm/oops/instanceKlass.hpp	Fri Sep 27 13:49:57 2013 -0400
    97.2 +++ b/src/share/vm/oops/instanceKlass.hpp	Fri Sep 27 13:53:43 2013 -0400
    97.3 @@ -245,7 +245,6 @@
    97.4    MemberNameTable* _member_names;        // Member names
    97.5    JNIid*          _jni_ids;              // First JNI identifier for static fields in this class
    97.6    jmethodID*      _methods_jmethod_ids;  // jmethodIDs corresponding to method_idnum, or NULL if none
    97.7 -  int*            _methods_cached_itable_indices;  // itable_index cache for JNI invoke corresponding to methods idnum, or NULL
    97.8    nmethodBucket*  _dependencies;         // list of dependent nmethods
    97.9    nmethod*        _osr_nmethods_head;    // Head of list of on-stack replacement nmethods for this class
   97.10    BreakpointInfo* _breakpoints;          // bpt lists, managed by Method*
   97.11 @@ -690,10 +689,6 @@
   97.12                  size_t *length_p, jmethodID* id_p);
   97.13    jmethodID jmethod_id_or_null(Method* method);
   97.14  
   97.15 -  // cached itable index support
   97.16 -  void set_cached_itable_index(size_t idnum, int index);
   97.17 -  int cached_itable_index(size_t idnum);
   97.18 -
   97.19    // annotations support
   97.20    Annotations* annotations() const          { return _annotations; }
   97.21    void set_annotations(Annotations* anno)   { _annotations = anno; }
   97.22 @@ -994,11 +989,6 @@
   97.23    void release_set_methods_jmethod_ids(jmethodID* jmeths)
   97.24           { OrderAccess::release_store_ptr(&_methods_jmethod_ids, jmeths); }
   97.25  
   97.26 -  int* methods_cached_itable_indices_acquire() const
   97.27 -         { return (int*)OrderAccess::load_ptr_acquire(&_methods_cached_itable_indices); }
   97.28 -  void release_set_methods_cached_itable_indices(int* indices)
   97.29 -         { OrderAccess::release_store_ptr(&_methods_cached_itable_indices, indices); }
   97.30 -
   97.31    // Lock during initialization
   97.32  public:
   97.33    // Lock for (1) initialization; (2) access to the ConstantPool of this class.
   97.34 @@ -1136,21 +1126,11 @@
   97.35  
   97.36  
   97.37  // A collection point for interesting information about the previous
   97.38 -// version(s) of an InstanceKlass. This class uses weak references to
   97.39 -// the information so that the information may be collected as needed
   97.40 -// by the system. If the information is shared, then a regular
   97.41 -// reference must be used because a weak reference would be seen as
   97.42 -// collectible. A GrowableArray of PreviousVersionNodes is attached
   97.43 -// to the InstanceKlass as needed. See PreviousVersionWalker below.
   97.44 +// version(s) of an InstanceKlass.  A GrowableArray of PreviousVersionNodes
   97.45 +// is attached to the InstanceKlass as needed. See PreviousVersionWalker below.
   97.46  class PreviousVersionNode : public CHeapObj<mtClass> {
   97.47   private:
   97.48 -  // A shared ConstantPool is never collected so we'll always have
   97.49 -  // a reference to it so we can update items in the cache. We'll
   97.50 -  // have a weak reference to a non-shared ConstantPool until all
   97.51 -  // of the methods (EMCP or obsolete) have been collected; the
   97.52 -  // non-shared ConstantPool becomes collectible at that point.
   97.53 -  ConstantPool*    _prev_constant_pool;  // regular or weak reference
   97.54 -  bool    _prev_cp_is_weak;     // true if not a shared ConstantPool
   97.55 +  ConstantPool*    _prev_constant_pool;
   97.56  
   97.57    // If the previous version of the InstanceKlass doesn't have any
   97.58    // EMCP methods, then _prev_EMCP_methods will be NULL. If all the
   97.59 @@ -1159,8 +1139,8 @@
   97.60    GrowableArray<Method*>* _prev_EMCP_methods;
   97.61  
   97.62  public:
   97.63 -  PreviousVersionNode(ConstantPool* prev_constant_pool, bool prev_cp_is_weak,
   97.64 -    GrowableArray<Method*>* prev_EMCP_methods);
   97.65 +  PreviousVersionNode(ConstantPool* prev_constant_pool,
   97.66 +                      GrowableArray<Method*>* prev_EMCP_methods);
   97.67    ~PreviousVersionNode();
   97.68    ConstantPool* prev_constant_pool() const {
   97.69      return _prev_constant_pool;
   97.70 @@ -1171,59 +1151,26 @@
   97.71  };
   97.72  
   97.73  
   97.74 -// A Handle-ized version of PreviousVersionNode.
   97.75 -class PreviousVersionInfo : public ResourceObj {
   97.76 - private:
   97.77 -  constantPoolHandle   _prev_constant_pool_handle;
   97.78 -  // If the previous version of the InstanceKlass doesn't have any
   97.79 -  // EMCP methods, then _prev_EMCP_methods will be NULL. Since the
   97.80 -  // methods cannot be collected while we hold a handle,
   97.81 -  // _prev_EMCP_methods should never have a length of zero.
   97.82 -  GrowableArray<methodHandle>* _prev_EMCP_method_handles;
   97.83 -
   97.84 -public:
   97.85 -  PreviousVersionInfo(PreviousVersionNode *pv_node);
   97.86 -  ~PreviousVersionInfo();
   97.87 -  constantPoolHandle prev_constant_pool_handle() const {
   97.88 -    return _prev_constant_pool_handle;
   97.89 -  }
   97.90 -  GrowableArray<methodHandle>* prev_EMCP_method_handles() const {
   97.91 -    return _prev_EMCP_method_handles;
   97.92 -  }
   97.93 -};
   97.94 -
   97.95 -
   97.96 -// Helper object for walking previous versions. This helper cleans up
   97.97 -// the Handles that it allocates when the helper object is destroyed.
   97.98 -// The PreviousVersionInfo object returned by next_previous_version()
   97.99 -// is only valid until a subsequent call to next_previous_version() or
  97.100 -// the helper object is destroyed.
  97.101 +// Helper object for walking previous versions.
  97.102  class PreviousVersionWalker : public StackObj {
  97.103   private:
  97.104 +  Thread*                               _thread;
  97.105    GrowableArray<PreviousVersionNode *>* _previous_versions;
  97.106    int                                   _current_index;
  97.107 -  // Fields for cleaning up when we are done walking the previous versions:
  97.108 -  // A HandleMark for the PreviousVersionInfo handles:
  97.109 -  HandleMark                            _hm;
  97.110  
  97.111 -  // It would be nice to have a ResourceMark field in this helper also,
  97.112 -  // but the ResourceMark code says to be careful to delete handles held
  97.113 -  // in GrowableArrays _before_ deleting the GrowableArray. Since we
  97.114 -  // can't guarantee the order in which the fields are destroyed, we
  97.115 -  // have to let the creator of the PreviousVersionWalker object do
  97.116 -  // the right thing. Also, adding a ResourceMark here causes an
  97.117 -  // include loop.
  97.118 +  // A pointer to the current node object so we can handle the deletes.
  97.119 +  PreviousVersionNode*                  _current_p;
  97.120  
  97.121 -  // A pointer to the current info object so we can handle the deletes.
  97.122 -  PreviousVersionInfo *                 _current_p;
  97.123 +  // The constant pool handle keeps all the methods in this class from being
  97.124 +  // deallocated from the metaspace during class unloading.
  97.125 +  constantPoolHandle                    _current_constant_pool_handle;
  97.126  
  97.127   public:
  97.128 -  PreviousVersionWalker(InstanceKlass *ik);
  97.129 -  ~PreviousVersionWalker();
  97.130 +  PreviousVersionWalker(Thread* thread, InstanceKlass *ik);
  97.131  
  97.132    // Return the interesting information for the next previous version
  97.133    // of the klass. Returns NULL if there are no more previous versions.
  97.134 -  PreviousVersionInfo* next_previous_version();
  97.135 +  PreviousVersionNode* next_previous_version();
  97.136  };
  97.137  
  97.138  
    98.1 --- a/src/share/vm/oops/instanceOop.hpp	Fri Sep 27 13:49:57 2013 -0400
    98.2 +++ b/src/share/vm/oops/instanceOop.hpp	Fri Sep 27 13:53:43 2013 -0400
    98.3 @@ -37,9 +37,9 @@
    98.4  
    98.5    // If compressed, the offset of the fields of the instance may not be aligned.
    98.6    static int base_offset_in_bytes() {
    98.7 -    // offset computation code breaks if UseCompressedKlassPointers
    98.8 +    // offset computation code breaks if UseCompressedClassPointers
    98.9      // only is true
   98.10 -    return (UseCompressedOops && UseCompressedKlassPointers) ?
   98.11 +    return (UseCompressedOops && UseCompressedClassPointers) ?
   98.12               klass_gap_offset_in_bytes() :
   98.13               sizeof(instanceOopDesc);
   98.14    }
    99.1 --- a/src/share/vm/oops/klass.cpp	Fri Sep 27 13:49:57 2013 -0400
    99.2 +++ b/src/share/vm/oops/klass.cpp	Fri Sep 27 13:53:43 2013 -0400
    99.3 @@ -674,13 +674,23 @@
    99.4  
    99.5  #ifndef PRODUCT
    99.6  
    99.7 -void Klass::verify_vtable_index(int i) {
    99.8 +bool Klass::verify_vtable_index(int i) {
    99.9    if (oop_is_instance()) {
   99.10 -    assert(i>=0 && i<((InstanceKlass*)this)->vtable_length()/vtableEntry::size(), "index out of bounds");
   99.11 +    int limit = ((InstanceKlass*)this)->vtable_length()/vtableEntry::size();
   99.12 +    assert(i >= 0 && i < limit, err_msg("index %d out of bounds %d", i, limit));
   99.13    } else {
   99.14      assert(oop_is_array(), "Must be");
   99.15 -    assert(i>=0 && i<((ArrayKlass*)this)->vtable_length()/vtableEntry::size(), "index out of bounds");
   99.16 +    int limit = ((ArrayKlass*)this)->vtable_length()/vtableEntry::size();
   99.17 +    assert(i >= 0 && i < limit, err_msg("index %d out of bounds %d", i, limit));
   99.18    }
   99.19 +  return true;
   99.20 +}
   99.21 +
   99.22 +bool Klass::verify_itable_index(int i) {
   99.23 +  assert(oop_is_instance(), "");
   99.24 +  int method_count = klassItable::method_count_for_interface(this);
   99.25 +  assert(i >= 0 && i < method_count, "index out of bounds");
   99.26 +  return true;
   99.27  }
   99.28  
   99.29  #endif
   100.1 --- a/src/share/vm/oops/klass.hpp	Fri Sep 27 13:49:57 2013 -0400
   100.2 +++ b/src/share/vm/oops/klass.hpp	Fri Sep 27 13:53:43 2013 -0400
   100.3 @@ -699,7 +699,8 @@
   100.4    void verify(bool check_dictionary = true) { verify_on(tty, check_dictionary); }
   100.5  
   100.6  #ifndef PRODUCT
   100.7 -  void verify_vtable_index(int index);
   100.8 +  bool verify_vtable_index(int index);
   100.9 +  bool verify_itable_index(int index);
  100.10  #endif
  100.11  
  100.12    virtual void oop_verify_on(oop obj, outputStream* st);
   101.1 --- a/src/share/vm/oops/klassVtable.cpp	Fri Sep 27 13:49:57 2013 -0400
   101.2 +++ b/src/share/vm/oops/klassVtable.cpp	Fri Sep 27 13:53:43 2013 -0400
   101.3 @@ -47,11 +47,12 @@
   101.4  
   101.5  
   101.6  // this function computes the vtable size (including the size needed for miranda
   101.7 -// methods) and the number of miranda methods in this class
   101.8 +// methods) and the number of miranda methods in this class.
   101.9  // Note on Miranda methods: Let's say there is a class C that implements
  101.10 -// interface I.  Let's say there is a method m in I that neither C nor any
  101.11 -// of its super classes implement (i.e there is no method of any access, with
  101.12 -// the same name and signature as m), then m is a Miranda method which is
  101.13 +// interface I, and none of C's superclasses implements I.
  101.14 +// Let's say there is an abstract method m in I that neither C
  101.15 +// nor any of its super classes implement (i.e there is no method of any access,
  101.16 +// with the same name and signature as m), then m is a Miranda method which is
  101.17  // entered as a public abstract method in C's vtable.  From then on it should
  101.18  // treated as any other public method in C for method over-ride purposes.
  101.19  void klassVtable::compute_vtable_size_and_num_mirandas(
  101.20 @@ -111,10 +112,13 @@
  101.21  }
  101.22  
  101.23  int klassVtable::index_of(Method* m, int len) const {
  101.24 -  assert(m->vtable_index() >= 0, "do not ask this of non-vtable methods");
  101.25 +  assert(m->has_vtable_index(), "do not ask this of non-vtable methods");
  101.26    return m->vtable_index();
  101.27  }
  101.28  
  101.29 +// Copy super class's vtable to the first part (prefix) of this class's vtable,
  101.30 +// and return the number of entries copied.  Expects that 'super' is the Java
  101.31 +// super class (arrays can have "array" super classes that must be skipped).
  101.32  int klassVtable::initialize_from_super(KlassHandle super) {
  101.33    if (super.is_null()) {
  101.34      return 0;
  101.35 @@ -139,14 +143,14 @@
  101.36    }
  101.37  }
  101.38  
  101.39 -// Revised lookup semantics   introduced 1.3 (Kestral beta)
  101.40 +//
  101.41 +// Revised lookup semantics   introduced 1.3 (Kestrel beta)
  101.42  void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) {
  101.43  
  101.44    // Note:  Arrays can have intermediate array supers.  Use java_super to skip them.
  101.45    KlassHandle super (THREAD, klass()->java_super());
  101.46    int nofNewEntries = 0;
  101.47  
  101.48 -
  101.49    if (PrintVtables && !klass()->oop_is_array()) {
  101.50      ResourceMark rm(THREAD);
  101.51      tty->print_cr("Initializing: %s", _klass->name()->as_C_string());
  101.52 @@ -174,8 +178,10 @@
  101.53      int len = methods->length();
  101.54      int initialized = super_vtable_len;
  101.55  
  101.56 -    // update_inherited_vtable can stop for gc - ensure using handles
  101.57 +    // Check each of this class's methods against super;
  101.58 +    // if override, replace in copy of super vtable, otherwise append to end
  101.59      for (int i = 0; i < len; i++) {
  101.60 +      // update_inherited_vtable can stop for gc - ensure using handles
  101.61        HandleMark hm(THREAD);
  101.62        assert(methods->at(i)->is_method(), "must be a Method*");
  101.63        methodHandle mh(THREAD, methods->at(i));
  101.64 @@ -189,11 +195,11 @@
  101.65        }
  101.66      }
  101.67  
  101.68 -    // add miranda methods; it will also update the value of initialized
  101.69 -    fill_in_mirandas(&initialized);
  101.70 +    // add miranda methods to end of vtable.
  101.71 +    initialized = fill_in_mirandas(initialized);
  101.72  
  101.73      // In class hierarchies where the accessibility is not increasing (i.e., going from private ->
  101.74 -    // package_private -> publicprotected), the vtable might actually be smaller than our initial
  101.75 +    // package_private -> public/protected), the vtable might actually be smaller than our initial
  101.76      // calculation.
  101.77      assert(initialized <= _length, "vtable initialization failed");
  101.78      for(;initialized < _length; initialized++) {
  101.79 @@ -248,14 +254,8 @@
  101.80    return superk;
  101.81  }
  101.82  
  101.83 -// Methods that are "effectively" final don't need vtable entries.
  101.84 -bool method_is_effectively_final(
  101.85 -    AccessFlags klass_flags, methodHandle target) {
  101.86 -  return target->is_final() || klass_flags.is_final() && !target->is_overpass();
  101.87 -}
  101.88 -
  101.89  // Update child's copy of super vtable for overrides
  101.90 -// OR return true if a new vtable entry is required
  101.91 +// OR return true if a new vtable entry is required.
  101.92  // Only called for InstanceKlass's, i.e. not for arrays
  101.93  // If that changed, could not use _klass as handle for klass
  101.94  bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len,
  101.95 @@ -263,6 +263,7 @@
  101.96    ResourceMark rm;
  101.97    bool allocate_new = true;
  101.98    assert(klass->oop_is_instance(), "must be InstanceKlass");
  101.99 +  assert(klass == target_method()->method_holder(), "caller resp.");
 101.100  
 101.101    // Initialize the method's vtable index to "nonvirtual".
 101.102    // If we allocate a vtable entry, we will update it to a non-negative number.
 101.103 @@ -273,11 +274,17 @@
 101.104      return false;
 101.105    }
 101.106  
 101.107 -  if (method_is_effectively_final(klass->access_flags(), target_method)) {
 101.108 +  if (target_method->is_final_method(klass->access_flags())) {
 101.109      // a final method never needs a new entry; final methods can be statically
 101.110      // resolved and they have to be present in the vtable only if they override
 101.111      // a super's method, in which case they re-use its entry
 101.112      allocate_new = false;
 101.113 +  } else if (klass->is_interface()) {
 101.114 +    allocate_new = false;  // see note below in needs_new_vtable_entry
 101.115 +    // An interface never allocates new vtable slots, only inherits old ones.
 101.116 +    // This method will either be assigned its own itable index later,
 101.117 +    // or be assigned an inherited vtable index in the loop below.
 101.118 +    target_method()->set_vtable_index(Method::pending_itable_index);
 101.119    }
 101.120  
 101.121    // we need a new entry if there is no superclass
 101.122 @@ -411,8 +418,14 @@
 101.123                                           Symbol* classname,
 101.124                                           AccessFlags class_flags,
 101.125                                           TRAPS) {
 101.126 +  if (class_flags.is_interface()) {
 101.127 +    // Interfaces do not use vtables, so there is no point to assigning
 101.128 +    // a vtable index to any of their methods.  If we refrain from doing this,
 101.129 +    // we can use Method::_vtable_index to hold the itable index
 101.130 +    return false;
 101.131 +  }
 101.132  
 101.133 -  if (method_is_effectively_final(class_flags, target_method) ||
 101.134 +  if (target_method->is_final_method(class_flags) ||
 101.135        // a final method never needs a new entry; final methods can be statically
 101.136        // resolved and they have to be present in the vtable only if they override
 101.137        // a super's method, in which case they re-use its entry
 101.138 @@ -500,7 +513,8 @@
 101.139    return Method::invalid_vtable_index;
 101.140  }
 101.141  
 101.142 -// check if an entry is miranda
 101.143 +// check if an entry at an index is miranda
 101.144 +// requires that method m at entry be declared ("held") by an interface.
 101.145  bool klassVtable::is_miranda_entry_at(int i) {
 101.146    Method* m = method_at(i);
 101.147    Klass* method_holder = m->method_holder();
 101.148 @@ -516,7 +530,9 @@
 101.149    return false;
 101.150  }
 101.151  
 101.152 -// check if a method is a miranda method, given a class's methods table and it's super
 101.153 +// check if a method is a miranda method, given a class's methods table and its super
 101.154 +// "miranda" means not static, not defined by this class, and not defined
 101.155 +// in super unless it is private and therefore inaccessible to this class.
 101.156  // the caller must make sure that the method belongs to an interface implemented by the class
 101.157  bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods, Klass* super) {
 101.158    if (m->is_static()) {
 101.159 @@ -541,6 +557,14 @@
 101.160    return false;
 101.161  }
 101.162  
 101.163 +// Scans current_interface_methods for miranda methods that do not
 101.164 +// already appear in new_mirandas and are also not defined-and-non-private
 101.165 +// in super (superclass).  These mirandas are added to all_mirandas if it is
 101.166 +// not null; in addition, those that are not duplicates of miranda methods
 101.167 +// inherited by super from its interfaces are added to new_mirandas.
 101.168 +// Thus, new_mirandas will be the set of mirandas that this class introduces,
 101.169 +// all_mirandas will be the set of all mirandas applicable to this class
 101.170 +// including all defined in superclasses.
 101.171  void klassVtable::add_new_mirandas_to_lists(
 101.172      GrowableArray<Method*>* new_mirandas, GrowableArray<Method*>* all_mirandas,
 101.173      Array<Method*>* current_interface_methods, Array<Method*>* class_methods,
 101.174 @@ -599,17 +623,22 @@
 101.175    }
 101.176  }
 101.177  
 101.178 -// fill in mirandas
 101.179 -void klassVtable::fill_in_mirandas(int* initialized) {
 101.180 +// Discover miranda methods ("miranda" = "interface abstract, no binding"),
 101.181 +// and append them into the vtable starting at index initialized,
 101.182 +// return the new value of initialized.
 101.183 +int klassVtable::fill_in_mirandas(int initialized) {
 101.184    GrowableArray<Method*> mirandas(20);
 101.185    get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(),
 101.186                 ik()->local_interfaces());
 101.187    for (int i = 0; i < mirandas.length(); i++) {
 101.188 -    put_method_at(mirandas.at(i), *initialized);
 101.189 -    ++(*initialized);
 101.190 +    put_method_at(mirandas.at(i), initialized);
 101.191 +    ++initialized;
 101.192    }
 101.193 +  return initialized;
 101.194  }
 101.195  
 101.196 +// Copy this class's vtable to the vtable beginning at start.
 101.197 +// Used to copy superclass vtable to prefix of subclass's vtable.
 101.198  void klassVtable::copy_vtable_to(vtableEntry* start) {
 101.199    Copy::disjoint_words((HeapWord*)table(), (HeapWord*)start, _length * vtableEntry::size());
 101.200  }
 101.201 @@ -723,6 +752,12 @@
 101.202  
 101.203  // Initialization
 101.204  void klassItable::initialize_itable(bool checkconstraints, TRAPS) {
 101.205 +  if (_klass->is_interface()) {
 101.206 +    // This needs to go after vtable indexes are assigned but
 101.207 +    // before implementors need to know the number of itable indexes.
 101.208 +    assign_itable_indexes_for_interface(_klass());
 101.209 +  }
 101.210 +
 101.211    // Cannot be setup doing bootstrapping, interfaces don't have
 101.212    // itables, and klass with only ones entry have empty itables
 101.213    if (Universe::is_bootstrapping() ||
 101.214 @@ -754,45 +789,89 @@
 101.215  }
 101.216  
 101.217  
 101.218 +inline bool interface_method_needs_itable_index(Method* m) {
 101.219 +  if (m->is_static())           return false;   // e.g., Stream.empty
 101.220 +  if (m->is_initializer())      return false;   // <init> or <clinit>
 101.221 +  // If an interface redeclares a method from java.lang.Object,
 101.222 +  // it should already have a vtable index, don't touch it.
 101.223 +  // e.g., CharSequence.toString (from initialize_vtable)
 101.224 +  // if (m->has_vtable_index())  return false; // NO!
 101.225 +  return true;
 101.226 +}
 101.227 +
 101.228 +int klassItable::assign_itable_indexes_for_interface(Klass* klass) {
 101.229 +  // an interface does not have an itable, but its methods need to be numbered
 101.230 +  if (TraceItables) tty->print_cr("%3d: Initializing itable for interface %s", ++initialize_count,
 101.231 +                                  klass->name()->as_C_string());
 101.232 +  Array<Method*>* methods = InstanceKlass::cast(klass)->methods();
 101.233 +  int nof_methods = methods->length();
 101.234 +  int ime_num = 0;
 101.235 +  for (int i = 0; i < nof_methods; i++) {
 101.236 +    Method* m = methods->at(i);
 101.237 +    if (interface_method_needs_itable_index(m)) {
 101.238 +      assert(!m->is_final_method(), "no final interface methods");
 101.239 +      // If m is already assigned a vtable index, do not disturb it.
 101.240 +      if (!m->has_vtable_index()) {
 101.241 +        assert(m->vtable_index() == Method::pending_itable_index, "set by initialize_vtable");
 101.242 +        m->set_itable_index(ime_num);
 101.243 +        // Progress to next itable entry
 101.244 +        ime_num++;
 101.245 +      }
 101.246 +    }
 101.247 +  }
 101.248 +  assert(ime_num == method_count_for_interface(klass), "proper sizing");
 101.249 +  return ime_num;
 101.250 +}
 101.251 +
 101.252 +int klassItable::method_count_for_interface(Klass* interf) {
 101.253 +  assert(interf->oop_is_instance(), "must be");
 101.254 +  assert(interf->is_interface(), "must be");
 101.255 +  Array<Method*>* methods = InstanceKlass::cast(interf)->methods();
 101.256 +  int nof_methods = methods->length();
 101.257 +  while (nof_methods > 0) {
 101.258 +    Method* m = methods->at(nof_methods-1);
 101.259 +    if (m->has_itable_index()) {
 101.260 +      int length = m->itable_index() + 1;
 101.261 +#ifdef ASSERT
  101.262 +      while (nof_methods > 0) {
 101.263 +        m = methods->at(--nof_methods);
 101.264 +        assert(!m->has_itable_index() || m->itable_index() < length, "");
 101.265 +      }
 101.266 +#endif //ASSERT
 101.267 +      return length;  // return the rightmost itable index, plus one
 101.268 +    }
 101.269 +    nof_methods -= 1;
 101.270 +  }
 101.271 +  // no methods have itable indexes
 101.272 +  return 0;
 101.273 +}
 101.274 +
 101.275 +
 101.276  void klassItable::initialize_itable_for_interface(int method_table_offset, KlassHandle interf_h, bool checkconstraints, TRAPS) {
 101.277    Array<Method*>* methods = InstanceKlass::cast(interf_h())->methods();
 101.278    int nof_methods = methods->length();
 101.279    HandleMark hm;
 101.280 -  KlassHandle klass = _klass;
 101.281    assert(nof_methods > 0, "at least one method must exist for interface to be in vtable");
 101.282    Handle interface_loader (THREAD, InstanceKlass::cast(interf_h())->class_loader());
 101.283 -  int ime_num = 0;
 101.284  
 101.285 -  // Skip first Method* if it is a class initializer
 101.286 -  int i = methods->at(0)->is_static_initializer() ? 1 : 0;
 101.287 -
 101.288 -  // m, method_name, method_signature, klass reset each loop so they
 101.289 -  // don't need preserving across check_signature_loaders call
 101.290 -  // methods needs a handle in case of gc from check_signature_loaders
 101.291 -  for(; i < nof_methods; i++) {
 101.292 +  int ime_count = method_count_for_interface(interf_h());
 101.293 +  for (int i = 0; i < nof_methods; i++) {
 101.294      Method* m = methods->at(i);
 101.295 -    Symbol* method_name = m->name();
 101.296 -    Symbol* method_signature = m->signature();
 101.297 -
 101.298 -    // This is same code as in Linkresolver::lookup_instance_method_in_klasses
 101.299 -    Method* target = klass->uncached_lookup_method(method_name, method_signature);
 101.300 -    while (target != NULL && target->is_static()) {
 101.301 -      // continue with recursive lookup through the superclass
 101.302 -      Klass* super = target->method_holder()->super();
 101.303 -      target = (super == NULL) ? (Method*)NULL : super->uncached_lookup_method(method_name, method_signature);
 101.304 +    methodHandle target;
 101.305 +    if (m->has_itable_index()) {
 101.306 +      LinkResolver::lookup_instance_method_in_klasses(target, _klass, m->name(), m->signature(), CHECK);
 101.307      }
 101.308      if (target == NULL || !target->is_public() || target->is_abstract()) {
 101.309        // Entry do not resolve. Leave it empty
 101.310      } else {
 101.311        // Entry did resolve, check loader constraints before initializing
 101.312        // if checkconstraints requested
 101.313 -      methodHandle  target_h (THREAD, target); // preserve across gc
 101.314        if (checkconstraints) {
 101.315          Handle method_holder_loader (THREAD, target->method_holder()->class_loader());
 101.316          if (method_holder_loader() != interface_loader()) {
 101.317            ResourceMark rm(THREAD);
 101.318            Symbol* failed_type_symbol =
 101.319 -            SystemDictionary::check_signature_loaders(method_signature,
 101.320 +            SystemDictionary::check_signature_loaders(m->signature(),
 101.321                                                        method_holder_loader,
 101.322                                                        interface_loader,
 101.323                                                        true, CHECK);
 101.324 @@ -803,9 +882,9 @@
 101.325                "and the class loader (instance of %s) for interface "
 101.326                "%s have different Class objects for the type %s "
 101.327                "used in the signature";
 101.328 -            char* sig = target_h()->name_and_sig_as_C_string();
 101.329 +            char* sig = target()->name_and_sig_as_C_string();
 101.330              const char* loader1 = SystemDictionary::loader_name(method_holder_loader());
 101.331 -            char* current = klass->name()->as_C_string();
 101.332 +            char* current = _klass->name()->as_C_string();
 101.333              const char* loader2 = SystemDictionary::loader_name(interface_loader());
 101.334              char* iface = InstanceKlass::cast(interf_h())->name()->as_C_string();
 101.335              char* failed_type_name = failed_type_symbol->as_C_string();
 101.336 @@ -821,10 +900,10 @@
 101.337        }
 101.338  
 101.339        // ime may have moved during GC so recalculate address
 101.340 -      itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target_h());
 101.341 +      int ime_num = m->itable_index();
 101.342 +      assert(ime_num < ime_count, "oob");
 101.343 +      itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target());
 101.344      }
 101.345 -    // Progress to next entry
 101.346 -    ime_num++;
 101.347    }
 101.348  }
 101.349  
 101.350 @@ -913,20 +992,22 @@
 101.351    virtual void doit(Klass* intf, int method_count) = 0;
 101.352  };
 101.353  
 101.354 -// Visit all interfaces with at-least one method (excluding <clinit>)
 101.355 +// Visit all interfaces with at least one itable method
 101.356  void visit_all_interfaces(Array<Klass*>* transitive_intf, InterfaceVisiterClosure *blk) {
 101.357    // Handle array argument
 101.358    for(int i = 0; i < transitive_intf->length(); i++) {
 101.359      Klass* intf = transitive_intf->at(i);
 101.360      assert(intf->is_interface(), "sanity check");
 101.361  
 101.362 -    // Find no. of methods excluding a <clinit>
 101.363 -    int method_count = InstanceKlass::cast(intf)->methods()->length();
 101.364 -    if (method_count > 0) {
 101.365 -      Method* m = InstanceKlass::cast(intf)->methods()->at(0);
 101.366 -      assert(m != NULL && m->is_method(), "sanity check");
 101.367 -      if (m->name() == vmSymbols::object_initializer_name()) {
 101.368 -        method_count--;
 101.369 +    // Find no. of itable methods
 101.370 +    int method_count = 0;
 101.371 +    // method_count = klassItable::method_count_for_interface(intf);
 101.372 +    Array<Method*>* methods = InstanceKlass::cast(intf)->methods();
 101.373 +    if (methods->length() > 0) {
 101.374 +      for (int i = methods->length(); --i >= 0; ) {
 101.375 +        if (interface_method_needs_itable_index(methods->at(i))) {
 101.376 +          method_count++;
 101.377 +        }
 101.378        }
 101.379      }
 101.380  
 101.381 @@ -1024,40 +1105,26 @@
 101.382  }
 101.383  
 101.384  
 101.385 -// m must be a method in an interface
 101.386 -int klassItable::compute_itable_index(Method* m) {
 101.387 -  InstanceKlass* intf = m->method_holder();
 101.388 -  assert(intf->is_interface(), "sanity check");
 101.389 -  Array<Method*>* methods = intf->methods();
 101.390 -  int index = 0;
 101.391 -  while(methods->at(index) != m) {
 101.392 -    index++;
 101.393 -    assert(index < methods->length(), "should find index for resolve_invoke");
 101.394 -  }
 101.395 -  // Adjust for <clinit>, which is left out of table if first method
 101.396 -  if (methods->length() > 0 && methods->at(0)->is_static_initializer()) {
 101.397 -    index--;
 101.398 -  }
 101.399 -  return index;
 101.400 -}
 101.401 -
 101.402 -
 101.403 -// inverse to compute_itable_index
 101.404 +// inverse to itable_index
 101.405  Method* klassItable::method_for_itable_index(Klass* intf, int itable_index) {
 101.406    assert(InstanceKlass::cast(intf)->is_interface(), "sanity check");
 101.407 +  assert(intf->verify_itable_index(itable_index), "");
 101.408    Array<Method*>* methods = InstanceKlass::cast(intf)->methods();
 101.409  
 101.410 -  int index = itable_index;
 101.411 -  // Adjust for <clinit>, which is left out of table if first method
 101.412 -  if (methods->length() > 0 && methods->at(0)->is_static_initializer()) {
 101.413 -    index++;
 101.414 -  }
 101.415 -
 101.416 -  if (itable_index < 0 || index >= methods->length())
 101.417 +  if (itable_index < 0 || itable_index >= method_count_for_interface(intf))
 101.418      return NULL;                // help caller defend against bad indexes
 101.419  
 101.420 +  int index = itable_index;
 101.421    Method* m = methods->at(index);
 101.422 -  assert(compute_itable_index(m) == itable_index, "correct inverse");
 101.423 +  int index2 = -1;
 101.424 +  while (!m->has_itable_index() ||
 101.425 +         (index2 = m->itable_index()) != itable_index) {
 101.426 +    assert(index2 < itable_index, "monotonic");
 101.427 +    if (++index == methods->length())
 101.428 +      return NULL;
 101.429 +    m = methods->at(index);
 101.430 +  }
 101.431 +  assert(m->itable_index() == itable_index, "correct inverse");
 101.432  
 101.433    return m;
 101.434  }
   102.1 --- a/src/share/vm/oops/klassVtable.hpp	Fri Sep 27 13:49:57 2013 -0400
   102.2 +++ b/src/share/vm/oops/klassVtable.hpp	Fri Sep 27 13:53:43 2013 -0400
   102.3 @@ -124,7 +124,7 @@
   102.4  
   102.5    // support for miranda methods
   102.6    bool is_miranda_entry_at(int i);
   102.7 -  void fill_in_mirandas(int* initialized);
   102.8 +  int fill_in_mirandas(int initialized);
   102.9    static bool is_miranda(Method* m, Array<Method*>* class_methods, Klass* super);
  102.10    static void add_new_mirandas_to_lists(
  102.11        GrowableArray<Method*>* new_mirandas,
  102.12 @@ -150,6 +150,8 @@
  102.13  //      from_compiled_code_entry_point -> nmethod entry point
  102.14  //      from_interpreter_entry_point   -> i2cadapter
  102.15  class vtableEntry VALUE_OBJ_CLASS_SPEC {
  102.16 +  friend class VMStructs;
  102.17 +
  102.18   public:
  102.19    // size in words
  102.20    static int size() {
  102.21 @@ -288,12 +290,12 @@
  102.22  #endif // INCLUDE_JVMTI
  102.23  
  102.24    // Setup of itable
  102.25 +  static int assign_itable_indexes_for_interface(Klass* klass);
  102.26 +  static int method_count_for_interface(Klass* klass);
  102.27    static int compute_itable_size(Array<Klass*>* transitive_interfaces);
  102.28    static void setup_itable_offset_table(instanceKlassHandle klass);
  102.29  
  102.30    // Resolving of method to index
  102.31 -  static int compute_itable_index(Method* m);
  102.32 -  // ...and back again:
  102.33    static Method* method_for_itable_index(Klass* klass, int itable_index);
  102.34  
  102.35    // Debugging/Statistics
   103.1 --- a/src/share/vm/oops/method.cpp	Fri Sep 27 13:49:57 2013 -0400
   103.2 +++ b/src/share/vm/oops/method.cpp	Fri Sep 27 13:53:43 2013 -0400
   103.3 @@ -509,24 +509,31 @@
   103.4    return _access_flags.has_loops();
   103.5  }
   103.6  
   103.7 +bool Method::is_final_method(AccessFlags class_access_flags) const {
   103.8 +  // or "does_not_require_vtable_entry"
   103.9 +  // overpass can occur, is not final (reuses vtable entry)
  103.10 +  // private methods get vtable entries for backward class compatibility.
  103.11 +  if (is_overpass())  return false;
  103.12 +  return is_final() || class_access_flags.is_final();
  103.13 +}
  103.14  
  103.15  bool Method::is_final_method() const {
  103.16 -  // %%% Should return true for private methods also,
  103.17 -  // since there is no way to override them.
  103.18 -  return is_final() || method_holder()->is_final();
  103.19 +  return is_final_method(method_holder()->access_flags());
  103.20  }
  103.21  
  103.22 -
  103.23 -bool Method::is_strict_method() const {
  103.24 -  return is_strict();
  103.25 -}
  103.26 -
  103.27 -
  103.28 -bool Method::can_be_statically_bound() const {
  103.29 -  if (is_final_method())  return true;
  103.30 +bool Method::can_be_statically_bound(AccessFlags class_access_flags) const {
  103.31 +  if (is_final_method(class_access_flags))  return true;
  103.32 +#ifdef ASSERT
  103.33 +  bool is_nonv = (vtable_index() == nonvirtual_vtable_index);
  103.34 +  if (class_access_flags.is_interface())  assert(is_nonv == is_static(), err_msg("is_nonv=%s", is_nonv));
  103.35 +#endif
  103.36 +  assert(valid_vtable_index() || valid_itable_index(), "method must be linked before we ask this question");
  103.37    return vtable_index() == nonvirtual_vtable_index;
  103.38  }
  103.39  
  103.40 +bool Method::can_be_statically_bound() const {
  103.41 +  return can_be_statically_bound(method_holder()->access_flags());
  103.42 +}
  103.43  
  103.44  bool Method::is_accessor() const {
  103.45    if (code_size() != 5) return false;
  103.46 @@ -967,7 +974,7 @@
  103.47  
  103.48    assert(ik->is_subclass_of(method_holder()), "should be subklass");
  103.49    assert(ik->vtable() != NULL, "vtable should exist");
  103.50 -  if (vtable_index() == nonvirtual_vtable_index) {
  103.51 +  if (!has_vtable_index()) {
  103.52      return false;
  103.53    } else {
  103.54      Method* vt_m = ik->method_at_vtable(vtable_index());
  103.55 @@ -1959,7 +1966,7 @@
  103.56  
  103.57  void Method::print_value_on(outputStream* st) const {
  103.58    assert(is_method(), "must be method");
  103.59 -  st->print_cr(internal_name());
  103.60 +  st->print(internal_name());
  103.61    print_address_on(st);
  103.62    st->print(" ");
  103.63    name()->print_value_on(st);
  103.64 @@ -1967,6 +1974,7 @@
  103.65    signature()->print_value_on(st);
  103.66    st->print(" in ");
  103.67    method_holder()->print_value_on(st);
  103.68 +  if (WizardMode) st->print("#%d", _vtable_index);
  103.69    if (WizardMode) st->print("[%d,%d]", size_of_parameters(), max_locals());
  103.70    if (WizardMode && code() != NULL) st->print(" ((nmethod*)%p)", code());
  103.71  }
   104.1 --- a/src/share/vm/oops/method.hpp	Fri Sep 27 13:49:57 2013 -0400
   104.2 +++ b/src/share/vm/oops/method.hpp	Fri Sep 27 13:53:43 2013 -0400
   104.3 @@ -448,16 +448,22 @@
   104.4    enum VtableIndexFlag {
   104.5      // Valid vtable indexes are non-negative (>= 0).
   104.6      // These few negative values are used as sentinels.
   104.7 -    highest_unused_vtable_index_value = -5,
   104.8 +    itable_index_max        = -10, // first itable index, growing downward
   104.9 +    pending_itable_index    = -9,  // itable index will be assigned
  104.10      invalid_vtable_index    = -4,  // distinct from any valid vtable index
  104.11      garbage_vtable_index    = -3,  // not yet linked; no vtable layout yet
  104.12      nonvirtual_vtable_index = -2   // there is no need for vtable dispatch
  104.13      // 6330203 Note:  Do not use -1, which was overloaded with many meanings.
  104.14    };
  104.15    DEBUG_ONLY(bool valid_vtable_index() const     { return _vtable_index >= nonvirtual_vtable_index; })
  104.16 -  int  vtable_index() const                      { assert(valid_vtable_index(), "");
  104.17 -                                                   return _vtable_index; }
  104.18 +  bool has_vtable_index() const                  { return _vtable_index >= 0; }
  104.19 +  int  vtable_index() const                      { return _vtable_index; }
  104.20    void set_vtable_index(int index)               { _vtable_index = index; }
  104.21 +  DEBUG_ONLY(bool valid_itable_index() const     { return _vtable_index <= pending_itable_index; })
  104.22 +  bool has_itable_index() const                  { return _vtable_index <= itable_index_max; }
  104.23 +  int  itable_index() const                      { assert(valid_itable_index(), "");
  104.24 +                                                   return itable_index_max - _vtable_index; }
  104.25 +  void set_itable_index(int index)               { _vtable_index = itable_index_max - index; assert(valid_itable_index(), ""); }
  104.26  
  104.27    // interpreter entry
  104.28    address interpreter_entry() const              { return _i2i_entry; }
  104.29 @@ -560,10 +566,11 @@
  104.30  
  104.31    // checks method and its method holder
  104.32    bool is_final_method() const;
  104.33 -  bool is_strict_method() const;
  104.34 +  bool is_final_method(AccessFlags class_access_flags) const;
  104.35  
  104.36    // true if method needs no dynamic dispatch (final and/or no vtable entry)
  104.37    bool can_be_statically_bound() const;
  104.38 +  bool can_be_statically_bound(AccessFlags class_access_flags) const;
  104.39  
  104.40    // returns true if the method has any backward branches.
  104.41    bool has_loops() {
  104.42 @@ -740,10 +747,6 @@
  104.43    // so handles are not used to avoid deadlock.
  104.44    jmethodID find_jmethod_id_or_null()               { return method_holder()->jmethod_id_or_null(this); }
  104.45  
  104.46 -  // JNI static invoke cached itable index accessors
  104.47 -  int cached_itable_index()                         { return method_holder()->cached_itable_index(method_idnum()); }
  104.48 -  void set_cached_itable_index(int index)           { method_holder()->set_cached_itable_index(method_idnum(), index); }
  104.49 -
  104.50    // Support for inlining of intrinsic methods
  104.51    vmIntrinsics::ID intrinsic_id() const          { return (vmIntrinsics::ID) _intrinsic_id;           }
  104.52    void     set_intrinsic_id(vmIntrinsics::ID id) {                           _intrinsic_id = (u1) id; }
   105.1 --- a/src/share/vm/oops/methodData.hpp	Fri Sep 27 13:49:57 2013 -0400
   105.2 +++ b/src/share/vm/oops/methodData.hpp	Fri Sep 27 13:53:43 2013 -0400
   105.3 @@ -72,6 +72,8 @@
   105.4  //
   105.5  // Overlay for generic profiling data.
   105.6  class DataLayout VALUE_OBJ_CLASS_SPEC {
   105.7 +  friend class VMStructs;
   105.8 +
   105.9  private:
  105.10    // Every data layout begins with a header.  This header
  105.11    // contains a tag, which is used to indicate the size/layout
   106.1 --- a/src/share/vm/oops/oop.inline.hpp	Fri Sep 27 13:49:57 2013 -0400
   106.2 +++ b/src/share/vm/oops/oop.inline.hpp	Fri Sep 27 13:53:43 2013 -0400
   106.3 @@ -69,7 +69,7 @@
   106.4  }
   106.5  
   106.6  inline Klass* oopDesc::klass() const {
   106.7 -  if (UseCompressedKlassPointers) {
   106.8 +  if (UseCompressedClassPointers) {
   106.9      return Klass::decode_klass_not_null(_metadata._compressed_klass);
  106.10    } else {
  106.11      return _metadata._klass;
  106.12 @@ -78,7 +78,7 @@
  106.13  
  106.14  inline Klass* oopDesc::klass_or_null() const volatile {
  106.15    // can be NULL in CMS
  106.16 -  if (UseCompressedKlassPointers) {
  106.17 +  if (UseCompressedClassPointers) {
  106.18      return Klass::decode_klass(_metadata._compressed_klass);
  106.19    } else {
  106.20      return _metadata._klass;
  106.21 @@ -86,19 +86,19 @@
  106.22  }
  106.23  
  106.24  inline int oopDesc::klass_gap_offset_in_bytes() {
  106.25 -  assert(UseCompressedKlassPointers, "only applicable to compressed klass pointers");
  106.26 +  assert(UseCompressedClassPointers, "only applicable to compressed klass pointers");
  106.27    return oopDesc::klass_offset_in_bytes() + sizeof(narrowKlass);
  106.28  }
  106.29  
  106.30  inline Klass** oopDesc::klass_addr() {
  106.31    // Only used internally and with CMS and will not work with
  106.32    // UseCompressedOops
  106.33 -  assert(!UseCompressedKlassPointers, "only supported with uncompressed klass pointers");
  106.34 +  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
  106.35    return (Klass**) &_metadata._klass;
  106.36  }
  106.37  
  106.38  inline narrowKlass* oopDesc::compressed_klass_addr() {
  106.39 -  assert(UseCompressedKlassPointers, "only called by compressed klass pointers");
  106.40 +  assert(UseCompressedClassPointers, "only called by compressed klass pointers");
  106.41    return &_metadata._compressed_klass;
  106.42  }
  106.43  
  106.44 @@ -106,7 +106,7 @@
  106.45    // since klasses are promoted no store check is needed
  106.46    assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
  106.47    assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
  106.48 -  if (UseCompressedKlassPointers) {
  106.49 +  if (UseCompressedClassPointers) {
  106.50      *compressed_klass_addr() = Klass::encode_klass_not_null(k);
  106.51    } else {
  106.52      *klass_addr() = k;
  106.53 @@ -118,7 +118,7 @@
  106.54  }
  106.55  
  106.56  inline void oopDesc::set_klass_gap(int v) {
  106.57 -  if (UseCompressedKlassPointers) {
  106.58 +  if (UseCompressedClassPointers) {
  106.59      *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  106.60    }
  106.61  }
  106.62 @@ -126,7 +126,7 @@
  106.63  inline void oopDesc::set_klass_to_list_ptr(oop k) {
  106.64    // This is only to be used during GC, for from-space objects, so no
  106.65    // barrier is needed.
  106.66 -  if (UseCompressedKlassPointers) {
  106.67 +  if (UseCompressedClassPointers) {
  106.68      _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
  106.69    } else {
  106.70      _metadata._klass = (Klass*)(address)k;
  106.71 @@ -135,7 +135,7 @@
  106.72  
  106.73  inline oop oopDesc::list_ptr_from_klass() {
  106.74    // This is only to be used during GC, for from-space objects.
  106.75 -  if (UseCompressedKlassPointers) {
  106.76 +  if (UseCompressedClassPointers) {
  106.77      return decode_heap_oop((narrowOop)_metadata._compressed_klass);
  106.78    } else {
  106.79      // Special case for GC
   107.1 --- a/src/share/vm/oops/symbol.hpp	Fri Sep 27 13:49:57 2013 -0400
   107.2 +++ b/src/share/vm/oops/symbol.hpp	Fri Sep 27 13:53:43 2013 -0400
   107.3 @@ -45,7 +45,7 @@
   107.4  // in the SymbolTable bucket (the _literal field in HashtableEntry)
   107.5  // that points to the Symbol.  All other stores of a Symbol*
   107.6  // to a field of a persistent variable (e.g., the _name filed in
   107.7 -// FieldAccessInfo or _ptr in a CPSlot) is reference counted.
   107.8 +// fieldDescriptor or _ptr in a CPSlot) is reference counted.
   107.9  //
  107.10  // 1) The lookup of a "name" in the SymbolTable either creates a Symbol F for
  107.11  // "name" and returns a pointer to F or finds a pre-existing Symbol F for
   108.1 --- a/src/share/vm/opto/bytecodeInfo.cpp	Fri Sep 27 13:49:57 2013 -0400
   108.2 +++ b/src/share/vm/opto/bytecodeInfo.cpp	Fri Sep 27 13:53:43 2013 -0400
   108.3 @@ -123,7 +123,7 @@
   108.4    // Allows targeted inlining
   108.5    if(callee_method->should_inline()) {
   108.6      *wci_result = *(WarmCallInfo::always_hot());
   108.7 -    if (PrintInlining && Verbose) {
   108.8 +    if (C->print_inlining() && Verbose) {
   108.9        CompileTask::print_inline_indent(inline_level());
  108.10        tty->print_cr("Inlined method is hot: ");
  108.11      }
  108.12 @@ -137,7 +137,7 @@
  108.13    if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
  108.14       size < InlineThrowMaxSize ) {
  108.15      wci_result->set_profit(wci_result->profit() * 100);
  108.16 -    if (PrintInlining && Verbose) {
  108.17 +    if (C->print_inlining() && Verbose) {
  108.18        CompileTask::print_inline_indent(inline_level());
  108.19        tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
  108.20      }
  108.21 @@ -491,7 +491,7 @@
  108.22        C->log()->inline_fail(inline_msg);
  108.23      }
  108.24    }
  108.25 -  if (PrintInlining) {
  108.26 +  if (C->print_inlining()) {
  108.27      C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg);
  108.28      if (callee_method == NULL) tty->print(" callee not monotonic or profiled");
  108.29      if (Verbose && callee_method) {
  108.30 @@ -540,7 +540,7 @@
  108.31  
  108.32  #ifndef PRODUCT
  108.33    if (UseOldInlining && InlineWarmCalls
  108.34 -      && (PrintOpto || PrintOptoInlining || PrintInlining)) {
  108.35 +      && (PrintOpto || C->print_inlining())) {
  108.36      bool cold = wci.is_cold();
  108.37      bool hot  = !cold && wci.is_hot();
  108.38      bool old_cold = !success;
  108.39 @@ -617,7 +617,7 @@
  108.40               callee_method->is_compiled_lambda_form()) {
  108.41        max_inline_level_adjust += 1;  // don't count method handle calls from java.lang.invoke implem
  108.42      }
  108.43 -    if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) {
  108.44 +    if (max_inline_level_adjust != 0 && C->print_inlining() && (Verbose || WizardMode)) {
  108.45        CompileTask::print_inline_indent(inline_level());
  108.46        tty->print_cr(" \\-> discounting inline depth");
  108.47      }
   109.1 --- a/src/share/vm/opto/callGenerator.hpp	Fri Sep 27 13:49:57 2013 -0400
   109.2 +++ b/src/share/vm/opto/callGenerator.hpp	Fri Sep 27 13:53:43 2013 -0400
   109.3 @@ -159,8 +159,9 @@
   109.4    virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }
   109.5  
   109.6    static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
   109.7 -    if (PrintInlining)
   109.8 +    if (C->print_inlining()) {
   109.9        C->print_inlining(callee, inline_level, bci, msg);
  109.10 +    }
  109.11    }
  109.12  };
  109.13  
   110.1 --- a/src/share/vm/opto/cfgnode.cpp	Fri Sep 27 13:49:57 2013 -0400
   110.2 +++ b/src/share/vm/opto/cfgnode.cpp	Fri Sep 27 13:53:43 2013 -0400
   110.3 @@ -1932,7 +1932,7 @@
   110.4  #ifdef _LP64
   110.5    // Push DecodeN/DecodeNKlass down through phi.
   110.6    // The rest of phi graph will transform by split EncodeP node though phis up.
   110.7 -  if ((UseCompressedOops || UseCompressedKlassPointers) && can_reshape && progress == NULL) {
   110.8 +  if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == NULL) {
   110.9      bool may_push = true;
  110.10      bool has_decodeN = false;
  110.11      bool is_decodeN = false;
   111.1 --- a/src/share/vm/opto/chaitin.cpp	Fri Sep 27 13:49:57 2013 -0400
   111.2 +++ b/src/share/vm/opto/chaitin.cpp	Fri Sep 27 13:53:43 2013 -0400
   111.3 @@ -122,40 +122,23 @@
   111.4    return score;
   111.5  }
   111.6  
   111.7 -LRG_List::LRG_List( uint max ) : _cnt(max), _max(max), _lidxs(NEW_RESOURCE_ARRAY(uint,max)) {
   111.8 -  memset( _lidxs, 0, sizeof(uint)*max );
   111.9 -}
  111.10 -
  111.11 -void LRG_List::extend( uint nidx, uint lidx ) {
  111.12 -  _nesting.check();
  111.13 -  if( nidx >= _max ) {
  111.14 -    uint size = 16;
  111.15 -    while( size <= nidx ) size <<=1;
  111.16 -    _lidxs = REALLOC_RESOURCE_ARRAY( uint, _lidxs, _max, size );
  111.17 -    _max = size;
  111.18 -  }
  111.19 -  while( _cnt <= nidx )
  111.20 -    _lidxs[_cnt++] = 0;
  111.21 -  _lidxs[nidx] = lidx;
  111.22 -}
  111.23 -
  111.24  #define NUMBUCKS 3
  111.25  
  111.26  // Straight out of Tarjan's union-find algorithm
  111.27  uint LiveRangeMap::find_compress(uint lrg) {
  111.28    uint cur = lrg;
  111.29 -  uint next = _uf_map[cur];
  111.30 +  uint next = _uf_map.at(cur);
  111.31    while (next != cur) { // Scan chain of equivalences
  111.32      assert( next < cur, "always union smaller");
  111.33      cur = next; // until find a fixed-point
  111.34 -    next = _uf_map[cur];
  111.35 +    next = _uf_map.at(cur);
  111.36    }
  111.37  
  111.38    // Core of union-find algorithm: update chain of
  111.39    // equivalences to be equal to the root.
  111.40    while (lrg != next) {
  111.41 -    uint tmp = _uf_map[lrg];
  111.42 -    _uf_map.map(lrg, next);
  111.43 +    uint tmp = _uf_map.at(lrg);
  111.44 +    _uf_map.at_put(lrg, next);
  111.45      lrg = tmp;
  111.46    }
  111.47    return lrg;
  111.48 @@ -165,10 +148,10 @@
  111.49  void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
  111.50    _max_lrg_id= max_lrg_id;
  111.51    // Force the Union-Find mapping to be at least this large
  111.52 -  _uf_map.extend(_max_lrg_id, 0);
  111.53 +  _uf_map.at_put_grow(_max_lrg_id, 0);
  111.54    // Initialize it to be the ID mapping.
  111.55    for (uint i = 0; i < _max_lrg_id; ++i) {
  111.56 -    _uf_map.map(i, i);
  111.57 +    _uf_map.at_put(i, i);
  111.58    }
  111.59  }
  111.60  
  111.61 @@ -176,12 +159,12 @@
  111.62  // the Union-Find mapping after this call.
  111.63  void LiveRangeMap::compress_uf_map_for_nodes() {
  111.64    // For all Nodes, compress mapping
  111.65 -  uint unique = _names.Size();
  111.66 +  uint unique = _names.length();
  111.67    for (uint i = 0; i < unique; ++i) {
  111.68 -    uint lrg = _names[i];
  111.69 +    uint lrg = _names.at(i);
  111.70      uint compressed_lrg = find(lrg);
  111.71      if (lrg != compressed_lrg) {
  111.72 -      _names.map(i, compressed_lrg);
  111.73 +      _names.at_put(i, compressed_lrg);
  111.74      }
  111.75    }
  111.76  }
  111.77 @@ -198,11 +181,11 @@
  111.78      return lrg;
  111.79    }
  111.80  
  111.81 -  uint next = _uf_map[lrg];
  111.82 +  uint next = _uf_map.at(lrg);
  111.83    while (next != lrg) { // Scan chain of equivalences
  111.84      assert(next < lrg, "always union smaller");
  111.85      lrg = next; // until find a fixed-point
  111.86 -    next = _uf_map[lrg];
  111.87 +    next = _uf_map.at(lrg);
  111.88    }
  111.89    return next;
  111.90  }
  111.91 @@ -215,7 +198,7 @@
  111.92         NULL
  111.93  #endif
  111.94         )
  111.95 -  , _lrg_map(unique)
  111.96 +  , _lrg_map(Thread::current()->resource_area(), unique)
  111.97    , _live(0)
  111.98    , _spilled_once(Thread::current()->resource_area())
  111.99    , _spilled_twice(Thread::current()->resource_area())
 111.100 @@ -692,6 +675,7 @@
 111.101        _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
 111.102      }
 111.103    }
 111.104 +
 111.105    // Reset the Union-Find mapping to be identity
 111.106    _lrg_map.reset_uf_map(lr_counter);
 111.107  }
   112.1 --- a/src/share/vm/opto/chaitin.hpp	Fri Sep 27 13:49:57 2013 -0400
   112.2 +++ b/src/share/vm/opto/chaitin.hpp	Fri Sep 27 13:53:43 2013 -0400
   112.3 @@ -283,8 +283,8 @@
   112.4  
   112.5    // Straight out of Tarjan's union-find algorithm
   112.6    uint find_compress(const Node *node) {
   112.7 -    uint lrg_id = find_compress(_names[node->_idx]);
   112.8 -    _names.map(node->_idx, lrg_id);
   112.9 +    uint lrg_id = find_compress(_names.at(node->_idx));
  112.10 +    _names.at_put(node->_idx, lrg_id);
  112.11      return lrg_id;
  112.12    }
  112.13  
  112.14 @@ -305,40 +305,40 @@
  112.15    }
  112.16  
  112.17    uint size() const {
  112.18 -    return _names.Size();
  112.19 +    return _names.length();
  112.20    }
  112.21  
  112.22    uint live_range_id(uint idx) const {
  112.23 -    return _names[idx];
  112.24 +    return _names.at(idx);
  112.25    }
  112.26  
  112.27    uint live_range_id(const Node *node) const {
  112.28 -    return _names[node->_idx];
  112.29 +    return _names.at(node->_idx);
  112.30    }
  112.31  
  112.32    uint uf_live_range_id(uint lrg_id) const {
  112.33 -    return _uf_map[lrg_id];
  112.34 +    return _uf_map.at(lrg_id);
  112.35    }
  112.36  
  112.37    void map(uint idx, uint lrg_id) {
  112.38 -    _names.map(idx, lrg_id);
  112.39 +    _names.at_put(idx, lrg_id);
  112.40    }
  112.41  
  112.42    void uf_map(uint dst_lrg_id, uint src_lrg_id) {
  112.43 -    _uf_map.map(dst_lrg_id, src_lrg_id);
  112.44 +    _uf_map.at_put(dst_lrg_id, src_lrg_id);
  112.45    }
  112.46  
  112.47    void extend(uint idx, uint lrg_id) {
  112.48 -    _names.extend(idx, lrg_id);
  112.49 +    _names.at_put_grow(idx, lrg_id);
  112.50    }
  112.51  
  112.52    void uf_extend(uint dst_lrg_id, uint src_lrg_id) {
  112.53 -    _uf_map.extend(dst_lrg_id, src_lrg_id);
  112.54 +    _uf_map.at_put_grow(dst_lrg_id, src_lrg_id);
  112.55    }
  112.56  
  112.57 -  LiveRangeMap(uint unique)
  112.58 -  : _names(unique)
  112.59 -  , _uf_map(unique)
  112.60 +  LiveRangeMap(Arena* arena, uint unique)
  112.61 +  : _names(arena, unique, unique, 0)
  112.62 +  , _uf_map(arena, unique, unique, 0)
  112.63    , _max_lrg_id(0) {}
  112.64  
  112.65    uint find_id( const Node *n ) {
  112.66 @@ -355,14 +355,14 @@
  112.67    void compress_uf_map_for_nodes();
  112.68  
  112.69    uint find(uint lidx) {
  112.70 -    uint uf_lidx = _uf_map[lidx];
  112.71 +    uint uf_lidx = _uf_map.at(lidx);
  112.72      return (uf_lidx == lidx) ? uf_lidx : find_compress(lidx);
  112.73    }
  112.74  
  112.75    // Convert a Node into a Live Range Index - a lidx
  112.76    uint find(const Node *node) {
  112.77      uint lidx = live_range_id(node);
  112.78 -    uint uf_lidx = _uf_map[lidx];
  112.79 +    uint uf_lidx = _uf_map.at(lidx);
  112.80      return (uf_lidx == lidx) ? uf_lidx : find_compress(node);
  112.81    }
  112.82  
  112.83 @@ -371,10 +371,10 @@
  112.84  
  112.85    // Like Find above, but no path compress, so bad asymptotic behavior
  112.86    uint find_const(const Node *node) const {
  112.87 -    if(node->_idx >= _names.Size()) {
  112.88 +    if(node->_idx >= (uint)_names.length()) {
  112.89        return 0; // not mapped, usual for debug dump
  112.90      }
  112.91 -    return find_const(_names[node->_idx]);
  112.92 +    return find_const(_names.at(node->_idx));
  112.93    }
  112.94  };
  112.95  
   113.1 --- a/src/share/vm/opto/coalesce.hpp	Fri Sep 27 13:49:57 2013 -0400
   113.2 +++ b/src/share/vm/opto/coalesce.hpp	Fri Sep 27 13:53:43 2013 -0400
   113.3 @@ -29,7 +29,6 @@
   113.4  
   113.5  class LoopTree;
   113.6  class LRG;
   113.7 -class LRG_List;
   113.8  class Matcher;
   113.9  class PhaseIFG;
  113.10  class PhaseCFG;
   114.1 --- a/src/share/vm/opto/compile.cpp	Fri Sep 27 13:49:57 2013 -0400
   114.2 +++ b/src/share/vm/opto/compile.cpp	Fri Sep 27 13:53:43 2013 -0400
   114.3 @@ -654,7 +654,7 @@
   114.4                    _inlining_progress(false),
   114.5                    _inlining_incrementally(false),
   114.6                    _print_inlining_list(NULL),
   114.7 -                  _print_inlining(0) {
   114.8 +                  _print_inlining_idx(0) {
   114.9    C = this;
  114.10  
  114.11    CompileWrapper cw(this);
  114.12 @@ -679,6 +679,8 @@
  114.13    set_print_assembly(print_opto_assembly);
  114.14    set_parsed_irreducible_loop(false);
  114.15  #endif
  114.16 +  set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
  114.17 +  set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
  114.18  
  114.19    if (ProfileTraps) {
  114.20      // Make sure the method being compiled gets its own MDO,
  114.21 @@ -710,7 +712,7 @@
  114.22    PhaseGVN gvn(node_arena(), estimated_size);
  114.23    set_initial_gvn(&gvn);
  114.24  
  114.25 -  if (PrintInlining  || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
  114.26 +  if (print_inlining() || print_intrinsics()) {
  114.27      _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
  114.28    }
  114.29    { // Scope for timing the parser
  114.30 @@ -937,7 +939,7 @@
  114.31      _inlining_progress(false),
  114.32      _inlining_incrementally(false),
  114.33      _print_inlining_list(NULL),
  114.34 -    _print_inlining(0) {
  114.35 +    _print_inlining_idx(0) {
  114.36    C = this;
  114.37  
  114.38  #ifndef PRODUCT
  114.39 @@ -2646,7 +2648,7 @@
  114.40              addp->in(AddPNode::Base) == n->in(AddPNode::Base),
  114.41              "Base pointers must match" );
  114.42  #ifdef _LP64
  114.43 -    if ((UseCompressedOops || UseCompressedKlassPointers) &&
  114.44 +    if ((UseCompressedOops || UseCompressedClassPointers) &&
  114.45          addp->Opcode() == Op_ConP &&
  114.46          addp == n->in(AddPNode::Base) &&
  114.47          n->in(AddPNode::Offset)->is_Con()) {
  114.48 @@ -3033,7 +3035,7 @@
  114.49  
  114.50    // Skip next transformation if compressed oops are not used.
  114.51    if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
  114.52 -      (!UseCompressedOops && !UseCompressedKlassPointers))
  114.53 +      (!UseCompressedOops && !UseCompressedClassPointers))
  114.54      return;
  114.55  
  114.56    // Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
  114.57 @@ -3611,7 +3613,7 @@
  114.58  }
  114.59  
  114.60  void Compile::dump_inlining() {
  114.61 -  if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
  114.62 +  if (print_inlining() || print_intrinsics()) {
  114.63      // Print inlining message for candidates that we couldn't inline
  114.64      // for lack of space or non constant receiver
  114.65      for (int i = 0; i < _late_inlines.length(); i++) {
  114.66 @@ -3635,7 +3637,7 @@
  114.67        }
  114.68      }
  114.69      for (int i = 0; i < _print_inlining_list->length(); i++) {
  114.70 -      tty->print(_print_inlining_list->at(i).ss()->as_string());
  114.71 +      tty->print(_print_inlining_list->adr_at(i)->ss()->as_string());
  114.72      }
  114.73    }
  114.74  }
   115.1 --- a/src/share/vm/opto/compile.hpp	Fri Sep 27 13:49:57 2013 -0400
   115.2 +++ b/src/share/vm/opto/compile.hpp	Fri Sep 27 13:53:43 2013 -0400
   115.3 @@ -312,6 +312,8 @@
   115.4    bool                  _do_method_data_update; // True if we generate code to update MethodData*s
   115.5    int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
   115.6    bool                  _print_assembly;        // True if we should dump assembly code for this compilation
   115.7 +  bool                  _print_inlining;        // True if we should print inlining for this compilation
   115.8 +  bool                  _print_intrinsics;      // True if we should print intrinsics for this compilation
   115.9  #ifndef PRODUCT
  115.10    bool                  _trace_opto_output;
  115.11    bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
  115.12 @@ -414,7 +416,7 @@
  115.13    };
  115.14  
  115.15    GrowableArray<PrintInliningBuffer>* _print_inlining_list;
  115.16 -  int _print_inlining;
  115.17 +  int _print_inlining_idx;
  115.18  
  115.19    // Only keep nodes in the expensive node list that need to be optimized
  115.20    void cleanup_expensive_nodes(PhaseIterGVN &igvn);
  115.21 @@ -426,24 +428,24 @@
  115.22   public:
  115.23  
  115.24    outputStream* print_inlining_stream() const {
  115.25 -    return _print_inlining_list->at(_print_inlining).ss();
  115.26 +    return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
  115.27    }
  115.28  
  115.29    void print_inlining_skip(CallGenerator* cg) {
  115.30 -    if (PrintInlining) {
  115.31 -      _print_inlining_list->at(_print_inlining).set_cg(cg);
  115.32 -      _print_inlining++;
  115.33 -      _print_inlining_list->insert_before(_print_inlining, PrintInliningBuffer());
  115.34 +    if (_print_inlining) {
  115.35 +      _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
  115.36 +      _print_inlining_idx++;
  115.37 +      _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
  115.38      }
  115.39    }
  115.40  
  115.41    void print_inlining_insert(CallGenerator* cg) {
  115.42 -    if (PrintInlining) {
  115.43 +    if (_print_inlining) {
  115.44        for (int i = 0; i < _print_inlining_list->length(); i++) {
  115.45 -        if (_print_inlining_list->at(i).cg() == cg) {
  115.46 +        if (_print_inlining_list->adr_at(i)->cg() == cg) {
  115.47            _print_inlining_list->insert_before(i+1, PrintInliningBuffer());
  115.48 -          _print_inlining = i+1;
  115.49 -          _print_inlining_list->at(i).set_cg(NULL);
  115.50 +          _print_inlining_idx = i+1;
  115.51 +          _print_inlining_list->adr_at(i)->set_cg(NULL);
  115.52            return;
  115.53          }
  115.54        }
  115.55 @@ -572,6 +574,10 @@
  115.56    int               AliasLevel() const          { return _AliasLevel; }
  115.57    bool              print_assembly() const       { return _print_assembly; }
  115.58    void          set_print_assembly(bool z)       { _print_assembly = z; }
  115.59 +  bool              print_inlining() const       { return _print_inlining; }
  115.60 +  void          set_print_inlining(bool z)       { _print_inlining = z; }
  115.61 +  bool              print_intrinsics() const     { return _print_intrinsics; }
  115.62 +  void          set_print_intrinsics(bool z)     { _print_intrinsics = z; }
  115.63    // check the CompilerOracle for special behaviours for this compile
  115.64    bool          method_has_option(const char * option) {
  115.65      return method() != NULL && method()->has_option(option);
   116.1 --- a/src/share/vm/opto/connode.cpp	Fri Sep 27 13:49:57 2013 -0400
   116.2 +++ b/src/share/vm/opto/connode.cpp	Fri Sep 27 13:53:43 2013 -0400
   116.3 @@ -630,7 +630,7 @@
   116.4    if (t == Type::TOP) return Type::TOP;
   116.5    assert (t != TypePtr::NULL_PTR, "null klass?");
   116.6  
   116.7 -  assert(UseCompressedKlassPointers && t->isa_klassptr(), "only klass ptr here");
   116.8 +  assert(UseCompressedClassPointers && t->isa_klassptr(), "only klass ptr here");
   116.9    return t->make_narrowklass();
  116.10  }
  116.11  
   117.1 --- a/src/share/vm/opto/doCall.cpp	Fri Sep 27 13:49:57 2013 -0400
   117.2 +++ b/src/share/vm/opto/doCall.cpp	Fri Sep 27 13:53:43 2013 -0400
   117.3 @@ -41,9 +41,9 @@
   117.4  #include "runtime/sharedRuntime.hpp"
   117.5  
   117.6  void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
   117.7 -  if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
   117.8 +  if (TraceTypeProfile || C->print_inlining()) {
   117.9      outputStream* out = tty;
  117.10 -    if (!PrintInlining) {
  117.11 +    if (!C->print_inlining()) {
  117.12        if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
  117.13          method->print_short_name();
  117.14          tty->cr();
   118.1 --- a/src/share/vm/opto/library_call.cpp	Fri Sep 27 13:49:57 2013 -0400
   118.2 +++ b/src/share/vm/opto/library_call.cpp	Fri Sep 27 13:53:43 2013 -0400
   118.3 @@ -543,7 +543,7 @@
   118.4    Compile* C = kit.C;
   118.5    int nodes = C->unique();
   118.6  #ifndef PRODUCT
   118.7 -  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
   118.8 +  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
   118.9      char buf[1000];
  118.10      const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
  118.11      tty->print_cr("Intrinsic %s", str);
  118.12 @@ -554,7 +554,7 @@
  118.13  
  118.14    // Try to inline the intrinsic.
  118.15    if (kit.try_to_inline()) {
  118.16 -    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
  118.17 +    if (C->print_intrinsics() || C->print_inlining()) {
  118.18        C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
  118.19      }
  118.20      C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
  118.21 @@ -570,7 +570,7 @@
  118.22    }
  118.23  
  118.24    // The intrinsic bailed out
  118.25 -  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
  118.26 +  if (C->print_intrinsics() || C->print_inlining()) {
  118.27      if (jvms->has_method()) {
  118.28        // Not a root compile.
  118.29        const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
  118.30 @@ -592,7 +592,7 @@
  118.31    int nodes = C->unique();
  118.32  #ifndef PRODUCT
  118.33    assert(is_predicted(), "sanity");
  118.34 -  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
  118.35 +  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
  118.36      char buf[1000];
  118.37      const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
  118.38      tty->print_cr("Predicate for intrinsic %s", str);
  118.39 @@ -603,7 +603,7 @@
  118.40  
  118.41    Node* slow_ctl = kit.try_to_predicate();
  118.42    if (!kit.failing()) {
  118.43 -    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
  118.44 +    if (C->print_intrinsics() || C->print_inlining()) {
  118.45        C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
  118.46      }
  118.47      C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
  118.48 @@ -617,7 +617,7 @@
  118.49    }
  118.50  
  118.51    // The intrinsic bailed out
  118.52 -  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
  118.53 +  if (C->print_intrinsics() || C->print_inlining()) {
  118.54      if (jvms->has_method()) {
  118.55        // Not a root compile.
  118.56        const char* msg = "failed to generate predicate for intrinsic";
  118.57 @@ -2299,7 +2299,7 @@
  118.58      const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
  118.59  
  118.60  #ifndef PRODUCT
  118.61 -    if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
  118.62 +    if (C->print_intrinsics() || C->print_inlining()) {
  118.63        tty->print("  from base type: ");  adr_type->dump();
  118.64        tty->print("  sharpened value: ");  tjp->dump();
  118.65      }
  118.66 @@ -3260,7 +3260,7 @@
  118.67    if (mirror_con == NULL)  return false;  // cannot happen?
  118.68  
  118.69  #ifndef PRODUCT
  118.70 -  if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
  118.71 +  if (C->print_intrinsics() || C->print_inlining()) {
  118.72      ciType* k = mirror_con->java_mirror_type();
  118.73      if (k) {
  118.74        tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
  118.75 @@ -3734,6 +3734,8 @@
  118.76                                               RegionNode* slow_region) {
  118.77    ciMethod* method = callee();
  118.78    int vtable_index = method->vtable_index();
  118.79 +  assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
  118.80 +         err_msg_res("bad index %d", vtable_index));
  118.81    // Get the Method* out of the appropriate vtable entry.
  118.82    int entry_offset  = (InstanceKlass::vtable_start_offset() +
  118.83                       vtable_index*vtableEntry::size()) * wordSize +
  118.84 @@ -3784,6 +3786,8 @@
  118.85        // so the vtable index is fixed.
  118.86        // No need to use the linkResolver to get it.
  118.87         vtable_index = method->vtable_index();
  118.88 +       assert(vtable_index >= 0 || vtable_index == Method::nonvirtual_vtable_index,
  118.89 +              err_msg_res("bad index %d", vtable_index));
  118.90      }
  118.91      slow_call = new(C) CallDynamicJavaNode(tf,
  118.92                            SharedRuntime::get_resolve_virtual_call_stub(),
  118.93 @@ -3948,14 +3952,14 @@
  118.94  // caller sensitive methods.
  118.95  bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
  118.96  #ifndef PRODUCT
  118.97 -  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
  118.98 +  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
  118.99      tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
 118.100    }
 118.101  #endif
 118.102  
 118.103    if (!jvms()->has_method()) {
 118.104  #ifndef PRODUCT
 118.105 -    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
 118.106 +    if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
 118.107        tty->print_cr("  Bailing out because intrinsic was inlined at top level");
 118.108      }
 118.109  #endif
 118.110 @@ -3979,7 +3983,7 @@
 118.111        // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
 118.112        if (!m->caller_sensitive()) {
 118.113  #ifndef PRODUCT
 118.114 -        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
 118.115 +        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
 118.116            tty->print_cr("  Bailing out: CallerSensitive annotation expected at frame %d", n);
 118.117          }
 118.118  #endif
 118.119 @@ -3995,7 +3999,7 @@
 118.120          set_result(makecon(TypeInstPtr::make(caller_mirror)));
 118.121  
 118.122  #ifndef PRODUCT
 118.123 -        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
 118.124 +        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
 118.125            tty->print_cr("  Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
 118.126            tty->print_cr("  JVM state at this point:");
 118.127            for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
 118.128 @@ -4011,7 +4015,7 @@
 118.129    }
 118.130  
 118.131  #ifndef PRODUCT
 118.132 -  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
 118.133 +  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
 118.134      tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
 118.135      tty->print_cr("  JVM state at this point:");
 118.136      for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
 118.137 @@ -4204,7 +4208,7 @@
 118.138    // 12 - 64-bit VM, compressed klass
 118.139    // 16 - 64-bit VM, normal klass
 118.140    if (base_off % BytesPerLong != 0) {
 118.141 -    assert(UseCompressedKlassPointers, "");
 118.142 +    assert(UseCompressedClassPointers, "");
 118.143      if (is_array) {
 118.144        // Exclude length to copy by 8 bytes words.
 118.145        base_off += sizeof(int);
   119.1 --- a/src/share/vm/opto/live.cpp	Fri Sep 27 13:49:57 2013 -0400
   119.2 +++ b/src/share/vm/opto/live.cpp	Fri Sep 27 13:53:43 2013 -0400
   119.3 @@ -91,7 +91,7 @@
   119.4          break;
   119.5        }
   119.6  
   119.7 -      uint r = _names[n->_idx];
   119.8 +      uint r = _names.at(n->_idx);
   119.9        assert(!def_outside->member(r), "Use of external LRG overlaps the same LRG defined in this block");
  119.10        def->insert( r );
  119.11        use->remove( r );
  119.12 @@ -100,7 +100,7 @@
  119.13          Node *nk = n->in(k);
  119.14          uint nkidx = nk->_idx;
  119.15          if (_cfg.get_block_for_node(nk) != block) {
  119.16 -          uint u = _names[nkidx];
  119.17 +          uint u = _names.at(nkidx);
  119.18            use->insert(u);
  119.19            DEBUG_ONLY(def_outside->insert(u);)
  119.20          }
  119.21 @@ -112,7 +112,7 @@
  119.22  #endif
  119.23      // Remove anything defined by Phis and the block start instruction
  119.24      for (uint k = i; k > 0; k--) {
  119.25 -      uint r = _names[block->get_node(k - 1)->_idx];
  119.26 +      uint r = _names.at(block->get_node(k - 1)->_idx);
  119.27        def->insert(r);
  119.28        use->remove(r);
  119.29      }
  119.30 @@ -124,7 +124,7 @@
  119.31  
  119.32        // PhiNode uses go in the live-out set of prior blocks.
  119.33        for (uint k = i; k > 0; k--) {
  119.34 -        add_liveout(p, _names[block->get_node(k-1)->in(l)->_idx], first_pass);
  119.35 +        add_liveout(p, _names.at(block->get_node(k-1)->in(l)->_idx), first_pass);
  119.36        }
  119.37      }
  119.38      freeset(block);
  119.39 @@ -256,7 +256,7 @@
  119.40    tty->print("LiveOut: ");  _live[b->_pre_order-1].dump();
  119.41    uint cnt = b->number_of_nodes();
  119.42    for( uint i=0; i<cnt; i++ ) {
  119.43 -    tty->print("L%d/", _names[b->get_node(i)->_idx] );
  119.44 +    tty->print("L%d/", _names.at(b->get_node(i)->_idx));
  119.45      b->get_node(i)->dump();
  119.46    }
  119.47    tty->print("\n");
  119.48 @@ -321,7 +321,7 @@
  119.49  #ifdef _LP64
  119.50                        UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP ||
  119.51                        UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN ||
  119.52 -                      UseCompressedKlassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
  119.53 +                      UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
  119.54  #endif
  119.55                        check->as_Mach()->ideal_Opcode() == Op_LoadP ||
  119.56                        check->as_Mach()->ideal_Opcode() == Op_LoadKlass)) {
   120.1 --- a/src/share/vm/opto/live.hpp	Fri Sep 27 13:49:57 2013 -0400
   120.2 +++ b/src/share/vm/opto/live.hpp	Fri Sep 27 13:53:43 2013 -0400
   120.3 @@ -40,27 +40,7 @@
   120.4  //------------------------------LRG_List---------------------------------------
   120.5  // Map Node indices to Live RanGe indices.
   120.6  // Array lookup in the optimized case.
   120.7 -class LRG_List : public ResourceObj {
   120.8 -  friend class VMStructs;
   120.9 -  uint _cnt, _max;
  120.10 -  uint* _lidxs;
  120.11 -  ReallocMark _nesting;         // assertion check for reallocations
  120.12 -public:
  120.13 -  LRG_List( uint max );
  120.14 -
  120.15 -  uint lookup( uint nidx ) const {
  120.16 -    return _lidxs[nidx];
  120.17 -  }
  120.18 -  uint operator[] (uint nidx) const { return lookup(nidx); }
  120.19 -
  120.20 -  void map( uint nidx, uint lidx ) {
  120.21 -    assert( nidx < _cnt, "oob" );
  120.22 -    _lidxs[nidx] = lidx;
  120.23 -  }
  120.24 -  void extend( uint nidx, uint lidx );
  120.25 -
  120.26 -  uint Size() const { return _cnt; }
  120.27 -};
  120.28 +typedef GrowableArray<uint> LRG_List;
  120.29  
  120.30  //------------------------------PhaseLive--------------------------------------
  120.31  // Compute live-in/live-out
   121.1 --- a/src/share/vm/opto/macro.cpp	Fri Sep 27 13:49:57 2013 -0400
   121.2 +++ b/src/share/vm/opto/macro.cpp	Fri Sep 27 13:53:43 2013 -0400
   121.3 @@ -2191,7 +2191,7 @@
   121.4        Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
   121.5        klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
   121.6  #ifdef _LP64
   121.7 -      if (UseCompressedKlassPointers && klass_node->is_DecodeNKlass()) {
   121.8 +      if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) {
   121.9          assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
  121.10          klass_node->in(1)->init_req(0, ctrl);
  121.11        } else
   122.1 --- a/src/share/vm/opto/memnode.cpp	Fri Sep 27 13:49:57 2013 -0400
   122.2 +++ b/src/share/vm/opto/memnode.cpp	Fri Sep 27 13:53:43 2013 -0400
   122.3 @@ -2031,7 +2031,7 @@
   122.4    assert(adr_type != NULL, "expecting TypeKlassPtr");
   122.5  #ifdef _LP64
   122.6    if (adr_type->is_ptr_to_narrowklass()) {
   122.7 -    assert(UseCompressedKlassPointers, "no compressed klasses");
   122.8 +    assert(UseCompressedClassPointers, "no compressed klasses");
   122.9      Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass()));
  122.10      return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
  122.11    }
  122.12 @@ -2369,7 +2369,7 @@
  122.13        val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
  122.14        return new (C) StoreNNode(ctl, mem, adr, adr_type, val);
  122.15      } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
  122.16 -               (UseCompressedKlassPointers && val->bottom_type()->isa_klassptr() &&
  122.17 +               (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
  122.18                  adr->bottom_type()->isa_rawptr())) {
  122.19        val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
  122.20        return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val);
   123.1 --- a/src/share/vm/opto/type.cpp	Fri Sep 27 13:49:57 2013 -0400
   123.2 +++ b/src/share/vm/opto/type.cpp	Fri Sep 27 13:53:43 2013 -0400
   123.3 @@ -2416,7 +2416,7 @@
   123.4  #ifdef _LP64
   123.5    if (_offset != 0) {
   123.6      if (_offset == oopDesc::klass_offset_in_bytes()) {
   123.7 -      _is_ptr_to_narrowklass = UseCompressedKlassPointers;
   123.8 +      _is_ptr_to_narrowklass = UseCompressedClassPointers;
   123.9      } else if (klass() == NULL) {
  123.10        // Array with unknown body type
  123.11        assert(this->isa_aryptr(), "only arrays without klass");
   124.1 --- a/src/share/vm/prims/jni.cpp	Fri Sep 27 13:49:57 2013 -0400
   124.2 +++ b/src/share/vm/prims/jni.cpp	Fri Sep 27 13:53:43 2013 -0400
   124.3 @@ -1336,6 +1336,7 @@
   124.4        if (call_type == JNI_VIRTUAL) {
   124.5          // jni_GetMethodID makes sure class is linked and initialized
   124.6          // so m should have a valid vtable index.
   124.7 +        assert(!m->has_itable_index(), "");
   124.8          int vtbl_index = m->vtable_index();
   124.9          if (vtbl_index != Method::nonvirtual_vtable_index) {
  124.10            Klass* k = h_recv->klass();
  124.11 @@ -1355,12 +1356,7 @@
  124.12        // interface call
  124.13        KlassHandle h_holder(THREAD, holder);
  124.14  
  124.15 -      int itbl_index = m->cached_itable_index();
  124.16 -      if (itbl_index == -1) {
  124.17 -        itbl_index = klassItable::compute_itable_index(m);
  124.18 -        m->set_cached_itable_index(itbl_index);
  124.19 -        // the above may have grabbed a lock, 'm' and anything non-handlized can't be used again
  124.20 -      }
  124.21 +      int itbl_index = m->itable_index();
  124.22        Klass* k = h_recv->klass();
  124.23        selected_method = InstanceKlass::cast(k)->method_at_itable(h_holder(), itbl_index, CHECK);
  124.24      }
  124.25 @@ -5037,6 +5033,7 @@
  124.26  #include "gc_implementation/g1/heapRegionRemSet.hpp"
  124.27  #endif
  124.28  #include "utilities/quickSort.hpp"
  124.29 +#include "utilities/ostream.hpp"
  124.30  #if INCLUDE_VM_STRUCTS
  124.31  #include "runtime/vmStructs.hpp"
  124.32  #endif
  124.33 @@ -5048,22 +5045,31 @@
  124.34  // Forward declaration
  124.35  void TestReservedSpace_test();
  124.36  void TestReserveMemorySpecial_test();
  124.37 +void TestVirtualSpace_test();
  124.38 +void TestMetaspaceAux_test();
  124.39 +#if INCLUDE_ALL_GCS
  124.40 +void TestG1BiasedArray_test();
  124.41 +#endif
  124.42  
  124.43  void execute_internal_vm_tests() {
  124.44    if (ExecuteInternalVMTests) {
  124.45      tty->print_cr("Running internal VM tests");
  124.46      run_unit_test(TestReservedSpace_test());
  124.47      run_unit_test(TestReserveMemorySpecial_test());
  124.48 +    run_unit_test(TestVirtualSpace_test());
  124.49 +    run_unit_test(TestMetaspaceAux_test());
  124.50      run_unit_test(GlobalDefinitions::test_globals());
  124.51      run_unit_test(GCTimerAllTest::all());
  124.52      run_unit_test(arrayOopDesc::test_max_array_length());
  124.53      run_unit_test(CollectedHeap::test_is_in());
  124.54      run_unit_test(QuickSort::test_quick_sort());
  124.55      run_unit_test(AltHashing::test_alt_hash());
  124.56 +    run_unit_test(test_loggc_filename());
  124.57  #if INCLUDE_VM_STRUCTS
  124.58      run_unit_test(VMStructs::test());
  124.59  #endif
  124.60  #if INCLUDE_ALL_GCS
  124.61 +    run_unit_test(TestG1BiasedArray_test());
  124.62      run_unit_test(HeapRegionRemSet::test_prt());
  124.63  #endif
  124.64      tty->print_cr("All internal VM tests passed");
   125.1 --- a/src/share/vm/prims/jvm.cpp	Fri Sep 27 13:49:57 2013 -0400
   125.2 +++ b/src/share/vm/prims/jvm.cpp	Fri Sep 27 13:53:43 2013 -0400
   125.3 @@ -1824,7 +1824,7 @@
   125.4      }
   125.5  
   125.6      if (!publicOnly || fs.access_flags().is_public()) {
   125.7 -      fd.initialize(k(), fs.index());
   125.8 +      fd.reinitialize(k(), fs.index());
   125.9        oop field = Reflection::new_field(&fd, UseNewReflection, CHECK_NULL);
  125.10        result->obj_at_put(out_idx, field);
  125.11        ++out_idx;
  125.12 @@ -1835,16 +1835,27 @@
  125.13  }
  125.14  JVM_END
  125.15  
  125.16 -JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredMethods(JNIEnv *env, jclass ofClass, jboolean publicOnly))
  125.17 -{
  125.18 -  JVMWrapper("JVM_GetClassDeclaredMethods");
  125.19 +static bool select_method(methodHandle method, bool want_constructor) {
  125.20 +  if (want_constructor) {
  125.21 +    return (method->is_initializer() && !method->is_static());
  125.22 +  } else {
  125.23 +    return  (!method->is_initializer() && !method->is_overpass());
  125.24 +  }
  125.25 +}
  125.26 +
  125.27 +static jobjectArray get_class_declared_methods_helper(
  125.28 +                                  JNIEnv *env,
  125.29 +                                  jclass ofClass, jboolean publicOnly,
  125.30 +                                  bool want_constructor,
  125.31 +                                  Klass* klass, TRAPS) {
  125.32 +
  125.33    JvmtiVMObjectAllocEventCollector oam;
  125.34  
  125.35    // Exclude primitive types and array types
  125.36    if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
  125.37        || java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_array()) {
  125.38      // Return empty array
  125.39 -    oop res = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), 0, CHECK_NULL);
  125.40 +    oop res = oopFactory::new_objArray(klass, 0, CHECK_NULL);
  125.41      return (jobjectArray) JNIHandles::make_local(env, res);
  125.42    }
  125.43  
  125.44 @@ -1855,87 +1866,67 @@
  125.45  
  125.46    Array<Method*>* methods = k->methods();
  125.47    int methods_length = methods->length();
  125.48 +
  125.49 +  // Save original method_idnum in case of redefinition, which can change
  125.50 +  // the idnum of obsolete methods.  The new method will have the same idnum
  125.51 +  // but if we refresh the methods array, the counts will be wrong.
  125.52 +  ResourceMark rm(THREAD);
  125.53 +  GrowableArray<int>* idnums = new GrowableArray<int>(methods_length);
  125.54    int num_methods = 0;
  125.55  
  125.56 -  int i;
  125.57 -  for (i = 0; i < methods_length; i++) {
  125.58 +  for (int i = 0; i < methods_length; i++) {
  125.59      methodHandle method(THREAD, methods->at(i));
  125.60 -    if (!method->is_initializer() && !method->is_overpass()) {
  125.61 +    if (select_method(method, want_constructor)) {
  125.62        if (!publicOnly || method->is_public()) {
  125.63 +        idnums->push(method->method_idnum());
  125.64          ++num_methods;
  125.65        }
  125.66      }
  125.67    }
  125.68  
  125.69    // Allocate result
  125.70 -  objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), num_methods, CHECK_NULL);
  125.71 +  objArrayOop r = oopFactory::new_objArray(klass, num_methods, CHECK_NULL);
  125.72    objArrayHandle result (THREAD, r);
  125.73  
  125.74 -  int out_idx = 0;
  125.75 -  for (i = 0; i < methods_length; i++) {
  125.76 -    methodHandle method(THREAD, methods->at(i));
  125.77 -    if (!method->is_initializer() && !method->is_overpass()) {
  125.78 -      if (!publicOnly || method->is_public()) {
  125.79 -        oop m = Reflection::new_method(method, UseNewReflection, false, CHECK_NULL);
  125.80 -        result->obj_at_put(out_idx, m);
  125.81 -        ++out_idx;
  125.82 +  // Now just put the methods that we selected above, but go by their idnum
  125.83 +  // in case of redefinition.  The methods can be redefined at any safepoint,
  125.84 +  // so above when allocating the oop array and below when creating reflect
  125.85 +  // objects.
  125.86 +  for (int i = 0; i < num_methods; i++) {
  125.87 +    methodHandle method(THREAD, k->method_with_idnum(idnums->at(i)));
  125.88 +    if (method.is_null()) {
  125.89 +      // Method may have been deleted and seems this API can handle null
  125.90 +      // Otherwise should probably put a method that throws NSME
  125.91 +      result->obj_at_put(i, NULL);
  125.92 +    } else {
  125.93 +      oop m;
  125.94 +      if (want_constructor) {
  125.95 +        m = Reflection::new_constructor(method, CHECK_NULL);
  125.96 +      } else {
  125.97 +        m = Reflection::new_method(method, UseNewReflection, false, CHECK_NULL);
  125.98        }
  125.99 +      result->obj_at_put(i, m);
 125.100      }
 125.101    }
 125.102 -  assert(out_idx == num_methods, "just checking");
 125.103 +
 125.104    return (jobjectArray) JNIHandles::make_local(env, result());
 125.105  }
 125.106 +
 125.107 +JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredMethods(JNIEnv *env, jclass ofClass, jboolean publicOnly))
 125.108 +{
 125.109 +  JVMWrapper("JVM_GetClassDeclaredMethods");
 125.110 +  return get_class_declared_methods_helper(env, ofClass, publicOnly,
 125.111 +                                           /*want_constructor*/ false,
 125.112 +                                           SystemDictionary::reflect_Method_klass(), THREAD);
 125.113 +}
 125.114  JVM_END
 125.115  
 125.116  JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredConstructors(JNIEnv *env, jclass ofClass, jboolean publicOnly))
 125.117  {
 125.118    JVMWrapper("JVM_GetClassDeclaredConstructors");
 125.119 -  JvmtiVMObjectAllocEventCollector oam;
 125.120 -
 125.121 -  // Exclude primitive types and array types
 125.122 -  if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
 125.123 -      || java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_array()) {
 125.124 -    // Return empty array
 125.125 -    oop res = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), 0 , CHECK_NULL);
 125.126 -    return (jobjectArray) JNIHandles::make_local(env, res);
 125.127 -  }
 125.128 -
 125.129 -  instanceKlassHandle k(THREAD, java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass)));
 125.130 -
 125.131 -  // Ensure class is linked
 125.132 -  k->link_class(CHECK_NULL);
 125.133 -
 125.134 -  Array<Method*>* methods = k->methods();
 125.135 -  int methods_length = methods->length();
 125.136 -  int num_constructors = 0;
 125.137 -
 125.138 -  int i;
 125.139 -  for (i = 0; i < methods_length; i++) {
 125.140 -    methodHandle method(THREAD, methods->at(i));
 125.141 -    if (method->is_initializer() && !method->is_static()) {
 125.142 -      if (!publicOnly || method->is_public()) {
 125.143 -        ++num_constructors;
 125.144 -      }
 125.145 -    }
 125.146 -  }
 125.147 -
 125.148 -  // Allocate result
 125.149 -  objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), num_constructors, CHECK_NULL);
 125.150 -  objArrayHandle result(THREAD, r);
 125.151 -
 125.152 -  int out_idx = 0;
 125.153 -  for (i = 0; i < methods_length; i++) {
 125.154 -    methodHandle method(THREAD, methods->at(i));
 125.155 -    if (method->is_initializer() && !method->is_static()) {
 125.156 -      if (!publicOnly || method->is_public()) {
 125.157 -        oop m = Reflection::new_constructor(method, CHECK_NULL);
 125.158 -        result->obj_at_put(out_idx, m);
 125.159 -        ++out_idx;
 125.160 -      }
 125.161 -    }
 125.162 -  }
 125.163 -  assert(out_idx == num_constructors, "just checking");
 125.164 -  return (jobjectArray) JNIHandles::make_local(env, result());
 125.165 +  return get_class_declared_methods_helper(env, ofClass, publicOnly,
 125.166 +                                           /*want_constructor*/ true,
 125.167 +                                           SystemDictionary::reflect_Constructor_klass(), THREAD);
 125.168  }
 125.169  JVM_END
 125.170  
   126.1 --- a/src/share/vm/prims/jvmtiEnvBase.hpp	Fri Sep 27 13:49:57 2013 -0400
   126.2 +++ b/src/share/vm/prims/jvmtiEnvBase.hpp	Fri Sep 27 13:53:43 2013 -0400
   126.3 @@ -406,7 +406,11 @@
   126.4    VMOp_Type type() const { return VMOp_GetCurrentContendedMonitor; }
   126.5    jvmtiError result() { return _result; }
   126.6    void doit() {
   126.7 -    _result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,_java_thread,_owned_monitor_ptr);
   126.8 +    _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
   126.9 +    if (Threads::includes(_java_thread) && !_java_thread->is_exiting() &&
  126.10 +        _java_thread->threadObj() != NULL) {
  126.11 +      _result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,_java_thread,_owned_monitor_ptr);
  126.12 +    }
  126.13    }
  126.14  };
  126.15  
   127.1 --- a/src/share/vm/prims/jvmtiImpl.cpp	Fri Sep 27 13:49:57 2013 -0400
   127.2 +++ b/src/share/vm/prims/jvmtiImpl.cpp	Fri Sep 27 13:53:43 2013 -0400
   127.3 @@ -273,59 +273,49 @@
   127.4  
   127.5    // add/remove breakpoint to/from versions of the method that
   127.6    // are EMCP. Directly or transitively obsolete methods are
   127.7 -  // not saved in the PreviousVersionInfo.
   127.8 +  // not saved in the PreviousVersionNodes.
   127.9    Thread *thread = Thread::current();
  127.10    instanceKlassHandle ikh = instanceKlassHandle(thread, _method->method_holder());
  127.11    Symbol* m_name = _method->name();
  127.12    Symbol* m_signature = _method->signature();
  127.13  
  127.14 -  {
  127.15 -    ResourceMark rm(thread);
  127.16 -    // PreviousVersionInfo objects returned via PreviousVersionWalker
  127.17 -    // contain a GrowableArray of handles. We have to clean up the
  127.18 -    // GrowableArray _after_ the PreviousVersionWalker destructor
  127.19 -    // has destroyed the handles.
  127.20 -    {
  127.21 -      // search previous versions if they exist
  127.22 -      PreviousVersionWalker pvw((InstanceKlass *)ikh());
  127.23 -      for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
  127.24 -           pv_info != NULL; pv_info = pvw.next_previous_version()) {
  127.25 -        GrowableArray<methodHandle>* methods =
  127.26 -          pv_info->prev_EMCP_method_handles();
  127.27 +  // search previous versions if they exist
  127.28 +  PreviousVersionWalker pvw(thread, (InstanceKlass *)ikh());
  127.29 +  for (PreviousVersionNode * pv_node = pvw.next_previous_version();
  127.30 +       pv_node != NULL; pv_node = pvw.next_previous_version()) {
  127.31 +    GrowableArray<Method*>* methods = pv_node->prev_EMCP_methods();
  127.32  
  127.33 -        if (methods == NULL) {
  127.34 -          // We have run into a PreviousVersion generation where
  127.35 -          // all methods were made obsolete during that generation's
  127.36 -          // RedefineClasses() operation. At the time of that
  127.37 -          // operation, all EMCP methods were flushed so we don't
  127.38 -          // have to go back any further.
  127.39 -          //
  127.40 -          // A NULL methods array is different than an empty methods
  127.41 -          // array. We cannot infer any optimizations about older
  127.42 -          // generations from an empty methods array for the current
  127.43 -          // generation.
  127.44 -          break;
  127.45 -        }
  127.46 +    if (methods == NULL) {
  127.47 +      // We have run into a PreviousVersion generation where
  127.48 +      // all methods were made obsolete during that generation's
  127.49 +      // RedefineClasses() operation. At the time of that
  127.50 +      // operation, all EMCP methods were flushed so we don't
  127.51 +      // have to go back any further.
  127.52 +      //
  127.53 +      // A NULL methods array is different than an empty methods
  127.54 +      // array. We cannot infer any optimizations about older
  127.55 +      // generations from an empty methods array for the current
  127.56 +      // generation.
  127.57 +      break;
  127.58 +    }
  127.59  
  127.60 -        for (int i = methods->length() - 1; i >= 0; i--) {
  127.61 -          methodHandle method = methods->at(i);
  127.62 -          // obsolete methods that are running are not deleted from
  127.63 -          // previous version array, but they are skipped here.
  127.64 -          if (!method->is_obsolete() &&
  127.65 -              method->name() == m_name &&
  127.66 -              method->signature() == m_signature) {
  127.67 -            RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
  127.68 -              meth_act == &Method::set_breakpoint ? "sett" : "clear",
  127.69 -              method->name()->as_C_string(),
  127.70 -              method->signature()->as_C_string()));
  127.71 +    for (int i = methods->length() - 1; i >= 0; i--) {
  127.72 +      Method* method = methods->at(i);
  127.73 +      // obsolete methods that are running are not deleted from
  127.74 +      // previous version array, but they are skipped here.
  127.75 +      if (!method->is_obsolete() &&
  127.76 +          method->name() == m_name &&
  127.77 +          method->signature() == m_signature) {
  127.78 +        RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
  127.79 +          meth_act == &Method::set_breakpoint ? "sett" : "clear",
  127.80 +          method->name()->as_C_string(),
  127.81 +          method->signature()->as_C_string()));
  127.82  
  127.83 -            ((Method*)method()->*meth_act)(_bci);
  127.84 -            break;
  127.85 -          }
  127.86 -        }
  127.87 +        (method->*meth_act)(_bci);
  127.88 +        break;
  127.89        }
  127.90 -    } // pvw is cleaned up
  127.91 -  } // rm is cleaned up
  127.92 +    }
  127.93 +  }
  127.94  }
  127.95  
  127.96  void JvmtiBreakpoint::set() {
   128.1 --- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Sep 27 13:49:57 2013 -0400
   128.2 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Sep 27 13:53:43 2013 -0400
   128.3 @@ -1072,8 +1072,17 @@
   128.4      }
   128.5  
   128.6      res = merge_cp_and_rewrite(the_class, scratch_class, THREAD);
   128.7 -    if (res != JVMTI_ERROR_NONE) {
   128.8 -      return res;
   128.9 +    if (HAS_PENDING_EXCEPTION) {
  128.10 +      Symbol* ex_name = PENDING_EXCEPTION->klass()->name();
  128.11 +      // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark
  128.12 +      RC_TRACE_WITH_THREAD(0x00000002, THREAD,
  128.13 +        ("merge_cp_and_rewrite exception: '%s'", ex_name->as_C_string()));
  128.14 +      CLEAR_PENDING_EXCEPTION;
  128.15 +      if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) {
  128.16 +        return JVMTI_ERROR_OUT_OF_MEMORY;
  128.17 +      } else {
  128.18 +        return JVMTI_ERROR_INTERNAL;
  128.19 +      }
  128.20      }
  128.21  
  128.22      if (VerifyMergedCPBytecodes) {
  128.23 @@ -1105,6 +1114,9 @@
  128.24      }
  128.25      if (HAS_PENDING_EXCEPTION) {
  128.26        Symbol* ex_name = PENDING_EXCEPTION->klass()->name();
  128.27 +      // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark
  128.28 +      RC_TRACE_WITH_THREAD(0x00000002, THREAD,
  128.29 +        ("Rewriter::rewrite or link_methods exception: '%s'", ex_name->as_C_string()));
  128.30        CLEAR_PENDING_EXCEPTION;
  128.31        if (ex_name == vmSymbols::java_lang_OutOfMemoryError()) {
  128.32          return JVMTI_ERROR_OUT_OF_MEMORY;
  128.33 @@ -1395,8 +1407,8 @@
  128.34    ClassLoaderData* loader_data = the_class->class_loader_data();
  128.35    ConstantPool* merge_cp_oop =
  128.36      ConstantPool::allocate(loader_data,
  128.37 -                                  merge_cp_length,
  128.38 -                                  THREAD);
  128.39 +                           merge_cp_length,
  128.40 +                           CHECK_(JVMTI_ERROR_OUT_OF_MEMORY));
  128.41    MergeCPCleaner cp_cleaner(loader_data, merge_cp_oop);
  128.42  
  128.43    HandleMark hm(THREAD);  // make sure handles are cleared before
  128.44 @@ -1472,7 +1484,8 @@
  128.45  
  128.46        // Replace the new constant pool with a shrunken copy of the
  128.47        // merged constant pool
  128.48 -      set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length, THREAD);
  128.49 +      set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length,
  128.50 +                            CHECK_(JVMTI_ERROR_OUT_OF_MEMORY));
  128.51        // The new constant pool replaces scratch_cp so have cleaner clean it up.
  128.52        // It can't be cleaned up while there are handles to it.
  128.53        cp_cleaner.add_scratch_cp(scratch_cp());
  128.54 @@ -1502,7 +1515,8 @@
  128.55      // merged constant pool so now the rewritten bytecodes have
  128.56      // valid references; the previous new constant pool will get
  128.57      // GCed.
  128.58 -    set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length, THREAD);
  128.59 +    set_new_constant_pool(loader_data, scratch_class, merge_cp, merge_cp_length,
  128.60 +                          CHECK_(JVMTI_ERROR_OUT_OF_MEMORY));
  128.61      // The new constant pool replaces scratch_cp so have cleaner clean it up.
  128.62      // It can't be cleaned up while there are handles to it.
  128.63      cp_cleaner.add_scratch_cp(scratch_cp());
  128.64 @@ -1590,11 +1604,23 @@
  128.65    for (int i = methods->length() - 1; i >= 0; i--) {
  128.66      methodHandle method(THREAD, methods->at(i));
  128.67      methodHandle new_method;
  128.68 -    rewrite_cp_refs_in_method(method, &new_method, CHECK_false);
  128.69 +    rewrite_cp_refs_in_method(method, &new_method, THREAD);
  128.70      if (!new_method.is_null()) {
  128.71        // the method has been replaced so save the new method version
  128.72 +      // even in the case of an exception.  original method is on the
  128.73 +      // deallocation list.
  128.74        methods->at_put(i, new_method());
  128.75      }
  128.76 +    if (HAS_PENDING_EXCEPTION) {
  128.77 +      Symbol* ex_name = PENDING_EXCEPTION->klass()->name();
  128.78 +      // RC_TRACE_WITH_THREAD macro has an embedded ResourceMark
  128.79 +      RC_TRACE_WITH_THREAD(0x00000002, THREAD,
  128.80 +        ("rewrite_cp_refs_in_method exception: '%s'", ex_name->as_C_string()));
  128.81 +      // Need to clear pending exception here as the super caller sets
  128.82 +      // the JVMTI_ERROR_INTERNAL if the returned value is false.
  128.83 +      CLEAR_PENDING_EXCEPTION;
  128.84 +      return false;
  128.85 +    }
  128.86    }
  128.87  
  128.88    return true;
  128.89 @@ -1674,10 +1700,7 @@
  128.90                Pause_No_Safepoint_Verifier pnsv(&nsv);
  128.91  
  128.92                // ldc is 2 bytes and ldc_w is 3 bytes
  128.93 -              m = rc.insert_space_at(bci, 3, inst_buffer, THREAD);
  128.94 -              if (m.is_null() || HAS_PENDING_EXCEPTION) {
  128.95 -                guarantee(false, "insert_space_at() failed");
  128.96 -              }
  128.97 +              m = rc.insert_space_at(bci, 3, inst_buffer, CHECK);
  128.98              }
  128.99  
 128.100              // return the new method so that the caller can update
 128.101 @@ -2487,8 +2510,8 @@
 128.102    // scratch_cp is a merged constant pool and has enough space for a
 128.103    // worst case merge situation. We want to associate the minimum
 128.104    // sized constant pool with the klass to save space.
 128.105 -  constantPoolHandle smaller_cp(THREAD,
 128.106 -          ConstantPool::allocate(loader_data, scratch_cp_length, THREAD));
 128.107 +  ConstantPool* cp = ConstantPool::allocate(loader_data, scratch_cp_length, CHECK);
 128.108 +  constantPoolHandle smaller_cp(THREAD, cp);
 128.109  
 128.110    // preserve version() value in the smaller copy
 128.111    int version = scratch_cp->version();
 128.112 @@ -2500,6 +2523,11 @@
 128.113    smaller_cp->set_pool_holder(scratch_class());
 128.114  
 128.115    scratch_cp->copy_cp_to(1, scratch_cp_length - 1, smaller_cp, 1, THREAD);
 128.116 +  if (HAS_PENDING_EXCEPTION) {
 128.117 +    // Exception is handled in the caller
 128.118 +    loader_data->add_to_deallocate_list(smaller_cp());
 128.119 +    return;
 128.120 +  }
 128.121    scratch_cp = smaller_cp;
 128.122  
 128.123    // attach new constant pool to klass
 128.124 @@ -2779,28 +2807,20 @@
 128.125                                          &trace_name_printed);
 128.126        }
 128.127      }
 128.128 -    {
 128.129 -      ResourceMark rm(_thread);
 128.130 -      // PreviousVersionInfo objects returned via PreviousVersionWalker
 128.131 -      // contain a GrowableArray of handles. We have to clean up the
 128.132 -      // GrowableArray _after_ the PreviousVersionWalker destructor
 128.133 -      // has destroyed the handles.
 128.134 -      {
 128.135 -        // the previous versions' constant pool caches may need adjustment
 128.136 -        PreviousVersionWalker pvw(ik);
 128.137 -        for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
 128.138 -             pv_info != NULL; pv_info = pvw.next_previous_version()) {
 128.139 -          other_cp = pv_info->prev_constant_pool_handle();
 128.140 -          cp_cache = other_cp->cache();
 128.141 -          if (cp_cache != NULL) {
 128.142 -            cp_cache->adjust_method_entries(_matching_old_methods,
 128.143 -                                            _matching_new_methods,
 128.144 -                                            _matching_methods_length,
 128.145 -                                            &trace_name_printed);
 128.146 -          }
 128.147 -        }
 128.148 -      } // pvw is cleaned up
 128.149 -    } // rm is cleaned up
 128.150 +
 128.151 +    // the previous versions' constant pool caches may need adjustment
 128.152 +    PreviousVersionWalker pvw(_thread, ik);
 128.153 +    for (PreviousVersionNode * pv_node = pvw.next_previous_version();
 128.154 +         pv_node != NULL; pv_node = pvw.next_previous_version()) {
 128.155 +      other_cp = pv_node->prev_constant_pool();
 128.156 +      cp_cache = other_cp->cache();
 128.157 +      if (cp_cache != NULL) {
 128.158 +        cp_cache->adjust_method_entries(_matching_old_methods,
 128.159 +                                        _matching_new_methods,
 128.160 +                                        _matching_methods_length,
 128.161 +                                        &trace_name_printed);
 128.162 +      }
 128.163 +    }
 128.164    }
 128.165  }
 128.166  
 128.167 @@ -2914,10 +2934,9 @@
 128.168        // obsolete methods need a unique idnum
 128.169        u2 num = InstanceKlass::cast(_the_class_oop)->next_method_idnum();
 128.170        if (num != ConstMethod::UNSET_IDNUM) {
 128.171 -//      u2 old_num = old_method->method_idnum();
 128.172          old_method->set_method_idnum(num);
 128.173 -// TO DO: attach obsolete annotations to obsolete method's new idnum
 128.174        }
 128.175 +
 128.176        // With tracing we try not to "yack" too much. The position of
 128.177        // this trace assumes there are fewer obsolete methods than
 128.178        // EMCP methods.
 128.179 @@ -2930,7 +2949,7 @@
 128.180    for (int i = 0; i < _deleted_methods_length; ++i) {
 128.181      Method* old_method = _deleted_methods[i];
 128.182  
 128.183 -    assert(old_method->vtable_index() < 0,
 128.184 +    assert(!old_method->has_vtable_index(),
 128.185             "cannot delete methods with vtable entries");;
 128.186  
 128.187      // Mark all deleted methods as old and obsolete
   129.1 --- a/src/share/vm/prims/methodHandles.cpp	Fri Sep 27 13:49:57 2013 -0400
   129.2 +++ b/src/share/vm/prims/methodHandles.cpp	Fri Sep 27 13:53:43 2013 -0400
   129.3 @@ -127,25 +127,37 @@
   129.4  }
   129.5  
   129.6  oop MethodHandles::init_MemberName(Handle mname, Handle target) {
   129.7 +  // This method is used from java.lang.invoke.MemberName constructors.
   129.8 +  // It fills in the new MemberName from a java.lang.reflect.Member.
   129.9    Thread* thread = Thread::current();
  129.10    oop target_oop = target();
  129.11    Klass* target_klass = target_oop->klass();
  129.12    if (target_klass == SystemDictionary::reflect_Field_klass()) {
  129.13      oop clazz = java_lang_reflect_Field::clazz(target_oop); // fd.field_holder()
  129.14      int slot  = java_lang_reflect_Field::slot(target_oop);  // fd.index()
  129.15 -    int mods  = java_lang_reflect_Field::modifiers(target_oop);
  129.16 -    oop type  = java_lang_reflect_Field::type(target_oop);
  129.17 -    oop name  = java_lang_reflect_Field::name(target_oop);
  129.18      KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
  129.19 -    intptr_t offset = InstanceKlass::cast(k())->field_offset(slot);
  129.20 -    return init_field_MemberName(mname, k, accessFlags_from(mods), type, name, offset);
  129.21 +    if (!k.is_null() && k->oop_is_instance()) {
  129.22 +      fieldDescriptor fd(InstanceKlass::cast(k()), slot);
  129.23 +      oop mname2 = init_field_MemberName(mname, fd);
  129.24 +      if (mname2 != NULL) {
  129.25 +        // Since we have the reified name and type handy, add them to the result.
  129.26 +        if (java_lang_invoke_MemberName::name(mname2) == NULL)
  129.27 +          java_lang_invoke_MemberName::set_name(mname2, java_lang_reflect_Field::name(target_oop));
  129.28 +        if (java_lang_invoke_MemberName::type(mname2) == NULL)
  129.29 +          java_lang_invoke_MemberName::set_type(mname2, java_lang_reflect_Field::type(target_oop));
  129.30 +      }
  129.31 +      return mname2;
  129.32 +    }
  129.33    } else if (target_klass == SystemDictionary::reflect_Method_klass()) {
  129.34      oop clazz  = java_lang_reflect_Method::clazz(target_oop);
  129.35      int slot   = java_lang_reflect_Method::slot(target_oop);
  129.36      KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
  129.37      if (!k.is_null() && k->oop_is_instance()) {
  129.38        Method* m = InstanceKlass::cast(k())->method_with_idnum(slot);
  129.39 -      return init_method_MemberName(mname, m, true, k);
  129.40 +      if (m == NULL || is_signature_polymorphic(m->intrinsic_id()))
  129.41 +        return NULL;            // do not resolve unless there is a concrete signature
  129.42 +      CallInfo info(m, k());
  129.43 +      return init_method_MemberName(mname, info);
  129.44      }
  129.45    } else if (target_klass == SystemDictionary::reflect_Constructor_klass()) {
  129.46      oop clazz  = java_lang_reflect_Constructor::clazz(target_oop);
  129.47 @@ -153,65 +165,50 @@
  129.48      KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
  129.49      if (!k.is_null() && k->oop_is_instance()) {
  129.50        Method* m = InstanceKlass::cast(k())->method_with_idnum(slot);
  129.51 -      return init_method_MemberName(mname, m, false, k);
  129.52 -    }
  129.53 -  } else if (target_klass == SystemDictionary::MemberName_klass()) {
  129.54 -    // Note: This only works if the MemberName has already been resolved.
  129.55 -    oop clazz        = java_lang_invoke_MemberName::clazz(target_oop);
  129.56 -    int flags        = java_lang_invoke_MemberName::flags(target_oop);
  129.57 -    Metadata* vmtarget=java_lang_invoke_MemberName::vmtarget(target_oop);
  129.58 -    intptr_t vmindex = java_lang_invoke_MemberName::vmindex(target_oop);
  129.59 -    KlassHandle k(thread, java_lang_Class::as_Klass(clazz));
  129.60 -    int ref_kind     = (flags >> REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK;
  129.61 -    if (vmtarget == NULL)  return NULL;  // not resolved
  129.62 -    if ((flags & IS_FIELD) != 0) {
  129.63 -      assert(vmtarget->is_klass(), "field vmtarget is Klass*");
  129.64 -      int basic_mods = (ref_kind_is_static(ref_kind) ? JVM_ACC_STATIC : 0);
  129.65 -      // FIXME:  how does k (receiver_limit) contribute?
  129.66 -      KlassHandle k_vmtarget(thread, (Klass*)vmtarget);
  129.67 -      return init_field_MemberName(mname, k_vmtarget, accessFlags_from(basic_mods), NULL, NULL, vmindex);
  129.68 -    } else if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) != 0) {
  129.69 -      assert(vmtarget->is_method(), "method or constructor vmtarget is Method*");
  129.70 -      return init_method_MemberName(mname, (Method*)vmtarget, ref_kind_does_dispatch(ref_kind), k);
  129.71 -    } else {
  129.72 -      return NULL;
  129.73 +      if (m == NULL)  return NULL;
  129.74 +      CallInfo info(m, k());
  129.75 +      return init_method_MemberName(mname, info);
  129.76      }
  129.77    }
  129.78    return NULL;
  129.79  }
  129.80  
  129.81 -oop MethodHandles::init_method_MemberName(Handle mname, Method* m, bool do_dispatch,
  129.82 -                                          KlassHandle receiver_limit_h) {
  129.83 -  Klass* receiver_limit = receiver_limit_h();
  129.84 -  AccessFlags mods = m->access_flags();
  129.85 -  int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS );
  129.86 -  int vmindex = Method::nonvirtual_vtable_index; // implies never any dispatch
  129.87 -  Klass* mklass = m->method_holder();
  129.88 -  if (receiver_limit == NULL)
  129.89 -    receiver_limit = mklass;
  129.90 -  if (m->is_initializer()) {
  129.91 -    flags |= IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
  129.92 -  } else if (mods.is_static()) {
  129.93 -    flags |= IS_METHOD | (JVM_REF_invokeStatic << REFERENCE_KIND_SHIFT);
  129.94 -  } else if (receiver_limit != mklass &&
  129.95 -             !receiver_limit->is_subtype_of(mklass)) {
  129.96 -    return NULL;  // bad receiver limit
  129.97 -  } else if (do_dispatch && receiver_limit->is_interface() &&
  129.98 -             mklass->is_interface()) {
  129.99 +oop MethodHandles::init_method_MemberName(Handle mname, CallInfo& info) {
 129.100 +  assert(info.resolved_appendix().is_null(), "only normal methods here");
 129.101 +  KlassHandle receiver_limit = info.resolved_klass();
 129.102 +  methodHandle m = info.resolved_method();
 129.103 +  int flags = (jushort)( m->access_flags().as_short() & JVM_RECOGNIZED_METHOD_MODIFIERS );
 129.104 +  int vmindex = Method::invalid_vtable_index;
 129.105 +
 129.106 +  switch (info.call_kind()) {
 129.107 +  case CallInfo::itable_call:
 129.108 +    vmindex = info.itable_index();
 129.109 +    // More importantly, the itable index only works with the method holder.
 129.110 +    receiver_limit = m->method_holder();
 129.111 +    assert(receiver_limit->verify_itable_index(vmindex), "");
 129.112      flags |= IS_METHOD | (JVM_REF_invokeInterface << REFERENCE_KIND_SHIFT);
 129.113 -    receiver_limit = mklass;  // ignore passed-in limit; interfaces are interconvertible
 129.114 -    vmindex = klassItable::compute_itable_index(m);
 129.115 -  } else if (do_dispatch && mklass != receiver_limit && mklass->is_interface()) {
 129.116 +    break;
 129.117 +
 129.118 +  case CallInfo::vtable_call:
 129.119 +    vmindex = info.vtable_index();
 129.120      flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT);
 129.121 -    // it is a miranda method, so m->vtable_index is not what we want
 129.122 -    ResourceMark rm;
 129.123 -    klassVtable* vt = InstanceKlass::cast(receiver_limit)->vtable();
 129.124 -    vmindex = vt->index_of_miranda(m->name(), m->signature());
 129.125 -  } else if (!do_dispatch || m->can_be_statically_bound()) {
 129.126 -    flags |= IS_METHOD | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
 129.127 -  } else {
 129.128 -    flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT);
 129.129 -    vmindex = m->vtable_index();
 129.130 +    assert(receiver_limit->is_subtype_of(m->method_holder()), "virtual call must be type-safe");
 129.131 +    break;
 129.132 +
 129.133 +  case CallInfo::direct_call:
 129.134 +    vmindex = Method::nonvirtual_vtable_index;
 129.135 +    if (m->is_static()) {
 129.136 +      flags |= IS_METHOD      | (JVM_REF_invokeStatic  << REFERENCE_KIND_SHIFT);
 129.137 +    } else if (m->is_initializer()) {
 129.138 +      flags |= IS_CONSTRUCTOR | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
 129.139 +      assert(receiver_limit == m->method_holder(), "constructor call must be exactly typed");
 129.140 +    } else {
 129.141 +      flags |= IS_METHOD      | (JVM_REF_invokeSpecial << REFERENCE_KIND_SHIFT);
 129.142 +      assert(receiver_limit->is_subtype_of(m->method_holder()), "special call must be type-safe");
 129.143 +    }
 129.144 +    break;
 129.145 +
 129.146 +  default:  assert(false, "bad CallInfo");  return NULL;
 129.147    }
 129.148  
 129.149    // @CallerSensitive annotation detected
 129.150 @@ -221,7 +218,7 @@
 129.151  
 129.152    oop mname_oop = mname();
 129.153    java_lang_invoke_MemberName::set_flags(   mname_oop, flags);
 129.154 -  java_lang_invoke_MemberName::set_vmtarget(mname_oop, m);
 129.155 +  java_lang_invoke_MemberName::set_vmtarget(mname_oop, m());
 129.156    java_lang_invoke_MemberName::set_vmindex( mname_oop, vmindex);   // vtable/itable index
 129.157    java_lang_invoke_MemberName::set_clazz(   mname_oop, receiver_limit->java_mirror());
 129.158    // Note:  name and type can be lazily computed by resolve_MemberName,
 129.159 @@ -237,59 +234,19 @@
 129.160    return mname();
 129.161  }
 129.162  
 129.163 -Handle MethodHandles::init_method_MemberName(Handle mname, CallInfo& info, TRAPS) {
 129.164 -  Handle empty;
 129.165 -  if (info.resolved_appendix().not_null()) {
 129.166 -    // The resolved MemberName must not be accompanied by an appendix argument,
 129.167 -    // since there is no way to bind this value into the MemberName.
 129.168 -    // Caller is responsible to prevent this from happening.
 129.169 -    THROW_MSG_(vmSymbols::java_lang_InternalError(), "appendix", empty);
 129.170 -  }
 129.171 -  methodHandle m = info.resolved_method();
 129.172 -  KlassHandle defc = info.resolved_klass();
 129.173 -  int vmindex = Method::invalid_vtable_index;
 129.174 -  if (defc->is_interface() && m->method_holder()->is_interface()) {
 129.175 -    // static interface methods do not reference vtable or itable
 129.176 -    if (m->is_static()) {
 129.177 -      vmindex = Method::nonvirtual_vtable_index;
 129.178 -    }
 129.179 -    // interface methods invoked via invokespecial also
 129.180 -    // do not reference vtable or itable.
 129.181 -    int ref_kind = ((java_lang_invoke_MemberName::flags(mname()) >>
 129.182 -                     REFERENCE_KIND_SHIFT) & REFERENCE_KIND_MASK);
 129.183 -    if (ref_kind == JVM_REF_invokeSpecial) {
 129.184 -      vmindex = Method::nonvirtual_vtable_index;
 129.185 -    }
 129.186 -    // If neither m is static nor ref_kind is invokespecial,
 129.187 -    // set it to itable index.
 129.188 -    if (vmindex == Method::invalid_vtable_index) {
 129.189 -      // LinkResolver does not report itable indexes!  (fix this?)
 129.190 -      vmindex = klassItable::compute_itable_index(m());
 129.191 -    }
 129.192 -  } else if (m->can_be_statically_bound()) {
 129.193 -    // LinkResolver reports vtable index even for final methods!
 129.194 -    vmindex = Method::nonvirtual_vtable_index;
 129.195 -  } else {
 129.196 -    vmindex = info.vtable_index();
 129.197 -  }
 129.198 -  oop res = init_method_MemberName(mname, m(), (vmindex >= 0), defc());
 129.199 -  assert(res == NULL || (java_lang_invoke_MemberName::vmindex(res) == vmindex), "");
 129.200 -  return Handle(THREAD, res);
 129.201 -}
 129.202 -
 129.203 -oop MethodHandles::init_field_MemberName(Handle mname, KlassHandle field_holder,
 129.204 -                                         AccessFlags mods, oop type, oop name,
 129.205 -                                         intptr_t offset, bool is_setter) {
 129.206 -  int flags = (jushort)( mods.as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS );
 129.207 -  flags |= IS_FIELD | ((mods.is_static() ? JVM_REF_getStatic : JVM_REF_getField) << REFERENCE_KIND_SHIFT);
 129.208 +oop MethodHandles::init_field_MemberName(Handle mname, fieldDescriptor& fd, bool is_setter) {
 129.209 +  int flags = (jushort)( fd.access_flags().as_short() & JVM_RECOGNIZED_FIELD_MODIFIERS );
 129.210 +  flags |= IS_FIELD | ((fd.is_static() ? JVM_REF_getStatic : JVM_REF_getField) << REFERENCE_KIND_SHIFT);
 129.211    if (is_setter)  flags += ((JVM_REF_putField - JVM_REF_getField) << REFERENCE_KIND_SHIFT);
 129.212 -  Metadata* vmtarget = field_holder();
 129.213 -  int vmindex  = offset;  // determines the field uniquely when combined with static bit
 129.214 +  Metadata* vmtarget = fd.field_holder();
 129.215 +  int vmindex        = fd.offset();  // determines the field uniquely when combined with static bit
 129.216    oop mname_oop = mname();
 129.217    java_lang_invoke_MemberName::set_flags(mname_oop,    flags);
 129.218    java_lang_invoke_MemberName::set_vmtarget(mname_oop, vmtarget);
 129.219    java_lang_invoke_MemberName::set_vmindex(mname_oop,  vmindex);
 129.220 -  java_lang_invoke_MemberName::set_clazz(mname_oop,    field_holder->java_mirror());
 129.221 +  java_lang_invoke_MemberName::set_clazz(mname_oop,    fd.field_holder()->java_mirror());
 129.222 +  oop type = field_signature_type_or_null(fd.signature());
 129.223 +  oop name = field_name_or_null(fd.name());
 129.224    if (name != NULL)
 129.225      java_lang_invoke_MemberName::set_name(mname_oop,   name);
 129.226    if (type != NULL)
 129.227 @@ -305,19 +262,6 @@
 129.228    return mname();
 129.229  }
 129.230  
 129.231 -Handle MethodHandles::init_field_MemberName(Handle mname, FieldAccessInfo& info, TRAPS) {
 129.232 -  return Handle();
 129.233 -#if 0 // FIXME
 129.234 -  KlassHandle field_holder = info.klass();
 129.235 -  intptr_t    field_offset = info.field_offset();
 129.236 -  return init_field_MemberName(mname_oop, field_holder(),
 129.237 -                               info.access_flags(),
 129.238 -                               type, name,
 129.239 -                               field_offset, false /*is_setter*/);
 129.240 -#endif
 129.241 -}
 129.242 -
 129.243 -
 129.244  // JVM 2.9 Special Methods:
 129.245  // A method is signature polymorphic if and only if all of the following conditions hold :
 129.246  // * It is declared in the java.lang.invoke.MethodHandle class.
 129.247 @@ -573,12 +517,12 @@
 129.248    return SystemDictionary::Object_klass()->java_mirror();
 129.249  }
 129.250  
 129.251 -static oop field_name_or_null(Symbol* s) {
 129.252 +oop MethodHandles::field_name_or_null(Symbol* s) {
 129.253    if (s == NULL)  return NULL;
 129.254    return StringTable::lookup(s);
 129.255  }
 129.256  
 129.257 -static oop field_signature_type_or_null(Symbol* s) {
 129.258 +oop MethodHandles::field_signature_type_or_null(Symbol* s) {
 129.259    if (s == NULL)  return NULL;
 129.260    BasicType bt = FieldType::basic_type(s);
 129.261    if (is_java_primitive(bt)) {
 129.262 @@ -701,7 +645,14 @@
 129.263            return empty;
 129.264          }
 129.265        }
 129.266 -      return init_method_MemberName(mname, result, THREAD);
 129.267 +      if (result.resolved_appendix().not_null()) {
 129.268 +        // The resolved MemberName must not be accompanied by an appendix argument,
 129.269 +        // since there is no way to bind this value into the MemberName.
 129.270 +        // Caller is responsible to prevent this from happening.
 129.271 +        THROW_MSG_(vmSymbols::java_lang_InternalError(), "appendix", empty);
 129.272 +      }
 129.273 +      oop mname2 = init_method_MemberName(mname, result);
 129.274 +      return Handle(THREAD, mname2);
 129.275      }
 129.276    case IS_CONSTRUCTOR:
 129.277      {
 129.278 @@ -719,22 +670,21 @@
 129.279          }
 129.280        }
 129.281        assert(result.is_statically_bound(), "");
 129.282 -      return init_method_MemberName(mname, result, THREAD);
 129.283 +      oop mname2 = init_method_MemberName(mname, result);
 129.284 +      return Handle(THREAD, mname2);
 129.285      }
 129.286    case IS_FIELD:
 129.287      {
 129.288 -      // This is taken from LinkResolver::resolve_field, sans access checks.
 129.289 -      fieldDescriptor fd; // find_field initializes fd if found
 129.290 -      KlassHandle sel_klass(THREAD, InstanceKlass::cast(defc())->find_field(name, type, &fd));
 129.291 -      // check if field exists; i.e., if a klass containing the field def has been selected
 129.292 -      if (sel_klass.is_null())  return empty;  // should not happen
 129.293 -      oop type = field_signature_type_or_null(fd.signature());
 129.294 -      oop name = field_name_or_null(fd.name());
 129.295 -      bool is_setter = (ref_kind_is_valid(ref_kind) && ref_kind_is_setter(ref_kind));
 129.296 -      mname = Handle(THREAD,
 129.297 -                     init_field_MemberName(mname, sel_klass,
 129.298 -                                           fd.access_flags(), type, name, fd.offset(), is_setter));
 129.299 -      return mname;
 129.300 +      fieldDescriptor result; // find_field initializes fd if found
 129.301 +      {
 129.302 +        assert(!HAS_PENDING_EXCEPTION, "");
 129.303 +        LinkResolver::resolve_field(result, defc, name, type, KlassHandle(), Bytecodes::_nop, false, false, THREAD);
 129.304 +        if (HAS_PENDING_EXCEPTION) {
 129.305 +          return empty;
 129.306 +        }
 129.307 +      }
 129.308 +      oop mname2 = init_field_MemberName(mname, result, ref_kind_is_setter(ref_kind));
 129.309 +      return Handle(THREAD, mname2);
 129.310      }
 129.311    default:
 129.312      THROW_MSG_(vmSymbols::java_lang_InternalError(), "unrecognized MemberName format", empty);
 129.313 @@ -793,7 +743,6 @@
 129.314      }
 129.315    case IS_FIELD:
 129.316      {
 129.317 -      // This is taken from LinkResolver::resolve_field, sans access checks.
 129.318        assert(vmtarget->is_klass(), "field vmtarget is Klass*");
 129.319        if (!((Klass*) vmtarget)->oop_is_instance())  break;
 129.320        instanceKlassHandle defc(THREAD, (Klass*) vmtarget);
 129.321 @@ -872,11 +821,7 @@
 129.322          Handle result(thread, results->obj_at(rfill++));
 129.323          if (!java_lang_invoke_MemberName::is_instance(result()))
 129.324            return -99;  // caller bug!
 129.325 -        oop type = field_signature_type_or_null(st.signature());
 129.326 -        oop name = field_name_or_null(st.name());
 129.327 -        oop saved = MethodHandles::init_field_MemberName(result, st.klass(),
 129.328 -                                                         st.access_flags(), type, name,
 129.329 -                                                         st.offset());
 129.330 +        oop saved = MethodHandles::init_field_MemberName(result, st.field_descriptor());
 129.331          if (saved != result())
 129.332            results->obj_at_put(rfill-1, saved);  // show saved instance to user
 129.333        } else if (++overflow >= overflow_limit) {
 129.334 @@ -926,7 +871,8 @@
 129.335          Handle result(thread, results->obj_at(rfill++));
 129.336          if (!java_lang_invoke_MemberName::is_instance(result()))
 129.337            return -99;  // caller bug!
 129.338 -        oop saved = MethodHandles::init_method_MemberName(result, m, true, NULL);
 129.339 +        CallInfo info(m);
 129.340 +        oop saved = MethodHandles::init_method_MemberName(result, info);
 129.341          if (saved != result())
 129.342            results->obj_at_put(rfill-1, saved);  // show saved instance to user
 129.343        } else if (++overflow >= overflow_limit) {
 129.344 @@ -1227,7 +1173,8 @@
 129.345      x = ((Klass*) vmtarget)->java_mirror();
 129.346    } else if (vmtarget->is_method()) {
 129.347      Handle mname2 = MethodHandles::new_MemberName(CHECK_NULL);
 129.348 -    x = MethodHandles::init_method_MemberName(mname2, (Method*)vmtarget, false, NULL);
 129.349 +    CallInfo info((Method*)vmtarget);
 129.350 +    x = MethodHandles::init_method_MemberName(mname2, info);
 129.351    }
 129.352    result->obj_at_put(1, x);
 129.353    return JNIHandles::make_local(env, result());
   130.1 --- a/src/share/vm/prims/methodHandles.hpp	Fri Sep 27 13:49:57 2013 -0400
   130.2 +++ b/src/share/vm/prims/methodHandles.hpp	Fri Sep 27 13:53:43 2013 -0400
   130.3 @@ -49,19 +49,18 @@
   130.4    // Adapters.
   130.5    static MethodHandlesAdapterBlob* _adapter_code;
   130.6  
   130.7 +  // utility functions for reifying names and types
   130.8 +  static oop field_name_or_null(Symbol* s);
   130.9 +  static oop field_signature_type_or_null(Symbol* s);
  130.10 +
  130.11   public:
  130.12    // working with member names
  130.13    static Handle resolve_MemberName(Handle mname, TRAPS); // compute vmtarget/vmindex from name/type
  130.14    static void expand_MemberName(Handle mname, int suppress, TRAPS);  // expand defc/name/type if missing
  130.15    static Handle new_MemberName(TRAPS);  // must be followed by init_MemberName
  130.16    static oop init_MemberName(Handle mname_h, Handle target_h); // compute vmtarget/vmindex from target
  130.17 -  static oop init_method_MemberName(Handle mname_h, Method* m, bool do_dispatch,
  130.18 -                                    KlassHandle receiver_limit_h);
  130.19 -  static oop init_field_MemberName(Handle mname_h, KlassHandle field_holder_h,
  130.20 -                                   AccessFlags mods, oop type, oop name,
  130.21 -                                   intptr_t offset, bool is_setter = false);
  130.22 -  static Handle init_method_MemberName(Handle mname_h, CallInfo& info, TRAPS);
  130.23 -  static Handle init_field_MemberName(Handle mname_h, FieldAccessInfo& info, TRAPS);
  130.24 +  static oop init_field_MemberName(Handle mname_h, fieldDescriptor& fd, bool is_setter = false);
  130.25 +  static oop init_method_MemberName(Handle mname_h, CallInfo& info);
  130.26    static int method_ref_kind(Method* m, bool do_dispatch_if_possible = true);
  130.27    static int find_MemberNames(KlassHandle k, Symbol* name, Symbol* sig,
  130.28                                int mflags, KlassHandle caller,
   131.1 --- a/src/share/vm/prims/whitebox.cpp	Fri Sep 27 13:49:57 2013 -0400
   131.2 +++ b/src/share/vm/prims/whitebox.cpp	Fri Sep 27 13:53:43 2013 -0400
   131.3 @@ -33,6 +33,7 @@
   131.4  #include "prims/whitebox.hpp"
   131.5  #include "prims/wbtestmethods/parserTests.hpp"
   131.6  
   131.7 +#include "runtime/arguments.hpp"
   131.8  #include "runtime/interfaceSupport.hpp"
   131.9  #include "runtime/os.hpp"
  131.10  #include "utilities/debug.hpp"
  131.11 @@ -94,6 +95,11 @@
  131.12    return closure.found();
  131.13  WB_END
  131.14  
  131.15 +WB_ENTRY(jlong, WB_GetCompressedOopsMaxHeapSize(JNIEnv* env, jobject o)) {
  131.16 +  return (jlong)Arguments::max_heap_for_compressed_oops();
  131.17 +}
  131.18 +WB_END
  131.19 +
  131.20  WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
  131.21    CollectorPolicy * p = Universe::heap()->collector_policy();
  131.22    gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap "
  131.23 @@ -436,6 +442,8 @@
  131.24        CC"(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
  131.25        (void*) &WB_ParseCommandLine
  131.26    },
  131.27 +  {CC"getCompressedOopsMaxHeapSize", CC"()J",
  131.28 +      (void*)&WB_GetCompressedOopsMaxHeapSize},
  131.29    {CC"printHeapSizes",     CC"()V",                   (void*)&WB_PrintHeapSizes    },
  131.30  #if INCLUDE_ALL_GCS
  131.31    {CC"g1InConcurrentMark", CC"()Z",                   (void*)&WB_G1InConcurrentMark},
   132.1 --- a/src/share/vm/runtime/arguments.cpp	Fri Sep 27 13:49:57 2013 -0400
   132.2 +++ b/src/share/vm/runtime/arguments.cpp	Fri Sep 27 13:53:43 2013 -0400
   132.3 @@ -28,6 +28,7 @@
   132.4  #include "compiler/compilerOracle.hpp"
   132.5  #include "memory/allocation.inline.hpp"
   132.6  #include "memory/cardTableRS.hpp"
   132.7 +#include "memory/genCollectedHeap.hpp"
   132.8  #include "memory/referenceProcessor.hpp"
   132.9  #include "memory/universe.inline.hpp"
  132.10  #include "oops/oop.inline.hpp"
  132.11 @@ -54,6 +55,8 @@
  132.12  #endif
  132.13  #if INCLUDE_ALL_GCS
  132.14  #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  132.15 +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
  132.16 +#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
  132.17  #endif // INCLUDE_ALL_GCS
  132.18  
  132.19  // Note: This is a special bug reporting site for the JVM
  132.20 @@ -90,6 +93,7 @@
  132.21  SystemProperty* Arguments::_system_properties   = NULL;
  132.22  const char*  Arguments::_gc_log_filename        = NULL;
  132.23  bool   Arguments::_has_profile                  = false;
  132.24 +size_t Arguments::_conservative_max_heap_alignment = 0;
  132.25  uintx  Arguments::_min_heap_size                = 0;
  132.26  Arguments::Mode Arguments::_mode                = _mixed;
  132.27  bool   Arguments::_java_compiler                = false;
  132.28 @@ -1096,6 +1100,7 @@
  132.29    }
  132.30  }
  132.31  
  132.32 +#if defined(COMPILER2) || defined(_LP64) || !INCLUDE_CDS
  132.33  // Conflict: required to use shared spaces (-Xshare:on), but
  132.34  // incompatible command line options were chosen.
  132.35  
  132.36 @@ -1108,6 +1113,7 @@
  132.37      FLAG_SET_DEFAULT(UseSharedSpaces, false);
  132.38    }
  132.39  }
  132.40 +#endif
  132.41  
  132.42  void Arguments::set_tiered_flags() {
  132.43    // With tiered, set default policy to AdvancedThresholdPolicy, which is 3.
  132.44 @@ -1391,10 +1397,17 @@
  132.45    return true;
  132.46  }
  132.47  
  132.48 -inline uintx max_heap_for_compressed_oops() {
  132.49 +uintx Arguments::max_heap_for_compressed_oops() {
  132.50    // Avoid sign flip.
  132.51    assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
  132.52 -  LP64_ONLY(return OopEncodingHeapMax - os::vm_page_size());
  132.53 +  // We need to fit both the NULL page and the heap into the memory budget, while
  132.54 +  // keeping alignment constraints of the heap. To guarantee the latter, as the
  132.55 +  // NULL page is located before the heap, we pad the NULL page to the conservative
  132.56 +  // maximum alignment that the GC may ever impose upon the heap.
  132.57 +  size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(),
  132.58 +    Arguments::conservative_max_heap_alignment());
  132.59 +
  132.60 +  LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
  132.61    NOT_LP64(ShouldNotReachHere(); return 0);
  132.62  }
  132.63  
  132.64 @@ -1439,7 +1452,7 @@
  132.65      if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
  132.66        warning("Max heap size too large for Compressed Oops");
  132.67        FLAG_SET_DEFAULT(UseCompressedOops, false);
  132.68 -      FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
  132.69 +      FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
  132.70      }
  132.71    }
  132.72  #endif // _LP64
  132.73 @@ -1452,22 +1465,22 @@
  132.74  void Arguments::set_use_compressed_klass_ptrs() {
  132.75  #ifndef ZERO
  132.76  #ifdef _LP64
  132.77 -  // UseCompressedOops must be on for UseCompressedKlassPointers to be on.
  132.78 +  // UseCompressedOops must be on for UseCompressedClassPointers to be on.
  132.79    if (!UseCompressedOops) {
  132.80 -    if (UseCompressedKlassPointers) {
  132.81 -      warning("UseCompressedKlassPointers requires UseCompressedOops");
  132.82 +    if (UseCompressedClassPointers) {
  132.83 +      warning("UseCompressedClassPointers requires UseCompressedOops");
  132.84      }
  132.85 -    FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
  132.86 +    FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
  132.87    } else {
  132.88 -    // Turn on UseCompressedKlassPointers too
  132.89 -    if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) {
  132.90 -      FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true);
  132.91 +    // Turn on UseCompressedClassPointers too
  132.92 +    if (FLAG_IS_DEFAULT(UseCompressedClassPointers)) {
  132.93 +      FLAG_SET_ERGO(bool, UseCompressedClassPointers, true);
  132.94      }
  132.95 -    // Check the ClassMetaspaceSize to make sure we use compressed klass ptrs.
  132.96 -    if (UseCompressedKlassPointers) {
  132.97 -      if (ClassMetaspaceSize > KlassEncodingMetaspaceMax) {
  132.98 -        warning("Class metaspace size is too large for UseCompressedKlassPointers");
  132.99 -        FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
 132.100 +    // Check the CompressedClassSpaceSize to make sure we use compressed klass ptrs.
 132.101 +    if (UseCompressedClassPointers) {
 132.102 +      if (CompressedClassSpaceSize > KlassEncodingMetaspaceMax) {
 132.103 +        warning("CompressedClassSpaceSize is too large for UseCompressedClassPointers");
 132.104 +        FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
 132.105        }
 132.106      }
 132.107    }
 132.108 @@ -1475,6 +1488,23 @@
 132.109  #endif // !ZERO
 132.110  }
 132.111  
 132.112 +void Arguments::set_conservative_max_heap_alignment() {
 132.113 +  // The conservative maximum required alignment for the heap is the maximum of
 132.114 +  // the alignments imposed by several sources: any requirements from the heap
 132.115 +  // itself, the collector policy and the maximum page size we may run the VM
 132.116 +  // with.
 132.117 +  size_t heap_alignment = GenCollectedHeap::conservative_max_heap_alignment();
 132.118 +#if INCLUDE_ALL_GCS
 132.119 +  if (UseParallelGC) {
 132.120 +    heap_alignment = ParallelScavengeHeap::conservative_max_heap_alignment();
 132.121 +  } else if (UseG1GC) {
 132.122 +    heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
 132.123 +  }
 132.124 +#endif // INCLUDE_ALL_GCS
 132.125 +  _conservative_max_heap_alignment = MAX3(heap_alignment, os::max_page_size(),
 132.126 +    CollectorPolicy::compute_max_alignment());
 132.127 +}
 132.128 +
 132.129  void Arguments::set_ergonomics_flags() {
 132.130  
 132.131    if (os::is_server_class_machine()) {
 132.132 @@ -1492,16 +1522,20 @@
 132.133          FLAG_SET_ERGO(bool, UseParallelGC, true);
 132.134        }
 132.135      }
 132.136 -    // Shared spaces work fine with other GCs but causes bytecode rewriting
 132.137 -    // to be disabled, which hurts interpreter performance and decreases
 132.138 -    // server performance.   On server class machines, keep the default
 132.139 -    // off unless it is asked for.  Future work: either add bytecode rewriting
 132.140 -    // at link time, or rewrite bytecodes in non-shared methods.
 132.141 -    if (!DumpSharedSpaces && !RequireSharedSpaces &&
 132.142 -        (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
 132.143 -      no_shared_spaces();
 132.144 -    }
 132.145    }
 132.146 +#ifdef COMPILER2
 132.147 +  // Shared spaces work fine with other GCs but causes bytecode rewriting
 132.148 +  // to be disabled, which hurts interpreter performance and decreases
 132.149 +  // server performance.  When -server is specified, keep the default off
 132.150 +  // unless it is asked for.  Future work: either add bytecode rewriting
 132.151 +  // at link time, or rewrite bytecodes in non-shared methods.
 132.152 +  if (!DumpSharedSpaces && !RequireSharedSpaces &&
 132.153 +      (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
 132.154 +    no_shared_spaces();
 132.155 +  }
 132.156 +#endif
 132.157 +
 132.158 +  set_conservative_max_heap_alignment();
 132.159  
 132.160  #ifndef ZERO
 132.161  #ifdef _LP64
 132.162 @@ -1839,7 +1873,7 @@
 132.163          (NumberOfGCLogFiles == 0)  ||
 132.164          (GCLogFileSize == 0)) {
 132.165        jio_fprintf(defaultStream::output_stream(),
 132.166 -                  "To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files> -XX:GCLogFileSize=<num_of_size>\n"
 132.167 +                  "To enable GC log rotation, use -Xloggc:<filename> -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=<num_of_files> -XX:GCLogFileSize=<num_of_size>[k|K|m|M|g|G]\n"
 132.168                    "where num_of_file > 0 and num_of_size > 0\n"
 132.169                    "GC log rotation is turned off\n");
 132.170        UseGCLogFileRotation = false;
 132.171 @@ -1853,6 +1887,51 @@
 132.172    }
 132.173  }
 132.174  
 132.175 +// This function is called for -Xloggc:<filename>, it can be used
 132.176 +// to check if a given file name(or string) conforms to the following
 132.177 +// specification:
 132.178 +// A valid string only contains "[A-Z][a-z][0-9].-_%[p|t]"
 132.179 +// %p and %t only allowed once. We only limit usage of filename not path
 132.180 +bool is_filename_valid(const char *file_name) {
 132.181 +  const char* p = file_name;
 132.182 +  char file_sep = os::file_separator()[0];
 132.183 +  const char* cp;
 132.184 +  // skip prefix path
 132.185 +  for (cp = file_name; *cp != '\0'; cp++) {
 132.186 +    if (*cp == '/' || *cp == file_sep) {
 132.187 +      p = cp + 1;
 132.188 +    }
 132.189 +  }
 132.190 +
 132.191 +  int count_p = 0;
 132.192 +  int count_t = 0;
 132.193 +  while (*p != '\0') {
 132.194 +    if ((*p >= '0' && *p <= '9') ||
 132.195 +        (*p >= 'A' && *p <= 'Z') ||
 132.196 +        (*p >= 'a' && *p <= 'z') ||
 132.197 +         *p == '-'               ||
 132.198 +         *p == '_'               ||
 132.199 +         *p == '.') {
 132.200 +       p++;
 132.201 +       continue;
 132.202 +    }
 132.203 +    if (*p == '%') {
 132.204 +      if(*(p + 1) == 'p') {
 132.205 +        p += 2;
 132.206 +        count_p ++;
 132.207 +        continue;
 132.208 +      }
 132.209 +      if (*(p + 1) == 't') {
 132.210 +        p += 2;
 132.211 +        count_t ++;
 132.212 +        continue;
 132.213 +      }
 132.214 +    }
 132.215 +    return false;
 132.216 +  }
 132.217 +  return count_p < 2 && count_t < 2;
 132.218 +}
 132.219 +
 132.220  // Check consistency of GC selection
 132.221  bool Arguments::check_gc_consistency() {
 132.222    check_gclog_consistency();
 132.223 @@ -2148,8 +2227,8 @@
 132.224  
 132.225    status = status && verify_object_alignment();
 132.226  
 132.227 -  status = status && verify_interval(ClassMetaspaceSize, 1*M, 3*G,
 132.228 -                                      "ClassMetaspaceSize");
 132.229 +  status = status && verify_interval(CompressedClassSpaceSize, 1*M, 3*G,
 132.230 +                                      "CompressedClassSpaceSize");
 132.231  
 132.232    status = status && verify_interval(MarkStackSizeMax,
 132.233                                    1, (max_jint - 1), "MarkStackSizeMax");
 132.234 @@ -2364,21 +2443,6 @@
 132.235      return result;
 132.236    }
 132.237  
 132.238 -  if (AggressiveOpts) {
 132.239 -    // Insert alt-rt.jar between user-specified bootclasspath
 132.240 -    // prefix and the default bootclasspath.  os::set_boot_path()
 132.241 -    // uses meta_index_dir as the default bootclasspath directory.
 132.242 -    const char* altclasses_jar = "alt-rt.jar";
 132.243 -    size_t altclasses_path_len = strlen(get_meta_index_dir()) + 1 +
 132.244 -                                 strlen(altclasses_jar);
 132.245 -    char* altclasses_path = NEW_C_HEAP_ARRAY(char, altclasses_path_len, mtInternal);
 132.246 -    strcpy(altclasses_path, get_meta_index_dir());
 132.247 -    strcat(altclasses_path, altclasses_jar);
 132.248 -    scp.add_suffix_to_prefix(altclasses_path);
 132.249 -    scp_assembly_required = true;
 132.250 -    FREE_C_HEAP_ARRAY(char, altclasses_path, mtInternal);
 132.251 -  }
 132.252 -
 132.253    // Parse _JAVA_OPTIONS environment variable (if present) (mimics classic VM)
 132.254    result = parse_java_options_environment_variable(&scp, &scp_assembly_required);
 132.255    if (result != JNI_OK) {
 132.256 @@ -2806,6 +2870,13 @@
 132.257        // ostream_init_log(), when called will use this filename
 132.258        // to initialize a fileStream.
 132.259        _gc_log_filename = strdup(tail);
 132.260 +     if (!is_filename_valid(_gc_log_filename)) {
 132.261 +       jio_fprintf(defaultStream::output_stream(),
 132.262 +                  "Invalid file name for use with -Xloggc: Filename can only contain the "
 132.263 +                  "characters [A-Z][a-z][0-9]-_.%%[p|t] but it has been %s\n"
 132.264 +                  "Note %%p or %%t can only be used once\n", _gc_log_filename);
 132.265 +        return JNI_EINVAL;
 132.266 +      }
 132.267        FLAG_SET_CMDLINE(bool, PrintGC, true);
 132.268        FLAG_SET_CMDLINE(bool, PrintGCTimeStamps, true);
 132.269  
 132.270 @@ -3274,13 +3345,13 @@
 132.271      }
 132.272      UseSharedSpaces = false;
 132.273  #ifdef _LP64
 132.274 -    if (!UseCompressedOops || !UseCompressedKlassPointers) {
 132.275 +    if (!UseCompressedOops || !UseCompressedClassPointers) {
 132.276        vm_exit_during_initialization(
 132.277 -        "Cannot dump shared archive when UseCompressedOops or UseCompressedKlassPointers is off.", NULL);
 132.278 +        "Cannot dump shared archive when UseCompressedOops or UseCompressedClassPointers is off.", NULL);
 132.279      }
 132.280    } else {
 132.281 -    // UseCompressedOops and UseCompressedKlassPointers must be on for UseSharedSpaces.
 132.282 -    if (!UseCompressedOops || !UseCompressedKlassPointers) {
 132.283 +    // UseCompressedOops and UseCompressedClassPointers must be on for UseSharedSpaces.
 132.284 +    if (!UseCompressedOops || !UseCompressedClassPointers) {
 132.285        no_shared_spaces();
 132.286      }
 132.287  #endif
 132.288 @@ -3326,6 +3397,33 @@
 132.289    return shared_archive_path;
 132.290  }
 132.291  
 132.292 +#ifndef PRODUCT
 132.293 +// Determine whether LogVMOutput should be implicitly turned on.
 132.294 +static bool use_vm_log() {
 132.295 +  if (LogCompilation || !FLAG_IS_DEFAULT(LogFile) ||
 132.296 +      PrintCompilation || PrintInlining || PrintDependencies || PrintNativeNMethods ||
 132.297 +      PrintDebugInfo || PrintRelocations || PrintNMethods || PrintExceptionHandlers ||
 132.298 +      PrintAssembly || TraceDeoptimization || TraceDependencies ||
 132.299 +      (VerifyDependencies && FLAG_IS_CMDLINE(VerifyDependencies))) {
 132.300 +    return true;
 132.301 +  }
 132.302 +
 132.303 +#ifdef COMPILER1
 132.304 +  if (PrintC1Statistics) {
 132.305 +    return true;
 132.306 +  }
 132.307 +#endif // COMPILER1
 132.308 +
 132.309 +#ifdef COMPILER2
 132.310 +  if (PrintOptoAssembly || PrintOptoStatistics) {
 132.311 +    return true;
 132.312 +  }
 132.313 +#endif // COMPILER2
 132.314 +
 132.315 +  return false;
 132.316 +}
 132.317 +#endif // PRODUCT
 132.318 +
 132.319  // Parse entry point called from JNI_CreateJavaVM
 132.320  
 132.321  jint Arguments::parse(const JavaVMInitArgs* args) {
 132.322 @@ -3506,6 +3604,11 @@
 132.323    no_shared_spaces();
 132.324  #endif // INCLUDE_CDS
 132.325  
 132.326 +  return JNI_OK;
 132.327 +}
 132.328 +
 132.329 +jint Arguments::apply_ergo() {
 132.330 +
 132.331    // Set flags based on ergonomics.
 132.332    set_ergonomics_flags();
 132.333  
 132.334 @@ -3581,7 +3684,7 @@
 132.335    FLAG_SET_DEFAULT(ProfileInterpreter, false);
 132.336    FLAG_SET_DEFAULT(UseBiasedLocking, false);
 132.337    LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
 132.338 -  LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedKlassPointers, false));
 132.339 +  LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedClassPointers, false));
 132.340  #endif // CC_INTERP
 132.341  
 132.342  #ifdef COMPILER2
 132.343 @@ -3610,6 +3713,10 @@
 132.344      DebugNonSafepoints = true;
 132.345    }
 132.346  
 132.347 +  if (FLAG_IS_CMDLINE(CompressedClassSpaceSize) && !UseCompressedClassPointers) {
 132.348 +    warning("Setting CompressedClassSpaceSize has no effect when compressed class pointers are not used");
 132.349 +  }
 132.350 +
 132.351  #ifndef PRODUCT
 132.352    if (CompileTheWorld) {
 132.353      // Force NmethodSweeper to sweep whole CodeCache each time.
 132.354 @@ -3617,7 +3724,13 @@
 132.355        NmethodSweepFraction = 1;
 132.356      }
 132.357    }
 132.358 -#endif
 132.359 +
 132.360 +  if (!LogVMOutput && FLAG_IS_DEFAULT(LogVMOutput)) {
 132.361 +    if (use_vm_log()) {
 132.362 +      LogVMOutput = true;
 132.363 +    }
 132.364 +  }
 132.365 +#endif // PRODUCT
 132.366  
 132.367    if (PrintCommandLineFlags) {
 132.368      CommandLineFlags::printSetFlags(tty);
   133.1 --- a/src/share/vm/runtime/arguments.hpp	Fri Sep 27 13:49:57 2013 -0400
   133.2 +++ b/src/share/vm/runtime/arguments.hpp	Fri Sep 27 13:53:43 2013 -0400
   133.3 @@ -280,6 +280,9 @@
   133.4    // Option flags
   133.5    static bool   _has_profile;
   133.6    static const char*  _gc_log_filename;
   133.7 +  // Value of the conservative maximum heap alignment needed
   133.8 +  static size_t  _conservative_max_heap_alignment;
   133.9 +
  133.10    static uintx  _min_heap_size;
  133.11  
  133.12    // -Xrun arguments
  133.13 @@ -327,6 +330,7 @@
  133.14    // Garbage-First (UseG1GC)
  133.15    static void set_g1_gc_flags();
  133.16    // GC ergonomics
  133.17 +  static void set_conservative_max_heap_alignment();
  133.18    static void set_use_compressed_oops();
  133.19    static void set_use_compressed_klass_ptrs();
  133.20    static void set_ergonomics_flags();
  133.21 @@ -430,8 +434,10 @@
  133.22    static char*  SharedArchivePath;
  133.23  
  133.24   public:
  133.25 -  // Parses the arguments
  133.26 +  // Parses the arguments, first phase
  133.27    static jint parse(const JavaVMInitArgs* args);
  133.28 +  // Apply ergonomics
  133.29 +  static jint apply_ergo();
  133.30    // Adjusts the arguments after the OS have adjusted the arguments
  133.31    static jint adjust_after_os();
  133.32    // Check for consistency in the selection of the garbage collector.
  133.33 @@ -445,6 +451,10 @@
  133.34    // Used by os_solaris
  133.35    static bool process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized);
  133.36  
  133.37 +  static size_t conservative_max_heap_alignment() { return _conservative_max_heap_alignment; }
  133.38 +  // Return the maximum size a heap with compressed oops can take
  133.39 +  static size_t max_heap_for_compressed_oops();
  133.40 +
  133.41    // return a char* array containing all options
  133.42    static char** jvm_flags_array()          { return _jvm_flags_array; }
  133.43    static char** jvm_args_array()           { return _jvm_args_array; }
   134.1 --- a/src/share/vm/runtime/deoptimization.cpp	Fri Sep 27 13:49:57 2013 -0400
   134.2 +++ b/src/share/vm/runtime/deoptimization.cpp	Fri Sep 27 13:53:43 2013 -0400
   134.3 @@ -1751,7 +1751,7 @@
   134.4    else    return trap_state & ~DS_RECOMPILE_BIT;
   134.5  }
   134.6  //---------------------------format_trap_state---------------------------------
   134.7 -// This is used for debugging and diagnostics, including hotspot.log output.
   134.8 +// This is used for debugging and diagnostics, including LogFile output.
   134.9  const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
  134.10                                                int trap_state) {
  134.11    DeoptReason reason      = trap_state_reason(trap_state);
  134.12 @@ -1828,7 +1828,7 @@
  134.13    return buf;
  134.14  }
  134.15  
  134.16 -// This is used for debugging and diagnostics, including hotspot.log output.
  134.17 +// This is used for debugging and diagnostics, including LogFile output.
  134.18  const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
  134.19                                                  int trap_request) {
  134.20    jint unloaded_class_index = trap_request_index(trap_request);
   135.1 --- a/src/share/vm/runtime/fieldDescriptor.cpp	Fri Sep 27 13:49:57 2013 -0400
   135.2 +++ b/src/share/vm/runtime/fieldDescriptor.cpp	Fri Sep 27 13:53:43 2013 -0400
   135.3 @@ -97,18 +97,32 @@
   135.4    return constants()->uncached_string_at(initial_value_index(), CHECK_0);
   135.5  }
   135.6  
   135.7 -void fieldDescriptor::initialize(InstanceKlass* ik, int index) {
   135.8 -  _cp = ik->constants();
   135.9 +void fieldDescriptor::reinitialize(InstanceKlass* ik, int index) {
  135.10 +  if (_cp.is_null() || field_holder() != ik) {
  135.11 +    _cp = constantPoolHandle(Thread::current(), ik->constants());
  135.12 +    // _cp should now reference ik's constant pool; i.e., ik is now field_holder.
  135.13 +    assert(field_holder() == ik, "must be already initialized to this class");
  135.14 +  }
  135.15    FieldInfo* f = ik->field(index);
  135.16    assert(!f->is_internal(), "regular Java fields only");
  135.17  
  135.18    _access_flags = accessFlags_from(f->access_flags());
  135.19    guarantee(f->name_index() != 0 && f->signature_index() != 0, "bad constant pool index for fieldDescriptor");
  135.20    _index = index;
  135.21 +  verify();
  135.22  }
  135.23  
  135.24  #ifndef PRODUCT
  135.25  
  135.26 +void fieldDescriptor::verify() const {
  135.27 +  if (_cp.is_null()) {
  135.28 +    assert(_index == badInt, "constructor must be called");  // see constructor
  135.29 +  } else {
  135.30 +    assert(_index >= 0, "good index");
  135.31 +    assert(_index < field_holder()->java_fields_count(), "oob");
  135.32 +  }
  135.33 +}
  135.34 +
  135.35  void fieldDescriptor::print_on(outputStream* st) const {
  135.36    access_flags().print_on(st);
  135.37    name()->print_value_on(st);
   136.1 --- a/src/share/vm/runtime/fieldDescriptor.hpp	Fri Sep 27 13:49:57 2013 -0400
   136.2 +++ b/src/share/vm/runtime/fieldDescriptor.hpp	Fri Sep 27 13:53:43 2013 -0400
   136.3 @@ -1,5 +1,5 @@
   136.4  /*
   136.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   136.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   136.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   136.8   *
   136.9   * This code is free software; you can redistribute it and/or modify it
  136.10 @@ -53,6 +53,13 @@
  136.11    }
  136.12  
  136.13   public:
  136.14 +  fieldDescriptor() {
  136.15 +    DEBUG_ONLY(_index = badInt);
  136.16 +  }
  136.17 +  fieldDescriptor(InstanceKlass* ik, int index) {
  136.18 +    DEBUG_ONLY(_index = badInt);
  136.19 +    reinitialize(ik, index);
  136.20 +  }
  136.21    Symbol* name() const {
  136.22      return field()->name(_cp);
  136.23    }
  136.24 @@ -112,12 +119,13 @@
  136.25    }
  136.26  
  136.27    // Initialization
  136.28 -  void initialize(InstanceKlass* ik, int index);
  136.29 +  void reinitialize(InstanceKlass* ik, int index);
  136.30  
  136.31    // Print
  136.32    void print() { print_on(tty); }
  136.33    void print_on(outputStream* st) const         PRODUCT_RETURN;
  136.34    void print_on_for(outputStream* st, oop obj)  PRODUCT_RETURN;
  136.35 +  void verify() const                           PRODUCT_RETURN;
  136.36  };
  136.37  
  136.38  #endif // SHARE_VM_RUNTIME_FIELDDESCRIPTOR_HPP
   137.1 --- a/src/share/vm/runtime/frame.cpp	Fri Sep 27 13:49:57 2013 -0400
   137.2 +++ b/src/share/vm/runtime/frame.cpp	Fri Sep 27 13:53:43 2013 -0400
   137.3 @@ -652,7 +652,7 @@
   137.4  // Return whether the frame is in the VM or os indicating a Hotspot problem.
   137.5  // Otherwise, it's likely a bug in the native library that the Java code calls,
   137.6  // hopefully indicating where to submit bugs.
   137.7 -static void print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
   137.8 +void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
   137.9    // C/C++ frame
  137.10    bool in_vm = os::address_is_in_vm(pc);
  137.11    st->print(in_vm ? "V" : "C");
   138.1 --- a/src/share/vm/runtime/frame.hpp	Fri Sep 27 13:49:57 2013 -0400
   138.2 +++ b/src/share/vm/runtime/frame.hpp	Fri Sep 27 13:53:43 2013 -0400
   138.3 @@ -406,6 +406,7 @@
   138.4    void print_on(outputStream* st) const;
   138.5    void interpreter_frame_print_on(outputStream* st) const;
   138.6    void print_on_error(outputStream* st, char* buf, int buflen, bool verbose = false) const;
   138.7 +  static void print_C_frame(outputStream* st, char* buf, int buflen, address pc);
   138.8  
   138.9    // Add annotated descriptions of memory locations belonging to this frame to values
  138.10    void describe(FrameValues& values, int frame_no);
   139.1 --- a/src/share/vm/runtime/globals.hpp	Fri Sep 27 13:49:57 2013 -0400
   139.2 +++ b/src/share/vm/runtime/globals.hpp	Fri Sep 27 13:53:43 2013 -0400
   139.3 @@ -443,8 +443,8 @@
   139.4              "Use 32-bit object references in 64-bit VM  "                   \
   139.5              "lp64_product means flag is always constant in 32 bit VM")      \
   139.6                                                                              \
   139.7 -  lp64_product(bool, UseCompressedKlassPointers, false,                     \
   139.8 -            "Use 32-bit klass pointers in 64-bit VM  "                      \
   139.9 +  lp64_product(bool, UseCompressedClassPointers, false,                     \
  139.10 +            "Use 32-bit class pointers in 64-bit VM  "                      \
  139.11              "lp64_product means flag is always constant in 32 bit VM")      \
  139.12                                                                              \
  139.13    notproduct(bool, CheckCompressedOops, true,                               \
  139.14 @@ -880,7 +880,7 @@
  139.15            "stay alive at the expense of JVM performance")                   \
  139.16                                                                              \
  139.17    diagnostic(bool, LogCompilation, false,                                   \
  139.18 -          "Log compilation activity in detail to hotspot.log or LogFile")   \
  139.19 +          "Log compilation activity in detail to LogFile")                  \
  139.20                                                                              \
  139.21    product(bool, PrintCompilation, false,                                    \
  139.22            "Print compilations")                                             \
  139.23 @@ -2498,16 +2498,17 @@
  139.24           "Print all VM flags with default values and descriptions and exit")\
  139.25                                                                              \
  139.26    diagnostic(bool, SerializeVMOutput, true,                                 \
  139.27 -         "Use a mutex to serialize output to tty and hotspot.log")          \
  139.28 +         "Use a mutex to serialize output to tty and LogFile")              \
  139.29                                                                              \
  139.30    diagnostic(bool, DisplayVMOutput, true,                                   \
  139.31           "Display all VM output on the tty, independently of LogVMOutput")  \
  139.32                                                                              \
  139.33 -  diagnostic(bool, LogVMOutput, trueInDebug,                                \
  139.34 -         "Save VM output to hotspot.log, or to LogFile")                    \
  139.35 +  diagnostic(bool, LogVMOutput, false,                                      \
  139.36 +         "Save VM output to LogFile")                                       \
  139.37                                                                              \
  139.38    diagnostic(ccstr, LogFile, NULL,                                          \
  139.39 -         "If LogVMOutput is on, save VM output to this file [hotspot.log]") \
  139.40 +         "If LogVMOutput or LogCompilation is on, save VM output to "       \
  139.41 +         "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)") \
  139.42                                                                              \
  139.43    product(ccstr, ErrorFile, NULL,                                           \
  139.44           "If an error occurs, save the error data to this file "            \
  139.45 @@ -2525,6 +2526,9 @@
  139.46    product(bool, PrintStringTableStatistics, false,                          \
  139.47            "print statistics about the StringTable and SymbolTable")         \
  139.48                                                                              \
  139.49 +  diagnostic(bool, VerifyStringTableAtExit, false,                          \
  139.50 +          "verify StringTable contents at exit")                            \
  139.51 +                                                                            \
  139.52    notproduct(bool, PrintSymbolTableSizeHistogram, false,                    \
  139.53            "print histogram of the symbol table")                            \
  139.54                                                                              \
  139.55 @@ -3039,9 +3043,9 @@
  139.56    product(uintx, MaxMetaspaceSize, max_uintx,                               \
  139.57            "Maximum size of Metaspaces (in bytes)")                          \
  139.58                                                                              \
  139.59 -  product(uintx, ClassMetaspaceSize, 1*G,                                   \
  139.60 -          "Maximum size of InstanceKlass area in Metaspace used for "       \
  139.61 -          "UseCompressedKlassPointers")                                     \
  139.62 +  product(uintx, CompressedClassSpaceSize, 1*G,                             \
  139.63 +          "Maximum size of class area in Metaspace when compressed "        \
  139.64 +          "class pointers are used")                                        \
  139.65                                                                              \
  139.66    product(uintx, MinHeapFreeRatio,    40,                                   \
  139.67            "Min percentage of heap free after GC to avoid expansion")        \
   140.1 --- a/src/share/vm/runtime/handles.hpp	Fri Sep 27 13:49:57 2013 -0400
   140.2 +++ b/src/share/vm/runtime/handles.hpp	Fri Sep 27 13:53:43 2013 -0400
   140.3 @@ -136,7 +136,7 @@
   140.4  // Specific Handles for different oop types
   140.5  #define DEF_METADATA_HANDLE(name, type)          \
   140.6    class name##Handle;                            \
   140.7 -  class name##Handle {                           \
   140.8 +  class name##Handle : public StackObj {         \
   140.9      type*     _value;                            \
  140.10      Thread*   _thread;                           \
  140.11     protected:                                    \
  140.12 @@ -175,7 +175,7 @@
  140.13  // Writing this class explicitly, since DEF_METADATA_HANDLE(klass) doesn't
  140.14  // provide the necessary Klass* <-> Klass* conversions. This Klass
  140.15  // could be removed when we don't have the Klass* typedef anymore.
  140.16 -class KlassHandle {
  140.17 +class KlassHandle : public StackObj {
  140.18    Klass* _value;
  140.19   protected:
  140.20     Klass* obj() const          { return _value; }
   141.1 --- a/src/share/vm/runtime/handles.inline.hpp	Fri Sep 27 13:49:57 2013 -0400
   141.2 +++ b/src/share/vm/runtime/handles.inline.hpp	Fri Sep 27 13:53:43 2013 -0400
   141.3 @@ -79,6 +79,7 @@
   141.4      } else {                                                           \
   141.5        _thread = Thread::current();                                     \
   141.6      }                                                                  \
   141.7 +    assert (_thread->is_in_stack((address)this), "not on stack?");     \
   141.8      _thread->metadata_handles()->push((Metadata*)_value);              \
   141.9    } else {                                                             \
  141.10      _thread = NULL;                                                    \
  141.11 @@ -95,6 +96,7 @@
  141.12      } else {                                                           \
  141.13        _thread = Thread::current();                                     \
  141.14      }                                                                  \
  141.15 +    assert (_thread->is_in_stack((address)this), "not on stack?");     \
  141.16      _thread->metadata_handles()->push((Metadata*)_value);              \
  141.17    } else {                                                             \
  141.18      _thread = NULL;                                                    \
   142.1 --- a/src/share/vm/runtime/java.cpp	Fri Sep 27 13:49:57 2013 -0400
   142.2 +++ b/src/share/vm/runtime/java.cpp	Fri Sep 27 13:53:43 2013 -0400
   142.3 @@ -544,6 +544,19 @@
   142.4    // it will run into trouble when system destroys static variables.
   142.5    MemTracker::shutdown(MemTracker::NMT_normal);
   142.6  
   142.7 +  if (VerifyStringTableAtExit) {
   142.8 +    int fail_cnt = 0;
   142.9 +    {
  142.10 +      MutexLocker ml(StringTable_lock);
  142.11 +      fail_cnt = StringTable::verify_and_compare_entries();
  142.12 +    }
  142.13 +
  142.14 +    if (fail_cnt != 0) {
  142.15 +      tty->print_cr("ERROR: fail_cnt=%d", fail_cnt);
  142.16 +      guarantee(fail_cnt == 0, "unexpected StringTable verification failures");
  142.17 +    }
  142.18 +  }
  142.19 +
  142.20    #undef BEFORE_EXIT_NOT_RUN
  142.21    #undef BEFORE_EXIT_RUNNING
  142.22    #undef BEFORE_EXIT_DONE
   143.1 --- a/src/share/vm/runtime/mutexLocker.cpp	Fri Sep 27 13:49:57 2013 -0400
   143.2 +++ b/src/share/vm/runtime/mutexLocker.cpp	Fri Sep 27 13:53:43 2013 -0400
   143.3 @@ -45,7 +45,6 @@
   143.4  Mutex*   VMStatistic_lock             = NULL;
   143.5  Mutex*   JNIGlobalHandle_lock         = NULL;
   143.6  Mutex*   JNIHandleBlockFreeList_lock  = NULL;
   143.7 -Mutex*   JNICachedItableIndex_lock    = NULL;
   143.8  Mutex*   MemberNameTable_lock         = NULL;
   143.9  Mutex*   JmethodIdCreation_lock       = NULL;
  143.10  Mutex*   JfieldIdCreation_lock        = NULL;
  143.11 @@ -253,7 +252,6 @@
  143.12    }
  143.13    def(Heap_lock                    , Monitor, nonleaf+1,   false);
  143.14    def(JfieldIdCreation_lock        , Mutex  , nonleaf+1,   true ); // jfieldID, Used in VM_Operation
  143.15 -  def(JNICachedItableIndex_lock    , Mutex  , nonleaf+1,   false); // Used to cache an itable index during JNI invoke
  143.16    def(MemberNameTable_lock         , Mutex  , nonleaf+1,   false); // Used to protect MemberNameTable
  143.17  
  143.18    def(CompiledIC_lock              , Mutex  , nonleaf+2,   false); // locks VtableStubs_lock, InlineCacheBuffer_lock
   144.1 --- a/src/share/vm/runtime/mutexLocker.hpp	Fri Sep 27 13:49:57 2013 -0400
   144.2 +++ b/src/share/vm/runtime/mutexLocker.hpp	Fri Sep 27 13:53:43 2013 -0400
   144.3 @@ -1,5 +1,5 @@
   144.4  /*
   144.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   144.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   144.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   144.8   *
   144.9   * This code is free software; you can redistribute it and/or modify it
  144.10 @@ -50,7 +50,6 @@
  144.11  extern Mutex*   VMStatistic_lock;                // a lock used to guard statistics count increment
  144.12  extern Mutex*   JNIGlobalHandle_lock;            // a lock on creating JNI global handles
  144.13  extern Mutex*   JNIHandleBlockFreeList_lock;     // a lock on the JNI handle block free list
  144.14 -extern Mutex*   JNICachedItableIndex_lock;       // a lock on caching an itable index during JNI invoke
  144.15  extern Mutex*   MemberNameTable_lock;            // a lock on the MemberNameTable updates
  144.16  extern Mutex*   JmethodIdCreation_lock;          // a lock on creating JNI method identifiers
  144.17  extern Mutex*   JfieldIdCreation_lock;           // a lock on creating JNI static field identifiers
   145.1 --- a/src/share/vm/runtime/os.cpp	Fri Sep 27 13:49:57 2013 -0400
   145.2 +++ b/src/share/vm/runtime/os.cpp	Fri Sep 27 13:53:43 2013 -0400
   145.3 @@ -314,6 +314,11 @@
   145.4    }
   145.5  }
   145.6  
   145.7 +void os::init_before_ergo() {
   145.8 +  // We need to initialize large page support here because ergonomics takes some
   145.9 +  // decisions depending on large page support and the calculated large page size.
  145.10 +  large_page_init();
  145.11 +}
  145.12  
  145.13  void os::signal_init() {
  145.14    if (!ReduceSignalUsage) {
   146.1 --- a/src/share/vm/runtime/os.hpp	Fri Sep 27 13:49:57 2013 -0400
   146.2 +++ b/src/share/vm/runtime/os.hpp	Fri Sep 27 13:53:43 2013 -0400
   146.3 @@ -91,6 +91,8 @@
   146.4  typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
   146.5  
   146.6  class os: AllStatic {
   146.7 +  friend class VMStructs;
   146.8 +
   146.9   public:
  146.10    enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
  146.11  
  146.12 @@ -139,7 +141,10 @@
  146.13  
  146.14   public:
  146.15    static void init(void);                      // Called before command line parsing
  146.16 +  static void init_before_ergo(void);          // Called after command line parsing
  146.17 +                                               // before VM ergonomics processing.
  146.18    static jint init_2(void);                    // Called after command line parsing
  146.19 +                                               // and VM ergonomics processing
  146.20    static void init_globals(void) {             // Called from init_globals() in init.cpp
  146.21      init_globals_ext();
  146.22    }
  146.23 @@ -254,6 +259,11 @@
  146.24    static size_t page_size_for_region(size_t region_min_size,
  146.25                                       size_t region_max_size,
  146.26                                       uint min_pages);
  146.27 +  // Return the largest page size that can be used
  146.28 +  static size_t max_page_size() {
  146.29 +    // The _page_sizes array is sorted in descending order.
  146.30 +    return _page_sizes[0];
  146.31 +  }
  146.32  
  146.33    // Methods for tracing page sizes returned by the above method; enabled by
  146.34    // TracePageSizes.  The region_{min,max}_size parameters should be the values
  146.35 @@ -795,6 +805,14 @@
  146.36  #endif
  146.37  
  146.38   public:
  146.39 +#ifndef PLATFORM_PRINT_NATIVE_STACK
  146.40 +  // No platform-specific code for printing the native stack.
  146.41 +  static bool platform_print_native_stack(outputStream* st, void* context,
  146.42 +                                          char *buf, int buf_size) {
  146.43 +    return false;
  146.44 +  }
  146.45 +#endif
  146.46 +
  146.47    // debugging support (mostly used by debug.cpp but also fatal error handler)
  146.48    static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address
  146.49  
   147.1 --- a/src/share/vm/runtime/reflection.cpp	Fri Sep 27 13:49:57 2013 -0400
   147.2 +++ b/src/share/vm/runtime/reflection.cpp	Fri Sep 27 13:53:43 2013 -0400
   147.3 @@ -952,7 +952,8 @@
   147.4          }
   147.5        }  else {
   147.6          // if the method can be overridden, we resolve using the vtable index.
   147.7 -        int index  = reflected_method->vtable_index();
   147.8 +        assert(!reflected_method->has_itable_index(), "");
   147.9 +        int index = reflected_method->vtable_index();
  147.10          method = reflected_method;
  147.11          if (index != Method::nonvirtual_vtable_index) {
  147.12            // target_klass might be an arrayKlassOop but all vtables start at
   148.1 --- a/src/share/vm/runtime/reflectionUtils.hpp	Fri Sep 27 13:49:57 2013 -0400
   148.2 +++ b/src/share/vm/runtime/reflectionUtils.hpp	Fri Sep 27 13:53:43 2013 -0400
   148.3 @@ -109,6 +109,8 @@
   148.4   private:
   148.5    int length() const                { return _klass->java_fields_count(); }
   148.6  
   148.7 +  fieldDescriptor _fd_buf;
   148.8 +
   148.9   public:
  148.10    FieldStream(instanceKlassHandle klass, bool local_only, bool classes_only)
  148.11      : KlassStream(klass, local_only, classes_only) {
  148.12 @@ -134,6 +136,12 @@
  148.13    int offset() const {
  148.14      return _klass->field_offset( index() );
  148.15    }
  148.16 +  // bridge to a heavier API:
  148.17 +  fieldDescriptor& field_descriptor() const {
  148.18 +    fieldDescriptor& field = const_cast<fieldDescriptor&>(_fd_buf);
  148.19 +    field.reinitialize(_klass(), _index);
  148.20 +    return field;
  148.21 +  }
  148.22  };
  148.23  
  148.24  class FilteredField : public CHeapObj<mtInternal>  {
   149.1 --- a/src/share/vm/runtime/sharedRuntime.cpp	Fri Sep 27 13:49:57 2013 -0400
   149.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp	Fri Sep 27 13:53:43 2013 -0400
   149.3 @@ -1506,8 +1506,11 @@
   149.4                                                  info, CHECK_(methodHandle()));
   149.5          inline_cache->set_to_monomorphic(info);
   149.6        } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
   149.7 -        // Change to megamorphic
   149.8 -        inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
   149.9 +        // Potential change to megamorphic
  149.10 +        bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
  149.11 +        if (!successful) {
  149.12 +          inline_cache->set_to_clean();
  149.13 +        }
  149.14        } else {
  149.15          // Either clean or megamorphic
  149.16        }
   150.1 --- a/src/share/vm/runtime/sweeper.cpp	Fri Sep 27 13:49:57 2013 -0400
   150.2 +++ b/src/share/vm/runtime/sweeper.cpp	Fri Sep 27 13:53:43 2013 -0400
   150.3 @@ -269,6 +269,7 @@
   150.4    // the number of nmethods changes during the sweep so the final
   150.5    // stage must iterate until it there are no more nmethods.
   150.6    int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
   150.7 +  int swept_count = 0;
   150.8  
   150.9    assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
  150.10    assert(!CodeCache_lock->owned_by_self(), "just checking");
  150.11 @@ -278,6 +279,7 @@
  150.12  
  150.13      // The last invocation iterates until there are no more nmethods
  150.14      for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
  150.15 +      swept_count++;
  150.16        if (SafepointSynchronize::is_synchronizing()) { // Safepoint request
  150.17          if (PrintMethodFlushing && Verbose) {
  150.18            tty->print_cr("### Sweep at %d out of %d, invocation: %d, yielding to safepoint", _seen, CodeCache::nof_nmethods(), _invocations);
  150.19 @@ -331,7 +333,7 @@
  150.20      event.set_endtime(sweep_end_counter);
  150.21      event.set_sweepIndex(_traversals);
  150.22      event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
  150.23 -    event.set_sweptCount(todo);
  150.24 +    event.set_sweptCount(swept_count);
  150.25      event.set_flushedCount(_flushed_count);
  150.26      event.set_markedCount(_marked_count);
  150.27      event.set_zombifiedCount(_zombified_count);
   151.1 --- a/src/share/vm/runtime/thread.cpp	Fri Sep 27 13:49:57 2013 -0400
   151.2 +++ b/src/share/vm/runtime/thread.cpp	Fri Sep 27 13:53:43 2013 -0400
   151.3 @@ -333,6 +333,8 @@
   151.4    // Reclaim the objectmonitors from the omFreeList of the moribund thread.
   151.5    ObjectSynchronizer::omFlush (this) ;
   151.6  
   151.7 +  EVENT_THREAD_DESTRUCT(this);
   151.8 +
   151.9    // stack_base can be NULL if the thread is never started or exited before
  151.10    // record_stack_base_and_size called. Although, we would like to ensure
  151.11    // that all started threads do call record_stack_base_and_size(), there is
  151.12 @@ -3329,6 +3331,11 @@
  151.13    jint parse_result = Arguments::parse(args);
  151.14    if (parse_result != JNI_OK) return parse_result;
  151.15  
  151.16 +  os::init_before_ergo();
  151.17 +
  151.18 +  jint ergo_result = Arguments::apply_ergo();
  151.19 +  if (ergo_result != JNI_OK) return ergo_result;
  151.20 +
  151.21    if (PauseAtStartup) {
  151.22      os::pause();
  151.23    }
   152.1 --- a/src/share/vm/runtime/virtualspace.cpp	Fri Sep 27 13:49:57 2013 -0400
   152.2 +++ b/src/share/vm/runtime/virtualspace.cpp	Fri Sep 27 13:53:43 2013 -0400
   152.3 @@ -453,6 +453,42 @@
   152.4    return reserved_size() - committed_size();
   152.5  }
   152.6  
   152.7 +size_t VirtualSpace::actual_committed_size() const {
   152.8 +  // Special VirtualSpaces commit all reserved space up front.
   152.9 +  if (special()) {
  152.10 +    return reserved_size();
  152.11 +  }
  152.12 +
  152.13 +  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
  152.14 +  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
  152.15 +  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
  152.16 +
  152.17 +#ifdef ASSERT
  152.18 +  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
  152.19 +  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
  152.20 +  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
  152.21 +
  152.22 +  if (committed_high > 0) {
  152.23 +    assert(committed_low == lower, "Must be");
  152.24 +    assert(committed_middle == middle, "Must be");
  152.25 +  }
  152.26 +
  152.27 +  if (committed_middle > 0) {
  152.28 +    assert(committed_low == lower, "Must be");
  152.29 +  }
  152.30 +  if (committed_middle < middle) {
  152.31 +    assert(committed_high == 0, "Must be");
  152.32 +  }
  152.33 +
  152.34 +  if (committed_low < lower) {
  152.35 +    assert(committed_high == 0, "Must be");
  152.36 +    assert(committed_middle == 0, "Must be");
  152.37 +  }
  152.38 +#endif
  152.39 +
  152.40 +  return committed_low + committed_middle + committed_high;
  152.41 +}
  152.42 +
  152.43  
  152.44  bool VirtualSpace::contains(const void* p) const {
  152.45    return low() <= (const char*) p && (const char*) p < high();
  152.46 @@ -718,16 +754,19 @@
  152.47    assert(high() <= upper_high(), "upper high");
  152.48  }
  152.49  
  152.50 -void VirtualSpace::print() {
  152.51 -  tty->print   ("Virtual space:");
  152.52 -  if (special()) tty->print(" (pinned in memory)");
  152.53 -  tty->cr();
  152.54 -  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  152.55 -  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  152.56 -  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  152.57 -  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
  152.58 +void VirtualSpace::print_on(outputStream* out) {
  152.59 +  out->print   ("Virtual space:");
  152.60 +  if (special()) out->print(" (pinned in memory)");
  152.61 +  out->cr();
  152.62 +  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
  152.63 +  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
  152.64 +  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
  152.65 +  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
  152.66  }
  152.67  
  152.68 +void VirtualSpace::print() {
  152.69 +  print_on(tty);
  152.70 +}
  152.71  
  152.72  /////////////// Unit tests ///////////////
  152.73  
  152.74 @@ -910,6 +949,109 @@
  152.75    TestReservedSpace::test_reserved_space();
  152.76  }
  152.77  
  152.78 +#define assert_equals(actual, expected)     \
  152.79 +  assert(actual == expected,                \
  152.80 +    err_msg("Got " SIZE_FORMAT " expected " \
  152.81 +      SIZE_FORMAT, actual, expected));
  152.82 +
  152.83 +#define assert_ge(value1, value2)                  \
  152.84 +  assert(value1 >= value2,                         \
  152.85 +    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
  152.86 +      #value2 "': " SIZE_FORMAT, value1, value2));
  152.87 +
  152.88 +#define assert_lt(value1, value2)                  \
  152.89 +  assert(value1 < value2,                          \
  152.90 +    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
  152.91 +      #value2 "': " SIZE_FORMAT, value1, value2));
  152.92 +
  152.93 +
  152.94 +class TestVirtualSpace : AllStatic {
  152.95 + public:
  152.96 +  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size) {
  152.97 +    size_t granularity = os::vm_allocation_granularity();
  152.98 +    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
  152.99 +
 152.100 +    ReservedSpace reserved(reserve_size_aligned);
 152.101 +
 152.102 +    assert(reserved.is_reserved(), "Must be");
 152.103 +
 152.104 +    VirtualSpace vs;
 152.105 +    bool initialized = vs.initialize(reserved, 0);
 152.106 +    assert(initialized, "Failed to initialize VirtualSpace");
 152.107 +
 152.108 +    vs.expand_by(commit_size, false);
 152.109 +
 152.110 +    if (vs.special()) {
 152.111 +      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
 152.112 +    } else {
 152.113 +      assert_ge(vs.actual_committed_size(), commit_size);
 152.114 +      // Approximate the commit granularity.
 152.115 +      size_t commit_granularity = UseLargePages ? os::large_page_size() : os::vm_page_size();
 152.116 +      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
 152.117 +    }
 152.118 +
 152.119 +    reserved.release();
 152.120 +  }
 152.121 +
 152.122 +  static void test_virtual_space_actual_committed_space_one_large_page() {
 152.123 +    if (!UseLargePages) {
 152.124 +      return;
 152.125 +    }
 152.126 +
 152.127 +    size_t large_page_size = os::large_page_size();
 152.128 +
 152.129 +    ReservedSpace reserved(large_page_size, large_page_size, true, false);
 152.130 +
 152.131 +    assert(reserved.is_reserved(), "Must be");
 152.132 +
 152.133 +    VirtualSpace vs;
 152.134 +    bool initialized = vs.initialize(reserved, 0);
 152.135 +    assert(initialized, "Failed to initialize VirtualSpace");
 152.136 +
 152.137 +    vs.expand_by(large_page_size, false);
 152.138 +
 152.139 +    assert_equals(vs.actual_committed_size(), large_page_size);
 152.140 +
 152.141 +    reserved.release();
 152.142 +  }
 152.143 +
 152.144 +  static void test_virtual_space_actual_committed_space() {
 152.145 +    test_virtual_space_actual_committed_space(4 * K, 0);
 152.146 +    test_virtual_space_actual_committed_space(4 * K, 4 * K);
 152.147 +    test_virtual_space_actual_committed_space(8 * K, 0);
 152.148 +    test_virtual_space_actual_committed_space(8 * K, 4 * K);
 152.149 +    test_virtual_space_actual_committed_space(8 * K, 8 * K);
 152.150 +    test_virtual_space_actual_committed_space(12 * K, 0);
 152.151 +    test_virtual_space_actual_committed_space(12 * K, 4 * K);
 152.152 +    test_virtual_space_actual_committed_space(12 * K, 8 * K);
 152.153 +    test_virtual_space_actual_committed_space(12 * K, 12 * K);
 152.154 +    test_virtual_space_actual_committed_space(64 * K, 0);
 152.155 +    test_virtual_space_actual_committed_space(64 * K, 32 * K);
 152.156 +    test_virtual_space_actual_committed_space(64 * K, 64 * K);
 152.157 +    test_virtual_space_actual_committed_space(2 * M, 0);
 152.158 +    test_virtual_space_actual_committed_space(2 * M, 4 * K);
 152.159 +    test_virtual_space_actual_committed_space(2 * M, 64 * K);
 152.160 +    test_virtual_space_actual_committed_space(2 * M, 1 * M);
 152.161 +    test_virtual_space_actual_committed_space(2 * M, 2 * M);
 152.162 +    test_virtual_space_actual_committed_space(10 * M, 0);
 152.163 +    test_virtual_space_actual_committed_space(10 * M, 4 * K);
 152.164 +    test_virtual_space_actual_committed_space(10 * M, 8 * K);
 152.165 +    test_virtual_space_actual_committed_space(10 * M, 1 * M);
 152.166 +    test_virtual_space_actual_committed_space(10 * M, 2 * M);
 152.167 +    test_virtual_space_actual_committed_space(10 * M, 5 * M);
 152.168 +    test_virtual_space_actual_committed_space(10 * M, 10 * M);
 152.169 +  }
 152.170 +
 152.171 +  static void test_virtual_space() {
 152.172 +    test_virtual_space_actual_committed_space();
 152.173 +    test_virtual_space_actual_committed_space_one_large_page();
 152.174 +  }
 152.175 +};
 152.176 +
 152.177 +void TestVirtualSpace_test() {
 152.178 +  TestVirtualSpace::test_virtual_space();
 152.179 +}
 152.180 +
 152.181  #endif // PRODUCT
 152.182  
 152.183  #endif
   153.1 --- a/src/share/vm/runtime/virtualspace.hpp	Fri Sep 27 13:49:57 2013 -0400
   153.2 +++ b/src/share/vm/runtime/virtualspace.hpp	Fri Sep 27 13:53:43 2013 -0400
   153.3 @@ -183,11 +183,16 @@
   153.4    // Destruction
   153.5    ~VirtualSpace();
   153.6  
   153.7 -  // Testers (all sizes are byte sizes)
   153.8 -  size_t committed_size()   const;
   153.9 -  size_t reserved_size()    const;
  153.10 +  // Reserved memory
  153.11 +  size_t reserved_size() const;
  153.12 +  // Actually committed OS memory
  153.13 +  size_t actual_committed_size() const;
  153.14 +  // Memory used/expanded in this virtual space
  153.15 +  size_t committed_size() const;
  153.16 +  // Memory left to use/expand in this virtual space
  153.17    size_t uncommitted_size() const;
  153.18 -  bool   contains(const void* p)  const;
  153.19 +
  153.20 +  bool   contains(const void* p) const;
  153.21  
  153.22    // Operations
  153.23    // returns true on success, false otherwise
  153.24 @@ -198,7 +203,8 @@
  153.25    void check_for_contiguity() PRODUCT_RETURN;
  153.26  
  153.27    // Debugging
  153.28 -  void print() PRODUCT_RETURN;
  153.29 +  void print_on(outputStream* out) PRODUCT_RETURN;
  153.30 +  void print();
  153.31  };
  153.32  
  153.33  #endif // SHARE_VM_RUNTIME_VIRTUALSPACE_HPP
   154.1 --- a/src/share/vm/runtime/vmStructs.cpp	Fri Sep 27 13:49:57 2013 -0400
   154.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Fri Sep 27 13:53:43 2013 -0400
   154.3 @@ -315,7 +315,6 @@
   154.4    nonstatic_field(InstanceKlass,               _breakpoints,                                  BreakpointInfo*)                       \
   154.5    nonstatic_field(InstanceKlass,               _generic_signature_index,                           u2)                               \
   154.6    nonstatic_field(InstanceKlass,               _methods_jmethod_ids,                          jmethodID*)                            \
   154.7 -  nonstatic_field(InstanceKlass,               _methods_cached_itable_indices,                int*)                                  \
   154.8    volatile_nonstatic_field(InstanceKlass,      _idnum_allocated_count,                        u2)                                    \
   154.9    nonstatic_field(InstanceKlass,               _annotations,                                  Annotations*)                          \
  154.10    nonstatic_field(InstanceKlass,               _dependencies,                                 nmethodBucket*)                        \
  154.11 @@ -330,11 +329,13 @@
  154.12    nonstatic_field(Klass,                       _java_mirror,                                  oop)                                   \
  154.13    nonstatic_field(Klass,                       _modifier_flags,                               jint)                                  \
  154.14    nonstatic_field(Klass,                       _super,                                        Klass*)                                \
  154.15 +  nonstatic_field(Klass,                       _subklass,                                     Klass*)                                \
  154.16    nonstatic_field(Klass,                       _layout_helper,                                jint)                                  \
  154.17    nonstatic_field(Klass,                       _name,                                         Symbol*)                               \
  154.18    nonstatic_field(Klass,                       _access_flags,                                 AccessFlags)                           \
  154.19 -  nonstatic_field(Klass,                       _subklass,                                     Klass*)                                \
  154.20 +  nonstatic_field(Klass,                       _prototype_header,                             markOop)                               \
  154.21    nonstatic_field(Klass,                       _next_sibling,                                 Klass*)                                \
  154.22 +  nonstatic_field(vtableEntry,                 _method,                                       Method*)                               \
  154.23    nonstatic_field(MethodData,           _size,                                         int)                                   \
  154.24    nonstatic_field(MethodData,           _method,                                       Method*)                               \
  154.25    nonstatic_field(MethodData,           _data_size,                                    int)                                   \
  154.26 @@ -342,10 +343,15 @@
  154.27    nonstatic_field(MethodData,           _nof_decompiles,                               uint)                                  \
  154.28    nonstatic_field(MethodData,           _nof_overflow_recompiles,                      uint)                                  \
  154.29    nonstatic_field(MethodData,           _nof_overflow_traps,                           uint)                                  \
  154.30 +  nonstatic_field(MethodData,           _trap_hist._array[0],                          u1)                                    \
  154.31    nonstatic_field(MethodData,           _eflags,                                       intx)                                  \
  154.32    nonstatic_field(MethodData,           _arg_local,                                    intx)                                  \
  154.33    nonstatic_field(MethodData,           _arg_stack,                                    intx)                                  \
  154.34    nonstatic_field(MethodData,           _arg_returned,                                 intx)                                  \
  154.35 +  nonstatic_field(DataLayout,           _header._struct._tag,                          u1)                                    \
  154.36 +  nonstatic_field(DataLayout,           _header._struct._flags,                        u1)                                    \
  154.37 +  nonstatic_field(DataLayout,           _header._struct._bci,                          u2)                                    \
  154.38 +  nonstatic_field(DataLayout,           _cells[0],                                     intptr_t)                              \
  154.39    nonstatic_field(MethodCounters,       _interpreter_invocation_count,                 int)                                   \
  154.40    nonstatic_field(MethodCounters,       _interpreter_throwout_count,                   u2)                                    \
  154.41    nonstatic_field(MethodCounters,       _number_of_breakpoints,                        u2)                                    \
  154.42 @@ -357,6 +363,7 @@
  154.43    nonstatic_field(Method,               _access_flags,                                 AccessFlags)                           \
  154.44    nonstatic_field(Method,               _vtable_index,                                 int)                                   \
  154.45    nonstatic_field(Method,               _method_size,                                  u2)                                    \
  154.46 +  nonstatic_field(Method,               _intrinsic_id,                                 u1)                                    \
  154.47    nonproduct_nonstatic_field(Method,    _compiled_invocation_count,                    int)                                   \
  154.48    volatile_nonstatic_field(Method,      _code,                                         nmethod*)                              \
  154.49    nonstatic_field(Method,               _i2i_entry,                                    address)                               \
  154.50 @@ -443,12 +450,19 @@
  154.51       static_field(Universe,                    _bootstrapping,                                bool)                                  \
  154.52       static_field(Universe,                    _fully_initialized,                            bool)                                  \
  154.53       static_field(Universe,                    _verify_count,                                 int)                                   \
  154.54 +     static_field(Universe,                    _non_oop_bits,                                 intptr_t)                              \
  154.55       static_field(Universe,                    _narrow_oop._base,                             address)                               \
  154.56       static_field(Universe,                    _narrow_oop._shift,                            int)                                   \
  154.57       static_field(Universe,                    _narrow_oop._use_implicit_null_checks,         bool)                                  \
  154.58       static_field(Universe,                    _narrow_klass._base,                           address)                               \
  154.59       static_field(Universe,                    _narrow_klass._shift,                          int)                                   \
  154.60                                                                                                                                       \
  154.61 +  /******/                                                                                                                           \
  154.62 +  /* os */                                                                                                                           \
  154.63 +  /******/                                                                                                                           \
  154.64 +                                                                                                                                     \
  154.65 +     static_field(os,                          _polling_page,                                 address)                               \
  154.66 +                                                                                                                                     \
  154.67    /**********************************************************************************/                                               \
  154.68    /* Generation and Space hierarchies                                               */                                               \
  154.69    /**********************************************************************************/                                               \
  154.70 @@ -456,6 +470,7 @@
  154.71    unchecked_nonstatic_field(ageTable,          sizes,                                         sizeof(ageTable::sizes))               \
  154.72                                                                                                                                       \
  154.73    nonstatic_field(BarrierSet,                  _max_covered_regions,                          int)                                   \
  154.74 +  nonstatic_field(BarrierSet,                  _kind,                                         BarrierSet::Name)                      \
  154.75    nonstatic_field(BlockOffsetTable,            _bottom,                                       HeapWord*)                             \
  154.76    nonstatic_field(BlockOffsetTable,            _end,                                          HeapWord*)                             \
  154.77                                                                                                                                       \
  154.78 @@ -495,6 +510,7 @@
  154.79    nonstatic_field(CollectedHeap,               _barrier_set,                                  BarrierSet*)                           \
  154.80    nonstatic_field(CollectedHeap,               _defer_initial_card_mark,                      bool)                                  \
  154.81    nonstatic_field(CollectedHeap,               _is_gc_active,                                 bool)                                  \
  154.82 +  nonstatic_field(CollectedHeap,               _total_collections,                            unsigned int)                          \
  154.83    nonstatic_field(CompactibleSpace,            _compaction_top,                               HeapWord*)                             \
  154.84    nonstatic_field(CompactibleSpace,            _first_dead,                                   HeapWord*)                             \
  154.85    nonstatic_field(CompactibleSpace,            _end_of_live,                                  HeapWord*)                             \
  154.86 @@ -505,7 +521,7 @@
  154.87    nonstatic_field(ContiguousSpace,             _saved_mark_word,                              HeapWord*)                             \
  154.88                                                                                                                                       \
  154.89    nonstatic_field(DefNewGeneration,            _next_gen,                                     Generation*)                           \
  154.90 -  nonstatic_field(DefNewGeneration,            _tenuring_threshold,                           uint)                                   \
  154.91 +  nonstatic_field(DefNewGeneration,            _tenuring_threshold,                           uint)                                  \
  154.92    nonstatic_field(DefNewGeneration,            _age_table,                                    ageTable)                              \
  154.93    nonstatic_field(DefNewGeneration,            _eden_space,                                   EdenSpace*)                            \
  154.94    nonstatic_field(DefNewGeneration,            _from_space,                                   ContiguousSpace*)                      \
  154.95 @@ -552,6 +568,11 @@
  154.96    nonstatic_field(ThreadLocalAllocBuffer,      _desired_size,                                 size_t)                                \
  154.97    nonstatic_field(ThreadLocalAllocBuffer,      _refill_waste_limit,                           size_t)                                \
  154.98       static_field(ThreadLocalAllocBuffer,      _target_refills,                               unsigned)                              \
  154.99 +  nonstatic_field(ThreadLocalAllocBuffer,      _number_of_refills,                            unsigned)                              \
 154.100 +  nonstatic_field(ThreadLocalAllocBuffer,      _fast_refill_waste,                            unsigned)                              \
 154.101 +  nonstatic_field(ThreadLocalAllocBuffer,      _slow_refill_waste,                            unsigned)                              \
 154.102 +  nonstatic_field(ThreadLocalAllocBuffer,      _gc_waste,                                     unsigned)                              \
 154.103 +  nonstatic_field(ThreadLocalAllocBuffer,      _slow_allocations,                             unsigned)                              \
 154.104    nonstatic_field(VirtualSpace,                _low_boundary,                                 char*)                                 \
 154.105    nonstatic_field(VirtualSpace,                _high_boundary,                                char*)                                 \
 154.106    nonstatic_field(VirtualSpace,                _low,                                          char*)                                 \
 154.107 @@ -713,6 +734,13 @@
 154.108                                                                                                                                       \
 154.109    static_field(ClassLoaderDataGraph,           _head,                                         ClassLoaderData*)                      \
 154.110                                                                                                                                       \
 154.111 +  /**********/                                                                                                                       \
 154.112 +  /* Arrays */                                                                                                                       \
 154.113 +  /**********/                                                                                                                       \
 154.114 +                                                                                                                                     \
 154.115 +  nonstatic_field(Array<Klass*>,               _length,                                       int)                                   \
 154.116 +  nonstatic_field(Array<Klass*>,               _data[0],                                      Klass*)                                \
 154.117 +                                                                                                                                     \
 154.118    /*******************/                                                                                                              \
 154.119    /* GrowableArrays  */                                                                                                              \
 154.120    /*******************/                                                                                                              \
 154.121 @@ -720,7 +748,7 @@
 154.122    nonstatic_field(GenericGrowableArray,        _len,                                          int)                                   \
 154.123    nonstatic_field(GenericGrowableArray,        _max,                                          int)                                   \
 154.124    nonstatic_field(GenericGrowableArray,        _arena,                                        Arena*)                                \
 154.125 -  nonstatic_field(GrowableArray<int>,               _data,                                         int*) \
 154.126 +  nonstatic_field(GrowableArray<int>,          _data,                                         int*)                                  \
 154.127                                                                                                                                       \
 154.128    /********************************/                                                                                                 \
 154.129    /* CodeCache (NOTE: incomplete) */                                                                                                 \
 154.130 @@ -763,7 +791,20 @@
 154.131    /* StubRoutines (NOTE: incomplete) */                                                                                              \
 154.132    /***********************************/                                                                                              \
 154.133                                                                                                                                       \
 154.134 +     static_field(StubRoutines,                _verify_oop_count,                             jint)                                  \
 154.135       static_field(StubRoutines,                _call_stub_return_address,                     address)                               \
 154.136 +     static_field(StubRoutines,                _aescrypt_encryptBlock,                        address)                               \
 154.137 +     static_field(StubRoutines,                _aescrypt_decryptBlock,                        address)                               \
 154.138 +     static_field(StubRoutines,                _cipherBlockChaining_encryptAESCrypt,          address)                               \
 154.139 +     static_field(StubRoutines,                _cipherBlockChaining_decryptAESCrypt,          address)                               \
 154.140 +     static_field(StubRoutines,                _updateBytesCRC32,                             address)                               \
 154.141 +     static_field(StubRoutines,                _crc_table_adr,                                address)                               \
 154.142 +                                                                                                                                     \
 154.143 +  /*****************/                                                                                                                \
 154.144 +  /* SharedRuntime */                                                                                                                \
 154.145 +  /*****************/                                                                                                                \
 154.146 +                                                                                                                                     \
 154.147 +     static_field(SharedRuntime,               _ic_miss_blob,                                 RuntimeStub*)                          \
 154.148                                                                                                                                       \
 154.149    /***************************************/                                                                                          \
 154.150    /* PcDesc and other compiled code info */                                                                                          \
 154.151 @@ -853,6 +894,7 @@
 154.152     volatile_nonstatic_field(Thread,            _suspend_flags,                                uint32_t)                              \
 154.153    nonstatic_field(Thread,                      _active_handles,                               JNIHandleBlock*)                       \
 154.154    nonstatic_field(Thread,                      _tlab,                                         ThreadLocalAllocBuffer)                \
 154.155 +  nonstatic_field(Thread,                      _allocated_bytes,                              jlong)                                 \
 154.156    nonstatic_field(Thread,                      _current_pending_monitor,                      ObjectMonitor*)                        \
 154.157    nonstatic_field(Thread,                      _current_pending_monitor_is_from_java,         bool)                                  \
 154.158    nonstatic_field(Thread,                      _current_waiting_monitor,                      ObjectMonitor*)                        \
 154.159 @@ -866,6 +908,7 @@
 154.160    nonstatic_field(JavaThread,                  _pending_async_exception,                      oop)                                   \
 154.161    volatile_nonstatic_field(JavaThread,         _exception_oop,                                oop)                                   \
 154.162    volatile_nonstatic_field(JavaThread,         _exception_pc,                                 address)                               \
 154.163 +  volatile_nonstatic_field(JavaThread,         _is_method_handle_return,                      int)                                   \
 154.164    nonstatic_field(JavaThread,                  _is_compiling,                                 bool)                                  \
 154.165    nonstatic_field(JavaThread,                  _special_runtime_exit_condition,               JavaThread::AsyncRequests)             \
 154.166    nonstatic_field(JavaThread,                  _saved_exception_pc,                           address)                               \
 154.167 @@ -875,6 +918,8 @@
 154.168    nonstatic_field(JavaThread,                  _stack_size,                                   size_t)                                \
 154.169    nonstatic_field(JavaThread,                  _vframe_array_head,                            vframeArray*)                          \
 154.170    nonstatic_field(JavaThread,                  _vframe_array_last,                            vframeArray*)                          \
 154.171 +  nonstatic_field(JavaThread,                  _satb_mark_queue,                              ObjPtrQueue)                           \
 154.172 +  nonstatic_field(JavaThread,                  _dirty_card_queue,                             DirtyCardQueue)                        \
 154.173    nonstatic_field(Thread,                      _resource_area,                                ResourceArea*)                         \
 154.174    nonstatic_field(CompilerThread,              _env,                                          ciEnv*)                                \
 154.175                                                                                                                                       \
 154.176 @@ -1187,7 +1232,7 @@
 154.177    unchecked_nonstatic_field(Array<int>,            _data,                                     sizeof(int))                           \
 154.178    unchecked_nonstatic_field(Array<u1>,             _data,                                     sizeof(u1))                            \
 154.179    unchecked_nonstatic_field(Array<u2>,             _data,                                     sizeof(u2))                            \
 154.180 -  unchecked_nonstatic_field(Array<Method*>, _data,                                     sizeof(Method*))                \
 154.181 +  unchecked_nonstatic_field(Array<Method*>,        _data,                                     sizeof(Method*))                       \
 154.182    unchecked_nonstatic_field(Array<Klass*>,         _data,                                     sizeof(Klass*))                        \
 154.183                                                                                                                                       \
 154.184    /*********************************/                                                                                                \
 154.185 @@ -1203,7 +1248,7 @@
 154.186    /* Miscellaneous fields */                                                                                                         \
 154.187    /************************/                                                                                                         \
 154.188                                                                                                                                       \
 154.189 -  nonstatic_field(CompileTask,                 _method,                                      Method*)                         \
 154.190 +  nonstatic_field(CompileTask,                 _method,                                      Method*)                                \
 154.191    nonstatic_field(CompileTask,                 _osr_bci,                                     int)                                    \
 154.192    nonstatic_field(CompileTask,                 _comp_level,                                  int)                                    \
 154.193    nonstatic_field(CompileTask,                 _compile_id,                                  uint)                                   \
 154.194 @@ -1217,7 +1262,11 @@
 154.195                                                                                                                                       \
 154.196    nonstatic_field(vframeArrayElement,          _frame,                                       frame)                                  \
 154.197    nonstatic_field(vframeArrayElement,          _bci,                                         int)                                    \
 154.198 -  nonstatic_field(vframeArrayElement,          _method,                                      Method*)                         \
 154.199 +  nonstatic_field(vframeArrayElement,          _method,                                      Method*)                                \
 154.200 +                                                                                                                                     \
 154.201 +  nonstatic_field(PtrQueue,                    _active,                                      bool)                                   \
 154.202 +  nonstatic_field(PtrQueue,                    _buf,                                         void**)                                 \
 154.203 +  nonstatic_field(PtrQueue,                    _index,                                       size_t)                                 \
 154.204                                                                                                                                       \
 154.205    nonstatic_field(AccessFlags,                 _flags,                                       jint)                                   \
 154.206    nonstatic_field(elapsedTimer,                _counter,                                     jlong)                                  \
 154.207 @@ -1363,7 +1412,7 @@
 154.208    /* MetadataOopDesc hierarchy (NOTE: some missing) */                    \
 154.209    /**************************************************/                    \
 154.210                                                                            \
 154.211 -  declare_toplevel_type(CompiledICHolder)                          \
 154.212 +  declare_toplevel_type(CompiledICHolder)                                 \
 154.213    declare_toplevel_type(MetaspaceObj)                                     \
 154.214      declare_type(Metadata, MetaspaceObj)                                  \
 154.215      declare_type(Klass, Metadata)                                         \
 154.216 @@ -1374,17 +1423,20 @@
 154.217          declare_type(InstanceClassLoaderKlass, InstanceKlass)             \
 154.218          declare_type(InstanceMirrorKlass, InstanceKlass)                  \
 154.219          declare_type(InstanceRefKlass, InstanceKlass)                     \
 154.220 -    declare_type(ConstantPool, Metadata)                           \
 154.221 -    declare_type(ConstantPoolCache, MetaspaceObj)                  \
 154.222 -    declare_type(MethodData, Metadata)                             \
 154.223 -    declare_type(Method, Metadata)                                 \
 154.224 -    declare_type(MethodCounters, MetaspaceObj)                     \
 154.225 -    declare_type(ConstMethod, MetaspaceObj)                        \
 154.226 +    declare_type(ConstantPool, Metadata)                                  \
 154.227 +    declare_type(ConstantPoolCache, MetaspaceObj)                         \
 154.228 +    declare_type(MethodData, Metadata)                                    \
 154.229 +    declare_type(Method, Metadata)                                        \
 154.230 +    declare_type(MethodCounters, MetaspaceObj)                            \
 154.231 +    declare_type(ConstMethod, MetaspaceObj)                               \
 154.232 +                                                                          \
 154.233 +  declare_toplevel_type(vtableEntry)                                      \
 154.234                                                                            \
 154.235             declare_toplevel_type(Symbol)                                  \
 154.236             declare_toplevel_type(Symbol*)                                 \
 154.237    declare_toplevel_type(volatile Metadata*)                               \
 154.238                                                                            \
 154.239 +  declare_toplevel_type(DataLayout)                                       \
 154.240    declare_toplevel_type(nmethodBucket)                                    \
 154.241                                                                            \
 154.242    /********/                                                              \
 154.243 @@ -1432,6 +1484,7 @@
 154.244             declare_type(ModRefBarrierSet,             BarrierSet)         \
 154.245             declare_type(CardTableModRefBS,            ModRefBarrierSet)   \
 154.246             declare_type(CardTableModRefBSForCTRS,     CardTableModRefBS)  \
 154.247 +  declare_toplevel_type(BarrierSet::Name)                                 \
 154.248    declare_toplevel_type(GenRemSet)                                        \
 154.249             declare_type(CardTableRS,                  GenRemSet)          \
 154.250    declare_toplevel_type(BlockOffsetSharedArray)                           \
 154.251 @@ -1450,6 +1503,8 @@
 154.252    declare_toplevel_type(ThreadLocalAllocBuffer)                           \
 154.253    declare_toplevel_type(VirtualSpace)                                     \
 154.254    declare_toplevel_type(WaterMark)                                        \
 154.255 +  declare_toplevel_type(ObjPtrQueue)                                      \
 154.256 +  declare_toplevel_type(DirtyCardQueue)                                   \
 154.257                                                                            \
 154.258    /* Pointers to Garbage Collection types */                              \
 154.259                                                                            \
 154.260 @@ -2068,6 +2123,7 @@
 154.261    declare_toplevel_type(StubQueue*)                                       \
 154.262    declare_toplevel_type(Thread*)                                          \
 154.263    declare_toplevel_type(Universe)                                         \
 154.264 +  declare_toplevel_type(os)                                               \
 154.265    declare_toplevel_type(vframeArray)                                      \
 154.266    declare_toplevel_type(vframeArrayElement)                               \
 154.267    declare_toplevel_type(Annotations*)                                     \
 154.268 @@ -2076,6 +2132,8 @@
 154.269    /* Miscellaneous types */                                               \
 154.270    /***************/                                                       \
 154.271                                                                            \
 154.272 +  declare_toplevel_type(PtrQueue)                                         \
 154.273 +                                                                          \
 154.274    /* freelist */                                                          \
 154.275    declare_toplevel_type(FreeChunk*)                                       \
 154.276    declare_toplevel_type(Metablock*)                                       \
 154.277 @@ -2106,6 +2164,7 @@
 154.278    /* Useful globals */                                                    \
 154.279    /******************/                                                    \
 154.280                                                                            \
 154.281 +  declare_preprocessor_constant("ASSERT", DEBUG_ONLY(1) NOT_DEBUG(0))     \
 154.282                                                                            \
 154.283    /**************/                                                        \
 154.284    /* Stack bias */                                                        \
 154.285 @@ -2122,6 +2181,8 @@
 154.286    declare_constant(BytesPerWord)                                          \
 154.287    declare_constant(BytesPerLong)                                          \
 154.288                                                                            \
 154.289 +  declare_constant(LogKlassAlignmentInBytes)                              \
 154.290 +                                                                          \
 154.291    /********************************************/                          \
 154.292    /* Generation and Space Hierarchy Constants */                          \
 154.293    /********************************************/                          \
 154.294 @@ -2130,6 +2191,9 @@
 154.295                                                                            \
 154.296    declare_constant(BarrierSet::ModRef)                                    \
 154.297    declare_constant(BarrierSet::CardTableModRef)                           \
 154.298 +  declare_constant(BarrierSet::CardTableExtension)                        \
 154.299 +  declare_constant(BarrierSet::G1SATBCT)                                  \
 154.300 +  declare_constant(BarrierSet::G1SATBCTLogging)                           \
 154.301    declare_constant(BarrierSet::Other)                                     \
 154.302                                                                            \
 154.303    declare_constant(BlockOffsetSharedArray::LogN)                          \
 154.304 @@ -2248,8 +2312,11 @@
 154.305    declare_constant(Klass::_primary_super_limit)                           \
 154.306    declare_constant(Klass::_lh_instance_slow_path_bit)                     \
 154.307    declare_constant(Klass::_lh_log2_element_size_shift)                    \
 154.308 +  declare_constant(Klass::_lh_log2_element_size_mask)                     \
 154.309    declare_constant(Klass::_lh_element_type_shift)                         \
 154.310 +  declare_constant(Klass::_lh_element_type_mask)                          \
 154.311    declare_constant(Klass::_lh_header_size_shift)                          \
 154.312 +  declare_constant(Klass::_lh_header_size_mask)                           \
 154.313    declare_constant(Klass::_lh_array_tag_shift)                            \
 154.314    declare_constant(Klass::_lh_array_tag_type_value)                       \
 154.315    declare_constant(Klass::_lh_array_tag_obj_value)                        \
 154.316 @@ -2268,6 +2335,12 @@
 154.317    declare_constant(ConstMethod::_has_default_annotations)                 \
 154.318    declare_constant(ConstMethod::_has_type_annotations)                    \
 154.319                                                                            \
 154.320 +  /**************/                                                        \
 154.321 +  /* DataLayout */                                                        \
 154.322 +  /**************/                                                        \
 154.323 +                                                                          \
 154.324 +  declare_constant(DataLayout::cell_size)                                 \
 154.325 +                                                                          \
 154.326    /*************************************/                                 \
 154.327    /* InstanceKlass enum                */                                 \
 154.328    /*************************************/                                 \
 154.329 @@ -2402,6 +2475,13 @@
 154.330    declare_constant(Deoptimization::Reason_LIMIT)                          \
 154.331    declare_constant(Deoptimization::Reason_RECORDED_LIMIT)                 \
 154.332                                                                            \
 154.333 +  declare_constant(Deoptimization::Action_none)                           \
 154.334 +  declare_constant(Deoptimization::Action_maybe_recompile)                \
 154.335 +  declare_constant(Deoptimization::Action_reinterpret)                    \
 154.336 +  declare_constant(Deoptimization::Action_make_not_entrant)               \
 154.337 +  declare_constant(Deoptimization::Action_make_not_compilable)            \
 154.338 +  declare_constant(Deoptimization::Action_LIMIT)                          \
 154.339 +                                                                          \
 154.340    /*********************/                                                 \
 154.341    /* Matcher (C2 only) */                                                 \
 154.342    /*********************/                                                 \
 154.343 @@ -2468,6 +2548,16 @@
 154.344    declare_constant(vmSymbols::FIRST_SID)                                  \
 154.345    declare_constant(vmSymbols::SID_LIMIT)                                  \
 154.346                                                                            \
 154.347 +  /****************/                                                      \
 154.348 +  /* vmIntrinsics */                                                      \
 154.349 +  /****************/                                                      \
 154.350 +                                                                          \
 154.351 +  declare_constant(vmIntrinsics::_invokeBasic)                            \
 154.352 +  declare_constant(vmIntrinsics::_linkToVirtual)                          \
 154.353 +  declare_constant(vmIntrinsics::_linkToStatic)                           \
 154.354 +  declare_constant(vmIntrinsics::_linkToSpecial)                          \
 154.355 +  declare_constant(vmIntrinsics::_linkToInterface)                        \
 154.356 +                                                                          \
 154.357    /********************************/                                      \
 154.358    /* Calling convention constants */                                      \
 154.359    /********************************/                                      \
 154.360 @@ -2515,6 +2605,8 @@
 154.361    declare_constant(markOopDesc::biased_lock_bit_in_place)                 \
 154.362    declare_constant(markOopDesc::age_mask)                                 \
 154.363    declare_constant(markOopDesc::age_mask_in_place)                        \
 154.364 +  declare_constant(markOopDesc::epoch_mask)                               \
 154.365 +  declare_constant(markOopDesc::epoch_mask_in_place)                      \
 154.366    declare_constant(markOopDesc::hash_mask)                                \
 154.367    declare_constant(markOopDesc::hash_mask_in_place)                       \
 154.368    declare_constant(markOopDesc::biased_lock_alignment)                    \
   155.1 --- a/src/share/vm/services/attachListener.cpp	Fri Sep 27 13:49:57 2013 -0400
   155.2 +++ b/src/share/vm/services/attachListener.cpp	Fri Sep 27 13:53:43 2013 -0400
   155.3 @@ -470,7 +470,17 @@
   155.4                         vmSymbols::threadgroup_string_void_signature(),
   155.5                         thread_group,
   155.6                         string,
   155.7 -                       CHECK);
   155.8 +                       THREAD);
   155.9 +
  155.10 +  if (HAS_PENDING_EXCEPTION) {
  155.11 +    tty->print_cr("Exception in VM (AttachListener::init) : ");
  155.12 +    java_lang_Throwable::print(PENDING_EXCEPTION, tty);
  155.13 +    tty->cr();
  155.14 +
  155.15 +    CLEAR_PENDING_EXCEPTION;
  155.16 +
  155.17 +    return;
  155.18 +  }
  155.19  
  155.20    KlassHandle group(THREAD, SystemDictionary::ThreadGroup_klass());
  155.21    JavaCalls::call_special(&result,
  155.22 @@ -479,7 +489,17 @@
  155.23                          vmSymbols::add_method_name(),
  155.24                          vmSymbols::thread_void_signature(),
  155.25                          thread_oop,             // ARG 1
  155.26 -                        CHECK);
  155.27 +                        THREAD);
  155.28 +
  155.29 +  if (HAS_PENDING_EXCEPTION) {
  155.30 +    tty->print_cr("Exception in VM (AttachListener::init) : ");
  155.31 +    java_lang_Throwable::print(PENDING_EXCEPTION, tty);
  155.32 +    tty->cr();
  155.33 +
  155.34 +    CLEAR_PENDING_EXCEPTION;
  155.35 +
  155.36 +    return;
  155.37 +  }
  155.38  
  155.39    { MutexLocker mu(Threads_lock);
  155.40      JavaThread* listener_thread = new JavaThread(&attach_listener_thread_entry);
   156.1 --- a/src/share/vm/services/diagnosticArgument.cpp	Fri Sep 27 13:49:57 2013 -0400
   156.2 +++ b/src/share/vm/services/diagnosticArgument.cpp	Fri Sep 27 13:53:43 2013 -0400
   156.3 @@ -61,7 +61,7 @@
   156.4  }
   156.5  
   156.6  void GenDCmdArgument::to_string(char* c, char* buf, size_t len) {
   156.7 -  jio_snprintf(buf, len, "%s", c);
   156.8 +  jio_snprintf(buf, len, "%s", (c != NULL) ? c : "");
   156.9  }
  156.10  
  156.11  void GenDCmdArgument::to_string(StringArrayArgument* f, char* buf, size_t len) {
   157.1 --- a/src/share/vm/services/gcNotifier.cpp	Fri Sep 27 13:49:57 2013 -0400
   157.2 +++ b/src/share/vm/services/gcNotifier.cpp	Fri Sep 27 13:53:43 2013 -0400
   157.3 @@ -209,7 +209,7 @@
   157.4    GCNotificationRequest *request = getRequest();
   157.5    if (request != NULL) {
   157.6      NotificationMark nm(request);
   157.7 -    Handle objGcInfo = createGcInfo(request->gcManager, request->gcStatInfo, THREAD);
   157.8 +    Handle objGcInfo = createGcInfo(request->gcManager, request->gcStatInfo, CHECK);
   157.9  
  157.10      Handle objName = java_lang_String::create_from_str(request->gcManager->name(), CHECK);
  157.11      Handle objAction = java_lang_String::create_from_str(request->gcAction, CHECK);
   158.1 --- a/src/share/vm/services/memPtr.cpp	Fri Sep 27 13:49:57 2013 -0400
   158.2 +++ b/src/share/vm/services/memPtr.cpp	Fri Sep 27 13:53:43 2013 -0400
   158.3 @@ -1,5 +1,5 @@
   158.4  /*
   158.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
   158.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   158.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   158.8   *
   158.9   * This code is free software; you can redistribute it and/or modify it
  158.10 @@ -34,9 +34,9 @@
  158.11    jint seq = Atomic::add(1, &_seq_number);
  158.12    if (seq < 0) {
  158.13      MemTracker::shutdown(MemTracker::NMT_sequence_overflow);
  158.14 +  } else {
  158.15 +    NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
  158.16    }
  158.17 -  assert(seq > 0, "counter overflow");
  158.18 -  NOT_PRODUCT(_max_seq_number = (seq > _max_seq_number) ? seq : _max_seq_number;)
  158.19    return seq;
  158.20  }
  158.21  
   159.1 --- a/src/share/vm/services/memoryPool.cpp	Fri Sep 27 13:49:57 2013 -0400
   159.2 +++ b/src/share/vm/services/memoryPool.cpp	Fri Sep 27 13:53:43 2013 -0400
   159.3 @@ -260,10 +260,10 @@
   159.4  }
   159.5  
   159.6  MetaspacePool::MetaspacePool() :
   159.7 -  MemoryPool("Metaspace", NonHeap, capacity_in_bytes(), calculate_max_size(), true, false) { }
   159.8 +  MemoryPool("Metaspace", NonHeap, 0, calculate_max_size(), true, false) { }
   159.9  
  159.10  MemoryUsage MetaspacePool::get_memory_usage() {
  159.11 -  size_t committed = align_size_down_(capacity_in_bytes(), os::vm_page_size());
  159.12 +  size_t committed = MetaspaceAux::committed_bytes();
  159.13    return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
  159.14  }
  159.15  
  159.16 @@ -271,26 +271,19 @@
  159.17    return MetaspaceAux::allocated_used_bytes();
  159.18  }
  159.19  
  159.20 -size_t MetaspacePool::capacity_in_bytes() const {
  159.21 -  return MetaspaceAux::allocated_capacity_bytes();
  159.22 -}
  159.23 -
  159.24  size_t MetaspacePool::calculate_max_size() const {
  159.25 -  return FLAG_IS_CMDLINE(MaxMetaspaceSize) ? MaxMetaspaceSize : max_uintx;
  159.26 +  return FLAG_IS_CMDLINE(MaxMetaspaceSize) ? MaxMetaspaceSize :
  159.27 +                                             MemoryUsage::undefined_size();
  159.28  }
  159.29  
  159.30  CompressedKlassSpacePool::CompressedKlassSpacePool() :
  159.31 -  MemoryPool("Compressed Class Space", NonHeap, capacity_in_bytes(), ClassMetaspaceSize, true, false) { }
  159.32 +  MemoryPool("Compressed Class Space", NonHeap, 0, CompressedClassSpaceSize, true, false) { }
  159.33  
  159.34  size_t CompressedKlassSpacePool::used_in_bytes() {
  159.35    return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
  159.36  }
  159.37  
  159.38 -size_t CompressedKlassSpacePool::capacity_in_bytes() const {
  159.39 -  return MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
  159.40 -}
  159.41 -
  159.42  MemoryUsage CompressedKlassSpacePool::get_memory_usage() {
  159.43 -  size_t committed = align_size_down_(capacity_in_bytes(), os::vm_page_size());
  159.44 +  size_t committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
  159.45    return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
  159.46  }
   160.1 --- a/src/share/vm/services/memoryPool.hpp	Fri Sep 27 13:49:57 2013 -0400
   160.2 +++ b/src/share/vm/services/memoryPool.hpp	Fri Sep 27 13:53:43 2013 -0400
   160.3 @@ -224,7 +224,6 @@
   160.4  
   160.5  class MetaspacePool : public MemoryPool {
   160.6    size_t calculate_max_size() const;
   160.7 -  size_t capacity_in_bytes() const;
   160.8   public:
   160.9    MetaspacePool();
  160.10    MemoryUsage get_memory_usage();
  160.11 @@ -232,7 +231,6 @@
  160.12  };
  160.13  
  160.14  class CompressedKlassSpacePool : public MemoryPool {
  160.15 -  size_t capacity_in_bytes() const;
  160.16   public:
  160.17    CompressedKlassSpacePool();
  160.18    MemoryUsage get_memory_usage();
   161.1 --- a/src/share/vm/services/memoryService.cpp	Fri Sep 27 13:49:57 2013 -0400
   161.2 +++ b/src/share/vm/services/memoryService.cpp	Fri Sep 27 13:53:43 2013 -0400
   161.3 @@ -409,7 +409,7 @@
   161.4    mgr->add_pool(_metaspace_pool);
   161.5    _pools_list->append(_metaspace_pool);
   161.6  
   161.7 -  if (UseCompressedKlassPointers) {
   161.8 +  if (UseCompressedClassPointers) {
   161.9      _compressed_class_pool = new CompressedKlassSpacePool();
  161.10      mgr->add_pool(_compressed_class_pool);
  161.11      _pools_list->append(_compressed_class_pool);
   162.1 --- a/src/share/vm/services/memoryUsage.hpp	Fri Sep 27 13:49:57 2013 -0400
   162.2 +++ b/src/share/vm/services/memoryUsage.hpp	Fri Sep 27 13:53:43 2013 -0400
   162.3 @@ -63,10 +63,12 @@
   162.4    size_t committed() const { return _committed; }
   162.5    size_t max_size()  const { return _maxSize; }
   162.6  
   162.7 +  static size_t undefined_size() { return (size_t) -1; }
   162.8 +
   162.9    inline static jlong convert_to_jlong(size_t val) {
  162.10      // In the 64-bit vm, a size_t can overflow a jlong (which is signed).
  162.11      jlong ret;
  162.12 -    if (val == (size_t)-1) {
  162.13 +    if (val == undefined_size()) {
  162.14        ret = -1L;
  162.15      } else {
  162.16        NOT_LP64(ret = val;)
   163.1 --- a/src/share/vm/trace/traceMacros.hpp	Fri Sep 27 13:49:57 2013 -0400
   163.2 +++ b/src/share/vm/trace/traceMacros.hpp	Fri Sep 27 13:53:43 2013 -0400
   163.3 @@ -26,6 +26,7 @@
   163.4  #define SHARE_VM_TRACE_TRACE_MACRO_HPP
   163.5  
   163.6  #define EVENT_THREAD_EXIT(thread)
   163.7 +#define EVENT_THREAD_DESTRUCT(thread)
   163.8  
   163.9  #define TRACE_INIT_ID(k)
  163.10  #define TRACE_DATA TraceThreadData
   164.1 --- a/src/share/vm/utilities/bitMap.inline.hpp	Fri Sep 27 13:49:57 2013 -0400
   164.2 +++ b/src/share/vm/utilities/bitMap.inline.hpp	Fri Sep 27 13:53:43 2013 -0400
   164.3 @@ -52,16 +52,16 @@
   164.4  
   164.5  inline bool BitMap::par_set_bit(idx_t bit) {
   164.6    verify_index(bit);
   164.7 -  volatile idx_t* const addr = word_addr(bit);
   164.8 -  const idx_t mask = bit_mask(bit);
   164.9 -  idx_t old_val = *addr;
  164.10 +  volatile bm_word_t* const addr = word_addr(bit);
  164.11 +  const bm_word_t mask = bit_mask(bit);
  164.12 +  bm_word_t old_val = *addr;
  164.13  
  164.14    do {
  164.15 -    const idx_t new_val = old_val | mask;
  164.16 +    const bm_word_t new_val = old_val | mask;
  164.17      if (new_val == old_val) {
  164.18        return false;     // Someone else beat us to it.
  164.19      }
  164.20 -    const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val,
  164.21 +    const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val,
  164.22                                                        (volatile void*) addr,
  164.23                                                        (void*) old_val);
  164.24      if (cur_val == old_val) {
  164.25 @@ -73,16 +73,16 @@
  164.26  
  164.27  inline bool BitMap::par_clear_bit(idx_t bit) {
  164.28    verify_index(bit);
  164.29 -  volatile idx_t* const addr = word_addr(bit);
  164.30 -  const idx_t mask = ~bit_mask(bit);
  164.31 -  idx_t old_val = *addr;
  164.32 +  volatile bm_word_t* const addr = word_addr(bit);
  164.33 +  const bm_word_t mask = ~bit_mask(bit);
  164.34 +  bm_word_t old_val = *addr;
  164.35  
  164.36    do {
  164.37 -    const idx_t new_val = old_val & mask;
  164.38 +    const bm_word_t new_val = old_val & mask;
  164.39      if (new_val == old_val) {
  164.40        return false;     // Someone else beat us to it.
  164.41      }
  164.42 -    const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val,
  164.43 +    const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val,
  164.44                                                        (volatile void*) addr,
  164.45                                                        (void*) old_val);
  164.46      if (cur_val == old_val) {
   165.1 --- a/src/share/vm/utilities/decoder.cpp	Fri Sep 27 13:49:57 2013 -0400
   165.2 +++ b/src/share/vm/utilities/decoder.cpp	Fri Sep 27 13:53:43 2013 -0400
   165.3 @@ -24,7 +24,6 @@
   165.4  
   165.5  #include "precompiled.hpp"
   165.6  #include "prims/jvm.h"
   165.7 -#include "runtime/mutexLocker.hpp"
   165.8  #include "runtime/os.hpp"
   165.9  #include "utilities/decoder.hpp"
  165.10  #include "utilities/vmError.hpp"
  165.11 @@ -80,6 +79,23 @@
  165.12    return decoder;
  165.13  }
  165.14  
  165.15 +inline bool DecoderLocker::is_first_error_thread() {
  165.16 +  return (os::current_thread_id() == VMError::get_first_error_tid());
  165.17 +}
  165.18 +
  165.19 +DecoderLocker::DecoderLocker() :
  165.20 +  MutexLockerEx(DecoderLocker::is_first_error_thread() ?
  165.21 +                NULL : Decoder::shared_decoder_lock(), true) {
  165.22 +  _decoder = is_first_error_thread() ?
  165.23 +    Decoder::get_error_handler_instance() : Decoder::get_shared_instance();
  165.24 +  assert(_decoder != NULL, "null decoder");
  165.25 +}
  165.26 +
  165.27 +Mutex* Decoder::shared_decoder_lock() {
  165.28 +  assert(_shared_decoder_lock != NULL, "Just check");
  165.29 +  return _shared_decoder_lock;
  165.30 +}
  165.31 +
  165.32  bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const char* modulepath) {
  165.33    assert(_shared_decoder_lock != NULL, "Just check");
  165.34    bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
   166.1 --- a/src/share/vm/utilities/decoder.hpp	Fri Sep 27 13:49:57 2013 -0400
   166.2 +++ b/src/share/vm/utilities/decoder.hpp	Fri Sep 27 13:53:43 2013 -0400
   166.3 @@ -28,6 +28,7 @@
   166.4  
   166.5  #include "memory/allocation.hpp"
   166.6  #include "runtime/mutex.hpp"
   166.7 +#include "runtime/mutexLocker.hpp"
   166.8  
   166.9  class AbstractDecoder : public CHeapObj<mtInternal> {
  166.10  public:
  166.11 @@ -124,6 +125,19 @@
  166.12  
  166.13  protected:
  166.14    static Mutex*               _shared_decoder_lock;
  166.15 +  static Mutex* shared_decoder_lock();
  166.16 +
  166.17 +  friend class DecoderLocker;
  166.18 +};
  166.19 +
  166.20 +class DecoderLocker : public MutexLockerEx {
  166.21 +  AbstractDecoder* _decoder;
  166.22 +  inline bool is_first_error_thread();
  166.23 +public:
  166.24 +  DecoderLocker();
  166.25 +  AbstractDecoder* decoder() {
  166.26 +    return _decoder;
  166.27 +  }
  166.28  };
  166.29  
  166.30  #endif // SHARE_VM_UTILITIES_DECODER_HPP
   167.1 --- a/src/share/vm/utilities/ostream.cpp	Fri Sep 27 13:49:57 2013 -0400
   167.2 +++ b/src/share/vm/utilities/ostream.cpp	Fri Sep 27 13:53:43 2013 -0400
   167.3 @@ -342,7 +342,7 @@
   167.4  }
   167.5  
   167.6  char* stringStream::as_string() {
   167.7 -  char* copy = NEW_RESOURCE_ARRAY(char, buffer_pos+1);
   167.8 +  char* copy = NEW_RESOURCE_ARRAY(char, buffer_pos + 1);
   167.9    strncpy(copy, buffer, buffer_pos);
  167.10    copy[buffer_pos] = 0;  // terminating null
  167.11    return copy;
  167.12 @@ -355,14 +355,190 @@
  167.13  outputStream* gclog_or_tty;
  167.14  extern Mutex* tty_lock;
  167.15  
  167.16 +#define EXTRACHARLEN   32
  167.17 +#define CURRENTAPPX    ".current"
  167.18 +#define FILENAMEBUFLEN  1024
  167.19 +// convert YYYY-MM-DD HH:MM:SS to YYYY-MM-DD_HH-MM-SS
  167.20 +char* get_datetime_string(char *buf, size_t len) {
  167.21 +  os::local_time_string(buf, len);
  167.22 +  int i = (int)strlen(buf);
  167.23 +  while (i-- >= 0) {
  167.24 +    if (buf[i] == ' ') buf[i] = '_';
  167.25 +    else if (buf[i] == ':') buf[i] = '-';
  167.26 +  }
  167.27 +  return buf;
  167.28 +}
  167.29 +
  167.30 +static const char* make_log_name_internal(const char* log_name, const char* force_directory,
  167.31 +                                                int pid, const char* tms) {
  167.32 +  const char* basename = log_name;
  167.33 +  char file_sep = os::file_separator()[0];
  167.34 +  const char* cp;
  167.35 +  char  pid_text[32];
  167.36 +
  167.37 +  for (cp = log_name; *cp != '\0'; cp++) {
  167.38 +    if (*cp == '/' || *cp == file_sep) {
  167.39 +      basename = cp + 1;
  167.40 +    }
  167.41 +  }
  167.42 +  const char* nametail = log_name;
  167.43 +  // Compute buffer length
  167.44 +  size_t buffer_length;
  167.45 +  if (force_directory != NULL) {
  167.46 +    buffer_length = strlen(force_directory) + strlen(os::file_separator()) +
  167.47 +                    strlen(basename) + 1;
  167.48 +  } else {
  167.49 +    buffer_length = strlen(log_name) + 1;
  167.50 +  }
  167.51 +
  167.52 +  // const char* star = strchr(basename, '*');
  167.53 +  const char* pts = strstr(basename, "%p");
  167.54 +  int pid_pos = (pts == NULL) ? -1 : (pts - nametail);
  167.55 +
  167.56 +  if (pid_pos >= 0) {
  167.57 +    jio_snprintf(pid_text, sizeof(pid_text), "pid%u", pid);
  167.58 +    buffer_length += strlen(pid_text);
  167.59 +  }
  167.60 +
  167.61 +  pts = strstr(basename, "%t");
  167.62 +  int tms_pos = (pts == NULL) ? -1 : (pts - nametail);
  167.63 +  if (tms_pos >= 0) {
  167.64 +    buffer_length += strlen(tms);
  167.65 +  }
  167.66 +
  167.67 +  // Create big enough buffer.
  167.68 +  char *buf = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal);
  167.69 +
  167.70 +  strcpy(buf, "");
  167.71 +  if (force_directory != NULL) {
  167.72 +    strcat(buf, force_directory);
  167.73 +    strcat(buf, os::file_separator());
  167.74 +    nametail = basename;       // completely skip directory prefix
  167.75 +  }
  167.76 +
  167.77 +  // who is first, %p or %t?
  167.78 +  int first = -1, second = -1;
  167.79 +  const char *p1st = NULL;
  167.80 +  const char *p2nd = NULL;
  167.81 +
  167.82 +  if (pid_pos >= 0 && tms_pos >= 0) {
  167.83 +    // contains both %p and %t
  167.84 +    if (pid_pos < tms_pos) {
  167.85 +      // case foo%pbar%tmonkey.log
  167.86 +      first  = pid_pos;
  167.87 +      p1st   = pid_text;
  167.88 +      second = tms_pos;
  167.89 +      p2nd   = tms;
  167.90 +    } else {
  167.91 +      // case foo%tbar%pmonkey.log
  167.92 +      first  = tms_pos;
  167.93 +      p1st   = tms;
  167.94 +      second = pid_pos;
  167.95 +      p2nd   = pid_text;
  167.96 +    }
  167.97 +  } else if (pid_pos >= 0) {
  167.98 +    // contains %p only
  167.99 +    first  = pid_pos;
 167.100 +    p1st   = pid_text;
 167.101 +  } else if (tms_pos >= 0) {
 167.102 +    // contains %t only
 167.103 +    first  = tms_pos;
 167.104 +    p1st   = tms;
 167.105 +  }
 167.106 +
 167.107 +  int buf_pos = (int)strlen(buf);
 167.108 +  const char* tail = nametail;
 167.109 +
 167.110 +  if (first >= 0) {
 167.111 +    tail = nametail + first + 2;
 167.112 +    strncpy(&buf[buf_pos], nametail, first);
 167.113 +    strcpy(&buf[buf_pos + first], p1st);
 167.114 +    buf_pos = (int)strlen(buf);
 167.115 +    if (second >= 0) {
 167.116 +      strncpy(&buf[buf_pos], tail, second - first - 2);
 167.117 +      strcpy(&buf[buf_pos + second - first - 2], p2nd);
 167.118 +      tail = nametail + second + 2;
 167.119 +    }
 167.120 +  }
 167.121 +  strcat(buf, tail);      // append rest of name, or all of name
 167.122 +  return buf;
 167.123 +}
 167.124 +
 167.125 +// log_name comes from -XX:LogFile=log_name or -Xloggc:log_name
 167.126 +// in log_name, %p => pipd1234 and
 167.127 +//              %t => YYYY-MM-DD_HH-MM-SS
 167.128 +static const char* make_log_name(const char* log_name, const char* force_directory) {
 167.129 +  char timestr[32];
 167.130 +  get_datetime_string(timestr, sizeof(timestr));
 167.131 +  return make_log_name_internal(log_name, force_directory, os::current_process_id(),
 167.132 +                                timestr);
 167.133 +}
 167.134 +
 167.135 +#ifndef PRODUCT
 167.136 +void test_loggc_filename() {
 167.137 +  int pid;
 167.138 +  char  tms[32];
 167.139 +  char  i_result[FILENAMEBUFLEN];
 167.140 +  const char* o_result;
 167.141 +  get_datetime_string(tms, sizeof(tms));
 167.142 +  pid = os::current_process_id();
 167.143 +
 167.144 +  // test.log
 167.145 +  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test.log", tms);
 167.146 +  o_result = make_log_name_internal("test.log", NULL, pid, tms);
 167.147 +  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test.log\", NULL)");
 167.148 +  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
 167.149 +
 167.150 +  // test-%t-%p.log
 167.151 +  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test-%s-pid%u.log", tms, pid);
 167.152 +  o_result = make_log_name_internal("test-%t-%p.log", NULL, pid, tms);
 167.153 +  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t-%%p.log\", NULL)");
 167.154 +  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
 167.155 +
 167.156 +  // test-%t%p.log
 167.157 +  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "test-%spid%u.log", tms, pid);
 167.158 +  o_result = make_log_name_internal("test-%t%p.log", NULL, pid, tms);
 167.159 +  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"test-%%t%%p.log\", NULL)");
 167.160 +  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
 167.161 +
 167.162 +  // %p%t.log
 167.163 +  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "pid%u%s.log", pid, tms);
 167.164 +  o_result = make_log_name_internal("%p%t.log", NULL, pid, tms);
 167.165 +  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p%%t.log\", NULL)");
 167.166 +  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
 167.167 +
 167.168 +  // %p-test.log
 167.169 +  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "pid%u-test.log", pid);
 167.170 +  o_result = make_log_name_internal("%p-test.log", NULL, pid, tms);
 167.171 +  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%p-test.log\", NULL)");
 167.172 +  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
 167.173 +
 167.174 +  // %t.log
 167.175 +  jio_snprintf(i_result, sizeof(char)*FILENAMEBUFLEN, "%s.log", tms);
 167.176 +  o_result = make_log_name_internal("%t.log", NULL, pid, tms);
 167.177 +  assert(strcmp(i_result, o_result) == 0, "failed on testing make_log_name(\"%%t.log\", NULL)");
 167.178 +  FREE_C_HEAP_ARRAY(char, o_result, mtInternal);
 167.179 +}
 167.180 +#endif // PRODUCT
 167.181 +
 167.182  fileStream::fileStream(const char* file_name) {
 167.183    _file = fopen(file_name, "w");
 167.184 -  _need_close = true;
 167.185 +  if (_file != NULL) {
 167.186 +    _need_close = true;
 167.187 +  } else {
 167.188 +    warning("Cannot open file %s due to %s\n", file_name, strerror(errno));
 167.189 +    _need_close = false;
 167.190 +  }
 167.191  }
 167.192  
 167.193  fileStream::fileStream(const char* file_name, const char* opentype) {
 167.194    _file = fopen(file_name, opentype);
 167.195 -  _need_close = true;
 167.196 +  if (_file != NULL) {
 167.197 +    _need_close = true;
 167.198 +  } else {
 167.199 +    warning("Cannot open file %s due to %s\n", file_name, strerror(errno));
 167.200 +    _need_close = false;
 167.201 +  }
 167.202  }
 167.203  
 167.204  void fileStream::write(const char* s, size_t len) {
 167.205 @@ -423,34 +599,51 @@
 167.206    update_position(s, len);
 167.207  }
 167.208  
 167.209 -rotatingFileStream::~rotatingFileStream() {
 167.210 +// dump vm version, os version, platform info, build id,
 167.211 +// memory usage and command line flags into header
 167.212 +void gcLogFileStream::dump_loggc_header() {
 167.213 +  if (is_open()) {
 167.214 +    print_cr(Abstract_VM_Version::internal_vm_info_string());
 167.215 +    os::print_memory_info(this);
 167.216 +    print("CommandLine flags: ");
 167.217 +    CommandLineFlags::printSetFlags(this);
 167.218 +  }
 167.219 +}
 167.220 +
 167.221 +gcLogFileStream::~gcLogFileStream() {
 167.222    if (_file != NULL) {
 167.223      if (_need_close) fclose(_file);
 167.224 -    _file      = NULL;
 167.225 +    _file = NULL;
 167.226 +  }
 167.227 +  if (_file_name != NULL) {
 167.228      FREE_C_HEAP_ARRAY(char, _file_name, mtInternal);
 167.229      _file_name = NULL;
 167.230    }
 167.231  }
 167.232  
 167.233 -rotatingFileStream::rotatingFileStream(const char* file_name) {
 167.234 +gcLogFileStream::gcLogFileStream(const char* file_name) {
 167.235    _cur_file_num = 0;
 167.236    _bytes_written = 0L;
 167.237 -  _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10, mtInternal);
 167.238 -  jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
 167.239 -  _file = fopen(_file_name, "w");
 167.240 -  _need_close = true;
 167.241 +  _file_name = make_log_name(file_name, NULL);
 167.242 +
 167.243 +  // gc log file rotation
 167.244 +  if (UseGCLogFileRotation && NumberOfGCLogFiles > 1) {
 167.245 +    char tempbuf[FILENAMEBUFLEN];
 167.246 +    jio_snprintf(tempbuf, sizeof(tempbuf), "%s.%d" CURRENTAPPX, _file_name, _cur_file_num);
 167.247 +    _file = fopen(tempbuf, "w");
 167.248 +  } else {
 167.249 +    _file = fopen(_file_name, "w");
 167.250 +  }
 167.251 +  if (_file != NULL) {
 167.252 +    _need_close = true;
 167.253 +    dump_loggc_header();
 167.254 +  } else {
 167.255 +    warning("Cannot open file %s due to %s\n", _file_name, strerror(errno));
 167.256 +    _need_close = false;
 167.257 +  }
 167.258  }
 167.259  
 167.260 -rotatingFileStream::rotatingFileStream(const char* file_name, const char* opentype) {
 167.261 -  _cur_file_num = 0;
 167.262 -  _bytes_written = 0L;
 167.263 -  _file_name = NEW_C_HEAP_ARRAY(char, strlen(file_name)+10, mtInternal);
 167.264 -  jio_snprintf(_file_name, strlen(file_name)+10, "%s.%d", file_name, _cur_file_num);
 167.265 -  _file = fopen(_file_name, opentype);
 167.266 -  _need_close = true;
 167.267 -}
 167.268 -
 167.269 -void rotatingFileStream::write(const char* s, size_t len) {
 167.270 +void gcLogFileStream::write(const char* s, size_t len) {
 167.271    if (_file != NULL) {
 167.272      size_t count = fwrite(s, 1, len, _file);
 167.273      _bytes_written += count;
 167.274 @@ -466,7 +659,12 @@
 167.275  // write to gc log file at safepoint. If in future, changes made for mutator threads or
 167.276  // concurrent GC threads to run parallel with VMThread at safepoint, write and rotate_log
 167.277  // must be synchronized.
 167.278 -void rotatingFileStream::rotate_log() {
 167.279 +void gcLogFileStream::rotate_log() {
 167.280 +  char time_msg[FILENAMEBUFLEN];
 167.281 +  char time_str[EXTRACHARLEN];
 167.282 +  char current_file_name[FILENAMEBUFLEN];
 167.283 +  char renamed_file_name[FILENAMEBUFLEN];
 167.284 +
 167.285    if (_bytes_written < (jlong)GCLogFileSize) {
 167.286      return;
 167.287    }
 167.288 @@ -481,27 +679,89 @@
 167.289      // rotate in same file
 167.290      rewind();
 167.291      _bytes_written = 0L;
 167.292 +    jio_snprintf(time_msg, sizeof(time_msg), "File  %s rotated at %s\n",
 167.293 +                 _file_name, os::local_time_string((char *)time_str, sizeof(time_str)));
 167.294 +    write(time_msg, strlen(time_msg));
 167.295 +    dump_loggc_header();
 167.296      return;
 167.297    }
 167.298  
 167.299 -  // rotate file in names file.0, file.1, file.2, ..., file.<MaxGCLogFileNumbers-1>
 167.300 -  // close current file, rotate to next file
 167.301 +#if defined(_WINDOWS)
 167.302 +#ifndef F_OK
 167.303 +#define F_OK 0
 167.304 +#endif
 167.305 +#endif // _WINDOWS
 167.306 +
 167.307 +  // rotate file in names extended_filename.0, extended_filename.1, ...,
 167.308 +  // extended_filename.<NumberOfGCLogFiles - 1>. Current rotation file name will
 167.309 +  // have a form of extended_filename.<i>.current where i is the current rotation
 167.310 +  // file number. After it reaches max file size, the file will be saved and renamed
 167.311 +  // with .current removed from its tail.
 167.312 +  size_t filename_len = strlen(_file_name);
 167.313    if (_file != NULL) {
 167.314 -    _cur_file_num ++;
 167.315 -    if (_cur_file_num >= NumberOfGCLogFiles) _cur_file_num = 0;
 167.316 -    jio_snprintf(_file_name, strlen(Arguments::gc_log_filename()) + 10, "%s.%d",
 167.317 -             Arguments::gc_log_filename(), _cur_file_num);
 167.318 +    jio_snprintf(renamed_file_name, filename_len + EXTRACHARLEN, "%s.%d",
 167.319 +                 _file_name, _cur_file_num);
 167.320 +    jio_snprintf(current_file_name, filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX,
 167.321 +                 _file_name, _cur_file_num);
 167.322 +    jio_snprintf(time_msg, sizeof(time_msg), "%s GC log file has reached the"
 167.323 +                           " maximum size. Saved as %s\n",
 167.324 +                           os::local_time_string((char *)time_str, sizeof(time_str)),
 167.325 +                           renamed_file_name);
 167.326 +    write(time_msg, strlen(time_msg));
 167.327 +
 167.328      fclose(_file);
 167.329      _file = NULL;
 167.330 +
 167.331 +    bool can_rename = true;
 167.332 +    if (access(current_file_name, F_OK) != 0) {
 167.333 +      // current file does not exist?
 167.334 +      warning("No source file exists, cannot rename\n");
 167.335 +      can_rename = false;
 167.336 +    }
 167.337 +    if (can_rename) {
 167.338 +      if (access(renamed_file_name, F_OK) == 0) {
 167.339 +        if (remove(renamed_file_name) != 0) {
 167.340 +          warning("Could not delete existing file %s\n", renamed_file_name);
 167.341 +          can_rename = false;
 167.342 +        }
 167.343 +      } else {
 167.344 +        // file does not exist, ok to rename
 167.345 +      }
 167.346 +    }
 167.347 +    if (can_rename && rename(current_file_name, renamed_file_name) != 0) {
 167.348 +      warning("Could not rename %s to %s\n", _file_name, renamed_file_name);
 167.349 +    }
 167.350    }
 167.351 -  _file = fopen(_file_name, "w");
 167.352 +
 167.353 +  _cur_file_num++;
 167.354 +  if (_cur_file_num > NumberOfGCLogFiles - 1) _cur_file_num = 0;
 167.355 +  jio_snprintf(current_file_name,  filename_len + EXTRACHARLEN, "%s.%d" CURRENTAPPX,
 167.356 +               _file_name, _cur_file_num);
 167.357 +  _file = fopen(current_file_name, "w");
 167.358 +
 167.359    if (_file != NULL) {
 167.360      _bytes_written = 0L;
 167.361      _need_close = true;
  167.362 +    // reuse the current_file_name buffer to build the plain log name shown in time_msg
 167.363 +    jio_snprintf(current_file_name, filename_len + EXTRACHARLEN,
 167.364 +                 "%s.%d", _file_name, _cur_file_num);
 167.365 +    jio_snprintf(time_msg, sizeof(time_msg), "%s GC log file created %s\n",
 167.366 +                           os::local_time_string((char *)time_str, sizeof(time_str)),
 167.367 +                           current_file_name);
 167.368 +    write(time_msg, strlen(time_msg));
 167.369 +    dump_loggc_header();
  167.370 +    // remove any stale saved log left at this rotation slot from a previous cycle
 167.371 +    if (access(current_file_name, F_OK) == 0) {
 167.372 +      if (remove(current_file_name) != 0) {
 167.373 +        warning("Could not delete existing file %s\n", current_file_name);
 167.374 +      }
 167.375 +    }
 167.376    } else {
 167.377 -    tty->print_cr("failed to open rotation log file %s due to %s\n",
 167.378 +    warning("failed to open rotation log file %s due to %s\n"
 167.379 +            "Turned off GC log file rotation\n",
 167.380                    _file_name, strerror(errno));
 167.381      _need_close = false;
 167.382 +    FLAG_SET_DEFAULT(UseGCLogFileRotation, false);
 167.383    }
 167.384  }
 167.385  
 167.386 @@ -530,69 +790,9 @@
 167.387    return _log_file != NULL;
 167.388  }
 167.389  
 167.390 -static const char* make_log_name(const char* log_name, const char* force_directory) {
 167.391 -  const char* basename = log_name;
 167.392 -  char file_sep = os::file_separator()[0];
 167.393 -  const char* cp;
 167.394 -  for (cp = log_name; *cp != '\0'; cp++) {
 167.395 -    if (*cp == '/' || *cp == file_sep) {
 167.396 -      basename = cp+1;
 167.397 -    }
 167.398 -  }
 167.399 -  const char* nametail = log_name;
 167.400 -
 167.401 -  // Compute buffer length
 167.402 -  size_t buffer_length;
 167.403 -  if (force_directory != NULL) {
 167.404 -    buffer_length = strlen(force_directory) + strlen(os::file_separator()) +
 167.405 -                    strlen(basename) + 1;
 167.406 -  } else {
 167.407 -    buffer_length = strlen(log_name) + 1;
 167.408 -  }
 167.409 -
 167.410 -  const char* star = strchr(basename, '*');
 167.411 -  int star_pos = (star == NULL) ? -1 : (star - nametail);
 167.412 -  int skip = 1;
 167.413 -  if (star == NULL) {
 167.414 -    // Try %p
 167.415 -    star = strstr(basename, "%p");
 167.416 -    if (star != NULL) {
 167.417 -      skip = 2;
 167.418 -    }
 167.419 -  }
 167.420 -  star_pos = (star == NULL) ? -1 : (star - nametail);
 167.421 -
 167.422 -  char pid[32];
 167.423 -  if (star_pos >= 0) {
 167.424 -    jio_snprintf(pid, sizeof(pid), "%u", os::current_process_id());
 167.425 -    buffer_length += strlen(pid);
 167.426 -  }
 167.427 -
 167.428 -  // Create big enough buffer.
 167.429 -  char *buf = NEW_C_HEAP_ARRAY(char, buffer_length, mtInternal);
 167.430 -
 167.431 -  strcpy(buf, "");
 167.432 -  if (force_directory != NULL) {
 167.433 -    strcat(buf, force_directory);
 167.434 -    strcat(buf, os::file_separator());
 167.435 -    nametail = basename;       // completely skip directory prefix
 167.436 -  }
 167.437 -
 167.438 -  if (star_pos >= 0) {
 167.439 -    // convert foo*bar.log or foo%pbar.log to foo123bar.log
 167.440 -    int buf_pos = (int) strlen(buf);
 167.441 -    strncpy(&buf[buf_pos], nametail, star_pos);
 167.442 -    strcpy(&buf[buf_pos + star_pos], pid);
 167.443 -    nametail += star_pos + skip;  // skip prefix and pid format
 167.444 -  }
 167.445 -
 167.446 -  strcat(buf, nametail);      // append rest of name, or all of name
 167.447 -  return buf;
 167.448 -}
 167.449 -
 167.450  void defaultStream::init_log() {
 167.451    // %%% Need a MutexLocker?
 167.452 -  const char* log_name = LogFile != NULL ? LogFile : "hotspot.log";
 167.453 +  const char* log_name = LogFile != NULL ? LogFile : "hotspot_pid%p.log";
 167.454    const char* try_name = make_log_name(log_name, NULL);
 167.455    fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
 167.456    if (!file->is_open()) {
 167.457 @@ -603,14 +803,15 @@
 167.458      // Note:  This feature is for maintainer use only.  No need for L10N.
 167.459      jio_print(warnbuf);
 167.460      FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
 167.461 -    try_name = make_log_name("hs_pid%p.log", os::get_temp_directory());
 167.462 +    try_name = make_log_name(log_name, os::get_temp_directory());
 167.463      jio_snprintf(warnbuf, sizeof(warnbuf),
 167.464                   "Warning:  Forcing option -XX:LogFile=%s\n", try_name);
 167.465      jio_print(warnbuf);
 167.466      delete file;
 167.467      file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
 167.468 -    FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
 167.469    }
 167.470 +  FREE_C_HEAP_ARRAY(char, try_name, mtInternal);
 167.471 +
 167.472    if (file->is_open()) {
 167.473      _log_file = file;
 167.474      xmlStream* xs = new(ResourceObj::C_HEAP, mtInternal) xmlStream(file);
 167.475 @@ -877,11 +1078,8 @@
 167.476  
 167.477    gclog_or_tty = tty; // default to tty
 167.478    if (Arguments::gc_log_filename() != NULL) {
 167.479 -    fileStream * gclog  = UseGCLogFileRotation ?
 167.480 -                          new(ResourceObj::C_HEAP, mtInternal)
 167.481 -                             rotatingFileStream(Arguments::gc_log_filename()) :
 167.482 -                          new(ResourceObj::C_HEAP, mtInternal)
 167.483 -                             fileStream(Arguments::gc_log_filename());
 167.484 +    fileStream * gclog  = new(ResourceObj::C_HEAP, mtInternal)
 167.485 +                             gcLogFileStream(Arguments::gc_log_filename());
 167.486      if (gclog->is_open()) {
 167.487        // now we update the time stamp of the GC log to be synced up
 167.488        // with tty.
   168.1 --- a/src/share/vm/utilities/ostream.hpp	Fri Sep 27 13:49:57 2013 -0400
   168.2 +++ b/src/share/vm/utilities/ostream.hpp	Fri Sep 27 13:53:43 2013 -0400
   168.3 @@ -231,20 +231,24 @@
   168.4    void flush() {};
   168.5  };
   168.6  
   168.7 -class rotatingFileStream : public fileStream {
   168.8 +class gcLogFileStream : public fileStream {
   168.9   protected:
  168.10 -  char*  _file_name;
  168.11 +  const char*  _file_name;
  168.12    jlong  _bytes_written;
  168.13 -  uintx  _cur_file_num;             // current logfile rotation number, from 0 to MaxGCLogFileNumbers-1
  168.14 +  uintx  _cur_file_num;             // current logfile rotation number, from 0 to NumberOfGCLogFiles-1
  168.15   public:
  168.16 -  rotatingFileStream(const char* file_name);
  168.17 -  rotatingFileStream(const char* file_name, const char* opentype);
  168.18 -  rotatingFileStream(FILE* file) : fileStream(file) {}
  168.19 -  ~rotatingFileStream();
  168.20 +  gcLogFileStream(const char* file_name);
  168.21 +  ~gcLogFileStream();
  168.22    virtual void write(const char* c, size_t len);
  168.23    virtual void rotate_log();
  168.24 +  void dump_loggc_header();
  168.25  };
  168.26  
  168.27 +#ifndef PRODUCT
  168.28 +// unit test for checking -Xloggc:<filename> parsing result
  168.29 +void test_loggc_filename();
  168.30 +#endif
  168.31 +
  168.32  void ostream_init();
  168.33  void ostream_init_log();
  168.34  void ostream_exit();
   169.1 --- a/src/share/vm/utilities/vmError.cpp	Fri Sep 27 13:49:57 2013 -0400
   169.2 +++ b/src/share/vm/utilities/vmError.cpp	Fri Sep 27 13:53:43 2013 -0400
   169.3 @@ -574,6 +574,10 @@
   169.4    STEP(120, "(printing native stack)" )
   169.5  
   169.6       if (_verbose) {
   169.7 +     if (os::platform_print_native_stack(st, _context, buf, sizeof(buf))) {
   169.8 +       // We have printed the native stack in platform-specific code
   169.9 +       // Windows/x64 needs special handling.
  169.10 +     } else {
  169.11         frame fr = _context ? os::fetch_frame_from_context(_context)
  169.12                             : os::current_frame();
  169.13  
  169.14 @@ -604,6 +608,7 @@
  169.15            st->cr();
  169.16         }
  169.17       }
  169.18 +   }
  169.19  
  169.20    STEP(130, "(printing Java stack)" )
  169.21  
   170.1 --- a/src/share/vm/utilities/vmError.hpp	Fri Sep 27 13:49:57 2013 -0400
   170.2 +++ b/src/share/vm/utilities/vmError.hpp	Fri Sep 27 13:53:43 2013 -0400
   170.3 @@ -136,6 +136,10 @@
   170.4  
   170.5    // check to see if fatal error reporting is in progress
   170.6    static bool fatal_error_in_progress() { return first_error != NULL; }
   170.7 +
   170.8 +  static jlong get_first_error_tid() {
   170.9 +    return first_error_tid;
  170.10 +  }
  170.11  };
  170.12  
  170.13  #endif // SHARE_VM_UTILITIES_VMERROR_HPP
   171.1 --- a/test/TEST.groups	Fri Sep 27 13:49:57 2013 -0400
   171.2 +++ b/test/TEST.groups	Fri Sep 27 13:53:43 2013 -0400
   171.3 @@ -62,7 +62,7 @@
   171.4  #
   171.5  needs_jdk = \
   171.6    gc/TestG1ZeroPGCTJcmdThreadPrint.java \
   171.7 -  gc/metaspace/ClassMetaspaceSizeInJmapHeap.java \
   171.8 +  gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java \
   171.9    gc/metaspace/TestMetaspacePerfCounters.java \
  171.10    runtime/6819213/TestBootNativeLibraryPath.java \
  171.11    runtime/6878713/Test6878713.sh \
   172.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   172.2 +++ b/test/compiler/print/PrintInlining.java	Fri Sep 27 13:53:43 2013 -0400
   172.3 @@ -0,0 +1,36 @@
   172.4 +/*
   172.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   172.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   172.7 + *
   172.8 + * This code is free software; you can redistribute it and/or modify it
   172.9 + * under the terms of the GNU General Public License version 2 only, as
  172.10 + * published by the Free Software Foundation.
  172.11 + *
  172.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  172.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  172.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  172.15 + * version 2 for more details (a copy is included in the LICENSE file that
  172.16 + * accompanied this code).
  172.17 + *
  172.18 + * You should have received a copy of the GNU General Public License version
  172.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  172.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  172.21 + *
  172.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  172.23 + * or visit www.oracle.com if you need additional information or have any
  172.24 + * questions.
  172.25 + */
  172.26 +
  172.27 +/*
  172.28 + * @test
  172.29 + * @bug 8022585
  172.30 + * @summary VM crashes when ran with -XX:+PrintInlining
  172.31 + * @run main/othervm -Xcomp -XX:+PrintInlining PrintInlining
  172.32 + *
  172.33 + */
  172.34 +
  172.35 +public class PrintInlining {
  172.36 +  public static void main(String[] args) {
  172.37 +    System.out.println("Passed");
  172.38 +  }
  172.39 +}
   173.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   173.2 +++ b/test/gc/TestObjectAlignment.java	Fri Sep 27 13:53:43 2013 -0400
   173.3 @@ -0,0 +1,65 @@
   173.4 +/*
   173.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   173.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   173.7 + *
   173.8 + * This code is free software; you can redistribute it and/or modify it
   173.9 + * under the terms of the GNU General Public License version 2 only, as
  173.10 + * published by the Free Software Foundation.
  173.11 + *
  173.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  173.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  173.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  173.15 + * version 2 for more details (a copy is included in the LICENSE file that
  173.16 + * accompanied this code).
  173.17 + *
  173.18 + * You should have received a copy of the GNU General Public License version
  173.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  173.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  173.21 + *
  173.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  173.23 + * or visit www.oracle.com if you need additional information or have any
  173.24 + * questions.
  173.25 + */
  173.26 +
  173.27 +/**
  173.28 + * @test TestObjectAlignment
  173.29 + * @key gc
  173.30 + * @bug 8021823
  173.31 + * @summary G1: Concurrent marking crashes with -XX:ObjectAlignmentInBytes>=32 in 64bit VMs
  173.32 + * @library /testlibrary
  173.33 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=8
  173.34 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=16
  173.35 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=32
  173.36 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=64
  173.37 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=128
  173.38 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=256
  173.39 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=8
  173.40 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=16
  173.41 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=32
  173.42 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=64
  173.43 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=128
  173.44 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=256
  173.45 + */
  173.46 +
  173.47 +import com.oracle.java.testlibrary.ProcessTools;
  173.48 +import com.oracle.java.testlibrary.OutputAnalyzer;
  173.49 +
  173.50 +public class TestObjectAlignment {
  173.51 +
  173.52 +  public static byte[] garbage;
  173.53 +
  173.54 +  private static boolean runsOn32bit() {
  173.55 +    return System.getProperty("sun.arch.data.model").equals("32");
  173.56 +  }
  173.57 +
  173.58 +  public static void main(String[] args) throws Exception {
  173.59 +    if (runsOn32bit()) {
  173.60 +      // 32 bit VMs do not allow setting ObjectAlignmentInBytes, so there is nothing to test. We still get called.
  173.61 +      return;
  173.62 +    }
  173.63 +    for (int i = 0; i < 10; i++) {
  173.64 +      garbage = new byte[1000];
  173.65 +      System.gc();
  173.66 +    }
  173.67 +  }
  173.68 +}
   174.1 --- a/test/gc/TestVerifyDuringStartup.java	Fri Sep 27 13:49:57 2013 -0400
   174.2 +++ b/test/gc/TestVerifyDuringStartup.java	Fri Sep 27 13:53:43 2013 -0400
   174.3 @@ -48,7 +48,7 @@
   174.4                                               "-XX:+VerifyDuringStartup",
   174.5                                               "-version"});
   174.6  
   174.7 -    System.out.print("Testing:\n" + JDKToolFinder.getCurrentJDKTool("java"));
   174.8 +    System.out.print("Testing:\n" + JDKToolFinder.getJDKTool("java"));
   174.9      for (int i = 0; i < vmOpts.size(); i += 1) {
  174.10        System.out.print(" " + vmOpts.get(i));
  174.11      }
   175.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   175.2 +++ b/test/gc/arguments/TestAlignmentToUseLargePages.java	Fri Sep 27 13:53:43 2013 -0400
   175.3 @@ -0,0 +1,47 @@
   175.4 +/*
   175.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   175.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   175.7 + *
   175.8 + * This code is free software; you can redistribute it and/or modify it
   175.9 + * under the terms of the GNU General Public License version 2 only, as
  175.10 + * published by the Free Software Foundation.
  175.11 + *
  175.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  175.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  175.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  175.15 + * version 2 for more details (a copy is included in the LICENSE file that
  175.16 + * accompanied this code).
  175.17 + *
  175.18 + * You should have received a copy of the GNU General Public License version
  175.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  175.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  175.21 + *
  175.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  175.23 + * or visit www.oracle.com if you need additional information or have any
  175.24 + * questions.
  175.25 + */
  175.26 +
  175.27 +/**
  175.28 + * @test TestAlignmentToUseLargePages
  175.29 + * @summary All parallel GC variants may use large pages without the requirement that the
  175.30 + * heap alignment is large page aligned. Other collectors also need to start up with odd sized heaps.
  175.31 + * @bug 8024396
  175.32 + * @key gc
  175.33 + * @key regression
  175.34 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages
  175.35 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:-UseLargePages TestAlignmentToUseLargePages
  175.36 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages
  175.37 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:-UseLargePages TestAlignmentToUseLargePages
  175.38 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseSerialGC -XX:+UseLargePages TestAlignmentToUseLargePages
  175.39 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseSerialGC -XX:-UseLargePages TestAlignmentToUseLargePages
  175.40 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseConcMarkSweepGC -XX:+UseLargePages TestAlignmentToUseLargePages
  175.41 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseConcMarkSweepGC -XX:-UseLargePages TestAlignmentToUseLargePages
  175.42 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseG1GC -XX:+UseLargePages TestAlignmentToUseLargePages
  175.43 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseG1GC -XX:-UseLargePages TestAlignmentToUseLargePages
  175.44 + */
  175.45 +
  175.46 +public class TestAlignmentToUseLargePages {
  175.47 +  public static void main(String args[]) throws Exception {
  175.48 +    // nothing to do
  175.49 +  }
  175.50 +}
   176.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   176.2 +++ b/test/gc/arguments/TestCompressedClassFlags.java	Fri Sep 27 13:53:43 2013 -0400
   176.3 @@ -0,0 +1,49 @@
   176.4 +/*
   176.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   176.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   176.7 + *
   176.8 + * This code is free software; you can redistribute it and/or modify it
   176.9 + * under the terms of the GNU General Public License version 2 only, as
  176.10 + * published by the Free Software Foundation.
  176.11 + *
  176.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  176.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  176.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  176.15 + * version 2 for more details (a copy is included in the LICENSE file that
  176.16 + * accompanied this code).
  176.17 + *
  176.18 + * You should have received a copy of the GNU General Public License version
  176.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  176.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  176.21 + *
  176.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  176.23 + * or visit www.oracle.com if you need additional information or have any
  176.24 + * questions.
  176.25 + */
  176.26 +
  176.27 +import com.oracle.java.testlibrary.*;
  176.28 +
  176.29 +/*
  176.30 + * @test
  176.31 + * @bug 8015107
  176.32 + * @summary Tests that VM prints a warning when -XX:CompressedClassSpaceSize
  176.33 + *          is used together with -XX:-UseCompressedClassPointers
  176.34 + * @library /testlibrary
  176.35 + */
  176.36 +public class TestCompressedClassFlags {
  176.37 +    public static void main(String[] args) throws Exception {
  176.38 +        if (Platform.is64bit()) {
  176.39 +            OutputAnalyzer output = runJava("-XX:CompressedClassSpaceSize=1g",
  176.40 +                                            "-XX:-UseCompressedClassPointers",
  176.41 +                                            "-version");
  176.42 +            output.shouldContain("warning");
  176.43 +            output.shouldNotContain("error");
  176.44 +            output.shouldHaveExitValue(0);
  176.45 +        }
  176.46 +    }
  176.47 +
  176.48 +    private static OutputAnalyzer runJava(String ... args) throws Exception {
  176.49 +        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args);
  176.50 +        return new OutputAnalyzer(pb.start());
  176.51 +    }
  176.52 +}
   177.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   177.2 +++ b/test/gc/arguments/TestUseCompressedOopsErgo.java	Fri Sep 27 13:53:43 2013 -0400
   177.3 @@ -0,0 +1,50 @@
   177.4 +/*
   177.5 +* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   177.6 +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   177.7 +*
   177.8 +* This code is free software; you can redistribute it and/or modify it
   177.9 +* under the terms of the GNU General Public License version 2 only, as
  177.10 +* published by the Free Software Foundation.
  177.11 +*
  177.12 +* This code is distributed in the hope that it will be useful, but WITHOUT
  177.13 +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  177.14 +* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  177.15 +* version 2 for more details (a copy is included in the LICENSE file that
  177.16 +* accompanied this code).
  177.17 +*
  177.18 +* You should have received a copy of the GNU General Public License version
  177.19 +* 2 along with this work; if not, write to the Free Software Foundation,
  177.20 +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  177.21 +*
  177.22 +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  177.23 +* or visit www.oracle.com if you need additional information or have any
  177.24 +* questions.
  177.25 +*/
  177.26 +
  177.27 +/*
  177.28 + * @test TestUseCompressedOopsErgo
  177.29 + * @key gc
  177.30 + * @bug 8010722
  177.31 + * @summary Tests ergonomics for UseCompressedOops.
  177.32 + * @library /testlibrary /testlibrary/whitebox
  177.33 + * @build TestUseCompressedOopsErgo TestUseCompressedOopsErgoTools
  177.34 + * @run main ClassFileInstaller sun.hotspot.WhiteBox
  177.35 + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseG1GC
  177.36 + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseParallelGC
  177.37 + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseParallelGC -XX:-UseParallelOldGC
  177.38 + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseConcMarkSweepGC
  177.39 + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseSerialGC
  177.40 + */
  177.41 +
  177.42 +public class TestUseCompressedOopsErgo {
  177.43 +
  177.44 +  public static void main(String args[]) throws Exception {
  177.45 +    if (!TestUseCompressedOopsErgoTools.is64bitVM()) {
  177.46 +      // this test is relevant for 64 bit VMs only
  177.47 +      return;
  177.48 +    }
  177.49 +    final String[] gcFlags = args;
  177.50 +    TestUseCompressedOopsErgoTools.checkCompressedOopsErgo(gcFlags);
  177.51 +  }
  177.52 +}
  177.53 +
   178.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   178.2 +++ b/test/gc/arguments/TestUseCompressedOopsErgoTools.java	Fri Sep 27 13:53:43 2013 -0400
   178.3 @@ -0,0 +1,177 @@
   178.4 +/*
   178.5 +* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   178.6 +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   178.7 +*
   178.8 +* This code is free software; you can redistribute it and/or modify it
   178.9 +* under the terms of the GNU General Public License version 2 only, as
  178.10 +* published by the Free Software Foundation.
  178.11 +*
  178.12 +* This code is distributed in the hope that it will be useful, but WITHOUT
  178.13 +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  178.14 +* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  178.15 +* version 2 for more details (a copy is included in the LICENSE file that
  178.16 +* accompanied this code).
  178.17 +*
  178.18 +* You should have received a copy of the GNU General Public License version
  178.19 +* 2 along with this work; if not, write to the Free Software Foundation,
  178.20 +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  178.21 +*
  178.22 +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  178.23 +* or visit www.oracle.com if you need additional information or have any
  178.24 +* questions.
  178.25 +*/
  178.26 +
  178.27 +import sun.management.ManagementFactoryHelper;
  178.28 +import com.sun.management.HotSpotDiagnosticMXBean;
  178.29 +import com.sun.management.VMOption;
  178.30 +
  178.31 +import java.util.regex.Matcher;
  178.32 +import java.util.regex.Pattern;
  178.33 +import java.util.ArrayList;
  178.34 +import java.util.Arrays;
  178.35 +
  178.36 +import com.oracle.java.testlibrary.*;
  178.37 +import sun.hotspot.WhiteBox;
  178.38 +
  178.39 +class DetermineMaxHeapForCompressedOops {
  178.40 +  public static void main(String[] args) throws Exception {
  178.41 +    WhiteBox wb = WhiteBox.getWhiteBox();
  178.42 +    System.out.print(wb.getCompressedOopsMaxHeapSize());
  178.43 +  }
  178.44 +}
  178.45 +
  178.46 +class TestUseCompressedOopsErgoTools {
  178.47 +
  178.48 +  private static long getCompressedClassSpaceSize() {
  178.49 +    HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean();
  178.50 +
  178.51 +    VMOption option = diagnostic.getVMOption("CompressedClassSpaceSize");
  178.52 +    return Long.parseLong(option.getValue());
  178.53 +  }
  178.54 +
  178.55 +
  178.56 +  public static long getMaxHeapForCompressedOops(String[] vmargs) throws Exception {
  178.57 +    OutputAnalyzer output = runWhiteBoxTest(vmargs, DetermineMaxHeapForCompressedOops.class.getName(), new String[] {}, false);
  178.58 +    return Long.parseLong(output.getStdout());
  178.59 +  }
  178.60 +
  178.61 +  public static boolean is64bitVM() {
  178.62 +    String val = System.getProperty("sun.arch.data.model");
  178.63 +    if (val == null) {
  178.64 +      throw new RuntimeException("Could not read sun.arch.data.model");
  178.65 +    }
  178.66 +    if (val.equals("64")) {
  178.67 +      return true;
  178.68 +    } else if (val.equals("32")) {
  178.69 +      return false;
  178.70 +    }
  178.71 +    throw new RuntimeException("Unexpected value " + val + " of sun.arch.data.model");
  178.72 +  }
  178.73 +
  178.74 +  /**
  178.75 +   * Executes a new VM process with the given class and parameters.
  178.76 +   * @param vmargs Arguments to the VM to run
  178.77 +   * @param classname Name of the class to run
  178.78 +   * @param arguments Arguments to the class
  178.79 +   * @param useTestDotJavaDotOpts Use test.java.opts as part of the VM argument string
  178.80 +   * @return The OutputAnalyzer with the results for the invocation.
  178.81 +   */
  178.82 +  public static OutputAnalyzer runWhiteBoxTest(String[] vmargs, String classname, String[] arguments, boolean useTestDotJavaDotOpts) throws Exception {
  178.83 +    ArrayList<String> finalargs = new ArrayList<String>();
  178.84 +
  178.85 +    String[] whiteboxOpts = new String[] {
  178.86 +      "-Xbootclasspath/a:.",
  178.87 +      "-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
  178.88 +      "-cp", System.getProperty("java.class.path"),
  178.89 +    };
  178.90 +
  178.91 +    if (useTestDotJavaDotOpts) {
  178.92 +      // System.getProperty("test.java.opts") is '' if no options is set,
  178.93 +      // we need to skip such a result
  178.94 +      String[] externalVMOpts = new String[0];
  178.95 +      if (System.getProperty("test.java.opts") != null && System.getProperty("test.java.opts").length() != 0) {
  178.96 +        externalVMOpts = System.getProperty("test.java.opts").split(" ");
  178.97 +      }
  178.98 +      finalargs.addAll(Arrays.asList(externalVMOpts));
  178.99 +    }
 178.100 +
 178.101 +    finalargs.addAll(Arrays.asList(vmargs));
 178.102 +    finalargs.addAll(Arrays.asList(whiteboxOpts));
 178.103 +    finalargs.add(classname);
 178.104 +    finalargs.addAll(Arrays.asList(arguments));
 178.105 +
 178.106 +    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(finalargs.toArray(new String[0]));
 178.107 +    OutputAnalyzer output = new OutputAnalyzer(pb.start());
 178.108 +    output.shouldHaveExitValue(0);
 178.109 +    return output;
 178.110 +  }
 178.111 +
 178.112 +  private static String[] join(String[] part1, String part2) {
 178.113 +    ArrayList<String> result = new ArrayList<String>();
 178.114 +    result.addAll(Arrays.asList(part1));
 178.115 +    result.add(part2);
 178.116 +    return result.toArray(new String[0]);
 178.117 +  }
 178.118 +
 178.119 +  public static void checkCompressedOopsErgo(String[] gcflags) throws Exception {
 178.120 +    long maxHeapForCompressedOops = getMaxHeapForCompressedOops(gcflags);
 178.121 +
 178.122 +    checkUseCompressedOops(gcflags, maxHeapForCompressedOops, true);
 178.123 +    checkUseCompressedOops(gcflags, maxHeapForCompressedOops - 1, true);
 178.124 +    checkUseCompressedOops(gcflags, maxHeapForCompressedOops + 1, false);
 178.125 +
 178.126 +    // the use of HeapBaseMinAddress should not change the outcome
 178.127 +    checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops, true);
 178.128 +    checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops - 1, true);
 178.129 +    checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops + 1, false);
 178.130 +
 178.131 +    // use a different object alignment
 178.132 +    maxHeapForCompressedOops = getMaxHeapForCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"));
 178.133 +
 178.134 +    checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops, true);
 178.135 +    checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops - 1, true);
 178.136 +    checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops + 1, false);
 178.137 +
 178.138 +    // use a different CompressedClassSpaceSize
 178.139 +    String compressedClassSpaceSizeArg = "-XX:CompressedClassSpaceSize=" + 2 * getCompressedClassSpaceSize();
 178.140 +    maxHeapForCompressedOops = getMaxHeapForCompressedOops(join(gcflags, compressedClassSpaceSizeArg));
 178.141 +
 178.142 +    checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops, true);
 178.143 +    checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops - 1, true);
 178.144 +    checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops + 1, false);
 178.145 +  }
 178.146 +
 178.147 +  private static void checkUseCompressedOops(String[] args, long heapsize, boolean expectUseCompressedOops) throws Exception {
 178.148 +     ArrayList<String> finalargs = new ArrayList<String>();
 178.149 +     finalargs.addAll(Arrays.asList(args));
 178.150 +     finalargs.add("-Xmx" + heapsize);
 178.151 +     finalargs.add("-XX:+PrintFlagsFinal");
 178.152 +     finalargs.add("-version");
 178.153 +
 178.154 +     String output = expectValid(finalargs.toArray(new String[0]));
 178.155 +
 178.156 +     boolean actualUseCompressedOops = getFlagBoolValue(" UseCompressedOops", output);
 178.157 +
 178.158 +     Asserts.assertEQ(expectUseCompressedOops, actualUseCompressedOops);
 178.159 +  }
 178.160 +
 178.161 +  private static boolean getFlagBoolValue(String flag, String where) {
 178.162 +    Matcher m = Pattern.compile(flag + "\\s+:?= (true|false)").matcher(where);
 178.163 +    if (!m.find()) {
 178.164 +      throw new RuntimeException("Could not find value for flag " + flag + " in output string");
 178.165 +    }
 178.166 +    return m.group(1).equals("true");
 178.167 +  }
 178.168 +
 178.169 +  private static String expect(String[] flags, boolean hasWarning, boolean hasError, int errorcode) throws Exception {
 178.170 +    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(flags);
 178.171 +    OutputAnalyzer output = new OutputAnalyzer(pb.start());
 178.172 +    output.shouldHaveExitValue(errorcode);
 178.173 +    return output.getStdout();
 178.174 +  }
 178.175 +
 178.176 +  private static String expectValid(String[] flags) throws Exception {
 178.177 +    return expect(flags, false, false, 0);
 178.178 +  }
 178.179 +}
 178.180 +
   179.1 --- a/test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java	Fri Sep 27 13:49:57 2013 -0400
   179.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   179.3 @@ -1,79 +0,0 @@
   179.4 -/*
   179.5 - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   179.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   179.7 - *
   179.8 - * This code is free software; you can redistribute it and/or modify it
   179.9 - * under the terms of the GNU General Public License version 2 only, as
  179.10 - * published by the Free Software Foundation.
  179.11 - *
  179.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
  179.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  179.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  179.15 - * version 2 for more details (a copy is included in the LICENSE file that
  179.16 - * accompanied this code).
  179.17 - *
  179.18 - * You should have received a copy of the GNU General Public License version
  179.19 - * 2 along with this work; if not, write to the Free Software Foundation,
  179.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  179.21 - *
  179.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  179.23 - * or visit www.oracle.com if you need additional information or have any
  179.24 - * questions.
  179.25 - */
  179.26 -
  179.27 -/*
  179.28 - * @test ClassMetaspaceSizeInJmapHeap
  179.29 - * @bug 8004924
  179.30 - * @summary Checks that jmap -heap contains the flag ClassMetaspaceSize
  179.31 - * @library /testlibrary
  179.32 - * @run main/othervm -XX:ClassMetaspaceSize=50m ClassMetaspaceSizeInJmapHeap
  179.33 - */
  179.34 -
  179.35 -import com.oracle.java.testlibrary.*;
  179.36 -import java.nio.file.*;
  179.37 -import java.io.File;
  179.38 -import java.nio.charset.Charset;
  179.39 -import java.util.List;
  179.40 -
  179.41 -public class ClassMetaspaceSizeInJmapHeap {
  179.42 -    public static void main(String[] args) throws Exception {
  179.43 -        String pid = Integer.toString(ProcessTools.getProcessId());
  179.44 -
  179.45 -        JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
  179.46 -                                              .addToolArg("-heap")
  179.47 -                                              .addToolArg(pid);
  179.48 -        ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
  179.49 -
  179.50 -        File out = new File("ClassMetaspaceSizeInJmapHeap.stdout.txt");
  179.51 -        pb.redirectOutput(out);
  179.52 -
  179.53 -        File err = new File("ClassMetaspaceSizeInJmapHeap.stderr.txt");
  179.54 -        pb.redirectError(err);
  179.55 -
  179.56 -        run(pb);
  179.57 -
  179.58 -        OutputAnalyzer output = new OutputAnalyzer(read(out));
  179.59 -        output.shouldContain("ClassMetaspaceSize = 52428800 (50.0MB)");
  179.60 -        out.delete();
  179.61 -    }
  179.62 -
  179.63 -    private static void run(ProcessBuilder pb) throws Exception {
  179.64 -        Process p = pb.start();
  179.65 -        p.waitFor();
  179.66 -        int exitValue = p.exitValue();
  179.67 -        if (exitValue != 0) {
  179.68 -            throw new Exception("jmap -heap exited with error code: " + exitValue);
  179.69 -        }
  179.70 -    }
  179.71 -
  179.72 -    private static String read(File f) throws Exception {
  179.73 -        Path p = f.toPath();
  179.74 -        List<String> lines = Files.readAllLines(p, Charset.defaultCharset());
  179.75 -
  179.76 -        StringBuilder sb = new StringBuilder();
  179.77 -        for (String line : lines) {
  179.78 -            sb.append(line).append('\n');
  179.79 -        }
  179.80 -        return sb.toString();
  179.81 -    }
  179.82 -}
   180.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   180.2 +++ b/test/gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java	Fri Sep 27 13:53:43 2013 -0400
   180.3 @@ -0,0 +1,79 @@
   180.4 +/*
   180.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   180.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   180.7 + *
   180.8 + * This code is free software; you can redistribute it and/or modify it
   180.9 + * under the terms of the GNU General Public License version 2 only, as
  180.10 + * published by the Free Software Foundation.
  180.11 + *
  180.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  180.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  180.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  180.15 + * version 2 for more details (a copy is included in the LICENSE file that
  180.16 + * accompanied this code).
  180.17 + *
  180.18 + * You should have received a copy of the GNU General Public License version
  180.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  180.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  180.21 + *
  180.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  180.23 + * or visit www.oracle.com if you need additional information or have any
  180.24 + * questions.
  180.25 + */
  180.26 +
  180.27 +/*
  180.28 + * @test CompressedClassSpaceSizeInJmapHeap
  180.29 + * @bug 8004924
  180.30 + * @summary Checks that jmap -heap contains the flag CompressedClassSpaceSize
  180.31 + * @library /testlibrary
  180.32 + * @run main/othervm -XX:CompressedClassSpaceSize=50m CompressedClassSpaceSizeInJmapHeap
  180.33 + */
  180.34 +
  180.35 +import com.oracle.java.testlibrary.*;
  180.36 +import java.nio.file.*;
  180.37 +import java.io.File;
  180.38 +import java.nio.charset.Charset;
  180.39 +import java.util.List;
  180.40 +
  180.41 +public class CompressedClassSpaceSizeInJmapHeap {
  180.42 +    public static void main(String[] args) throws Exception {
  180.43 +        String pid = Integer.toString(ProcessTools.getProcessId());
  180.44 +
  180.45 +        JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
  180.46 +                                              .addToolArg("-heap")
  180.47 +                                              .addToolArg(pid);
  180.48 +        ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
  180.49 +
  180.50 +        File out = new File("CompressedClassSpaceSizeInJmapHeap.stdout.txt");
  180.51 +        pb.redirectOutput(out);
  180.52 +
  180.53 +        File err = new File("CompressedClassSpaceSizeInJmapHeap.stderr.txt");
  180.54 +        pb.redirectError(err);
  180.55 +
  180.56 +        run(pb);
  180.57 +
  180.58 +        OutputAnalyzer output = new OutputAnalyzer(read(out));
  180.59 +        output.shouldContain("CompressedClassSpaceSize = 52428800 (50.0MB)");
  180.60 +        out.delete();
  180.61 +    }
  180.62 +
  180.63 +    private static void run(ProcessBuilder pb) throws Exception {
  180.64 +        Process p = pb.start();
  180.65 +        p.waitFor();
  180.66 +        int exitValue = p.exitValue();
  180.67 +        if (exitValue != 0) {
  180.68 +            throw new Exception("jmap -heap exited with error code: " + exitValue);
  180.69 +        }
  180.70 +    }
  180.71 +
  180.72 +    private static String read(File f) throws Exception {
  180.73 +        Path p = f.toPath();
  180.74 +        List<String> lines = Files.readAllLines(p, Charset.defaultCharset());
  180.75 +
  180.76 +        StringBuilder sb = new StringBuilder();
  180.77 +        for (String line : lines) {
  180.78 +            sb.append(line).append('\n');
  180.79 +        }
  180.80 +        return sb.toString();
  180.81 +    }
  180.82 +}
   181.1 --- a/test/gc/metaspace/TestMetaspaceMemoryPool.java	Fri Sep 27 13:49:57 2013 -0400
   181.2 +++ b/test/gc/metaspace/TestMetaspaceMemoryPool.java	Fri Sep 27 13:53:43 2013 -0400
   181.3 @@ -22,55 +22,35 @@
   181.4   */
   181.5  
   181.6  import java.util.List;
   181.7 -import java.lang.management.ManagementFactory;
   181.8 -import java.lang.management.MemoryManagerMXBean;
   181.9 -import java.lang.management.MemoryPoolMXBean;
  181.10 -import java.lang.management.MemoryUsage;
  181.11 -
  181.12 -import java.lang.management.RuntimeMXBean;
  181.13 -import java.lang.management.ManagementFactory;
  181.14 +import java.lang.management.*;
  181.15 +import com.oracle.java.testlibrary.*;
  181.16 +import static com.oracle.java.testlibrary.Asserts.*;
  181.17  
  181.18  /* @test TestMetaspaceMemoryPool
  181.19   * @bug 8000754
  181.20   * @summary Tests that a MemoryPoolMXBeans is created for metaspace and that a
  181.21   *          MemoryManagerMXBean is created.
  181.22 + * @library /testlibrary
  181.23   * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops TestMetaspaceMemoryPool
  181.24   * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:MaxMetaspaceSize=60m TestMetaspaceMemoryPool
  181.25 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers TestMetaspaceMemoryPool
  181.26 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:ClassMetaspaceSize=60m TestMetaspaceMemoryPool
  181.27 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers TestMetaspaceMemoryPool
  181.28 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:CompressedClassSpaceSize=60m TestMetaspaceMemoryPool
  181.29   */
  181.30  public class TestMetaspaceMemoryPool {
  181.31      public static void main(String[] args) {
  181.32          verifyThatMetaspaceMemoryManagerExists();
  181.33 -        verifyMemoryPool(getMemoryPool("Metaspace"), isFlagDefined("MaxMetaspaceSize"));
  181.34  
  181.35 -        if (runsOn64bit()) {
  181.36 -            if (usesCompressedOops()) {
  181.37 +        boolean isMetaspaceMaxDefined = InputArguments.containsPrefix("-XX:MaxMetaspaceSize");
  181.38 +        verifyMemoryPool(getMemoryPool("Metaspace"), isMetaspaceMaxDefined);
  181.39 +
  181.40 +        if (Platform.is64bit()) {
  181.41 +            if (InputArguments.contains("-XX:+UseCompressedOops")) {
  181.42                  MemoryPoolMXBean cksPool = getMemoryPool("Compressed Class Space");
  181.43                  verifyMemoryPool(cksPool, true);
  181.44              }
  181.45          }
  181.46      }
  181.47  
  181.48 -    private static boolean runsOn64bit() {
  181.49 -        return !System.getProperty("sun.arch.data.model").equals("32");
  181.50 -    }
  181.51 -
  181.52 -    private static boolean usesCompressedOops() {
  181.53 -        return isFlagDefined("+UseCompressedOops");
  181.54 -    }
  181.55 -
  181.56 -    private static boolean isFlagDefined(String name) {
  181.57 -        RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean();
  181.58 -        List<String> args = runtimeMxBean.getInputArguments();
  181.59 -        for (String arg : args) {
  181.60 -            if (arg.startsWith("-XX:" + name)) {
  181.61 -                return true;
  181.62 -            }
  181.63 -        }
  181.64 -        return false;
  181.65 -    }
  181.66 -
  181.67      private static void verifyThatMetaspaceMemoryManagerExists() {
  181.68          List<MemoryManagerMXBean> managers = ManagementFactory.getMemoryManagerMXBeans();
  181.69          for (MemoryManagerMXBean manager : managers) {
  181.70 @@ -95,32 +75,19 @@
  181.71  
  181.72      private static void verifyMemoryPool(MemoryPoolMXBean pool, boolean isMaxDefined) {
  181.73          MemoryUsage mu = pool.getUsage();
  181.74 -        assertDefined(mu.getInit(), "init");
  181.75 -        assertDefined(mu.getUsed(), "used");
  181.76 -        assertDefined(mu.getCommitted(), "committed");
  181.77 +        long init = mu.getInit();
  181.78 +        long used = mu.getUsed();
  181.79 +        long committed = mu.getCommitted();
  181.80 +        long max = mu.getMax();
  181.81 +
  181.82 +        assertGTE(init, 0L);
  181.83 +        assertGTE(used, init);
  181.84 +        assertGTE(committed, used);
  181.85  
  181.86          if (isMaxDefined) {
  181.87 -            assertDefined(mu.getMax(), "max");
  181.88 +            assertGTE(max, committed);
  181.89          } else {
  181.90 -            assertUndefined(mu.getMax(), "max");
  181.91 -        }
  181.92 -    }
  181.93 -
  181.94 -    private static void assertDefined(long value, String name) {
  181.95 -        assertTrue(value != -1, "Expected " + name + " to be defined");
  181.96 -    }
  181.97 -
  181.98 -    private static void assertUndefined(long value, String name) {
  181.99 -        assertEquals(value, -1, "Expected " + name + " to be undefined");
 181.100 -    }
 181.101 -
 181.102 -    private static void assertEquals(long actual, long expected, String msg) {
 181.103 -        assertTrue(actual == expected, msg);
 181.104 -    }
 181.105 -
 181.106 -    private static void assertTrue(boolean condition, String msg) {
 181.107 -        if (!condition) {
 181.108 -            throw new RuntimeException(msg);
 181.109 +            assertEQ(max, -1L);
 181.110          }
 181.111      }
 181.112  }
   182.1 --- a/test/gc/metaspace/TestMetaspacePerfCounters.java	Fri Sep 27 13:49:57 2013 -0400
   182.2 +++ b/test/gc/metaspace/TestMetaspacePerfCounters.java	Fri Sep 27 13:53:43 2013 -0400
   182.3 @@ -33,13 +33,13 @@
   182.4   * @summary Tests that performance counters for metaspace and compressed class
   182.5   *          space exists and works.
   182.6   *
   182.7 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
   182.8 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
   182.9 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
  182.10 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
  182.11 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
  182.12 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
  182.13   *
  182.14 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
  182.15 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
  182.16 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
  182.17 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
  182.18 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
  182.19 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
  182.20   */
  182.21  public class TestMetaspacePerfCounters {
  182.22      public static Class fooClass = null;
  182.23 @@ -61,10 +61,15 @@
  182.24      }
  182.25  
  182.26      private static void checkPerfCounters(String ns) throws Exception {
  182.27 -        for (PerfCounter counter : countersInNamespace(ns)) {
  182.28 -            String msg = "Expected " + counter.getName() + " to be larger than 0";
  182.29 -            assertGT(counter.longValue(), 0L, msg);
  182.30 -        }
  182.31 +        long minCapacity = getMinCapacity(ns);
  182.32 +        long maxCapacity = getMaxCapacity(ns);
  182.33 +        long capacity = getCapacity(ns);
  182.34 +        long used = getUsed(ns);
  182.35 +
  182.36 +        assertGTE(minCapacity, 0L);
  182.37 +        assertGTE(used, minCapacity);
  182.38 +        assertGTE(capacity, used);
  182.39 +        assertGTE(maxCapacity, capacity);
  182.40      }
  182.41  
  182.42      private static void checkEmptyPerfCounters(String ns) throws Exception {
  182.43 @@ -75,12 +80,10 @@
  182.44      }
  182.45  
  182.46      private static void checkUsedIncreasesWhenLoadingClass(String ns) throws Exception {
  182.47 -        PerfCounter used = PerfCounters.findByName(ns + ".used");
  182.48 -
  182.49 -        long before = used.longValue();
  182.50 +        long before = getUsed(ns);
  182.51          fooClass = compileAndLoad("Foo", "public class Foo { }");
  182.52          System.gc();
  182.53 -        long after = used.longValue();
  182.54 +        long after = getUsed(ns);
  182.55  
  182.56          assertGT(after, before);
  182.57      }
  182.58 @@ -99,6 +102,22 @@
  182.59      }
  182.60  
  182.61      private static boolean isUsingCompressedClassPointers() {
  182.62 -        return Platform.is64bit() && InputArguments.contains("-XX:+UseCompressedKlassPointers");
  182.63 +        return Platform.is64bit() && InputArguments.contains("-XX:+UseCompressedClassPointers");
  182.64 +    }
  182.65 +
  182.66 +    private static long getMinCapacity(String ns) throws Exception {
  182.67 +        return PerfCounters.findByName(ns + ".minCapacity").longValue();
  182.68 +    }
  182.69 +
  182.70 +    private static long getCapacity(String ns) throws Exception {
  182.71 +        return PerfCounters.findByName(ns + ".capacity").longValue();
  182.72 +    }
  182.73 +
  182.74 +    private static long getMaxCapacity(String ns) throws Exception {
  182.75 +        return PerfCounters.findByName(ns + ".maxCapacity").longValue();
  182.76 +    }
  182.77 +
  182.78 +    private static long getUsed(String ns) throws Exception {
  182.79 +        return PerfCounters.findByName(ns + ".used").longValue();
  182.80      }
  182.81  }
   183.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   183.2 +++ b/test/gc/metaspace/TestMetaspaceSizeFlags.java	Fri Sep 27 13:53:43 2013 -0400
   183.3 @@ -0,0 +1,108 @@
   183.4 +/*
   183.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   183.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   183.7 + *
   183.8 + * This code is free software; you can redistribute it and/or modify it
   183.9 + * under the terms of the GNU General Public License version 2 only, as
  183.10 + * published by the Free Software Foundation.
  183.11 + *
  183.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  183.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  183.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  183.15 + * version 2 for more details (a copy is included in the LICENSE file that
  183.16 + * accompanied this code).
  183.17 + *
  183.18 + * You should have received a copy of the GNU General Public License version
  183.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  183.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  183.21 + *
  183.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  183.23 + * or visit www.oracle.com if you need additional information or have any
  183.24 + * questions.
  183.25 + */
  183.26 +
  183.27 +import com.oracle.java.testlibrary.Asserts;
  183.28 +import com.oracle.java.testlibrary.OutputAnalyzer;
  183.29 +import com.oracle.java.testlibrary.ProcessTools;
  183.30 +
  183.31 +/*
  183.32 + * @test TestMetaspaceSizeFlags
  183.33 + * @key gc
  183.34 + * @bug 8024650
  183.35 + * @summary Test that metaspace size flags can be set correctly
  183.36 + * @library /testlibrary
  183.37 + */
  183.38 +public class TestMetaspaceSizeFlags {
  183.39 +  public static final long K = 1024L;
  183.40 +  public static final long M = 1024L * K;
  183.41 +
  183.42 +  // HotSpot uses a number of different values to align memory size flags.
  183.43 +  // This is currently the largest alignment (unless huge large pages are used).
  183.44 +  public static final long MAX_ALIGNMENT = 32 * M;
  183.45 +
  183.46 +  public static void main(String [] args) throws Exception {
  183.47 +    testMaxMetaspaceSizeEQMetaspaceSize(MAX_ALIGNMENT, MAX_ALIGNMENT);
  183.48 +    // 8024650: MaxMetaspaceSize was adjusted instead of MetaspaceSize.
  183.49 +    testMaxMetaspaceSizeLTMetaspaceSize(MAX_ALIGNMENT, MAX_ALIGNMENT * 2);
  183.50 +    testMaxMetaspaceSizeGTMetaspaceSize(MAX_ALIGNMENT * 2, MAX_ALIGNMENT);
  183.51 +    testTooSmallInitialMetaspace(0, 0);
  183.52 +    testTooSmallInitialMetaspace(0, MAX_ALIGNMENT);
  183.53 +    testTooSmallInitialMetaspace(MAX_ALIGNMENT, 0);
  183.54 +  }
  183.55 +
  183.56 +  private static void testMaxMetaspaceSizeEQMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
  183.57 +    MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize);
  183.58 +    Asserts.assertEQ(maxMetaspaceSize, metaspaceSize);
  183.59 +    Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize);
  183.60 +    Asserts.assertEQ(mf.metaspaceSize, metaspaceSize);
  183.61 +  }
  183.62 +
  183.63 +  private static void testMaxMetaspaceSizeLTMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
  183.64 +    MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize);
  183.65 +    Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize);
  183.66 +    Asserts.assertEQ(mf.metaspaceSize, maxMetaspaceSize);
  183.67 +  }
  183.68 +
  183.69 +  private static void testMaxMetaspaceSizeGTMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
  183.70 +    MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize);
  183.71 +    Asserts.assertGT(maxMetaspaceSize, metaspaceSize);
  183.72 +    Asserts.assertGT(mf.maxMetaspaceSize, mf.metaspaceSize);
  183.73 +    Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize);
  183.74 +    Asserts.assertEQ(mf.metaspaceSize, metaspaceSize);
  183.75 +  }
  183.76 +
  183.77 +  private static void testTooSmallInitialMetaspace(long maxMetaspaceSize, long metaspaceSize) throws Exception {
  183.78 +    OutputAnalyzer output = run(maxMetaspaceSize, metaspaceSize);
  183.79 +    output.shouldContain("Too small initial Metaspace size");
  183.80 +  }
  183.81 +
  183.82 +  private static MetaspaceFlags runAndGetValue(long maxMetaspaceSize, long metaspaceSize) throws Exception {
  183.83 +    OutputAnalyzer output = run(maxMetaspaceSize, metaspaceSize);
  183.84 +    output.shouldNotMatch("Error occurred during initialization of VM\n.*");
  183.85 +
  183.86 +    String stringMaxMetaspaceSize = output.firstMatch(".* MaxMetaspaceSize .* := (\\d+).*", 1);
  183.87 +    String stringMetaspaceSize = output.firstMatch(".* MetaspaceSize .* := (\\d+).*", 1);
  183.88 +
  183.89 +    return new MetaspaceFlags(Long.parseLong(stringMaxMetaspaceSize),
  183.90 +                              Long.parseLong(stringMetaspaceSize));
  183.91 +  }
  183.92 +
  183.93 +  private static OutputAnalyzer run(long maxMetaspaceSize, long metaspaceSize) throws Exception {
  183.94 +    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
  183.95 +        "-XX:MaxMetaspaceSize=" + maxMetaspaceSize,
  183.96 +        "-XX:MetaspaceSize=" + metaspaceSize,
  183.97 +        "-XX:-UseLargePages", // Prevent us from using 2GB large pages on solaris + sparc.
  183.98 +        "-XX:+PrintFlagsFinal",
  183.99 +        "-version");
 183.100 +    return new OutputAnalyzer(pb.start());
 183.101 +  }
 183.102 +
 183.103 +  private static class MetaspaceFlags {
 183.104 +    public long maxMetaspaceSize;
 183.105 +    public long metaspaceSize;
 183.106 +    public MetaspaceFlags(long maxMetaspaceSize, long metaspaceSize) {
 183.107 +      this.maxMetaspaceSize = maxMetaspaceSize;
 183.108 +      this.metaspaceSize = metaspaceSize;
 183.109 +    }
 183.110 +  }
 183.111 +}
   184.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   184.2 +++ b/test/gc/metaspace/TestPerfCountersAndMemoryPools.java	Fri Sep 27 13:53:43 2013 -0400
   184.3 @@ -0,0 +1,86 @@
   184.4 +/*
   184.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   184.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   184.7 + *
   184.8 + * This code is free software; you can redistribute it and/or modify it
   184.9 + * under the terms of the GNU General Public License version 2 only, as
  184.10 + * published by the Free Software Foundation.
  184.11 + *
  184.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  184.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  184.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  184.15 + * version 2 for more details (a copy is included in the LICENSE file that
  184.16 + * accompanied this code).
  184.17 + *
  184.18 + * You should have received a copy of the GNU General Public License version
  184.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  184.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  184.21 + *
  184.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  184.23 + * or visit www.oracle.com if you need additional information or have any
  184.24 + * questions.
  184.25 + */
  184.26 +
  184.27 +import java.util.List;
  184.28 +import java.lang.management.*;
  184.29 +
  184.30 +import com.oracle.java.testlibrary.*;
  184.31 +import static com.oracle.java.testlibrary.Asserts.*;
  184.32 +
  184.33 +/* @test TestPerfCountersAndMemoryPools
  184.34 + * @bug 8023476
  184.35 + * @summary Tests that a MemoryPoolMXBeans and PerfCounters for metaspace
  184.36 + *          report the same data.
  184.37 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UseSerialGC -XX:+UsePerfData TestPerfCountersAndMemoryPools
  184.38 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UseSerialGC -XX:+UsePerfData TestPerfCountersAndMemoryPools
  184.39 + */
  184.40 +public class TestPerfCountersAndMemoryPools {
  184.41 +    public static void main(String[] args) throws Exception {
  184.42 +        checkMemoryUsage("Metaspace", "sun.gc.metaspace");
  184.43 +
  184.44 +        if (InputArguments.contains("-XX:+UseCompressedKlassPointers") && Platform.is64bit()) {
  184.45 +            checkMemoryUsage("Compressed Class Space", "sun.gc.compressedclassspace");
  184.46 +        }
  184.47 +    }
  184.48 +
  184.49 +    private static MemoryUsage getMemoryUsage(String memoryPoolName) {
  184.50 +        List<MemoryPoolMXBean> pools = ManagementFactory.getMemoryPoolMXBeans();
  184.51 +        for (MemoryPoolMXBean pool : pools) {
  184.52 +            if (pool.getName().equals(memoryPoolName)) {
  184.53 +                return pool.getUsage();
  184.54 +            }
  184.55 +        }
  184.56 +
  184.57 +        throw new RuntimeException("Excpted to find a memory pool with name " +
  184.58 +                                   memoryPoolName);
  184.59 +    }
  184.60 +
  184.61 +    private static void checkMemoryUsage(String memoryPoolName, String perfNS)
  184.62 +        throws Exception {
  184.63 +        // Need to do a gc before each comparison to update the perf counters
  184.64 +
  184.65 +        System.gc();
  184.66 +        MemoryUsage mu = getMemoryUsage(memoryPoolName);
  184.67 +        assertEQ(getMinCapacity(perfNS), mu.getInit());
  184.68 +
  184.69 +        System.gc();
  184.70 +        mu = getMemoryUsage(memoryPoolName);
  184.71 +        assertEQ(getUsed(perfNS), mu.getUsed());
  184.72 +
  184.73 +        System.gc();
  184.74 +        mu = getMemoryUsage(memoryPoolName);
  184.75 +        assertEQ(getCapacity(perfNS), mu.getCommitted());
  184.76 +    }
  184.77 +
  184.78 +    private static long getMinCapacity(String ns) throws Exception {
  184.79 +        return PerfCounters.findByName(ns + ".minCapacity").longValue();
  184.80 +    }
  184.81 +
  184.82 +    private static long getCapacity(String ns) throws Exception {
  184.83 +        return PerfCounters.findByName(ns + ".capacity").longValue();
  184.84 +    }
  184.85 +
  184.86 +    private static long getUsed(String ns) throws Exception {
  184.87 +        return PerfCounters.findByName(ns + ".used").longValue();
  184.88 +    }
  184.89 +}
   185.1 --- a/test/runtime/6878713/Test6878713.sh	Fri Sep 27 13:49:57 2013 -0400
   185.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   185.3 @@ -1,137 +0,0 @@
   185.4 -#!/bin/sh
   185.5 -
   185.6 -# 
   185.7 -#  Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
   185.8 -#  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   185.9 -# 
  185.10 -#  This code is free software; you can redistribute it and/or modify it
  185.11 -#  under the terms of the GNU General Public License version 2 only, as
  185.12 -#  published by the Free Software Foundation.
  185.13 -# 
  185.14 -#  This code is distributed in the hope that it will be useful, but WITHOUT
  185.15 -#  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  185.16 -#  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  185.17 -#  version 2 for more details (a copy is included in the LICENSE file that
  185.18 -#  accompanied this code).
  185.19 -# 
  185.20 -#  You should have received a copy of the GNU General Public License version
  185.21 -#  2 along with this work; if not, write to the Free Software Foundation,
  185.22 -#  Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  185.23 -# 
  185.24 -#  Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  185.25 -#  or visit www.oracle.com if you need additional information or have any
  185.26 -#  questions.
  185.27 -# 
  185.28 -
  185.29 - 
  185.30 -
  185.31 -##
  185.32 -## @test
  185.33 -## @bug 6878713
  185.34 -## @bug 7030610
  185.35 -## @bug 7037122
  185.36 -## @bug 7123945
  185.37 -## @summary Verifier heap corruption, relating to backward jsrs
  185.38 -## @run shell Test6878713.sh
  185.39 -##
  185.40 -## some tests require path to find test source dir
  185.41 -if [ "${TESTSRC}" = "" ]
  185.42 -then
  185.43 -  TESTSRC=${PWD}
  185.44 -  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
  185.45 -fi
  185.46 -echo "TESTSRC=${TESTSRC}"
  185.47 -## Adding common setup Variables for running shell tests.
  185.48 -. ${TESTSRC}/../../test_env.sh
  185.49 -
  185.50 -TARGET_CLASS=OOMCrashClass1960_2
  185.51 -
  185.52 -echo "INFO: extracting the target class."
  185.53 -${COMPILEJAVA}${FS}bin${FS}jar xvf \
  185.54 -    ${TESTSRC}${FS}testcase.jar ${TARGET_CLASS}.class
  185.55 -
  185.56 -# remove any hs_err_pid that might exist here
  185.57 -rm -f hs_err_pid*.log
  185.58 -
  185.59 -echo "INFO: checking for 32-bit versus 64-bit VM."
  185.60 -${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -version 2>&1 \
  185.61 -    | grep "64-Bit [^ ][^ ]* VM" > /dev/null 2>&1
  185.62 -status="$?"
  185.63 -if [ "$status" = 0 ]; then
  185.64 -    echo "INFO: testing a 64-bit VM."
  185.65 -    is_64_bit=true
  185.66 -else
  185.67 -    echo "INFO: testing a 32-bit VM."
  185.68 -fi
  185.69 -
  185.70 -if [ "$is_64_bit" = true ]; then
  185.71 -    # limit is 768MB in 8-byte words (1024 * 1024 * 768 / 8) == 100663296
  185.72 -    MALLOC_MAX=100663296
  185.73 -else
  185.74 -    # limit is 768MB in 4-byte words (1024 * 1024 * 768 / 4) == 201326592
  185.75 -    MALLOC_MAX=201326592
  185.76 -fi
  185.77 -echo "INFO: MALLOC_MAX=$MALLOC_MAX"
  185.78 -
  185.79 -echo "INFO: executing the target class."
  185.80 -# -XX:+PrintCommandLineFlags for debugging purposes
  185.81 -# -XX:+IgnoreUnrecognizedVMOptions so test will run on a VM without
  185.82 -#     the new -XX:MallocMaxTestWords option
  185.83 -# -XX:+UnlockDiagnosticVMOptions so we can use -XX:MallocMaxTestWords
  185.84 -# -XX:MallocMaxTestWords limits malloc to $MALLOC_MAX
  185.85 -${TESTJAVA}${FS}bin${FS}java \
  185.86 -    -XX:+PrintCommandLineFlags \
  185.87 -    -XX:+IgnoreUnrecognizedVMOptions \
  185.88 -    -XX:+UnlockDiagnosticVMOptions \
  185.89 -    -XX:MallocMaxTestWords=$MALLOC_MAX \
  185.90 -    ${TESTVMOPTS} ${TARGET_CLASS} > test.out 2>&1
  185.91 -
  185.92 -echo "INFO: begin contents of test.out:"
  185.93 -cat test.out
  185.94 -echo "INFO: end contents of test.out."
  185.95 -
  185.96 -echo "INFO: checking for memory allocation error message."
  185.97 -# We are looking for this specific memory allocation failure mesg so
  185.98 -# we know we exercised the right allocation path with the test class:
  185.99 -MESG1="Native memory allocation (malloc) failed to allocate 25696531[0-9][0-9] bytes"
 185.100 -grep "$MESG1" test.out
 185.101 -status="$?"
 185.102 -if [ "$status" = 0 ]; then
 185.103 -    echo "INFO: found expected memory allocation error message."
 185.104 -else
 185.105 -    echo "INFO: did not find expected memory allocation error message."
 185.106 -
 185.107 -    # If we didn't find MESG1 above, then there are several scenarios:
 185.108 -    # 1) -XX:MallocMaxTestWords is not supported by the current VM and we
 185.109 -    #    didn't fail TARGET_CLASS's memory allocation attempt; instead
 185.110 -    #    we failed to find TARGET_CLASS's main() method. The TARGET_CLASS
 185.111 -    #    is designed to provoke a memory allocation failure during class
 185.112 -    #    loading; we actually don't care about running the class which is
 185.113 -    #    why it doesn't have a main() method.
 185.114 -    # 2) we failed a memory allocation, but not the one we were looking
 185.115 -    #    so it might be that TARGET_CLASS no longer tickles the same
 185.116 -    #    memory allocation code path
 185.117 -    # 3) TARGET_CLASS reproduces the failure mode (SIGSEGV) fixed by
 185.118 -    #    6878713 because the test is running on a pre-fix VM.
 185.119 -    echo "INFO: checking for no main() method message."
 185.120 -    MESG2="Error: Main method not found in class"
 185.121 -    grep "$MESG2" test.out
 185.122 -    status="$?"
 185.123 -    if [ "$status" = 0 ]; then
 185.124 -        echo "INFO: found no main() method message."
 185.125 -    else
 185.126 -        echo "FAIL: did not find no main() method message."
 185.127 -        # status is non-zero for exit below
 185.128 -
 185.129 -        if [ -s hs_err_pid*.log ]; then
 185.130 -            echo "INFO: begin contents of hs_err_pid file:"
 185.131 -            cat hs_err_pid*.log
 185.132 -            echo "INFO: end contents of hs_err_pid file."
 185.133 -        fi
 185.134 -    fi
 185.135 -fi
 185.136 -
 185.137 -if [ "$status" = 0 ]; then
 185.138 -    echo "PASS: test found one of the expected messages."
 185.139 -fi
 185.140 -exit "$status"
   186.1 Binary file test/runtime/6878713/testcase.jar has changed
   187.1 --- a/test/runtime/7020373/Test7020373.sh	Fri Sep 27 13:49:57 2013 -0400
   187.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   187.3 @@ -1,43 +0,0 @@
   187.4 -#!/bin/sh
   187.5 -
   187.6 -##
   187.7 -## @test
   187.8 -## @bug 7020373 7055247 7053586 7185550
   187.9 -## @key cte_test
  187.10 -## @summary JSR rewriting can overflow memory address size variables
  187.11 -## @ignore Ignore it as 7053586 test uses lots of memory. See bug report for detail.
  187.12 -## @run shell Test7020373.sh
  187.13 -##
  187.14 -
  187.15 -if [ "${TESTSRC}" = "" ]
  187.16 -then
  187.17 -  TESTSRC=${PWD}
  187.18 -  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
  187.19 -fi
  187.20 -echo "TESTSRC=${TESTSRC}"
  187.21 -## Adding common setup Variables for running shell tests.
  187.22 -. ${TESTSRC}/../../test_env.sh
  187.23 -
  187.24 -${COMPILEJAVA}${FS}bin${FS}jar xvf ${TESTSRC}${FS}testcase.jar
  187.25 -
  187.26 -${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} OOMCrashClass4000_1 > test.out 2>&1
  187.27 -
  187.28 -cat test.out
  187.29 -
  187.30 -egrep "SIGSEGV|An unexpected error has been detected" test.out
  187.31 -
  187.32 -if [ $? = 0 ]
  187.33 -then
  187.34 -    echo "Test Failed"
  187.35 -    exit 1
  187.36 -else
  187.37 -    egrep "java.lang.LinkageError|java.lang.NoSuchMethodError|Main method not found in class OOMCrashClass4000_1|insufficient memory" test.out
  187.38 -    if [ $? = 0 ]
  187.39 -    then
  187.40 -        echo "Test Passed"
  187.41 -        exit 0
  187.42 -    else
  187.43 -        echo "Test Failed"
  187.44 -        exit 1
  187.45 -    fi
  187.46 -fi
   188.1 Binary file test/runtime/7020373/testcase.jar has changed
   189.1 --- a/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java	Fri Sep 27 13:49:57 2013 -0400
   189.2 +++ b/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java	Fri Sep 27 13:53:43 2013 -0400
   189.3 @@ -24,7 +24,7 @@
   189.4  /*
   189.5   * @test
   189.6   * @bug 8003424
   189.7 - * @summary Testing UseCompressedKlassPointers with CDS
   189.8 + * @summary Testing UseCompressedClassPointers with CDS
   189.9   * @library /testlibrary
  189.10   * @run main CDSCompressedKPtrs
  189.11   */
  189.12 @@ -36,7 +36,7 @@
  189.13      ProcessBuilder pb;
  189.14      if (Platform.is64bit()) {
  189.15        pb = ProcessTools.createJavaProcessBuilder(
  189.16 -        "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops",
  189.17 +        "-XX:+UseCompressedClassPointers", "-XX:+UseCompressedOops",
  189.18          "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
  189.19        OutputAnalyzer output = new OutputAnalyzer(pb.start());
  189.20        try {
  189.21 @@ -44,7 +44,7 @@
  189.22          output.shouldHaveExitValue(0);
  189.23  
  189.24          pb = ProcessTools.createJavaProcessBuilder(
  189.25 -          "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops",
  189.26 +          "-XX:+UseCompressedClassPointers", "-XX:+UseCompressedOops",
  189.27            "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
  189.28          output = new OutputAnalyzer(pb.start());
  189.29          output.shouldContain("sharing");
   190.1 --- a/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java	Fri Sep 27 13:49:57 2013 -0400
   190.2 +++ b/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java	Fri Sep 27 13:53:43 2013 -0400
   190.3 @@ -24,7 +24,7 @@
   190.4  /*
   190.5   * @test
   190.6   * @bug 8003424
   190.7 - * @summary Test that cannot use CDS if UseCompressedKlassPointers is turned off.
   190.8 + * @summary Test that cannot use CDS if UseCompressedClassPointers is turned off.
   190.9   * @library /testlibrary
  190.10   * @run main CDSCompressedKPtrsError
  190.11   */
  190.12 @@ -36,7 +36,7 @@
  190.13      ProcessBuilder pb;
  190.14      if (Platform.is64bit()) {
  190.15        pb = ProcessTools.createJavaProcessBuilder(
  190.16 -        "-XX:+UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
  190.17 +        "-XX:+UseCompressedOops", "-XX:+UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
  190.18          "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
  190.19        OutputAnalyzer output = new OutputAnalyzer(pb.start());
  190.20        try {
  190.21 @@ -44,21 +44,21 @@
  190.22          output.shouldHaveExitValue(0);
  190.23  
  190.24          pb = ProcessTools.createJavaProcessBuilder(
  190.25 -          "-XX:-UseCompressedKlassPointers", "-XX:-UseCompressedOops",
  190.26 +          "-XX:-UseCompressedClassPointers", "-XX:-UseCompressedOops",
  190.27            "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
  190.28          output = new OutputAnalyzer(pb.start());
  190.29          output.shouldContain("Unable to use shared archive");
  190.30          output.shouldHaveExitValue(0);
  190.31  
  190.32          pb = ProcessTools.createJavaProcessBuilder(
  190.33 -          "-XX:-UseCompressedKlassPointers", "-XX:+UseCompressedOops",
  190.34 +          "-XX:-UseCompressedClassPointers", "-XX:+UseCompressedOops",
  190.35            "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
  190.36          output = new OutputAnalyzer(pb.start());
  190.37          output.shouldContain("Unable to use shared archive");
  190.38          output.shouldHaveExitValue(0);
  190.39  
  190.40          pb = ProcessTools.createJavaProcessBuilder(
  190.41 -          "-XX:+UseCompressedKlassPointers", "-XX:-UseCompressedOops",
  190.42 +          "-XX:+UseCompressedClassPointers", "-XX:-UseCompressedOops",
  190.43            "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
  190.44          output = new OutputAnalyzer(pb.start());
  190.45          output.shouldContain("Unable to use shared archive");
  190.46 @@ -71,19 +71,19 @@
  190.47  
  190.48        // Test bad options with -Xshare:dump.
  190.49        pb = ProcessTools.createJavaProcessBuilder(
  190.50 -        "-XX:-UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
  190.51 +        "-XX:-UseCompressedOops", "-XX:+UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
  190.52          "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
  190.53        output = new OutputAnalyzer(pb.start());
  190.54        output.shouldContain("Cannot dump shared archive");
  190.55  
  190.56        pb = ProcessTools.createJavaProcessBuilder(
  190.57 -        "-XX:+UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
  190.58 +        "-XX:+UseCompressedOops", "-XX:-UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
  190.59          "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
  190.60        output = new OutputAnalyzer(pb.start());
  190.61        output.shouldContain("Cannot dump shared archive");
  190.62  
  190.63        pb = ProcessTools.createJavaProcessBuilder(
  190.64 -        "-XX:-UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
  190.65 +        "-XX:-UseCompressedOops", "-XX:-UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
  190.66          "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
  190.67        output = new OutputAnalyzer(pb.start());
  190.68        output.shouldContain("Cannot dump shared archive");
   191.1 --- a/test/runtime/CDSCompressedKPtrs/XShareAuto.java	Fri Sep 27 13:49:57 2013 -0400
   191.2 +++ b/test/runtime/CDSCompressedKPtrs/XShareAuto.java	Fri Sep 27 13:53:43 2013 -0400
   191.3 @@ -33,16 +33,9 @@
   191.4  
   191.5  public class XShareAuto {
   191.6      public static void main(String[] args) throws Exception {
   191.7 -        if (!Platform.is64bit()) {
   191.8 -            System.out.println("ObjectAlignmentInBytes for CDS is only " +
   191.9 -                "supported on 64bit platforms; this plaform is " +
  191.10 -                System.getProperty("sun.arch.data.model"));
  191.11 -            System.out.println("Skipping the test");
  191.12 -            return;
  191.13 -        }
  191.14          ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
  191.15 -            "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
  191.16 -            "-Xshare:dump");
  191.17 +            "-server", "-XX:+UnlockDiagnosticVMOptions",
  191.18 +            "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
  191.19          OutputAnalyzer output = new OutputAnalyzer(pb.start());
  191.20          output.shouldContain("Loading classes to share");
  191.21          output.shouldHaveExitValue(0);
   192.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   192.2 +++ b/test/runtime/ClassFile/JsrRewriting.java	Fri Sep 27 13:53:43 2013 -0400
   192.3 @@ -0,0 +1,102 @@
   192.4 +/*
   192.5 + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
   192.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   192.7 + *
   192.8 + * This code is free software; you can redistribute it and/or modify it
   192.9 + * under the terms of the GNU General Public License version 2 only, as
  192.10 + * published by the Free Software Foundation.
  192.11 + *
  192.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  192.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  192.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  192.15 + * version 2 for more details (a copy is included in the LICENSE file that
  192.16 + * accompanied this code).
  192.17 + *
  192.18 + * You should have received a copy of the GNU General Public License version
  192.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  192.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  192.21 + *
  192.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  192.23 + * or visit www.oracle.com if you need additional information or have any
  192.24 + * questions.
  192.25 + */
  192.26 +
  192.27 +
  192.28 +
  192.29 +/*
  192.30 + * @test JsrRewriting
  192.31 + * @summary JSR (jump local subroutine)
  192.32 + *      rewriting can overflow memory address size variables
  192.33 + * @bug 7020373
  192.34 + * @bug 7055247
  192.35 + * @bug 7053586
  192.36 + * @bug 7185550
  192.37 + * @bug 7149464
  192.38 + * @key cte_test
  192.39 + * @library /testlibrary
  192.40 + * @run main JsrRewriting
  192.41 + */
  192.42 +
  192.43 +import com.oracle.java.testlibrary.*;
  192.44 +import java.io.File;
  192.45 +
  192.46 +public class JsrRewriting {
  192.47 +
  192.48 +    public static void main(String[] args) throws Exception {
  192.49 +
  192.50 +        // ======= Configure the test
  192.51 +        String jarFile = System.getProperty("test.src") +
  192.52 +            File.separator + "JsrRewritingTestCase.jar";
  192.53 +        String className = "OOMCrashClass4000_1";
  192.54 +
  192.55 +        // limit is 768MB in native words
  192.56 +        int mallocMaxTestWords = (1024 * 1024 * 768 / 4);
  192.57 +        if (Platform.is64bit())
  192.58 +            mallocMaxTestWords = (mallocMaxTestWords / 2);
  192.59 +
  192.60 +        // ======= extract the test class
  192.61 +        ProcessBuilder pb = new ProcessBuilder(new String[] {
  192.62 +            JDKToolFinder.getJDKTool("jar"),
  192.63 +            "xvf", jarFile } );
  192.64 +        OutputAnalyzer output = new OutputAnalyzer(pb.start());
  192.65 +        output.shouldHaveExitValue(0);
  192.66 +
  192.67 +        // ======= execute the test
  192.68 +        pb = ProcessTools.createJavaProcessBuilder(
  192.69 +            "-cp", ".",
  192.70 +            "-XX:+UnlockDiagnosticVMOptions",
  192.71 +            "-XX:MallocMaxTestWords=" + mallocMaxTestWords,
  192.72 +            className);
  192.73 +
  192.74 +        output = new OutputAnalyzer(pb.start());
  192.75 +        String[] expectedMsgs = {
  192.76 +            "java.lang.LinkageError",
  192.77 +            "java.lang.NoSuchMethodError",
  192.78 +            "Main method not found in class " + className,
  192.79 +            "insufficient memory"
  192.80 +        };
  192.81 +
  192.82 +        MultipleOrMatch(output, expectedMsgs);
  192.83 +    }
  192.84 +
  192.85 +    private static void
  192.86 +        MultipleOrMatch(OutputAnalyzer analyzer, String[] whatToMatch) {
  192.87 +            String output = analyzer.getOutput();
  192.88 +
  192.89 +            for (String expected : whatToMatch)
  192.90 +                if (output.contains(expected))
  192.91 +                    return;
  192.92 +
  192.93 +            String err =
  192.94 +                " stdout: [" + analyzer.getOutput() + "];\n" +
  192.95 +                " exitValue = " + analyzer.getExitValue() + "\n";
  192.96 +            System.err.println(err);
  192.97 +
  192.98 +            StringBuilder msg = new StringBuilder("Output did not contain " +
  192.99 +                "any of the following expected messages: \n");
 192.100 +            for (String expected : whatToMatch)
 192.101 +                msg.append(expected).append(System.lineSeparator());
 192.102 +            throw new RuntimeException(msg.toString());
 192.103 +    }
 192.104 +}
 192.105 +
   193.1 Binary file test/runtime/ClassFile/JsrRewritingTestCase.jar has changed
   194.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   194.2 +++ b/test/runtime/ClassFile/OomWhileParsingRepeatedJsr.java	Fri Sep 27 13:53:43 2013 -0400
   194.3 @@ -0,0 +1,74 @@
   194.4 +/*
   194.5 + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
   194.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   194.7 + *
   194.8 + * This code is free software; you can redistribute it and/or modify it
   194.9 + * under the terms of the GNU General Public License version 2 only, as
  194.10 + * published by the Free Software Foundation.
  194.11 + *
  194.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  194.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  194.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  194.15 + * version 2 for more details (a copy is included in the LICENSE file that
  194.16 + * accompanied this code).
  194.17 + *
  194.18 + * You should have received a copy of the GNU General Public License version
  194.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  194.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  194.21 + *
  194.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  194.23 + * or visit www.oracle.com if you need additional information or have any
  194.24 + * questions.
  194.25 + */
  194.26 +
  194.27 +
  194.28 +
  194.29 +/*
  194.30 + * @test OomWhileParsingRepeatedJsr
  194.31 + * @summary Testing class file parser; specifically parsing
  194.32 + *          a file with repeated JSR (jump local subroutine)
  194.33 + *          bytecode command.
  194.34 + * @bug 6878713
  194.35 + * @bug 7030610
  194.36 + * @bug 7037122
  194.37 + * @bug 7123945
  194.38 + * @bug 8016029
  194.39 + * @library /testlibrary
  194.40 + * @run main OomWhileParsingRepeatedJsr
  194.41 + */
  194.42 +
  194.43 +import com.oracle.java.testlibrary.*;
  194.44 +
  194.45 +
  194.46 +public class OomWhileParsingRepeatedJsr {
  194.47 +
  194.48 +    public static void main(String[] args) throws Exception {
  194.49 +
  194.50 +        // ======= Configure the test
  194.51 +        String jarFile = System.getProperty("test.src") + "/testcase.jar";
  194.52 +        String className = "OOMCrashClass1960_2";
  194.53 +
  194.54 +        // limit is 768MB in native words
  194.55 +        int mallocMaxTestWords = (1024 * 1024 * 768 / 4);
  194.56 +        if (Platform.is64bit())
  194.57 +            mallocMaxTestWords = (mallocMaxTestWords / 2);
  194.58 +
  194.59 +        // ======= extract the test class
  194.60 +        ProcessBuilder pb = new ProcessBuilder(new String[] {
  194.61 +            JDKToolFinder.getJDKTool("jar"),
  194.62 +            "xvf", jarFile } );
  194.63 +        OutputAnalyzer output = new OutputAnalyzer(pb.start());
  194.64 +        output.shouldHaveExitValue(0);
  194.65 +
  194.66 +        // ======= execute the test
  194.67 +        pb = ProcessTools.createJavaProcessBuilder(
  194.68 +            "-cp", ".",
  194.69 +            "-XX:+UnlockDiagnosticVMOptions",
  194.70 +            "-XX:MallocMaxTestWords=" + mallocMaxTestWords,
  194.71 +            className );
  194.72 +
  194.73 +        output = new OutputAnalyzer(pb.start());
  194.74 +        output.shouldContain("Cannot reserve enough memory");
  194.75 +    }
  194.76 +}
  194.77 +
   195.1 Binary file test/runtime/ClassFile/testcase.jar has changed
   196.1 --- a/test/runtime/CompressedOops/CompressedKlassPointerAndOops.java	Fri Sep 27 13:49:57 2013 -0400
   196.2 +++ b/test/runtime/CompressedOops/CompressedKlassPointerAndOops.java	Fri Sep 27 13:53:43 2013 -0400
   196.3 @@ -25,7 +25,7 @@
   196.4   * @test
   196.5   * @bug 8000968
   196.6   * @key regression
   196.7 - * @summary NPG: UseCompressedKlassPointers asserts with ObjectAlignmentInBytes=32
   196.8 + * @summary NPG: UseCompressedClassPointers asserts with ObjectAlignmentInBytes=32
   196.9   * @library /testlibrary
  196.10   */
  196.11  
  196.12 @@ -52,7 +52,7 @@
  196.13          OutputAnalyzer output;
  196.14  
  196.15          pb = ProcessTools.createJavaProcessBuilder(
  196.16 -            "-XX:+UseCompressedKlassPointers",
  196.17 +            "-XX:+UseCompressedClassPointers",
  196.18              "-XX:+UseCompressedOops",
  196.19              "-XX:ObjectAlignmentInBytes=" + alignment,
  196.20              "-version");
   197.1 --- a/test/runtime/InitialThreadOverflow/testme.sh	Fri Sep 27 13:49:57 2013 -0400
   197.2 +++ b/test/runtime/InitialThreadOverflow/testme.sh	Fri Sep 27 13:53:43 2013 -0400
   197.3 @@ -43,9 +43,9 @@
   197.4    exit 0
   197.5  fi
   197.6  
   197.7 -gcc_cmd=`which gcc`
   197.8 -if [ "x$gcc_cmd" == "x" ]; then
   197.9 -    echo "WARNING: gcc not found. Cannot execute test." 2>&1
  197.10 +gcc_cmd=`which g++`
  197.11 +if [ "x$gcc_cmd" = "x" ]; then
  197.12 +    echo "WARNING: g++ not found. Cannot execute test." 2>&1
  197.13      exit 0;
  197.14  fi
  197.15  
   198.1 --- a/test/testlibrary/OutputAnalyzerTest.java	Fri Sep 27 13:49:57 2013 -0400
   198.2 +++ b/test/testlibrary/OutputAnalyzerTest.java	Fri Sep 27 13:53:43 2013 -0400
   198.3 @@ -172,5 +172,22 @@
   198.4      } catch (RuntimeException e) {
   198.5          // expected
   198.6      }
   198.7 +
   198.8 +    {
   198.9 +      String aaaa = "aaaa";
  198.10 +      String result = output.firstMatch(aaaa);
  198.11 +      if (!aaaa.equals(result)) {
  198.12 +        throw new Exception("firstMatch(String) failed to match. Expected: " + aaaa + " got: " + result);
  198.13 +      }
  198.14 +    }
  198.15 +
  198.16 +    {
  198.17 +      String aa = "aa";
  198.18 +      String aa_grouped_aa = aa + "(" + aa + ")";
  198.19 +      String result = output.firstMatch(aa_grouped_aa, 1);
  198.20 +      if (!aa.equals(result)) {
  198.21 +        throw new Exception("firstMatch(String, int) failed to match. Expected: " + aa + " got: " + result);
  198.22 +      }
  198.23 +    }
  198.24    }
  198.25  }
   199.1 --- a/test/testlibrary/com/oracle/java/testlibrary/InputArguments.java	Fri Sep 27 13:49:57 2013 -0400
   199.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/InputArguments.java	Fri Sep 27 13:53:43 2013 -0400
   199.3 @@ -41,6 +41,9 @@
   199.4      /**
   199.5       * Returns true if {@code arg} is an input argument to the VM.
   199.6       *
   199.7 +     * This is useful for checking boolean flags such as -XX:+UseSerialGC or
   199.8 +     * -XX:-UsePerfData.
   199.9 +     *
  199.10       * @param arg The name of the argument.
  199.11       * @return {@code true} if the given argument is an input argument,
  199.12       *         otherwise {@code false}.
  199.13 @@ -48,4 +51,26 @@
  199.14      public static boolean contains(String arg) {
  199.15          return args.contains(arg);
  199.16      }
  199.17 +
  199.18 +    /**
  199.19 +     * Returns true if {@code prefix} is the start of an input argument to the
  199.20 +     * VM.
  199.21 +     *
  199.22 +     * This is useful for checking if flags describing a quantity, such as
  199.23 +     * -XX:MaxMetaspaceSize=100m, are set without having to know the quantity.
  199.24 +     * To check if the flag -XX:MaxMetaspaceSize is set, use
  199.25 +     * {@code InputArguments.containsPrefix("-XX:MaxMetaspaceSize")}.
  199.26 +     *
  199.27 +     * @param prefix The start of the argument.
  199.28 +     * @return {@code true} if the given argument is the start of an input
  199.29 +     *         argument, otherwise {@code false}.
  199.30 +     */
  199.31 +    public static boolean containsPrefix(String prefix) {
  199.32 +        for (String arg : args) {
  199.33 +            if (arg.startsWith(prefix)) {
  199.34 +                return true;
  199.35 +            }
  199.36 +        }
  199.37 +        return false;
  199.38 +    }
  199.39  }
   200.1 --- a/test/testlibrary/com/oracle/java/testlibrary/JDKToolFinder.java	Fri Sep 27 13:49:57 2013 -0400
   200.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/JDKToolFinder.java	Fri Sep 27 13:53:43 2013 -0400
   200.3 @@ -23,7 +23,9 @@
   200.4  
   200.5  package com.oracle.java.testlibrary;
   200.6  
   200.7 -import java.io.File;
   200.8 +import java.io.FileNotFoundException;
   200.9 +import java.nio.file.Path;
  200.10 +import java.nio.file.Paths;
  200.11  
  200.12  public final class JDKToolFinder {
  200.13  
  200.14 @@ -32,38 +34,73 @@
  200.15  
  200.16      /**
  200.17       * Returns the full path to an executable in jdk/bin based on System
  200.18 -     * property {@code compile.jdk} (set by jtreg test suite)
  200.19 +     * property {@code test.jdk} or {@code compile.jdk} (both are set by the jtreg test suite)
  200.20       *
  200.21       * @return Full path to an executable in jdk/bin
  200.22       */
  200.23      public static String getJDKTool(String tool) {
  200.24 -        String binPath = System.getProperty("compile.jdk");
  200.25 -        if (binPath == null) {
  200.26 -            throw new RuntimeException("System property 'compile.jdk' not set. "
  200.27 -                    + "This property is normally set by jtreg. "
  200.28 -                    + "When running test separately, set this property using "
  200.29 -                    + "'-Dcompile.jdk=/path/to/jdk'.");
  200.30 +
  200.31 +        // First try to find the executable in test.jdk
  200.32 +        try {
  200.33 +            return getTool(tool, "test.jdk");
  200.34 +        } catch (FileNotFoundException e) {
  200.35 +
  200.36          }
  200.37 -        binPath += File.separatorChar + "bin" + File.separatorChar + tool;
  200.38  
  200.39 -        return binPath;
  200.40 +        // Now see if it's available in compile.jdk
  200.41 +        try {
  200.42 +            return getTool(tool, "compile.jdk");
  200.43 +        } catch (FileNotFoundException e) {
  200.44 +            throw new RuntimeException("Failed to find " + tool +
  200.45 +                    ", looked in test.jdk (" + System.getProperty("test.jdk") +
  200.46 +                    ") and compile.jdk (" + System.getProperty("compile.jdk") + ")");
  200.47 +        }
  200.48      }
  200.49 +
  200.50      /**
  200.51 -     * Returns the full path to an executable in &lt;current jdk&gt;/bin based
  200.52 -     * on System property {@code test.jdk} (set by jtreg test suite)
  200.53 +     * Returns the full path to an executable in jdk/bin based on System
  200.54 +     * property {@code compile.jdk}
  200.55       *
  200.56       * @return Full path to an executable in jdk/bin
  200.57       */
  200.58 -    public static String getCurrentJDKTool(String tool) {
  200.59 -        String binPath = System.getProperty("test.jdk");
  200.60 -        if (binPath == null) {
  200.61 -            throw new RuntimeException("System property 'test.jdk' not set. "
  200.62 -                + "This property is normally set by jtreg. "
  200.63 -                + "When running test separately, set this property using "
  200.64 -                + "'-Dtest.jdk=/path/to/jdk'.");
  200.65 +    public static String getCompileJDKTool(String tool) {
  200.66 +        try {
  200.67 +            return getTool(tool, "compile.jdk");
  200.68 +        } catch (FileNotFoundException e) {
  200.69 +            throw new RuntimeException(e);
  200.70          }
  200.71 -        binPath += File.separatorChar + "bin" + File.separatorChar + tool;
  200.72 +    }
  200.73  
  200.74 -        return binPath;
  200.75 +    /**
  200.76 +     * Returns the full path to an executable in jdk/bin based on System
  200.77 +     * property {@code test.jdk}
  200.78 +     *
  200.79 +     * @return Full path to an executable in jdk/bin
  200.80 +     */
  200.81 +    public static String getTestJDKTool(String tool) {
  200.82 +        try {
  200.83 +            return getTool(tool, "test.jdk");
  200.84 +        } catch (FileNotFoundException e) {
  200.85 +            throw new RuntimeException(e);
  200.86 +        }
  200.87 +    }
  200.88 +
  200.89 +    private static String getTool(String tool, String property) throws FileNotFoundException {
  200.90 +        String jdkPath = System.getProperty(property);
  200.91 +
  200.92 +        if (jdkPath == null) {
  200.93 +            throw new RuntimeException(
  200.94 +                    "System property '" + property + "' not set. This property is normally set by jtreg. "
  200.95 +                    + "When running test separately, set this property using '-D" + property + "=/path/to/jdk'.");
  200.96 +        }
  200.97 +
  200.98 +        Path toolName = Paths.get("bin", tool + (Platform.isWindows() ? ".exe" : ""));
  200.99 +
 200.100 +        Path jdkTool = Paths.get(jdkPath, toolName.toString());
 200.101 +        if (!jdkTool.toFile().exists()) {
 200.102 +            throw new FileNotFoundException("Could not find file " + jdkTool.toAbsolutePath());
 200.103 +        }
 200.104 +
 200.105 +        return jdkTool.toAbsolutePath().toString();
 200.106      }
 200.107  }
   201.1 --- a/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java	Fri Sep 27 13:49:57 2013 -0400
   201.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java	Fri Sep 27 13:53:43 2013 -0400
   201.3 @@ -211,13 +211,13 @@
   201.4        if (matcher.find()) {
   201.5            reportDiagnosticSummary();
   201.6            throw new RuntimeException("'" + pattern
   201.7 -                  + "' found in stdout \n");
   201.8 +                  + "' found in stdout: '" + matcher.group() + "' \n");
   201.9        }
  201.10        matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
  201.11        if (matcher.find()) {
  201.12            reportDiagnosticSummary();
  201.13            throw new RuntimeException("'" + pattern
  201.14 -                  + "' found in stderr \n");
  201.15 +                  + "' found in stderr: '" + matcher.group() + "' \n");
  201.16        }
  201.17    }
  201.18  
  201.19 @@ -254,6 +254,37 @@
  201.20    }
  201.21  
  201.22    /**
  201.23 +   * Get the captured group of the first string matching the pattern.
  201.24 +   * stderr is searched before stdout.
  201.25 +   *
  201.26 +   * @param pattern The multi-line pattern to match
  201.27 +   * @param group The group to capture
  201.28 +   * @return The matched string or null if no match was found
  201.29 +   */
  201.30 +  public String firstMatch(String pattern, int group) {
  201.31 +    Matcher stderrMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
  201.32 +    Matcher stdoutMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stdout);
  201.33 +    if (stderrMatcher.find()) {
  201.34 +      return stderrMatcher.group(group);
  201.35 +    }
  201.36 +    if (stdoutMatcher.find()) {
  201.37 +      return stdoutMatcher.group(group);
  201.38 +    }
  201.39 +    return null;
  201.40 +  }
  201.41 +
  201.42 +  /**
  201.43 +   * Get the first string matching the pattern.
  201.44 +   * stderr is searched before stdout.
  201.45 +   *
  201.46 +   * @param pattern The multi-line pattern to match
  201.47 +   * @return The matched string or null if no match was found
  201.48 +   */
  201.49 +  public String firstMatch(String pattern) {
  201.50 +    return firstMatch(pattern, 0);
  201.51 +  }
  201.52 +
  201.53 +  /**
  201.54     * Verify the exit value of the process
  201.55     *
  201.56     * @param expectedExitValue Expected exit value from process
   202.1 --- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Fri Sep 27 13:49:57 2013 -0400
   202.2 +++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Fri Sep 27 13:53:43 2013 -0400
   202.3 @@ -61,6 +61,8 @@
   202.4      registerNatives();
   202.5    }
   202.6  
   202.7 +  // Get the maximum heap size supporting COOPs
   202.8 +  public native long getCompressedOopsMaxHeapSize();
   202.9    // Arguments
  202.10    public native void printHeapSizes();
  202.11  

mercurial