Merge

changeset   5720:06ae47d9d088
parent      5687:41e6ae9f6dd7
parent      5719:719e886d4f72
child       5721:179cd89fb279
author      tschatzl
date        Thu, 19 Sep 2013 09:26:08 +0200

src/os/linux/vm/os_linux.cpp
src/share/vm/prims/jni.cpp
src/share/vm/runtime/arguments.cpp
src/share/vm/runtime/os.hpp
src/share/vm/runtime/thread.cpp
test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java
     1.1 --- a/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java	Wed Sep 18 12:52:15 2013 -0400
     1.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/bsd/BsdAddress.java	Thu Sep 19 09:26:08 2013 +0200
     1.3 @@ -81,7 +81,7 @@
     1.4  
     1.5      public Address getCompKlassAddressAt(long offset)
     1.6              throws UnalignedAddressException, UnmappedAddressException {
     1.7 -        return debugger.readCompOopAddress(addr + offset);
     1.8 +        return debugger.readCompKlassAddress(addr + offset);
     1.9      }
    1.10  
    1.11      //
     2.1 --- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Wed Sep 18 12:52:15 2013 -0400
     2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Thu Sep 19 09:26:08 2013 +0200
     2.3 @@ -792,7 +792,7 @@
     2.4  
     2.5    public boolean isCompressedKlassPointersEnabled() {
     2.6      if (compressedKlassPointersEnabled == null) {
     2.7 -        Flag flag = getCommandLineFlag("UseCompressedKlassPointers");
     2.8 +        Flag flag = getCommandLineFlag("UseCompressedClassPointers");
     2.9          compressedKlassPointersEnabled = (flag == null) ? Boolean.FALSE:
    2.10               (flag.getBool()? Boolean.TRUE: Boolean.FALSE);
    2.11      }
     3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Wed Sep 18 12:52:15 2013 -0400
     3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java	Thu Sep 19 09:26:08 2013 +0200
     3.3 @@ -66,18 +66,18 @@
     3.4        printGCAlgorithm(flagMap);
     3.5        System.out.println();
     3.6        System.out.println("Heap Configuration:");
     3.7 -      printValue("MinHeapFreeRatio   = ", getFlagValue("MinHeapFreeRatio", flagMap));
     3.8 -      printValue("MaxHeapFreeRatio   = ", getFlagValue("MaxHeapFreeRatio", flagMap));
     3.9 -      printValMB("MaxHeapSize        = ", getFlagValue("MaxHeapSize", flagMap));
    3.10 -      printValMB("NewSize            = ", getFlagValue("NewSize", flagMap));
    3.11 -      printValMB("MaxNewSize         = ", getFlagValue("MaxNewSize", flagMap));
    3.12 -      printValMB("OldSize            = ", getFlagValue("OldSize", flagMap));
    3.13 -      printValue("NewRatio           = ", getFlagValue("NewRatio", flagMap));
    3.14 -      printValue("SurvivorRatio      = ", getFlagValue("SurvivorRatio", flagMap));
    3.15 -      printValMB("MetaspaceSize      = ", getFlagValue("MetaspaceSize", flagMap));
    3.16 -      printValMB("ClassMetaspaceSize = ", getFlagValue("ClassMetaspaceSize", flagMap));
    3.17 -      printValMB("MaxMetaspaceSize   = ", getFlagValue("MaxMetaspaceSize", flagMap));
    3.18 -      printValMB("G1HeapRegionSize   = ", HeapRegion.grainBytes());
    3.19 +      printValue("MinHeapFreeRatio         = ", getFlagValue("MinHeapFreeRatio", flagMap));
    3.20 +      printValue("MaxHeapFreeRatio         = ", getFlagValue("MaxHeapFreeRatio", flagMap));
    3.21 +      printValMB("MaxHeapSize              = ", getFlagValue("MaxHeapSize", flagMap));
    3.22 +      printValMB("NewSize                  = ", getFlagValue("NewSize", flagMap));
    3.23 +      printValMB("MaxNewSize               = ", getFlagValue("MaxNewSize", flagMap));
    3.24 +      printValMB("OldSize                  = ", getFlagValue("OldSize", flagMap));
    3.25 +      printValue("NewRatio                 = ", getFlagValue("NewRatio", flagMap));
    3.26 +      printValue("SurvivorRatio            = ", getFlagValue("SurvivorRatio", flagMap));
    3.27 +      printValMB("MetaspaceSize            = ", getFlagValue("MetaspaceSize", flagMap));
    3.28 +      printValMB("CompressedClassSpaceSize = ", getFlagValue("CompressedClassSpaceSize", flagMap));
    3.29 +      printValMB("MaxMetaspaceSize         = ", getFlagValue("MaxMetaspaceSize", flagMap));
    3.30 +      printValMB("G1HeapRegionSize         = ", HeapRegion.grainBytes());
    3.31  
    3.32        System.out.println();
    3.33        System.out.println("Heap Usage:");
     4.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Sep 18 12:52:15 2013 -0400
     4.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Sep 19 09:26:08 2013 +0200
     4.3 @@ -105,7 +105,7 @@
     4.4          if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
     4.5        }
     4.6  
     4.7 -      if (UseCompressedKlassPointers) {
     4.8 +      if (UseCompressedClassPointers) {
     4.9          if (src->is_address() && !src->is_stack() && src->type() == T_ADDRESS &&
    4.10              src->as_address_ptr()->disp() == oopDesc::klass_offset_in_bytes()) return false;
    4.11        }
    4.12 @@ -963,7 +963,7 @@
    4.13        case T_METADATA:  __ ld_ptr(base, offset, to_reg->as_register()); break;
    4.14        case T_ADDRESS:
    4.15  #ifdef _LP64
    4.16 -        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedKlassPointers) {
    4.17 +        if (offset == oopDesc::klass_offset_in_bytes() && UseCompressedClassPointers) {
    4.18            __ lduw(base, offset, to_reg->as_register());
    4.19            __ decode_klass_not_null(to_reg->as_register());
    4.20          } else
    4.21 @@ -2208,7 +2208,7 @@
    4.22      // We don't know the array types are compatible
    4.23      if (basic_type != T_OBJECT) {
    4.24        // Simple test for basic type arrays
    4.25 -      if (UseCompressedKlassPointers) {
    4.26 +      if (UseCompressedClassPointers) {
    4.27          // We don't need decode because we just need to compare
    4.28          __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
    4.29          __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
    4.30 @@ -2342,7 +2342,7 @@
    4.31      // but not necessarily exactly of type default_type.
    4.32      Label known_ok, halt;
    4.33      metadata2reg(op->expected_type()->constant_encoding(), tmp);
    4.34 -    if (UseCompressedKlassPointers) {
    4.35 +    if (UseCompressedClassPointers) {
    4.36        // tmp holds the default type. It currently comes uncompressed after the
    4.37        // load of a constant, so encode it.
    4.38        __ encode_klass_not_null(tmp);
     5.1 --- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Wed Sep 18 12:52:15 2013 -0400
     5.2 +++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Thu Sep 19 09:26:08 2013 +0200
     5.3 @@ -186,7 +186,7 @@
     5.4      set((intx)markOopDesc::prototype(), t1);
     5.5    }
     5.6    st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
     5.7 -  if (UseCompressedKlassPointers) {
     5.8 +  if (UseCompressedClassPointers) {
     5.9      // Save klass
    5.10      mov(klass, t1);
    5.11      encode_klass_not_null(t1);
    5.12 @@ -196,7 +196,7 @@
    5.13    }
    5.14    if (len->is_valid()) {
    5.15      st(len, obj, arrayOopDesc::length_offset_in_bytes());
    5.16 -  } else if (UseCompressedKlassPointers) {
    5.17 +  } else if (UseCompressedClassPointers) {
    5.18      // otherwise length is in the class gap
    5.19      store_klass_gap(G0, obj);
    5.20    }
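
A rough illustration, not code from this changeset: on a 64-bit VM with compressed class pointers the allocation helpers above lay out the object header as an 8-byte mark word, a 4-byte narrow klass field, and a 4-byte klass gap that is zeroed for plain objects and, per the comment above, carries the length for arrays. A minimal C++ sketch of that layout (the field names are invented; the offsets are the ones implied by the mark/klass/klass-gap accessors used in these hunks):

    #include <cstdint>

    // Illustrative layout only, assuming an LP64 build with UseCompressedClassPointers.
    struct oop_header_sketch {
      uint64_t mark;         // mark word, offset 0 (oopDesc::mark_offset_in_bytes())
      uint32_t narrow_klass; // compressed Klass*, offset 8 (oopDesc::klass_offset_in_bytes())
      uint32_t klass_gap;    // offset 12; zeroed via store_klass_gap(), arrays keep their length here
    };
    static_assert(sizeof(oop_header_sketch) == 16, "header is two heap words on LP64");

    int main() { return 0; }
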
     6.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Wed Sep 18 12:52:15 2013 -0400
     6.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Sep 19 09:26:08 2013 +0200
     6.3 @@ -3911,7 +3911,7 @@
     6.4    // The number of bytes in this code is used by
     6.5    // MachCallDynamicJavaNode::ret_addr_offset()
     6.6    // if this changes, change that.
     6.7 -  if (UseCompressedKlassPointers) {
     6.8 +  if (UseCompressedClassPointers) {
     6.9      lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
    6.10      decode_klass_not_null(klass);
    6.11    } else {
    6.12 @@ -3920,7 +3920,7 @@
    6.13  }
    6.14  
    6.15  void MacroAssembler::store_klass(Register klass, Register dst_oop) {
    6.16 -  if (UseCompressedKlassPointers) {
    6.17 +  if (UseCompressedClassPointers) {
    6.18      assert(dst_oop != klass, "not enough registers");
    6.19      encode_klass_not_null(klass);
    6.20      st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
    6.21 @@ -3930,7 +3930,7 @@
    6.22  }
    6.23  
    6.24  void MacroAssembler::store_klass_gap(Register s, Register d) {
    6.25 -  if (UseCompressedKlassPointers) {
    6.26 +  if (UseCompressedClassPointers) {
    6.27      assert(s != d, "not enough registers");
    6.28      st(s, d, oopDesc::klass_gap_offset_in_bytes());
    6.29    }
    6.30 @@ -4089,7 +4089,7 @@
    6.31  }
    6.32  
    6.33  void MacroAssembler::encode_klass_not_null(Register r) {
    6.34 -  assert (UseCompressedKlassPointers, "must be compressed");
    6.35 +  assert (UseCompressedClassPointers, "must be compressed");
    6.36    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
    6.37    assert(r != G6_heapbase, "bad register choice");
    6.38    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
    6.39 @@ -4105,7 +4105,7 @@
    6.40    if (src == dst) {
    6.41      encode_klass_not_null(src);
    6.42    } else {
    6.43 -    assert (UseCompressedKlassPointers, "must be compressed");
    6.44 +    assert (UseCompressedClassPointers, "must be compressed");
    6.45      assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
    6.46      set((intptr_t)Universe::narrow_klass_base(), dst);
    6.47      sub(src, dst, dst);
    6.48 @@ -4119,7 +4119,7 @@
    6.49  // generated by decode_klass_not_null() and reinit_heapbase().  Hence, if
    6.50  // the instructions they generate change, then this method needs to be updated.
    6.51  int MacroAssembler::instr_size_for_decode_klass_not_null() {
    6.52 -  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
    6.53 +  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
    6.54    // set + add + set
    6.55    int num_instrs = insts_for_internal_set((intptr_t)Universe::narrow_klass_base()) + 1 +
    6.56      insts_for_internal_set((intptr_t)Universe::narrow_ptrs_base());
    6.57 @@ -4135,7 +4135,7 @@
    6.58  void  MacroAssembler::decode_klass_not_null(Register r) {
    6.59    // Do not add assert code to this unless you change vtableStubs_sparc.cpp
    6.60    // pd_code_size_limit.
    6.61 -  assert (UseCompressedKlassPointers, "must be compressed");
    6.62 +  assert (UseCompressedClassPointers, "must be compressed");
    6.63    assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
    6.64    assert(r != G6_heapbase, "bad register choice");
    6.65    set((intptr_t)Universe::narrow_klass_base(), G6_heapbase);
    6.66 @@ -4151,7 +4151,7 @@
    6.67    } else {
    6.68      // Do not add assert code to this unless you change vtableStubs_sparc.cpp
    6.69      // pd_code_size_limit.
    6.70 -    assert (UseCompressedKlassPointers, "must be compressed");
    6.71 +    assert (UseCompressedClassPointers, "must be compressed");
    6.72      assert(Universe::narrow_klass_base() != NULL, "narrow_klass_base should be initialized");
    6.73      if (Universe::narrow_klass_shift() != 0) {
    6.74        assert((src != G6_heapbase) && (dst != G6_heapbase), "bad register choice");
    6.75 @@ -4167,7 +4167,7 @@
    6.76  }
    6.77  
    6.78  void MacroAssembler::reinit_heapbase() {
    6.79 -  if (UseCompressedOops || UseCompressedKlassPointers) {
    6.80 +  if (UseCompressedOops || UseCompressedClassPointers) {
    6.81      if (Universe::heap() != NULL) {
    6.82        set((intptr_t)Universe::narrow_ptrs_base(), G6_heapbase);
    6.83      } else {
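
The encode_klass_not_null / decode_klass_not_null pair above implements a plain base-plus-shift compression of Klass pointers. A self-contained sketch of the round trip; the base value and the example pointer are made up, and the shift of 3 simply reflects 8-byte klass alignment:

    #include <cassert>
    #include <cstdint>

    // Illustrative only: subtract the narrow klass base and shift right to encode,
    // shift left and add the base back to decode, as the SPARC assembler code above does.
    int main() {
      const uint64_t narrow_klass_base  = 0x0000000800000000ULL; // example base, not HotSpot's actual choice
      const unsigned narrow_klass_shift = 3;                     // 8-byte klass alignment

      uint64_t klass   = narrow_klass_base + 0x12340ULL;         // some 8-byte-aligned Klass*
      uint32_t narrow  = (uint32_t)((klass - narrow_klass_base) >> narrow_klass_shift);
      uint64_t decoded = narrow_klass_base + ((uint64_t)narrow << narrow_klass_shift);

      assert(decoded == klass); // lossless because the Klass* is suitably aligned
      return 0;
    }
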
     7.1 --- a/src/cpu/sparc/vm/sparc.ad	Wed Sep 18 12:52:15 2013 -0400
     7.2 +++ b/src/cpu/sparc/vm/sparc.ad	Thu Sep 19 09:26:08 2013 +0200
     7.3 @@ -557,7 +557,7 @@
     7.4      int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
     7.5      int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
     7.6      int klass_load_size;
     7.7 -    if (UseCompressedKlassPointers) {
     7.8 +    if (UseCompressedClassPointers) {
     7.9        assert(Universe::heap() != NULL, "java heap should be initialized");
    7.10        klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
    7.11      } else {
    7.12 @@ -1657,7 +1657,7 @@
    7.13  void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
    7.14    st->print_cr("\nUEP:");
    7.15  #ifdef    _LP64
    7.16 -  if (UseCompressedKlassPointers) {
    7.17 +  if (UseCompressedClassPointers) {
    7.18      assert(Universe::heap() != NULL, "java heap should be initialized");
    7.19      st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
    7.20      st->print_cr("\tSET    Universe::narrow_klass_base,R_G6_heap_base");
    7.21 @@ -1897,7 +1897,7 @@
    7.22  
    7.23  bool Matcher::narrow_klass_use_complex_address() {
    7.24    NOT_LP64(ShouldNotCallThis());
    7.25 -  assert(UseCompressedKlassPointers, "only for compressed klass code");
    7.26 +  assert(UseCompressedClassPointers, "only for compressed klass code");
    7.27    return false;
    7.28  }
    7.29  
    7.30 @@ -2561,7 +2561,7 @@
    7.31        int off = __ offset();
    7.32        __ load_klass(O0, G3_scratch);
    7.33        int klass_load_size;
    7.34 -      if (UseCompressedKlassPointers) {
    7.35 +      if (UseCompressedClassPointers) {
    7.36          assert(Universe::heap() != NULL, "java heap should be initialized");
    7.37          klass_load_size = MacroAssembler::instr_size_for_decode_klass_not_null() + 1*BytesPerInstWord;
    7.38        } else {
     8.1 --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Sep 18 12:52:15 2013 -0400
     8.2 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Sep 19 09:26:08 2013 +0200
     8.3 @@ -2945,7 +2945,7 @@
     8.4  
     8.5      BLOCK_COMMENT("arraycopy argument klass checks");
     8.6      //  get src->klass()
     8.7 -    if (UseCompressedKlassPointers) {
     8.8 +    if (UseCompressedClassPointers) {
     8.9        __ delayed()->nop(); // ??? not good
    8.10        __ load_klass(src, G3_src_klass);
    8.11      } else {
    8.12 @@ -2980,7 +2980,7 @@
    8.13      // Load 32-bits signed value. Use br() instruction with it to check icc.
    8.14      __ lduw(G3_src_klass, lh_offset, G5_lh);
    8.15  
    8.16 -    if (UseCompressedKlassPointers) {
    8.17 +    if (UseCompressedClassPointers) {
    8.18        __ load_klass(dst, G4_dst_klass);
    8.19      }
    8.20      // Handle objArrays completely differently...
    8.21 @@ -2988,7 +2988,7 @@
    8.22      __ set(objArray_lh, O5_temp);
    8.23      __ cmp(G5_lh,       O5_temp);
    8.24      __ br(Assembler::equal, false, Assembler::pt, L_objArray);
    8.25 -    if (UseCompressedKlassPointers) {
    8.26 +    if (UseCompressedClassPointers) {
    8.27        __ delayed()->nop();
    8.28      } else {
    8.29        __ delayed()->ld_ptr(dst, oopDesc::klass_offset_in_bytes(), G4_dst_klass);
     9.1 --- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Wed Sep 18 12:52:15 2013 -0400
     9.2 +++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Thu Sep 19 09:26:08 2013 +0200
     9.3 @@ -218,13 +218,13 @@
     9.4        // ld;ld;ld,jmp,nop
     9.5        const int basic = 5*BytesPerInstWord +
     9.6                          // shift;add for load_klass (only shift with zero heap based)
     9.7 -                        (UseCompressedKlassPointers ?
     9.8 +                        (UseCompressedClassPointers ?
     9.9                            MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
    9.10        return basic + slop;
    9.11      } else {
    9.12        const int basic = (28 LP64_ONLY(+ 6)) * BytesPerInstWord +
    9.13                          // shift;add for load_klass (only shift with zero heap based)
    9.14 -                        (UseCompressedKlassPointers ?
    9.15 +                        (UseCompressedClassPointers ?
    9.16                            MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
    9.17        return (basic + slop);
    9.18      }
    10.1 --- a/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Wed Sep 18 12:52:15 2013 -0400
    10.2 +++ b/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Thu Sep 19 09:26:08 2013 +0200
    10.3 @@ -148,7 +148,7 @@
    10.4  
    10.5    static int adjust_reg_range(int range) {
    10.6      // Reduce the number of available regs (to free r12) in case of compressed oops
    10.7 -    if (UseCompressedOops || UseCompressedKlassPointers) return range - 1;
    10.8 +    if (UseCompressedOops || UseCompressedClassPointers) return range - 1;
    10.9      return range;
   10.10    }
   10.11  
    11.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Sep 18 12:52:15 2013 -0400
    11.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Sep 19 09:26:08 2013 +0200
    11.3 @@ -341,7 +341,7 @@
    11.4    Register receiver = FrameMap::receiver_opr->as_register();
    11.5    Register ic_klass = IC_Klass;
    11.6    const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
    11.7 -  const bool do_post_padding = VerifyOops || UseCompressedKlassPointers;
    11.8 +  const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
    11.9    if (!do_post_padding) {
   11.10      // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
   11.11      while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
   11.12 @@ -1263,7 +1263,7 @@
   11.13        break;
   11.14  
   11.15      case T_ADDRESS:
   11.16 -      if (UseCompressedKlassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
   11.17 +      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
   11.18          __ movl(dest->as_register(), from_addr);
   11.19        } else {
   11.20          __ movptr(dest->as_register(), from_addr);
   11.21 @@ -1371,7 +1371,7 @@
   11.22      __ verify_oop(dest->as_register());
   11.23    } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
   11.24  #ifdef _LP64
   11.25 -    if (UseCompressedKlassPointers) {
   11.26 +    if (UseCompressedClassPointers) {
   11.27        __ decode_klass_not_null(dest->as_register());
   11.28      }
   11.29  #endif
   11.30 @@ -1716,7 +1716,7 @@
   11.31    } else if (obj == klass_RInfo) {
   11.32      klass_RInfo = dst;
   11.33    }
   11.34 -  if (k->is_loaded() && !UseCompressedKlassPointers) {
   11.35 +  if (k->is_loaded() && !UseCompressedClassPointers) {
   11.36      select_different_registers(obj, dst, k_RInfo, klass_RInfo);
   11.37    } else {
   11.38      Rtmp1 = op->tmp3()->as_register();
   11.39 @@ -1754,7 +1754,7 @@
   11.40      // get object class
   11.41      // not a safepoint as obj null check happens earlier
   11.42  #ifdef _LP64
   11.43 -    if (UseCompressedKlassPointers) {
   11.44 +    if (UseCompressedClassPointers) {
   11.45        __ load_klass(Rtmp1, obj);
   11.46        __ cmpptr(k_RInfo, Rtmp1);
   11.47      } else {
   11.48 @@ -3294,7 +3294,7 @@
   11.49      // We don't know the array types are compatible
   11.50      if (basic_type != T_OBJECT) {
   11.51        // Simple test for basic type arrays
   11.52 -      if (UseCompressedKlassPointers) {
   11.53 +      if (UseCompressedClassPointers) {
   11.54          __ movl(tmp, src_klass_addr);
   11.55          __ cmpl(tmp, dst_klass_addr);
   11.56        } else {
   11.57 @@ -3456,21 +3456,21 @@
   11.58      Label known_ok, halt;
   11.59      __ mov_metadata(tmp, default_type->constant_encoding());
   11.60  #ifdef _LP64
   11.61 -    if (UseCompressedKlassPointers) {
   11.62 +    if (UseCompressedClassPointers) {
   11.63        __ encode_klass_not_null(tmp);
   11.64      }
   11.65  #endif
   11.66  
   11.67      if (basic_type != T_OBJECT) {
   11.68  
   11.69 -      if (UseCompressedKlassPointers)          __ cmpl(tmp, dst_klass_addr);
   11.70 +      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
   11.71        else                   __ cmpptr(tmp, dst_klass_addr);
   11.72        __ jcc(Assembler::notEqual, halt);
   11.73 -      if (UseCompressedKlassPointers)          __ cmpl(tmp, src_klass_addr);
   11.74 +      if (UseCompressedClassPointers)          __ cmpl(tmp, src_klass_addr);
   11.75        else                   __ cmpptr(tmp, src_klass_addr);
   11.76        __ jcc(Assembler::equal, known_ok);
   11.77      } else {
   11.78 -      if (UseCompressedKlassPointers)          __ cmpl(tmp, dst_klass_addr);
   11.79 +      if (UseCompressedClassPointers)          __ cmpl(tmp, dst_klass_addr);
   11.80        else                   __ cmpptr(tmp, dst_klass_addr);
   11.81        __ jcc(Assembler::equal, known_ok);
   11.82        __ cmpptr(src, dst);
    12.1 --- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Sep 18 12:52:15 2013 -0400
    12.2 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Thu Sep 19 09:26:08 2013 +0200
    12.3 @@ -1239,7 +1239,7 @@
    12.4    }
    12.5    LIR_Opr reg = rlock_result(x);
    12.6    LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
    12.7 -  if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
    12.8 +  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
    12.9      tmp3 = new_register(objectType);
   12.10    }
   12.11    __ checkcast(reg, obj.result(), x->klass(),
   12.12 @@ -1261,7 +1261,7 @@
   12.13    }
   12.14    obj.load_item();
   12.15    LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
   12.16 -  if (!x->klass()->is_loaded() || UseCompressedKlassPointers) {
   12.17 +  if (!x->klass()->is_loaded() || UseCompressedClassPointers) {
   12.18      tmp3 = new_register(objectType);
   12.19    }
   12.20    __ instanceof(reg, obj.result(), x->klass(),
    13.1 --- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Wed Sep 18 12:52:15 2013 -0400
    13.2 +++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp	Thu Sep 19 09:26:08 2013 +0200
    13.3 @@ -157,7 +157,7 @@
    13.4      movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
    13.5    }
    13.6  #ifdef _LP64
    13.7 -  if (UseCompressedKlassPointers) { // Take care not to kill klass
    13.8 +  if (UseCompressedClassPointers) { // Take care not to kill klass
    13.9      movptr(t1, klass);
   13.10      encode_klass_not_null(t1);
   13.11      movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
   13.12 @@ -171,7 +171,7 @@
   13.13      movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
   13.14    }
   13.15  #ifdef _LP64
   13.16 -  else if (UseCompressedKlassPointers) {
   13.17 +  else if (UseCompressedClassPointers) {
   13.18      xorptr(t1, t1);
   13.19      store_klass_gap(obj, t1);
   13.20    }
   13.21 @@ -334,7 +334,7 @@
   13.22    assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
   13.23    int start_offset = offset();
   13.24  
   13.25 -  if (UseCompressedKlassPointers) {
   13.26 +  if (UseCompressedClassPointers) {
   13.27      load_klass(rscratch1, receiver);
   13.28      cmpptr(rscratch1, iCache);
   13.29    } else {
   13.30 @@ -345,7 +345,7 @@
   13.31    jump_cc(Assembler::notEqual,
   13.32            RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
   13.33    const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
   13.34 -  assert(UseCompressedKlassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
   13.35 +  assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
   13.36  }
   13.37  
   13.38  
    14.1 --- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Wed Sep 18 12:52:15 2013 -0400
    14.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Sep 19 09:26:08 2013 +0200
    14.3 @@ -1635,7 +1635,7 @@
    14.4  #ifdef ASSERT
    14.5    // TraceBytecodes does not use r12 but saves it over the call, so don't verify
    14.6    // r12 is the heapbase.
    14.7 -  LP64_ONLY(if ((UseCompressedOops || UseCompressedKlassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
    14.8 +  LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
    14.9  #endif // ASSERT
   14.10  
   14.11    assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
   14.12 @@ -4802,7 +4802,7 @@
   14.13  
   14.14  void MacroAssembler::load_klass(Register dst, Register src) {
   14.15  #ifdef _LP64
   14.16 -  if (UseCompressedKlassPointers) {
   14.17 +  if (UseCompressedClassPointers) {
   14.18      movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
   14.19      decode_klass_not_null(dst);
   14.20    } else
   14.21 @@ -4817,7 +4817,7 @@
   14.22  
   14.23  void MacroAssembler::store_klass(Register dst, Register src) {
   14.24  #ifdef _LP64
   14.25 -  if (UseCompressedKlassPointers) {
   14.26 +  if (UseCompressedClassPointers) {
   14.27      encode_klass_not_null(src);
   14.28      movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
   14.29    } else
   14.30 @@ -4892,7 +4892,7 @@
   14.31  
   14.32  #ifdef _LP64
   14.33  void MacroAssembler::store_klass_gap(Register dst, Register src) {
   14.34 -  if (UseCompressedKlassPointers) {
   14.35 +  if (UseCompressedClassPointers) {
   14.36      // Store to klass gap in destination
   14.37      movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
   14.38    }
   14.39 @@ -5075,7 +5075,7 @@
   14.40  // when (Universe::heap() != NULL).  Hence, if the instructions they
   14.41  // generate change, then this method needs to be updated.
   14.42  int MacroAssembler::instr_size_for_decode_klass_not_null() {
   14.43 -  assert (UseCompressedKlassPointers, "only for compressed klass ptrs");
   14.44 +  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
   14.45    // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
   14.46    return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
   14.47  }
   14.48 @@ -5085,7 +5085,7 @@
   14.49  void  MacroAssembler::decode_klass_not_null(Register r) {
   14.50    // Note: it will change flags
   14.51    assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
   14.52 -  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
   14.53 +  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   14.54    assert(r != r12_heapbase, "Decoding a klass in r12");
   14.55    // Cannot assert, unverified entry point counts instructions (see .ad file)
   14.56    // vtableStubs also counts instructions in pd_code_size_limit.
   14.57 @@ -5103,7 +5103,7 @@
   14.58  void  MacroAssembler::decode_klass_not_null(Register dst, Register src) {
   14.59    // Note: it will change flags
   14.60    assert(Universe::narrow_klass_base() != NULL, "Base should be initialized");
   14.61 -  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
   14.62 +  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   14.63    if (dst == src) {
   14.64      decode_klass_not_null(dst);
   14.65    } else {
   14.66 @@ -5141,7 +5141,7 @@
   14.67  }
   14.68  
   14.69  void  MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
   14.70 -  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
   14.71 +  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   14.72    assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   14.73    int klass_index = oop_recorder()->find_index(k);
   14.74    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
   14.75 @@ -5149,7 +5149,7 @@
   14.76  }
   14.77  
   14.78  void  MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
   14.79 -  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
   14.80 +  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   14.81    assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   14.82    int klass_index = oop_recorder()->find_index(k);
   14.83    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
   14.84 @@ -5175,7 +5175,7 @@
   14.85  }
   14.86  
   14.87  void  MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
   14.88 -  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
   14.89 +  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   14.90    assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
   14.91    int klass_index = oop_recorder()->find_index(k);
   14.92    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
   14.93 @@ -5183,7 +5183,7 @@
   14.94  }
   14.95  
   14.96  void  MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
   14.97 -  assert (UseCompressedKlassPointers, "should only be used for compressed headers");
   14.98 +  assert (UseCompressedClassPointers, "should only be used for compressed headers");
   14.99    assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
  14.100    int klass_index = oop_recorder()->find_index(k);
  14.101    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  14.102 @@ -5191,7 +5191,7 @@
  14.103  }
  14.104  
  14.105  void MacroAssembler::reinit_heapbase() {
  14.106 -  if (UseCompressedOops || UseCompressedKlassPointers) {
  14.107 +  if (UseCompressedOops || UseCompressedClassPointers) {
  14.108      if (Universe::heap() != NULL) {
  14.109        if (Universe::narrow_oop_base() == NULL) {
  14.110          MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
    15.1 --- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Wed Sep 18 12:52:15 2013 -0400
    15.2 +++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Thu Sep 19 09:26:08 2013 +0200
    15.3 @@ -211,11 +211,11 @@
    15.4    if (is_vtable_stub) {
    15.5      // Vtable stub size
    15.6      return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0) +
    15.7 -           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
    15.8 +           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
    15.9    } else {
   15.10      // Itable stub size
   15.11      return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
   15.12 -           (UseCompressedKlassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   15.13 +           (UseCompressedClassPointers ?  MacroAssembler::instr_size_for_decode_klass_not_null() : 0);
   15.14    }
   15.15    // In order to tune these parameters, run the JVM with VM options
   15.16    // +PrintMiscellaneous and +WizardMode to see information about
    16.1 --- a/src/cpu/x86/vm/x86_64.ad	Wed Sep 18 12:52:15 2013 -0400
    16.2 +++ b/src/cpu/x86/vm/x86_64.ad	Thu Sep 19 09:26:08 2013 +0200
    16.3 @@ -1391,7 +1391,7 @@
    16.4  #ifndef PRODUCT
    16.5  void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
    16.6  {
    16.7 -  if (UseCompressedKlassPointers) {
    16.8 +  if (UseCompressedClassPointers) {
    16.9      st->print_cr("movl    rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
   16.10      st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
   16.11      st->print_cr("\tcmpq    rax, rscratch1\t # Inline cache check");
   16.12 @@ -1408,7 +1408,7 @@
   16.13  {
   16.14    MacroAssembler masm(&cbuf);
   16.15    uint insts_size = cbuf.insts_size();
   16.16 -  if (UseCompressedKlassPointers) {
   16.17 +  if (UseCompressedClassPointers) {
   16.18      masm.load_klass(rscratch1, j_rarg0);
   16.19      masm.cmpptr(rax, rscratch1);
   16.20    } else {
   16.21 @@ -1557,7 +1557,7 @@
   16.22  }
   16.23  
   16.24  bool Matcher::narrow_klass_use_complex_address() {
   16.25 -  assert(UseCompressedKlassPointers, "only for compressed klass code");
   16.26 +  assert(UseCompressedClassPointers, "only for compressed klass code");
   16.27    return (LogKlassAlignmentInBytes <= 3);
   16.28  }
   16.29  
    17.1 --- a/src/os/bsd/vm/os_bsd.cpp	Wed Sep 18 12:52:15 2013 -0400
    17.2 +++ b/src/os/bsd/vm/os_bsd.cpp	Thu Sep 19 09:26:08 2013 +0200
    17.3 @@ -3589,8 +3589,6 @@
    17.4  #endif
    17.5    }
    17.6  
    17.7 -  os::large_page_init();
    17.8 -
    17.9    // initialize suspend/resume support - must do this before signal_sets_init()
   17.10    if (SR_initialize() != 0) {
   17.11      perror("SR_initialize failed");
    18.1 --- a/src/os/linux/vm/os_linux.cpp	Wed Sep 18 12:52:15 2013 -0400
    18.2 +++ b/src/os/linux/vm/os_linux.cpp	Thu Sep 19 09:26:08 2013 +0200
    18.3 @@ -4805,8 +4805,6 @@
    18.4  #endif
    18.5    }
    18.6  
    18.7 -  os::large_page_init();
    18.8 -
    18.9    // initialize suspend/resume support - must do this before signal_sets_init()
   18.10    if (SR_initialize() != 0) {
   18.11      perror("SR_initialize failed");
    19.1 --- a/src/os/solaris/vm/os_solaris.cpp	Wed Sep 18 12:52:15 2013 -0400
    19.2 +++ b/src/os/solaris/vm/os_solaris.cpp	Thu Sep 19 09:26:08 2013 +0200
    19.3 @@ -5178,9 +5178,7 @@
    19.4      if(Verbose && PrintMiscellaneous)
    19.5        tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
    19.6  #endif
    19.7 -}
    19.8 -
    19.9 -  os::large_page_init();
   19.10 +  }
   19.11  
   19.12    // Check minimum allowable stack size for thread creation and to initialize
   19.13    // the java system classes, including StackOverflowError - depends on page
    20.1 --- a/src/os/windows/vm/os_windows.cpp	Wed Sep 18 12:52:15 2013 -0400
    20.2 +++ b/src/os/windows/vm/os_windows.cpp	Thu Sep 19 09:26:08 2013 +0200
    20.3 @@ -3189,9 +3189,12 @@
    20.4      return p_buf;
    20.5  
    20.6    } else {
    20.7 +    if (TracePageSizes && Verbose) {
    20.8 +       tty->print_cr("Reserving large pages in a single large chunk.");
    20.9 +    }
   20.10      // normal policy just allocate it all at once
   20.11      DWORD flag = MEM_RESERVE | MEM_COMMIT | MEM_LARGE_PAGES;
   20.12 -    char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
   20.13 +    char * res = (char *)VirtualAlloc(addr, bytes, flag, prot);
   20.14      if (res != NULL) {
   20.15        address pc = CALLER_PC;
   20.16        MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
   20.17 @@ -3917,8 +3920,6 @@
   20.18  #endif
   20.19    }
   20.20  
   20.21 -  os::large_page_init();
   20.22 -
   20.23    // Setup Windows Exceptions
   20.24  
   20.25    // for debugging float code generation bugs
   20.26 @@ -5714,7 +5715,66 @@
   20.27  #endif
   20.28  
   20.29  #ifndef PRODUCT
   20.30 +
   20.31 +// test the code path in reserve_memory_special() that tries to allocate memory in a single
   20.32 +// contiguous memory block at a particular address.
   20.33 +// The test first tries to find a good approximate address to allocate at by using the same
   20.34 +// method to allocate some memory at any address. The test then tries to allocate memory in
   20.35 +// the vicinity (not directly after it to avoid possible by-chance use of that location)
   20.36 +// This is of course only some dodgy assumption, there is no guarantee that the vicinity of
   20.37 +// the previously allocated memory is available for allocation. The only actual failure
   20.38 +// that is reported is when the test tries to allocate at a particular location but gets a
   20.39 +// different valid one. A NULL return value at this point is not considered an error but may
   20.40 +// be legitimate.
   20.41 +// If -XX:+VerboseInternalVMTests is enabled, print some explanatory messages.
   20.42  void TestReserveMemorySpecial_test() {
   20.43 -  // No tests available for this platform
   20.44 -}
   20.45 -#endif
   20.46 +  if (!UseLargePages) {
   20.47 +    if (VerboseInternalVMTests) {
   20.48 +      gclog_or_tty->print("Skipping test because large pages are disabled");
   20.49 +    }
   20.50 +    return;
   20.51 +  }
   20.52 +  // save current value of globals
   20.53 +  bool old_use_large_pages_individual_allocation = UseLargePagesIndividualAllocation;
   20.54 +  bool old_use_numa_interleaving = UseNUMAInterleaving;
   20.55 +
   20.56 +  // set globals to make sure we hit the correct code path
   20.57 +  UseLargePagesIndividualAllocation = UseNUMAInterleaving = false;
   20.58 +
   20.59 +  // do an allocation at an address selected by the OS to get a good one.
   20.60 +  const size_t large_allocation_size = os::large_page_size() * 4;
   20.61 +  char* result = os::reserve_memory_special(large_allocation_size, os::large_page_size(), NULL, false);
   20.62 +  if (result == NULL) {
   20.63 +    if (VerboseInternalVMTests) {
   20.64 +      gclog_or_tty->print("Failed to allocate control block with size "SIZE_FORMAT". Skipping remainder of test.",
   20.65 +        large_allocation_size);
   20.66 +    }
   20.67 +  } else {
   20.68 +    os::release_memory_special(result, large_allocation_size);
   20.69 +
   20.70 +    // allocate another page within the recently allocated memory area which seems to be a good location. At least
   20.71 +    // we managed to get it once.
   20.72 +    const size_t expected_allocation_size = os::large_page_size();
   20.73 +    char* expected_location = result + os::large_page_size();
   20.74 +    char* actual_location = os::reserve_memory_special(expected_allocation_size, os::large_page_size(), expected_location, false);
   20.75 +    if (actual_location == NULL) {
   20.76 +      if (VerboseInternalVMTests) {
   20.77 +        gclog_or_tty->print("Failed to allocate any memory at "PTR_FORMAT" size "SIZE_FORMAT". Skipping remainder of test.",
   20.78 +          expected_location, large_allocation_size);
   20.79 +      }
   20.80 +    } else {
   20.81 +      // release memory
   20.82 +      os::release_memory_special(actual_location, expected_allocation_size);
   20.83 +      // only now check, after releasing any memory to avoid any leaks.
   20.84 +      assert(actual_location == expected_location,
   20.85 +        err_msg("Failed to allocate memory at requested location "PTR_FORMAT" of size "SIZE_FORMAT", is "PTR_FORMAT" instead",
   20.86 +          expected_location, expected_allocation_size, actual_location));
   20.87 +    }
   20.88 +  }
   20.89 +
   20.90 +  // restore globals
   20.91 +  UseLargePagesIndividualAllocation = old_use_large_pages_individual_allocation;
   20.92 +  UseNUMAInterleaving = old_use_numa_interleaving;
   20.93 +}
   20.94 +#endif // PRODUCT
   20.95 +
    21.1 --- a/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Wed Sep 18 12:52:15 2013 -0400
    21.2 +++ b/src/os_cpu/solaris_sparc/vm/globals_solaris_sparc.hpp	Thu Sep 19 09:26:08 2013 +0200
    21.3 @@ -35,7 +35,9 @@
    21.4  
    21.5  // Used on 64 bit platforms for UseCompressedOops base address
    21.6  #ifdef _LP64
    21.7 -define_pd_global(uintx, HeapBaseMinAddress,      CONST64(4)*G);
    21.8 +// use 6G as default base address because by default the OS maps the application
    21.9 +// to 4G on Solaris-Sparc. This leaves at least 2G for the native heap.
   21.10 +define_pd_global(uintx, HeapBaseMinAddress,      CONST64(6)*G);
   21.11  #else
   21.12  define_pd_global(uintx, HeapBaseMinAddress,      2*G);
   21.13  #endif
    22.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Sep 18 12:52:15 2013 -0400
    22.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Sep 19 09:26:08 2013 +0200
    22.3 @@ -230,7 +230,7 @@
    22.4    // depends on this property.
    22.5    debug_only(
    22.6      FreeChunk* junk = NULL;
    22.7 -    assert(UseCompressedKlassPointers ||
    22.8 +    assert(UseCompressedClassPointers ||
    22.9             junk->prev_addr() == (void*)(oop(junk)->klass_addr()),
   22.10             "Offset of FreeChunk::_prev within FreeChunk must match"
   22.11             "  that of OopDesc::_klass within OopDesc");
   22.12 @@ -1407,7 +1407,7 @@
   22.13    assert(!((FreeChunk*)obj_ptr)->is_free(), "Error, block will look free but show wrong size");
   22.14    OrderAccess::storestore();
   22.15  
   22.16 -  if (UseCompressedKlassPointers) {
   22.17 +  if (UseCompressedClassPointers) {
   22.18      // Copy gap missed by (aligned) header size calculation below
   22.19      obj->set_klass_gap(old->klass_gap());
   22.20    }
    23.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Sep 18 12:52:15 2013 -0400
    23.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Sep 19 09:26:08 2013 +0200
    23.3 @@ -481,9 +481,8 @@
    23.4  
    23.5  ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
    23.6    _g1h(g1h),
    23.7 -  _markBitMap1(MinObjAlignment - 1),
    23.8 -  _markBitMap2(MinObjAlignment - 1),
    23.9 -
   23.10 +  _markBitMap1(log2_intptr(MinObjAlignment)),
   23.11 +  _markBitMap2(log2_intptr(MinObjAlignment)),
   23.12    _parallel_marking_threads(0),
   23.13    _max_parallel_marking_threads(0),
   23.14    _sleep_factor(0.0),
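
The bitmap constructor argument is used as a log2 shift (heap words per bitmap bit), which is why the old expression MinObjAlignment - 1 happened to work only for the default object alignment. A small standalone comparison of the old and new arguments, assuming a 64-bit VM where MinObjAlignment is ObjectAlignmentInBytes / 8:

    #include <cstdio>

    // Illustrative arithmetic only: the old constructor argument (MinObjAlignment - 1)
    // versus the corrected one (log2_intptr(MinObjAlignment)).
    static int log2_int(int x) { int r = 0; while (x > 1) { x >>= 1; ++r; } return r; }

    int main() {
      for (int obj_align_bytes = 8; obj_align_bytes <= 64; obj_align_bytes *= 2) {
        int min_obj_alignment = obj_align_bytes / 8;  // in heap words on LP64
        std::printf("ObjectAlignmentInBytes=%2d  old=%d  new=%d\n",
                    obj_align_bytes, min_obj_alignment - 1, log2_int(min_obj_alignment));
      }
      return 0;  // old and new agree for 8 and 16, diverge at 32 and 64
    }
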
    24.1 --- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Wed Sep 18 12:52:15 2013 -0400
    24.2 +++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Thu Sep 19 09:26:08 2013 +0200
    24.3 @@ -33,8 +33,8 @@
    24.4  
    24.5  void G1CardCounts::clear_range(size_t from_card_num, size_t to_card_num) {
    24.6    if (has_count_table()) {
    24.7 -    check_card_num(from_card_num,
    24.8 -                   err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
    24.9 +    assert(from_card_num >= 0 && from_card_num < _committed_max_card_num,
   24.10 +           err_msg("from card num out of range: "SIZE_FORMAT, from_card_num));
   24.11      assert(from_card_num < to_card_num,
   24.12             err_msg("Wrong order? from: " SIZE_FORMAT ", to: "SIZE_FORMAT,
   24.13                     from_card_num, to_card_num));
    25.1 --- a/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Wed Sep 18 12:52:15 2013 -0400
    25.2 +++ b/src/share/vm/gc_implementation/g1/g1CardCounts.hpp	Thu Sep 19 09:26:08 2013 +0200
    25.3 @@ -72,25 +72,21 @@
    25.4      return has_reserved_count_table() && _committed_max_card_num > 0;
    25.5    }
    25.6  
    25.7 -  void check_card_num(size_t card_num, const char* msg) {
    25.8 -    assert(card_num >= 0 && card_num < _committed_max_card_num, msg);
    25.9 -  }
   25.10 -
   25.11    size_t ptr_2_card_num(const jbyte* card_ptr) {
   25.12      assert(card_ptr >= _ct_bot,
   25.13 -           err_msg("Inavalied card pointer: "
   25.14 +           err_msg("Invalid card pointer: "
   25.15                     "card_ptr: " PTR_FORMAT ", "
   25.16                     "_ct_bot: " PTR_FORMAT,
   25.17                     card_ptr, _ct_bot));
   25.18      size_t card_num = pointer_delta(card_ptr, _ct_bot, sizeof(jbyte));
   25.19 -    check_card_num(card_num,
   25.20 -                   err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
   25.21 +    assert(card_num >= 0 && card_num < _committed_max_card_num,
   25.22 +           err_msg("card pointer out of range: " PTR_FORMAT, card_ptr));
   25.23      return card_num;
   25.24    }
   25.25  
   25.26    jbyte* card_num_2_ptr(size_t card_num) {
   25.27 -    check_card_num(card_num,
   25.28 -                   err_msg("card num out of range: "SIZE_FORMAT, card_num));
   25.29 +    assert(card_num >= 0 && card_num < _committed_max_card_num,
   25.30 +           err_msg("card num out of range: "SIZE_FORMAT, card_num));
   25.31      return (jbyte*) (_ct_bot + card_num);
   25.32    }
   25.33  
    26.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Sep 18 12:52:15 2013 -0400
    26.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Sep 19 09:26:08 2013 +0200
    26.3 @@ -2191,6 +2191,10 @@
    26.4    return JNI_OK;
    26.5  }
    26.6  
    26.7 +size_t G1CollectedHeap::conservative_max_heap_alignment() {
    26.8 +  return HeapRegion::max_region_size();
    26.9 +}
   26.10 +
   26.11  void G1CollectedHeap::ref_processing_init() {
   26.12    // Reference processing in G1 currently works as follows:
   26.13    //
    27.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Sep 18 12:52:15 2013 -0400
    27.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Sep 19 09:26:08 2013 +0200
    27.3 @@ -1092,6 +1092,9 @@
    27.4    // specified by the policy object.
    27.5    jint initialize();
    27.6  
    27.7 +  // Return the (conservative) maximum heap alignment for any G1 heap
    27.8 +  static size_t conservative_max_heap_alignment();
    27.9 +
   27.10    // Initialize weak reference processing.
   27.11    virtual void ref_processing_init();
   27.12  
    28.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed Sep 18 12:52:15 2013 -0400
    28.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Sep 19 09:26:08 2013 +0200
    28.3 @@ -1,5 +1,5 @@
    28.4  /*
    28.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    28.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    28.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    28.8   *
    28.9   * This code is free software; you can redistribute it and/or modify it
   28.10 @@ -149,6 +149,10 @@
   28.11  // many regions in the heap (based on the min heap size).
   28.12  #define TARGET_REGION_NUMBER          2048
   28.13  
   28.14 +size_t HeapRegion::max_region_size() {
   28.15 +  return (size_t)MAX_REGION_SIZE;
   28.16 +}
   28.17 +
   28.18  void HeapRegion::setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size) {
   28.19    uintx region_size = G1HeapRegionSize;
   28.20    if (FLAG_IS_DEFAULT(G1HeapRegionSize)) {
    29.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed Sep 18 12:52:15 2013 -0400
    29.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Sep 19 09:26:08 2013 +0200
    29.3 @@ -1,5 +1,5 @@
    29.4  /*
    29.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    29.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    29.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    29.8   *
    29.9   * This code is free software; you can redistribute it and/or modify it
   29.10 @@ -355,6 +355,8 @@
   29.11                                        ~((1 << (size_t) LogOfHRGrainBytes) - 1);
   29.12    }
   29.13  
   29.14 +  static size_t max_region_size();
   29.15 +
   29.16    // It sets up the heap region size (GrainBytes / GrainWords), as
   29.17    // well as other related fields that are based on the heap region
   29.18    // size (LogOfHRGrainBytes / LogOfHRGrainWords /
    30.1 --- a/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Wed Sep 18 12:52:15 2013 -0400
    30.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Thu Sep 19 09:26:08 2013 +0200
    30.3 @@ -1,5 +1,5 @@
    30.4  /*
    30.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    30.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    30.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    30.8   *
    30.9   * This code is free software; you can redistribute it and/or modify it
   30.10 @@ -68,9 +68,6 @@
   30.11    size_t min_old_gen_size()   { return _min_gen1_size; }
   30.12    size_t old_gen_size()       { return _initial_gen1_size; }
   30.13    size_t max_old_gen_size()   { return _max_gen1_size; }
   30.14 -
   30.15 -  size_t metaspace_size()      { return MetaspaceSize; }
   30.16 -  size_t max_metaspace_size()  { return MaxMetaspaceSize; }
   30.17  };
   30.18  
   30.19  #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GENERATIONSIZER_HPP
    31.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Sep 18 12:52:15 2013 -0400
    31.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Thu Sep 19 09:26:08 2013 +0200
    31.3 @@ -86,6 +86,11 @@
    31.4      set_alignment(_old_gen_alignment, intra_heap_alignment());
    31.5    }
    31.6  
    31.7 +  // Return the (conservative) maximum heap alignment
    31.8 +  static size_t conservative_max_heap_alignment() {
    31.9 +    return intra_heap_alignment();
   31.10 +  }
   31.11 +
   31.12    // For use by VM operations
   31.13    enum CollectionType {
   31.14      Scavenge,
   31.15 @@ -122,7 +127,7 @@
   31.16  
   31.17    // The alignment used for eden and survivors within the young gen
   31.18    // and for boundary between young gen and old gen.
   31.19 -  size_t intra_heap_alignment() const { return 64 * K * HeapWordSize; }
   31.20 +  static size_t intra_heap_alignment() { return 64 * K * HeapWordSize; }
   31.21  
   31.22    size_t capacity() const;
   31.23    size_t used() const;
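
For reference, a small standalone check, under the assumption of a 64-bit build where HeapWordSize is 8, of what the static intra_heap_alignment() above, and therefore ParallelScavengeHeap::conservative_max_heap_alignment(), evaluates to:

    #include <cassert>

    int main() {
      const long K = 1024;
      const long HeapWordSize = 8;              // assumption: LP64 build
      assert(64 * K * HeapWordSize == 512 * K); // 64 * K * HeapWordSize is 512 KB (524288 bytes)
      return 0;
    }
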
    32.1 --- a/src/share/vm/gc_interface/collectedHeap.cpp	Wed Sep 18 12:52:15 2013 -0400
    32.2 +++ b/src/share/vm/gc_interface/collectedHeap.cpp	Thu Sep 19 09:26:08 2013 +0200
    32.3 @@ -87,15 +87,15 @@
    32.4    const MetaspaceSizes meta_space(
    32.5        MetaspaceAux::allocated_capacity_bytes(),
    32.6        MetaspaceAux::allocated_used_bytes(),
    32.7 -      MetaspaceAux::reserved_in_bytes());
    32.8 +      MetaspaceAux::reserved_bytes());
    32.9    const MetaspaceSizes data_space(
   32.10        MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType),
   32.11        MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
   32.12 -      MetaspaceAux::reserved_in_bytes(Metaspace::NonClassType));
   32.13 +      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
   32.14    const MetaspaceSizes class_space(
   32.15        MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType),
   32.16        MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
   32.17 -      MetaspaceAux::reserved_in_bytes(Metaspace::ClassType));
   32.18 +      MetaspaceAux::reserved_bytes(Metaspace::ClassType));
   32.19  
   32.20    return MetaspaceSummary(meta_space, data_space, class_space);
   32.21  }
    33.1 --- a/src/share/vm/memory/collectorPolicy.cpp	Wed Sep 18 12:52:15 2013 -0400
    33.2 +++ b/src/share/vm/memory/collectorPolicy.cpp	Thu Sep 19 09:26:08 2013 +0200
    33.3 @@ -47,6 +47,11 @@
    33.4  
    33.5  // CollectorPolicy methods.
    33.6  
    33.7 +// Align down. If the aligning result in 0, return 'alignment'.
    33.8 +static size_t restricted_align_down(size_t size, size_t alignment) {
    33.9 +  return MAX2(alignment, align_size_down_(size, alignment));
   33.10 +}
   33.11 +
   33.12  void CollectorPolicy::initialize_flags() {
   33.13    assert(max_alignment() >= min_alignment(),
   33.14        err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
   33.15 @@ -59,18 +64,24 @@
   33.16      vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
   33.17    }
   33.18  
   33.19 -  if (MetaspaceSize > MaxMetaspaceSize) {
   33.20 -    MaxMetaspaceSize = MetaspaceSize;
   33.21 -  }
   33.22 -  MetaspaceSize = MAX2(min_alignment(), align_size_down_(MetaspaceSize, min_alignment()));
   33.23 -  // Don't increase Metaspace size limit above specified.
   33.24 -  MaxMetaspaceSize = align_size_down(MaxMetaspaceSize, max_alignment());
   33.25 -  if (MetaspaceSize > MaxMetaspaceSize) {
   33.26 -    MetaspaceSize = MaxMetaspaceSize;
   33.27 +  if (!is_size_aligned(MaxMetaspaceSize, max_alignment())) {
   33.28 +    FLAG_SET_ERGO(uintx, MaxMetaspaceSize,
   33.29 +        restricted_align_down(MaxMetaspaceSize, max_alignment()));
   33.30    }
   33.31  
   33.32 -  MinMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MinMetaspaceExpansion, min_alignment()));
   33.33 -  MaxMetaspaceExpansion = MAX2(min_alignment(), align_size_down_(MaxMetaspaceExpansion, min_alignment()));
   33.34 +  if (MetaspaceSize > MaxMetaspaceSize) {
   33.35 +    FLAG_SET_ERGO(uintx, MetaspaceSize, MaxMetaspaceSize);
   33.36 +  }
   33.37 +
   33.38 +  if (!is_size_aligned(MetaspaceSize, min_alignment())) {
   33.39 +    FLAG_SET_ERGO(uintx, MetaspaceSize,
   33.40 +        restricted_align_down(MetaspaceSize, min_alignment()));
   33.41 +  }
   33.42 +
   33.43 +  assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");
   33.44 +
   33.45 +  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment());
   33.46 +  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment());
   33.47  
   33.48    MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());
   33.49  
   33.50 @@ -145,6 +156,30 @@
   33.51    _all_soft_refs_clear = true;
   33.52  }
   33.53  
   33.54 +size_t CollectorPolicy::compute_max_alignment() {
   33.55 +  // The card marking array and the offset arrays for old generations are
   33.56 +  // committed in os pages as well. Make sure they are entirely full (to
   33.57 +  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
   33.58 +  // byte entry and the os page size is 4096, the maximum heap size should
   33.59 +  // be 512*4096 = 2MB aligned.
   33.60 +
   33.61 +  // There is only the GenRemSet in Hotspot and only the GenRemSet::CardTable
   33.62 +  // is supported.
   33.63 +  // Requirements of any new remembered set implementations must be added here.
   33.64 +  size_t alignment = GenRemSet::max_alignment_constraint(GenRemSet::CardTable);
   33.65 +
   33.66 +  // Parallel GC does its own alignment of the generations to avoid requiring a
   33.67 +  // large page (256M on some platforms) for the permanent generation.  The
   33.68 +  // other collectors should also be updated to do their own alignment and then
   33.69 +  // this use of lcm() should be removed.
   33.70 +  if (UseLargePages && !UseParallelGC) {
   33.71 +      // in presence of large pages we have to make sure that our
   33.72 +      // alignment is large page aware
   33.73 +      alignment = lcm(os::large_page_size(), alignment);
   33.74 +  }
   33.75 +
   33.76 +  return alignment;
   33.77 +}
   33.78  
   33.79  // GenCollectorPolicy methods.
   33.80  
   33.81 @@ -175,29 +210,6 @@
   33.82                                          GCTimeRatio);
   33.83  }
   33.84  
   33.85 -size_t GenCollectorPolicy::compute_max_alignment() {
   33.86 -  // The card marking array and the offset arrays for old generations are
   33.87 -  // committed in os pages as well. Make sure they are entirely full (to
   33.88 -  // avoid partial page problems), e.g. if 512 bytes heap corresponds to 1
   33.89 -  // byte entry and the os page size is 4096, the maximum heap size should
   33.90 -  // be 512*4096 = 2MB aligned.
   33.91 -  size_t alignment = GenRemSet::max_alignment_constraint(rem_set_name());
   33.92 -
   33.93 -  // Parallel GC does its own alignment of the generations to avoid requiring a
   33.94 -  // large page (256M on some platforms) for the permanent generation.  The
   33.95 -  // other collectors should also be updated to do their own alignment and then
   33.96 -  // this use of lcm() should be removed.
   33.97 -  if (UseLargePages && !UseParallelGC) {
   33.98 -      // in presence of large pages we have to make sure that our
   33.99 -      // alignment is large page aware
  33.100 -      alignment = lcm(os::large_page_size(), alignment);
  33.101 -  }
  33.102 -
  33.103 -  assert(alignment >= min_alignment(), "Must be");
  33.104 -
  33.105 -  return alignment;
  33.106 -}
  33.107 -
  33.108  void GenCollectorPolicy::initialize_flags() {
  33.109    // All sizes must be multiples of the generation granularity.
  33.110    set_min_alignment((uintx) Generation::GenGrain);
    34.1 --- a/src/share/vm/memory/collectorPolicy.hpp	Wed Sep 18 12:52:15 2013 -0400
    34.2 +++ b/src/share/vm/memory/collectorPolicy.hpp	Thu Sep 19 09:26:08 2013 +0200
    34.3 @@ -98,6 +98,9 @@
    34.4    {}
    34.5  
    34.6   public:
    34.7 +  // Return maximum heap alignment that may be imposed by the policy
    34.8 +  static size_t compute_max_alignment();
    34.9 +
   34.10    void set_min_alignment(size_t align)         { _min_alignment = align; }
   34.11    size_t min_alignment()                       { return _min_alignment; }
   34.12    void set_max_alignment(size_t align)         { _max_alignment = align; }
   34.13 @@ -234,9 +237,6 @@
   34.14    // Try to allocate space by expanding the heap.
   34.15    virtual HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
   34.16  
   34.17 -  // compute max heap alignment
   34.18 -  size_t compute_max_alignment();
   34.19 -
    34.20  // Scale the base_size by NewRatio according to
   34.21   //     result = base_size / (NewRatio + 1)
   34.22   // and align by min_alignment()
    35.1 --- a/src/share/vm/memory/genCollectedHeap.hpp	Wed Sep 18 12:52:15 2013 -0400
    35.2 +++ b/src/share/vm/memory/genCollectedHeap.hpp	Thu Sep 19 09:26:08 2013 +0200
    35.3 @@ -1,5 +1,5 @@
    35.4  /*
    35.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
    35.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
    35.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    35.8   *
    35.9   * This code is free software; you can redistribute it and/or modify it
   35.10 @@ -148,6 +148,11 @@
   35.11      return gen_policy()->size_policy();
   35.12    }
   35.13  
   35.14 +  // Return the (conservative) maximum heap alignment
   35.15 +  static size_t conservative_max_heap_alignment() {
   35.16 +    return Generation::GenGrain;
   35.17 +  }
   35.18 +
   35.19    size_t capacity() const;
   35.20    size_t used() const;
   35.21  
    36.1 --- a/src/share/vm/memory/metablock.cpp	Wed Sep 18 12:52:15 2013 -0400
    36.2 +++ b/src/share/vm/memory/metablock.cpp	Thu Sep 19 09:26:08 2013 +0200
    36.3 @@ -50,13 +50,6 @@
    36.4  // Chunks, change Chunks so that they can be allocated out of a VirtualSpace.
    36.5  size_t Metablock::_min_block_byte_size = sizeof(Metablock);
    36.6  
    36.7 -#ifdef ASSERT
    36.8 -size_t Metablock::_overhead =
    36.9 -  Chunk::aligned_overhead_size(sizeof(Metablock)) / BytesPerWord;
   36.10 -#else
   36.11 -size_t Metablock::_overhead = 0;
   36.12 -#endif
   36.13 -
   36.14  // New blocks returned by the Metaspace are zero initialized.
   36.15  // We should fix the constructors to not assume this instead.
   36.16  Metablock* Metablock::initialize(MetaWord* p, size_t word_size) {
    37.1 --- a/src/share/vm/memory/metablock.hpp	Wed Sep 18 12:52:15 2013 -0400
    37.2 +++ b/src/share/vm/memory/metablock.hpp	Thu Sep 19 09:26:08 2013 +0200
    37.3 @@ -48,7 +48,6 @@
    37.4      } _header;
    37.5    } _block;
    37.6    static size_t _min_block_byte_size;
    37.7 -  static size_t _overhead;
    37.8  
    37.9    typedef union block_t Block;
   37.10    typedef struct header_t Header;
   37.11 @@ -73,7 +72,6 @@
   37.12    void set_prev(Metablock* v) { _block._header._prev = v; }
   37.13  
   37.14    static size_t min_block_byte_size() { return _min_block_byte_size; }
   37.15 -  static size_t overhead() { return _overhead; }
   37.16  
   37.17    bool is_free()                 { return header()->_word_size != 0; }
   37.18    void clear_next()              { set_next(NULL); }
    38.1 --- a/src/share/vm/memory/metaspace.cpp	Wed Sep 18 12:52:15 2013 -0400
    38.2 +++ b/src/share/vm/memory/metaspace.cpp	Thu Sep 19 09:26:08 2013 +0200
    38.3 @@ -51,7 +51,7 @@
    38.4  // Parameters for stress mode testing
    38.5  const uint metadata_deallocate_a_lot_block = 10;
    38.6  const uint metadata_deallocate_a_lock_chunk = 3;
    38.7 -size_t const allocation_from_dictionary_limit = 64 * K;
    38.8 +size_t const allocation_from_dictionary_limit = 4 * K;
    38.9  
   38.10  MetaWord* last_allocated = 0;
   38.11  
   38.12 @@ -177,8 +177,8 @@
   38.13    void return_chunks(ChunkIndex index, Metachunk* chunks);
   38.14  
   38.15    // Total of the space in the free chunks list
   38.16 -  size_t free_chunks_total();
   38.17 -  size_t free_chunks_total_in_bytes();
   38.18 +  size_t free_chunks_total_words();
   38.19 +  size_t free_chunks_total_bytes();
   38.20  
   38.21    // Number of chunks in the free chunks list
   38.22    size_t free_chunks_count();
   38.23 @@ -228,6 +228,10 @@
   38.24    BlockTreeDictionary* _dictionary;
   38.25    static Metablock* initialize_free_chunk(MetaWord* p, size_t word_size);
   38.26  
   38.27 +  // Only allocate and split from freelist if the size of the allocation
   38.28 +  // is at least 1/4th the size of the available block.
   38.29 +  const static int WasteMultiplier = 4;
   38.30 +
   38.31    // Accessors
   38.32    BlockTreeDictionary* dictionary() const { return _dictionary; }
   38.33  
   38.34 @@ -287,6 +291,10 @@
   38.35    MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
   38.36    MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
   38.37  
   38.38 +  size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
   38.39 +  size_t expanded_words() const  { return _virtual_space.committed_size() / BytesPerWord; }
   38.40 +  size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
   38.41 +
   38.42    // address of next available space in _virtual_space;
   38.43    // Accessors
   38.44    VirtualSpaceNode* next() { return _next; }
   38.45 @@ -323,12 +331,10 @@
   38.46  
   38.47    // Allocate a chunk from the virtual space and return it.
   38.48    Metachunk* get_chunk_vs(size_t chunk_word_size);
   38.49 -  Metachunk* get_chunk_vs_with_expand(size_t chunk_word_size);
   38.50  
   38.51    // Expands/shrinks the committed space in a virtual space.  Delegates
   38.52    // to Virtualspace
   38.53    bool expand_by(size_t words, bool pre_touch = false);
   38.54 -  bool shrink_by(size_t words);
   38.55  
   38.56    // In preparation for deleting this node, remove all the chunks
   38.57    // in the node from any freelist.
   38.58 @@ -336,8 +342,6 @@
   38.59  
   38.60  #ifdef ASSERT
   38.61    // Debug support
   38.62 -  static void verify_virtual_space_total();
   38.63 -  static void verify_virtual_space_count();
   38.64    void mangle();
   38.65  #endif
   38.66  
   38.67 @@ -423,10 +427,13 @@
   38.68    // Can this virtual list allocate >1 spaces?  Also, used to determine
   38.69    // whether to allocate unlimited small chunks in this virtual space
   38.70    bool _is_class;
   38.71 -  bool can_grow() const { return !is_class() || !UseCompressedKlassPointers; }
   38.72 -
   38.73 -  // Sum of space in all virtual spaces and number of virtual spaces
   38.74 -  size_t _virtual_space_total;
   38.75 +  bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }
   38.76 +
   38.77 +  // Sum of reserved and committed memory in the virtual spaces
   38.78 +  size_t _reserved_words;
   38.79 +  size_t _committed_words;
   38.80 +
   38.81 +  // Number of virtual spaces
   38.82    size_t _virtual_space_count;
   38.83  
   38.84    ~VirtualSpaceList();
   38.85 @@ -440,7 +447,7 @@
   38.86      _current_virtual_space = v;
   38.87    }
   38.88  
   38.89 -  void link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size);
   38.90 +  void link_vs(VirtualSpaceNode* new_entry);
   38.91  
   38.92    // Get another virtual space and add it to the list.  This
   38.93    // is typically prompted by a failed attempt to allocate a chunk
   38.94 @@ -457,6 +464,8 @@
   38.95                             size_t grow_chunks_by_words,
   38.96                             size_t medium_chunk_bunch);
   38.97  
   38.98 +  bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false);
   38.99 +
  38.100    // Get the first chunk for a Metaspace.  Used for
  38.101    // special cases such as the boot class loader, reflection
  38.102    // class loader and anonymous class loader.
  38.103 @@ -472,10 +481,15 @@
  38.104    // Allocate the first virtualspace.
  38.105    void initialize(size_t word_size);
  38.106  
  38.107 -  size_t virtual_space_total() { return _virtual_space_total; }
  38.108 -
  38.109 -  void inc_virtual_space_total(size_t v);
  38.110 -  void dec_virtual_space_total(size_t v);
  38.111 +  size_t reserved_words()  { return _reserved_words; }
  38.112 +  size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  38.113 +  size_t committed_words() { return _committed_words; }
  38.114 +  size_t committed_bytes() { return committed_words() * BytesPerWord; }
  38.115 +
  38.116 +  void inc_reserved_words(size_t v);
  38.117 +  void dec_reserved_words(size_t v);
  38.118 +  void inc_committed_words(size_t v);
  38.119 +  void dec_committed_words(size_t v);
  38.120    void inc_virtual_space_count();
  38.121    void dec_virtual_space_count();
  38.122  
  38.123 @@ -623,6 +637,7 @@
  38.124  
  38.125    // Add chunk to the list of chunks in use
  38.126    void add_chunk(Metachunk* v, bool make_current);
  38.127 +  void retire_current_chunk();
  38.128  
  38.129    Mutex* lock() const { return _lock; }
  38.130  
  38.131 @@ -722,9 +737,7 @@
  38.132      // MinChunkSize is a placeholder for the real minimum size JJJ
  38.133      size_t byte_size = word_size * BytesPerWord;
  38.134  
  38.135 -    size_t byte_size_with_overhead = byte_size + Metablock::overhead();
  38.136 -
  38.137 -    size_t raw_bytes_size = MAX2(byte_size_with_overhead,
  38.138 +    size_t raw_bytes_size = MAX2(byte_size,
  38.139                                   Metablock::min_block_byte_size());
  38.140      raw_bytes_size = ARENA_ALIGN(raw_bytes_size);
  38.141      size_t raw_word_size = raw_bytes_size / BytesPerWord;
  38.142 @@ -807,12 +820,25 @@
  38.143    }
  38.144  
  38.145    Metablock* free_block =
  38.146 -    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::exactly);
  38.147 +    dictionary()->get_chunk(word_size, FreeBlockDictionary<Metablock>::atLeast);
  38.148    if (free_block == NULL) {
  38.149      return NULL;
  38.150    }
  38.151  
  38.152 -  return (MetaWord*) free_block;
  38.153 +  const size_t block_size = free_block->size();
  38.154 +  if (block_size > WasteMultiplier * word_size) {
  38.155 +    return_block((MetaWord*)free_block, block_size);
  38.156 +    return NULL;
  38.157 +  }
  38.158 +
  38.159 +  MetaWord* new_block = (MetaWord*)free_block;
  38.160 +  assert(block_size >= word_size, "Incorrect size of block from freelist");
  38.161 +  const size_t unused = block_size - word_size;
  38.162 +  if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
  38.163 +    return_block(new_block + word_size, unused);
  38.164 +  }
  38.165 +
  38.166 +  return new_block;
  38.167  }
  38.168  
  38.169  void BlockFreelist::print_on(outputStream* st) const {
  38.170 @@ -855,9 +881,9 @@
  38.171  
  38.172    if (!is_available(chunk_word_size)) {
  38.173      if (TraceMetadataChunkAllocation) {
  38.174 -      tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
  38.175 +      gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
  38.176        // Dump some information about the virtual space that is nearly full
  38.177 -      print_on(tty);
  38.178 +      print_on(gclog_or_tty);
  38.179      }
  38.180      return NULL;
  38.181    }
  38.182 @@ -878,20 +904,11 @@
  38.183    if (TraceMetavirtualspaceAllocation && !result) {
  38.184      gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
  38.185                             "for byte size " SIZE_FORMAT, bytes);
  38.186 -    virtual_space()->print();
  38.187 +    virtual_space()->print_on(gclog_or_tty);
  38.188    }
  38.189    return result;
  38.190  }
  38.191  
  38.192 -// Shrink the virtual space (commit more of the reserved space)
  38.193 -bool VirtualSpaceNode::shrink_by(size_t words) {
  38.194 -  size_t bytes = words * BytesPerWord;
  38.195 -  virtual_space()->shrink_by(bytes);
  38.196 -  return true;
  38.197 -}
  38.198 -
  38.199 -// Add another chunk to the chunk list.
  38.200 -
  38.201  Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) {
  38.202    assert_lock_strong(SpaceManager::expand_lock());
  38.203    Metachunk* result = take_from_committed(chunk_word_size);
  38.204 @@ -901,23 +918,6 @@
  38.205    return result;
  38.206  }
  38.207  
  38.208 -Metachunk* VirtualSpaceNode::get_chunk_vs_with_expand(size_t chunk_word_size) {
  38.209 -  assert_lock_strong(SpaceManager::expand_lock());
  38.210 -
  38.211 -  Metachunk* new_chunk = get_chunk_vs(chunk_word_size);
  38.212 -
  38.213 -  if (new_chunk == NULL) {
  38.214 -    // Only a small part of the virtualspace is committed when first
  38.215 -    // allocated so committing more here can be expected.
  38.216 -    size_t page_size_words = os::vm_page_size() / BytesPerWord;
  38.217 -    size_t aligned_expand_vs_by_words = align_size_up(chunk_word_size,
  38.218 -                                                    page_size_words);
  38.219 -    expand_by(aligned_expand_vs_by_words, false);
  38.220 -    new_chunk = get_chunk_vs(chunk_word_size);
  38.221 -  }
  38.222 -  return new_chunk;
  38.223 -}
  38.224 -
  38.225  bool VirtualSpaceNode::initialize() {
  38.226  
  38.227    if (!_rs.is_reserved()) {
  38.228 @@ -977,13 +977,22 @@
  38.229    }
  38.230  }
  38.231  
  38.232 -void VirtualSpaceList::inc_virtual_space_total(size_t v) {
  38.233 +void VirtualSpaceList::inc_reserved_words(size_t v) {
  38.234    assert_lock_strong(SpaceManager::expand_lock());
  38.235 -  _virtual_space_total = _virtual_space_total + v;
  38.236 +  _reserved_words = _reserved_words + v;
  38.237  }
  38.238 -void VirtualSpaceList::dec_virtual_space_total(size_t v) {
  38.239 +void VirtualSpaceList::dec_reserved_words(size_t v) {
  38.240    assert_lock_strong(SpaceManager::expand_lock());
  38.241 -  _virtual_space_total = _virtual_space_total - v;
  38.242 +  _reserved_words = _reserved_words - v;
  38.243 +}
  38.244 +
  38.245 +void VirtualSpaceList::inc_committed_words(size_t v) {
  38.246 +  assert_lock_strong(SpaceManager::expand_lock());
  38.247 +  _committed_words = _committed_words + v;
  38.248 +}
  38.249 +void VirtualSpaceList::dec_committed_words(size_t v) {
  38.250 +  assert_lock_strong(SpaceManager::expand_lock());
  38.251 +  _committed_words = _committed_words - v;
  38.252  }
  38.253  
  38.254  void VirtualSpaceList::inc_virtual_space_count() {
  38.255 @@ -1034,7 +1043,8 @@
  38.256        }
  38.257  
  38.258        vsl->purge(chunk_manager());
  38.259 -      dec_virtual_space_total(vsl->reserved()->word_size());
  38.260 +      dec_reserved_words(vsl->reserved_words());
  38.261 +      dec_committed_words(vsl->committed_words());
  38.262        dec_virtual_space_count();
  38.263        purged_vsl = vsl;
  38.264        delete vsl;
  38.265 @@ -1062,12 +1072,12 @@
  38.266      // Sum used region [bottom, top) in each virtualspace
  38.267      allocated_by_vs += vsl->used_words_in_vs();
  38.268    }
  38.269 -  assert(allocated_by_vs >= chunk_manager()->free_chunks_total(),
  38.270 +  assert(allocated_by_vs >= chunk_manager()->free_chunks_total_words(),
  38.271      err_msg("Total in free chunks " SIZE_FORMAT
  38.272              " greater than total from virtual_spaces " SIZE_FORMAT,
  38.273 -            allocated_by_vs, chunk_manager()->free_chunks_total()));
  38.274 +            allocated_by_vs, chunk_manager()->free_chunks_total_words()));
  38.275    size_t used =
  38.276 -    allocated_by_vs - chunk_manager()->free_chunks_total();
  38.277 +    allocated_by_vs - chunk_manager()->free_chunks_total_words();
  38.278    return used;
  38.279  }
  38.280  
  38.281 @@ -1088,7 +1098,8 @@
  38.282                                     _is_class(false),
  38.283                                     _virtual_space_list(NULL),
  38.284                                     _current_virtual_space(NULL),
  38.285 -                                   _virtual_space_total(0),
  38.286 +                                   _reserved_words(0),
  38.287 +                                   _committed_words(0),
  38.288                                     _virtual_space_count(0) {
  38.289    MutexLockerEx cl(SpaceManager::expand_lock(),
  38.290                     Mutex::_no_safepoint_check_flag);
  38.291 @@ -1105,7 +1116,8 @@
  38.292                                     _is_class(true),
  38.293                                     _virtual_space_list(NULL),
  38.294                                     _current_virtual_space(NULL),
  38.295 -                                   _virtual_space_total(0),
  38.296 +                                   _reserved_words(0),
  38.297 +                                   _committed_words(0),
  38.298                                     _virtual_space_count(0) {
  38.299    MutexLockerEx cl(SpaceManager::expand_lock(),
  38.300                     Mutex::_no_safepoint_check_flag);
  38.301 @@ -1115,7 +1127,7 @@
  38.302    _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
  38.303    _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
  38.304    assert(succeeded, " VirtualSpaceList initialization should not fail");
  38.305 -  link_vs(class_entry, rs.size()/BytesPerWord);
  38.306 +  link_vs(class_entry);
  38.307  }
  38.308  
  38.309  size_t VirtualSpaceList::free_bytes() {
  38.310 @@ -1138,31 +1150,47 @@
  38.311      delete new_entry;
  38.312      return false;
  38.313    } else {
  38.314 +    assert(new_entry->reserved_words() == vs_word_size, "Must be");
  38.315      // ensure lock-free iteration sees fully initialized node
  38.316      OrderAccess::storestore();
  38.317 -    link_vs(new_entry, vs_word_size);
  38.318 +    link_vs(new_entry);
  38.319      return true;
  38.320    }
  38.321  }
  38.322  
  38.323 -void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry, size_t vs_word_size) {
  38.324 +void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) {
  38.325    if (virtual_space_list() == NULL) {
  38.326        set_virtual_space_list(new_entry);
  38.327    } else {
  38.328      current_virtual_space()->set_next(new_entry);
  38.329    }
  38.330    set_current_virtual_space(new_entry);
  38.331 -  inc_virtual_space_total(vs_word_size);
  38.332 +  inc_reserved_words(new_entry->reserved_words());
  38.333 +  inc_committed_words(new_entry->committed_words());
  38.334    inc_virtual_space_count();
  38.335  #ifdef ASSERT
  38.336    new_entry->mangle();
  38.337  #endif
  38.338    if (TraceMetavirtualspaceAllocation && Verbose) {
  38.339      VirtualSpaceNode* vsl = current_virtual_space();
  38.340 -    vsl->print_on(tty);
  38.341 +    vsl->print_on(gclog_or_tty);
  38.342    }
  38.343  }
  38.344  
  38.345 +bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) {
  38.346 +  size_t before = node->committed_words();
  38.347 +
  38.348 +  bool result = node->expand_by(word_size, pre_touch);
  38.349 +
  38.350 +  size_t after = node->committed_words();
  38.351 +
  38.352 +  // after and before can be the same if the memory was pre-committed.
  38.353 +  assert(after >= before, "Must be");
  38.354 +  inc_committed_words(after - before);
  38.355 +
  38.356 +  return result;
  38.357 +}
  38.358 +
  38.359  Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
  38.360                                             size_t grow_chunks_by_words,
  38.361                                             size_t medium_chunk_bunch) {
  38.362 @@ -1186,7 +1214,7 @@
  38.363      size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
  38.364                                                          page_size_words);
  38.365      bool vs_expanded =
  38.366 -      current_virtual_space()->expand_by(aligned_expand_vs_by_words, false);
  38.367 +      expand_by(current_virtual_space(), aligned_expand_vs_by_words);
  38.368      if (!vs_expanded) {
  38.369        // Should the capacity of the metaspaces be expanded for
  38.370        // this allocation?  If it's the virtual space for classes and is
  38.371 @@ -1197,7 +1225,14 @@
  38.372              MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
  38.373          if (grow_vs(grow_vs_words)) {
  38.374            // Got it.  It's on the list now.  Get a chunk from it.
  38.375 -          next = current_virtual_space()->get_chunk_vs_with_expand(grow_chunks_by_words);
  38.376 +          assert(current_virtual_space()->expanded_words() == 0,
   38.377 +              "New virtual space nodes should not have expanded");
  38.378 +
  38.379 +          size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
  38.380 +                                                              page_size_words);
  38.381 +          // We probably want to expand by aligned_expand_vs_by_words here.
  38.382 +          expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
  38.383 +          next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  38.384          }
  38.385        } else {
  38.386          // Allocation will fail and induce a GC
  38.387 @@ -1307,7 +1342,7 @@
  38.388    // reserved space, because this is a larger space prereserved for compressed
  38.389    // class pointers.
  38.390    if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
  38.391 -    size_t real_allocated = Metaspace::space_list()->virtual_space_total() +
  38.392 +    size_t real_allocated = Metaspace::space_list()->reserved_words() +
  38.393                MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
  38.394      if (real_allocated >= MaxMetaspaceSize) {
  38.395        return false;
  38.396 @@ -1508,7 +1543,7 @@
  38.397                                 sm->sum_count_in_chunks_in_use());
  38.398          dummy_chunk->print_on(gclog_or_tty);
  38.399          gclog_or_tty->print_cr("  Free chunks total %d  count %d",
  38.400 -                               vsl->chunk_manager()->free_chunks_total(),
  38.401 +                               vsl->chunk_manager()->free_chunks_total_words(),
  38.402                                 vsl->chunk_manager()->free_chunks_count());
  38.403        }
  38.404      }
  38.405 @@ -1565,12 +1600,12 @@
  38.406  
  38.407  // ChunkManager methods
  38.408  
  38.409 -size_t ChunkManager::free_chunks_total() {
  38.410 +size_t ChunkManager::free_chunks_total_words() {
  38.411    return _free_chunks_total;
  38.412  }
  38.413  
  38.414 -size_t ChunkManager::free_chunks_total_in_bytes() {
  38.415 -  return free_chunks_total() * BytesPerWord;
  38.416 +size_t ChunkManager::free_chunks_total_bytes() {
  38.417 +  return free_chunks_total_words() * BytesPerWord;
  38.418  }
  38.419  
  38.420  size_t ChunkManager::free_chunks_count() {
  38.421 @@ -1698,9 +1733,9 @@
  38.422    assert_lock_strong(SpaceManager::expand_lock());
  38.423    slow_locked_verify();
  38.424    if (TraceMetadataChunkAllocation) {
  38.425 -    tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
  38.426 -                  PTR_FORMAT "  size " SIZE_FORMAT,
  38.427 -                  chunk, chunk->word_size());
  38.428 +    gclog_or_tty->print_cr("ChunkManager::chunk_freelist_deallocate: chunk "
  38.429 +                           PTR_FORMAT "  size " SIZE_FORMAT,
  38.430 +                           chunk, chunk->word_size());
  38.431    }
  38.432    free_chunks_put(chunk);
  38.433  }
  38.434 @@ -1729,9 +1764,9 @@
  38.435      dec_free_chunks_total(chunk->capacity_word_size());
  38.436  
  38.437      if (TraceMetadataChunkAllocation && Verbose) {
  38.438 -      tty->print_cr("ChunkManager::free_chunks_get: free_list "
  38.439 -                    PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
  38.440 -                    free_list, chunk, chunk->word_size());
  38.441 +      gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
  38.442 +                             PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
  38.443 +                             free_list, chunk, chunk->word_size());
  38.444      }
  38.445    } else {
  38.446      chunk = humongous_dictionary()->get_chunk(
  38.447 @@ -1741,10 +1776,10 @@
  38.448      if (chunk != NULL) {
  38.449        if (TraceMetadataHumongousAllocation) {
  38.450          size_t waste = chunk->word_size() - word_size;
  38.451 -        tty->print_cr("Free list allocate humongous chunk size " SIZE_FORMAT
  38.452 -                      " for requested size " SIZE_FORMAT
  38.453 -                      " waste " SIZE_FORMAT,
  38.454 -                      chunk->word_size(), word_size, waste);
  38.455 +        gclog_or_tty->print_cr("Free list allocate humongous chunk size "
  38.456 +                               SIZE_FORMAT " for requested size " SIZE_FORMAT
  38.457 +                               " waste " SIZE_FORMAT,
  38.458 +                               chunk->word_size(), word_size, waste);
  38.459        }
  38.460        // Chunk is being removed from the chunks free list.
  38.461        dec_free_chunks_total(chunk->capacity_word_size());
  38.462 @@ -1786,10 +1821,10 @@
  38.463      } else {
  38.464        list_count = humongous_dictionary()->total_count();
  38.465      }
  38.466 -    tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
  38.467 -               PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
  38.468 -               this, chunk, chunk->word_size(), list_count);
  38.469 -    locked_print_free_chunks(tty);
  38.470 +    gclog_or_tty->print("ChunkManager::chunk_freelist_allocate: " PTR_FORMAT " chunk "
  38.471 +                        PTR_FORMAT "  size " SIZE_FORMAT " count " SIZE_FORMAT " ",
  38.472 +                        this, chunk, chunk->word_size(), list_count);
  38.473 +    locked_print_free_chunks(gclog_or_tty);
  38.474    }
  38.475  
  38.476    return chunk;
  38.477 @@ -2278,6 +2313,7 @@
  38.478    ChunkIndex index = ChunkManager::list_index(new_chunk->word_size());
  38.479  
  38.480    if (index != HumongousIndex) {
  38.481 +    retire_current_chunk();
  38.482      set_current_chunk(new_chunk);
  38.483      new_chunk->set_next(chunks_in_use(index));
  38.484      set_chunks_in_use(index, new_chunk);
  38.485 @@ -2308,11 +2344,21 @@
  38.486                          sum_count_in_chunks_in_use());
  38.487      new_chunk->print_on(gclog_or_tty);
  38.488      if (vs_list() != NULL) {
  38.489 -      vs_list()->chunk_manager()->locked_print_free_chunks(tty);
  38.490 +      vs_list()->chunk_manager()->locked_print_free_chunks(gclog_or_tty);
  38.491      }
  38.492    }
  38.493  }
  38.494  
  38.495 +void SpaceManager::retire_current_chunk() {
  38.496 +  if (current_chunk() != NULL) {
  38.497 +    size_t remaining_words = current_chunk()->free_word_size();
  38.498 +    if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
  38.499 +      block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
  38.500 +      inc_used_metrics(remaining_words);
  38.501 +    }
  38.502 +  }
  38.503 +}
  38.504 +
  38.505  Metachunk* SpaceManager::get_new_chunk(size_t word_size,
  38.506                                         size_t grow_chunks_by_words) {
  38.507  
  38.508 @@ -2320,10 +2366,10 @@
  38.509                                               grow_chunks_by_words,
  38.510                                               medium_chunk_bunch());
  38.511  
  38.512 -  if (TraceMetadataHumongousAllocation &&
  38.513 +  if (TraceMetadataHumongousAllocation && next != NULL &&
  38.514        SpaceManager::is_humongous(next->word_size())) {
  38.515 -    gclog_or_tty->print_cr("  new humongous chunk word size " PTR_FORMAT,
  38.516 -                           next->word_size());
  38.517 +    gclog_or_tty->print_cr("  new humongous chunk word size "
  38.518 +                           PTR_FORMAT, next->word_size());
  38.519    }
  38.520  
  38.521    return next;
  38.522 @@ -2441,9 +2487,6 @@
  38.523           curr = curr->next()) {
  38.524        out->print("%d) ", i++);
  38.525        curr->print_on(out);
  38.526 -      if (TraceMetadataChunkAllocation && Verbose) {
  38.527 -        block_freelists()->print_on(out);
  38.528 -      }
  38.529        curr_total += curr->word_size();
  38.530        used += curr->used_word_size();
  38.531        capacity += curr->capacity_word_size();
  38.532 @@ -2451,6 +2494,10 @@
  38.533      }
  38.534    }
  38.535  
  38.536 +  if (TraceMetadataChunkAllocation && Verbose) {
  38.537 +    block_freelists()->print_on(out);
  38.538 +  }
  38.539 +
  38.540    size_t free = current_chunk() == NULL ? 0 : current_chunk()->free_word_size();
  38.541    // Free space isn't wasted.
  38.542    waste -= free;
  38.543 @@ -2538,13 +2585,13 @@
  38.544    return used * BytesPerWord;
  38.545  }
  38.546  
  38.547 -size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
  38.548 +size_t MetaspaceAux::free_bytes_slow(Metaspace::MetadataType mdtype) {
  38.549    size_t free = 0;
  38.550    ClassLoaderDataGraphMetaspaceIterator iter;
  38.551    while (iter.repeat()) {
  38.552      Metaspace* msp = iter.get_next();
  38.553      if (msp != NULL) {
  38.554 -      free += msp->free_words(mdtype);
  38.555 +      free += msp->free_words_slow(mdtype);
  38.556      }
  38.557    }
  38.558    return free * BytesPerWord;
  38.559 @@ -2567,34 +2614,56 @@
  38.560    return capacity * BytesPerWord;
  38.561  }
  38.562  
  38.563 -size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
  38.564 +size_t MetaspaceAux::capacity_bytes_slow() {
  38.565 +#ifdef PRODUCT
  38.566 +  // Use allocated_capacity_bytes() in PRODUCT instead of this function.
  38.567 +  guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
  38.568 +#endif
  38.569 +  size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
  38.570 +  size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
  38.571 +  assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
  38.572 +      err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
  38.573 +        " class_capacity + non_class_capacity " SIZE_FORMAT
  38.574 +        " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
  38.575 +        allocated_capacity_bytes(), class_capacity + non_class_capacity,
  38.576 +        class_capacity, non_class_capacity));
  38.577 +
  38.578 +  return class_capacity + non_class_capacity;
  38.579 +}
  38.580 +
  38.581 +size_t MetaspaceAux::reserved_bytes(Metaspace::MetadataType mdtype) {
  38.582    VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  38.583 -  return list == NULL ? 0 : list->virtual_space_total();
  38.584 +  return list == NULL ? 0 : list->reserved_bytes();
  38.585  }
  38.586  
  38.587 -size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
  38.588 -
  38.589 -size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
  38.590 +size_t MetaspaceAux::committed_bytes(Metaspace::MetadataType mdtype) {
  38.591 +  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  38.592 +  return list == NULL ? 0 : list->committed_bytes();
  38.593 +}
  38.594 +
  38.595 +size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
  38.596 +
  38.597 +size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
  38.598    VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  38.599    if (list == NULL) {
  38.600      return 0;
  38.601    }
  38.602    ChunkManager* chunk = list->chunk_manager();
  38.603    chunk->slow_verify();
  38.604 -  return chunk->free_chunks_total();
  38.605 +  return chunk->free_chunks_total_words();
  38.606  }
  38.607  
  38.608 -size_t MetaspaceAux::free_chunks_total_in_bytes(Metaspace::MetadataType mdtype) {
  38.609 -  return free_chunks_total(mdtype) * BytesPerWord;
  38.610 +size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
  38.611 +  return free_chunks_total_words(mdtype) * BytesPerWord;
  38.612  }
  38.613  
  38.614 -size_t MetaspaceAux::free_chunks_total() {
  38.615 -  return free_chunks_total(Metaspace::ClassType) +
  38.616 -         free_chunks_total(Metaspace::NonClassType);
  38.617 +size_t MetaspaceAux::free_chunks_total_words() {
  38.618 +  return free_chunks_total_words(Metaspace::ClassType) +
  38.619 +         free_chunks_total_words(Metaspace::NonClassType);
  38.620  }
  38.621  
  38.622 -size_t MetaspaceAux::free_chunks_total_in_bytes() {
  38.623 -  return free_chunks_total() * BytesPerWord;
  38.624 +size_t MetaspaceAux::free_chunks_total_bytes() {
  38.625 +  return free_chunks_total_words() * BytesPerWord;
  38.626  }
  38.627  
  38.628  void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
  38.629 @@ -2605,14 +2674,14 @@
  38.630                          "("  SIZE_FORMAT ")",
  38.631                          prev_metadata_used,
  38.632                          allocated_used_bytes(),
  38.633 -                        reserved_in_bytes());
  38.634 +                        reserved_bytes());
  38.635    } else {
  38.636      gclog_or_tty->print(" "  SIZE_FORMAT "K"
  38.637                          "->" SIZE_FORMAT "K"
  38.638                          "("  SIZE_FORMAT "K)",
  38.639 -                        prev_metadata_used / K,
  38.640 -                        allocated_used_bytes() / K,
  38.641 -                        reserved_in_bytes()/ K);
  38.642 +                        prev_metadata_used/K,
  38.643 +                        allocated_used_bytes()/K,
  38.644 +                        reserved_bytes()/K);
  38.645    }
  38.646  
  38.647    gclog_or_tty->print("]");
  38.648 @@ -2625,14 +2694,14 @@
  38.649    out->print_cr(" Metaspace total "
  38.650                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  38.651                  " reserved " SIZE_FORMAT "K",
  38.652 -                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_in_bytes()/K);
  38.653 +                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);
  38.654  
  38.655    out->print_cr("  data space     "
  38.656                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  38.657                  " reserved " SIZE_FORMAT "K",
  38.658                  allocated_capacity_bytes(nct)/K,
  38.659                  allocated_used_bytes(nct)/K,
  38.660 -                reserved_in_bytes(nct)/K);
  38.661 +                reserved_bytes(nct)/K);
  38.662    if (Metaspace::using_class_space()) {
  38.663      Metaspace::MetadataType ct = Metaspace::ClassType;
  38.664      out->print_cr("  class space    "
  38.665 @@ -2640,17 +2709,17 @@
  38.666                    " reserved " SIZE_FORMAT "K",
  38.667                    allocated_capacity_bytes(ct)/K,
  38.668                    allocated_used_bytes(ct)/K,
  38.669 -                  reserved_in_bytes(ct)/K);
  38.670 +                  reserved_bytes(ct)/K);
  38.671    }
  38.672  }
  38.673  
  38.674  // Print information for class space and data space separately.
  38.675  // This is almost the same as above.
  38.676  void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
  38.677 -  size_t free_chunks_capacity_bytes = free_chunks_total_in_bytes(mdtype);
  38.678 +  size_t free_chunks_capacity_bytes = free_chunks_total_bytes(mdtype);
  38.679    size_t capacity_bytes = capacity_bytes_slow(mdtype);
  38.680    size_t used_bytes = used_bytes_slow(mdtype);
  38.681 -  size_t free_bytes = free_in_bytes(mdtype);
  38.682 +  size_t free_bytes = free_bytes_slow(mdtype);
  38.683    size_t used_and_free = used_bytes + free_bytes +
  38.684                             free_chunks_capacity_bytes;
  38.685    out->print_cr("  Chunk accounting: used in chunks " SIZE_FORMAT
  38.686 @@ -2836,7 +2905,7 @@
  38.687  // to work with compressed klass pointers.
  38.688  bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
  38.689    assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
  38.690 -  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
  38.691 +  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  38.692    address lower_base = MIN2((address)metaspace_base, cds_base);
  38.693    address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
  38.694                                  (address)(metaspace_base + class_metaspace_size()));
  38.695 @@ -2846,7 +2915,7 @@
  38.696  // Try to allocate the metaspace at the requested addr.
  38.697  void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  38.698    assert(using_class_space(), "called improperly");
  38.699 -  assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
  38.700 +  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  38.701    assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
  38.702           "Metaspace size is too big");
  38.703  
  38.704 @@ -2869,9 +2938,9 @@
  38.705  
  38.706      // If no successful allocation then try to allocate the space anywhere.  If
  38.707      // that fails then OOM doom.  At this point we cannot try allocating the
  38.708 -    // metaspace as if UseCompressedKlassPointers is off because too much
  38.709 -    // initialization has happened that depends on UseCompressedKlassPointers.
  38.710 -    // So, UseCompressedKlassPointers cannot be turned off at this point.
  38.711 +    // metaspace as if UseCompressedClassPointers is off because too much
  38.712 +    // initialization has happened that depends on UseCompressedClassPointers.
  38.713 +    // So, UseCompressedClassPointers cannot be turned off at this point.
  38.714      if (!metaspace_rs.is_reserved()) {
  38.715        metaspace_rs = ReservedSpace(class_metaspace_size(),
  38.716                                     os::vm_allocation_granularity(), false);
  38.717 @@ -2904,12 +2973,12 @@
  38.718    }
  38.719  }
  38.720  
  38.721 -// For UseCompressedKlassPointers the class space is reserved above the top of
  38.722 +// For UseCompressedClassPointers the class space is reserved above the top of
  38.723  // the Java heap.  The argument passed in is at the base of the compressed space.
  38.724  void Metaspace::initialize_class_space(ReservedSpace rs) {
  38.725    // The reserved space size may be bigger because of alignment, esp with UseLargePages
  38.726 -  assert(rs.size() >= ClassMetaspaceSize,
  38.727 -         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
  38.728 +  assert(rs.size() >= CompressedClassSpaceSize,
  38.729 +         err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
  38.730    assert(using_class_space(), "Must be using class space");
  38.731    _class_space_list = new VirtualSpaceList(rs);
  38.732  }
  38.733 @@ -2921,7 +2990,7 @@
  38.734    int max_alignment = os::vm_page_size();
  38.735    size_t cds_total = 0;
  38.736  
  38.737 -  set_class_metaspace_size(align_size_up(ClassMetaspaceSize,
  38.738 +  set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
  38.739                                           os::vm_allocation_granularity()));
  38.740  
  38.741    MetaspaceShared::set_max_alignment(max_alignment);
  38.742 @@ -2941,8 +3010,8 @@
  38.743  #ifdef _LP64
  38.744      // Set the compressed klass pointer base so that decoding of these pointers works
  38.745      // properly when creating the shared archive.
  38.746 -    assert(UseCompressedOops && UseCompressedKlassPointers,
  38.747 -      "UseCompressedOops and UseCompressedKlassPointers must be set");
  38.748 +    assert(UseCompressedOops && UseCompressedClassPointers,
  38.749 +      "UseCompressedOops and UseCompressedClassPointers must be set");
  38.750      Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
  38.751      if (TraceMetavirtualspaceAllocation && Verbose) {
  38.752        gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
  38.753 @@ -2979,7 +3048,7 @@
  38.754      }
  38.755  
  38.756  #ifdef _LP64
  38.757 -    // If UseCompressedKlassPointers is set then allocate the metaspace area
  38.758 +    // If UseCompressedClassPointers is set then allocate the metaspace area
  38.759      // above the heap and above the CDS area (if it exists).
  38.760      if (using_class_space()) {
  38.761        if (UseSharedSpaces) {
  38.762 @@ -2997,7 +3066,7 @@
  38.763      // on the medium chunk list.   The next chunk will be small and progress
  38.764      // from there.  This size calculated by -version.
  38.765      _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
  38.766 -                                       (ClassMetaspaceSize/BytesPerWord)*2);
  38.767 +                                       (CompressedClassSpaceSize/BytesPerWord)*2);
  38.768      _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
  38.769      // Arbitrarily set the initial virtual space to a multiple
  38.770      // of the boot class loader size.
  38.771 @@ -3064,7 +3133,7 @@
  38.772  
  38.773  MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
  38.774    // DumpSharedSpaces doesn't use class metadata area (yet)
  38.775 -  // Also, don't use class_vsm() unless UseCompressedKlassPointers is true.
  38.776 +  // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
  38.777    if (mdtype == ClassType && using_class_space()) {
  38.778      return  class_vsm()->allocate(word_size);
  38.779    } else {
  38.780 @@ -3103,7 +3172,7 @@
  38.781    }
  38.782  }
  38.783  
  38.784 -size_t Metaspace::free_words(MetadataType mdtype) const {
  38.785 +size_t Metaspace::free_words_slow(MetadataType mdtype) const {
  38.786    if (mdtype == ClassType) {
  38.787      return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
  38.788    } else {
  38.789 @@ -3213,7 +3282,7 @@
  38.790          MetaspaceAux::dump(gclog_or_tty);
  38.791        }
  38.792        // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  38.793 -      const char* space_string = (mdtype == ClassType) ? "Class Metadata space" :
  38.794 +      const char* space_string = (mdtype == ClassType) ? "Compressed class space" :
  38.795                                                           "Metadata space";
  38.796        report_java_out_of_memory(space_string);
  38.797  
  38.798 @@ -3311,3 +3380,59 @@
  38.799      class_vsm()->dump(out);
  38.800    }
  38.801  }
  38.802 +
  38.803 +/////////////// Unit tests ///////////////
  38.804 +
  38.805 +#ifndef PRODUCT
  38.806 +
  38.807 +class MetaspaceAuxTest : AllStatic {
  38.808 + public:
  38.809 +  static void test_reserved() {
  38.810 +    size_t reserved = MetaspaceAux::reserved_bytes();
  38.811 +
  38.812 +    assert(reserved > 0, "assert");
  38.813 +
  38.814 +    size_t committed  = MetaspaceAux::committed_bytes();
  38.815 +    assert(committed <= reserved, "assert");
  38.816 +
  38.817 +    size_t reserved_metadata = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
  38.818 +    assert(reserved_metadata > 0, "assert");
  38.819 +    assert(reserved_metadata <= reserved, "assert");
  38.820 +
  38.821 +    if (UseCompressedClassPointers) {
  38.822 +      size_t reserved_class    = MetaspaceAux::reserved_bytes(Metaspace::ClassType);
  38.823 +      assert(reserved_class > 0, "assert");
  38.824 +      assert(reserved_class < reserved, "assert");
  38.825 +    }
  38.826 +  }
  38.827 +
  38.828 +  static void test_committed() {
  38.829 +    size_t committed = MetaspaceAux::committed_bytes();
  38.830 +
  38.831 +    assert(committed > 0, "assert");
  38.832 +
  38.833 +    size_t reserved  = MetaspaceAux::reserved_bytes();
  38.834 +    assert(committed <= reserved, "assert");
  38.835 +
  38.836 +    size_t committed_metadata = MetaspaceAux::committed_bytes(Metaspace::NonClassType);
  38.837 +    assert(committed_metadata > 0, "assert");
  38.838 +    assert(committed_metadata <= committed, "assert");
  38.839 +
  38.840 +    if (UseCompressedClassPointers) {
  38.841 +      size_t committed_class    = MetaspaceAux::committed_bytes(Metaspace::ClassType);
  38.842 +      assert(committed_class > 0, "assert");
  38.843 +      assert(committed_class < committed, "assert");
  38.844 +    }
  38.845 +  }
  38.846 +
  38.847 +  static void test() {
  38.848 +    test_reserved();
  38.849 +    test_committed();
  38.850 +  }
  38.851 +};
  38.852 +
  38.853 +void MetaspaceAux_test() {
  38.854 +  MetaspaceAuxTest::test();
  38.855 +}
  38.856 +
  38.857 +#endif
    39.1 --- a/src/share/vm/memory/metaspace.hpp	Wed Sep 18 12:52:15 2013 -0400
    39.2 +++ b/src/share/vm/memory/metaspace.hpp	Thu Sep 19 09:26:08 2013 +0200
    39.3 @@ -182,9 +182,8 @@
    39.4  
    39.5    char*  bottom() const;
    39.6    size_t used_words_slow(MetadataType mdtype) const;
    39.7 -  size_t free_words(MetadataType mdtype) const;
    39.8 +  size_t free_words_slow(MetadataType mdtype) const;
    39.9    size_t capacity_words_slow(MetadataType mdtype) const;
   39.10 -  size_t waste_words(MetadataType mdtype) const;
   39.11  
   39.12    size_t used_bytes_slow(MetadataType mdtype) const;
   39.13    size_t capacity_bytes_slow(MetadataType mdtype) const;
   39.14 @@ -213,27 +212,22 @@
   39.15  
   39.16    void iterate(AllocRecordClosure *closure);
   39.17  
   39.18 -  // Return TRUE only if UseCompressedKlassPointers is True and DumpSharedSpaces is False.
   39.19 +  // Return TRUE only if UseCompressedClassPointers is True and DumpSharedSpaces is False.
   39.20    static bool using_class_space() {
   39.21 -    return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces);
   39.22 +    return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers && !DumpSharedSpaces);
   39.23    }
   39.24  
   39.25  };
   39.26  
   39.27  class MetaspaceAux : AllStatic {
   39.28 -  static size_t free_chunks_total(Metaspace::MetadataType mdtype);
   39.29 -
   39.30 - public:
   39.31 -  // Statistics for class space and data space in metaspace.
   39.32 +  static size_t free_chunks_total_words(Metaspace::MetadataType mdtype);
   39.33  
   39.34    // These methods iterate over the classloader data graph
   39.35    // for the given Metaspace type.  These are slow.
   39.36    static size_t used_bytes_slow(Metaspace::MetadataType mdtype);
   39.37 -  static size_t free_in_bytes(Metaspace::MetadataType mdtype);
   39.38 +  static size_t free_bytes_slow(Metaspace::MetadataType mdtype);
   39.39    static size_t capacity_bytes_slow(Metaspace::MetadataType mdtype);
   39.40 -
   39.41 -  // Iterates over the virtual space list.
   39.42 -  static size_t reserved_in_bytes(Metaspace::MetadataType mdtype);
   39.43 +  static size_t capacity_bytes_slow();
   39.44  
   39.45    // Running sum of space in all Metachunks that has been
   39.46    // allocated to a Metaspace.  This is used instead of
   39.47 @@ -263,17 +257,16 @@
   39.48    }
   39.49  
   39.50    // Used by MetaspaceCounters
   39.51 -  static size_t free_chunks_total();
   39.52 -  static size_t free_chunks_total_in_bytes();
   39.53 -  static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
   39.54 +  static size_t free_chunks_total_words();
   39.55 +  static size_t free_chunks_total_bytes();
   39.56 +  static size_t free_chunks_total_bytes(Metaspace::MetadataType mdtype);
   39.57  
   39.58    static size_t allocated_capacity_words(Metaspace::MetadataType mdtype) {
   39.59      return _allocated_capacity_words[mdtype];
   39.60    }
   39.61    static size_t allocated_capacity_words() {
   39.62 -    return _allocated_capacity_words[Metaspace::NonClassType] +
   39.63 -           (Metaspace::using_class_space() ?
   39.64 -           _allocated_capacity_words[Metaspace::ClassType] : 0);
   39.65 +    return allocated_capacity_words(Metaspace::NonClassType) +
   39.66 +           allocated_capacity_words(Metaspace::ClassType);
   39.67    }
   39.68    static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
   39.69      return allocated_capacity_words(mdtype) * BytesPerWord;
   39.70 @@ -286,9 +279,8 @@
   39.71      return _allocated_used_words[mdtype];
   39.72    }
   39.73    static size_t allocated_used_words() {
   39.74 -    return _allocated_used_words[Metaspace::NonClassType] +
   39.75 -           (Metaspace::using_class_space() ?
   39.76 -           _allocated_used_words[Metaspace::ClassType] : 0);
   39.77 +    return allocated_used_words(Metaspace::NonClassType) +
   39.78 +           allocated_used_words(Metaspace::ClassType);
   39.79    }
   39.80    static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
   39.81      return allocated_used_words(mdtype) * BytesPerWord;
   39.82 @@ -300,31 +292,22 @@
   39.83    static size_t free_bytes();
   39.84    static size_t free_bytes(Metaspace::MetadataType mdtype);
   39.85  
   39.86 -  // Total capacity in all Metaspaces
   39.87 -  static size_t capacity_bytes_slow() {
   39.88 -#ifdef PRODUCT
   39.89 -    // Use allocated_capacity_bytes() in PRODUCT instead of this function.
   39.90 -    guarantee(false, "Should not call capacity_bytes_slow() in the PRODUCT");
   39.91 -#endif
   39.92 -    size_t class_capacity = capacity_bytes_slow(Metaspace::ClassType);
   39.93 -    size_t non_class_capacity = capacity_bytes_slow(Metaspace::NonClassType);
   39.94 -    assert(allocated_capacity_bytes() == class_capacity + non_class_capacity,
   39.95 -           err_msg("bad accounting: allocated_capacity_bytes() " SIZE_FORMAT
   39.96 -             " class_capacity + non_class_capacity " SIZE_FORMAT
   39.97 -             " class_capacity " SIZE_FORMAT " non_class_capacity " SIZE_FORMAT,
   39.98 -             allocated_capacity_bytes(), class_capacity + non_class_capacity,
   39.99 -             class_capacity, non_class_capacity));
  39.100 -
  39.101 -    return class_capacity + non_class_capacity;
  39.102 +  static size_t reserved_bytes(Metaspace::MetadataType mdtype);
  39.103 +  static size_t reserved_bytes() {
  39.104 +    return reserved_bytes(Metaspace::ClassType) +
  39.105 +           reserved_bytes(Metaspace::NonClassType);
  39.106    }
  39.107  
  39.108 -  // Total space reserved in all Metaspaces
  39.109 -  static size_t reserved_in_bytes() {
  39.110 -    return reserved_in_bytes(Metaspace::ClassType) +
  39.111 -           reserved_in_bytes(Metaspace::NonClassType);
  39.112 +  static size_t committed_bytes(Metaspace::MetadataType mdtype);
  39.113 +  static size_t committed_bytes() {
  39.114 +    return committed_bytes(Metaspace::ClassType) +
  39.115 +           committed_bytes(Metaspace::NonClassType);
  39.116    }
  39.117  
  39.118 -  static size_t min_chunk_size();
  39.119 +  static size_t min_chunk_size_words();
  39.120 +  static size_t min_chunk_size_bytes() {
  39.121 +    return min_chunk_size_words() * BytesPerWord;
  39.122 +  }
  39.123  
  39.124    // Print change in used metadata.
  39.125    static void print_metaspace_change(size_t prev_metadata_used);
    40.1 --- a/src/share/vm/memory/metaspaceCounters.cpp	Wed Sep 18 12:52:15 2013 -0400
    40.2 +++ b/src/share/vm/memory/metaspaceCounters.cpp	Thu Sep 19 09:26:08 2013 +0200
    40.3 @@ -65,26 +65,25 @@
    40.4  
    40.5  MetaspacePerfCounters* MetaspaceCounters::_perf_counters = NULL;
    40.6  
    40.7 -size_t MetaspaceCounters::calculate_capacity() {
    40.8 -  // The total capacity is the sum of
    40.9 -  //   1) capacity of Metachunks in use by all Metaspaces
   40.10 -  //   2) unused space at the end of each Metachunk
   40.11 -  //   3) space in the freelist
   40.12 -  size_t total_capacity = MetaspaceAux::allocated_capacity_bytes()
   40.13 -    + MetaspaceAux::free_bytes() + MetaspaceAux::free_chunks_total_in_bytes();
   40.14 -  return total_capacity;
   40.15 +size_t MetaspaceCounters::used() {
   40.16 +  return MetaspaceAux::allocated_used_bytes();
   40.17 +}
   40.18 +
   40.19 +size_t MetaspaceCounters::capacity() {
   40.20 +  return MetaspaceAux::committed_bytes();
   40.21 +}
   40.22 +
   40.23 +size_t MetaspaceCounters::max_capacity() {
   40.24 +  return MetaspaceAux::reserved_bytes();
   40.25  }
   40.26  
   40.27  void MetaspaceCounters::initialize_performance_counters() {
   40.28    if (UsePerfData) {
   40.29      assert(_perf_counters == NULL, "Should only be initialized once");
   40.30  
   40.31 -    size_t min_capacity = MetaspaceAux::min_chunk_size();
   40.32 -    size_t capacity = calculate_capacity();
   40.33 -    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
   40.34 -    size_t used = MetaspaceAux::allocated_used_bytes();
   40.35 -
   40.36 -    _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity, capacity, max_capacity, used);
   40.37 +    size_t min_capacity = 0;
   40.38 +    _perf_counters = new MetaspacePerfCounters("metaspace", min_capacity,
   40.39 +                                               capacity(), max_capacity(), used());
   40.40    }
   40.41  }
   40.42  
   40.43 @@ -92,31 +91,29 @@
   40.44    if (UsePerfData) {
   40.45      assert(_perf_counters != NULL, "Should be initialized");
   40.46  
   40.47 -    size_t capacity = calculate_capacity();
   40.48 -    size_t max_capacity = MetaspaceAux::reserved_in_bytes();
   40.49 -    size_t used = MetaspaceAux::allocated_used_bytes();
   40.50 -
   40.51 -    _perf_counters->update(capacity, max_capacity, used);
   40.52 +    _perf_counters->update(capacity(), max_capacity(), used());
   40.53    }
   40.54  }
   40.55  
   40.56  MetaspacePerfCounters* CompressedClassSpaceCounters::_perf_counters = NULL;
   40.57  
   40.58 -size_t CompressedClassSpaceCounters::calculate_capacity() {
   40.59 -    return MetaspaceAux::allocated_capacity_bytes(_class_type) +
   40.60 -           MetaspaceAux::free_bytes(_class_type) +
   40.61 -           MetaspaceAux::free_chunks_total_in_bytes(_class_type);
   40.62 +size_t CompressedClassSpaceCounters::used() {
   40.63 +  return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
   40.64 +}
   40.65 +
   40.66 +size_t CompressedClassSpaceCounters::capacity() {
   40.67 +  return MetaspaceAux::committed_bytes(Metaspace::ClassType);
   40.68 +}
   40.69 +
   40.70 +size_t CompressedClassSpaceCounters::max_capacity() {
   40.71 +  return MetaspaceAux::reserved_bytes(Metaspace::ClassType);
   40.72  }
   40.73  
   40.74  void CompressedClassSpaceCounters::update_performance_counters() {
   40.75 -  if (UsePerfData && UseCompressedKlassPointers) {
   40.76 +  if (UsePerfData && UseCompressedClassPointers) {
   40.77      assert(_perf_counters != NULL, "Should be initialized");
   40.78  
   40.79 -    size_t capacity = calculate_capacity();
   40.80 -    size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
   40.81 -    size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
   40.82 -
   40.83 -    _perf_counters->update(capacity, max_capacity, used);
   40.84 +    _perf_counters->update(capacity(), max_capacity(), used());
   40.85    }
   40.86  }
   40.87  
   40.88 @@ -125,13 +122,10 @@
   40.89      assert(_perf_counters == NULL, "Should only be initialized once");
   40.90      const char* ns = "compressedclassspace";
   40.91  
   40.92 -    if (UseCompressedKlassPointers) {
   40.93 -      size_t min_capacity = MetaspaceAux::min_chunk_size();
   40.94 -      size_t capacity = calculate_capacity();
   40.95 -      size_t max_capacity = MetaspaceAux::reserved_in_bytes(_class_type);
   40.96 -      size_t used = MetaspaceAux::allocated_used_bytes(_class_type);
   40.97 -
   40.98 -      _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity, max_capacity, used);
   40.99 +    if (UseCompressedClassPointers) {
  40.100 +      size_t min_capacity = 0;
  40.101 +      _perf_counters = new MetaspacePerfCounters(ns, min_capacity, capacity(),
  40.102 +                                                 max_capacity(), used());
  40.103      } else {
  40.104        _perf_counters = new MetaspacePerfCounters(ns, 0, 0, 0, 0);
  40.105      }
    41.1 --- a/src/share/vm/memory/metaspaceCounters.hpp	Wed Sep 18 12:52:15 2013 -0400
    41.2 +++ b/src/share/vm/memory/metaspaceCounters.hpp	Thu Sep 19 09:26:08 2013 +0200
    41.3 @@ -25,13 +25,15 @@
    41.4  #ifndef SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
    41.5  #define SHARE_VM_MEMORY_METASPACECOUNTERS_HPP
    41.6  
    41.7 -#include "memory/metaspace.hpp"
    41.8 +#include "memory/allocation.hpp"
    41.9  
   41.10  class MetaspacePerfCounters;
   41.11  
   41.12  class MetaspaceCounters: public AllStatic {
   41.13    static MetaspacePerfCounters* _perf_counters;
   41.14 -  static size_t calculate_capacity();
   41.15 +  static size_t used();
   41.16 +  static size_t capacity();
   41.17 +  static size_t max_capacity();
   41.18  
   41.19   public:
   41.20    static void initialize_performance_counters();
   41.21 @@ -40,8 +42,9 @@
   41.22  
   41.23  class CompressedClassSpaceCounters: public AllStatic {
   41.24    static MetaspacePerfCounters* _perf_counters;
   41.25 -  static size_t calculate_capacity();
   41.26 -  static const Metaspace::MetadataType _class_type = Metaspace::ClassType;
   41.27 +  static size_t used();
   41.28 +  static size_t capacity();
   41.29 +  static size_t max_capacity();
   41.30  
   41.31   public:
   41.32    static void initialize_performance_counters();
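
The MetaspaceCounters rework above drops the old calculate_capacity() arithmetic in favour of direct committed/reserved queries. A rough standalone sketch (illustrative names only, not HotSpot's API) of the reporting invariant the three new accessors are expected to satisfy:

    // Minimal standalone sketch (not HotSpot code) of the reporting model the
    // reworked counters rely on: used <= capacity (committed) <= max_capacity
    // (reserved), with min_capacity pinned to 0.
    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    struct SpaceStats {          // stand-in for the MetaspaceAux queries
      size_t used;               // allocated_used_bytes()
      size_t committed;          // committed_bytes()
      size_t reserved;           // reserved_bytes()
    };

    static void update_counters(const SpaceStats& s) {
      assert(s.used <= s.committed && s.committed <= s.reserved);
      // A real MetaspacePerfCounters::update() would publish these via jstat.
      std::printf("min=0 used=%zu capacity=%zu max_capacity=%zu\n",
                  s.used, s.committed, s.reserved);
    }

    int main() {
      SpaceStats metaspace = {6 * 1024 * 1024, 8 * 1024 * 1024, 64 * 1024 * 1024};
      update_counters(metaspace);
      return 0;
    }
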
    42.1 --- a/src/share/vm/memory/universe.cpp	Wed Sep 18 12:52:15 2013 -0400
    42.2 +++ b/src/share/vm/memory/universe.cpp	Thu Sep 19 09:26:08 2013 +0200
    42.3 @@ -872,13 +872,16 @@
    42.4  
    42.5  // Reserve the Java heap, which is now the same for all GCs.
    42.6  ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
    42.7 +  assert(alignment <= Arguments::conservative_max_heap_alignment(),
    42.8 +      err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
    42.9 +          alignment, Arguments::conservative_max_heap_alignment()));
   42.10    size_t total_reserved = align_size_up(heap_size, alignment);
   42.11    assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
   42.12        "heap size is too big for compressed oops");
   42.13  
   42.14    bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
   42.15    assert(!UseLargePages
   42.16 -      || UseParallelOldGC
   42.17 +      || UseParallelGC
   42.18        || use_large_pages, "Wrong alignment to use large pages");
   42.19  
   42.20    char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
   42.21 @@ -1028,7 +1031,7 @@
   42.22  
   42.23      msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
   42.24      java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
   42.25 -    msg = java_lang_String::create_from_str("Class Metadata space", CHECK_false);
   42.26 +    msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
   42.27      java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
   42.28  
   42.29      msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
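
The new assert in Universe::reserve_heap ties the actual heap alignment to the conservative maximum computed during argument processing. A minimal standalone sketch of the align-up arithmetic involved, assuming power-of-two alignments as HotSpot does (all constants are illustrative):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    static size_t align_size_up(size_t size, size_t alignment) {
      assert((alignment & (alignment - 1)) == 0 && "alignment must be a power of two");
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t conservative_max_heap_alignment = 32 * 1024 * 1024; // e.g. large-page bound
      size_t heap_size = 1000 * 1024 * 1024;   // requested -Xmx, not necessarily aligned
      size_t alignment = 2 * 1024 * 1024;      // actual GC/page alignment chosen later
      assert(alignment <= conservative_max_heap_alignment);
      std::printf("total_reserved = %zu\n", align_size_up(heap_size, alignment));
      return 0;
    }
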
    43.1 --- a/src/share/vm/memory/universe.hpp	Wed Sep 18 12:52:15 2013 -0400
    43.2 +++ b/src/share/vm/memory/universe.hpp	Thu Sep 19 09:26:08 2013 +0200
    43.3 @@ -181,7 +181,7 @@
    43.4  
    43.5    // For UseCompressedOops.
    43.6    static struct NarrowPtrStruct _narrow_oop;
    43.7 -  // For UseCompressedKlassPointers.
    43.8 +  // For UseCompressedClassPointers.
    43.9    static struct NarrowPtrStruct _narrow_klass;
   43.10    static address _narrow_ptrs_base;
   43.11  
   43.12 @@ -229,7 +229,7 @@
   43.13      _narrow_oop._base    = base;
   43.14    }
   43.15    static void     set_narrow_klass_base(address base) {
   43.16 -    assert(UseCompressedKlassPointers, "no compressed klass ptrs?");
   43.17 +    assert(UseCompressedClassPointers, "no compressed klass ptrs?");
   43.18      _narrow_klass._base   = base;
   43.19    }
   43.20    static void     set_narrow_oop_use_implicit_null_checks(bool use) {
   43.21 @@ -353,7 +353,7 @@
   43.22    static int      narrow_oop_shift()                      { return  _narrow_oop._shift; }
   43.23    static bool     narrow_oop_use_implicit_null_checks()   { return  _narrow_oop._use_implicit_null_checks; }
   43.24  
   43.25 -  // For UseCompressedKlassPointers
   43.26 +  // For UseCompressedClassPointers
   43.27    static address  narrow_klass_base()                     { return  _narrow_klass._base; }
   43.28    static bool  is_narrow_klass_base(void* addr)           { return (narrow_klass_base() == (address)addr); }
   43.29    static int      narrow_klass_shift()                    { return  _narrow_klass._shift; }
    44.1 --- a/src/share/vm/oops/arrayOop.hpp	Wed Sep 18 12:52:15 2013 -0400
    44.2 +++ b/src/share/vm/oops/arrayOop.hpp	Thu Sep 19 09:26:08 2013 +0200
    44.3 @@ -65,7 +65,7 @@
    44.4    // declared nonstatic fields in arrayOopDesc if not compressed, otherwise
    44.5    // it occupies the second half of the _klass field in oopDesc.
    44.6    static int length_offset_in_bytes() {
    44.7 -    return UseCompressedKlassPointers ? klass_gap_offset_in_bytes() :
    44.8 +    return UseCompressedClassPointers ? klass_gap_offset_in_bytes() :
    44.9                                 sizeof(arrayOopDesc);
   44.10    }
   44.11  
    45.1 --- a/src/share/vm/oops/instanceOop.hpp	Wed Sep 18 12:52:15 2013 -0400
    45.2 +++ b/src/share/vm/oops/instanceOop.hpp	Thu Sep 19 09:26:08 2013 +0200
    45.3 @@ -37,9 +37,9 @@
    45.4  
    45.5    // If compressed, the offset of the fields of the instance may not be aligned.
    45.6    static int base_offset_in_bytes() {
    45.7 -    // offset computation code breaks if UseCompressedKlassPointers
    45.8 +    // offset computation code breaks if UseCompressedClassPointers
    45.9      // only is true
   45.10 -    return (UseCompressedOops && UseCompressedKlassPointers) ?
   45.11 +    return (UseCompressedOops && UseCompressedClassPointers) ?
   45.12               klass_gap_offset_in_bytes() :
   45.13               sizeof(instanceOopDesc);
   45.14    }
    46.1 --- a/src/share/vm/oops/oop.inline.hpp	Wed Sep 18 12:52:15 2013 -0400
    46.2 +++ b/src/share/vm/oops/oop.inline.hpp	Thu Sep 19 09:26:08 2013 +0200
    46.3 @@ -69,7 +69,7 @@
    46.4  }
    46.5  
    46.6  inline Klass* oopDesc::klass() const {
    46.7 -  if (UseCompressedKlassPointers) {
    46.8 +  if (UseCompressedClassPointers) {
    46.9      return Klass::decode_klass_not_null(_metadata._compressed_klass);
   46.10    } else {
   46.11      return _metadata._klass;
   46.12 @@ -78,7 +78,7 @@
   46.13  
   46.14  inline Klass* oopDesc::klass_or_null() const volatile {
   46.15    // can be NULL in CMS
   46.16 -  if (UseCompressedKlassPointers) {
   46.17 +  if (UseCompressedClassPointers) {
   46.18      return Klass::decode_klass(_metadata._compressed_klass);
   46.19    } else {
   46.20      return _metadata._klass;
   46.21 @@ -86,19 +86,19 @@
   46.22  }
   46.23  
   46.24  inline int oopDesc::klass_gap_offset_in_bytes() {
   46.25 -  assert(UseCompressedKlassPointers, "only applicable to compressed klass pointers");
   46.26 +  assert(UseCompressedClassPointers, "only applicable to compressed klass pointers");
   46.27    return oopDesc::klass_offset_in_bytes() + sizeof(narrowKlass);
   46.28  }
   46.29  
   46.30  inline Klass** oopDesc::klass_addr() {
   46.31    // Only used internally and with CMS and will not work with
   46.32    // UseCompressedOops
   46.33 -  assert(!UseCompressedKlassPointers, "only supported with uncompressed klass pointers");
   46.34 +  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
   46.35    return (Klass**) &_metadata._klass;
   46.36  }
   46.37  
   46.38  inline narrowKlass* oopDesc::compressed_klass_addr() {
   46.39 -  assert(UseCompressedKlassPointers, "only called by compressed klass pointers");
   46.40 +  assert(UseCompressedClassPointers, "only called by compressed klass pointers");
   46.41    return &_metadata._compressed_klass;
   46.42  }
   46.43  
   46.44 @@ -106,7 +106,7 @@
   46.45    // since klasses are promoted no store check is needed
   46.46    assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
   46.47    assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
   46.48 -  if (UseCompressedKlassPointers) {
   46.49 +  if (UseCompressedClassPointers) {
   46.50      *compressed_klass_addr() = Klass::encode_klass_not_null(k);
   46.51    } else {
   46.52      *klass_addr() = k;
   46.53 @@ -118,7 +118,7 @@
   46.54  }
   46.55  
   46.56  inline void oopDesc::set_klass_gap(int v) {
   46.57 -  if (UseCompressedKlassPointers) {
   46.58 +  if (UseCompressedClassPointers) {
   46.59      *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
   46.60    }
   46.61  }
   46.62 @@ -126,7 +126,7 @@
   46.63  inline void oopDesc::set_klass_to_list_ptr(oop k) {
   46.64    // This is only to be used during GC, for from-space objects, so no
   46.65    // barrier is needed.
   46.66 -  if (UseCompressedKlassPointers) {
   46.67 +  if (UseCompressedClassPointers) {
   46.68      _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
   46.69    } else {
   46.70      _metadata._klass = (Klass*)(address)k;
   46.71 @@ -135,7 +135,7 @@
   46.72  
   46.73  inline oop oopDesc::list_ptr_from_klass() {
   46.74    // This is only to be used during GC, for from-space objects.
   46.75 -  if (UseCompressedKlassPointers) {
   46.76 +  if (UseCompressedClassPointers) {
   46.77      return decode_heap_oop((narrowOop)_metadata._compressed_klass);
   46.78    } else {
   46.79      // Special case for GC
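
The flag rename touches every place that tests for compressed klass pointers. For orientation, a simplified standalone model of what a compressed class pointer is (a base-relative, optionally shifted 32-bit value); this is not HotSpot's actual encoding code, which also handles NULL and shift selection:

    // Assumes a 64-bit address space; base and shift are illustrative.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static const uintptr_t narrow_klass_base  = 0x800000000ULL;
    static const unsigned  narrow_klass_shift = 3;

    static uint32_t encode_klass(uintptr_t klass_addr) {
      assert(klass_addr >= narrow_klass_base);
      return (uint32_t)((klass_addr - narrow_klass_base) >> narrow_klass_shift);
    }

    static uintptr_t decode_klass(uint32_t narrow_klass) {
      return narrow_klass_base + ((uintptr_t)narrow_klass << narrow_klass_shift);
    }

    int main() {
      uintptr_t k = narrow_klass_base + 0x12340;   // 8-byte aligned, so the round trip is exact
      assert(decode_klass(encode_klass(k)) == k);
      std::printf("narrow form: 0x%x\n", encode_klass(k));
      return 0;
    }
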
    47.1 --- a/src/share/vm/opto/cfgnode.cpp	Wed Sep 18 12:52:15 2013 -0400
    47.2 +++ b/src/share/vm/opto/cfgnode.cpp	Thu Sep 19 09:26:08 2013 +0200
    47.3 @@ -1932,7 +1932,7 @@
    47.4  #ifdef _LP64
    47.5    // Push DecodeN/DecodeNKlass down through phi.
    47.6    // The rest of phi graph will transform by split EncodeP node though phis up.
    47.7 -  if ((UseCompressedOops || UseCompressedKlassPointers) && can_reshape && progress == NULL) {
    47.8 +  if ((UseCompressedOops || UseCompressedClassPointers) && can_reshape && progress == NULL) {
    47.9      bool may_push = true;
   47.10      bool has_decodeN = false;
   47.11      bool is_decodeN = false;
    48.1 --- a/src/share/vm/opto/compile.cpp	Wed Sep 18 12:52:15 2013 -0400
    48.2 +++ b/src/share/vm/opto/compile.cpp	Thu Sep 19 09:26:08 2013 +0200
    48.3 @@ -2646,7 +2646,7 @@
    48.4              addp->in(AddPNode::Base) == n->in(AddPNode::Base),
    48.5              "Base pointers must match" );
    48.6  #ifdef _LP64
    48.7 -    if ((UseCompressedOops || UseCompressedKlassPointers) &&
    48.8 +    if ((UseCompressedOops || UseCompressedClassPointers) &&
    48.9          addp->Opcode() == Op_ConP &&
   48.10          addp == n->in(AddPNode::Base) &&
   48.11          n->in(AddPNode::Offset)->is_Con()) {
   48.12 @@ -3033,7 +3033,7 @@
   48.13  
   48.14    // Skip next transformation if compressed oops are not used.
   48.15    if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) ||
   48.16 -      (!UseCompressedOops && !UseCompressedKlassPointers))
   48.17 +      (!UseCompressedOops && !UseCompressedClassPointers))
   48.18      return;
   48.19  
   48.20    // Go over safepoints nodes to skip DecodeN/DecodeNKlass nodes for debug edges.
    49.1 --- a/src/share/vm/opto/connode.cpp	Wed Sep 18 12:52:15 2013 -0400
    49.2 +++ b/src/share/vm/opto/connode.cpp	Thu Sep 19 09:26:08 2013 +0200
    49.3 @@ -630,7 +630,7 @@
    49.4    if (t == Type::TOP) return Type::TOP;
    49.5    assert (t != TypePtr::NULL_PTR, "null klass?");
    49.6  
    49.7 -  assert(UseCompressedKlassPointers && t->isa_klassptr(), "only klass ptr here");
    49.8 +  assert(UseCompressedClassPointers && t->isa_klassptr(), "only klass ptr here");
    49.9    return t->make_narrowklass();
   49.10  }
   49.11  
    50.1 --- a/src/share/vm/opto/library_call.cpp	Wed Sep 18 12:52:15 2013 -0400
    50.2 +++ b/src/share/vm/opto/library_call.cpp	Thu Sep 19 09:26:08 2013 +0200
    50.3 @@ -4204,7 +4204,7 @@
    50.4    // 12 - 64-bit VM, compressed klass
    50.5    // 16 - 64-bit VM, normal klass
    50.6    if (base_off % BytesPerLong != 0) {
    50.7 -    assert(UseCompressedKlassPointers, "");
    50.8 +    assert(UseCompressedClassPointers, "");
    50.9      if (is_array) {
   50.10        // Exclude length to copy by 8 bytes words.
   50.11        base_off += sizeof(int);
    51.1 --- a/src/share/vm/opto/live.cpp	Wed Sep 18 12:52:15 2013 -0400
    51.2 +++ b/src/share/vm/opto/live.cpp	Thu Sep 19 09:26:08 2013 +0200
    51.3 @@ -321,7 +321,7 @@
    51.4  #ifdef _LP64
    51.5                        UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP ||
    51.6                        UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN ||
    51.7 -                      UseCompressedKlassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
    51.8 +                      UseCompressedClassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
    51.9  #endif
   51.10                        check->as_Mach()->ideal_Opcode() == Op_LoadP ||
   51.11                        check->as_Mach()->ideal_Opcode() == Op_LoadKlass)) {
    52.1 --- a/src/share/vm/opto/macro.cpp	Wed Sep 18 12:52:15 2013 -0400
    52.2 +++ b/src/share/vm/opto/macro.cpp	Thu Sep 19 09:26:08 2013 +0200
    52.3 @@ -2191,7 +2191,7 @@
    52.4        Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
    52.5        klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
    52.6  #ifdef _LP64
    52.7 -      if (UseCompressedKlassPointers && klass_node->is_DecodeNKlass()) {
    52.8 +      if (UseCompressedClassPointers && klass_node->is_DecodeNKlass()) {
    52.9          assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
   52.10          klass_node->in(1)->init_req(0, ctrl);
   52.11        } else
    53.1 --- a/src/share/vm/opto/memnode.cpp	Wed Sep 18 12:52:15 2013 -0400
    53.2 +++ b/src/share/vm/opto/memnode.cpp	Thu Sep 19 09:26:08 2013 +0200
    53.3 @@ -2031,7 +2031,7 @@
    53.4    assert(adr_type != NULL, "expecting TypeKlassPtr");
    53.5  #ifdef _LP64
    53.6    if (adr_type->is_ptr_to_narrowklass()) {
    53.7 -    assert(UseCompressedKlassPointers, "no compressed klasses");
    53.8 +    assert(UseCompressedClassPointers, "no compressed klasses");
    53.9      Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass()));
   53.10      return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr());
   53.11    }
   53.12 @@ -2369,7 +2369,7 @@
   53.13        val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop()));
   53.14        return new (C) StoreNNode(ctl, mem, adr, adr_type, val);
   53.15      } else if (adr->bottom_type()->is_ptr_to_narrowklass() ||
   53.16 -               (UseCompressedKlassPointers && val->bottom_type()->isa_klassptr() &&
   53.17 +               (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() &&
   53.18                  adr->bottom_type()->isa_rawptr())) {
   53.19        val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass()));
   53.20        return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val);
    54.1 --- a/src/share/vm/opto/type.cpp	Wed Sep 18 12:52:15 2013 -0400
    54.2 +++ b/src/share/vm/opto/type.cpp	Thu Sep 19 09:26:08 2013 +0200
    54.3 @@ -2416,7 +2416,7 @@
    54.4  #ifdef _LP64
    54.5    if (_offset != 0) {
    54.6      if (_offset == oopDesc::klass_offset_in_bytes()) {
    54.7 -      _is_ptr_to_narrowklass = UseCompressedKlassPointers;
    54.8 +      _is_ptr_to_narrowklass = UseCompressedClassPointers;
    54.9      } else if (klass() == NULL) {
   54.10        // Array with unknown body type
   54.11        assert(this->isa_aryptr(), "only arrays without klass");
    55.1 --- a/src/share/vm/prims/jni.cpp	Wed Sep 18 12:52:15 2013 -0400
    55.2 +++ b/src/share/vm/prims/jni.cpp	Thu Sep 19 09:26:08 2013 +0200
    55.3 @@ -5049,12 +5049,16 @@
    55.4  // Forward declaration
    55.5  void TestReservedSpace_test();
    55.6  void TestReserveMemorySpecial_test();
    55.7 +void TestVirtualSpace_test();
    55.8 +void MetaspaceAux_test();
    55.9  
   55.10  void execute_internal_vm_tests() {
   55.11    if (ExecuteInternalVMTests) {
   55.12      tty->print_cr("Running internal VM tests");
   55.13      run_unit_test(TestReservedSpace_test());
   55.14      run_unit_test(TestReserveMemorySpecial_test());
   55.15 +    run_unit_test(TestVirtualSpace_test());
   55.16 +    run_unit_test(MetaspaceAux_test());
   55.17      run_unit_test(GlobalDefinitions::test_globals());
   55.18      run_unit_test(GCTimerAllTest::all());
   55.19      run_unit_test(arrayOopDesc::test_max_array_length());
    56.1 --- a/src/share/vm/prims/whitebox.cpp	Wed Sep 18 12:52:15 2013 -0400
    56.2 +++ b/src/share/vm/prims/whitebox.cpp	Thu Sep 19 09:26:08 2013 +0200
    56.3 @@ -33,6 +33,7 @@
    56.4  #include "prims/whitebox.hpp"
    56.5  #include "prims/wbtestmethods/parserTests.hpp"
    56.6  
    56.7 +#include "runtime/arguments.hpp"
    56.8  #include "runtime/interfaceSupport.hpp"
    56.9  #include "runtime/os.hpp"
   56.10  #include "utilities/debug.hpp"
   56.11 @@ -94,6 +95,11 @@
   56.12    return closure.found();
   56.13  WB_END
   56.14  
   56.15 +WB_ENTRY(jlong, WB_GetCompressedOopsMaxHeapSize(JNIEnv* env, jobject o)) {
   56.16 +  return (jlong)Arguments::max_heap_for_compressed_oops();
   56.17 +}
   56.18 +WB_END
   56.19 +
   56.20  WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
   56.21    CollectorPolicy * p = Universe::heap()->collector_policy();
   56.22    gclog_or_tty->print_cr("Minimum heap "SIZE_FORMAT" Initial heap "
   56.23 @@ -436,6 +442,8 @@
   56.24        CC"(Ljava/lang/String;[Lsun/hotspot/parser/DiagnosticCommand;)[Ljava/lang/Object;",
   56.25        (void*) &WB_ParseCommandLine
   56.26    },
   56.27 +  {CC"getCompressedOopsMaxHeapSize", CC"()J",
   56.28 +      (void*)&WB_GetCompressedOopsMaxHeapSize},
   56.29    {CC"printHeapSizes",     CC"()V",                   (void*)&WB_PrintHeapSizes    },
   56.30  #if INCLUDE_ALL_GCS
   56.31    {CC"g1InConcurrentMark", CC"()Z",                   (void*)&WB_G1InConcurrentMark},
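
The new WhiteBox entry follows the existing pattern: an implementation wrapped in WB_ENTRY/WB_END plus a row in the method table. A generic standalone sketch of that name/signature/function-pointer table pattern (plain C++ stand-ins; the real table stores void* and is registered through JNI RegisterNatives):

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    typedef long long jlong_t;                         // stand-in for jlong
    typedef jlong_t (*NoArgFn)();

    static jlong_t GetCompressedOopsMaxHeapSize() {    // stand-in implementation
      return 32LL * 1024 * 1024 * 1024;                // pretend limit: 32 GB
    }

    struct NativeMethod { const char* name; const char* sig; NoArgFn fn; };

    static const NativeMethod methods[] = {
      { "getCompressedOopsMaxHeapSize", "()J", &GetCompressedOopsMaxHeapSize },
    };

    int main() {
      // A test would look the entry up by name and call it; here we just dispatch.
      for (size_t i = 0; i < sizeof(methods) / sizeof(methods[0]); i++) {
        if (std::strcmp(methods[i].name, "getCompressedOopsMaxHeapSize") == 0) {
          std::printf("%s%s -> %lld\n", methods[i].name, methods[i].sig,
                      (long long)methods[i].fn());
        }
      }
      return 0;
    }
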
    57.1 --- a/src/share/vm/runtime/arguments.cpp	Wed Sep 18 12:52:15 2013 -0400
    57.2 +++ b/src/share/vm/runtime/arguments.cpp	Thu Sep 19 09:26:08 2013 +0200
    57.3 @@ -28,6 +28,7 @@
    57.4  #include "compiler/compilerOracle.hpp"
    57.5  #include "memory/allocation.inline.hpp"
    57.6  #include "memory/cardTableRS.hpp"
    57.7 +#include "memory/genCollectedHeap.hpp"
    57.8  #include "memory/referenceProcessor.hpp"
    57.9  #include "memory/universe.inline.hpp"
   57.10  #include "oops/oop.inline.hpp"
   57.11 @@ -54,6 +55,8 @@
   57.12  #endif
   57.13  #if INCLUDE_ALL_GCS
   57.14  #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
   57.15 +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
   57.16 +#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
   57.17  #endif // INCLUDE_ALL_GCS
   57.18  
   57.19  // Note: This is a special bug reporting site for the JVM
   57.20 @@ -90,6 +93,7 @@
   57.21  SystemProperty* Arguments::_system_properties   = NULL;
   57.22  const char*  Arguments::_gc_log_filename        = NULL;
   57.23  bool   Arguments::_has_profile                  = false;
   57.24 +size_t Arguments::_conservative_max_heap_alignment = 0;
   57.25  uintx  Arguments::_min_heap_size                = 0;
   57.26  Arguments::Mode Arguments::_mode                = _mixed;
   57.27  bool   Arguments::_java_compiler                = false;
   57.28 @@ -1391,10 +1395,17 @@
   57.29    return true;
   57.30  }
   57.31  
   57.32 -inline uintx max_heap_for_compressed_oops() {
   57.33 +uintx Arguments::max_heap_for_compressed_oops() {
   57.34    // Avoid sign flip.
   57.35    assert(OopEncodingHeapMax > (uint64_t)os::vm_page_size(), "Unusual page size");
   57.36 -  LP64_ONLY(return OopEncodingHeapMax - os::vm_page_size());
   57.37 +  // We need to fit both the NULL page and the heap into the memory budget, while
   57.38 +  // keeping alignment constraints of the heap. To guarantee the latter, as the
   57.39 +  // NULL page is located before the heap, we pad the NULL page to the conservative
   57.40 +  // maximum alignment that the GC may ever impose upon the heap.
   57.41 +  size_t displacement_due_to_null_page = align_size_up_(os::vm_page_size(),
   57.42 +    Arguments::conservative_max_heap_alignment());
   57.43 +
   57.44 +  LP64_ONLY(return OopEncodingHeapMax - displacement_due_to_null_page);
   57.45    NOT_LP64(ShouldNotReachHere(); return 0);
   57.46  }
   57.47  
   57.48 @@ -1439,7 +1450,7 @@
   57.49      if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
   57.50        warning("Max heap size too large for Compressed Oops");
   57.51        FLAG_SET_DEFAULT(UseCompressedOops, false);
   57.52 -      FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
   57.53 +      FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
   57.54      }
   57.55    }
   57.56  #endif // _LP64
   57.57 @@ -1452,22 +1463,22 @@
   57.58  void Arguments::set_use_compressed_klass_ptrs() {
   57.59  #ifndef ZERO
   57.60  #ifdef _LP64
   57.61 -  // UseCompressedOops must be on for UseCompressedKlassPointers to be on.
   57.62 +  // UseCompressedOops must be on for UseCompressedClassPointers to be on.
   57.63    if (!UseCompressedOops) {
   57.64 -    if (UseCompressedKlassPointers) {
   57.65 -      warning("UseCompressedKlassPointers requires UseCompressedOops");
   57.66 +    if (UseCompressedClassPointers) {
   57.67 +      warning("UseCompressedClassPointers requires UseCompressedOops");
   57.68      }
   57.69 -    FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
   57.70 +    FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
   57.71    } else {
   57.72 -    // Turn on UseCompressedKlassPointers too
   57.73 -    if (FLAG_IS_DEFAULT(UseCompressedKlassPointers)) {
   57.74 -      FLAG_SET_ERGO(bool, UseCompressedKlassPointers, true);
   57.75 +    // Turn on UseCompressedClassPointers too
   57.76 +    if (FLAG_IS_DEFAULT(UseCompressedClassPointers)) {
   57.77 +      FLAG_SET_ERGO(bool, UseCompressedClassPointers, true);
   57.78      }
   57.79 -    // Check the ClassMetaspaceSize to make sure we use compressed klass ptrs.
   57.80 -    if (UseCompressedKlassPointers) {
   57.81 -      if (ClassMetaspaceSize > KlassEncodingMetaspaceMax) {
   57.82 -        warning("Class metaspace size is too large for UseCompressedKlassPointers");
   57.83 -        FLAG_SET_DEFAULT(UseCompressedKlassPointers, false);
   57.84 +    // Check the CompressedClassSpaceSize to make sure we use compressed klass ptrs.
   57.85 +    if (UseCompressedClassPointers) {
   57.86 +      if (CompressedClassSpaceSize > KlassEncodingMetaspaceMax) {
   57.87 +        warning("CompressedClassSpaceSize is too large for UseCompressedClassPointers");
   57.88 +        FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
   57.89        }
   57.90      }
   57.91    }
   57.92 @@ -1475,6 +1486,23 @@
   57.93  #endif // !ZERO
   57.94  }
   57.95  
   57.96 +void Arguments::set_conservative_max_heap_alignment() {
   57.97 +  // The conservative maximum required alignment for the heap is the maximum of
   57.98 +  // the alignments imposed by several sources: any requirements from the heap
   57.99 +  // itself, the collector policy and the maximum page size we may run the VM
  57.100 +  // with.
  57.101 +  size_t heap_alignment = GenCollectedHeap::conservative_max_heap_alignment();
  57.102 +#if INCLUDE_ALL_GCS
  57.103 +  if (UseParallelGC) {
  57.104 +    heap_alignment = ParallelScavengeHeap::conservative_max_heap_alignment();
  57.105 +  } else if (UseG1GC) {
  57.106 +    heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
  57.107 +  }
  57.108 +#endif // INCLUDE_ALL_GCS
  57.109 +  _conservative_max_heap_alignment = MAX3(heap_alignment, os::max_page_size(),
  57.110 +    CollectorPolicy::compute_max_alignment());
  57.111 +}
  57.112 +
  57.113  void Arguments::set_ergonomics_flags() {
  57.114  
  57.115    if (os::is_server_class_machine()) {
  57.116 @@ -1503,6 +1531,8 @@
  57.117      }
  57.118    }
  57.119  
  57.120 +  set_conservative_max_heap_alignment();
  57.121 +
  57.122  #ifndef ZERO
  57.123  #ifdef _LP64
  57.124    set_use_compressed_oops();
  57.125 @@ -2193,8 +2223,8 @@
  57.126  
  57.127    status = status && verify_object_alignment();
  57.128  
  57.129 -  status = status && verify_interval(ClassMetaspaceSize, 1*M, 3*G,
  57.130 -                                      "ClassMetaspaceSize");
  57.131 +  status = status && verify_interval(CompressedClassSpaceSize, 1*M, 3*G,
  57.132 +                                      "CompressedClassSpaceSize");
  57.133  
  57.134    status = status && verify_interval(MarkStackSizeMax,
  57.135                                    1, (max_jint - 1), "MarkStackSizeMax");
  57.136 @@ -3326,13 +3356,13 @@
  57.137      }
  57.138      UseSharedSpaces = false;
  57.139  #ifdef _LP64
  57.140 -    if (!UseCompressedOops || !UseCompressedKlassPointers) {
  57.141 +    if (!UseCompressedOops || !UseCompressedClassPointers) {
  57.142        vm_exit_during_initialization(
  57.143 -        "Cannot dump shared archive when UseCompressedOops or UseCompressedKlassPointers is off.", NULL);
  57.144 +        "Cannot dump shared archive when UseCompressedOops or UseCompressedClassPointers is off.", NULL);
  57.145      }
  57.146    } else {
  57.147 -    // UseCompressedOops and UseCompressedKlassPointers must be on for UseSharedSpaces.
  57.148 -    if (!UseCompressedOops || !UseCompressedKlassPointers) {
  57.149 +    // UseCompressedOops and UseCompressedClassPointers must be on for UseSharedSpaces.
  57.150 +    if (!UseCompressedOops || !UseCompressedClassPointers) {
  57.151        no_shared_spaces();
  57.152      }
  57.153  #endif
  57.154 @@ -3558,6 +3588,11 @@
  57.155    no_shared_spaces();
  57.156  #endif // INCLUDE_CDS
  57.157  
  57.158 +  return JNI_OK;
  57.159 +}
  57.160 +
  57.161 +jint Arguments::apply_ergo() {
  57.162 +
  57.163    // Set flags based on ergonomics.
  57.164    set_ergonomics_flags();
  57.165  
  57.166 @@ -3633,7 +3668,7 @@
  57.167    FLAG_SET_DEFAULT(ProfileInterpreter, false);
  57.168    FLAG_SET_DEFAULT(UseBiasedLocking, false);
  57.169    LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
  57.170 -  LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedKlassPointers, false));
  57.171 +  LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedClassPointers, false));
  57.172  #endif // CC_INTERP
  57.173  
  57.174  #ifdef COMPILER2
  57.175 @@ -3662,6 +3697,10 @@
  57.176      DebugNonSafepoints = true;
  57.177    }
  57.178  
  57.179 +  if (FLAG_IS_CMDLINE(CompressedClassSpaceSize) && !UseCompressedClassPointers) {
  57.180 +    warning("Setting CompressedClassSpaceSize has no effect when compressed class pointers are not used");
  57.181 +  }
  57.182 +
  57.183  #ifndef PRODUCT
  57.184    if (CompileTheWorld) {
  57.185      // Force NmethodSweeper to sweep whole CodeCache each time.
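
max_heap_for_compressed_oops() now subtracts a NULL page that has been padded up to the conservative maximum heap alignment, which in turn is the maximum of the GC's own requirement, the largest usable page size, and the collector policy alignment. A standalone sketch of that arithmetic with illustrative LP64 constants (not the actual flag processing):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    static size_t align_size_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);   // alignment: power of two
    }

    int main() {
      const size_t OopEncodingHeapMax = 32ULL * 1024 * 1024 * 1024; // 8-byte oops, shift 3
      size_t gc_heap_alignment = 2 * 1024 * 1024;    // e.g. generation/region alignment
      size_t max_page_size     = 1024 * 1024 * 1024; // largest large page the OS offers
      size_t policy_alignment  = 2 * 1024 * 1024;    // collector policy alignment

      size_t conservative = std::max(gc_heap_alignment,
                                     std::max(max_page_size, policy_alignment));
      size_t null_page_displacement = align_size_up(4096 /* vm_page_size */, conservative);
      std::printf("max heap for compressed oops: %zu bytes\n",
                  OopEncodingHeapMax - null_page_displacement);
      return 0;
    }
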
    58.1 --- a/src/share/vm/runtime/arguments.hpp	Wed Sep 18 12:52:15 2013 -0400
    58.2 +++ b/src/share/vm/runtime/arguments.hpp	Thu Sep 19 09:26:08 2013 +0200
    58.3 @@ -280,6 +280,9 @@
    58.4    // Option flags
    58.5    static bool   _has_profile;
    58.6    static const char*  _gc_log_filename;
    58.7 +  // Value of the conservative maximum heap alignment needed
    58.8 +  static size_t  _conservative_max_heap_alignment;
    58.9 +
   58.10    static uintx  _min_heap_size;
   58.11  
   58.12    // -Xrun arguments
   58.13 @@ -327,6 +330,7 @@
   58.14    // Garbage-First (UseG1GC)
   58.15    static void set_g1_gc_flags();
   58.16    // GC ergonomics
   58.17 +  static void set_conservative_max_heap_alignment();
   58.18    static void set_use_compressed_oops();
   58.19    static void set_use_compressed_klass_ptrs();
   58.20    static void set_ergonomics_flags();
   58.21 @@ -430,8 +434,10 @@
   58.22    static char*  SharedArchivePath;
   58.23  
   58.24   public:
   58.25 -  // Parses the arguments
   58.26 +  // Parses the arguments, first phase
   58.27    static jint parse(const JavaVMInitArgs* args);
   58.28 +  // Apply ergonomics
   58.29 +  static jint apply_ergo();
    58.30    // Adjusts the arguments after the OS has adjusted the arguments
   58.31    static jint adjust_after_os();
   58.32    // Check for consistency in the selection of the garbage collector.
   58.33 @@ -445,6 +451,10 @@
   58.34    // Used by os_solaris
   58.35    static bool process_settings_file(const char* file_name, bool should_exist, jboolean ignore_unrecognized);
   58.36  
   58.37 +  static size_t conservative_max_heap_alignment() { return _conservative_max_heap_alignment; }
   58.38 +  // Return the maximum size a heap with compressed oops can take
   58.39 +  static size_t max_heap_for_compressed_oops();
   58.40 +
   58.41    // return a char* array containing all options
   58.42    static char** jvm_flags_array()          { return _jvm_flags_array; }
   58.43    static char** jvm_args_array()           { return _jvm_args_array; }
    59.1 --- a/src/share/vm/runtime/globals.hpp	Wed Sep 18 12:52:15 2013 -0400
    59.2 +++ b/src/share/vm/runtime/globals.hpp	Thu Sep 19 09:26:08 2013 +0200
    59.3 @@ -443,8 +443,8 @@
    59.4              "Use 32-bit object references in 64-bit VM  "                   \
    59.5              "lp64_product means flag is always constant in 32 bit VM")      \
    59.6                                                                              \
    59.7 -  lp64_product(bool, UseCompressedKlassPointers, false,                     \
    59.8 -            "Use 32-bit klass pointers in 64-bit VM  "                      \
    59.9 +  lp64_product(bool, UseCompressedClassPointers, false,                     \
   59.10 +            "Use 32-bit class pointers in 64-bit VM  "                      \
   59.11              "lp64_product means flag is always constant in 32 bit VM")      \
   59.12                                                                              \
   59.13    notproduct(bool, CheckCompressedOops, true,                               \
   59.14 @@ -3039,9 +3039,9 @@
   59.15    product(uintx, MaxMetaspaceSize, max_uintx,                               \
   59.16            "Maximum size of Metaspaces (in bytes)")                          \
   59.17                                                                              \
   59.18 -  product(uintx, ClassMetaspaceSize, 1*G,                                   \
   59.19 -          "Maximum size of InstanceKlass area in Metaspace used for "       \
   59.20 -          "UseCompressedKlassPointers")                                     \
   59.21 +  product(uintx, CompressedClassSpaceSize, 1*G,                             \
   59.22 +          "Maximum size of class area in Metaspace when compressed "        \
   59.23 +          "class pointers are used")                                        \
   59.24                                                                              \
   59.25    product(uintx, MinHeapFreeRatio,    40,                                   \
   59.26            "Min percentage of heap free after GC to avoid expansion")        \
    60.1 --- a/src/share/vm/runtime/os.cpp	Wed Sep 18 12:52:15 2013 -0400
    60.2 +++ b/src/share/vm/runtime/os.cpp	Thu Sep 19 09:26:08 2013 +0200
    60.3 @@ -314,6 +314,11 @@
    60.4    }
    60.5  }
    60.6  
    60.7 +void os::init_before_ergo() {
    60.8 +  // We need to initialize large page support here because ergonomics takes some
    60.9 +  // decisions depending on large page support and the calculated large page size.
   60.10 +  large_page_init();
   60.11 +}
   60.12  
   60.13  void os::signal_init() {
   60.14    if (!ReduceSignalUsage) {
    61.1 --- a/src/share/vm/runtime/os.hpp	Wed Sep 18 12:52:15 2013 -0400
    61.2 +++ b/src/share/vm/runtime/os.hpp	Thu Sep 19 09:26:08 2013 +0200
    61.3 @@ -139,7 +139,10 @@
    61.4  
    61.5   public:
    61.6    static void init(void);                      // Called before command line parsing
    61.7 +  static void init_before_ergo(void);          // Called after command line parsing
    61.8 +                                               // before VM ergonomics processing.
    61.9    static jint init_2(void);                    // Called after command line parsing
   61.10 +                                               // and VM ergonomics processing
   61.11    static void init_globals(void) {             // Called from init_globals() in init.cpp
   61.12      init_globals_ext();
   61.13    }
   61.14 @@ -254,6 +257,11 @@
   61.15    static size_t page_size_for_region(size_t region_min_size,
   61.16                                       size_t region_max_size,
   61.17                                       uint min_pages);
   61.18 +  // Return the largest page size that can be used
   61.19 +  static size_t max_page_size() {
   61.20 +    // The _page_sizes array is sorted in descending order.
   61.21 +    return _page_sizes[0];
   61.22 +  }
   61.23  
   61.24    // Methods for tracing page sizes returned by the above method; enabled by
   61.25    // TracePageSizes.  The region_{min,max}_size parameters should be the values
    62.1 --- a/src/share/vm/runtime/thread.cpp	Wed Sep 18 12:52:15 2013 -0400
    62.2 +++ b/src/share/vm/runtime/thread.cpp	Thu Sep 19 09:26:08 2013 +0200
    62.3 @@ -3331,6 +3331,11 @@
    62.4    jint parse_result = Arguments::parse(args);
    62.5    if (parse_result != JNI_OK) return parse_result;
    62.6  
    62.7 +  os::init_before_ergo();
    62.8 +
    62.9 +  jint ergo_result = Arguments::apply_ergo();
   62.10 +  if (ergo_result != JNI_OK) return ergo_result;
   62.11 +
   62.12    if (PauseAtStartup) {
   62.13      os::pause();
   62.14    }
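
The startup sequence is now split so that large-page initialization happens between argument parsing and ergonomics. A plain standalone sketch of that ordering (stand-in functions; JNI_OK mirrors jni.h):

    #include <cstdio>

    enum { JNI_OK = 0 };

    static int  parse_arguments()  { std::puts("parse command line");                 return JNI_OK; }
    static void init_before_ergo() { std::puts("init large page support"); }
    static int  apply_ergo()       { std::puts("apply GC/compressed-oops ergonomics"); return JNI_OK; }

    int main() {
      int parse_result = parse_arguments();
      if (parse_result != JNI_OK) return parse_result;

      init_before_ergo();            // page sizes must be known before ergonomics runs

      int ergo_result = apply_ergo();
      if (ergo_result != JNI_OK) return ergo_result;
      return JNI_OK;
    }
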
    63.1 --- a/src/share/vm/runtime/virtualspace.cpp	Wed Sep 18 12:52:15 2013 -0400
    63.2 +++ b/src/share/vm/runtime/virtualspace.cpp	Thu Sep 19 09:26:08 2013 +0200
    63.3 @@ -453,6 +453,42 @@
    63.4    return reserved_size() - committed_size();
    63.5  }
    63.6  
    63.7 +size_t VirtualSpace::actual_committed_size() const {
    63.8 +  // Special VirtualSpaces commit all reserved space up front.
    63.9 +  if (special()) {
   63.10 +    return reserved_size();
   63.11 +  }
   63.12 +
   63.13 +  size_t committed_low    = pointer_delta(_lower_high,  _low_boundary,         sizeof(char));
   63.14 +  size_t committed_middle = pointer_delta(_middle_high, _lower_high_boundary,  sizeof(char));
   63.15 +  size_t committed_high   = pointer_delta(_upper_high,  _middle_high_boundary, sizeof(char));
   63.16 +
   63.17 +#ifdef ASSERT
   63.18 +  size_t lower  = pointer_delta(_lower_high_boundary,  _low_boundary,         sizeof(char));
   63.19 +  size_t middle = pointer_delta(_middle_high_boundary, _lower_high_boundary,  sizeof(char));
   63.20 +  size_t upper  = pointer_delta(_upper_high_boundary,  _middle_high_boundary, sizeof(char));
   63.21 +
   63.22 +  if (committed_high > 0) {
   63.23 +    assert(committed_low == lower, "Must be");
   63.24 +    assert(committed_middle == middle, "Must be");
   63.25 +  }
   63.26 +
   63.27 +  if (committed_middle > 0) {
   63.28 +    assert(committed_low == lower, "Must be");
   63.29 +  }
   63.30 +  if (committed_middle < middle) {
   63.31 +    assert(committed_high == 0, "Must be");
   63.32 +  }
   63.33 +
   63.34 +  if (committed_low < lower) {
   63.35 +    assert(committed_high == 0, "Must be");
   63.36 +    assert(committed_middle == 0, "Must be");
   63.37 +  }
   63.38 +#endif
   63.39 +
   63.40 +  return committed_low + committed_middle + committed_high;
   63.41 +}
   63.42 +
   63.43  
   63.44  bool VirtualSpace::contains(const void* p) const {
   63.45    return low() <= (const char*) p && (const char*) p < high();
   63.46 @@ -718,16 +754,19 @@
   63.47    assert(high() <= upper_high(), "upper high");
   63.48  }
   63.49  
   63.50 -void VirtualSpace::print() {
   63.51 -  tty->print   ("Virtual space:");
   63.52 -  if (special()) tty->print(" (pinned in memory)");
   63.53 -  tty->cr();
   63.54 -  tty->print_cr(" - committed: " SIZE_FORMAT, committed_size());
   63.55 -  tty->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
   63.56 -  tty->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
   63.57 -  tty->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
   63.58 +void VirtualSpace::print_on(outputStream* out) {
   63.59 +  out->print   ("Virtual space:");
   63.60 +  if (special()) out->print(" (pinned in memory)");
   63.61 +  out->cr();
   63.62 +  out->print_cr(" - committed: " SIZE_FORMAT, committed_size());
   63.63 +  out->print_cr(" - reserved:  " SIZE_FORMAT, reserved_size());
   63.64 +  out->print_cr(" - [low, high]:     [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low(), high());
   63.65 +  out->print_cr(" - [low_b, high_b]: [" INTPTR_FORMAT ", " INTPTR_FORMAT "]",  low_boundary(), high_boundary());
   63.66  }
   63.67  
   63.68 +void VirtualSpace::print() {
   63.69 +  print_on(tty);
   63.70 +}
   63.71  
   63.72  /////////////// Unit tests ///////////////
   63.73  
   63.74 @@ -910,6 +949,109 @@
   63.75    TestReservedSpace::test_reserved_space();
   63.76  }
   63.77  
   63.78 +#define assert_equals(actual, expected)     \
   63.79 +  assert(actual == expected,                \
   63.80 +    err_msg("Got " SIZE_FORMAT " expected " \
   63.81 +      SIZE_FORMAT, actual, expected));
   63.82 +
   63.83 +#define assert_ge(value1, value2)                  \
   63.84 +  assert(value1 >= value2,                         \
   63.85 +    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
   63.86 +      #value2 "': " SIZE_FORMAT, value1, value2));
   63.87 +
   63.88 +#define assert_lt(value1, value2)                  \
   63.89 +  assert(value1 < value2,                          \
   63.90 +    err_msg("'" #value1 "': " SIZE_FORMAT " '"     \
   63.91 +      #value2 "': " SIZE_FORMAT, value1, value2));
   63.92 +
   63.93 +
   63.94 +class TestVirtualSpace : AllStatic {
   63.95 + public:
   63.96 +  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size) {
   63.97 +    size_t granularity = os::vm_allocation_granularity();
   63.98 +    size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
   63.99 +
  63.100 +    ReservedSpace reserved(reserve_size_aligned);
  63.101 +
  63.102 +    assert(reserved.is_reserved(), "Must be");
  63.103 +
  63.104 +    VirtualSpace vs;
  63.105 +    bool initialized = vs.initialize(reserved, 0);
  63.106 +    assert(initialized, "Failed to initialize VirtualSpace");
  63.107 +
  63.108 +    vs.expand_by(commit_size, false);
  63.109 +
  63.110 +    if (vs.special()) {
  63.111 +      assert_equals(vs.actual_committed_size(), reserve_size_aligned);
  63.112 +    } else {
  63.113 +      assert_ge(vs.actual_committed_size(), commit_size);
  63.114 +      // Approximate the commit granularity.
  63.115 +      size_t commit_granularity = UseLargePages ? os::large_page_size() : os::vm_page_size();
  63.116 +      assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
  63.117 +    }
  63.118 +
  63.119 +    reserved.release();
  63.120 +  }
  63.121 +
  63.122 +  static void test_virtual_space_actual_committed_space_one_large_page() {
  63.123 +    if (!UseLargePages) {
  63.124 +      return;
  63.125 +    }
  63.126 +
  63.127 +    size_t large_page_size = os::large_page_size();
  63.128 +
  63.129 +    ReservedSpace reserved(large_page_size, large_page_size, true, false);
  63.130 +
  63.131 +    assert(reserved.is_reserved(), "Must be");
  63.132 +
  63.133 +    VirtualSpace vs;
  63.134 +    bool initialized = vs.initialize(reserved, 0);
  63.135 +    assert(initialized, "Failed to initialize VirtualSpace");
  63.136 +
  63.137 +    vs.expand_by(large_page_size, false);
  63.138 +
  63.139 +    assert_equals(vs.actual_committed_size(), large_page_size);
  63.140 +
  63.141 +    reserved.release();
  63.142 +  }
  63.143 +
  63.144 +  static void test_virtual_space_actual_committed_space() {
  63.145 +    test_virtual_space_actual_committed_space(4 * K, 0);
  63.146 +    test_virtual_space_actual_committed_space(4 * K, 4 * K);
  63.147 +    test_virtual_space_actual_committed_space(8 * K, 0);
  63.148 +    test_virtual_space_actual_committed_space(8 * K, 4 * K);
  63.149 +    test_virtual_space_actual_committed_space(8 * K, 8 * K);
  63.150 +    test_virtual_space_actual_committed_space(12 * K, 0);
  63.151 +    test_virtual_space_actual_committed_space(12 * K, 4 * K);
  63.152 +    test_virtual_space_actual_committed_space(12 * K, 8 * K);
  63.153 +    test_virtual_space_actual_committed_space(12 * K, 12 * K);
  63.154 +    test_virtual_space_actual_committed_space(64 * K, 0);
  63.155 +    test_virtual_space_actual_committed_space(64 * K, 32 * K);
  63.156 +    test_virtual_space_actual_committed_space(64 * K, 64 * K);
  63.157 +    test_virtual_space_actual_committed_space(2 * M, 0);
  63.158 +    test_virtual_space_actual_committed_space(2 * M, 4 * K);
  63.159 +    test_virtual_space_actual_committed_space(2 * M, 64 * K);
  63.160 +    test_virtual_space_actual_committed_space(2 * M, 1 * M);
  63.161 +    test_virtual_space_actual_committed_space(2 * M, 2 * M);
  63.162 +    test_virtual_space_actual_committed_space(10 * M, 0);
  63.163 +    test_virtual_space_actual_committed_space(10 * M, 4 * K);
  63.164 +    test_virtual_space_actual_committed_space(10 * M, 8 * K);
  63.165 +    test_virtual_space_actual_committed_space(10 * M, 1 * M);
  63.166 +    test_virtual_space_actual_committed_space(10 * M, 2 * M);
  63.167 +    test_virtual_space_actual_committed_space(10 * M, 5 * M);
  63.168 +    test_virtual_space_actual_committed_space(10 * M, 10 * M);
  63.169 +  }
  63.170 +
  63.171 +  static void test_virtual_space() {
  63.172 +    test_virtual_space_actual_committed_space();
  63.173 +    test_virtual_space_actual_committed_space_one_large_page();
  63.174 +  }
  63.175 +};
  63.176 +
  63.177 +void TestVirtualSpace_test() {
  63.178 +  TestVirtualSpace::test_virtual_space();
  63.179 +}
  63.180 +
  63.181  #endif // PRODUCT
  63.182  
  63.183  #endif
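
actual_committed_size() reports what the OS has really committed by summing the committed prefix of the lower, middle and upper regions of the virtual space. A standalone model of that bookkeeping (illustrative sizes, not HotSpot code):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    struct Region { size_t size; size_t committed; };   // committed <= size

    static size_t actual_committed_size(const Region r[3]) {
      // Commit grows from the bottom, so a later region can only hold committed
      // memory if every earlier region is fully committed.
      for (int i = 0; i < 3; i++) assert(r[i].committed <= r[i].size);
      return r[0].committed + r[1].committed + r[2].committed;
    }

    int main() {
      Region vs[3] = { {64 * 1024, 64 * 1024},         // lower: small pages, fully committed
                       {2 * 1024 * 1024, 1024 * 1024}, // middle: large pages, half committed
                       {64 * 1024, 0} };               // upper: small pages, uncommitted
      std::printf("actual committed: %zu bytes\n", actual_committed_size(vs));
      return 0;
    }
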
    64.1 --- a/src/share/vm/runtime/virtualspace.hpp	Wed Sep 18 12:52:15 2013 -0400
    64.2 +++ b/src/share/vm/runtime/virtualspace.hpp	Thu Sep 19 09:26:08 2013 +0200
    64.3 @@ -183,11 +183,16 @@
    64.4    // Destruction
    64.5    ~VirtualSpace();
    64.6  
    64.7 -  // Testers (all sizes are byte sizes)
    64.8 -  size_t committed_size()   const;
    64.9 -  size_t reserved_size()    const;
   64.10 +  // Reserved memory
   64.11 +  size_t reserved_size() const;
   64.12 +  // Actually committed OS memory
   64.13 +  size_t actual_committed_size() const;
   64.14 +  // Memory used/expanded in this virtual space
   64.15 +  size_t committed_size() const;
   64.16 +  // Memory left to use/expand in this virtual space
   64.17    size_t uncommitted_size() const;
   64.18 -  bool   contains(const void* p)  const;
   64.19 +
   64.20 +  bool   contains(const void* p) const;
   64.21  
   64.22    // Operations
   64.23    // returns true on success, false otherwise
   64.24 @@ -198,7 +203,8 @@
   64.25    void check_for_contiguity() PRODUCT_RETURN;
   64.26  
   64.27    // Debugging
   64.28 -  void print() PRODUCT_RETURN;
   64.29 +  void print_on(outputStream* out) PRODUCT_RETURN;
   64.30 +  void print();
   64.31  };
   64.32  
   64.33  #endif // SHARE_VM_RUNTIME_VIRTUALSPACE_HPP
    65.1 --- a/src/share/vm/services/memoryPool.cpp	Wed Sep 18 12:52:15 2013 -0400
    65.2 +++ b/src/share/vm/services/memoryPool.cpp	Thu Sep 19 09:26:08 2013 +0200
    65.3 @@ -260,10 +260,10 @@
    65.4  }
    65.5  
    65.6  MetaspacePool::MetaspacePool() :
    65.7 -  MemoryPool("Metaspace", NonHeap, capacity_in_bytes(), calculate_max_size(), true, false) { }
    65.8 +  MemoryPool("Metaspace", NonHeap, 0, calculate_max_size(), true, false) { }
    65.9  
   65.10  MemoryUsage MetaspacePool::get_memory_usage() {
   65.11 -  size_t committed = align_size_down_(capacity_in_bytes(), os::vm_page_size());
   65.12 +  size_t committed = MetaspaceAux::committed_bytes();
   65.13    return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
   65.14  }
   65.15  
   65.16 @@ -271,26 +271,19 @@
   65.17    return MetaspaceAux::allocated_used_bytes();
   65.18  }
   65.19  
   65.20 -size_t MetaspacePool::capacity_in_bytes() const {
   65.21 -  return MetaspaceAux::allocated_capacity_bytes();
   65.22 -}
   65.23 -
   65.24  size_t MetaspacePool::calculate_max_size() const {
   65.25 -  return FLAG_IS_CMDLINE(MaxMetaspaceSize) ? MaxMetaspaceSize : max_uintx;
   65.26 +  return FLAG_IS_CMDLINE(MaxMetaspaceSize) ? MaxMetaspaceSize :
   65.27 +                                             MemoryUsage::undefined_size();
   65.28  }
   65.29  
   65.30  CompressedKlassSpacePool::CompressedKlassSpacePool() :
   65.31 -  MemoryPool("Compressed Class Space", NonHeap, capacity_in_bytes(), ClassMetaspaceSize, true, false) { }
   65.32 +  MemoryPool("Compressed Class Space", NonHeap, 0, CompressedClassSpaceSize, true, false) { }
   65.33  
   65.34  size_t CompressedKlassSpacePool::used_in_bytes() {
   65.35    return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType);
   65.36  }
   65.37  
   65.38 -size_t CompressedKlassSpacePool::capacity_in_bytes() const {
   65.39 -  return MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
   65.40 -}
   65.41 -
   65.42  MemoryUsage CompressedKlassSpacePool::get_memory_usage() {
   65.43 -  size_t committed = align_size_down_(capacity_in_bytes(), os::vm_page_size());
   65.44 +  size_t committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
   65.45    return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size());
   65.46  }
    66.1 --- a/src/share/vm/services/memoryPool.hpp	Wed Sep 18 12:52:15 2013 -0400
    66.2 +++ b/src/share/vm/services/memoryPool.hpp	Thu Sep 19 09:26:08 2013 +0200
    66.3 @@ -224,7 +224,6 @@
    66.4  
    66.5  class MetaspacePool : public MemoryPool {
    66.6    size_t calculate_max_size() const;
    66.7 -  size_t capacity_in_bytes() const;
    66.8   public:
    66.9    MetaspacePool();
   66.10    MemoryUsage get_memory_usage();
   66.11 @@ -232,7 +231,6 @@
   66.12  };
   66.13  
   66.14  class CompressedKlassSpacePool : public MemoryPool {
   66.15 -  size_t capacity_in_bytes() const;
   66.16   public:
   66.17    CompressedKlassSpacePool();
   66.18    MemoryUsage get_memory_usage();
    67.1 --- a/src/share/vm/services/memoryService.cpp	Wed Sep 18 12:52:15 2013 -0400
    67.2 +++ b/src/share/vm/services/memoryService.cpp	Thu Sep 19 09:26:08 2013 +0200
    67.3 @@ -409,7 +409,7 @@
    67.4    mgr->add_pool(_metaspace_pool);
    67.5    _pools_list->append(_metaspace_pool);
    67.6  
    67.7 -  if (UseCompressedKlassPointers) {
    67.8 +  if (UseCompressedClassPointers) {
    67.9      _compressed_class_pool = new CompressedKlassSpacePool();
   67.10      mgr->add_pool(_compressed_class_pool);
   67.11      _pools_list->append(_compressed_class_pool);
    68.1 --- a/src/share/vm/services/memoryUsage.hpp	Wed Sep 18 12:52:15 2013 -0400
    68.2 +++ b/src/share/vm/services/memoryUsage.hpp	Thu Sep 19 09:26:08 2013 +0200
    68.3 @@ -63,10 +63,12 @@
    68.4    size_t committed() const { return _committed; }
    68.5    size_t max_size()  const { return _maxSize; }
    68.6  
    68.7 +  static size_t undefined_size() { return (size_t) -1; }
    68.8 +
    68.9    inline static jlong convert_to_jlong(size_t val) {
   68.10      // In the 64-bit vm, a size_t can overflow a jlong (which is signed).
   68.11      jlong ret;
   68.12 -    if (val == (size_t)-1) {
   68.13 +    if (val == undefined_size()) {
   68.14        ret = -1L;
   68.15      } else {
   68.16        NOT_LP64(ret = val;)
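
undefined_size() gives the "no limit" sentinel a name; it still converts to -1L on the Java side. A small standalone sketch of that conversion (stand-in jlong type, 64-bit assumed):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    typedef int64_t jlong_t;                       // stand-in for jlong

    static size_t undefined_size() { return (size_t)-1; }

    static jlong_t convert_to_jlong(size_t val) {
      return (val == undefined_size()) ? -1LL : (jlong_t)val;
    }

    int main() {
      std::printf("MaxMetaspaceSize unset -> max = %lld\n",
                  (long long)convert_to_jlong(undefined_size()));
      std::printf("MaxMetaspaceSize=256m  -> max = %lld\n",
                  (long long)convert_to_jlong(256u * 1024 * 1024));
      return 0;
    }
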
    69.1 --- a/src/share/vm/utilities/bitMap.inline.hpp	Wed Sep 18 12:52:15 2013 -0400
    69.2 +++ b/src/share/vm/utilities/bitMap.inline.hpp	Thu Sep 19 09:26:08 2013 +0200
    69.3 @@ -52,16 +52,16 @@
    69.4  
    69.5  inline bool BitMap::par_set_bit(idx_t bit) {
    69.6    verify_index(bit);
    69.7 -  volatile idx_t* const addr = word_addr(bit);
    69.8 -  const idx_t mask = bit_mask(bit);
    69.9 -  idx_t old_val = *addr;
   69.10 +  volatile bm_word_t* const addr = word_addr(bit);
   69.11 +  const bm_word_t mask = bit_mask(bit);
   69.12 +  bm_word_t old_val = *addr;
   69.13  
   69.14    do {
   69.15 -    const idx_t new_val = old_val | mask;
   69.16 +    const bm_word_t new_val = old_val | mask;
   69.17      if (new_val == old_val) {
   69.18        return false;     // Someone else beat us to it.
   69.19      }
   69.20 -    const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val,
   69.21 +    const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val,
   69.22                                                        (volatile void*) addr,
   69.23                                                        (void*) old_val);
   69.24      if (cur_val == old_val) {
   69.25 @@ -73,16 +73,16 @@
   69.26  
   69.27  inline bool BitMap::par_clear_bit(idx_t bit) {
   69.28    verify_index(bit);
   69.29 -  volatile idx_t* const addr = word_addr(bit);
   69.30 -  const idx_t mask = ~bit_mask(bit);
   69.31 -  idx_t old_val = *addr;
   69.32 +  volatile bm_word_t* const addr = word_addr(bit);
   69.33 +  const bm_word_t mask = ~bit_mask(bit);
   69.34 +  bm_word_t old_val = *addr;
   69.35  
   69.36    do {
   69.37 -    const idx_t new_val = old_val & mask;
   69.38 +    const bm_word_t new_val = old_val & mask;
   69.39      if (new_val == old_val) {
   69.40        return false;     // Someone else beat us to it.
   69.41      }
   69.42 -    const idx_t cur_val = (idx_t) Atomic::cmpxchg_ptr((void*) new_val,
   69.43 +    const bm_word_t cur_val = (bm_word_t) Atomic::cmpxchg_ptr((void*) new_val,
   69.44                                                        (volatile void*) addr,
   69.45                                                        (void*) old_val);
   69.46      if (cur_val == old_val) {
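
The bitmap cleanup above only changes the spelled-out word type, not the lock-free algorithm. A standalone sketch of the same set-bit CAS loop using std::atomic instead of Atomic::cmpxchg_ptr:

    #include <atomic>
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    typedef uintptr_t bm_word_t;

    static bool par_set_bit(std::atomic<bm_word_t>& word, unsigned bit_in_word) {
      const bm_word_t mask = (bm_word_t)1 << bit_in_word;
      bm_word_t old_val = word.load(std::memory_order_relaxed);
      do {
        if (old_val & mask) {
          return false;              // someone else beat us to it
        }
        // compare_exchange_weak reloads old_val on failure, then we retry.
      } while (!word.compare_exchange_weak(old_val, old_val | mask));
      return true;                   // we set the bit
    }

    int main() {
      std::atomic<bm_word_t> word(0);
      assert(par_set_bit(word, 5) == true);
      assert(par_set_bit(word, 5) == false);
      std::printf("word = 0x%lx\n", (unsigned long)word.load());
      return 0;
    }
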
    70.1 --- a/test/TEST.groups	Wed Sep 18 12:52:15 2013 -0400
    70.2 +++ b/test/TEST.groups	Thu Sep 19 09:26:08 2013 +0200
    70.3 @@ -62,7 +62,7 @@
    70.4  #
    70.5  needs_jdk = \
    70.6    gc/TestG1ZeroPGCTJcmdThreadPrint.java \
    70.7 -  gc/metaspace/ClassMetaspaceSizeInJmapHeap.java \
    70.8 +  gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java \
    70.9    gc/metaspace/TestMetaspacePerfCounters.java \
   70.10    runtime/6819213/TestBootNativeLibraryPath.java \
   70.11    runtime/6878713/Test6878713.sh \
    71.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    71.2 +++ b/test/gc/TestObjectAlignment.java	Thu Sep 19 09:26:08 2013 +0200
    71.3 @@ -0,0 +1,65 @@
    71.4 +/*
    71.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    71.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    71.7 + *
    71.8 + * This code is free software; you can redistribute it and/or modify it
    71.9 + * under the terms of the GNU General Public License version 2 only, as
   71.10 + * published by the Free Software Foundation.
   71.11 + *
   71.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   71.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   71.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   71.15 + * version 2 for more details (a copy is included in the LICENSE file that
   71.16 + * accompanied this code).
   71.17 + *
   71.18 + * You should have received a copy of the GNU General Public License version
   71.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   71.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   71.21 + *
   71.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   71.23 + * or visit www.oracle.com if you need additional information or have any
   71.24 + * questions.
   71.25 + */
   71.26 +
   71.27 +/**
   71.28 + * @test TestObjectAlignment
   71.29 + * @key gc
   71.30 + * @bug 8021823
   71.31 + * @summary G1: Concurrent marking crashes with -XX:ObjectAlignmentInBytes>=32 in 64bit VMs
   71.32 + * @library /testlibrary
   71.33 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=8
   71.34 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=16
   71.35 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=32
   71.36 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=64
   71.37 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=128
   71.38 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:+ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=256
   71.39 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=8
   71.40 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=16
   71.41 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=32
   71.42 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=64
   71.43 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=128
   71.44 + * @run main/othervm TestObjectAlignment -Xmx20M -XX:-ExplicitGCInvokesConcurrent -XX:+IgnoreUnrecognizedVMOptions -XX:ObjectAlignmentInBytes=256
   71.45 + */
   71.46 +
   71.47 +import com.oracle.java.testlibrary.ProcessTools;
   71.48 +import com.oracle.java.testlibrary.OutputAnalyzer;
   71.49 +
   71.50 +public class TestObjectAlignment {
   71.51 +
   71.52 +  public static byte[] garbage;
   71.53 +
   71.54 +  private static boolean runsOn32bit() {
   71.55 +    return System.getProperty("sun.arch.data.model").equals("32");
   71.56 +  }
   71.57 +
   71.58 +  public static void main(String[] args) throws Exception {
   71.59 +    if (runsOn32bit()) {
   71.60 +      // 32 bit VMs do not allow setting ObjectAlignmentInBytes, so there is nothing to test. We still get called.
   71.61 +      return;
   71.62 +    }
   71.63 +    for (int i = 0; i < 10; i++) {
   71.64 +      garbage = new byte[1000];
   71.65 +      System.gc();
   71.66 +    }
   71.67 +  }
   71.68 +}
    72.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    72.2 +++ b/test/gc/arguments/TestAlignmentToUseLargePages.java	Thu Sep 19 09:26:08 2013 +0200
    72.3 @@ -0,0 +1,47 @@
    72.4 +/*
    72.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    72.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    72.7 + *
    72.8 + * This code is free software; you can redistribute it and/or modify it
    72.9 + * under the terms of the GNU General Public License version 2 only, as
   72.10 + * published by the Free Software Foundation.
   72.11 + *
   72.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   72.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   72.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   72.15 + * version 2 for more details (a copy is included in the LICENSE file that
   72.16 + * accompanied this code).
   72.17 + *
   72.18 + * You should have received a copy of the GNU General Public License version
   72.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   72.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   72.21 + *
   72.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   72.23 + * or visit www.oracle.com if you need additional information or have any
   72.24 + * questions.
   72.25 + */
   72.26 +
   72.27 +/**
   72.28 + * @test TestAlignmentToUseLargePages
    72.29 + * @summary All parallel GC variants may use large pages without requiring that the
    72.30 + * heap alignment be large-page aligned. Other collectors also need to start up with odd-sized heaps.
   72.31 + * @bug 8024396
   72.32 + * @key gc
   72.33 + * @key regression
   72.34 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages
   72.35 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:-UseParallelOldGC -XX:-UseLargePages TestAlignmentToUseLargePages
   72.36 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:+UseLargePages TestAlignmentToUseLargePages
   72.37 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseParallelGC -XX:+UseParallelOldGC -XX:-UseLargePages TestAlignmentToUseLargePages
   72.38 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseSerialGC -XX:+UseLargePages TestAlignmentToUseLargePages
   72.39 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseSerialGC -XX:-UseLargePages TestAlignmentToUseLargePages
   72.40 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseConcMarkSweepGC -XX:+UseLargePages TestAlignmentToUseLargePages
   72.41 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseConcMarkSweepGC -XX:-UseLargePages TestAlignmentToUseLargePages
   72.42 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseG1GC -XX:+UseLargePages TestAlignmentToUseLargePages
   72.43 + * @run main/othervm -Xms7M -Xmx9M -XX:+UseG1GC -XX:-UseLargePages TestAlignmentToUseLargePages
   72.44 + */
   72.45 +
   72.46 +public class TestAlignmentToUseLargePages {
   72.47 +  public static void main(String args[]) throws Exception {
   72.48 +    // nothing to do
   72.49 +  }
   72.50 +}
    73.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    73.2 +++ b/test/gc/arguments/TestCompressedClassFlags.java	Thu Sep 19 09:26:08 2013 +0200
    73.3 @@ -0,0 +1,49 @@
    73.4 +/*
    73.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    73.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    73.7 + *
    73.8 + * This code is free software; you can redistribute it and/or modify it
    73.9 + * under the terms of the GNU General Public License version 2 only, as
   73.10 + * published by the Free Software Foundation.
   73.11 + *
   73.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   73.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   73.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   73.15 + * version 2 for more details (a copy is included in the LICENSE file that
   73.16 + * accompanied this code).
   73.17 + *
   73.18 + * You should have received a copy of the GNU General Public License version
   73.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   73.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   73.21 + *
   73.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   73.23 + * or visit www.oracle.com if you need additional information or have any
   73.24 + * questions.
   73.25 + */
   73.26 +
   73.27 +import com.oracle.java.testlibrary.*;
   73.28 +
   73.29 +/*
   73.30 + * @test
   73.31 + * @bug 8015107
    73.32 + * @summary Tests that the VM prints a warning when -XX:CompressedClassSpaceSize
    73.33 + *          is used together with -XX:-UseCompressedClassPointers
   73.34 + * @library /testlibrary
   73.35 + */
   73.36 +public class TestCompressedClassFlags {
   73.37 +    public static void main(String[] args) throws Exception {
   73.38 +        if (Platform.is64bit()) {
   73.39 +            OutputAnalyzer output = runJava("-XX:CompressedClassSpaceSize=1g",
   73.40 +                                            "-XX:-UseCompressedClassPointers",
   73.41 +                                            "-version");
   73.42 +            output.shouldContain("warning");
   73.43 +            output.shouldNotContain("error");
   73.44 +            output.shouldHaveExitValue(0);
   73.45 +        }
   73.46 +    }
   73.47 +
   73.48 +    private static OutputAnalyzer runJava(String ... args) throws Exception {
   73.49 +        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args);
   73.50 +        return new OutputAnalyzer(pb.start());
   73.51 +    }
   73.52 +}
    74.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    74.2 +++ b/test/gc/arguments/TestUseCompressedOopsErgo.java	Thu Sep 19 09:26:08 2013 +0200
    74.3 @@ -0,0 +1,50 @@
    74.4 +/*
    74.5 +* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    74.6 +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    74.7 +*
    74.8 +* This code is free software; you can redistribute it and/or modify it
    74.9 +* under the terms of the GNU General Public License version 2 only, as
   74.10 +* published by the Free Software Foundation.
   74.11 +*
   74.12 +* This code is distributed in the hope that it will be useful, but WITHOUT
   74.13 +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   74.14 +* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   74.15 +* version 2 for more details (a copy is included in the LICENSE file that
   74.16 +* accompanied this code).
   74.17 +*
   74.18 +* You should have received a copy of the GNU General Public License version
   74.19 +* 2 along with this work; if not, write to the Free Software Foundation,
   74.20 +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   74.21 +*
   74.22 +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   74.23 +* or visit www.oracle.com if you need additional information or have any
   74.24 +* questions.
   74.25 +*/
   74.26 +
   74.27 +/*
   74.28 + * @test TestUseCompressedOopsErgo
   74.29 + * @key gc
   74.30 + * @bug 8010722
   74.31 + * @summary Tests ergonomics for UseCompressedOops.
   74.32 + * @library /testlibrary /testlibrary/whitebox
   74.33 + * @build TestUseCompressedOopsErgo TestUseCompressedOopsErgoTools
   74.34 + * @run main ClassFileInstaller sun.hotspot.WhiteBox
   74.35 + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseG1GC
   74.36 + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseParallelGC
   74.37 + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseParallelGC -XX:-UseParallelOldGC
   74.38 + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseConcMarkSweepGC
   74.39 + * @run main/othervm TestUseCompressedOopsErgo -XX:+UseSerialGC
   74.40 + */
   74.41 +
   74.42 +public class TestUseCompressedOopsErgo {
   74.43 +
   74.44 +  public static void main(String args[]) throws Exception {
   74.45 +    if (!TestUseCompressedOopsErgoTools.is64bitVM()) {
   74.46 +      // this test is relevant for 64 bit VMs only
   74.47 +      return;
   74.48 +    }
   74.49 +    final String[] gcFlags = args;
   74.50 +    TestUseCompressedOopsErgoTools.checkCompressedOopsErgo(gcFlags);
   74.51 +  }
   74.52 +}
   74.53 +
    75.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    75.2 +++ b/test/gc/arguments/TestUseCompressedOopsErgoTools.java	Thu Sep 19 09:26:08 2013 +0200
    75.3 @@ -0,0 +1,177 @@
    75.4 +/*
    75.5 +* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    75.6 +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    75.7 +*
    75.8 +* This code is free software; you can redistribute it and/or modify it
    75.9 +* under the terms of the GNU General Public License version 2 only, as
   75.10 +* published by the Free Software Foundation.
   75.11 +*
   75.12 +* This code is distributed in the hope that it will be useful, but WITHOUT
   75.13 +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   75.14 +* FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   75.15 +* version 2 for more details (a copy is included in the LICENSE file that
   75.16 +* accompanied this code).
   75.17 +*
   75.18 +* You should have received a copy of the GNU General Public License version
   75.19 +* 2 along with this work; if not, write to the Free Software Foundation,
   75.20 +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   75.21 +*
   75.22 +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   75.23 +* or visit www.oracle.com if you need additional information or have any
   75.24 +* questions.
   75.25 +*/
   75.26 +
   75.27 +import sun.management.ManagementFactoryHelper;
   75.28 +import com.sun.management.HotSpotDiagnosticMXBean;
   75.29 +import com.sun.management.VMOption;
   75.30 +
   75.31 +import java.util.regex.Matcher;
   75.32 +import java.util.regex.Pattern;
   75.33 +import java.util.ArrayList;
   75.34 +import java.util.Arrays;
   75.35 +
   75.36 +import com.oracle.java.testlibrary.*;
   75.37 +import sun.hotspot.WhiteBox;
   75.38 +
   75.39 +class DetermineMaxHeapForCompressedOops {
   75.40 +  public static void main(String[] args) throws Exception {
   75.41 +    WhiteBox wb = WhiteBox.getWhiteBox();
   75.42 +    System.out.print(wb.getCompressedOopsMaxHeapSize());
   75.43 +  }
   75.44 +}
   75.45 +
   75.46 +class TestUseCompressedOopsErgoTools {
   75.47 +
   75.48 +  private static long getCompressedClassSpaceSize() {
   75.49 +    HotSpotDiagnosticMXBean diagnostic = ManagementFactoryHelper.getDiagnosticMXBean();
   75.50 +
   75.51 +    VMOption option = diagnostic.getVMOption("CompressedClassSpaceSize");
   75.52 +    return Long.parseLong(option.getValue());
   75.53 +  }
   75.54 +
   75.55 +
   75.56 +  public static long getMaxHeapForCompressedOops(String[] vmargs) throws Exception {
   75.57 +    OutputAnalyzer output = runWhiteBoxTest(vmargs, DetermineMaxHeapForCompressedOops.class.getName(), new String[] {}, false);
   75.58 +    return Long.parseLong(output.getStdout());
   75.59 +  }
   75.60 +
   75.61 +  public static boolean is64bitVM() {
   75.62 +    String val = System.getProperty("sun.arch.data.model");
   75.63 +    if (val == null) {
   75.64 +      throw new RuntimeException("Could not read sun.arch.data.model");
   75.65 +    }
   75.66 +    if (val.equals("64")) {
   75.67 +      return true;
   75.68 +    } else if (val.equals("32")) {
   75.69 +      return false;
   75.70 +    }
   75.71 +    throw new RuntimeException("Unexpected value " + val + " of sun.arch.data.model");
   75.72 +  }
   75.73 +
   75.74 +  /**
   75.75 +   * Executes a new VM process with the given class and parameters.
   75.76 +   * @param vmargs Arguments to the VM to run
   75.77 +   * @param classname Name of the class to run
   75.78 +   * @param arguments Arguments to the class
   75.79 +   * @param useTestDotJavaDotOpts Use test.java.opts as part of the VM argument string
   75.80 +   * @return The OutputAnalyzer with the results for the invocation.
   75.81 +   */
   75.82 +  public static OutputAnalyzer runWhiteBoxTest(String[] vmargs, String classname, String[] arguments, boolean useTestDotJavaDotOpts) throws Exception {
   75.83 +    ArrayList<String> finalargs = new ArrayList<String>();
   75.84 +
   75.85 +    String[] whiteboxOpts = new String[] {
   75.86 +      "-Xbootclasspath/a:.",
   75.87 +      "-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
   75.88 +      "-cp", System.getProperty("java.class.path"),
   75.89 +    };
   75.90 +
   75.91 +    if (useTestDotJavaDotOpts) {
    75.92 +      // System.getProperty("test.java.opts") is '' if no options are set;
    75.93 +      // we need to skip such an empty result
   75.94 +      String[] externalVMOpts = new String[0];
   75.95 +      if (System.getProperty("test.java.opts") != null && System.getProperty("test.java.opts").length() != 0) {
   75.96 +        externalVMOpts = System.getProperty("test.java.opts").split(" ");
   75.97 +      }
   75.98 +      finalargs.addAll(Arrays.asList(externalVMOpts));
   75.99 +    }
  75.100 +
  75.101 +    finalargs.addAll(Arrays.asList(vmargs));
  75.102 +    finalargs.addAll(Arrays.asList(whiteboxOpts));
  75.103 +    finalargs.add(classname);
  75.104 +    finalargs.addAll(Arrays.asList(arguments));
  75.105 +
  75.106 +    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(finalargs.toArray(new String[0]));
  75.107 +    OutputAnalyzer output = new OutputAnalyzer(pb.start());
  75.108 +    output.shouldHaveExitValue(0);
  75.109 +    return output;
  75.110 +  }
  75.111 +
  75.112 +  private static String[] join(String[] part1, String part2) {
  75.113 +    ArrayList<String> result = new ArrayList<String>();
  75.114 +    result.addAll(Arrays.asList(part1));
  75.115 +    result.add(part2);
  75.116 +    return result.toArray(new String[0]);
  75.117 +  }
  75.118 +
  75.119 +  public static void checkCompressedOopsErgo(String[] gcflags) throws Exception {
  75.120 +    long maxHeapForCompressedOops = getMaxHeapForCompressedOops(gcflags);
  75.121 +
  75.122 +    checkUseCompressedOops(gcflags, maxHeapForCompressedOops, true);
  75.123 +    checkUseCompressedOops(gcflags, maxHeapForCompressedOops - 1, true);
  75.124 +    checkUseCompressedOops(gcflags, maxHeapForCompressedOops + 1, false);
  75.125 +
  75.126 +    // the use of HeapBaseMinAddress should not change the outcome
  75.127 +    checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops, true);
  75.128 +    checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops - 1, true);
  75.129 +    checkUseCompressedOops(join(gcflags, "-XX:HeapBaseMinAddress=32G"), maxHeapForCompressedOops + 1, false);
  75.130 +
  75.131 +    // use a different object alignment
  75.132 +    maxHeapForCompressedOops = getMaxHeapForCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"));
  75.133 +
  75.134 +    checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops, true);
  75.135 +    checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops - 1, true);
  75.136 +    checkUseCompressedOops(join(gcflags, "-XX:ObjectAlignmentInBytes=16"), maxHeapForCompressedOops + 1, false);
  75.137 +
  75.138 +    // use a different CompressedClassSpaceSize
  75.139 +    String compressedClassSpaceSizeArg = "-XX:CompressedClassSpaceSize=" + 2 * getCompressedClassSpaceSize();
  75.140 +    maxHeapForCompressedOops = getMaxHeapForCompressedOops(join(gcflags, compressedClassSpaceSizeArg));
  75.141 +
  75.142 +    checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops, true);
  75.143 +    checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops - 1, true);
  75.144 +    checkUseCompressedOops(join(gcflags, compressedClassSpaceSizeArg), maxHeapForCompressedOops + 1, false);
  75.145 +  }
  75.146 +
  75.147 +  private static void checkUseCompressedOops(String[] args, long heapsize, boolean expectUseCompressedOops) throws Exception {
  75.148 +     ArrayList<String> finalargs = new ArrayList<String>();
  75.149 +     finalargs.addAll(Arrays.asList(args));
  75.150 +     finalargs.add("-Xmx" + heapsize);
  75.151 +     finalargs.add("-XX:+PrintFlagsFinal");
  75.152 +     finalargs.add("-version");
  75.153 +
  75.154 +     String output = expectValid(finalargs.toArray(new String[0]));
  75.155 +
  75.156 +     boolean actualUseCompressedOops = getFlagBoolValue(" UseCompressedOops", output);
  75.157 +
  75.158 +     Asserts.assertEQ(expectUseCompressedOops, actualUseCompressedOops);
  75.159 +  }
  75.160 +
  75.161 +  private static boolean getFlagBoolValue(String flag, String where) {
  75.162 +    Matcher m = Pattern.compile(flag + "\\s+:?= (true|false)").matcher(where);
  75.163 +    if (!m.find()) {
  75.164 +      throw new RuntimeException("Could not find value for flag " + flag + " in output string");
  75.165 +    }
  75.166 +    return m.group(1).equals("true");
  75.167 +  }
  75.168 +
  75.169 +  private static String expect(String[] flags, boolean hasWarning, boolean hasError, int errorcode) throws Exception {
  75.170 +    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(flags);
  75.171 +    OutputAnalyzer output = new OutputAnalyzer(pb.start());
  75.172 +    output.shouldHaveExitValue(errorcode);
  75.173 +    return output.getStdout();
  75.174 +  }
  75.175 +
  75.176 +  private static String expectValid(String[] flags) throws Exception {
  75.177 +    return expect(flags, false, false, 0);
  75.178 +  }
  75.179 +}
  75.180 +
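
The getFlagBoolValue() helper above depends on the line format printed by -XX:+PrintFlagsFinal. A self-contained sketch of the same parsing idea (hypothetical class name, not part of the changeset), run against a sample output line:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Hypothetical helper mirroring getFlagBoolValue(): read a bool flag from PrintFlagsFinal output.
    public class FlagOutputParser {
        static boolean boolFlag(String flag, String printFlagsFinalOutput) {
            Matcher m = Pattern.compile(" " + flag + "\\s+:?= (true|false)")
                               .matcher(printFlagsFinalOutput);
            if (!m.find()) {
                throw new RuntimeException("Could not find value for flag " + flag);
            }
            return m.group(1).equals("true");
        }

        public static void main(String[] args) {
            String sample = "     bool UseCompressedOops   := true   {lp64_product}";
            System.out.println(boolFlag("UseCompressedOops", sample)); // prints: true
        }
    }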
    76.1 --- a/test/gc/metaspace/ClassMetaspaceSizeInJmapHeap.java	Wed Sep 18 12:52:15 2013 -0400
    76.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    76.3 @@ -1,79 +0,0 @@
    76.4 -/*
    76.5 - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    76.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    76.7 - *
    76.8 - * This code is free software; you can redistribute it and/or modify it
    76.9 - * under the terms of the GNU General Public License version 2 only, as
   76.10 - * published by the Free Software Foundation.
   76.11 - *
   76.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   76.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   76.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   76.15 - * version 2 for more details (a copy is included in the LICENSE file that
   76.16 - * accompanied this code).
   76.17 - *
   76.18 - * You should have received a copy of the GNU General Public License version
   76.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   76.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   76.21 - *
   76.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   76.23 - * or visit www.oracle.com if you need additional information or have any
   76.24 - * questions.
   76.25 - */
   76.26 -
   76.27 -/*
   76.28 - * @test ClassMetaspaceSizeInJmapHeap
   76.29 - * @bug 8004924
   76.30 - * @summary Checks that jmap -heap contains the flag ClassMetaspaceSize
   76.31 - * @library /testlibrary
   76.32 - * @run main/othervm -XX:ClassMetaspaceSize=50m ClassMetaspaceSizeInJmapHeap
   76.33 - */
   76.34 -
   76.35 -import com.oracle.java.testlibrary.*;
   76.36 -import java.nio.file.*;
   76.37 -import java.io.File;
   76.38 -import java.nio.charset.Charset;
   76.39 -import java.util.List;
   76.40 -
   76.41 -public class ClassMetaspaceSizeInJmapHeap {
   76.42 -    public static void main(String[] args) throws Exception {
   76.43 -        String pid = Integer.toString(ProcessTools.getProcessId());
   76.44 -
   76.45 -        JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
   76.46 -                                              .addToolArg("-heap")
   76.47 -                                              .addToolArg(pid);
   76.48 -        ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
   76.49 -
   76.50 -        File out = new File("ClassMetaspaceSizeInJmapHeap.stdout.txt");
   76.51 -        pb.redirectOutput(out);
   76.52 -
   76.53 -        File err = new File("ClassMetaspaceSizeInJmapHeap.stderr.txt");
   76.54 -        pb.redirectError(err);
   76.55 -
   76.56 -        run(pb);
   76.57 -
   76.58 -        OutputAnalyzer output = new OutputAnalyzer(read(out));
   76.59 -        output.shouldContain("ClassMetaspaceSize = 52428800 (50.0MB)");
   76.60 -        out.delete();
   76.61 -    }
   76.62 -
   76.63 -    private static void run(ProcessBuilder pb) throws Exception {
   76.64 -        Process p = pb.start();
   76.65 -        p.waitFor();
   76.66 -        int exitValue = p.exitValue();
   76.67 -        if (exitValue != 0) {
   76.68 -            throw new Exception("jmap -heap exited with error code: " + exitValue);
   76.69 -        }
   76.70 -    }
   76.71 -
   76.72 -    private static String read(File f) throws Exception {
   76.73 -        Path p = f.toPath();
   76.74 -        List<String> lines = Files.readAllLines(p, Charset.defaultCharset());
   76.75 -
   76.76 -        StringBuilder sb = new StringBuilder();
   76.77 -        for (String line : lines) {
   76.78 -            sb.append(line).append('\n');
   76.79 -        }
   76.80 -        return sb.toString();
   76.81 -    }
   76.82 -}
    77.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    77.2 +++ b/test/gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java	Thu Sep 19 09:26:08 2013 +0200
    77.3 @@ -0,0 +1,79 @@
    77.4 +/*
    77.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    77.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    77.7 + *
    77.8 + * This code is free software; you can redistribute it and/or modify it
    77.9 + * under the terms of the GNU General Public License version 2 only, as
   77.10 + * published by the Free Software Foundation.
   77.11 + *
   77.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   77.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   77.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   77.15 + * version 2 for more details (a copy is included in the LICENSE file that
   77.16 + * accompanied this code).
   77.17 + *
   77.18 + * You should have received a copy of the GNU General Public License version
   77.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   77.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   77.21 + *
   77.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   77.23 + * or visit www.oracle.com if you need additional information or have any
   77.24 + * questions.
   77.25 + */
   77.26 +
   77.27 +/*
   77.28 + * @test CompressedClassSpaceSizeInJmapHeap
   77.29 + * @bug 8004924
   77.30 + * @summary Checks that jmap -heap contains the flag CompressedClassSpaceSize
   77.31 + * @library /testlibrary
   77.32 + * @run main/othervm -XX:CompressedClassSpaceSize=50m CompressedClassSpaceSizeInJmapHeap
   77.33 + */
   77.34 +
   77.35 +import com.oracle.java.testlibrary.*;
   77.36 +import java.nio.file.*;
   77.37 +import java.io.File;
   77.38 +import java.nio.charset.Charset;
   77.39 +import java.util.List;
   77.40 +
   77.41 +public class CompressedClassSpaceSizeInJmapHeap {
   77.42 +    public static void main(String[] args) throws Exception {
   77.43 +        String pid = Integer.toString(ProcessTools.getProcessId());
   77.44 +
   77.45 +        JDKToolLauncher jmap = JDKToolLauncher.create("jmap")
   77.46 +                                              .addToolArg("-heap")
   77.47 +                                              .addToolArg(pid);
   77.48 +        ProcessBuilder pb = new ProcessBuilder(jmap.getCommand());
   77.49 +
   77.50 +        File out = new File("CompressedClassSpaceSizeInJmapHeap.stdout.txt");
   77.51 +        pb.redirectOutput(out);
   77.52 +
   77.53 +        File err = new File("CompressedClassSpaceSizeInJmapHeap.stderr.txt");
   77.54 +        pb.redirectError(err);
   77.55 +
   77.56 +        run(pb);
   77.57 +
   77.58 +        OutputAnalyzer output = new OutputAnalyzer(read(out));
   77.59 +        output.shouldContain("CompressedClassSpaceSize = 52428800 (50.0MB)");
   77.60 +        out.delete();
   77.61 +    }
   77.62 +
   77.63 +    private static void run(ProcessBuilder pb) throws Exception {
   77.64 +        Process p = pb.start();
   77.65 +        p.waitFor();
   77.66 +        int exitValue = p.exitValue();
   77.67 +        if (exitValue != 0) {
   77.68 +            throw new Exception("jmap -heap exited with error code: " + exitValue);
   77.69 +        }
   77.70 +    }
   77.71 +
   77.72 +    private static String read(File f) throws Exception {
   77.73 +        Path p = f.toPath();
   77.74 +        List<String> lines = Files.readAllLines(p, Charset.defaultCharset());
   77.75 +
   77.76 +        StringBuilder sb = new StringBuilder();
   77.77 +        for (String line : lines) {
   77.78 +            sb.append(line).append('\n');
   77.79 +        }
   77.80 +        return sb.toString();
   77.81 +    }
   77.82 +}
    78.1 --- a/test/gc/metaspace/TestMetaspaceMemoryPool.java	Wed Sep 18 12:52:15 2013 -0400
    78.2 +++ b/test/gc/metaspace/TestMetaspaceMemoryPool.java	Thu Sep 19 09:26:08 2013 +0200
    78.3 @@ -22,55 +22,35 @@
    78.4   */
    78.5  
    78.6  import java.util.List;
    78.7 -import java.lang.management.ManagementFactory;
    78.8 -import java.lang.management.MemoryManagerMXBean;
    78.9 -import java.lang.management.MemoryPoolMXBean;
   78.10 -import java.lang.management.MemoryUsage;
   78.11 -
   78.12 -import java.lang.management.RuntimeMXBean;
   78.13 -import java.lang.management.ManagementFactory;
   78.14 +import java.lang.management.*;
   78.15 +import com.oracle.java.testlibrary.*;
   78.16 +import static com.oracle.java.testlibrary.Asserts.*;
   78.17  
   78.18  /* @test TestMetaspaceMemoryPool
   78.19   * @bug 8000754
    78.20   * @summary Tests that a MemoryPoolMXBean is created for metaspace and that a
   78.21   *          MemoryManagerMXBean is created.
   78.22 + * @library /testlibrary
   78.23   * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops TestMetaspaceMemoryPool
   78.24   * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:MaxMetaspaceSize=60m TestMetaspaceMemoryPool
   78.25 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers TestMetaspaceMemoryPool
   78.26 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:ClassMetaspaceSize=60m TestMetaspaceMemoryPool
   78.27 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers TestMetaspaceMemoryPool
   78.28 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:CompressedClassSpaceSize=60m TestMetaspaceMemoryPool
   78.29   */
   78.30  public class TestMetaspaceMemoryPool {
   78.31      public static void main(String[] args) {
   78.32          verifyThatMetaspaceMemoryManagerExists();
   78.33 -        verifyMemoryPool(getMemoryPool("Metaspace"), isFlagDefined("MaxMetaspaceSize"));
   78.34  
   78.35 -        if (runsOn64bit()) {
   78.36 -            if (usesCompressedOops()) {
   78.37 +        boolean isMetaspaceMaxDefined = InputArguments.containsPrefix("-XX:MaxMetaspaceSize");
   78.38 +        verifyMemoryPool(getMemoryPool("Metaspace"), isMetaspaceMaxDefined);
   78.39 +
   78.40 +        if (Platform.is64bit()) {
   78.41 +            if (InputArguments.contains("-XX:+UseCompressedOops")) {
   78.42                  MemoryPoolMXBean cksPool = getMemoryPool("Compressed Class Space");
   78.43                  verifyMemoryPool(cksPool, true);
   78.44              }
   78.45          }
   78.46      }
   78.47  
   78.48 -    private static boolean runsOn64bit() {
   78.49 -        return !System.getProperty("sun.arch.data.model").equals("32");
   78.50 -    }
   78.51 -
   78.52 -    private static boolean usesCompressedOops() {
   78.53 -        return isFlagDefined("+UseCompressedOops");
   78.54 -    }
   78.55 -
   78.56 -    private static boolean isFlagDefined(String name) {
   78.57 -        RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean();
   78.58 -        List<String> args = runtimeMxBean.getInputArguments();
   78.59 -        for (String arg : args) {
   78.60 -            if (arg.startsWith("-XX:" + name)) {
   78.61 -                return true;
   78.62 -            }
   78.63 -        }
   78.64 -        return false;
   78.65 -    }
   78.66 -
   78.67      private static void verifyThatMetaspaceMemoryManagerExists() {
   78.68          List<MemoryManagerMXBean> managers = ManagementFactory.getMemoryManagerMXBeans();
   78.69          for (MemoryManagerMXBean manager : managers) {
   78.70 @@ -95,32 +75,19 @@
   78.71  
   78.72      private static void verifyMemoryPool(MemoryPoolMXBean pool, boolean isMaxDefined) {
   78.73          MemoryUsage mu = pool.getUsage();
   78.74 -        assertDefined(mu.getInit(), "init");
   78.75 -        assertDefined(mu.getUsed(), "used");
   78.76 -        assertDefined(mu.getCommitted(), "committed");
   78.77 +        long init = mu.getInit();
   78.78 +        long used = mu.getUsed();
   78.79 +        long committed = mu.getCommitted();
   78.80 +        long max = mu.getMax();
   78.81 +
   78.82 +        assertGTE(init, 0L);
   78.83 +        assertGTE(used, init);
   78.84 +        assertGTE(committed, used);
   78.85  
   78.86          if (isMaxDefined) {
   78.87 -            assertDefined(mu.getMax(), "max");
   78.88 +            assertGTE(max, committed);
   78.89          } else {
   78.90 -            assertUndefined(mu.getMax(), "max");
   78.91 -        }
   78.92 -    }
   78.93 -
   78.94 -    private static void assertDefined(long value, String name) {
   78.95 -        assertTrue(value != -1, "Expected " + name + " to be defined");
   78.96 -    }
   78.97 -
   78.98 -    private static void assertUndefined(long value, String name) {
   78.99 -        assertEquals(value, -1, "Expected " + name + " to be undefined");
  78.100 -    }
  78.101 -
  78.102 -    private static void assertEquals(long actual, long expected, String msg) {
  78.103 -        assertTrue(actual == expected, msg);
  78.104 -    }
  78.105 -
  78.106 -    private static void assertTrue(boolean condition, String msg) {
  78.107 -        if (!condition) {
  78.108 -            throw new RuntimeException(msg);
  78.109 +            assertEQ(max, -1L);
  78.110          }
  78.111      }
  78.112  }
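
For comparison, a minimal standalone listing of the two pools this test inspects, using only the standard java.lang.management API (class name hypothetical, not part of the changeset):

    import java.lang.management.ManagementFactory;
    import java.lang.management.MemoryPoolMXBean;
    import java.lang.management.MemoryUsage;

    // Hypothetical example: print usage of the metaspace-related memory pools.
    public class ListMetaspacePools {
        public static void main(String[] args) {
            for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans()) {
                String name = pool.getName();
                if (name.equals("Metaspace") || name.equals("Compressed Class Space")) {
                    MemoryUsage mu = pool.getUsage();
                    System.out.println(name + ": init=" + mu.getInit()
                            + " used=" + mu.getUsed()
                            + " committed=" + mu.getCommitted()
                            + " max=" + mu.getMax()); // max is -1 when undefined
                }
            }
        }
    }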
    79.1 --- a/test/gc/metaspace/TestMetaspacePerfCounters.java	Wed Sep 18 12:52:15 2013 -0400
    79.2 +++ b/test/gc/metaspace/TestMetaspacePerfCounters.java	Thu Sep 19 09:26:08 2013 +0200
    79.3 @@ -33,13 +33,13 @@
    79.4   * @summary Tests that performance counters for metaspace and compressed class
    79.5   *          space exist and work.
    79.6   *
    79.7 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
    79.8 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
    79.9 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
   79.10 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
   79.11 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
   79.12 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
   79.13   *
   79.14 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
   79.15 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
   79.16 - * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
   79.17 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseSerialGC TestMetaspacePerfCounters
   79.18 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseParallelGC -XX:+UseParallelOldGC TestMetaspacePerfCounters
   79.19 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UsePerfData -XX:+UseG1GC TestMetaspacePerfCounters
   79.20   */
   79.21  public class TestMetaspacePerfCounters {
   79.22      public static Class fooClass = null;
   79.23 @@ -61,10 +61,15 @@
   79.24      }
   79.25  
   79.26      private static void checkPerfCounters(String ns) throws Exception {
   79.27 -        for (PerfCounter counter : countersInNamespace(ns)) {
   79.28 -            String msg = "Expected " + counter.getName() + " to be larger than 0";
   79.29 -            assertGT(counter.longValue(), 0L, msg);
   79.30 -        }
   79.31 +        long minCapacity = getMinCapacity(ns);
   79.32 +        long maxCapacity = getMaxCapacity(ns);
   79.33 +        long capacity = getCapacity(ns);
   79.34 +        long used = getUsed(ns);
   79.35 +
   79.36 +        assertGTE(minCapacity, 0L);
   79.37 +        assertGTE(used, minCapacity);
   79.38 +        assertGTE(capacity, used);
   79.39 +        assertGTE(maxCapacity, capacity);
   79.40      }
   79.41  
   79.42      private static void checkEmptyPerfCounters(String ns) throws Exception {
   79.43 @@ -75,12 +80,10 @@
   79.44      }
   79.45  
   79.46      private static void checkUsedIncreasesWhenLoadingClass(String ns) throws Exception {
   79.47 -        PerfCounter used = PerfCounters.findByName(ns + ".used");
   79.48 -
   79.49 -        long before = used.longValue();
   79.50 +        long before = getUsed(ns);
   79.51          fooClass = compileAndLoad("Foo", "public class Foo { }");
   79.52          System.gc();
   79.53 -        long after = used.longValue();
   79.54 +        long after = getUsed(ns);
   79.55  
   79.56          assertGT(after, before);
   79.57      }
   79.58 @@ -99,6 +102,22 @@
   79.59      }
   79.60  
   79.61      private static boolean isUsingCompressedClassPointers() {
   79.62 -        return Platform.is64bit() && InputArguments.contains("-XX:+UseCompressedKlassPointers");
   79.63 +        return Platform.is64bit() && InputArguments.contains("-XX:+UseCompressedClassPointers");
   79.64 +    }
   79.65 +
   79.66 +    private static long getMinCapacity(String ns) throws Exception {
   79.67 +        return PerfCounters.findByName(ns + ".minCapacity").longValue();
   79.68 +    }
   79.69 +
   79.70 +    private static long getCapacity(String ns) throws Exception {
   79.71 +        return PerfCounters.findByName(ns + ".capacity").longValue();
   79.72 +    }
   79.73 +
   79.74 +    private static long getMaxCapacity(String ns) throws Exception {
   79.75 +        return PerfCounters.findByName(ns + ".maxCapacity").longValue();
   79.76 +    }
   79.77 +
   79.78 +    private static long getUsed(String ns) throws Exception {
   79.79 +        return PerfCounters.findByName(ns + ".used").longValue();
   79.80      }
   79.81  }
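
A short sketch of reading the same counters outside the test harness (hypothetical class name; it assumes the testlibrary PerfCounters helper is on the classpath and the VM runs with -XX:+UsePerfData):

    import com.oracle.java.testlibrary.PerfCounters;

    // Hypothetical example: print the sun.gc.metaspace counters the test compares.
    public class PrintMetaspacePerfCounters {
        public static void main(String[] args) throws Exception {
            long minCapacity = PerfCounters.findByName("sun.gc.metaspace.minCapacity").longValue();
            long used        = PerfCounters.findByName("sun.gc.metaspace.used").longValue();
            long capacity    = PerfCounters.findByName("sun.gc.metaspace.capacity").longValue();
            long maxCapacity = PerfCounters.findByName("sun.gc.metaspace.maxCapacity").longValue();
            System.out.println("minCapacity=" + minCapacity + " used=" + used
                    + " capacity=" + capacity + " maxCapacity=" + maxCapacity);
        }
    }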
    80.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    80.2 +++ b/test/gc/metaspace/TestMetaspaceSizeFlags.java	Thu Sep 19 09:26:08 2013 +0200
    80.3 @@ -0,0 +1,108 @@
    80.4 +/*
    80.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    80.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    80.7 + *
    80.8 + * This code is free software; you can redistribute it and/or modify it
    80.9 + * under the terms of the GNU General Public License version 2 only, as
   80.10 + * published by the Free Software Foundation.
   80.11 + *
   80.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   80.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   80.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   80.15 + * version 2 for more details (a copy is included in the LICENSE file that
   80.16 + * accompanied this code).
   80.17 + *
   80.18 + * You should have received a copy of the GNU General Public License version
   80.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   80.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   80.21 + *
   80.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   80.23 + * or visit www.oracle.com if you need additional information or have any
   80.24 + * questions.
   80.25 + */
   80.26 +
   80.27 +import com.oracle.java.testlibrary.Asserts;
   80.28 +import com.oracle.java.testlibrary.OutputAnalyzer;
   80.29 +import com.oracle.java.testlibrary.ProcessTools;
   80.30 +
   80.31 +/*
   80.32 + * @test TestMetaspaceSizeFlags
   80.33 + * @key gc
   80.34 + * @bug 8024650
   80.35 + * @summary Test that metaspace size flags can be set correctly
   80.36 + * @library /testlibrary
   80.37 + */
   80.38 +public class TestMetaspaceSizeFlags {
   80.39 +  public static final long K = 1024L;
   80.40 +  public static final long M = 1024L * K;
   80.41 +
   80.42 +  // HotSpot uses a number of different values to align memory size flags.
   80.43 +  // This is currently the largest alignment (unless huge large pages are used).
   80.44 +  public static final long MAX_ALIGNMENT = 32 * M;
   80.45 +
   80.46 +  public static void main(String [] args) throws Exception {
   80.47 +    testMaxMetaspaceSizeEQMetaspaceSize(MAX_ALIGNMENT, MAX_ALIGNMENT);
   80.48 +    // 8024650: MaxMetaspaceSize was adjusted instead of MetaspaceSize.
   80.49 +    testMaxMetaspaceSizeLTMetaspaceSize(MAX_ALIGNMENT, MAX_ALIGNMENT * 2);
   80.50 +    testMaxMetaspaceSizeGTMetaspaceSize(MAX_ALIGNMENT * 2, MAX_ALIGNMENT);
   80.51 +    testTooSmallInitialMetaspace(0, 0);
   80.52 +    testTooSmallInitialMetaspace(0, MAX_ALIGNMENT);
   80.53 +    testTooSmallInitialMetaspace(MAX_ALIGNMENT, 0);
   80.54 +  }
   80.55 +
   80.56 +  private static void testMaxMetaspaceSizeEQMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
   80.57 +    MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize);
   80.58 +    Asserts.assertEQ(maxMetaspaceSize, metaspaceSize);
   80.59 +    Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize);
   80.60 +    Asserts.assertEQ(mf.metaspaceSize, metaspaceSize);
   80.61 +  }
   80.62 +
   80.63 +  private static void testMaxMetaspaceSizeLTMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
   80.64 +    MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize);
   80.65 +    Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize);
   80.66 +    Asserts.assertEQ(mf.metaspaceSize, maxMetaspaceSize);
   80.67 +  }
   80.68 +
   80.69 +  private static void testMaxMetaspaceSizeGTMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
   80.70 +    MetaspaceFlags mf = runAndGetValue(maxMetaspaceSize, metaspaceSize);
   80.71 +    Asserts.assertGT(maxMetaspaceSize, metaspaceSize);
   80.72 +    Asserts.assertGT(mf.maxMetaspaceSize, mf.metaspaceSize);
   80.73 +    Asserts.assertEQ(mf.maxMetaspaceSize, maxMetaspaceSize);
   80.74 +    Asserts.assertEQ(mf.metaspaceSize, metaspaceSize);
   80.75 +  }
   80.76 +
   80.77 +  private static void testTooSmallInitialMetaspace(long maxMetaspaceSize, long metaspaceSize) throws Exception {
   80.78 +    OutputAnalyzer output = run(maxMetaspaceSize, metaspaceSize);
   80.79 +    output.shouldContain("Too small initial Metaspace size");
   80.80 +  }
   80.81 +
   80.82 +  private static MetaspaceFlags runAndGetValue(long maxMetaspaceSize, long metaspaceSize) throws Exception {
   80.83 +    OutputAnalyzer output = run(maxMetaspaceSize, metaspaceSize);
   80.84 +    output.shouldNotMatch("Error occurred during initialization of VM\n.*");
   80.85 +
   80.86 +    String stringMaxMetaspaceSize = output.firstMatch(".* MaxMetaspaceSize .* := (\\d+).*", 1);
   80.87 +    String stringMetaspaceSize = output.firstMatch(".* MetaspaceSize .* := (\\d+).*", 1);
   80.88 +
   80.89 +    return new MetaspaceFlags(Long.parseLong(stringMaxMetaspaceSize),
   80.90 +                              Long.parseLong(stringMetaspaceSize));
   80.91 +  }
   80.92 +
   80.93 +  private static OutputAnalyzer run(long maxMetaspaceSize, long metaspaceSize) throws Exception {
   80.94 +    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
   80.95 +        "-XX:MaxMetaspaceSize=" + maxMetaspaceSize,
   80.96 +        "-XX:MetaspaceSize=" + metaspaceSize,
   80.97 +        "-XX:-UseLargePages", // Prevent us from using 2GB large pages on solaris + sparc.
   80.98 +        "-XX:+PrintFlagsFinal",
   80.99 +        "-version");
  80.100 +    return new OutputAnalyzer(pb.start());
  80.101 +  }
  80.102 +
  80.103 +  private static class MetaspaceFlags {
  80.104 +    public long maxMetaspaceSize;
  80.105 +    public long metaspaceSize;
  80.106 +    public MetaspaceFlags(long maxMetaspaceSize, long metaspaceSize) {
  80.107 +      this.maxMetaspaceSize = maxMetaspaceSize;
  80.108 +      this.metaspaceSize = metaspaceSize;
  80.109 +    }
  80.110 +  }
  80.111 +}
    81.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    81.2 +++ b/test/gc/metaspace/TestPerfCountersAndMemoryPools.java	Thu Sep 19 09:26:08 2013 +0200
    81.3 @@ -0,0 +1,86 @@
    81.4 +/*
    81.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    81.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    81.7 + *
    81.8 + * This code is free software; you can redistribute it and/or modify it
    81.9 + * under the terms of the GNU General Public License version 2 only, as
   81.10 + * published by the Free Software Foundation.
   81.11 + *
   81.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   81.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   81.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   81.15 + * version 2 for more details (a copy is included in the LICENSE file that
   81.16 + * accompanied this code).
   81.17 + *
   81.18 + * You should have received a copy of the GNU General Public License version
   81.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   81.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   81.21 + *
   81.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   81.23 + * or visit www.oracle.com if you need additional information or have any
   81.24 + * questions.
   81.25 + */
   81.26 +
   81.27 +import java.util.List;
   81.28 +import java.lang.management.*;
   81.29 +
   81.30 +import com.oracle.java.testlibrary.*;
   81.31 +import static com.oracle.java.testlibrary.Asserts.*;
   81.32 +
   81.33 +/* @test TestPerfCountersAndMemoryPools
   81.34 + * @bug 8023476
    81.35 + * @summary Tests that the MemoryPoolMXBeans and PerfCounters for metaspace
   81.36 + *          report the same data.
    81.37 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+UseSerialGC -XX:+UsePerfData TestPerfCountersAndMemoryPools
    81.38 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedClassPointers -XX:+UseSerialGC -XX:+UsePerfData TestPerfCountersAndMemoryPools
   81.39 + */
   81.40 +public class TestPerfCountersAndMemoryPools {
   81.41 +    public static void main(String[] args) throws Exception {
   81.42 +        checkMemoryUsage("Metaspace", "sun.gc.metaspace");
   81.43 +
    81.44 +        if (InputArguments.contains("-XX:+UseCompressedClassPointers") && Platform.is64bit()) {
   81.45 +            checkMemoryUsage("Compressed Class Space", "sun.gc.compressedclassspace");
   81.46 +        }
   81.47 +    }
   81.48 +
   81.49 +    private static MemoryUsage getMemoryUsage(String memoryPoolName) {
   81.50 +        List<MemoryPoolMXBean> pools = ManagementFactory.getMemoryPoolMXBeans();
   81.51 +        for (MemoryPoolMXBean pool : pools) {
   81.52 +            if (pool.getName().equals(memoryPoolName)) {
   81.53 +                return pool.getUsage();
   81.54 +            }
   81.55 +        }
   81.56 +
    81.57 +        throw new RuntimeException("Expected to find a memory pool with name " +
   81.58 +                                   memoryPoolName);
   81.59 +    }
   81.60 +
   81.61 +    private static void checkMemoryUsage(String memoryPoolName, String perfNS)
   81.62 +        throws Exception {
   81.63 +        // Need to do a gc before each comparison to update the perf counters
   81.64 +
   81.65 +        System.gc();
   81.66 +        MemoryUsage mu = getMemoryUsage(memoryPoolName);
   81.67 +        assertEQ(getMinCapacity(perfNS), mu.getInit());
   81.68 +
   81.69 +        System.gc();
   81.70 +        mu = getMemoryUsage(memoryPoolName);
   81.71 +        assertEQ(getUsed(perfNS), mu.getUsed());
   81.72 +
   81.73 +        System.gc();
   81.74 +        mu = getMemoryUsage(memoryPoolName);
   81.75 +        assertEQ(getCapacity(perfNS), mu.getCommitted());
   81.76 +    }
   81.77 +
   81.78 +    private static long getMinCapacity(String ns) throws Exception {
   81.79 +        return PerfCounters.findByName(ns + ".minCapacity").longValue();
   81.80 +    }
   81.81 +
   81.82 +    private static long getCapacity(String ns) throws Exception {
   81.83 +        return PerfCounters.findByName(ns + ".capacity").longValue();
   81.84 +    }
   81.85 +
   81.86 +    private static long getUsed(String ns) throws Exception {
   81.87 +        return PerfCounters.findByName(ns + ".used").longValue();
   81.88 +    }
   81.89 +}
    82.1 --- a/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java	Wed Sep 18 12:52:15 2013 -0400
    82.2 +++ b/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrs.java	Thu Sep 19 09:26:08 2013 +0200
    82.3 @@ -24,7 +24,7 @@
    82.4  /*
    82.5   * @test
    82.6   * @bug 8003424
    82.7 - * @summary Testing UseCompressedKlassPointers with CDS
    82.8 + * @summary Testing UseCompressedClassPointers with CDS
    82.9   * @library /testlibrary
   82.10   * @run main CDSCompressedKPtrs
   82.11   */
   82.12 @@ -36,7 +36,7 @@
   82.13      ProcessBuilder pb;
   82.14      if (Platform.is64bit()) {
   82.15        pb = ProcessTools.createJavaProcessBuilder(
   82.16 -        "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops",
   82.17 +        "-XX:+UseCompressedClassPointers", "-XX:+UseCompressedOops",
   82.18          "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
   82.19        OutputAnalyzer output = new OutputAnalyzer(pb.start());
   82.20        try {
   82.21 @@ -44,7 +44,7 @@
   82.22          output.shouldHaveExitValue(0);
   82.23  
   82.24          pb = ProcessTools.createJavaProcessBuilder(
   82.25 -          "-XX:+UseCompressedKlassPointers", "-XX:+UseCompressedOops",
   82.26 +          "-XX:+UseCompressedClassPointers", "-XX:+UseCompressedOops",
   82.27            "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
   82.28          output = new OutputAnalyzer(pb.start());
   82.29          output.shouldContain("sharing");
    83.1 --- a/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java	Wed Sep 18 12:52:15 2013 -0400
    83.2 +++ b/test/runtime/CDSCompressedKPtrs/CDSCompressedKPtrsError.java	Thu Sep 19 09:26:08 2013 +0200
    83.3 @@ -24,7 +24,7 @@
    83.4  /*
    83.5   * @test
    83.6   * @bug 8003424
    83.7 - * @summary Test that cannot use CDS if UseCompressedKlassPointers is turned off.
    83.8 + * @summary Test that cannot use CDS if UseCompressedClassPointers is turned off.
    83.9   * @library /testlibrary
   83.10   * @run main CDSCompressedKPtrsError
   83.11   */
   83.12 @@ -36,7 +36,7 @@
   83.13      ProcessBuilder pb;
   83.14      if (Platform.is64bit()) {
   83.15        pb = ProcessTools.createJavaProcessBuilder(
   83.16 -        "-XX:+UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
   83.17 +        "-XX:+UseCompressedOops", "-XX:+UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
   83.18          "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
   83.19        OutputAnalyzer output = new OutputAnalyzer(pb.start());
   83.20        try {
   83.21 @@ -44,21 +44,21 @@
   83.22          output.shouldHaveExitValue(0);
   83.23  
   83.24          pb = ProcessTools.createJavaProcessBuilder(
   83.25 -          "-XX:-UseCompressedKlassPointers", "-XX:-UseCompressedOops",
   83.26 +          "-XX:-UseCompressedClassPointers", "-XX:-UseCompressedOops",
   83.27            "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
   83.28          output = new OutputAnalyzer(pb.start());
   83.29          output.shouldContain("Unable to use shared archive");
   83.30          output.shouldHaveExitValue(0);
   83.31  
   83.32          pb = ProcessTools.createJavaProcessBuilder(
   83.33 -          "-XX:-UseCompressedKlassPointers", "-XX:+UseCompressedOops",
   83.34 +          "-XX:-UseCompressedClassPointers", "-XX:+UseCompressedOops",
   83.35            "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
   83.36          output = new OutputAnalyzer(pb.start());
   83.37          output.shouldContain("Unable to use shared archive");
   83.38          output.shouldHaveExitValue(0);
   83.39  
   83.40          pb = ProcessTools.createJavaProcessBuilder(
   83.41 -          "-XX:+UseCompressedKlassPointers", "-XX:-UseCompressedOops",
   83.42 +          "-XX:+UseCompressedClassPointers", "-XX:-UseCompressedOops",
   83.43            "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:on", "-version");
   83.44          output = new OutputAnalyzer(pb.start());
   83.45          output.shouldContain("Unable to use shared archive");
   83.46 @@ -71,19 +71,19 @@
   83.47  
   83.48        // Test bad options with -Xshare:dump.
   83.49        pb = ProcessTools.createJavaProcessBuilder(
   83.50 -        "-XX:-UseCompressedOops", "-XX:+UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
   83.51 +        "-XX:-UseCompressedOops", "-XX:+UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
   83.52          "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
   83.53        output = new OutputAnalyzer(pb.start());
   83.54        output.shouldContain("Cannot dump shared archive");
   83.55  
   83.56        pb = ProcessTools.createJavaProcessBuilder(
   83.57 -        "-XX:+UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
   83.58 +        "-XX:+UseCompressedOops", "-XX:-UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
   83.59          "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
   83.60        output = new OutputAnalyzer(pb.start());
   83.61        output.shouldContain("Cannot dump shared archive");
   83.62  
   83.63        pb = ProcessTools.createJavaProcessBuilder(
   83.64 -        "-XX:-UseCompressedOops", "-XX:-UseCompressedKlassPointers", "-XX:+UnlockDiagnosticVMOptions",
   83.65 +        "-XX:-UseCompressedOops", "-XX:-UseCompressedClassPointers", "-XX:+UnlockDiagnosticVMOptions",
   83.66          "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
   83.67        output = new OutputAnalyzer(pb.start());
   83.68        output.shouldContain("Cannot dump shared archive");
    84.1 --- a/test/runtime/CompressedOops/CompressedKlassPointerAndOops.java	Wed Sep 18 12:52:15 2013 -0400
    84.2 +++ b/test/runtime/CompressedOops/CompressedKlassPointerAndOops.java	Thu Sep 19 09:26:08 2013 +0200
    84.3 @@ -25,7 +25,7 @@
    84.4   * @test
    84.5   * @bug 8000968
    84.6   * @key regression
    84.7 - * @summary NPG: UseCompressedKlassPointers asserts with ObjectAlignmentInBytes=32
    84.8 + * @summary NPG: UseCompressedClassPointers asserts with ObjectAlignmentInBytes=32
    84.9   * @library /testlibrary
   84.10   */
   84.11  
   84.12 @@ -52,7 +52,7 @@
   84.13          OutputAnalyzer output;
   84.14  
   84.15          pb = ProcessTools.createJavaProcessBuilder(
   84.16 -            "-XX:+UseCompressedKlassPointers",
   84.17 +            "-XX:+UseCompressedClassPointers",
   84.18              "-XX:+UseCompressedOops",
   84.19              "-XX:ObjectAlignmentInBytes=" + alignment,
   84.20              "-version");
    85.1 --- a/test/testlibrary/OutputAnalyzerTest.java	Wed Sep 18 12:52:15 2013 -0400
    85.2 +++ b/test/testlibrary/OutputAnalyzerTest.java	Thu Sep 19 09:26:08 2013 +0200
    85.3 @@ -172,5 +172,22 @@
    85.4      } catch (RuntimeException e) {
    85.5          // expected
    85.6      }
    85.7 +
    85.8 +    {
    85.9 +      String aaaa = "aaaa";
   85.10 +      String result = output.firstMatch(aaaa);
   85.11 +      if (!aaaa.equals(result)) {
    85.12 +        throw new Exception("firstMatch(String) failed to match. Expected: " + aaaa + " got: " + result);
   85.13 +      }
   85.14 +    }
   85.15 +
   85.16 +    {
   85.17 +      String aa = "aa";
   85.18 +      String aa_grouped_aa = aa + "(" + aa + ")";
   85.19 +      String result = output.firstMatch(aa_grouped_aa, 1);
   85.20 +      if (!aa.equals(result)) {
   85.21 +        throw new Exception("firstMatch(String, int) failed to match. Expected: " + aa + " got: " + result);
   85.22 +      }
   85.23 +    }
   85.24    }
   85.25  }
    86.1 --- a/test/testlibrary/com/oracle/java/testlibrary/InputArguments.java	Wed Sep 18 12:52:15 2013 -0400
    86.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/InputArguments.java	Thu Sep 19 09:26:08 2013 +0200
    86.3 @@ -41,6 +41,9 @@
    86.4      /**
    86.5       * Returns true if {@code arg} is an input argument to the VM.
    86.6       *
    86.7 +     * This is useful for checking boolean flags such as -XX:+UseSerialGC or
    86.8 +     * -XX:-UsePerfData.
    86.9 +     *
   86.10       * @param arg The name of the argument.
   86.11       * @return {@code true} if the given argument is an input argument,
   86.12       *         otherwise {@code false}.
   86.13 @@ -48,4 +51,26 @@
   86.14      public static boolean contains(String arg) {
   86.15          return args.contains(arg);
   86.16      }
   86.17 +
   86.18 +    /**
   86.19 +     * Returns true if {@code prefix} is the start of an input argument to the
   86.20 +     * VM.
   86.21 +     *
    86.22 +     * This is useful for checking whether flags that describe a quantity, such as
    86.23 +     * -XX:MaxMetaspaceSize=100m, are set without having to know the quantity.
   86.24 +     * To check if the flag -XX:MaxMetaspaceSize is set, use
   86.25 +     * {@code InputArguments.containsPrefix("-XX:MaxMetaspaceSize")}.
   86.26 +     *
   86.27 +     * @param prefix The start of the argument.
   86.28 +     * @return {@code true} if the given argument is the start of an input
   86.29 +     *         argument, otherwise {@code false}.
   86.30 +     */
   86.31 +    public static boolean containsPrefix(String prefix) {
   86.32 +        for (String arg : args) {
   86.33 +            if (arg.startsWith(prefix)) {
   86.34 +                return true;
   86.35 +            }
   86.36 +        }
   86.37 +        return false;
   86.38 +    }
   86.39  }
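
A short usage sketch for the new prefix check (hypothetical class name; it assumes the testlibrary is on the classpath and that the VM was started with the illustrative options in the comment):

    import com.oracle.java.testlibrary.InputArguments;

    // Hypothetical example; run with e.g. -XX:+UseSerialGC -XX:MaxMetaspaceSize=100m
    public class InputArgumentsExample {
        public static void main(String[] args) {
            // Boolean flags are matched exactly, including the +/- sign.
            boolean serialGC = InputArguments.contains("-XX:+UseSerialGC");
            // Value flags are matched by prefix, so the actual quantity does not matter.
            boolean maxMetaspaceSet = InputArguments.containsPrefix("-XX:MaxMetaspaceSize");
            System.out.println("UseSerialGC: " + serialGC
                    + ", MaxMetaspaceSize set: " + maxMetaspaceSet);
        }
    }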
    87.1 --- a/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java	Wed Sep 18 12:52:15 2013 -0400
    87.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java	Thu Sep 19 09:26:08 2013 +0200
    87.3 @@ -211,13 +211,13 @@
    87.4        if (matcher.find()) {
    87.5            reportDiagnosticSummary();
    87.6            throw new RuntimeException("'" + pattern
    87.7 -                  + "' found in stdout \n");
    87.8 +                  + "' found in stdout: '" + matcher.group() + "' \n");
    87.9        }
   87.10        matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
   87.11        if (matcher.find()) {
   87.12            reportDiagnosticSummary();
   87.13            throw new RuntimeException("'" + pattern
   87.14 -                  + "' found in stderr \n");
   87.15 +                  + "' found in stderr: '" + matcher.group() + "' \n");
   87.16        }
   87.17    }
   87.18  
   87.19 @@ -254,6 +254,37 @@
   87.20    }
   87.21  
   87.22    /**
   87.23 +   * Get the captured group of the first string matching the pattern.
   87.24 +   * stderr is searched before stdout.
   87.25 +   *
   87.26 +   * @param pattern The multi-line pattern to match
   87.27 +   * @param group The group to capture
   87.28 +   * @return The matched string or null if no match was found
   87.29 +   */
   87.30 +  public String firstMatch(String pattern, int group) {
   87.31 +    Matcher stderrMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
   87.32 +    Matcher stdoutMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stdout);
   87.33 +    if (stderrMatcher.find()) {
   87.34 +      return stderrMatcher.group(group);
   87.35 +    }
   87.36 +    if (stdoutMatcher.find()) {
   87.37 +      return stdoutMatcher.group(group);
   87.38 +    }
   87.39 +    return null;
   87.40 +  }
   87.41 +
   87.42 +  /**
   87.43 +   * Get the first string matching the pattern.
   87.44 +   * stderr is searched before stdout.
   87.45 +   *
   87.46 +   * @param pattern The multi-line pattern to match
   87.47 +   * @return The matched string or null if no match was found
   87.48 +   */
   87.49 +  public String firstMatch(String pattern) {
   87.50 +    return firstMatch(pattern, 0);
   87.51 +  }
   87.52 +
   87.53 +  /**
   87.54     * Verify the exit value of the process
   87.55     *
   87.56     * @param expectedExitValue Expected exit value from process
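
A usage sketch for the new firstMatch methods (hypothetical class name; it assumes the testlibrary is on the classpath and spawns a child VM via ProcessTools, as the tests above do):

    import com.oracle.java.testlibrary.OutputAnalyzer;
    import com.oracle.java.testlibrary.ProcessTools;

    // Hypothetical example: extract the MetaspaceSize value from -XX:+PrintFlagsFinal output.
    public class FirstMatchExample {
        public static void main(String[] args) throws Exception {
            ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
                    "-XX:+PrintFlagsFinal", "-version");
            OutputAnalyzer output = new OutputAnalyzer(pb.start());
            output.shouldHaveExitValue(0);
            // Group 1 captures the numeric value; stderr is searched before stdout.
            String metaspaceSize = output.firstMatch(".* MetaspaceSize .*= (\\d+).*", 1);
            System.out.println("MetaspaceSize = " + metaspaceSize);
        }
    }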
    88.1 --- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Wed Sep 18 12:52:15 2013 -0400
    88.2 +++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Thu Sep 19 09:26:08 2013 +0200
    88.3 @@ -61,6 +61,8 @@
    88.4      registerNatives();
    88.5    }
    88.6  
    88.7 +  // Get the maximum heap size supporting COOPs
    88.8 +  public native long getCompressedOopsMaxHeapSize();
    88.9    // Arguments
   88.10    public native void printHeapSizes();
   88.11  
