Merge

author     zgu
date       Fri, 27 Sep 2013 10:08:56 -0400
changeset  5785   a5ac0873476c
parent     5784   190899198332
parent     5780   24250c363d7f
child      5786   36b97be47bde
child      5788   90b27e931639

src/share/vm/classfile/symbolTable.cpp
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
src/share/vm/runtime/sharedRuntime.cpp
     1.1 --- a/.hgtags	Thu Sep 26 10:25:02 2013 -0400
     1.2 +++ b/.hgtags	Fri Sep 27 10:08:56 2013 -0400
     1.3 @@ -379,3 +379,5 @@
     1.4  a09fe9d1e016c285307507a5793bc4fa6215e9c9 hs25-b50
     1.5  85072013aad46050a362d10ab78e963121c8014c jdk8-b108
     1.6  566db1b0e6efca31f181456e54c8911d0192410d hs25-b51
     1.7 +c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109
     1.8 +58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52
     2.1 --- a/make/excludeSrc.make	Thu Sep 26 10:25:02 2013 -0400
     2.2 +++ b/make/excludeSrc.make	Fri Sep 27 10:08:56 2013 -0400
     2.3 @@ -88,7 +88,7 @@
     2.4  	g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
     2.5  	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
     2.6  	g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
     2.7 -	heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
     2.8 +	g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
     2.9  	ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
    2.10  	adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
    2.11  	cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
     3.1 --- a/make/hotspot_version	Thu Sep 26 10:25:02 2013 -0400
     3.2 +++ b/make/hotspot_version	Fri Sep 27 10:08:56 2013 -0400
     3.3 @@ -35,7 +35,7 @@
     3.4  
     3.5  HS_MAJOR_VER=25
     3.6  HS_MINOR_VER=0
     3.7 -HS_BUILD_NUMBER=51
     3.8 +HS_BUILD_NUMBER=53
     3.9  
    3.10  JDK_MAJOR_VER=1
    3.11  JDK_MINOR_VER=8
     4.1 --- a/make/jprt.properties	Thu Sep 26 10:25:02 2013 -0400
     4.2 +++ b/make/jprt.properties	Fri Sep 27 10:08:56 2013 -0400
     4.3 @@ -120,13 +120,13 @@
     4.4  jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
     4.5  jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
     4.6  
     4.7 -jprt.my.windows.i586.jdk8=windows_i586_5.1
     4.8 -jprt.my.windows.i586.jdk7=windows_i586_5.1
     4.9 +jprt.my.windows.i586.jdk8=windows_i586_6.1
    4.10 +jprt.my.windows.i586.jdk7=windows_i586_6.1
    4.11  jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
    4.12  jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
    4.13  
    4.14 -jprt.my.windows.x64.jdk8=windows_x64_5.2
    4.15 -jprt.my.windows.x64.jdk7=windows_x64_5.2
    4.16 +jprt.my.windows.x64.jdk8=windows_x64_6.1
    4.17 +jprt.my.windows.x64.jdk7=windows_x64_6.1
    4.18  jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
    4.19  jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
    4.20  
     5.1 --- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Thu Sep 26 10:25:02 2013 -0400
     5.2 +++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Fri Sep 27 10:08:56 2013 -0400
     5.3 @@ -52,6 +52,11 @@
     5.4  VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
     5.5    const int sparc_code_length = VtableStub::pd_code_size_limit(true);
     5.6    VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
     5.7 +  // Can be NULL if there is no free space in the code cache.
     5.8 +  if (s == NULL) {
     5.9 +    return NULL;
    5.10 +  }
    5.11 +
    5.12    ResourceMark rm;
    5.13    CodeBuffer cb(s->entry_point(), sparc_code_length);
    5.14    MacroAssembler* masm = new MacroAssembler(&cb);
    5.15 @@ -125,6 +130,11 @@
    5.16  VtableStub* VtableStubs::create_itable_stub(int itable_index) {
    5.17    const int sparc_code_length = VtableStub::pd_code_size_limit(false);
    5.18    VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
    5.19 +  // Can be NULL if there is no free space in the code cache.
    5.20 +  if (s == NULL) {
    5.21 +    return NULL;
    5.22 +  }
    5.23 +
    5.24    ResourceMark rm;
    5.25    CodeBuffer cb(s->entry_point(), sparc_code_length);
    5.26    MacroAssembler* masm = new MacroAssembler(&cb);
     6.1 --- a/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Thu Sep 26 10:25:02 2013 -0400
     6.2 +++ b/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Fri Sep 27 10:08:56 2013 -0400
     6.3 @@ -58,6 +58,11 @@
     6.4  VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
     6.5    const int i486_code_length = VtableStub::pd_code_size_limit(true);
     6.6    VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
     6.7 +  // Can be NULL if there is no free space in the code cache.
     6.8 +  if (s == NULL) {
     6.9 +    return NULL;
    6.10 +  }
    6.11 +
    6.12    ResourceMark rm;
    6.13    CodeBuffer cb(s->entry_point(), i486_code_length);
    6.14    MacroAssembler* masm = new MacroAssembler(&cb);
    6.15 @@ -132,6 +137,11 @@
    6.16    //            add code here, bump the code stub size returned by pd_code_size_limit!
    6.17    const int i486_code_length = VtableStub::pd_code_size_limit(false);
    6.18    VtableStub* s = new(i486_code_length) VtableStub(false, itable_index);
    6.19 +  // Can be NULL if there is no free space in the code cache.
    6.20 +  if (s == NULL) {
    6.21 +    return NULL;
    6.22 +  }
    6.23 +
    6.24    ResourceMark rm;
    6.25    CodeBuffer cb(s->entry_point(), i486_code_length);
    6.26    MacroAssembler* masm = new MacroAssembler(&cb);
     7.1 --- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Thu Sep 26 10:25:02 2013 -0400
     7.2 +++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Fri Sep 27 10:08:56 2013 -0400
     7.3 @@ -49,6 +49,11 @@
     7.4  VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
     7.5    const int amd64_code_length = VtableStub::pd_code_size_limit(true);
     7.6    VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
     7.7 +  // Can be NULL if there is no free space in the code cache.
     7.8 +  if (s == NULL) {
     7.9 +    return NULL;
    7.10 +  }
    7.11 +
    7.12    ResourceMark rm;
    7.13    CodeBuffer cb(s->entry_point(), amd64_code_length);
    7.14    MacroAssembler* masm = new MacroAssembler(&cb);
    7.15 @@ -126,6 +131,11 @@
    7.16    // returned by pd_code_size_limit!
    7.17    const int amd64_code_length = VtableStub::pd_code_size_limit(false);
    7.18    VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
    7.19 +  // Can be NULL if there is no free space in the code cache.
    7.20 +  if (s == NULL) {
    7.21 +    return NULL;
    7.22 +  }
    7.23 +
    7.24    ResourceMark rm;
    7.25    CodeBuffer cb(s->entry_point(), amd64_code_length);
    7.26    MacroAssembler* masm = new MacroAssembler(&cb);
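
The three platform files above (SPARC, x86_32, x86_64) apply the same defensive pattern: VtableStub's placement operator new, declared throw() (see the vtableStubs.cpp diff below), can now return NULL when the code cache is full, and a NULL return from a non-throwing operator new skips the constructor, so the generators can test and bail out safely. A minimal sketch of the pattern, with simplified names rather than the exact sources:

    // Sketch: failure-aware stub creation. Because operator new is
    // declared throw(), a NULL return skips the VtableStub constructor,
    // making the direct test of 's' well-defined.
    VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
      const int code_length = VtableStub::pd_code_size_limit(true);
      VtableStub* s = new(code_length) VtableStub(true, vtable_index);
      if (s == NULL) {
        return NULL;  // no code cache space; the caller must cope
      }
      // ... emit the stub code at s->entry_point() ...
      return s;
    }
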
     8.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Sep 26 10:25:02 2013 -0400
     8.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Sep 27 10:08:56 2013 -0400
     8.3 @@ -4219,7 +4219,9 @@
     8.4      }
     8.5    }
     8.6  
     8.7 -  if (!PrintInlining)  return;
     8.8 +  if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
     8.9 +    return;
    8.10 +  }
    8.11    CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
    8.12    if (success && CIPrintMethodCodes) {
    8.13      callee->print_codes();
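
With this change, per-method inlining output works even when the global -XX:+PrintInlining flag is off, via the CompileCommand option that has_option("PrintInlining") consults. An assumed invocation (the method name and MyApp are illustrative):

    java -XX:CompileCommand=option,java/lang/String.indexOf,PrintInlining MyApp
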
     9.1 --- a/src/share/vm/classfile/symbolTable.cpp	Thu Sep 26 10:25:02 2013 -0400
     9.2 +++ b/src/share/vm/classfile/symbolTable.cpp	Fri Sep 27 10:08:56 2013 -0400
     9.3 @@ -341,7 +341,7 @@
     9.4  
     9.5  Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
     9.6                                 unsigned int hashValue_arg, bool c_heap, TRAPS) {
     9.7 -  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
     9.8 +  assert(!Universe::heap()->is_in_reserved(name),
     9.9           "proposed name of symbol must be stable");
    9.10  
    9.11    // Don't allow symbols to be created which cannot fit in a Symbol*.
    9.12 @@ -685,7 +685,7 @@
    9.13    if (found_string != NULL) return found_string;
    9.14  
    9.15    debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
    9.16 -  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
    9.17 +  assert(!Universe::heap()->is_in_reserved(name),
    9.18           "proposed name of symbol must be stable");
    9.19  
    9.20    Handle string;
    10.1 --- a/src/share/vm/code/compiledIC.cpp	Thu Sep 26 10:25:02 2013 -0400
    10.2 +++ b/src/share/vm/code/compiledIC.cpp	Fri Sep 27 10:08:56 2013 -0400
    10.3 @@ -160,7 +160,7 @@
    10.4  // High-level access to an inline cache. Guaranteed to be MT-safe.
    10.5  
    10.6  
    10.7 -void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
    10.8 +bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
    10.9    assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
   10.10    assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
   10.11    assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
   10.12 @@ -170,8 +170,10 @@
   10.13      assert(bytecode == Bytecodes::_invokeinterface, "");
   10.14      int itable_index = call_info->itable_index();
   10.15      entry = VtableStubs::find_itable_stub(itable_index);
   10.16 +    if (entry == NULL) {
   10.17 +      return false;
   10.18 +    }
   10.19  #ifdef ASSERT
   10.20 -    assert(entry != NULL, "entry not computed");
   10.21      int index = call_info->resolved_method()->itable_index();
   10.22      assert(index == itable_index, "CallInfo pre-computes this");
   10.23  #endif //ASSERT
   10.24 @@ -184,6 +186,9 @@
   10.25      int vtable_index = call_info->vtable_index();
   10.26      assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
   10.27      entry = VtableStubs::find_vtable_stub(vtable_index);
   10.28 +    if (entry == NULL) {
   10.29 +      return false;
   10.30 +    }
   10.31      InlineCacheBuffer::create_transition_stub(this, NULL, entry);
   10.32    }
   10.33  
   10.34 @@ -200,6 +205,7 @@
   10.35    // race because the IC entry was complete when we safepointed so
   10.36    // cleaning it immediately is harmless.
   10.37    // assert(is_megamorphic(), "sanity check");
   10.38 +  return true;
   10.39  }
   10.40  
   10.41  
    11.1 --- a/src/share/vm/code/compiledIC.hpp	Thu Sep 26 10:25:02 2013 -0400
    11.2 +++ b/src/share/vm/code/compiledIC.hpp	Fri Sep 27 10:08:56 2013 -0400
    11.3 @@ -226,7 +226,10 @@
    11.4    //
    11.5    void set_to_clean();  // Can only be called during a safepoint operation
    11.6    void set_to_monomorphic(CompiledICInfo& info);
    11.7 -  void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
    11.8 +
    11.9 +  // Returns true if successful and false otherwise. The call can fail if memory
   11.10 +  // allocation in the code cache fails.
   11.11 +  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
   11.12  
   11.13    static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
   11.14                                          bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);
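
Callers now have to check the return value; sharedRuntime.cpp, listed among the merged files above though its diff is not shown here, is the main call site. A hypothetical sketch of caller-side handling, assuming a CompiledIC* ic and a populated CallInfo:

    // Hypothetical handling: if the megamorphic transition fails because
    // no vtable/itable stub fits in the code cache, leave the inline cache
    // unchanged. The next miss re-enters resolution, by which time the
    // sweeper may have reclaimed space. Running degraded beats aborting.
    bool successful = ic->set_to_megamorphic(&call_info, bc, THREAD);
    if (!successful) {
      // deliberately no vm_exit_out_of_memory() here
    }
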
    12.1 --- a/src/share/vm/code/vtableStubs.cpp	Thu Sep 26 10:25:02 2013 -0400
    12.2 +++ b/src/share/vm/code/vtableStubs.cpp	Fri Sep 27 10:08:56 2013 -0400
    12.3 @@ -46,12 +46,9 @@
    12.4  address VtableStub::_chunk_end         = NULL;
    12.5  VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();
    12.6  
    12.7 -static int num_vtable_chunks = 0;
    12.8 -
    12.9  
   12.10  void* VtableStub::operator new(size_t size, int code_size) throw() {
   12.11    assert(size == sizeof(VtableStub), "mismatched size");
   12.12 -  num_vtable_chunks++;
   12.13    // compute real VtableStub size (rounded to nearest word)
   12.14    const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
   12.15    // malloc them in chunks to minimize header overhead
   12.16 @@ -60,7 +57,7 @@
   12.17      const int bytes = chunk_factor * real_size + pd_code_alignment();
   12.18      BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
   12.19      if (blob == NULL) {
   12.20 -      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks");
   12.21 +      return NULL;
   12.22      }
   12.23      _chunk = blob->content_begin();
   12.24      _chunk_end = _chunk + bytes;
   12.25 @@ -121,6 +118,12 @@
   12.26      } else {
   12.27        s = create_itable_stub(vtable_index);
   12.28      }
   12.29 +
   12.30 +    // Creation of a vtable or itable stub can fail if there is not enough free space in the code cache.
   12.31 +    if (s == NULL) {
   12.32 +      return NULL;
   12.33 +    }
   12.34 +
   12.35      enter(is_vtable_stub, vtable_index, s);
   12.36      if (PrintAdapterHandlers) {
   12.37        tty->print_cr("Decoding VtableStub %s[%d]@%d",
    13.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.2 +++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp	Fri Sep 27 10:08:56 2013 -0400
    13.3 @@ -0,0 +1,141 @@
    13.4 +/*
    13.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    13.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    13.7 + *
    13.8 + * This code is free software; you can redistribute it and/or modify it
    13.9 + * under the terms of the GNU General Public License version 2 only, as
   13.10 + * published by the Free Software Foundation.
   13.11 + *
   13.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   13.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   13.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13.15 + * version 2 for more details (a copy is included in the LICENSE file that
   13.16 + * accompanied this code).
   13.17 + *
   13.18 + * You should have received a copy of the GNU General Public License version
   13.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   13.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   13.21 + *
   13.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   13.23 + * or visit www.oracle.com if you need additional information or have any
   13.24 + * questions.
   13.25 + *
   13.26 + */
   13.27 +
   13.28 +#include "precompiled.hpp"
   13.29 +#include "gc_implementation/g1/g1BiasedArray.hpp"
   13.30 +
   13.31 +#ifndef PRODUCT
   13.32 +void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
   13.33 +  guarantee(_base != NULL, "Array not initialized");
   13.34 +  guarantee(index < length(), err_msg("Index out of bounds index: "SIZE_FORMAT" length: "SIZE_FORMAT, index, length()));
   13.35 +}
   13.36 +
   13.37 +void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const {
   13.38 +  guarantee(_biased_base != NULL, "Array not initialized");
   13.39 +  guarantee(biased_index >= bias() && biased_index < (bias() + length()),
   13.40 +    err_msg("Biased index out of bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
   13.41 +}
   13.42 +
   13.43 +void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const {
   13.44 +  guarantee(_biased_base != NULL, "Array not initialized");
   13.45 +  guarantee(biased_index >= bias() && biased_index <= (bias() + length()),
   13.46 +    err_msg("Biased index out of inclusive bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
   13.47 +}
   13.48 +
   13.49 +class TestMappedArray : public G1BiasedMappedArray<int> {
   13.50 +protected:
   13.51 +  virtual int default_value() const { return 0xBAADBABE; }
   13.52 +public:
   13.53 +  static void test_biasedarray() {
   13.54 +    const size_t REGION_SIZE_IN_WORDS = 512;
   13.55 +    const size_t NUM_REGIONS = 20;
   13.56 +    HeapWord* fake_heap = (HeapWord*)LP64_ONLY(0xBAAA00000) NOT_LP64(0xBA000000); // Any value that is non-zero
   13.57 +
   13.58 +    TestMappedArray array;
   13.59 +    array.initialize(fake_heap, fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS,
   13.60 +            REGION_SIZE_IN_WORDS * HeapWordSize);
   13.61 +    // Check address calculation (bounds)
   13.62 +    assert(array.bottom_address_mapped() == fake_heap,
   13.63 +      err_msg("bottom mapped address should be "PTR_FORMAT", but is "PTR_FORMAT, fake_heap, array.bottom_address_mapped()));
   13.64 +    assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS), "must be");
   13.65 +
   13.66 +    int* bottom = array.address_mapped_to(fake_heap);
   13.67 +    assert((void*)bottom == (void*) array.base(), "must be");
   13.68 +    int* end = array.address_mapped_to(fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS);
   13.69 +    assert((void*)end == (void*)(array.base() + array.length()), "must be");
   13.70 +    // The entire array should contain default value elements
   13.71 +    for (int* current = bottom; current < end; current++) {
   13.72 +      assert(*current == array.default_value(), "must be");
   13.73 +    }
   13.74 +
   13.75 +    // Test setting values in the table
   13.76 +
   13.77 +    HeapWord* region_start_address = fake_heap + REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2);
   13.78 +    HeapWord* region_end_address = fake_heap + (REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2) + REGION_SIZE_IN_WORDS - 1);
   13.79 +
   13.80 +    // Set/get by address tests: invert some value; first retrieve one
   13.81 +    int actual_value = array.get_by_index(NUM_REGIONS / 2);
   13.82 +    array.set_by_index(NUM_REGIONS / 2, ~actual_value);
   13.83 +    // Get the same value by address, should correspond to the start of the "region"
   13.84 +    int value = array.get_by_address(region_start_address);
   13.85 +    assert(value == ~actual_value, "must be");
   13.86 +    // Get the same value by address, at one HeapWord before the start
   13.87 +    value = array.get_by_address(region_start_address - 1);
   13.88 +    assert(value == array.default_value(), "must be");
   13.89 +    // Get the same value by address, at the end of the "region"
   13.90 +    value = array.get_by_address(region_end_address);
   13.91 +    assert(value == ~actual_value, "must be");
   13.92 +    // Make sure the next value maps to another index
   13.93 +    value = array.get_by_address(region_end_address + 1);
   13.94 +    assert(value == array.default_value(), "must be");
   13.95 +
   13.96 +    // Reset the value in the array
   13.97 +    array.set_by_address(region_start_address + (region_end_address - region_start_address) / 2, actual_value);
   13.98 +
   13.99 +    // The entire array should have the default value again
  13.100 +    for (int* current = bottom; current < end; current++) {
  13.101 +      assert(*current == array.default_value(), "must be");
  13.102 +    }
  13.103 +
  13.104 +    // Set/get by index tests: invert some value
  13.105 +    idx_t index = NUM_REGIONS / 2;
  13.106 +    actual_value = array.get_by_index(index);
  13.107 +    array.set_by_index(index, ~actual_value);
  13.108 +
  13.109 +    value = array.get_by_index(index);
  13.110 +    assert(value == ~actual_value, "must be");
  13.111 +
  13.112 +    value = array.get_by_index(index - 1);
  13.113 +    assert(value == array.default_value(), "must be");
  13.114 +
  13.115 +    value = array.get_by_index(index + 1);
  13.116 +    assert(value == array.default_value(), "must be");
  13.117 +
  13.118 +    array.set_by_index(0, 0);
  13.119 +    value = array.get_by_index(0);
  13.120 +    assert(value == 0, "must be");
  13.121 +
  13.122 +    array.set_by_index(array.length() - 1, 0);
  13.123 +    value = array.get_by_index(array.length() - 1);
  13.124 +    assert(value == 0, "must be");
  13.125 +
  13.126 +    array.set_by_index(index, 0);
  13.127 +
  13.128 +    // The array should have three zeros, and default values otherwise
  13.129 +    size_t num_zeros = 0;
  13.130 +    for (int* current = bottom; current < end; current++) {
  13.131 +      assert(*current == array.default_value() || *current == 0, "must be");
  13.132 +      if (*current == 0) {
  13.133 +        num_zeros++;
  13.134 +      }
  13.135 +    }
  13.136 +    assert(num_zeros == 3, "must be");
  13.137 +  }
  13.138 +};
  13.139 +
  13.140 +void TestG1BiasedArray_test() {
  13.141 +  TestMappedArray::test_biasedarray();
  13.142 +}
  13.143 +
  13.144 +#endif
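
TestG1BiasedArray_test() follows the internal VM test convention. The registration that invokes it is not part of this diff, but such hooks are normally run in debug builds through the ExecuteInternalVMTests develop flag, e.g.:

    java -XX:+ExecuteInternalVMTests -version
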
    14.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.2 +++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Fri Sep 27 10:08:56 2013 -0400
    14.3 @@ -0,0 +1,181 @@
    14.4 +/*
    14.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    14.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    14.7 + *
    14.8 + * This code is free software; you can redistribute it and/or modify it
    14.9 + * under the terms of the GNU General Public License version 2 only, as
   14.10 + * published by the Free Software Foundation.
   14.11 + *
   14.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   14.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   14.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   14.15 + * version 2 for more details (a copy is included in the LICENSE file that
   14.16 + * accompanied this code).
   14.17 + *
   14.18 + * You should have received a copy of the GNU General Public License version
   14.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   14.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   14.21 + *
   14.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   14.23 + * or visit www.oracle.com if you need additional information or have any
   14.24 + * questions.
   14.25 + *
   14.26 + */
   14.27 +
   14.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
   14.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
   14.30 +
   14.31 +#include "utilities/debug.hpp"
   14.32 +#include "memory/allocation.inline.hpp"
   14.33 +
    14.34 +// Implements the common base functionality for arrays that allow their
    14.35 +// elements to be accessed using a biased index.
    14.36 +// The element type is defined by instantiating the template.
   14.37 +class G1BiasedMappedArrayBase VALUE_OBJ_CLASS_SPEC {
   14.38 +  friend class VMStructs;
   14.39 +public:
   14.40 +  typedef size_t idx_t;
   14.41 +protected:
   14.42 +  address _base;          // the real base address
   14.43 +  size_t _length;         // the length of the array
   14.44 +  address _biased_base;   // base address biased by "bias" elements
    14.45 +  size_t _bias;           // the bias, i.e. the number of elements by which _biased_base precedes _base
    14.46 +  uint _shift_by;         // the number of bits to shift right when mapping an address to an array index
   14.47 +
   14.48 +protected:
   14.49 +
   14.50 +  G1BiasedMappedArrayBase() : _base(NULL), _length(0), _biased_base(NULL),
   14.51 +    _bias(0), _shift_by(0) { }
   14.52 +
   14.53 +  // Allocate a new array, generic version.
   14.54 +  static address create_new_base_array(size_t length, size_t elem_size) {
   14.55 +    assert(length > 0, "just checking");
   14.56 +    assert(elem_size > 0, "just checking");
   14.57 +    return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
   14.58 +  }
   14.59 +
    14.60 +  // Initialize the members of this class. The biased base address is the real
    14.61 +  // base address minus the bias (in elements) multiplied by the element size.
   14.62 +  void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) {
   14.63 +    assert(base != NULL, "just checking");
   14.64 +    assert(length > 0, "just checking");
   14.65 +    assert(shift_by < sizeof(uintptr_t) * 8, err_msg("Shifting by %zd, larger than word size?", shift_by));
   14.66 +    _base = base;
   14.67 +    _length = length;
   14.68 +    _biased_base = base - (bias * elem_size);
   14.69 +    _bias = bias;
   14.70 +    _shift_by = shift_by;
   14.71 +  }
   14.72 +
   14.73 +  // Allocate and initialize this array to cover the heap addresses in the range
   14.74 +  // of [bottom, end).
   14.75 +  void initialize(HeapWord* bottom, HeapWord* end, size_t target_elem_size_in_bytes, size_t mapping_granularity_in_bytes) {
   14.76 +    assert(mapping_granularity_in_bytes > 0, "just checking");
   14.77 +    assert(is_power_of_2(mapping_granularity_in_bytes),
   14.78 +      err_msg("mapping granularity must be power of 2, is %zd", mapping_granularity_in_bytes));
   14.79 +    assert((uintptr_t)bottom % mapping_granularity_in_bytes == 0,
   14.80 +      err_msg("bottom mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
   14.81 +        mapping_granularity_in_bytes, bottom));
   14.82 +    assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
   14.83 +      err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
   14.84 +        mapping_granularity_in_bytes, end));
   14.85 +    size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize);
   14.86 +    idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
   14.87 +    address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
   14.88 +    initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
   14.89 +  }
   14.90 +
   14.91 +  size_t bias() const { return _bias; }
   14.92 +  uint shift_by() const { return _shift_by; }
   14.93 +
   14.94 +  void verify_index(idx_t index) const PRODUCT_RETURN;
   14.95 +  void verify_biased_index(idx_t biased_index) const PRODUCT_RETURN;
   14.96 +  void verify_biased_index_inclusive_end(idx_t biased_index) const PRODUCT_RETURN;
   14.97 +
   14.98 +public:
   14.99 +   // Return the length of the array in elements.
  14.100 +   size_t length() const { return _length; }
  14.101 +};
  14.102 +
  14.103 +// Array that provides biased access and mapping from (valid) addresses in the
  14.104 +// heap into this array.
  14.105 +template<class T>
  14.106 +class G1BiasedMappedArray : public G1BiasedMappedArrayBase {
  14.107 +public:
  14.108 +  typedef G1BiasedMappedArrayBase::idx_t idx_t;
  14.109 +
  14.110 +  T* base() const { return (T*)G1BiasedMappedArrayBase::_base; }
  14.111 +  // Return the element of the given array at the given index. Assume
  14.112 +  // the index is valid. This is a convenience method that does sanity
  14.113 +  // checking on the index.
  14.114 +  T get_by_index(idx_t index) const {
  14.115 +    verify_index(index);
  14.116 +    return this->base()[index];
  14.117 +  }
  14.118 +
  14.119 +  // Set the element of the given array at the given index to the
  14.120 +  // given value. Assume the index is valid. This is a convenience
  14.121 +  // method that does sanity checking on the index.
  14.122 +  void set_by_index(idx_t index, T value) {
  14.123 +    verify_index(index);
  14.124 +    this->base()[index] = value;
  14.125 +  }
  14.126 +
  14.127 +  // The raw biased base pointer.
  14.128 +  T* biased_base() const { return (T*)G1BiasedMappedArrayBase::_biased_base; }
  14.129 +
  14.130 +  // Return the element of the given array that covers the given word in the
  14.131 +  // heap. Assumes the index is valid.
  14.132 +  T get_by_address(HeapWord* value) const {
  14.133 +    idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
  14.134 +    this->verify_biased_index(biased_index);
  14.135 +    return biased_base()[biased_index];
  14.136 +  }
  14.137 +
   14.138 +  // Set the value of the array entry that corresponds to the given address.
  14.139 +  void set_by_address(HeapWord * address, T value) {
  14.140 +    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
  14.141 +    this->verify_biased_index(biased_index);
  14.142 +    biased_base()[biased_index] = value;
  14.143 +  }
  14.144 +
  14.145 +protected:
  14.146 +  // Returns the address of the element the given address maps to
  14.147 +  T* address_mapped_to(HeapWord* address) {
  14.148 +    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
  14.149 +    this->verify_biased_index_inclusive_end(biased_index);
  14.150 +    return biased_base() + biased_index;
  14.151 +  }
  14.152 +
  14.153 +public:
  14.154 +  // Return the smallest address (inclusive) in the heap that this array covers.
  14.155 +  HeapWord* bottom_address_mapped() const {
  14.156 +    return (HeapWord*) ((uintptr_t)this->bias() << this->shift_by());
  14.157 +  }
  14.158 +
  14.159 +  // Return the highest address (exclusive) in the heap that this array covers.
  14.160 +  HeapWord* end_address_mapped() const {
  14.161 +    return (HeapWord*) ((uintptr_t)(this->bias() + this->length()) << this->shift_by());
  14.162 +  }
  14.163 +
  14.164 +protected:
  14.165 +  virtual T default_value() const = 0;
  14.166 +  // Set all elements of the given array to the given value.
  14.167 +  void clear() {
  14.168 +    T value = default_value();
  14.169 +    for (idx_t i = 0; i < length(); i++) {
  14.170 +      set_by_index(i, value);
  14.171 +    }
  14.172 +  }
  14.173 +public:
  14.174 +  G1BiasedMappedArray() {}
  14.175 +
  14.176 +  // Allocate and initialize this array to cover the heap addresses in the range
  14.177 +  // of [bottom, end).
  14.178 +  void initialize(HeapWord* bottom, HeapWord* end, size_t mapping_granularity) {
  14.179 +    G1BiasedMappedArrayBase::initialize(bottom, end, sizeof(T), mapping_granularity);
  14.180 +    this->clear();
  14.181 +  }
  14.182 +};
  14.183 +
  14.184 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
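
The biasing arithmetic is worth a worked example: instead of computing (addr - heap_bottom) >> shift on every access, the array keeps a base pointer pre-displaced by bias = heap_bottom >> shift elements, so a lookup is one shift plus an index. A standalone sketch of the arithmetic (plain C++, independent of the HotSpot class; all constants are assumptions):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t heap_bottom = 0x100000;  // granularity-aligned
      const unsigned  shift       = 12;        // 4 KB mapping granularity
      int storage[8] = {0};                    // covers 8 "regions"

      // The biased base is displaced so that biased_base[addr >> shift]
      // lands inside 'storage'. (Like HotSpot, this forms an out-of-range
      // pointer that is never dereferenced outside the valid window.)
      const uintptr_t bias = heap_bottom >> shift;
      int* biased_base = storage - bias;

      uintptr_t addr = heap_bottom + 5 * 4096 + 123;  // inside entry 5
      biased_base[addr >> shift] = 42;
      assert(storage[5] == 42);
      printf("entry %zu = %d\n", (size_t)((addr >> shift) - bias), storage[5]);
      return 0;
    }
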
    15.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Sep 26 10:25:02 2013 -0400
    15.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Sep 27 10:08:56 2013 -0400
    15.3 @@ -2069,8 +2069,10 @@
    15.4    _g1_storage.initialize(g1_rs, 0);
    15.5    _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
    15.6    _hrs.initialize((HeapWord*) _g1_reserved.start(),
    15.7 -                  (HeapWord*) _g1_reserved.end(),
    15.8 -                  _expansion_regions);
    15.9 +                  (HeapWord*) _g1_reserved.end());
   15.10 +  assert(_hrs.max_length() == _expansion_regions,
   15.11 +         err_msg("max length: %u expansion regions: %u",
   15.12 +                 _hrs.max_length(), _expansion_regions));
   15.13  
   15.14    // Do later initialization work for concurrent refinement.
   15.15    _cg1r->init();
    16.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Thu Sep 26 10:25:02 2013 -0400
    16.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Fri Sep 27 10:08:56 2013 -0400
    16.3 @@ -71,27 +71,16 @@
    16.4  
    16.5  // Public
    16.6  
    16.7 -void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
    16.8 -                               uint max_length) {
    16.9 +void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
   16.10    assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
   16.11           "bottom should be heap region aligned");
   16.12    assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
   16.13           "end should be heap region aligned");
   16.14  
   16.15 -  _length = 0;
   16.16 -  _heap_bottom = bottom;
   16.17 -  _heap_end = end;
   16.18 -  _region_shift = HeapRegion::LogOfHRGrainBytes;
   16.19    _next_search_index = 0;
   16.20    _allocated_length = 0;
   16.21 -  _max_length = max_length;
   16.22  
   16.23 -  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length, mtGC);
   16.24 -  memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
   16.25 -  _regions_biased = _regions - ((uintx) bottom >> _region_shift);
   16.26 -
   16.27 -  assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
   16.28 -         "bottom should be included in the region with index 0");
   16.29 +  _regions.initialize(bottom, end, HeapRegion::GrainBytes);
   16.30  }
   16.31  
   16.32  MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
   16.33 @@ -101,15 +90,15 @@
   16.34    G1CollectedHeap* g1h = G1CollectedHeap::heap();
   16.35  
   16.36    HeapWord* next_bottom = old_end;
   16.37 -  assert(_heap_bottom <= next_bottom, "invariant");
   16.38 +  assert(heap_bottom() <= next_bottom, "invariant");
   16.39    while (next_bottom < new_end) {
   16.40 -    assert(next_bottom < _heap_end, "invariant");
   16.41 +    assert(next_bottom < heap_end(), "invariant");
   16.42      uint index = length();
   16.43  
   16.44 -    assert(index < _max_length, "otherwise we cannot expand further");
   16.45 +    assert(index < max_length(), "otherwise we cannot expand further");
   16.46      if (index == 0) {
   16.47        // We have not allocated any regions so far
   16.48 -      assert(next_bottom == _heap_bottom, "invariant");
   16.49 +      assert(next_bottom == heap_bottom(), "invariant");
   16.50      } else {
   16.51        // next_bottom should match the end of the last/previous region
   16.52        assert(next_bottom == at(index - 1)->end(), "invariant");
   16.53 @@ -122,8 +111,8 @@
   16.54          // allocation failed, we bail out and return what we have done so far
   16.55          return MemRegion(old_end, next_bottom);
   16.56        }
   16.57 -      assert(_regions[index] == NULL, "invariant");
   16.58 -      _regions[index] = new_hr;
   16.59 +      assert(_regions.get_by_index(index) == NULL, "invariant");
   16.60 +      _regions.set_by_index(index, new_hr);
   16.61        increment_allocated_length();
   16.62      }
   16.63      // Have to increment the length first, otherwise we will get an
   16.64 @@ -228,26 +217,26 @@
   16.65  
   16.66  #ifndef PRODUCT
   16.67  void HeapRegionSeq::verify_optional() {
   16.68 -  guarantee(_length <= _allocated_length,
   16.69 +  guarantee(length() <= _allocated_length,
   16.70              err_msg("invariant: _length: %u _allocated_length: %u",
   16.71 -                    _length, _allocated_length));
   16.72 -  guarantee(_allocated_length <= _max_length,
   16.73 +                    length(), _allocated_length));
   16.74 +  guarantee(_allocated_length <= max_length(),
   16.75              err_msg("invariant: _allocated_length: %u _max_length: %u",
   16.76 -                    _allocated_length, _max_length));
   16.77 -  guarantee(_next_search_index <= _length,
   16.78 +                    _allocated_length, max_length()));
   16.79 +  guarantee(_next_search_index <= length(),
   16.80              err_msg("invariant: _next_search_index: %u _length: %u",
   16.81 -                    _next_search_index, _length));
   16.82 +                    _next_search_index, length()));
   16.83  
   16.84 -  HeapWord* prev_end = _heap_bottom;
   16.85 +  HeapWord* prev_end = heap_bottom();
   16.86    for (uint i = 0; i < _allocated_length; i += 1) {
   16.87 -    HeapRegion* hr = _regions[i];
   16.88 +    HeapRegion* hr = _regions.get_by_index(i);
   16.89      guarantee(hr != NULL, err_msg("invariant: i: %u", i));
   16.90      guarantee(hr->bottom() == prev_end,
   16.91                err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
   16.92                        i, HR_FORMAT_PARAMS(hr), prev_end));
   16.93      guarantee(hr->hrs_index() == i,
   16.94                err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
   16.95 -    if (i < _length) {
   16.96 +    if (i < length()) {
   16.97        // Asserts will fire if i is >= _length
   16.98        HeapWord* addr = hr->bottom();
   16.99        guarantee(addr_to_region(addr) == hr, "sanity");
  16.100 @@ -265,8 +254,8 @@
  16.101        prev_end = hr->end();
  16.102      }
  16.103    }
  16.104 -  for (uint i = _allocated_length; i < _max_length; i += 1) {
  16.105 -    guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
  16.106 +  for (uint i = _allocated_length; i < max_length(); i += 1) {
  16.107 +    guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
  16.108    }
  16.109  }
  16.110  #endif // PRODUCT
    17.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Thu Sep 26 10:25:02 2013 -0400
    17.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Fri Sep 27 10:08:56 2013 -0400
    17.3 @@ -25,10 +25,17 @@
    17.4  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
    17.5  #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
    17.6  
    17.7 +#include "gc_implementation/g1/g1BiasedArray.hpp"
    17.8 +
    17.9  class HeapRegion;
   17.10  class HeapRegionClosure;
   17.11  class FreeRegionList;
   17.12  
   17.13 +class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
   17.14 + protected:
   17.15 +   virtual HeapRegion* default_value() const { return NULL; }
   17.16 +};
   17.17 +
   17.18  // This class keeps track of the region metadata (i.e., HeapRegion
   17.19  // instances). They are kept in the _regions array in address
   17.20  // order. A region's index in the array corresponds to its index in
   17.21 @@ -44,35 +51,21 @@
   17.22  //
   17.23  // We keep track of three lengths:
   17.24  //
   17.25 -// * _length (returned by length()) is the number of currently
   17.26 +// * _committed_length (returned by length()) is the number of currently
   17.27  //   committed regions.
   17.28  // * _allocated_length (not exposed outside this class) is the
   17.29  //   number of regions for which we have HeapRegions.
   17.30 -// * _max_length (returned by max_length()) is the maximum number of
   17.31 -//   regions the heap can have.
   17.32 +// * max_length() returns the maximum number of regions the heap can have.
   17.33  //
   17.34 -// and maintain that: _length <= _allocated_length <= _max_length
   17.35 +// and maintain that: _committed_length <= _allocated_length <= max_length()
   17.36  
   17.37  class HeapRegionSeq: public CHeapObj<mtGC> {
   17.38    friend class VMStructs;
   17.39  
   17.40 -  // The array that holds the HeapRegions.
   17.41 -  HeapRegion** _regions;
   17.42 -
   17.43 -  // Version of _regions biased to address 0
   17.44 -  HeapRegion** _regions_biased;
   17.45 +  G1HeapRegionTable _regions;
   17.46  
   17.47    // The number of regions committed in the heap.
   17.48 -  uint _length;
   17.49 -
   17.50 -  // The address of the first reserved word in the heap.
   17.51 -  HeapWord* _heap_bottom;
   17.52 -
   17.53 -  // The address of the last reserved word in the heap - 1.
   17.54 -  HeapWord* _heap_end;
   17.55 -
   17.56 -  // The log of the region byte size.
   17.57 -  uint _region_shift;
   17.58 +  uint _committed_length;
   17.59  
   17.60    // A hint for which index to start searching from for humongous
   17.61    // allocations.
   17.62 @@ -81,37 +74,33 @@
   17.63    // The number of regions for which we have allocated HeapRegions for.
   17.64    uint _allocated_length;
   17.65  
   17.66 -  // The maximum number of regions in the heap.
   17.67 -  uint _max_length;
   17.68 -
   17.69    // Find a contiguous set of empty regions of length num, starting
   17.70    // from the given index.
   17.71    uint find_contiguous_from(uint from, uint num);
   17.72  
   17.73 -  // Map a heap address to a biased region index. Assume that the
   17.74 -  // address is valid.
   17.75 -  inline uintx addr_to_index_biased(HeapWord* addr) const;
   17.76 -
   17.77    void increment_allocated_length() {
   17.78 -    assert(_allocated_length < _max_length, "pre-condition");
   17.79 +    assert(_allocated_length < max_length(), "pre-condition");
   17.80      _allocated_length++;
   17.81    }
   17.82  
   17.83    void increment_length() {
   17.84 -    assert(_length < _max_length, "pre-condition");
   17.85 -    _length++;
   17.86 +    assert(length() < max_length(), "pre-condition");
   17.87 +    _committed_length++;
   17.88    }
   17.89  
   17.90    void decrement_length() {
   17.91 -    assert(_length > 0, "pre-condition");
   17.92 -    _length--;
   17.93 +    assert(length() > 0, "pre-condition");
   17.94 +    _committed_length--;
   17.95    }
   17.96  
   17.97 +  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
   17.98 +  HeapWord* heap_end() const {return _regions.end_address_mapped(); }
   17.99 +
  17.100   public:
   17.101    // Empty constructor; we'll initialize it with the initialize() method.
  17.102 -  HeapRegionSeq() { }
  17.103 +  HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
  17.104  
  17.105 -  void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
  17.106 +  void initialize(HeapWord* bottom, HeapWord* end);
  17.107  
  17.108    // Return the HeapRegion at the given index. Assume that the index
  17.109    // is valid.
  17.110 @@ -126,10 +115,10 @@
  17.111    inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
  17.112  
  17.113    // Return the number of regions that have been committed in the heap.
  17.114 -  uint length() const { return _length; }
  17.115 +  uint length() const { return _committed_length; }
  17.116  
  17.117    // Return the maximum number of regions in the heap.
  17.118 -  uint max_length() const { return _max_length; }
  17.119 +  uint max_length() const { return (uint)_regions.length(); }
  17.120  
  17.121    // Expand the sequence to reflect that the heap has grown from
  17.122    // old_end to new_end. Either create new HeapRegions, or re-use
    18.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Thu Sep 26 10:25:02 2013 -0400
    18.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Fri Sep 27 10:08:56 2013 -0400
    18.3 @@ -28,28 +28,16 @@
    18.4  #include "gc_implementation/g1/heapRegion.hpp"
    18.5  #include "gc_implementation/g1/heapRegionSeq.hpp"
    18.6  
    18.7 -inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
    18.8 -  assert(_heap_bottom <= addr && addr < _heap_end,
    18.9 -         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
   18.10 -                 addr, _heap_bottom, _heap_end));
   18.11 -  uintx index = (uintx) addr >> _region_shift;
   18.12 -  return index;
   18.13 -}
   18.14 -
   18.15  inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
   18.16 -  assert(_heap_bottom <= addr && addr < _heap_end,
   18.17 -         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
   18.18 -                 addr, _heap_bottom, _heap_end));
   18.19 -  uintx index_biased = addr_to_index_biased(addr);
   18.20 -  HeapRegion* hr = _regions_biased[index_biased];
   18.21 +  HeapRegion* hr = _regions.get_by_address(addr);
   18.22    assert(hr != NULL, "invariant");
   18.23    return hr;
   18.24  }
   18.25  
   18.26  inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
   18.27 -  if (addr != NULL && addr < _heap_end) {
   18.28 -    assert(addr >= _heap_bottom,
   18.29 -          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom));
   18.30 +  if (addr != NULL && addr < heap_end()) {
   18.31 +    assert(addr >= heap_bottom(),
   18.32 +          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom()));
   18.33      return addr_to_region_unsafe(addr);
   18.34    }
   18.35    return NULL;
   18.36 @@ -57,7 +45,7 @@
   18.37  
   18.38  inline HeapRegion* HeapRegionSeq::at(uint index) const {
   18.39    assert(index < length(), "pre-condition");
   18.40 -  HeapRegion* hr = _regions[index];
   18.41 +  HeapRegion* hr = _regions.get_by_index(index);
   18.42    assert(hr != NULL, "sanity");
   18.43    assert(hr->hrs_index() == index, "sanity");
   18.44    return hr;
    19.1 --- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Thu Sep 26 10:25:02 2013 -0400
    19.2 +++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Fri Sep 27 10:08:56 2013 -0400
    19.3 @@ -34,8 +34,14 @@
    19.4    static_field(HeapRegion, GrainBytes,        size_t)                         \
    19.5    static_field(HeapRegion, LogOfHRGrainBytes, int)                            \
    19.6                                                                                \
    19.7 -  nonstatic_field(HeapRegionSeq,   _regions, HeapRegion**)                    \
    19.8 -  nonstatic_field(HeapRegionSeq,   _length,  uint)                            \
    19.9 +  nonstatic_field(G1HeapRegionTable, _base,             address)              \
   19.10 +  nonstatic_field(G1HeapRegionTable, _length,           size_t)               \
   19.11 +  nonstatic_field(G1HeapRegionTable, _biased_base,      address)              \
   19.12 +  nonstatic_field(G1HeapRegionTable, _bias,             size_t)               \
   19.13 +  nonstatic_field(G1HeapRegionTable, _shift_by,         uint)                 \
   19.14 +                                                                              \
   19.15 +  nonstatic_field(HeapRegionSeq,   _regions,            G1HeapRegionTable)    \
   19.16 +  nonstatic_field(HeapRegionSeq,   _committed_length,   uint)                 \
   19.17                                                                                \
   19.18    nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
   19.19    nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
   19.20 @@ -58,6 +64,8 @@
   19.21  
   19.22  #define VM_TYPES_G1(declare_type, declare_toplevel_type)                      \
   19.23                                                                                \
   19.24 +  declare_toplevel_type(G1HeapRegionTable)                                    \
   19.25 +                                                                              \
   19.26    declare_type(G1CollectedHeap, SharedHeap)                                   \
   19.27                                                                                \
   19.28    declare_type(HeapRegion, ContiguousSpace)                                   \
    20.1 --- a/src/share/vm/memory/gcLocker.cpp	Thu Sep 26 10:25:02 2013 -0400
    20.2 +++ b/src/share/vm/memory/gcLocker.cpp	Fri Sep 27 10:08:56 2013 -0400
    20.3 @@ -122,7 +122,7 @@
    20.4      // strictly needed. It's added here to make it clear that
    20.5      // the GC will NOT be performed if any other caller
    20.6      // of GC_locker::lock() still needs GC locked.
    20.7 -    if (!is_active()) {
    20.8 +    if (!is_active_internal()) {
    20.9        _doing_gc = true;
   20.10        {
   20.11          // Must give up the lock while at a safepoint
    21.1 --- a/src/share/vm/memory/gcLocker.hpp	Thu Sep 26 10:25:02 2013 -0400
    21.2 +++ b/src/share/vm/memory/gcLocker.hpp	Fri Sep 27 10:08:56 2013 -0400
    21.3 @@ -88,7 +88,7 @@
    21.4   public:
    21.5    // Accessors
    21.6    static bool is_active() {
    21.7 -    assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
    21.8 +    assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
    21.9      return is_active_internal();
   21.10    }
   21.11    static bool needs_gc()       { return _needs_gc;                        }
    22.1 --- a/src/share/vm/memory/metaspace.cpp	Thu Sep 26 10:25:02 2013 -0400
    22.2 +++ b/src/share/vm/memory/metaspace.cpp	Fri Sep 27 10:08:56 2013 -0400
    22.3 @@ -23,6 +23,7 @@
    22.4   */
    22.5  #include "precompiled.hpp"
    22.6  #include "gc_interface/collectedHeap.hpp"
    22.7 +#include "memory/allocation.hpp"
    22.8  #include "memory/binaryTreeDictionary.hpp"
    22.9  #include "memory/freeList.hpp"
   22.10  #include "memory/collectorPolicy.hpp"
   22.11 @@ -111,7 +112,7 @@
   22.12  // Has three lists of free chunks, and a total size and
   22.13  // count that includes all three
   22.14  
   22.15 -class ChunkManager VALUE_OBJ_CLASS_SPEC {
   22.16 +class ChunkManager : public CHeapObj<mtInternal> {
   22.17  
   22.18    // Free list of chunks of different sizes.
   22.19    //   SpecializedChunk
   22.20 @@ -158,7 +159,12 @@
   22.21  
   22.22   public:
   22.23  
   22.24 -  ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
   22.25 +  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
   22.26 +      : _free_chunks_total(0), _free_chunks_count(0) {
   22.27 +    _free_chunks[SpecializedIndex].set_size(specialized_size);
   22.28 +    _free_chunks[SmallIndex].set_size(small_size);
   22.29 +    _free_chunks[MediumIndex].set_size(medium_size);
   22.30 +  }
   22.31  
   22.32    // add or delete (return) a chunk to the global freelist.
   22.33    Metachunk* chunk_freelist_allocate(size_t word_size);
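
Fixing the chunk sizes at construction is what lets the chunk manager move out of VirtualSpaceList (note the _chunk_manager field and the set_size() calls removed further down) and become one shared instance per metadata type. The two instantiations this implies, sketched hypothetically since the diff shows only the constructor:

    // Hypothetical call sites, one manager per metadata type:
    ChunkManager* data_mgr  = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
    ChunkManager* class_mgr = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
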
   22.34 @@ -219,7 +225,7 @@
   22.35    void locked_print_free_chunks(outputStream* st);
   22.36    void locked_print_sum_free_chunks(outputStream* st);
   22.37  
   22.38 -  void print_on(outputStream* st);
   22.39 +  void print_on(outputStream* st) const;
   22.40  };
   22.41  
   22.42  // Used to manage the free list of Metablocks (a block corresponds
   22.43 @@ -276,11 +282,6 @@
   22.44    // VirtualSpace
   22.45    Metachunk* first_chunk() { return (Metachunk*) bottom(); }
   22.46  
   22.47 -  void inc_container_count();
   22.48 -#ifdef ASSERT
   22.49 -  uint container_count_slow();
   22.50 -#endif
   22.51 -
   22.52   public:
   22.53  
   22.54    VirtualSpaceNode(size_t byte_size);
   22.55 @@ -314,8 +315,10 @@
   22.56    void inc_top(size_t word_size) { _top += word_size; }
   22.57  
   22.58    uintx container_count() { return _container_count; }
   22.59 +  void inc_container_count();
   22.60    void dec_container_count();
   22.61  #ifdef ASSERT
   22.62 +  uint container_count_slow();
   22.63    void verify_container_count();
   22.64  #endif
   22.65  
   22.66 @@ -421,8 +424,6 @@
   22.67    VirtualSpaceNode* _virtual_space_list;
   22.68    // virtual space currently being used for allocations
   22.69    VirtualSpaceNode* _current_virtual_space;
   22.70 -  // Free chunk list for all other metadata
   22.71 -  ChunkManager      _chunk_manager;
   22.72  
   22.73    // Can this virtual list allocate >1 spaces?  Also, used to determine
   22.74    // whether to allocate unlimited small chunks in this virtual space
   22.75 @@ -475,7 +476,6 @@
   22.76      return _current_virtual_space;
   22.77    }
   22.78  
   22.79 -  ChunkManager* chunk_manager() { return &_chunk_manager; }
   22.80    bool is_class() const { return _is_class; }
   22.81  
   22.82    // Allocate the first virtualspace.
   22.83 @@ -494,14 +494,7 @@
   22.84    void dec_virtual_space_count();
   22.85  
   22.86    // Unlink empty VirtualSpaceNodes and free it.
   22.87 -  void purge();
   22.88 -
   22.89 -  // Used and capacity in the entire list of virtual spaces.
   22.90 -  // These are global values shared by all Metaspaces
   22.91 -  size_t capacity_words_sum();
   22.92 -  size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
   22.93 -  size_t used_words_sum();
   22.94 -  size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
   22.95 +  void purge(ChunkManager* chunk_manager);
   22.96  
   22.97    bool contains(const void *ptr);
   22.98  
   22.99 @@ -582,18 +575,12 @@
  22.100    // Type of metadata allocated.
  22.101    Metaspace::MetadataType _mdtype;
  22.102  
  22.103 -  // Chunk related size
  22.104 -  size_t _medium_chunk_bunch;
  22.105 -
  22.106    // List of chunks in use by this SpaceManager.  Allocations
  22.107    // are done from the current chunk.  The list is used for deallocating
  22.108    // chunks when the SpaceManager is freed.
  22.109    Metachunk* _chunks_in_use[NumberOfInUseLists];
  22.110    Metachunk* _current_chunk;
  22.111  
  22.112 -  // Virtual space where allocation comes from.
  22.113 -  VirtualSpaceList* _vs_list;
  22.114 -
  22.115    // Number of small chunks to allocate to a manager
  22.116    // If class space manager, small chunks are unlimited
  22.117    static uint const _small_chunk_limit;
  22.118 @@ -626,7 +613,9 @@
  22.119    }
  22.120  
  22.121    Metaspace::MetadataType mdtype() { return _mdtype; }
  22.122 -  VirtualSpaceList* vs_list() const    { return _vs_list; }
  22.123 +
  22.124 +  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  22.125 +  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
  22.126  
  22.127    Metachunk* current_chunk() const { return _current_chunk; }
  22.128    void set_current_chunk(Metachunk* v) {
  22.129 @@ -648,18 +637,19 @@
  22.130  
  22.131   public:
  22.132    SpaceManager(Metaspace::MetadataType mdtype,
  22.133 -               Mutex* lock,
  22.134 -               VirtualSpaceList* vs_list);
  22.135 +               Mutex* lock);
  22.136    ~SpaceManager();
  22.137  
  22.138    enum ChunkMultiples {
  22.139      MediumChunkMultiple = 4
  22.140    };
  22.141  
  22.142 +  bool is_class() { return _mdtype == Metaspace::ClassType; }
  22.143 +
  22.144    // Accessors
  22.145    size_t specialized_chunk_size() { return SpecializedChunk; }
  22.146 -  size_t small_chunk_size() { return (size_t) vs_list()->is_class() ? ClassSmallChunk : SmallChunk; }
  22.147 -  size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; }
  22.148 +  size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
  22.149 +  size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
  22.150    size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
  22.151  
  22.152    size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  22.153 @@ -762,7 +752,7 @@
  22.154    _container_count++;
  22.155    assert(_container_count == container_count_slow(),
  22.156           err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
  22.157 -                 "container_count_slow() " SIZE_FORMAT,
  22.158 +                 " container_count_slow() " SIZE_FORMAT,
  22.159                   _container_count, container_count_slow()));
  22.160  }
  22.161  
  22.162 @@ -775,7 +765,7 @@
  22.163  void VirtualSpaceNode::verify_container_count() {
  22.164    assert(_container_count == container_count_slow(),
  22.165      err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
  22.166 -            "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
  22.167 +            " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
  22.168  }
  22.169  #endif
  22.170  
  22.171 @@ -1020,7 +1010,7 @@
  22.172  // Walk the list of VirtualSpaceNodes and delete
  22.173  // nodes with a 0 container_count.  Remove Metachunks in
  22.174  // the node from their respective freelists.
  22.175 -void VirtualSpaceList::purge() {
  22.176 +void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  22.177    assert_lock_strong(SpaceManager::expand_lock());
  22.178    // Don't use a VirtualSpaceListIterator because this
  22.179    // list is being changed and a straightforward use of an iterator is not safe.
  22.180 @@ -1042,7 +1032,7 @@
  22.181          prev_vsl->set_next(vsl->next());
  22.182        }
  22.183  
  22.184 -      vsl->purge(chunk_manager());
  22.185 +      vsl->purge(chunk_manager);
  22.186        dec_reserved_words(vsl->reserved_words());
  22.187        dec_committed_words(vsl->committed_words());
  22.188        dec_virtual_space_count();
  22.189 @@ -1064,36 +1054,6 @@
  22.190  #endif
  22.191  }
  22.192  
  22.193 -size_t VirtualSpaceList::used_words_sum() {
  22.194 -  size_t allocated_by_vs = 0;
  22.195 -  VirtualSpaceListIterator iter(virtual_space_list());
  22.196 -  while (iter.repeat()) {
  22.197 -    VirtualSpaceNode* vsl = iter.get_next();
  22.198 -    // Sum used region [bottom, top) in each virtualspace
  22.199 -    allocated_by_vs += vsl->used_words_in_vs();
  22.200 -  }
  22.201 -  assert(allocated_by_vs >= chunk_manager()->free_chunks_total_words(),
  22.202 -    err_msg("Total in free chunks " SIZE_FORMAT
  22.203 -            " greater than total from virtual_spaces " SIZE_FORMAT,
  22.204 -            allocated_by_vs, chunk_manager()->free_chunks_total_words()));
  22.205 -  size_t used =
  22.206 -    allocated_by_vs - chunk_manager()->free_chunks_total_words();
  22.207 -  return used;
  22.208 -}
  22.209 -
  22.210 -// Space available in all MetadataVirtualspaces allocated
  22.211 -// for metadata.  This is the upper limit on the capacity
  22.212 -// of chunks allocated out of all the MetadataVirtualspaces.
  22.213 -size_t VirtualSpaceList::capacity_words_sum() {
  22.214 -  size_t capacity = 0;
  22.215 -  VirtualSpaceListIterator iter(virtual_space_list());
  22.216 -  while (iter.repeat()) {
  22.217 -    VirtualSpaceNode* vsl = iter.get_next();
  22.218 -    capacity += vsl->capacity_words_in_vs();
  22.219 -  }
  22.220 -  return capacity;
  22.221 -}
  22.222 -
  22.223  VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
  22.224                                     _is_class(false),
  22.225                                     _virtual_space_list(NULL),
  22.226 @@ -1104,10 +1064,6 @@
  22.227    MutexLockerEx cl(SpaceManager::expand_lock(),
  22.228                     Mutex::_no_safepoint_check_flag);
  22.229    bool initialization_succeeded = grow_vs(word_size);
  22.230 -
  22.231 -  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  22.232 -  _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
  22.233 -  _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
  22.234    assert(initialization_succeeded,
  22.235      " VirtualSpaceList initialization should not fail");
  22.236  }
  22.237 @@ -1123,9 +1079,6 @@
  22.238                     Mutex::_no_safepoint_check_flag);
  22.239    VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  22.240    bool succeeded = class_entry->initialize();
  22.241 -  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  22.242 -  _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
  22.243 -  _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
  22.244    assert(succeeded, " VirtualSpaceList initialization should not fail");
  22.245    link_vs(class_entry);
  22.246  }
  22.247 @@ -1142,7 +1095,7 @@
  22.248    }
  22.249    // Reserve the space
  22.250    size_t vs_byte_size = vs_word_size * BytesPerWord;
  22.251 -  assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");
  22.252 +  assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");
  22.253  
  22.254    // Allocate the meta virtual space and initialize it.
  22.255    VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
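
This hunk strengthens the reservation assert from page alignment to allocation-granularity alignment; on Windows the granularity (commonly 64K) exceeds the page size (4K), so a page-aligned size can still be an invalid reservation size. A standalone check using illustrative values:

    #include <cassert>
    #include <cstddef>

    // Power-of-two round-up, mirroring the role of HotSpot's align_size_up().
    static size_t align_size_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t page_size   = 4  * 1024;   // typical os::vm_page_size()
      const size_t granularity = 64 * 1024;   // os::vm_allocation_granularity() on Windows

      size_t request = 68 * 1024;             // page-aligned but not granularity-aligned
      assert(request % page_size == 0);       // the old assert would have been satisfied
      assert(request % granularity != 0);     // ...yet the reservation size is invalid

      size_t reserved = align_size_up(request, granularity);
      assert(reserved % granularity == 0);    // what the patched assert now requires
      return 0;
    }
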
  22.256 @@ -1195,15 +1148,8 @@
  22.257                                             size_t grow_chunks_by_words,
  22.258                                             size_t medium_chunk_bunch) {
  22.259  
  22.260 -  // Get a chunk from the chunk freelist
  22.261 -  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
  22.262 -
  22.263 -  if (next != NULL) {
  22.264 -    next->container()->inc_container_count();
  22.265 -  } else {
  22.266 -    // Allocate a chunk out of the current virtual space.
  22.267 -    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  22.268 -  }
  22.269 +  // Allocate a chunk out of the current virtual space.
  22.270 +  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  22.271  
  22.272    if (next == NULL) {
  22.273      // Not enough room in current virtual space.  Try to commit
  22.274 @@ -1221,12 +1167,14 @@
  22.275        // being used for CompressedHeaders, don't allocate a new virtualspace.
  22.276        if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
  22.277          // Get another virtual space.
  22.278 -          size_t grow_vs_words =
  22.279 -            MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
  22.280 +        size_t allocation_aligned_expand_words =
  22.281 +            align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
  22.282 +        size_t grow_vs_words =
  22.283 +            MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
  22.284          if (grow_vs(grow_vs_words)) {
  22.285            // Got it.  It's on the list now.  Get a chunk from it.
  22.286            assert(current_virtual_space()->expanded_words() == 0,
  22.287 -              "New virtuals space nodes should not have expanded");
  22.288 +              "New virtual space nodes should not have expanded");
  22.289  
  22.290            size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
  22.291                                                                page_size_words);
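
Here the expansion request is additionally rounded up to the allocation granularity expressed in words before a new virtual space is grown. A short standalone version of that arithmetic, assuming 64-bit words and a 64K granularity:

    #include <cassert>
    #include <cstddef>

    static size_t align_size_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t BytesPerWord = 8;                            // LP64 assumption
      const size_t granularity  = 64 * 1024;                    // illustrative Windows value
      const size_t gran_words   = granularity / BytesPerWord;   // 8192 words per granule

      size_t aligned_expand_vs_by_words = 5000;                 // word count from the caller
      size_t allocation_aligned =
          align_size_up(aligned_expand_vs_by_words, gran_words);
      assert(allocation_aligned == 8192);                       // rounded up to one granule
      return 0;
    }
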
  22.292 @@ -1342,8 +1290,9 @@
  22.293    // reserved space, because this is a larger space prereserved for compressed
  22.294    // class pointers.
  22.295    if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
  22.296 -    size_t real_allocated = Metaspace::space_list()->reserved_words() +
  22.297 -              MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
  22.298 +    size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
  22.299 +    size_t class_allocated    = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
  22.300 +    size_t real_allocated     = nonclass_allocated + class_allocated;
  22.301      if (real_allocated >= MaxMetaspaceSize) {
  22.302        return false;
  22.303      }
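
The MaxMetaspaceSize check now sums non-class reserved bytes with class allocated-capacity bytes, since the class-space reservation is deliberately oversized for compressed class pointers and counting its reserved size would overstate usage. A minimal standalone model of the comparison:

    #include <cstdint>
    #include <cstdio>

    static bool should_expand(uint64_t max_metaspace_bytes,
                              uint64_t nonclass_reserved_bytes,
                              uint64_t class_allocated_capacity_bytes) {
      // Class space contributes what is actually in use, not what is reserved.
      uint64_t real_allocated = nonclass_reserved_bytes + class_allocated_capacity_bytes;
      return real_allocated < max_metaspace_bytes;   // refuse to grow once the cap is hit
    }

    int main() {
      // Illustrative numbers: 128M cap, 100M reserved metadata, 20M class capacity.
      printf("%s\n", should_expand(128ull << 20, 100ull << 20, 20ull << 20)
                         ? "may expand" : "at cap");
      return 0;
    }
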
  22.304 @@ -1536,15 +1485,15 @@
  22.305        if (dummy_chunk == NULL) {
  22.306          break;
  22.307        }
  22.308 -      vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
  22.309 +      sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
  22.310  
  22.311        if (TraceMetadataChunkAllocation && Verbose) {
  22.312          gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
  22.313                                 sm->sum_count_in_chunks_in_use());
  22.314          dummy_chunk->print_on(gclog_or_tty);
  22.315          gclog_or_tty->print_cr("  Free chunks total %d  count %d",
  22.316 -                               vsl->chunk_manager()->free_chunks_total_words(),
  22.317 -                               vsl->chunk_manager()->free_chunks_count());
  22.318 +                               sm->chunk_manager()->free_chunks_total_words(),
  22.319 +                               sm->chunk_manager()->free_chunks_count());
  22.320        }
  22.321      }
  22.322    } else {
  22.323 @@ -1796,6 +1745,8 @@
  22.324    // work.
  22.325    chunk->set_is_free(false);
  22.326  #endif
  22.327 +  chunk->container()->inc_container_count();
  22.328 +
  22.329    slow_locked_verify();
  22.330    return chunk;
  22.331  }
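
With this change a chunk's owning node has its container count bumped at the single point where chunks leave the freelist, rather than in one particular caller (the increment removed from VirtualSpaceList::get_new_chunk above). A standalone model of that centralized accounting:

    #include <cassert>
    #include <vector>

    struct Node  { int container_count = 0; };
    struct Chunk { Node* container; };

    struct ChunkManager {
      std::vector<Chunk*> free_list;
      Chunk* free_chunks_get() {
        if (free_list.empty()) return nullptr;
        Chunk* c = free_list.back();
        free_list.pop_back();
        c->container->container_count++;   // every freelist exit is accounted for
        return c;
      }
    };

    int main() {
      Node node;
      Chunk chunk{&node};
      ChunkManager cm;
      cm.free_list.push_back(&chunk);
      cm.free_chunks_get();
      assert(node.container_count == 1);
      return 0;
    }
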
  22.332 @@ -1830,9 +1781,9 @@
  22.333    return chunk;
  22.334  }
  22.335  
  22.336 -void ChunkManager::print_on(outputStream* out) {
  22.337 +void ChunkManager::print_on(outputStream* out) const {
  22.338    if (PrintFLSStatistics != 0) {
  22.339 -    humongous_dictionary()->report_statistics();
  22.340 +    const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
  22.341    }
  22.342  }
  22.343  
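
print_on() becomes const so the chunk manager can be printed from const contexts; the humongous dictionary's report_statistics() is not declared const, so the patch bridges the gap with a const_cast. A standalone illustration of that idiom:

    #include <cstdio>

    struct Dictionary {
      void report_statistics() { puts("stats"); }   // non-const legacy API
    };

    struct ChunkManager {
      Dictionary _dict;
      Dictionary* humongous_dictionary() { return &_dict; }
      void print_on() const {
        // Logically read-only, but the reporter is not const-qualified:
        const_cast<ChunkManager*>(this)->humongous_dictionary()->report_statistics();
      }
    };

    int main() { ChunkManager().print_on(); return 0; }
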
  22.344 @@ -1979,8 +1930,8 @@
  22.345      }
  22.346    }
  22.347  
  22.348 -  vs_list()->chunk_manager()->locked_print_free_chunks(st);
  22.349 -  vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
  22.350 +  chunk_manager()->locked_print_free_chunks(st);
  22.351 +  chunk_manager()->locked_print_sum_free_chunks(st);
  22.352  }
  22.353  
  22.354  size_t SpaceManager::calc_chunk_size(size_t word_size) {
  22.355 @@ -2084,9 +2035,7 @@
  22.356  }
  22.357  
  22.358  SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
  22.359 -                           Mutex* lock,
  22.360 -                           VirtualSpaceList* vs_list) :
  22.361 -  _vs_list(vs_list),
  22.362 +                           Mutex* lock) :
  22.363    _mdtype(mdtype),
  22.364    _allocated_blocks_words(0),
  22.365    _allocated_chunks_words(0),
  22.366 @@ -2172,9 +2121,7 @@
  22.367    MutexLockerEx fcl(SpaceManager::expand_lock(),
  22.368                      Mutex::_no_safepoint_check_flag);
  22.369  
  22.370 -  ChunkManager* chunk_manager = vs_list()->chunk_manager();
  22.371 -
  22.372 -  chunk_manager->slow_locked_verify();
  22.373 +  chunk_manager()->slow_locked_verify();
  22.374  
  22.375    dec_total_from_size_metrics();
  22.376  
  22.377 @@ -2188,8 +2135,8 @@
  22.378  
  22.379    // Have to update before the chunks_in_use lists are emptied
  22.380    // below.
  22.381 -  chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
  22.382 -                                       sum_count_in_chunks_in_use());
  22.383 +  chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
  22.384 +                                         sum_count_in_chunks_in_use());
  22.385  
  22.386    // Add all the chunks in use by this space manager
  22.387    // to the global list of free chunks.
  22.388 @@ -2204,11 +2151,11 @@
  22.389                               chunk_size_name(i));
  22.390      }
  22.391      Metachunk* chunks = chunks_in_use(i);
  22.392 -    chunk_manager->return_chunks(i, chunks);
  22.393 +    chunk_manager()->return_chunks(i, chunks);
  22.394      set_chunks_in_use(i, NULL);
  22.395      if (TraceMetadataChunkAllocation && Verbose) {
  22.396        gclog_or_tty->print_cr("updated freelist count %d %s",
  22.397 -                             chunk_manager->free_chunks(i)->count(),
  22.398 +                             chunk_manager()->free_chunks(i)->count(),
  22.399                               chunk_size_name(i));
  22.400      }
  22.401      assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
  22.402 @@ -2245,16 +2192,16 @@
  22.403                     humongous_chunks->word_size(), HumongousChunkGranularity));
  22.404      Metachunk* next_humongous_chunks = humongous_chunks->next();
  22.405      humongous_chunks->container()->dec_container_count();
  22.406 -    chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
  22.407 +    chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
  22.408      humongous_chunks = next_humongous_chunks;
  22.409    }
  22.410    if (TraceMetadataChunkAllocation && Verbose) {
  22.411      gclog_or_tty->print_cr("");
  22.412      gclog_or_tty->print_cr("updated dictionary count %d %s",
  22.413 -                     chunk_manager->humongous_dictionary()->total_count(),
  22.414 +                     chunk_manager()->humongous_dictionary()->total_count(),
  22.415                       chunk_size_name(HumongousIndex));
  22.416    }
  22.417 -  chunk_manager->slow_locked_verify();
  22.418 +  chunk_manager()->slow_locked_verify();
  22.419  }
  22.420  
  22.421  const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
  22.422 @@ -2343,9 +2290,7 @@
  22.423      gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
  22.424                          sum_count_in_chunks_in_use());
  22.425      new_chunk->print_on(gclog_or_tty);
  22.426 -    if (vs_list() != NULL) {
  22.427 -      vs_list()->chunk_manager()->locked_print_free_chunks(gclog_or_tty);
  22.428 -    }
  22.429 +    chunk_manager()->locked_print_free_chunks(gclog_or_tty);
  22.430    }
  22.431  }
  22.432  
  22.433 @@ -2361,10 +2306,14 @@
  22.434  
  22.435  Metachunk* SpaceManager::get_new_chunk(size_t word_size,
  22.436                                         size_t grow_chunks_by_words) {
  22.437 -
  22.438 -  Metachunk* next = vs_list()->get_new_chunk(word_size,
  22.439 -                                             grow_chunks_by_words,
  22.440 -                                             medium_chunk_bunch());
  22.441 +  // Get a chunk from the chunk freelist
  22.442 +  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
  22.443 +
  22.444 +  if (next == NULL) {
  22.445 +    next = vs_list()->get_new_chunk(word_size,
  22.446 +                                    grow_chunks_by_words,
  22.447 +                                    medium_chunk_bunch());
  22.448 +  }
  22.449  
  22.450    if (TraceMetadataHumongousAllocation && next != NULL &&
  22.451        SpaceManager::is_humongous(next->word_size())) {
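
This is the core reordering of the metaspace hunks: the freelist probe that previously lived in VirtualSpaceList::get_new_chunk (removed in an earlier hunk) moves into SpaceManager::get_new_chunk, so recycled chunks are preferred and a virtual space is only carved when the freelist misses. A self-contained model of the two-step path:

    #include <cstdio>

    struct Chunk { const char* origin; };

    static Chunk freelist_chunk { "freelist" };
    static Chunk fresh_chunk    { "virtual space" };
    static bool  freelist_has_fit = true;        // toggle to exercise both paths

    static Chunk* chunk_freelist_allocate() {
      return freelist_has_fit ? &freelist_chunk : nullptr;
    }
    static Chunk* vs_list_get_new_chunk() { return &fresh_chunk; }

    static Chunk* get_new_chunk() {
      Chunk* next = chunk_freelist_allocate();   // step 1: recycle if possible
      if (next == nullptr) {
        next = vs_list_get_new_chunk();          // step 2: carve from a virtual space
      }
      return next;
    }

    int main() { printf("chunk from %s\n", get_new_chunk()->origin); return 0; }
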
  22.452 @@ -2644,13 +2593,12 @@
  22.453  size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
  22.454  
  22.455  size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
  22.456 -  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  22.457 -  if (list == NULL) {
  22.458 +  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
  22.459 +  if (chunk_manager == NULL) {
  22.460      return 0;
  22.461    }
  22.462 -  ChunkManager* chunk = list->chunk_manager();
  22.463 -  chunk->slow_verify();
  22.464 -  return chunk->free_chunks_total_words();
  22.465 +  chunk_manager->slow_verify();
  22.466 +  return chunk_manager->free_chunks_total_words();
  22.467  }
  22.468  
  22.469  size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
  22.470 @@ -2801,9 +2749,9 @@
  22.471  }
  22.472  
  22.473  void MetaspaceAux::verify_free_chunks() {
  22.474 -  Metaspace::space_list()->chunk_manager()->verify();
  22.475 +  Metaspace::chunk_manager_metadata()->verify();
  22.476    if (Metaspace::using_class_space()) {
  22.477 -    Metaspace::class_space_list()->chunk_manager()->verify();
  22.478 +    Metaspace::chunk_manager_class()->verify();
  22.479    }
  22.480  }
  22.481  
  22.482 @@ -2874,6 +2822,9 @@
  22.483  VirtualSpaceList* Metaspace::_space_list = NULL;
  22.484  VirtualSpaceList* Metaspace::_class_space_list = NULL;
  22.485  
  22.486 +ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
  22.487 +ChunkManager* Metaspace::_chunk_manager_class = NULL;
  22.488 +
  22.489  #define VIRTUALSPACEMULTIPLIER 2
  22.490  
  22.491  #ifdef _LP64
  22.492 @@ -2981,6 +2932,7 @@
  22.493           err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
  22.494    assert(using_class_space(), "Must be using class space");
  22.495    _class_space_list = new VirtualSpaceList(rs);
  22.496 +  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
  22.497  }
  22.498  
  22.499  #endif
  22.500 @@ -3006,6 +2958,7 @@
  22.501      // remainder is the misc code and data chunks.
  22.502      cds_total = FileMapInfo::shared_spaces_size();
  22.503      _space_list = new VirtualSpaceList(cds_total/wordSize);
  22.504 +    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  22.505  
  22.506  #ifdef _LP64
  22.507      // Set the compressed klass pointer base so that decoding of these pointers works
  22.508 @@ -3073,15 +3026,30 @@
  22.509      size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
  22.510      // Initialize the list of virtual spaces.
  22.511      _space_list = new VirtualSpaceList(word_size);
  22.512 +    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  22.513    }
  22.514  }
  22.515  
  22.516 +Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
  22.517 +                                               size_t chunk_word_size,
  22.518 +                                               size_t chunk_bunch) {
  22.519 +  // Get a chunk from the chunk freelist
  22.520 +  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
  22.521 +  if (chunk != NULL) {
  22.522 +    return chunk;
  22.523 +  }
  22.524 +
  22.525 +  return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
  22.526 +}
  22.527 +
  22.528  void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
  22.529  
  22.530    assert(space_list() != NULL,
  22.531      "Metadata VirtualSpaceList has not been initialized");
  22.532 -
  22.533 -  _vsm = new SpaceManager(NonClassType, lock, space_list());
  22.534 +  assert(chunk_manager_metadata() != NULL,
  22.535 +    "Metadata ChunkManager has not been initialized");
  22.536 +
  22.537 +  _vsm = new SpaceManager(NonClassType, lock);
  22.538    if (_vsm == NULL) {
  22.539      return;
  22.540    }
  22.541 @@ -3090,11 +3058,13 @@
  22.542    vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
  22.543  
  22.544    if (using_class_space()) {
  22.545 -    assert(class_space_list() != NULL,
  22.546 -      "Class VirtualSpaceList has not been initialized");
  22.547 +    assert(class_space_list() != NULL,
  22.548 +      "Class VirtualSpaceList has not been initialized");
  22.549 +    assert(chunk_manager_class() != NULL,
  22.550 +      "Class ChunkManager has not been initialized");
  22.551  
  22.552      // Allocate SpaceManager for classes.
  22.553 -    _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
  22.554 +    _class_vsm = new SpaceManager(ClassType, lock);
  22.555      if (_class_vsm == NULL) {
  22.556        return;
  22.557      }
  22.558 @@ -3103,9 +3073,9 @@
  22.559    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  22.560  
  22.561    // Allocate chunk for metadata objects
  22.562 -  Metachunk* new_chunk =
  22.563 -     space_list()->get_initialization_chunk(word_size,
  22.564 -                                            vsm()->medium_chunk_bunch());
  22.565 +  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
  22.566 +                                                  word_size,
  22.567 +                                                  vsm()->medium_chunk_bunch());
  22.568    assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
  22.569    if (new_chunk != NULL) {
  22.570      // Add to this manager's list of chunks in use and current_chunk().
  22.571 @@ -3114,9 +3084,9 @@
  22.572  
  22.573    // Allocate chunk for class metadata objects
  22.574    if (using_class_space()) {
  22.575 -    Metachunk* class_chunk =
  22.576 -       class_space_list()->get_initialization_chunk(class_word_size,
  22.577 -                                                    class_vsm()->medium_chunk_bunch());
  22.578 +    Metachunk* class_chunk = get_initialization_chunk(ClassType,
  22.579 +                                                      class_word_size,
  22.580 +                                                      class_vsm()->medium_chunk_bunch());
  22.581      if (class_chunk != NULL) {
  22.582        class_vsm()->add_chunk(class_chunk, true);
  22.583      }
  22.584 @@ -3333,12 +3303,16 @@
  22.585    }
  22.586  }
  22.587  
  22.588 +void Metaspace::purge(MetadataType mdtype) {
  22.589 +  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
  22.590 +}
  22.591 +
  22.592  void Metaspace::purge() {
  22.593    MutexLockerEx cl(SpaceManager::expand_lock(),
  22.594                     Mutex::_no_safepoint_check_flag);
  22.595 -  space_list()->purge();
  22.596 +  purge(NonClassType);
  22.597    if (using_class_space()) {
  22.598 -    class_space_list()->purge();
  22.599 +    purge(ClassType);
  22.600    }
  22.601  }
  22.602  
  22.603 @@ -3385,7 +3359,7 @@
  22.604  
  22.605  #ifndef PRODUCT
  22.606  
  22.607 -class MetaspaceAuxTest : AllStatic {
  22.608 +class TestMetaspaceAuxTest : AllStatic {
  22.609   public:
  22.610    static void test_reserved() {
  22.611      size_t reserved = MetaspaceAux::reserved_bytes();
  22.612 @@ -3425,14 +3399,25 @@
  22.613      }
  22.614    }
  22.615  
  22.616 +  static void test_virtual_space_list_large_chunk() {
  22.617 +    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
  22.618 +    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  22.619 +    // Use a size larger than VirtualSpaceSize (256k), plus one extra page, so the
  22.620 +    // result is _not_ vm_allocation_granularity-aligned on Windows.
  22.621 +    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
  22.622 +    large_size += (os::vm_page_size()/BytesPerWord);
  22.623 +    vs_list->get_new_chunk(large_size, large_size, 0);
  22.624 +  }
  22.625 +
  22.626    static void test() {
  22.627      test_reserved();
  22.628      test_committed();
  22.629 +    test_virtual_space_list_large_chunk();
  22.630    }
  22.631  };
  22.632  
  22.633 -void MetaspaceAux_test() {
  22.634 -  MetaspaceAuxTest::test();
  22.635 +void TestMetaspaceAux_test() {
  22.636 +  TestMetaspaceAuxTest::test();
  22.637  }
  22.638  
  22.639  #endif
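
The new test requests a chunk larger than VirtualSpaceSize whose byte size is deliberately not granularity-aligned. A standalone check of the numbers it constructs, under illustrative 64-bit and 4K-page assumptions:

    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t K = 1024, BytesPerWord = 8, page_size = 4 * K;
      size_t large_size = 2 * 256 * K + page_size / BytesPerWord;  // words, > VirtualSpaceSize
      large_size += page_size / BytesPerWord;                      // one more page of words
      assert(large_size == 524288 + 1024);
      assert((large_size * BytesPerWord) % (64 * K) != 0);         // not granularity-aligned
      return 0;
    }
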
    23.1 --- a/src/share/vm/memory/metaspace.hpp	Thu Sep 26 10:25:02 2013 -0400
    23.2 +++ b/src/share/vm/memory/metaspace.hpp	Fri Sep 27 10:08:56 2013 -0400
    23.3 @@ -56,12 +56,15 @@
    23.4  //                       +-------------------+
    23.5  //
    23.6  
    23.7 +class ChunkManager;
    23.8  class ClassLoaderData;
    23.9  class Metablock;
   23.10 +class Metachunk;
   23.11  class MetaWord;
   23.12  class Mutex;
   23.13  class outputStream;
   23.14  class SpaceManager;
   23.15 +class VirtualSpaceList;
   23.16  
   23.17  // Metaspaces each have a  SpaceManager and allocations
   23.18  // are done by the SpaceManager.  Allocations are done
   23.19 @@ -76,8 +79,6 @@
   23.20  // allocate() method returns a block for use as a
   23.21  // quantum of metadata.
   23.22  
   23.23 -class VirtualSpaceList;
   23.24 -
   23.25  class Metaspace : public CHeapObj<mtClass> {
   23.26    friend class VMStructs;
   23.27    friend class SpaceManager;
   23.28 @@ -102,6 +103,10 @@
   23.29   private:
   23.30    void initialize(Mutex* lock, MetaspaceType type);
   23.31  
   23.32 +  Metachunk* get_initialization_chunk(MetadataType mdtype,
   23.33 +                                      size_t chunk_word_size,
   23.34 +                                      size_t chunk_bunch);
   23.35 +
   23.36    // Align up the word size to the allocation word size
   23.37    static size_t align_word_size_up(size_t);
   23.38  
   23.39 @@ -134,6 +139,10 @@
   23.40    static VirtualSpaceList* _space_list;
   23.41    static VirtualSpaceList* _class_space_list;
   23.42  
   23.43 +  static ChunkManager* _chunk_manager_metadata;
   23.44 +  static ChunkManager* _chunk_manager_class;
   23.45 +
   23.46 + public:
   23.47    static VirtualSpaceList* space_list()       { return _space_list; }
   23.48    static VirtualSpaceList* class_space_list() { return _class_space_list; }
   23.49    static VirtualSpaceList* get_space_list(MetadataType mdtype) {
   23.50 @@ -141,6 +150,14 @@
   23.51      return mdtype == ClassType ? class_space_list() : space_list();
   23.52    }
   23.53  
   23.54 +  static ChunkManager* chunk_manager_metadata() { return _chunk_manager_metadata; }
   23.55 +  static ChunkManager* chunk_manager_class()    { return _chunk_manager_class; }
   23.56 +  static ChunkManager* get_chunk_manager(MetadataType mdtype) {
   23.57 +    assert(mdtype != MetadataTypeCount, "MetadataTypeCount can't be used as mdtype");
   23.58 +    return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata();
   23.59 +  }
   23.60 +
   23.61 + private:
   23.62    // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
   23.63    // maintain a single list for now.
   23.64    void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
   23.65 @@ -199,6 +216,7 @@
   23.66    void dump(outputStream* const out) const;
   23.67  
   23.68    // Free empty virtualspaces
   23.69 +  static void purge(MetadataType mdtype);
   23.70    static void purge();
   23.71  
   23.72    void print_on(outputStream* st) const;
    24.1 --- a/src/share/vm/opto/bytecodeInfo.cpp	Thu Sep 26 10:25:02 2013 -0400
    24.2 +++ b/src/share/vm/opto/bytecodeInfo.cpp	Fri Sep 27 10:08:56 2013 -0400
    24.3 @@ -123,7 +123,7 @@
    24.4    // Allows targeted inlining
    24.5    if(callee_method->should_inline()) {
    24.6      *wci_result = *(WarmCallInfo::always_hot());
    24.7 -    if (PrintInlining && Verbose) {
    24.8 +    if (C->print_inlining() && Verbose) {
    24.9        CompileTask::print_inline_indent(inline_level());
   24.10        tty->print_cr("Inlined method is hot: ");
   24.11      }
   24.12 @@ -137,7 +137,7 @@
   24.13    if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
   24.14       size < InlineThrowMaxSize ) {
   24.15      wci_result->set_profit(wci_result->profit() * 100);
   24.16 -    if (PrintInlining && Verbose) {
   24.17 +    if (C->print_inlining() && Verbose) {
   24.18        CompileTask::print_inline_indent(inline_level());
   24.19        tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
   24.20      }
   24.21 @@ -491,7 +491,7 @@
   24.22        C->log()->inline_fail(inline_msg);
   24.23      }
   24.24    }
   24.25 -  if (PrintInlining) {
   24.26 +  if (C->print_inlining()) {
   24.27      C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg);
   24.28      if (callee_method == NULL) tty->print(" callee not monotonic or profiled");
   24.29      if (Verbose && callee_method) {
   24.30 @@ -540,7 +540,7 @@
   24.31  
   24.32  #ifndef PRODUCT
   24.33    if (UseOldInlining && InlineWarmCalls
   24.34 -      && (PrintOpto || PrintOptoInlining || PrintInlining)) {
   24.35 +      && (PrintOpto || C->print_inlining())) {
   24.36      bool cold = wci.is_cold();
   24.37      bool hot  = !cold && wci.is_hot();
   24.38      bool old_cold = !success;
   24.39 @@ -617,7 +617,7 @@
   24.40               callee_method->is_compiled_lambda_form()) {
   24.41        max_inline_level_adjust += 1;  // don't count method handle calls from java.lang.invoke implem
   24.42      }
   24.43 -    if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) {
   24.44 +    if (max_inline_level_adjust != 0 && C->print_inlining() && (Verbose || WizardMode)) {
   24.45        CompileTask::print_inline_indent(inline_level());
   24.46        tty->print_cr(" \\-> discounting inline depth");
   24.47      }
    25.1 --- a/src/share/vm/opto/callGenerator.hpp	Thu Sep 26 10:25:02 2013 -0400
    25.2 +++ b/src/share/vm/opto/callGenerator.hpp	Fri Sep 27 10:08:56 2013 -0400
    25.3 @@ -159,8 +159,9 @@
    25.4    virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }
    25.5  
    25.6    static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
    25.7 -    if (PrintInlining)
    25.8 +    if (C->print_inlining()) {
    25.9        C->print_inlining(callee, inline_level, bci, msg);
   25.10 +    }
   25.11    }
   25.12  };
   25.13  
    26.1 --- a/src/share/vm/opto/compile.cpp	Thu Sep 26 10:25:02 2013 -0400
    26.2 +++ b/src/share/vm/opto/compile.cpp	Fri Sep 27 10:08:56 2013 -0400
    26.3 @@ -654,7 +654,7 @@
    26.4                    _inlining_progress(false),
    26.5                    _inlining_incrementally(false),
    26.6                    _print_inlining_list(NULL),
    26.7 -                  _print_inlining(0) {
    26.8 +                  _print_inlining_idx(0) {
    26.9    C = this;
   26.10  
   26.11    CompileWrapper cw(this);
   26.12 @@ -679,6 +679,8 @@
   26.13    set_print_assembly(print_opto_assembly);
   26.14    set_parsed_irreducible_loop(false);
   26.15  #endif
   26.16 +  set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
   26.17 +  set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
   26.18  
   26.19    if (ProfileTraps) {
   26.20      // Make sure the method being compiled gets its own MDO,
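
Across bytecodeInfo.cpp, doCall.cpp, library_call.cpp, and callGenerator.hpp, the scattered tests of the PrintInlining/PrintIntrinsics/PrintOptoInlining globals collapse into per-compilation print_inlining()/print_intrinsics() queries, seeded here from the globals plus the per-method CompilerOracle options. A standalone model of computing the decision once per Compile:

    #include <cstdio>

    static bool PrintInlining   = false;   // stand-ins for the real VM globals
    static bool PrintIntrinsics = false;

    struct Compile {
      bool _print_inlining, _print_intrinsics;
      // method_has_* model per-method -XX:CompileCommand=option,...,PrintInlining flags.
      Compile(bool method_has_print_inlining, bool method_has_print_intrinsics)
        : _print_inlining(PrintInlining || method_has_print_inlining),
          _print_intrinsics(PrintIntrinsics || method_has_print_intrinsics) {}
      bool print_inlining()   const { return _print_inlining; }
      bool print_intrinsics() const { return _print_intrinsics; }
    };

    int main() {
      Compile C(/*method option PrintInlining*/ true, /*PrintIntrinsics*/ false);
      if (C.print_inlining()) puts("inlining trace enabled for this compile only");
      return 0;
    }
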
   26.21 @@ -710,7 +712,7 @@
   26.22    PhaseGVN gvn(node_arena(), estimated_size);
   26.23    set_initial_gvn(&gvn);
   26.24  
   26.25 -  if (PrintInlining  || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
   26.26 +  if (print_inlining() || print_intrinsics()) {
   26.27      _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
   26.28    }
   26.29    { // Scope for timing the parser
   26.30 @@ -937,7 +939,7 @@
   26.31      _inlining_progress(false),
   26.32      _inlining_incrementally(false),
   26.33      _print_inlining_list(NULL),
   26.34 -    _print_inlining(0) {
   26.35 +    _print_inlining_idx(0) {
   26.36    C = this;
   26.37  
   26.38  #ifndef PRODUCT
   26.39 @@ -3611,7 +3613,7 @@
   26.40  }
   26.41  
   26.42  void Compile::dump_inlining() {
   26.43 -  if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
   26.44 +  if (print_inlining() || print_intrinsics()) {
   26.45      // Print inlining message for candidates that we couldn't inline
   26.46      // for lack of space or non constant receiver
   26.47      for (int i = 0; i < _late_inlines.length(); i++) {
   26.48 @@ -3635,7 +3637,7 @@
   26.49        }
   26.50      }
   26.51      for (int i = 0; i < _print_inlining_list->length(); i++) {
   26.52 -      tty->print(_print_inlining_list->at(i).ss()->as_string());
   26.53 +      tty->print(_print_inlining_list->adr_at(i)->ss()->as_string());
   26.54      }
   26.55    }
   26.56  }
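
The at()-to-adr_at() changes here and in compile.hpp are the crash fix behind bug 8022585: GrowableArray::at() returns the PrintInliningBuffer by value, so set_cg() and the buffered stringStream were being applied to a temporary copy; adr_at() returns a pointer to the stored element, so mutations stick. A standalone model of the copy-semantics trap, using std::vector as a stand-in:

    #include <cassert>
    #include <vector>

    struct Buffer { int cg = 0; };

    struct GrowableArray {
      std::vector<Buffer> elems;
      Buffer  at(int i) const { return elems[i]; }   // returns a copy
      Buffer* adr_at(int i)   { return &elems[i]; }  // returns the element itself
    };

    int main() {
      GrowableArray a;
      a.elems.resize(1);
      a.at(0).cg = 42;            // lost: wrote through a temporary
      assert(a.elems[0].cg == 0);
      a.adr_at(0)->cg = 42;       // sticks: wrote through a pointer into the array
      assert(a.elems[0].cg == 42);
      return 0;
    }
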
    27.1 --- a/src/share/vm/opto/compile.hpp	Thu Sep 26 10:25:02 2013 -0400
    27.2 +++ b/src/share/vm/opto/compile.hpp	Fri Sep 27 10:08:56 2013 -0400
    27.3 @@ -312,6 +312,8 @@
    27.4    bool                  _do_method_data_update; // True if we generate code to update MethodData*s
    27.5    int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
    27.6    bool                  _print_assembly;        // True if we should dump assembly code for this compilation
    27.7 +  bool                  _print_inlining;        // True if we should print inlining for this compilation
    27.8 +  bool                  _print_intrinsics;      // True if we should print intrinsics for this compilation
    27.9  #ifndef PRODUCT
   27.10    bool                  _trace_opto_output;
   27.11    bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
   27.12 @@ -414,7 +416,7 @@
   27.13    };
   27.14  
   27.15    GrowableArray<PrintInliningBuffer>* _print_inlining_list;
   27.16 -  int _print_inlining;
   27.17 +  int _print_inlining_idx;
   27.18  
   27.19    // Only keep nodes in the expensive node list that need to be optimized
   27.20    void cleanup_expensive_nodes(PhaseIterGVN &igvn);
   27.21 @@ -426,24 +428,24 @@
   27.22   public:
   27.23  
   27.24    outputStream* print_inlining_stream() const {
   27.25 -    return _print_inlining_list->at(_print_inlining).ss();
   27.26 +    return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
   27.27    }
   27.28  
   27.29    void print_inlining_skip(CallGenerator* cg) {
   27.30 -    if (PrintInlining) {
   27.31 -      _print_inlining_list->at(_print_inlining).set_cg(cg);
   27.32 -      _print_inlining++;
   27.33 -      _print_inlining_list->insert_before(_print_inlining, PrintInliningBuffer());
   27.34 +    if (_print_inlining) {
   27.35 +      _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
   27.36 +      _print_inlining_idx++;
   27.37 +      _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
   27.38      }
   27.39    }
   27.40  
   27.41    void print_inlining_insert(CallGenerator* cg) {
   27.42 -    if (PrintInlining) {
   27.43 +    if (_print_inlining) {
   27.44        for (int i = 0; i < _print_inlining_list->length(); i++) {
   27.45 -        if (_print_inlining_list->at(i).cg() == cg) {
   27.46 +        if (_print_inlining_list->adr_at(i)->cg() == cg) {
   27.47            _print_inlining_list->insert_before(i+1, PrintInliningBuffer());
   27.48 -          _print_inlining = i+1;
   27.49 -          _print_inlining_list->at(i).set_cg(NULL);
   27.50 +          _print_inlining_idx = i+1;
   27.51 +          _print_inlining_list->adr_at(i)->set_cg(NULL);
   27.52            return;
   27.53          }
   27.54        }
   27.55 @@ -572,6 +574,10 @@
   27.56    int               AliasLevel() const          { return _AliasLevel; }
   27.57    bool              print_assembly() const       { return _print_assembly; }
   27.58    void          set_print_assembly(bool z)       { _print_assembly = z; }
   27.59 +  bool              print_inlining() const       { return _print_inlining; }
   27.60 +  void          set_print_inlining(bool z)       { _print_inlining = z; }
   27.61 +  bool              print_intrinsics() const     { return _print_intrinsics; }
   27.62 +  void          set_print_intrinsics(bool z)     { _print_intrinsics = z; }
   27.63    // check the CompilerOracle for special behaviours for this compile
   27.64    bool          method_has_option(const char * option) {
   27.65      return method() != NULL && method()->has_option(option);
    28.1 --- a/src/share/vm/opto/doCall.cpp	Thu Sep 26 10:25:02 2013 -0400
    28.2 +++ b/src/share/vm/opto/doCall.cpp	Fri Sep 27 10:08:56 2013 -0400
    28.3 @@ -41,9 +41,9 @@
    28.4  #include "runtime/sharedRuntime.hpp"
    28.5  
    28.6  void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
    28.7 -  if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
    28.8 +  if (TraceTypeProfile || C->print_inlining()) {
    28.9      outputStream* out = tty;
   28.10 -    if (!PrintInlining) {
   28.11 +    if (!C->print_inlining()) {
   28.12        if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
   28.13          method->print_short_name();
   28.14          tty->cr();
    29.1 --- a/src/share/vm/opto/library_call.cpp	Thu Sep 26 10:25:02 2013 -0400
    29.2 +++ b/src/share/vm/opto/library_call.cpp	Fri Sep 27 10:08:56 2013 -0400
    29.3 @@ -543,7 +543,7 @@
    29.4    Compile* C = kit.C;
    29.5    int nodes = C->unique();
    29.6  #ifndef PRODUCT
    29.7 -  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
    29.8 +  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    29.9      char buf[1000];
   29.10      const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
   29.11      tty->print_cr("Intrinsic %s", str);
   29.12 @@ -554,7 +554,7 @@
   29.13  
   29.14    // Try to inline the intrinsic.
   29.15    if (kit.try_to_inline()) {
   29.16 -    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
   29.17 +    if (C->print_intrinsics() || C->print_inlining()) {
   29.18        C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
   29.19      }
   29.20      C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
   29.21 @@ -570,7 +570,7 @@
   29.22    }
   29.23  
   29.24    // The intrinsic bailed out
   29.25 -  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
   29.26 +  if (C->print_intrinsics() || C->print_inlining()) {
   29.27      if (jvms->has_method()) {
   29.28        // Not a root compile.
   29.29        const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
   29.30 @@ -592,7 +592,7 @@
   29.31    int nodes = C->unique();
   29.32  #ifndef PRODUCT
   29.33    assert(is_predicted(), "sanity");
   29.34 -  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
   29.35 +  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
   29.36      char buf[1000];
   29.37      const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
   29.38      tty->print_cr("Predicate for intrinsic %s", str);
   29.39 @@ -603,7 +603,7 @@
   29.40  
   29.41    Node* slow_ctl = kit.try_to_predicate();
   29.42    if (!kit.failing()) {
   29.43 -    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
   29.44 +    if (C->print_intrinsics() || C->print_inlining()) {
   29.45        C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
   29.46      }
   29.47      C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
   29.48 @@ -617,7 +617,7 @@
   29.49    }
   29.50  
   29.51    // The intrinsic bailed out
   29.52 -  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
   29.53 +  if (C->print_intrinsics() || C->print_inlining()) {
   29.54      if (jvms->has_method()) {
   29.55        // Not a root compile.
   29.56        const char* msg = "failed to generate predicate for intrinsic";
   29.57 @@ -2299,7 +2299,7 @@
   29.58      const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
   29.59  
   29.60  #ifndef PRODUCT
   29.61 -    if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
   29.62 +    if (C->print_intrinsics() || C->print_inlining()) {
   29.63        tty->print("  from base type: ");  adr_type->dump();
   29.64        tty->print("  sharpened value: ");  tjp->dump();
   29.65      }
   29.66 @@ -3260,7 +3260,7 @@
   29.67    if (mirror_con == NULL)  return false;  // cannot happen?
   29.68  
   29.69  #ifndef PRODUCT
   29.70 -  if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
   29.71 +  if (C->print_intrinsics() || C->print_inlining()) {
   29.72      ciType* k = mirror_con->java_mirror_type();
   29.73      if (k) {
   29.74        tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
   29.75 @@ -3952,14 +3952,14 @@
   29.76  // caller sensitive methods.
   29.77  bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
   29.78  #ifndef PRODUCT
   29.79 -  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
   29.80 +  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
   29.81      tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
   29.82    }
   29.83  #endif
   29.84  
   29.85    if (!jvms()->has_method()) {
   29.86  #ifndef PRODUCT
   29.87 -    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
   29.88 +    if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
   29.89        tty->print_cr("  Bailing out because intrinsic was inlined at top level");
   29.90      }
   29.91  #endif
   29.92 @@ -3983,7 +3983,7 @@
   29.93        // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
   29.94        if (!m->caller_sensitive()) {
   29.95  #ifndef PRODUCT
   29.96 -        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
   29.97 +        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
   29.98            tty->print_cr("  Bailing out: CallerSensitive annotation expected at frame %d", n);
   29.99          }
  29.100  #endif
  29.101 @@ -3999,7 +3999,7 @@
  29.102          set_result(makecon(TypeInstPtr::make(caller_mirror)));
  29.103  
  29.104  #ifndef PRODUCT
  29.105 -        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
  29.106 +        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
  29.107            tty->print_cr("  Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
  29.108            tty->print_cr("  JVM state at this point:");
  29.109            for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
  29.110 @@ -4015,7 +4015,7 @@
  29.111    }
  29.112  
  29.113  #ifndef PRODUCT
  29.114 -  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
  29.115 +  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
  29.116      tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
  29.117      tty->print_cr("  JVM state at this point:");
  29.118      for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
    30.1 --- a/src/share/vm/prims/jni.cpp	Thu Sep 26 10:25:02 2013 -0400
    30.2 +++ b/src/share/vm/prims/jni.cpp	Fri Sep 27 10:08:56 2013 -0400
    30.3 @@ -5046,7 +5046,10 @@
    30.4  void TestReservedSpace_test();
    30.5  void TestReserveMemorySpecial_test();
    30.6  void TestVirtualSpace_test();
    30.7 -void MetaspaceAux_test();
    30.8 +void TestMetaspaceAux_test();
    30.9 +#if INCLUDE_ALL_GCS
   30.10 +void TestG1BiasedArray_test();
   30.11 +#endif
   30.12  
   30.13  void execute_internal_vm_tests() {
   30.14    if (ExecuteInternalVMTests) {
   30.15 @@ -5054,7 +5057,7 @@
   30.16      run_unit_test(TestReservedSpace_test());
   30.17      run_unit_test(TestReserveMemorySpecial_test());
   30.18      run_unit_test(TestVirtualSpace_test());
   30.19 -    run_unit_test(MetaspaceAux_test());
   30.20 +    run_unit_test(TestMetaspaceAux_test());
   30.21      run_unit_test(GlobalDefinitions::test_globals());
   30.22      run_unit_test(GCTimerAllTest::all());
   30.23      run_unit_test(arrayOopDesc::test_max_array_length());
   30.24 @@ -5066,6 +5069,7 @@
   30.25      run_unit_test(VMStructs::test());
   30.26  #endif
   30.27  #if INCLUDE_ALL_GCS
   30.28 +    run_unit_test(TestG1BiasedArray_test());
   30.29      run_unit_test(HeapRegionRemSet::test_prt());
   30.30  #endif
   30.31      tty->print_cr("All internal VM tests passed");
    31.1 --- a/src/share/vm/runtime/sharedRuntime.cpp	Thu Sep 26 10:25:02 2013 -0400
    31.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp	Fri Sep 27 10:08:56 2013 -0400
    31.3 @@ -1506,8 +1506,11 @@
    31.4                                                  info, CHECK_(methodHandle()));
    31.5          inline_cache->set_to_monomorphic(info);
    31.6        } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
    31.7 -        // Change to megamorphic
    31.8 -        inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
    31.9 +        // Potential change to megamorphic
   31.10 +        bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
   31.11 +        if (!successful) {
   31.12 +          inline_cache->set_to_clean();
   31.13 +        }
   31.14        } else {
   31.15          // Either clean or megamorphic
   31.16        }
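
set_to_megamorphic() now reports failure instead of being assumed to succeed, and the caller drops the inline cache back to the clean state when the transition cannot be completed, rather than leaving it half-updated. A minimal standalone model of that fallback:

    #include <cstdio>

    struct InlineCache {
      enum State { clean, monomorphic, megamorphic } state = monomorphic;
      bool set_to_megamorphic(bool can_transition) {
        if (!can_transition) return false;   // e.g. no suitable stub available
        state = megamorphic;
        return true;
      }
      void set_to_clean() { state = clean; }
    };

    int main() {
      InlineCache ic;
      if (!ic.set_to_megamorphic(/*can_transition=*/false)) {
        ic.set_to_clean();                   // the patch's new failure path
      }
      printf("state=%d\n", (int)ic.state);   // 0 == clean
      return 0;
    }
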
    32.1 --- a/src/share/vm/runtime/vm_version.hpp	Thu Sep 26 10:25:02 2013 -0400
    32.2 +++ b/src/share/vm/runtime/vm_version.hpp	Fri Sep 27 10:08:56 2013 -0400
    32.3 @@ -78,7 +78,13 @@
    32.4    static const char* jre_release_version();
    32.5  
    32.6    // does HW support an 8-byte compare-exchange operation?
    32.7 -  static bool supports_cx8()  {return _supports_cx8;}
    32.8 +  static bool supports_cx8()  {
    32.9 +#ifdef SUPPORTS_NATIVE_CX8
   32.10 +    return true;
   32.11 +#else
   32.12 +    return _supports_cx8;
   32.13 +#endif
   32.14 +  }
   32.15    // does HW support atomic get-and-set or atomic get-and-add?  Used
   32.16    // to guide intrinsification decisions for Unsafe atomic ops
   32.17    static bool supports_atomic_getset4()  {return _supports_atomic_getset4;}
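
Platforms that define SUPPORTS_NATIVE_CX8 at build time now get a constant true from supports_cx8(), letting the compiler fold away the capability check entirely; other platforms keep the runtime CPU-feature probe. A standalone illustration:

    #include <cstdio>

    // #define SUPPORTS_NATIVE_CX8   // would be set by the platform build files

    static bool _supports_cx8 = false;   // normally filled in by CPU feature probing

    static bool supports_cx8() {
    #ifdef SUPPORTS_NATIVE_CX8
      return true;                       // constant-folds; callers' checks become dead code
    #else
      return _supports_cx8;              // runtime probe result
    #endif
    }

    int main() {
      printf("8-byte compare-exchange: %s\n", supports_cx8() ? "yes" : "no");
      return 0;
    }
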
    33.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    33.2 +++ b/test/compiler/print/PrintInlining.java	Fri Sep 27 10:08:56 2013 -0400
    33.3 @@ -0,0 +1,36 @@
    33.4 +/*
    33.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    33.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    33.7 + *
    33.8 + * This code is free software; you can redistribute it and/or modify it
    33.9 + * under the terms of the GNU General Public License version 2 only, as
   33.10 + * published by the Free Software Foundation.
   33.11 + *
   33.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   33.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   33.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   33.15 + * version 2 for more details (a copy is included in the LICENSE file that
   33.16 + * accompanied this code).
   33.17 + *
   33.18 + * You should have received a copy of the GNU General Public License version
   33.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   33.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   33.21 + *
   33.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   33.23 + * or visit www.oracle.com if you need additional information or have any
   33.24 + * questions.
   33.25 + */
   33.26 +
   33.27 +/*
   33.28 + * @test
   33.29 + * @bug 8022585
    33.30 + * @summary VM crashes when run with -XX:+PrintInlining
   33.31 + * @run main/othervm -Xcomp -XX:+PrintInlining PrintInlining
   33.32 + *
   33.33 + */
   33.34 +
   33.35 +public class PrintInlining {
   33.36 +  public static void main(String[] args) {
   33.37 +    System.out.println("Passed");
   33.38 +  }
   33.39 +}
